From 5ac3d31bee5447e2d4fb85ecb4e68e21c18bc778 Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Mon, 31 Jul 2023 10:40:53 -0700 Subject: [PATCH 001/648] Updated --- 1.architectures/1.vpc_network/deploy.sh | 6 +++++ .../2.aws-parallelcluster/create-key-pair.sh | 4 +++ .../pcluster-workshop-key.pem | 27 +++++++++++++++++++ 2.amazon_machine_images/Makefile | 2 +- 2.amazon_machine_images/packer-ami.pkr.hcl | 2 +- 5 files changed, 39 insertions(+), 2 deletions(-) create mode 100755 1.architectures/1.vpc_network/deploy.sh create mode 100755 1.architectures/2.aws-parallelcluster/create-key-pair.sh create mode 100644 1.architectures/2.aws-parallelcluster/pcluster-workshop-key.pem diff --git a/1.architectures/1.vpc_network/deploy.sh b/1.architectures/1.vpc_network/deploy.sh new file mode 100755 index 00000000..6973efbc --- /dev/null +++ b/1.architectures/1.vpc_network/deploy.sh @@ -0,0 +1,6 @@ +aws cloudformation create-stack --stack-name vpc-stack-ml\ + --template-body file://2.vpc-one-az.yaml \ + --parameters ParameterKey=SubnetsAZ,ParameterValue=us-west-2a \ + ParameterKey=VPCName,ParameterValue="ML HPC VPC" \ + --capabilities CAPABILITY_IAM + diff --git a/1.architectures/2.aws-parallelcluster/create-key-pair.sh b/1.architectures/2.aws-parallelcluster/create-key-pair.sh new file mode 100755 index 00000000..d729c3b8 --- /dev/null +++ b/1.architectures/2.aws-parallelcluster/create-key-pair.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +aws ec2 create-key-pair --key-name pcluster-workshop-key --query KeyMaterial --output text > pcluster-workshop-key.pem +sudo chmod 600 pcluster-workshop-key.pem diff --git a/1.architectures/2.aws-parallelcluster/pcluster-workshop-key.pem b/1.architectures/2.aws-parallelcluster/pcluster-workshop-key.pem new file mode 100644 index 00000000..5fcf7be2 --- /dev/null +++ b/1.architectures/2.aws-parallelcluster/pcluster-workshop-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEApTVlw+55oXXOilUame4D6SFeeJ5asdXUOIJVzHxA2D5sMRJH 
+jPBRb3/16XOZtJ/tYkylkD4KRq6aD/fTtxnkQTWRB9hbRZHo5dDcla7MtnwiDIsc +0kXUh/B8Nzkq2LTCSrj07ePBGbijfktUwxwrbiQrEijET8S4NFsKvr8KqTO3VAHT +mqaOQNgXeNqw+cv/VmtqYqrK7FpPwKxvj0GlvRw2qGtjEN/HnX4KVaNRPm4czMpz +mrTLRukvXOEihztQlpavlNnakQ3AeeF7TbrZXN5Bzb8HVJQjLlw3yso5XaImbrW1 +0C7d2JjMnZyGIfD9NFCo3txOhDVPCyid2ovZTQIDAQABAoIBAQCfS3BXTl7a7vN6 +cHqYx2Xf6PvlQ5Lhe/MHlaxS4c/Icc/yNNTmsWVkJDahpYQkJDxIluzguuE/q+UC +1ou+Nenfcl3n+ZZv6zNsPEokzYm3vtM50vy0Rv6ZIKk0uht1Hj1kvRf1NLChM2X5 +RXtyMXwy1A7vkH1RCLYwTUR8DUqjP5yl3kZSsPJAQTIJjAJukLNRfsnpZTT9zbQi +FnDG46mCfA9D+eWyin6XF0SeQIH4UJlugzNWwmrafKL1ey84HmS4sez/Zuu4pCLW +jU6uFkM62MealNZfvK1tW5zKasBwFcQcjxmH1y5O0YCZ0onmySEcm/ocM0bfqm+a +0INLeH9lAoGBANKliuTL5RJYYzlFu8pxBktL25wEKkgq4JYyOB3nEHcoE8Nj6fXX +XdlMfwhq/CNjjgoAATZClZIgX5atzzponSFc8yOW3IgsdXxKZ12pILWI5ku4lTPC +oGyIEWsdFBpmZCsskiAFW0Rs7Hjy3HkquBF3kA9bC9rDkmcwSPyGoGvHAoGBAMjH +ZFljEtQYDWgwYcQNVd6/kcHVfl/MPq2/fvllYLaTneiUDfG2sGwNKcoY0Ont1zsN +ZGjf6EhFIVc9ej0d5wSMoiRvDRB5MyeBEFFIOw+STnohYUqvfkOhrHQN3mUGJlVx +8Dk47ndg10rRXaK+6AbWQBOACoBrvOnRnKzm9YpLAoGAWltISPxtNoFTjq2q8Bur +Z4ESosnmmS5LSCIikDEdjv9R+u5Q3XOUfjGiDCpYHZYGx6NY4b2OCzB30FqeGZ/m +RvloEkwULRVlD/uae0KhFT20C6cR2TITPnrffeLrTKe/MAXeaH3juGZAyWDj0dXR +PyzaTrvvSde6ZSkBJdFZ3uECgYBJaCU3TTLAGr8kHbRQ8rM3ioDnBoB60/kCm2X4 +FumiNxxfwWnLuwiLqNBjlGklbhDoBUB1249YhPXoqJB9seYZ0rUhbvyuzjE2u0hU +/3ygd9R7cKLapIkcfwrHL57k/rebO9ohgcAmI2DzeNKsQvIgNcsdlg3A4CMktb97 +s8GIOwKBgHs7d9yXGZJoxkt1g4HgDeo3H2DEAqyiAnxL4IaihJF5hRk8dNT6CiIh +ZBi275aUP4tZk+3UMp2TtR6e5QdbGnR5FBQ13uP4N27h9N5R4bgWmjhFwo3tFyzr +ea5Ltc3pjC6dgl8bGjsTqc/WMjnjQqxK34Kj74tiuect0O8iCfWe +-----END RSA PRIVATE KEY----- diff --git a/2.amazon_machine_images/Makefile b/2.amazon_machine_images/Makefile index f62bd9c5..bab77d1c 100644 --- a/2.amazon_machine_images/Makefile +++ b/2.amazon_machine_images/Makefile @@ -1,6 +1,6 @@ IMAGE=registry.gitlab.aws.dev/smml/benchmarking/tools/preflight DEPLOY_IMAGE=deploy -AWS_REGION=us-east-1 +AWS_REGION=us-west-2 container_build: docker build -t ${IMAGE} ./preflight 
container_run: diff --git a/2.amazon_machine_images/packer-ami.pkr.hcl b/2.amazon_machine_images/packer-ami.pkr.hcl index 36c3f377..19fba55a 100644 --- a/2.amazon_machine_images/packer-ami.pkr.hcl +++ b/2.amazon_machine_images/packer-ami.pkr.hcl @@ -33,7 +33,7 @@ variable "eks_version" { variable "aws_region" { type = string - default = "us-east-1" + default = "us-west-2" } variable "instance_type" { From 1ccf0f61342483f49acb30dd0dac52ed5b4c7c29 Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Mon, 31 Jul 2023 10:40:53 -0700 Subject: [PATCH 002/648] Updated --- 1.architectures/1.vpc_network/deploy.sh | 6 +++++ .../2.aws-parallelcluster/create-key-pair.sh | 4 +++ .../pcluster-workshop-key.pem | 27 +++++++++++++++++++ 2.amazon_machine_images/Makefile | 2 +- 2.amazon_machine_images/packer-ami.pkr.hcl | 2 +- 5 files changed, 39 insertions(+), 2 deletions(-) create mode 100755 1.architectures/1.vpc_network/deploy.sh create mode 100755 1.architectures/2.aws-parallelcluster/create-key-pair.sh create mode 100644 1.architectures/2.aws-parallelcluster/pcluster-workshop-key.pem diff --git a/1.architectures/1.vpc_network/deploy.sh b/1.architectures/1.vpc_network/deploy.sh new file mode 100755 index 00000000..6973efbc --- /dev/null +++ b/1.architectures/1.vpc_network/deploy.sh @@ -0,0 +1,6 @@ +aws cloudformation create-stack --stack-name vpc-stack-ml\ + --template-body file://2.vpc-one-az.yaml \ + --parameters ParameterKey=SubnetsAZ,ParameterValue=us-west-2a \ + ParameterKey=VPCName,ParameterValue="ML HPC VPC" \ + --capabilities CAPABILITY_IAM + diff --git a/1.architectures/2.aws-parallelcluster/create-key-pair.sh b/1.architectures/2.aws-parallelcluster/create-key-pair.sh new file mode 100755 index 00000000..d729c3b8 --- /dev/null +++ b/1.architectures/2.aws-parallelcluster/create-key-pair.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +aws ec2 create-key-pair --key-name pcluster-workshop-key --query KeyMaterial --output text > pcluster-workshop-key.pem +sudo chmod 600 
pcluster-workshop-key.pem diff --git a/1.architectures/2.aws-parallelcluster/pcluster-workshop-key.pem b/1.architectures/2.aws-parallelcluster/pcluster-workshop-key.pem new file mode 100644 index 00000000..5fcf7be2 --- /dev/null +++ b/1.architectures/2.aws-parallelcluster/pcluster-workshop-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEApTVlw+55oXXOilUame4D6SFeeJ5asdXUOIJVzHxA2D5sMRJH +jPBRb3/16XOZtJ/tYkylkD4KRq6aD/fTtxnkQTWRB9hbRZHo5dDcla7MtnwiDIsc +0kXUh/B8Nzkq2LTCSrj07ePBGbijfktUwxwrbiQrEijET8S4NFsKvr8KqTO3VAHT +mqaOQNgXeNqw+cv/VmtqYqrK7FpPwKxvj0GlvRw2qGtjEN/HnX4KVaNRPm4czMpz +mrTLRukvXOEihztQlpavlNnakQ3AeeF7TbrZXN5Bzb8HVJQjLlw3yso5XaImbrW1 +0C7d2JjMnZyGIfD9NFCo3txOhDVPCyid2ovZTQIDAQABAoIBAQCfS3BXTl7a7vN6 +cHqYx2Xf6PvlQ5Lhe/MHlaxS4c/Icc/yNNTmsWVkJDahpYQkJDxIluzguuE/q+UC +1ou+Nenfcl3n+ZZv6zNsPEokzYm3vtM50vy0Rv6ZIKk0uht1Hj1kvRf1NLChM2X5 +RXtyMXwy1A7vkH1RCLYwTUR8DUqjP5yl3kZSsPJAQTIJjAJukLNRfsnpZTT9zbQi +FnDG46mCfA9D+eWyin6XF0SeQIH4UJlugzNWwmrafKL1ey84HmS4sez/Zuu4pCLW +jU6uFkM62MealNZfvK1tW5zKasBwFcQcjxmH1y5O0YCZ0onmySEcm/ocM0bfqm+a +0INLeH9lAoGBANKliuTL5RJYYzlFu8pxBktL25wEKkgq4JYyOB3nEHcoE8Nj6fXX +XdlMfwhq/CNjjgoAATZClZIgX5atzzponSFc8yOW3IgsdXxKZ12pILWI5ku4lTPC +oGyIEWsdFBpmZCsskiAFW0Rs7Hjy3HkquBF3kA9bC9rDkmcwSPyGoGvHAoGBAMjH +ZFljEtQYDWgwYcQNVd6/kcHVfl/MPq2/fvllYLaTneiUDfG2sGwNKcoY0Ont1zsN +ZGjf6EhFIVc9ej0d5wSMoiRvDRB5MyeBEFFIOw+STnohYUqvfkOhrHQN3mUGJlVx +8Dk47ndg10rRXaK+6AbWQBOACoBrvOnRnKzm9YpLAoGAWltISPxtNoFTjq2q8Bur +Z4ESosnmmS5LSCIikDEdjv9R+u5Q3XOUfjGiDCpYHZYGx6NY4b2OCzB30FqeGZ/m +RvloEkwULRVlD/uae0KhFT20C6cR2TITPnrffeLrTKe/MAXeaH3juGZAyWDj0dXR +PyzaTrvvSde6ZSkBJdFZ3uECgYBJaCU3TTLAGr8kHbRQ8rM3ioDnBoB60/kCm2X4 +FumiNxxfwWnLuwiLqNBjlGklbhDoBUB1249YhPXoqJB9seYZ0rUhbvyuzjE2u0hU +/3ygd9R7cKLapIkcfwrHL57k/rebO9ohgcAmI2DzeNKsQvIgNcsdlg3A4CMktb97 +s8GIOwKBgHs7d9yXGZJoxkt1g4HgDeo3H2DEAqyiAnxL4IaihJF5hRk8dNT6CiIh +ZBi275aUP4tZk+3UMp2TtR6e5QdbGnR5FBQ13uP4N27h9N5R4bgWmjhFwo3tFyzr +ea5Ltc3pjC6dgl8bGjsTqc/WMjnjQqxK34Kj74tiuect0O8iCfWe +-----END 
RSA PRIVATE KEY----- diff --git a/2.amazon_machine_images/Makefile b/2.amazon_machine_images/Makefile index f62bd9c5..bab77d1c 100644 --- a/2.amazon_machine_images/Makefile +++ b/2.amazon_machine_images/Makefile @@ -1,6 +1,6 @@ IMAGE=registry.gitlab.aws.dev/smml/benchmarking/tools/preflight DEPLOY_IMAGE=deploy -AWS_REGION=us-east-1 +AWS_REGION=us-west-2 container_build: docker build -t ${IMAGE} ./preflight container_run: diff --git a/2.amazon_machine_images/packer-ami.pkr.hcl b/2.amazon_machine_images/packer-ami.pkr.hcl index 36c3f377..19fba55a 100644 --- a/2.amazon_machine_images/packer-ami.pkr.hcl +++ b/2.amazon_machine_images/packer-ami.pkr.hcl @@ -33,7 +33,7 @@ variable "eks_version" { variable "aws_region" { type = string - default = "us-east-1" + default = "us-west-2" } variable "instance_type" { From 3a5726d9c825d9d98883bc5b0163223cb67ee59d Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Tue, 1 Aug 2023 23:24:43 +0000 Subject: [PATCH 003/648] avoid commiting slurm logs and key pairs --- .gitignore | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 28357095..85e6c13c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,8 @@ .env.unversioned - +*.out +*.err *.log +*.pem inventory/packer-provisioner-ansible* # vscode: retain launch.json for sample invocation From f63d47a811c3184b9e374d0bdb263c01ac2a55ee Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Tue, 1 Aug 2023 23:24:43 +0000 Subject: [PATCH 004/648] avoid commiting slurm logs and key pairs --- .gitignore | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 28357095..85e6c13c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,8 @@ .env.unversioned - +*.out +*.err *.log +*.pem inventory/packer-provisioner-ansible* # vscode: retain launch.json for sample invocation From 609311d435ded425b14a4fec37dc7b4a28154c6c Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Thu, 3 Aug 2023 15:01:54 -0700 Subject: 
[PATCH 005/648] Updated --- .../create-additional-sg.sh | 7 ++ .../pcluster-with-monitoring-config.yaml | 103 ++++++++++++++++++ .../2.aws-parallelcluster/create-cluster.sh | 3 + .../2.aws-parallelcluster/install-pcluster.sh | 13 +++ .../pcluster-workshop-key.pem | 27 ----- 3.test_cases/4.DDP/0-setup.sh | 29 ----- .../1-create_environment.sh | 0 .../4.DDP/1-install-parallel-cluster.sh | 31 ------ .../2-calc_mean_std.slurm | 0 3.test_cases/4.DDP/2-download-data.sh | 17 --- 3.test_cases/4.DDP/3-create-vpc.sh | 3 - .../{head-node-scripts => }/3-main_dino.slurm | 0 .../4-compute_cls_features.slurm | 0 3.test_cases/4.DDP/4-create-ami.sh | 5 - 3.test_cases/4.DDP/5-create-key.sh | 4 - .../5-extract_labels.slurm | 0 3.test_cases/4.DDP/6-create-cluster.sh | 29 ----- .../6-global_knn.slurm | 0 3.test_cases/4.DDP/7-connect-to-cluster.sh | 3 - .../{head-node-scripts => }/7-plots.slurm | 0 3.test_cases/4.DDP/8-delete-cluster.sh | 3 - 3.test_cases/4.DDP/CODE_OF_CONDUCT.md | 4 - 3.test_cases/4.DDP/CONTRIBUTING.md | 59 ---------- 3.test_cases/4.DDP/LICENSE | 17 --- 3.test_cases/4.DDP/README.md | 92 +--------------- .../4.DDP/create-cluster-template.yaml | 81 -------------- 3.test_cases/4.DDP/create-cluster.yaml | 81 -------------- .../__pycache__/utils.cpython-38.pyc | Bin .../vision_transformer.cpython-38.pyc | Bin .../pyscripts/compute_CLS_features.py | 0 .../pyscripts/extract_image_labels.py | 0 .../pyscripts/global_kNN.py | 0 .../pyscripts/main_dino.py | 0 .../pyscripts/mean_std_dataset.py | 0 .../pyscripts/plot_in_2D.py | 0 .../pyscripts/setup.py | 0 .../pyscripts/utils.py | 0 .../pyscripts/vision_transformer.py | 0 .../pyscripts/visualise_attention.py | 0 .../{head-node-scripts => }/requirements.txt | 0 .../scDINO_full_pipeline.yaml | 0 41 files changed, 127 insertions(+), 484 deletions(-) create mode 100755 1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/create-additional-sg.sh create mode 100644 
1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/pcluster-with-monitoring-config.yaml create mode 100644 1.architectures/2.aws-parallelcluster/create-cluster.sh create mode 100755 1.architectures/2.aws-parallelcluster/install-pcluster.sh delete mode 100644 1.architectures/2.aws-parallelcluster/pcluster-workshop-key.pem delete mode 100755 3.test_cases/4.DDP/0-setup.sh rename 3.test_cases/4.DDP/{head-node-scripts => }/1-create_environment.sh (100%) delete mode 100755 3.test_cases/4.DDP/1-install-parallel-cluster.sh rename 3.test_cases/4.DDP/{head-node-scripts => }/2-calc_mean_std.slurm (100%) delete mode 100755 3.test_cases/4.DDP/2-download-data.sh delete mode 100755 3.test_cases/4.DDP/3-create-vpc.sh rename 3.test_cases/4.DDP/{head-node-scripts => }/3-main_dino.slurm (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/4-compute_cls_features.slurm (100%) delete mode 100755 3.test_cases/4.DDP/4-create-ami.sh delete mode 100755 3.test_cases/4.DDP/5-create-key.sh rename 3.test_cases/4.DDP/{head-node-scripts => }/5-extract_labels.slurm (100%) delete mode 100755 3.test_cases/4.DDP/6-create-cluster.sh rename 3.test_cases/4.DDP/{head-node-scripts => }/6-global_knn.slurm (100%) delete mode 100755 3.test_cases/4.DDP/7-connect-to-cluster.sh rename 3.test_cases/4.DDP/{head-node-scripts => }/7-plots.slurm (100%) delete mode 100644 3.test_cases/4.DDP/8-delete-cluster.sh delete mode 100644 3.test_cases/4.DDP/CODE_OF_CONDUCT.md delete mode 100644 3.test_cases/4.DDP/CONTRIBUTING.md delete mode 100644 3.test_cases/4.DDP/LICENSE delete mode 100644 3.test_cases/4.DDP/create-cluster-template.yaml delete mode 100644 3.test_cases/4.DDP/create-cluster.yaml rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/__pycache__/utils.cpython-38.pyc (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/__pycache__/vision_transformer.cpython-38.pyc (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/compute_CLS_features.py (100%) rename 
3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/extract_image_labels.py (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/global_kNN.py (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/main_dino.py (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/mean_std_dataset.py (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/plot_in_2D.py (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/setup.py (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/utils.py (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/vision_transformer.py (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/visualise_attention.py (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/requirements.txt (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/scDINO_full_pipeline.yaml (100%) diff --git a/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/create-additional-sg.sh b/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/create-additional-sg.sh new file mode 100755 index 00000000..40e2a322 --- /dev/null +++ b/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/create-additional-sg.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +read -p "Please enter the vpc id of your cluster: " vpc_id +echo -e "creating a security group with $vpc_id..." 
+security_group=$(aws ec2 create-security-group --group-name grafana-sg --description "Open HTTP/HTTPS ports" --vpc-id ${vpc_id} --output text) +aws ec2 authorize-security-group-ingress --group-id ${security_group} --protocol tcp --port 443 --cidr 0.0.0.0/0 +aws ec2 authorize-security-group-ingress --group-id ${security_group} --protocol tcp --port 80 —-cidr 0.0.0.0/0 diff --git a/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/pcluster-with-monitoring-config.yaml b/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/pcluster-with-monitoring-config.yaml new file mode 100644 index 00000000..b0fcd985 --- /dev/null +++ b/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/pcluster-with-monitoring-config.yaml @@ -0,0 +1,103 @@ +Imds: + ImdsSupport: v2.0 +Image: + Os: alinux2 + CustomAmi: ami-053d893ccc907c49c +Tags: + - Key: 'Grafana' + Value: 'true' +HeadNode: + InstanceType: c6i.8xlarge + Networking: + SubnetId: subnet-08cdcb1f4d6abc7f3 + AdditionalSecurityGroups: + - sg-0bbb389be5f1e6563 + Ssh: + KeyName: pcluster-key + LocalStorage: + RootVolume: + Size: 100 + DeleteOnTermination: true # that's your root and /home volume for users + CustomActions: + OnNodeConfigured: + Script: https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-monitoring/main/post-install.sh + Args: + - v0.9 + Iam: + AdditionalIamPolicies: # grant ECR, SSM and S3 read access + - Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore + - Policy: arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess + - Policy: arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly + - Policy: arn:aws:iam::aws:policy/CloudWatchFullAccess + - Policy: arn:aws:iam::aws:policy/AWSPriceListServiceFullAccess + - Policy: arn:aws:iam::aws:policy/AmazonSSMFullAccess + - Policy: arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess +Scheduling: + Scheduler: slurm + SlurmSettings: + ScaledownIdletime: 60 + SlurmQueues: + - Name: compute-gpu + 
CapacityType: ONDEMAND + Networking: + SubnetIds: + - subnet-04226fa682376b4f6 + PlacementGroup: + Enabled: true + ComputeSettings: + LocalStorage: + EphemeralVolume: + MountDir: /local_scratch # each instance has a local scratch on NVMe + RootVolume: + Size: 200 + CustomActions: + OnNodeConfigured: + Script: https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-monitoring/main/post-install.sh + Args: + - v0.9 + Iam: + AdditionalIamPolicies: + - Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore + - Policy: arn:aws:iam::aws:policy/CloudWatchFullAccess + - Policy: arn:aws:iam::aws:policy/AWSPriceListServiceFullAccess + - Policy: arn:aws:iam::aws:policy/AmazonSSMFullAccess + - Policy: arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess + # The capacity reservation section is recommended if you use instances + # with a targeted ODCRs. You can also use a capacity resource group and + # CapacityReservationResourceGroupArn if you want to regroup + # multiple reservations + #CapacityReservationTarget: + # CapacityReservationId: PLACEHOLDER_CAPACITY_RESERVATION_ID + ComputeResources: + - Name: distributed-ml + InstanceType: g4dn.12xlarge + MinCount: 0 # if min = max then capacity is maintained and will + MaxCount: 4 # not scale down + Efa: + Enabled: true +SharedStorage: + - MountDir: /fsx + Name: fsx + StorageType: FsxLustre + FsxLustreSettings: + StorageCapacity: 4800 # size it to your storage and throughput needs + PerUnitStorageThroughput: 250 # this can be increased to 500 and 100 + DeploymentType: PERSISTENT_2 + - Name: SharedEBS + StorageType: Ebs + MountDir: /apps # Store your shared apps & scripts here + EbsSettings: + VolumeType: gp3 + Size: 200 + Throughput: 300 + Iops: 6000 +Monitoring: + DetailedMonitoring: true + Logs: + CloudWatch: + Enabled: true # good for debug + Dashboards: + CloudWatch: + Enabled: false # provide basic dashboards + + diff --git a/1.architectures/2.aws-parallelcluster/create-cluster.sh 
b/1.architectures/2.aws-parallelcluster/create-cluster.sh new file mode 100644 index 00000000..af5827f2 --- /dev/null +++ b/1.architectures/2.aws-parallelcluster/create-cluster.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +pcluster create-cluster --cluster-configuration pcluster-config.yaml --cluster-name pcluster-ml --region us-west-2 --suppress-validators "type:InstanceTypeBaseAMICompatibleValidator" --rollback-on-failure "false" diff --git a/1.architectures/2.aws-parallelcluster/install-pcluster.sh b/1.architectures/2.aws-parallelcluster/install-pcluster.sh new file mode 100755 index 00000000..5ca2aaef --- /dev/null +++ b/1.architectures/2.aws-parallelcluster/install-pcluster.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +# Create Virtual env +python3 -m pip install --upgrade pip +python3 -m pip install --user --upgrade virtualenv + +python3 -m virtualenv ~/apc-ve + +source ~/apc-ve/bin/activate + +pip3 install awscli + +pip3 install aws-parallelcluster diff --git a/1.architectures/2.aws-parallelcluster/pcluster-workshop-key.pem b/1.architectures/2.aws-parallelcluster/pcluster-workshop-key.pem deleted file mode 100644 index 5fcf7be2..00000000 --- a/1.architectures/2.aws-parallelcluster/pcluster-workshop-key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEApTVlw+55oXXOilUame4D6SFeeJ5asdXUOIJVzHxA2D5sMRJH -jPBRb3/16XOZtJ/tYkylkD4KRq6aD/fTtxnkQTWRB9hbRZHo5dDcla7MtnwiDIsc -0kXUh/B8Nzkq2LTCSrj07ePBGbijfktUwxwrbiQrEijET8S4NFsKvr8KqTO3VAHT -mqaOQNgXeNqw+cv/VmtqYqrK7FpPwKxvj0GlvRw2qGtjEN/HnX4KVaNRPm4czMpz -mrTLRukvXOEihztQlpavlNnakQ3AeeF7TbrZXN5Bzb8HVJQjLlw3yso5XaImbrW1 -0C7d2JjMnZyGIfD9NFCo3txOhDVPCyid2ovZTQIDAQABAoIBAQCfS3BXTl7a7vN6 -cHqYx2Xf6PvlQ5Lhe/MHlaxS4c/Icc/yNNTmsWVkJDahpYQkJDxIluzguuE/q+UC -1ou+Nenfcl3n+ZZv6zNsPEokzYm3vtM50vy0Rv6ZIKk0uht1Hj1kvRf1NLChM2X5 -RXtyMXwy1A7vkH1RCLYwTUR8DUqjP5yl3kZSsPJAQTIJjAJukLNRfsnpZTT9zbQi -FnDG46mCfA9D+eWyin6XF0SeQIH4UJlugzNWwmrafKL1ey84HmS4sez/Zuu4pCLW 
-jU6uFkM62MealNZfvK1tW5zKasBwFcQcjxmH1y5O0YCZ0onmySEcm/ocM0bfqm+a -0INLeH9lAoGBANKliuTL5RJYYzlFu8pxBktL25wEKkgq4JYyOB3nEHcoE8Nj6fXX -XdlMfwhq/CNjjgoAATZClZIgX5atzzponSFc8yOW3IgsdXxKZ12pILWI5ku4lTPC -oGyIEWsdFBpmZCsskiAFW0Rs7Hjy3HkquBF3kA9bC9rDkmcwSPyGoGvHAoGBAMjH -ZFljEtQYDWgwYcQNVd6/kcHVfl/MPq2/fvllYLaTneiUDfG2sGwNKcoY0Ont1zsN -ZGjf6EhFIVc9ej0d5wSMoiRvDRB5MyeBEFFIOw+STnohYUqvfkOhrHQN3mUGJlVx -8Dk47ndg10rRXaK+6AbWQBOACoBrvOnRnKzm9YpLAoGAWltISPxtNoFTjq2q8Bur -Z4ESosnmmS5LSCIikDEdjv9R+u5Q3XOUfjGiDCpYHZYGx6NY4b2OCzB30FqeGZ/m -RvloEkwULRVlD/uae0KhFT20C6cR2TITPnrffeLrTKe/MAXeaH3juGZAyWDj0dXR -PyzaTrvvSde6ZSkBJdFZ3uECgYBJaCU3TTLAGr8kHbRQ8rM3ioDnBoB60/kCm2X4 -FumiNxxfwWnLuwiLqNBjlGklbhDoBUB1249YhPXoqJB9seYZ0rUhbvyuzjE2u0hU -/3ygd9R7cKLapIkcfwrHL57k/rebO9ohgcAmI2DzeNKsQvIgNcsdlg3A4CMktb97 -s8GIOwKBgHs7d9yXGZJoxkt1g4HgDeo3H2DEAqyiAnxL4IaihJF5hRk8dNT6CiIh -ZBi275aUP4tZk+3UMp2TtR6e5QdbGnR5FBQ13uP4N27h9N5R4bgWmjhFwo3tFyzr -ea5Ltc3pjC6dgl8bGjsTqc/WMjnjQqxK34Kj74tiuect0O8iCfWe ------END RSA PRIVATE KEY----- diff --git a/3.test_cases/4.DDP/0-setup.sh b/3.test_cases/4.DDP/0-setup.sh deleted file mode 100755 index f7caf544..00000000 --- a/3.test_cases/4.DDP/0-setup.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -aws configure - -# Create a New Role -aws iam create-role --role-name EC2Role --assume-role-policy-document file://./.env/EC2Role-Trust-Policy.json - -# Attach S3 policy -aws iam attach-role-policy --role-name EC2Role --policy-arn arn:aws:iam::aws:policy/AmazonS3FullAccess - -# Create New Instance Profile -aws iam create-instance-profile --instance-profile-name Workshop-Instance-Profile -aws iam add-role-to-instance-profile --role-name EC2Role --instance-profile-name Workshop-Instance-Profile - -instance_id=$(wget -q -O - http://169.254.169.254/latest/meta-data/instance-id) -echo $instance_id -# Associate new instance profile to EC2 instance -aws ec2 associate-iam-instance-profile --instance-id $instance_id --iam-instance-profile Name=Workshop-Instance-Profile - -# Verify 
-#aws ec2 describe-iam-instance-profile-associations - -# Create Virtual env -python3 -m pip install --upgrade pip -python3 -m pip install --user --upgrade virtualenv - -python3 -m virtualenv ~/apc-ve -# ACTIVATE ENV BEFORE STEP 2 -source ~/apc-ve/bin/activate diff --git a/3.test_cases/4.DDP/head-node-scripts/1-create_environment.sh b/3.test_cases/4.DDP/1-create_environment.sh similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/1-create_environment.sh rename to 3.test_cases/4.DDP/1-create_environment.sh diff --git a/3.test_cases/4.DDP/1-install-parallel-cluster.sh b/3.test_cases/4.DDP/1-install-parallel-cluster.sh deleted file mode 100755 index d0346c24..00000000 --- a/3.test_cases/4.DDP/1-install-parallel-cluster.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -# Install pip and other dependencies -sudo apt install python3-pip -sudo apt-get install unzip - -# Make sure you have installed the AWS Command Line Interface: -pip3 install awscli - -# Packer - Ubuntu -curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add - -sudo apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main" -sudo apt-get update && sudo apt-get install packer - -# Packer - Amazon Linux -#sudo yum install -y yum-utils -#sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo -#sudo yum -y install packer - -# Pcluster Dependencies -python3 -m pip install flask==2.2.5 -# Node.js -curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.38.0/install.sh | bash -chmod ug+x ~/.nvm/nvm.sh -source ~/.nvm/nvm.sh -nvm install --lts -node --version - - -# Install AWS ParallelCluster: -pip3 install aws-parallelcluster==3.1.4 diff --git a/3.test_cases/4.DDP/head-node-scripts/2-calc_mean_std.slurm b/3.test_cases/4.DDP/2-calc_mean_std.slurm similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/2-calc_mean_std.slurm rename to 3.test_cases/4.DDP/2-calc_mean_std.slurm diff --git 
a/3.test_cases/4.DDP/2-download-data.sh b/3.test_cases/4.DDP/2-download-data.sh deleted file mode 100755 index 993951fa..00000000 --- a/3.test_cases/4.DDP/2-download-data.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -mkdir data -cd data - -echo "" -echo "Downloading Deep Phenotyping PMBC Image Set Data ..." -wget https://www.research-collection.ethz.ch/bitstream/handle/20.500.11850/343106/DeepPhenotype_PBMC_ImageSet_YSeverin.zip -unzip DeepPhenotype_PBMC_ImageSet_YSeverin.zip -d ./data - -rm DeepPhenotype_PBMC_ImageSet_YSeverin.zip - -# Make S3 bucket -aws s3 mb s3://pcluster-ml-workshop - -# Upload Data to S3 -aws s3 cp ./data s3://pcluster-ml-workshop/ --recursive diff --git a/3.test_cases/4.DDP/3-create-vpc.sh b/3.test_cases/4.DDP/3-create-vpc.sh deleted file mode 100755 index 99357770..00000000 --- a/3.test_cases/4.DDP/3-create-vpc.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -aws cloudformation create-stack --stack-name create-large-scale-vpc-stack --template-body file://./.env/Large-Scale-VPC.yaml --parameters ParameterKey=SubnetsAZ,ParameterValue=us-west-2a --capabilities CAPABILITY_IAM diff --git a/3.test_cases/4.DDP/head-node-scripts/3-main_dino.slurm b/3.test_cases/4.DDP/3-main_dino.slurm similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/3-main_dino.slurm rename to 3.test_cases/4.DDP/3-main_dino.slurm diff --git a/3.test_cases/4.DDP/head-node-scripts/4-compute_cls_features.slurm b/3.test_cases/4.DDP/4-compute_cls_features.slurm similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/4-compute_cls_features.slurm rename to 3.test_cases/4.DDP/4-compute_cls_features.slurm diff --git a/3.test_cases/4.DDP/4-create-ami.sh b/3.test_cases/4.DDP/4-create-ami.sh deleted file mode 100755 index 61ca4be7..00000000 --- a/3.test_cases/4.DDP/4-create-ami.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -cd ./.env/ami/ -packer build -color=true -var-file variables.json nvidia-efa-ml-al2-enroot_pyxis.json | tee build_AL2.log -cd 
../.. diff --git a/3.test_cases/4.DDP/5-create-key.sh b/3.test_cases/4.DDP/5-create-key.sh deleted file mode 100755 index d151afc4..00000000 --- a/3.test_cases/4.DDP/5-create-key.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -aws ec2 create-key-pair --key-name pcluster-key --query KeyMaterial --output text > pcluster-key.pem -sudo chmod 600 pcluster-key.pem diff --git a/3.test_cases/4.DDP/head-node-scripts/5-extract_labels.slurm b/3.test_cases/4.DDP/5-extract_labels.slurm similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/5-extract_labels.slurm rename to 3.test_cases/4.DDP/5-extract_labels.slurm diff --git a/3.test_cases/4.DDP/6-create-cluster.sh b/3.test_cases/4.DDP/6-create-cluster.sh deleted file mode 100755 index f6ae2a42..00000000 --- a/3.test_cases/4.DDP/6-create-cluster.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# Get Private Subnet ID -export private_subnet_id=$(aws cloudformation --region us-west-2 describe-stacks --query "Stacks[?StackName=='create-large-scale-vpc-stack'][].Outputs[?OutputKey=='PrivateSubnet'].OutputValue" --output text) - -echo "Private Subnet ID: ${private_subnet_id}" - -# Get Public Subnet ID -export public_subnet_id=$(aws cloudformation --region us-west-2 describe-stacks --query "Stacks[?StackName=='create-large-scale-vpc-stack'][].Outputs[?OutputKey=='PublicSubnet'].OutputValue" --output text) - -echo "Public Subnet ID: ${public_subnet_id}" - -# Get AMI ID -export ami_id=$( aws ec2 describe-images --region us-west-2 --filters "Name=name,Values=pcluster-dist-training-ami-*" --query 'Images[*].[ImageId]' --output text) - -echo "AMI ID: ${ami_id}" - -#List Keys -aws ec2 describe-key-pairs --query "KeyPairs[*].{KeyPairId:KeyPairId,KeyName:KeyName,KeyType:KeyType}" --output table - -# Update Cluster creation config template -cat create-cluster-template.yaml | envsubst > create-cluster.yaml - -# Create Cluster -pcluster create-cluster --cluster-configuration create-cluster.yaml --cluster-name pcluster-ml 
--region us-west-2 --suppress-validators "type:InstanceTypeBaseAMICompatibleValidator" --rollback-on-failure "false" - - - - diff --git a/3.test_cases/4.DDP/head-node-scripts/6-global_knn.slurm b/3.test_cases/4.DDP/6-global_knn.slurm similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/6-global_knn.slurm rename to 3.test_cases/4.DDP/6-global_knn.slurm diff --git a/3.test_cases/4.DDP/7-connect-to-cluster.sh b/3.test_cases/4.DDP/7-connect-to-cluster.sh deleted file mode 100755 index e3ea5844..00000000 --- a/3.test_cases/4.DDP/7-connect-to-cluster.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -pcluster ssh -i 'pcluster-key.pem' -n pcluster-ml diff --git a/3.test_cases/4.DDP/head-node-scripts/7-plots.slurm b/3.test_cases/4.DDP/7-plots.slurm similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/7-plots.slurm rename to 3.test_cases/4.DDP/7-plots.slurm diff --git a/3.test_cases/4.DDP/8-delete-cluster.sh b/3.test_cases/4.DDP/8-delete-cluster.sh deleted file mode 100644 index 60413b89..00000000 --- a/3.test_cases/4.DDP/8-delete-cluster.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -pcluster delete-cluster --cluster-name pcluster-ml --region us-west-2 diff --git a/3.test_cases/4.DDP/CODE_OF_CONDUCT.md b/3.test_cases/4.DDP/CODE_OF_CONDUCT.md deleted file mode 100644 index 5b627cfa..00000000 --- a/3.test_cases/4.DDP/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,4 +0,0 @@ -## Code of Conduct -This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). -For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact -opensource-codeofconduct@amazon.com with any additional questions or comments. 
diff --git a/3.test_cases/4.DDP/CONTRIBUTING.md b/3.test_cases/4.DDP/CONTRIBUTING.md deleted file mode 100644 index c4b6a1c5..00000000 --- a/3.test_cases/4.DDP/CONTRIBUTING.md +++ /dev/null @@ -1,59 +0,0 @@ -# Contributing Guidelines - -Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional -documentation, we greatly value feedback and contributions from our community. - -Please read through this document before submitting any issues or pull requests to ensure we have all the necessary -information to effectively respond to your bug report or contribution. - - -## Reporting Bugs/Feature Requests - -We welcome you to use the GitHub issue tracker to report bugs or suggest features. - -When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already -reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: - -* A reproducible test case or series of steps -* The version of our code being used -* Any modifications you've made relevant to the bug -* Anything unusual about your environment or deployment - - -## Contributing via Pull Requests -Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: - -1. You are working against the latest source on the *main* branch. -2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. -3. You open an issue to discuss any significant work - we would hate for your time to be wasted. - -To send us a pull request, please: - -1. Fork the repository. -2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. -3. Ensure local tests pass. -4. Commit to your fork using clear commit messages. -5. 
Send us a pull request, answering any default questions in the pull request interface. -6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. - -GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and -[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). - - -## Finding contributions to work on -Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. - - -## Code of Conduct -This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). -For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact -opensource-codeofconduct@amazon.com with any additional questions or comments. - - -## Security issue notifications -If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. - - -## Licensing - -See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. diff --git a/3.test_cases/4.DDP/LICENSE b/3.test_cases/4.DDP/LICENSE deleted file mode 100644 index 09951d9f..00000000 --- a/3.test_cases/4.DDP/LICENSE +++ /dev/null @@ -1,17 +0,0 @@ -MIT No Attribution - -Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/3.test_cases/4.DDP/README.md b/3.test_cases/4.DDP/README.md index 65170add..93816b93 100644 --- a/3.test_cases/4.DDP/README.md +++ b/3.test_cases/4.DDP/README.md @@ -1,102 +1,14 @@ # Distributed Training of Self-Supervised Vision Transformers in PyTorch for multi-channel single-cells images with AWS ParallelCluster -Orchestrating distributed training jobs on a cluster of EC2 instances can be challemging from a Compute, Storage and Networking point of view. [AWS ParallelCluster](https://github.com/aws/aws-parallelcluster) is an open source cluster management tool that provides a user friendly way of creating a HPC cluster that can be used to run distributed training jobs. - Vision Transformers are increasingly popular in the computational drug discovery space, especially in the area of phenotypic characterisation using multi-channel single cell images. In this project, we will show how to pretrain and do downstream analyses for [scDINO](https://github.com/JacobHanimann/scDINO/tree/master) model. -

- -Fig. 1 - ParallelCluster Architecture for Distributed Training -
- -Figure 1. shows the architecture for distributed training with AWS ParallelCluster. The project is designed to introduce the concepts of deploying this architecture and running distributed training for training self-supervised vision transformers at a small scale, however the same architecture can be applied for training at large scale by adjusting the number and type of nodes, using other accelerators (NVIDIA A100 GPUs, AWS Trainium, Intel Habana Gaudi). - -This project is organized in a number of sequential steps. The scripts that belong to each step are organized in folders with corresponding names. To execute a step, we will change the current directory accordingly and execute scripts in their designated order. The prerequisites section is required, but there are no scripts associated with it. We will complete setting up prerequisites by following instructions. Steps 1 through 6 are required to complete the workshop. Step 7-Cleanup is optional. - -## 0. Prerequisites -The only prerequisite we have is that the user is able to launch an EC2 instance and has permission to access to GPU based instances. - -## 1. Create ParallelCluster -In this section, we will walk step-by-step how to create the cluster. Once you have launched an EC2 instance (ex: c6a.4xlarge), clone this repo and execute `source 0-setup.sh`. This script would run: -a) `aws configure`, -b) create and attach a new role `EC2Role` which has S3 access policy, -c) create and activate a new virual environment `apc-ve` which we will use next to create the cluster. - -### 1.1 Install ParallelCluster Tools -Once the virtual environment is created, `./1-install-parallel-cluster.sh` will install the necessary tools like `awscli`, `packer`to create custom AMIs, dependencies like `flask` and `node.js` and `aws-parallelcluster`. 
- -### 1.2 Download Deep Phenotyping Image Data +### Download Deep Phenotyping Image Data Execute `./2-download-data.sh` to download the image data, unzip it, create a S3 bucket `pcluster-ml-workshop` and upload to the S3 bucket. You can find more information about the dataset [here](https://www.research-collection.ethz.ch/handle/20.500.11850/343106). This dataset has 90852 images in the Training set and 22712 images in the Test set. The directory structure looks like below. For more information about the eight immune classes and donors, please refer to the [paper](https://www.biorxiv.org/content/10.1101/2023.01.16.524226v1.full.pdf) Training Directory | Test Directory for Donor1 --- | --- | ` -### 1.3 Create VPC -Next step before creating the cluster, we need to create a VPC with a public and a private subnet in a user specified Availability Zone (AZ). In this work, we used `us-west-2a` as the AZ in which the Compute Nodes and the FSx for Lustre file system will reside. Executing `./3-create-vpc.sh` will read the [Large-Scale-VPC.yaml](https://github.com/awsankur/aws-distributed-training-workshop-pcluster/blob/main/.env/Large-Scale-VPC.yaml) and create a cloud formation stack. In addition to the subnets, this stack will also deploy an Internet Gateway and NAT Gateway in the public subnet and deploy endpoints for Amzon S3 and Amazon DynamoDB. The VPC contains 2 CIDR blocks with 10.0.0.0/16 and 10.1.0.0/16. The first CIDR is used for the public subnet, the second is used for the private. - -You can check the progress of VPC creation in the CloudFormation stack `create-large-scale-vpc-stack`. Once the VPC creation is done in a few minutes, you can check the outputs of the stack which will show subnet and vpc IDs. We will use these next to create a cluster. - -

-Fig. 2 - Outputs of the VPC creation stack -
- -### 1.4 Create an Amazon Machine Image (AMI) -Before we can create a cluster, we need to create an AMI that will be used to startup the compute nodes and will have all the necessary dependencies for running a distributed training workload. We can also use a published Deep Learning (DLAMI) but they are often quite big and have a number of other features not needed for distributed training. To build a targeted AMI for distributed training you can execute `4-create-ami.sh`. This will kickoff [Packer](https://www.packer.io/) from [HashiCorp](https://www.hashicorp.com/) that will build an AMI using a configuration json [nvidia-efa-ml-al2-enroot_pyxis.json](https://github.com/awsankur/aws-distributed-training-workshop-pcluster/blob/main/.env/ami/nvidia-efa-ml-al2-enroot_pyxis.json) and [variables.json](https://github.com/awsankur/aws-distributed-training-workshop-pcluster/blob/main/.env/ami/variables.json). This AMI will be specific for Nvidia GPUs and will base on Amazon Linux 2. The software stack installed on the AMI through packer consists of: - -1. Nvidia Driver - 510.47.03 -2. CUDA - 11.6 -3. CUDNN - v8 -4. NVIDIA Container Toolkit - system latest -5. Docker - system latest -6. NCCL - v2.12.7-1 -7. Pyxis - v0.12.0" -8. Enroot - latest -9. AWS CLI V2 - latest for Nvidia Driver 510.47.03 -10. Nvidia Fabric Manager - latest -11. EFA Driver - latest -12. EFA OFI NCCL plugin - latest -13. NCCL Tests - Latest -14. Intel MKL - 2020.0-088 - -The [variables.json](https://github.com/awsankur/aws-distributed-training-workshop-pcluster/blob/main/.env/ami/variables.json) config specifies `us-west-2` as the default region and `parallel_cluster_version` as 3.1.4 but could be changed if other newer versions are needed. When the Packer run kicks off, it starts a `g4dn.12xlarge` instance as specified in the `instance_type` variable in `variables.json`, creates the AMI and saves in your account. Once the run is done, the `g4dn.12xlarge` instance is automatically terminated. 
The AMI creation could take up to ~30mins and once done it will show up in the AMI section of the EC2 console in the `Private Images` section as `pcluster-dist-training-ami-parallelcluster-3.1.4-amzn2`. - -This AMI also installs Pyxis and Enroot that enable us to run containers using Slurm. Please refer to the [official documentation](https://github.com/NVIDIA/pyxis#usage=) on Github for examples with Slurm. However, in this work, we will not use containers. We will show how to use Conda environments within your cluster in the next section. - -### 1.5 Create key-pair -One final step before we actually create the cluster is to create a key-pair that would be needed to access into the head node of the cluster. Executing `5-create-key.sh` will create a key pair and put the `pcluster-key.pem` in your repo. - -### 1.6 Create cluster -To create a cluster, execute `6-create-cluster.sh`. This will first print out the necessary inputs needed in the cluster configuration file such as public and private subnet-ids, ami-id and key name. Next it will substitute these variables in the file [create-cluster-template.yaml](https://github.com/awsankur/aws-distributed-training-workshop-pcluster/blob/main/create-cluster-template.yaml) and create a new cluster config file `create-cluster.yaml`. An important point to keep in mind is that the `--rollback-on-failure "false"` flag in [6-create-cluster.sh](https://github.com/awsankur/aws-distributed-training-workshop-pcluster/blob/main/6-create-cluster.sh) does not allow resources like the head node or compute nodes to terminate if for any reason the cluster fails to create. This is important for debugging purposes. You can view the progress of cluster creation in CloudFormation with a stack name of `pcluster-ml` which is the name specified for the cluster. - -A couple of key points to keep in mind when making changes to the cluster creation config file. -1. The Head Node does not need to have a GPU accelerator on it. 
A small instance such as a `c5.4xlarge` should suffice. -2. The Head Node will lie in the `Public Subnet` and the Compute Nodes will lie in the `Private Subnet`. -3. This config file will actually create a FSx for Lustre volume in the same AZ as the Compute Nodes. -4. To attach an existing FileSystemId, please follow steps in this [blog](https://swsmith.cc/posts/fsx-persistent-2-pcluster.html). For an existing FileSystem you need to create a security group which allows communication between the cluster and the file system. You would need to specify `FileSystemId` in the `FsxLustreSettings` section. -5. In `FsxLustreSettings` section, you can add `DeletionPolicy: Retain` to retain the FSx for Lustre filesystem even when cluster is deleted. -6. In the `ComputeResources` section, you can actually specify 0 resources. -7. Finally, to use EFA, please set the `Enabled` flag in the `Efa` section in `ComputeResources` as `true`. For a list of instance types that support EFA, please see [here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html#efa-instance-types). -8. Also, please keep in mind that all instance types (Ex: `p3.8xlarge`) do not support Instance store. If you want to use an instance that does not support Instance store please comment out the `EphemeralVolume` section. For a list of instances that support Instance store, please see [here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-store-volumes.html) - -To list clusters and view their status you can run `pcluster list-clusters`. And after the cluster is creadted you will see an output like below: - -

-
- -After the cluster is created, you can see the Head Node and Compute Nodes ready in the EC2 console and FSx for Lustre file system created in the FSx console which looks like below. You can check the AZ, Storage Capacity and Throughput per unit of storage. - -

-
- -If the cluster fails to create for any reason, you can view the `CloudFormation` events in the stack with the cluster name. You can also view detailed log files in `/var/log/parallelcluster` in the head node and compute nodes. In addition, you can also view logs in `chef-client.log` and `cloud-init.log` files in the head and compute nodes. - -### 1.7 Connect to the cluster -Once the cluster is created, you can ssh into the head node by executing `./7-connect-to-cluster.sh`. In the head node, you can run `sinfo` to see the available compute nodes and it will look like below: -

-
-This output shows that there are 2 `static` nodes (same as Min Capacity in cluster creation config) `train-g4dn-12xl-st-train-g4dn-12xl-1` and `train-g4dn-12xl-st-train-g4dn-12xl-2` and 4 `dynamic` nodes `train-g4dn-12xl-dy-train-g4dn-12xl-[1-4]`. The `dynamic` nodes are those that can be spun up on demand. From the head node you can ssh into the compute nodes by executing `ssh train-g4dn-12xl-st-train-g4dn-12xl-1` or `ssh train-g4dn-12xl-st-train-g4dn-12xl-2`. - ## 2. Run Distributed Training jobs In this section, we will show how distributed training slurm jobs can be submitted from the head node. But first we need to make sure the cluster has access to the training data and has all the relevant dependencies installed in the conda environment. @@ -135,8 +47,6 @@ Executing `squeue` will list all running jobs on the static and dynamic compute c. Similarly run the rest of the downstream analyses to extract CLS features (`sbatch 4-compute_cls_features.slurm`), extract labels (`sbatch 5-extract_labels.slurm`), run knn analyses (`sbatch 6-global_knn.slurm`), and generate plots (`sbatch 7-plots.slurm`) -## 3. Cleanup -Finally, to delete the cluster you can execute `./8-delete-cluster.sh`. This will terminate the compute and head nodes and also the FSx for Lustre volume if `DeletionPolicy: Retain` is not explicitly mentioned in `FsxLustreSettings`. 
diff --git a/3.test_cases/4.DDP/create-cluster-template.yaml b/3.test_cases/4.DDP/create-cluster-template.yaml deleted file mode 100644 index b08548b1..00000000 --- a/3.test_cases/4.DDP/create-cluster-template.yaml +++ /dev/null @@ -1,81 +0,0 @@ -Region: us-west-2 -Image: - Os: alinux2 - CustomAmi: $ami_id -HeadNode: - InstanceType: c5.4xlarge - Ssh: - KeyName: pcluster-key - Iam: - ## (Un)comment S3 Access and provide one of your S3 buckets name - ## https://docs.aws.amazon.com/parallelcluster/latest/ug/HeadNode-v3.html#HeadNode-v3-Iam - S3Access: - - BucketName: pcluster-ml-workshop - EnableWriteAccess: true - AdditionalIamPolicies: - - Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore - - Policy: arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly - Networking: - SubnetId: ${public_subnet_id} - LocalStorage: - RootVolume: - Size: 100 -SharedStorage: - # install your shared applications in /apps, it can be archived through - # a snapshot that you can reuse with other clusters. - - Name: SharedEBS - StorageType: Ebs - MountDir: /apps - EbsSettings: - VolumeType: gp3 - Size: 200 - Throughput: 300 - Iops: 6000 - - Name: FsxLustre0 - StorageType: FsxLustre - MountDir: /fsx - FsxLustreSettings: - StorageCapacity: 4800 - DeploymentType: PERSISTENT_1 - PerUnitStorageThroughput: 200 - DataCompressionType: LZ4 -## Review the following doc: -## https://docs.aws.amazon.com/parallelcluster/latest/ug/Scheduling-v3.html -## If local storage is needed this can be configured as required -Scheduling: - Scheduler: slurm - SlurmQueues: - - Name: train-g4dn-2xl - ComputeSettings: - LocalStorage: - # NVMe drives will be set in RAID0 - EphemeralVolume: - MountDir: /local_scratch - # This is your root volume - RootVolume: - Size: 200 - ## We set the MinCount default to 0 to confirm that we get the - ## instances from our ODCR when running a job. 
Then we change MinCount - ## to 16 in order to avoid scale down - ComputeResources: - - MinCount: 2 - MaxCount: 6 - InstanceType: g4dn.2xlarge - Name: train-g4dn-2xl - Efa: - Enabled: false - GdrSupport: false - Networking: - PlacementGroup: - Enabled: true - SubnetIds: - - ${private_subnet_id} - ## for IAM see this doc: - ## https://docs.aws.amazon.com/parallelcluster/latest/ug/Scheduling-v3.html#Scheduling-v3-SlurmQueues-Iam - Iam: - S3Access: - - BucketName: pcluster-ml-workshop - AdditionalIamPolicies: - - Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore - - Policy: arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly - diff --git a/3.test_cases/4.DDP/create-cluster.yaml b/3.test_cases/4.DDP/create-cluster.yaml deleted file mode 100644 index 229be885..00000000 --- a/3.test_cases/4.DDP/create-cluster.yaml +++ /dev/null @@ -1,81 +0,0 @@ -Region: us-west-2 -Image: - Os: alinux2 - CustomAmi: ami-04912fde9e80170b8 -HeadNode: - InstanceType: c5.4xlarge - Ssh: - KeyName: pcluster-key - Iam: - ## (Un)comment S3 Access and provide one of your S3 buckets name - ## https://docs.aws.amazon.com/parallelcluster/latest/ug/HeadNode-v3.html#HeadNode-v3-Iam - S3Access: - - BucketName: pcluster-ml-workshop - EnableWriteAccess: true - AdditionalIamPolicies: - - Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore - - Policy: arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly - Networking: - SubnetId: subnet-0e0944955bfdce9f1 - LocalStorage: - RootVolume: - Size: 100 -SharedStorage: - # install your shared applications in /apps, it can be archived through - # a snapshot that you can reuse with other clusters. 
- - Name: SharedEBS - StorageType: Ebs - MountDir: /apps - EbsSettings: - VolumeType: gp3 - Size: 200 - Throughput: 300 - Iops: 6000 - - Name: FsxLustre0 - StorageType: FsxLustre - MountDir: /fsx - FsxLustreSettings: - StorageCapacity: 4800 - DeploymentType: PERSISTENT_1 - PerUnitStorageThroughput: 200 - DataCompressionType: LZ4 -## Review the following doc: -## https://docs.aws.amazon.com/parallelcluster/latest/ug/Scheduling-v3.html -## If local storage is needed this can be configured as required -Scheduling: - Scheduler: slurm - SlurmQueues: - - Name: train-g4dn-2xl - ComputeSettings: - LocalStorage: - # NVMe drives will be set in RAID0 - EphemeralVolume: - MountDir: /local_scratch - # This is your root volume - RootVolume: - Size: 200 - ## We set the MinCount default to 0 to confirm that we get the - ## instances from our ODCR when running a job. Then we change MinCount - ## to 16 in order to avoid scale down - ComputeResources: - - MinCount: 2 - MaxCount: 6 - InstanceType: g4dn.2xlarge - Name: train-g4dn-2xl - Efa: - Enabled: false - GdrSupport: false - Networking: - PlacementGroup: - Enabled: true - SubnetIds: - - subnet-01526f003e8c4e085 - ## for IAM see this doc: - ## https://docs.aws.amazon.com/parallelcluster/latest/ug/Scheduling-v3.html#Scheduling-v3-SlurmQueues-Iam - Iam: - S3Access: - - BucketName: pcluster-ml-workshop - AdditionalIamPolicies: - - Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore - - Policy: arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly - diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/__pycache__/utils.cpython-38.pyc b/3.test_cases/4.DDP/pyscripts/__pycache__/utils.cpython-38.pyc similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/__pycache__/utils.cpython-38.pyc rename to 3.test_cases/4.DDP/pyscripts/__pycache__/utils.cpython-38.pyc diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/__pycache__/vision_transformer.cpython-38.pyc 
b/3.test_cases/4.DDP/pyscripts/__pycache__/vision_transformer.cpython-38.pyc similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/__pycache__/vision_transformer.cpython-38.pyc rename to 3.test_cases/4.DDP/pyscripts/__pycache__/vision_transformer.cpython-38.pyc diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/compute_CLS_features.py b/3.test_cases/4.DDP/pyscripts/compute_CLS_features.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/compute_CLS_features.py rename to 3.test_cases/4.DDP/pyscripts/compute_CLS_features.py diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/extract_image_labels.py b/3.test_cases/4.DDP/pyscripts/extract_image_labels.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/extract_image_labels.py rename to 3.test_cases/4.DDP/pyscripts/extract_image_labels.py diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/global_kNN.py b/3.test_cases/4.DDP/pyscripts/global_kNN.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/global_kNN.py rename to 3.test_cases/4.DDP/pyscripts/global_kNN.py diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/main_dino.py b/3.test_cases/4.DDP/pyscripts/main_dino.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/main_dino.py rename to 3.test_cases/4.DDP/pyscripts/main_dino.py diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/mean_std_dataset.py b/3.test_cases/4.DDP/pyscripts/mean_std_dataset.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/mean_std_dataset.py rename to 3.test_cases/4.DDP/pyscripts/mean_std_dataset.py diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/plot_in_2D.py b/3.test_cases/4.DDP/pyscripts/plot_in_2D.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/plot_in_2D.py rename to 3.test_cases/4.DDP/pyscripts/plot_in_2D.py diff 
--git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/setup.py b/3.test_cases/4.DDP/pyscripts/setup.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/setup.py rename to 3.test_cases/4.DDP/pyscripts/setup.py diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/utils.py b/3.test_cases/4.DDP/pyscripts/utils.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/utils.py rename to 3.test_cases/4.DDP/pyscripts/utils.py diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/vision_transformer.py b/3.test_cases/4.DDP/pyscripts/vision_transformer.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/vision_transformer.py rename to 3.test_cases/4.DDP/pyscripts/vision_transformer.py diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/visualise_attention.py b/3.test_cases/4.DDP/pyscripts/visualise_attention.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/visualise_attention.py rename to 3.test_cases/4.DDP/pyscripts/visualise_attention.py diff --git a/3.test_cases/4.DDP/head-node-scripts/requirements.txt b/3.test_cases/4.DDP/requirements.txt similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/requirements.txt rename to 3.test_cases/4.DDP/requirements.txt diff --git a/3.test_cases/4.DDP/head-node-scripts/scDINO_full_pipeline.yaml b/3.test_cases/4.DDP/scDINO_full_pipeline.yaml similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/scDINO_full_pipeline.yaml rename to 3.test_cases/4.DDP/scDINO_full_pipeline.yaml From 8f457063d003d23126681572ec33c7d2cc90fa40 Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Thu, 3 Aug 2023 15:01:54 -0700 Subject: [PATCH 006/648] Updated --- .../create-additional-sg.sh | 7 ++ .../pcluster-with-monitoring-config.yaml | 103 ++++++++++++++++++ .../2.aws-parallelcluster/create-cluster.sh | 3 + .../2.aws-parallelcluster/install-pcluster.sh | 13 +++ 
.../pcluster-workshop-key.pem | 27 ----- 3.test_cases/4.DDP/0-setup.sh | 29 ----- .../1-create_environment.sh | 0 .../4.DDP/1-install-parallel-cluster.sh | 31 ------ .../2-calc_mean_std.slurm | 0 3.test_cases/4.DDP/2-download-data.sh | 17 --- 3.test_cases/4.DDP/3-create-vpc.sh | 3 - .../{head-node-scripts => }/3-main_dino.slurm | 0 .../4-compute_cls_features.slurm | 0 3.test_cases/4.DDP/4-create-ami.sh | 5 - 3.test_cases/4.DDP/5-create-key.sh | 4 - .../5-extract_labels.slurm | 0 3.test_cases/4.DDP/6-create-cluster.sh | 29 ----- .../6-global_knn.slurm | 0 3.test_cases/4.DDP/7-connect-to-cluster.sh | 3 - .../{head-node-scripts => }/7-plots.slurm | 0 3.test_cases/4.DDP/8-delete-cluster.sh | 3 - 3.test_cases/4.DDP/CODE_OF_CONDUCT.md | 4 - 3.test_cases/4.DDP/CONTRIBUTING.md | 59 ---------- 3.test_cases/4.DDP/LICENSE | 17 --- 3.test_cases/4.DDP/README.md | 92 +--------------- .../4.DDP/create-cluster-template.yaml | 81 -------------- 3.test_cases/4.DDP/create-cluster.yaml | 81 -------------- .../__pycache__/utils.cpython-38.pyc | Bin .../vision_transformer.cpython-38.pyc | Bin .../pyscripts/compute_CLS_features.py | 0 .../pyscripts/extract_image_labels.py | 0 .../pyscripts/global_kNN.py | 0 .../pyscripts/main_dino.py | 0 .../pyscripts/mean_std_dataset.py | 0 .../pyscripts/plot_in_2D.py | 0 .../pyscripts/setup.py | 0 .../pyscripts/utils.py | 0 .../pyscripts/vision_transformer.py | 0 .../pyscripts/visualise_attention.py | 0 .../{head-node-scripts => }/requirements.txt | 0 .../scDINO_full_pipeline.yaml | 0 41 files changed, 127 insertions(+), 484 deletions(-) create mode 100755 1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/create-additional-sg.sh create mode 100644 1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/pcluster-with-monitoring-config.yaml create mode 100644 1.architectures/2.aws-parallelcluster/create-cluster.sh create mode 100755 1.architectures/2.aws-parallelcluster/install-pcluster.sh delete mode 100644 
1.architectures/2.aws-parallelcluster/pcluster-workshop-key.pem delete mode 100755 3.test_cases/4.DDP/0-setup.sh rename 3.test_cases/4.DDP/{head-node-scripts => }/1-create_environment.sh (100%) delete mode 100755 3.test_cases/4.DDP/1-install-parallel-cluster.sh rename 3.test_cases/4.DDP/{head-node-scripts => }/2-calc_mean_std.slurm (100%) delete mode 100755 3.test_cases/4.DDP/2-download-data.sh delete mode 100755 3.test_cases/4.DDP/3-create-vpc.sh rename 3.test_cases/4.DDP/{head-node-scripts => }/3-main_dino.slurm (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/4-compute_cls_features.slurm (100%) delete mode 100755 3.test_cases/4.DDP/4-create-ami.sh delete mode 100755 3.test_cases/4.DDP/5-create-key.sh rename 3.test_cases/4.DDP/{head-node-scripts => }/5-extract_labels.slurm (100%) delete mode 100755 3.test_cases/4.DDP/6-create-cluster.sh rename 3.test_cases/4.DDP/{head-node-scripts => }/6-global_knn.slurm (100%) delete mode 100755 3.test_cases/4.DDP/7-connect-to-cluster.sh rename 3.test_cases/4.DDP/{head-node-scripts => }/7-plots.slurm (100%) delete mode 100644 3.test_cases/4.DDP/8-delete-cluster.sh delete mode 100644 3.test_cases/4.DDP/CODE_OF_CONDUCT.md delete mode 100644 3.test_cases/4.DDP/CONTRIBUTING.md delete mode 100644 3.test_cases/4.DDP/LICENSE delete mode 100644 3.test_cases/4.DDP/create-cluster-template.yaml delete mode 100644 3.test_cases/4.DDP/create-cluster.yaml rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/__pycache__/utils.cpython-38.pyc (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/__pycache__/vision_transformer.cpython-38.pyc (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/compute_CLS_features.py (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/extract_image_labels.py (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/global_kNN.py (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/main_dino.py (100%) rename 
3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/mean_std_dataset.py (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/plot_in_2D.py (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/setup.py (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/utils.py (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/vision_transformer.py (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/pyscripts/visualise_attention.py (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/requirements.txt (100%) rename 3.test_cases/4.DDP/{head-node-scripts => }/scDINO_full_pipeline.yaml (100%) diff --git a/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/create-additional-sg.sh b/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/create-additional-sg.sh new file mode 100755 index 00000000..40e2a322 --- /dev/null +++ b/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/create-additional-sg.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +read -p "Please enter the vpc id of your cluster: " vpc_id +echo -e "creating a security group with $vpc_id..." 
+security_group=$(aws ec2 create-security-group --group-name grafana-sg --description "Open HTTP/HTTPS ports" --vpc-id ${vpc_id} --output text) +aws ec2 authorize-security-group-ingress --group-id ${security_group} --protocol tcp --port 443 --cidr 0.0.0.0/0 +aws ec2 authorize-security-group-ingress --group-id ${security_group} --protocol tcp --port 80 —-cidr 0.0.0.0/0 diff --git a/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/pcluster-with-monitoring-config.yaml b/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/pcluster-with-monitoring-config.yaml new file mode 100644 index 00000000..b0fcd985 --- /dev/null +++ b/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/pcluster-with-monitoring-config.yaml @@ -0,0 +1,103 @@ +Imds: + ImdsSupport: v2.0 +Image: + Os: alinux2 + CustomAmi: ami-053d893ccc907c49c +Tags: + - Key: 'Grafana' + Value: 'true' +HeadNode: + InstanceType: c6i.8xlarge + Networking: + SubnetId: subnet-08cdcb1f4d6abc7f3 + AdditionalSecurityGroups: + - sg-0bbb389be5f1e6563 + Ssh: + KeyName: pcluster-key + LocalStorage: + RootVolume: + Size: 100 + DeleteOnTermination: true # that's your root and /home volume for users + CustomActions: + OnNodeConfigured: + Script: https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-monitoring/main/post-install.sh + Args: + - v0.9 + Iam: + AdditionalIamPolicies: # grant ECR, SSM and S3 read access + - Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore + - Policy: arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess + - Policy: arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly + - Policy: arn:aws:iam::aws:policy/CloudWatchFullAccess + - Policy: arn:aws:iam::aws:policy/AWSPriceListServiceFullAccess + - Policy: arn:aws:iam::aws:policy/AmazonSSMFullAccess + - Policy: arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess +Scheduling: + Scheduler: slurm + SlurmSettings: + ScaledownIdletime: 60 + SlurmQueues: + - Name: compute-gpu + 
CapacityType: ONDEMAND + Networking: + SubnetIds: + - subnet-04226fa682376b4f6 + PlacementGroup: + Enabled: true + ComputeSettings: + LocalStorage: + EphemeralVolume: + MountDir: /local_scratch # each instance has a local scratch on NVMe + RootVolume: + Size: 200 + CustomActions: + OnNodeConfigured: + Script: https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-monitoring/main/post-install.sh + Args: + - v0.9 + Iam: + AdditionalIamPolicies: + - Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore + - Policy: arn:aws:iam::aws:policy/CloudWatchFullAccess + - Policy: arn:aws:iam::aws:policy/AWSPriceListServiceFullAccess + - Policy: arn:aws:iam::aws:policy/AmazonSSMFullAccess + - Policy: arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess + # The capacity reservation section is recommended if you use instances + # with a targeted ODCRs. You can also use a capacity resource group and + # CapacityReservationResourceGroupArn if you want to regroup + # multiple reservations + #CapacityReservationTarget: + # CapacityReservationId: PLACEHOLDER_CAPACITY_RESERVATION_ID + ComputeResources: + - Name: distributed-ml + InstanceType: g4dn.12xlarge + MinCount: 0 # if min = max then capacity is maintained and will + MaxCount: 4 # not scale down + Efa: + Enabled: true +SharedStorage: + - MountDir: /fsx + Name: fsx + StorageType: FsxLustre + FsxLustreSettings: + StorageCapacity: 4800 # size it to your storage and throughput needs + PerUnitStorageThroughput: 250 # this can be increased to 500 and 100 + DeploymentType: PERSISTENT_2 + - Name: SharedEBS + StorageType: Ebs + MountDir: /apps # Store your shared apps & scripts here + EbsSettings: + VolumeType: gp3 + Size: 200 + Throughput: 300 + Iops: 6000 +Monitoring: + DetailedMonitoring: true + Logs: + CloudWatch: + Enabled: true # good for debug + Dashboards: + CloudWatch: + Enabled: false # provide basic dashboards + + diff --git a/1.architectures/2.aws-parallelcluster/create-cluster.sh 
b/1.architectures/2.aws-parallelcluster/create-cluster.sh new file mode 100644 index 00000000..af5827f2 --- /dev/null +++ b/1.architectures/2.aws-parallelcluster/create-cluster.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +pcluster create-cluster --cluster-configuration pcluster-config.yaml --cluster-name pcluster-ml --region us-west-2 --suppress-validators "type:InstanceTypeBaseAMICompatibleValidator" --rollback-on-failure "false" diff --git a/1.architectures/2.aws-parallelcluster/install-pcluster.sh b/1.architectures/2.aws-parallelcluster/install-pcluster.sh new file mode 100755 index 00000000..5ca2aaef --- /dev/null +++ b/1.architectures/2.aws-parallelcluster/install-pcluster.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +# Create Virtual env +python3 -m pip install --upgrade pip +python3 -m pip install --user --upgrade virtualenv + +python3 -m virtualenv ~/apc-ve + +source ~/apc-ve/bin/activate + +pip3 install awscli + +pip3 install aws-parallelcluster diff --git a/1.architectures/2.aws-parallelcluster/pcluster-workshop-key.pem b/1.architectures/2.aws-parallelcluster/pcluster-workshop-key.pem deleted file mode 100644 index 5fcf7be2..00000000 --- a/1.architectures/2.aws-parallelcluster/pcluster-workshop-key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEApTVlw+55oXXOilUame4D6SFeeJ5asdXUOIJVzHxA2D5sMRJH -jPBRb3/16XOZtJ/tYkylkD4KRq6aD/fTtxnkQTWRB9hbRZHo5dDcla7MtnwiDIsc -0kXUh/B8Nzkq2LTCSrj07ePBGbijfktUwxwrbiQrEijET8S4NFsKvr8KqTO3VAHT -mqaOQNgXeNqw+cv/VmtqYqrK7FpPwKxvj0GlvRw2qGtjEN/HnX4KVaNRPm4czMpz -mrTLRukvXOEihztQlpavlNnakQ3AeeF7TbrZXN5Bzb8HVJQjLlw3yso5XaImbrW1 -0C7d2JjMnZyGIfD9NFCo3txOhDVPCyid2ovZTQIDAQABAoIBAQCfS3BXTl7a7vN6 -cHqYx2Xf6PvlQ5Lhe/MHlaxS4c/Icc/yNNTmsWVkJDahpYQkJDxIluzguuE/q+UC -1ou+Nenfcl3n+ZZv6zNsPEokzYm3vtM50vy0Rv6ZIKk0uht1Hj1kvRf1NLChM2X5 -RXtyMXwy1A7vkH1RCLYwTUR8DUqjP5yl3kZSsPJAQTIJjAJukLNRfsnpZTT9zbQi -FnDG46mCfA9D+eWyin6XF0SeQIH4UJlugzNWwmrafKL1ey84HmS4sez/Zuu4pCLW 
-jU6uFkM62MealNZfvK1tW5zKasBwFcQcjxmH1y5O0YCZ0onmySEcm/ocM0bfqm+a -0INLeH9lAoGBANKliuTL5RJYYzlFu8pxBktL25wEKkgq4JYyOB3nEHcoE8Nj6fXX -XdlMfwhq/CNjjgoAATZClZIgX5atzzponSFc8yOW3IgsdXxKZ12pILWI5ku4lTPC -oGyIEWsdFBpmZCsskiAFW0Rs7Hjy3HkquBF3kA9bC9rDkmcwSPyGoGvHAoGBAMjH -ZFljEtQYDWgwYcQNVd6/kcHVfl/MPq2/fvllYLaTneiUDfG2sGwNKcoY0Ont1zsN -ZGjf6EhFIVc9ej0d5wSMoiRvDRB5MyeBEFFIOw+STnohYUqvfkOhrHQN3mUGJlVx -8Dk47ndg10rRXaK+6AbWQBOACoBrvOnRnKzm9YpLAoGAWltISPxtNoFTjq2q8Bur -Z4ESosnmmS5LSCIikDEdjv9R+u5Q3XOUfjGiDCpYHZYGx6NY4b2OCzB30FqeGZ/m -RvloEkwULRVlD/uae0KhFT20C6cR2TITPnrffeLrTKe/MAXeaH3juGZAyWDj0dXR -PyzaTrvvSde6ZSkBJdFZ3uECgYBJaCU3TTLAGr8kHbRQ8rM3ioDnBoB60/kCm2X4 -FumiNxxfwWnLuwiLqNBjlGklbhDoBUB1249YhPXoqJB9seYZ0rUhbvyuzjE2u0hU -/3ygd9R7cKLapIkcfwrHL57k/rebO9ohgcAmI2DzeNKsQvIgNcsdlg3A4CMktb97 -s8GIOwKBgHs7d9yXGZJoxkt1g4HgDeo3H2DEAqyiAnxL4IaihJF5hRk8dNT6CiIh -ZBi275aUP4tZk+3UMp2TtR6e5QdbGnR5FBQ13uP4N27h9N5R4bgWmjhFwo3tFyzr -ea5Ltc3pjC6dgl8bGjsTqc/WMjnjQqxK34Kj74tiuect0O8iCfWe ------END RSA PRIVATE KEY----- diff --git a/3.test_cases/4.DDP/0-setup.sh b/3.test_cases/4.DDP/0-setup.sh deleted file mode 100755 index f7caf544..00000000 --- a/3.test_cases/4.DDP/0-setup.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -aws configure - -# Create a New Role -aws iam create-role --role-name EC2Role --assume-role-policy-document file://./.env/EC2Role-Trust-Policy.json - -# Attach S3 policy -aws iam attach-role-policy --role-name EC2Role --policy-arn arn:aws:iam::aws:policy/AmazonS3FullAccess - -# Create New Instance Profile -aws iam create-instance-profile --instance-profile-name Workshop-Instance-Profile -aws iam add-role-to-instance-profile --role-name EC2Role --instance-profile-name Workshop-Instance-Profile - -instance_id=$(wget -q -O - http://169.254.169.254/latest/meta-data/instance-id) -echo $instance_id -# Associate new instance profile to EC2 instance -aws ec2 associate-iam-instance-profile --instance-id $instance_id --iam-instance-profile Name=Workshop-Instance-Profile - -# Verify 
-#aws ec2 describe-iam-instance-profile-associations - -# Create Virtual env -python3 -m pip install --upgrade pip -python3 -m pip install --user --upgrade virtualenv - -python3 -m virtualenv ~/apc-ve -# ACTIVATE ENV BEFORE STEP 2 -source ~/apc-ve/bin/activate diff --git a/3.test_cases/4.DDP/head-node-scripts/1-create_environment.sh b/3.test_cases/4.DDP/1-create_environment.sh similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/1-create_environment.sh rename to 3.test_cases/4.DDP/1-create_environment.sh diff --git a/3.test_cases/4.DDP/1-install-parallel-cluster.sh b/3.test_cases/4.DDP/1-install-parallel-cluster.sh deleted file mode 100755 index d0346c24..00000000 --- a/3.test_cases/4.DDP/1-install-parallel-cluster.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -# Install pip and other dependencies -sudo apt install python3-pip -sudo apt-get install unzip - -# Make sure you have installed the AWS Command Line Interface: -pip3 install awscli - -# Packer - Ubuntu -curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add - -sudo apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main" -sudo apt-get update && sudo apt-get install packer - -# Packer - Amazon Linux -#sudo yum install -y yum-utils -#sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo -#sudo yum -y install packer - -# Pcluster Dependencies -python3 -m pip install flask==2.2.5 -# Node.js -curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.38.0/install.sh | bash -chmod ug+x ~/.nvm/nvm.sh -source ~/.nvm/nvm.sh -nvm install --lts -node --version - - -# Install AWS ParallelCluster: -pip3 install aws-parallelcluster==3.1.4 diff --git a/3.test_cases/4.DDP/head-node-scripts/2-calc_mean_std.slurm b/3.test_cases/4.DDP/2-calc_mean_std.slurm similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/2-calc_mean_std.slurm rename to 3.test_cases/4.DDP/2-calc_mean_std.slurm diff --git 
a/3.test_cases/4.DDP/2-download-data.sh b/3.test_cases/4.DDP/2-download-data.sh deleted file mode 100755 index 993951fa..00000000 --- a/3.test_cases/4.DDP/2-download-data.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -mkdir data -cd data - -echo "" -echo "Downloading Deep Phenotyping PMBC Image Set Data ..." -wget https://www.research-collection.ethz.ch/bitstream/handle/20.500.11850/343106/DeepPhenotype_PBMC_ImageSet_YSeverin.zip -unzip DeepPhenotype_PBMC_ImageSet_YSeverin.zip -d ./data - -rm DeepPhenotype_PBMC_ImageSet_YSeverin.zip - -# Make S3 bucket -aws s3 mb s3://pcluster-ml-workshop - -# Upload Data to S3 -aws s3 cp ./data s3://pcluster-ml-workshop/ --recursive diff --git a/3.test_cases/4.DDP/3-create-vpc.sh b/3.test_cases/4.DDP/3-create-vpc.sh deleted file mode 100755 index 99357770..00000000 --- a/3.test_cases/4.DDP/3-create-vpc.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -aws cloudformation create-stack --stack-name create-large-scale-vpc-stack --template-body file://./.env/Large-Scale-VPC.yaml --parameters ParameterKey=SubnetsAZ,ParameterValue=us-west-2a --capabilities CAPABILITY_IAM diff --git a/3.test_cases/4.DDP/head-node-scripts/3-main_dino.slurm b/3.test_cases/4.DDP/3-main_dino.slurm similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/3-main_dino.slurm rename to 3.test_cases/4.DDP/3-main_dino.slurm diff --git a/3.test_cases/4.DDP/head-node-scripts/4-compute_cls_features.slurm b/3.test_cases/4.DDP/4-compute_cls_features.slurm similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/4-compute_cls_features.slurm rename to 3.test_cases/4.DDP/4-compute_cls_features.slurm diff --git a/3.test_cases/4.DDP/4-create-ami.sh b/3.test_cases/4.DDP/4-create-ami.sh deleted file mode 100755 index 61ca4be7..00000000 --- a/3.test_cases/4.DDP/4-create-ami.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -cd ./.env/ami/ -packer build -color=true -var-file variables.json nvidia-efa-ml-al2-enroot_pyxis.json | tee build_AL2.log -cd 
../.. diff --git a/3.test_cases/4.DDP/5-create-key.sh b/3.test_cases/4.DDP/5-create-key.sh deleted file mode 100755 index d151afc4..00000000 --- a/3.test_cases/4.DDP/5-create-key.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -aws ec2 create-key-pair --key-name pcluster-key --query KeyMaterial --output text > pcluster-key.pem -sudo chmod 600 pcluster-key.pem diff --git a/3.test_cases/4.DDP/head-node-scripts/5-extract_labels.slurm b/3.test_cases/4.DDP/5-extract_labels.slurm similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/5-extract_labels.slurm rename to 3.test_cases/4.DDP/5-extract_labels.slurm diff --git a/3.test_cases/4.DDP/6-create-cluster.sh b/3.test_cases/4.DDP/6-create-cluster.sh deleted file mode 100755 index f6ae2a42..00000000 --- a/3.test_cases/4.DDP/6-create-cluster.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# Get Private Subnet ID -export private_subnet_id=$(aws cloudformation --region us-west-2 describe-stacks --query "Stacks[?StackName=='create-large-scale-vpc-stack'][].Outputs[?OutputKey=='PrivateSubnet'].OutputValue" --output text) - -echo "Private Subnet ID: ${private_subnet_id}" - -# Get Public Subnet ID -export public_subnet_id=$(aws cloudformation --region us-west-2 describe-stacks --query "Stacks[?StackName=='create-large-scale-vpc-stack'][].Outputs[?OutputKey=='PublicSubnet'].OutputValue" --output text) - -echo "Public Subnet ID: ${public_subnet_id}" - -# Get AMI ID -export ami_id=$( aws ec2 describe-images --region us-west-2 --filters "Name=name,Values=pcluster-dist-training-ami-*" --query 'Images[*].[ImageId]' --output text) - -echo "AMI ID: ${ami_id}" - -#List Keys -aws ec2 describe-key-pairs --query "KeyPairs[*].{KeyPairId:KeyPairId,KeyName:KeyName,KeyType:KeyType}" --output table - -# Update Cluster creation config template -cat create-cluster-template.yaml | envsubst > create-cluster.yaml - -# Create Cluster -pcluster create-cluster --cluster-configuration create-cluster.yaml --cluster-name pcluster-ml 
--region us-west-2 --suppress-validators "type:InstanceTypeBaseAMICompatibleValidator" --rollback-on-failure "false" - - - - diff --git a/3.test_cases/4.DDP/head-node-scripts/6-global_knn.slurm b/3.test_cases/4.DDP/6-global_knn.slurm similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/6-global_knn.slurm rename to 3.test_cases/4.DDP/6-global_knn.slurm diff --git a/3.test_cases/4.DDP/7-connect-to-cluster.sh b/3.test_cases/4.DDP/7-connect-to-cluster.sh deleted file mode 100755 index e3ea5844..00000000 --- a/3.test_cases/4.DDP/7-connect-to-cluster.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -pcluster ssh -i 'pcluster-key.pem' -n pcluster-ml diff --git a/3.test_cases/4.DDP/head-node-scripts/7-plots.slurm b/3.test_cases/4.DDP/7-plots.slurm similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/7-plots.slurm rename to 3.test_cases/4.DDP/7-plots.slurm diff --git a/3.test_cases/4.DDP/8-delete-cluster.sh b/3.test_cases/4.DDP/8-delete-cluster.sh deleted file mode 100644 index 60413b89..00000000 --- a/3.test_cases/4.DDP/8-delete-cluster.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -pcluster delete-cluster --cluster-name pcluster-ml --region us-west-2 diff --git a/3.test_cases/4.DDP/CODE_OF_CONDUCT.md b/3.test_cases/4.DDP/CODE_OF_CONDUCT.md deleted file mode 100644 index 5b627cfa..00000000 --- a/3.test_cases/4.DDP/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,4 +0,0 @@ -## Code of Conduct -This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). -For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact -opensource-codeofconduct@amazon.com with any additional questions or comments. 
diff --git a/3.test_cases/4.DDP/CONTRIBUTING.md b/3.test_cases/4.DDP/CONTRIBUTING.md deleted file mode 100644 index c4b6a1c5..00000000 --- a/3.test_cases/4.DDP/CONTRIBUTING.md +++ /dev/null @@ -1,59 +0,0 @@ -# Contributing Guidelines - -Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional -documentation, we greatly value feedback and contributions from our community. - -Please read through this document before submitting any issues or pull requests to ensure we have all the necessary -information to effectively respond to your bug report or contribution. - - -## Reporting Bugs/Feature Requests - -We welcome you to use the GitHub issue tracker to report bugs or suggest features. - -When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already -reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: - -* A reproducible test case or series of steps -* The version of our code being used -* Any modifications you've made relevant to the bug -* Anything unusual about your environment or deployment - - -## Contributing via Pull Requests -Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: - -1. You are working against the latest source on the *main* branch. -2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. -3. You open an issue to discuss any significant work - we would hate for your time to be wasted. - -To send us a pull request, please: - -1. Fork the repository. -2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. -3. Ensure local tests pass. -4. Commit to your fork using clear commit messages. -5. 
Send us a pull request, answering any default questions in the pull request interface. -6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. - -GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and -[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). - - -## Finding contributions to work on -Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. - - -## Code of Conduct -This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). -For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact -opensource-codeofconduct@amazon.com with any additional questions or comments. - - -## Security issue notifications -If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. - - -## Licensing - -See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. diff --git a/3.test_cases/4.DDP/LICENSE b/3.test_cases/4.DDP/LICENSE deleted file mode 100644 index 09951d9f..00000000 --- a/3.test_cases/4.DDP/LICENSE +++ /dev/null @@ -1,17 +0,0 @@ -MIT No Attribution - -Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/3.test_cases/4.DDP/README.md b/3.test_cases/4.DDP/README.md index 65170add..93816b93 100644 --- a/3.test_cases/4.DDP/README.md +++ b/3.test_cases/4.DDP/README.md @@ -1,102 +1,14 @@ # Distributed Training of Self-Supervised Vision Transformers in PyTorch for multi-channel single-cells images with AWS ParallelCluster -Orchestrating distributed training jobs on a cluster of EC2 instances can be challemging from a Compute, Storage and Networking point of view. [AWS ParallelCluster](https://github.com/aws/aws-parallelcluster) is an open source cluster management tool that provides a user friendly way of creating a HPC cluster that can be used to run distributed training jobs. - Vision Transformers are increasingly popular in the computational drug discovery space, especially in the area of phenotypic characterisation using multi-channel single cell images. In this project, we will show how to pretrain and do downstream analyses for [scDINO](https://github.com/JacobHanimann/scDINO/tree/master) model. -

- -Fig. 1 - ParallelCluster Architecture for Distributed Training -
- -Figure 1. shows the architecture for distributed training with AWS ParallelCluster. The project is designed to introduce the concepts of deploying this architecture and running distributed training for training self-supervised vision transformers at a small scale, however the same architecture can be applied for training at large scale by adjusting the number and type of nodes, using other accelerators (NVIDIA A100 GPUs, AWS Trainium, Intel Habana Gaudi). - -This project is organized in a number of sequential steps. The scripts that belong to each step are organized in folders with corresponding names. To execute a step, we will change the current directory accordingly and execute scripts in their designated order. The prerequisites section is required, but there are no scripts associated with it. We will complete setting up prerequisites by following instructions. Steps 1 through 6 are required to complete the workshop. Step 7-Cleanup is optional. - -## 0. Prerequisites -The only prerequisite we have is that the user is able to launch an EC2 instance and has permission to access to GPU based instances. - -## 1. Create ParallelCluster -In this section, we will walk step-by-step how to create the cluster. Once you have launched an EC2 instance (ex: c6a.4xlarge), clone this repo and execute `source 0-setup.sh`. This script would run: -a) `aws configure`, -b) create and attach a new role `EC2Role` which has S3 access policy, -c) create and activate a new virual environment `apc-ve` which we will use next to create the cluster. - -### 1.1 Install ParallelCluster Tools -Once the virtual environment is created, `./1-install-parallel-cluster.sh` will install the necessary tools like `awscli`, `packer`to create custom AMIs, dependencies like `flask` and `node.js` and `aws-parallelcluster`. 
- -### 1.2 Download Deep Phenotyping Image Data +### Download Deep Phenotyping Image Data Execute `./2-download-data.sh` to download the image data, unzip it, create a S3 bucket `pcluster-ml-workshop` and upload to the S3 bucket. You can find more information about the dataset [here](https://www.research-collection.ethz.ch/handle/20.500.11850/343106). This dataset has 90852 images in the Training set and 22712 images in the Test set. The directory structure looks like below. For more information about the eight immune classes and donors, please refer to the [paper](https://www.biorxiv.org/content/10.1101/2023.01.16.524226v1.full.pdf) Training Directory | Test Directory for Donor1 --- | --- | ` -### 1.3 Create VPC -Next step before creating the cluster, we need to create a VPC with a public and a private subnet in a user specified Availability Zone (AZ). In this work, we used `us-west-2a` as the AZ in which the Compute Nodes and the FSx for Lustre file system will reside. Executing `./3-create-vpc.sh` will read the [Large-Scale-VPC.yaml](https://github.com/awsankur/aws-distributed-training-workshop-pcluster/blob/main/.env/Large-Scale-VPC.yaml) and create a cloud formation stack. In addition to the subnets, this stack will also deploy an Internet Gateway and NAT Gateway in the public subnet and deploy endpoints for Amzon S3 and Amazon DynamoDB. The VPC contains 2 CIDR blocks with 10.0.0.0/16 and 10.1.0.0/16. The first CIDR is used for the public subnet, the second is used for the private. - -You can check the progress of VPC creation in the CloudFormation stack `create-large-scale-vpc-stack`. Once the VPC creation is done in a few minutes, you can check the outputs of the stack which will show subnet and vpc IDs. We will use these next to create a cluster. - -

-Fig. 2 - Outputs of the VPC creation stack -
- -### 1.4 Create an Amazon Machine Image (AMI) -Before we can create a cluster, we need to create an AMI that will be used to startup the compute nodes and will have all the necessary dependencies for running a distributed training workload. We can also use a published Deep Learning (DLAMI) but they are often quite big and have a number of other features not needed for distributed training. To build a targeted AMI for distributed training you can execute `4-create-ami.sh`. This will kickoff [Packer](https://www.packer.io/) from [HashiCorp](https://www.hashicorp.com/) that will build an AMI using a configuration json [nvidia-efa-ml-al2-enroot_pyxis.json](https://github.com/awsankur/aws-distributed-training-workshop-pcluster/blob/main/.env/ami/nvidia-efa-ml-al2-enroot_pyxis.json) and [variables.json](https://github.com/awsankur/aws-distributed-training-workshop-pcluster/blob/main/.env/ami/variables.json). This AMI will be specific for Nvidia GPUs and will base on Amazon Linux 2. The software stack installed on the AMI through packer consists of: - -1. Nvidia Driver - 510.47.03 -2. CUDA - 11.6 -3. CUDNN - v8 -4. NVIDIA Container Toolkit - system latest -5. Docker - system latest -6. NCCL - v2.12.7-1 -7. Pyxis - v0.12.0" -8. Enroot - latest -9. AWS CLI V2 - latest for Nvidia Driver 510.47.03 -10. Nvidia Fabric Manager - latest -11. EFA Driver - latest -12. EFA OFI NCCL plugin - latest -13. NCCL Tests - Latest -14. Intel MKL - 2020.0-088 - -The [variables.json](https://github.com/awsankur/aws-distributed-training-workshop-pcluster/blob/main/.env/ami/variables.json) config specifies `us-west-2` as the default region and `parallel_cluster_version` as 3.1.4 but could be changed if other newer versions are needed. When the Packer run kicks off, it starts a `g4dn.12xlarge` instance as specified in the `instance_type` variable in `variables.json`, creates the AMI and saves in your account. Once the run is done, the `g4dn.12xlarge` instance is automatically terminated. 
The AMI creation could take up to ~30mins and once done it will show up in the AMI section of the EC2 console in the `Private Images` section as `pcluster-dist-training-ami-parallelcluster-3.1.4-amzn2`. - -This AMI also installs Pyxis and Enroot that enable us to run containers using Slurm. Please refer to the [official documentation](https://github.com/NVIDIA/pyxis#usage=) on Github for examples with Slurm. However, in this work, we will not use containers. We will show how to use Conda environments within your cluster in the next section. - -### 1.5 Create key-pair -One final step before we actually create the cluster is to create a key-pair that would be needed to access into the head node of the cluster. Executing `5-create-key.sh` will create a key pair and put the `pcluster-key.pem` in your repo. - -### 1.6 Create cluster -To create a cluster, execute `6-create-cluster.sh`. This will first print out the necessary inputs needed in the cluster configuration file such as public and private subnet-ids, ami-id and key name. Next it will substitute these variables in the file [create-cluster-template.yaml](https://github.com/awsankur/aws-distributed-training-workshop-pcluster/blob/main/create-cluster-template.yaml) and create a new cluster config file `create-cluster.yaml`. An important point to keep in mind is that the `--rollback-on-failure "false"` flag in [6-create-cluster.sh](https://github.com/awsankur/aws-distributed-training-workshop-pcluster/blob/main/6-create-cluster.sh) does not allow resources like the head node or compute nodes to terminate if for any reason the cluster fails to create. This is important for debugging purposes. You can view the progress of cluster creation in CloudFormation with a stack name of `pcluster-ml` which is the name specified for the cluster. - -A couple of key points to keep in mind when making changes to the cluster creation config file. -1. The Head Node does not need to have a GPU accelerator on it. 
A small instance such as a `c5.4xlarge` should suffice. -2. The Head Node will lie in the `Public Subnet` and the Compute Nodes will lie in the `Private Subnet`. -3. This config file will actually create a FSx for Lustre volume in the same AZ as the Compute Nodes. -4. To attach an existing FileSystemId, please follow steps in this [blog](https://swsmith.cc/posts/fsx-persistent-2-pcluster.html). For an existing FileSystem you need to create a security group which allows communication between the cluster and the file system. You would need to specify `FileSystemId` in the `FsxLustreSettings` section. -5. In `FsxLustreSettings` section, you can add `DeletionPolicy: Retain` to retain the FSx for Lustre filesystem even when cluster is deleted. -6. In the `ComputeResources` section, you can actually specify 0 resources. -7. Finally, to use EFA, please set the `Enabled` flag in the `Efa` section in `ComputeResources` as `true`. For a list of instance types that support EFA, please see [here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html#efa-instance-types). -8. Also, please keep in mind that all instance types (Ex: `p3.8xlarge`) do not support Instance store. If you want to use an instance that does not support Instance store please comment out the `EphemeralVolume` section. For a list of instances that support Instance store, please see [here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-store-volumes.html) - -To list clusters and view their status you can run `pcluster list-clusters`. And after the cluster is creadted you will see an output like below: - -

-
- -After the cluster is created, you can see the Head Node and Compute Nodes ready in the EC2 console and FSx for Lustre file system created in the FSx console which looks like below. You can check the AZ, Storage Capacity and Throughput per unit of storage. - -

-
- -If the cluster fails to create for any reason, you can view the `CloudFormation` events in the stack with the cluster name. You can also view detailed log files in `/var/log/parallelcluster` in the head node and compute nodes. In addition, you can also view logs in `chef-client.log` and `cloud-init.log` files in the head and compute nodes. - -### 1.7 Connect to the cluster -Once the cluster is created, you can ssh into the head node by executing `./7-connect-to-cluster.sh`. In the head node, you can run `sinfo` to see the available compute nodes and it will look like below: -

-
-This output shows that there are 2 `static` nodes (same as Min Capacity in cluster creation config) `train-g4dn-12xl-st-train-g4dn-12xl-1` and `train-g4dn-12xl-st-train-g4dn-12xl-2` and 4 `dynamic` nodes `train-g4dn-12xl-dy-train-g4dn-12xl-[1-4]`. The `dynamic` nodes are those that can be spun up on demand. From the head node you can ssh into the compute nodes by executing `ssh train-g4dn-12xl-st-train-g4dn-12xl-1` or `ssh train-g4dn-12xl-st-train-g4dn-12xl-2`. - ## 2. Run Distributed Training jobs In this section, we will show how distributed training slurm jobs can be submitted from the head node. But first we need to make sure the cluster has access to the training data and has all the relevant dependencies installed in the conda environment. @@ -135,8 +47,6 @@ Executing `squeue` will list all running jobs on the static and dynamic compute c. Similarly run the rest of the downstream analyses to extract CLS features (`sbatch 4-compute_cls_features.slurm`), extract labels (`sbatch 5-extract_labels.slurm`), run knn analyses (`sbatch 6-global_knn.slurm`), and generate plots (`sbatch 7-plots.slurm`) -## 3. Cleanup -Finally, to delete the cluster you can execute `./8-delete-cluster.sh`. This will terminate the compute and head nodes and also the FSx for Lustre volume if `DeletionPolicy: Retain` is not explicitly mentioned in `FsxLustreSettings`. 
diff --git a/3.test_cases/4.DDP/create-cluster-template.yaml b/3.test_cases/4.DDP/create-cluster-template.yaml deleted file mode 100644 index b08548b1..00000000 --- a/3.test_cases/4.DDP/create-cluster-template.yaml +++ /dev/null @@ -1,81 +0,0 @@ -Region: us-west-2 -Image: - Os: alinux2 - CustomAmi: $ami_id -HeadNode: - InstanceType: c5.4xlarge - Ssh: - KeyName: pcluster-key - Iam: - ## (Un)comment S3 Access and provide one of your S3 buckets name - ## https://docs.aws.amazon.com/parallelcluster/latest/ug/HeadNode-v3.html#HeadNode-v3-Iam - S3Access: - - BucketName: pcluster-ml-workshop - EnableWriteAccess: true - AdditionalIamPolicies: - - Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore - - Policy: arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly - Networking: - SubnetId: ${public_subnet_id} - LocalStorage: - RootVolume: - Size: 100 -SharedStorage: - # install your shared applications in /apps, it can be archived through - # a snapshot that you can reuse with other clusters. - - Name: SharedEBS - StorageType: Ebs - MountDir: /apps - EbsSettings: - VolumeType: gp3 - Size: 200 - Throughput: 300 - Iops: 6000 - - Name: FsxLustre0 - StorageType: FsxLustre - MountDir: /fsx - FsxLustreSettings: - StorageCapacity: 4800 - DeploymentType: PERSISTENT_1 - PerUnitStorageThroughput: 200 - DataCompressionType: LZ4 -## Review the following doc: -## https://docs.aws.amazon.com/parallelcluster/latest/ug/Scheduling-v3.html -## If local storage is needed this can be configured as required -Scheduling: - Scheduler: slurm - SlurmQueues: - - Name: train-g4dn-2xl - ComputeSettings: - LocalStorage: - # NVMe drives will be set in RAID0 - EphemeralVolume: - MountDir: /local_scratch - # This is your root volume - RootVolume: - Size: 200 - ## We set the MinCount default to 0 to confirm that we get the - ## instances from our ODCR when running a job. 
Then we change MinCount - ## to 16 in order to avoid scale down - ComputeResources: - - MinCount: 2 - MaxCount: 6 - InstanceType: g4dn.2xlarge - Name: train-g4dn-2xl - Efa: - Enabled: false - GdrSupport: false - Networking: - PlacementGroup: - Enabled: true - SubnetIds: - - ${private_subnet_id} - ## for IAM see this doc: - ## https://docs.aws.amazon.com/parallelcluster/latest/ug/Scheduling-v3.html#Scheduling-v3-SlurmQueues-Iam - Iam: - S3Access: - - BucketName: pcluster-ml-workshop - AdditionalIamPolicies: - - Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore - - Policy: arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly - diff --git a/3.test_cases/4.DDP/create-cluster.yaml b/3.test_cases/4.DDP/create-cluster.yaml deleted file mode 100644 index 229be885..00000000 --- a/3.test_cases/4.DDP/create-cluster.yaml +++ /dev/null @@ -1,81 +0,0 @@ -Region: us-west-2 -Image: - Os: alinux2 - CustomAmi: ami-04912fde9e80170b8 -HeadNode: - InstanceType: c5.4xlarge - Ssh: - KeyName: pcluster-key - Iam: - ## (Un)comment S3 Access and provide one of your S3 buckets name - ## https://docs.aws.amazon.com/parallelcluster/latest/ug/HeadNode-v3.html#HeadNode-v3-Iam - S3Access: - - BucketName: pcluster-ml-workshop - EnableWriteAccess: true - AdditionalIamPolicies: - - Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore - - Policy: arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly - Networking: - SubnetId: subnet-0e0944955bfdce9f1 - LocalStorage: - RootVolume: - Size: 100 -SharedStorage: - # install your shared applications in /apps, it can be archived through - # a snapshot that you can reuse with other clusters. 
- - Name: SharedEBS - StorageType: Ebs - MountDir: /apps - EbsSettings: - VolumeType: gp3 - Size: 200 - Throughput: 300 - Iops: 6000 - - Name: FsxLustre0 - StorageType: FsxLustre - MountDir: /fsx - FsxLustreSettings: - StorageCapacity: 4800 - DeploymentType: PERSISTENT_1 - PerUnitStorageThroughput: 200 - DataCompressionType: LZ4 -## Review the following doc: -## https://docs.aws.amazon.com/parallelcluster/latest/ug/Scheduling-v3.html -## If local storage is needed this can be configured as required -Scheduling: - Scheduler: slurm - SlurmQueues: - - Name: train-g4dn-2xl - ComputeSettings: - LocalStorage: - # NVMe drives will be set in RAID0 - EphemeralVolume: - MountDir: /local_scratch - # This is your root volume - RootVolume: - Size: 200 - ## We set the MinCount default to 0 to confirm that we get the - ## instances from our ODCR when running a job. Then we change MinCount - ## to 16 in order to avoid scale down - ComputeResources: - - MinCount: 2 - MaxCount: 6 - InstanceType: g4dn.2xlarge - Name: train-g4dn-2xl - Efa: - Enabled: false - GdrSupport: false - Networking: - PlacementGroup: - Enabled: true - SubnetIds: - - subnet-01526f003e8c4e085 - ## for IAM see this doc: - ## https://docs.aws.amazon.com/parallelcluster/latest/ug/Scheduling-v3.html#Scheduling-v3-SlurmQueues-Iam - Iam: - S3Access: - - BucketName: pcluster-ml-workshop - AdditionalIamPolicies: - - Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore - - Policy: arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly - diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/__pycache__/utils.cpython-38.pyc b/3.test_cases/4.DDP/pyscripts/__pycache__/utils.cpython-38.pyc similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/__pycache__/utils.cpython-38.pyc rename to 3.test_cases/4.DDP/pyscripts/__pycache__/utils.cpython-38.pyc diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/__pycache__/vision_transformer.cpython-38.pyc 
b/3.test_cases/4.DDP/pyscripts/__pycache__/vision_transformer.cpython-38.pyc similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/__pycache__/vision_transformer.cpython-38.pyc rename to 3.test_cases/4.DDP/pyscripts/__pycache__/vision_transformer.cpython-38.pyc diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/compute_CLS_features.py b/3.test_cases/4.DDP/pyscripts/compute_CLS_features.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/compute_CLS_features.py rename to 3.test_cases/4.DDP/pyscripts/compute_CLS_features.py diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/extract_image_labels.py b/3.test_cases/4.DDP/pyscripts/extract_image_labels.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/extract_image_labels.py rename to 3.test_cases/4.DDP/pyscripts/extract_image_labels.py diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/global_kNN.py b/3.test_cases/4.DDP/pyscripts/global_kNN.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/global_kNN.py rename to 3.test_cases/4.DDP/pyscripts/global_kNN.py diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/main_dino.py b/3.test_cases/4.DDP/pyscripts/main_dino.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/main_dino.py rename to 3.test_cases/4.DDP/pyscripts/main_dino.py diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/mean_std_dataset.py b/3.test_cases/4.DDP/pyscripts/mean_std_dataset.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/mean_std_dataset.py rename to 3.test_cases/4.DDP/pyscripts/mean_std_dataset.py diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/plot_in_2D.py b/3.test_cases/4.DDP/pyscripts/plot_in_2D.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/plot_in_2D.py rename to 3.test_cases/4.DDP/pyscripts/plot_in_2D.py diff 
--git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/setup.py b/3.test_cases/4.DDP/pyscripts/setup.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/setup.py rename to 3.test_cases/4.DDP/pyscripts/setup.py diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/utils.py b/3.test_cases/4.DDP/pyscripts/utils.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/utils.py rename to 3.test_cases/4.DDP/pyscripts/utils.py diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/vision_transformer.py b/3.test_cases/4.DDP/pyscripts/vision_transformer.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/vision_transformer.py rename to 3.test_cases/4.DDP/pyscripts/vision_transformer.py diff --git a/3.test_cases/4.DDP/head-node-scripts/pyscripts/visualise_attention.py b/3.test_cases/4.DDP/pyscripts/visualise_attention.py similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/pyscripts/visualise_attention.py rename to 3.test_cases/4.DDP/pyscripts/visualise_attention.py diff --git a/3.test_cases/4.DDP/head-node-scripts/requirements.txt b/3.test_cases/4.DDP/requirements.txt similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/requirements.txt rename to 3.test_cases/4.DDP/requirements.txt diff --git a/3.test_cases/4.DDP/head-node-scripts/scDINO_full_pipeline.yaml b/3.test_cases/4.DDP/scDINO_full_pipeline.yaml similarity index 100% rename from 3.test_cases/4.DDP/head-node-scripts/scDINO_full_pipeline.yaml rename to 3.test_cases/4.DDP/scDINO_full_pipeline.yaml From ad191f225be5d88088afce4ac966f3e6cfaa5b72 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 15 Aug 2023 10:07:32 -0500 Subject: [PATCH 007/648] Update documentation and remove additional scripts for parallelcluster archs --- .../2.aws-parallelcluster/README.md | 59 ++++++++++++++++++- .../2.aws-parallelcluster/create-cluster.sh | 3 - 
.../2.aws-parallelcluster/create-key-pair.sh | 4 -- .../2.aws-parallelcluster/install-pcluster.sh | 13 ---- 4 files changed, 58 insertions(+), 21 deletions(-) delete mode 100644 1.architectures/2.aws-parallelcluster/create-cluster.sh delete mode 100755 1.architectures/2.aws-parallelcluster/create-key-pair.sh delete mode 100755 1.architectures/2.aws-parallelcluster/install-pcluster.sh diff --git a/1.architectures/2.aws-parallelcluster/README.md b/1.architectures/2.aws-parallelcluster/README.md index 73bf4440..86e563d9 100644 --- a/1.architectures/2.aws-parallelcluster/README.md +++ b/1.architectures/2.aws-parallelcluster/README.md @@ -4,6 +4,56 @@ Clusters in AWS ParallelCluster share similar components: a head-node, compute nodes (typically P or Trn EC2 family of instances) and one or multiple shared filesystems (FSx for Lustre). You will find below a section on the architectures themselves and how to deploy them. After this section, you will be brief on key elements of these templates (or things you wanna know to avoid potential mistakes). +### Initial setup + +Before deploying a cluster, let's ensure you have AWS ParallelCluster (PC) accessible and that you have generated an EC2 key pair that you can use to connect to your head-node. If you have both PC installed and the key pair generated then skip this section and go deploy a cluster. + +#### Install AWS ParallelCluster + +Run the script below to install AWS ParallelCluster in a Python virtual environment and access this environment. 
+ + +```bash +#!/bin/bash + +VIRTUAL_ENV_PATH=~/apc-ve # change the path to your liking + +# Update pip and the virtual env module +python3 -m pip install --upgrade pip +python3 -m pip install --user --upgrade virtualenv + +python3 -m virtualenv $VIRTUAL_ENV_PATH # create the virtual env + +source $VIRTUAL_ENV_PATH/bin/activate # activate the environment +pip3 install awscli # install the AWS CLI +pip3 install aws-parallelcluster # then AWS ParallelCluster +``` + +#### Create your EC2 Keypair (if needed) + +The EC2 key pair enables your to connect to your cluster on the head-node through ssh or [AWS Systems Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-sessions-start.html). We will cover for SSH here. + +You can list your public keys on your [AWS Console](https://console.aws.amazon.com/ec2/home?#KeyPairs:) and you may also check your SSH directory for the private keys (`~/ssh` if using Linux or OSX). + +If you do not have a keypair that you can use then we will create one with the command below (see [this documentation](https://docs.aws.amazon.com/parallelcluster/latest/ug/set-up-keypair.html)). + + +```bash +#!/bin/bash + +AWS_TARGET_REGION=us-east-1 +KEYPAIR_NAME=pcluster-workshop-key + +# Create the key pair using the AWS CLI and retrieve the private part (.pem file) +aws ec2 create-key-pair --key-name pcluster-workshop-key \ + --query KeyMaterial \ + --region $AWS_TARGET_REGION \ + --output text > $KEYPAIR_NAME.pem +# the private part of your key pair is located in the current directory +# we change the access rights to the current user only +sudo chmod 600 $KEYPAIR_NAME.pem +``` + ### How to deploy a cluster To create the cluster use the command below and replace `CLUSTER_CONFIG_FILE` by the path to the cluster configuration file (see next section) and `NAME_OF_YOUR_CLUSTER` by the name of your cluster (`realpotato` is a cool name). 
@@ -34,7 +84,7 @@ The templates contain placeholder variables that you need to replace before use. - `PLACEHOLDER_CUSTOM_AMI_ID`: if using a custom AMI then replace with the custom AMI ID (`ami-12356790abcd`). - `PLACEHOLDER_PUBLIC_SUBNET`: change to the id of a public subnet to host the head-node (`subnet-12356790abcd`). - `PLACEHOLDER_PRIVATE_SUBNET`: change to the id of a public subnet to host the compute nodes (`subnet-12356790abcd`). -- `PLACEHOLDER_SSH_KEY`: ID of the SSH key you'd like to use to connect to the head-node. You can also use AWS Systems Manager Session Manager (SSM). +- `PLACEHOLDER_SSH_KEY`: ID of the SSH key you'd like to use to connect to the head-node, use the name of the key. You can also use AWS Systems Manager Session Manager (SSM). - `PLACEHOLDER_CAPACITY_RESERVATION_ID`: if using a capacity reservation put the ID here (`cr-12356790abcd`). @@ -84,3 +134,10 @@ You can chose to use a custom image or post-install scripts to install your appl ### Diagram ![AWS ParallelCluster diagram](../../0.docs/parallelcluster-arch-diagram.png) + + +### Troubleshooting + +A common issue we see customer face is a problem with the post install scripts or issue to access capacity due to a mis-configuration. This can manifest itself through a `HeadNodeWaitCondition` that'll cause the ParallelCluster to fail a cluster deployment. + +To solve that, you can look at the cluster logs in CloudWatch in the cluster loggroup, otherwise use the option `--rollback-on-failure false` to keep resources up upon failure for further troubleshooting. 
diff --git a/1.architectures/2.aws-parallelcluster/create-cluster.sh b/1.architectures/2.aws-parallelcluster/create-cluster.sh deleted file mode 100644 index af5827f2..00000000 --- a/1.architectures/2.aws-parallelcluster/create-cluster.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -pcluster create-cluster --cluster-configuration pcluster-config.yaml --cluster-name pcluster-ml --region us-west-2 --suppress-validators "type:InstanceTypeBaseAMICompatibleValidator" --rollback-on-failure "false" diff --git a/1.architectures/2.aws-parallelcluster/create-key-pair.sh b/1.architectures/2.aws-parallelcluster/create-key-pair.sh deleted file mode 100755 index d729c3b8..00000000 --- a/1.architectures/2.aws-parallelcluster/create-key-pair.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -aws ec2 create-key-pair --key-name pcluster-workshop-key --query KeyMaterial --output text > pcluster-workshop-key.pem -sudo chmod 600 pcluster-workshop-key.pem diff --git a/1.architectures/2.aws-parallelcluster/install-pcluster.sh b/1.architectures/2.aws-parallelcluster/install-pcluster.sh deleted file mode 100755 index 5ca2aaef..00000000 --- a/1.architectures/2.aws-parallelcluster/install-pcluster.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -# Create Virtual env -python3 -m pip install --upgrade pip -python3 -m pip install --user --upgrade virtualenv - -python3 -m virtualenv ~/apc-ve - -source ~/apc-ve/bin/activate - -pip3 install awscli - -pip3 install aws-parallelcluster From 9c60fe0fdac343323652b8bdd178b64c5e4fbe89 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 15 Aug 2023 10:07:32 -0500 Subject: [PATCH 008/648] Update documentation and remove additional scripts for parallelcluster archs --- .../2.aws-parallelcluster/README.md | 59 ++++++++++++++++++- .../2.aws-parallelcluster/create-cluster.sh | 3 - .../2.aws-parallelcluster/create-key-pair.sh | 4 -- .../2.aws-parallelcluster/install-pcluster.sh | 13 ---- 4 files changed, 58 insertions(+), 21 deletions(-) delete mode 
100644 1.architectures/2.aws-parallelcluster/create-cluster.sh delete mode 100755 1.architectures/2.aws-parallelcluster/create-key-pair.sh delete mode 100755 1.architectures/2.aws-parallelcluster/install-pcluster.sh diff --git a/1.architectures/2.aws-parallelcluster/README.md b/1.architectures/2.aws-parallelcluster/README.md index 73bf4440..86e563d9 100644 --- a/1.architectures/2.aws-parallelcluster/README.md +++ b/1.architectures/2.aws-parallelcluster/README.md @@ -4,6 +4,56 @@ Clusters in AWS ParallelCluster share similar components: a head-node, compute nodes (typically P or Trn EC2 family of instances) and one or multiple shared filesystems (FSx for Lustre). You will find below a section on the architectures themselves and how to deploy them. After this section, you will be brief on key elements of these templates (or things you wanna know to avoid potential mistakes). +### Initial setup + +Before deploying a cluster, let's ensure you have AWS ParallelCluster (PC) accessible and that you have generated an EC2 key pair that you can use to connect to your head-node. If you have both PC installed and the key pair generated then skip this section and go deploy a cluster. + +#### Install AWS ParallelCluster + +Run the script below to install AWS ParallelCluster in a Python virtual environment and access this environment. 
+ + +```bash +#!/bin/bash + +VIRTUAL_ENV_PATH=~/apc-ve # change the path to your liking + +# Update pip and the virtual env module +python3 -m pip install --upgrade pip +python3 -m pip install --user --upgrade virtualenv + +python3 -m virtualenv $VIRTUAL_ENV_PATH # create the virtual env + +source $VIRTUAL_ENV_PATH/bin/activate # activate the environment +pip3 install awscli # install the AWS CLI +pip3 install aws-parallelcluster # then AWS ParallelCluster +``` + +#### Create your EC2 Keypair (if needed) + +The EC2 key pair enables your to connect to your cluster on the head-node through ssh or [AWS Systems Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-sessions-start.html). We will cover for SSH here. + +You can list your public keys on your [AWS Console](https://console.aws.amazon.com/ec2/home?#KeyPairs:) and you may also check your SSH directory for the private keys (`~/ssh` if using Linux or OSX). + +If you do not have a keypair that you can use then we will create one with the command below (see [this documentation](https://docs.aws.amazon.com/parallelcluster/latest/ug/set-up-keypair.html)). + + +```bash +#!/bin/bash + +AWS_TARGET_REGION=us-east-1 +KEYPAIR_NAME=pcluster-workshop-key + +# Create the key pair using the AWS CLI and retrieve the private part (.pem file) +aws ec2 create-key-pair --key-name pcluster-workshop-key \ + --query KeyMaterial \ + --region $AWS_TARGET_REGION \ + --output text > $KEYPAIR_NAME.pem +# the private part of your key pair is located in the current directory +# we change the access rights to the current user only +sudo chmod 600 $KEYPAIR_NAME.pem +``` + ### How to deploy a cluster To create the cluster use the command below and replace `CLUSTER_CONFIG_FILE` by the path to the cluster configuration file (see next section) and `NAME_OF_YOUR_CLUSTER` by the name of your cluster (`realpotato` is a cool name). 
@@ -34,7 +84,7 @@ The templates contain placeholder variables that you need to replace before use. - `PLACEHOLDER_CUSTOM_AMI_ID`: if using a custom AMI then replace with the custom AMI ID (`ami-12356790abcd`). - `PLACEHOLDER_PUBLIC_SUBNET`: change to the id of a public subnet to host the head-node (`subnet-12356790abcd`). - `PLACEHOLDER_PRIVATE_SUBNET`: change to the id of a public subnet to host the compute nodes (`subnet-12356790abcd`). -- `PLACEHOLDER_SSH_KEY`: ID of the SSH key you'd like to use to connect to the head-node. You can also use AWS Systems Manager Session Manager (SSM). +- `PLACEHOLDER_SSH_KEY`: ID of the SSH key you'd like to use to connect to the head-node, use the name of the key. You can also use AWS Systems Manager Session Manager (SSM). - `PLACEHOLDER_CAPACITY_RESERVATION_ID`: if using a capacity reservation put the ID here (`cr-12356790abcd`). @@ -84,3 +134,10 @@ You can chose to use a custom image or post-install scripts to install your appl ### Diagram ![AWS ParallelCluster diagram](../../0.docs/parallelcluster-arch-diagram.png) + + +### Troubleshooting + +A common issue we see customer face is a problem with the post install scripts or issue to access capacity due to a mis-configuration. This can manifest itself through a `HeadNodeWaitCondition` that'll cause the ParallelCluster to fail a cluster deployment. + +To solve that, you can look at the cluster logs in CloudWatch in the cluster loggroup, otherwise use the option `--rollback-on-failure false` to keep resources up upon failure for further troubleshooting. 
diff --git a/1.architectures/2.aws-parallelcluster/create-cluster.sh b/1.architectures/2.aws-parallelcluster/create-cluster.sh deleted file mode 100644 index af5827f2..00000000 --- a/1.architectures/2.aws-parallelcluster/create-cluster.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -pcluster create-cluster --cluster-configuration pcluster-config.yaml --cluster-name pcluster-ml --region us-west-2 --suppress-validators "type:InstanceTypeBaseAMICompatibleValidator" --rollback-on-failure "false" diff --git a/1.architectures/2.aws-parallelcluster/create-key-pair.sh b/1.architectures/2.aws-parallelcluster/create-key-pair.sh deleted file mode 100755 index d729c3b8..00000000 --- a/1.architectures/2.aws-parallelcluster/create-key-pair.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -aws ec2 create-key-pair --key-name pcluster-workshop-key --query KeyMaterial --output text > pcluster-workshop-key.pem -sudo chmod 600 pcluster-workshop-key.pem diff --git a/1.architectures/2.aws-parallelcluster/install-pcluster.sh b/1.architectures/2.aws-parallelcluster/install-pcluster.sh deleted file mode 100755 index 5ca2aaef..00000000 --- a/1.architectures/2.aws-parallelcluster/install-pcluster.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -# Create Virtual env -python3 -m pip install --upgrade pip -python3 -m pip install --user --upgrade virtualenv - -python3 -m virtualenv ~/apc-ve - -source ~/apc-ve/bin/activate - -pip3 install awscli - -pip3 install aws-parallelcluster From 24d853d40d337ee6f4675caf8b0fb2c06bd4fae4 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 15 Aug 2023 10:09:37 -0500 Subject: [PATCH 009/648] Resize VPCs images in readme --- 1.architectures/1.vpc_network/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/1.architectures/1.vpc_network/README.md b/1.architectures/1.vpc_network/README.md index fdaf3826..de35aad2 100644 --- a/1.architectures/1.vpc_network/README.md +++ b/1.architectures/1.vpc_network/README.md @@ -44,7 
+44,7 @@ aws cloudformation create-stack --stack-name vpc-stack-ml\ #### Architecture Diagram -![img](../../0.docs/vpc-one-az.png) + ### 2. Template VPC One AZs @@ -79,4 +79,4 @@ aws cloudformation create-stack --stack-name vpc-stack-ml\ #### Architecture Diagram -![img](../../0.docs/vpc-all-az.png) + From c26f011b7fe9baadb9086074628842d3db26902e Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 15 Aug 2023 10:09:37 -0500 Subject: [PATCH 010/648] Resize VPCs images in readme --- 1.architectures/1.vpc_network/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/1.architectures/1.vpc_network/README.md b/1.architectures/1.vpc_network/README.md index fdaf3826..de35aad2 100644 --- a/1.architectures/1.vpc_network/README.md +++ b/1.architectures/1.vpc_network/README.md @@ -44,7 +44,7 @@ aws cloudformation create-stack --stack-name vpc-stack-ml\ #### Architecture Diagram -![img](../../0.docs/vpc-one-az.png) + ### 2. Template VPC One AZs @@ -79,4 +79,4 @@ aws cloudformation create-stack --stack-name vpc-stack-ml\ #### Architecture Diagram -![img](../../0.docs/vpc-all-az.png) + From 85f27c3402a2943b07af98ceaf9d3a67ef84530e Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 15 Aug 2023 10:34:35 -0500 Subject: [PATCH 011/648] Change documentation based on tjm@ improvements --- 2.amazon_machine_images/README.md | 9 +++++++-- 3.test_cases/0.nccl-tests/README.md | 1 + 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/2.amazon_machine_images/README.md b/2.amazon_machine_images/README.md index ca417d31..e920b389 100644 --- a/2.amazon_machine_images/README.md +++ b/2.amazon_machine_images/README.md @@ -1,9 +1,14 @@ # Usage -Run `make ami_gpu` or `make ami_cpu` to build AMI for GPU with EFA and CPU supporting [pyxies](https://github.com/NVIDIA/pyxis) (see [here](https://github.com/NVIDIA/enroot/blob/9c6e979059699e93cfc1cce0967b78e54ad0e263/doc/cmd/import.md) to configure [AWS ECR](https://aws.amazon.com/ecr/) 
authentication out of the box ), while `make docker` builds container to use with GPUs and EFA. Run `make deploy` to deploy test cluster in `./test/cluster.yaml` assuming you have credentials in config file with default profile (`${HOME}/.aws`) and different parameters (AMI, subnets, ssh keys) are updated. + + +Run `make ami_pcluster_cpu` or `make ami_pcluster_cpu` to build AMI for GPU with EFA and CPU supporting [Pixys](https://github.com/NVIDIA/pyxis) (see [here](https://github.com/NVIDIA/enroot/blob/9c6e979059699e93cfc1cce0967b78e54ad0e263/doc/cmd/import.md) to configure [AWS ECR](https://aws.amazon.com/ecr/) authentication out of the box ), while `make docker` builds container to use with GPUs and EFA. Run `make deploy` to deploy test cluster in `./test/cluster.yaml` assuming you have credentials in config file with default profile (`${HOME}/.aws`) and different parameters (AMI, subnets, ssh keys) are updated. + + ## Notes * Review `packer-ami.pkr.hcl` for all available variables. * We are using shared filesystem (`/fsx`) for container cache, set this accordingly to your cluster in `roles/nvidia_enroot_pyxis/templates/enroot.conf` variable `ENROOT_CACHE_PATH`. * Review variables (dependency versions) in `./roles/*/defaults/main.yml` according to [Ansible directory structure](https://docs.ansible.com/ansible/latest/tips_tricks/sample_setup.html). +* These are based upon using the default VPCs found in the account. If this does not exist, the default VPC can be recreated with `aws ec2 create-default-vpc`. # Preflight @@ -15,7 +20,7 @@ Code is in `./preflight` directory. It consists of sanity checks for: * `torch.cuda.nccl.version()` in `preflight/preflight.sh` will return built in version, while searching for `NCCL version` if `NCCL_DEBUG=info` is exported will get preloaded version. 
-# using Deep Learning AMI +# Using the Deep Learning AMI [DLAMI](https://docs.aws.amazon.com/dlami/latest/devguide/what-is-dlami.html) contains common DL dependencies, it can be used with parallel cluster. We can use following configuration: ``` diff --git a/3.test_cases/0.nccl-tests/README.md b/3.test_cases/0.nccl-tests/README.md index 11a19c6d..4c2028b1 100644 --- a/3.test_cases/0.nccl-tests/README.md +++ b/3.test_cases/0.nccl-tests/README.md @@ -8,6 +8,7 @@ This guide assumes that you have the following: - A functional Slurm cluster on AWS. - Docker, [Pyxis](https://github.com/NVIDIA/pyxis) and [Enroot](https://github.com/NVIDIA/enroot) installed. +- Enroot requires libmd to compile and squashfs-tools to execute. - A shared directory mounted on `/apps` It is recommended that you use the templates in the architectures [directory](../../1.architectures) From 652ec5846922ba77a24d3895a3a027a9bf114d84 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 15 Aug 2023 10:34:35 -0500 Subject: [PATCH 012/648] Change documentation based on tjm@ improvements --- 2.amazon_machine_images/README.md | 9 +++++++-- 3.test_cases/0.nccl-tests/README.md | 1 + 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/2.amazon_machine_images/README.md b/2.amazon_machine_images/README.md index ca417d31..e920b389 100644 --- a/2.amazon_machine_images/README.md +++ b/2.amazon_machine_images/README.md @@ -1,9 +1,14 @@ # Usage -Run `make ami_gpu` or `make ami_cpu` to build AMI for GPU with EFA and CPU supporting [pyxies](https://github.com/NVIDIA/pyxis) (see [here](https://github.com/NVIDIA/enroot/blob/9c6e979059699e93cfc1cce0967b78e54ad0e263/doc/cmd/import.md) to configure [AWS ECR](https://aws.amazon.com/ecr/) authentication out of the box ), while `make docker` builds container to use with GPUs and EFA. 
Run `make deploy` to deploy test cluster in `./test/cluster.yaml` assuming you have credentials in config file with default profile (`${HOME}/.aws`) and different parameters (AMI, subnets, ssh keys) are updated. + + +Run `make ami_pcluster_cpu` or `make ami_pcluster_cpu` to build AMI for GPU with EFA and CPU supporting [Pixys](https://github.com/NVIDIA/pyxis) (see [here](https://github.com/NVIDIA/enroot/blob/9c6e979059699e93cfc1cce0967b78e54ad0e263/doc/cmd/import.md) to configure [AWS ECR](https://aws.amazon.com/ecr/) authentication out of the box ), while `make docker` builds container to use with GPUs and EFA. Run `make deploy` to deploy test cluster in `./test/cluster.yaml` assuming you have credentials in config file with default profile (`${HOME}/.aws`) and different parameters (AMI, subnets, ssh keys) are updated. + + ## Notes * Review `packer-ami.pkr.hcl` for all available variables. * We are using shared filesystem (`/fsx`) for container cache, set this accordingly to your cluster in `roles/nvidia_enroot_pyxis/templates/enroot.conf` variable `ENROOT_CACHE_PATH`. * Review variables (dependency versions) in `./roles/*/defaults/main.yml` according to [Ansible directory structure](https://docs.ansible.com/ansible/latest/tips_tricks/sample_setup.html). +* These are based upon using the default VPCs found in the account. If this does not exist, the default VPC can be recreated with `aws ec2 create-default-vpc`. # Preflight @@ -15,7 +20,7 @@ Code is in `./preflight` directory. It consists of sanity checks for: * `torch.cuda.nccl.version()` in `preflight/preflight.sh` will return built in version, while searching for `NCCL version` if `NCCL_DEBUG=info` is exported will get preloaded version. -# using Deep Learning AMI +# Using the Deep Learning AMI [DLAMI](https://docs.aws.amazon.com/dlami/latest/devguide/what-is-dlami.html) contains common DL dependencies, it can be used with parallel cluster. 
We can use following configuration: ``` diff --git a/3.test_cases/0.nccl-tests/README.md b/3.test_cases/0.nccl-tests/README.md index 11a19c6d..4c2028b1 100644 --- a/3.test_cases/0.nccl-tests/README.md +++ b/3.test_cases/0.nccl-tests/README.md @@ -8,6 +8,7 @@ This guide assumes that you have the following: - A functional Slurm cluster on AWS. - Docker, [Pyxis](https://github.com/NVIDIA/pyxis) and [Enroot](https://github.com/NVIDIA/enroot) installed. +- Enroot requires libmd to compile and squashfs-tools to execute. - A shared directory mounted on `/apps` It is recommended that you use the templates in the architectures [directory](../../1.architectures) From f492e97356d1511de6d2817f66c7aa801752f358 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 15 Aug 2023 10:38:38 -0500 Subject: [PATCH 013/648] Remove config with grafana files for cleanup --- .../create-additional-sg.sh | 7 -- .../pcluster-with-monitoring-config.yaml | 103 ------------------ 2 files changed, 110 deletions(-) delete mode 100755 1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/create-additional-sg.sh delete mode 100644 1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/pcluster-with-monitoring-config.yaml diff --git a/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/create-additional-sg.sh b/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/create-additional-sg.sh deleted file mode 100755 index 40e2a322..00000000 --- a/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/create-additional-sg.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -read -p "Please enter the vpc id of your cluster: " vpc_id -echo -e "creating a security group with $vpc_id..." 
-security_group=$(aws ec2 create-security-group --group-name grafana-sg --description "Open HTTP/HTTPS ports" --vpc-id ${vpc_id} --output text) -aws ec2 authorize-security-group-ingress --group-id ${security_group} --protocol tcp --port 443 --cidr 0.0.0.0/0 -aws ec2 authorize-security-group-ingress --group-id ${security_group} --protocol tcp --port 80 —-cidr 0.0.0.0/0 diff --git a/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/pcluster-with-monitoring-config.yaml b/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/pcluster-with-monitoring-config.yaml deleted file mode 100644 index b0fcd985..00000000 --- a/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/pcluster-with-monitoring-config.yaml +++ /dev/null @@ -1,103 +0,0 @@ -Imds: - ImdsSupport: v2.0 -Image: - Os: alinux2 - CustomAmi: ami-053d893ccc907c49c -Tags: - - Key: 'Grafana' - Value: 'true' -HeadNode: - InstanceType: c6i.8xlarge - Networking: - SubnetId: subnet-08cdcb1f4d6abc7f3 - AdditionalSecurityGroups: - - sg-0bbb389be5f1e6563 - Ssh: - KeyName: pcluster-key - LocalStorage: - RootVolume: - Size: 100 - DeleteOnTermination: true # that's your root and /home volume for users - CustomActions: - OnNodeConfigured: - Script: https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-monitoring/main/post-install.sh - Args: - - v0.9 - Iam: - AdditionalIamPolicies: # grant ECR, SSM and S3 read access - - Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore - - Policy: arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess - - Policy: arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly - - Policy: arn:aws:iam::aws:policy/CloudWatchFullAccess - - Policy: arn:aws:iam::aws:policy/AWSPriceListServiceFullAccess - - Policy: arn:aws:iam::aws:policy/AmazonSSMFullAccess - - Policy: arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess -Scheduling: - Scheduler: slurm - SlurmSettings: - ScaledownIdletime: 60 - SlurmQueues: - - Name: compute-gpu - 
CapacityType: ONDEMAND - Networking: - SubnetIds: - - subnet-04226fa682376b4f6 - PlacementGroup: - Enabled: true - ComputeSettings: - LocalStorage: - EphemeralVolume: - MountDir: /local_scratch # each instance has a local scratch on NVMe - RootVolume: - Size: 200 - CustomActions: - OnNodeConfigured: - Script: https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-monitoring/main/post-install.sh - Args: - - v0.9 - Iam: - AdditionalIamPolicies: - - Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore - - Policy: arn:aws:iam::aws:policy/CloudWatchFullAccess - - Policy: arn:aws:iam::aws:policy/AWSPriceListServiceFullAccess - - Policy: arn:aws:iam::aws:policy/AmazonSSMFullAccess - - Policy: arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess - # The capacity reservation section is recommended if you use instances - # with a targeted ODCRs. You can also use a capacity resource group and - # CapacityReservationResourceGroupArn if you want to regroup - # multiple reservations - #CapacityReservationTarget: - # CapacityReservationId: PLACEHOLDER_CAPACITY_RESERVATION_ID - ComputeResources: - - Name: distributed-ml - InstanceType: g4dn.12xlarge - MinCount: 0 # if min = max then capacity is maintained and will - MaxCount: 4 # not scale down - Efa: - Enabled: true -SharedStorage: - - MountDir: /fsx - Name: fsx - StorageType: FsxLustre - FsxLustreSettings: - StorageCapacity: 4800 # size it to your storage and throughput needs - PerUnitStorageThroughput: 250 # this can be increased to 500 and 100 - DeploymentType: PERSISTENT_2 - - Name: SharedEBS - StorageType: Ebs - MountDir: /apps # Store your shared apps & scripts here - EbsSettings: - VolumeType: gp3 - Size: 200 - Throughput: 300 - Iops: 6000 -Monitoring: - DetailedMonitoring: true - Logs: - CloudWatch: - Enabled: true # good for debug - Dashboards: - CloudWatch: - Enabled: false # provide basic dashboards - - From dd2b3ca8657eedc66997c0f30edf16330e365bf4 Mon Sep 17 00:00:00 2001 From: Pierre-Yves 
Aquilanti Date: Tue, 15 Aug 2023 10:38:38 -0500 Subject: [PATCH 014/648] Remove config with grafana files for cleanup --- .../create-additional-sg.sh | 7 -- .../pcluster-with-monitoring-config.yaml | 103 ------------------ 2 files changed, 110 deletions(-) delete mode 100755 1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/create-additional-sg.sh delete mode 100644 1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/pcluster-with-monitoring-config.yaml diff --git a/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/create-additional-sg.sh b/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/create-additional-sg.sh deleted file mode 100755 index 40e2a322..00000000 --- a/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/create-additional-sg.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -read -p "Please enter the vpc id of your cluster: " vpc_id -echo -e "creating a security group with $vpc_id..." -security_group=$(aws ec2 create-security-group --group-name grafana-sg --description "Open HTTP/HTTPS ports" --vpc-id ${vpc_id} --output text) -aws ec2 authorize-security-group-ingress --group-id ${security_group} --protocol tcp --port 443 --cidr 0.0.0.0/0 -aws ec2 authorize-security-group-ingress --group-id ${security_group} --protocol tcp --port 80 —-cidr 0.0.0.0/0 diff --git a/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/pcluster-with-monitoring-config.yaml b/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/pcluster-with-monitoring-config.yaml deleted file mode 100644 index b0fcd985..00000000 --- a/1.architectures/2.aws-parallelcluster/config_with_grafana_monitoring/pcluster-with-monitoring-config.yaml +++ /dev/null @@ -1,103 +0,0 @@ -Imds: - ImdsSupport: v2.0 -Image: - Os: alinux2 - CustomAmi: ami-053d893ccc907c49c -Tags: - - Key: 'Grafana' - Value: 'true' -HeadNode: - InstanceType: c6i.8xlarge - Networking: - SubnetId: 
subnet-08cdcb1f4d6abc7f3 - AdditionalSecurityGroups: - - sg-0bbb389be5f1e6563 - Ssh: - KeyName: pcluster-key - LocalStorage: - RootVolume: - Size: 100 - DeleteOnTermination: true # that's your root and /home volume for users - CustomActions: - OnNodeConfigured: - Script: https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-monitoring/main/post-install.sh - Args: - - v0.9 - Iam: - AdditionalIamPolicies: # grant ECR, SSM and S3 read access - - Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore - - Policy: arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess - - Policy: arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly - - Policy: arn:aws:iam::aws:policy/CloudWatchFullAccess - - Policy: arn:aws:iam::aws:policy/AWSPriceListServiceFullAccess - - Policy: arn:aws:iam::aws:policy/AmazonSSMFullAccess - - Policy: arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess -Scheduling: - Scheduler: slurm - SlurmSettings: - ScaledownIdletime: 60 - SlurmQueues: - - Name: compute-gpu - CapacityType: ONDEMAND - Networking: - SubnetIds: - - subnet-04226fa682376b4f6 - PlacementGroup: - Enabled: true - ComputeSettings: - LocalStorage: - EphemeralVolume: - MountDir: /local_scratch # each instance has a local scratch on NVMe - RootVolume: - Size: 200 - CustomActions: - OnNodeConfigured: - Script: https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-monitoring/main/post-install.sh - Args: - - v0.9 - Iam: - AdditionalIamPolicies: - - Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore - - Policy: arn:aws:iam::aws:policy/CloudWatchFullAccess - - Policy: arn:aws:iam::aws:policy/AWSPriceListServiceFullAccess - - Policy: arn:aws:iam::aws:policy/AmazonSSMFullAccess - - Policy: arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess - # The capacity reservation section is recommended if you use instances - # with a targeted ODCRs. 
You can also use a capacity resource group and - # CapacityReservationResourceGroupArn if you want to regroup - # multiple reservations - #CapacityReservationTarget: - # CapacityReservationId: PLACEHOLDER_CAPACITY_RESERVATION_ID - ComputeResources: - - Name: distributed-ml - InstanceType: g4dn.12xlarge - MinCount: 0 # if min = max then capacity is maintained and will - MaxCount: 4 # not scale down - Efa: - Enabled: true -SharedStorage: - - MountDir: /fsx - Name: fsx - StorageType: FsxLustre - FsxLustreSettings: - StorageCapacity: 4800 # size it to your storage and throughput needs - PerUnitStorageThroughput: 250 # this can be increased to 500 and 100 - DeploymentType: PERSISTENT_2 - - Name: SharedEBS - StorageType: Ebs - MountDir: /apps # Store your shared apps & scripts here - EbsSettings: - VolumeType: gp3 - Size: 200 - Throughput: 300 - Iops: 6000 -Monitoring: - DetailedMonitoring: true - Logs: - CloudWatch: - Enabled: true # good for debug - Dashboards: - CloudWatch: - Enabled: false # provide basic dashboards - - From 85fd9232e393d457ac4586d7614b756a06efdffd Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 15 Aug 2023 10:46:42 -0500 Subject: [PATCH 015/648] Remove duplicate file for post-install w/ containers --- ...ted-training-clususter-with-container.yaml | 87 ------------------- 1 file changed, 87 deletions(-) delete mode 100644 1.architectures/2.aws-parallelcluster/distributed-training-clususter-with-container.yaml diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-clususter-with-container.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-clususter-with-container.yaml deleted file mode 100644 index f2673403..00000000 --- a/1.architectures/2.aws-parallelcluster/distributed-training-clususter-with-container.yaml +++ /dev/null @@ -1,87 +0,0 @@ -Imds: - ImdsSupport: v2.0 -Image: - Os: alinux2 -HeadNode: - InstanceType: m5.8xlarge - Networking: - SubnetId: ${PUBLIC_SUBNET_ID} - Ssh: - KeyName: ${SSH_KEY} - 
LocalStorage: - RootVolume: - Size: 100 - DeleteOnTermination: true # that's your root and /home volume for users - Iam: - AdditionalIamPolicies: # grant ECR, SSM and S3 read access - - Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore - - Policy: arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess - - Policy: arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly - CustomActions: - OnNodeConfigured: - Sequence: - - Script: 'https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-post-install-scripts/main/docker/postinstall.sh' - - Script: 'https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-post-install-scripts/main/pyxis/postinstall.sh' -Scheduling: - Scheduler: slurm - SlurmSettings: - ScaledownIdletime: 60 - SlurmQueues: - - Name: compute-gpu - CapacityType: ONDEMAND - Networking: - SubnetIds: - - ${PRIVATE_SUBNET_ID} - ComputeSettings: - LocalStorage: - EphemeralVolume: - MountDir: /local_scratch # each instance has a local scratch on NVMe - RootVolume: - Size: 200 - ComputeResources: - - Name: distributed-ml - InstanceType: ${COMPUTE_INSTANCE_TYPE} - MinCount: ${COMPUTE_INSTANCE_MIN_COUNT} # if min = max then capacity is maintained and will - MaxCount: ${COMPUTE_INSTANCE_MAX_COUNT} # not scale down - Efa: - Enabled: true - # The capacity reservation section is recommended if you use instances - # with a targeted ODCRs. 
You can also use a capacity resource group and - # CapacityReservationResourceGroupArn if you want to regroup - # multiple reservations - CapacityReservationTarget: - CapacityReservationId: ${CAPACITY_RESERVATION_ID} - Networking: - PlacementGroup: - Enabled: true - Id: ${PLACEMENT_GROUP_ID} - CustomActions: - OnNodeConfigured: - Sequence: - - Script: 'https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-post-install-scripts/main/docker/postinstall.sh' - - Script: 'https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-post-install-scripts/main/pyxis/postinstall.sh' -SharedStorage: - - MountDir: /fsx - Name: fsx - StorageType: FsxLustre - FsxLustreSettings: - StorageCapacity: 4800 # size it to your storage and throughput needs - # PerUnitStorageThroughput: 250 # this can be increased to 500 and 100 - DeploymentType: SCRATCH_2 - ImportPath: s3://${S3_BUCKET_NAME} - - Name: SharedEBS - StorageType: Ebs - MountDir: /apps # Store your shared apps & scripts here - EbsSettings: - VolumeType: gp3 - Size: 200 - Throughput: 300 - Iops: 6000 -Monitoring: - DetailedMonitoring: true - Logs: - CloudWatch: - Enabled: true # good for debug - Dashboards: - CloudWatch: - Enabled: false # provide basic dashboards From 97a3c419a5c7cfa462d4051edf95b8abcfc8df66 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 15 Aug 2023 10:46:42 -0500 Subject: [PATCH 016/648] Remove duplicate file for post-install w/ containers --- ...ted-training-clususter-with-container.yaml | 87 ------------------- 1 file changed, 87 deletions(-) delete mode 100644 1.architectures/2.aws-parallelcluster/distributed-training-clususter-with-container.yaml diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-clususter-with-container.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-clususter-with-container.yaml deleted file mode 100644 index f2673403..00000000 --- 
a/1.architectures/2.aws-parallelcluster/distributed-training-clususter-with-container.yaml +++ /dev/null @@ -1,87 +0,0 @@ -Imds: - ImdsSupport: v2.0 -Image: - Os: alinux2 -HeadNode: - InstanceType: m5.8xlarge - Networking: - SubnetId: ${PUBLIC_SUBNET_ID} - Ssh: - KeyName: ${SSH_KEY} - LocalStorage: - RootVolume: - Size: 100 - DeleteOnTermination: true # that's your root and /home volume for users - Iam: - AdditionalIamPolicies: # grant ECR, SSM and S3 read access - - Policy: arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore - - Policy: arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess - - Policy: arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly - CustomActions: - OnNodeConfigured: - Sequence: - - Script: 'https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-post-install-scripts/main/docker/postinstall.sh' - - Script: 'https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-post-install-scripts/main/pyxis/postinstall.sh' -Scheduling: - Scheduler: slurm - SlurmSettings: - ScaledownIdletime: 60 - SlurmQueues: - - Name: compute-gpu - CapacityType: ONDEMAND - Networking: - SubnetIds: - - ${PRIVATE_SUBNET_ID} - ComputeSettings: - LocalStorage: - EphemeralVolume: - MountDir: /local_scratch # each instance has a local scratch on NVMe - RootVolume: - Size: 200 - ComputeResources: - - Name: distributed-ml - InstanceType: ${COMPUTE_INSTANCE_TYPE} - MinCount: ${COMPUTE_INSTANCE_MIN_COUNT} # if min = max then capacity is maintained and will - MaxCount: ${COMPUTE_INSTANCE_MAX_COUNT} # not scale down - Efa: - Enabled: true - # The capacity reservation section is recommended if you use instances - # with a targeted ODCRs. 
You can also use a capacity resource group and - # CapacityReservationResourceGroupArn if you want to regroup - # multiple reservations - CapacityReservationTarget: - CapacityReservationId: ${CAPACITY_RESERVATION_ID} - Networking: - PlacementGroup: - Enabled: true - Id: ${PLACEMENT_GROUP_ID} - CustomActions: - OnNodeConfigured: - Sequence: - - Script: 'https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-post-install-scripts/main/docker/postinstall.sh' - - Script: 'https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-post-install-scripts/main/pyxis/postinstall.sh' -SharedStorage: - - MountDir: /fsx - Name: fsx - StorageType: FsxLustre - FsxLustreSettings: - StorageCapacity: 4800 # size it to your storage and throughput needs - # PerUnitStorageThroughput: 250 # this can be increased to 500 and 100 - DeploymentType: SCRATCH_2 - ImportPath: s3://${S3_BUCKET_NAME} - - Name: SharedEBS - StorageType: Ebs - MountDir: /apps # Store your shared apps & scripts here - EbsSettings: - VolumeType: gp3 - Size: 200 - Throughput: 300 - Iops: 6000 -Monitoring: - DetailedMonitoring: true - Logs: - CloudWatch: - Enabled: true # good for debug - Dashboards: - CloudWatch: - Enabled: false # provide basic dashboards From 4ce7b5ed355b4974e2c364001782bb498ecf3799 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 15 Aug 2023 10:48:09 -0500 Subject: [PATCH 017/648] Update doc to resize Batch architecture diagram --- 1.architectures/3.aws-batch/README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/1.architectures/3.aws-batch/README.md b/1.architectures/3.aws-batch/README.md index cf7b0f03..f0f656bf 100644 --- a/1.architectures/3.aws-batch/README.md +++ b/1.architectures/3.aws-batch/README.md @@ -54,5 +54,4 @@ There are a few things to know as you evaluate this architecture: ## Architecture Diagram -![img](../../0.docs/batch-arch.png) - + From 4feae5e18af4119ecc5ac060bdd0975b97bbc7f9 Mon Sep 17 00:00:00 2001 From: Pierre-Yves 
Aquilanti Date: Tue, 15 Aug 2023 10:48:09 -0500 Subject: [PATCH 018/648] Update doc to resize Batch architecture diagram --- 1.architectures/3.aws-batch/README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/1.architectures/3.aws-batch/README.md b/1.architectures/3.aws-batch/README.md index cf7b0f03..f0f656bf 100644 --- a/1.architectures/3.aws-batch/README.md +++ b/1.architectures/3.aws-batch/README.md @@ -54,5 +54,4 @@ There are a few things to know as you evaluate this architecture: ## Architecture Diagram -![img](../../0.docs/batch-arch.png) - + From 5effc9a74f020edb30eb8c14633582678e36ea9c Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 15 Aug 2023 18:29:09 -0500 Subject: [PATCH 019/648] Change documentation for AMIs and playbooks cleanup --- .../2.aws-parallelcluster/README.md | 12 +- 2.amazon_machine_images/README.md | 146 +++++++++++++++--- .../playbook-dlami-neuron.yml | 1 - 2.amazon_machine_images/playbook-eks-gpu.yml | 2 - .../playbook-pcluster-gpu.yml | 1 - .../roles/nvidia_driver/tasks/main.yml | 2 +- 6 files changed, 134 insertions(+), 30 deletions(-) diff --git a/1.architectures/2.aws-parallelcluster/README.md b/1.architectures/2.aws-parallelcluster/README.md index 86e563d9..66d26575 100644 --- a/1.architectures/2.aws-parallelcluster/README.md +++ b/1.architectures/2.aws-parallelcluster/README.md @@ -4,11 +4,11 @@ Clusters in AWS ParallelCluster share similar components: a head-node, compute nodes (typically P or Trn EC2 family of instances) and one or multiple shared filesystems (FSx for Lustre). You will find below a section on the architectures themselves and how to deploy them. After this section, you will be brief on key elements of these templates (or things you wanna know to avoid potential mistakes). 
-### Initial setup +## Initial setup Before deploying a cluster, let's ensure you have AWS ParallelCluster (PC) accessible and that you have generated an EC2 key pair that you can use to connect to your head-node. If you have both PC installed and the key pair generated then skip this section and go deploy a cluster. -#### Install AWS ParallelCluster +### Install AWS ParallelCluster Run the script below to install AWS ParallelCluster in a Python virtual environment and access this environment. @@ -29,7 +29,7 @@ pip3 install awscli # install the AWS CLI pip3 install aws-parallelcluster # then AWS ParallelCluster ``` -#### Create your EC2 Keypair (if needed) +### Create your EC2 Keypair (if needed) The EC2 key pair enables your to connect to your cluster on the head-node through ssh or [AWS Systems Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-sessions-start.html). We will cover for SSH here. @@ -49,11 +49,16 @@ aws ec2 create-key-pair --key-name pcluster-workshop-key \ --query KeyMaterial \ --region $AWS_TARGET_REGION \ --output text > $KEYPAIR_NAME.pem + # the private part of your key pair is located in the current directory # we change the access rights to the current user only sudo chmod 600 $KEYPAIR_NAME.pem ``` +## Deploy Clusters + +We will show you how to + ### How to deploy a cluster To create the cluster use the command below and replace `CLUSTER_CONFIG_FILE` by the path to the cluster configuration file (see next section) and `NAME_OF_YOUR_CLUSTER` by the name of your cluster (`realpotato` is a cool name). @@ -87,7 +92,6 @@ The templates contain placeholder variables that you need to replace before use. - `PLACEHOLDER_SSH_KEY`: ID of the SSH key you'd like to use to connect to the head-node, use the name of the key. You can also use AWS Systems Manager Session Manager (SSM). - `PLACEHOLDER_CAPACITY_RESERVATION_ID`: if using a capacity reservation put the ID here (`cr-12356790abcd`). 
- ## AWS ParallelCluster must know ### Compute diff --git a/2.amazon_machine_images/README.md b/2.amazon_machine_images/README.md index e920b389..305dc778 100644 --- a/2.amazon_machine_images/README.md +++ b/2.amazon_machine_images/README.md @@ -1,31 +1,135 @@ -# Usage +# Amazon Machine Images for Self-Managed ML Workloads +This package contains a Packer script to build Amazon Machine Images for self-managed Ml training and inference. The images can be built for difference AWS ParallelCluster, EKS), platforms (CPU, GPU, Neuron) for training and inference workloads. -Run `make ami_pcluster_cpu` or `make ami_pcluster_cpu` to build AMI for GPU with EFA and CPU supporting [Pixys](https://github.com/NVIDIA/pyxis) (see [here](https://github.com/NVIDIA/enroot/blob/9c6e979059699e93cfc1cce0967b78e54ad0e263/doc/cmd/import.md) to configure [AWS ECR](https://aws.amazon.com/ecr/) authentication out of the box ), while `make docker` builds container to use with GPUs and EFA. Run `make deploy` to deploy test cluster in `./test/cluster.yaml` assuming you have credentials in config file with default profile (`${HOME}/.aws`) and different parameters (AMI, subnets, ssh keys) are updated. +### Initial setup +To build images you will need: +- **GNU Make**: install it via `yum` or `apt` if using Linux, via [`brew`](https://formulae.brew.sh/formula/make) if using OSX or [Chocolatey](https://community.chocolatey.org/packages/make) on MS Windows. +- **Packer**: it can be downloaded via [Hashicorp](https://www.packer.io/)'s website, you can also use [`brew`](https://formulae.brew.sh/formula/packer#default) on OSX. +- **Ansible**: get it via your package manager, we recommend via [`brew`](https://formulae.brew.sh/formula/ansible#default) if using OSX. -## Notes -* Review `packer-ami.pkr.hcl` for all available variables. 
-* We are using shared filesystem (`/fsx`) for container cache, set this accordingly to your cluster in `roles/nvidia_enroot_pyxis/templates/enroot.conf` variable `ENROOT_CACHE_PATH`. -* Review variables (dependency versions) in `./roles/*/defaults/main.yml` according to [Ansible directory structure](https://docs.ansible.com/ansible/latest/tips_tricks/sample_setup.html). -* These are based upon using the default VPCs found in the account. If this does not exist, the default VPC can be recreated with `aws ec2 create-default-vpc`. +### Build a custom AMI +Assuming that GNU Make, Packer and Ansible installed, you can build AMIs by typing `make` in your terminal with an argument corresponding to the desired AMI you want to build. -# Preflight -Code is in `./preflight` directory. It consists of sanity checks for: -* Nvidia GPUs -* EFA and Nvidia NCCL -* PyTorch -## Notes -* `torch.cuda.nccl.version()` in `preflight/preflight.sh` will return built in version, while searching for `NCCL version` if `NCCL_DEBUG=info` is exported will get preloaded version. +Here is an example to build a AMI for training or inference on GPU with AWS ParallelCluster: + +```bash +make ami_pcluster_gpu +``` + +The list of arguments you can use is shown in the table below with the AMI origin (what are we starting our custom AMI from) and notes regarding their content. 
+ +| Argument | Source AMI | Notes | +|--------------------|------------|------------------------------------------------------------------------------------| +| `ami_pcluster_cpu` | [ParallelCluster AMI](https://docs.aws.amazon.com/parallelcluster/latest/ug/pcluster.list-official-images-v3.html) | Creates a custom ParallelCluter AMI for CPU based workloads | +| `ami_pcluster_gpu` | [ParallelCluster AMI](https://docs.aws.amazon.com/parallelcluster/latest/ug/pcluster.list-official-images-v3.html) | Creates a custom ParallelCluter AMI for GPU based workloads, training or inference | +| `ami_base` | [EC2 AL2 AMI](https://aws.amazon.com/amazon-linux-2/) | EC2 AMI with updates, Docker, Lustre, EFA, Pyxis and Enroot (everything) | +| `ami_dlami_gpu` | [DLAMI](https://docs.aws.amazon.com/dlami/latest/devguide/appendix-ami-release-notes.html) | DL AMI with updated drivers, Pyxis, enroot, Lustre module client and Docker. | +| `ami_dlami_neuron` | [DLAMI](https://docs.aws.amazon.com/dlami/latest/devguide/appendix-ami-release-notes.html) | DL AMI for Neuron, same as above without the Nvidia stack | +| `ami_eks_gpu` | [EKS AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html#gpu-ami) | EKS GPU AMI with Lustre, EFA | +| `ami` |AMI dependent| Build all the images | + + +Once a build is launched, Packer will create an instance and install packages for a period of 10-25 minutes depending on how much software is installed. + +### Software stack: Ansible roles + +Each image is build using a base image and different Ansible roles used to install and configure the software stack installed on the AMI. The stack for each AMI is defined into *playbooks* files containing each a list of packages. + +You will find below the list of images you can build and which roles are deployed in these. The `ami` argument will build all of these images. 
+ +| Ansible Roles | `ami_pcluster_cpu` | `ami_pcluster_gpu`| `ami_base` | `ami_dlami_gpu` | `ami_dlami_neuron` | `ami_eks_gpu` | +|-----------------------|--------------------|-------------------|------------|-----------------|--------------------|---------------| +| `base` | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | +| `packages` | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | +| `aws_cliv2` | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | +| `aws_lustre` | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | +| `nvidia_enroot_pyxis` | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | +| `docker` | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | +| `nvidia_docker` | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | +| `nvidia_driver` | ❌ | ✅ | ✅ | ❌ | ❌ | ✅ | +| `nvidia_cuda` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | +| `nvidia_gdrcopy` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | +| `nvidia_nccl` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | +| `aws_efa` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | +| `aws_efa_ofi` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | + + +## Customizing your AMIs + +You can customize your AMIs by: +- Modifying existing roles to install specific software versions: for example a specific version of the EFA driver, Nvidia CUDA or Nvidia GPU driver. +- Add new roles to install or configure new software or libraries. + +Before going further, let's see how we defined our Ansible Roles. + +#### More on roles + +Our Ansible roles consist of 3 components: `defaults`, `files` and `tasks`. + +- `defaults`: contain default values for conditionals and versions of software being installed. +- `files`: hold files that will be copied to the custom AMI such as config files. +- `tasks`: is the list of tasks executed by Ansible to install and configure software. -# Using the Deep Learning AMI -[DLAMI](https://docs.aws.amazon.com/dlami/latest/devguide/what-is-dlami.html) contains common DL dependencies, it can be used with parallel cluster. -We can use following configuration: +#### Example + +To illustrate that, here's an example using the Nvidia Driver. 
By looking at the structure below you will see the 3 role components: `defaults`, `files` and `tasks` + +``` +├── nvidia_driver +│ ├── defaults +│ │ └── main.yml +│ ├── files +│ │ └── nvidia-persistenced-override.service +│ └── tasks +│ └── main.yml ``` -Build: - InstanceType: p2.xlarge - ParentImage: ami-123 + +##### `defaults` + +The defaults contain variables for the role and default values. In the case of the Nvidia driver we set the version to a default with `nvidia_driver_version` and if needed we can change it to a newer or older version. Then you will find two booleans that'll be used in the tasks as conditionals on whether to install the Nvidia Fabric Manager (required A100,H100) via `install_nvidia_fabric_manager` and allow a reboot after installing the driver using the variable `allow_reboot`. + +```yaml +nvidia_driver_version: "535.54.03" +install_nvidia_fabric_manager: true +allow_reboot: true ``` -where `ami-123` is ID of DLAMI of your choice. Run [pcluster build-image](https://docs.aws.amazon.com/parallelcluster/latest/ug/pcluster-v3.html) to add all pcluster dependencies. + +##### `files` + +In the case of the Nvidia driver we have 1 file in `files` named `nvidia-persistenced-override.service`. It is an SystemD service module that we use to force driver persistence. This file is copied to the custom AMI through one of the `tasks`. + +##### `tasks` + +The tasks are a list of instructions that Ansible will run through to deploy the role and will be based of Ansible default modules. Here's an excerpt of task below, feel free to open the original file to see the full list of tasks. + +```yaml +- name: "Install additional dependencies" + ansible.builtin.yum: + name: + - gcc10 + - kernel-devel + - kernel-headers + - dkms + state: present + +- name: "Blacklist nouveau" + community.general.kernel_blacklist: + name: nouveau + state: present +``` + +### Modify the roles + +As shared earlier, you can modify the roles and add new ones. 
Most users would modify the roles defaults to change the default versions of software being installed. If you need to modify the installation or configuration process you may want to modify the `tasks` file. + +Alternatively, you can add a new role to install a new software component, ensure that you respect the structure used by other roles. Don't forget to list your role in the playbook you want to use, for example `playbook-eks-gpu.yaml`, to add the role as part of your custom AMI deployment. + +## Notes +* Review `packer-ami.pkr.hcl` for all available variables. +* For Enroot, we are using shared filesystem (`/fsx`) for container cache, set this accordingly to your cluster in `roles/nvidia_enroot_pyxis/templates/enroot.conf` variable `ENROOT_CACHE_PATH`. +* Review variables (dependency versions) in `./roles/*/defaults/main.yml` according to [Ansible directory structure](https://docs.ansible.com/ansible/latest/tips_tricks/sample_setup.html). +* These are based upon using the default VPCs found in the account. If this does not exist, the default VPC can be recreated with `aws ec2 create-default-vpc`. 
diff --git a/2.amazon_machine_images/playbook-dlami-neuron.yml b/2.amazon_machine_images/playbook-dlami-neuron.yml index 5b0a3e71..b9748a58 100644 --- a/2.amazon_machine_images/playbook-dlami-neuron.yml +++ b/2.amazon_machine_images/playbook-dlami-neuron.yml @@ -13,4 +13,3 @@ - docker - nvidia_enroot_pyxis - aws_lustre - # - observability diff --git a/2.amazon_machine_images/playbook-eks-gpu.yml b/2.amazon_machine_images/playbook-eks-gpu.yml index 7040ff1f..b9141651 100644 --- a/2.amazon_machine_images/playbook-eks-gpu.yml +++ b/2.amazon_machine_images/playbook-eks-gpu.yml @@ -11,5 +11,3 @@ - packages - aws_efa - nvidia_driver - - # - observability diff --git a/2.amazon_machine_images/playbook-pcluster-gpu.yml b/2.amazon_machine_images/playbook-pcluster-gpu.yml index 4f21d219..72de65be 100644 --- a/2.amazon_machine_images/playbook-pcluster-gpu.yml +++ b/2.amazon_machine_images/playbook-pcluster-gpu.yml @@ -20,4 +20,3 @@ - nvidia_enroot_pyxis - aws_efa_ofi - aws_lustre - # - observability diff --git a/2.amazon_machine_images/roles/nvidia_driver/tasks/main.yml b/2.amazon_machine_images/roles/nvidia_driver/tasks/main.yml index 625e601e..88b488e1 100644 --- a/2.amazon_machine_images/roles/nvidia_driver/tasks/main.yml +++ b/2.amazon_machine_images/roles/nvidia_driver/tasks/main.yml @@ -33,7 +33,7 @@ name: nouveau state: present -- name: "Download the NVidia Installer" +- name: "Download the Nvidia Installer" ansible.builtin.get_url: url: "https://us.download.nvidia.com/tesla/{{ nvidia_driver_version }}/NVIDIA-Linux-x86_64-{{ nvidia_driver_version }}.run" dest: "/tmp/nvidia-installer.run" From bec4ae8cff2b1d62e556bd360734a7305e82d994 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 15 Aug 2023 18:29:09 -0500 Subject: [PATCH 020/648] Change documentation for AMIs and playbooks cleanup --- .../2.aws-parallelcluster/README.md | 12 +- 2.amazon_machine_images/README.md | 146 +++++++++++++++--- .../playbook-dlami-neuron.yml | 1 - 
2.amazon_machine_images/playbook-eks-gpu.yml | 2 - .../playbook-pcluster-gpu.yml | 1 - .../roles/nvidia_driver/tasks/main.yml | 2 +- 6 files changed, 134 insertions(+), 30 deletions(-) diff --git a/1.architectures/2.aws-parallelcluster/README.md b/1.architectures/2.aws-parallelcluster/README.md index 86e563d9..66d26575 100644 --- a/1.architectures/2.aws-parallelcluster/README.md +++ b/1.architectures/2.aws-parallelcluster/README.md @@ -4,11 +4,11 @@ Clusters in AWS ParallelCluster share similar components: a head-node, compute nodes (typically P or Trn EC2 family of instances) and one or multiple shared filesystems (FSx for Lustre). You will find below a section on the architectures themselves and how to deploy them. After this section, you will be brief on key elements of these templates (or things you wanna know to avoid potential mistakes). -### Initial setup +## Initial setup Before deploying a cluster, let's ensure you have AWS ParallelCluster (PC) accessible and that you have generated an EC2 key pair that you can use to connect to your head-node. If you have both PC installed and the key pair generated then skip this section and go deploy a cluster. -#### Install AWS ParallelCluster +### Install AWS ParallelCluster Run the script below to install AWS ParallelCluster in a Python virtual environment and access this environment. @@ -29,7 +29,7 @@ pip3 install awscli # install the AWS CLI pip3 install aws-parallelcluster # then AWS ParallelCluster ``` -#### Create your EC2 Keypair (if needed) +### Create your EC2 Keypair (if needed) The EC2 key pair enables your to connect to your cluster on the head-node through ssh or [AWS Systems Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-sessions-start.html). We will cover for SSH here. 
@@ -49,11 +49,16 @@ aws ec2 create-key-pair --key-name pcluster-workshop-key \
   --query KeyMaterial \
   --region $AWS_TARGET_REGION \
   --output text > $KEYPAIR_NAME.pem
+
 # the private part of your key pair is located in the current directory
 # we change the access rights to the current user only
 sudo chmod 600 $KEYPAIR_NAME.pem
 ```
 
+## Deploy Clusters
+
+We will show you how to deploy clusters in the sections below.
+
 ### How to deploy a cluster
 
 To create the cluster use the command below and replace `CLUSTER_CONFIG_FILE` by the path to the cluster configuration file (see next section) and `NAME_OF_YOUR_CLUSTER` by the name of your cluster (`realpotato` is a cool name).
@@ -87,7 +92,6 @@ The templates contain placeholder variables that you need to replace before use.
 - `PLACEHOLDER_SSH_KEY`: ID of the SSH key you'd like to use to connect to the head-node, use the name of the key. You can also use AWS Systems Manager Session Manager (SSM).
 - `PLACEHOLDER_CAPACITY_RESERVATION_ID`: if using a capacity reservation put the ID here (`cr-12356790abcd`).
 
-
 ## AWS ParallelCluster must know
 
 ### Compute
diff --git a/2.amazon_machine_images/README.md b/2.amazon_machine_images/README.md
index e920b389..305dc778 100644
--- a/2.amazon_machine_images/README.md
+++ b/2.amazon_machine_images/README.md
@@ -1,31 +1,135 @@
-# Usage
+# Amazon Machine Images for Self-Managed ML Workloads
 
+This package contains a Packer script to build Amazon Machine Images for self-managed ML training and inference. The images can be built for different environments (AWS ParallelCluster, EKS), platforms (CPU, GPU, Neuron) for training and inference workloads.
 
-Run `make ami_pcluster_cpu` or `make ami_pcluster_cpu` to build AMI for GPU with EFA and CPU supporting [Pixys](https://github.com/NVIDIA/pyxis) (see [here](https://github.com/NVIDIA/enroot/blob/9c6e979059699e93cfc1cce0967b78e54ad0e263/doc/cmd/import.md) to configure [AWS ECR](https://aws.amazon.com/ecr/) authentication out of the box ), while `make docker` builds container to use with GPUs and EFA.
Run `make deploy` to deploy test cluster in `./test/cluster.yaml` assuming you have credentials in config file with default profile (`${HOME}/.aws`) and different parameters (AMI, subnets, ssh keys) are updated. +### Initial setup +To build images you will need: +- **GNU Make**: install it via `yum` or `apt` if using Linux, via [`brew`](https://formulae.brew.sh/formula/make) if using OSX or [Chocolatey](https://community.chocolatey.org/packages/make) on MS Windows. +- **Packer**: it can be downloaded via [Hashicorp](https://www.packer.io/)'s website, you can also use [`brew`](https://formulae.brew.sh/formula/packer#default) on OSX. +- **Ansible**: get it via your package manager, we recommend via [`brew`](https://formulae.brew.sh/formula/ansible#default) if using OSX. -## Notes -* Review `packer-ami.pkr.hcl` for all available variables. -* We are using shared filesystem (`/fsx`) for container cache, set this accordingly to your cluster in `roles/nvidia_enroot_pyxis/templates/enroot.conf` variable `ENROOT_CACHE_PATH`. -* Review variables (dependency versions) in `./roles/*/defaults/main.yml` according to [Ansible directory structure](https://docs.ansible.com/ansible/latest/tips_tricks/sample_setup.html). -* These are based upon using the default VPCs found in the account. If this does not exist, the default VPC can be recreated with `aws ec2 create-default-vpc`. +### Build a custom AMI +Assuming that GNU Make, Packer and Ansible installed, you can build AMIs by typing `make` in your terminal with an argument corresponding to the desired AMI you want to build. -# Preflight -Code is in `./preflight` directory. It consists of sanity checks for: -* Nvidia GPUs -* EFA and Nvidia NCCL -* PyTorch -## Notes -* `torch.cuda.nccl.version()` in `preflight/preflight.sh` will return built in version, while searching for `NCCL version` if `NCCL_DEBUG=info` is exported will get preloaded version. 
+Here is an example to build an AMI for training or inference on GPU with AWS ParallelCluster:
+
+```bash
+make ami_pcluster_gpu
+```
+
+The list of arguments you can use is shown in the table below with the AMI origin (what are we starting our custom AMI from) and notes regarding their content.
+
+| Argument           | Source AMI | Notes                                                                              |
+|--------------------|------------|------------------------------------------------------------------------------------|
+| `ami_pcluster_cpu` | [ParallelCluster AMI](https://docs.aws.amazon.com/parallelcluster/latest/ug/pcluster.list-official-images-v3.html) | Creates a custom ParallelCluster AMI for CPU based workloads |
+| `ami_pcluster_gpu` | [ParallelCluster AMI](https://docs.aws.amazon.com/parallelcluster/latest/ug/pcluster.list-official-images-v3.html) | Creates a custom ParallelCluster AMI for GPU based workloads, training or inference |
+| `ami_base`         | [EC2 AL2 AMI](https://aws.amazon.com/amazon-linux-2/) | EC2 AMI with updates, Docker, Lustre, EFA, Pyxis and Enroot (everything) |
+| `ami_dlami_gpu`    | [DLAMI](https://docs.aws.amazon.com/dlami/latest/devguide/appendix-ami-release-notes.html) | DL AMI with updated drivers, Pyxis, enroot, Lustre module client and Docker. |
+| `ami_dlami_neuron` | [DLAMI](https://docs.aws.amazon.com/dlami/latest/devguide/appendix-ami-release-notes.html) | DL AMI for Neuron, same as above without the Nvidia stack |
+| `ami_eks_gpu`      | [EKS AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html#gpu-ami) | EKS GPU AMI with Lustre, EFA |
+| `ami`              |AMI dependent| Build all the images |
+
+
+Once a build is launched, Packer will create an instance and install packages for a period of 10-25 minutes depending on how much software is installed.
+
+### Software stack: Ansible roles
+
+Each image is built using a base image and different Ansible roles used to install and configure the software stack installed on the AMI.
The stack for each AMI is defined into *playbooks* files containing each a list of packages. + +You will find below the list of images you can build and which roles are deployed in these. The `ami` argument will build all of these images. + +| Ansible Roles | `ami_pcluster_cpu` | `ami_pcluster_gpu`| `ami_base` | `ami_dlami_gpu` | `ami_dlami_neuron` | `ami_eks_gpu` | +|-----------------------|--------------------|-------------------|------------|-----------------|--------------------|---------------| +| `base` | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | +| `packages` | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | +| `aws_cliv2` | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | +| `aws_lustre` | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | +| `nvidia_enroot_pyxis` | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | +| `docker` | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | +| `nvidia_docker` | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | +| `nvidia_driver` | ❌ | ✅ | ✅ | ❌ | ❌ | ✅ | +| `nvidia_cuda` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | +| `nvidia_gdrcopy` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | +| `nvidia_nccl` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | +| `aws_efa` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | +| `aws_efa_ofi` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | + + +## Customizing your AMIs + +You can customize your AMIs by: +- Modifying existing roles to install specific software versions: for example a specific version of the EFA driver, Nvidia CUDA or Nvidia GPU driver. +- Add new roles to install or configure new software or libraries. + +Before going further, let's see how we defined our Ansible Roles. + +#### More on roles + +Our Ansible roles consist of 3 components: `defaults`, `files` and `tasks`. + +- `defaults`: contain default values for conditionals and versions of software being installed. +- `files`: hold files that will be copied to the custom AMI such as config files. +- `tasks`: is the list of tasks executed by Ansible to install and configure software. -# Using the Deep Learning AMI -[DLAMI](https://docs.aws.amazon.com/dlami/latest/devguide/what-is-dlami.html) contains common DL dependencies, it can be used with parallel cluster. 
-We can use following configuration: +#### Example + +To illustrate that, here's an example using the Nvidia Driver. By looking at the structure below you will see the 3 role components: `defaults`, `files` and `tasks` + +``` +├── nvidia_driver +│ ├── defaults +│ │ └── main.yml +│ ├── files +│ │ └── nvidia-persistenced-override.service +│ └── tasks +│ └── main.yml ``` -Build: - InstanceType: p2.xlarge - ParentImage: ami-123 + +##### `defaults` + +The defaults contain variables for the role and default values. In the case of the Nvidia driver we set the version to a default with `nvidia_driver_version` and if needed we can change it to a newer or older version. Then you will find two booleans that'll be used in the tasks as conditionals on whether to install the Nvidia Fabric Manager (required A100,H100) via `install_nvidia_fabric_manager` and allow a reboot after installing the driver using the variable `allow_reboot`. + +```yaml +nvidia_driver_version: "535.54.03" +install_nvidia_fabric_manager: true +allow_reboot: true ``` -where `ami-123` is ID of DLAMI of your choice. Run [pcluster build-image](https://docs.aws.amazon.com/parallelcluster/latest/ug/pcluster-v3.html) to add all pcluster dependencies. + +##### `files` + +In the case of the Nvidia driver we have 1 file in `files` named `nvidia-persistenced-override.service`. It is an SystemD service module that we use to force driver persistence. This file is copied to the custom AMI through one of the `tasks`. + +##### `tasks` + +The tasks are a list of instructions that Ansible will run through to deploy the role and will be based of Ansible default modules. Here's an excerpt of task below, feel free to open the original file to see the full list of tasks. 
+ +```yaml +- name: "Install additional dependencies" + ansible.builtin.yum: + name: + - gcc10 + - kernel-devel + - kernel-headers + - dkms + state: present + +- name: "Blacklist nouveau" + community.general.kernel_blacklist: + name: nouveau + state: present +``` + +### Modify the roles + +As shared earlier, you can modify the roles and add new ones. Most users would modify the roles defaults to change the default versions of software being installed. If you need to modify the installation or configuration process you may want to modify the `tasks` file. + +Alternatively, you can add a new role to install a new software component, ensure that you respect the structure used by other roles. Don't forget to list your role in the playbook you want to use, for example `playbook-eks-gpu.yaml`, to add the role as part of your custom AMI deployment. + +## Notes +* Review `packer-ami.pkr.hcl` for all available variables. +* For Enroot, we are using shared filesystem (`/fsx`) for container cache, set this accordingly to your cluster in `roles/nvidia_enroot_pyxis/templates/enroot.conf` variable `ENROOT_CACHE_PATH`. +* Review variables (dependency versions) in `./roles/*/defaults/main.yml` according to [Ansible directory structure](https://docs.ansible.com/ansible/latest/tips_tricks/sample_setup.html). +* These are based upon using the default VPCs found in the account. If this does not exist, the default VPC can be recreated with `aws ec2 create-default-vpc`. 
diff --git a/2.amazon_machine_images/playbook-dlami-neuron.yml b/2.amazon_machine_images/playbook-dlami-neuron.yml index 5b0a3e71..b9748a58 100644 --- a/2.amazon_machine_images/playbook-dlami-neuron.yml +++ b/2.amazon_machine_images/playbook-dlami-neuron.yml @@ -13,4 +13,3 @@ - docker - nvidia_enroot_pyxis - aws_lustre - # - observability diff --git a/2.amazon_machine_images/playbook-eks-gpu.yml b/2.amazon_machine_images/playbook-eks-gpu.yml index 7040ff1f..b9141651 100644 --- a/2.amazon_machine_images/playbook-eks-gpu.yml +++ b/2.amazon_machine_images/playbook-eks-gpu.yml @@ -11,5 +11,3 @@ - packages - aws_efa - nvidia_driver - - # - observability diff --git a/2.amazon_machine_images/playbook-pcluster-gpu.yml b/2.amazon_machine_images/playbook-pcluster-gpu.yml index 4f21d219..72de65be 100644 --- a/2.amazon_machine_images/playbook-pcluster-gpu.yml +++ b/2.amazon_machine_images/playbook-pcluster-gpu.yml @@ -20,4 +20,3 @@ - nvidia_enroot_pyxis - aws_efa_ofi - aws_lustre - # - observability diff --git a/2.amazon_machine_images/roles/nvidia_driver/tasks/main.yml b/2.amazon_machine_images/roles/nvidia_driver/tasks/main.yml index 625e601e..88b488e1 100644 --- a/2.amazon_machine_images/roles/nvidia_driver/tasks/main.yml +++ b/2.amazon_machine_images/roles/nvidia_driver/tasks/main.yml @@ -33,7 +33,7 @@ name: nouveau state: present -- name: "Download the NVidia Installer" +- name: "Download the Nvidia Installer" ansible.builtin.get_url: url: "https://us.download.nvidia.com/tesla/{{ nvidia_driver_version }}/NVIDIA-Linux-x86_64-{{ nvidia_driver_version }}.run" dest: "/tmp/nvidia-installer.run" From b705d6c2dfb4e781ca8b471e11d941cb77185d8e Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 15 Aug 2023 18:34:14 -0500 Subject: [PATCH 021/648] Fix wording in Batch template --- 1.architectures/3.aws-batch/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/1.architectures/3.aws-batch/README.md b/1.architectures/3.aws-batch/README.md index 
f0f656bf..20578a7c 100644 --- a/1.architectures/3.aws-batch/README.md +++ b/1.architectures/3.aws-batch/README.md @@ -34,7 +34,7 @@ The templates takes parameters that are mandatory and optional, see below for mo ## Deploy with the AWS CLI -The command to deploy the template through the CLI is shown below. Feel free to edit for your own configuration and parameters. +The command to deploy the template through the CLI is shown below. Please edit the parameters values with your own configuration. ```bash From 9929a46668e96c2054ab629e47aa82ddcaa8b12c Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 15 Aug 2023 18:34:14 -0500 Subject: [PATCH 022/648] Fix wording in Batch template --- 1.architectures/3.aws-batch/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/1.architectures/3.aws-batch/README.md b/1.architectures/3.aws-batch/README.md index f0f656bf..20578a7c 100644 --- a/1.architectures/3.aws-batch/README.md +++ b/1.architectures/3.aws-batch/README.md @@ -34,7 +34,7 @@ The templates takes parameters that are mandatory and optional, see below for mo ## Deploy with the AWS CLI -The command to deploy the template through the CLI is shown below. Feel free to edit for your own configuration and parameters. +The command to deploy the template through the CLI is shown below. Please edit the parameters values with your own configuration. 
```bash From da9273302c9c3fd5318ac9471e77c1ba992d571a Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 16 Aug 2023 11:38:22 -0500 Subject: [PATCH 023/648] Change main readme file --- README.md | 87 +++++++++---------------------------------------------- 1 file changed, 14 insertions(+), 73 deletions(-) diff --git a/README.md b/README.md index 86abda22..7dc1d22a 100644 --- a/README.md +++ b/README.md @@ -6,11 +6,7 @@ The major components of this directory are: ```bash reference-architectures/ -|-- 1.architectures # CloudFormation templates for various ref arch -| |-- 0.s3 # Create S3 bucket -| |-- 1.vpc_network # Create VPC -| |-- 2.aws-parallelcluster # Parallel Cluster -| `-- 3.aws-batch # AWS Batch +|-- 1.architectures # CloudFormation templates for reference arch |-- 2.amazon_machine_images/ # Scripts to create AMIs |-- 3.test_cases/ # Reference test cases and/or benchmark scripts `-- ... @@ -18,82 +14,27 @@ reference-architectures/ **NOTE**: the architectures are designed to work with the S3 bucket and VPC created using reference templates `1.architectures/0.s3/` and `1.architectures/1.vpc_network/`. _You're strongly recommended to deploy these two templates **before** deploying any of the reference architectures._ -## 1. AWS ParallelCluster -![AWS ParallelCluster diagram](0.docs/parallelcluster-arch-diagram.png) +## 1. Architectures -This reference architecture consists of the following components: +Architectures are located in `1.architectures` and consists of utilities and service related architectures -1. **Compute** is represented through the following: - - **head-node**: login and controller node that users will use to submit jobs. It is set to an [m5.8xlarge](https://aws.amazon.com/ec2/instance-types/m5/). - - **compute-gpu**: is the queue (or partition) to run your ML training jobs. 
The instances are either [p4de.24xlarge](https://aws.amazon.com/ec2/instance-types/p4/) or [trn1.32xlarge](https://aws.amazon.com/ec2/instance-types/trn1/) which are recommended for training, especially for LLMs or large models. The default number of instances in the queue has been set to _4_ and can be changed as necessary. - - **inference-gpu**: is an optional queue that can be used to run inference workloads and uses [g5.12xlarge](https://aws.amazon.com/ec2/instance-types/m5/). +| Name | Category | Usage +|-------------------------|----------|-----------------------------------------------------| +| `0.s3` | Storage | Create an S3 bucket | +| `1.vpc_network` | Network | Create a VPC with subnets required resources | +| `2.aws-parallelcluster` | Compute | Cluster templates for GPU & custom silicon training | +| `3.aws-batch` | Compute | AWS Batch template for distributed training | -2. **Storage** comes in 3 flavors: - - **Local**: head and compute nodes have 200GiB of EBS volume mounted on `/`. In addition, the headnode has an EBS volume of `200GiB` mounted on `/apps` The compute nodes have NVMe drives striped in RAID0 and mounted as `/local_scratch`. - - **File network storage**: The head-node shares `/home` and `/apps` to the whole cluster through NFS. These directories are automatically mounted on every instance in the cluster and accessible through the same path. `/home` is a regular home directory, `/apps` is a shared directory where applications or shared files can be stored. Please note that none should be used for data intensive tasks. - - **High performance filesystem**: An [FSx for Lustre](https://docs.aws.amazon.com/fsx/latest/LustreGuide/what-is.html) filesystem can be access from every cluster node on `/fsx`. This is where users would store their datasets. This file system has been sized to 4.8TiB and provides 1.2GB/s of aggregated throughput. 
You can modify its size and the throughput per TB provisioned in the config file following the service [documentation](https://docs.aws.amazon.com/fsx/latest/LustreGuide/performance.html). +More will come, feel free to add new ones (EKS, Ray?) -3. **Network**: Applications will make use of [Elastic Fabric Adapter (EFA)](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) for distributed training. In addition, instances will be placed to one another through the use of placement groups or assistance from AWS. +## 2. Custom Amazon Machine Images - Placement groups are only relevant for distributed training, not inference. You may remove the placement groups declaration in the config file if requested. In which case you will need to delete these lines +Custom machine images can be built using [Packer](www.packer.io) for AWS ParallelCluster, Amazon EKS and plain EC2. These images are based are on Ansible roles and playbooks. - ```yaml - PlacementGroup: - Enabled: true - ``` +## 3. Test Cases: Support Matrix -### 1.1. Pre-Deployment decision: AMI vs post-install scripts - -Parallel Cluster offers you two options to install applications & libraries into the nodes via _custom AMI_ or _post-install scripts_. - -- **Custom AMI**: the image needs to be pre-built **before** creating a cluster. They are preferred for drivers, kernel modules or libraries regularly used and seeing little to no updates. This option is preferred to ensure repeatability. To create an AMI, refer to `2.amazon_machine_images/`. Once AMI is created, you can tell Parallel Cluster to use custom images as follows: - - ```yaml - Image: - Os: alinux2 #system type - CustomAmi: PLACEHOLDER_CUSTOM_AMI_ID #replace by custom imageAMI ID - ``` - - If not using a custom image, remove the `CustomAmi` field. -- **Post-install scripts**: these scripts will be executed **during** deployment which is at instance boot (head+compute). 
This option is recommended for quick testing and will increase instance boot time. You can run post-install scripts through `CustomActions` for the head node and the compute nodes. - -### 1.2. Select cluster template - -Choose one of the available Parallel Cluster templates. Each template provides an example of cluster for different use cases. The architectures most commonly used are: - -- `distributed-training-gpu`: base template, uses the default AMI with no software installed. -- `distributed-training-p4de_custom_ami`: base cluster with a custom AMI to install custom software. -- `distributed-training-p4de_postinstall_scripts`: same as above but uses post-install scripts to install Docker, Pyxis and Enroot. - -Alternatively you can refer to these architectures for more specific use cases: - -- `distributed-training-p4de_batch-inference-g5_custom_ami`: multi-queue template with p4de for training and g5 for inference. It assumes a custom AMI. -- `distributed-training-trn1_custom_ami`: uses Trainium instances for distributed training. Assumes a custom AMI. - -### 1.3. Deploy a cluster - -The templates contain placeholder variables that you **must** to replace before use. - -- `PLACEHOLDER_CUSTOM_AMI_ID`: if using a custom AMI then replace with the custom AMI ID (`ami-12356790abcd`). -- `PLACEHOLDER_PUBLIC_SUBNET`: change to the id of a public subnet to host the head-node (`subnet-12356790abcd`). -- `PLACEHOLDER_PRIVATE_SUBNET`: change to the id of a public subnet to host the compute nodes (`subnet-12356790abcd`). -- `PLACEHOLDER_SSH_KEY`: ID of the SSH key you'd like to use to connect to the head-node. You can also use AWS Systems Manager Session Manager (SSM). -- `PLACEHOLDER_CAPACITY_RESERVATION_ID`: if using a capacity reservation put the ID here (`cr-12356790abcd`). 
- -By this point, if you haven't done so, please install Parallel Cluster CLI on your workstation by following this [guide](https://docs.aws.amazon.com/parallelcluster/latest/ug/what-is-aws-parallelcluster.html). - -To create the cluster, use the command below: - -```bash -pcluster create-cluster --cluster-configuration cluster.yaml --cluster-name cluster-g4v7 --region us-east-1 -``` - -You can follow the [documentation](https://docs.aws.amazon.com/parallelcluster/latest/ug/commands-v3.html) to review the list of all AWS ParallelCluster commands. - -## 2. Test Cases: Support Matrix - -All test cases are under `3.test_cases/`. +All test cases are under `3.test_cases/`. You can go in each test case directory to learn how to run it. | Test case | PC | EKS | AWS Batch | | ----------------------- | --- | --- | --------- | From 799912ebf267fecd90e7b18bbb357b56f0e3e9a5 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 16 Aug 2023 11:38:22 -0500 Subject: [PATCH 024/648] Change main readme file --- README.md | 87 +++++++++---------------------------------------------- 1 file changed, 14 insertions(+), 73 deletions(-) diff --git a/README.md b/README.md index 86abda22..7dc1d22a 100644 --- a/README.md +++ b/README.md @@ -6,11 +6,7 @@ The major components of this directory are: ```bash reference-architectures/ -|-- 1.architectures # CloudFormation templates for various ref arch -| |-- 0.s3 # Create S3 bucket -| |-- 1.vpc_network # Create VPC -| |-- 2.aws-parallelcluster # Parallel Cluster -| `-- 3.aws-batch # AWS Batch +|-- 1.architectures # CloudFormation templates for reference arch |-- 2.amazon_machine_images/ # Scripts to create AMIs |-- 3.test_cases/ # Reference test cases and/or benchmark scripts `-- ... @@ -18,82 +14,27 @@ reference-architectures/ **NOTE**: the architectures are designed to work with the S3 bucket and VPC created using reference templates `1.architectures/0.s3/` and `1.architectures/1.vpc_network/`. 
_You're strongly recommended to deploy these two templates **before** deploying any of the reference architectures._ -## 1. AWS ParallelCluster -![AWS ParallelCluster diagram](0.docs/parallelcluster-arch-diagram.png) +## 1. Architectures -This reference architecture consists of the following components: +Architectures are located in `1.architectures` and consists of utilities and service related architectures -1. **Compute** is represented through the following: - - **head-node**: login and controller node that users will use to submit jobs. It is set to an [m5.8xlarge](https://aws.amazon.com/ec2/instance-types/m5/). - - **compute-gpu**: is the queue (or partition) to run your ML training jobs. The instances are either [p4de.24xlarge](https://aws.amazon.com/ec2/instance-types/p4/) or [trn1.32xlarge](https://aws.amazon.com/ec2/instance-types/trn1/) which are recommended for training, especially for LLMs or large models. The default number of instances in the queue has been set to _4_ and can be changed as necessary. - - **inference-gpu**: is an optional queue that can be used to run inference workloads and uses [g5.12xlarge](https://aws.amazon.com/ec2/instance-types/m5/). +| Name | Category | Usage +|-------------------------|----------|-----------------------------------------------------| +| `0.s3` | Storage | Create an S3 bucket | +| `1.vpc_network` | Network | Create a VPC with subnets required resources | +| `2.aws-parallelcluster` | Compute | Cluster templates for GPU & custom silicon training | +| `3.aws-batch` | Compute | AWS Batch template for distributed training | -2. **Storage** comes in 3 flavors: - - **Local**: head and compute nodes have 200GiB of EBS volume mounted on `/`. In addition, the headnode has an EBS volume of `200GiB` mounted on `/apps` The compute nodes have NVMe drives striped in RAID0 and mounted as `/local_scratch`. - - **File network storage**: The head-node shares `/home` and `/apps` to the whole cluster through NFS. 
These directories are automatically mounted on every instance in the cluster and accessible through the same path. `/home` is a regular home directory, `/apps` is a shared directory where applications or shared files can be stored. Please note that none should be used for data intensive tasks. - - **High performance filesystem**: An [FSx for Lustre](https://docs.aws.amazon.com/fsx/latest/LustreGuide/what-is.html) filesystem can be access from every cluster node on `/fsx`. This is where users would store their datasets. This file system has been sized to 4.8TiB and provides 1.2GB/s of aggregated throughput. You can modify its size and the throughput per TB provisioned in the config file following the service [documentation](https://docs.aws.amazon.com/fsx/latest/LustreGuide/performance.html). +More will come, feel free to add new ones (EKS, Ray?) -3. **Network**: Applications will make use of [Elastic Fabric Adapter (EFA)](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) for distributed training. In addition, instances will be placed to one another through the use of placement groups or assistance from AWS. +## 2. Custom Amazon Machine Images - Placement groups are only relevant for distributed training, not inference. You may remove the placement groups declaration in the config file if requested. In which case you will need to delete these lines +Custom machine images can be built using [Packer](www.packer.io) for AWS ParallelCluster, Amazon EKS and plain EC2. These images are based are on Ansible roles and playbooks. - ```yaml - PlacementGroup: - Enabled: true - ``` +## 3. Test Cases: Support Matrix -### 1.1. Pre-Deployment decision: AMI vs post-install scripts - -Parallel Cluster offers you two options to install applications & libraries into the nodes via _custom AMI_ or _post-install scripts_. - -- **Custom AMI**: the image needs to be pre-built **before** creating a cluster. 
They are preferred for drivers, kernel modules or libraries regularly used and seeing little to no updates. This option is preferred to ensure repeatability. To create an AMI, refer to `2.amazon_machine_images/`. Once AMI is created, you can tell Parallel Cluster to use custom images as follows: - - ```yaml - Image: - Os: alinux2 #system type - CustomAmi: PLACEHOLDER_CUSTOM_AMI_ID #replace by custom imageAMI ID - ``` - - If not using a custom image, remove the `CustomAmi` field. -- **Post-install scripts**: these scripts will be executed **during** deployment which is at instance boot (head+compute). This option is recommended for quick testing and will increase instance boot time. You can run post-install scripts through `CustomActions` for the head node and the compute nodes. - -### 1.2. Select cluster template - -Choose one of the available Parallel Cluster templates. Each template provides an example of cluster for different use cases. The architectures most commonly used are: - -- `distributed-training-gpu`: base template, uses the default AMI with no software installed. -- `distributed-training-p4de_custom_ami`: base cluster with a custom AMI to install custom software. -- `distributed-training-p4de_postinstall_scripts`: same as above but uses post-install scripts to install Docker, Pyxis and Enroot. - -Alternatively you can refer to these architectures for more specific use cases: - -- `distributed-training-p4de_batch-inference-g5_custom_ami`: multi-queue template with p4de for training and g5 for inference. It assumes a custom AMI. -- `distributed-training-trn1_custom_ami`: uses Trainium instances for distributed training. Assumes a custom AMI. - -### 1.3. Deploy a cluster - -The templates contain placeholder variables that you **must** to replace before use. - -- `PLACEHOLDER_CUSTOM_AMI_ID`: if using a custom AMI then replace with the custom AMI ID (`ami-12356790abcd`). 
-- `PLACEHOLDER_PUBLIC_SUBNET`: change to the id of a public subnet to host the head-node (`subnet-12356790abcd`). -- `PLACEHOLDER_PRIVATE_SUBNET`: change to the id of a public subnet to host the compute nodes (`subnet-12356790abcd`). -- `PLACEHOLDER_SSH_KEY`: ID of the SSH key you'd like to use to connect to the head-node. You can also use AWS Systems Manager Session Manager (SSM). -- `PLACEHOLDER_CAPACITY_RESERVATION_ID`: if using a capacity reservation put the ID here (`cr-12356790abcd`). - -By this point, if you haven't done so, please install Parallel Cluster CLI on your workstation by following this [guide](https://docs.aws.amazon.com/parallelcluster/latest/ug/what-is-aws-parallelcluster.html). - -To create the cluster, use the command below: - -```bash -pcluster create-cluster --cluster-configuration cluster.yaml --cluster-name cluster-g4v7 --region us-east-1 -``` - -You can follow the [documentation](https://docs.aws.amazon.com/parallelcluster/latest/ug/commands-v3.html) to review the list of all AWS ParallelCluster commands. - -## 2. Test Cases: Support Matrix - -All test cases are under `3.test_cases/`. +All test cases are under `3.test_cases/`. You can go in each test case directory to learn how to run it. 
| Test case | PC | EKS | AWS Batch | | ----------------------- | --- | --- | --------- | From 1a6659e60a1257c313e87e89064f96acb4927ab6 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Mon, 21 Aug 2023 20:31:50 -0500 Subject: [PATCH 025/648] Update on Nemo documentation --- .../1.vpc_network/1.vpc-all-az.yaml | 6 +- 2.amazon_machine_images/packer-ami.pkr.hcl | 4 +- ...> 0.NemoMegatron-aws-optimized.Dockerfile} | 0 3.test_cases/2.nemo-launcher-23.03/README.md | 101 +++++++++++------- .../build-enroot-image.sh | 2 +- README.md | 14 +-- 6 files changed, 73 insertions(+), 54 deletions(-) rename 3.test_cases/2.nemo-launcher-23.03/{Dockerfile => 0.NemoMegatron-aws-optimized.Dockerfile} (100%) diff --git a/1.architectures/1.vpc_network/1.vpc-all-az.yaml b/1.architectures/1.vpc_network/1.vpc-all-az.yaml index e182fade..8a992be7 100644 --- a/1.architectures/1.vpc_network/1.vpc-all-az.yaml +++ b/1.architectures/1.vpc_network/1.vpc-all-az.yaml @@ -40,11 +40,11 @@ Metadata: NumberOfAZs: default: Number of Availability Zones CreatePublicSubnets: - default: Create a Public Subnet + default: Create public subnets CreateS3Endpoint: - default: Create an S3 Endpoint + default: Create an S3 endpoint CreateDynamoDBEndpoint: - default: Create a DynamoDB Endpoint + default: Create a DynamoDB endpoint ###################### ## Stack Parameters ## diff --git a/2.amazon_machine_images/packer-ami.pkr.hcl b/2.amazon_machine_images/packer-ami.pkr.hcl index 19fba55a..986482fe 100644 --- a/2.amazon_machine_images/packer-ami.pkr.hcl +++ b/2.amazon_machine_images/packer-ami.pkr.hcl @@ -23,7 +23,7 @@ variable "ami_version" { variable "parallel_cluster_version" { type = string - default = "3.6.0" + default = "3.6.1" } variable "eks_version" { @@ -33,7 +33,7 @@ variable "eks_version" { variable "aws_region" { type = string - default = "us-west-2" + default = "us-east-1" } variable "instance_type" { diff --git a/3.test_cases/2.nemo-launcher-23.03/Dockerfile 
b/3.test_cases/2.nemo-launcher-23.03/0.NemoMegatron-aws-optimized.Dockerfile similarity index 100% rename from 3.test_cases/2.nemo-launcher-23.03/Dockerfile rename to 3.test_cases/2.nemo-launcher-23.03/0.NemoMegatron-aws-optimized.Dockerfile diff --git a/3.test_cases/2.nemo-launcher-23.03/README.md b/3.test_cases/2.nemo-launcher-23.03/README.md index 5676c6b8..cd740133 100644 --- a/3.test_cases/2.nemo-launcher-23.03/README.md +++ b/3.test_cases/2.nemo-launcher-23.03/README.md @@ -11,10 +11,20 @@ Table of contents: ## 1. Pre-requisites -1. As of this writing, the base image - [bignlp-training](https://registry.ngc.nvidia.com/orgs/ea-bignlp/containers/bignlp-training) is - still under NVIDIA's open-beta, and you need to register - [here](https://developer.nvidia.com/nemo-framework-open-beta). +The following pre-requisites are needed to run this example: + +- You have access to the base image [`bignlp-training`](https://registry.ngc.nvidia.com/orgs/ea-bignlp/containers/bignlp-training) is available through NVIDIA's open-beta [here](https://developer.nvidia.com/nemo-framework-open-beta). +- Docker, Enroot and Pixys installed on the cluster and available on all nodes. It is assumed you are using a Custom AMI ([example](../../2.amazon_machine_images)) + + +You will need to setup the following environment variables before running the scripts: + +```bash +export VERSION=23.03 +export REPO=aws-nemo-megatron +export TAG=$VERSION-py3 +export TARGET_PATH=/fsx/nemo-launcher-$VERSION +``` 2. This directory is already located on the FSx Lustre filesystem. For simplicity, assume the path is `/fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03/`. @@ -30,59 +40,68 @@ Table of contents: ## 2. Build AWS-optimized Nemo-Launcher image -All the way to the enroot format. +You will retrieve the container image from Nvidia, build an optimized container for EFA and, convert it into an Enroot file so we can run it on our cluster. +1. You have a registered account with Nvidia and can access NGC. 
Retrieve the NGC API key following [instructions from Nvidia](https://docs.nvidia.com/ngc/gpu-cloud/ngc-user-guide/index.html#generating-api-key). +2. Configure NGC using the command below; when requested, use `$oauthtoken` for the login and the API key from NGC for the password. ```bash docker login nvcr.io -# Username: $oauthtoken -# Password: - -/usr/bin/time bash ./build-enroot-image.sh -# EC2: us-west-2 / m5.4xlarge / EBS gp3 3k IOPS, 350 MB/s throughput -# -# docker pull (cold): 3:34.85elapsed -# docker build: ~6min -# Enroot import: 7:29.12elapsed -# Total (as reported): 17:11.90elapsed ``` - -
-[OPTIONAL] Try enroot image by starting a container out of it. - +3. Copy the file `0.NemoMegatron-aws-optimized.Dockerfile` to the local directory and run the command below. Docker will retrieve the NemoMegatron container image from NGC then build an optimized container for AWS. This stage takes a few minutes and you can follow progress ```bash - /usr/bin/time enroot create --name test-nemo /fsx/ubuntu/aws-nemo-megatron_23.03-py3.sqsh - # 3.21user 29.64system 0:27.88elapsed 117%CPU (0avgtext+0avgdata 581864maxresident)k - # Will create /tmp/enroot/data/user-1000/test-nemo/ taking up the same size of sqsh file - - # Show containers - enroot list +docker build --progress plain -t ${REPO}:${TAG} -f 0.NemoMegatron-aws-optimized.Dockerfile . +``` +4. Convert the Docker container image to an [Enroot](https://github.com/NVIDIA/enroot) squash file that will be stored in `/apps`. This step takes a few minutes. +```bash +IMAGE=/apps/${REPO}_${TAG}.sqsh ; [[ -e $IMAGE ]] && rm $IMAGE ; /usr/bin/time enroot import -o $IMAGE dockerd://${REPO}:${TAG} +``` - declare -a ENROOT_START_ARGS=( - # Needed when starting on CPU-only instances (e.g., on head node). - -e NVIDIA_VISIBLE_DEVICES=void - ) - enroot start "${ENROOT_START_ARGS[@]}" test-nemo +The Enroot squash file will be placed into the `/apps` directory. - # After exiting the enroot container, remove it and list to make sure it's gone. - # This command will remove /tmp/enroot/data/user-1000/test-nemo/ - enroot remove -f test-nemo - enroot list - ``` -
+ ## 3. Set-up NemoMegatron -## 3. Seed Nemo-Launcher on head node +You will setup the target directory to host the configurations and requirements for NemoMegatron. It is assumed that you have an FSx for Lustre file system available to all nodes of your cluster via the mountpoint `/fsx`. We follow the same logic as in the [NemoMegatron Launcher documentation](https://github.com/NVIDIA/NeMo-Megatron-Launcher/tree/23.03#5111-slurm) -Run this helper script, which faithfully implements the [official Nemo-Launcher -documentation](https://github.com/NVIDIA/NeMo-Megatron-Launcher/tree/23.03#5111-slurm): +1. Create the target directory with the command below: +```bash +mkdir -p $TARGET_PATH +``` +2. Retrieve files from the container and place them in the target directory. You execute the container on your head-node for this task using Enroot [start](https://github.com/NVIDIA/enroot/blob/master/doc/cmd/start.md) command. ```bash -./step-00-bootstrap-launcher.sh +enroot start --mount $TARGET_PATH:/workspace/mount_dir \ + --env NVIDIA_VISIBLE_DEVICES=void \ + /apps/aws-nemo-megatron_23.03-py3.sqsh \ + cp -a /opt/NeMo-Megatron-Launcher/launcher_scripts /opt/NeMo-Megatron-Launcher/auto_configurator /opt/FasterTransformer /workspace/mount_dir/ +``` +The `NVIDIA_VISIBLE_DEVICES` variable is set to void to prevent the process from checking for the Nvidia driver presence (since we don't need GPUs here). +3. Install the NemoMegatron requirements in a Python VirtualEnv by running the set of commands below. +```bash +cd $TARGET_PATH +/usr/bin/python3 -m venv .venv +source .venv +pip install -r <(curl -fsSL https://raw.githubusercontent.com/NVIDIA/NeMo-Megatron-Launcher/23.03/requirements.txt) ``` Next, you need to prepare the configuration files as follows: -1. Review and update the partition name in the .yaml config file `conf.template/cluster/bcm.yaml`. +1. Review and update the partition name in the .yaml config file `conf.template/cluster/bcm.yaml`. Specifically these values.
+ +| Value | Default | Definition | +| ---------------- | ---------- | ------------------------------------ | +| partition | `null` | Slurm partition, same as a job queue | +| account | `null` | Account if using [accounting](https://slurm.schedmd.com/accounting.html) | +| exclusive | `True` | The job has [exclusive](https://stackoverflow.com/questions/66817279/what-does-the-keyword-exclusive-mean-in-slurm) use the instances it runs on (no other job can take it) | +| gpus_per_task | `null` | Number of instances of GPUs per job | +| gpus_per_node | `8` | Number of GPUs to use per node | +| mem | `0` | Requested memory (all) | +| job_name_prefix | `"nemo-megatron-"` | Prefix for your job names | +| gres | `"gpu:8"` | Generic resource [scheduling](https://slurm.schedmd.com/gres.html) | +| srun_args | `"--no-container-mount-home"` | Arguments for the [srun](https://slurm.schedmd.com/srun.html) command (here for Pyxis) | +| stderr_to_stdout | `True` | Merge `stderr` and `stdout` | + + 2. Copy all the .yaml config files `{conf.template/ => launcher_scripts/conf/}` with this command: diff --git a/3.test_cases/2.nemo-launcher-23.03/build-enroot-image.sh b/3.test_cases/2.nemo-launcher-23.03/build-enroot-image.sh index 5db24fc9..bf48a4c8 100755 --- a/3.test_cases/2.nemo-launcher-23.03/build-enroot-image.sh +++ b/3.test_cases/2.nemo-launcher-23.03/build-enroot-image.sh @@ -15,6 +15,6 @@ TAG=23.03-py3 docker build --progress plain -t ${REPO}:${TAG} . # On m5.8xlarge (32 vcpu). /fsx is FSxL 1.2TB configured with 500 MB/s/TB throughput. 
-IMAGE=/fsx/ubuntu/${REPO}_${TAG}.sqsh ; [[ -e $IMAGE ]] && rm $IMAGE +IMAGE=/apps/${REPO}_${TAG}.sqsh ; [[ -e $IMAGE ]] && rm $IMAGE /usr/bin/time enroot import -o $IMAGE dockerd://${REPO}:${TAG} # 25.09user 102.21system 2:17.85elapsed 92%CPU (0avgtext+0avgdata 17450056maxresident)k diff --git a/README.md b/README.md index 7dc1d22a..5a707491 100644 --- a/README.md +++ b/README.md @@ -36,10 +36,10 @@ Custom machine images can be built using [Packer](www.packer.io) for AWS Paralle All test cases are under `3.test_cases/`. You can go in each test case directory to learn how to run it. -| Test case | PC | EKS | AWS Batch | -| ----------------------- | --- | --- | --------- | -| `0.nccl-tests` | ✅ | ❓ | ❓ | -| `1.megatron-lm` | ✅ | ❓ | ❓ | -| `2.nemo-launcher-23.03` | ✅ | ❌ | ❌ | -| `3.MPT` | ❓ | ❓ | ❓ | -| `4.DDP` | ❓ | ❓ | ❓ | +| Test case | PC | EKS | AWS Batch | +| ----------------------- | ---- | ----- | --------- | +| `0.nccl-tests` | ✅ | ❓ | ❓ | +| `1.megatron-lm` | ✅ | ❓ | ❓ | +| `2.nemo-launcher-23.03` | ✅ | ❌ | ❌ | +| `3.MPT` | ❓ | ❓ | ❓ | +| `4.DDP` | ❓ | ❓ | ❓ | From fc7b052a86e3571cb6f95f9fc0b794445abc3f27 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Mon, 21 Aug 2023 20:31:50 -0500 Subject: [PATCH 026/648] Update on Nemo documentation --- .../1.vpc_network/1.vpc-all-az.yaml | 6 +- 2.amazon_machine_images/packer-ami.pkr.hcl | 4 +- ...> 0.NemoMegatron-aws-optimized.Dockerfile} | 0 3.test_cases/2.nemo-launcher-23.03/README.md | 101 +++++++++++------- .../build-enroot-image.sh | 2 +- README.md | 14 +-- 6 files changed, 73 insertions(+), 54 deletions(-) rename 3.test_cases/2.nemo-launcher-23.03/{Dockerfile => 0.NemoMegatron-aws-optimized.Dockerfile} (100%) diff --git a/1.architectures/1.vpc_network/1.vpc-all-az.yaml b/1.architectures/1.vpc_network/1.vpc-all-az.yaml index e182fade..8a992be7 100644 --- a/1.architectures/1.vpc_network/1.vpc-all-az.yaml +++ b/1.architectures/1.vpc_network/1.vpc-all-az.yaml @@ -40,11 +40,11 @@ Metadata: NumberOfAZs: 
default: Number of Availability Zones CreatePublicSubnets: - default: Create a Public Subnet + default: Create public subnets CreateS3Endpoint: - default: Create an S3 Endpoint + default: Create an S3 endpoint CreateDynamoDBEndpoint: - default: Create a DynamoDB Endpoint + default: Create a DynamoDB endpoint ###################### ## Stack Parameters ## diff --git a/2.amazon_machine_images/packer-ami.pkr.hcl b/2.amazon_machine_images/packer-ami.pkr.hcl index 19fba55a..986482fe 100644 --- a/2.amazon_machine_images/packer-ami.pkr.hcl +++ b/2.amazon_machine_images/packer-ami.pkr.hcl @@ -23,7 +23,7 @@ variable "ami_version" { variable "parallel_cluster_version" { type = string - default = "3.6.0" + default = "3.6.1" } variable "eks_version" { @@ -33,7 +33,7 @@ variable "eks_version" { variable "aws_region" { type = string - default = "us-west-2" + default = "us-east-1" } variable "instance_type" { diff --git a/3.test_cases/2.nemo-launcher-23.03/Dockerfile b/3.test_cases/2.nemo-launcher-23.03/0.NemoMegatron-aws-optimized.Dockerfile similarity index 100% rename from 3.test_cases/2.nemo-launcher-23.03/Dockerfile rename to 3.test_cases/2.nemo-launcher-23.03/0.NemoMegatron-aws-optimized.Dockerfile diff --git a/3.test_cases/2.nemo-launcher-23.03/README.md b/3.test_cases/2.nemo-launcher-23.03/README.md index 5676c6b8..cd740133 100644 --- a/3.test_cases/2.nemo-launcher-23.03/README.md +++ b/3.test_cases/2.nemo-launcher-23.03/README.md @@ -11,10 +11,20 @@ Table of contents: ## 1. Pre-requisites -1. As of this writing, the base image - [bignlp-training](https://registry.ngc.nvidia.com/orgs/ea-bignlp/containers/bignlp-training) is - still under NVIDIA's open-beta, and you need to register - [here](https://developer.nvidia.com/nemo-framework-open-beta). 
+The following pre-requisites are needed to run this example: + +- You have access to the base image [`bignlp-training`](https://registry.ngc.nvidia.com/orgs/ea-bignlp/containers/bignlp-training) is available through NVIDIA's open-beta [here](https://developer.nvidia.com/nemo-framework-open-beta). +- Docker, Enroot and Pixys installed on the cluster and available on all nodes. It is assumed you are using a Custom AMI ([example](../../2.amazon_machine_images)) + + +You will need to setup the following environment variables before running the scripts: + +```bash +export VERSION=23.03 +export REPO=aws-nemo-megatron +export TAG=$VERSION-py3 +export TARGET_PATH=/fsx/nemo-launcher-$VERSION +``` 2. This directory is already located on the FSx Lustre filesystem. For simplicity, assume the path is `/fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03/`. @@ -30,59 +40,68 @@ Table of contents: ## 2. Build AWS-optimized Nemo-Launcher image -All the way to the enroot format. +You will retrieve the container image from Nvidia, build an optimized container for EFA and, convert it into an Enroot file so we can run it on our cluster. +1. You have a registered account with Nvidia and can access NGC. Retrieve the NGC API key following [instructions from Nvidia](https://docs.nvidia.com/ngc/gpu-cloud/ngc-user-guide/index.html#generating-api-key). +2. Configure NGC as shown below using the command below, when requested use `$oauthtoken` for the login and the API key from NGC fro the password. ```bash docker login nvcr.io -# Username: $oauthtoken -# Password: - -/usr/bin/time bash ./build-enroot-image.sh -# EC2: us-west-2 / m5.4xlarge / EBS gp3 3k IOPS, 350 MB/s throughput -# -# docker pull (cold): 3:34.85elapsed -# docker build: ~6min -# Enroot import: 7:29.12elapsed -# Total (as reported): 17:11.90elapsed ``` - -
-[OPTIONAL] Try enroot image by starting a container out of it. - +3. Copy the file `0.NemoMegatron-aws-optimized.Dockerfile` to the local directory and run the command below. Docker will retrieve the NemoMegatron container image from NGC then build an optimized container for AWS. This stage takes a few minutes and you can follow progress ```bash - /usr/bin/time enroot create --name test-nemo /fsx/ubuntu/aws-nemo-megatron_23.03-py3.sqsh - # 3.21user 29.64system 0:27.88elapsed 117%CPU (0avgtext+0avgdata 581864maxresident)k - # Will create /tmp/enroot/data/user-1000/test-nemo/ taking up the same size of sqsh file - - # Show containers - enroot list +docker build --progress plain -t ${REPO}:${TAG} -f 0.NemoMegatron-aws-optimized.Dockerfile . +``` +4. Convert the Docker container image to an [Enroot](https://github.com/NVIDIA/enroot) squash file that will be stored in `/apps`. This step takes a few minutes. +```bash +IMAGE=/apps/${REPO}_${TAG}.sqsh ; [[ -e $IMAGE ]] && rm $IMAGE ; /usr/bin/time enroot import -o $IMAGE dockerd://${REPO}:${TAG} +``` - declare -a ENROOT_START_ARGS=( - # Needed when starting on CPU-only instances (e.g., on head node). - -e NVIDIA_VISIBLE_DEVICES=void - ) - enroot start "${ENROOT_START_ARGS[@]}" test-nemo +The Enroot squash file will be placed into the `/apps` directory. - # After exiting the enroot container, remove it and list to make sure it's gone. - # This command will remove /tmp/enroot/data/user-1000/test-nemo/ - enroot remove -f test-nemo - enroot list - ``` -
+## 3. Set-up NemoMegatron -## 3. Seed Nemo-Launcher on head node +You will setup the target directory to host the configurations and requirements for NemoMegatron. It is assumed that your have an FSx for Lustre file system available to all nodes of your cluster via the mountpoint `/fsx`. We follow the same logic as in the [NemoMegatron Launcher documentation](https://github.com/NVIDIA/NeMo-Megatron-Launcher/tree/23.03#5111-slurm) -Run this helper script, which faithfully implements the [official Nemo-Launcher -documentation](https://github.com/NVIDIA/NeMo-Megatron-Launcher/tree/23.03#5111-slurm): +1. Create the target directory with the command below: +```bash +mkdir -p $TARGET_PATH +``` +2. Retrieve files from the container and place them in the target directory. You execute the container on your head-node for this task using Enroot [start](https://github.com/NVIDIA/enroot/blob/master/doc/cmd/start.md) command. ```bash -./step-00-bootstrap-launcher.sh +enroot start --mount $TARGET_PATH:/workspace/mount_dir \ + --env NVIDIA_VISIBLE_DEVICES=void \ + /apps/aws-nemo-megatron_23.03-py3.sqsh \ + cp -a /opt/NeMo-Megatron-Launcher/launcher_scripts /opt/NeMo-Megatron-Launcher/auto_configurator /opt/FasterTransformer /workspace/mount_dir/ +``` +The `NVIDIA_VISIBLE_DEVICES` variable is set to void to prevent the process to check for the Nvidia driver presence (since we don't need GPUs here). +3. Install the NemoMegatron requirements in a Python VirtualEnv by running the set of commands below. +```bash +cd $TARGET_PATH +/usr/bin/python3 -m venv .venv +source .venv +pip install -r <(curl -fsSL https://raw.githubusercontent.com/NVIDIA/NeMo-Megatron-Launcher/23.03/requirements.txt) ``` Next, you need to prepare the configuration files as follow: -1. Review and update the partition name in the .yaml config file `conf.template/cluster/bcm.yaml`. +1. Review and update the partition name in the .yaml config file `conf.template/cluster/bcm.yaml`. Specifically these values. 
+ +| Value | Default | Definition | +| ---------------- | ---------- | ------------------------------------ | +| partition | `null` | Slurm partition, same as a job queue | +| account | `null` | Account if using [accounting](https://slurm.schedmd.com/accounting.html) | +| exclusive | `True` | The job has [exclusive](https://stackoverflow.com/questions/66817279/what-does-the-keyword-exclusive-mean-in-slurm) use the instances it runs on (no other job can take it) | +| gpus_per_task | `null` | Number of instances of GPUs per job | +| gpus_per_node | `8` | Number of GPUs to use per node | +| mem | `0` | Requested memory (all) | +| job_name_prefix | `"nemo-megatron-"` | Prefix for your job names | +| gres | `"gpu:8"` | Generic resource [scheduling](https://slurm.schedmd.com/gres.html) | +| srun_args | `"--no-container-mount-home"` | Arguments for the [srun](https://slurm.schedmd.com/srun.html) command (here for Pyxis) | +| stderr_to_stdout | `True` | Merge `stderr` and `stdout` | + + 2. Copy all the .yaml config files `{conf.template/ => launcher_scripts/conf/}` with this command: diff --git a/3.test_cases/2.nemo-launcher-23.03/build-enroot-image.sh b/3.test_cases/2.nemo-launcher-23.03/build-enroot-image.sh index 5db24fc9..bf48a4c8 100755 --- a/3.test_cases/2.nemo-launcher-23.03/build-enroot-image.sh +++ b/3.test_cases/2.nemo-launcher-23.03/build-enroot-image.sh @@ -15,6 +15,6 @@ TAG=23.03-py3 docker build --progress plain -t ${REPO}:${TAG} . # On m5.8xlarge (32 vcpu). /fsx is FSxL 1.2TB configured with 500 MB/s/TB throughput. 
-IMAGE=/fsx/ubuntu/${REPO}_${TAG}.sqsh ; [[ -e $IMAGE ]] && rm $IMAGE +IMAGE=/apps/${REPO}_${TAG}.sqsh ; [[ -e $IMAGE ]] && rm $IMAGE /usr/bin/time enroot import -o $IMAGE dockerd://${REPO}:${TAG} # 25.09user 102.21system 2:17.85elapsed 92%CPU (0avgtext+0avgdata 17450056maxresident)k diff --git a/README.md b/README.md index 7dc1d22a..5a707491 100644 --- a/README.md +++ b/README.md @@ -36,10 +36,10 @@ Custom machine images can be built using [Packer](www.packer.io) for AWS Paralle All test cases are under `3.test_cases/`. You can go in each test case directory to learn how to run it. -| Test case | PC | EKS | AWS Batch | -| ----------------------- | --- | --- | --------- | -| `0.nccl-tests` | ✅ | ❓ | ❓ | -| `1.megatron-lm` | ✅ | ❓ | ❓ | -| `2.nemo-launcher-23.03` | ✅ | ❌ | ❌ | -| `3.MPT` | ❓ | ❓ | ❓ | -| `4.DDP` | ❓ | ❓ | ❓ | +| Test case | PC | EKS | AWS Batch | +| ----------------------- | ---- | ----- | --------- | +| `0.nccl-tests` | ✅ | ❓ | ❓ | +| `1.megatron-lm` | ✅ | ❓ | ❓ | +| `2.nemo-launcher-23.03` | ✅ | ❌ | ❌ | +| `3.MPT` | ❓ | ❓ | ❓ | +| `4.DDP` | ❓ | ❓ | ❓ | From b5b81b1753068f023b5bc252d2b9f024cc58a808 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 22 Aug 2023 11:17:07 -0500 Subject: [PATCH 027/648] Update readme and data prep for Nemo --- ...a_preparation.sh => 1.data-preparation.sh} | 7 +- 3.test_cases/2.nemo-launcher-23.03/README.md | 87 ++++++++----------- 2 files changed, 40 insertions(+), 54 deletions(-) rename 3.test_cases/2.nemo-launcher-23.03/{step-01-data_preparation.sh => 1.data-preparation.sh} (64%) diff --git a/3.test_cases/2.nemo-launcher-23.03/step-01-data_preparation.sh b/3.test_cases/2.nemo-launcher-23.03/1.data-preparation.sh similarity index 64% rename from 3.test_cases/2.nemo-launcher-23.03/step-01-data_preparation.sh rename to 3.test_cases/2.nemo-launcher-23.03/1.data-preparation.sh index d86416c6..4626a638 100755 --- a/3.test_cases/2.nemo-launcher-23.03/step-01-data_preparation.sh +++ 
b/3.test_cases/2.nemo-launcher-23.03/1.data-preparation.sh @@ -5,13 +5,12 @@ set -exuo pipefail -: "${WORKSPACE_CONT:=/fsx/ubuntu/nemo-megatron-23.03}" -CONT_DATA_DIR=${WORKSPACE_CONT}/data/the_pile_gpt3 -CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe +CONT_DATA_DIR=${TARGET_PATH}/data/the_pile_gpt3 +CONT_TOKENIZER_DIR=${TARGET_PATH}/data/bpe # data_preparation.file_numbers='0-29' \ mkdir -p $CONT_DATA_DIR -HYDRA_FULL_ERROR=1 python3 /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03/launcher_scripts/main.py \ +HYDRA_FULL_ERROR=1 python3 $TARGET_PATH/launcher_scripts/main.py \ stages=[data_preparation] \ data_dir=$CONT_DATA_DIR \ data_preparation.file_numbers='0-0' \ diff --git a/3.test_cases/2.nemo-launcher-23.03/README.md b/3.test_cases/2.nemo-launcher-23.03/README.md index cd740133..4c1ad314 100644 --- a/3.test_cases/2.nemo-launcher-23.03/README.md +++ b/3.test_cases/2.nemo-launcher-23.03/README.md @@ -14,29 +14,20 @@ Table of contents: The following pre-requisites are needed to run this example: - You have access to the base image [`bignlp-training`](https://registry.ngc.nvidia.com/orgs/ea-bignlp/containers/bignlp-training) is available through NVIDIA's open-beta [here](https://developer.nvidia.com/nemo-framework-open-beta). -- Docker, Enroot and Pixys installed on the cluster and available on all nodes. It is assumed you are using a Custom AMI ([example](../../2.amazon_machine_images)) +- Docker, [Enroot](https://github.com/NVIDIA/enroot) and [Pixys](https://github.com/NVIDIA/pyxis) installed on the cluster and available on all nodes. It is assumed you are using a Custom AMI ([example](../../2.amazon_machine_images)) -You will need to setup the following environment variables before running the scripts: +You will need to setup the following environment variables before running the scripts. : ```bash -export VERSION=23.03 +export NEMO_VERSION=23.03 export REPO=aws-nemo-megatron -export TAG=$VERSION-py3 -export TARGET_PATH=/fsx/nemo-launcher-$VERSION -``` - -2. 
This directory is already located on the FSx Lustre filesystem. For simplicity, assume the path - is `/fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03/`. - -3. You have set the executable bits of the shell scripts +export TAG=$NEMO_VERSION-py3 +export TARGET_PATH=/fsx/nemo-launcher-$NEMO_VERSION # +export TEST_CASE_PATH=/home/ec2-user/2.nemo-launcher-23.03 # where you copy the test case or set to your test case path - ```bash - find /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03 \ - -name '*.sh' ! -executable -exec chmod ugo+x {} \; - ``` - -4. Your current working directory is `/fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03/`. +cd $TEST_CASE_PATH +``` ## 2. Build AWS-optimized Nemo-Launcher image @@ -59,7 +50,7 @@ IMAGE=/apps/${REPO}_${TAG}.sqsh ; [[ -e $IMAGE ]] && rm $IMAGE ; /usr/bin/time e The Enroot squash file will be placed into the `/apps` directory. -## 3. Set-up NemoMegatron +## 3. Set-up the NemoMegatron environment You will setup the target directory to host the configurations and requirements for NemoMegatron. It is assumed that your have an FSx for Lustre file system available to all nodes of your cluster via the mountpoint `/fsx`. We follow the same logic as in the [NemoMegatron Launcher documentation](https://github.com/NVIDIA/NeMo-Megatron-Launcher/tree/23.03#5111-slurm) @@ -70,61 +61,57 @@ mkdir -p $TARGET_PATH ``` 2. Retrieve files from the container and place them in the target directory. You execute the container on your head-node for this task using Enroot [start](https://github.com/NVIDIA/enroot/blob/master/doc/cmd/start.md) command. 
```bash +cd $TARGET_PATH enroot start --mount $TARGET_PATH:/workspace/mount_dir \ --env NVIDIA_VISIBLE_DEVICES=void \ - /apps/aws-nemo-megatron_23.03-py3.sqsh \ + /apps/${REPO}_${TAG}.sqsh \ cp -a /opt/NeMo-Megatron-Launcher/launcher_scripts /opt/NeMo-Megatron-Launcher/auto_configurator /opt/FasterTransformer /workspace/mount_dir/ ``` The `NVIDIA_VISIBLE_DEVICES` variable is set to void to prevent the process to check for the Nvidia driver presence (since we don't need GPUs here). 3. Install the NemoMegatron requirements in a Python VirtualEnv by running the set of commands below. ```bash cd $TARGET_PATH -/usr/bin/python3 -m venv .venv -source .venv -pip install -r <(curl -fsSL https://raw.githubusercontent.com/NVIDIA/NeMo-Megatron-Launcher/23.03/requirements.txt) +sudo amazon-linux-extras install -y python3.8 # we need Python =>3.8 +/usr/bin/python3.8 -m venv .venv +source .venv/bin/activate +pip3.8 install -r <(curl -fsSL https://raw.githubusercontent.com/NVIDIA/NeMo-Megatron-Launcher/$NEMO_VERSION/requirements.txt) ``` Next, you need to prepare the configuration files as follow: -1. Review and update the partition name in the .yaml config file `conf.template/cluster/bcm.yaml`. Specifically these values. 
- -| Value | Default | Definition | -| ---------------- | ---------- | ------------------------------------ | -| partition | `null` | Slurm partition, same as a job queue | -| account | `null` | Account if using [accounting](https://slurm.schedmd.com/accounting.html) | -| exclusive | `True` | The job has [exclusive](https://stackoverflow.com/questions/66817279/what-does-the-keyword-exclusive-mean-in-slurm) use the instances it runs on (no other job can take it) | -| gpus_per_task | `null` | Number of instances of GPUs per job | -| gpus_per_node | `8` | Number of GPUs to use per node | -| mem | `0` | Requested memory (all) | -| job_name_prefix | `"nemo-megatron-"` | Prefix for your job names | -| gres | `"gpu:8"` | Generic resource [scheduling](https://slurm.schedmd.com/gres.html) | +1. Review and update the partition name in the .yaml config file `conf.template/cluster/bcm.yaml`. Here is a summary of the values. +| Value | Default | Definition | +| ---------------- | ---------- | --------------------------------------------------------------------------------------------------------- | +| partition | `null` | Slurm partition, same as a job queue | +| account | `null` | Account if using [accounting](https://slurm.schedmd.com/accounting.html) | +| exclusive | `True` | The job has [exclusive](https://stackoverflow.com/questions/66817279/what-does-the-keyword-exclusive-mean-in-slurm) use the instances it runs on (no other job can take it) | +| gpus_per_task | `null` | Number of instances of GPUs per job | +| gpus_per_node | `8` | Number of GPUs to use per node. 
This is set to 8 GPUs as for the p4d.24xlarge | +| mem | `0` | Requested memory (all) | +| job_name_prefix | `"nemo-megatron-"` | Prefix for your job names | +| gres | `"gpu:8"` | Generic resource [scheduling](https://slurm.schedmd.com/gres.html) | | srun_args | `"--no-container-mount-home"` | Arguments for the [srun](https://slurm.schedmd.com/srun.html) command (here for Pyxis) | -| stderr_to_stdout | `True` | Merge `stderr` and `stdout` | +| stderr_to_stdout | `True` | Merge `stderr` and `stdout` | +2. Copy all the .yaml config files `{conf.template/ => launcher_scripts/conf/}` with this command: +```bash +cp -TRv ${TEST_CASE_PATH}/conf.template/ ${TARGET_PATH}/launcher_scripts/conf/ +``` -2. Copy all the .yaml config files `{conf.template/ => launcher_scripts/conf/}` with this command: - - ```console - $ cp -TRv conf.template/ launcher_scripts/conf/ - 'conf.template/cluster/bcm.yaml' -> 'launcher_scripts/conf/cluster/bcm.yaml' - 'conf.template/config.yaml' -> 'launcher_scripts/conf/config.yaml' - 'conf.template/data_preparation/gpt3/download_gpt3_pile.yaml' -> 'launcher_scripts/conf/data_preparation/gpt3/download_gpt3_pile.yaml' - ``` - ## 4. Launch Nemo pipeline -This section assumes the following has been done: - +This section assumes that you went through the previous sections and 1/ retrieved and built the AWS optimized NemoMegatron container, 2/ setup the NemoMegatron environment. To start, source the NemoMegatron environment: ```bash -source /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03/.venv/bin/activate +source ${TARGET_PATH}/.venv/bin/activate ``` -### 4.1. Prepare Sample Dataset +### 4.1. Prepare the sample dataset + +Run the data preparation stage by executing the following script. It will submit the data preparation jobs. You can check progress with `squeue` to get the jobs in your Slurm queues and their status. ```bash -# Edit launch-data_preparation.sh to override Hydra config. Or, modify the config directly.
-./step-01-data_preparation.sh +bash 1.data-preparation.sh ``` Once completed, expect the training data (vocab and the pre-processed Pile dataset) as follows: From 1628e7edf39fc966377e64bcd183448d9a284f2c Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 22 Aug 2023 11:17:07 -0500 Subject: [PATCH 028/648] Update readme and data prep for Nemo --- ...a_preparation.sh => 1.data-preparation.sh} | 7 +- 3.test_cases/2.nemo-launcher-23.03/README.md | 87 ++++++++----------- 2 files changed, 40 insertions(+), 54 deletions(-) rename 3.test_cases/2.nemo-launcher-23.03/{step-01-data_preparation.sh => 1.data-preparation.sh} (64%) diff --git a/3.test_cases/2.nemo-launcher-23.03/step-01-data_preparation.sh b/3.test_cases/2.nemo-launcher-23.03/1.data-preparation.sh similarity index 64% rename from 3.test_cases/2.nemo-launcher-23.03/step-01-data_preparation.sh rename to 3.test_cases/2.nemo-launcher-23.03/1.data-preparation.sh index d86416c6..4626a638 100755 --- a/3.test_cases/2.nemo-launcher-23.03/step-01-data_preparation.sh +++ b/3.test_cases/2.nemo-launcher-23.03/1.data-preparation.sh @@ -5,13 +5,12 @@ set -exuo pipefail -: "${WORKSPACE_CONT:=/fsx/ubuntu/nemo-megatron-23.03}" -CONT_DATA_DIR=${WORKSPACE_CONT}/data/the_pile_gpt3 -CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe +CONT_DATA_DIR=${TARGET_PATH}/data/the_pile_gpt3 +CONT_TOKENIZER_DIR=${TARGET_PATH}/data/bpe # data_preparation.file_numbers='0-29' \ mkdir -p $CONT_DATA_DIR -HYDRA_FULL_ERROR=1 python3 /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03/launcher_scripts/main.py \ +HYDRA_FULL_ERROR=1 python3 $TARGET_PATH/launcher_scripts/main.py \ stages=[data_preparation] \ data_dir=$CONT_DATA_DIR \ data_preparation.file_numbers='0-0' \ diff --git a/3.test_cases/2.nemo-launcher-23.03/README.md b/3.test_cases/2.nemo-launcher-23.03/README.md index cd740133..4c1ad314 100644 --- a/3.test_cases/2.nemo-launcher-23.03/README.md +++ b/3.test_cases/2.nemo-launcher-23.03/README.md @@ -14,29 +14,20 @@ Table of contents: The 
following pre-requisites are needed to run this example: - You have access to the base image [`bignlp-training`](https://registry.ngc.nvidia.com/orgs/ea-bignlp/containers/bignlp-training) is available through NVIDIA's open-beta [here](https://developer.nvidia.com/nemo-framework-open-beta). -- Docker, Enroot and Pixys installed on the cluster and available on all nodes. It is assumed you are using a Custom AMI ([example](../../2.amazon_machine_images)) +- Docker, [Enroot](https://github.com/NVIDIA/enroot) and [Pyxis](https://github.com/NVIDIA/pyxis) installed on the cluster and available on all nodes. It is assumed you are using a Custom AMI ([example](../../2.amazon_machine_images)) -You will need to setup the following environment variables before running the scripts: +You will need to setup the following environment variables before running the scripts: ```bash -export VERSION=23.03 +export NEMO_VERSION=23.03 export REPO=aws-nemo-megatron -export TAG=$VERSION-py3 -export TARGET_PATH=/fsx/nemo-launcher-$VERSION -``` - -2. This directory is already located on the FSx Lustre filesystem. For simplicity, assume the path - is `/fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03/`. - -3. You have set the executable bits of the shell scripts +export TAG=$NEMO_VERSION-py3 +export TARGET_PATH=/fsx/nemo-launcher-$NEMO_VERSION # +export TEST_CASE_PATH=/home/ec2-user/2.nemo-launcher-23.03 # where you copy the test case or set to your test case path - ```bash - find /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03 \ - -name '*.sh' ! -executable -exec chmod ugo+x {} \; - ``` - -4. Your current working directory is `/fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03/`. +cd $TEST_CASE_PATH +``` ## 2. Build AWS-optimized Nemo-Launcher image @@ -59,7 +50,7 @@ IMAGE=/apps/${REPO}_${TAG}.sqsh ; [[ -e $IMAGE ]] && rm $IMAGE ; /usr/bin/time e The Enroot squash file will be placed into the `/apps` directory. -## 3. Set-up NemoMegatron +## 3.
Set-up the NemoMegatron environment You will setup the target directory to host the configurations and requirements for NemoMegatron. It is assumed that your have an FSx for Lustre file system available to all nodes of your cluster via the mountpoint `/fsx`. We follow the same logic as in the [NemoMegatron Launcher documentation](https://github.com/NVIDIA/NeMo-Megatron-Launcher/tree/23.03#5111-slurm) @@ -70,61 +61,57 @@ mkdir -p $TARGET_PATH ``` 2. Retrieve files from the container and place them in the target directory. You execute the container on your head-node for this task using Enroot [start](https://github.com/NVIDIA/enroot/blob/master/doc/cmd/start.md) command. ```bash +cd $TARGET_PATH enroot start --mount $TARGET_PATH:/workspace/mount_dir \ --env NVIDIA_VISIBLE_DEVICES=void \ - /apps/aws-nemo-megatron_23.03-py3.sqsh \ + /apps/${REPO}_${TAG}.sqsh \ cp -a /opt/NeMo-Megatron-Launcher/launcher_scripts /opt/NeMo-Megatron-Launcher/auto_configurator /opt/FasterTransformer /workspace/mount_dir/ ``` The `NVIDIA_VISIBLE_DEVICES` variable is set to void to prevent the process to check for the Nvidia driver presence (since we don't need GPUs here). 3. Install the NemoMegatron requirements in a Python VirtualEnv by running the set of commands below. ```bash cd $TARGET_PATH -/usr/bin/python3 -m venv .venv -source .venv -pip install -r <(curl -fsSL https://raw.githubusercontent.com/NVIDIA/NeMo-Megatron-Launcher/23.03/requirements.txt) +sudo amazon-linux-extras install -y python3.8 # we need Python =>3.8 +/usr/bin/python3.8 -m venv .venv +source .venv/bin/activate +pip3.8 install -r <(curl -fsSL https://raw.githubusercontent.com/NVIDIA/NeMo-Megatron-Launcher/$NEMO_VERSION/requirements.txt) ``` Next, you need to prepare the configuration files as follow: -1. Review and update the partition name in the .yaml config file `conf.template/cluster/bcm.yaml`. Specifically these values. 
- -| Value | Default | Definition | -| ---------------- | ---------- | ------------------------------------ | -| partition | `null` | Slurm partition, same as a job queue | -| account | `null` | Account if using [accounting](https://slurm.schedmd.com/accounting.html) | -| exclusive | `True` | The job has [exclusive](https://stackoverflow.com/questions/66817279/what-does-the-keyword-exclusive-mean-in-slurm) use the instances it runs on (no other job can take it) | -| gpus_per_task | `null` | Number of instances of GPUs per job | -| gpus_per_node | `8` | Number of GPUs to use per node | -| mem | `0` | Requested memory (all) | -| job_name_prefix | `"nemo-megatron-"` | Prefix for your job names | -| gres | `"gpu:8"` | Generic resource [scheduling](https://slurm.schedmd.com/gres.html) | +1. Review and update the partition name in the .yaml config file `conf.template/cluster/bcm.yaml`. Here is a summary of the values. +| Value | Default | Definition | +| ---------------- | ---------- | --------------------------------------------------------------------------------------------------------- | +| partition | `null` | Slurm partition, same as a job queue | +| account | `null` | Account if using [accounting](https://slurm.schedmd.com/accounting.html) | +| exclusive | `True` | The job has [exclusive](https://stackoverflow.com/questions/66817279/what-does-the-keyword-exclusive-mean-in-slurm) use the instances it runs on (no other job can take it) | +| gpus_per_task | `null` | Number of instances of GPUs per job | +| gpus_per_node | `8` | Number of GPUs to use per node. 
This is set to 8 GPUs as for th p4d.24xlarge | +| mem | `0` | Requested memory (all) | +| job_name_prefix | `"nemo-megatron-"` | Prefix for your job names | +| gres | `"gpu:8"` | Generic resource [scheduling](https://slurm.schedmd.com/gres.html) | | srun_args | `"--no-container-mount-home"` | Arguments for the [srun](https://slurm.schedmd.com/srun.html) command (here for Pyxis) | -| stderr_to_stdout | `True` | Merge `stderr` and `stdout` | +| stderr_to_stdout | `True` | Merge `stderr` and `stdout` | +2. OnCopy all the .yaml config files `{conf.template/ => launcher_scripts/conf/}` with this command: +```bash +cp -TRv ${TEST_CASE_PATH}/conf.template/ ${TARGET_PATH}/launcher_scripts/conf/ +``` -2. Copy all the .yaml config files `{conf.template/ => launcher_scripts/conf/}` with this command: - - ```console - $ cp -TRv conf.template/ launcher_scripts/conf/ - 'conf.template/cluster/bcm.yaml' -> 'launcher_scripts/conf/cluster/bcm.yaml' - 'conf.template/config.yaml' -> 'launcher_scripts/conf/config.yaml' - 'conf.template/data_preparation/gpt3/download_gpt3_pile.yaml' -> 'launcher_scripts/conf/data_preparation/gpt3/download_gpt3_pile.yaml' - ``` - ## 4. Launch Nemo pipeline -This section assumes the following has been done: - +This section assumes that you went through the previous sections and 1/ retrieved and built the AWS optimized NemoMegatron container, 2/ setup the NemoMegatron environment. To start, source the NemoMegatron environment: ```bash -source /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03/.venv/bin/activate +source ${TARGET_PATH}/.venv/bin/activate ``` -### 4.1. Prepare Sample Dataset +### 4.1. Prepare the sample dataset + +Run the data preparation stage by executing the following script. It will submit the data preparation jobs. You can check progress with `squeue` to get the jobs in your Slurm queues and their status. ```bash -# Edit launch-data_preparation.sh to override Hydra config. Or, modify the config directly. 
-./step-01-data_preparation.sh +bash 1.data-preparation.sh ``` Once completed, expect the training data (vocab and the pre-processed Pile dataset) as follows: From 77cd614d0dafa974b4eac8a2025f71d981a75f55 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 22 Aug 2023 16:50:43 -0500 Subject: [PATCH 029/648] Fixed configuration with new path --- 3.test_cases/2.nemo-launcher-23.03/README.md | 13 ++++++++----- .../2.nemo-launcher-23.03/conf.template/config.yaml | 10 +++++----- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/3.test_cases/2.nemo-launcher-23.03/README.md b/3.test_cases/2.nemo-launcher-23.03/README.md index 4c1ad314..b3cf4d1b 100644 --- a/3.test_cases/2.nemo-launcher-23.03/README.md +++ b/3.test_cases/2.nemo-launcher-23.03/README.md @@ -25,6 +25,7 @@ export REPO=aws-nemo-megatron export TAG=$NEMO_VERSION-py3 export TARGET_PATH=/fsx/nemo-launcher-$NEMO_VERSION # export TEST_CASE_PATH=/home/ec2-user/2.nemo-launcher-23.03 # where you copy the test case or set to your test case path +export ENROOT_IMAGE=/apps/${REPO}_${TAG}.sqsh cd $TEST_CASE_PATH ``` @@ -44,7 +45,7 @@ docker build --progress plain -t ${REPO}:${TAG} -f 0.NemoMegatron-aws-optimized. ``` 4. Convert the Docker container image to an [Enroot](https://github.com/NVIDIA/enroot) squash file that will be stored in `/apps`. This step takes a few minutes. ```bash -IMAGE=/apps/${REPO}_${TAG}.sqsh ; [[ -e $IMAGE ]] && rm $IMAGE ; /usr/bin/time enroot import -o $IMAGE dockerd://${REPO}:${TAG} +[[ -e $ENROOT_IMAGE ]] && rm $ENROOT_IMAGE ; /usr/bin/time enroot import -o $ENROOT_IMAGE dockerd://${REPO}:${TAG} ``` The Enroot squash file will be placed into the `/apps` directory. 
@@ -64,7 +65,7 @@ mkdir -p $TARGET_PATH cd $TARGET_PATH enroot start --mount $TARGET_PATH:/workspace/mount_dir \ --env NVIDIA_VISIBLE_DEVICES=void \ - /apps/${REPO}_${TAG}.sqsh \ + $ENROOT_IMAGE \ cp -a /opt/NeMo-Megatron-Launcher/launcher_scripts /opt/NeMo-Megatron-Launcher/auto_configurator /opt/FasterTransformer /workspace/mount_dir/ ``` The `NVIDIA_VISIBLE_DEVICES` variable is set to void to prevent the process to check for the Nvidia driver presence (since we don't need GPUs here). @@ -93,9 +94,11 @@ Next, you need to prepare the configuration files as follow: | srun_args | `"--no-container-mount-home"` | Arguments for the [srun](https://slurm.schedmd.com/srun.html) command (here for Pyxis) | | stderr_to_stdout | `True` | Merge `stderr` and `stdout` | -2. OnCopy all the .yaml config files `{conf.template/ => launcher_scripts/conf/}` with this command: +2. Copy all the .yaml config files `{conf.template/ => launcher_scripts/conf/}` and substitute environment variables as follows: ```bash -cp -TRv ${TEST_CASE_PATH}/conf.template/ ${TARGET_PATH}/launcher_scripts/conf/ +cp -Rv ${TEST_CASE_PATH}/conf.template/cluster ${TARGET_PATH}/launcher_scripts/conf/cluster +cp -Rv ${TEST_CASE_PATH}/conf.template/data_preparation ${TARGET_PATH}/launcher_scripts/conf/data_preparation +envsubst < ${TEST_CASE_PATH}/conf.template/config.yaml > ${TARGET_PATH}/launcher_scripts/conf/config.yaml ``` @@ -111,7 +114,7 @@ source ${TARGET_PATH}/.venv/bin/activate Run the data preparation stage by executing the following script. It will submit the data preparation jobs. You can check progress with `squeue` to get the jobs in your Slurm queues and their status. 
```bash -bash 1.data-preparation.sh +bash ${TEST_CASE_PATH}/1.data-preparation.sh ``` Once completed, expect the training data (vocab and the pre-processed Pile dataset) as follows: diff --git a/3.test_cases/2.nemo-launcher-23.03/conf.template/config.yaml b/3.test_cases/2.nemo-launcher-23.03/conf.template/config.yaml index 3b2c30e1..33b5dbc8 100644 --- a/3.test_cases/2.nemo-launcher-23.03/conf.template/config.yaml +++ b/3.test_cases/2.nemo-launcher-23.03/conf.template/config.yaml @@ -26,12 +26,12 @@ stages: - export cluster_type: bcm # bcm or bcp. If bcm, it must match - cluster above. -launcher_scripts_path: /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03/launcher_scripts # Path to NeMo Megatron Launch scripts, should ends with /launcher_scripts -data_dir: /fsx/ubuntu/nemo-megatron-23.03/data # Location to store and read the data. -base_results_dir: /fsx/ubuntu/nemo-megatron-23.03/results # Location to store the results, checkpoints and logs. +launcher_scripts_path: ${TARGET_PATH}/launcher_scripts # Path to NeMo Megatron Launch scripts, should ends with /launcher_scripts +data_dir: ${TARGET_PATH}/data # Location to store and read the data. +base_results_dir: ${TARGET_PATH}/results # Location to store the results, checkpoints and logs. container_mounts: # List of additional paths to mount to container. They will be mounted to same path. - - /fsx/ubuntu:/fsx/ubuntu -container: /fsx/ubuntu/aws-nemo-megatron_23.03-py3.sqsh + - /fsx:/fsx +container: ${ENROOT_IMAGE} wandb_api_key_file: null # File where the w&B api key is stored. Key must be on the first line. 
From 4735330600ecfa34ef63db563a44fdb63a6c6850 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 22 Aug 2023 16:50:43 -0500 Subject: [PATCH 030/648] Fixed configuration with new path --- 3.test_cases/2.nemo-launcher-23.03/README.md | 13 ++++++++----- .../2.nemo-launcher-23.03/conf.template/config.yaml | 10 +++++----- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/3.test_cases/2.nemo-launcher-23.03/README.md b/3.test_cases/2.nemo-launcher-23.03/README.md index 4c1ad314..b3cf4d1b 100644 --- a/3.test_cases/2.nemo-launcher-23.03/README.md +++ b/3.test_cases/2.nemo-launcher-23.03/README.md @@ -25,6 +25,7 @@ export REPO=aws-nemo-megatron export TAG=$NEMO_VERSION-py3 export TARGET_PATH=/fsx/nemo-launcher-$NEMO_VERSION # export TEST_CASE_PATH=/home/ec2-user/2.nemo-launcher-23.03 # where you copy the test case or set to your test case path +export ENROOT_IMAGE=/apps/${REPO}_${TAG}.sqsh cd $TEST_CASE_PATH ``` @@ -44,7 +45,7 @@ docker build --progress plain -t ${REPO}:${TAG} -f 0.NemoMegatron-aws-optimized. ``` 4. Convert the Docker container image to an [Enroot](https://github.com/NVIDIA/enroot) squash file that will be stored in `/apps`. This step takes a few minutes. ```bash -IMAGE=/apps/${REPO}_${TAG}.sqsh ; [[ -e $IMAGE ]] && rm $IMAGE ; /usr/bin/time enroot import -o $IMAGE dockerd://${REPO}:${TAG} +[[ -e $ENROOT_IMAGE ]] && rm $ENROOT_IMAGE ; /usr/bin/time enroot import -o $ENROOT_IMAGE dockerd://${REPO}:${TAG} ``` The Enroot squash file will be placed into the `/apps` directory. 
@@ -64,7 +65,7 @@ mkdir -p $TARGET_PATH cd $TARGET_PATH enroot start --mount $TARGET_PATH:/workspace/mount_dir \ --env NVIDIA_VISIBLE_DEVICES=void \ - /apps/${REPO}_${TAG}.sqsh \ + $ENROOT_IMAGE \ cp -a /opt/NeMo-Megatron-Launcher/launcher_scripts /opt/NeMo-Megatron-Launcher/auto_configurator /opt/FasterTransformer /workspace/mount_dir/ ``` The `NVIDIA_VISIBLE_DEVICES` variable is set to void to prevent the process to check for the Nvidia driver presence (since we don't need GPUs here). @@ -93,9 +94,11 @@ Next, you need to prepare the configuration files as follow: | srun_args | `"--no-container-mount-home"` | Arguments for the [srun](https://slurm.schedmd.com/srun.html) command (here for Pyxis) | | stderr_to_stdout | `True` | Merge `stderr` and `stdout` | -2. OnCopy all the .yaml config files `{conf.template/ => launcher_scripts/conf/}` with this command: +2. Copy all the .yaml config files `{conf.template/ => launcher_scripts/conf/}` and substitute environment variables as follows: ```bash -cp -TRv ${TEST_CASE_PATH}/conf.template/ ${TARGET_PATH}/launcher_scripts/conf/ +cp -Rv ${TEST_CASE_PATH}/conf.template/cluster ${TARGET_PATH}/launcher_scripts/conf/cluster +cp -Rv ${TEST_CASE_PATH}/conf.template/data_preparation ${TARGET_PATH}/launcher_scripts/conf/data_preparation +envsubst < ${TEST_CASE_PATH}/conf.template/config.yaml > ${TARGET_PATH}/launcher_scripts/conf/config.yaml ``` @@ -111,7 +114,7 @@ source ${TARGET_PATH}/.venv/bin/activate Run the data preparation stage by executing the following script. It will submit the data preparation jobs. You can check progress with `squeue` to get the jobs in your Slurm queues and their status. 
```bash -bash 1.data-preparation.sh +bash ${TEST_CASE_PATH}/1.data-preparation.sh ``` Once completed, expect the training data (vocab and the pre-processed Pile dataset) as follows: diff --git a/3.test_cases/2.nemo-launcher-23.03/conf.template/config.yaml b/3.test_cases/2.nemo-launcher-23.03/conf.template/config.yaml index 3b2c30e1..33b5dbc8 100644 --- a/3.test_cases/2.nemo-launcher-23.03/conf.template/config.yaml +++ b/3.test_cases/2.nemo-launcher-23.03/conf.template/config.yaml @@ -26,12 +26,12 @@ stages: - export cluster_type: bcm # bcm or bcp. If bcm, it must match - cluster above. -launcher_scripts_path: /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03/launcher_scripts # Path to NeMo Megatron Launch scripts, should ends with /launcher_scripts -data_dir: /fsx/ubuntu/nemo-megatron-23.03/data # Location to store and read the data. -base_results_dir: /fsx/ubuntu/nemo-megatron-23.03/results # Location to store the results, checkpoints and logs. +launcher_scripts_path: ${TARGET_PATH}/launcher_scripts # Path to NeMo Megatron Launch scripts, should ends with /launcher_scripts +data_dir: ${TARGET_PATH}/data # Location to store and read the data. +base_results_dir: ${TARGET_PATH}/results # Location to store the results, checkpoints and logs. container_mounts: # List of additional paths to mount to container. They will be mounted to same path. - - /fsx/ubuntu:/fsx/ubuntu -container: /fsx/ubuntu/aws-nemo-megatron_23.03-py3.sqsh + - /fsx:/fsx +container: ${ENROOT_IMAGE} wandb_api_key_file: null # File where the w&B api key is stored. Key must be on the first line. 
From 7ca488e6f7cb5983dd92621c6fb719c05584e213 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 22 Aug 2023 18:38:28 -0500 Subject: [PATCH 031/648] Fix table formatting in nemo doc --- 3.test_cases/2.nemo-launcher-23.03/README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/3.test_cases/2.nemo-launcher-23.03/README.md b/3.test_cases/2.nemo-launcher-23.03/README.md index b3cf4d1b..5be868f3 100644 --- a/3.test_cases/2.nemo-launcher-23.03/README.md +++ b/3.test_cases/2.nemo-launcher-23.03/README.md @@ -81,8 +81,9 @@ pip3.8 install -r <(curl -fsSL https://raw.githubusercontent.com/NVIDIA/NeMo-Meg Next, you need to prepare the configuration files as follow: 1. Review and update the partition name in the .yaml config file `conf.template/cluster/bcm.yaml`. Here is a summary of the values. -| Value | Default | Definition | -| ---------------- | ---------- | --------------------------------------------------------------------------------------------------------- | + +| Value | Default | Definition | +| ---------------- | ----------------------------- | -------------------------------------------------------------------------------------- | | partition | `null` | Slurm partition, same as a job queue | | account | `null` | Account if using [accounting](https://slurm.schedmd.com/accounting.html) | | exclusive | `True` | The job has [exclusive](https://stackoverflow.com/questions/66817279/what-does-the-keyword-exclusive-mean-in-slurm) use the instances it runs on (no other job can take it) | From 4b5a5dec66947c08e1b8f0502088768cea8d6c81 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 22 Aug 2023 18:38:28 -0500 Subject: [PATCH 032/648] Fix table formatting in nemo doc --- 3.test_cases/2.nemo-launcher-23.03/README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/3.test_cases/2.nemo-launcher-23.03/README.md b/3.test_cases/2.nemo-launcher-23.03/README.md index b3cf4d1b..5be868f3 100644 --- 
a/3.test_cases/2.nemo-launcher-23.03/README.md +++ b/3.test_cases/2.nemo-launcher-23.03/README.md @@ -81,8 +81,9 @@ pip3.8 install -r <(curl -fsSL https://raw.githubusercontent.com/NVIDIA/NeMo-Meg Next, you need to prepare the configuration files as follow: 1. Review and update the partition name in the .yaml config file `conf.template/cluster/bcm.yaml`. Here is a summary of the values. -| Value | Default | Definition | -| ---------------- | ---------- | --------------------------------------------------------------------------------------------------------- | + +| Value | Default | Definition | +| ---------------- | ----------------------------- | -------------------------------------------------------------------------------------- | | partition | `null` | Slurm partition, same as a job queue | | account | `null` | Account if using [accounting](https://slurm.schedmd.com/accounting.html) | | exclusive | `True` | The job has [exclusive](https://stackoverflow.com/questions/66817279/what-does-the-keyword-exclusive-mean-in-slurm) use the instances it runs on (no other job can take it) | From 77e05436fc49d470869dcf80e728b81767692db8 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 22 Aug 2023 19:59:56 -0500 Subject: [PATCH 033/648] Change nccl test case location and create directory for micro-tests --- .../0.nccl-tests/0.nccl-tests.Dockerfile | 0 .../0.nccl-tests/1.nccl-tests.sbatch | 0 .../0.nccl-tests/2.nccl-3collectives.sbatch | 72 +++++++++++++++++++ .../0.nccl-tests/README.md | 9 +++ README.md | 3 +- 5 files changed, 82 insertions(+), 2 deletions(-) rename {3.test_cases => 4.validation_scripts}/0.nccl-tests/0.nccl-tests.Dockerfile (100%) rename {3.test_cases => 4.validation_scripts}/0.nccl-tests/1.nccl-tests.sbatch (100%) create mode 100644 4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch rename {3.test_cases => 4.validation_scripts}/0.nccl-tests/README.md (92%) diff --git a/3.test_cases/0.nccl-tests/0.nccl-tests.Dockerfile 
b/4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile similarity index 100% rename from 3.test_cases/0.nccl-tests/0.nccl-tests.Dockerfile rename to 4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile diff --git a/3.test_cases/0.nccl-tests/1.nccl-tests.sbatch b/4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch similarity index 100% rename from 3.test_cases/0.nccl-tests/1.nccl-tests.sbatch rename to 4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch diff --git a/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch b/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch new file mode 100644 index 00000000..63e418c7 --- /dev/null +++ b/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch @@ -0,0 +1,72 @@ +#!/bin/bash + +#SBATCH -N 2 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs +#SBATCH --job-name=megatron_gpt # name of your job +#SBATCH --ntasks-per-node 8 # Number of GPU per node +#SBATCH --gres=gpu:8 # number of GPU we reserve +#SBATCH --exclusive +#SBATCH --wait-all-nodes=1 + +### Disable hyperthreading by setting the tasks per core to 1 +#SBATCH --ntasks-per-core=1 + +########################### +###### User Variables ##### +########################### + + +# default variables for Enroot +: "${APPS_PATH:=/apps}" +: "${NCCL_TESTS_PATH:=/opt/nccl-tests/build}" + +: "${IMAGE:=$APPS_PATH/nccl.sqsh}" + +## Plenty of EFA level variables +export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d +export FI_EFA_FORK_SAFE=1 + +# export NCCL_ALGO=Ring +export FI_LOG_LEVEL=1 +export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons +export FI_EFA_ENABLE_SHM_TRANSFER=1 + +# https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 +# https://github.com/pytorch/pytorch/issues/68893 +export NCCL_ASYNC_ERROR_HANDLING=1 +export NCCL_DEBUG=INFO +export NCCL_DEBUG_FILE=nccl-debug-%h-%p.txt + +declare -a ARGS=( + --container-image $IMAGE +) + + +echo " +Hostname: $(hostname) +" + 
+env + + +echo " +################################################################################ +# $NCCL_TEST_PATH/all_reduce_perf +################################################################################ +" +srun -l "${ARGS[@]}" --mpi=pmix $NCCL_TEST_PATH/all_reduce_perf -b 8 -e 1G -f 2 -g 1 -c 1 -n 100 + +sleep 5 +echo " +################################################################################ +# $NCCL_TEST_PATH/all_gather_perf +################################################################################ +" +srun -l "${ARGS[@]}" --mpi=pmix $NCCL_TEST_PATH/all_gather_perf -b 8 -e 1G -f 2 -g 1 -c 1 -n 100 + +sleep 5 +echo " +################################################################################ +# $NCCL_TEST_PATH/reduce_scatter_perf +################################################################################ +" +srun -l "${ARGS[@]}" --mpi=pmix $NCCL_TEST_PATH/reduce_scatter_perf -b 8 -e 1G -f 2 -g 1 -c 1 -n 100 diff --git a/3.test_cases/0.nccl-tests/README.md b/4.validation_scripts/0.nccl-tests/README.md similarity index 92% rename from 3.test_cases/0.nccl-tests/README.md rename to 4.validation_scripts/0.nccl-tests/README.md index 4c2028b1..1dfa9ee2 100644 --- a/3.test_cases/0.nccl-tests/README.md +++ b/4.validation_scripts/0.nccl-tests/README.md @@ -74,6 +74,15 @@ A Scatter performance test will be executed from 8B to 2 GB, the output should l To change the type of collective to test, modify the line with `srun` in the file `1.nccl-tests.sbatch` and change `scatter_perf` to any of: `all_gather_perf`, `alltoall_perf`, `gather_perf`, `reduce_perf`, `scatter_perf`, `all_reduce_perf`, `broadcast_perf`, `hypercube_perf`, `reduce_scatter_perf`, `sendrecv_perf`. +### 2.1 Measure multiple collectives with one job + +Run the NCCL tests for different collectives in one job using the submission script `2.nccl-3collectives.sbatch`. 
It will execute tests on the collectives [AllReduce](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/usage/collectives.html#allreduce), [AllGather](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/usage/collectives.html#allgather) and [ReduceScatter](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/usage/collectives.html#reducescatter). + +```bash +sbatch 2.nccl-3collectives.sbatch +``` + + ## 3. Understanding NCCL Bandwidth The NCCL tests reports metrics for the time to execute a given communication collective operation, the Algorithmic bandwidth and the bus bandwidth. diff --git a/README.md b/README.md index 5a707491..c65b5f12 100644 --- a/README.md +++ b/README.md @@ -36,9 +36,8 @@ Custom machine images can be built using [Packer](www.packer.io) for AWS Paralle All test cases are under `3.test_cases/`. You can go in each test case directory to learn how to run it. -| Test case | PC | EKS | AWS Batch | +| Test cases | PC | EKS | AWS Batch | | ----------------------- | ---- | ----- | --------- | -| `0.nccl-tests` | ✅ | ❓ | ❓ | | `1.megatron-lm` | ✅ | ❓ | ❓ | | `2.nemo-launcher-23.03` | ✅ | ❌ | ❌ | | `3.MPT` | ❓ | ❓ | ❓ | From 1d19ab05de32b4d51ccbf219bdb1197f67333d2c Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 22 Aug 2023 19:59:56 -0500 Subject: [PATCH 034/648] Change nccl test case location and create directory for micro-tests --- .../0.nccl-tests/0.nccl-tests.Dockerfile | 0 .../0.nccl-tests/1.nccl-tests.sbatch | 0 .../0.nccl-tests/2.nccl-3collectives.sbatch | 72 +++++++++++++++++++ .../0.nccl-tests/README.md | 9 +++ README.md | 3 +- 5 files changed, 82 insertions(+), 2 deletions(-) rename {3.test_cases => 4.validation_scripts}/0.nccl-tests/0.nccl-tests.Dockerfile (100%) rename {3.test_cases => 4.validation_scripts}/0.nccl-tests/1.nccl-tests.sbatch (100%) create mode 100644 4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch rename {3.test_cases => 4.validation_scripts}/0.nccl-tests/README.md (92%) diff 
--git a/3.test_cases/0.nccl-tests/0.nccl-tests.Dockerfile b/4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile similarity index 100% rename from 3.test_cases/0.nccl-tests/0.nccl-tests.Dockerfile rename to 4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile diff --git a/3.test_cases/0.nccl-tests/1.nccl-tests.sbatch b/4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch similarity index 100% rename from 3.test_cases/0.nccl-tests/1.nccl-tests.sbatch rename to 4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch diff --git a/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch b/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch new file mode 100644 index 00000000..63e418c7 --- /dev/null +++ b/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch @@ -0,0 +1,72 @@ +#!/bin/bash + +#SBATCH -N 2 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs +#SBATCH --job-name=megatron_gpt # name of your job +#SBATCH --ntasks-per-node 8 # Number of GPU per node +#SBATCH --gres=gpu:8 # number of GPU we reserve +#SBATCH --exclusive +#SBATCH --wait-all-nodes=1 + +### Disable hyperthreading by setting the tasks per core to 1 +#SBATCH --ntasks-per-core=1 + +########################### +###### User Variables ##### +########################### + + +# default variables for Enroot +: "${APPS_PATH:=/apps}" +: "${NCCL_TESTS_PATH:=/opt/nccl-tests/build}" + +: "${IMAGE:=$APPS_PATH/nccl.sqsh}" + +## Plenty of EFA level variables +export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d +export FI_EFA_FORK_SAFE=1 + +# export NCCL_ALGO=Ring +export FI_LOG_LEVEL=1 +export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons +export FI_EFA_ENABLE_SHM_TRANSFER=1 + +# https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 +# https://github.com/pytorch/pytorch/issues/68893 +export NCCL_ASYNC_ERROR_HANDLING=1 +export NCCL_DEBUG=INFO +export NCCL_DEBUG_FILE=nccl-debug-%h-%p.txt + +declare -a ARGS=( + 
--container-image $IMAGE +) + + +echo " +Hostname: $(hostname) +" + +env + + +echo " +################################################################################ +# $NCCL_TEST_PATH/all_reduce_perf +################################################################################ +" +srun -l "${ARGS[@]}" --mpi=pmix $NCCL_TEST_PATH/all_reduce_perf -b 8 -e 1G -f 2 -g 1 -c 1 -n 100 + +sleep 5 +echo " +################################################################################ +# $NCCL_TEST_PATH/all_gather_perf +################################################################################ +" +srun -l "${ARGS[@]}" --mpi=pmix $NCCL_TEST_PATH/all_gather_perf -b 8 -e 1G -f 2 -g 1 -c 1 -n 100 + +sleep 5 +echo " +################################################################################ +# $NCCL_TEST_PATH/reduce_scatter_perf +################################################################################ +" +srun -l "${ARGS[@]}" --mpi=pmix $NCCL_TEST_PATH/reduce_scatter_perf -b 8 -e 1G -f 2 -g 1 -c 1 -n 100 diff --git a/3.test_cases/0.nccl-tests/README.md b/4.validation_scripts/0.nccl-tests/README.md similarity index 92% rename from 3.test_cases/0.nccl-tests/README.md rename to 4.validation_scripts/0.nccl-tests/README.md index 4c2028b1..1dfa9ee2 100644 --- a/3.test_cases/0.nccl-tests/README.md +++ b/4.validation_scripts/0.nccl-tests/README.md @@ -74,6 +74,15 @@ A Scatter performance test will be executed from 8B to 2 GB, the output should l To change the type of collective to test, modify the line with `srun` in the file `1.nccl-tests.sbatch` and change `scatter_perf` to any of: `all_gather_perf`, `alltoall_perf`, `gather_perf`, `reduce_perf`, `scatter_perf`, `all_reduce_perf`, `broadcast_perf`, `hypercube_perf`, `reduce_scatter_perf`, `sendrecv_perf`. +### 2.1 Measure multiple collectives with one job + +Run the NCCL tests for different collectives in one job using the submission script `2.nccl-3collectives.sbatch`. 
It will execute tests on the collectives [AllReduce](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/usage/collectives.html#allreduce), [AllGather](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/usage/collectives.html#allgather) and [ReduceScatter](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/usage/collectives.html#reducescatter). + +```bash +sbatch 2.nccl-3collectives.sbatch +``` + + ## 3. Understanding NCCL Bandwidth The NCCL tests reports metrics for the time to execute a given communication collective operation, the Algorithmic bandwidth and the bus bandwidth. diff --git a/README.md b/README.md index 5a707491..c65b5f12 100644 --- a/README.md +++ b/README.md @@ -36,9 +36,8 @@ Custom machine images can be built using [Packer](www.packer.io) for AWS Paralle All test cases are under `3.test_cases/`. You can go in each test case directory to learn how to run it. -| Test case | PC | EKS | AWS Batch | +| Test cases | PC | EKS | AWS Batch | | ----------------------- | ---- | ----- | --------- | -| `0.nccl-tests` | ✅ | ❓ | ❓ | | `1.megatron-lm` | ✅ | ❓ | ❓ | | `2.nemo-launcher-23.03` | ✅ | ❌ | ❌ | | `3.MPT` | ❓ | ❓ | ❓ | From c6b0d60fda8706729a957b2b584a16fc08104d2e Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 22 Aug 2023 20:21:03 -0500 Subject: [PATCH 035/648] Add validation script for nccl-env --- .../0.nccl-tests/3.nccl-validate.sbatch | 68 +++++++++++++++++++ 4.validation_scripts/0.nccl-tests/README.md | 9 ++- 2 files changed, 76 insertions(+), 1 deletion(-) create mode 100644 4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch diff --git a/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch b/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch new file mode 100644 index 00000000..1f29c6c3 --- /dev/null +++ b/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch @@ -0,0 +1,68 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +#SBATCH -N 2 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs +#SBATCH --job-name=megatron_gpt # name of your job +#SBATCH --ntasks-per-node 8 # Number of GPU per node +#SBATCH --gres=gpu:8 # number of GPU we reserve +#SBATCH --exclusive +#SBATCH --wait-all-nodes=1 + +### Disable hyperthreading by setting the tasks per core to 1 +#SBATCH --ntasks-per-core=1 + +set -ex + +# Validate that mpirun does not need -x to propagate env vars defined in .sbatch script + +########################### +###### User Variables ##### +########################### + + +# default variables for Enroot +: "${APPS_PATH:=/apps}" +: "${NCCL_TESTS_PATH:=/opt/nccl-tests/build}" + +: "${IMAGE:=$APPS_PATH/nccl.sqsh}" + +## Plenty of EFA level variables +export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d +export FI_EFA_FORK_SAFE=1 +# export NCCL_ALGO=Ring +export FI_LOG_LEVEL=1 +export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons +export FI_EFA_ENABLE_SHM_TRANSFER=1 +# https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 +# https://github.com/pytorch/pytorch/issues/68893 +#export NCCL_SOCKET_IFNAME=ens +export NCCL_ASYNC_ERROR_HANDLING=1 +export NCCL_DEBUG=INFO + + +declare -a ARGS=( + --container-image $IMAGE +) + + +export HELLO_WORLD="${1-undefined}" + +echo " +Hostname: $(hostname) +" + +env + +echo " +######################################## +# srun +########################################" +srun bash -c 'hostname ; env | egrep "^NCCL|^FI|^HELLO" | sed "s/^/`hostname`: /g"' + +echo " +######################################## +# mpirun (WITHOUT -x) +########################################" +mpirun --tag-output bash -c 'hostname ; env | egrep "^NCCL|^FI|^HELLO" | sed "s/^/`hostname`: /g"' diff --git a/4.validation_scripts/0.nccl-tests/README.md b/4.validation_scripts/0.nccl-tests/README.md index 1dfa9ee2..880196cf 100644 --- a/4.validation_scripts/0.nccl-tests/README.md +++ 
b/4.validation_scripts/0.nccl-tests/README.md @@ -73,7 +73,6 @@ A Scatter performance test will be executed from 8B to 2 GB, the output should l To change the type of collective to test, modify the line with `srun` in the file `1.nccl-tests.sbatch` and change `scatter_perf` to any of: `all_gather_perf`, `alltoall_perf`, `gather_perf`, `reduce_perf`, `scatter_perf`, `all_reduce_perf`, `broadcast_perf`, `hypercube_perf`, `reduce_scatter_perf`, `sendrecv_perf`. - ### 2.1 Measure multiple collectives with one job Run the NCCL tests for different collectives in one job using the submission script `2.nccl-3collectives.sbatch`. It will execute tests on the collectives [AllReduce](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/usage/collectives.html#allreduce), [AllGather](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/usage/collectives.html#allgather) and [ReduceScatter](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/usage/collectives.html#reducescatter). @@ -83,6 +82,14 @@ sbatch 2.nccl-3collectives.sbatch ``` +### 2.2 Validate the NCCL configuration + +You can validate your environment for NCCL using the batch file `3.nccl-validate.sbatch`. Submit it as follows: + +```bash +sbatch 3.nccl-validate.sbatch +``` + ## 3. Understanding NCCL Bandwidth The NCCL tests reports metrics for the time to execute a given communication collective operation, the Algorithmic bandwidth and the bus bandwidth. 
From 39bbe5773bd18bb404002a82440c2548422fd09e Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 22 Aug 2023 20:21:03 -0500 Subject: [PATCH 036/648] Add validation script for nccl-env --- .../0.nccl-tests/3.nccl-validate.sbatch | 68 +++++++++++++++++++ 4.validation_scripts/0.nccl-tests/README.md | 9 ++- 2 files changed, 76 insertions(+), 1 deletion(-) create mode 100644 4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch diff --git a/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch b/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch new file mode 100644 index 00000000..1f29c6c3 --- /dev/null +++ b/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch @@ -0,0 +1,68 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +#SBATCH -N 2 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs +#SBATCH --job-name=megatron_gpt # name of your job +#SBATCH --ntasks-per-node 8 # Number of GPU per node +#SBATCH --gres=gpu:8 # number of GPU we reserve +#SBATCH --exclusive +#SBATCH --wait-all-nodes=1 + +### Disable hyperthreading by setting the tasks per core to 1 +#SBATCH --ntasks-per-core=1 + +set -ex + +# Validate that mpirun does not need -x to propagate env vars defined in .sbatch script + +########################### +###### User Variables ##### +########################### + + +# default variables for Enroot +: "${APPS_PATH:=/apps}" +: "${NCCL_TESTS_PATH:=/opt/nccl-tests/build}" + +: "${IMAGE:=$APPS_PATH/nccl.sqsh}" + +## Plenty of EFA level variables +export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d +export FI_EFA_FORK_SAFE=1 +# export NCCL_ALGO=Ring +export FI_LOG_LEVEL=1 +export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons +export FI_EFA_ENABLE_SHM_TRANSFER=1 +# https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 +# https://github.com/pytorch/pytorch/issues/68893 +#export 
NCCL_SOCKET_IFNAME=ens +export NCCL_ASYNC_ERROR_HANDLING=1 +export NCCL_DEBUG=INFO + + +declare -a ARGS=( + --container-image $IMAGE +) + + +export HELLO_WORLD="${1-undefined}" + +echo " +Hostname: $(hostname) +" + +env + +echo " +######################################## +# srun +########################################" +srun bash -c 'hostname ; env | egrep "^NCCL|^FI|^HELLO" | sed "s/^/`hostname`: /g"' + +echo " +######################################## +# mpirun (WITHOUT -x) +########################################" +mpirun --tag-output bash -c 'hostname ; env | egrep "^NCCL|^FI|^HELLO" | sed "s/^/`hostname`: /g"' diff --git a/4.validation_scripts/0.nccl-tests/README.md b/4.validation_scripts/0.nccl-tests/README.md index 1dfa9ee2..880196cf 100644 --- a/4.validation_scripts/0.nccl-tests/README.md +++ b/4.validation_scripts/0.nccl-tests/README.md @@ -73,7 +73,6 @@ A Scatter performance test will be executed from 8B to 2 GB, the output should l To change the type of collective to test, modify the line with `srun` in the file `1.nccl-tests.sbatch` and change `scatter_perf` to any of: `all_gather_perf`, `alltoall_perf`, `gather_perf`, `reduce_perf`, `scatter_perf`, `all_reduce_perf`, `broadcast_perf`, `hypercube_perf`, `reduce_scatter_perf`, `sendrecv_perf`. - ### 2.1 Measure multiple collectives with one job Run the NCCL tests for different collectives in one job using the submission script `2.nccl-3collectives.sbatch`. It will execute tests on the collectives [AllReduce](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/usage/collectives.html#allreduce), [AllGather](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/usage/collectives.html#allgather) and [ReduceScatter](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/usage/collectives.html#reducescatter). 
@@ -83,6 +82,14 @@ sbatch 2.nccl-3collectives.sbatch ``` +### 2.2 Validate the NCCL configuration + +You can validate your environment for NCCL using the batch file `3.nccl-validate.sbatch`. Submit it as follows: + +```bash +sbatch 3.nccl-validate.sbatch +``` + ## 3. Understanding NCCL Bandwidth The NCCL tests reports metrics for the time to execute a given communication collective operation, the Algorithmic bandwidth and the bus bandwidth. From 1676c5dbd971b7aedc86a58624ea86a458f07444 Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Thu, 24 Aug 2023 04:51:19 +0000 Subject: [PATCH 037/648] Fixed DATA_PATH 1.data-preprocessing.sbatch --- 3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch index c6eef41b..19dc87ce 100644 --- a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch +++ b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch @@ -12,7 +12,7 @@ # default variables for Enroot : "${APPS_PATH:=/apps}" -: "${DATA_PATH:=/fsx}" +: "${DATA_PATH:=/fsx/gpt2}" declare -a ARGS=( --container-image $IMAGE From 43b40f0db9db2afb44fc8e65ab09c71c143e0507 Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Thu, 24 Aug 2023 04:51:19 +0000 Subject: [PATCH 038/648] Fixed DATA_PATH 1.data-preprocessing.sbatch --- 3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch index c6eef41b..19dc87ce 100644 --- a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch +++ b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch @@ -12,7 +12,7 @@ # default variables for Enroot : "${APPS_PATH:=/apps}" -: "${DATA_PATH:=/fsx}" +: "${DATA_PATH:=/fsx/gpt2}" declare -a ARGS=( --container-image $IMAGE From 
c3bd4946afdb2b8fecd0aedfc61a2c442c90bd0b Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Thu, 24 Aug 2023 05:16:28 +0000 Subject: [PATCH 039/648] Update 1.data-preprocessing.sbatch --- 3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch index 19dc87ce..fcec307a 100644 --- a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch +++ b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch @@ -8,11 +8,11 @@ ########################### : "${IMAGE:=$APPS_PATH/megatron-preprocess.sqsh}" -: "${FSX_MOUNT:=/fsx:/fsx}" +: "${FSX_MOUNT:=/fsx/gpt2:/fsx}" # default variables for Enroot : "${APPS_PATH:=/apps}" -: "${DATA_PATH:=/fsx/gpt2}" +: "${DATA_PATH:=/fsx}" declare -a ARGS=( --container-image $IMAGE From 22b99dfe37881e3abf62857fa3b7b517aad93f57 Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Thu, 24 Aug 2023 05:16:28 +0000 Subject: [PATCH 040/648] Update 1.data-preprocessing.sbatch --- 3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch index 19dc87ce..fcec307a 100644 --- a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch +++ b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch @@ -8,11 +8,11 @@ ########################### : "${IMAGE:=$APPS_PATH/megatron-preprocess.sqsh}" -: "${FSX_MOUNT:=/fsx:/fsx}" +: "${FSX_MOUNT:=/fsx/gpt2:/fsx}" # default variables for Enroot : "${APPS_PATH:=/apps}" -: "${DATA_PATH:=/fsx/gpt2}" +: "${DATA_PATH:=/fsx}" declare -a ARGS=( --container-image $IMAGE From de17b512597c6012fba9afa383f338323e593dfd Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Fri, 25 Aug 2023 16:55:39 -0500 Subject: [PATCH 041/648] Fix diagrams sections in vpc readme --- 
1.architectures/1.vpc_network/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/1.architectures/1.vpc_network/README.md b/1.architectures/1.vpc_network/README.md index de35aad2..3ec077e1 100644 --- a/1.architectures/1.vpc_network/README.md +++ b/1.architectures/1.vpc_network/README.md @@ -44,7 +44,7 @@ aws cloudformation create-stack --stack-name vpc-stack-ml\ #### Architecture Diagram - + ### 2. Template VPC One AZs @@ -79,4 +79,4 @@ aws cloudformation create-stack --stack-name vpc-stack-ml\ #### Architecture Diagram - + From 19106b29784098b0f6e6faeeec8b5563f3f470d2 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Fri, 25 Aug 2023 16:55:39 -0500 Subject: [PATCH 042/648] Fix diagrams sections in vpc readme --- 1.architectures/1.vpc_network/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/1.architectures/1.vpc_network/README.md b/1.architectures/1.vpc_network/README.md index de35aad2..3ec077e1 100644 --- a/1.architectures/1.vpc_network/README.md +++ b/1.architectures/1.vpc_network/README.md @@ -44,7 +44,7 @@ aws cloudformation create-stack --stack-name vpc-stack-ml\ #### Architecture Diagram - + ### 2. 
Template VPC One AZs @@ -79,4 +79,4 @@ aws cloudformation create-stack --stack-name vpc-stack-ml\ #### Architecture Diagram - + From 560a3b75be66397d93dad734af7bf32e81f52af4 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 29 Aug 2023 22:47:42 +0800 Subject: [PATCH 043/648] Nemo-launcher: use mock dataset --- .../1.data-preparation.sh | 19 ---- .../download_pile_dataset.sh | 33 ------ .../2.nemo-launcher-23.03/url_list.txt | 7 -- .../.gitignore | 0 .../0.NemoMegatron-aws-optimized.Dockerfile | 4 +- .../README.md | 106 +++++------------- .../bmk-pretrain-gpt3-126m2.sh | 5 +- .../bmk-pretrain-gpt3-5b2.sh | 1 - .../build-enroot-image.sh | 4 +- .../conf.template/cluster/bcm.yaml | 0 .../conf.template/config.yaml | 0 .../gpt3/download_gpt3_pile.yaml | 0 .../step-00-bootstrap-launcher.sh | 12 +- .../step-01-bmk-pretrain-gpt3.sh} | 10 +- README.md | 17 ++- 15 files changed, 57 insertions(+), 161 deletions(-) delete mode 100755 3.test_cases/2.nemo-launcher-23.03/1.data-preparation.sh delete mode 100755 3.test_cases/2.nemo-launcher-23.03/download_pile_dataset.sh delete mode 100644 3.test_cases/2.nemo-launcher-23.03/url_list.txt rename 3.test_cases/{2.nemo-launcher-23.03 => 2.nemo-launcher}/.gitignore (100%) rename 3.test_cases/{2.nemo-launcher-23.03 => 2.nemo-launcher}/0.NemoMegatron-aws-optimized.Dockerfile (96%) rename 3.test_cases/{2.nemo-launcher-23.03 => 2.nemo-launcher}/README.md (70%) rename 3.test_cases/{2.nemo-launcher-23.03 => 2.nemo-launcher}/bmk-pretrain-gpt3-126m2.sh (69%) rename 3.test_cases/{2.nemo-launcher-23.03 => 2.nemo-launcher}/bmk-pretrain-gpt3-5b2.sh (91%) rename 3.test_cases/{2.nemo-launcher-23.03 => 2.nemo-launcher}/build-enroot-image.sh (97%) rename 3.test_cases/{2.nemo-launcher-23.03 => 2.nemo-launcher}/conf.template/cluster/bcm.yaml (100%) rename 3.test_cases/{2.nemo-launcher-23.03 => 2.nemo-launcher}/conf.template/config.yaml (100%) rename 3.test_cases/{2.nemo-launcher-23.03 => 
2.nemo-launcher}/conf.template/data_preparation/gpt3/download_gpt3_pile.yaml (100%) rename 3.test_cases/{2.nemo-launcher-23.03 => 2.nemo-launcher}/step-00-bootstrap-launcher.sh (68%) rename 3.test_cases/{2.nemo-launcher-23.03/step-02-bmk-pretrain-gpt3.sh => 2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh} (84%) diff --git a/3.test_cases/2.nemo-launcher-23.03/1.data-preparation.sh b/3.test_cases/2.nemo-launcher-23.03/1.data-preparation.sh deleted file mode 100755 index 4626a638..00000000 --- a/3.test_cases/2.nemo-launcher-23.03/1.data-preparation.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 - -set -exuo pipefail - -CONT_DATA_DIR=${TARGET_PATH}/data/the_pile_gpt3 -CONT_TOKENIZER_DIR=${TARGET_PATH}/data/bpe - -# data_preparation.file_numbers='0-29' \ -mkdir -p $CONT_DATA_DIR -HYDRA_FULL_ERROR=1 python3 $TARGET_PATH/launcher_scripts/main.py \ - stages=[data_preparation] \ - data_dir=$CONT_DATA_DIR \ - data_preparation.file_numbers='0-0' \ - data_preparation.vocab_save_dir=$CONT_TOKENIZER_DIR \ - data_preparation.merges_save_dir=$CONT_TOKENIZER_DIR \ - data_preparation.run.node_array_size=1 diff --git a/3.test_cases/2.nemo-launcher-23.03/download_pile_dataset.sh b/3.test_cases/2.nemo-launcher-23.03/download_pile_dataset.sh deleted file mode 100755 index 348cd91a..00000000 --- a/3.test_cases/2.nemo-launcher-23.03/download_pile_dataset.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 - -# Define variables -NUM_CONNECTIONS=16 -NUM_FILES_PARALLEL_DOWNLOAD=16 -URL_LIST_FILE_NAME="./url_list.txt" -OUTPUT_DIR="" - -# Check for output directory argument -if [ $# -eq 1 ] -then - OUTPUT_DIR="$1" -else - echo "OUTPUT_DIR is not defined. So using default ./" - OUTPUT_DIR="./" - exit 1 -fi - -# Install aria2 if not already installed -if ! 
command -v aria2c &> /dev/null -then - echo "aria2c not found. Installing..." - sudo apt update - sudo apt install -y aria2 -fi - -# Download PILE dataset with aria2c -aria2c -x${NUM_CONNECTIONS} -s${NUM_FILES_PARALLEL_DOWNLOAD} -i ${URL_LIST_FILE_NAME} -d ${OUTPUT_DIR} - -echo "PILE dataset download complete!" diff --git a/3.test_cases/2.nemo-launcher-23.03/url_list.txt b/3.test_cases/2.nemo-launcher-23.03/url_list.txt deleted file mode 100644 index bec928ef..00000000 --- a/3.test_cases/2.nemo-launcher-23.03/url_list.txt +++ /dev/null @@ -1,7 +0,0 @@ -https://the-eye.eu/public/AI/pile/train/00.jsonl.zst -https://the-eye.eu/public/AI/pile/train/01.jsonl.zst -https://the-eye.eu/public/AI/pile/train/02.jsonl.zst -https://the-eye.eu/public/AI/pile/train/03.jsonl.zst -https://the-eye.eu/public/AI/pile/train/04.jsonl.zst -https://the-eye.eu/public/AI/pile/val.jsonl.zst -https://the-eye.eu/public/AI/pile/test.jsonl.zst diff --git a/3.test_cases/2.nemo-launcher-23.03/.gitignore b/3.test_cases/2.nemo-launcher/.gitignore similarity index 100% rename from 3.test_cases/2.nemo-launcher-23.03/.gitignore rename to 3.test_cases/2.nemo-launcher/.gitignore diff --git a/3.test_cases/2.nemo-launcher-23.03/0.NemoMegatron-aws-optimized.Dockerfile b/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile similarity index 96% rename from 3.test_cases/2.nemo-launcher-23.03/0.NemoMegatron-aws-optimized.Dockerfile rename to 3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile index d3e3e1be..a572c4bf 100644 --- a/3.test_cases/2.nemo-launcher-23.03/0.NemoMegatron-aws-optimized.Dockerfile +++ b/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile @@ -2,9 +2,9 @@ # SPDX-License-Identifier: Apache-2.0 # DOCKER_BUILDKIT=1 docker build --progress plain -t aws-nemo-megatron:latest . 
-# Customized from: https://github.com/NVIDIA/NeMo-Megatron-Launcher/blob/23.03/csp_tools/aws/Dockerfile +# Customized from: https://github.com/NVIDIA/NeMo-Megatron-Launcher/blob/23.05/csp_tools/aws/Dockerfile -FROM nvcr.io/ea-bignlp/bignlp-training:23.03-py3 +FROM nvcr.io/ea-bignlp/bignlp-training:23.05-py3 ARG DEBIAN_FRONTEND=noninteractive ENV EFA_INSTALLER_VERSION=latest diff --git a/3.test_cases/2.nemo-launcher-23.03/README.md b/3.test_cases/2.nemo-launcher/README.md similarity index 70% rename from 3.test_cases/2.nemo-launcher-23.03/README.md rename to 3.test_cases/2.nemo-launcher/README.md index 5be868f3..f18240e8 100644 --- a/3.test_cases/2.nemo-launcher-23.03/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -4,10 +4,8 @@ Table of contents: - [1. Pre-requisites](#1-pre-requisites) - [2. Build AWS-optimized Nemo-Launcher image](#2-build-aws-optimized-nemo-launcher-image) -- [3. Seed Nemo-Launcher on head node](#3-seed-nemo-launcher-on-head-node) -- [4. Launch Nemo pipeline](#4-launch-nemo-pipeline) - - [4.1. Prepare Sample Dataset](#41-prepare-sample-dataset) - - [4.2. Pre-training GPT3](#42-pre-training-gpt3) +- [3. Set-up the NemoMegatron environment](#3-set-up-the-nemomegatron-environment) +- [4. Pre-training GPT3](#4-pre-training-gpt3) ## 1. Pre-requisites @@ -16,15 +14,14 @@ The following pre-requisites are needed to run this example: - You have access to the base image [`bignlp-training`](https://registry.ngc.nvidia.com/orgs/ea-bignlp/containers/bignlp-training) is available through NVIDIA's open-beta [here](https://developer.nvidia.com/nemo-framework-open-beta). - Docker, [Enroot](https://github.com/NVIDIA/enroot) and [Pixys](https://github.com/NVIDIA/pyxis) installed on the cluster and available on all nodes. It is assumed you are using a Custom AMI ([example](../../2.amazon_machine_images)) - You will need to setup the following environment variables before running the scripts. 
: ```bash -export NEMO_VERSION=23.03 +export NEMO_VERSION=23.05 export REPO=aws-nemo-megatron export TAG=$NEMO_VERSION-py3 export TARGET_PATH=/fsx/nemo-launcher-$NEMO_VERSION # -export TEST_CASE_PATH=/home/ec2-user/2.nemo-launcher-23.03 # where you copy the test case or set to your test case path +export TEST_CASE_PATH=/home/ec2-user/2.nemo-launcher-23.05 # where you copy the test case or set to your test case path export ENROOT_IMAGE=/apps/${REPO}_${TAG}.sqsh cd $TEST_CASE_PATH @@ -36,31 +33,37 @@ You will retrieve the container image from Nvidia, build an optimized container 1. You have a registered account with Nvidia and can access NGC. Retrieve the NGC API key following [instructions from Nvidia](https://docs.nvidia.com/ngc/gpu-cloud/ngc-user-guide/index.html#generating-api-key). 2. Configure NGC as shown below using the command below, when requested use `$oauthtoken` for the login and the API key from NGC fro the password. + ```bash docker login nvcr.io ``` + 3. Copy the file `0.NemoMegatron-aws-optimized.Dockerfile` to the local directory and run the command below. Docker will retrieve the NemoMegatron container image from NGC then build an optimized container for AWS. This stage takes a few minutes and you can follow progress + ```bash docker build --progress plain -t ${REPO}:${TAG} -f 0.NemoMegatron-aws-optimized.Dockerfile . ``` + 4. Convert the Docker container image to an [Enroot](https://github.com/NVIDIA/enroot) squash file that will be stored in `/apps`. This step takes a few minutes. + ```bash [[ -e $ENROOT_IMAGE ]] && rm $ENROOT_IMAGE ; /usr/bin/time enroot import -o $ENROOT_IMAGE dockerd://${REPO}:${TAG} ``` The Enroot squash file will be placed into the `/apps` directory. - ## 3. Set-up the NemoMegatron environment -You will setup the target directory to host the configurations and requirements for NemoMegatron. It is assumed that your have an FSx for Lustre file system available to all nodes of your cluster via the mountpoint `/fsx`. 
We follow the same logic as in the [NemoMegatron Launcher documentation](https://github.com/NVIDIA/NeMo-Megatron-Launcher/tree/23.03#5111-slurm) - +You will setup the target directory to host the configurations and requirements for NemoMegatron. It is assumed that your have an FSx for Lustre file system available to all nodes of your cluster via the mountpoint `/fsx`. We follow the same logic as in the [NemoMegatron Launcher documentation](https://github.com/NVIDIA/NeMo-Megatron-Launcher/tree/23.05#5111-slurm) 1. Create the target directory with the command below: + ```bash mkdir -p $TARGET_PATH ``` + 2. Retrieve files from the container and place them in the target directory. You execute the container on your head-node for this task using Enroot [start](https://github.com/NVIDIA/enroot/blob/master/doc/cmd/start.md) command. + ```bash cd $TARGET_PATH enroot start --mount $TARGET_PATH:/workspace/mount_dir \ @@ -68,8 +71,10 @@ enroot start --mount $TARGET_PATH:/workspace/mount_dir \ $ENROOT_IMAGE \ cp -a /opt/NeMo-Megatron-Launcher/launcher_scripts /opt/NeMo-Megatron-Launcher/auto_configurator /opt/FasterTransformer /workspace/mount_dir/ ``` + The `NVIDIA_VISIBLE_DEVICES` variable is set to void to prevent the process to check for the Nvidia driver presence (since we don't need GPUs here). 3. Install the NemoMegatron requirements in a Python VirtualEnv by running the set of commands below. + ```bash cd $TARGET_PATH sudo amazon-linux-extras install -y python3.8 # we need Python =>3.8 @@ -82,85 +87,36 @@ Next, you need to prepare the configuration files as follow: 1. Review and update the partition name in the .yaml config file `conf.template/cluster/bcm.yaml`. Here is a summary of the values. 
-| Value | Default | Definition | -| ---------------- | ----------------------------- | -------------------------------------------------------------------------------------- | -| partition | `null` | Slurm partition, same as a job queue | -| account | `null` | Account if using [accounting](https://slurm.schedmd.com/accounting.html) | -| exclusive | `True` | The job has [exclusive](https://stackoverflow.com/questions/66817279/what-does-the-keyword-exclusive-mean-in-slurm) use the instances it runs on (no other job can take it) | -| gpus_per_task | `null` | Number of instances of GPUs per job | -| gpus_per_node | `8` | Number of GPUs to use per node. This is set to 8 GPUs as for th p4d.24xlarge | -| mem | `0` | Requested memory (all) | -| job_name_prefix | `"nemo-megatron-"` | Prefix for your job names | -| gres | `"gpu:8"` | Generic resource [scheduling](https://slurm.schedmd.com/gres.html) | -| srun_args | `"--no-container-mount-home"` | Arguments for the [srun](https://slurm.schedmd.com/srun.html) command (here for Pyxis) | -| stderr_to_stdout | `True` | Merge `stderr` and `stdout` | +| Value | Default | Definition | +| ---------------- | ----------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| partition | `null` | Slurm partition, same as a job queue | +| account | `null` | Account if using [accounting](https://slurm.schedmd.com/accounting.html) | +| exclusive | `True` | The job has [exclusive](https://stackoverflow.com/questions/66817279/what-does-the-keyword-exclusive-mean-in-slurm) use the instances it runs on (no other job can take it) | +| gpus_per_task | `null` | Number of instances of GPUs per job | +| gpus_per_node | `8` | Number of GPUs to use per node. 
This is set to 8 GPUs as for th p4d.24xlarge | +| mem | `0` | Requested memory (all) | +| job_name_prefix | `"nemo-megatron-"` | Prefix for your job names | +| gres | `"gpu:8"` | Generic resource [scheduling](https://slurm.schedmd.com/gres.html) | +| srun_args | `"--no-container-mount-home"` | Arguments for the [srun](https://slurm.schedmd.com/srun.html) command (here for Pyxis) | +| stderr_to_stdout | `True` | Merge `stderr` and `stdout` | 2. Copy all the .yaml config files `{conf.template/ => launcher_scripts/conf/}` and substitute environment variables as follows: + ```bash cp -Rv ${TEST_CASE_PATH}/conf.template/cluster ${TARGET_PATH}/launcher_scripts/conf/cluster cp -Rv ${TEST_CASE_PATH}/conf.template/data_preparation ${TARGET_PATH}/launcher_scripts/conf/data_preparation envsubst < ${TEST_CASE_PATH}/conf.template/config.yaml > ${TARGET_PATH}/launcher_scripts/conf/config.yaml ``` - -## 4. Launch Nemo pipeline +## 4. Pre-training GPT3 This section assumes that you went through the previous sections and 1/ retrieved and built the AWS optimized NemoMegatron container, 2/ setup the NemoMegatron environment. To start, source the NemoMegatron environment: -```bash -source ${TARGET_PATH}/.venv/bin/activate -``` - -### 4.1. Prepare the sample dataset - -Run the data preparation stage by executing the following script. It will submit the data preparation jobs. You can check progress with `squeue` to get the jobs in your Slurm queues and their status. ```bash -bash ${TEST_CASE_PATH}/1.data-preparation.sh -``` - -Once completed, expect the training data (vocab and the pre-processed Pile dataset) as follows: - -```text -/fsx/ubuntu/data -├── bpe # Vocabulary from HF Hub -│ ├── merges.txt -│ └── vocab.json -└── the_pile_gpt3 # Pre-processed the Pile data set (in Nemo format) - ├── my-gpt3_00_text_document.bin - ├── my-gpt3_00_text_document.idx - ├── ... - ├── ... 
- ├── my-gpt3_04_text_document.bin - └── my-gpt3_04_text_document.idx -``` - -Job logs available here: - -```text -/fsx/ubuntu/nemo-megatron-23.03/results/ -└── download_gpt3_pile # Correspond to stage - ├── download # Job within a stage - │ ├── download_gpt3_pile_hydra.yaml # Interpolated config - │ ├── launcher.log # Status of job submission - │ ├── log-nemo-megatron-download_gpt3_pile_.out # Std{err,out} of this array task - │ ├── ... - │ └── nemo-megatron-download_gpt3_pile_submission.sh # Script to submit a Slurm array job - ├── extract - │ ├── download_gpt3_pile_hydra.yaml - │ ├── launcher.log - │ ├── log-nemo-megatron-download_gpt3_pile_.out - │ ├── ... - │ └── nemo-megatron-download_gpt3_pile_submission.sh - ├── launcher_cmd.log - └── preprocess - ├── download_gpt3_pile_hydra.yaml - ├── launcher.log - ├── log-nemo-megatron-download_gpt3_pile_.out - ├── ... - └── nemo-megatron-download_gpt3_pile_submission.sh +source ${TARGET_PATH}/.venv/bin/activate ``` -### 4.2. Pre-training GPT3 +Run pre-training as follows: ```bash # Choose one of these options: diff --git a/3.test_cases/2.nemo-launcher-23.03/bmk-pretrain-gpt3-126m2.sh b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh similarity index 69% rename from 3.test_cases/2.nemo-launcher-23.03/bmk-pretrain-gpt3-126m2.sh rename to 3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh index ae8de765..c5d3a7e2 100755 --- a/3.test_cases/2.nemo-launcher-23.03/bmk-pretrain-gpt3-126m2.sh +++ b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh @@ -14,6 +14,5 @@ export UNIQUE_OUTPUT_DIR=1 BIN_DIR=$(dirname `readlink -e ${BASH_SOURCE[0]}`) -# Node_count == 8 can work without full activations checkpointing. -$BIN_DIR/step-02-bmk-pretrain-gpt3.sh \ - training.model.data.data_prefix=[1.0,\${data_dir}/my-gpt3_00_text_document] +# Node_count == 2 can work without full activations checkpointing. 
+$BIN_DIR/step-02-bmk-pretrain-gpt3.sh diff --git a/3.test_cases/2.nemo-launcher-23.03/bmk-pretrain-gpt3-5b2.sh b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh similarity index 91% rename from 3.test_cases/2.nemo-launcher-23.03/bmk-pretrain-gpt3-5b2.sh rename to 3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh index 9751ac3f..1e2b9b41 100755 --- a/3.test_cases/2.nemo-launcher-23.03/bmk-pretrain-gpt3-5b2.sh +++ b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh @@ -19,7 +19,6 @@ BIN_DIR=$(dirname `readlink -e ${BASH_SOURCE[0]}`) # Below settings is similar to 22.09, except that 22.09 funnily didn't OOM with # activations_checkpoint_num_layers=0. $BIN_DIR/step-02-bmk-pretrain-gpt3.sh \ - training.model.data.data_prefix=[1.0,\${data_dir}/my-gpt3_00_text_document] \ training.model.activations_checkpoint_granularity='full' \ training.model.activations_checkpoint_method='block' \ training.model.activations_checkpoint_num_layers=1 diff --git a/3.test_cases/2.nemo-launcher-23.03/build-enroot-image.sh b/3.test_cases/2.nemo-launcher/build-enroot-image.sh similarity index 97% rename from 3.test_cases/2.nemo-launcher-23.03/build-enroot-image.sh rename to 3.test_cases/2.nemo-launcher/build-enroot-image.sh index bf48a4c8..b0187135 100755 --- a/3.test_cases/2.nemo-launcher-23.03/build-enroot-image.sh +++ b/3.test_cases/2.nemo-launcher/build-enroot-image.sh @@ -4,11 +4,11 @@ # SPDX-License-Identifier: Apache-2.0 REPO=aws-nemo-megatron -TAG=23.03-py3 +TAG=23.05-py3 # EC2 instance: us-west-2, EBS: gp3, 3k IOPS, 350 MB/s throughput. # Time: ~3min -/usr/bin/time docker pull nvcr.io/ea-bignlp/bignlp-training:23.03-py3 +/usr/bin/time docker pull nvcr.io/ea-bignlp/bignlp-training:23.05-py3 # EC2 instance: m5.4xlarge, EBS: gp3, 3k IOPS, 350 MB/s throughput. 
# Time: ~6min diff --git a/3.test_cases/2.nemo-launcher-23.03/conf.template/cluster/bcm.yaml b/3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml similarity index 100% rename from 3.test_cases/2.nemo-launcher-23.03/conf.template/cluster/bcm.yaml rename to 3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml diff --git a/3.test_cases/2.nemo-launcher-23.03/conf.template/config.yaml b/3.test_cases/2.nemo-launcher/conf.template/config.yaml similarity index 100% rename from 3.test_cases/2.nemo-launcher-23.03/conf.template/config.yaml rename to 3.test_cases/2.nemo-launcher/conf.template/config.yaml diff --git a/3.test_cases/2.nemo-launcher-23.03/conf.template/data_preparation/gpt3/download_gpt3_pile.yaml b/3.test_cases/2.nemo-launcher/conf.template/data_preparation/gpt3/download_gpt3_pile.yaml similarity index 100% rename from 3.test_cases/2.nemo-launcher-23.03/conf.template/data_preparation/gpt3/download_gpt3_pile.yaml rename to 3.test_cases/2.nemo-launcher/conf.template/data_preparation/gpt3/download_gpt3_pile.yaml diff --git a/3.test_cases/2.nemo-launcher-23.03/step-00-bootstrap-launcher.sh b/3.test_cases/2.nemo-launcher/step-00-bootstrap-launcher.sh similarity index 68% rename from 3.test_cases/2.nemo-launcher-23.03/step-00-bootstrap-launcher.sh rename to 3.test_cases/2.nemo-launcher/step-00-bootstrap-launcher.sh index 58fe6512..cd794d14 100755 --- a/3.test_cases/2.nemo-launcher-23.03/step-00-bootstrap-launcher.sh +++ b/3.test_cases/2.nemo-launcher/step-00-bootstrap-launcher.sh @@ -3,17 +3,17 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -# Based on https://github.com/NVIDIA/NeMo-Megatron-Launcher/tree/23.03#5111-slurm +# Based on https://github.com/NVIDIA/NeMo-Megatron-Launcher/tree/23.05#5111-slurm set -exuo pipefail srun -N 1 \ - --container-mounts=/fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03:/workspace/mount_dir \ - --container-image=/fsx/ubuntu/aws-nemo-megatron_23.03-py3.sqsh \ + --container-mounts=/fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.05:/workspace/mount_dir \ + --container-image=/fsx/ubuntu/aws-nemo-megatron_23.05-py3.sqsh \ bash -c "cp -a /opt/NeMo-Megatron-Launcher/launcher_scripts /opt/NeMo-Megatron-Launcher/auto_configurator /opt/FasterTransformer /workspace/mount_dir/" -cd /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03/ +cd /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.05/ /usr/bin/python3 -m venv .venv -source /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03/.venv/bin/activate -curl -LO https://raw.githubusercontent.com/NVIDIA/NeMo-Megatron-Launcher/23.03/requirements.txt +source /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.05/.venv/bin/activate +curl -LO https://raw.githubusercontent.com/NVIDIA/NeMo-Megatron-Launcher/23.05/requirements.txt pip3 install -r requirements.txt diff --git a/3.test_cases/2.nemo-launcher-23.03/step-02-bmk-pretrain-gpt3.sh b/3.test_cases/2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh similarity index 84% rename from 3.test_cases/2.nemo-launcher-23.03/step-02-bmk-pretrain-gpt3.sh rename to 3.test_cases/2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh index c198ab6a..be745978 100755 --- a/3.test_cases/2.nemo-launcher-23.03/step-02-bmk-pretrain-gpt3.sh +++ b/3.test_cases/2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh @@ -10,7 +10,7 @@ set -exuo pipefail : "${NUM_NODES:=8}" : "${RUNTIME:=4h}" : "${MAX_STEPS:=5}" -: "${WORKSPACE_CONT:=/fsx/ubuntu/nemo-megatron-23.03}" +: "${WORKSPACE_CONT:=/fsx/ubuntu/nemo-megatron-23.05}" CONT_DATA_DIR=${WORKSPACE_CONT}/data/the_pile_gpt3 
CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe CONT_RESULT_DIR=${WORKSPACE_CONT}/results @@ -36,9 +36,13 @@ declare -a BMK_ARGS=( # Ignore checkpoints training.exp_manager.create_checkpoint_callback=False training.exp_manager.resume_if_exists=False + + # https://github.com/NVIDIA/NeMo/pull/6181/files + training.model.data.data_impl=mock + training.model.data.data_prefix=[] ) -HYDRA_FULL_ERROR=1 python3 /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03/launcher_scripts/main.py \ +HYDRA_FULL_ERROR=1 python3 /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.05/launcher_scripts/main.py \ stages=[training] \ training=${MODEL}/${MODEL_SIZE} \ data_dir=${CONT_DATA_DIR} \ @@ -46,7 +50,5 @@ HYDRA_FULL_ERROR=1 python3 /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.03/lau training.trainer.num_nodes=$NUM_NODES \ training.trainer.max_steps=$MAX_STEPS \ training.trainer.val_check_interval=$MAX_STEPS \ - training.model.tokenizer.vocab_file=${CONT_TOKENIZER_DIR}/vocab.json \ - training.model.tokenizer.merge_file=${CONT_TOKENIZER_DIR}/merges.txt \ "${BMK_ARGS[@]}" \ "$@" diff --git a/README.md b/README.md index c65b5f12..fa95165e 100644 --- a/README.md +++ b/README.md @@ -14,13 +14,12 @@ reference-architectures/ **NOTE**: the architectures are designed to work with the S3 bucket and VPC created using reference templates `1.architectures/0.s3/` and `1.architectures/1.vpc_network/`. _You're strongly recommended to deploy these two templates **before** deploying any of the reference architectures._ - ## 1. 
Architectures Architectures are located in `1.architectures` and consists of utilities and service related architectures -| Name | Category | Usage -|-------------------------|----------|-----------------------------------------------------| +| Name | Category | Usage | +| ----------------------- | -------- | --------------------------------------------------- | | `0.s3` | Storage | Create an S3 bucket | | `1.vpc_network` | Network | Create a VPC with subnets required resources | | `2.aws-parallelcluster` | Compute | Cluster templates for GPU & custom silicon training | @@ -36,9 +35,9 @@ Custom machine images can be built using [Packer](www.packer.io) for AWS Paralle All test cases are under `3.test_cases/`. You can go in each test case directory to learn how to run it. -| Test cases | PC | EKS | AWS Batch | -| ----------------------- | ---- | ----- | --------- | -| `1.megatron-lm` | ✅ | ❓ | ❓ | -| `2.nemo-launcher-23.03` | ✅ | ❌ | ❌ | -| `3.MPT` | ❓ | ❓ | ❓ | -| `4.DDP` | ❓ | ❓ | ❓ | +| Test cases | PC | EKS | AWS Batch | +| ----------------- | --- | --- | --------- | +| `1.megatron-lm` | ✅ | ❓ | ❓ | +| `2.nemo-launcher` | ✅ | ❌ | ❌ | +| `3.MPT` | ❓ | ❓ | ❓ | +| `4.DDP` | ❓ | ❓ | ❓ | From 60a2a01759e12b4639a45c30bab2993da5ca9f05 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Wed, 30 Aug 2023 10:40:10 +0000 Subject: [PATCH 044/648] nemo-megatron: update base container image --- .../2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile | 4 ++-- 3.test_cases/2.nemo-launcher/README.md | 6 +++--- 3.test_cases/2.nemo-launcher/build-enroot-image.sh | 4 ++-- 3.test_cases/2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile b/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile index a572c4bf..139763ab 100644 --- a/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile +++ 
b/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile @@ -2,9 +2,9 @@ # SPDX-License-Identifier: Apache-2.0 # DOCKER_BUILDKIT=1 docker build --progress plain -t aws-nemo-megatron:latest . -# Customized from: https://github.com/NVIDIA/NeMo-Megatron-Launcher/blob/23.05/csp_tools/aws/Dockerfile +# Customized from: https://github.com/NVIDIA/NeMo-Megatron-Launcher/blob//csp_tools/aws/Dockerfile -FROM nvcr.io/ea-bignlp/bignlp-training:23.05-py3 +FROM nvcr.io/ea-bignlp/nemofw-training:23.07-py3 ARG DEBIAN_FRONTEND=noninteractive ENV EFA_INSTALLER_VERSION=latest diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index f18240e8..6fad7130 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -17,11 +17,11 @@ The following pre-requisites are needed to run this example: You will need to setup the following environment variables before running the scripts. : ```bash -export NEMO_VERSION=23.05 +export NEMO_VERSION=23.07 export REPO=aws-nemo-megatron export TAG=$NEMO_VERSION-py3 export TARGET_PATH=/fsx/nemo-launcher-$NEMO_VERSION # -export TEST_CASE_PATH=/home/ec2-user/2.nemo-launcher-23.05 # where you copy the test case or set to your test case path +export TEST_CASE_PATH=/home/ec2-user/2.nemo-launcher-23.07 # where you copy the test case or set to your test case path export ENROOT_IMAGE=/apps/${REPO}_${TAG}.sqsh cd $TEST_CASE_PATH @@ -54,7 +54,7 @@ The Enroot squash file will be placed into the `/apps` directory. ## 3. Set-up the NemoMegatron environment -You will setup the target directory to host the configurations and requirements for NemoMegatron. It is assumed that your have an FSx for Lustre file system available to all nodes of your cluster via the mountpoint `/fsx`. 
We follow the same logic as in the [NemoMegatron Launcher documentation](https://github.com/NVIDIA/NeMo-Megatron-Launcher/tree/23.05#5111-slurm) +You will setup the target directory to host the configurations and requirements for NemoMegatron. It is assumed that your have an FSx for Lustre file system available to all nodes of your cluster via the mountpoint `/fsx`. We follow the same logic as in the [NemoMegatron Launcher documentation](https://github.com/NVIDIA/NeMo-Megatron-Launcher/tree/23.07#5111-slurm) 1. Create the target directory with the command below: diff --git a/3.test_cases/2.nemo-launcher/build-enroot-image.sh b/3.test_cases/2.nemo-launcher/build-enroot-image.sh index b0187135..0407bbdf 100755 --- a/3.test_cases/2.nemo-launcher/build-enroot-image.sh +++ b/3.test_cases/2.nemo-launcher/build-enroot-image.sh @@ -4,11 +4,11 @@ # SPDX-License-Identifier: Apache-2.0 REPO=aws-nemo-megatron -TAG=23.05-py3 +TAG=23.07-py3 # EC2 instance: us-west-2, EBS: gp3, 3k IOPS, 350 MB/s throughput. # Time: ~3min -/usr/bin/time docker pull nvcr.io/ea-bignlp/bignlp-training:23.05-py3 +#/usr/bin/time docker pull nvcr.io/ea-bignlp/nemofw-training:$TAG # EC2 instance: m5.4xlarge, EBS: gp3, 3k IOPS, 350 MB/s throughput. 
# Time: ~6min diff --git a/3.test_cases/2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh b/3.test_cases/2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh index be745978..aca12ded 100755 --- a/3.test_cases/2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh +++ b/3.test_cases/2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh @@ -10,7 +10,7 @@ set -exuo pipefail : "${NUM_NODES:=8}" : "${RUNTIME:=4h}" : "${MAX_STEPS:=5}" -: "${WORKSPACE_CONT:=/fsx/ubuntu/nemo-megatron-23.05}" +: "${WORKSPACE_CONT:=/fsx/ubuntu/nemo-megatron-23.07}" CONT_DATA_DIR=${WORKSPACE_CONT}/data/the_pile_gpt3 CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe CONT_RESULT_DIR=${WORKSPACE_CONT}/results @@ -42,7 +42,7 @@ declare -a BMK_ARGS=( training.model.data.data_prefix=[] ) -HYDRA_FULL_ERROR=1 python3 /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.05/launcher_scripts/main.py \ +HYDRA_FULL_ERROR=1 python3 /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.07/launcher_scripts/main.py \ stages=[training] \ training=${MODEL}/${MODEL_SIZE} \ data_dir=${CONT_DATA_DIR} \ From eaa51b8f4f6c30cf00ba2e34cdfe8a6e8615cffe Mon Sep 17 00:00:00 2001 From: Verdi March Date: Wed, 30 Aug 2023 10:45:32 +0000 Subject: [PATCH 045/648] Fix build-enroot-image.sh --- 3.test_cases/2.nemo-launcher/build-enroot-image.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/2.nemo-launcher/build-enroot-image.sh b/3.test_cases/2.nemo-launcher/build-enroot-image.sh index 0407bbdf..8fe0ff56 100755 --- a/3.test_cases/2.nemo-launcher/build-enroot-image.sh +++ b/3.test_cases/2.nemo-launcher/build-enroot-image.sh @@ -12,7 +12,7 @@ TAG=23.07-py3 # EC2 instance: m5.4xlarge, EBS: gp3, 3k IOPS, 350 MB/s throughput. # Time: ~6min -docker build --progress plain -t ${REPO}:${TAG} . +docker build --progress plain -t ${REPO}:${TAG} -f 0.NemoMegatron-aws-optimized.Dockerfile . # On m5.8xlarge (32 vcpu). /fsx is FSxL 1.2TB configured with 500 MB/s/TB throughput. 
IMAGE=/apps/${REPO}_${TAG}.sqsh ; [[ -e $IMAGE ]] && rm $IMAGE From 4e535fd980c76a0de59dc76c336d6ac9df2e130b Mon Sep 17 00:00:00 2001 From: Verdi March Date: Wed, 30 Aug 2023 12:58:06 +0000 Subject: [PATCH 046/648] nemo-launcher: working, with dummy dataset --- 3.test_cases/2.nemo-launcher/README.md | 38 +++++++++++-------- .../bmk-pretrain-gpt3-126m2.sh | 2 +- .../2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh | 2 +- .../step-00-bootstrap-launcher.sh | 12 +++--- .../step-01-bmk-pretrain-gpt3.sh | 7 +--- 5 files changed, 33 insertions(+), 28 deletions(-) diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index 6fad7130..db22fe2d 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -20,8 +20,8 @@ You will need to setup the following environment variables before running the sc export NEMO_VERSION=23.07 export REPO=aws-nemo-megatron export TAG=$NEMO_VERSION-py3 -export TARGET_PATH=/fsx/nemo-launcher-$NEMO_VERSION # -export TEST_CASE_PATH=/home/ec2-user/2.nemo-launcher-23.07 # where you copy the test case or set to your test case path +export TARGET_PATH=/fsx/nemo-launcher-$NEMO_VERSION # must be a shared filesystem +export TEST_CASE_PATH=/home/ec2-user/2.nemo-launcher-23.07 # where you copy the test case or set to your test case path export ENROOT_IMAGE=/apps/${REPO}_${TAG}.sqsh cd $TEST_CASE_PATH @@ -73,6 +73,7 @@ enroot start --mount $TARGET_PATH:/workspace/mount_dir \ ``` The `NVIDIA_VISIBLE_DEVICES` variable is set to void to prevent the process to check for the Nvidia driver presence (since we don't need GPUs here). + 3. Install the NemoMegatron requirements in a Python VirtualEnv by running the set of commands below. 
```bash @@ -80,6 +81,7 @@ cd $TARGET_PATH sudo amazon-linux-extras install -y python3.8 # we need Python =>3.8 /usr/bin/python3.8 -m venv .venv source .venv/bin/activate +pip3.8 install --upgrade pip setuptools pip3.8 install -r <(curl -fsSL https://raw.githubusercontent.com/NVIDIA/NeMo-Megatron-Launcher/$NEMO_VERSION/requirements.txt) ``` @@ -87,24 +89,23 @@ Next, you need to prepare the configuration files as follow: 1. Review and update the partition name in the .yaml config file `conf.template/cluster/bcm.yaml`. Here is a summary of the values. -| Value | Default | Definition | -| ---------------- | ----------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| partition | `null` | Slurm partition, same as a job queue | -| account | `null` | Account if using [accounting](https://slurm.schedmd.com/accounting.html) | -| exclusive | `True` | The job has [exclusive](https://stackoverflow.com/questions/66817279/what-does-the-keyword-exclusive-mean-in-slurm) use the instances it runs on (no other job can take it) | -| gpus_per_task | `null` | Number of instances of GPUs per job | -| gpus_per_node | `8` | Number of GPUs to use per node. 
This is set to 8 GPUs as for th p4d.24xlarge | -| mem | `0` | Requested memory (all) | -| job_name_prefix | `"nemo-megatron-"` | Prefix for your job names | -| gres | `"gpu:8"` | Generic resource [scheduling](https://slurm.schedmd.com/gres.html) | -| srun_args | `"--no-container-mount-home"` | Arguments for the [srun](https://slurm.schedmd.com/srun.html) command (here for Pyxis) | -| stderr_to_stdout | `True` | Merge `stderr` and `stdout` | +| Value | Default | Definition | +| ------------------ | ----------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `partition` | `null` | Slurm partition, same as a job queue | +| `account` | `null` | Account if using [accounting](https://slurm.schedmd.com/accounting.html) | +| `exclusive` | `True` | The job has [exclusive](https://stackoverflow.com/questions/66817279/what-does-the-keyword-exclusive-mean-in-slurm) use the instances it runs on (no other job can take it) | +| `gpus_per_task` | `null` | Number of instances of GPUs per job | +| `gpus_per_node` | `8` | Number of GPUs to use per node. This is set to 8 GPUs as for th p4d.24xlarge | +| `mem` | `0` | Requested memory (all) | +| `job_name_prefix` | `"nemo-megatron-"` | Prefix for your job names | +| `gres` | `"gpu:8"` | Generic resource [scheduling](https://slurm.schedmd.com/gres.html) | +| `srun_args` | `"--no-container-mount-home"` | Arguments for the [srun](https://slurm.schedmd.com/srun.html) command (here for Pyxis) | +| `stderr_to_stdout` | `True` | Merge `stderr` and `stdout` | 2. 
Copy all the .yaml config files `{conf.template/ => launcher_scripts/conf/}` and substitute environment variables as follows: ```bash cp -Rv ${TEST_CASE_PATH}/conf.template/cluster ${TARGET_PATH}/launcher_scripts/conf/cluster -cp -Rv ${TEST_CASE_PATH}/conf.template/data_preparation ${TARGET_PATH}/launcher_scripts/conf/data_preparation envsubst < ${TEST_CASE_PATH}/conf.template/config.yaml > ${TARGET_PATH}/launcher_scripts/conf/config.yaml ``` @@ -114,13 +115,18 @@ This section assumes that you went through the previous sections and 1/ retrieve ```bash source ${TARGET_PATH}/.venv/bin/activate + +# Download tokenizer data (one-time activity) +mkdir -p $TARGET_PATH/bpe +curl -L https://huggingface.co/gpt2/raw/main/config.json > $TARGET_PATH/bpe/vocab.json +curl -L https://huggingface.co/gpt2/raw/main/merges.txt > $TARGET_PATH/bpe/merges.txt ``` Run pre-training as follows: ```bash # Choose one of these options: -# 1. edit then run step-02-pretrain-gpt3.sh, or +# 1. edit then run step-01-pretrain-gpt3.sh, or # 2. review, edit (if necessary), then run pretrain-gpt3-*.sh. # # Below show option 2. diff --git a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh index c5d3a7e2..b57d729b 100755 --- a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh +++ b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh @@ -15,4 +15,4 @@ export UNIQUE_OUTPUT_DIR=1 BIN_DIR=$(dirname `readlink -e ${BASH_SOURCE[0]}`) # Node_count == 2 can work without full activations checkpointing. 
-$BIN_DIR/step-02-bmk-pretrain-gpt3.sh +$BIN_DIR/step-01-bmk-pretrain-gpt3.sh diff --git a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh index 1e2b9b41..efe96249 100755 --- a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh +++ b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh @@ -18,7 +18,7 @@ BIN_DIR=$(dirname `readlink -e ${BASH_SOURCE[0]}`) # # Below settings is similar to 22.09, except that 22.09 funnily didn't OOM with # activations_checkpoint_num_layers=0. -$BIN_DIR/step-02-bmk-pretrain-gpt3.sh \ +$BIN_DIR/step-01-bmk-pretrain-gpt3.sh \ training.model.activations_checkpoint_granularity='full' \ training.model.activations_checkpoint_method='block' \ training.model.activations_checkpoint_num_layers=1 diff --git a/3.test_cases/2.nemo-launcher/step-00-bootstrap-launcher.sh b/3.test_cases/2.nemo-launcher/step-00-bootstrap-launcher.sh index cd794d14..5a3121ac 100755 --- a/3.test_cases/2.nemo-launcher/step-00-bootstrap-launcher.sh +++ b/3.test_cases/2.nemo-launcher/step-00-bootstrap-launcher.sh @@ -7,13 +7,15 @@ set -exuo pipefail +NEMO_LAUNCHER_VERSION=23.07 srun -N 1 \ - --container-mounts=/fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.05:/workspace/mount_dir \ - --container-image=/fsx/ubuntu/aws-nemo-megatron_23.05-py3.sqsh \ + --container-mounts=/fsx/ubuntu/sample-slurm-jobs/nemo-launcher-$NEMO_LAUNCHER_VERSION:/workspace/mount_dir \ + --container-image=/fsx/ubuntu/aws-nemo-megatron_$NEMO_LAUNCHER_VERSION-py3.sqsh \ bash -c "cp -a /opt/NeMo-Megatron-Launcher/launcher_scripts /opt/NeMo-Megatron-Launcher/auto_configurator /opt/FasterTransformer /workspace/mount_dir/" -cd /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.05/ +cd /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-$NEMO_LAUNCHER_VERSION/ /usr/bin/python3 -m venv .venv -source /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.05/.venv/bin/activate -curl -LO 
https://raw.githubusercontent.com/NVIDIA/NeMo-Megatron-Launcher/23.05/requirements.txt +source /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-$NEMO_LAUNCHER_VERSION/.venv/bin/activate +curl -LO https://raw.githubusercontent.com/NVIDIA/NeMo-Megatron-Launcher/$NEMO_LAUNCHER_VERSION/requirements.txt +pip3 install --upgrade pip setuptools pip3 install -r requirements.txt diff --git a/3.test_cases/2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh b/3.test_cases/2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh index aca12ded..5f4a6705 100755 --- a/3.test_cases/2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh +++ b/3.test_cases/2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh @@ -11,8 +11,6 @@ set -exuo pipefail : "${RUNTIME:=4h}" : "${MAX_STEPS:=5}" : "${WORKSPACE_CONT:=/fsx/ubuntu/nemo-megatron-23.07}" -CONT_DATA_DIR=${WORKSPACE_CONT}/data/the_pile_gpt3 -CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe CONT_RESULT_DIR=${WORKSPACE_CONT}/results : "${UNIQUE_OUTPUT_DIR:=0}" @@ -42,11 +40,10 @@ declare -a BMK_ARGS=( training.model.data.data_prefix=[] ) -HYDRA_FULL_ERROR=1 python3 /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-23.07/launcher_scripts/main.py \ +# base_results_dir=${CONT_RESULT_DIR} \ +HYDRA_FULL_ERROR=1 python3 /fsx/ubuntu/nemo-launcher-23.07/launcher_scripts/main.py \ stages=[training] \ training=${MODEL}/${MODEL_SIZE} \ - data_dir=${CONT_DATA_DIR} \ - base_results_dir=${CONT_RESULT_DIR} \ training.trainer.num_nodes=$NUM_NODES \ training.trainer.max_steps=$MAX_STEPS \ training.trainer.val_check_interval=$MAX_STEPS \ From 658884fcd10be8ad57f15bd90b22b1fd1ef695be Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Thu, 31 Aug 2023 09:03:08 -0500 Subject: [PATCH 047/648] Update python screen script from marcverd@ --- .../0.pytorch-screen.Dockerfile | 59 +++++++++++++ .../1.torch-screen.sbatch | 49 +++++++++++ .../1.pytorch-env-validation/README.md | 88 +++++++++++++++++++ .../pytorch-screen.py | 41 +++++++++ 4 files changed, 237 insertions(+) create mode 100644 
4.validation_scripts/1.pytorch-env-validation/0.pytorch-screen.Dockerfile create mode 100644 4.validation_scripts/1.pytorch-env-validation/1.torch-screen.sbatch create mode 100644 4.validation_scripts/1.pytorch-env-validation/README.md create mode 100644 4.validation_scripts/1.pytorch-env-validation/pytorch-screen.py diff --git a/4.validation_scripts/1.pytorch-env-validation/0.pytorch-screen.Dockerfile b/4.validation_scripts/1.pytorch-env-validation/0.pytorch-screen.Dockerfile new file mode 100644 index 00000000..424c2fc1 --- /dev/null +++ b/4.validation_scripts/1.pytorch-env-validation/0.pytorch-screen.Dockerfile @@ -0,0 +1,59 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +ARG AWS_REGION=us-west-2 + +FROM 763104351884.dkr.ecr.${AWS_REGION}.amazonaws.com/pytorch-training:2.0.1-gpu-py310-cu118-ubuntu20.04-ec2 + +ARG DEBIAN_FRONTEND=noninteractive + +RUN apt-get update && apt-get install -y libpmix-dev libpmix2 + +# reinstall EFA, to restore the openmpi +ENV EFA_INSTALLER_VERSION=latest +RUN apt-get update && \ + cd /tmp && \ + rm -fr /opt/amazon/openmpi && \ + curl -O https://efa-installer.amazonaws.com/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \ + tar -xf aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \ + cd aws-efa-installer && \ + ./efa_installer.sh -y -g -d --skip-kmod --skip-limit-conf && \ + ldconfig && \ + rm -rf /tmp/aws-efa-installer /var/lib/apt/lists/* && \ + /opt/amazon/efa/bin/fi_info --version + +# Repeat this from base Dockefile +# Install AWS OFI NCCL plug-in +ENV AWS_OFI_NCCL_VERSION=1.7.1 +RUN apt-get update && apt-get install -y autoconf +RUN mkdir /tmp/efa-ofi-nccl \ + && cd /tmp/efa-ofi-nccl \ + && git clone https://github.com/aws/aws-ofi-nccl.git -b v${AWS_OFI_NCCL_VERSION}-aws \ + && cd aws-ofi-nccl \ + && ./autogen.sh \ + && ./configure --with-libfabric=/opt/amazon/efa \ + --with-mpi=/opt/amazon/openmpi \ + --with-cuda=/opt/conda --prefix=/usr/local \ + && 
make \ + && make install \ + && rm -rf /tmp/efa-ofi-nccl \ + && rm -rf /var/lib/apt/lists/* \ + && apt-get clean + +ENV LD_LIBRARY_PATH=/opt/amazon/efa/lib:$LD_LIBRARY_PATH +ENV PATH=/opt/amazon/efa/bin:$PATH + +RUN cd /opt && git clone https://github.com/NVIDIA/nccl-tests \ + && cd nccl-tests \ + && make MPI=1 MPI_HOME=/opt/amazon/openmpi + + +################################################# +## Install NVIDIA GDRCopy +RUN apt-get update && apt-get install -y check libsubunit0 libsubunit-dev pkg-config \ + && git clone https://github.com/NVIDIA/gdrcopy.git /opt/gdrcopy \ + && cd /opt/gdrcopy \ + && CUDA=/opt/conda make lib_install \ + # Optional: tests tool. Need to point to the stub libcuda.so + # See: https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/11.7.1/ubuntu2004/devel/Dockerfile#L68 + && LIBRARY_PATH=/opt/conda/lib/stubs CUDA=/opt/conda make exes_install diff --git a/4.validation_scripts/1.pytorch-env-validation/1.torch-screen.sbatch b/4.validation_scripts/1.pytorch-env-validation/1.torch-screen.sbatch new file mode 100644 index 00000000..a3147074 --- /dev/null +++ b/4.validation_scripts/1.pytorch-env-validation/1.torch-screen.sbatch @@ -0,0 +1,49 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +#SBATCH -N 2 # number of nodes to run the scrip on, use 2 here +#SBATCH --job-name=megatron_gpt # name of your job +#SBATCH --ntasks-per-node 1 # Number of tasks per node, we need one here +#SBATCH --gres=gpu:8 # number of GPU we reserve +#SBATCH --exclusive +#SBATCH --wait-all-nodes=1 + +### Disable hyperthreading by setting the tasks per core to 1 +#SBATCH --ntasks-per-core=1 + +set -ex + +# Validate that mpirun does not need -x to propagate env vars defined in .sbatch script + +########################### +###### User Variables ##### +########################### + +# default variables for Enroot +: "${APPS_PATH:=/apps}" +: "${IMAGE:=$APPS_PATH/pytorch-screen.sqsh}" +: "${FSX_MOUNT:=/fsx:/fsx}" +: "${SCREEN_PT_SCRIPT_PATH:=$PWD}" + + +declare -a ARGS=( + --container-image $IMAGE + --container-mount-home + --container-mounts $FSX_MOUNT +) + +echo " +Hostname: $(hostname) +" + +env + +/usr/bin/time srun -l "${ARGS[@]}" --mpi=pmix bash -c " +which nvidia-smi +nvidia-smi +which python +python --version +python ${SCREEN_PT_SCRIPT_PATH}/screen-pytorch.py +" diff --git a/4.validation_scripts/1.pytorch-env-validation/README.md b/4.validation_scripts/1.pytorch-env-validation/README.md new file mode 100644 index 00000000..7dc20794 --- /dev/null +++ b/4.validation_scripts/1.pytorch-env-validation/README.md @@ -0,0 +1,88 @@ +# PyTorch Environment Validation + +This test runs a PyTorch script to screen for NCCL, MPI, OpenMP, CUDA.... on your environment. This script is executed once per instance and helps you verify your environment: The AWS [Deep Learning Container](https://docs.aws.amazon.com/deep-learning-containers/latest/devguide/deep-learning-containers-images.html) is used for that purpose. 
+ +Here you will: +- Build a container from the AWS [Deep Learning Container](https://docs.aws.amazon.com/deep-learning-containers/latest/devguide/deep-learning-containers-images.html) and convert it to a squash file using [Enroot](https://github.com/NVIDIA/enroot). +- Run a Python script to screen the PyTorch environment with [Pyxis](https://github.com/NVIDIA/pyxis) via Slurm. +- Mount a local directory in the container via Pyxis. + +## 0. Preparation + +This guide assumes that you have the following: + +- A functional Slurm cluster on AWS. +- Docker, [Pyxis](https://github.com/NVIDIA/pyxis) and [Enroot](https://github.com/NVIDIA/enroot) installed. +- Enroot requires libmd to compile and squashfs-tools to execute. +- A shared directory mounted on `/apps` + +It is recommended that you use the templates in the architectures [directory](../../1.architectures) to deploy Slurm (for example AWS ParallelCluster). + + +## 1. Build the container and the squash file + +We use the AWS [Deep Learning Container](https://docs.aws.amazon.com/deep-learning-containers/latest/devguide/deep-learning-containers-images.html) as a base for your validation container and the EFA libraries to use the latest versions. Here, you will start by building your container image then convert it to a squash file via Enroot. + +To build the container: + +1. Copy the file `0.pytorch-screenl.Dockerfile` or its content to your head-node. +2. Build the container image with the command below + ```bash + # get the region, this assumes we run on EC2 + AWS_AZ=$(ec2-metadata --availability-zone | cut -d' ' -f2) + AWS_REGION=${AWS_AZ::-1} + + # Authenticate with ECR to get the AWS Deep Learning Container + aws ecr get-login-password | docker login --username AWS \ + --password-stdin 763104351884.dkr.ecr.${AWS_REGION}.amazonaws.com/pytorch-training + + # Build the container + docker build -t pytorch-screenl -f 0.pytorch-screenl.Dockerfile --build-arg="AWS_REGION=${AWS_AZ::-1}" . + ``` +3. 
Once the image is built, you can check if it is present with `docker images`. You should see an output similar to this one: + ``` + REPOSITORY TAG IMAGE ID CREATED SIZE + pytorch-screen latest 2892fe08195a 2 minutes ago 21.6GB + ... + 763104351884.dkr.ecr.ap-northeast-2.amazonaws.com/pytorch-training 2.0.1-gpu-py310-cu118-ubuntu20.04-ec2 3d25d3d0f25e 2 months ago 20.8GB + ... + ``` +3. Convert the container image to a squash file via Enroot + ```bash + enroot import -o /apps/pytorch-screen.sqsh dockerd://pytorch-screen:latest + ``` + The file will be stored in the `/apps` directory. + +> You can set versions and the branch for NCCL and EFA by editing the variables below in the Dockerfile. + +> | Variable | Default | +> |-----------------------|-------------| +> |`EFA_INSTALLER_VERSION`| `latest` | +> |`AWS_OFI_NCCL_VERSION` | `aws` | + +## 2. Running the Pytorch screening + +Now you copy the files `1.torch-screen.sbatch` and `pytorch-screen.py` to your cluster in the same directory then submit a test job with the command below from where the files are placed: + +```bash +sbatch 1.torch-screen.sbatch +``` + +An output file named `slurm-XX.out`, with `XX` being the job ID, will be placed in the directory. It will report the environment variables, location of `python`, `nvidia-smi` and PyTorch environment variables for each node (instance). Please keep in mind that each process, 1 per node, will write concurrently to the output file. Each process output is prepended by their ID `:0` for process 0, `:1` for process 1. These can be interleaved. 
Below is an example of output: + + +```bash +0: torch.backends.opt_einsum.strategy=None +0: torch.distributed.is_available()=True +0: torch.distributed.is_mpi_available()=True +0: torch.distributed.is_nccl_available()=True +1: torch.cuda.is_available()=True +1: torch.backends.cuda.is_built()=True +1: torch.backends.cuda.matmul.allow_tf32=False +1: torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction=True +1: torch.backends.cuda.cufft_plan_cache= +1: torch.backends.cuda.preferred_linalg_library(backend=None)=<_LinalgBackend.Default: 0> +1: torch.backends.cuda.flash_sdp_enabled()=True +``` + +> **Execute on X number nodes?**: to change the number of nodes modify the line `SBATCH -N 2` and change `2` to the desired number of nodes on which you'd like to run this script. diff --git a/4.validation_scripts/1.pytorch-env-validation/pytorch-screen.py b/4.validation_scripts/1.pytorch-env-validation/pytorch-screen.py new file mode 100644 index 00000000..52fdb68a --- /dev/null +++ b/4.validation_scripts/1.pytorch-env-validation/pytorch-screen.py @@ -0,0 +1,41 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +import torch + +try: + from rich.console import Console + + print = Console(force_terminal=True, force_jupyter=False).out +except ModuleNotFoundError: + pass + +print(f"{torch.cuda.is_available()=}") +print(f"{torch.backends.cuda.is_built()=}") +print(f"{torch.backends.cuda.matmul.allow_tf32=}") +print(f"{torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction=}") +print(f"{torch.backends.cuda.cufft_plan_cache=}") +print(f"{torch.backends.cuda.preferred_linalg_library(backend=None)=}") +print(f"{torch.backends.cuda.flash_sdp_enabled()=}") +print(f"{torch.backends.cuda.math_sdp_enabled()=}") + +print(f"{torch.backends.cudnn.version()=}") +print(f"{torch.backends.cudnn.is_available()=}") +print(f"{torch.backends.cudnn.enabled=}") +print(f"{torch.backends.cudnn.allow_tf32=}") +print(f"{torch.backends.cudnn.deterministic=}") +print(f"{torch.backends.cudnn.benchmark=}") +print(f"{torch.backends.cudnn.benchmark_limit=}") + +print(f"{torch.backends.mkl.is_available()=}") +print(f"{torch.backends.mkldnn.is_available()=}") + +print(f"{torch.backends.openmp.is_available()=}") +print(f"{torch.backends.opt_einsum.is_available()=}") +print(f"{torch.backends.opt_einsum.get_opt_einsum()=}") +print(f"{torch.backends.opt_einsum.enabled=}") +print(f"{torch.backends.opt_einsum.strategy=}") + +print(f"{torch.distributed.is_available()=}") +print(f"{torch.distributed.is_mpi_available()=}") +print(f"{torch.distributed.is_nccl_available()=}") From d513984857187571ee3bcb8355e2a9f17e524c16 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Thu, 31 Aug 2023 09:03:08 -0500 Subject: [PATCH 048/648] Update python screen script from marcverd@ --- .../0.pytorch-screen.Dockerfile | 59 +++++++++++++ .../1.torch-screen.sbatch | 49 +++++++++++ .../1.pytorch-env-validation/README.md | 88 +++++++++++++++++++ .../pytorch-screen.py | 41 +++++++++ 4 files changed, 237 insertions(+) create mode 100644 
4.validation_scripts/1.pytorch-env-validation/0.pytorch-screen.Dockerfile create mode 100644 4.validation_scripts/1.pytorch-env-validation/1.torch-screen.sbatch create mode 100644 4.validation_scripts/1.pytorch-env-validation/README.md create mode 100644 4.validation_scripts/1.pytorch-env-validation/pytorch-screen.py diff --git a/4.validation_scripts/1.pytorch-env-validation/0.pytorch-screen.Dockerfile b/4.validation_scripts/1.pytorch-env-validation/0.pytorch-screen.Dockerfile new file mode 100644 index 00000000..424c2fc1 --- /dev/null +++ b/4.validation_scripts/1.pytorch-env-validation/0.pytorch-screen.Dockerfile @@ -0,0 +1,59 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +ARG AWS_REGION=us-west-2 + +FROM 763104351884.dkr.ecr.${AWS_REGION}.amazonaws.com/pytorch-training:2.0.1-gpu-py310-cu118-ubuntu20.04-ec2 + +ARG DEBIAN_FRONTEND=noninteractive + +RUN apt-get update && apt-get install -y libpmix-dev libpmix2 + +# reinstall EFA, to restore the openmpi +ENV EFA_INSTALLER_VERSION=latest +RUN apt-get update && \ + cd /tmp && \ + rm -fr /opt/amazon/openmpi && \ + curl -O https://efa-installer.amazonaws.com/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \ + tar -xf aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \ + cd aws-efa-installer && \ + ./efa_installer.sh -y -g -d --skip-kmod --skip-limit-conf && \ + ldconfig && \ + rm -rf /tmp/aws-efa-installer /var/lib/apt/lists/* && \ + /opt/amazon/efa/bin/fi_info --version + +# Repeat this from base Dockefile +# Install AWS OFI NCCL plug-in +ENV AWS_OFI_NCCL_VERSION=1.7.1 +RUN apt-get update && apt-get install -y autoconf +RUN mkdir /tmp/efa-ofi-nccl \ + && cd /tmp/efa-ofi-nccl \ + && git clone https://github.com/aws/aws-ofi-nccl.git -b v${AWS_OFI_NCCL_VERSION}-aws \ + && cd aws-ofi-nccl \ + && ./autogen.sh \ + && ./configure --with-libfabric=/opt/amazon/efa \ + --with-mpi=/opt/amazon/openmpi \ + --with-cuda=/opt/conda --prefix=/usr/local \ + && 
make \ + && make install \ + && rm -rf /tmp/efa-ofi-nccl \ + && rm -rf /var/lib/apt/lists/* \ + && apt-get clean + +ENV LD_LIBRARY_PATH=/opt/amazon/efa/lib:$LD_LIBRARY_PATH +ENV PATH=/opt/amazon/efa/bin:$PATH + +RUN cd /opt && git clone https://github.com/NVIDIA/nccl-tests \ + && cd nccl-tests \ + && make MPI=1 MPI_HOME=/opt/amazon/openmpi + + +################################################# +## Install NVIDIA GDRCopy +RUN apt-get update && apt-get install -y check libsubunit0 libsubunit-dev pkg-config \ + && git clone https://github.com/NVIDIA/gdrcopy.git /opt/gdrcopy \ + && cd /opt/gdrcopy \ + && CUDA=/opt/conda make lib_install \ + # Optional: tests tool. Need to point to the stub libcuda.so + # See: https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/11.7.1/ubuntu2004/devel/Dockerfile#L68 + && LIBRARY_PATH=/opt/conda/lib/stubs CUDA=/opt/conda make exes_install diff --git a/4.validation_scripts/1.pytorch-env-validation/1.torch-screen.sbatch b/4.validation_scripts/1.pytorch-env-validation/1.torch-screen.sbatch new file mode 100644 index 00000000..a3147074 --- /dev/null +++ b/4.validation_scripts/1.pytorch-env-validation/1.torch-screen.sbatch @@ -0,0 +1,49 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +#SBATCH -N 2 # number of nodes to run the scrip on, use 2 here +#SBATCH --job-name=megatron_gpt # name of your job +#SBATCH --ntasks-per-node 1 # Number of tasks per node, we need one here +#SBATCH --gres=gpu:8 # number of GPU we reserve +#SBATCH --exclusive +#SBATCH --wait-all-nodes=1 + +### Disable hyperthreading by setting the tasks per core to 1 +#SBATCH --ntasks-per-core=1 + +set -ex + +# Validate that mpirun does not need -x to propagate env vars defined in .sbatch script + +########################### +###### User Variables ##### +########################### + +# default variables for Enroot +: "${APPS_PATH:=/apps}" +: "${IMAGE:=$APPS_PATH/pytorch-screen.sqsh}" +: "${FSX_MOUNT:=/fsx:/fsx}" +: "${SCREEN_PT_SCRIPT_PATH:=$PWD}" + + +declare -a ARGS=( + --container-image $IMAGE + --container-mount-home + --container-mounts $FSX_MOUNT +) + +echo " +Hostname: $(hostname) +" + +env + +/usr/bin/time srun -l "${ARGS[@]}" --mpi=pmix bash -c " +which nvidia-smi +nvidia-smi +which python +python --version +python ${SCREEN_PT_SCRIPT_PATH}/screen-pytorch.py +" diff --git a/4.validation_scripts/1.pytorch-env-validation/README.md b/4.validation_scripts/1.pytorch-env-validation/README.md new file mode 100644 index 00000000..7dc20794 --- /dev/null +++ b/4.validation_scripts/1.pytorch-env-validation/README.md @@ -0,0 +1,88 @@ +# PyTorch Environment Validation + +This test runs a PyTorch script to screen for NCCL, MPI, OpenMP, CUDA.... on your environment. This script is executed once per instance and helps you verify your environment: The AWS [Deep Learning Container](https://docs.aws.amazon.com/deep-learning-containers/latest/devguide/deep-learning-containers-images.html) is used for that purpose. 
+ +Here you will: +- Build a container from the AWS [Deep Learning Container](https://docs.aws.amazon.com/deep-learning-containers/latest/devguide/deep-learning-containers-images.html) and convert it to a squash file using [Enroot](https://github.com/NVIDIA/enroot). +- Run a Python script to screen the PyTorch environment with [Pyxis](https://github.com/NVIDIA/pyxis) via Slurm. +- Mount a local directory in the container via Pyxis. + +## 0. Preparation + +This guide assumes that you have the following: + +- A functional Slurm cluster on AWS. +- Docker, [Pyxis](https://github.com/NVIDIA/pyxis) and [Enroot](https://github.com/NVIDIA/enroot) installed. +- Enroot requires libmd to compile and squashfs-tools to execute. +- A shared directory mounted on `/apps` + +It is recommended that you use the templates in the architectures [directory](../../1.architectures) to deploy Slurm (for example AWS ParallelCluster). + + +## 1. Build the container and the squash file + +We use the AWS [Deep Learning Container](https://docs.aws.amazon.com/deep-learning-containers/latest/devguide/deep-learning-containers-images.html) as a base for your validation container and the EFA libraries to use the latest versions. Here, you will start by building your container image then convert it to a squash file via Enroot. + +To build the container: + +1. Copy the file `0.pytorch-screenl.Dockerfile` or its content to your head-node. +2. Build the container image with the command below + ```bash + # get the region, this assumes we run on EC2 + AWS_AZ=$(ec2-metadata --availability-zone | cut -d' ' -f2) + AWS_REGION=${AWS_AZ::-1} + + # Authenticate with ECR to get the AWS Deep Learning Container + aws ecr get-login-password | docker login --username AWS \ + --password-stdin 763104351884.dkr.ecr.${AWS_REGION}.amazonaws.com/pytorch-training + + # Build the container + docker build -t pytorch-screenl -f 0.pytorch-screenl.Dockerfile --build-arg="AWS_REGION=${AWS_AZ::-1}" . + ``` +3. 
Once the image is built, you can check if it is present with `docker images`. You should see an output similar to this one: + ``` + REPOSITORY TAG IMAGE ID CREATED SIZE + pytorch-screen latest 2892fe08195a 2 minutes ago 21.6GB + ... + 763104351884.dkr.ecr.ap-northeast-2.amazonaws.com/pytorch-training 2.0.1-gpu-py310-cu118-ubuntu20.04-ec2 3d25d3d0f25e 2 months ago 20.8GB + ... + ``` +3. Convert the container image to a squash file via Enroot + ```bash + enroot import -o /apps/pytorch-screen.sqsh dockerd://pytorch-screen:latest + ``` + The file will be stored in the `/apps` directory. + +> You can set versions and the branch for NCCL and EFA by editing the variables below in the Dockerfile. + +> | Variable | Default | +> |-----------------------|-------------| +> |`EFA_INSTALLER_VERSION`| `latest` | +> |`AWS_OFI_NCCL_VERSION` | `aws` | + +## 2. Running the Pytorch screening + +Now you copy the files `1.torch-screen.sbatch` and `pytorch-screen.py` to your cluster in the same directory then submit a test job with the command below from where the files are placed: + +```bash +sbatch 1.torch-screen.sbatch +``` + +An output file named `slurm-XX.out`, with `XX` being the job ID, will be placed in the directory. It will report the environment variables, location of `python`, `nvidia-smi` and PyTorch environment variables for each node (instance). Please keep in mind that each process, 1 per node, will write concurrently to the output file. Each process output is prepended by their ID `:0` for process 0, `:1` for process 1. These can be interleaved. 
Below is an example of output: + + +```bash +0: torch.backends.opt_einsum.strategy=None +0: torch.distributed.is_available()=True +0: torch.distributed.is_mpi_available()=True +0: torch.distributed.is_nccl_available()=True +1: torch.cuda.is_available()=True +1: torch.backends.cuda.is_built()=True +1: torch.backends.cuda.matmul.allow_tf32=False +1: torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction=True +1: torch.backends.cuda.cufft_plan_cache= +1: torch.backends.cuda.preferred_linalg_library(backend=None)=<_LinalgBackend.Default: 0> +1: torch.backends.cuda.flash_sdp_enabled()=True +``` + +> **Execute on X number nodes?**: to change the number of nodes modify the line `SBATCH -N 2` and change `2` to the desired number of nodes on which you'd like to run this script. diff --git a/4.validation_scripts/1.pytorch-env-validation/pytorch-screen.py b/4.validation_scripts/1.pytorch-env-validation/pytorch-screen.py new file mode 100644 index 00000000..52fdb68a --- /dev/null +++ b/4.validation_scripts/1.pytorch-env-validation/pytorch-screen.py @@ -0,0 +1,41 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +import torch + +try: + from rich.console import Console + + print = Console(force_terminal=True, force_jupyter=False).out +except ModuleNotFoundError: + pass + +print(f"{torch.cuda.is_available()=}") +print(f"{torch.backends.cuda.is_built()=}") +print(f"{torch.backends.cuda.matmul.allow_tf32=}") +print(f"{torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction=}") +print(f"{torch.backends.cuda.cufft_plan_cache=}") +print(f"{torch.backends.cuda.preferred_linalg_library(backend=None)=}") +print(f"{torch.backends.cuda.flash_sdp_enabled()=}") +print(f"{torch.backends.cuda.math_sdp_enabled()=}") + +print(f"{torch.backends.cudnn.version()=}") +print(f"{torch.backends.cudnn.is_available()=}") +print(f"{torch.backends.cudnn.enabled=}") +print(f"{torch.backends.cudnn.allow_tf32=}") +print(f"{torch.backends.cudnn.deterministic=}") +print(f"{torch.backends.cudnn.benchmark=}") +print(f"{torch.backends.cudnn.benchmark_limit=}") + +print(f"{torch.backends.mkl.is_available()=}") +print(f"{torch.backends.mkldnn.is_available()=}") + +print(f"{torch.backends.openmp.is_available()=}") +print(f"{torch.backends.opt_einsum.is_available()=}") +print(f"{torch.backends.opt_einsum.get_opt_einsum()=}") +print(f"{torch.backends.opt_einsum.enabled=}") +print(f"{torch.backends.opt_einsum.strategy=}") + +print(f"{torch.distributed.is_available()=}") +print(f"{torch.distributed.is_mpi_available()=}") +print(f"{torch.distributed.is_nccl_available()=}") From 7e2b7df1925f78d5629969417eacf372da55221a Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Thu, 31 Aug 2023 09:03:33 -0500 Subject: [PATCH 049/648] Update NCCL test sbatch files --- .../0.nccl-tests/2.nccl-3collectives.sbatch | 2 -- 4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch | 6 +----- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch b/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch 
index 63e418c7..14b77540 100644 --- a/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch +++ b/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch @@ -40,14 +40,12 @@ declare -a ARGS=( --container-image $IMAGE ) - echo " Hostname: $(hostname) " env - echo " ################################################################################ # $NCCL_TEST_PATH/all_reduce_perf diff --git a/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch b/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch index 1f29c6c3..2d2b1954 100644 --- a/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch +++ b/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch @@ -41,14 +41,10 @@ export FI_EFA_ENABLE_SHM_TRANSFER=1 export NCCL_ASYNC_ERROR_HANDLING=1 export NCCL_DEBUG=INFO - declare -a ARGS=( --container-image $IMAGE ) - -export HELLO_WORLD="${1-undefined}" - echo " Hostname: $(hostname) " @@ -59,7 +55,7 @@ echo " ######################################## # srun ########################################" -srun bash -c 'hostname ; env | egrep "^NCCL|^FI|^HELLO" | sed "s/^/`hostname`: /g"' +srun -l "${ARGS[@]}" --mpi=pmix bash -c 'hostname ; env | egrep "^NCCL|^FI|^HELLO" | sed "s/^/`hostname`: /g"' echo " ######################################## From f8fe6d87fc7862f1eba3fd8bec2d7ef703ec4d21 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Thu, 31 Aug 2023 09:03:33 -0500 Subject: [PATCH 050/648] Update NCCL test sbatch files --- .../0.nccl-tests/2.nccl-3collectives.sbatch | 2 -- 4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch | 6 +----- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch b/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch index 63e418c7..14b77540 100644 --- a/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch +++ b/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch @@ -40,14 +40,12 @@ declare -a ARGS=( --container-image 
$IMAGE ) - echo " Hostname: $(hostname) " env - echo " ################################################################################ # $NCCL_TEST_PATH/all_reduce_perf diff --git a/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch b/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch index 1f29c6c3..2d2b1954 100644 --- a/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch +++ b/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch @@ -41,14 +41,10 @@ export FI_EFA_ENABLE_SHM_TRANSFER=1 export NCCL_ASYNC_ERROR_HANDLING=1 export NCCL_DEBUG=INFO - declare -a ARGS=( --container-image $IMAGE ) - -export HELLO_WORLD="${1-undefined}" - echo " Hostname: $(hostname) " @@ -59,7 +55,7 @@ echo " ######################################## # srun ########################################" -srun bash -c 'hostname ; env | egrep "^NCCL|^FI|^HELLO" | sed "s/^/`hostname`: /g"' +srun -l "${ARGS[@]}" --mpi=pmix bash -c 'hostname ; env | egrep "^NCCL|^FI|^HELLO" | sed "s/^/`hostname`: /g"' echo " ######################################## From 3f99cd7b684a13a0df085bca87f08903fd1218e4 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Thu, 31 Aug 2023 09:23:36 -0500 Subject: [PATCH 051/648] Fix documentation for NGC image name --- 3.test_cases/2.nemo-launcher/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index db22fe2d..eed9ea24 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -11,7 +11,7 @@ Table of contents: The following pre-requisites are needed to run this example: -- You have access to the base image [`bignlp-training`](https://registry.ngc.nvidia.com/orgs/ea-bignlp/containers/bignlp-training) is available through NVIDIA's open-beta [here](https://developer.nvidia.com/nemo-framework-open-beta). 
+- You have access to the base image [`nemofw-training`](https://registry.ngc.nvidia.com/orgs/ea-bignlp/containers/bignlp-training) is available through NVIDIA's open-beta [here](https://developer.nvidia.com/nemo-framework-open-beta). - Docker, [Enroot](https://github.com/NVIDIA/enroot) and [Pixys](https://github.com/NVIDIA/pyxis) installed on the cluster and available on all nodes. It is assumed you are using a Custom AMI ([example](../../2.amazon_machine_images)) You will need to setup the following environment variables before running the scripts. : From 106a0a376216294bed68eef616ebe900b0c0d9b0 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Thu, 31 Aug 2023 09:26:28 -0500 Subject: [PATCH 052/648] Update test case path for Nemo --- 3.test_cases/2.nemo-launcher/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index eed9ea24..59592928 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -20,8 +20,8 @@ You will need to setup the following environment variables before running the sc export NEMO_VERSION=23.07 export REPO=aws-nemo-megatron export TAG=$NEMO_VERSION-py3 -export TARGET_PATH=/fsx/nemo-launcher-$NEMO_VERSION # must be a shared filesystem -export TEST_CASE_PATH=/home/ec2-user/2.nemo-launcher-23.07 # where you copy the test case or set to your test case path +export TARGET_PATH=/fsx/nemo-launcher-$NEMO_VERSION # must be a shared filesystem +export TEST_CASE_PATH=/home/ec2-user/2.nemo-launcher # where you copy the test case or set to your test case path export ENROOT_IMAGE=/apps/${REPO}_${TAG}.sqsh cd $TEST_CASE_PATH From 36da43d237465b87958d1626919b009a347c192e Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Thu, 31 Aug 2023 10:22:57 -0500 Subject: [PATCH 053/648] Change head node root volume size from 100GB to 500GB --- .../2.aws-parallelcluster/distributed-training-p4de-base.yaml 
| 2 +- ...distributed-training-p4de_batch-inference-g5_custom_ami.yaml | 2 +- .../distributed-training-p4de_custom_ami.yaml | 2 +- .../distributed-training-p4de_postinstall_scripts.yaml | 2 +- .../distributed-training-trn1_custom_ami.yaml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml index 18403483..1da1b0b4 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml @@ -10,7 +10,7 @@ HeadNode: KeyName: PLACEHOLDER_SSH_KEY LocalStorage: RootVolume: - Size: 100 + Size: 500 DeleteOnTermination: true # that's your root and /home volume for users Iam: AdditionalIamPolicies: # grant ECR, SSM and S3 read access diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml index 5e859e70..4624f2db 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml @@ -13,7 +13,7 @@ HeadNode: KeyName: PLACEHOLDER_SSH_KEY LocalStorage: RootVolume: - Size: 100 + Size: 500 DeleteOnTermination: true # that's your root and /home volume for users Iam: AdditionalIamPolicies: # grant ECR, SSM and S3 read access diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml index 3134573a..3950a92f 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml @@ -11,7 +11,7 @@ HeadNode: 
KeyName: PLACEHOLDER_SSH_KEY LocalStorage: RootVolume: - Size: 100 + Size: 500 DeleteOnTermination: true # that's your root and /home volume for users Iam: AdditionalIamPolicies: # grant ECR, SSM and S3 read access diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml index 82014d55..0dce7d2e 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml @@ -10,7 +10,7 @@ HeadNode: KeyName: PLACEHOLDER_SSH_KEY LocalStorage: RootVolume: - Size: 100 + Size: 500 DeleteOnTermination: true # that's your root and /home volume for users Iam: AdditionalIamPolicies: # grant ECR, SSM and S3 read access diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml index 72f8a882..cbf2ebcf 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml @@ -11,7 +11,7 @@ HeadNode: KeyName: PLACEHOLDER_SSH_KEY LocalStorage: RootVolume: - Size: 100 + Size: 500 DeleteOnTermination: true # that's your root and /home volume for users Iam: AdditionalIamPolicies: # grant ECR, SSM and S3 read access From 9dbc351f5aea34fe2d808cbf9dbb88d614547e4e Mon Sep 17 00:00:00 2001 From: Verdi March Date: Fri, 1 Sep 2023 01:59:11 +0000 Subject: [PATCH 054/648] Update README.md --- 3.test_cases/2.nemo-launcher/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index 59592928..ce1cc927 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -118,7 +118,7 @@ 
source ${TARGET_PATH}/.venv/bin/activate # Download tokenizer data (one-time activity) mkdir -p $TARGET_PATH/bpe -curl -L https://huggingface.co/gpt2/raw/main/config.json > $TARGET_PATH/bpe/vocab.json +curl -L https://huggingface.co/gpt2/raw/main/vocab.json > $TARGET_PATH/bpe/vocab.json curl -L https://huggingface.co/gpt2/raw/main/merges.txt > $TARGET_PATH/bpe/merges.txt ``` @@ -130,5 +130,5 @@ Run pre-training as follows: # 2. review, edit (if necessary), then run pretrain-gpt3-*.sh. # # Below show option 2. -./pretrain-gpt3-126m2.sh +./bmk-pretrain-gpt3-126m2.sh ``` From f24c92001534ab77566ae7de11d2cdd18b90a6b2 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 5 Sep 2023 02:32:56 +0000 Subject: [PATCH 055/648] Fix nemo-megatron paths --- 3.test_cases/2.nemo-launcher/README.md | 3 +- .../bmk-pretrain-gpt3-126m2.sh | 2 +- .../2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh | 2 +- .../step-01-bmk-pretrain-gpt3.sh | 31 ++++++++++--------- 4 files changed, 20 insertions(+), 18 deletions(-) diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index ce1cc927..76e4d0b7 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -130,5 +130,6 @@ Run pre-training as follows: # 2. review, edit (if necessary), then run pretrain-gpt3-*.sh. # # Below show option 2. 
-./bmk-pretrain-gpt3-126m2.sh +cd $TARGET_PATH +$TEST_CASE_PATH/bmk-pretrain-gpt3-126m2.sh ``` diff --git a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh index b57d729b..0918e494 100755 --- a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh +++ b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh @@ -10,7 +10,7 @@ export MODEL_SIZE=126m export NUM_NODES=2 export RUNTIME=30m export MAX_STEPS=40 -export UNIQUE_OUTPUT_DIR=1 +#export UNIQUE_OUTPUT_DIR=1 BIN_DIR=$(dirname `readlink -e ${BASH_SOURCE[0]}`) diff --git a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh index efe96249..e6edae2a 100755 --- a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh +++ b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh @@ -9,7 +9,7 @@ export NUM_NODES=2 export RUNTIME=30m export MAX_STEPS=20 -export UNIQUE_OUTPUT_DIR=1 +#export UNIQUE_OUTPUT_DIR=1 BIN_DIR=$(dirname `readlink -e ${BASH_SOURCE[0]}`) diff --git a/3.test_cases/2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh b/3.test_cases/2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh index 5f4a6705..98b827d9 100755 --- a/3.test_cases/2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh +++ b/3.test_cases/2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh @@ -10,23 +10,11 @@ set -exuo pipefail : "${NUM_NODES:=8}" : "${RUNTIME:=4h}" : "${MAX_STEPS:=5}" -: "${WORKSPACE_CONT:=/fsx/ubuntu/nemo-megatron-23.07}" +: "${WORKSPACE_CONT:=/fsx/nemo-launcher-23.07}" CONT_RESULT_DIR=${WORKSPACE_CONT}/results : "${UNIQUE_OUTPUT_DIR:=0}" -if [[ ${UNIQUE_OUTPUT_DIR} -eq 1 ]]; then - # For debugging: each run has its own output dir. 
- TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) - CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} -fi - -echo " -#################### -This run will write to directory ${CONT_RESULT_DIR} -#################### -" - declare -a BMK_ARGS=( # Disable validation, as we're only interested to measure the training time. training.trainer.limit_val_batches=0.0 @@ -40,8 +28,21 @@ declare -a BMK_ARGS=( training.model.data.data_prefix=[] ) -# base_results_dir=${CONT_RESULT_DIR} \ -HYDRA_FULL_ERROR=1 python3 /fsx/ubuntu/nemo-launcher-23.07/launcher_scripts/main.py \ +if [[ ${UNIQUE_OUTPUT_DIR} -eq 1 ]]; then + # For debugging: each run has its own output dir. + TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) + CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} + + BMK_ARGS+=(base_results_dir=${CONT_RESULT_DIR}) + + echo " + #################### + This run will write to directory ${CONT_RESULT_DIR} + #################### + " +fi + +HYDRA_FULL_ERROR=1 python3 /fsx/nemo-launcher-23.07/launcher_scripts/main.py \ stages=[training] \ training=${MODEL}/${MODEL_SIZE} \ training.trainer.num_nodes=$NUM_NODES \ From cc7e8e5c77855a8076813b52891012e97237143b Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 5 Sep 2023 17:41:58 +0800 Subject: [PATCH 056/648] Incorporate code review to documentations --- ...ap-launcher.sh => 0.bootstrap-launcher.sh} | 0 ...retrain-gpt3.sh => 1.bmk-pretrain-gpt3.sh} | 0 3.test_cases/2.nemo-launcher/README.md | 76 ++++++++++++++++--- .../bmk-pretrain-gpt3-126m2.sh | 2 +- .../2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh | 2 +- 5 files changed, 66 insertions(+), 14 deletions(-) rename 3.test_cases/2.nemo-launcher/{step-00-bootstrap-launcher.sh => 0.bootstrap-launcher.sh} (100%) rename 3.test_cases/2.nemo-launcher/{step-01-bmk-pretrain-gpt3.sh => 1.bmk-pretrain-gpt3.sh} (100%) diff --git a/3.test_cases/2.nemo-launcher/step-00-bootstrap-launcher.sh b/3.test_cases/2.nemo-launcher/0.bootstrap-launcher.sh similarity index 100% rename from 
3.test_cases/2.nemo-launcher/step-00-bootstrap-launcher.sh rename to 3.test_cases/2.nemo-launcher/0.bootstrap-launcher.sh diff --git a/3.test_cases/2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh b/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh similarity index 100% rename from 3.test_cases/2.nemo-launcher/step-01-bmk-pretrain-gpt3.sh rename to 3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index 76e4d0b7..d7ba2e57 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -5,7 +5,9 @@ Table of contents: - [1. Pre-requisites](#1-pre-requisites) - [2. Build AWS-optimized Nemo-Launcher image](#2-build-aws-optimized-nemo-launcher-image) - [3. Set-up the NemoMegatron environment](#3-set-up-the-nemomegatron-environment) -- [4. Pre-training GPT3](#4-pre-training-gpt3) +- [4. Download vocabularies](#4-download-vocabularies) +- [5. Pre-training GPT3](#5-pre-training-gpt3) +- [6. Customizing Pre-Training](#6-customizing-pre-training) ## 1. Pre-requisites @@ -109,27 +111,77 @@ cp -Rv ${TEST_CASE_PATH}/conf.template/cluster ${TARGET_PATH}/launcher_scripts/c envsubst < ${TEST_CASE_PATH}/conf.template/config.yaml > ${TARGET_PATH}/launcher_scripts/conf/config.yaml ``` -## 4. Pre-training GPT3 +## 4. Download vocabularies -This section assumes that you went through the previous sections and 1/ retrieved and built the AWS optimized NemoMegatron container, 2/ setup the NemoMegatron environment. 
To start, source the NemoMegatron environment: +The pre-training process we're going to run uses the [GPT2](https://huggingface.co/gpt2) tokenizer which requires you to download the vocabularies files: ```bash -source ${TARGET_PATH}/.venv/bin/activate - -# Download tokenizer data (one-time activity) mkdir -p $TARGET_PATH/bpe curl -L https://huggingface.co/gpt2/raw/main/vocab.json > $TARGET_PATH/bpe/vocab.json curl -L https://huggingface.co/gpt2/raw/main/merges.txt > $TARGET_PATH/bpe/merges.txt ``` -Run pre-training as follows: +## 5. Pre-training GPT3 + +This section assumes that you went through the previous sections and 1/ retrieved and built the AWS optimized NemoMegatron container, 2/ setup the NemoMegatron environment, and 3/ download the vocabularies. + +To start pre-training, source the NemoMegatron environment: + +```bash +source ${TARGET_PATH}/.venv/bin/activate +``` + +To pre-train a GPT3-126m on two instances with mock dataset, run the following: ```bash -# Choose one of these options: -# 1. edit then run step-01-pretrain-gpt3.sh, or -# 2. review, edit (if necessary), then run pretrain-gpt3-*.sh. -# -# Below show option 2. cd $TARGET_PATH $TEST_CASE_PATH/bmk-pretrain-gpt3-126m2.sh ``` + +which results in this execution tree: + +```bash +$TEST_CASE_PATH/bmk-pretrain-gpt3-126m.sh +\_ $TEST_CASE_PATH/1.bmk-pretrain-gpt3.sh + \_ $TARGET_PATH/launcher_scripts/main.py + \_ sbatch +``` + +As can be seen, Nemo-launcher `launcher_scripts/main.py` interacts with Slurm on our behalf to generate an `.sbatch` file and submit it to Slurm. Nemo-launcher logs all the invocation commands, output, and error to `$TARGET_PATH/results//` described below. 
+ +```bash +$TARGET_PATH/results/gpt3_126m +├── gpt3_126m_hydra.yaml # The fully interpolated pre-training configuration +├── launcher_cmd.log # The full invocation command of launcher_scripts/main.py +├── launcher.log # Job id produced by the sbatch command +├── log-nemo-megatron-gpt3_126m_.out # Stdout of the pre-training Slurm job +├── nemo-megatron-gpt3_126m_submission.sh # .sbatch file generated and submitted by nemo-launcher +└── results + ├── cmd-args.log # The full invocation command of the pre-training script + ├── events.out.tfevents.* # Tensorboard logs + ├── git-info.log # The commit hash of the NeMO repo provided in the container. + ├── hparams.yaml # Pre-training hyperparameters + ├── lightning_logs.txt # Additional logs from PyTorch-Lightning + ├── nemo_error_log.txt # Stderr of pre-training step + └── nemo_log_globalrank-*.txt # Log of each rank +``` + +Please note that except for `log-nemo-megatron-gpt3_126m_.out`, the other files will be overridden when you launch another pre-training of that same model size. To completely separate the output among jobs, edit `TEST_CASE_PATH/bmk-pretrain-gpt3-126m.sh` and uncomment the `#export UNIQUE_OUTPUT_DIR=1` line to produce this output dir instead: + +```bash +$TARGET_PATH/results-/gpt3_126m/ +├── gpt3_126m_hydra.yaml +├── ... +└── results + ├── cmd-args.log + ├── ... + └── nemo_log_globalrank-*.txt\ +``` + +Congratulations! You've successfully run this test case to completion. + +## 6. Customizing Pre-Training + +The `$TEST_CASE_PATH` comes with `bmk-pretrain-gpt3-126m2.sh` and `bmk-pretrain-gpt3-5b2.sh` to pre-train 126m and 5b models, respectively, on two instances. + +To pre-train a different model size on different instance count, create your own `bmk-pretrain-gpt3-.sh` based on those examples. Please that pre-training LLM requires understanding on the hyperparameters such as parallelism and batches. 
Please refer to the NeMO project ([website](https://developer.nvidia.com/nemo), [GitHub](https://github.com/NVIDIA/NeMo), [NeMo-Megatron-Launcher](https://github.com/NVIDIA/NeMo-Megatron-Launcher)) and the Megatron papers ([Shoeybi20](https://arxiv.org/abs/1909.08053), [Narayanan21](https://arxiv.org/abs/2104.04473)). diff --git a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh index 0918e494..043c8d7d 100755 --- a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh +++ b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh @@ -15,4 +15,4 @@ export MAX_STEPS=40 BIN_DIR=$(dirname `readlink -e ${BASH_SOURCE[0]}`) # Node_count == 2 can work without full activations checkpointing. -$BIN_DIR/step-01-bmk-pretrain-gpt3.sh +$BIN_DIR/1.bmk-pretrain-gpt3.sh diff --git a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh index e6edae2a..d164dbc8 100755 --- a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh +++ b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh @@ -18,7 +18,7 @@ BIN_DIR=$(dirname `readlink -e ${BASH_SOURCE[0]}`) # # Below settings is similar to 22.09, except that 22.09 funnily didn't OOM with # activations_checkpoint_num_layers=0. 
-$BIN_DIR/step-01-bmk-pretrain-gpt3.sh \ +$BIN_DIR/1.bmk-pretrain-gpt3.sh \ training.model.activations_checkpoint_granularity='full' \ training.model.activations_checkpoint_method='block' \ training.model.activations_checkpoint_num_layers=1 From 1efdaf80438bfe20637331b4c9a47af5be5cba77 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 5 Sep 2023 17:54:35 +0800 Subject: [PATCH 057/648] Update paths in nemo-launcher bootstrap scripts --- .../2.nemo-launcher/0.bootstrap-launcher.sh | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/3.test_cases/2.nemo-launcher/0.bootstrap-launcher.sh b/3.test_cases/2.nemo-launcher/0.bootstrap-launcher.sh index 5a3121ac..ffe4b879 100755 --- a/3.test_cases/2.nemo-launcher/0.bootstrap-launcher.sh +++ b/3.test_cases/2.nemo-launcher/0.bootstrap-launcher.sh @@ -7,15 +7,20 @@ set -exuo pipefail -NEMO_LAUNCHER_VERSION=23.07 +: "${NEMO_VERSION:=23.07}" +: "${REPO:=aws-nemo-megatron}" +: "${TAG:=$NEMO_VERSION-py3}" +: "${ENROOT_IMAGE:=/apps/${REPO}_${TAG}.sqsh}" +: "${TARGET_PATH:=/fsx/nemo-launcher-$NEMO_VERSION}" # must be a shared filesystem + srun -N 1 \ - --container-mounts=/fsx/ubuntu/sample-slurm-jobs/nemo-launcher-$NEMO_LAUNCHER_VERSION:/workspace/mount_dir \ - --container-image=/fsx/ubuntu/aws-nemo-megatron_$NEMO_LAUNCHER_VERSION-py3.sqsh \ + --container-mounts=$TARGET_PATH:/workspace/mount_dir \ + --container-image=$ENROOT_IMAGE \ bash -c "cp -a /opt/NeMo-Megatron-Launcher/launcher_scripts /opt/NeMo-Megatron-Launcher/auto_configurator /opt/FasterTransformer /workspace/mount_dir/" -cd /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-$NEMO_LAUNCHER_VERSION/ -/usr/bin/python3 -m venv .venv -source /fsx/ubuntu/sample-slurm-jobs/nemo-launcher-$NEMO_LAUNCHER_VERSION/.venv/bin/activate -curl -LO https://raw.githubusercontent.com/NVIDIA/NeMo-Megatron-Launcher/$NEMO_LAUNCHER_VERSION/requirements.txt -pip3 install --upgrade pip setuptools -pip3 install -r requirements.txt +cd $TARGET_PATH +/usr/bin/python3.8 
-m venv .venv +source $TARGET_PATH/.venv/bin/activate +curl -LO https://raw.githubusercontent.com/NVIDIA/NeMo-Megatron-Launcher/$NEMO_VERSION/requirements.txt +pip3.8 install --upgrade pip setuptools +pip3.8 install -r requirements.txt From f6d73098d1388cac32f4496be40b4aa0201e1e05 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Wed, 6 Sep 2023 03:12:52 +0000 Subject: [PATCH 058/648] nemo-launcher: fix job error due to missing input dir --- 3.test_cases/2.nemo-launcher/README.md | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index d7ba2e57..bca4da1c 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -111,9 +111,9 @@ cp -Rv ${TEST_CASE_PATH}/conf.template/cluster ${TARGET_PATH}/launcher_scripts/c envsubst < ${TEST_CASE_PATH}/conf.template/config.yaml > ${TARGET_PATH}/launcher_scripts/conf/config.yaml ``` -## 4. Download vocabularies +## 4. Prepare Input Data -The pre-training process we're going to run uses the [GPT2](https://huggingface.co/gpt2) tokenizer which requires you to download the vocabularies files: +The pre-training we're going to run uses the [GPT2](https://huggingface.co/gpt2) tokenizer which requires you to download the vocabularies files: ```bash mkdir -p $TARGET_PATH/bpe @@ -121,6 +121,12 @@ curl -L https://huggingface.co/gpt2/raw/main/vocab.json > $TARGET_PATH/bpe/vocab curl -L https://huggingface.co/gpt2/raw/main/merges.txt > $TARGET_PATH/bpe/merges.txt ``` +We also create an input directory but leave it empty for our pre-training which uses a mock dataset generated on-the-fly. + +```bash + mkdir -p $TARGET_PATH/data +``` + ## 5. Pre-training GPT3 This section assumes that you went through the previous sections and 1/ retrieved and built the AWS optimized NemoMegatron container, 2/ setup the NemoMegatron environment, and 3/ download the vocabularies. 
@@ -185,3 +191,15 @@ Congratulations! You've successfully run this test case to completion. The `$TEST_CASE_PATH` comes with `bmk-pretrain-gpt3-126m2.sh` and `bmk-pretrain-gpt3-5b2.sh` to pre-train 126m and 5b models, respectively, on two instances. To pre-train a different model size on different instance count, create your own `bmk-pretrain-gpt3-.sh` based on those examples. Please that pre-training LLM requires understanding on the hyperparameters such as parallelism and batches. Please refer to the NeMO project ([website](https://developer.nvidia.com/nemo), [GitHub](https://github.com/NVIDIA/NeMo), [NeMo-Megatron-Launcher](https://github.com/NVIDIA/NeMo-Megatron-Launcher)) and the Megatron papers ([Shoeybi20](https://arxiv.org/abs/1909.08053), [Narayanan21](https://arxiv.org/abs/2104.04473)). + +At the very least, you'd want to review and customize one or more YAML files under `$TARGET_PATH/launcher_scripts/conf/`. Nemo-launcher organizes its config files in an opinionated hierarchy. Below is an example of relevant YAML files when launching `$TARGET_PATH/launcher_scripts/main.py` for `training` stage for `gpt3/126m` (see `$TEST_CASE_PATH/1.bmk-pretrain-gpt3.sh`). + +```bash +$TARGET_PATH/launcher_scripts/conf +├── config.yaml # Config for generating job scripts (.sbatch, .yaml, etc.) 
+├── cluster +│  └── bcm.yaml # Config for Slurm jobs +└── training # Config for stage "training" + └── gpt3 # Config for model "gpt3" +    └── 126m.yaml # Config for model size "126m" +``` From f755c64621a12dd5f787541ba20ac498064cb92b Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 6 Sep 2023 15:49:56 -0700 Subject: [PATCH 059/648] Remove extra space in command for Nemo --- 3.test_cases/2.nemo-launcher/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index bca4da1c..064188cb 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -124,7 +124,7 @@ curl -L https://huggingface.co/gpt2/raw/main/merges.txt > $TARGET_PATH/bpe/merge We also create an input directory but leave it empty for our pre-training which uses a mock dataset generated on-the-fly. ```bash - mkdir -p $TARGET_PATH/data +mkdir -p $TARGET_PATH/data ``` ## 5. Pre-training GPT3 From e717b01a9ba856f75bf7408e4510979c19594d17 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Thu, 7 Sep 2023 17:06:26 +0000 Subject: [PATCH 060/648] Delete deploy.sh for VPC --- 1.architectures/1.vpc_network/deploy.sh | 6 ------ 1 file changed, 6 deletions(-) delete mode 100755 1.architectures/1.vpc_network/deploy.sh diff --git a/1.architectures/1.vpc_network/deploy.sh b/1.architectures/1.vpc_network/deploy.sh deleted file mode 100755 index 6973efbc..00000000 --- a/1.architectures/1.vpc_network/deploy.sh +++ /dev/null @@ -1,6 +0,0 @@ -aws cloudformation create-stack --stack-name vpc-stack-ml\ - --template-body file://2.vpc-one-az.yaml \ - --parameters ParameterKey=SubnetsAZ,ParameterValue=us-west-2a \ - ParameterKey=VPCName,ParameterValue="ML HPC VPC" \ - --capabilities CAPABILITY_IAM - From 2ddc78e3e89f01c64e4bbf7803f90c8073eb5482 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Mon, 11 Sep 2023 11:15:36 +0800 Subject: [PATCH 061/648] nemo-launcher: fix 
missing tokenizer dir --- 3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh | 7 ++++++- 3.test_cases/2.nemo-launcher/README.md | 8 ++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh b/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh index 98b827d9..d9330987 100755 --- a/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh +++ b/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh @@ -10,8 +10,10 @@ set -exuo pipefail : "${NUM_NODES:=8}" : "${RUNTIME:=4h}" : "${MAX_STEPS:=5}" -: "${WORKSPACE_CONT:=/fsx/nemo-launcher-23.07}" +: "${TARGET_PATH:=/fsx/nemo-launcher-23.07}" +WORKSPACE_CONT=$TARGET_PATH CONT_RESULT_DIR=${WORKSPACE_CONT}/results +CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe : "${UNIQUE_OUTPUT_DIR:=0}" @@ -26,6 +28,9 @@ declare -a BMK_ARGS=( # https://github.com/NVIDIA/NeMo/pull/6181/files training.model.data.data_impl=mock training.model.data.data_prefix=[] + training.model.tokenizer.vocab_file=${CONT_TOKENIZER_DIR}/vocab.json + training.model.tokenizer.merge_file=${CONT_TOKENIZER_DIR}/merges.txt + ) if [[ ${UNIQUE_OUTPUT_DIR} -eq 1 ]]; then diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index 064188cb..70fbff59 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -5,7 +5,7 @@ Table of contents: - [1. Pre-requisites](#1-pre-requisites) - [2. Build AWS-optimized Nemo-Launcher image](#2-build-aws-optimized-nemo-launcher-image) - [3. Set-up the NemoMegatron environment](#3-set-up-the-nemomegatron-environment) -- [4. Download vocabularies](#4-download-vocabularies) +- [4. Prepare Input Data](#4-prepare-input-data) - [5. Pre-training GPT3](#5-pre-training-gpt3) - [6. 
Customizing Pre-Training](#6-customizing-pre-training) @@ -116,9 +116,9 @@ envsubst < ${TEST_CASE_PATH}/conf.template/config.yaml > ${TARGET_PATH}/launcher The pre-training we're going to run uses the [GPT2](https://huggingface.co/gpt2) tokenizer which requires you to download the vocabularies files: ```bash -mkdir -p $TARGET_PATH/bpe -curl -L https://huggingface.co/gpt2/raw/main/vocab.json > $TARGET_PATH/bpe/vocab.json -curl -L https://huggingface.co/gpt2/raw/main/merges.txt > $TARGET_PATH/bpe/merges.txt +mkdir -p $TARGET_PATH/data/bpe +curl -L https://huggingface.co/gpt2/raw/main/vocab.json > $TARGET_PATH/data/bpe/vocab.json +curl -L https://huggingface.co/gpt2/raw/main/merges.txt > $TARGET_PATH/data/bpe/merges.txt ``` We also create an input directory but leave it empty for our pre-training which uses a mock dataset generated on-the-fly. From d97d1bad00c1a883aea431ad1ca0e5e7e79e27a4 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Mon, 11 Sep 2023 12:03:33 +0800 Subject: [PATCH 062/648] nemo-launcher: simplify tokenizer fix in 7d1b2fa --- 3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh | 3 --- 3.test_cases/2.nemo-launcher/README.md | 8 ++------ 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh b/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh index d9330987..bbf4678a 100755 --- a/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh +++ b/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh @@ -28,9 +28,6 @@ declare -a BMK_ARGS=( # https://github.com/NVIDIA/NeMo/pull/6181/files training.model.data.data_impl=mock training.model.data.data_prefix=[] - training.model.tokenizer.vocab_file=${CONT_TOKENIZER_DIR}/vocab.json - training.model.tokenizer.merge_file=${CONT_TOKENIZER_DIR}/merges.txt - ) if [[ ${UNIQUE_OUTPUT_DIR} -eq 1 ]]; then diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index 70fbff59..7652ae19 100644 --- 
a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -121,11 +121,7 @@ curl -L https://huggingface.co/gpt2/raw/main/vocab.json > $TARGET_PATH/data/bpe/ curl -L https://huggingface.co/gpt2/raw/main/merges.txt > $TARGET_PATH/data/bpe/merges.txt ``` -We also create an input directory but leave it empty for our pre-training which uses a mock dataset generated on-the-fly. - -```bash -mkdir -p $TARGET_PATH/data -``` +That's all needed to pre-train with a mock dataset generated on-the-fly. ## 5. Pre-training GPT3 @@ -153,7 +149,7 @@ $TEST_CASE_PATH/bmk-pretrain-gpt3-126m.sh \_ sbatch ``` -As can be seen, Nemo-launcher `launcher_scripts/main.py` interacts with Slurm on our behalf to generate an `.sbatch` file and submit it to Slurm. Nemo-launcher logs all the invocation commands, output, and error to `$TARGET_PATH/results//` described below. +As can be seen, Nemo-launcher `launcher_scripts/main.py` interacts with Slurm on our behalf to generate an `.sbatch` file and submits it to Slurm. Nemo-launcher logs all the invocation commands, output, and error to `$TARGET_PATH/results//` described below. 
```bash $TARGET_PATH/results/gpt3_126m From 387b676342abb89b17eec073f19d30d4b6d789f3 Mon Sep 17 00:00:00 2001 From: Alex Iankoulski Date: Mon, 11 Sep 2023 11:14:49 -0700 Subject: [PATCH 063/648] Corrections for vpc module --- .../{1.vpc-all-az.yaml => 1.vpc-multi-az.yaml} | 0 1.architectures/1.vpc_network/README.md | 10 +++++----- 1.architectures/1.vpc_network/status.sh | 3 +++ 3 files changed, 8 insertions(+), 5 deletions(-) rename 1.architectures/1.vpc_network/{1.vpc-all-az.yaml => 1.vpc-multi-az.yaml} (100%) create mode 100755 1.architectures/1.vpc_network/status.sh diff --git a/1.architectures/1.vpc_network/1.vpc-all-az.yaml b/1.architectures/1.vpc_network/1.vpc-multi-az.yaml similarity index 100% rename from 1.architectures/1.vpc_network/1.vpc-all-az.yaml rename to 1.architectures/1.vpc_network/1.vpc-multi-az.yaml diff --git a/1.architectures/1.vpc_network/README.md b/1.architectures/1.vpc_network/README.md index 3ec077e1..8a25dea3 100644 --- a/1.architectures/1.vpc_network/README.md +++ b/1.architectures/1.vpc_network/README.md @@ -6,11 +6,11 @@ You will find here a collection of cloudformation templates to deploy a VPC with The architectures each deploy a VPC, public and private subnets, gateways and endpoints. You can deploy them through the AWS Console or AWS CLI. -### 1. Template VPC All AZs +### 1. Template VPC Multiple AZs -This template deploys a VPC with private subnets in all Availability zones. Public subnets can be optionally created in every AZ (done by default). This template serves most use cases. +This template deploys a VPC with private subnets in multiple Availability zones. Public subnets can be optionally created in every AZ (done by default). This template serves most use cases. -- **Template file**: [`1.vpc-all-az.yaml`](./1.vpc-all-az.yaml) +- **Template file**: [`1.vpc-multi-az.yaml`](./1.vpc-multi-az.yaml) #### List of Parameters @@ -30,12 +30,12 @@ Please note that the deployment of public subnets is optional. 
#### Deploy with the AWS CLI -The command to deploy the template through the CLI is shown below. Feel free to edit for your own configuration and parameters. +The command to deploy the template through the CLI is shown below. Feel free to edit for your own configuration and parameters. Please ensure that the `NumberOfAZs` parameter value matches the number of AZs that you specified. ```bash aws cloudformation create-stack --stack-name vpc-stack-ml\ - --template-body file://1.vpc-all-az.yaml \ + --template-body file://1.vpc-multi-az.yaml \ --parameters ParameterKey=AvailabilityZones,ParameterValue=us-east-1a\\,us-east-1b\\,us-east-1c\\,us-east-1d\\,us-east-1e\\,us-east-1f\ ParameterKey=NumberOfAZs,ParameterValue=6 \ ParameterKey=VPCName,ParameterValue="ML HPC VPC" \ diff --git a/1.architectures/1.vpc_network/status.sh b/1.architectures/1.vpc_network/status.sh new file mode 100755 index 00000000..6b8ea0b9 --- /dev/null +++ b/1.architectures/1.vpc_network/status.sh @@ -0,0 +1,3 @@ +#!/bin/bash +aws cloudformation describe-stacks --stack-name vpc-stack-ml | jq -r .Stacks[].StackStatus + From d76682dc8dfa326f27d57f9f950017deddb0ade5 Mon Sep 17 00:00:00 2001 From: Alex Iankoulski Date: Mon, 11 Sep 2023 11:14:49 -0700 Subject: [PATCH 064/648] Corrections for vpc module --- .../{1.vpc-all-az.yaml => 1.vpc-multi-az.yaml} | 0 1.architectures/1.vpc_network/README.md | 10 +++++----- 1.architectures/1.vpc_network/status.sh | 3 +++ 3 files changed, 8 insertions(+), 5 deletions(-) rename 1.architectures/1.vpc_network/{1.vpc-all-az.yaml => 1.vpc-multi-az.yaml} (100%) create mode 100755 1.architectures/1.vpc_network/status.sh diff --git a/1.architectures/1.vpc_network/1.vpc-all-az.yaml b/1.architectures/1.vpc_network/1.vpc-multi-az.yaml similarity index 100% rename from 1.architectures/1.vpc_network/1.vpc-all-az.yaml rename to 1.architectures/1.vpc_network/1.vpc-multi-az.yaml diff --git a/1.architectures/1.vpc_network/README.md b/1.architectures/1.vpc_network/README.md index 
3ec077e1..8a25dea3 100644 --- a/1.architectures/1.vpc_network/README.md +++ b/1.architectures/1.vpc_network/README.md @@ -6,11 +6,11 @@ You will find here a collection of cloudformation templates to deploy a VPC with The architectures each deploy a VPC, public and private subnets, gateways and endpoints. You can deploy them through the AWS Console or AWS CLI. -### 1. Template VPC All AZs +### 1. Template VPC Multiple AZs -This template deploys a VPC with private subnets in all Availability zones. Public subnets can be optionally created in every AZ (done by default). This template serves most use cases. +This template deploys a VPC with private subnets in multiple Availability zones. Public subnets can be optionally created in every AZ (done by default). This template serves most use cases. -- **Template file**: [`1.vpc-all-az.yaml`](./1.vpc-all-az.yaml) +- **Template file**: [`1.vpc-multi-az.yaml`](./1.vpc-multi-az.yaml) #### List of Parameters @@ -30,12 +30,12 @@ Please note that the deployment of public subnets is optional. #### Deploy with the AWS CLI -The command to deploy the template through the CLI is shown below. Feel free to edit for your own configuration and parameters. +The command to deploy the template through the CLI is shown below. Feel free to edit for your own configuration and parameters. Please ensure that the `NumberOfAZs` parameter value matches the number of AZs that you specified. 
```bash aws cloudformation create-stack --stack-name vpc-stack-ml\ - --template-body file://1.vpc-all-az.yaml \ + --template-body file://1.vpc-multi-az.yaml \ --parameters ParameterKey=AvailabilityZones,ParameterValue=us-east-1a\\,us-east-1b\\,us-east-1c\\,us-east-1d\\,us-east-1e\\,us-east-1f\ ParameterKey=NumberOfAZs,ParameterValue=6 \ ParameterKey=VPCName,ParameterValue="ML HPC VPC" \ diff --git a/1.architectures/1.vpc_network/status.sh b/1.architectures/1.vpc_network/status.sh new file mode 100755 index 00000000..6b8ea0b9 --- /dev/null +++ b/1.architectures/1.vpc_network/status.sh @@ -0,0 +1,3 @@ +#!/bin/bash +aws cloudformation describe-stacks --stack-name vpc-stack-ml | jq -r .Stacks[].StackStatus + From 8389c7ca02909d8a0b258e793b78cad4d3cbdf88 Mon Sep 17 00:00:00 2001 From: Alex Iankoulski Date: Mon, 11 Sep 2023 11:36:31 -0700 Subject: [PATCH 065/648] CDK skeleton project for EKS with references in README.md --- 1.architectures/4.aws-eks/.gitignore | 10 +++ 1.architectures/4.aws-eks/README.md | 77 +++++++++++++++++++ 1.architectures/4.aws-eks/app.py | 28 +++++++ 1.architectures/4.aws-eks/cdk.json | 58 ++++++++++++++ 1.architectures/4.aws-eks/cluster/__init__.py | 0 .../4.aws-eks/cluster/cluster_stack.py | 19 +++++ .../4.aws-eks/requirements-dev.txt | 1 + 1.architectures/4.aws-eks/requirements.txt | 2 + 1.architectures/4.aws-eks/source.bat | 13 ++++ 1.architectures/4.aws-eks/tests/__init__.py | 0 .../4.aws-eks/tests/unit/__init__.py | 0 .../tests/unit/test_cluster_stack.py | 15 ++++ 12 files changed, 223 insertions(+) create mode 100644 1.architectures/4.aws-eks/.gitignore create mode 100644 1.architectures/4.aws-eks/README.md create mode 100644 1.architectures/4.aws-eks/app.py create mode 100644 1.architectures/4.aws-eks/cdk.json create mode 100644 1.architectures/4.aws-eks/cluster/__init__.py create mode 100644 1.architectures/4.aws-eks/cluster/cluster_stack.py create mode 100644 1.architectures/4.aws-eks/requirements-dev.txt create mode 100644 
1.architectures/4.aws-eks/requirements.txt create mode 100644 1.architectures/4.aws-eks/source.bat create mode 100644 1.architectures/4.aws-eks/tests/__init__.py create mode 100644 1.architectures/4.aws-eks/tests/unit/__init__.py create mode 100644 1.architectures/4.aws-eks/tests/unit/test_cluster_stack.py diff --git a/1.architectures/4.aws-eks/.gitignore b/1.architectures/4.aws-eks/.gitignore new file mode 100644 index 00000000..37833f8b --- /dev/null +++ b/1.architectures/4.aws-eks/.gitignore @@ -0,0 +1,10 @@ +*.swp +package-lock.json +__pycache__ +.pytest_cache +.venv +*.egg-info + +# CDK asset staging directory +.cdk.staging +cdk.out diff --git a/1.architectures/4.aws-eks/README.md b/1.architectures/4.aws-eks/README.md new file mode 100644 index 00000000..c4aa366b --- /dev/null +++ b/1.architectures/4.aws-eks/README.md @@ -0,0 +1,77 @@ + +# Welcome to your CDK Python project! +This is a project for CDK development with Python. +The `cdk.json` file tells the CDK Toolkit how to execute your app. + +# Prerequisites +1. AWS CLI +2. Python 3.8 +3. npm +4. npm install -g aws-cdk + +# Project + +This project is set up like a standard Python project. The initialization +process also creates a virtualenv within this project, stored under the `.venv` +directory. To create the virtualenv it assumes that there is a `python3` +(or `python` for Windows) executable in your path with access to the `venv` +package. If for any reason the automatic creation of the virtualenv fails, +you can create the virtualenv manually. + +To manually create a virtualenv on MacOS and Linux: + +``` +$ python3 -m venv .venv +``` + +After the init process completes and the virtualenv is created, you can use the following +step to activate your virtualenv. + +``` +$ source .venv/bin/activate +``` + +If you are a Windows platform, you would activate the virtualenv like this: + +``` +% .venv\Scripts\activate.bat +``` + +Once the virtualenv is activated, you can install the required dependencies. 
+ +``` +$ python -m pip install --upgrade pip +``` + +``` +$ pip install -r requirements.txt +``` + +At this point you can now synthesize the CloudFormation template for this code. + +``` +$ cdk synth +``` + +To add additional dependencies, for example other CDK libraries, just add +them to your `setup.py` file and rerun the `pip install -r requirements.txt` +command. + +## Useful commands + + * `cdk ls` list all stacks in the app + * `cdk synth` emits the synthesized CloudFormation template + * `cdk deploy` deploy this stack to your default AWS account/region + * `cdk diff` compare deployed stack with current state + * `cdk docs` open CDK documentation + +# References + +* [CDK v2 Documentation](https://docs.aws.amazon.com/cdk/v2/guide/home.html) +* [Getting started with CDK](https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html) +* [CDK examples](https://github.com/aws-samples/aws-cdk-examples/tree/master/typescript/eks/cluster) +* [CDK API reference](https://docs.aws.amazon.com/cdk/v2/guide/reference.html) +* [CDK API reference EKS quick start](https://docs.aws.amazon.com/cdk/api/v2/docs/aws-cdk-lib.aws_eks-readme.html#quick-start) +* [CDK Python API for EKS](https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_eks/Cluster.html) +* [CDK for Kubernetes (cdk8s)](https://cdk8s.io/) + diff --git a/1.architectures/4.aws-eks/app.py b/1.architectures/4.aws-eks/app.py new file mode 100644 index 00000000..184d0ac9 --- /dev/null +++ b/1.architectures/4.aws-eks/app.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +import os + +import aws_cdk as cdk + +from cluster.cluster_stack import ClusterStack + + +app = cdk.App() +ClusterStack(app, "ClusterStack", + # If you don't specify 'env', this stack will be environment-agnostic. + # Account/Region-dependent features and context lookups will not work, + # but a single synthesized template can be deployed anywhere. 
+ + # Uncomment the next line to specialize this stack for the AWS Account + # and Region that are implied by the current CLI configuration. + + #env=cdk.Environment(account=os.getenv('CDK_DEFAULT_ACCOUNT'), region=os.getenv('CDK_DEFAULT_REGION')), + + # Uncomment the next line if you know exactly what Account and Region you + # want to deploy the stack to. */ + + #env=cdk.Environment(account='123456789012', region='us-east-1'), + + # For more information, see https://docs.aws.amazon.com/cdk/latest/guide/environments.html + ) + +app.synth() diff --git a/1.architectures/4.aws-eks/cdk.json b/1.architectures/4.aws-eks/cdk.json new file mode 100644 index 00000000..b7d6c491 --- /dev/null +++ b/1.architectures/4.aws-eks/cdk.json @@ -0,0 +1,58 @@ +{ + "app": "python3 app.py", + "watch": { + "include": [ + "**" + ], + "exclude": [ + "README.md", + "cdk*.json", + "requirements*.txt", + "source.bat", + "**/__init__.py", + "python/__pycache__", + "tests" + ] + }, + "context": { + "@aws-cdk/aws-lambda:recognizeLayerVersion": true, + "@aws-cdk/core:checkSecretUsage": true, + "@aws-cdk/core:target-partitions": [ + "aws", + "aws-cn" + ], + "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true, + "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true, + "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true, + "@aws-cdk/aws-iam:minimizePolicies": true, + "@aws-cdk/core:validateSnapshotRemovalPolicy": true, + "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true, + "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true, + "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true, + "@aws-cdk/aws-apigateway:disableCloudWatchRole": true, + "@aws-cdk/core:enablePartitionLiterals": true, + "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true, + "@aws-cdk/aws-iam:standardizedServicePrincipals": true, + "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true, + "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true, + 
"@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true, + "@aws-cdk/aws-route53-patters:useCertificate": true, + "@aws-cdk/customresources:installLatestAwsSdkDefault": false, + "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true, + "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true, + "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true, + "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true, + "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true, + "@aws-cdk/aws-redshift:columnId": true, + "@aws-cdk/aws-stepfunctions-tasks:enableEmrServicePolicyV2": true, + "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true, + "@aws-cdk/aws-apigateway:requestValidatorUniqueId": true, + "@aws-cdk/aws-kms:aliasNameRef": true, + "@aws-cdk/aws-autoscaling:generateLaunchTemplateInsteadOfLaunchConfig": true, + "@aws-cdk/core:includePrefixInUniqueNameGeneration": true, + "@aws-cdk/aws-efs:denyAnonymousAccess": true, + "@aws-cdk/aws-opensearchservice:enableOpensearchMultiAzWithStandby": true, + "@aws-cdk/aws-lambda-nodejs:useLatestRuntimeVersion": true, + "@aws-cdk/aws-efs:mountTargetOrderInsensitiveLogicalId": true + } +} diff --git a/1.architectures/4.aws-eks/cluster/__init__.py b/1.architectures/4.aws-eks/cluster/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/1.architectures/4.aws-eks/cluster/cluster_stack.py b/1.architectures/4.aws-eks/cluster/cluster_stack.py new file mode 100644 index 00000000..3f705302 --- /dev/null +++ b/1.architectures/4.aws-eks/cluster/cluster_stack.py @@ -0,0 +1,19 @@ +from aws_cdk import ( + # Duration, + Stack, + # aws_sqs as sqs, +) +from constructs import Construct + +class ClusterStack(Stack): + + def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: + super().__init__(scope, construct_id, **kwargs) + + # The code that defines your stack goes here + + # example resource + # queue = sqs.Queue( + # self, "ClusterQueue", + # 
visibility_timeout=Duration.seconds(300), + # ) diff --git a/1.architectures/4.aws-eks/requirements-dev.txt b/1.architectures/4.aws-eks/requirements-dev.txt new file mode 100644 index 00000000..92709451 --- /dev/null +++ b/1.architectures/4.aws-eks/requirements-dev.txt @@ -0,0 +1 @@ +pytest==6.2.5 diff --git a/1.architectures/4.aws-eks/requirements.txt b/1.architectures/4.aws-eks/requirements.txt new file mode 100644 index 00000000..c0512aa5 --- /dev/null +++ b/1.architectures/4.aws-eks/requirements.txt @@ -0,0 +1,2 @@ +aws-cdk-lib==2.95.1 +constructs>=10.0.0,<11.0.0 diff --git a/1.architectures/4.aws-eks/source.bat b/1.architectures/4.aws-eks/source.bat new file mode 100644 index 00000000..9e1a8344 --- /dev/null +++ b/1.architectures/4.aws-eks/source.bat @@ -0,0 +1,13 @@ +@echo off + +rem The sole purpose of this script is to make the command +rem +rem source .venv/bin/activate +rem +rem (which activates a Python virtualenv on Linux or Mac OS X) work on Windows. +rem On Windows, this command just runs this batch file (the argument is ignored). +rem +rem Now we don't need to document a Windows command for activating a virtualenv. + +echo Executing .venv\Scripts\activate.bat for you +.venv\Scripts\activate.bat diff --git a/1.architectures/4.aws-eks/tests/__init__.py b/1.architectures/4.aws-eks/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/1.architectures/4.aws-eks/tests/unit/__init__.py b/1.architectures/4.aws-eks/tests/unit/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/1.architectures/4.aws-eks/tests/unit/test_cluster_stack.py b/1.architectures/4.aws-eks/tests/unit/test_cluster_stack.py new file mode 100644 index 00000000..d6746756 --- /dev/null +++ b/1.architectures/4.aws-eks/tests/unit/test_cluster_stack.py @@ -0,0 +1,15 @@ +import aws_cdk as core +import aws_cdk.assertions as assertions + +from cluster.cluster_stack import ClusterStack + +# example tests. 
To run these tests, uncomment this file along with the example +# resource in cluster/cluster_stack.py +def test_sqs_queue_created(): + app = core.App() + stack = ClusterStack(app, "cluster") + template = assertions.Template.from_stack(stack) + +# template.has_resource_properties("AWS::SQS::Queue", { +# "VisibilityTimeout": 300 +# }) From e1969b37c200b0d1034ddba0c868318ac0e3de29 Mon Sep 17 00:00:00 2001 From: Alex Iankoulski Date: Mon, 11 Sep 2023 11:36:31 -0700 Subject: [PATCH 066/648] CDK skeleton project for EKS with references in README.md --- 1.architectures/4.aws-eks/.gitignore | 10 +++ 1.architectures/4.aws-eks/README.md | 77 +++++++++++++++++++ 1.architectures/4.aws-eks/app.py | 28 +++++++ 1.architectures/4.aws-eks/cdk.json | 58 ++++++++++++++ 1.architectures/4.aws-eks/cluster/__init__.py | 0 .../4.aws-eks/cluster/cluster_stack.py | 19 +++++ .../4.aws-eks/requirements-dev.txt | 1 + 1.architectures/4.aws-eks/requirements.txt | 2 + 1.architectures/4.aws-eks/source.bat | 13 ++++ 1.architectures/4.aws-eks/tests/__init__.py | 0 .../4.aws-eks/tests/unit/__init__.py | 0 .../tests/unit/test_cluster_stack.py | 15 ++++ 12 files changed, 223 insertions(+) create mode 100644 1.architectures/4.aws-eks/.gitignore create mode 100644 1.architectures/4.aws-eks/README.md create mode 100644 1.architectures/4.aws-eks/app.py create mode 100644 1.architectures/4.aws-eks/cdk.json create mode 100644 1.architectures/4.aws-eks/cluster/__init__.py create mode 100644 1.architectures/4.aws-eks/cluster/cluster_stack.py create mode 100644 1.architectures/4.aws-eks/requirements-dev.txt create mode 100644 1.architectures/4.aws-eks/requirements.txt create mode 100644 1.architectures/4.aws-eks/source.bat create mode 100644 1.architectures/4.aws-eks/tests/__init__.py create mode 100644 1.architectures/4.aws-eks/tests/unit/__init__.py create mode 100644 1.architectures/4.aws-eks/tests/unit/test_cluster_stack.py diff --git a/1.architectures/4.aws-eks/.gitignore 
b/1.architectures/4.aws-eks/.gitignore new file mode 100644 index 00000000..37833f8b --- /dev/null +++ b/1.architectures/4.aws-eks/.gitignore @@ -0,0 +1,10 @@ +*.swp +package-lock.json +__pycache__ +.pytest_cache +.venv +*.egg-info + +# CDK asset staging directory +.cdk.staging +cdk.out diff --git a/1.architectures/4.aws-eks/README.md b/1.architectures/4.aws-eks/README.md new file mode 100644 index 00000000..c4aa366b --- /dev/null +++ b/1.architectures/4.aws-eks/README.md @@ -0,0 +1,77 @@ + +# Welcome to your CDK Python project! +This is a project for CDK development with Python. +The `cdk.json` file tells the CDK Toolkit how to execute your app. + +# Prerequisites +1. AWS CLI +2. Python 3.8 +3. npm +4. npm install -g aws-cdk + +# Project + +This project is set up like a standard Python project. The initialization +process also creates a virtualenv within this project, stored under the `.venv` +directory. To create the virtualenv it assumes that there is a `python3` +(or `python` for Windows) executable in your path with access to the `venv` +package. If for any reason the automatic creation of the virtualenv fails, +you can create the virtualenv manually. + +To manually create a virtualenv on MacOS and Linux: + +``` +$ python3 -m venv .venv +``` + +After the init process completes and the virtualenv is created, you can use the following +step to activate your virtualenv. + +``` +$ source .venv/bin/activate +``` + +If you are a Windows platform, you would activate the virtualenv like this: + +``` +% .venv\Scripts\activate.bat +``` + +Once the virtualenv is activated, you can install the required dependencies. + +``` +$ python -m pip install --upgrade pip +``` + +``` +$ pip install -r requirements.txt +``` + +At this point you can now synthesize the CloudFormation template for this code. 
+ +``` +$ cdk synth +``` + +To add additional dependencies, for example other CDK libraries, just add +them to your `setup.py` file and rerun the `pip install -r requirements.txt` +command. + +## Useful commands + + * `cdk ls` list all stacks in the app + * `cdk synth` emits the synthesized CloudFormation template + * `cdk deploy` deploy this stack to your default AWS account/region + * `cdk diff` compare deployed stack with current state + * `cdk docs` open CDK documentation + +# References + +* [CDK v2 Documentation](https://docs.aws.amazon.com/cdk/v2/guide/home.html) +* [Getting started with CDK](https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html) +* [CDK examples](https://github.com/aws-samples/aws-cdk-examples/tree/master/typescript/eks/cluster) +* [CDK API reference](https://docs.aws.amazon.com/cdk/v2/guide/reference.html) +* [CDK API reference EKS quick start](https://docs.aws.amazon.com/cdk/api/v2/docs/aws-cdk-lib.aws_eks-readme.html#quick-start) +* [CDK Python API for EKS](https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_eks/Cluster.html) +* [CDK for Kubernetes (cdk8s)](https://cdk8s.io/) + diff --git a/1.architectures/4.aws-eks/app.py b/1.architectures/4.aws-eks/app.py new file mode 100644 index 00000000..184d0ac9 --- /dev/null +++ b/1.architectures/4.aws-eks/app.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +import os + +import aws_cdk as cdk + +from cluster.cluster_stack import ClusterStack + + +app = cdk.App() +ClusterStack(app, "ClusterStack", + # If you don't specify 'env', this stack will be environment-agnostic. + # Account/Region-dependent features and context lookups will not work, + # but a single synthesized template can be deployed anywhere. + + # Uncomment the next line to specialize this stack for the AWS Account + # and Region that are implied by the current CLI configuration. 
+ + #env=cdk.Environment(account=os.getenv('CDK_DEFAULT_ACCOUNT'), region=os.getenv('CDK_DEFAULT_REGION')), + + # Uncomment the next line if you know exactly what Account and Region you + # want to deploy the stack to. */ + + #env=cdk.Environment(account='123456789012', region='us-east-1'), + + # For more information, see https://docs.aws.amazon.com/cdk/latest/guide/environments.html + ) + +app.synth() diff --git a/1.architectures/4.aws-eks/cdk.json b/1.architectures/4.aws-eks/cdk.json new file mode 100644 index 00000000..b7d6c491 --- /dev/null +++ b/1.architectures/4.aws-eks/cdk.json @@ -0,0 +1,58 @@ +{ + "app": "python3 app.py", + "watch": { + "include": [ + "**" + ], + "exclude": [ + "README.md", + "cdk*.json", + "requirements*.txt", + "source.bat", + "**/__init__.py", + "python/__pycache__", + "tests" + ] + }, + "context": { + "@aws-cdk/aws-lambda:recognizeLayerVersion": true, + "@aws-cdk/core:checkSecretUsage": true, + "@aws-cdk/core:target-partitions": [ + "aws", + "aws-cn" + ], + "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true, + "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true, + "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true, + "@aws-cdk/aws-iam:minimizePolicies": true, + "@aws-cdk/core:validateSnapshotRemovalPolicy": true, + "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true, + "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true, + "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true, + "@aws-cdk/aws-apigateway:disableCloudWatchRole": true, + "@aws-cdk/core:enablePartitionLiterals": true, + "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true, + "@aws-cdk/aws-iam:standardizedServicePrincipals": true, + "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true, + "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true, + "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true, + "@aws-cdk/aws-route53-patters:useCertificate": true, + 
"@aws-cdk/customresources:installLatestAwsSdkDefault": false, + "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true, + "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true, + "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true, + "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true, + "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true, + "@aws-cdk/aws-redshift:columnId": true, + "@aws-cdk/aws-stepfunctions-tasks:enableEmrServicePolicyV2": true, + "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true, + "@aws-cdk/aws-apigateway:requestValidatorUniqueId": true, + "@aws-cdk/aws-kms:aliasNameRef": true, + "@aws-cdk/aws-autoscaling:generateLaunchTemplateInsteadOfLaunchConfig": true, + "@aws-cdk/core:includePrefixInUniqueNameGeneration": true, + "@aws-cdk/aws-efs:denyAnonymousAccess": true, + "@aws-cdk/aws-opensearchservice:enableOpensearchMultiAzWithStandby": true, + "@aws-cdk/aws-lambda-nodejs:useLatestRuntimeVersion": true, + "@aws-cdk/aws-efs:mountTargetOrderInsensitiveLogicalId": true + } +} diff --git a/1.architectures/4.aws-eks/cluster/__init__.py b/1.architectures/4.aws-eks/cluster/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/1.architectures/4.aws-eks/cluster/cluster_stack.py b/1.architectures/4.aws-eks/cluster/cluster_stack.py new file mode 100644 index 00000000..3f705302 --- /dev/null +++ b/1.architectures/4.aws-eks/cluster/cluster_stack.py @@ -0,0 +1,19 @@ +from aws_cdk import ( + # Duration, + Stack, + # aws_sqs as sqs, +) +from constructs import Construct + +class ClusterStack(Stack): + + def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: + super().__init__(scope, construct_id, **kwargs) + + # The code that defines your stack goes here + + # example resource + # queue = sqs.Queue( + # self, "ClusterQueue", + # visibility_timeout=Duration.seconds(300), + # ) diff --git a/1.architectures/4.aws-eks/requirements-dev.txt 
b/1.architectures/4.aws-eks/requirements-dev.txt new file mode 100644 index 00000000..92709451 --- /dev/null +++ b/1.architectures/4.aws-eks/requirements-dev.txt @@ -0,0 +1 @@ +pytest==6.2.5 diff --git a/1.architectures/4.aws-eks/requirements.txt b/1.architectures/4.aws-eks/requirements.txt new file mode 100644 index 00000000..c0512aa5 --- /dev/null +++ b/1.architectures/4.aws-eks/requirements.txt @@ -0,0 +1,2 @@ +aws-cdk-lib==2.95.1 +constructs>=10.0.0,<11.0.0 diff --git a/1.architectures/4.aws-eks/source.bat b/1.architectures/4.aws-eks/source.bat new file mode 100644 index 00000000..9e1a8344 --- /dev/null +++ b/1.architectures/4.aws-eks/source.bat @@ -0,0 +1,13 @@ +@echo off + +rem The sole purpose of this script is to make the command +rem +rem source .venv/bin/activate +rem +rem (which activates a Python virtualenv on Linux or Mac OS X) work on Windows. +rem On Windows, this command just runs this batch file (the argument is ignored). +rem +rem Now we don't need to document a Windows command for activating a virtualenv. + +echo Executing .venv\Scripts\activate.bat for you +.venv\Scripts\activate.bat diff --git a/1.architectures/4.aws-eks/tests/__init__.py b/1.architectures/4.aws-eks/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/1.architectures/4.aws-eks/tests/unit/__init__.py b/1.architectures/4.aws-eks/tests/unit/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/1.architectures/4.aws-eks/tests/unit/test_cluster_stack.py b/1.architectures/4.aws-eks/tests/unit/test_cluster_stack.py new file mode 100644 index 00000000..d6746756 --- /dev/null +++ b/1.architectures/4.aws-eks/tests/unit/test_cluster_stack.py @@ -0,0 +1,15 @@ +import aws_cdk as core +import aws_cdk.assertions as assertions + +from cluster.cluster_stack import ClusterStack + +# example tests. 
To run these tests, uncomment this file along with the example +# resource in cluster/cluster_stack.py +def test_sqs_queue_created(): + app = core.App() + stack = ClusterStack(app, "cluster") + template = assertions.Template.from_stack(stack) + +# template.has_resource_properties("AWS::SQS::Queue", { +# "VisibilityTimeout": 300 +# }) From 0cec029129778d6f73993f157ca09de6e6e333e7 Mon Sep 17 00:00:00 2001 From: Alex Iankoulski Date: Mon, 11 Sep 2023 22:54:26 -0700 Subject: [PATCH 067/648] CDK app with configurable EKS cluster stack --- 1.architectures/4.aws-eks/.gitignore | 1 + 1.architectures/4.aws-eks/README.md | 31 ++++++++++++++----- 1.architectures/4.aws-eks/app.py | 6 ++-- .../4.aws-eks/cluster/cluster_stack.py | 29 +++++++++++++++++ 1.architectures/4.aws-eks/config.py | 12 +++++++ 5 files changed, 68 insertions(+), 11 deletions(-) create mode 100644 1.architectures/4.aws-eks/config.py diff --git a/1.architectures/4.aws-eks/.gitignore b/1.architectures/4.aws-eks/.gitignore index 37833f8b..454fac8b 100644 --- a/1.architectures/4.aws-eks/.gitignore +++ b/1.architectures/4.aws-eks/.gitignore @@ -8,3 +8,4 @@ __pycache__ # CDK asset staging directory .cdk.staging cdk.out +cdk.context.json diff --git a/1.architectures/4.aws-eks/README.md b/1.architectures/4.aws-eks/README.md index c4aa366b..be3fcf2b 100644 --- a/1.architectures/4.aws-eks/README.md +++ b/1.architectures/4.aws-eks/README.md @@ -2,14 +2,16 @@ # Welcome to your CDK Python project! This is a project for CDK development with Python. The `cdk.json` file tells the CDK Toolkit how to execute your app. +The `config.py` file configures an existing VPC and the EKS cluster to create in it. # Prerequisites -1. AWS CLI -2. Python 3.8 -3. npm -4. npm install -g aws-cdk +1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) +2. [Python 3.8 or greater](https://www.python.org/downloads/) +3. [npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) +4. 
CDK Toolkit: `npm install -g aws-cdk` +5. [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) -# Project +# Project setup This project is set up like a standard Python project. The initialization process also creates a virtualenv within this project, stored under the `.venv` @@ -57,7 +59,7 @@ To add additional dependencies, for example other CDK libraries, just add them to your `setup.py` file and rerun the `pip install -r requirements.txt` command. -## Useful commands +# Useful commands * `cdk ls` list all stacks in the app * `cdk synth` emits the synthesized CloudFormation template @@ -65,8 +67,21 @@ command. * `cdk diff` compare deployed stack with current state * `cdk docs` open CDK documentation -# References +# Project use + +1. Configure existing VPC name and specify desired EKS cluster settings by editing `./config.py` +2. Configure AWS CLI: `aws configure` +3. `export CDK_DEFAULT_ACCOUNT=` +4. `export CDK_DEFAULT_REGION=` +5. Execute `cdk synth` +6. Execute `cdk deploy --require-approval never` +7. Tag VPC public and private subnets as instructed by deployment log if needed +8. Upon successful creation, copy and execute the displayed aws command to update the cluster kubeconfig + +If the cluster was created successfully, you will see the cluster nodes you specified by executing the following command: +`kubectl get nodes` +# References * [CDK v2 Documentation](https://docs.aws.amazon.com/cdk/v2/guide/home.html) * [Getting started with CDK](https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html) * [CDK examples](https://github.com/aws-samples/aws-cdk-examples/tree/master/typescript/eks/cluster) @@ -74,4 +89,4 @@ command. 
* [CDK API reference EKS quick start](https://docs.aws.amazon.com/cdk/api/v2/docs/aws-cdk-lib.aws_eks-readme.html#quick-start) * [CDK Python API for EKS](https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_eks/Cluster.html) * [CDK for Kubernetes (cdk8s)](https://cdk8s.io/) - +* [CDK Workshop Lab - build EKS cluster](https://catalog.us-east-1.prod.workshops.aws/workshops/c15012ac-d05d-46b1-8a4a-205e7c9d93c9/en-US/40-deploy-clusters) diff --git a/1.architectures/4.aws-eks/app.py b/1.architectures/4.aws-eks/app.py index 184d0ac9..539d3647 100644 --- a/1.architectures/4.aws-eks/app.py +++ b/1.architectures/4.aws-eks/app.py @@ -5,7 +5,6 @@ from cluster.cluster_stack import ClusterStack - app = cdk.App() ClusterStack(app, "ClusterStack", # If you don't specify 'env', this stack will be environment-agnostic. @@ -15,14 +14,15 @@ # Uncomment the next line to specialize this stack for the AWS Account # and Region that are implied by the current CLI configuration. - #env=cdk.Environment(account=os.getenv('CDK_DEFAULT_ACCOUNT'), region=os.getenv('CDK_DEFAULT_REGION')), + env=cdk.Environment(account=os.getenv('CDK_DEFAULT_ACCOUNT'), region=os.getenv('CDK_DEFAULT_REGION')), # Uncomment the next line if you know exactly what Account and Region you # want to deploy the stack to. 
*/ - #env=cdk.Environment(account='123456789012', region='us-east-1'), + #env=cdk.Environment(account='************', region='us-west-2'), # For more information, see https://docs.aws.amazon.com/cdk/latest/guide/environments.html + ) app.synth() diff --git a/1.architectures/4.aws-eks/cluster/cluster_stack.py b/1.architectures/4.aws-eks/cluster/cluster_stack.py index 3f705302..9864b60f 100644 --- a/1.architectures/4.aws-eks/cluster/cluster_stack.py +++ b/1.architectures/4.aws-eks/cluster/cluster_stack.py @@ -3,8 +3,16 @@ Stack, # aws_sqs as sqs, ) + from constructs import Construct +import aws_cdk.aws_eks as eks +import aws_cdk.aws_ec2 as ec2 +import aws_cdk.aws_iam as iam +import sys +sys.path.append('../') +import config + class ClusterStack(Stack): def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: @@ -17,3 +25,24 @@ def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: # self, "ClusterQueue", # visibility_timeout=Duration.seconds(300), # ) + + # EKS Cluster example: one-liner with default node group + #cluster = eks.Cluster(self,"HelloEKS", version=eks.KubernetesVersion.V1_27, default_capacity=2, default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.M5,ec2.InstanceSize.SMALL)) + + # Lookup VPC + #my_vpc = ec2.Vpc.from_lookup(self,"VPC",vpc_id="vpc-*****************") + my_vpc = ec2.Vpc.from_lookup(self,"VPC",vpc_name=config.vpc_name) + + # Role to access cluster + admin_role = iam.Role(self, id=config.eks_master_role_name, role_name=config.eks_master_role_name, assumed_by=iam.AccountRootPrincipal(), description="Role to allow admin access to EKS cluster") + + # EKS Cluster example: separate cluster and custom nodegroup creation + cluster = eks.Cluster(self, id=config.eks_cluster_name, cluster_name=config.eks_cluster_name, version=config.eks_kubernetes_version, default_capacity=0, vpc=my_vpc, masters_role=admin_role, output_cluster_name=True,output_config_command=True, output_masters_role_arn=True ) + 
cluster.add_nodegroup_capacity("sys-node-group", + instance_types=[ec2.InstanceType(config.eks_sys_ng_instance_type)], + min_size=config.eks_sys_ng_min_size, + desired_size=config.eks_sys_ng_desired_size, + max_size=config.eks_sys_ng_max_size, + disk_size=config.eks_sys_ng_disk_size, + #ami_type=config.eks_sys_ng_ami_type + ) diff --git a/1.architectures/4.aws-eks/config.py b/1.architectures/4.aws-eks/config.py new file mode 100644 index 00000000..7664a7dc --- /dev/null +++ b/1.architectures/4.aws-eks/config.py @@ -0,0 +1,12 @@ +import aws_cdk.aws_eks as eks + +vpc_name="ML EKS VPC" +eks_cluster_name="eks-cdk" +eks_master_role_name="EKSMaster" +eks_sys_ng_instance_type="m5.large" +eks_sys_ng_disk_size=50 +eks_sys_ng_min_size=1 +eks_sys_ng_desired_size=2 +eks_sys_ng_max_size=10 +#eks_sys_ng_ami_type=eks.NodegroupAmiType.AL2_X86_64 +eks_kubernetes_version=eks.KubernetesVersion.V1_27 \ No newline at end of file From ff70be811363f200ad09c94ac935a5a4c3e0c48f Mon Sep 17 00:00:00 2001 From: Alex Iankoulski Date: Mon, 11 Sep 2023 22:54:26 -0700 Subject: [PATCH 068/648] CDK app with configurable EKS cluster stack --- 1.architectures/4.aws-eks/.gitignore | 1 + 1.architectures/4.aws-eks/README.md | 31 ++++++++++++++----- 1.architectures/4.aws-eks/app.py | 6 ++-- .../4.aws-eks/cluster/cluster_stack.py | 29 +++++++++++++++++ 1.architectures/4.aws-eks/config.py | 12 +++++++ 5 files changed, 68 insertions(+), 11 deletions(-) create mode 100644 1.architectures/4.aws-eks/config.py diff --git a/1.architectures/4.aws-eks/.gitignore b/1.architectures/4.aws-eks/.gitignore index 37833f8b..454fac8b 100644 --- a/1.architectures/4.aws-eks/.gitignore +++ b/1.architectures/4.aws-eks/.gitignore @@ -8,3 +8,4 @@ __pycache__ # CDK asset staging directory .cdk.staging cdk.out +cdk.context.json diff --git a/1.architectures/4.aws-eks/README.md b/1.architectures/4.aws-eks/README.md index c4aa366b..be3fcf2b 100644 --- a/1.architectures/4.aws-eks/README.md +++ b/1.architectures/4.aws-eks/README.md @@ 
-2,14 +2,16 @@ # Welcome to your CDK Python project! This is a project for CDK development with Python. The `cdk.json` file tells the CDK Toolkit how to execute your app. +The `config.py` file configures an existing VPC and the EKS cluster to create in it. # Prerequisites -1. AWS CLI -2. Python 3.8 -3. npm -4. npm install -g aws-cdk +1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) +2. [Python 3.8 or greater](https://www.python.org/downloads/) +3. [npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) +4. CDK Toolkit: `npm install -g aws-cdk` +5. [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) -# Project +# Project setup This project is set up like a standard Python project. The initialization process also creates a virtualenv within this project, stored under the `.venv` @@ -57,7 +59,7 @@ To add additional dependencies, for example other CDK libraries, just add them to your `setup.py` file and rerun the `pip install -r requirements.txt` command. -## Useful commands +# Useful commands * `cdk ls` list all stacks in the app * `cdk synth` emits the synthesized CloudFormation template @@ -65,8 +67,21 @@ command. * `cdk diff` compare deployed stack with current state * `cdk docs` open CDK documentation -# References +# Project use + +1. Configure existing VPC name and specify desired EKS cluster settings by editing `./config.py` +2. Configure AWS CLI: `aws configure` +3. `export CDK_DEFAULT_ACCOUNT=` +4. `export CDK_DEFAULT_REGION=` +5. Execute `cdk synth` +6. Execute `cdk deploy --require-approval never` +7. Tag VPC public and private subnets as instructed by deployment log if needed +8. 
Upon successful creation, copy and execute the displayed aws command to update the cluster kubeconfig + +If the cluster was created successfully, you will see the cluster nodes you specified by executing the following command: +`kubectl get nodes` +# References * [CDK v2 Documentation](https://docs.aws.amazon.com/cdk/v2/guide/home.html) * [Getting started with CDK](https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html) * [CDK examples](https://github.com/aws-samples/aws-cdk-examples/tree/master/typescript/eks/cluster) @@ -74,4 +89,4 @@ command. * [CDK API reference EKS quick start](https://docs.aws.amazon.com/cdk/api/v2/docs/aws-cdk-lib.aws_eks-readme.html#quick-start) * [CDK Python API for EKS](https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_eks/Cluster.html) * [CDK for Kubernetes (cdk8s)](https://cdk8s.io/) - +* [CDK Workshop Lab - build EKS cluster](https://catalog.us-east-1.prod.workshops.aws/workshops/c15012ac-d05d-46b1-8a4a-205e7c9d93c9/en-US/40-deploy-clusters) diff --git a/1.architectures/4.aws-eks/app.py b/1.architectures/4.aws-eks/app.py index 184d0ac9..539d3647 100644 --- a/1.architectures/4.aws-eks/app.py +++ b/1.architectures/4.aws-eks/app.py @@ -5,7 +5,6 @@ from cluster.cluster_stack import ClusterStack - app = cdk.App() ClusterStack(app, "ClusterStack", # If you don't specify 'env', this stack will be environment-agnostic. @@ -15,14 +14,15 @@ # Uncomment the next line to specialize this stack for the AWS Account # and Region that are implied by the current CLI configuration. - #env=cdk.Environment(account=os.getenv('CDK_DEFAULT_ACCOUNT'), region=os.getenv('CDK_DEFAULT_REGION')), + env=cdk.Environment(account=os.getenv('CDK_DEFAULT_ACCOUNT'), region=os.getenv('CDK_DEFAULT_REGION')), # Uncomment the next line if you know exactly what Account and Region you # want to deploy the stack to. 
*/ - #env=cdk.Environment(account='123456789012', region='us-east-1'), + #env=cdk.Environment(account='************', region='us-west-2'), # For more information, see https://docs.aws.amazon.com/cdk/latest/guide/environments.html + ) app.synth() diff --git a/1.architectures/4.aws-eks/cluster/cluster_stack.py b/1.architectures/4.aws-eks/cluster/cluster_stack.py index 3f705302..9864b60f 100644 --- a/1.architectures/4.aws-eks/cluster/cluster_stack.py +++ b/1.architectures/4.aws-eks/cluster/cluster_stack.py @@ -3,8 +3,16 @@ Stack, # aws_sqs as sqs, ) + from constructs import Construct +import aws_cdk.aws_eks as eks +import aws_cdk.aws_ec2 as ec2 +import aws_cdk.aws_iam as iam +import sys +sys.path.append('../') +import config + class ClusterStack(Stack): def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: @@ -17,3 +25,24 @@ def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: # self, "ClusterQueue", # visibility_timeout=Duration.seconds(300), # ) + + # EKS Cluster example: one-liner with default node group + #cluster = eks.Cluster(self,"HelloEKS", version=eks.KubernetesVersion.V1_27, default_capacity=2, default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.M5,ec2.InstanceSize.SMALL)) + + # Lookup VPC + #my_vpc = ec2.Vpc.from_lookup(self,"VPC",vpc_id="vpc-*****************") + my_vpc = ec2.Vpc.from_lookup(self,"VPC",vpc_name=config.vpc_name) + + # Role to access cluster + admin_role = iam.Role(self, id=config.eks_master_role_name, role_name=config.eks_master_role_name, assumed_by=iam.AccountRootPrincipal(), description="Role to allow admin access to EKS cluster") + + # EKS Cluster example: separate cluster and custom nodegroup creation + cluster = eks.Cluster(self, id=config.eks_cluster_name, cluster_name=config.eks_cluster_name, version=config.eks_kubernetes_version, default_capacity=0, vpc=my_vpc, masters_role=admin_role, output_cluster_name=True,output_config_command=True, output_masters_role_arn=True ) + 
cluster.add_nodegroup_capacity("sys-node-group", + instance_types=[ec2.InstanceType(config.eks_sys_ng_instance_type)], + min_size=config.eks_sys_ng_min_size, + desired_size=config.eks_sys_ng_desired_size, + max_size=config.eks_sys_ng_max_size, + disk_size=config.eks_sys_ng_disk_size, + #ami_type=config.eks_sys_ng_ami_type + ) diff --git a/1.architectures/4.aws-eks/config.py b/1.architectures/4.aws-eks/config.py new file mode 100644 index 00000000..7664a7dc --- /dev/null +++ b/1.architectures/4.aws-eks/config.py @@ -0,0 +1,12 @@ +import aws_cdk.aws_eks as eks + +vpc_name="ML EKS VPC" +eks_cluster_name="eks-cdk" +eks_master_role_name="EKSMaster" +eks_sys_ng_instance_type="m5.large" +eks_sys_ng_disk_size=50 +eks_sys_ng_min_size=1 +eks_sys_ng_desired_size=2 +eks_sys_ng_max_size=10 +#eks_sys_ng_ami_type=eks.NodegroupAmiType.AL2_X86_64 +eks_kubernetes_version=eks.KubernetesVersion.V1_27 \ No newline at end of file From 50685926a2d5312ab1cd0af54f145b74f20b35d0 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Wed, 13 Sep 2023 06:05:40 +0000 Subject: [PATCH 069/648] nemo-launcher: show example of success job; remove data preprocessing --- 3.test_cases/2.nemo-launcher/README.md | 20 +++++++++++++++++- .../gpt3/download_gpt3_pile.yaml | 21 ------------------- 2 files changed, 19 insertions(+), 22 deletions(-) delete mode 100644 3.test_cases/2.nemo-launcher/conf.template/data_preparation/gpt3/download_gpt3_pile.yaml diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index 7652ae19..134e915f 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -177,7 +177,25 @@ $TARGET_PATH/results-/gpt3_126m/ └── results ├── cmd-args.log ├── ... - └── nemo_log_globalrank-*.txt\ + └── nemo_log_globalrank-*.txt +``` + +You can use Slurm command `squeue` to monitor the job progress. Sample output below shows a `nemo-megatron` job with job id `1234` is in running state (`ST` = `R`). 
A queued job will have state `ST` = `PD` (pending). Please refer to the complete list of job states in this [Slurm documentation](https://slurm.schedmd.com/squeue.html#SECTION_JOB-STATE-CODES).
+
+```text
+JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON)
+ 1234 my-cluste nemo-mega ec2-user R 00:19:40 1 p4de-dy-p4de-24xlarge-[1-2]
+```
+
+Once a job finishes, check the `log-nemo-megatron-__.err`, and see that it contains ``Trainer.fit` stopped: `max_steps=40` reached``.
+
+```console
+$ tail -5 $TARGET_PATH/results/gpt3_126m/log-nemo-megatron-gpt3_126m_72.err
+
+[NeMo W 2023-09-11 22:31:45 nemo_logging:349] /usr/local/lib/python3.8/dist-packages/pytorch_lightning/trainer/connectors/logger_connector/result.py:232: UserWarning: You called `self.log('consumed_samples', ...)` in your `training_step` but the value needs to be floating point. Converting it to torch.float32.
+ warning_cache.warn(
+
+`Trainer.fit` stopped: `max_steps=40` reached.
 ```
 
 Congratulations! You've successfully run this test case to completion.
diff --git a/3.test_cases/2.nemo-launcher/conf.template/data_preparation/gpt3/download_gpt3_pile.yaml b/3.test_cases/2.nemo-launcher/conf.template/data_preparation/gpt3/download_gpt3_pile.yaml
deleted file mode 100644
index c63d6b21..00000000
--- a/3.test_cases/2.nemo-launcher/conf.template/data_preparation/gpt3/download_gpt3_pile.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-run:
-  name: download_gpt3_pile
-  results_dir: ${base_results_dir}/${.name}
-  time_limit: "4:00:00"
-  dependency: "singleton"
-  node_array_size: 30
-  array: ${..file_numbers}
-  bcp_preproc_npernode: 2 # 2 should be safe to use and x2 times faster.
-
-dataset: pile
-download_the_pile: True # Whether to download the pile dataset from the internet.
-the_pile_url: "https://the-eye.eu/public/AI/pile/train/" # Source URL to download The Pile dataset from.
-file_numbers: "0-29" # The pile dataset consists of 30 files (0-29), choose which ones to download.
-preprocess_data: True # True to preprocess the data from a jsonl file, False otherwise. -download_vocab_url: "https://huggingface.co/gpt2/resolve/main/vocab.json" # URL to download the vocab from. -download_merges_url: "https://huggingface.co/gpt2/resolve/main/merges.txt" # URL to download the merges from. -vocab_save_dir: ${data_dir}/bpe -merges_save_dir: ${data_dir}/bpe -tokenizer_type: GPT2BPETokenizer -rm_downloaded: True # Extract script will remove downloaded zst after extraction -rm_extracted: True # Preprocess script will remove extracted files after preproc. From 71b11d968419c7721c84d62db24de4928f1ac6cc Mon Sep 17 00:00:00 2001 From: Verdi March Date: Wed, 13 Sep 2023 08:38:11 +0000 Subject: [PATCH 070/648] Example of gpt-5b; improve docs on nemo launcher mechanics --- .../2.nemo-launcher/1.bmk-pretrain-gpt3.sh | 68 +++++++++++++++---- 3.test_cases/2.nemo-launcher/README.md | 25 ++++--- .../bmk-pretrain-gpt3-126m2.sh | 2 + .../2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh | 2 + 4 files changed, 74 insertions(+), 23 deletions(-) diff --git a/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh b/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh index bbf4678a..55d55b3a 100755 --- a/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh +++ b/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh @@ -3,20 +3,45 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -set -exuo pipefail - -: "${MODEL:=gpt3}" -: "${MODEL_SIZE:=5b}" -: "${NUM_NODES:=8}" -: "${RUNTIME:=4h}" -: "${MAX_STEPS:=5}" -: "${TARGET_PATH:=/fsx/nemo-launcher-23.07}" -WORKSPACE_CONT=$TARGET_PATH -CONT_RESULT_DIR=${WORKSPACE_CONT}/results -CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe +set -exo pipefail +[[ -z "${TARGET_PATH}" ]] \ + && { echo Please set environment variable TARGET_PATH ; exit 1 ; } \ + || echo TARGET_PATH=$TARGET_PATH +set -u + + +################################################################################ +# 000: Modify this section to define pre-training configuration: model size, +# number of nodes, max. pre-training steps, job's max. runtime. +################################################################################ +## Pre-train gpt3-126m on 2 nodes for 40 steps. +export MODEL_SIZE=126m +export NUM_NODES=2 +export RUNTIME=30m +export MAX_STEPS=40 +declare -a MODEL_ARGS=() + +## Uncomment below to pre-train gpt3-5b on 2 nodes for 5 steps +# export MODEL=gpt3 +# export MODEL_SIZE=5b +# export NUM_NODES=2 +# export RUNTIME=4h +# export MAX_STEPS=5 +## When node_count < 8, needs full activations checkpointing. These're settings found on +## Nemo repo's Jenkin script. +## +## Below settings is similar to 22.09, except that 22.09 funnily didn't OOM with +## activations_checkpoint_num_layers=0. +# declare -a MODEL_ARGS=( +# training.model.activations_checkpoint_granularity='full' +# training.model.activations_checkpoint_method='block' +# training.model.activations_checkpoint_num_layers=1 +# ) -: "${UNIQUE_OUTPUT_DIR:=0}" +################################################################################ +# 010: Advance users can modify this stanza to customize benchmarking behavior. +################################################################################ declare -a BMK_ARGS=( # Disable validation, as we're only interested to measure the training time. 
training.trainer.limit_val_batches=0.0 @@ -30,6 +55,16 @@ declare -a BMK_ARGS=( training.model.data.data_prefix=[] ) + +################################################################################ +# 020: Internal settings. +################################################################################ +WORKSPACE_CONT=$TARGET_PATH +CONT_RESULT_DIR=${WORKSPACE_CONT}/results +CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe + +# Dev/test feature (off by default) to force each pre-training run outputs to a separate directory. +: "${UNIQUE_OUTPUT_DIR:=0}" if [[ ${UNIQUE_OUTPUT_DIR} -eq 1 ]]; then # For debugging: each run has its own output dir. TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) @@ -44,11 +79,14 @@ if [[ ${UNIQUE_OUTPUT_DIR} -eq 1 ]]; then " fi -HYDRA_FULL_ERROR=1 python3 /fsx/nemo-launcher-23.07/launcher_scripts/main.py \ + +################################################################################ +# 030: Here we go... +################################################################################ +HYDRA_FULL_ERROR=1 python3 $TARGET_PATH/launcher_scripts/main.py \ stages=[training] \ training=${MODEL}/${MODEL_SIZE} \ training.trainer.num_nodes=$NUM_NODES \ training.trainer.max_steps=$MAX_STEPS \ training.trainer.val_check_interval=$MAX_STEPS \ - "${BMK_ARGS[@]}" \ - "$@" + "${BMK_ARGS[@]}" "${MODEL_ARGS[@]}" "$@" diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index 134e915f..e91c2d67 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -137,16 +137,15 @@ To pre-train a GPT3-126m on two instances with mock dataset, run the following: ```bash cd $TARGET_PATH -$TEST_CASE_PATH/bmk-pretrain-gpt3-126m2.sh +$TEST_CASE_PATH/1.bmk-pretrain-gpt3.sh ``` which results in this execution tree: ```bash -$TEST_CASE_PATH/bmk-pretrain-gpt3-126m.sh -\_ $TEST_CASE_PATH/1.bmk-pretrain-gpt3.sh - \_ $TARGET_PATH/launcher_scripts/main.py - \_ sbatch 
+$TEST_CASE_PATH/1.bmk-pretrain-gpt3.sh
+\_ $TARGET_PATH/launcher_scripts/main.py
+   \_ sbatch
 ```
 
 As can be seen, Nemo-launcher `launcher_scripts/main.py` interacts with Slurm on our behalf to generate an `.sbatch` file and submits it to Slurm. Nemo-launcher logs all the invocation commands, output, and error to `$TARGET_PATH/results//` described below.
@@ -202,9 +201,7 @@ Congratulations! You've successfully run this test case to completion.
 
 ## 6. Customizing Pre-Training
 
-The `$TEST_CASE_PATH` comes with `bmk-pretrain-gpt3-126m2.sh` and `bmk-pretrain-gpt3-5b2.sh` to pre-train 126m and 5b models, respectively, on two instances.
-
-To pre-train a different model size on different instance count, create your own `bmk-pretrain-gpt3-.sh` based on those examples. Please that pre-training LLM requires understanding on the hyperparameters such as parallelism and batches. Please refer to the NeMO project ([website](https://developer.nvidia.com/nemo), [GitHub](https://github.com/NVIDIA/NeMo), [NeMo-Megatron-Launcher](https://github.com/NVIDIA/NeMo-Megatron-Launcher)) and the Megatron papers ([Shoeybi20](https://arxiv.org/abs/1909.08053), [Narayanan21](https://arxiv.org/abs/2104.04473)).
+To pre-train a different model size on different instance count, open `$TEST_CASE_PATH/1.bmk-pretrain-gpt3.sh` and edit section `000` to choose the right hyperparameters. Be aware that pre-training LLM requires understanding of the hyperparameters such as parallelism and batches. Please refer to the NeMO project ([website](https://developer.nvidia.com/nemo), [GitHub](https://github.com/NVIDIA/NeMo), [NeMo-Megatron-Launcher](https://github.com/NVIDIA/NeMo-Megatron-Launcher)) and the Megatron papers ([Shoeybi20](https://arxiv.org/abs/1909.08053), [Narayanan21](https://arxiv.org/abs/2104.04473)).
 
 At the very least, you'd want to review and customize one or more YAML files under `$TARGET_PATH/launcher_scripts/conf/`. Nemo-launcher organizes its config files in an opinionated hierarchy.
Below is an example of relevant YAML files when launching `$TARGET_PATH/launcher_scripts/main.py` for `training` stage for `gpt3/126m` (see `$TEST_CASE_PATH/1.bmk-pretrain-gpt3.sh`).
@@ -217,3 +214,15 @@ $TARGET_PATH/launcher_scripts/conf
 └── gpt3 # Config for model "gpt3"
    └── 126m.yaml # Config for model size "126m"
 ```
+
+You can edit directly the `gpt3/.yaml` to customize the number of instances, tensor parallelism, pipeline parallelism, batch sizes (micro and global), experiment tracking, etc. on this file. Alternatively, you can override the settings through the CLI options of `$TARGET_PATH/launcher_scripts/main.py` (refer to `1.bmk-pretrain-gpt3.sh`). For example, this CLI arg `training.trainer.num_nodes=$NUM_NODES` is equivalent to editing file `$TARGET_PATH/launcher_scripts/training_scripts/conf/training//.yaml` to set key `trainer -> num_nodes` to `$NUM_NODES`.
+
+```text
+ +-- file `training//.yaml` under `$TARGET_PATH/launcher_scripts/conf`
+ |
+/---+--\
+training.trainer.num_nodes=$NUM_NODES
+ \_______________/
+ |
+ └── key 'trainer -> num_nodes' in the `.yaml` file.
+```
diff --git a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh
index 043c8d7d..cbb19b9b 100755
--- a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh
+++ b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh
@@ -3,6 +3,8 @@
 # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0
 
+# This script is meant for advanced users who've gone through README.md
+#
 # Override the default values in the underlying step-02-bmk-pretrain-gpt3.sh script.
 # See that underlying script to learn about the defaults.
diff --git a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh index d164dbc8..44710794 100755 --- a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh +++ b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh @@ -3,6 +3,8 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 +# This script is meant for advance users who've gone through README.md +# # Override the default values in the underlying step-02-bmk-pretrain-gpt3.sh script. # See that underlying script to learn about the defaults. From d691cbb8b87f838c94a246f655c203c5d22b8730 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 13 Sep 2023 09:50:33 -0500 Subject: [PATCH 071/648] Add security patching for AMI and update readme files for PC and AMI --- 1.architectures/2.aws-parallelcluster/README.md | 3 +++ 2.amazon_machine_images/README.md | 1 + 2.amazon_machine_images/roles/aws_efa/tasks/main.yml | 7 ------- 2.amazon_machine_images/roles/nvidia_nccl/tasks/main.yml | 2 +- 2.amazon_machine_images/roles/packages/tasks/main.yml | 6 ++++++ 5 files changed, 11 insertions(+), 8 deletions(-) diff --git a/1.architectures/2.aws-parallelcluster/README.md b/1.architectures/2.aws-parallelcluster/README.md index 66d26575..b24ecf48 100644 --- a/1.architectures/2.aws-parallelcluster/README.md +++ b/1.architectures/2.aws-parallelcluster/README.md @@ -29,6 +29,9 @@ pip3 install awscli # install the AWS CLI pip3 install aws-parallelcluster # then AWS ParallelCluster ``` +> **Note**: you can use virtual environments to test different versions of AWS ParallelCluster by setting the version during the installation. For example to use 3.6.1, change the command `pip3 install aws-parallelcluster==3.6.1`. 
+ + ### Create your EC2 Keypair (if needed) The EC2 key pair enables your to connect to your cluster on the head-node through ssh or [AWS Systems Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-sessions-start.html). We will cover for SSH here. diff --git a/2.amazon_machine_images/README.md b/2.amazon_machine_images/README.md index 305dc778..5c85e608 100644 --- a/2.amazon_machine_images/README.md +++ b/2.amazon_machine_images/README.md @@ -133,3 +133,4 @@ Alternatively, you can add a new role to install a new software component, ensur * For Enroot, we are using shared filesystem (`/fsx`) for container cache, set this accordingly to your cluster in `roles/nvidia_enroot_pyxis/templates/enroot.conf` variable `ENROOT_CACHE_PATH`. * Review variables (dependency versions) in `./roles/*/defaults/main.yml` according to [Ansible directory structure](https://docs.ansible.com/ansible/latest/tips_tricks/sample_setup.html). * These are based upon using the default VPCs found in the account. If this does not exist, the default VPC can be recreated with `aws ec2 create-default-vpc`. +* If packer can't find the AMI with the following message `Error querying AMI: InvalidAMIID.NotFound`, force the region by prepending your `make` command by the region `AWS_REGION=us-east-1`. 
diff --git a/2.amazon_machine_images/roles/aws_efa/tasks/main.yml b/2.amazon_machine_images/roles/aws_efa/tasks/main.yml index d0121812..d3838d42 100644 --- a/2.amazon_machine_images/roles/aws_efa/tasks/main.yml +++ b/2.amazon_machine_images/roles/aws_efa/tasks/main.yml @@ -33,10 +33,3 @@ block: | export LD_PRELOAD=/opt/nccl/build/lib/libnccl.so:/opt/aws-ofi-nccl/lib/libnccl-net.so -# TODO automate -# - name: "Disable ptrace protection" -# ansible.builtin.sysctl: -# name: kernel.yama.ptrace_scop -# value: '0' -# state: present -# reload: true diff --git a/2.amazon_machine_images/roles/nvidia_nccl/tasks/main.yml b/2.amazon_machine_images/roles/nvidia_nccl/tasks/main.yml index 3bdab439..439c61ad 100644 --- a/2.amazon_machine_images/roles/nvidia_nccl/tasks/main.yml +++ b/2.amazon_machine_images/roles/nvidia_nccl/tasks/main.yml @@ -18,7 +18,7 @@ - name: Build NCCL ansible.builtin.shell: | - make -j src.build CUDA_HOME=/usr/local/cuda NVCC_GENCODE='-gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80' + make -j src.build CUDA_HOME=/usr/local/cuda NVCC_GENCODE='-gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_90,code=sm_90' args: chdir: /opt/nccl diff --git a/2.amazon_machine_images/roles/packages/tasks/main.yml b/2.amazon_machine_images/roles/packages/tasks/main.yml index 2efc81b8..e6304af6 100644 --- a/2.amazon_machine_images/roles/packages/tasks/main.yml +++ b/2.amazon_machine_images/roles/packages/tasks/main.yml @@ -8,6 +8,12 @@ update_cache: true when: packages_update +- name: "Update packages for security" + ansible.builtin.yum: + security: yes + state: latest + check_mode: yes + - name: "Reboot system" reboot: when: allow_reboot From 497f4a8d13a72ac3e4dac0a84c43c612cecc7bc7 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 13 Sep 2023 17:15:26 -0500 Subject: [PATCH 072/648] Update documentation and test structure --- 
...in-gpt3.sh => 1.bmk-pretrain-gpt3-126m.sh} | 21 +-- .../2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh | 83 +++++++++++ 3.test_cases/2.nemo-launcher/README.md | 134 ++++++++---------- .../bmk-pretrain-gpt3-126m2.sh | 20 --- .../2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh | 26 ---- .../2.nemo-launcher/build-enroot-image.sh | 20 --- 6 files changed, 145 insertions(+), 159 deletions(-) rename 3.test_cases/2.nemo-launcher/{1.bmk-pretrain-gpt3.sh => 1.bmk-pretrain-gpt3-126m.sh} (80%) create mode 100755 3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh delete mode 100755 3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh delete mode 100755 3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh delete mode 100755 3.test_cases/2.nemo-launcher/build-enroot-image.sh diff --git a/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh b/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh similarity index 80% rename from 3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh rename to 3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh index 55d55b3a..3e134048 100755 --- a/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3.sh +++ b/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh @@ -7,8 +7,6 @@ set -exo pipefail [[ -z "${TARGET_PATH}" ]] \ && { echo Please set environment variable TARGET_PATH ; exit 1 ; } \ || echo TARGET_PATH=$TARGET_PATH -set -u - ################################################################################ # 000: Modify this section to define pre-training configuration: model size, @@ -19,24 +17,7 @@ export MODEL_SIZE=126m export NUM_NODES=2 export RUNTIME=30m export MAX_STEPS=40 -declare -a MODEL_ARGS=() - -## Uncomment below to pre-train gpt3-5b on 2 nodes for 5 steps -# export MODEL=gpt3 -# export MODEL_SIZE=5b -# export NUM_NODES=2 -# export RUNTIME=4h -# export MAX_STEPS=5 -## When node_count < 8, needs full activations checkpointing. These're settings found on -## Nemo repo's Jenkin script. 
-## -## Below settings is similar to 22.09, except that 22.09 funnily didn't OOM with -## activations_checkpoint_num_layers=0. -# declare -a MODEL_ARGS=( -# training.model.activations_checkpoint_granularity='full' -# training.model.activations_checkpoint_method='block' -# training.model.activations_checkpoint_num_layers=1 -# ) +export MODEL=gpt3 ################################################################################ diff --git a/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh b/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh new file mode 100755 index 00000000..a75a7e6b --- /dev/null +++ b/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh @@ -0,0 +1,83 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -exo pipefail +[[ -z "${TARGET_PATH}" ]] \ + && { echo Please set environment variable TARGET_PATH ; exit 1 ; } \ + || echo TARGET_PATH=$TARGET_PATH + +################################################################################ +# 000: Modify this section to define pre-training configuration: model size, +# number of nodes, max. pre-training steps, job's max. runtime. +################################################################################ +## Pre-train gpt3-5b on 2 nodes for 5 steps +export MODEL=gpt3 +export MODEL_SIZE=5b +export NUM_NODES=2 +export RUNTIME=4h +export MAX_STEPS=5 +# When node_count < 8, needs full activations checkpointing. These're settings found on +# Nemo repo's Jenkin script. + +# Below settings is similar to 22.09, except that 22.09 funnily didn't OOM with +# activations_checkpoint_num_layers=0. 
+declare -a MODEL_ARGS=( + training.model.activations_checkpoint_granularity='full' + training.model.activations_checkpoint_method='block' + training.model.activations_checkpoint_num_layers=1 +) + + +################################################################################ +# 010: Advance users can modify this stanza to customize benchmarking behavior. +################################################################################ +declare -a BMK_ARGS=( + # Disable validation, as we're only interested to measure the training time. + training.trainer.limit_val_batches=0.0 + + # Ignore checkpoints + training.exp_manager.create_checkpoint_callback=False + training.exp_manager.resume_if_exists=False + + # https://github.com/NVIDIA/NeMo/pull/6181/files + training.model.data.data_impl=mock + training.model.data.data_prefix=[] +) + + +################################################################################ +# 020: Internal settings. +################################################################################ +WORKSPACE_CONT=$TARGET_PATH +CONT_RESULT_DIR=${WORKSPACE_CONT}/results +CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe + +# Dev/test feature (off by default) to force each pre-training run outputs to a separate directory. +: "${UNIQUE_OUTPUT_DIR:=0}" +if [[ ${UNIQUE_OUTPUT_DIR} -eq 1 ]]; then + # For debugging: each run has its own output dir. + TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) + CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} + + BMK_ARGS+=(base_results_dir=${CONT_RESULT_DIR}) + + echo " + #################### + This run will write to directory ${CONT_RESULT_DIR} + #################### + " +fi + + +################################################################################ +# 030: Here we go... 
+################################################################################ +HYDRA_FULL_ERROR=1 python3 $TARGET_PATH/launcher_scripts/main.py \ + stages=[training] \ + training=${MODEL}/${MODEL_SIZE} \ + training.trainer.num_nodes=$NUM_NODES \ + training.trainer.max_steps=$MAX_STEPS \ + training.trainer.val_check_interval=$MAX_STEPS \ + "${BMK_ARGS[@]}" "${MODEL_ARGS[@]}" "$@" diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index e91c2d67..108ae7ab 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -25,7 +25,6 @@ export TAG=$NEMO_VERSION-py3 export TARGET_PATH=/fsx/nemo-launcher-$NEMO_VERSION # must be a shared filesystem export TEST_CASE_PATH=/home/ec2-user/2.nemo-launcher # where you copy the test case or set to your test case path export ENROOT_IMAGE=/apps/${REPO}_${TAG}.sqsh - cd $TEST_CASE_PATH ``` @@ -125,83 +124,72 @@ That's all needed to pre-train with a mock dataset generated on-the-fly. ## 5. Pre-training GPT3 -This section assumes that you went through the previous sections and 1/ retrieved and built the AWS optimized NemoMegatron container, 2/ setup the NemoMegatron environment, and 3/ download the vocabularies. - -To start pre-training, source the NemoMegatron environment: - -```bash -source ${TARGET_PATH}/.venv/bin/activate -``` - -To pre-train a GPT3-126m on two instances with mock dataset, run the following: - -```bash -cd $TARGET_PATH -$TEST_CASE_PATH/1.bmk-pretrain-gpt3.sh -``` - -which results in this execution tree: - -```bash -$TEST_CASE_PATH/1.bmk-pretrain-gpt3.sh -\_ $TARGET_PATH/launcher_scripts/main.py - \_ sbatch -``` +This section assumes that you went through the previous sections and 1/ retrieved and built the AWS optimized NemoMegatron container, 2/ setup the NemoMegatron environment, and 3/ download the vocabularies. Here you start a pre-training on a small model of 126M parameters, this serves as a quick sanity check. + +1. 
Source the NemoMegatron environment created earlier. + ```bash + source ${TARGET_PATH}/.venv/bin/activate + ``` +2. To pre-train a GPT3-126m on two instances with mock dataset, run the commands below to let : + ```bash + cd $TARGET_PATH + $TEST_CASE_PATH/1.bmk-pretrain-gpt3-126m.sh + ``` +3. Check the file `$TARGET_PATH/launcher_scripts/main.py`. The `launcher_scripts/main.py` interacts with Slurm on our behalf to generate an `.sbatch` file and submits it to Slurm. Nemo-launcher logs all the invocation commands, output, and error to `$TARGET_PATH/results//` described below. + ```bash + $TARGET_PATH/results/gpt3_126m + ├── gpt3_126m_hydra.yaml # The fully interpolated pre-training configuration + ├── launcher_cmd.log # The full invocation command of launcher_scripts/main.py + ├── launcher.log # Job id produced by the sbatch command + ├── log-nemo-megatron-gpt3_126m_.out # Stdout of the pre-training Slurm job + ├── nemo-megatron-gpt3_126m_submission.sh # .sbatch file generated and submitted by nemo-launcher + └── results + ├── cmd-args.log # The full invocation command of the pre-training script + ├── events.out.tfevents.* # Tensorboard logs + ├── git-info.log # The commit hash of the NeMO repo provided in the container. + ├── hparams.yaml # Pre-training hyperparameters + ├── lightning_logs.txt # Additional logs from PyTorch-Lightning + ├── nemo_error_log.txt # Stderr of pre-training step + └── nemo_log_globalrank-*.txt # Log of each rank + ``` + Please note that except for `log-nemo-megatron-gpt3_126m_.out`, the other files will be overridden when you launch another pre-training of that same model size. To completely separate the output among jobs, edit `TEST_CASE_PATH/bmk-pretrain-gpt3-126m.sh` and uncomment the `#export UNIQUE_OUTPUT_DIR=1` line to produce this output dir instead: + +4. You can use Slurm command `squeue` to monitor the job status in the queue. The ample output below shows a `nemo-megatron` job with job id `1234` is in running state (`ST` = `R`). 
A queued job will have state `ST` = `PD` (pending). Please refer to the complete of job states in this [Slurm documentation](https://slurm.schedmd.com/squeue.html#SECTION_JOB-STATE-CODES). + + ```text + JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON) + 1234 my-cluste nemo-mega ec2-user R 00:19:40 1 p4de-dy-p4de-24xlarge-[1-2] + ``` +5. Once a job finishes, check the `log-nemo-megatron-__.err`, and see it should contains ``Trainer.fit` stopped: `max_steps=40` reached`` (disregard the warnings). + ```console + $ tail -5 $TARGET_PATH/results/gpt3_126m/log-nemo-megatron-gpt3_126m_72.err + + [NeMo W 2023-09-11 22:31:45 nemo_logging:349] /usr/local/lib/python3.8/dist-packages/pytorch_lightning/trainer/connectors/logger_connector/result.py:232: UserWarning: You called `self.log('consumed_samples', ...)` in your `training_step` but the value needs to be floating point. Converting it to torch.float32. + warning_cache.warn( + + `Trainer.fit` stopped: `max_steps=40` reached. + ``` +6. Review the output file (`log-nemo-megatron-gpt3_126m_.out`) which contains the `stdout` output of the job. The end of the file should be similar to the snippet below + ```console + [NeMo I 2023-09-11 22:31:28 lr_scheduler:910] Scheduler "" + will be used during training (effective maximum steps = 40) - + Parameters : + (warmup_steps: 636 + constant_steps: 100000 + min_lr: 6.0e-05 + max_steps: 40 + ) + Epoch 0: 100%|██████████| 40/40 [00:31<00:00, 1.27it/s, loss=10.9, v_num=, reduced_train_loss=10.90, global_step=39.00, consumed_samples=9984.0] + ``` -As can be seen, Nemo-launcher `launcher_scripts/main.py` interacts with Slurm on our behalf to generate an `.sbatch` file and submits it to Slurm. Nemo-launcher logs all the invocation commands, output, and error to `$TARGET_PATH/results//` described below. 
- -```bash -$TARGET_PATH/results/gpt3_126m -├── gpt3_126m_hydra.yaml # The fully interpolated pre-training configuration -├── launcher_cmd.log # The full invocation command of launcher_scripts/main.py -├── launcher.log # Job id produced by the sbatch command -├── log-nemo-megatron-gpt3_126m_.out # Stdout of the pre-training Slurm job -├── nemo-megatron-gpt3_126m_submission.sh # .sbatch file generated and submitted by nemo-launcher -└── results - ├── cmd-args.log # The full invocation command of the pre-training script - ├── events.out.tfevents.* # Tensorboard logs - ├── git-info.log # The commit hash of the NeMO repo provided in the container. - ├── hparams.yaml # Pre-training hyperparameters - ├── lightning_logs.txt # Additional logs from PyTorch-Lightning - ├── nemo_error_log.txt # Stderr of pre-training step - └── nemo_log_globalrank-*.txt # Log of each rank -``` - -Please note that except for `log-nemo-megatron-gpt3_126m_.out`, the other files will be overridden when you launch another pre-training of that same model size. To completely separate the output among jobs, edit `TEST_CASE_PATH/bmk-pretrain-gpt3-126m.sh` and uncomment the `#export UNIQUE_OUTPUT_DIR=1` line to produce this output dir instead: - -```bash -$TARGET_PATH/results-/gpt3_126m/ -├── gpt3_126m_hydra.yaml -├── ... -└── results - ├── cmd-args.log - ├── ... - └── nemo_log_globalrank-*.txt -``` - -You can use Slurm command `squeue` to monitor the job progress. Sample output below shows a `nemo-megatron` job with job id `1234` is in running state (`ST` = `R`). A queued job will have state `ST` = `PD` (pending). Please refer to the complete of job states in this [Slurm documentation](https://slurm.schedmd.com/squeue.html#SECTION_JOB-STATE-CODES). 
- -```text -JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON) - 1234 my-cluste nemo-mega ec2-user R 00:19:40 1 p4de-dy-p4de-24xlarge-[1-2] -``` - -Once a job finishes, check the `log-nemo-megatron-__.err`, and see it should contains ``Trainer.fit` stopped: `max_steps=40` reached``. - -```console -$ tail -5 $TARGET_PATH/results/gpt3_126m/log-nemo-megatron-gpt3_126m_72.err - -[NeMo W 2023-09-11 22:31:45 nemo_logging:349] /usr/local/lib/python3.8/dist-packages/pytorch_lightning/trainer/connectors/logger_connector/result.py:232: UserWarning: You called `self.log('consumed_samples', ...)` in your `training_step` but the value needs to be floating point. Converting it to torch.float32. - warning_cache.warn( +Congratulations! You've successfully run this test case to completion. -`Trainer.fit` stopped: `max_steps=40` reached. -``` +> **Note**: Execute 2 -Congratulations! You've successfully run this test case to completion. -## 6. Customizing Pre-Training +## 7. Customizing Pre-Training -To pre-train a different model size on different instance count, open `$TEST_CASE_PATH/1.bmk-pretrain-gpt3.sh` and edit section `000` to choose the right hyperparameters. Be aware that pre-training LLM requires understanding on the hyperparameters such as parallelism and batches. Please refer to the NeMO project ([website](https://developer.nvidia.com/nemo), [GitHub](https://github.com/NVIDIA/NeMo), [NeMo-Megatron-Launcher](https://github.com/NVIDIA/NeMo-Megatron-Launcher)) and the Megatron papers ([Shoeybi20](https://arxiv.org/abs/1909.08053), [Narayanan21](https://arxiv.org/abs/2104.04473)). +To pre-train for a different model size on different instance count, open `$TEST_CASE_PATH/1.bmk-pretrain-gpt3-126m.sh` and edit section `000` to choose the right hyperparameters. Be aware that pre-training LLM requires understanding on the hyperparameters such as parallelism and batches. 
Please refer to the NeMO project ([website](https://developer.nvidia.com/nemo), [GitHub](https://github.com/NVIDIA/NeMo), [NeMo-Megatron-Launcher](https://github.com/NVIDIA/NeMo-Megatron-Launcher)) and the Megatron papers ([Shoeybi20](https://arxiv.org/abs/1909.08053), [Narayanan21](https://arxiv.org/abs/2104.04473)). At the very least, you'd want to review and customize one or more YAML files under `$TARGET_PATH/launcher_scripts/conf/`. Nemo-launcher organizes its config files in an opinionated hierarchy. Below is an example of relevant YAML files when launching `$TARGET_PATH/launcher_scripts/main.py` for `training` stage for `gpt3/126m` (see `$TEST_CASE_PATH/1.bmk-pretrain-gpt3.sh`). diff --git a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh deleted file mode 100755 index cbb19b9b..00000000 --- a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-126m2.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 - -# This script is meant for advance users who've gone through README.md -# -# Override the default values in the underlying step-02-bmk-pretrain-gpt3.sh script. -# See that underlying script to learn about the defaults. - -export MODEL_SIZE=126m -export NUM_NODES=2 -export RUNTIME=30m -export MAX_STEPS=40 -#export UNIQUE_OUTPUT_DIR=1 - -BIN_DIR=$(dirname `readlink -e ${BASH_SOURCE[0]}`) - -# Node_count == 2 can work without full activations checkpointing. -$BIN_DIR/1.bmk-pretrain-gpt3.sh diff --git a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh b/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh deleted file mode 100755 index 44710794..00000000 --- a/3.test_cases/2.nemo-launcher/bmk-pretrain-gpt3-5b2.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 - -# This script is meant for advance users who've gone through README.md -# -# Override the default values in the underlying step-02-bmk-pretrain-gpt3.sh script. -# See that underlying script to learn about the defaults. - -export NUM_NODES=2 -export RUNTIME=30m -export MAX_STEPS=20 -#export UNIQUE_OUTPUT_DIR=1 - -BIN_DIR=$(dirname `readlink -e ${BASH_SOURCE[0]}`) - -# When node_count < 8, needs full activations checkpointing. These're settings found on -# Nemo repo's Jenkin script. -# -# Below settings is similar to 22.09, except that 22.09 funnily didn't OOM with -# activations_checkpoint_num_layers=0. -$BIN_DIR/1.bmk-pretrain-gpt3.sh \ - training.model.activations_checkpoint_granularity='full' \ - training.model.activations_checkpoint_method='block' \ - training.model.activations_checkpoint_num_layers=1 diff --git a/3.test_cases/2.nemo-launcher/build-enroot-image.sh b/3.test_cases/2.nemo-launcher/build-enroot-image.sh deleted file mode 100755 index 8fe0ff56..00000000 --- a/3.test_cases/2.nemo-launcher/build-enroot-image.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 - -REPO=aws-nemo-megatron -TAG=23.07-py3 - -# EC2 instance: us-west-2, EBS: gp3, 3k IOPS, 350 MB/s throughput. -# Time: ~3min -#/usr/bin/time docker pull nvcr.io/ea-bignlp/nemofw-training:$TAG - -# EC2 instance: m5.4xlarge, EBS: gp3, 3k IOPS, 350 MB/s throughput. -# Time: ~6min -docker build --progress plain -t ${REPO}:${TAG} -f 0.NemoMegatron-aws-optimized.Dockerfile . - -# On m5.8xlarge (32 vcpu). /fsx is FSxL 1.2TB configured with 500 MB/s/TB throughput. 
-IMAGE=/apps/${REPO}_${TAG}.sqsh ; [[ -e $IMAGE ]] && rm $IMAGE -/usr/bin/time enroot import -o $IMAGE dockerd://${REPO}:${TAG} -# 25.09user 102.21system 2:17.85elapsed 92%CPU (0avgtext+0avgdata 17450056maxresident)k From 545e83a137bbd97ca438fe6e3e559b49a1dd83cd Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 13 Sep 2023 17:16:29 -0500 Subject: [PATCH 073/648] Remove bootstrapping script for nemo --- .../2.nemo-launcher/0.bootstrap-launcher.sh | 26 ------------------- 1 file changed, 26 deletions(-) delete mode 100755 3.test_cases/2.nemo-launcher/0.bootstrap-launcher.sh diff --git a/3.test_cases/2.nemo-launcher/0.bootstrap-launcher.sh b/3.test_cases/2.nemo-launcher/0.bootstrap-launcher.sh deleted file mode 100755 index ffe4b879..00000000 --- a/3.test_cases/2.nemo-launcher/0.bootstrap-launcher.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 - -# Based on https://github.com/NVIDIA/NeMo-Megatron-Launcher/tree/23.05#5111-slurm - -set -exuo pipefail - -: "${NEMO_VERSION:=23.07}" -: "${REPO:=aws-nemo-megatron}" -: "${TAG:=$NEMO_VERSION-py3}" -: "${ENROOT_IMAGE:=/apps/${REPO}_${TAG}.sqsh}" -: "${TARGET_PATH:=/fsx/nemo-launcher-$NEMO_VERSION}" # must be a shared filesystem - -srun -N 1 \ - --container-mounts=$TARGET_PATH:/workspace/mount_dir \ - --container-image=$ENROOT_IMAGE \ - bash -c "cp -a /opt/NeMo-Megatron-Launcher/launcher_scripts /opt/NeMo-Megatron-Launcher/auto_configurator /opt/FasterTransformer /workspace/mount_dir/" - -cd $TARGET_PATH -/usr/bin/python3.8 -m venv .venv -source $TARGET_PATH/.venv/bin/activate -curl -LO https://raw.githubusercontent.com/NVIDIA/NeMo-Megatron-Launcher/$NEMO_VERSION/requirements.txt -pip3.8 install --upgrade pip setuptools -pip3.8 install -r requirements.txt From 76165b85a7d1dc8f6012352092717d037121f414 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 13 Sep 2023 17:22:13 -0500 Subject: [PATCH 
074/648] Add contributors to readme --- README.md | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index c65b5f12..e5074766 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,6 @@ reference-architectures/ **NOTE**: the architectures are designed to work with the S3 bucket and VPC created using reference templates `1.architectures/0.s3/` and `1.architectures/1.vpc_network/`. _You're strongly recommended to deploy these two templates **before** deploying any of the reference architectures._ - ## 1. Architectures Architectures are located in `1.architectures` and consists of utilities and service related architectures @@ -42,3 +41,17 @@ All test cases are under `3.test_cases/`. You can go in each test case directory | `2.nemo-launcher-23.03` | ✅ | ❌ | ❌ | | `3.MPT` | ❓ | ❓ | ❓ | | `4.DDP` | ❓ | ❓ | ❓ | + + +## 4. Contributors + +Thanks to all the contributors for building, reviewing and testing. + +- Pierre-Yves Aquilanti - pierreya@ +- Verdi March - marcverd@ +- Uros Lipovsek - lipovsek@ +- Keita Watanabe - mlkeita@ +- Ankur Srivastava - awsankur@ +- Alex Iankoulski - iankouls@ +- Tom McDonald - tjm@ +- Sean Smith - seaam@ From a11934833efd912264be1e098c9c59dcbeb1294a Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 13 Sep 2023 20:59:47 -0500 Subject: [PATCH 075/648] Update Nemo steps values and add MBS for 5B case --- 3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh | 2 +- 3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh | 4 +++- 3.test_cases/2.nemo-launcher/README.md | 1 + 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh b/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh index 3e134048..922bf886 100755 --- a/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh +++ b/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh @@ -16,7 +16,7 @@ set -exo pipefail export MODEL_SIZE=126m export NUM_NODES=2 
export RUNTIME=30m -export MAX_STEPS=40 +export MAX_STEPS=100 export MODEL=gpt3 diff --git a/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh b/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh index a75a7e6b..f4d88850 100755 --- a/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh +++ b/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh @@ -17,7 +17,8 @@ export MODEL=gpt3 export MODEL_SIZE=5b export NUM_NODES=2 export RUNTIME=4h -export MAX_STEPS=5 +export MAX_STEPS=30 +export MBS=2 # setting for A100 80GB (p4de, p5), reduce to 1 for A100 40GB (p4d) # When node_count < 8, needs full activations checkpointing. These're settings found on # Nemo repo's Jenkin script. @@ -44,6 +45,7 @@ declare -a BMK_ARGS=( # https://github.com/NVIDIA/NeMo/pull/6181/files training.model.data.data_impl=mock training.model.data.data_prefix=[] + training.model.micro_batch_size=${MBS} ) diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index 108ae7ab..2968fbb2 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -13,6 +13,7 @@ Table of contents: The following pre-requisites are needed to run this example: +- You are using p4de.24xlarge instances with A100 80GB or newer, with at least 80GB of memory per GPU. - You have access to the base image [`nemofw-training`](https://registry.ngc.nvidia.com/orgs/ea-bignlp/containers/bignlp-training) is available through NVIDIA's open-beta [here](https://developer.nvidia.com/nemo-framework-open-beta). - Docker, [Enroot](https://github.com/NVIDIA/enroot) and [Pixys](https://github.com/NVIDIA/pyxis) installed on the cluster and available on all nodes. 
It is assumed you are using a Custom AMI ([example](../../2.amazon_machine_images)) From e360a3f3ae9e7f7030b804cb2085971aa3903538 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 13 Sep 2023 21:00:53 -0500 Subject: [PATCH 076/648] Change BCM to avoid exporting host env vars to jobs through Slurm --- 3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml b/3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml index bb0f5703..32b0aa7e 100644 --- a/3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml +++ b/3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml @@ -6,6 +6,7 @@ gpus_per_node: 8 mem: 0 job_name_prefix: "nemo-megatron-" gres: "gpu:8" +export: "NIL" srun_args: - "--no-container-mount-home" stderr_to_stdout: True From df2981a76a9470e9dd53da7b3c94f42672b2dcc3 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 13 Sep 2023 21:09:42 -0500 Subject: [PATCH 077/648] Add doc for MBS in Nemo --- 3.test_cases/2.nemo-launcher/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index 2968fbb2..de484c49 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -185,7 +185,7 @@ This section assumes that you went through the previous sections and 1/ retrieve Congratulations! You've successfully run this test case to completion. -> **Note**: Execute 2 +> **Note**: Should you run into an OOM error, you can adjust the minimum batch size by setting the MBS in `bmk` launch scripts. You can tune the NemoMegatron and PyTorch parameters in such way as well. ## 7. 
Customizing Pre-Training From 7b4ccb941bf59631c6524239bd9c25226327a42b Mon Sep 17 00:00:00 2001 From: Verdi March Date: Thu, 14 Sep 2023 16:50:11 +0800 Subject: [PATCH 078/648] nemo-launcher: gpt3-40b on 2 nodes --- .../2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh | 13 +-- .../3.bmk-pretrain-gpt3-40b.sh | 86 +++++++++++++++++++ 2 files changed, 93 insertions(+), 6 deletions(-) create mode 100755 3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh diff --git a/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh b/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh index f4d88850..9f6249c1 100755 --- a/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh +++ b/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh @@ -19,12 +19,14 @@ export NUM_NODES=2 export RUNTIME=4h export MAX_STEPS=30 export MBS=2 # setting for A100 80GB (p4de, p5), reduce to 1 for A100 40GB (p4d) -# When node_count < 8, needs full activations checkpointing. These're settings found on -# Nemo repo's Jenkin script. - -# Below settings is similar to 22.09, except that 22.09 funnily didn't OOM with -# activations_checkpoint_num_layers=0. declare -a MODEL_ARGS=( + training.model.micro_batch_size=${MBS} + + # When node_count < 8, needs full activations checkpointing. These're settings found on + # Nemo repo's Jenkin script. + # + # Below settings is similar to 22.09, except that 22.09 funnily didn't OOM with + # activations_checkpoint_num_layers=0. 
training.model.activations_checkpoint_granularity='full' training.model.activations_checkpoint_method='block' training.model.activations_checkpoint_num_layers=1 @@ -45,7 +47,6 @@ declare -a BMK_ARGS=( # https://github.com/NVIDIA/NeMo/pull/6181/files training.model.data.data_impl=mock training.model.data.data_prefix=[] - training.model.micro_batch_size=${MBS} ) diff --git a/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh b/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh new file mode 100755 index 00000000..52455fa5 --- /dev/null +++ b/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh @@ -0,0 +1,86 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -exo pipefail +[[ -z "${TARGET_PATH}" ]] \ + && { echo Please set environment variable TARGET_PATH ; exit 1 ; } \ + || echo TARGET_PATH=$TARGET_PATH + +################################################################################ +# 000: Modify this section to define pre-training configuration: model size, +# number of nodes, max. pre-training steps, job's max. runtime. +################################################################################ +## Pre-train gpt3-40b on 2 nodes for 5 steps +export MODEL=gpt3 +export MODEL_SIZE=5b +export NUM_NODES=2 +export RUNTIME=4h +export MAX_STEPS=5 +export MBS=1 # setting for A100 80GB (p4de, p5), reduce to 1 for A100 40GB (p4d) +declare -a MODEL_ARGS=( + training.model.micro_batch_size=${MBS} + + # Activation checkpointing + training.model.activations_checkpoint_granularity='full' + training.model.activations_checkpoint_method='block' + training.model.activations_checkpoint_num_layers=1 + + # Not applicable for A100 + training.model.transformer_engine=False + training.model.ub_tp_comm_overlap=False +) + + +################################################################################ +# 010: Advance users can modify this stanza to customize benchmarking behavior. 
+################################################################################ +declare -a BMK_ARGS=( + # Disable validation, as we're only interested to measure the training time. + training.trainer.limit_val_batches=0.0 + + # Ignore checkpoints + training.exp_manager.create_checkpoint_callback=False + training.exp_manager.resume_if_exists=False + + # https://github.com/NVIDIA/NeMo/pull/6181/files + training.model.data.data_impl=mock + training.model.data.data_prefix=[] +) + + +################################################################################ +# 020: Internal settings. +################################################################################ +WORKSPACE_CONT=$TARGET_PATH +CONT_RESULT_DIR=${WORKSPACE_CONT}/results +CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe + +# Dev/test feature (off by default) to force each pre-training run outputs to a separate directory. +: "${UNIQUE_OUTPUT_DIR:=0}" +if [[ ${UNIQUE_OUTPUT_DIR} -eq 1 ]]; then + # For debugging: each run has its own output dir. + TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) + CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} + + BMK_ARGS+=(base_results_dir=${CONT_RESULT_DIR}) + + echo " + #################### + This run will write to directory ${CONT_RESULT_DIR} + #################### + " +fi + + +################################################################################ +# 030: Here we go... 
+################################################################################ +HYDRA_FULL_ERROR=1 python3 $TARGET_PATH/launcher_scripts/main.py \ + stages=[training] \ + training=${MODEL}/${MODEL_SIZE} \ + training.trainer.num_nodes=$NUM_NODES \ + training.trainer.max_steps=$MAX_STEPS \ + training.trainer.val_check_interval=$MAX_STEPS \ + "${BMK_ARGS[@]}" "${MODEL_ARGS[@]}" "$@" From 710d8865f74db23926beb54c155110c2ae7ac087 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Thu, 14 Sep 2023 09:00:27 +0000 Subject: [PATCH 079/648] Fix gpt3-40b scripts --- 3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh b/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh index 52455fa5..c4b8bea0 100755 --- a/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh +++ b/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh @@ -12,9 +12,9 @@ set -exo pipefail # 000: Modify this section to define pre-training configuration: model size, # number of nodes, max. pre-training steps, job's max. runtime. ################################################################################ -## Pre-train gpt3-40b on 2 nodes for 5 steps +## Pre-train gpt3-40b on 2 nodes for 5 steps. 
Requires minimum 2x p4de.24xlarge export MODEL=gpt3 -export MODEL_SIZE=5b +export MODEL_SIZE=40b export NUM_NODES=2 export RUNTIME=4h export MAX_STEPS=5 From fbcfec2b5ef2827bf7087faaf7316c4d664142f0 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Thu, 14 Sep 2023 09:25:22 -0500 Subject: [PATCH 080/648] Update batch arch name for consistency by prepending digit --- ...ed-training.yaml => 0.aws-batch-distributed-training.yaml} | 0 1.architectures/3.aws-batch/README.md | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) rename 1.architectures/3.aws-batch/{aws-batch-distributed-training.yaml => 0.aws-batch-distributed-training.yaml} (100%) diff --git a/1.architectures/3.aws-batch/aws-batch-distributed-training.yaml b/1.architectures/3.aws-batch/0.aws-batch-distributed-training.yaml similarity index 100% rename from 1.architectures/3.aws-batch/aws-batch-distributed-training.yaml rename to 1.architectures/3.aws-batch/0.aws-batch-distributed-training.yaml diff --git a/1.architectures/3.aws-batch/README.md b/1.architectures/3.aws-batch/README.md index 20578a7c..64b920ad 100644 --- a/1.architectures/3.aws-batch/README.md +++ b/1.architectures/3.aws-batch/README.md @@ -16,7 +16,7 @@ This architecture consists of the following resources: This template deploys AWS Batch and EC2 resources. It can be deployed via the console and the AWS CLI. Regardless of the deployment method it is assumed that you deployed the VPC template [`2.vpc-one-az.yaml`](../0.vpc_network/2.vpc-oneaz.yaml) prior to deploying that one. -- **Template file**: [`aws-batch-distributed-training.yaml`](./aws-batch-distributed-training.yaml) +- **Template file**: [`0.aws-batch-distributed-training.yaml`](./0.aws-batch-distributed-training.yaml) ## List of Parameters @@ -39,7 +39,7 @@ The command to deploy the template through the CLI is shown below. 
Please edit t ```bash aws cloudformation create-stack --stack-name batch-distributed-training \ - --template-body file://aws-batch-distributed-training.yaml \ + --template-body file://0.aws-batch-distributed-training.yaml \ --parameters ParameterKey=VPCStackParameter,ParameterValue="vpc-stack-ml" \ ParameterKey=CapacityReservationId,ParameterValue="cr-123567890abc" \ --capabilities CAPABILITY_IAM From 963b6dbbc225aed88516a57129e192b756f91152 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Thu, 14 Sep 2023 09:39:33 -0500 Subject: [PATCH 081/648] Update README and contributions --- CONTRIBUTING.md | 16 ++++++++++++++++ README.md | 2 +- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c4b6a1c5..5ed91f93 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -40,6 +40,22 @@ GitHub provides additional document on [forking a repository](https://help.githu [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). +## Contributions format +Assets in this repository are organized differently depending on their nature but regardless of that each test example, architecture must have the following: + +- Must be self contained with documentation and scripts. Dependencies are accepted for the network architecture (VPC templates) and if they are within the repository. If your scripts depend on external software (libs, frameworks, containers...) then fix the versions via a tag or commit ID to ensure reproducibility and do not use a `latest` tag. +- A README file in markdown format with the prerequisites, instructions on deploying the architecture or running the test case and notes for known edge cases or common issues. +- Scripts templates must be numbered in sequence in their usage order starting by 0. Example: `0.preprocessing.sh`, `1.processing.sh`, `2.post-processing.sh`. +- All contributions must be done through PRs at the exception of hotfixes. 
+- Contributions should follow a KISS principle, **K**eep **I**t **S**imple and **S**tupid. Users should expect to copy paste commands, reproduce tests as is and have some guidance on what to modify if they need to. + +For guidance specific on assets: + +- *Architectures*: self-contained, ensure you stick to AWS CloudFormation, the AWS CDK or Terraform. +- *Machine Image*: based on Packer and use Ansible. No specific guidance. +- *Test cases*: self-contained models & frameworks test cases, please ensure that these are tested and run at the target scale (if you say 256 A100, test on that scale). +- *Validation scripts*: self contained, keep them simple enough, no need to boil the ocean. + ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. diff --git a/README.md b/README.md index fa95165e..812fa91b 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # ML Training Reference Architectures & Tests -This directory contains reference architectures and test cases for distributed model training with [AWS ParallelCluster](https://docs.aws.amazon.com/parallelcluster/latest/ug/what-is-aws-parallelcluster.html), [AWS Batch](https://docs.aws.amazon.com/batch/latest/userguide/what-is-batch.html), and [Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html). The test cases cover different types and sizes of models (Falcon, GPT3, T5) as well as different frameworks and parallel optimizations (Pytorch DDP/FSDP, MegatronLM, MegatronLM-DeepSpeed). 
+This repository contains reference architectures and test cases for distributed model training with [AWS ParallelCluster](https://docs.aws.amazon.com/parallelcluster/latest/ug/what-is-aws-parallelcluster.html), [AWS Batch](https://docs.aws.amazon.com/batch/latest/userguide/what-is-batch.html), and [Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html). The test cases cover different types and sizes of models as well as different frameworks and parallel optimizations (Pytorch DDP/FSDP, MegatronLM, NemoMegatron...). The major components of this directory are: From 0f4c4ac6efb4ce1df14a4ea840d112229a2e9d4a Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Thu, 14 Sep 2023 09:58:13 -0500 Subject: [PATCH 082/648] Add copyright on files and remove deploy.sh for VPC --- 1.architectures/0.s3/0.private-bucket.yaml | 3 +++ 1.architectures/1.vpc_network/1.vpc-all-az.yaml | 3 +++ 1.architectures/1.vpc_network/2.vpc-one-az.yaml | 3 +++ 1.architectures/1.vpc_network/deploy.sh | 6 ------ .../distributed-training-p4de-base.yaml | 3 +++ ...ributed-training-p4de_batch-inference-g5_custom_ami.yaml | 3 +++ .../distributed-training-p4de_custom_ami.yaml | 3 +++ .../distributed-training-p4de_postinstall_scripts.yaml | 3 +++ .../distributed-training-trn1_custom_ami.yaml | 3 +++ .../3.aws-batch/0.aws-batch-distributed-training.yaml | 3 +++ 3.test_cases/1.megatron-lm/0.data-preprocessing.Dockerfile | 3 +++ 3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch | 4 ++++ .../1.megatron-lm/2.distributed-training.Dockerfile | 3 +++ 3.test_cases/1.megatron-lm/3.distributed-training.sbatch | 4 ++++ 4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile | 3 +++ 4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch | 4 ++++ .../0.nccl-tests/2.nccl-3collectives.sbatch | 4 ++++ 4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch | 1 + 18 files changed, 53 insertions(+), 6 deletions(-) delete mode 100755 1.architectures/1.vpc_network/deploy.sh diff 
--git a/1.architectures/0.s3/0.private-bucket.yaml b/1.architectures/0.s3/0.private-bucket.yaml index e39643ed..d07d4050 100644 --- a/1.architectures/0.s3/0.private-bucket.yaml +++ b/1.architectures/0.s3/0.private-bucket.yaml @@ -1,3 +1,6 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + AWSTemplateFormatVersion: '2010-09-09' Description: This CloudFormation template to create S3 Bucket diff --git a/1.architectures/1.vpc_network/1.vpc-all-az.yaml b/1.architectures/1.vpc_network/1.vpc-all-az.yaml index 8a992be7..74bc05f7 100644 --- a/1.architectures/1.vpc_network/1.vpc-all-az.yaml +++ b/1.architectures/1.vpc_network/1.vpc-all-az.yaml @@ -1,3 +1,6 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + AWSTemplateFormatVersion: '2010-09-09' Description: > Setup for large scale computations on AWS portable to multiple regions. diff --git a/1.architectures/1.vpc_network/2.vpc-one-az.yaml b/1.architectures/1.vpc_network/2.vpc-one-az.yaml index c2de5791..be1fb81c 100644 --- a/1.architectures/1.vpc_network/2.vpc-one-az.yaml +++ b/1.architectures/1.vpc_network/2.vpc-one-az.yaml @@ -1,3 +1,6 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + AWSTemplateFormatVersion: '2010-09-09' Description: > Setup for tightly coupled workloads on AWS. 
A public subnet and a private diff --git a/1.architectures/1.vpc_network/deploy.sh b/1.architectures/1.vpc_network/deploy.sh deleted file mode 100755 index 6973efbc..00000000 --- a/1.architectures/1.vpc_network/deploy.sh +++ /dev/null @@ -1,6 +0,0 @@ -aws cloudformation create-stack --stack-name vpc-stack-ml\ - --template-body file://2.vpc-one-az.yaml \ - --parameters ParameterKey=SubnetsAZ,ParameterValue=us-west-2a \ - ParameterKey=VPCName,ParameterValue="ML HPC VPC" \ - --capabilities CAPABILITY_IAM - diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml index 1da1b0b4..cceecd7b 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml @@ -1,3 +1,6 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + Imds: ImdsSupport: v2.0 Image: diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml index 4624f2db..7d7d9e36 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml @@ -1,3 +1,6 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + Imds: ImdsSupport: v2.0 diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml index 3950a92f..3674df7d 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml @@ -1,3 +1,6 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + Imds: ImdsSupport: v2.0 Image: diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml index 0dce7d2e..ed5a5cec 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml @@ -1,3 +1,6 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + Imds: ImdsSupport: v2.0 Image: diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml index cbf2ebcf..91df40cf 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml @@ -1,3 +1,6 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + Imds: ImdsSupport: v2.0 Image: diff --git a/1.architectures/3.aws-batch/0.aws-batch-distributed-training.yaml b/1.architectures/3.aws-batch/0.aws-batch-distributed-training.yaml index 40ddb741..eab126bb 100644 --- a/1.architectures/3.aws-batch/0.aws-batch-distributed-training.yaml +++ b/1.architectures/3.aws-batch/0.aws-batch-distributed-training.yaml @@ -1,3 +1,6 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + AWSTemplateFormatVersion: '2010-09-09' Description: > Setup for large scale AWS Batch with CEs, JQ, ECR and Job Definition. diff --git a/3.test_cases/1.megatron-lm/0.data-preprocessing.Dockerfile b/3.test_cases/1.megatron-lm/0.data-preprocessing.Dockerfile index 74a83ef7..4e780fa3 100644 --- a/3.test_cases/1.megatron-lm/0.data-preprocessing.Dockerfile +++ b/3.test_cases/1.megatron-lm/0.data-preprocessing.Dockerfile @@ -1,3 +1,6 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + # Container file for data prep # This could be reduced in the future FROM nvcr.io/nvidia/pytorch:23.05-py3 diff --git a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch index c6eef41b..9e931e6c 100644 --- a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch +++ b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch @@ -1,7 +1,11 @@ #!/bin/bash +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + #SBATCH -N 1 # number of nodes we want #SBATCH --exclusive # job has exclusive use of the resource, no sharing +#SBATCH --export=NIL # do not export env vars from the host env ########################### ###### User Variables ##### diff --git a/3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile b/3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile index 6e4bdb7e..5b4220be 100644 --- a/3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile +++ b/3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile @@ -1,3 +1,6 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + FROM nvcr.io/nvidia/pytorch:23.05-py3 ARG EFA_INSTALLER_VERSION=latest diff --git a/3.test_cases/1.megatron-lm/3.distributed-training.sbatch b/3.test_cases/1.megatron-lm/3.distributed-training.sbatch index 7450ed83..a1d23a60 100644 --- a/3.test_cases/1.megatron-lm/3.distributed-training.sbatch +++ b/3.test_cases/1.megatron-lm/3.distributed-training.sbatch @@ -1,11 +1,15 @@ #!/bin/bash +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + #SBATCH --nodes=24 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs #SBATCH --job-name=megatron_gpt # name of your job #SBATCH --gpus-per-node=8 # Number of GPU per node #SBATCH --gres=gpu:8 # number of GPU we reserve #SBATCH --exclusive # job has exclusive use of the resource, no sharing #SBATCH --wait-all-nodes=1 +#SBATCH --export=NIL # do not export env vars from the host env set -ex; diff --git a/4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile b/4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile index 99d8ea70..5ec20805 100644 --- a/4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile +++ b/4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile @@ -1,3 +1,6 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + FROM nvidia/cuda:12.2.0-devel-ubuntu20.04 ARG EFA_INSTALLER_VERSION=latest diff --git a/4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch b/4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch index 2ad86f90..f8f1a7dc 100644 --- a/4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch +++ b/4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch @@ -1,11 +1,15 @@ #!/bin/bash +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + #SBATCH -N 2 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs #SBATCH --job-name=megatron_gpt # name of your job #SBATCH --ntasks-per-node 8 # Number of GPU per node #SBATCH --gres=gpu:8 # number of GPU we reserve #SBATCH --exclusive #SBATCH --wait-all-nodes=1 +#SBATCH --export=NIL # do not export env vars from the host env ### Disable hyperthreading by setting the tasks per core to 1 #SBATCH --ntasks-per-core=1 diff --git a/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch b/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch index 63e418c7..86306fa3 100644 --- a/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch +++ b/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch @@ -1,11 +1,15 @@ #!/bin/bash +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + #SBATCH -N 2 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs #SBATCH --job-name=megatron_gpt # name of your job #SBATCH --ntasks-per-node 8 # Number of GPU per node #SBATCH --gres=gpu:8 # number of GPU we reserve #SBATCH --exclusive #SBATCH --wait-all-nodes=1 +#SBATCH --export=NIL # do not export env vars from the host env ### Disable hyperthreading by setting the tasks per core to 1 #SBATCH --ntasks-per-core=1 diff --git a/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch b/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch index 1f29c6c3..e9860468 100644 --- a/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch +++ b/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch @@ -9,6 +9,7 @@ #SBATCH --gres=gpu:8 # number of GPU we reserve #SBATCH --exclusive #SBATCH --wait-all-nodes=1 +#SBATCH --export=NIL # do not export env vars from the host env ### Disable hyperthreading by setting the tasks per core to 1 #SBATCH --ntasks-per-core=1 From f5510ef9701a7b32a8f04574e597cd7b4941f4b8 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Thu, 14 Sep 2023 11:08:23 -0500 Subject: [PATCH 083/648] Fix name for EKS architecture --- 1.architectures/{4.aws-eks => 4.amazon-eks}/.gitignore | 0 1.architectures/{4.aws-eks => 4.amazon-eks}/README.md | 0 1.architectures/{4.aws-eks => 4.amazon-eks}/app.py | 0 1.architectures/{4.aws-eks => 4.amazon-eks}/cdk.json | 0 1.architectures/{4.aws-eks => 4.amazon-eks}/cluster/__init__.py | 0 .../{4.aws-eks => 4.amazon-eks}/cluster/cluster_stack.py | 0 1.architectures/{4.aws-eks => 4.amazon-eks}/config.py | 0 1.architectures/{4.aws-eks => 4.amazon-eks}/requirements-dev.txt | 0 1.architectures/{4.aws-eks => 4.amazon-eks}/requirements.txt | 0 1.architectures/{4.aws-eks => 4.amazon-eks}/source.bat | 0 1.architectures/{4.aws-eks => 4.amazon-eks}/tests/__init__.py | 0 .../{4.aws-eks => 4.amazon-eks}/tests/unit/__init__.py | 0 .../{4.aws-eks => 
4.amazon-eks}/tests/unit/test_cluster_stack.py | 0 13 files changed, 0 insertions(+), 0 deletions(-) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/.gitignore (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/README.md (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/app.py (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/cdk.json (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/cluster/__init__.py (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/cluster/cluster_stack.py (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/config.py (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/requirements-dev.txt (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/requirements.txt (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/source.bat (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/tests/__init__.py (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/tests/unit/__init__.py (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/tests/unit/test_cluster_stack.py (100%) diff --git a/1.architectures/4.aws-eks/.gitignore b/1.architectures/4.amazon-eks/.gitignore similarity index 100% rename from 1.architectures/4.aws-eks/.gitignore rename to 1.architectures/4.amazon-eks/.gitignore diff --git a/1.architectures/4.aws-eks/README.md b/1.architectures/4.amazon-eks/README.md similarity index 100% rename from 1.architectures/4.aws-eks/README.md rename to 1.architectures/4.amazon-eks/README.md diff --git a/1.architectures/4.aws-eks/app.py b/1.architectures/4.amazon-eks/app.py similarity index 100% rename from 1.architectures/4.aws-eks/app.py rename to 1.architectures/4.amazon-eks/app.py diff --git a/1.architectures/4.aws-eks/cdk.json b/1.architectures/4.amazon-eks/cdk.json similarity index 100% rename from 1.architectures/4.aws-eks/cdk.json rename to 1.architectures/4.amazon-eks/cdk.json diff --git a/1.architectures/4.aws-eks/cluster/__init__.py 
b/1.architectures/4.amazon-eks/cluster/__init__.py similarity index 100% rename from 1.architectures/4.aws-eks/cluster/__init__.py rename to 1.architectures/4.amazon-eks/cluster/__init__.py diff --git a/1.architectures/4.aws-eks/cluster/cluster_stack.py b/1.architectures/4.amazon-eks/cluster/cluster_stack.py similarity index 100% rename from 1.architectures/4.aws-eks/cluster/cluster_stack.py rename to 1.architectures/4.amazon-eks/cluster/cluster_stack.py diff --git a/1.architectures/4.aws-eks/config.py b/1.architectures/4.amazon-eks/config.py similarity index 100% rename from 1.architectures/4.aws-eks/config.py rename to 1.architectures/4.amazon-eks/config.py diff --git a/1.architectures/4.aws-eks/requirements-dev.txt b/1.architectures/4.amazon-eks/requirements-dev.txt similarity index 100% rename from 1.architectures/4.aws-eks/requirements-dev.txt rename to 1.architectures/4.amazon-eks/requirements-dev.txt diff --git a/1.architectures/4.aws-eks/requirements.txt b/1.architectures/4.amazon-eks/requirements.txt similarity index 100% rename from 1.architectures/4.aws-eks/requirements.txt rename to 1.architectures/4.amazon-eks/requirements.txt diff --git a/1.architectures/4.aws-eks/source.bat b/1.architectures/4.amazon-eks/source.bat similarity index 100% rename from 1.architectures/4.aws-eks/source.bat rename to 1.architectures/4.amazon-eks/source.bat diff --git a/1.architectures/4.aws-eks/tests/__init__.py b/1.architectures/4.amazon-eks/tests/__init__.py similarity index 100% rename from 1.architectures/4.aws-eks/tests/__init__.py rename to 1.architectures/4.amazon-eks/tests/__init__.py diff --git a/1.architectures/4.aws-eks/tests/unit/__init__.py b/1.architectures/4.amazon-eks/tests/unit/__init__.py similarity index 100% rename from 1.architectures/4.aws-eks/tests/unit/__init__.py rename to 1.architectures/4.amazon-eks/tests/unit/__init__.py diff --git a/1.architectures/4.aws-eks/tests/unit/test_cluster_stack.py 
b/1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py similarity index 100% rename from 1.architectures/4.aws-eks/tests/unit/test_cluster_stack.py rename to 1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py From 1ba8e84d6c350d2ce73a9baea7ddd0b2005ad972 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Thu, 14 Sep 2023 11:08:23 -0500 Subject: [PATCH 084/648] Fix name for EKS architecture --- 1.architectures/{4.aws-eks => 4.amazon-eks}/.gitignore | 0 1.architectures/{4.aws-eks => 4.amazon-eks}/README.md | 0 1.architectures/{4.aws-eks => 4.amazon-eks}/app.py | 0 1.architectures/{4.aws-eks => 4.amazon-eks}/cdk.json | 0 1.architectures/{4.aws-eks => 4.amazon-eks}/cluster/__init__.py | 0 .../{4.aws-eks => 4.amazon-eks}/cluster/cluster_stack.py | 0 1.architectures/{4.aws-eks => 4.amazon-eks}/config.py | 0 1.architectures/{4.aws-eks => 4.amazon-eks}/requirements-dev.txt | 0 1.architectures/{4.aws-eks => 4.amazon-eks}/requirements.txt | 0 1.architectures/{4.aws-eks => 4.amazon-eks}/source.bat | 0 1.architectures/{4.aws-eks => 4.amazon-eks}/tests/__init__.py | 0 .../{4.aws-eks => 4.amazon-eks}/tests/unit/__init__.py | 0 .../{4.aws-eks => 4.amazon-eks}/tests/unit/test_cluster_stack.py | 0 13 files changed, 0 insertions(+), 0 deletions(-) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/.gitignore (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/README.md (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/app.py (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/cdk.json (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/cluster/__init__.py (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/cluster/cluster_stack.py (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/config.py (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/requirements-dev.txt (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/requirements.txt (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/source.bat (100%) 
rename 1.architectures/{4.aws-eks => 4.amazon-eks}/tests/__init__.py (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/tests/unit/__init__.py (100%) rename 1.architectures/{4.aws-eks => 4.amazon-eks}/tests/unit/test_cluster_stack.py (100%) diff --git a/1.architectures/4.aws-eks/.gitignore b/1.architectures/4.amazon-eks/.gitignore similarity index 100% rename from 1.architectures/4.aws-eks/.gitignore rename to 1.architectures/4.amazon-eks/.gitignore diff --git a/1.architectures/4.aws-eks/README.md b/1.architectures/4.amazon-eks/README.md similarity index 100% rename from 1.architectures/4.aws-eks/README.md rename to 1.architectures/4.amazon-eks/README.md diff --git a/1.architectures/4.aws-eks/app.py b/1.architectures/4.amazon-eks/app.py similarity index 100% rename from 1.architectures/4.aws-eks/app.py rename to 1.architectures/4.amazon-eks/app.py diff --git a/1.architectures/4.aws-eks/cdk.json b/1.architectures/4.amazon-eks/cdk.json similarity index 100% rename from 1.architectures/4.aws-eks/cdk.json rename to 1.architectures/4.amazon-eks/cdk.json diff --git a/1.architectures/4.aws-eks/cluster/__init__.py b/1.architectures/4.amazon-eks/cluster/__init__.py similarity index 100% rename from 1.architectures/4.aws-eks/cluster/__init__.py rename to 1.architectures/4.amazon-eks/cluster/__init__.py diff --git a/1.architectures/4.aws-eks/cluster/cluster_stack.py b/1.architectures/4.amazon-eks/cluster/cluster_stack.py similarity index 100% rename from 1.architectures/4.aws-eks/cluster/cluster_stack.py rename to 1.architectures/4.amazon-eks/cluster/cluster_stack.py diff --git a/1.architectures/4.aws-eks/config.py b/1.architectures/4.amazon-eks/config.py similarity index 100% rename from 1.architectures/4.aws-eks/config.py rename to 1.architectures/4.amazon-eks/config.py diff --git a/1.architectures/4.aws-eks/requirements-dev.txt b/1.architectures/4.amazon-eks/requirements-dev.txt similarity index 100% rename from 1.architectures/4.aws-eks/requirements-dev.txt rename 
to 1.architectures/4.amazon-eks/requirements-dev.txt diff --git a/1.architectures/4.aws-eks/requirements.txt b/1.architectures/4.amazon-eks/requirements.txt similarity index 100% rename from 1.architectures/4.aws-eks/requirements.txt rename to 1.architectures/4.amazon-eks/requirements.txt diff --git a/1.architectures/4.aws-eks/source.bat b/1.architectures/4.amazon-eks/source.bat similarity index 100% rename from 1.architectures/4.aws-eks/source.bat rename to 1.architectures/4.amazon-eks/source.bat diff --git a/1.architectures/4.aws-eks/tests/__init__.py b/1.architectures/4.amazon-eks/tests/__init__.py similarity index 100% rename from 1.architectures/4.aws-eks/tests/__init__.py rename to 1.architectures/4.amazon-eks/tests/__init__.py diff --git a/1.architectures/4.aws-eks/tests/unit/__init__.py b/1.architectures/4.amazon-eks/tests/unit/__init__.py similarity index 100% rename from 1.architectures/4.aws-eks/tests/unit/__init__.py rename to 1.architectures/4.amazon-eks/tests/unit/__init__.py diff --git a/1.architectures/4.aws-eks/tests/unit/test_cluster_stack.py b/1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py similarity index 100% rename from 1.architectures/4.aws-eks/tests/unit/test_cluster_stack.py rename to 1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py From 8e97aa3559d886eb3ef01b763cb8f1a4d74f862e Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Fri, 15 Sep 2023 11:55:26 -0500 Subject: [PATCH 085/648] Update to use cdk config for parameters --- 1.architectures/4.amazon-eks/README.md | 6 -- 1.architectures/4.amazon-eks/app.py | 3 +- 1.architectures/4.amazon-eks/cdk.json | 75 +++++-------------- .../4.amazon-eks/cluster/cluster_stack.py | 67 ++++++++++------- 1.architectures/4.amazon-eks/config.py | 3 +- .../4.amazon-eks/requirements-dev.txt | 1 - 1.architectures/4.amazon-eks/source.bat | 13 ---- .../tests/unit/test_cluster_stack.py | 6 +- 8 files changed, 61 insertions(+), 113 deletions(-) delete mode 100644 
1.architectures/4.amazon-eks/requirements-dev.txt delete mode 100644 1.architectures/4.amazon-eks/source.bat diff --git a/1.architectures/4.amazon-eks/README.md b/1.architectures/4.amazon-eks/README.md index be3fcf2b..2e61fe42 100644 --- a/1.architectures/4.amazon-eks/README.md +++ b/1.architectures/4.amazon-eks/README.md @@ -33,12 +33,6 @@ step to activate your virtualenv. $ source .venv/bin/activate ``` -If you are a Windows platform, you would activate the virtualenv like this: - -``` -% .venv\Scripts\activate.bat -``` - Once the virtualenv is activated, you can install the required dependencies. ``` diff --git a/1.architectures/4.amazon-eks/app.py b/1.architectures/4.amazon-eks/app.py index 539d3647..b0a6f0a6 100644 --- a/1.architectures/4.amazon-eks/app.py +++ b/1.architectures/4.amazon-eks/app.py @@ -2,8 +2,8 @@ import os import aws_cdk as cdk - from cluster.cluster_stack import ClusterStack +import boto3 app = cdk.App() ClusterStack(app, "ClusterStack", @@ -22,7 +22,6 @@ #env=cdk.Environment(account='************', region='us-west-2'), # For more information, see https://docs.aws.amazon.com/cdk/latest/guide/environments.html - ) app.synth() diff --git a/1.architectures/4.amazon-eks/cdk.json b/1.architectures/4.amazon-eks/cdk.json index b7d6c491..6a680044 100644 --- a/1.architectures/4.amazon-eks/cdk.json +++ b/1.architectures/4.amazon-eks/cdk.json @@ -1,58 +1,21 @@ { - "app": "python3 app.py", - "watch": { - "include": [ - "**" - ], - "exclude": [ - "README.md", - "cdk*.json", - "requirements*.txt", - "source.bat", - "**/__init__.py", - "python/__pycache__", - "tests" - ] - }, - "context": { - "@aws-cdk/aws-lambda:recognizeLayerVersion": true, - "@aws-cdk/core:checkSecretUsage": true, - "@aws-cdk/core:target-partitions": [ - "aws", - "aws-cn" - ], - "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true, - "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true, - "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true, - 
"@aws-cdk/aws-iam:minimizePolicies": true, - "@aws-cdk/core:validateSnapshotRemovalPolicy": true, - "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true, - "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true, - "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true, - "@aws-cdk/aws-apigateway:disableCloudWatchRole": true, - "@aws-cdk/core:enablePartitionLiterals": true, - "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true, - "@aws-cdk/aws-iam:standardizedServicePrincipals": true, - "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true, - "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true, - "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true, - "@aws-cdk/aws-route53-patters:useCertificate": true, - "@aws-cdk/customresources:installLatestAwsSdkDefault": false, - "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true, - "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true, - "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true, - "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true, - "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true, - "@aws-cdk/aws-redshift:columnId": true, - "@aws-cdk/aws-stepfunctions-tasks:enableEmrServicePolicyV2": true, - "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true, - "@aws-cdk/aws-apigateway:requestValidatorUniqueId": true, - "@aws-cdk/aws-kms:aliasNameRef": true, - "@aws-cdk/aws-autoscaling:generateLaunchTemplateInsteadOfLaunchConfig": true, - "@aws-cdk/core:includePrefixInUniqueNameGeneration": true, - "@aws-cdk/aws-efs:denyAnonymousAccess": true, - "@aws-cdk/aws-opensearchservice:enableOpensearchMultiAzWithStandby": true, - "@aws-cdk/aws-lambda-nodejs:useLatestRuntimeVersion": true, - "@aws-cdk/aws-efs:mountTargetOrderInsensitiveLogicalId": true - } + "app": "python3 app.py", + "watch": { + "include": ["**"], + "exclude": ["README.md", "cdk*.json", "requirements*.txt", + 
"**/__init__.py", "python/__pycache__", "tests"] + }, + "context": { + "vpc_id": "VPC_ID_PLACEHOLDER", + + "eks_cluster_name": "eks-cdk", + "eks_master_role_name": "EKSMaster", + + "eks_sys_ng_instance_type": "m5.large", + "eks_sys_ng_disk_size": 50, + "eks_sys_ng_min_size": 1, + "eks_sys_ng_desired_size": 2, + "eks_sys_ng_max_size": 10, + "eks_kubernetes_version": "1.27" + } } diff --git a/1.architectures/4.amazon-eks/cluster/cluster_stack.py b/1.architectures/4.amazon-eks/cluster/cluster_stack.py index 9864b60f..6263d556 100644 --- a/1.architectures/4.amazon-eks/cluster/cluster_stack.py +++ b/1.architectures/4.amazon-eks/cluster/cluster_stack.py @@ -1,48 +1,59 @@ from aws_cdk import ( - # Duration, Stack, - # aws_sqs as sqs, + Fn, + aws_eks as eks, + aws_ec2 as ec2, + aws_iam as iam ) from constructs import Construct -import aws_cdk.aws_eks as eks -import aws_cdk.aws_ec2 as ec2 -import aws_cdk.aws_iam as iam import sys -sys.path.append('../') -import config + class ClusterStack(Stack): def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) - # The code that defines your stack goes here - - # example resource - # queue = sqs.Queue( - # self, "ClusterQueue", - # visibility_timeout=Duration.seconds(300), - # ) - - # EKS Cluster example: one-liner with default node group - #cluster = eks.Cluster(self,"HelloEKS", version=eks.KubernetesVersion.V1_27, default_capacity=2, default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.M5,ec2.InstanceSize.SMALL)) - # Lookup VPC - #my_vpc = ec2.Vpc.from_lookup(self,"VPC",vpc_id="vpc-*****************") - my_vpc = ec2.Vpc.from_lookup(self,"VPC",vpc_name=config.vpc_name) + + vpc_id = self.node.try_get_context("vpc_id") + vpc = ec2.Vpc.from_lookup(self,"VPC",vpc_id=vpc_id) # Role to access cluster - admin_role = iam.Role(self, id=config.eks_master_role_name, role_name=config.eks_master_role_name, assumed_by=iam.AccountRootPrincipal(), description="Role to 
allow admin access to EKS cluster") + eks_master_role_name = self.node.try_get_context("eks_master_role_name") + admin_role = iam.Role(self, id=eks_master_role_name, + role_name=eks_master_role_name, + assumed_by=iam.AccountRootPrincipal(), + description="Role to allow admin access to EKS cluster") + + eks_cluster_name = self.node.try_get_context("eks_cluster_name") + eks_version_config = self.node.try_get_context("eks_kubernetes_version") + eks_version = eks.KubernetesVersion.of(eks_version_config) # EKS Cluster example: separate cluster and custom nodegroup creation - cluster = eks.Cluster(self, id=config.eks_cluster_name, cluster_name=config.eks_cluster_name, version=config.eks_kubernetes_version, default_capacity=0, vpc=my_vpc, masters_role=admin_role, output_cluster_name=True,output_config_command=True, output_masters_role_arn=True ) + cluster = eks.Cluster(self, id=eks_cluster_name, + cluster_name=eks_cluster_name, + version=eks_version, + default_capacity=0, + vpc=vpc, + masters_role=admin_role, + output_cluster_name=True, + output_config_command=True, + output_masters_role_arn=True) + + eks_sys_ng_instance_type = self.node.try_get_context("eks_sys_ng_instance_type") + eks_sys_ng_min_size = self.node.try_get_context("eks_sys_ng_min_size") + eks_sys_ng_desired_size = self.node.try_get_context("eks_sys_ng_desired_size") + eks_sys_ng_max_size = self.node.try_get_context("eks_sys_ng_max_size") + eks_sys_ng_disk_size = self.node.try_get_context("eks_sys_ng_disk_size") + + cluster.add_nodegroup_capacity("sys-node-group", - instance_types=[ec2.InstanceType(config.eks_sys_ng_instance_type)], - min_size=config.eks_sys_ng_min_size, - desired_size=config.eks_sys_ng_desired_size, - max_size=config.eks_sys_ng_max_size, - disk_size=config.eks_sys_ng_disk_size, - #ami_type=config.eks_sys_ng_ami_type + instance_types=[ec2.InstanceType(eks_sys_ng_instance_type)], + min_size=eks_sys_ng_min_size, + desired_size=eks_sys_ng_desired_size, + max_size=eks_sys_ng_max_size, + 
disk_size=eks_sys_ng_disk_size, ) diff --git a/1.architectures/4.amazon-eks/config.py b/1.architectures/4.amazon-eks/config.py index 7664a7dc..448aad37 100644 --- a/1.architectures/4.amazon-eks/config.py +++ b/1.architectures/4.amazon-eks/config.py @@ -8,5 +8,4 @@ eks_sys_ng_min_size=1 eks_sys_ng_desired_size=2 eks_sys_ng_max_size=10 -#eks_sys_ng_ami_type=eks.NodegroupAmiType.AL2_X86_64 -eks_kubernetes_version=eks.KubernetesVersion.V1_27 \ No newline at end of file +eks_kubernetes_version=eks.KubernetesVersion.V1_27 diff --git a/1.architectures/4.amazon-eks/requirements-dev.txt b/1.architectures/4.amazon-eks/requirements-dev.txt deleted file mode 100644 index 92709451..00000000 --- a/1.architectures/4.amazon-eks/requirements-dev.txt +++ /dev/null @@ -1 +0,0 @@ -pytest==6.2.5 diff --git a/1.architectures/4.amazon-eks/source.bat b/1.architectures/4.amazon-eks/source.bat deleted file mode 100644 index 9e1a8344..00000000 --- a/1.architectures/4.amazon-eks/source.bat +++ /dev/null @@ -1,13 +0,0 @@ -@echo off - -rem The sole purpose of this script is to make the command -rem -rem source .venv/bin/activate -rem -rem (which activates a Python virtualenv on Linux or Mac OS X) work on Windows. -rem On Windows, this command just runs this batch file (the argument is ignored). -rem -rem Now we don't need to document a Windows command for activating a virtualenv. - -echo Executing .venv\Scripts\activate.bat for you -.venv\Scripts\activate.bat diff --git a/1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py b/1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py index d6746756..ba9c68ec 100644 --- a/1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py +++ b/1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py @@ -5,11 +5,7 @@ # example tests. 
To run these tests, uncomment this file along with the example # resource in cluster/cluster_stack.py -def test_sqs_queue_created(): +def test_cluster_created(): app = core.App() stack = ClusterStack(app, "cluster") template = assertions.Template.from_stack(stack) - -# template.has_resource_properties("AWS::SQS::Queue", { -# "VisibilityTimeout": 300 -# }) From 7d5ea7586e8c0408fc4a988c77352becc247b4c3 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Fri, 15 Sep 2023 11:55:26 -0500 Subject: [PATCH 086/648] Update to use cdk config for parameters --- 1.architectures/4.amazon-eks/README.md | 6 -- 1.architectures/4.amazon-eks/app.py | 3 +- 1.architectures/4.amazon-eks/cdk.json | 75 +++++-------------- .../4.amazon-eks/cluster/cluster_stack.py | 67 ++++++++++------- 1.architectures/4.amazon-eks/config.py | 3 +- .../4.amazon-eks/requirements-dev.txt | 1 - 1.architectures/4.amazon-eks/source.bat | 13 ---- .../tests/unit/test_cluster_stack.py | 6 +- 8 files changed, 61 insertions(+), 113 deletions(-) delete mode 100644 1.architectures/4.amazon-eks/requirements-dev.txt delete mode 100644 1.architectures/4.amazon-eks/source.bat diff --git a/1.architectures/4.amazon-eks/README.md b/1.architectures/4.amazon-eks/README.md index be3fcf2b..2e61fe42 100644 --- a/1.architectures/4.amazon-eks/README.md +++ b/1.architectures/4.amazon-eks/README.md @@ -33,12 +33,6 @@ step to activate your virtualenv. $ source .venv/bin/activate ``` -If you are a Windows platform, you would activate the virtualenv like this: - -``` -% .venv\Scripts\activate.bat -``` - Once the virtualenv is activated, you can install the required dependencies. 
``` diff --git a/1.architectures/4.amazon-eks/app.py b/1.architectures/4.amazon-eks/app.py index 539d3647..b0a6f0a6 100644 --- a/1.architectures/4.amazon-eks/app.py +++ b/1.architectures/4.amazon-eks/app.py @@ -2,8 +2,8 @@ import os import aws_cdk as cdk - from cluster.cluster_stack import ClusterStack +import boto3 app = cdk.App() ClusterStack(app, "ClusterStack", @@ -22,7 +22,6 @@ #env=cdk.Environment(account='************', region='us-west-2'), # For more information, see https://docs.aws.amazon.com/cdk/latest/guide/environments.html - ) app.synth() diff --git a/1.architectures/4.amazon-eks/cdk.json b/1.architectures/4.amazon-eks/cdk.json index b7d6c491..6a680044 100644 --- a/1.architectures/4.amazon-eks/cdk.json +++ b/1.architectures/4.amazon-eks/cdk.json @@ -1,58 +1,21 @@ { - "app": "python3 app.py", - "watch": { - "include": [ - "**" - ], - "exclude": [ - "README.md", - "cdk*.json", - "requirements*.txt", - "source.bat", - "**/__init__.py", - "python/__pycache__", - "tests" - ] - }, - "context": { - "@aws-cdk/aws-lambda:recognizeLayerVersion": true, - "@aws-cdk/core:checkSecretUsage": true, - "@aws-cdk/core:target-partitions": [ - "aws", - "aws-cn" - ], - "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true, - "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true, - "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true, - "@aws-cdk/aws-iam:minimizePolicies": true, - "@aws-cdk/core:validateSnapshotRemovalPolicy": true, - "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true, - "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true, - "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true, - "@aws-cdk/aws-apigateway:disableCloudWatchRole": true, - "@aws-cdk/core:enablePartitionLiterals": true, - "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true, - "@aws-cdk/aws-iam:standardizedServicePrincipals": true, - "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true, - 
"@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true, - "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true, - "@aws-cdk/aws-route53-patters:useCertificate": true, - "@aws-cdk/customresources:installLatestAwsSdkDefault": false, - "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true, - "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true, - "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true, - "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true, - "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true, - "@aws-cdk/aws-redshift:columnId": true, - "@aws-cdk/aws-stepfunctions-tasks:enableEmrServicePolicyV2": true, - "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true, - "@aws-cdk/aws-apigateway:requestValidatorUniqueId": true, - "@aws-cdk/aws-kms:aliasNameRef": true, - "@aws-cdk/aws-autoscaling:generateLaunchTemplateInsteadOfLaunchConfig": true, - "@aws-cdk/core:includePrefixInUniqueNameGeneration": true, - "@aws-cdk/aws-efs:denyAnonymousAccess": true, - "@aws-cdk/aws-opensearchservice:enableOpensearchMultiAzWithStandby": true, - "@aws-cdk/aws-lambda-nodejs:useLatestRuntimeVersion": true, - "@aws-cdk/aws-efs:mountTargetOrderInsensitiveLogicalId": true - } + "app": "python3 app.py", + "watch": { + "include": ["**"], + "exclude": ["README.md", "cdk*.json", "requirements*.txt", + "**/__init__.py", "python/__pycache__", "tests"] + }, + "context": { + "vpc_id": "VPC_ID_PLACEHOLDER", + + "eks_cluster_name": "eks-cdk", + "eks_master_role_name": "EKSMaster", + + "eks_sys_ng_instance_type": "m5.large", + "eks_sys_ng_disk_size": 50, + "eks_sys_ng_min_size": 1, + "eks_sys_ng_desired_size": 2, + "eks_sys_ng_max_size": 10, + "eks_kubernetes_version": "1.27" + } } diff --git a/1.architectures/4.amazon-eks/cluster/cluster_stack.py b/1.architectures/4.amazon-eks/cluster/cluster_stack.py index 9864b60f..6263d556 100644 --- a/1.architectures/4.amazon-eks/cluster/cluster_stack.py +++ 
b/1.architectures/4.amazon-eks/cluster/cluster_stack.py @@ -1,48 +1,59 @@ from aws_cdk import ( - # Duration, Stack, - # aws_sqs as sqs, + Fn, + aws_eks as eks, + aws_ec2 as ec2, + aws_iam as iam ) from constructs import Construct -import aws_cdk.aws_eks as eks -import aws_cdk.aws_ec2 as ec2 -import aws_cdk.aws_iam as iam import sys -sys.path.append('../') -import config + class ClusterStack(Stack): def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) - # The code that defines your stack goes here - - # example resource - # queue = sqs.Queue( - # self, "ClusterQueue", - # visibility_timeout=Duration.seconds(300), - # ) - - # EKS Cluster example: one-liner with default node group - #cluster = eks.Cluster(self,"HelloEKS", version=eks.KubernetesVersion.V1_27, default_capacity=2, default_capacity_instance=ec2.InstanceType.of(ec2.InstanceClass.M5,ec2.InstanceSize.SMALL)) - # Lookup VPC - #my_vpc = ec2.Vpc.from_lookup(self,"VPC",vpc_id="vpc-*****************") - my_vpc = ec2.Vpc.from_lookup(self,"VPC",vpc_name=config.vpc_name) + + vpc_id = self.node.try_get_context("vpc_id") + vpc = ec2.Vpc.from_lookup(self,"VPC",vpc_id=vpc_id) # Role to access cluster - admin_role = iam.Role(self, id=config.eks_master_role_name, role_name=config.eks_master_role_name, assumed_by=iam.AccountRootPrincipal(), description="Role to allow admin access to EKS cluster") + eks_master_role_name = self.node.try_get_context("eks_master_role_name") + admin_role = iam.Role(self, id=eks_master_role_name, + role_name=eks_master_role_name, + assumed_by=iam.AccountRootPrincipal(), + description="Role to allow admin access to EKS cluster") + + eks_cluster_name = self.node.try_get_context("eks_cluster_name") + eks_version_config = self.node.try_get_context("eks_kubernetes_version") + eks_version = eks.KubernetesVersion.of(eks_version_config) # EKS Cluster example: separate cluster and custom nodegroup creation - cluster = 
eks.Cluster(self, id=config.eks_cluster_name, cluster_name=config.eks_cluster_name, version=config.eks_kubernetes_version, default_capacity=0, vpc=my_vpc, masters_role=admin_role, output_cluster_name=True,output_config_command=True, output_masters_role_arn=True ) + cluster = eks.Cluster(self, id=eks_cluster_name, + cluster_name=eks_cluster_name, + version=eks_version, + default_capacity=0, + vpc=vpc, + masters_role=admin_role, + output_cluster_name=True, + output_config_command=True, + output_masters_role_arn=True) + + eks_sys_ng_instance_type = self.node.try_get_context("eks_sys_ng_instance_type") + eks_sys_ng_min_size = self.node.try_get_context("eks_sys_ng_min_size") + eks_sys_ng_desired_size = self.node.try_get_context("eks_sys_ng_desired_size") + eks_sys_ng_max_size = self.node.try_get_context("eks_sys_ng_max_size") + eks_sys_ng_disk_size = self.node.try_get_context("eks_sys_ng_disk_size") + + cluster.add_nodegroup_capacity("sys-node-group", - instance_types=[ec2.InstanceType(config.eks_sys_ng_instance_type)], - min_size=config.eks_sys_ng_min_size, - desired_size=config.eks_sys_ng_desired_size, - max_size=config.eks_sys_ng_max_size, - disk_size=config.eks_sys_ng_disk_size, - #ami_type=config.eks_sys_ng_ami_type + instance_types=[ec2.InstanceType(eks_sys_ng_instance_type)], + min_size=eks_sys_ng_min_size, + desired_size=eks_sys_ng_desired_size, + max_size=eks_sys_ng_max_size, + disk_size=eks_sys_ng_disk_size, ) diff --git a/1.architectures/4.amazon-eks/config.py b/1.architectures/4.amazon-eks/config.py index 7664a7dc..448aad37 100644 --- a/1.architectures/4.amazon-eks/config.py +++ b/1.architectures/4.amazon-eks/config.py @@ -8,5 +8,4 @@ eks_sys_ng_min_size=1 eks_sys_ng_desired_size=2 eks_sys_ng_max_size=10 -#eks_sys_ng_ami_type=eks.NodegroupAmiType.AL2_X86_64 -eks_kubernetes_version=eks.KubernetesVersion.V1_27 \ No newline at end of file +eks_kubernetes_version=eks.KubernetesVersion.V1_27 diff --git a/1.architectures/4.amazon-eks/requirements-dev.txt 
b/1.architectures/4.amazon-eks/requirements-dev.txt deleted file mode 100644 index 92709451..00000000 --- a/1.architectures/4.amazon-eks/requirements-dev.txt +++ /dev/null @@ -1 +0,0 @@ -pytest==6.2.5 diff --git a/1.architectures/4.amazon-eks/source.bat b/1.architectures/4.amazon-eks/source.bat deleted file mode 100644 index 9e1a8344..00000000 --- a/1.architectures/4.amazon-eks/source.bat +++ /dev/null @@ -1,13 +0,0 @@ -@echo off - -rem The sole purpose of this script is to make the command -rem -rem source .venv/bin/activate -rem -rem (which activates a Python virtualenv on Linux or Mac OS X) work on Windows. -rem On Windows, this command just runs this batch file (the argument is ignored). -rem -rem Now we don't need to document a Windows command for activating a virtualenv. - -echo Executing .venv\Scripts\activate.bat for you -.venv\Scripts\activate.bat diff --git a/1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py b/1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py index d6746756..ba9c68ec 100644 --- a/1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py +++ b/1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py @@ -5,11 +5,7 @@ # example tests. 
To run these tests, uncomment this file along with the example # resource in cluster/cluster_stack.py -def test_sqs_queue_created(): +def test_cluster_created(): app = core.App() stack = ClusterStack(app, "cluster") template = assertions.Template.from_stack(stack) - -# template.has_resource_properties("AWS::SQS::Queue", { -# "VisibilityTimeout": 300 -# }) From b01b3348abaf349b1de0c1152fb86d0c73980178 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Fri, 15 Sep 2023 13:53:17 -0500 Subject: [PATCH 087/648] Add .venv to git ignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 85e6c13c..ba3705fa 100644 --- a/.gitignore +++ b/.gitignore @@ -31,3 +31,4 @@ spark-warehouse *.ckpt.* *.h5 *.tfevents.* +*.venv* From ec2b1c929d103d688888ffbf2c0451e65c88c4c8 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Sat, 16 Sep 2023 01:10:45 +0000 Subject: [PATCH 088/648] Update AMI setup for P5 compatibility --- 2.amazon_machine_images/packer-ami.pkr.hcl | 2 +- 2.amazon_machine_images/playbook-pcluster-gpu.yml | 1 + 2.amazon_machine_images/roles/aws_efa/defaults/main.yml | 2 +- 2.amazon_machine_images/roles/aws_efa_ofi/tasks/main.yml | 2 +- 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/2.amazon_machine_images/packer-ami.pkr.hcl b/2.amazon_machine_images/packer-ami.pkr.hcl index 986482fe..895240d7 100644 --- a/2.amazon_machine_images/packer-ami.pkr.hcl +++ b/2.amazon_machine_images/packer-ami.pkr.hcl @@ -23,7 +23,7 @@ variable "ami_version" { variable "parallel_cluster_version" { type = string - default = "3.6.1" + default = "3.7.0" } variable "eks_version" { diff --git a/2.amazon_machine_images/playbook-pcluster-gpu.yml b/2.amazon_machine_images/playbook-pcluster-gpu.yml index 72de65be..4f21d219 100644 --- a/2.amazon_machine_images/playbook-pcluster-gpu.yml +++ b/2.amazon_machine_images/playbook-pcluster-gpu.yml @@ -20,3 +20,4 @@ - nvidia_enroot_pyxis - aws_efa_ofi - aws_lustre + # - observability diff --git 
a/2.amazon_machine_images/roles/aws_efa/defaults/main.yml b/2.amazon_machine_images/roles/aws_efa/defaults/main.yml index 00f3aa19..c482ed61 100644 --- a/2.amazon_machine_images/roles/aws_efa/defaults/main.yml +++ b/2.amazon_machine_images/roles/aws_efa/defaults/main.yml @@ -1,5 +1,5 @@ --- -aws_efa_version: "1.17.1" +aws_efa_version: "1.26.0" aws_efa_archive: "aws-efa-installer-{{ aws_efa_version }}.tar.gz" aws_efa_url: "https://efa-installer.amazonaws.com/{{ aws_efa_archive }}" aws_efa_work_dir: "/tmp" diff --git a/2.amazon_machine_images/roles/aws_efa_ofi/tasks/main.yml b/2.amazon_machine_images/roles/aws_efa_ofi/tasks/main.yml index f77a2faa..e6cb2d3f 100644 --- a/2.amazon_machine_images/roles/aws_efa_ofi/tasks/main.yml +++ b/2.amazon_machine_images/roles/aws_efa_ofi/tasks/main.yml @@ -10,7 +10,7 @@ ansible.builtin.git: repo: https://github.com/aws/aws-ofi-nccl.git dest: /tmp/aws-ofi-nccl - version: aws + version: v1.7.2-aws - name: Build AWS EFA OFI ansible.builtin.shell: | From 13308882abb8770f4081480b2f25d22ecc1e42d2 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Sat, 16 Sep 2023 01:12:40 +0000 Subject: [PATCH 089/648] Update NCCL test image to make it P5 compatible Co-authored-by: Uros Lipovsek --- .../0.nccl-tests/0.nccl-tests.Dockerfile | 68 ++++++++++++------- .../0.nccl-tests/1.nccl-tests.sbatch | 6 +- 2 files changed, 44 insertions(+), 30 deletions(-) diff --git a/4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile b/4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile index 5ec20805..e36e49b1 100644 --- a/4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile +++ b/4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile @@ -1,20 +1,18 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 - FROM nvidia/cuda:12.2.0-devel-ubuntu20.04 -ARG EFA_INSTALLER_VERSION=latest -ARG AWS_OFI_NCCL_VERSION=aws +ARG EFA_INSTALLER_VERSION=1.26.0 +ARG AWS_OFI_NCCL_VERSION=v1.7.1-aws ARG NCCL_TESTS_VERSION=master -ARG NCCL_VERSION=v2.12.7-1 +ARG NCCL_VERSION=2.18.1 RUN apt-get update -y RUN apt-get remove -y --allow-change-held-packages \ - libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 \ - libnccl2 libnccl-dev + libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 libnccl2 libnccl-dev + RUN rm -rf /opt/hpcx \ && rm -rf /usr/local/mpi \ - && rm -rf /usr/local/ucx \ && rm -f /etc/ld.so.conf.d/hpcx.conf \ && ldconfig ENV OPAL_PREFIX= @@ -32,15 +30,34 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \ libtool \ gdb \ automake \ + python3-distutils \ cmake \ apt-utils \ - python3 \ - python3-pip + devscripts \ + debhelper \ + libsubunit-dev \ + check \ + pkg-config -ENV LD_LIBRARY_PATH=/usr/local/cuda/extras/CUPTI/lib64:/opt/amazon/openmpi/lib:/opt/nccl/build/lib:/opt/amazon/efa/lib:/opt/aws-ofi-nccl/install/lib:$LD_LIBRARY_PATH -ENV PATH=/opt/amazon/openmpi/bin/:/opt/amazon/efa/bin:/usr/bin:/usr/local/bin:$PATH +RUN mkdir -p /var/run/sshd +RUN sed -i 's/[ #]\(.*StrictHostKeyChecking \).*/ \1no/g' /etc/ssh/ssh_config && \ + echo " UserKnownHostsFile /dev/null" >> /etc/ssh/ssh_config && \ + sed -i 's/#\(StrictModes \).*/\1no/g' /etc/ssh/sshd_config +ENV LD_LIBRARY_PATH /usr/local/cuda/extras/CUPTI/lib64:/opt/amazon/openmpi/lib:/opt/nccl/build/lib:/opt/amazon/efa/lib:/opt/aws-ofi-nccl/install/lib:/usr/local/lib:$LD_LIBRARY_PATH +ENV PATH /opt/amazon/openmpi/bin/:/opt/amazon/efa/bin:/usr/bin:/usr/local/bin:$PATH -RUN pip3 install awscli pynvml +RUN curl https://bootstrap.pypa.io/get-pip.py -o /tmp/get-pip.py \ + && python3 /tmp/get-pip.py \ + && pip3 install awscli pynvml + +################################################# +## Install NVIDIA GDRCopy +#RUN git clone https://github.com/NVIDIA/gdrcopy.git 
/opt/gdrcopy \ +# && cd /opt/gdrcopy \ +# && make lib_install install \ +# && cd /opt/gdrcopy/tests \ +# && make \ +# && mv copylat copybw sanity apiperf /usr/bin/ ################################################# ## Install EFA installer @@ -53,25 +70,24 @@ RUN cd $HOME \ ################################################### ## Install NCCL -RUN git clone https://github.com/NVIDIA/nccl /opt/nccl \ +RUN git clone https://github.com/NVIDIA/nccl -b v${NCCL_VERSION}-1 /opt/nccl \ && cd /opt/nccl \ - && git checkout -b ${NCCL_VERSION} \ && make -j src.build CUDA_HOME=/usr/local/cuda \ - NVCC_GENCODE="-gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_60,code=sm_60" + NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_90,code=sm_90" ################################################### ## Install AWS-OFI-NCCL plugin +RUN apt-get install libtool autoconf cmake nasm unzip pigz parallel nfs-common build-essential hwloc libhwloc-dev libjemalloc2 libnuma-dev numactl libjemalloc-dev preload htop iftop liblapack-dev libgfortran5 ipcalc wget curl devscripts debhelper check libsubunit-dev fakeroot pkg-config dkms -y RUN export OPAL_PREFIX="" \ && git clone https://github.com/aws/aws-ofi-nccl.git /opt/aws-ofi-nccl \ && cd /opt/aws-ofi-nccl \ - && env \ && git checkout ${AWS_OFI_NCCL_VERSION} \ && ./autogen.sh \ && ./configure --prefix=/opt/aws-ofi-nccl/install \ - --with-libfabric=/opt/amazon/efa/ \ - --with-cuda=/usr/local/cuda \ - --with-nccl=/opt/nccl/build \ - --with-mpi=/opt/amazon/openmpi/ \ + --with-libfabric=/opt/amazon/efa/ \ + --with-cuda=/usr/local/cuda \ + --with-nccl=/opt/nccl/build \ + --with-mpi=/opt/amazon/openmpi/ \ && make && make install ################################################### @@ -80,11 +96,11 @@ RUN git clone https://github.com/NVIDIA/nccl-tests.git /opt/nccl-tests \ && cd 
/opt/nccl-tests \ && git checkout ${NCCL_TESTS_VERSION} \ && make MPI=1 \ - MPI_HOME=/opt/amazon/openmpi/ \ - CUDA_HOME=/usr/local/cuda \ - NCCL_HOME=/opt/nccl/build \ - NVCC_GENCODE="-gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_60,code=sm_60" + MPI_HOME=/opt/amazon/openmpi/ \ + CUDA_HOME=/usr/local/cuda \ + NCCL_HOME=/opt/nccl/build \ + NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_90,code=sm_90" +ENV NCCL_PROTO simple RUN rm -rf /var/lib/apt/lists/* -ENV LD_PRELOAD=/opt/nccl/build/lib/libnccl.so - +ENV LD_PRELOAD /opt/nccl/build/lib/libnccl.so \ No newline at end of file diff --git a/4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch b/4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch index f8f1a7dc..353b5191 100644 --- a/4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch +++ b/4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch @@ -27,8 +27,8 @@ ## Plenty of EFA level variables export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d +export FI_PROVIDER=efa export FI_EFA_FORK_SAFE=1 -# export NCCL_ALGO=Ring export FI_LOG_LEVEL=1 export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons export FI_EFA_ENABLE_SHM_TRANSFER=1 @@ -42,6 +42,4 @@ export NCCL_DEBUG=INFO declare -a ARGS=( --container-image $IMAGE ) - - -srun -l "${ARGS[@]}" --mpi=pmix $NCCL_TESTS_PATH/scatter_perf -b 8 -e 2G -f 2 -g 1 -c 1 -n 100 +srun -l "${ARGS[@]}" --mpi=pmix $NCCL_TESTS_PATH/scatter_perf -b 8 -e 2G -f 2 -g 1 -c 1 -n 100 \ No newline at end of file From 93ba559340a921b8da01dd25127ece473096eb7e Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Mon, 18 Sep 2023 16:51:18 -0500 Subject: [PATCH 090/648] Update on readme format --- README.md | 33 ++++++++++++++++++++++++++------- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 
c65b5f12..709c357e 100644 --- a/README.md +++ b/README.md @@ -32,13 +32,32 @@ More will come, feel free to add new ones (EKS, Ray?) Custom machine images can be built using [Packer](www.packer.io) for AWS ParallelCluster, Amazon EKS and plain EC2. These images are based are on Ansible roles and playbooks. -## 3. Test Cases: Support Matrix +## 3. Test cases: support matrix All test cases are under `3.test_cases/`. You can go in each test case directory to learn how to run it. -| Test cases | PC | EKS | AWS Batch | -| ----------------------- | ---- | ----- | --------- | -| `1.megatron-lm` | ✅ | ❓ | ❓ | -| `2.nemo-launcher-23.03` | ✅ | ❌ | ❌ | -| `3.MPT` | ❓ | ❓ | ❓ | -| `4.DDP` | ❓ | ❓ | ❓ | +| Test cases | Slurm | EKS | AWS Batch | +| ----------------- | ----- | --- | ---------- | +| `1.megatron-lm` | ✅ | ❓ | ❓ | +| `2.nemo-launcher` | ✅ | ❌ | ❌ | +| `3.MPT` | ❓ | ❓ | ❓ | +| `4.DDP` | ❓ | ❓ | ❓ | + + +## 4. Validation scripts + +Utilities scripts and micro-benchmarks examples are set under `4.validation_scripts/`. + + +## 5. Contributors + +Thanks to all the contributors for building, reviewing and testing. 
+ +- Pierre-Yves Aquilanti - pierreya@ +- Verdi March - marcverd@ +- Uros Lipovsek - lipovsek@ +- Keita Watanabe - mlkeita@ +- Ankur Srivastava - awsankur@ +- Alex Iankoulski - iankouls@ +- Tom McDonald - tjm@ +- Sean Smith - seaam@ From efa8a2c81a14eefe719e6697325b937af81b7d9e Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Mon, 18 Sep 2023 16:57:16 -0500 Subject: [PATCH 091/648] Remove mmap from preprocessing in MegatronLM --- 3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch | 1 - 1 file changed, 1 deletion(-) diff --git a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch index d506a4bb..8a557abf 100644 --- a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch +++ b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch @@ -29,7 +29,6 @@ srun -l "${ARGS[@]}" python3 /workspace/Megatron-LM/tools/preprocess_data.py \ --input ${DATA_PATH}/oscar-1GB.jsonl \ --output-prefix ${DATA_PATH}/my-gpt2 \ --vocab-file ${DATA_PATH}/gpt2-vocab.json \ - --dataset-impl mmap \ --tokenizer-type GPT2BPETokenizer \ --merge-file ${DATA_PATH}/gpt2-merges.txt \ --append-eod \ From a32a79efe8dfc6ac5d392ca7f2a16cb52075a498 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Mon, 18 Sep 2023 16:58:26 -0500 Subject: [PATCH 092/648] Fix readme file artifact during merge with main --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index 38430312..48aea13b 100644 --- a/README.md +++ b/README.md @@ -35,7 +35,6 @@ Custom machine images can be built using [Packer](www.packer.io) for AWS Paralle All test cases are under `3.test_cases/`. You can go in each test case directory to learn how to run it. -<<<<<<< HEAD | Test cases | Slurm | EKS | AWS Batch | | ----------------- | ----- | --- | ---------- | | `1.megatron-lm` | ✅ | ❓ | ❓ | @@ -48,7 +47,6 @@ All test cases are under `3.test_cases/`. 
You can go in each test case directory Utilities scripts and micro-benchmarks examples are set under `4.validation_scripts/`. - ## 5. Contributors Thanks to all the contributors for building, reviewing and testing. From 0c5d89248f994bd02bff7d50597dc95aa3a7a93a Mon Sep 17 00:00:00 2001 From: Alex Iankoulski Date: Mon, 18 Sep 2023 22:48:28 -0700 Subject: [PATCH 093/648] Switch to eksctl --- 1.architectures/4.amazon-eks/.gitignore | 11 --- 1.architectures/4.amazon-eks/README.md | 92 ++++++------------- 1.architectures/4.amazon-eks/app.py | 27 ------ 1.architectures/4.amazon-eks/cdk.json | 21 ----- .../4.amazon-eks/cluster/__init__.py | 0 .../4.amazon-eks/cluster/cluster_stack.py | 59 ------------ 1.architectures/4.amazon-eks/config.py | 11 --- .../4.amazon-eks/eks-g4dn-vpc.yaml | 50 ++++++++++ 1.architectures/4.amazon-eks/eks-g4dn.yaml | 39 ++++++++ .../4.amazon-eks/eks-p4de-odcr-vpc.yaml | 55 +++++++++++ .../4.amazon-eks/eks-p4de-odcr.yaml | 44 +++++++++ 1.architectures/4.amazon-eks/requirements.txt | 2 - .../4.amazon-eks/tests/__init__.py | 0 .../4.amazon-eks/tests/unit/__init__.py | 0 .../tests/unit/test_cluster_stack.py | 11 --- 15 files changed, 216 insertions(+), 206 deletions(-) delete mode 100644 1.architectures/4.amazon-eks/.gitignore delete mode 100644 1.architectures/4.amazon-eks/app.py delete mode 100644 1.architectures/4.amazon-eks/cdk.json delete mode 100644 1.architectures/4.amazon-eks/cluster/__init__.py delete mode 100644 1.architectures/4.amazon-eks/cluster/cluster_stack.py delete mode 100644 1.architectures/4.amazon-eks/config.py create mode 100644 1.architectures/4.amazon-eks/eks-g4dn-vpc.yaml create mode 100644 1.architectures/4.amazon-eks/eks-g4dn.yaml create mode 100644 1.architectures/4.amazon-eks/eks-p4de-odcr-vpc.yaml create mode 100644 1.architectures/4.amazon-eks/eks-p4de-odcr.yaml delete mode 100644 1.architectures/4.amazon-eks/requirements.txt delete mode 100644 1.architectures/4.amazon-eks/tests/__init__.py delete mode 100644 
1.architectures/4.amazon-eks/tests/unit/__init__.py delete mode 100644 1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py diff --git a/1.architectures/4.amazon-eks/.gitignore b/1.architectures/4.amazon-eks/.gitignore deleted file mode 100644 index 454fac8b..00000000 --- a/1.architectures/4.amazon-eks/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -*.swp -package-lock.json -__pycache__ -.pytest_cache -.venv -*.egg-info - -# CDK asset staging directory -.cdk.staging -cdk.out -cdk.context.json diff --git a/1.architectures/4.amazon-eks/README.md b/1.architectures/4.amazon-eks/README.md index 2e61fe42..fe1abde6 100644 --- a/1.architectures/4.amazon-eks/README.md +++ b/1.architectures/4.amazon-eks/README.md @@ -1,86 +1,50 @@ -# Welcome to your CDK Python project! -This is a project for CDK development with Python. -The `cdk.json` file tells the CDK Toolkit how to execute your app. -The `config.py` file configures an existing VPC and the EKS cluster to create in it. +# Amazon EKS distributed training architecture +This project module uses [eksctl](eksctl.io) and a cluster manifest to create your specified Amazon EKS cluster. -# Prerequisites +## Prerequisites 1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) -2. [Python 3.8 or greater](https://www.python.org/downloads/) -3. [npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) -4. CDK Toolkit: `npm install -g aws-cdk` +2. [eksctl](https://eksctl.io) 5. [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) -# Project setup +## Cluster configuration -This project is set up like a standard Python project. The initialization -process also creates a virtualenv within this project, stored under the `.venv` -directory. To create the virtualenv it assumes that there is a `python3` -(or `python` for Windows) executable in your path with access to the `venv` -package. 
If for any reason the automatic creation of the virtualenv fails, -you can create the virtualenv manually. +The following example cluster configurations are provided: -To manually create a virtualenv on MacOS and Linux: +* [eks-g4dn-vpc.yaml](./eks-g4dn-vpc.yaml) - a cluster using an existing VPC with a nodegroup of two g4dn.metal instances +* [eks-g4dn.yaml](./eks-g4dn.yaml) - a cluster with a nodegroup of two g4dn.metal instances, created in a new VPC +* [eks-p4de-odcr-vpc.yaml](./eks-p4de-odcr-vpc.yaml) - a cluster using an existing VPC with a nodegroup of two p4de.24xlarge instances from an existing on-demand capacity reservation (ODCR) +* [eks-p4de-odcr.yaml](./eks-p4de-odcr.yaml) - a cluster with two p4de.24xlarge instances from an existing ODCR, that will be created in a new VPC -``` -$ python3 -m venv .venv -``` +To configure your desired cluster, edit the cluster manifest file that most closely matches your desired configuration or copy the file and customize it, following the [cluster manifest schema](https://eksctl.io/usage/schema/) -After the init process completes and the virtualenv is created, you can use the following -step to activate your virtualenv. +## Cluster creation + +Let's assume that your desired cluster configuration is stored in file `cluster.yaml`. Then to create the cluster, execute the following command: ``` -$ source .venv/bin/activate +$ eksctl create cluster -f ./cluster.yaml ``` -Once the virtualenv is activated, you can install the required dependencies. +Cluster creation may take between 15 and 30 minutes. Upon successful creation your local `~/.kube/config` file gets updated with connection information to your cluster. Execute the following command line in order to verify that the cluster is accessible: ``` -$ python -m pip install --upgrade pip +$ kubectl get nodes ``` -``` -$ pip install -r requirements.txt -``` +You should see a list of three nodes. 
One would be a system node instance of type c5.2xlarge, and the others will belong to the nodegroup of instances with your desired instance type. + +## Delete cluster -At this point you can now synthesize the CloudFormation template for this code. +When it is time to decommission your cluster, execute the following command: ``` -$ cdk synth +$ eksctl delete cluster -f ./cluster.yaml ``` -To add additional dependencies, for example other CDK libraries, just add -them to your `setup.py` file and rerun the `pip install -r requirements.txt` -command. - -# Useful commands - - * `cdk ls` list all stacks in the app - * `cdk synth` emits the synthesized CloudFormation template - * `cdk deploy` deploy this stack to your default AWS account/region - * `cdk diff` compare deployed stack with current state - * `cdk docs` open CDK documentation - -# Project use - -1. Configure existing VPC name and specify desired EKS cluster settings by editing `./config.py` -2. Configure AWS CLI: `aws configure` -3. `export CDK_DEFAULT_ACCOUNT=` -4. `export CDK_DEFAULT_REGION=` -5. Execute `cdk synth` -6. Execute `cdk deploy --require-approval never` -7. Tag VPC public and private subnets as instructed by deployment log if needed -8. 
Upon successful creation, copy and execute the displayed aws command to update the cluster kubeconfig - -If the cluster was created successfully, you will see the cluster nodes you specified by executing the following command: -`kubectl get nodes` - -# References -* [CDK v2 Documentation](https://docs.aws.amazon.com/cdk/v2/guide/home.html) -* [Getting started with CDK](https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html) -* [CDK examples](https://github.com/aws-samples/aws-cdk-examples/tree/master/typescript/eks/cluster) -* [CDK API reference](https://docs.aws.amazon.com/cdk/v2/guide/reference.html) -* [CDK API reference EKS quick start](https://docs.aws.amazon.com/cdk/api/v2/docs/aws-cdk-lib.aws_eks-readme.html#quick-start) -* [CDK Python API for EKS](https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_eks/Cluster.html) -* [CDK for Kubernetes (cdk8s)](https://cdk8s.io/) -* [CDK Workshop Lab - build EKS cluster](https://catalog.us-east-1.prod.workshops.aws/workshops/c15012ac-d05d-46b1-8a4a-205e7c9d93c9/en-US/40-deploy-clusters) +## References +* [AWS CLI](https://aws.amazon.com/cli) +* [Amazon EKS](https://aws.amazon.com/eks) +* [eksctl](https://eksctl.io) +* [kubectl](https://kubernetes.io/docs/reference/kubectl) + diff --git a/1.architectures/4.amazon-eks/app.py b/1.architectures/4.amazon-eks/app.py deleted file mode 100644 index b0a6f0a6..00000000 --- a/1.architectures/4.amazon-eks/app.py +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python3 -import os - -import aws_cdk as cdk -from cluster.cluster_stack import ClusterStack -import boto3 - -app = cdk.App() -ClusterStack(app, "ClusterStack", - # If you don't specify 'env', this stack will be environment-agnostic. - # Account/Region-dependent features and context lookups will not work, - # but a single synthesized template can be deployed anywhere. - - # Uncomment the next line to specialize this stack for the AWS Account - # and Region that are implied by the current CLI configuration. 
- - env=cdk.Environment(account=os.getenv('CDK_DEFAULT_ACCOUNT'), region=os.getenv('CDK_DEFAULT_REGION')), - - # Uncomment the next line if you know exactly what Account and Region you - # want to deploy the stack to. */ - - #env=cdk.Environment(account='************', region='us-west-2'), - - # For more information, see https://docs.aws.amazon.com/cdk/latest/guide/environments.html - ) - -app.synth() diff --git a/1.architectures/4.amazon-eks/cdk.json b/1.architectures/4.amazon-eks/cdk.json deleted file mode 100644 index 6a680044..00000000 --- a/1.architectures/4.amazon-eks/cdk.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "app": "python3 app.py", - "watch": { - "include": ["**"], - "exclude": ["README.md", "cdk*.json", "requirements*.txt", - "**/__init__.py", "python/__pycache__", "tests"] - }, - "context": { - "vpc_id": "VPC_ID_PLACEHOLDER", - - "eks_cluster_name": "eks-cdk", - "eks_master_role_name": "EKSMaster", - - "eks_sys_ng_instance_type": "m5.large", - "eks_sys_ng_disk_size": 50, - "eks_sys_ng_min_size": 1, - "eks_sys_ng_desired_size": 2, - "eks_sys_ng_max_size": 10, - "eks_kubernetes_version": "1.27" - } -} diff --git a/1.architectures/4.amazon-eks/cluster/__init__.py b/1.architectures/4.amazon-eks/cluster/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/1.architectures/4.amazon-eks/cluster/cluster_stack.py b/1.architectures/4.amazon-eks/cluster/cluster_stack.py deleted file mode 100644 index 6263d556..00000000 --- a/1.architectures/4.amazon-eks/cluster/cluster_stack.py +++ /dev/null @@ -1,59 +0,0 @@ -from aws_cdk import ( - Stack, - Fn, - aws_eks as eks, - aws_ec2 as ec2, - aws_iam as iam -) - -from constructs import Construct - -import sys - - -class ClusterStack(Stack): - - def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: - super().__init__(scope, construct_id, **kwargs) - - # Lookup VPC - - vpc_id = self.node.try_get_context("vpc_id") - vpc = ec2.Vpc.from_lookup(self,"VPC",vpc_id=vpc_id) - - # Role to 
access cluster - eks_master_role_name = self.node.try_get_context("eks_master_role_name") - admin_role = iam.Role(self, id=eks_master_role_name, - role_name=eks_master_role_name, - assumed_by=iam.AccountRootPrincipal(), - description="Role to allow admin access to EKS cluster") - - eks_cluster_name = self.node.try_get_context("eks_cluster_name") - eks_version_config = self.node.try_get_context("eks_kubernetes_version") - eks_version = eks.KubernetesVersion.of(eks_version_config) - - # EKS Cluster example: separate cluster and custom nodegroup creation - cluster = eks.Cluster(self, id=eks_cluster_name, - cluster_name=eks_cluster_name, - version=eks_version, - default_capacity=0, - vpc=vpc, - masters_role=admin_role, - output_cluster_name=True, - output_config_command=True, - output_masters_role_arn=True) - - eks_sys_ng_instance_type = self.node.try_get_context("eks_sys_ng_instance_type") - eks_sys_ng_min_size = self.node.try_get_context("eks_sys_ng_min_size") - eks_sys_ng_desired_size = self.node.try_get_context("eks_sys_ng_desired_size") - eks_sys_ng_max_size = self.node.try_get_context("eks_sys_ng_max_size") - eks_sys_ng_disk_size = self.node.try_get_context("eks_sys_ng_disk_size") - - - cluster.add_nodegroup_capacity("sys-node-group", - instance_types=[ec2.InstanceType(eks_sys_ng_instance_type)], - min_size=eks_sys_ng_min_size, - desired_size=eks_sys_ng_desired_size, - max_size=eks_sys_ng_max_size, - disk_size=eks_sys_ng_disk_size, - ) diff --git a/1.architectures/4.amazon-eks/config.py b/1.architectures/4.amazon-eks/config.py deleted file mode 100644 index 448aad37..00000000 --- a/1.architectures/4.amazon-eks/config.py +++ /dev/null @@ -1,11 +0,0 @@ -import aws_cdk.aws_eks as eks - -vpc_name="ML EKS VPC" -eks_cluster_name="eks-cdk" -eks_master_role_name="EKSMaster" -eks_sys_ng_instance_type="m5.large" -eks_sys_ng_disk_size=50 -eks_sys_ng_min_size=1 -eks_sys_ng_desired_size=2 -eks_sys_ng_max_size=10 -eks_kubernetes_version=eks.KubernetesVersion.V1_27 diff --git 
a/1.architectures/4.amazon-eks/eks-g4dn-vpc.yaml b/1.architectures/4.amazon-eks/eks-g4dn-vpc.yaml new file mode 100644 index 00000000..891497e7 --- /dev/null +++ b/1.architectures/4.amazon-eks/eks-g4dn-vpc.yaml @@ -0,0 +1,50 @@ +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig + +metadata: + name: eks-g4dn-vpc + version: "1.27" + region: us-east-1 + +vpc: + id: vpc-xxxxxxxxxxxxxxxxx + subnets: + public: + endpoint-one: + id: subnet-xxxxxxxxxxxxxxx11 + endpoint-two: + id: subnet-xxxxxxxxxxxxxxx12 + private: + p4de-1a: + id: subnet-xxxxxxxxxxxxxxx21 + p4de-1c: + id: subnet-xxxxxxxxxxxxxxx22 + +managedNodeGroups: + - name: sys + instanceType: c5.2xlarge + desiredCapacity: 1 + iam: + withAddonPolicies: + autoScaler: true + cloudWatch: true + - name: g4dn + instanceType: g4dn.metal + instancePrefix: g4dn-vpc + privateNetworking: true + efaEnabled: true + minSize: 0 + desiredCapacity: 2 + maxSize: 10 + volumeSize: 500 + subnets: + - subnet-xxxxxxxxxxxxxxx22 + iam: + withAddonPolicies: + autoScaler: true + cloudWatch: true + ebs: true + fsx: true + +iam: + withOIDC: true diff --git a/1.architectures/4.amazon-eks/eks-g4dn.yaml b/1.architectures/4.amazon-eks/eks-g4dn.yaml new file mode 100644 index 00000000..a20cc5b1 --- /dev/null +++ b/1.architectures/4.amazon-eks/eks-g4dn.yaml @@ -0,0 +1,39 @@ +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig + +metadata: + name: eks-g4dn + version: "1.27" + region: us-east-1 + +availabilityZones: + - us-east-1a + - us-east-1c + +managedNodeGroups: + - name: sys + instanceType: c5.2xlarge + desiredCapacity: 1 + iam: + withAddonPolicies: + autoScaler: true + cloudWatch: true + - name: g4dn + instanceType: g4dn.metal + instancePrefix: g4dn-vpc + privateNetworking: true + efaEnabled: true + minSize: 0 + desiredCapacity: 2 + maxSize: 10 + volumeSize: 500 + availabilityZones: ["us-east-1c"] + iam: + withAddonPolicies: + autoScaler: true + cloudWatch: true + ebs: true + fsx: true + +iam: + withOIDC: true diff --git 
a/1.architectures/4.amazon-eks/eks-p4de-odcr-vpc.yaml b/1.architectures/4.amazon-eks/eks-p4de-odcr-vpc.yaml new file mode 100644 index 00000000..a08081ee --- /dev/null +++ b/1.architectures/4.amazon-eks/eks-p4de-odcr-vpc.yaml @@ -0,0 +1,55 @@ +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig + +metadata: + name: eks-p4de-odcr-vpc + version: "1.27" + region: us-east-1 + +vpc: + id: vpc-xxxxxxxxxxxxxxxxx + subnets: + public: + endpoint-one: + id: subnet-xxxxxxxxxxxxxxx11 + endpoint-two: + id: subnet-xxxxxxxxxxxxxxx12 + private: + p4de-1a: + id: subnet-xxxxxxxxxxxxxxx21 + p4de-1c: + id: subnet-xxxxxxxxxxxxxxx22 + +managedNodeGroups: + - name: sys + instanceType: c5.2xlarge + desiredCapacity: 1 + iam: + withAddonPolicies: + autoScaler: true + cloudWatch: true + +nodeGroups: + - name: p4de-odcr-vpc + instanceType: p4de.24xlarge + instancePrefix: p4de-odcr-vpc + privateNetworking: true + efaEnabled: true + minSize: 0 + desiredCapacity: 2 + maxSize: 10 + volumeSize: 500 + subnets: + - subnet-xxxxxxxxxxxxxxx22 + capacityReservation: + capacityReservationTarget: + capacityReservationID: "cr-xxxxxxxxxxxxxxxxx" + iam: + withAddonPolicies: + autoScaler: true + cloudWatch: true + ebs: true + fsx: true + +iam: + withOIDC: true diff --git a/1.architectures/4.amazon-eks/eks-p4de-odcr.yaml b/1.architectures/4.amazon-eks/eks-p4de-odcr.yaml new file mode 100644 index 00000000..50b2637f --- /dev/null +++ b/1.architectures/4.amazon-eks/eks-p4de-odcr.yaml @@ -0,0 +1,44 @@ +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig + +metadata: + name: eks-p4de-odcr + version: "1.27" + region: us-east-1 + +availabilityZones: + - us-east-1a + - us-east-1c + +managedNodeGroups: + - name: sys + instanceType: c5.2xlarge + desiredCapacity: 1 + iam: + withAddonPolicies: + autoScaler: true + cloudWatch: true + +nodeGroups: + - name: p4de-odcr + instanceType: p4de.24xlarge + instancePrefix: p4de-odcr + privateNetworking: true + efaEnabled: true + minSize: 0 + desiredCapacity: 2 + maxSize: 10 + 
volumeSize: 500 + availabilityZones: ["us-east-1c"] + capacityReservation: + capacityReservationTarget: + capacityReservationID: "cr-xxxxxxxxxxxxxxxxx" + iam: + withAddonPolicies: + autoScaler: true + cloudWatch: true + ebs: true + fsx: true + +iam: + withOIDC: true diff --git a/1.architectures/4.amazon-eks/requirements.txt b/1.architectures/4.amazon-eks/requirements.txt deleted file mode 100644 index c0512aa5..00000000 --- a/1.architectures/4.amazon-eks/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -aws-cdk-lib==2.95.1 -constructs>=10.0.0,<11.0.0 diff --git a/1.architectures/4.amazon-eks/tests/__init__.py b/1.architectures/4.amazon-eks/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/1.architectures/4.amazon-eks/tests/unit/__init__.py b/1.architectures/4.amazon-eks/tests/unit/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py b/1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py deleted file mode 100644 index ba9c68ec..00000000 --- a/1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py +++ /dev/null @@ -1,11 +0,0 @@ -import aws_cdk as core -import aws_cdk.assertions as assertions - -from cluster.cluster_stack import ClusterStack - -# example tests. 
To run these tests, uncomment this file along with the example -# resource in cluster/cluster_stack.py -def test_cluster_created(): - app = core.App() - stack = ClusterStack(app, "cluster") - template = assertions.Template.from_stack(stack) From 87181b9138697a8563b0f599eee618bc2aca97f1 Mon Sep 17 00:00:00 2001 From: Alex Iankoulski Date: Mon, 18 Sep 2023 22:48:28 -0700 Subject: [PATCH 094/648] Switch to eksctl --- 1.architectures/4.amazon-eks/.gitignore | 11 --- 1.architectures/4.amazon-eks/README.md | 92 ++++++------------- 1.architectures/4.amazon-eks/app.py | 27 ------ 1.architectures/4.amazon-eks/cdk.json | 21 ----- .../4.amazon-eks/cluster/__init__.py | 0 .../4.amazon-eks/cluster/cluster_stack.py | 59 ------------ 1.architectures/4.amazon-eks/config.py | 11 --- .../4.amazon-eks/eks-g4dn-vpc.yaml | 50 ++++++++++ 1.architectures/4.amazon-eks/eks-g4dn.yaml | 39 ++++++++ .../4.amazon-eks/eks-p4de-odcr-vpc.yaml | 55 +++++++++++ .../4.amazon-eks/eks-p4de-odcr.yaml | 44 +++++++++ 1.architectures/4.amazon-eks/requirements.txt | 2 - .../4.amazon-eks/tests/__init__.py | 0 .../4.amazon-eks/tests/unit/__init__.py | 0 .../tests/unit/test_cluster_stack.py | 11 --- 15 files changed, 216 insertions(+), 206 deletions(-) delete mode 100644 1.architectures/4.amazon-eks/.gitignore delete mode 100644 1.architectures/4.amazon-eks/app.py delete mode 100644 1.architectures/4.amazon-eks/cdk.json delete mode 100644 1.architectures/4.amazon-eks/cluster/__init__.py delete mode 100644 1.architectures/4.amazon-eks/cluster/cluster_stack.py delete mode 100644 1.architectures/4.amazon-eks/config.py create mode 100644 1.architectures/4.amazon-eks/eks-g4dn-vpc.yaml create mode 100644 1.architectures/4.amazon-eks/eks-g4dn.yaml create mode 100644 1.architectures/4.amazon-eks/eks-p4de-odcr-vpc.yaml create mode 100644 1.architectures/4.amazon-eks/eks-p4de-odcr.yaml delete mode 100644 1.architectures/4.amazon-eks/requirements.txt delete mode 100644 1.architectures/4.amazon-eks/tests/__init__.py 
delete mode 100644 1.architectures/4.amazon-eks/tests/unit/__init__.py delete mode 100644 1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py diff --git a/1.architectures/4.amazon-eks/.gitignore b/1.architectures/4.amazon-eks/.gitignore deleted file mode 100644 index 454fac8b..00000000 --- a/1.architectures/4.amazon-eks/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -*.swp -package-lock.json -__pycache__ -.pytest_cache -.venv -*.egg-info - -# CDK asset staging directory -.cdk.staging -cdk.out -cdk.context.json diff --git a/1.architectures/4.amazon-eks/README.md b/1.architectures/4.amazon-eks/README.md index 2e61fe42..fe1abde6 100644 --- a/1.architectures/4.amazon-eks/README.md +++ b/1.architectures/4.amazon-eks/README.md @@ -1,86 +1,50 @@ -# Welcome to your CDK Python project! -This is a project for CDK development with Python. -The `cdk.json` file tells the CDK Toolkit how to execute your app. -The `config.py` file configures an existing VPC and the EKS cluster to create in it. +# Amazon EKS distributed training architecture +This project module uses [eksctl](eksctl.io) and a cluster manifest to create your specified Amazon EKS cluster. -# Prerequisites +## Prerequisites 1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) -2. [Python 3.8 or greater](https://www.python.org/downloads/) -3. [npm](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) -4. CDK Toolkit: `npm install -g aws-cdk` +2. [eksctl](https://eksctl.io) 5. [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) -# Project setup +## Cluster configuration -This project is set up like a standard Python project. The initialization -process also creates a virtualenv within this project, stored under the `.venv` -directory. To create the virtualenv it assumes that there is a `python3` -(or `python` for Windows) executable in your path with access to the `venv` -package. 
If for any reason the automatic creation of the virtualenv fails, -you can create the virtualenv manually. +The following example cluster configurations are provided: -To manually create a virtualenv on MacOS and Linux: +* [eks-g4dn-vpc.yaml](./eks-g4dn-vpc.yaml) - a cluster using an existing VPC with a nodegroup of two g4dn.metal instances +* [eks-g4dn.yaml](./eks-g4dn.yaml) - a cluster with a nodegroup of two g4dn.metal instances, created in a new VPC +* [eks-p4de-odcr-vpc.yaml](./eks-p4de-odcr-vpc.yaml) - a cluster using an existing VPC with a nodegroup of two p4de.24xlarge instances from an existing on-demand capacity reservation (ODCR) +* [eks-p4de-odcr.yaml](./eks-p4de-odcr.yaml) - a cluster with two p4de.24xlarge instances from an existing ODCR, that will be created in a new VPC -``` -$ python3 -m venv .venv -``` +To configure your desired cluster, edit the cluster manifest file that most closely matches your desired configuration or copy the file and customize it, following the [cluster manifest schema](https://eksctl.io/usage/schema/) -After the init process completes and the virtualenv is created, you can use the following -step to activate your virtualenv. +## Cluster creation + +Let's assume that your desired cluster configuration is stored in file `cluster.yaml`. Then to create the cluster, execute the following command: ``` -$ source .venv/bin/activate +$ eksctl create cluster -f ./cluster.yaml ``` -Once the virtualenv is activated, you can install the required dependencies. +Cluster creation may take between 15 and 30 minutes. Upon successful creation your local `~/.kube/config` file gets updated with connection information to your cluster. Execute the following command line in order to verify that the cluster is accessible: ``` -$ python -m pip install --upgrade pip +$ kubectl get nodes ``` -``` -$ pip install -r requirements.txt -``` +You should see a list of three nodes. 
One would be a system node instance of type c5.2xlarge, and the others will belong to the nodegroup of instances with your desired instance type. + +## Delete cluster -At this point you can now synthesize the CloudFormation template for this code. +When it is time to decommission your cluster, execute the following command: ``` -$ cdk synth +$ eksctl delete cluster -f ./cluster.yaml ``` -To add additional dependencies, for example other CDK libraries, just add -them to your `setup.py` file and rerun the `pip install -r requirements.txt` -command. - -# Useful commands - - * `cdk ls` list all stacks in the app - * `cdk synth` emits the synthesized CloudFormation template - * `cdk deploy` deploy this stack to your default AWS account/region - * `cdk diff` compare deployed stack with current state - * `cdk docs` open CDK documentation - -# Project use - -1. Configure existing VPC name and specify desired EKS cluster settings by editing `./config.py` -2. Configure AWS CLI: `aws configure` -3. `export CDK_DEFAULT_ACCOUNT=` -4. `export CDK_DEFAULT_REGION=` -5. Execute `cdk synth` -6. Execute `cdk deploy --require-approval never` -7. Tag VPC public and private subnets as instructed by deployment log if needed -8. 
Upon successful creation, copy and execute the displayed aws command to update the cluster kubeconfig - -If the cluster was created successfully, you will see the cluster nodes you specified by executing the following command: -`kubectl get nodes` - -# References -* [CDK v2 Documentation](https://docs.aws.amazon.com/cdk/v2/guide/home.html) -* [Getting started with CDK](https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html) -* [CDK examples](https://github.com/aws-samples/aws-cdk-examples/tree/master/typescript/eks/cluster) -* [CDK API reference](https://docs.aws.amazon.com/cdk/v2/guide/reference.html) -* [CDK API reference EKS quick start](https://docs.aws.amazon.com/cdk/api/v2/docs/aws-cdk-lib.aws_eks-readme.html#quick-start) -* [CDK Python API for EKS](https://docs.aws.amazon.com/cdk/api/v2/python/aws_cdk.aws_eks/Cluster.html) -* [CDK for Kubernetes (cdk8s)](https://cdk8s.io/) -* [CDK Workshop Lab - build EKS cluster](https://catalog.us-east-1.prod.workshops.aws/workshops/c15012ac-d05d-46b1-8a4a-205e7c9d93c9/en-US/40-deploy-clusters) +## References +* [AWS CLI](https://aws.amazon.com/cli) +* [Amazon EKS](https://aws.amazon.com/eks) +* [eksctl](https://eksctl.io) +* [kubectl](https://kubernetes.io/docs/reference/kubectl) + diff --git a/1.architectures/4.amazon-eks/app.py b/1.architectures/4.amazon-eks/app.py deleted file mode 100644 index b0a6f0a6..00000000 --- a/1.architectures/4.amazon-eks/app.py +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python3 -import os - -import aws_cdk as cdk -from cluster.cluster_stack import ClusterStack -import boto3 - -app = cdk.App() -ClusterStack(app, "ClusterStack", - # If you don't specify 'env', this stack will be environment-agnostic. - # Account/Region-dependent features and context lookups will not work, - # but a single synthesized template can be deployed anywhere. - - # Uncomment the next line to specialize this stack for the AWS Account - # and Region that are implied by the current CLI configuration. 
- - env=cdk.Environment(account=os.getenv('CDK_DEFAULT_ACCOUNT'), region=os.getenv('CDK_DEFAULT_REGION')), - - # Uncomment the next line if you know exactly what Account and Region you - # want to deploy the stack to. */ - - #env=cdk.Environment(account='************', region='us-west-2'), - - # For more information, see https://docs.aws.amazon.com/cdk/latest/guide/environments.html - ) - -app.synth() diff --git a/1.architectures/4.amazon-eks/cdk.json b/1.architectures/4.amazon-eks/cdk.json deleted file mode 100644 index 6a680044..00000000 --- a/1.architectures/4.amazon-eks/cdk.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "app": "python3 app.py", - "watch": { - "include": ["**"], - "exclude": ["README.md", "cdk*.json", "requirements*.txt", - "**/__init__.py", "python/__pycache__", "tests"] - }, - "context": { - "vpc_id": "VPC_ID_PLACEHOLDER", - - "eks_cluster_name": "eks-cdk", - "eks_master_role_name": "EKSMaster", - - "eks_sys_ng_instance_type": "m5.large", - "eks_sys_ng_disk_size": 50, - "eks_sys_ng_min_size": 1, - "eks_sys_ng_desired_size": 2, - "eks_sys_ng_max_size": 10, - "eks_kubernetes_version": "1.27" - } -} diff --git a/1.architectures/4.amazon-eks/cluster/__init__.py b/1.architectures/4.amazon-eks/cluster/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/1.architectures/4.amazon-eks/cluster/cluster_stack.py b/1.architectures/4.amazon-eks/cluster/cluster_stack.py deleted file mode 100644 index 6263d556..00000000 --- a/1.architectures/4.amazon-eks/cluster/cluster_stack.py +++ /dev/null @@ -1,59 +0,0 @@ -from aws_cdk import ( - Stack, - Fn, - aws_eks as eks, - aws_ec2 as ec2, - aws_iam as iam -) - -from constructs import Construct - -import sys - - -class ClusterStack(Stack): - - def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: - super().__init__(scope, construct_id, **kwargs) - - # Lookup VPC - - vpc_id = self.node.try_get_context("vpc_id") - vpc = ec2.Vpc.from_lookup(self,"VPC",vpc_id=vpc_id) - - # Role to 
access cluster - eks_master_role_name = self.node.try_get_context("eks_master_role_name") - admin_role = iam.Role(self, id=eks_master_role_name, - role_name=eks_master_role_name, - assumed_by=iam.AccountRootPrincipal(), - description="Role to allow admin access to EKS cluster") - - eks_cluster_name = self.node.try_get_context("eks_cluster_name") - eks_version_config = self.node.try_get_context("eks_kubernetes_version") - eks_version = eks.KubernetesVersion.of(eks_version_config) - - # EKS Cluster example: separate cluster and custom nodegroup creation - cluster = eks.Cluster(self, id=eks_cluster_name, - cluster_name=eks_cluster_name, - version=eks_version, - default_capacity=0, - vpc=vpc, - masters_role=admin_role, - output_cluster_name=True, - output_config_command=True, - output_masters_role_arn=True) - - eks_sys_ng_instance_type = self.node.try_get_context("eks_sys_ng_instance_type") - eks_sys_ng_min_size = self.node.try_get_context("eks_sys_ng_min_size") - eks_sys_ng_desired_size = self.node.try_get_context("eks_sys_ng_desired_size") - eks_sys_ng_max_size = self.node.try_get_context("eks_sys_ng_max_size") - eks_sys_ng_disk_size = self.node.try_get_context("eks_sys_ng_disk_size") - - - cluster.add_nodegroup_capacity("sys-node-group", - instance_types=[ec2.InstanceType(eks_sys_ng_instance_type)], - min_size=eks_sys_ng_min_size, - desired_size=eks_sys_ng_desired_size, - max_size=eks_sys_ng_max_size, - disk_size=eks_sys_ng_disk_size, - ) diff --git a/1.architectures/4.amazon-eks/config.py b/1.architectures/4.amazon-eks/config.py deleted file mode 100644 index 448aad37..00000000 --- a/1.architectures/4.amazon-eks/config.py +++ /dev/null @@ -1,11 +0,0 @@ -import aws_cdk.aws_eks as eks - -vpc_name="ML EKS VPC" -eks_cluster_name="eks-cdk" -eks_master_role_name="EKSMaster" -eks_sys_ng_instance_type="m5.large" -eks_sys_ng_disk_size=50 -eks_sys_ng_min_size=1 -eks_sys_ng_desired_size=2 -eks_sys_ng_max_size=10 -eks_kubernetes_version=eks.KubernetesVersion.V1_27 diff --git 
a/1.architectures/4.amazon-eks/eks-g4dn-vpc.yaml b/1.architectures/4.amazon-eks/eks-g4dn-vpc.yaml new file mode 100644 index 00000000..891497e7 --- /dev/null +++ b/1.architectures/4.amazon-eks/eks-g4dn-vpc.yaml @@ -0,0 +1,50 @@ +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig + +metadata: + name: eks-g4dn-vpc + version: "1.27" + region: us-east-1 + +vpc: + id: vpc-xxxxxxxxxxxxxxxxx + subnets: + public: + endpoint-one: + id: subnet-xxxxxxxxxxxxxxx11 + endpoint-two: + id: subnet-xxxxxxxxxxxxxxx12 + private: + p4de-1a: + id: subnet-xxxxxxxxxxxxxxx21 + p4de-1c: + id: subnet-xxxxxxxxxxxxxxx22 + +managedNodeGroups: + - name: sys + instanceType: c5.2xlarge + desiredCapacity: 1 + iam: + withAddonPolicies: + autoScaler: true + cloudWatch: true + - name: g4dn + instanceType: g4dn.metal + instancePrefix: g4dn-vpc + privateNetworking: true + efaEnabled: true + minSize: 0 + desiredCapacity: 2 + maxSize: 10 + volumeSize: 500 + subnets: + - subnet-xxxxxxxxxxxxxxx22 + iam: + withAddonPolicies: + autoScaler: true + cloudWatch: true + ebs: true + fsx: true + +iam: + withOIDC: true diff --git a/1.architectures/4.amazon-eks/eks-g4dn.yaml b/1.architectures/4.amazon-eks/eks-g4dn.yaml new file mode 100644 index 00000000..a20cc5b1 --- /dev/null +++ b/1.architectures/4.amazon-eks/eks-g4dn.yaml @@ -0,0 +1,39 @@ +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig + +metadata: + name: eks-g4dn + version: "1.27" + region: us-east-1 + +availabilityZones: + - us-east-1a + - us-east-1c + +managedNodeGroups: + - name: sys + instanceType: c5.2xlarge + desiredCapacity: 1 + iam: + withAddonPolicies: + autoScaler: true + cloudWatch: true + - name: g4dn + instanceType: g4dn.metal + instancePrefix: g4dn-vpc + privateNetworking: true + efaEnabled: true + minSize: 0 + desiredCapacity: 2 + maxSize: 10 + volumeSize: 500 + availabilityZones: ["us-east-1c"] + iam: + withAddonPolicies: + autoScaler: true + cloudWatch: true + ebs: true + fsx: true + +iam: + withOIDC: true diff --git 
a/1.architectures/4.amazon-eks/eks-p4de-odcr-vpc.yaml b/1.architectures/4.amazon-eks/eks-p4de-odcr-vpc.yaml new file mode 100644 index 00000000..a08081ee --- /dev/null +++ b/1.architectures/4.amazon-eks/eks-p4de-odcr-vpc.yaml @@ -0,0 +1,55 @@ +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig + +metadata: + name: eks-p4de-odcr-vpc + version: "1.27" + region: us-east-1 + +vpc: + id: vpc-xxxxxxxxxxxxxxxxx + subnets: + public: + endpoint-one: + id: subnet-xxxxxxxxxxxxxxx11 + endpoint-two: + id: subnet-xxxxxxxxxxxxxxx12 + private: + p4de-1a: + id: subnet-xxxxxxxxxxxxxxx21 + p4de-1c: + id: subnet-xxxxxxxxxxxxxxx22 + +managedNodeGroups: + - name: sys + instanceType: c5.2xlarge + desiredCapacity: 1 + iam: + withAddonPolicies: + autoScaler: true + cloudWatch: true + +nodeGroups: + - name: p4de-odcr-vpc + instanceType: p4de.24xlarge + instancePrefix: p4de-odcr-vpc + privateNetworking: true + efaEnabled: true + minSize: 0 + desiredCapacity: 2 + maxSize: 10 + volumeSize: 500 + subnets: + - subnet-xxxxxxxxxxxxxxx22 + capacityReservation: + capacityReservationTarget: + capacityReservationID: "cr-xxxxxxxxxxxxxxxxx" + iam: + withAddonPolicies: + autoScaler: true + cloudWatch: true + ebs: true + fsx: true + +iam: + withOIDC: true diff --git a/1.architectures/4.amazon-eks/eks-p4de-odcr.yaml b/1.architectures/4.amazon-eks/eks-p4de-odcr.yaml new file mode 100644 index 00000000..50b2637f --- /dev/null +++ b/1.architectures/4.amazon-eks/eks-p4de-odcr.yaml @@ -0,0 +1,44 @@ +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig + +metadata: + name: eks-p4de-odcr + version: "1.27" + region: us-east-1 + +availabilityZones: + - us-east-1a + - us-east-1c + +managedNodeGroups: + - name: sys + instanceType: c5.2xlarge + desiredCapacity: 1 + iam: + withAddonPolicies: + autoScaler: true + cloudWatch: true + +nodeGroups: + - name: p4de-odcr + instanceType: p4de.24xlarge + instancePrefix: p4de-odcr + privateNetworking: true + efaEnabled: true + minSize: 0 + desiredCapacity: 2 + maxSize: 10 + 
volumeSize: 500 + availabilityZones: ["us-east-1c"] + capacityReservation: + capacityReservationTarget: + capacityReservationID: "cr-xxxxxxxxxxxxxxxxx" + iam: + withAddonPolicies: + autoScaler: true + cloudWatch: true + ebs: true + fsx: true + +iam: + withOIDC: true diff --git a/1.architectures/4.amazon-eks/requirements.txt b/1.architectures/4.amazon-eks/requirements.txt deleted file mode 100644 index c0512aa5..00000000 --- a/1.architectures/4.amazon-eks/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -aws-cdk-lib==2.95.1 -constructs>=10.0.0,<11.0.0 diff --git a/1.architectures/4.amazon-eks/tests/__init__.py b/1.architectures/4.amazon-eks/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/1.architectures/4.amazon-eks/tests/unit/__init__.py b/1.architectures/4.amazon-eks/tests/unit/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py b/1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py deleted file mode 100644 index ba9c68ec..00000000 --- a/1.architectures/4.amazon-eks/tests/unit/test_cluster_stack.py +++ /dev/null @@ -1,11 +0,0 @@ -import aws_cdk as core -import aws_cdk.assertions as assertions - -from cluster.cluster_stack import ClusterStack - -# example tests. 
To run these tests, uncomment this file along with the example -# resource in cluster/cluster_stack.py -def test_cluster_created(): - app = core.App() - stack = ClusterStack(app, "cluster") - template = assertions.Template.from_stack(stack) From b80fa0719437d9d81ce69c07dd5621fed0e41eb4 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 19 Sep 2023 10:42:12 -0500 Subject: [PATCH 095/648] Fix Custom AMI CUDA to 12 otherwise Hopper not recognized as a target --- 2.amazon_machine_images/roles/nvidia_cuda/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/2.amazon_machine_images/roles/nvidia_cuda/defaults/main.yml b/2.amazon_machine_images/roles/nvidia_cuda/defaults/main.yml index 79644989..6bd794f3 100644 --- a/2.amazon_machine_images/roles/nvidia_cuda/defaults/main.yml +++ b/2.amazon_machine_images/roles/nvidia_cuda/defaults/main.yml @@ -2,5 +2,5 @@ dependencies: - role: nvidia_driver -cuda_version: "11-6" +cuda_version: "12-2" cudnn_version: "libcudnn8" From 43a5cf9eba93b30be8e12c28e48db2d1c7141721 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 19 Sep 2023 11:15:31 -0500 Subject: [PATCH 096/648] Fix env var setting to last for PATH, LDLIB and remove LD_PRELOAD --- 2.amazon_machine_images/roles/aws_efa_ofi/tasks/main.yml | 4 ++-- 2.amazon_machine_images/roles/nvidia_cuda/tasks/main.yml | 6 +++--- .../roles/nvidia_gdrcopy/tasks/main.yml | 6 +++--- 2.amazon_machine_images/roles/nvidia_nccl/tasks/main.yml | 9 +-------- 4 files changed, 9 insertions(+), 16 deletions(-) diff --git a/2.amazon_machine_images/roles/aws_efa_ofi/tasks/main.yml b/2.amazon_machine_images/roles/aws_efa_ofi/tasks/main.yml index e6cb2d3f..e5334773 100644 --- a/2.amazon_machine_images/roles/aws_efa_ofi/tasks/main.yml +++ b/2.amazon_machine_images/roles/aws_efa_ofi/tasks/main.yml @@ -41,5 +41,5 @@ path: /etc/profile.d/aws-ofi-nccl.sh create: yes block: | - export 
LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/aws-ofi-nccl/lib:/opt/amazon/openmpi/lib64:/opt/amazon/efa/lib64 - export PATH=$PATH:/opt/amazon/efa/bin:/opt/amazon/openmpi/bin + export LD_LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/amazon/openmpi/lib64:/opt/amazon/efa/lib64:$LD_LIBRARY_PATH + export PATH=/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:$PATH diff --git a/2.amazon_machine_images/roles/nvidia_cuda/tasks/main.yml b/2.amazon_machine_images/roles/nvidia_cuda/tasks/main.yml index 1a13e7ac..9cf1c60e 100644 --- a/2.amazon_machine_images/roles/nvidia_cuda/tasks/main.yml +++ b/2.amazon_machine_images/roles/nvidia_cuda/tasks/main.yml @@ -21,9 +21,9 @@ path: /etc/profile.d/cuda.sh create: yes block: | - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64 - export PATH=$PATH:/usr/local/cuda/bin - export CPATH=${CPATH}:/usr/local/cuda/targets/x86_64-linux/include + export LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$LD_LIBRARY_PATH + export PATH=/usr/local/cuda/bin:$PATH + export CPATH=/usr/local/cuda/targets/x86_64-linux/include:$CPATH - name: yum clean metadata ansible.builtin.command: diff --git a/2.amazon_machine_images/roles/nvidia_gdrcopy/tasks/main.yml b/2.amazon_machine_images/roles/nvidia_gdrcopy/tasks/main.yml index f9d5f5d0..221f10c8 100644 --- a/2.amazon_machine_images/roles/nvidia_gdrcopy/tasks/main.yml +++ b/2.amazon_machine_images/roles/nvidia_gdrcopy/tasks/main.yml @@ -36,6 +36,6 @@ path: /etc/profile.d/gdrcopy.sh create: yes block: | - export PATH=$PATH:/usr/local/gdrcopy/bin - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/gdrcopy/lib - export CPATH=$CPATH:/usr/local/gdrcopy/include + export PATH=/usr/local/gdrcopy/bin:$PATH + export LD_LIBRARY_PATH=/usr/local/gdrcopy/lib:$LD_LIBRARY_PATH + export CPATH=/usr/local/gdrcopy/include:$CPATH diff --git a/2.amazon_machine_images/roles/nvidia_nccl/tasks/main.yml b/2.amazon_machine_images/roles/nvidia_nccl/tasks/main.yml index 
439c61ad..e8fe335a 100644 --- a/2.amazon_machine_images/roles/nvidia_nccl/tasks/main.yml +++ b/2.amazon_machine_images/roles/nvidia_nccl/tasks/main.yml @@ -27,16 +27,9 @@ path: /etc/profile.d/nccl.sh create: yes block: | - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/nccl/build/lib + export LD_LIBRARY_PATH=/opt/nccl/build/lib:$LD_LIBRARY_PATH export NCCL_PROTO=simple -- name: Adding the NCCL preload profile file - ansible.builtin.blockinfile: - path: /etc/profile.d/nccl-preload.sh - create: yes - block: | - export LD_PRELOAD=/opt/nccl/build/lib/libnccl.so:/opt/aws-ofi-nccl/lib/libnccl-net.so - - name: Clone Nvidia NCCL tests git repository when: install_nccl_tests block: From 346619640f9bff15d2604c9916d7365b823b397a Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 19 Sep 2023 16:09:02 -0500 Subject: [PATCH 097/648] Add notes to EKS readme --- 1.architectures/4.amazon-eks/README.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/1.architectures/4.amazon-eks/README.md b/1.architectures/4.amazon-eks/README.md index fe1abde6..2e018ea6 100644 --- a/1.architectures/4.amazon-eks/README.md +++ b/1.architectures/4.amazon-eks/README.md @@ -3,9 +3,12 @@ This project module uses [eksctl](eksctl.io) and a cluster manifest to create your specified Amazon EKS cluster. ## Prerequisites -1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) -2. [eksctl](https://eksctl.io) -5. [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) + +To deploy the architectures you must install the dependencies below. You are advised to go through the fist two steps of the [Getting started with Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html) guide from the AWS Documentation. + +1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) is the AWS command line interface. +2. [eksctl](https://eksctl.io) command line tool to manage EKS clusters. +3. 
[kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) command line for Kubernetes. ## Cluster configuration From ae7345f57192b7ea90013b6cccba649b4d36a988 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 19 Sep 2023 16:09:02 -0500 Subject: [PATCH 098/648] Add notes to EKS readme --- 1.architectures/4.amazon-eks/README.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/1.architectures/4.amazon-eks/README.md b/1.architectures/4.amazon-eks/README.md index fe1abde6..2e018ea6 100644 --- a/1.architectures/4.amazon-eks/README.md +++ b/1.architectures/4.amazon-eks/README.md @@ -3,9 +3,12 @@ This project module uses [eksctl](eksctl.io) and a cluster manifest to create your specified Amazon EKS cluster. ## Prerequisites -1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) -2. [eksctl](https://eksctl.io) -5. [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) + +To deploy the architectures you must install the dependencies below. You are advised to go through the fist two steps of the [Getting started with Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html) guide from the AWS Documentation. + +1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) is the AWS command line interface. +2. [eksctl](https://eksctl.io) command line tool to manage EKS clusters. +3. [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) command line for Kubernetes. 
## Cluster configuration From 0e236432ffa3872575ce314f2a2dc01b3b3363db Mon Sep 17 00:00:00 2001 From: Alex Iankoulski Date: Tue, 19 Sep 2023 23:31:17 -0700 Subject: [PATCH 099/648] Update eks README and add arch diagram --- 0.docs/eks-model-training-multi-az.drawio | 90 ++++++++++++++++++++++ 0.docs/eks-model-training-single-az.png | Bin 0 -> 56959 bytes 1.architectures/4.amazon-eks/README.md | 53 ++++++++++--- 3 files changed, 134 insertions(+), 9 deletions(-) create mode 100644 0.docs/eks-model-training-multi-az.drawio create mode 100644 0.docs/eks-model-training-single-az.png diff --git a/0.docs/eks-model-training-multi-az.drawio b/0.docs/eks-model-training-multi-az.drawio new file mode 100644 index 00000000..430da2ca --- /dev/null +++ b/0.docs/eks-model-training-multi-az.drawio @@ -0,0 +1,90 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/0.docs/eks-model-training-single-az.png b/0.docs/eks-model-training-single-az.png new file mode 100644 index 0000000000000000000000000000000000000000..7da661035beaeebb65183514bb225bdf5fd3912a GIT binary patch literal 56959 zcmeFa1z1(;esIo z0^&0?R6r>xS7ri#5bc#DMG%TVlFT9?aJ+L6S97p(F*dU_LZIOgJ^4h#&T0iUG&43c zGNj=UqhV)fXJzMLV&!9EXT8tLO~V0UX9vGHxtQ5`c{IRfaK9`$olkx9MsLs(o&CxU4o056+A*Gr)OYh?EtkmrQr|(-=(b`j4Z)lKnA}Rl)*1G z@Q;mEmyJu8haP-{*w|PasT%3am^r{l;oxEB;09zm8L9hnvNY_X;IpNfg%S88X=Gpl zg+F3wr{~OU1_igXvoUis^8%8nsU6e`+$6!q#>vde%*De2-v}SL^83;y*twWbR;jFK ztY>F-ZaVmGjU4n$PIha`=?FFAvu5XZauw%PmozjLleu@Yb0;G^doyqhCkJ>sIs9P< zR~w@fs-cmSnE_Y{I8AXHRtUHT|KSidG}AM&)3X9!%uX2~<-jJ#Yi=U$EMaUdDs8NA z--46rWEyKdu$SXG?t!yrV*B%GXJcBqNZRSyn94y7j}O_<<>VnYE>3WS@MRggo=|w% zIZg;Bc4xC656aF8Cn%e_oleIF77KsQ#L>*q$o_P+6OscIYUyBR^GBiq)Y=+I#vg9f zv$KOb|M50ssO9ONPiM6O+xo*paDuXdp5)Ak-xTUUGn~GLsTq^q0|R3{F*_bh1v_?5F+C=p6ZSdjSvsB`?g_!(!S(F4 zp^grgX4XcczcCZYtB5hw+Cda*3AH;uI}UaZb_sAc93nu74b8xE&#qZRt--h=z}Q%x 
zk!aXC#8|mR*?0im9+(XaF!aelr#Bnw*_$4lA^1ct*%yjV$j$?adrcG)y1r z-~hEccLT)I%ml7_4p19F)jR!W0M=pz7Aa!tU}XuevVo8AwVw(#8_>g(@2C5PPpM~b zb0W3IW-jn`h}b}Z(K50VcLIaKMHfB@kep+kwsHYmwlQVabGGMXwlgv@16u-W1*{uf zQ*88p=iMng;TtkKQ$_e)|5&Demn=ymV8h^pv$LK)&ck;q52t#}!O6hxwCI{8AD*PFHzO6vaiAPF8+S z8o}8CH9XJGU&_#Vfjbqezlg+{Xq`!pm6@R-T^C2J8hiJE)_z z;crrTvaJ7F?((vpvhtL>oE)bTbdI&0rvh-EwQ%;qnfUi%?b#~NvGy-~{p0!miUa2~ z_%{my*TjDXFW6amn7K|SI^_l%FXtKMJU{r@ F!olPGM#TlKL0o zuV_4fB?77V^P_-rLjBVWe%YWG|H>kqraI?r zH20}3K5wAUXn!MHaGH|++ZLP}xxc?H;N#>HlYl1_{|B~!3&O!F!UdlBV-)-US6kp@ zWAHDs1^;ZscBc7%b8Eo)zmE+5=F!^kk@0B+&Cbom%zbL*&Y5Snvsn2&N7y-e&*JL8 zkBR2`YsRW_4 z&*>HiC=48*$2sl&pTXQSqkoRM5DkzY{J+cGzglM=R%W)pTIatVe>v<#peoMF)*Kqf z_nkGwxTU$FvP|s%1gvdj2x&xwMuE+Z92{Iv zv1C0*2Pg<`&cRgn4tjPD5O~1@+bWpV%){r?%d{)e16XHNcNOMVG7{8y*>zsTA- zn4O_^7RM!#^B6h2D8mQg;e-RKXX(qI$}<0j2oA3#Q+Y6zG4zg^(?n~eUe zu=X>CpP30xHs-V1&^a0S+c_c=2WLA)Yj%5a1-|>zJPJI|HjWPP+WEgEN;vE1Idc(ge~l4-u}&|I7Xl4({WC0)Fp* zivZZFiWuHCQFj%;XTfP90o9XmW>q@N7tRU5UwHd(fADXA@ITuh`~@%iMW5_HhVaf{ zKhWX+Ti@y#%*gqt;E;puuN|>xf#4s!!GC87>X*jgubHB~tG$B}{3U@CUjT(W1GoV@ zhcEn{`eXlV8U1I|&vEv8!r#U8A7cmq!D|DjxPs*GFKC>X_~WZ*uY{ajIDG(Mwjj%b zzlAU)VqSxQK#m|S2D$H|vyg(47}r1Ay1clg{kj&SbVNxZ74rN?5FMRR=^Vis?%}}DcS6P`NBqOjOI$!^@Xb;G0~Or#oZNuyPxq1|GT=NX zFUNIxO7q7-$iWz!eK{bCWwYxF~$l7t$^nk&$1t zo7zfuY{1%Do{S=7(%|jbL%)6(3Eyzx!WGD!eOt^o+#M7n`Q~3ULf7c-&~xsPVd1Oo z-P~--{b9@wB#NrMZXu#=d@3ny&)1Ephb$*S>jCl{GKkB7SR;zPa{$3 zV_Cw$Lc}P0hbL3j%nGImwAE+krzJ;LU+Jf~i;}IRAF%(uaRcAbX~Jq&2t67BotGN% zCy%E6+)GiBhZ&@y2q;OS0i&hYYoEJ^iy~g#w~7|6Oo%uqE$-BJAjVOO8C^S+&+!fO zDcBzn>;`r$90{SQ*S|);6oUXC6#nBQ4pys<&l8~@+qhqyL1NZQ8R$i%FE>^pheUk` z{}I_qb`V*_GGjo?5XSG#vX3{cr7N>#Vf*zTwQrFhk*~e^q+o!VMDh6MvenfCQSE!>H97l4_nK$W zb@iLdZ8p@dF_XxPHg{eQgD!7>z!JawtttFu3)Bx8ka>-|RVX3CjN)JuN||Qw=a9eE z6Bj$nV8z?-R_%YPBU2e6=yLiAsXTIRi|SRzf91`00liyYDzr0D;W@`6H3ppzm`ge$ zeTYOyvMuS$xH#97Xq?A+T6MhEu#QS|`;&UlE2eK9Sb1p>?`7YviOU!9F&aNe8!#o= z4n9cRxRaCa`*^tjiy7}52I9dZxj{|cn6hG*`T~|)TYS16>E@rtbH6;h6~<>q3_>>&7uyf&ZsL? 
z%#$K0=$dHpJq?`{3S-;b>C5n9wq~UKMti!@0=%g>@q!?qD{6^Ao&tP(lO555S)9A< z$-cW|sy8LP>7ILopv9jQs%vbXYGS;t!3p&yav-@Hyt*M^^SKM(a+i{O^6+Yz_Sf7r z8MTYtMUPKJ>JB2BjdB!Hqk&>ATnZEPGJ0EavHd!}ECjU@#_kkwZi*$uP@pv=`8pa< z#UwF{sU7x?DvKy)2VT5#6z9_<-15EQMNS;afsAx{e`n~4zDPA{A=AM+#Gh=8+yYim zGve|AJ3S%=Y%G9Wxr9Qwh}3*|qa=3Si8n8FNqY zgvb!u)ZdA|HrZgZYTqoBsCl{N)+-q$HZ1niqgr0-7NfVLT_|$5_tFgsoV5gF{B(uG zM2y|4x;Nh+sI;PH+(|g^t2p6;C=3MAtYn-$zev2j_DhCqi>-sbz~Q!r}B1`BFRvbMHQT zLmW&9y{OkuyJdT1{#4OOF1|*EL)T%4`ZB+Y#OEJ`Uj|}5UzgV9jXe}RTP+(HhEP;a zlv@NSz6274r$gor8hf&bWP7mk$53|9Cz@~Y!n2{s`oqs_gI~oTidE&r^R#%DAP|MC zX%Z40%Dz)5Z;Dv;o^HkX7SJ|wCXJWj2IcZS^(;9&;Hqa=KJh8zCicxfh4jZ4ECytk zTzsMA2~9d`*K;o77P+~AwfCBFUsT(6zTtlI2x*k}7WLJ>Na<&{uEj+qo#$4eBv_@} zg4yspVDQ{Wu9a<)1-$scfY?)>7(~ol%5fveJHpevCZ}IBdM|!%@41t`?tPEe>s}D% zo)Wn*Dw{8|=BZwC6IvhMJu-_t7nwAwfn#XJHSnsXSuuOh;JSP0O%dRma0))-QW+A$`4 zp-HRzxfQ~-3UnXcmj$nVs}E(~b}yFl)(S(~p;!tgb!=t4`COZC70St`#wZr|Zh7Ze z=RFoyV-!F2NGjBga)O0uY1u_^p57WV>+1oh)y_L2m!n=PWHHo+AteucH;u>>Ly~5M zvt`~e6|`%Gbg!m9s>1oefj!bHs-@@>eeUEfgb04*wn~FM>0#cVk|dGVVd8xUW1Zsp zoa=9AzDT=yrc_nti;qC4&SVK54PQu=)h+aJ|{a9rf2KN$SpRbkbS(Gfv#=BBgIZQ5RBh4iLY!u>TrQU#O^$zW9X zlUc!}pB{I;im8L4GjSHpuHB>iGO?uj*4qs-R4!p^7G5xI! 
z;tHlx55R~b^q~!luXh~nK+3UqhoJ7f5Is#3sM+&W^<%~7+q(=&qI)19n+R_Ow3jDj@RkCvkRq0bqb*IUQuDewIxCW|Y{N}j%YM0dd(8u28< z*eq|jrwgZ`fSg0ym&HxDdv>cZrBXH`X5%z4^BG3BQN}8#%0|3S1ze~BVmkjf3J@Ol z#4z7xAgg-8?t!GmR{3m+%NS)N*Af%Do~Da4+Ta!JDH4sCxGsa~vR6CR_CYLph`>MA zU4P~~sW5Y-G4zrC(!~TETUtzCJ!{h;ZZu3?3=tb!=Y<0y5e)tqiITj~a0e8jqaS7R zMBn!nYo8aF3!Jh-Eg1-WOHA+cc)0ZOA#g&8k?uk^pUNAQV9^KMB=svboq8}<6K81` z6z5l{6iFTyy`eCvb;+4I>*3Cdg~n!_ALC_v;aAisNw)&xY~JVg_uFi5Ne)#kctw&I z1ct9K2q3GI##RRqoW>A7HE?;aK`-~xgdczfPP(u$73*|X?`^C=X4UcQ7YJ2aD|xSR}GdrqjP#wUACWS2!`>SnR|=g zb$Ph{)SQw0Q4m7+IZ2M5>YnG1_#krzZZHAw0=!Dla>9EkRl4d&4fwOm?qbHXNgE%p1{k#fMg zm3~>iS9FxwPfy1g7lxPIHwY0QV|7sHs>y4up|aiH2^*CP|IqhLmyW!1)m1y8;By!x zEdSxC>Q?DqZIE5Yp8DwA^v4!MM&g?T-XUxpMxG_>^uB~gs`%w@DUx5U*hI3&JgF-( z`CE&NLi`)VA2vO9XFqzi!8Mb8qkNLcVls8zWFmF<3V|;7lVfdpZi77)*YXZWvg(}; zIGMn5eR=+yARYK}rT($WrMoJtt?xM)-mwf}&YBJLZd)K*rznUtr`#8t=+y2qVnk&# zvia$;psY9M)gQK@p;V{-_hE&l^gs*22@K=N7!;VuHh?~bpS&5zF;PxkVDUoU z{x{1K7L?&>iiC=RXvZaG4SJYcs^~W9BA4Np!NFEv-KL*=kNL|;@#+hhoYnFDWD_W# zHI$^=Ug6+5UB(asqP%*AFZm~wFn}eL(^+3Dzv=syDe?oCdVZzulU?=FCuFEB#gR+} zOVd#sTUiG+{MV-jTBsRGJRt39s(a`VmCL=8g8sqDT!xeliDkT>M{g@dA5{IQvV5OO zHrHkkGp=)2F{ve7f?A%-3XgRI0kGi$Ih)L9a@O1y#RASu4>{e-+DaFmsZO*ML*q?? 
zJIA}Rt6At#l8oIdp~Q9D*-Fs?7KId-7-s2Fh7WabOjoy58^uTq4~V@!OGUA)#`mSmoVzkHM^4I}iI*D@zxj)ehBbSi(L( za`onXS9B>vjaAJOMxhc<-)nL3h@2tPY#9ym%`MR@T8Q{Pf#(%ibCmFr9f-!lDVr$UH&$_PzW{J(@OmgN3=< zLkHuYmTI=wQ+1dVYcp^@9+8G+WjLgXR&7s}N8;;O#h)lxG(3e4ppLc-d<%TvWr>#X zP1!qWWx?ZCeHKOc?@Cr?cZ?0DEt`f1RV%UPY!TNOTYgQbRIIDx39MVMu2UKIF7j5d zSW-b!c1sr7W-4ueVb`hYuR`JCtwd0m=1Aae74e97)Ur8rWN5GVAQ^{a29{{-{)A*V zO)x0DKK3|=!!+UCpKN?bW@k zpLQiZYC_*AKaxbM?Dc1P^Hq}LA<4(@b`%EQmh)#T9hDZFkcoE{n~49y^108KE@e^U zF2uOfrI+{1PKgs?*i^i&qVOg>j8^}r+tI4aY|-Py^-c85 z9OH~8_bj9O%vZ9q^Ntv7pUrYtWf20IikVPGdSbp;4m>wYm8{xF1J;9|B;w@2kD(Yu zIKO?G83X+9EFY2faEN4^>n>?9FOCb#+BbA{Jax%-MI*#I!ZJC+@4cMzX*FW{2Y2CP zPHeIiW|SA$P(>_d9HkCrFRwKZr23~0I5^JnRK`?K`bD!|A0`TKGcuEeb)!XA9WfkQ zs_-JcS2+nSdGUe$aLBbOFP)CKmmY0^EK7Ya7d@C|%bj0SqcS3$Mu2UnHqh>?Me80d zj;k&jBYAG{#lyjLI&*Gx5jA=idYHAgtk`ynp%(A8Fe**5`~&MPc{7Ju%^S2^!tYi~ z^><$!uG4_Mev$if5=ZY)f$)9xeMHtm0K^RC?sC3M|8}1EiE9`4b&oyY|=3eK9h6Z;(a{Lv1ZxT)*$A7mP0kx17k6) zTKB{1EilRi$rKhZFMvR0KuaF|ei3H*Ko_C*-RJLNw;qqw>n?w;&Kit)9G5j%f>F8u zbLqz|x(6SKqHbjBchj2KF1z(8kCx6sL0a?UsYA@MYEH2Ms{mvEYzp{9!ZdwlYy#`) zLS@pxrI>yTEyuXrDgX68#+vl^onb%Qd@GW_&Mx|0lk-!|e^Ijkb*go5+xCzk^JbVf zL0W44?)R1)6a6da!QcY8_}}unR!t3H4aV{hZ#JKhyUIyx$ww0fC)H{q!>04)6)h5#_Wm!;8yxIg+`q8>P__cf?oMq{1} zq0FkQ+;9Jg#C%lXP1G{Bd%yeB6R$}UE2}8I7hNZ;xgrlDVbZz_ybsstVI-_SB8Lor z&Uo3+ug!58=BMB6e_Hm=<)D)dgI#iA-jV}8=@37n^_<=^PUcZ7SU<;;t81e)VEx5V zuaC^0T_wF1k<}ldichG{Fef;$ea-D!)fpV#j(ZQCT~kxkSl)X;DO$ZMw)@)gUVNm{ zKmaJsT87I{2?9XLw=W7%s8;0tQ}`?@?r-D{Xc2szccdL5xO`RScMU>7PfP(KHH_2i zQ-tl3^Y%(pwJ_)RPyM}JzT6>c^YTj3!*?e$$WHbO|NT4)0fl%kunGIeVF!1RS%K+~ z>YUs7p_`$34Edb4(_$OT`&uD>BrxBufHJHhs-T0ttqk!n!hq@8k+=weo+tVVpyM-d;B*ild|$y27o$7W%z`QA!UZ&9L0tYRb&SFd;Mm5!sU^(=<3({gSJTdQtl6 zy!B%P1c%G%!-j8!sOVs_J+^A-=tGEz@}0e;)OMx*A7zN-?t;V1b zK2;#^Bai7P@{SaKB5P~ws?hFRUNoNH^ayGP*ylJXJ2kS+0KU}t4*6AHkoDA8{m+q7 z_A0bMtTwPeAqg^X518F&&AUb=5t*RA)+ymH&PWj*$a!rOag3Nbk) zB_=NJky&rHUi%ZKSI)~f^bHIY(?w9|Va6Sah-74BB!X`EizAgy(G;TOb0%#x(Y%sq 
z?Cr^Yi%RLF__jPTq>hl{yY^R#uFxKp!Cu#Ef4{|LzdghMn96YBh46tm`kh1X&iI}zSL|o&_3^wn}jrlrQqAQ$y+Y!V2U?V{z*~rL98gjEIiNoYx zwY7G#kmt4QJl4$#x?UmkgC!ObfgxQju;D~D0}{%cft*Usi+EkceCqMfTPpqK8GgrUpQ(Ik&@yV0E;5?9hbJ)z z8RNn+|HSR+FTZ(a#Tp0%n5`l)0oe{M*et|qq~dKy5=Tr@QpnWQL-6UM=kB`VrLsql zkm+HRR8%wTt-9U^1fat5eiYh(357yE);f8opElRn<5qO% zjSTCkQLM0sPaofQo?Tt!O=ou zNQ`iWFSKSSX5LT{0m=CuT%(t+#)K_yX}g(fAdygTFewZeXCQY_z*I_fFezS$g+@gM zrKP1o{A3qis}*T*)qHsWLOkp%=X)}*ZA9=doy4|4x1l~lsUlLdw3iDd$-FAW^ zs`s2;;d`UnrV;d8Oja&jS}|`xU{3FkA@xL|ua%(u(iZ8l!!Sa`n@VNqfhyIgh-PD+ zx+E1FmLY&>nFH#L#Cu5SY9T&}OpF6BvNAKDd+e;*6%ogBBmR_hjb3BS;OVltQjDB- z<%%B21;eiS#Ee6-HKBm7Uw9YdpY{4Kl9R+8V0Pz;4D)@< zN%Sr_3%gV&B1B0%C3us5)(h|k(L^;fMZ-Vt4PJB#NtWhSK@}mJOuI*nUfMw~<+--bnUcSV#6>2t$u~ix;Ok$MIXalo#$v-v@JO zN+UVRkei2AYlUV#EzMQp))<_|{=U2XcE06<&E^$x<85_n=$wD}UQS;v$2WDMoWtje zSTU|g)YF4b(VP1d4=~DpsJEZp$M{;7;`kj5oqsV3ExWP6|M9mPBEwS4A30EMS*Q+XYZ)vAKq6yY6cF zF}y38cebI|xD*;^O1+#C-uI;-oPFUd0dg95`IKVG#h#J{v3JhehOS3T4NXN#*E3W0 zh(FY}Qt%(9DsH%OZzD_;-4yLhBk;O{E?gf_m^kk`fWyxAf$n+-z{MUm9BbTAOxVGu znOmwbfPi+sv?@7CfCYU-%Kp5RAw9vT`dY=4erltEV#SQ(f|-feo#{=zdzsB<3#qxT z`9VvoH@I|z6oOc$7VttlY9(V{el{=^#C=x8`<5n?(u3avu{yNqIZO)D_I6lGXgx`B z;}YAq(L;NezS+Rcl#J-r!D)f^Ao*#uq=QWrH+iI1oaZYb@+^MO>a0g|2cn`z)$U{l z4>P;2P)rX9yGQo83SNY`a{(?VWK%2^sK8zwI9rRR+1XT-d$VoVmmx1b7=gs#hGmX_ ziE-1fw7b~)exXLy`a)V&`4p}SfqM5~Ooby|PsyUnl6lEp*CBg`H4-GDp@>Z3R*D(e zzQ8EnZN4KaLyLhz#oFF-6NS^D(bC`Fc(S4kdb8KH6CR5_4x8P*YEgy-j%*W$)=e7e ziZlqtmJ;HrVC&c`gnoscoXy2>MV-Q?#f0 ze^9wplUy`eewe8{7TRn@6;0qipI3_G$G9SzMJ^+h31bC%unaEK1%4hTzc(ZPe(Y!qX5Ne6X-cT(hcJzXqG>4CXZldCHuR~fpsU%`>C_m=R@VamvUd0e6RJzhBx|;5K z&u~uo({NEoIiH_p9iks&H=XzWBtDE}7E4A$JtQ9Mv1q~;MKalwVDq@a0v&wjmLvD5 zD_L#7TU>{0kK6*y*CrxZan+;q7?l_82vw14pvq=6pH$V}<0QR2>C|g9+$`7|Q|Fy+ zHz@kROK@YpcKO<^frL(9$@nT*=$nW~olVhBX0`Hr)>+TUUv1r>Ubq_ewdqpjzIWZ> zi{4dXk(;-^!TjZ%O>RpfX&}QrUl2Kw5M3l(kh>|Do-fY)@I62k@~E3Mc=pf~`tkF) 
z1KR$GoalNeQH<^P+{lb4DN`DCS44KnAg$wHg7@+gNJ!hYKWy?}0LTrSoU(E$w|7a(xw=N;4!DStL?w7+S>Oc(Xo2(!#D@iOlh_!idScdo?bSQI59s<`Vkod#c$LKY>wYO9?+u)G(b`A4KGB?Ly2D z^y%uvQ&0Q@rFF~@YYU1;w zK9NA4yi$O#MCK0qqpn;=3$%sTxH?@9qPvS3!MkOo(edenQH?zDehZ&-#JMD3cI9C> zWzj;l*euAyZ&6&Ua<)6bdDQgaSR!{5!X z*J#?%3F8iW0j+}cttLb}&8HcKXAx)HMX5wQh_EdyhJ z!~97DI=lDRM#D9Jh0hhiEQ>vqk6ukMgNYH zs9H>=uTo$!Y}X%Dt3iw!#e!j&EfAxNR`#jho7`G6lDt<>mrnY#!iQme!z<&Thh2gw z;fZOJ9P^cpt1nB(Hd8&}vBr9%7nP^?eV?%c?<)DJ zO3{mn;mw|F4g+}H56Q-4c<%YTRZJUYY71u$7H13lj;p-Wq=B(3rR7c*SyDaP7>#&U zq~A9ELSQsj7xE-6=AvzI1sxGFF*+6&*6?Pf#o!B*uGHK79pkmmie-krM5Am+lYOxbDXk{zh)MaK;u>ga zX{oqX_{5bnK{~~0F>o2=3$2F-dlYSpV>NgN?;~4s6*E{Z1{e+GI2tbCO*xl3t-KdLuWyIka(#_y{2G>tpoN6rG@3T8$MOtDvvl&tA1c-5DM#vAcTeELmt8J5k zaJvXkJIO-crIJ84hlp17}L>`5dXCwDP?7Uuq}5C(>`ICyV=WxviUyy>^s|4A08S`n>GP#E`T_xvc-YzBm$z=@nBL61cNLtPMCAjE`Z=M41NuhGK7UAsFw zI^vYEREsoFOiWDpZW9s_UG(-|?1s&>CuD&1jMrw$hlHkAR=d_oJOG1$4wgE?p9qQd zVLOQX3I~>fXoIZpZsxE&?n_a4!Bjjob+3{HIO!1~@#sa-vCTYJw6ZsLj}{K&%xyN& zA>9!7scTACx`j6-drF~e>lE9apkJepG%sO`;@egXEWGYT{A7k!ED3}g5q*znMqPEb zrluDq0zHY1<#ny|vP1&2_Sccpg$2tRLQ9Zb{+M`EJMy*E;Qgmpwgzx?B+X;>170yG zsNj))dw>J$(va+dLvz0G6;j09TNBaIw6vk`F z)C+ii3(|Y}oAD_ohamLCv>ku#X!l4a6JQz{o_m`^RaVdHB11z(-neai%9O_2-(Tx` zeDDRd<9;M#d=14r_U2b%EjM4&ro4o=%wBqzO+SyrYP{a)rqff#3>bkL-{CfG(irBC zGi1yDj;KmJe`r(ay9aw`nsrthivH)@ne~{dxRD*S>W*_0gKx+1@K`9+2lftHKylN^6QVDt7cDq z_T!d%<9a{mA@2O!P^{6Q#3E(v22D%%qs&GIAqOn23wNiB1Ih7rJm!zcL?+ckI)>a` zSeL~OX&(tIPE@LK84A_q9*#Bi-$Sk+xz}8^wToh0 z!?H>XyNns1_;d)sQG6KRzJ0qHmX(tuL&5F7X<5_sVhExb{=7d=S+YQbVoFO}d&UHm zig8VZQ!wfNE>+=xXG$d#w`VyO zvRY#`x2N+!lj7xpsY{$y@S~;z>~hCzWy)W^dL}gLqb&)L3UxUsHbo7u(@!_YwFuy= z*X*zvW2)3TEq*23)PE$)5wh0Ito@o=__p2GV0kjmE(&)5Q_aM^2SD4?eq9xvS7 zYW%6mPjhO$V`VhZK`h|lm`9BKP7j@f9$wwfU?=EHTh z$O0+9`EjvFI1~SqIkq3aT?Q%?{(aWlrN;8!ksGQ!LNO`*;A~Nk#ylhhAb>Ajm96%S?HRJ7BJ}lULU#_H_ zIWB}Q{+gFp!`mG|m*pk=cMCAJ=YP-%49UZj3Tyx*4`BgQX!*2p1M=2Kq=q5AIL(&@ zQ^F=k`qhy*f2PinBvhz#)mkqed(4*6W6tr(jxzI-=z3L>RIRP7iEBwd6Ly~-r$9T; 
z6Cvt@S{$}f6RLx^;zj$+_36NnVC-LeLocrSPIn&rjq9y4+>FbsGJV>5IFc~S*P=aq zxU3X#v=TE`OLpjkTKw7heDK4a;!N_xtDXZ@eEiA18yzQs;pNaAaCQ|LvT@Hyfr-_ zsGr_*K@bDRR45QyzeOX^-mi+0HXepuZZA}(tRF9`CP7CRrQ`^>ciF?dQW2vx_B!^P z#q#955I#|T)Pe6uW21KKom?+@*>8-CZO=u;xP56>KFF(}ZVPQTdy3p4<1^VZ;sN*Y z-c&?uLP3S&oA4T*j_eQ6Cwu1#T$2`8Kn#L^tKN~HJaa9_hDC*VnBF#cxDt)c{mBQ> zlnA+0qcn(<24xSK4qFvnT>+Qmk9ps`Th^UWmkS7P&zKO9%3+fl^k<1S{ZM zVJx80fB&%oAJB&|>{ny7V$~_{b6Cy4+SxfuK5WOr8MCI-^6-nnalH`5{qfGbkjZ#{ zuFyD1SA(V6?I4QvesnaKB_{Q}mpD7~)P}7Sy{YUwE)45KC1uG}hJ>?foFYivcYHwd zbJ17J^0|Ev%5QBTgoyI7rGz_vqi@D&?`wzDYhykjzR|G!bv7)z${%}MpN!Ytg=+Dm zy1=gIt;Zji8=oT5v=YuT++>17Q>z_vMn9F7KhfUA7Gid}k@jhO+mkPJ zV-*d#Zskg{)>yR1j)K_XD!r2RLBZP9S%G0szNi`p$Wxi7ukyGnh7kpzM%9K zV|5)&cVUrlCBN@#XorK?@(ylP)kjgMyLBJ;QK2F)4A!FO7}fv)K%_0i4XTR=eemic z5LQNbLs(BUy#2QEAySQ>EI!*;I^!=_Zmborl7t>k8oE3wxF{}hO@GrnLUQyQ6@wzl z#|7l-XeF=`nrnUM@AU9^48_ySP14G1Tid{|D^3a-0}jAWxTUv z;%p@b*(u`!*xMq`m0RlHKFdBd!`6OjBia81|fGy!_AKaJt(|&?9`Q7 z?@@q_3zxpgK_PEU@Ih)U*H^@;=CNiDK*HdCEBz&Bj2Z`{XYw#@+|L%?+hc+INz$jQf}0q4<#Af z;`VX#u@5QERQt2fZz&m?)Y)=2&|ao#Li!TjT-zcr1*s5&Xd4rFnrac$i9d$& z6-oM7kQTj9eF8V+@LrO|*1%M7;zM-4xw&+_{QOjObTDIqO!D%id!KM{Srg%6C?U>_ z>P%tGNDW_~H2H9N9r}tTkhGX z8hexA!?oF*H2tlm@k9=jD^80eG|R$A`)gMP-8gSMEtonslDNKfL;Hb%epL><#qdKy zf?2|uhVZ35Fi}o7?!1Ox1^Z~?CHSWG;lqcOpFiI&HylO2cAPios+5YMblF}>e?mU` z%9cvUbK^lrs^E1h$;e0fb=Za=krQ=r;C#ZUPPrU5X=PSX$of9UsvLTZzmdV+x2JIT z`ow*(BU$ms8$+Ck^~ggGF6*~V+Fs=$_k)sBH$VnxEp=+0g6HRdU}unBp)%)IGsX|zQk275nq z*xuH1f<=o;p_*@$@ZnWm&kcgsNYYDece9i3CNM`R2HwCpj zM(8h6#d^Y917Cj;W8Uocp3RyCnx7O&V^O9D~vE??GFJ?ZK|&&I`?QW4T~1V~|7#6cjKxEscd-A-(;uD^<`=ju;R9Yf$5CZw{M5 z>t()cqt7817_@5Q#G)uKr*{{SGwup+joF({)YXg) zE2A9((5yyYktB=!W4%Rxe*A*B$aoA1vEU zM&i8G3z&g%lS|bq?58pt)Bw9F6i;;mCMxxABcbj@v(&SzY-?+4u^H%9k1b>3`G!Cv z13{9vJYq5e-9vj}?V~OFu~ZKE=#Xx@QiGhUGeod5z?_YKh!hMDMzegK_>AMh?Jk7_z6!Cod;qpl)ze2;?f24bL1%h;^`zHz z92^|Gpe|4>nx09}Z3QA54Dm<(FdH`079aa=0rcxcPfM$M$w6*Ln!>vo2ygyOL(_jG zhsMsO>cE(QJy)fz&N=Sj*) 
zA|0JYR;ms8c320NAj^h7-!L~&6lVYZ;|EMWZp$HZ$);w`Q6C=!kDZwWI|JfULfJyZ z-UMCQlm}LG=}%GVv*|+d@$jgeQ3-l+0?6^cFN8Q6-Gba~eyFCFIE+G^recKz&)g-W zFQws20!#MI6vz3~=IyrjeE-0NPShznDy&(_YzSIeoiXEWlsJEjc-0s$%leLsX)kOF z)`WVFp61nUqWQQtL8N+f-+$RbmlCeWmI{mIpcqfP?jO#fdqMhh+q{G*Xrj3Q+J4rO z>FJS^_SU{&F2#yOqf@LOY)!T+vXvUk%09rKmX_6B7^!6NI@~egqr53myxJyg)P_Hd z)eL(&)F>(o9Rp}rvJL}sN#-$@Q2JT#BnT?m-I3F4S0kdQzcX%%VK6Et2 zS3dU1LeC^k4P0+hUen_Lrc^~!ce_|(db*4~%|6C$#dV7uS7=RDXD`85VUj^AnFvW- zf--q*e~Nzr=kXI25vO;gHQb$0F{H->+~Q8nX0D6;E8I%lZ^8;k_tvy(zfCwG2|T^V z3_}(!wU!}aWzN1Fic`#I{|$!27~^%Yp~@$xh*q$4P!$Z~?e z54R*D5ws=>%1S%w#s}=-&o&GmTHHW`+&r9#*J?+HY+~XPCd3YF0l?P@;b$5O%;LKb z`^uubNp(E=SuX%F`kF|(l)@v>%dsCQPH7AN@1E}`V_f$oo2kqeFTpT`nMw{&Cq&w^ zQ(<+L!V04z;lvCuv4HOGZo8-Yp!@1;h1dgJ%6Y)KAYL+sHK2Gc*VsTQVMihT9&B-; zC08T<)`PEz-v{J9vMY~!@7$?t)b^yaJZ0d*&1pLVQb7=a=uTXgm|?Ym^<0q?olDQT z#J(&)N$#arOupv1j4*Bd9fQ5$FabxHM>To}D@yF$v=)Sr_12A$0(+FCqbW_Pi6Ois zo;5s;5qzWu7-Y%$g78|aLB{o@CdkwAaZLEz#F;nhs@A7+Wj zQd4Hg+pC+LA~jp20T1TVXQc#r<(99pn@qk~vE;+F?$;3?5x!(7L<>Z`EL?W6r zIWq1-Z=VG$*}v`Gux^ctY~hAo$tK3w{Q4c0ZS~myMb}$LRn|cN?vPIDZV5?A36TydCFIcE-AZ@IUFW^O?;AJ&atzeNK6|e{*E8o6>k2z9zdq65 zLS&rXL3?2!t|N3m-C)=D$M(kB+#pZT;3nq#P(C@>&Kn+LXjfT24&eqjotyi;wj(Lx zt1a^>KC1}~TWqJ4#BlI?5?xdU#%bNf&K*P#(cn^?T>&NM#&Od;)_!CcZqU4~zxGyy zpR5nX<>ADJ?}gD2Bede=`E0>khuyD2Hk+pLZ5g6O|3mIoEWrkDnW|%IxX@%^{^+|a z*&EuJD->HSZ;!dICsR@$@7Qq{5QM2p+T45^Ioc=(1_xPqGzf3rRV=x|PQF$va(qzsj9Ie?D0b&-Q=KJN8D zdET^EQV^N_+{FtTZ0XTWraHU2>isGezPUDbnRBgVE0+Ln$(5B$^%Z^gBlzf`L0!{9 z6@afe;eEZhU)2{>DJ;JxlfSe0He=Tu!E5%(d@@hC%p*uK@nwX;S!5g(DT^x8lfv$7fPV#2*7^o^wRA{Qr+xW$lDvs_p z;>AV*T&d8jMK&=piHMEu$&(1KGU-D05&@Kx@A;Tq&0@3b4u|LQdoMOU08cqUg}ny^ zluL-+#qQMgdNg}NdU^!8i1+)>5HuIT4@ycgEUueSoI0B+bO{LwtFe5f`8w<1ogAMR z06O)64=WPkM5;0F@aK14M+C5tZqMiOW~bV$4_9yG`lR3fjtEd$g@rURFL}`c@=3dV zZ9gBvtW){M;KQF7&RaWfIlVorD9bNyKtav_0(1qvk-P>9@HDCsX#6ph3qlByK9+xD z3$I)sR@ENNyL}&O1XYfEeG*(uS=E&*IGa#Bd3~{rE_^6&2c~zqB#s=V=0k_xJlbX~ zd;1!{!pJ28sxOdlp{^7;8-b7u;3oT%-|fE=_0?Bm{;zWVE;H~&96KKP003k}(gZL} zt=Yxg_xLx8Zq@rw_ 
z#Vu6D6P1ybT?Y>!%~U-?0`hlv=7%GPhJ|kutU)^@*z7Fn83W)}K););t}X7gW;bum zycSLXom&0obPIzQ3-TxFa&VFz`bdwchtlmk#i^5UK^SBQe62rt(_?kIa*l0|U*zjp zq1IqEP#xT-3L|BxlyhR|E73y1&O{%Y^hWX-;FK_cj8{rrJWxL{(nm7y=Jr-$OtVOV zfYD!4O3Hb6l9ACngc8rR9Q0;3zt1mbj6YoN&ozDgmxQk*c7KfUlGi3ACgyolON%X9 zjbr!iowO0a`WGn#y-A z>!(`-`kVy$3X6C_EP{gi%3ApHN(R1r-y0{X&2~m|(+X@PJtJ`%o*wU1-lbMHJ75yN)*(#Gaii#TX36Z6-pjk@V>V__ZkKEK$`hW{IcxyGV!IPZbb2$EQ7;25Z zsu<|!j`C+nbi+dVM<}u4APEFdu+JDhk5iv+;s=@dWSP#SZHMITp=^J3{Tspd%gA>m z7YAj3m)^uAT!;ff#iScbKphe}LETIOnsK4{pa9X(k11kQHM*qq8iwhF(&A@jh;z3W zdutmT0LcSDLVbo_OhlhrTmm_VK|7d5D z`>m-#*mK5E%oIfIM-gy8_RZ-mWBN}IkU_LyH4|Uxa=Jn&0TxFMg3=|3o6y1r z9MpTDQeH={6b1C>J5ID8SB$~MxBJRqvRAA)(2yCF1Dz|!4bLIkQJ4sJGmlO}sDCR` z>65+0dg7M1?McADe?j{IxCdhknJTRB-Fob5*I(I(Ny290gBG) z`q(6eQ3Lr$$D`oo;S%#vD@dur8LvHp#y!BW#r=E5Li#3y zEm^EdXX}4LHiwope()vB<(s}@aX<5PT2D{USIJOxeS?8$#xT>Du;6ezo-baXrrR?_d|oRKb%M=^c4oGKZ~7?dcQjr9AU`Dm#fNDnvd`# zzAmLLAb;S=&w6o} z6!*P7FZcDD8x&)TBXPK^ZhXeWb6unkzkAm7^10w&x}`j2Es3yh1HD|2>O=xBjAKg{wNDQG zhuVFR6*`+;JDEvUW}{8^cYme=tJT(Byt_tO-6&GiZj|E9ttA=mqh5)-^yjCO(=yu* z%au@-jYH2SRYs`vBh>>R^-YCKh?4QmuZ9AgQEdatFO?U!wzvh>rW!9`#Xm06&BQd? zSA&)_TiuQiHm88s>N_&E_l5D_NZxsas{G8faES|P8)I?o5Py%!&2_TCsuzAZ)9tUb zmf8m6J_QZ#M_2>jGa$$b^WV2(#bllgqWhph{;azXPA@1iLT&2huE&RWg?o^h<)FAv z!vtn38=H6gEyq(pITSGVn3)o^zlz@0gD@W(gl(?$6YV%-db_jK>-H{EH@MGC4?c1y zesy|W6dDsSmCKB0EPt;sNj8d@{a3da_?p7wj;8I zcF%K8y{uWxePq4uY_aeW#t&lI@sETE0n*x==AL#KGV$*4WaJ@d4{e3capQN8`>uD? 
z)k;WVtpQGecm?=d71g^jR699|;vYML3SzwXeda(Ug9$$t?%$cs{y7uPnyxX=^q57D zfSi`8tA~D+FN-&rNKd}{O^XLf=@lDRgTz7o17~+m!KQOceOw%_N<*y^_M3L+mpYQ(wIe^5XLDCrN{rV+T3NRc>qSoz+SAWivg%zuKaxXdKsgC7Ra%s z9!wF*iN@EV%uLD#8~=?Or(mzEz~i$z*bc(N_V6vA=gwt{RkQA-tMvq)T*8Wq$!YLj z50rg(E)h$mh7CeE7c8i~7qVVeHMdYPiK$ZN$xvKRTF2jE7*Eafs=2LOw6U)up@5yiS^L;Lca3vw@VhL*au7}mxe4n zJ{$SO#4zS$L95u5&rfC|_j~nx&As?$baMUZ_KBb;VwDXQO$o-e4+dp^xTZ=GTpIM~TpD=hethzvUE?E3EfMT#5+O+JGLKjm zlmipqFRVq+UY7PiZpI;t1_>=;N*kM2ZUS0fAN*5a*1#EQeX*J2>ZS{;p#o23V*N>nu3ql6UL%zQGq*oh z_20yh%Iwejztr92TK;U(w%I=%GZrb(xss{PL%{+orh)+pc|r8@Z>+AzR?(nIqnU|l z4hMZ+(d10~g^}#9+NA{dE3pfh&$=e)@ti>m&pE)4U9L~_lDpuLN@O?nR`vE zVMLf*_3_Tf6o36|>OMk3WOzw4)atJa1ig>QE4xn$Xp)#2Ue#>f2;hz)t z=DTnC6pL+YvAPOEF1?~T<@|piY#~fKM7Wtr@_*q{OUOu~+XM6|2P0g2D@ojhny$P= zgI)zhgKVhcVPdL-L9#_D(D*?IyVf>xaA(~Z@zapEWD3E|u#rEi%@oSm!j4qpXE=JI z*NBZ2LFpm`iL6+XT5r?{e4H?A)~Pq$Min0tom{xdANY_mPPvyZrHo1BB~s-Y-M|YR zCe}PC4?j(PY&A2})aYI@|7rJ3<(EidVX72kx(@eT*(y9#6Sp4>h_w}g|qMq zUs&Zj%**q-+$NbO@3Oz#>GIr9DIvRS>!dkT7ylm*v48y+qQxdzOtg|KO4IUdEFSYR z!MlV-qeo{6?ZS;`*Hd0&bqynA;@!nayMk_LH5HruS$pravP-U?Vn40E?*~a73STM) zEYM)}VF)=eE+adzAVyet{+xuWUKXlprA4hBsNx2JrmpupZ9Tbor{VXr@sQwjk*6u*alW=y&dCgwW(+=#nn|3uVtx1k#LPZ5f( z{%hN%23XAGTj0u4&#D;I1`uJ+#_TT<8nGOuI%-YqZ3zpy$)?5wQp&v*BO4YNwxH#h z+kCxmB5N8oK;@!u3QdvR5}oQ>4QAB+@1>Nz@u2bzLoEXRO!7jWT7z#G0R& z&K|qkKKmtnT&w!yeyfsWMmX(7)5!omNB#^rXe-z>DCQ2C$KC;*3yf;&{G#d;#dH26 zPsu`HT>qs%rXZEgj(WxjFXeNIA#X~TGhwjk*u_HX{sp6A zn&=dsy^(&Xeg$wSw9|+rEIoKHk%Xw!P-D7*0$15i;TsG3;+N&(hmpb&HTH?oP>=GEuD7})onnOrLubB>aU8I^TQ zRS4ZL5~-F9UW@LCjX#UW)lw}>oBd3Ly|=aQ;;s-svAgiV|NTg=~UB|vAcf0o|_K&=KMTkm!IiFXzpxRz>DtyeP3Yb}>TPfMC{l?=Ttj_YC zBv!6PXJ93ivixkw{>I0@m?h_**2}2D=X#=bZtN-h0veD0ZZ?)Q?;hnI58r(yXHr@I zTG!?ugdd`?G5oFNvi<%uu>0)=)WIB6^kDF6;hFa;RI59z8I=fx&;eUbhP(ZzSUz53 z2Z4V5Ufm`zIDp1@} zSQ|>;jB?*dwxOYFUG3?9HT~-ge$p?M&`^i2&pxT%t0zUmkjo?7wTvL|aA+$o?R zkV{XM{{_o30;z_cjwb)<7AZfKfHVUiCMpY{h52usFD1#A`mZbujq89h8X(f&Lt=x4 zFD$xcC=Y(~f-3VO?2^F<#{o`8$yP`d*ZS&5+1}>wQM#Ui{x{;8&y2O_sL_Ss;)L-p 
zQEUA|6Jf$BGA-~x9A7IXSgYvOg;;7?k_B4!m8n!fYD4Uof>>}Y#mWl#*xkojr+eyp zGx-_u%eFjIt?29%0^Pu9OfOvMzqrjnIyiqK+ba}UP=Kw!6fqT)YlG@!C2*e5t#-O5 z+UW#S+Cq}&aZjf!zrmJG07d6Xl{Is&RG$A%O$I3JWrO~p)56pOmbxUb?_x;X=-~Vi z!Q?o34VVo@LJe8Y$V?+B@p7aO-bK%L_%}vq{AT?LE3uOe`BwqV9+Gv^=r+@szu8-F zLKs0L(`X6if9fIFbCHtze?J!{=&Ni)_cAhNy%@npWVNU7uIaO(lfEdBdT&7`pE*@r zkn9A|fO6*lJfL6oanm1E20)BxYKQ3ij-dU;FZcDcFBH)@WhE@aKNSqns6xTrAgBqf z5dg*`7z2=D$TIPONe00S7mj9()+?2_a2xDdz>*ko`(!JA$QGC3j1z&=kc>BIyD)(w zG1BouhpdHvek(;rKoWfBv~pK}EYO%}ZDdr3_~EQVgZ+A)q2c_(GAxYShJ1A4N-ws}4DF2>YQe=Jx0&xGuBh_@`)WEOR z>Y9N2{k0eZ<2XnE6FvS{@hfiJzy~TeKTxp6+nTGrE*}zrciz_BZBuOG`PnREuajL( zlv7mi;a*5Nfd`+v9*bl6@Vx|?;kNVYjknWz=?!|)r=SYTa~7RccDU3J_6mwInT*yo zY`9gTuzJo3bB+2Xfd^Ufk_PH<@`|exGBH7cz*HwU_J=xW-hkqf8|CD+yD+b@1Q)oG zA>;!|;>-V4EG)?XU5E2)1M!p^+JCNBQP0}O_)V7Ln%qMhQ%k4l=>1=iWhm!V>rl*5 zT1mYP9cgOw&Aa_M`s%rTB(E8O^9RutF+ghh>#5O$d{m9Vku+f32MbOTjjT}LDj%Of zMzu;uQxhLYdOt9b0aQuKzbH|Gj|4JEzv0KDk(kGYz9uIu<0U-|4_dsQ+jMYwJ?G+a z#J%G*+a`2zHQcrs?y*eV@ zI`&9&!{NIK$ie$|*`(nR|6{Y8n^ES61RuMS`&Z*Xh4htlA!4~k!h}TAepF&UH@5{F z`FN+UZ&O(wuij7-vk~^4R{K#%1Vgwtce*qjB!+*lZ0)RCq~&kf{`sz0B9Sq`T8r7P;HN2?1c`omR1t;CWRDw zp)pze&RU}9o(A?VcQmu2-<2m`zD zbmcqd(AmSKmc+!wPwVTZSpu%e7ke`^ODy@6u5o>gPzETAg}ziiWsg|L^_4|)BNB7us^o@CI^^OLf^Xiu@GX0e@eDyLC7IWbtM;{ge=28Lk0!`VV0j)E~X{l^dX!;U9^#!&?Yh4&o9 zn-oPOHa|YI8z&2vGdqq5hNc|@`9vdet@;C)c}nMs`tiITSt^p6yKC5h>v$SmI;dJj zZAFk``I)N&={LQ}BR@}=8yPo3b`GE8oO?M*-LO#*;czEjM2@;flZq$EL_lQri=HxH z9i)JxnH2M>C6v zNM7>^0cAbYvE4bldbXw)Z#4;Q49&($`=k>#3XeN}g=E)1=MU8qw4x(O0HvG-OS?f! 
zm_tOH%@Dt_3Mx^_{j=GZT>btx{68Fw=g+&tFiA)2ZFPhi%N}-#&Bp}z@+l{pDQm-t znFj?!_meZ7IA4-PW)jLLS zQC0p%e^WgC)aY0>bbvONc0KWd1D{BrRZ+r%TRROukJ1&*oez`{FuOs#W@*U6)a~gOs(zY2@G+dmA z4-_SeT_xy>Ga{TAWb~wynJnL_U6N!i3&Mqx^xBFDAru&Vl{m_X8Y39cVVh4ft)MKJ z=VOGH$#g%bQT+huUXA#;ib(*IW^h)8-N<|zl;8=_CZ!Vuf=%~kUQ<^|*ly;|$0KZH z`BX@hm+oPwj4qp|)85R@UU*`h)&Fd43-oU6c&~%chUsJ6(Tf#DcTMp0t*|dO~SuH%Jtt{_hfE|JNlL$<0(-g3TLG zA~nE;{Y9(Y!@DIM&4t0mu)u3VsF)u%%AZRYX*}&cI}twwc+&dslu(zdy_1?Id~#YS ze7-)_*OhSXPn#v=-F+7Y2TXsIVs8+o!4FZe*9Y|l>;K7s+FU*_4_Fme7=*)hf(PK5 zUfcqY=Dj7n;g^!-FB<20%Gz7Qsa6dI19fULnP%5AU+WeoQ zT2|)2)`pdbmhrV&(|vMg=*#o0umofsCU~}ffoMEixo`Shx*8af54N$6=~#{{;yp}& z;m)rB!Ij!%5c~V#G%fqtPi;=G;JnFS^``PB2Ns}mc!qN6xj0Y8)K=)kQsU9N#6Lz4 z2FwT)X(YaL0P+5cgZ5iwcUUA(6>bU(I68~E*3J1=vmcN$u%+H2v(%tpou@iO1(@^>qcS8yHY)cCBAP`&IEgl z_3%g-Kz28>JIJOH-}!GPKZDJqD}8Cc5tvo(7ZA>0&We~hr^QQ}l`r6v?BqOQl<;XE zWSnA@#?LB-Ln_o9%iVD=vhMs+@vXIJ*Jv?f9KNU1nqQelh~b;*H<8)QptK`S#^shP zOgL0hjkwGI;zaaRF>u)$7vA!cWs7TmVwzTvD2}V`MX%Xz^h2nQS7fBE^nypS4zIvP zw;x5g_}TH?v150HfSD0wKp2;3|Fg#^n}Cb0%=|u^gX$M>A1=oLogC8Hrh;pMN!Pp! 
z&5CjeTz|$r+bP|ftu@E}R!TW=zhhZwXBd>!Y8Nhf{^Et){zW`hgrIg@YpZw1u$Zz0iVBP)byZC%Ui1HO2&Rc@DVzO)W zzNbN*`=N!`#BWg^u|(n4_(+QdRdf~!RH$pVzDHdU7E;K_o21?}Ze~A)->3L(9|0j~ zW-t<{)30E48ctP@7L7ms8K@!%bVMjBt>e2>EP8{Ai=n}Il^)9B3I=NeX70bDO z6e%C^Ss$EvqmU+@qmLW-ar>)BbqhP6&Q%YD0u8Aah0 zC&$eZKX;W|+=blEcKDmC_Q{sMgjbAh98vw8X>+$1vcJ>eKl%O522qdCU1Yg4tCtlk ztf5EB{TO2_`IkM8l9t}TQJshHl0z z@|J(Z>5OroXkVI@!mSE@c4V!n56Y6T540z>rEJ@a5&8_h2K`JXnyn;42pg1r6z1x9Z%CeUf*zY5%MnEA=^&GXIHpcg-^e6RHNECUBjPex;mW= zU@!j@{P518U3@K65GQEbu<-Vg-5g)#Ji!LmJWU@@4CNR)e2_vhQ)q+-6RT znc5aP7fzq5G>@jjdKQPXk*>Xl7UaTU@dlyQ524&XD5yeTdQcpmmkyhhG#VfzJy8Y* zR2dnWzhea`Al+%GC}v7^E7~EpgbYJ4@!6H2AP!$9)h8R&Z+K?YX)K4n-$a$7Du!db zu*E!U>`wmwkhCejO#9>CroSQghRL+iPa@-}VbL-*zIH2eqwk^5Z*GoL3=&+AJ#GY= z=8CLt=2iYQ{11?h^!y(nEnk(}()9 zTW0i{cSK)Sht;ArKbzBmQN%8qr;krTVPSXH7(ft!trrpn+KgR{J$|6)kBc4o+QJIQ zHl)Ev*356I0GJH{+lk!vuBO{2q`&WxuO;F%)9U zrdTAQ>6Zt8$ddwDVf#p-3@q4c;NinjE54aGt{I;vRQ<{?4ns(B#zQ;ZP4ttR72mO| z=gswkFhw**t&`o99|L2*77OW$E0ZMYxvCbsh*=qjH5;BWJdb6P0-0%w%)vs02+gN? 
z+|}CIBuY@lMi^X553-9ZfuJW2QLl#)2YU(^?LkyCixKIcTp~bh+Lmg%oo?R(wmjWx zD-0{S7mOq>OYNh!T7BV1*WLWifg?OX(YYH^5Fg7Q}6-9ttes9p*2dyW}ihE9G*F%d!Tu$N-$}2SfI3P5gtfl?muz$>Le~t@HlLox| zC-46z`C@_ToAvF?!RafiFDn(EmYX?GC?$L^N06&h0lK1W=qf;sJaXWT$JPKlklb|N zkoH*w$JJXD{};J|s6E2gD5=ypxn5eAp&)dRA2fZeKnFPyL@rm)$d5IYs&mctHbN5#YdzLuB&;_nu1`9l$453L_n{Zoh3F&cWUl`#95OP}dt-WaOw%0eaC;CwT={7Q#o<-~kaPg7>Dq;9Y6 z!NTR?(3@L_hGylW;IGlH+*Nmx)7!!qhp#?}PR<);pKwJqjk_yp$#ApS{=sM$**P$^5iIqJ_cvxIphAR<12teKx(1vLpH(?IfQt^{JmG{a+E!^pd>&vY?7< zc2k12{%h7~TNsnV507(3`2CE3^P2>?DWt#HJ7L+nLh!UiXZwQh)iO|7#+#0^n5g-3 zuD!s$zSLUaL(;2{;7R<%4)=v4cqlt>$xikUyo2m}pnUS&&aiX>vdBztHsO3EBqXbe z;@Byd^}q}Jg@&&{o{z1jRG$Elp8hArvlL6(X8s+Ap?eRqxHc_17F^juxy{dwdOSvI zG5iIV_RIGKaBi9TtBW}M0VhnBJk}0fo7v1OR=d;e&Zkc~e8x7H<2$H(POgGAy^`tkiBt<~tA+x4}K2LN)3m3d{nb-*y)o+2hDXqJ$2c5P$8qgB$xo<0`(pp%#8!YZ%VnM zWCo26GQeuV0sJb!--1Wsca8(52=pU|KKs?ZPIKmICP0Qb+v34~*nZ~_;rhU4)Ecq( zql5hEZsqA1%p5k(|4m@q1GyWaj-fLWEY$=gO;WP5Viw57{{aF-=Owop2on=JP)kZY zJw6=7MV(C^zR`oAO{R-Gy9>ID>0Rau;=FT|O;-Diutr9_KzdVBzTBy?p{&V3yjX{) zuWa<8-FtP*HZj2EM_bvCnw-3lOKkF*sDHDW!HLbTZ8tUJRzrdPX=NK(a*$(eQnD7e>RQE>=kT}d@>^V zL|f+vqH0B8x%!#zti6nftUpw36T)@Jd%0e#eiJj|z%Sfk^^PE#*WzST~aM0pvT4st|m5$#co!-(=egO3S6E z_2_GV zlXOC4S2pRpJ3jai^JOm+uw_f{(jfLLL=4U8KgyJDl-hs0sqT!gq1sj~x231ug9(*4 zTA1ku3Cc7saCmONhe@4Lut%dQnQz_D*&ew_uonV)URcFG94u@QT)zXrH_}?wzc2sT z3QeNHYK%(wzUoAn0@8{6ocLLFi3PCGcmmyRVp>`_3AcqVY7MDFlSCAAVhA=VpB*u5 zUWFk+C;bK6h^p=WbO8HqM)Yo9twXM~k;OdSd-7SY+^KxxM z6VldNVS_Fxw4n*_eIktiz=b54(r1QiHvOh$)dTY9-g^>!ee>`%Lzl=`Q^2QCsKd<2 zhz4wb$pfUH5i^c0GNc!QeJBDpfl8DkuLdl!a#n(=N+A&OtplG`nj&>5rQ+MSpA`vY za0dvFT8>1+J)v3N8ch-(o*R9&qj7Ow-{ehs|1@jF!0$S{X!X9J!AjJE#Fu)PznXg9 zri_CTN6vDI{i-@cy=N=sa=%&;QSW{#8E5`@S3j0o%v{W6_(0s*UoZ-TOU*DHu49$| zI-H)8vP>YzbfIg2K2O`;EO$PPdm-j&!-{oGN}IV`nOR9KVm-3_L|FA-CN^251--MH~t_hr@6q9kiRgCKMy~ zPD&}r4)_$UW~(4D8Frdd)Z|Y$+g2b(e}QA>OK7l64(rNa_`NTyFk~4J622hB-?8|j zcpV~}zfAT4XX(tr6UB;}A)$Y5k}31O)7nnFIm3NYXU(smb`wr3PV4&TeCrXLxDwl2 
zTfx^x6WHn<<IEbSdq5G1P^`C*Xtp471UhqI$!zIU{(&0RaJZ)z2$Z+8PpcmodE72-*ltngiJi6E z-$cw>OIS;g0nS)q+RVSjF>Q=#4u9%Q%rQYn5g8B;~ zhvnOO-h@KVn=zmnGw`%}^jqrCDx%ti+D)4jnuC-TDYc*WJZTSCu!$A~?;ummztWo+H!=P5qf#F0$7Rlbk*u})d;>sX-{7qPBm!@{2cCT6s0UFx*%^& zT4=X`8I`J>4AvY_6+@`Gfde6z>rI5!&glTVoV$BN%<}c96jo_&Ad~B^c}`jPZ#$Yi zMtG{E^+XbnHPf(-VYunZK&_8|#46Fg#2dVLnhj%enGud2K+FIcSr29avz6bV?i+Gl z-HUNew!h@0`%+9I63HAygkPl$;{??y-YEOkYM`bo-PsM&WNf2bm z&1ESAJzBy!p$jBoi@-QWA($f5H46afPRq0T!u z0xBx1LQ1>2nj(DvP9e*q!nC%y%k4k6Z+__^N^5p)QfyOu4Z+JeX4KxU?Yeg-4K=}0 z<18F;dfuT3v@37cA=6mw1io)55%w*LF1CiS- zDfti8Q}8V|Hp47c6Z|#fq)>~~FR>u4|1gg)zb_Sc4sM81+dpx+E`f>E*zr$~Ey$Qx zUyDN5qE5giF_7nJD3N+yoYs_K&hLjZqyFtVOXzTWdD@ub1!a5S=Z~T9 z4A*`Q=Vp;2W{+uN$ZFcYa^i^aKSe|DjV*qcH(?`F1R!2|;vfla!y}P!d4705a>8)$ z^L0^*t{vqgX>9%jb3%uSu!2ich3o>>KCeWoTS*FFl#bf(UYMd49LFuWj)+7=N4u6l zn(U29CZ*;+Zmu_Id?EV*fdeMgWQORDTi(L`M?|Wie0B85qqr*BHk|oyB}~qA!`K5Y zRTu)6PvYCv_1HO$QH&hLn5Oe$G29DurGltGO})IKHvDHWhN=vP0u8g-H~B9iK)&#v;8 z@vvtH+7L6AB(&5Zxjm^)Fu_lj*Hbm?0q$rFWi67LbnWA}g^M1W=&YwRGP74kTK4Vp zW`GC56xsQ_r5JdoyHHCzKa|5GQc&4CG?CEQ^YU5gdHuF_{_K|n_1elU zof#X+`>U}~tQ$4&UXaISS?>VBCIjl|XV@Uy{ic^J*=V~|Xg{I^bMA~o4`7&Y_sfRUUWm)R1aXL5jNv?7%c9JEC% zP6*EPsltF;0y3_Ngj(mHde%1)B4&xPL(dV|NDyt5KVO81;z?UF^$YR=yfkS~SAs5tQX zK&dEvJ(NU`|IwLV}2hkQKJrc57`zswC`!)Q}_3!`9Ru|aqTrN2Ff<(J5N5{4c zLWlMn3ppj@rRlyr^8R9aqNQcrpJe=iwtTB1p{*FG%%m)_O*3j2hWJW}(z5HLnPa*` z{pVF?Im*?f%!JF6x7)&FUDMP@KWL$Mw$;1KNN)bb5@}+AlTUlLY(VIJ7 zB1NgAw_Tct7#ip$fJVAMINPA!CKULQR|nWTy5%VRXs|+zetu-k33&8k>=*p_mlA0X z+@TmojbiaE96ITV?nu0%V_b8P6cLR>Eej3K?139&Z#&1G{$U(HV;n$cZEZdK)2+Nh zY7>~^#9-%7EP}v?>;2jhC(y?A-2%QdjL66n?rPbGJ;(1-Px$y3f(t)r{6g^^bd0=c zflZ92XAyW4fh|sd?g)%Ay*xj@#RCOk6IK9hyxFU>EJ092d65W^^P`Q*_uSyF(u}%0 zgt^ypt73g)^XYoNS@A5}U z033`UuRk0KgYZ3YQDVc1fPJ>eX>YkrNYLZ(`5K8mi2Q$!96TuGxGW4yrq7|jTy!1j zyA9D{rlRU(fxdk05Cc;GGmvya)F!xNpY0=W4eG2Q186G7Z}Grn+XXh+PKFj0dk_ya zGlMiNef>D_f^@dsc`lq5aa0+Tz+9 zd{yG+0G4X~%60uetCipz-vqzG2^!QK`LIGAaAdP-m+M%cQ`NqK?Vat0H3uxPg;hB6 
zo^p>5SH`e}3hzFL<}E_6cb+Y`mj}#k2BuzXbG4R5v$)p30^yqe6=!b#9`ac6^C&{2 zN+pVVlL_!t^&owc?jtzl!>E=LbWBEV2B0mR0>_|bk_HYeo{n%M5{?(_vhKLSNo z+b`!_Dz^RMTYz{um;ZGbw0||l?QGS{wKQrscR~KK#Vf|)&%mdoK<@wM5%z6WxTY*V z56{tL{}wti2C^VYt|bn=(c>R&y<*<(bA#S}Ky!ICq;gAR%*dx<^20i$5;nu4+zQ?s zx+$PAjt#U(U51PiF=8v;;khsIqpE3hW(b=BlcfwW;cbo3Nn8$ckECCPM(#;!0zVrr z2pIW%o)66T#!aA!G$*N!QDtGoC+Aqx!n=cZc6M$$N`9oJp?MRZsteDU${R&?wNLAa7R`aeO$-`N!6r>$HPghF<_r&krSmzP)-)42X^2jB1T@E4AZ{>U zA@@C&(CawP=bQ_NjW=Csi0%g2r`JV+n8?N&jP~{6KdL)}W>^r7K68u`uFaD?gm~cg z#%%#{4w{>I)~mjGSf!TWMiR4m<;c-Ua3@_#xnS`wA@rRXp=64d(zLLdkjG)l09I(@ zUdPj8a*s4+EC^E>*aH^&oz_)pZerU9Mi+mbj&PR3JM55;(i9g%9GM;{8Sk5BU)KcCX(!`JRbfVF$ zXt3CbsmmNwuMx`k9unImy8P%WpjCNxRmX6!o#XCm25hAR;1ESR1S$vO@MF?H`aBr{ zo)}4D#F{_L2Qb{^zRO%5DJIr_unw)%T9nbgbbZqpjy-P9k+hvk#z6MkmjGV>yzZOS zxeR*uIL$4UbOv*>t`W>X-cK7ZsX7EQo2c~jSrswgg6I^^>i8j}$g4V9c|haj=xv(* z5Q@8K!T`6_qZi=dM%4$kiK!IV>NfqU~s?2TZ9 zblKIp{^orFwFNpMITds~z?;lkEK0=8DDb!HzoTPO6{@Cp0a35g0mvTK!lby#xDmPnHZKC$1&W<=-&`)7^fb+|)a|t$$I3J4 z3YKTxiCrn^#LF_?Ggv2utf;?V9QE(;I1Jz+d(&)Fe11PY?F z%tcd{c_gdlv|fp_FHdxH{R3gtQkx!2&sv3^CRZwK;Sgt1P*fRFQujcPDzgHO6p7boQnY7NkC^=$|#$-g>* z>Bq8!y~e-aSZOD`bM7HH^(DYz>`!^T-D8KvB7?v*kn7-Pd%WN3iUvkj&8-JhFApr( zlX5gv=CLFb+SCLolc|{Qgz`t6LO$-tb000~41bJDV9Odm`i2Mlz zv!l0tMPB_HnTEpKsjso5QMsH`!ce(1MVFkqFi-cTaXkNyew8|HdQoJiU&A151=7*N ze!u_q_;9ynaw(aIcJxJv{w+q0&tY@#I(9$UT{OtkGg4A$Mx=p2ANSbqzjw&EtxDm+ zpH96!*V<^;{FEd8G47{3e~{2(1h_Q1r{9&=E&qIcC+seq-eqxb0?8dwX5bLWx|>V_ zIUNNC1wIh=C%c5U2u zJ(8Zo|K9T^{wR)2XcI0S<;5%U4*v(3FaTO#0%)bAL<8kqQP!m#3V$mR^;99!&)}$2 z6-XXYq6wDIKM)CX<@iRzVqS{Sb%I!Ww8C(u|r~ zTWBJR8hJ!A98A3N+pP=!%77IXQ*hei;l$FKZ&0v#?(XWhj9D23aT8N;(J=EfN*r3- zB8^zz6gd_0m0Hz!LN^qymeGnyOd!J8=ak`53pctHw1MXnl7N3nr8<=)GtnGguKhj9 zr?a2F`c*aHVzz0cO|dfiq}WPItIkrb~e|#OAUnXcI_&?ga3a_a8ZcPkQ zN{2`{2qGaJk^)Ky2s%=NG$Jv83P^`Y4IxMfD2hd65RNF)DWP;DC|wRA%H0QjzwiCx z54d-&yVfig%sQNNezo)2&qfTNJ6Ah_41aQKb?UIpmAe+_GqBiTI4U;1PmB;>2O%Z^ zJ{WrQS6316pT|oU|Rbv~`64 zTjSZ1UnJWjufK4iMqkhjEH|XjQ{EI{*&aK2J0{Ak*_E&lN3whiHGLEFcz21tkCr=O 
zH)LVp`=uts=dUe#(@#%M7W0Pm>Tce{~RX$--TjMkOcs zT_J5}k+ajK%{hVF98J#o?o||`XlUS%w*-$BZ`V*Y920D%1c%D<0zZh=W&GNq;+Xy+ zL7s)R5S<^}H`6->m8cSNk*X<-W4N%|@gK8Nr|vl%Bkwd*Wl+z7eq)rUlOVWVX@qsV zZj`#|#*Z2xva2z9t%p_44BuUjnw)%ZtoC_RKacCjPLZS<{wtMTjrmv=m((F@)&Bl~ z&5YQELKj5p#oo3!XUP4!SqOy&v&}v@eoa>s&0s_q5#?HUi*^gboD$G=Y`f^!co=mH zf2@dEfp`?I_;4@6)8X}U7;<(akrBq)(jraa65$g_))i9|v9mZ(E=WN^0d6htofl+L z!X4Z+M(Db2lG=6YYfHnYFOR%*P20#PpTG||^2ACcXG49#x8^~%=YsS!wxZkBvMq~; zo(Hg@Ju>+moLUAy|~_5bH#Al%w89OxnHBId zM%BQ?kVZ#GlYz+<6n>G3v{nC}f~jA_=K?hGgc|+RCr`+r!$9hLRfqNrEwz~ZPIa^xz$_F6esRN3ycNfW<1w*A9>=D}9Ikhlo0ALKuBu5i`-s6U~8 zZ{_Bv_-+vp{!%b0jP;K=%EqhTs_PDVP8Um^BEY*!)2ITYx@Q9KMHE9l%2U3Q_4ds2?$|+eU=Wcp4oh`-P^i1JYn_`uGsCx$K7>c%c%Kb0} z=5&3E#1n1ZL8I*v5xD~@K7*6ClV;_m0rY+?1RRmW=Wgit@`w5HElQ-WGOHyqk0F-(GBK$t|4V91-F_@S~378Y3yZnZf1n>*eML$9gNq>1^QJX6}pm#abK zN6Ug2FN8rBXGJ``Q{J-3E!8D5yNa4 zJt428nhz`AkKj3C?EPUpOZuukp?bNDXQ=8cNhZbRx!-^;NYm3aXAsk;MVAK9MQKxZ z2|OrA9XL?+`cA(;7_oFuB7?0^=QImeZWY^}@<<)jblGxjM zrQkv@Pq1I4b}kP;U${ot`#JP>KcHmmdxV-*NHvy7Fx6UuL0Y4cx=d06+`;Al#e-qV+F9D=A@T_-BL#^h}O5y zNYD$7Su71*6`CFd99aLtsrb+!OgC(|@o%5yu3rYMW=r&L5+S z#}^K9klhYF{}j7^rh+15M4QLQ;c~fPyJ8hf?SYO1Z*D%{a1!<#y-k_;a)%}5A%|2S zv+@jTEy$l!D|)D%)I3I&t6`k8A*@I%$kv*ERH;)3EUU#Pu&mZIER?1KXCbZ(@6dT$ z9++RU^pG50|uM#+$+|E>|PCr(V4!vE2QNwY; zL!c(bvvi=x0RZd>I|Wn|?~lF(Xz-n;^b-;C52^PTu6x+{X_S8u8BbCp zt8wEO0%q6fo*1P7N}VZbm$=PLG_ zeQ};!d156DYs28(qfospUK!to3lL5U%BbO%dG=m(75mXhW%AMz|x6TFUQXfl%2yym|mG z;h}I`to)nccfbxG=>JWcl@UEG@)*1QJ-LZ_w4{4yQXsY3OzYgAa!+!j9=9qfQY`}rww_yPzjM|UM8Mt(>&)&OM60q$>MvXpwngTR) z6a-P3iAkyIAPD^hZ8tdxpw3d~9s%#UUUIN%>+I7k1?Fk&8xuIGyE>FNhr?t?uM-28 zZQ70c``t(x)#TI^WUx<^zA!sPlKxq=_7^tTl{|}`*}+JcVAbu({TdcT> zI$hRrpHH^F{|TXx-tM7vSL^a8HNOr*k5Wj@claZ^w@-z9pvsKQNgewWERnB$#hege zU0r-VW(zQ5FhdVRTVi4Pn|nVuuLJd(Czx!)O)6XC#Cq9EEC`z{w02DZUBtdV)2;!W zbTlk1(Eto4hbA{^55xd0^w{8@I$*+9fI;;>kTV;<(9e=H@~hXZ9t&8H+g0S_3lG&3 zk?QIA3A|bNLKL1V47Tg$`t-M@mrgL{QB`e-Z8f|Sw_cBPjSEign;drKX?Y+)UW}e* z#~dqqs&(GIoBZK%&X8T@=j~c`FGDrvJte;7ZV%Q^Ycvg24nG*&>XZ5;leZm!xJz}2 
zyOe5iB1oh>%vWICOAX&^Vu&=;?Z)uGF@693J&k|sGdjCkDLgFsX$JLM(KnrtA}a7( zxKkHTICrBCgsCk_)7R`lin_b?m3=D{+5rgI5ca&QxbJ3S=ojnZwmy893=!YSFd523 z=;RF($j@7qU8wRFfAWD#b5x$$QSc6hy=x2VCZrUi{hq zOY-GWqB1f}Ci!}aU&23D?aV!2t!K7w8fRndfrQ4R3-sSR)1?|G9hbN<#mz0Fa|r}m zk$#zoG&9EH7>w-CCXC~WA8jc}39FfjdYfJP5pwG&&S8p4vaZIL$k{qhGn>Zx(f3cs zuN6MqIr28qW@!9GR;E9@^c6PWI6$J?Bd^90xMjm)UkWPN%e@ckv4ly|D*}OHnfw&l zQwUD?i8j?qbHC8snNBub0f^OQgsO1he!n&o6O%yD5TMcM=;-W5A$-URhA_5oZ)=LW zGPd;E^5_P(^2%JP4Twa4C~7W`n`vg|5v4#``!es&rI?ppcr8t2ZW-1?9~Y|kFqev$ z_U5KVjH`>en>au1)7Ee<5%V&kqA#5NmrCtQg|=zD_DU_v`oHSKQE<8Ojh_?AL}yrR zc#(Q!2@pYMIlpAHmQo;c{nC)0+;+gjh$zvud|fh~uP#M(ihKuDCVV_yY)x+HUoQz&7>@l~ zJ=^5^%CG5&fqZ0&y&<>z=H|@K5k2|H$+mT&X5m9~gt_Be?mBxmx3zx8jh%k&=Fzq& zgH$jKoSUAMF}chBlAf>~2#qJug{&CQ8> zCd0s$`R(V?9LQ$z`Gs9n+R;w z%@1@MTn)K5PbT;2RldzH2XcdQF${!Eu)+S2Kf@gNFMZ9I3#6kgbM5*7 zeAd>yyazVHgjOp_fd#=^uU4p{C4;D40{MB0SmLKHXJr1g_!XzXgU371EDHps4%QqG zP^OA_?3>vtn|=eu!aBpd{MG%Dn$kg`W&^GF15nTNAgF!jdOTNo`|0eQOo(T?>4{~Z z+>}>8gYXs5RzdpA)bcL6FAO05GV7cyj3zVOBHuOBm5e8sv1dqmmVOP$$>iU-v(1GU z7fCsf!F^-ZPTUv}n%=EGV*15h1(;3p-16+M7SdljOKne?81vat?1!T}+#D~^vU36| zsP&Q^Y#t*LiYg&CnfC%f#Jam`#gU$9H_X zGkDzEI%ZS3QKP}pGh&Z%6Z(4yculZWGYp@svX$i>Pi1FvTlSRB^tq6&VU}e%L)fZVMl3w0|I+E%7bP&lR&m7QOIbu-*3Py4INZkpaaZ)rm@t8DGj|Dqh#dzAvfbM1?aZ*Z~L2 z82~VF5pMDTS-2J6)?HWiJW5EiH!~A7RZ{4c>sRi)mnUMqra7!EMbrC+b&Y;6;yzd3 z;oUqn*25I*MkxUnft!coB7*{0r`dPwEv(Nz=Ma)Br>-AxHXhrqmySrYCZI4_P8-7! 
zh0@;j5MGPljemH&fopI2YBJTi-Re)I%vV^mA~BfayvVo@MbP4+A{K1N){o_gRHrN9 zgoz!xu*At^?nan{6?zdcOB1iZ6c=jZZ}%&b_oSB1nkU~)d9^_5YHxzMlakD&+A%9z z4ZU-?%W~_hWOjQ$@^-B@vz=FBoMTPaIa0=mkG#_8IWIHyQR>*)tV}DnlY_fCvt(s1 zq|eI_B7g@Eudac(cN3wZvB|2;(A(e{rPRD17EDTqUq3(09S-A7ReU>O`{+LNI6GT} z*)m66SD#*O(z8OE+_8_roXI2{>G>q#R z=jC|VK6t{ zX4vY#C7j}U-m77@JPMQH7M_gZOu7N(ryAwDnI7ZcQlBZ=xiS02b4``^dkrEKrlvo* zY!6r>`ICrrd7ZbYidnYYk>Q@JOCRTP8dJ{}^BL@x+23{P-fTe)JmEj3hOgH|t1OiL zauWBZ!k;u{iYsj&=7q024J8e6Kk{4nW0*o0KE=t2Kf9x#hBq>PdDAEDAXs3>>cIk& zO0yXL67O(w5xU#9S@EH%LGq34CzOp5fcy0DPo3|fdefB77{;~upJ7R%76Yo6!epSv zM%$*_69OkEju3t~=h^(YJVS+gy$~5%PHKOaI&pU@qju;x!S>_L*5y&c_hc@9EM9o` z^Pj%qV}zQ{h1ep#6rG0&$b_pda=w|U4b@;C4cwjP)5;=wT+hv!Iz}%V^~{Jd?~Mj_ z-55IXcw%^X{)x?0i7QA&F@#-Ta^ngRkO!j{5H5F_)auAyY0@6?I|WZ_Uaa9h*n%PP z3f;Vp!8m7^#WmNj&#GuUf#8XiQvbKDwHMio=@02N_vRAKte`j=mT)TkBP*OE9NOaX6ripAk3hoEu)4I5?@}C6pC^fgm!M-!Ck+nd zeotn-ycM0tep$1-C`|utc}wl!_1^tD8*H#I^5{O91s3wJ*+?mlA>m(___JF!W+P2) zoREe!zHT3ohBeq=7nt5>MiRW_PLW8oAv4tCgp}3*tstUe=$vGxE)oTxeU1gaTDlIN zn_RYpIT&;FmsTk3A57aWr#I}pfc~9&^GvpT3+ga>vBPL`a-f#(y-ff(ldXN6iH}BU zNh1t%{jiA_mnkhc4gMX}gYC+T4bVbang}*ovxjr`njSZ^ipB=;#_PE=&0g1i7l$wi z;7XfdwP(E92|G;o3%6B?2BCdKzU5GfBVVpRVzi$m`RkG;AMgG8awOr;U^n>kkpC$} zK|VuLPN(W)`EptIK~of7U2P2Z+F8hI>E;f^x%i;{|~sWQwJngswyu*~J2<#qinA|5Rt4VxZR+LKE-6O|hzlWKgmi zodC_x3e02~DE$w@?GhyTNsSqS#HV6*cVGA{K-V5#h=?W~MB-KqUK3K9?r@V02haF4 z)bK52KeERBRf+$H4B+pJRfY6e`fd{Z*Cl_7_5Z%+F!Q-cwqD~j8?Dx+vRVjq%P)}7v|H*3D zmF1MD%+!%>@{$MGL-83QTC@+3R8d>klOuQvZhzC7Bwa z_G%^o-;`osw^<*<+}f!TdGP&1-Q?@{*iJ|R7v&Y|P9&&zK!BM#XN%2>9}aaELlu|m za)?-4!4K$`{Fql#(%4J<29Ms^3>mkUd4i_*a=aGpYMZjI!UV9Tv&pAx7600T_u|t+ z^&UsgmLPP{svl5@Y&xN&JRmIyzA1+1ip&=UctFu*>&#T=*MlD~qe1%=(j%#`f45E% zje_*IxGIE(PLXgr5IK4Z5xl+P3rz64gm{EbelUaG{=Pzl1wccQf9MfyS^aZ87*Ztr zRkrk%74;kTf{VaV-4-v7zT->&vFE|Qctf`O`TbDW4X>_w_WNU=P}JCmuQ~!$A4P5L z@1b!UHrSjd7bFlSCc4EyAq&pM2ikJNKQ)n8mWdY~6s1xTCCa%k`9!=tbL88y2czJ^ zn{s7LFhWu|0vLjGX8Y8z$g2WSpTRCel2lOxWL!1@3Ck?;KgNi3nqBv%#02~v6a38J 
zLr$OrH?zLMdEb^0I5KQxX{N4G1j@$-OG*5HPrxOuD$F9JDm7%O}}3W0QrI}$kp-w+5j$x*_J{r)K3gXcdszdMC6 z-Tl6nZ+{smE1kTm%1o$$@M}i3qTr$;>Z@~C{IP!e-bhC` zn~@jJ^gA2hOcpU`-B~K^j1cW7+$2Z(k9Yp_%R4Q&kp(Kd9a;B(`M?_e{cl9@H$QOD zdmk@{#Celd%USy9#yKPrq5o&$a#IPy?mYokpln2Gn}{}hor6`_wSF! zy+927AEkEsM-a90$F@RlayFhIP1H-_&w6&O{@Y7|F#&^xSNPXACI2ti2S<{_>muHT zJTx~+*C;77E~JImEkuX^ft_HYc|r{9-_&EZ#GVbBPiG$tP~{l7D zEVAel2Q2FF2ZIIvKWpmI%rhH2ie!Gi^`PFBonge8_=c`M#Q2%AiW z+VY3ev + ## Cluster configuration -The following example cluster configurations are provided: +The cluster configuration is specified via a yaml manifest file. If a cluster version is not specified in the manifest, then the default EKS API version will be used. For our examples we set the version to 1.27. This setting may be adjusted before creating clusters as needed. +The following example cluster configurations for distributed training are provided: -* [eks-g4dn-vpc.yaml](./eks-g4dn-vpc.yaml) - a cluster using an existing VPC with a nodegroup of two g4dn.metal instances -* [eks-g4dn.yaml](./eks-g4dn.yaml) - a cluster with a nodegroup of two g4dn.metal instances, created in a new VPC -* [eks-p4de-odcr-vpc.yaml](./eks-p4de-odcr-vpc.yaml) - a cluster using an existing VPC with a nodegroup of two p4de.24xlarge instances from an existing on-demand capacity reservation (ODCR) -* [eks-p4de-odcr.yaml](./eks-p4de-odcr.yaml) - a cluster with two p4de.24xlarge instances from an existing ODCR, that will be created in a new VPC +* [eks-g4dn-vpc.yaml](./eks-g4dn-vpc.yaml) - a cluster using an existing VPC with a nodegroup of two g4dn.metal instances. This instance type supports Elastic Fabric Adapter (EFA), usually does not require a capacity reservation, and is a good starting point when developing distributed training architectures. To use this manifest, edit the vpc id and subnets, and specify the desired private subnet for the nodes. 
+* [eks-g4dn.yaml](./eks-g4dn.yaml) - a cluster with a nodegroup of two g4dn.metal instances, created in a new VPC. This example shows that when a VPC is not specified, one is created for the cluster. The manifest can work without any modifications, however if you wish to change the cluster name, API version, region, avilability zones, etc. you can modify the file before using it to create the cluster. +* [eks-p4de-odcr-vpc.yaml](./eks-p4de-odcr-vpc.yaml) - a cluster using an existing VPC with a nodegroup of two p4de.24xlarge instances from an existing on-demand capacity reservation (ODCR). This is the most common configuration for distributed training workloads.Edit the file to specify vpc id, subnets, and capacityReservationID. Please note that the subnet of the nodeGroup should match the one of the capacity reservation. +* [eks-p4de-odcr.yaml](./eks-p4de-odcr.yaml) - a cluster with two p4de.24xlarge instances from an existing ODCR, that will be created in a new VPC. This cluster configuration is useful for distributed training when no VPC is already available. Note that you would have to match the AZ of your ODCR in the nodegroup section of the manifest. -To configure your desired cluster, edit the cluster manifest file that most closely matches your desired configuration or copy the file and customize it, following the [cluster manifest schema](https://eksctl.io/usage/schema/) ## Cluster creation +1. Create a cluster configuration + +To configure your desired cluster, edit the cluster manifest file that most closely matches your desired configuration or copy the file and customize it, following the [cluster manifest schema](https://eksctl.io/usage/schema/). Any of the values in the manifests can be changed and more node groups can be added to the same cluster. The minimal set of values to specify for each file are described above. + +2. Create cluster + Let's assume that your desired cluster configuration is stored in file `cluster.yaml`. 
Then to create the cluster, execute the following command: ``` $ eksctl create cluster -f ./cluster.yaml ``` -Cluster creation may take between 15 and 30 minutes. Upon successful creation your local `~/.kube/config` file gets updated with connection information to your cluster. Execute the following command line in order to verify that the cluster is accessible: +Example output: +``` +YYYY-MM-DD HH:mm:SS [ℹ] eksctl version x.yyy.z +YYYY-MM-DD HH:mm:SS [ℹ] using region +... +YYYY-MM-DD HH:mm:SS [✔] EKS cluster "" in "" region is ready +``` + +Cluster creation may take between 15 and 30 minutes. Upon successful creation your local `~/.kube/config` file gets updated with connection information to your cluster. + +3. Validate cluster + +Execute the following command line in order to verify that the cluster is accessible: ``` $ kubectl get nodes ``` -You should see a list of three nodes. One would be a system node instance of type c5.2xlarge, and the others will belong to the nodegroup of instances with your desired instanct type.w +You should see a list of three nodes. One would be a system node instance of type c5.2xlarge, and the others will belong to the nodegroup of instances with your desired instance type for distributed training. -## Delete cluster +## Cleanup When it is time to decommission your cluster, execute the following command: @@ -45,9 +69,20 @@ When it is time to decommission your cluster, execute the following command: $ kubectl delete cluster -f ./cluster.yaml ``` +Example output: +``` +YYYY-MM-DD HH:mm:SS [ℹ] deleting EKS cluster "" +... +YYYY-MM-DD HH:mm:SS [ℹ] waiting for CloudFormation stack "" +``` + ## References +For further information regarding EKS cluster infrastructure see the [aws-do-eks](https://github.com/aws-samples/aws-do-eks) project. More cluster configurations are available [here](https://github.com/aws-samples/aws-do-eks/tree/main/wd/conf/eksctl/yaml). 
+ +Related resources for further reading can be found at the links below: * [AWS CLI](https://aws.amazon.com/cli) * [Amazon EKS](https://aws.amazon.com/eks) * [eksctl](https://eksctl.io) * [kubectl](https://kubernetes.io/docs/reference/kubectl) +* [do-framework](https://bit.ly/do-framework) From 878ce34a1f03e3124f9f7d31f38a8b3a37340102 Mon Sep 17 00:00:00 2001 From: Alex Iankoulski Date: Tue, 19 Sep 2023 23:31:17 -0700 Subject: [PATCH 100/648] Update eks README and add arch diagram --- 0.docs/eks-model-training-multi-az.drawio | 90 ++++++++++++++++++++++ 0.docs/eks-model-training-single-az.png | Bin 0 -> 56959 bytes 1.architectures/4.amazon-eks/README.md | 53 ++++++++++--- 3 files changed, 134 insertions(+), 9 deletions(-) create mode 100644 0.docs/eks-model-training-multi-az.drawio create mode 100644 0.docs/eks-model-training-single-az.png diff --git a/0.docs/eks-model-training-multi-az.drawio b/0.docs/eks-model-training-multi-az.drawio new file mode 100644 index 00000000..430da2ca --- /dev/null +++ b/0.docs/eks-model-training-multi-az.drawio @@ -0,0 +1,90 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/0.docs/eks-model-training-single-az.png b/0.docs/eks-model-training-single-az.png new file mode 100644 index 0000000000000000000000000000000000000000..7da661035beaeebb65183514bb225bdf5fd3912a GIT binary patch literal 56959 zcmeFa1z1(;esIo z0^&0?R6r>xS7ri#5bc#DMG%TVlFT9?aJ+L6S97p(F*dU_LZIOgJ^4h#&T0iUG&43c zGNj=UqhV)fXJzMLV&!9EXT8tLO~V0UX9vGHxtQ5`c{IRfaK9`$olkx9MsLs(o&CxU4o056+A*Gr)OYh?EtkmrQr|(-=(b`j4Z)lKnA}Rl)*1G z@Q;mEmyJu8haP-{*w|PasT%3am^r{l;oxEB;09zm8L9hnvNY_X;IpNfg%S88X=Gpl zg+F3wr{~OU1_igXvoUis^8%8nsU6e`+$6!q#>vde%*De2-v}SL^83;y*twWbR;jFK ztY>F-ZaVmGjU4n$PIha`=?FFAvu5XZauw%PmozjLleu@Yb0;G^doyqhCkJ>sIs9P< zR~w@fs-cmSnE_Y{I8AXHRtUHT|KSidG}AM&)3X9!%uX2~<-jJ#Yi=U$EMaUdDs8NA 
z--46rWEyKdu$SXG?t!yrV*B%GXJcBqNZRSyn94y7j}O_<<>VnYE>3WS@MRggo=|w% zIZg;Bc4xC656aF8Cn%e_oleIF77KsQ#L>*q$o_P+6OscIYUyBR^GBiq)Y=+I#vg9f zv$KOb|M50ssO9ONPiM6O+xo*paDuXdp5)Ak-xTUUGn~GLsTq^q0|R3{F*_bh1v_?5F+C=p6ZSdjSvsB`?g_!(!S(F4 zp^grgX4XcczcCZYtB5hw+Cda*3AH;uI}UaZb_sAc93nu74b8xE&#qZRt--h=z}Q%x zk!aXC#8|mR*?0im9+(XaF!aelr#Bnw*_$4lA^1ct*%yjV$j$?adrcG)y1r z-~hEccLT)I%ml7_4p19F)jR!W0M=pz7Aa!tU}XuevVo8AwVw(#8_>g(@2C5PPpM~b zb0W3IW-jn`h}b}Z(K50VcLIaKMHfB@kep+kwsHYmwlQVabGGMXwlgv@16u-W1*{uf zQ*88p=iMng;TtkKQ$_e)|5&Demn=ymV8h^pv$LK)&ck;q52t#}!O6hxwCI{8AD*PFHzO6vaiAPF8+S z8o}8CH9XJGU&_#Vfjbqezlg+{Xq`!pm6@R-T^C2J8hiJE)_z z;crrTvaJ7F?((vpvhtL>oE)bTbdI&0rvh-EwQ%;qnfUi%?b#~NvGy-~{p0!miUa2~ z_%{my*TjDXFW6amn7K|SI^_l%FXtKMJU{r@ F!olPGM#TlKL0o zuV_4fB?77V^P_-rLjBVWe%YWG|H>kqraI?r zH20}3K5wAUXn!MHaGH|++ZLP}xxc?H;N#>HlYl1_{|B~!3&O!F!UdlBV-)-US6kp@ zWAHDs1^;ZscBc7%b8Eo)zmE+5=F!^kk@0B+&Cbom%zbL*&Y5Snvsn2&N7y-e&*JL8 zkBR2`YsRW_4 z&*>HiC=48*$2sl&pTXQSqkoRM5DkzY{J+cGzglM=R%W)pTIatVe>v<#peoMF)*Kqf z_nkGwxTU$FvP|s%1gvdj2x&xwMuE+Z92{Iv zv1C0*2Pg<`&cRgn4tjPD5O~1@+bWpV%){r?%d{)e16XHNcNOMVG7{8y*>zsTA- zn4O_^7RM!#^B6h2D8mQg;e-RKXX(qI$}<0j2oA3#Q+Y6zG4zg^(?n~eUe zu=X>CpP30xHs-V1&^a0S+c_c=2WLA)Yj%5a1-|>zJPJI|HjWPP+WEgEN;vE1Idc(ge~l4-u}&|I7Xl4({WC0)Fp* zivZZFiWuHCQFj%;XTfP90o9XmW>q@N7tRU5UwHd(fADXA@ITuh`~@%iMW5_HhVaf{ zKhWX+Ti@y#%*gqt;E;puuN|>xf#4s!!GC87>X*jgubHB~tG$B}{3U@CUjT(W1GoV@ zhcEn{`eXlV8U1I|&vEv8!r#U8A7cmq!D|DjxPs*GFKC>X_~WZ*uY{ajIDG(Mwjj%b zzlAU)VqSxQK#m|S2D$H|vyg(47}r1Ay1clg{kj&SbVNxZ74rN?5FMRR=^Vis?%}}DcS6P`NBqOjOI$!^@Xb;G0~Or#oZNuyPxq1|GT=NX zFUNIxO7q7-$iWz!eK{bCWwYxF~$l7t$^nk&$1t zo7zfuY{1%Do{S=7(%|jbL%)6(3Eyzx!WGD!eOt^o+#M7n`Q~3ULf7c-&~xsPVd1Oo z-P~--{b9@wB#NrMZXu#=d@3ny&)1Ephb$*S>jCl{GKkB7SR;zPa{$3 zV_Cw$Lc}P0hbL3j%nGImwAE+krzJ;LU+Jf~i;}IRAF%(uaRcAbX~Jq&2t67BotGN% zCy%E6+)GiBhZ&@y2q;OS0i&hYYoEJ^iy~g#w~7|6Oo%uqE$-BJAjVOO8C^S+&+!fO zDcBzn>;`r$90{SQ*S|);6oUXC6#nBQ4pys<&l8~@+qhqyL1NZQ8R$i%FE>^pheUk` z{}I_qb`V*_GGjo?5XSG#vX3{cr7N>#Vf*zTwQrFhk*~e^q+o!VMDh6MvenfCQSE!>H97l4_nK$W 
zb@iLdZ8p@dF_XxPHg{eQgD!7>z!JawtttFu3)Bx8ka>-|RVX3CjN)JuN||Qw=a9eE z6Bj$nV8z?-R_%YPBU2e6=yLiAsXTIRi|SRzf91`00liyYDzr0D;W@`6H3ppzm`ge$ zeTYOyvMuS$xH#97Xq?A+T6MhEu#QS|`;&UlE2eK9Sb1p>?`7YviOU!9F&aNe8!#o= z4n9cRxRaCa`*^tjiy7}52I9dZxj{|cn6hG*`T~|)TYS16>E@rtbH6;h6~<>q3_>>&7uyf&ZsL? z%#$K0=$dHpJq?`{3S-;b>C5n9wq~UKMti!@0=%g>@q!?qD{6^Ao&tP(lO555S)9A< z$-cW|sy8LP>7ILopv9jQs%vbXYGS;t!3p&yav-@Hyt*M^^SKM(a+i{O^6+Yz_Sf7r z8MTYtMUPKJ>JB2BjdB!Hqk&>ATnZEPGJ0EavHd!}ECjU@#_kkwZi*$uP@pv=`8pa< z#UwF{sU7x?DvKy)2VT5#6z9_<-15EQMNS;afsAx{e`n~4zDPA{A=AM+#Gh=8+yYim zGve|AJ3S%=Y%G9Wxr9Qwh}3*|qa=3Si8n8FNqY zgvb!u)ZdA|HrZgZYTqoBsCl{N)+-q$HZ1niqgr0-7NfVLT_|$5_tFgsoV5gF{B(uG zM2y|4x;Nh+sI;PH+(|g^t2p6;C=3MAtYn-$zev2j_DhCqi>-sbz~Q!r}B1`BFRvbMHQT zLmW&9y{OkuyJdT1{#4OOF1|*EL)T%4`ZB+Y#OEJ`Uj|}5UzgV9jXe}RTP+(HhEP;a zlv@NSz6274r$gor8hf&bWP7mk$53|9Cz@~Y!n2{s`oqs_gI~oTidE&r^R#%DAP|MC zX%Z40%Dz)5Z;Dv;o^HkX7SJ|wCXJWj2IcZS^(;9&;Hqa=KJh8zCicxfh4jZ4ECytk zTzsMA2~9d`*K;o77P+~AwfCBFUsT(6zTtlI2x*k}7WLJ>Na<&{uEj+qo#$4eBv_@} zg4yspVDQ{Wu9a<)1-$scfY?)>7(~ol%5fveJHpevCZ}IBdM|!%@41t`?tPEe>s}D% zo)Wn*Dw{8|=BZwC6IvhMJu-_t7nwAwfn#XJHSnsXSuuOh;JSP0O%dRma0))-QW+A$`4 zp-HRzxfQ~-3UnXcmj$nVs}E(~b}yFl)(S(~p;!tgb!=t4`COZC70St`#wZr|Zh7Ze z=RFoyV-!F2NGjBga)O0uY1u_^p57WV>+1oh)y_L2m!n=PWHHo+AteucH;u>>Ly~5M zvt`~e6|`%Gbg!m9s>1oefj!bHs-@@>eeUEfgb04*wn~FM>0#cVk|dGVVd8xUW1Zsp zoa=9AzDT=yrc_nti;qC4&SVK54PQu=)h+aJ|{a9rf2KN$SpRbkbS(Gfv#=BBgIZQ5RBh4iLY!u>TrQU#O^$zW9X zlUc!}pB{I;im8L4GjSHpuHB>iGO?uj*4qs-R4!p^7G5xI! 
z;tHlx55R~b^q~!luXh~nK+3UqhoJ7f5Is#3sM+&W^<%~7+q(=&qI)19n+R_Ow3jDj@RkCvkRq0bqb*IUQuDewIxCW|Y{N}j%YM0dd(8u28< z*eq|jrwgZ`fSg0ym&HxDdv>cZrBXH`X5%z4^BG3BQN}8#%0|3S1ze~BVmkjf3J@Ol z#4z7xAgg-8?t!GmR{3m+%NS)N*Af%Do~Da4+Ta!JDH4sCxGsa~vR6CR_CYLph`>MA zU4P~~sW5Y-G4zrC(!~TETUtzCJ!{h;ZZu3?3=tb!=Y<0y5e)tqiITj~a0e8jqaS7R zMBn!nYo8aF3!Jh-Eg1-WOHA+cc)0ZOA#g&8k?uk^pUNAQV9^KMB=svboq8}<6K81` z6z5l{6iFTyy`eCvb;+4I>*3Cdg~n!_ALC_v;aAisNw)&xY~JVg_uFi5Ne)#kctw&I z1ct9K2q3GI##RRqoW>A7HE?;aK`-~xgdczfPP(u$73*|X?`^C=X4UcQ7YJ2aD|xSR}GdrqjP#wUACWS2!`>SnR|=g zb$Ph{)SQw0Q4m7+IZ2M5>YnG1_#krzZZHAw0=!Dla>9EkRl4d&4fwOm?qbHXNgE%p1{k#fMg zm3~>iS9FxwPfy1g7lxPIHwY0QV|7sHs>y4up|aiH2^*CP|IqhLmyW!1)m1y8;By!x zEdSxC>Q?DqZIE5Yp8DwA^v4!MM&g?T-XUxpMxG_>^uB~gs`%w@DUx5U*hI3&JgF-( z`CE&NLi`)VA2vO9XFqzi!8Mb8qkNLcVls8zWFmF<3V|;7lVfdpZi77)*YXZWvg(}; zIGMn5eR=+yARYK}rT($WrMoJtt?xM)-mwf}&YBJLZd)K*rznUtr`#8t=+y2qVnk&# zvia$;psY9M)gQK@p;V{-_hE&l^gs*22@K=N7!;VuHh?~bpS&5zF;PxkVDUoU z{x{1K7L?&>iiC=RXvZaG4SJYcs^~W9BA4Np!NFEv-KL*=kNL|;@#+hhoYnFDWD_W# zHI$^=Ug6+5UB(asqP%*AFZm~wFn}eL(^+3Dzv=syDe?oCdVZzulU?=FCuFEB#gR+} zOVd#sTUiG+{MV-jTBsRGJRt39s(a`VmCL=8g8sqDT!xeliDkT>M{g@dA5{IQvV5OO zHrHkkGp=)2F{ve7f?A%-3XgRI0kGi$Ih)L9a@O1y#RASu4>{e-+DaFmsZO*ML*q?? 
zJIA}Rt6At#l8oIdp~Q9D*-Fs?7KId-7-s2Fh7WabOjoy58^uTq4~V@!OGUA)#`mSmoVzkHM^4I}iI*D@zxj)ehBbSi(L( za`onXS9B>vjaAJOMxhc<-)nL3h@2tPY#9ym%`MR@T8Q{Pf#(%ibCmFr9f-!lDVr$UH&$_PzW{J(@OmgN3=< zLkHuYmTI=wQ+1dVYcp^@9+8G+WjLgXR&7s}N8;;O#h)lxG(3e4ppLc-d<%TvWr>#X zP1!qWWx?ZCeHKOc?@Cr?cZ?0DEt`f1RV%UPY!TNOTYgQbRIIDx39MVMu2UKIF7j5d zSW-b!c1sr7W-4ueVb`hYuR`JCtwd0m=1Aae74e97)Ur8rWN5GVAQ^{a29{{-{)A*V zO)x0DKK3|=!!+UCpKN?bW@k zpLQiZYC_*AKaxbM?Dc1P^Hq}LA<4(@b`%EQmh)#T9hDZFkcoE{n~49y^108KE@e^U zF2uOfrI+{1PKgs?*i^i&qVOg>j8^}r+tI4aY|-Py^-c85 z9OH~8_bj9O%vZ9q^Ntv7pUrYtWf20IikVPGdSbp;4m>wYm8{xF1J;9|B;w@2kD(Yu zIKO?G83X+9EFY2faEN4^>n>?9FOCb#+BbA{Jax%-MI*#I!ZJC+@4cMzX*FW{2Y2CP zPHeIiW|SA$P(>_d9HkCrFRwKZr23~0I5^JnRK`?K`bD!|A0`TKGcuEeb)!XA9WfkQ zs_-JcS2+nSdGUe$aLBbOFP)CKmmY0^EK7Ya7d@C|%bj0SqcS3$Mu2UnHqh>?Me80d zj;k&jBYAG{#lyjLI&*Gx5jA=idYHAgtk`ynp%(A8Fe**5`~&MPc{7Ju%^S2^!tYi~ z^><$!uG4_Mev$if5=ZY)f$)9xeMHtm0K^RC?sC3M|8}1EiE9`4b&oyY|=3eK9h6Z;(a{Lv1ZxT)*$A7mP0kx17k6) zTKB{1EilRi$rKhZFMvR0KuaF|ei3H*Ko_C*-RJLNw;qqw>n?w;&Kit)9G5j%f>F8u zbLqz|x(6SKqHbjBchj2KF1z(8kCx6sL0a?UsYA@MYEH2Ms{mvEYzp{9!ZdwlYy#`) zLS@pxrI>yTEyuXrDgX68#+vl^onb%Qd@GW_&Mx|0lk-!|e^Ijkb*go5+xCzk^JbVf zL0W44?)R1)6a6da!QcY8_}}unR!t3H4aV{hZ#JKhyUIyx$ww0fC)H{q!>04)6)h5#_Wm!;8yxIg+`q8>P__cf?oMq{1} zq0FkQ+;9Jg#C%lXP1G{Bd%yeB6R$}UE2}8I7hNZ;xgrlDVbZz_ybsstVI-_SB8Lor z&Uo3+ug!58=BMB6e_Hm=<)D)dgI#iA-jV}8=@37n^_<=^PUcZ7SU<;;t81e)VEx5V zuaC^0T_wF1k<}ldichG{Fef;$ea-D!)fpV#j(ZQCT~kxkSl)X;DO$ZMw)@)gUVNm{ zKmaJsT87I{2?9XLw=W7%s8;0tQ}`?@?r-D{Xc2szccdL5xO`RScMU>7PfP(KHH_2i zQ-tl3^Y%(pwJ_)RPyM}JzT6>c^YTj3!*?e$$WHbO|NT4)0fl%kunGIeVF!1RS%K+~ z>YUs7p_`$34Edb4(_$OT`&uD>BrxBufHJHhs-T0ttqk!n!hq@8k+=weo+tVVpyM-d;B*ild|$y27o$7W%z`QA!UZ&9L0tYRb&SFd;Mm5!sU^(=<3({gSJTdQtl6 zy!B%P1c%G%!-j8!sOVs_J+^A-=tGEz@}0e;)OMx*A7zN-?t;V1b zK2;#^Bai7P@{SaKB5P~ws?hFRUNoNH^ayGP*ylJXJ2kS+0KU}t4*6AHkoDA8{m+q7 z_A0bMtTwPeAqg^X518F&&AUb=5t*RA)+ymH&PWj*$a!rOag3Nbk) zB_=NJky&rHUi%ZKSI)~f^bHIY(?w9|Va6Sah-74BB!X`EizAgy(G;TOb0%#x(Y%sq 
z?Cr^Yi%RLF__jPTq>hl{yY^R#uFxKp!Cu#Ef4{|LzdghMn96YBh46tm`kh1X&iI}zSL|o&_3^wn}jrlrQqAQ$y+Y!V2U?V{z*~rL98gjEIiNoYx zwY7G#kmt4QJl4$#x?UmkgC!ObfgxQju;D~D0}{%cft*Usi+EkceCqMfTPpqK8GgrUpQ(Ik&@yV0E;5?9hbJ)z z8RNn+|HSR+FTZ(a#Tp0%n5`l)0oe{M*et|qq~dKy5=Tr@QpnWQL-6UM=kB`VrLsql zkm+HRR8%wTt-9U^1fat5eiYh(357yE);f8opElRn<5qO% zjSTCkQLM0sPaofQo?Tt!O=ou zNQ`iWFSKSSX5LT{0m=CuT%(t+#)K_yX}g(fAdygTFewZeXCQY_z*I_fFezS$g+@gM zrKP1o{A3qis}*T*)qHsWLOkp%=X)}*ZA9=doy4|4x1l~lsUlLdw3iDd$-FAW^ zs`s2;;d`UnrV;d8Oja&jS}|`xU{3FkA@xL|ua%(u(iZ8l!!Sa`n@VNqfhyIgh-PD+ zx+E1FmLY&>nFH#L#Cu5SY9T&}OpF6BvNAKDd+e;*6%ogBBmR_hjb3BS;OVltQjDB- z<%%B21;eiS#Ee6-HKBm7Uw9YdpY{4Kl9R+8V0Pz;4D)@< zN%Sr_3%gV&B1B0%C3us5)(h|k(L^;fMZ-Vt4PJB#NtWhSK@}mJOuI*nUfMw~<+--bnUcSV#6>2t$u~ix;Ok$MIXalo#$v-v@JO zN+UVRkei2AYlUV#EzMQp))<_|{=U2XcE06<&E^$x<85_n=$wD}UQS;v$2WDMoWtje zSTU|g)YF4b(VP1d4=~DpsJEZp$M{;7;`kj5oqsV3ExWP6|M9mPBEwS4A30EMS*Q+XYZ)vAKq6yY6cF zF}y38cebI|xD*;^O1+#C-uI;-oPFUd0dg95`IKVG#h#J{v3JhehOS3T4NXN#*E3W0 zh(FY}Qt%(9DsH%OZzD_;-4yLhBk;O{E?gf_m^kk`fWyxAf$n+-z{MUm9BbTAOxVGu znOmwbfPi+sv?@7CfCYU-%Kp5RAw9vT`dY=4erltEV#SQ(f|-feo#{=zdzsB<3#qxT z`9VvoH@I|z6oOc$7VttlY9(V{el{=^#C=x8`<5n?(u3avu{yNqIZO)D_I6lGXgx`B z;}YAq(L;NezS+Rcl#J-r!D)f^Ao*#uq=QWrH+iI1oaZYb@+^MO>a0g|2cn`z)$U{l z4>P;2P)rX9yGQo83SNY`a{(?VWK%2^sK8zwI9rRR+1XT-d$VoVmmx1b7=gs#hGmX_ ziE-1fw7b~)exXLy`a)V&`4p}SfqM5~Ooby|PsyUnl6lEp*CBg`H4-GDp@>Z3R*D(e zzQ8EnZN4KaLyLhz#oFF-6NS^D(bC`Fc(S4kdb8KH6CR5_4x8P*YEgy-j%*W$)=e7e ziZlqtmJ;HrVC&c`gnoscoXy2>MV-Q?#f0 ze^9wplUy`eewe8{7TRn@6;0qipI3_G$G9SzMJ^+h31bC%unaEK1%4hTzc(ZPe(Y!qX5Ne6X-cT(hcJzXqG>4CXZldCHuR~fpsU%`>C_m=R@VamvUd0e6RJzhBx|;5K z&u~uo({NEoIiH_p9iks&H=XzWBtDE}7E4A$JtQ9Mv1q~;MKalwVDq@a0v&wjmLvD5 zD_L#7TU>{0kK6*y*CrxZan+;q7?l_82vw14pvq=6pH$V}<0QR2>C|g9+$`7|Q|Fy+ zHz@kROK@YpcKO<^frL(9$@nT*=$nW~olVhBX0`Hr)>+TUUv1r>Ubq_ewdqpjzIWZ> zi{4dXk(;-^!TjZ%O>RpfX&}QrUl2Kw5M3l(kh>|Do-fY)@I62k@~E3Mc=pf~`tkF) 
z1KR$GoalNeQH<^P+{lb4DN`DCS44KnAg$wHg7@+gNJ!hYKWy?}0LTrSoU(E$w|7a(xw=N;4!DStL?w7+S>Oc(Xo2(!#D@iOlh_!idScdo?bSQI59s<`Vkod#c$LKY>wYO9?+u)G(b`A4KGB?Ly2D z^y%uvQ&0Q@rFF~@YYU1;w zK9NA4yi$O#MCK0qqpn;=3$%sTxH?@9qPvS3!MkOo(edenQH?zDehZ&-#JMD3cI9C> zWzj;l*euAyZ&6&Ua<)6bdDQgaSR!{5!X z*J#?%3F8iW0j+}cttLb}&8HcKXAx)HMX5wQh_EdyhJ z!~97DI=lDRM#D9Jh0hhiEQ>vqk6ukMgNYH zs9H>=uTo$!Y}X%Dt3iw!#e!j&EfAxNR`#jho7`G6lDt<>mrnY#!iQme!z<&Thh2gw z;fZOJ9P^cpt1nB(Hd8&}vBr9%7nP^?eV?%c?<)DJ zO3{mn;mw|F4g+}H56Q-4c<%YTRZJUYY71u$7H13lj;p-Wq=B(3rR7c*SyDaP7>#&U zq~A9ELSQsj7xE-6=AvzI1sxGFF*+6&*6?Pf#o!B*uGHK79pkmmie-krM5Am+lYOxbDXk{zh)MaK;u>ga zX{oqX_{5bnK{~~0F>o2=3$2F-dlYSpV>NgN?;~4s6*E{Z1{e+GI2tbCO*xl3t-KdLuWyIka(#_y{2G>tpoN6rG@3T8$MOtDvvl&tA1c-5DM#vAcTeELmt8J5k zaJvXkJIO-crIJ84hlp17}L>`5dXCwDP?7Uuq}5C(>`ICyV=WxviUyy>^s|4A08S`n>GP#E`T_xvc-YzBm$z=@nBL61cNLtPMCAjE`Z=M41NuhGK7UAsFw zI^vYEREsoFOiWDpZW9s_UG(-|?1s&>CuD&1jMrw$hlHkAR=d_oJOG1$4wgE?p9qQd zVLOQX3I~>fXoIZpZsxE&?n_a4!Bjjob+3{HIO!1~@#sa-vCTYJw6ZsLj}{K&%xyN& zA>9!7scTACx`j6-drF~e>lE9apkJepG%sO`;@egXEWGYT{A7k!ED3}g5q*znMqPEb zrluDq0zHY1<#ny|vP1&2_Sccpg$2tRLQ9Zb{+M`EJMy*E;Qgmpwgzx?B+X;>170yG zsNj))dw>J$(va+dLvz0G6;j09TNBaIw6vk`F z)C+ii3(|Y}oAD_ohamLCv>ku#X!l4a6JQz{o_m`^RaVdHB11z(-neai%9O_2-(Tx` zeDDRd<9;M#d=14r_U2b%EjM4&ro4o=%wBqzO+SyrYP{a)rqff#3>bkL-{CfG(irBC zGi1yDj;KmJe`r(ay9aw`nsrthivH)@ne~{dxRD*S>W*_0gKx+1@K`9+2lftHKylN^6QVDt7cDq z_T!d%<9a{mA@2O!P^{6Q#3E(v22D%%qs&GIAqOn23wNiB1Ih7rJm!zcL?+ckI)>a` zSeL~OX&(tIPE@LK84A_q9*#Bi-$Sk+xz}8^wToh0 z!?H>XyNns1_;d)sQG6KRzJ0qHmX(tuL&5F7X<5_sVhExb{=7d=S+YQbVoFO}d&UHm zig8VZQ!wfNE>+=xXG$d#w`VyO zvRY#`x2N+!lj7xpsY{$y@S~;z>~hCzWy)W^dL}gLqb&)L3UxUsHbo7u(@!_YwFuy= z*X*zvW2)3TEq*23)PE$)5wh0Ito@o=__p2GV0kjmE(&)5Q_aM^2SD4?eq9xvS7 zYW%6mPjhO$V`VhZK`h|lm`9BKP7j@f9$wwfU?=EHTh z$O0+9`EjvFI1~SqIkq3aT?Q%?{(aWlrN;8!ksGQ!LNO`*;A~Nk#ylhhAb>Ajm96%S?HRJ7BJ}lULU#_H_ zIWB}Q{+gFp!`mG|m*pk=cMCAJ=YP-%49UZj3Tyx*4`BgQX!*2p1M=2Kq=q5AIL(&@ zQ^F=k`qhy*f2PinBvhz#)mkqed(4*6W6tr(jxzI-=z3L>RIRP7iEBwd6Ly~-r$9T; 
z6Cvt@S{$}f6RLx^;zj$+_36NnVC-LeLocrSPIn&rjq9y4+>FbsGJV>5IFc~S*P=aq zxU3X#v=TE`OLpjkTKw7heDK4a;!N_xtDXZ@eEiA18yzQs;pNaAaCQ|LvT@Hyfr-_ zsGr_*K@bDRR45QyzeOX^-mi+0HXepuZZA}(tRF9`CP7CRrQ`^>ciF?dQW2vx_B!^P z#q#955I#|T)Pe6uW21KKom?+@*>8-CZO=u;xP56>KFF(}ZVPQTdy3p4<1^VZ;sN*Y z-c&?uLP3S&oA4T*j_eQ6Cwu1#T$2`8Kn#L^tKN~HJaa9_hDC*VnBF#cxDt)c{mBQ> zlnA+0qcn(<24xSK4qFvnT>+Qmk9ps`Th^UWmkS7P&zKO9%3+fl^k<1S{ZM zVJx80fB&%oAJB&|>{ny7V$~_{b6Cy4+SxfuK5WOr8MCI-^6-nnalH`5{qfGbkjZ#{ zuFyD1SA(V6?I4QvesnaKB_{Q}mpD7~)P}7Sy{YUwE)45KC1uG}hJ>?foFYivcYHwd zbJ17J^0|Ev%5QBTgoyI7rGz_vqi@D&?`wzDYhykjzR|G!bv7)z${%}MpN!Ytg=+Dm zy1=gIt;Zji8=oT5v=YuT++>17Q>z_vMn9F7KhfUA7Gid}k@jhO+mkPJ zV-*d#Zskg{)>yR1j)K_XD!r2RLBZP9S%G0szNi`p$Wxi7ukyGnh7kpzM%9K zV|5)&cVUrlCBN@#XorK?@(ylP)kjgMyLBJ;QK2F)4A!FO7}fv)K%_0i4XTR=eemic z5LQNbLs(BUy#2QEAySQ>EI!*;I^!=_Zmborl7t>k8oE3wxF{}hO@GrnLUQyQ6@wzl z#|7l-XeF=`nrnUM@AU9^48_ySP14G1Tid{|D^3a-0}jAWxTUv z;%p@b*(u`!*xMq`m0RlHKFdBd!`6OjBia81|fGy!_AKaJt(|&?9`Q7 z?@@q_3zxpgK_PEU@Ih)U*H^@;=CNiDK*HdCEBz&Bj2Z`{XYw#@+|L%?+hc+INz$jQf}0q4<#Af z;`VX#u@5QERQt2fZz&m?)Y)=2&|ao#Li!TjT-zcr1*s5&Xd4rFnrac$i9d$& z6-oM7kQTj9eF8V+@LrO|*1%M7;zM-4xw&+_{QOjObTDIqO!D%id!KM{Srg%6C?U>_ z>P%tGNDW_~H2H9N9r}tTkhGX z8hexA!?oF*H2tlm@k9=jD^80eG|R$A`)gMP-8gSMEtonslDNKfL;Hb%epL><#qdKy zf?2|uhVZ35Fi}o7?!1Ox1^Z~?CHSWG;lqcOpFiI&HylO2cAPios+5YMblF}>e?mU` z%9cvUbK^lrs^E1h$;e0fb=Za=krQ=r;C#ZUPPrU5X=PSX$of9UsvLTZzmdV+x2JIT z`ow*(BU$ms8$+Ck^~ggGF6*~V+Fs=$_k)sBH$VnxEp=+0g6HRdU}unBp)%)IGsX|zQk275nq z*xuH1f<=o;p_*@$@ZnWm&kcgsNYYDece9i3CNM`R2HwCpj zM(8h6#d^Y917Cj;W8Uocp3RyCnx7O&V^O9D~vE??GFJ?ZK|&&I`?QW4T~1V~|7#6cjKxEscd-A-(;uD^<`=ju;R9Yf$5CZw{M5 z>t()cqt7817_@5Q#G)uKr*{{SGwup+joF({)YXg) zE2A9((5yyYktB=!W4%Rxe*A*B$aoA1vEU zM&i8G3z&g%lS|bq?58pt)Bw9F6i;;mCMxxABcbj@v(&SzY-?+4u^H%9k1b>3`G!Cv z13{9vJYq5e-9vj}?V~OFu~ZKE=#Xx@QiGhUGeod5z?_YKh!hMDMzegK_>AMh?Jk7_z6!Cod;qpl)ze2;?f24bL1%h;^`zHz z92^|Gpe|4>nx09}Z3QA54Dm<(FdH`079aa=0rcxcPfM$M$w6*Ln!>vo2ygyOL(_jG zhsMsO>cE(QJy)fz&N=Sj*) 
zA|0JYR;ms8c320NAj^h7-!L~&6lVYZ;|EMWZp$HZ$);w`Q6C=!kDZwWI|JfULfJyZ z-UMCQlm}LG=}%GVv*|+d@$jgeQ3-l+0?6^cFN8Q6-Gba~eyFCFIE+G^recKz&)g-W zFQws20!#MI6vz3~=IyrjeE-0NPShznDy&(_YzSIeoiXEWlsJEjc-0s$%leLsX)kOF z)`WVFp61nUqWQQtL8N+f-+$RbmlCeWmI{mIpcqfP?jO#fdqMhh+q{G*Xrj3Q+J4rO z>FJS^_SU{&F2#yOqf@LOY)!T+vXvUk%09rKmX_6B7^!6NI@~egqr53myxJyg)P_Hd z)eL(&)F>(o9Rp}rvJL}sN#-$@Q2JT#BnT?m-I3F4S0kdQzcX%%VK6Et2 zS3dU1LeC^k4P0+hUen_Lrc^~!ce_|(db*4~%|6C$#dV7uS7=RDXD`85VUj^AnFvW- zf--q*e~Nzr=kXI25vO;gHQb$0F{H->+~Q8nX0D6;E8I%lZ^8;k_tvy(zfCwG2|T^V z3_}(!wU!}aWzN1Fic`#I{|$!27~^%Yp~@$xh*q$4P!$Z~?e z54R*D5ws=>%1S%w#s}=-&o&GmTHHW`+&r9#*J?+HY+~XPCd3YF0l?P@;b$5O%;LKb z`^uubNp(E=SuX%F`kF|(l)@v>%dsCQPH7AN@1E}`V_f$oo2kqeFTpT`nMw{&Cq&w^ zQ(<+L!V04z;lvCuv4HOGZo8-Yp!@1;h1dgJ%6Y)KAYL+sHK2Gc*VsTQVMihT9&B-; zC08T<)`PEz-v{J9vMY~!@7$?t)b^yaJZ0d*&1pLVQb7=a=uTXgm|?Ym^<0q?olDQT z#J(&)N$#arOupv1j4*Bd9fQ5$FabxHM>To}D@yF$v=)Sr_12A$0(+FCqbW_Pi6Ois zo;5s;5qzWu7-Y%$g78|aLB{o@CdkwAaZLEz#F;nhs@A7+Wj zQd4Hg+pC+LA~jp20T1TVXQc#r<(99pn@qk~vE;+F?$;3?5x!(7L<>Z`EL?W6r zIWq1-Z=VG$*}v`Gux^ctY~hAo$tK3w{Q4c0ZS~myMb}$LRn|cN?vPIDZV5?A36TydCFIcE-AZ@IUFW^O?;AJ&atzeNK6|e{*E8o6>k2z9zdq65 zLS&rXL3?2!t|N3m-C)=D$M(kB+#pZT;3nq#P(C@>&Kn+LXjfT24&eqjotyi;wj(Lx zt1a^>KC1}~TWqJ4#BlI?5?xdU#%bNf&K*P#(cn^?T>&NM#&Od;)_!CcZqU4~zxGyy zpR5nX<>ADJ?}gD2Bede=`E0>khuyD2Hk+pLZ5g6O|3mIoEWrkDnW|%IxX@%^{^+|a z*&EuJD->HSZ;!dICsR@$@7Qq{5QM2p+T45^Ioc=(1_xPqGzf3rRV=x|PQF$va(qzsj9Ie?D0b&-Q=KJN8D zdET^EQV^N_+{FtTZ0XTWraHU2>isGezPUDbnRBgVE0+Ln$(5B$^%Z^gBlzf`L0!{9 z6@afe;eEZhU)2{>DJ;JxlfSe0He=Tu!E5%(d@@hC%p*uK@nwX;S!5g(DT^x8lfv$7fPV#2*7^o^wRA{Qr+xW$lDvs_p z;>AV*T&d8jMK&=piHMEu$&(1KGU-D05&@Kx@A;Tq&0@3b4u|LQdoMOU08cqUg}ny^ zluL-+#qQMgdNg}NdU^!8i1+)>5HuIT4@ycgEUueSoI0B+bO{LwtFe5f`8w<1ogAMR z06O)64=WPkM5;0F@aK14M+C5tZqMiOW~bV$4_9yG`lR3fjtEd$g@rURFL}`c@=3dV zZ9gBvtW){M;KQF7&RaWfIlVorD9bNyKtav_0(1qvk-P>9@HDCsX#6ph3qlByK9+xD z3$I)sR@ENNyL}&O1XYfEeG*(uS=E&*IGa#Bd3~{rE_^6&2c~zqB#s=V=0k_xJlbX~ zd;1!{!pJ28sxOdlp{^7;8-b7u;3oT%-|fE=_0?Bm{;zWVE;H~&96KKP003k}(gZL} zt=Yxg_xLx8Zq@rw_ 
z#Vu6D6P1ybT?Y>!%~U-?0`hlv=7%GPhJ|kutU)^@*z7Fn83W)}K););t}X7gW;bum zycSLXom&0obPIzQ3-TxFa&VFz`bdwchtlmk#i^5UK^SBQe62rt(_?kIa*l0|U*zjp zq1IqEP#xT-3L|BxlyhR|E73y1&O{%Y^hWX-;FK_cj8{rrJWxL{(nm7y=Jr-$OtVOV zfYD!4O3Hb6l9ACngc8rR9Q0;3zt1mbj6YoN&ozDgmxQk*c7KfUlGi3ACgyolON%X9 zjbr!iowO0a`WGn#y-A z>!(`-`kVy$3X6C_EP{gi%3ApHN(R1r-y0{X&2~m|(+X@PJtJ`%o*wU1-lbMHJ75yN)*(#Gaii#TX36Z6-pjk@V>V__ZkKEK$`hW{IcxyGV!IPZbb2$EQ7;25Z zsu<|!j`C+nbi+dVM<}u4APEFdu+JDhk5iv+;s=@dWSP#SZHMITp=^J3{Tspd%gA>m z7YAj3m)^uAT!;ff#iScbKphe}LETIOnsK4{pa9X(k11kQHM*qq8iwhF(&A@jh;z3W zdutmT0LcSDLVbo_OhlhrTmm_VK|7d5D z`>m-#*mK5E%oIfIM-gy8_RZ-mWBN}IkU_LyH4|Uxa=Jn&0TxFMg3=|3o6y1r z9MpTDQeH={6b1C>J5ID8SB$~MxBJRqvRAA)(2yCF1Dz|!4bLIkQJ4sJGmlO}sDCR` z>65+0dg7M1?McADe?j{IxCdhknJTRB-Fob5*I(I(Ny290gBG) z`q(6eQ3Lr$$D`oo;S%#vD@dur8LvHp#y!BW#r=E5Li#3y zEm^EdXX}4LHiwope()vB<(s}@aX<5PT2D{USIJOxeS?8$#xT>Du;6ezo-baXrrR?_d|oRKb%M=^c4oGKZ~7?dcQjr9AU`Dm#fNDnvd`# zzAmLLAb;S=&w6o} z6!*P7FZcDD8x&)TBXPK^ZhXeWb6unkzkAm7^10w&x}`j2Es3yh1HD|2>O=xBjAKg{wNDQG zhuVFR6*`+;JDEvUW}{8^cYme=tJT(Byt_tO-6&GiZj|E9ttA=mqh5)-^yjCO(=yu* z%au@-jYH2SRYs`vBh>>R^-YCKh?4QmuZ9AgQEdatFO?U!wzvh>rW!9`#Xm06&BQd? zSA&)_TiuQiHm88s>N_&E_l5D_NZxsas{G8faES|P8)I?o5Py%!&2_TCsuzAZ)9tUb zmf8m6J_QZ#M_2>jGa$$b^WV2(#bllgqWhph{;azXPA@1iLT&2huE&RWg?o^h<)FAv z!vtn38=H6gEyq(pITSGVn3)o^zlz@0gD@W(gl(?$6YV%-db_jK>-H{EH@MGC4?c1y zesy|W6dDsSmCKB0EPt;sNj8d@{a3da_?p7wj;8I zcF%K8y{uWxePq4uY_aeW#t&lI@sETE0n*x==AL#KGV$*4WaJ@d4{e3capQN8`>uD? 
z)k;WVtpQGecm?=d71g^jR699|;vYML3SzwXeda(Ug9$$t?%$cs{y7uPnyxX=^q57D zfSi`8tA~D+FN-&rNKd}{O^XLf=@lDRgTz7o17~+m!KQOceOw%_N<*y^_M3L+mpYQ(wIe^5XLDCrN{rV+T3NRc>qSoz+SAWivg%zuKaxXdKsgC7Ra%s z9!wF*iN@EV%uLD#8~=?Or(mzEz~i$z*bc(N_V6vA=gwt{RkQA-tMvq)T*8Wq$!YLj z50rg(E)h$mh7CeE7c8i~7qVVeHMdYPiK$ZN$xvKRTF2jE7*Eafs=2LOw6U)up@5yiS^L;Lca3vw@VhL*au7}mxe4n zJ{$SO#4zS$L95u5&rfC|_j~nx&As?$baMUZ_KBb;VwDXQO$o-e4+dp^xTZ=GTpIM~TpD=hethzvUE?E3EfMT#5+O+JGLKjm zlmipqFRVq+UY7PiZpI;t1_>=;N*kM2ZUS0fAN*5a*1#EQeX*J2>ZS{;p#o23V*N>nu3ql6UL%zQGq*oh z_20yh%Iwejztr92TK;U(w%I=%GZrb(xss{PL%{+orh)+pc|r8@Z>+AzR?(nIqnU|l z4hMZ+(d10~g^}#9+NA{dE3pfh&$=e)@ti>m&pE)4U9L~_lDpuLN@O?nR`vE zVMLf*_3_Tf6o36|>OMk3WOzw4)atJa1ig>QE4xn$Xp)#2Ue#>f2;hz)t z=DTnC6pL+YvAPOEF1?~T<@|piY#~fKM7Wtr@_*q{OUOu~+XM6|2P0g2D@ojhny$P= zgI)zhgKVhcVPdL-L9#_D(D*?IyVf>xaA(~Z@zapEWD3E|u#rEi%@oSm!j4qpXE=JI z*NBZ2LFpm`iL6+XT5r?{e4H?A)~Pq$Min0tom{xdANY_mPPvyZrHo1BB~s-Y-M|YR zCe}PC4?j(PY&A2})aYI@|7rJ3<(EidVX72kx(@eT*(y9#6Sp4>h_w}g|qMq zUs&Zj%**q-+$NbO@3Oz#>GIr9DIvRS>!dkT7ylm*v48y+qQxdzOtg|KO4IUdEFSYR z!MlV-qeo{6?ZS;`*Hd0&bqynA;@!nayMk_LH5HruS$pravP-U?Vn40E?*~a73STM) zEYM)}VF)=eE+adzAVyet{+xuWUKXlprA4hBsNx2JrmpupZ9Tbor{VXr@sQwjk*6u*alW=y&dCgwW(+=#nn|3uVtx1k#LPZ5f( z{%hN%23XAGTj0u4&#D;I1`uJ+#_TT<8nGOuI%-YqZ3zpy$)?5wQp&v*BO4YNwxH#h z+kCxmB5N8oK;@!u3QdvR5}oQ>4QAB+@1>Nz@u2bzLoEXRO!7jWT7z#G0R& z&K|qkKKmtnT&w!yeyfsWMmX(7)5!omNB#^rXe-z>DCQ2C$KC;*3yf;&{G#d;#dH26 zPsu`HT>qs%rXZEgj(WxjFXeNIA#X~TGhwjk*u_HX{sp6A zn&=dsy^(&Xeg$wSw9|+rEIoKHk%Xw!P-D7*0$15i;TsG3;+N&(hmpb&HTH?oP>=GEuD7})onnOrLubB>aU8I^TQ zRS4ZL5~-F9UW@LCjX#UW)lw}>oBd3Ly|=aQ;;s-svAgiV|NTg=~UB|vAcf0o|_K&=KMTkm!IiFXzpxRz>DtyeP3Yb}>TPfMC{l?=Ttj_YC zBv!6PXJ93ivixkw{>I0@m?h_**2}2D=X#=bZtN-h0veD0ZZ?)Q?;hnI58r(yXHr@I zTG!?ugdd`?G5oFNvi<%uu>0)=)WIB6^kDF6;hFa;RI59z8I=fx&;eUbhP(ZzSUz53 z2Z4V5Ufm`zIDp1@} zSQ|>;jB?*dwxOYFUG3?9HT~-ge$p?M&`^i2&pxT%t0zUmkjo?7wTvL|aA+$o?R zkV{XM{{_o30;z_cjwb)<7AZfKfHVUiCMpY{h52usFD1#A`mZbujq89h8X(f&Lt=x4 zFD$xcC=Y(~f-3VO?2^F<#{o`8$yP`d*ZS&5+1}>wQM#Ui{x{;8&y2O_sL_Ss;)L-p 
zQEUA|6Jf$BGA-~x9A7IXSgYvOg;;7?k_B4!m8n!fYD4Uof>>}Y#mWl#*xkojr+eyp zGx-_u%eFjIt?29%0^Pu9OfOvMzqrjnIyiqK+ba}UP=Kw!6fqT)YlG@!C2*e5t#-O5 z+UW#S+Cq}&aZjf!zrmJG07d6Xl{Is&RG$A%O$I3JWrO~p)56pOmbxUb?_x;X=-~Vi z!Q?o34VVo@LJe8Y$V?+B@p7aO-bK%L_%}vq{AT?LE3uOe`BwqV9+Gv^=r+@szu8-F zLKs0L(`X6if9fIFbCHtze?J!{=&Ni)_cAhNy%@npWVNU7uIaO(lfEdBdT&7`pE*@r zkn9A|fO6*lJfL6oanm1E20)BxYKQ3ij-dU;FZcDcFBH)@WhE@aKNSqns6xTrAgBqf z5dg*`7z2=D$TIPONe00S7mj9()+?2_a2xDdz>*ko`(!JA$QGC3j1z&=kc>BIyD)(w zG1BouhpdHvek(;rKoWfBv~pK}EYO%}ZDdr3_~EQVgZ+A)q2c_(GAxYShJ1A4N-ws}4DF2>YQe=Jx0&xGuBh_@`)WEOR z>Y9N2{k0eZ<2XnE6FvS{@hfiJzy~TeKTxp6+nTGrE*}zrciz_BZBuOG`PnREuajL( zlv7mi;a*5Nfd`+v9*bl6@Vx|?;kNVYjknWz=?!|)r=SYTa~7RccDU3J_6mwInT*yo zY`9gTuzJo3bB+2Xfd^Ufk_PH<@`|exGBH7cz*HwU_J=xW-hkqf8|CD+yD+b@1Q)oG zA>;!|;>-V4EG)?XU5E2)1M!p^+JCNBQP0}O_)V7Ln%qMhQ%k4l=>1=iWhm!V>rl*5 zT1mYP9cgOw&Aa_M`s%rTB(E8O^9RutF+ghh>#5O$d{m9Vku+f32MbOTjjT}LDj%Of zMzu;uQxhLYdOt9b0aQuKzbH|Gj|4JEzv0KDk(kGYz9uIu<0U-|4_dsQ+jMYwJ?G+a z#J%G*+a`2zHQcrs?y*eV@ zI`&9&!{NIK$ie$|*`(nR|6{Y8n^ES61RuMS`&Z*Xh4htlA!4~k!h}TAepF&UH@5{F z`FN+UZ&O(wuij7-vk~^4R{K#%1Vgwtce*qjB!+*lZ0)RCq~&kf{`sz0B9Sq`T8r7P;HN2?1c`omR1t;CWRDw zp)pze&RU}9o(A?VcQmu2-<2m`zD zbmcqd(AmSKmc+!wPwVTZSpu%e7ke`^ODy@6u5o>gPzETAg}ziiWsg|L^_4|)BNB7us^o@CI^^OLf^Xiu@GX0e@eDyLC7IWbtM;{ge=28Lk0!`VV0j)E~X{l^dX!;U9^#!&?Yh4&o9 zn-oPOHa|YI8z&2vGdqq5hNc|@`9vdet@;C)c}nMs`tiITSt^p6yKC5h>v$SmI;dJj zZAFk``I)N&={LQ}BR@}=8yPo3b`GE8oO?M*-LO#*;czEjM2@;flZq$EL_lQri=HxH z9i)JxnH2M>C6v zNM7>^0cAbYvE4bldbXw)Z#4;Q49&($`=k>#3XeN}g=E)1=MU8qw4x(O0HvG-OS?f! 
zm_tOH%@Dt_3Mx^_{j=GZT>btx{68Fw=g+&tFiA)2ZFPhi%N}-#&Bp}z@+l{pDQm-t znFj?!_meZ7IA4-PW)jLLS zQC0p%e^WgC)aY0>bbvONc0KWd1D{BrRZ+r%TRROukJ1&*oez`{FuOs#W@*U6)a~gOs(zY2@G+dmA z4-_SeT_xy>Ga{TAWb~wynJnL_U6N!i3&Mqx^xBFDAru&Vl{m_X8Y39cVVh4ft)MKJ z=VOGH$#g%bQT+huUXA#;ib(*IW^h)8-N<|zl;8=_CZ!Vuf=%~kUQ<^|*ly;|$0KZH z`BX@hm+oPwj4qp|)85R@UU*`h)&Fd43-oU6c&~%chUsJ6(Tf#DcTMp0t*|dO~SuH%Jtt{_hfE|JNlL$<0(-g3TLG zA~nE;{Y9(Y!@DIM&4t0mu)u3VsF)u%%AZRYX*}&cI}twwc+&dslu(zdy_1?Id~#YS ze7-)_*OhSXPn#v=-F+7Y2TXsIVs8+o!4FZe*9Y|l>;K7s+FU*_4_Fme7=*)hf(PK5 zUfcqY=Dj7n;g^!-FB<20%Gz7Qsa6dI19fULnP%5AU+WeoQ zT2|)2)`pdbmhrV&(|vMg=*#o0umofsCU~}ffoMEixo`Shx*8af54N$6=~#{{;yp}& z;m)rB!Ij!%5c~V#G%fqtPi;=G;JnFS^``PB2Ns}mc!qN6xj0Y8)K=)kQsU9N#6Lz4 z2FwT)X(YaL0P+5cgZ5iwcUUA(6>bU(I68~E*3J1=vmcN$u%+H2v(%tpou@iO1(@^>qcS8yHY)cCBAP`&IEgl z_3%g-Kz28>JIJOH-}!GPKZDJqD}8Cc5tvo(7ZA>0&We~hr^QQ}l`r6v?BqOQl<;XE zWSnA@#?LB-Ln_o9%iVD=vhMs+@vXIJ*Jv?f9KNU1nqQelh~b;*H<8)QptK`S#^shP zOgL0hjkwGI;zaaRF>u)$7vA!cWs7TmVwzTvD2}V`MX%Xz^h2nQS7fBE^nypS4zIvP zw;x5g_}TH?v150HfSD0wKp2;3|Fg#^n}Cb0%=|u^gX$M>A1=oLogC8Hrh;pMN!Pp! 
z&5CjeTz|$r+bP|ftu@E}R!TW=zhhZwXBd>!Y8Nhf{^Et){zW`hgrIg@YpZw1u$Zz0iVBP)byZC%Ui1HO2&Rc@DVzO)W zzNbN*`=N!`#BWg^u|(n4_(+QdRdf~!RH$pVzDHdU7E;K_o21?}Ze~A)->3L(9|0j~ zW-t<{)30E48ctP@7L7ms8K@!%bVMjBt>e2>EP8{Ai=n}Il^)9B3I=NeX70bDO z6e%C^Ss$EvqmU+@qmLW-ar>)BbqhP6&Q%YD0u8Aah0 zC&$eZKX;W|+=blEcKDmC_Q{sMgjbAh98vw8X>+$1vcJ>eKl%O522qdCU1Yg4tCtlk ztf5EB{TO2_`IkM8l9t}TQJshHl0z z@|J(Z>5OroXkVI@!mSE@c4V!n56Y6T540z>rEJ@a5&8_h2K`JXnyn;42pg1r6z1x9Z%CeUf*zY5%MnEA=^&GXIHpcg-^e6RHNECUBjPex;mW= zU@!j@{P518U3@K65GQEbu<-Vg-5g)#Ji!LmJWU@@4CNR)e2_vhQ)q+-6RT znc5aP7fzq5G>@jjdKQPXk*>Xl7UaTU@dlyQ524&XD5yeTdQcpmmkyhhG#VfzJy8Y* zR2dnWzhea`Al+%GC}v7^E7~EpgbYJ4@!6H2AP!$9)h8R&Z+K?YX)K4n-$a$7Du!db zu*E!U>`wmwkhCejO#9>CroSQghRL+iPa@-}VbL-*zIH2eqwk^5Z*GoL3=&+AJ#GY= z=8CLt=2iYQ{11?h^!y(nEnk(}()9 zTW0i{cSK)Sht;ArKbzBmQN%8qr;krTVPSXH7(ft!trrpn+KgR{J$|6)kBc4o+QJIQ zHl)Ev*356I0GJH{+lk!vuBO{2q`&WxuO;F%)9U zrdTAQ>6Zt8$ddwDVf#p-3@q4c;NinjE54aGt{I;vRQ<{?4ns(B#zQ;ZP4ttR72mO| z=gswkFhw**t&`o99|L2*77OW$E0ZMYxvCbsh*=qjH5;BWJdb6P0-0%w%)vs02+gN? 
z+|}CIBuY@lMi^X553-9ZfuJW2QLl#)2YU(^?LkyCixKIcTp~bh+Lmg%oo?R(wmjWx zD-0{S7mOq>OYNh!T7BV1*WLWifg?OX(YYH^5Fg7Q}6-9ttes9p*2dyW}ihE9G*F%d!Tu$N-$}2SfI3P5gtfl?muz$>Le~t@HlLox| zC-46z`C@_ToAvF?!RafiFDn(EmYX?GC?$L^N06&h0lK1W=qf;sJaXWT$JPKlklb|N zkoH*w$JJXD{};J|s6E2gD5=ypxn5eAp&)dRA2fZeKnFPyL@rm)$d5IYs&mctHbN5#YdzLuB&;_nu1`9l$453L_n{Zoh3F&cWUl`#95OP}dt-WaOw%0eaC;CwT={7Q#o<-~kaPg7>Dq;9Y6 z!NTR?(3@L_hGylW;IGlH+*Nmx)7!!qhp#?}PR<);pKwJqjk_yp$#ApS{=sM$**P$^5iIqJ_cvxIphAR<12teKx(1vLpH(?IfQt^{JmG{a+E!^pd>&vY?7< zc2k12{%h7~TNsnV507(3`2CE3^P2>?DWt#HJ7L+nLh!UiXZwQh)iO|7#+#0^n5g-3 zuD!s$zSLUaL(;2{;7R<%4)=v4cqlt>$xikUyo2m}pnUS&&aiX>vdBztHsO3EBqXbe z;@Byd^}q}Jg@&&{o{z1jRG$Elp8hArvlL6(X8s+Ap?eRqxHc_17F^juxy{dwdOSvI zG5iIV_RIGKaBi9TtBW}M0VhnBJk}0fo7v1OR=d;e&Zkc~e8x7H<2$H(POgGAy^`tkiBt<~tA+x4}K2LN)3m3d{nb-*y)o+2hDXqJ$2c5P$8qgB$xo<0`(pp%#8!YZ%VnM zWCo26GQeuV0sJb!--1Wsca8(52=pU|KKs?ZPIKmICP0Qb+v34~*nZ~_;rhU4)Ecq( zql5hEZsqA1%p5k(|4m@q1GyWaj-fLWEY$=gO;WP5Viw57{{aF-=Owop2on=JP)kZY zJw6=7MV(C^zR`oAO{R-Gy9>ID>0Rau;=FT|O;-Diutr9_KzdVBzTBy?p{&V3yjX{) zuWa<8-FtP*HZj2EM_bvCnw-3lOKkF*sDHDW!HLbTZ8tUJRzrdPX=NK(a*$(eQnD7e>RQE>=kT}d@>^V zL|f+vqH0B8x%!#zti6nftUpw36T)@Jd%0e#eiJj|z%Sfk^^PE#*WzST~aM0pvT4st|m5$#co!-(=egO3S6E z_2_GV zlXOC4S2pRpJ3jai^JOm+uw_f{(jfLLL=4U8KgyJDl-hs0sqT!gq1sj~x231ug9(*4 zTA1ku3Cc7saCmONhe@4Lut%dQnQz_D*&ew_uonV)URcFG94u@QT)zXrH_}?wzc2sT z3QeNHYK%(wzUoAn0@8{6ocLLFi3PCGcmmyRVp>`_3AcqVY7MDFlSCAAVhA=VpB*u5 zUWFk+C;bK6h^p=WbO8HqM)Yo9twXM~k;OdSd-7SY+^KxxM z6VldNVS_Fxw4n*_eIktiz=b54(r1QiHvOh$)dTY9-g^>!ee>`%Lzl=`Q^2QCsKd<2 zhz4wb$pfUH5i^c0GNc!QeJBDpfl8DkuLdl!a#n(=N+A&OtplG`nj&>5rQ+MSpA`vY za0dvFT8>1+J)v3N8ch-(o*R9&qj7Ow-{ehs|1@jF!0$S{X!X9J!AjJE#Fu)PznXg9 zri_CTN6vDI{i-@cy=N=sa=%&;QSW{#8E5`@S3j0o%v{W6_(0s*UoZ-TOU*DHu49$| zI-H)8vP>YzbfIg2K2O`;EO$PPdm-j&!-{oGN}IV`nOR9KVm-3_L|FA-CN^251--MH~t_hr@6q9kiRgCKMy~ zPD&}r4)_$UW~(4D8Frdd)Z|Y$+g2b(e}QA>OK7l64(rNa_`NTyFk~4J622hB-?8|j zcpV~}zfAT4XX(tr6UB;}A)$Y5k}31O)7nnFIm3NYXU(smb`wr3PV4&TeCrXLxDwl2 
zTfx^x6WHn<<IEbSdq5G1P^`C*Xtp471UhqI$!zIU{(&0RaJZ)z2$Z+8PpcmodE72-*ltngiJi6E z-$cw>OIS;g0nS)q+RVSjF>Q=#4u9%Q%rQYn5g8B;~ zhvnOO-h@KVn=zmnGw`%}^jqrCDx%ti+D)4jnuC-TDYc*WJZTSCu!$A~?;ummztWo+H!=P5qf#F0$7Rlbk*u})d;>sX-{7qPBm!@{2cCT6s0UFx*%^& zT4=X`8I`J>4AvY_6+@`Gfde6z>rI5!&glTVoV$BN%<}c96jo_&Ad~B^c}`jPZ#$Yi zMtG{E^+XbnHPf(-VYunZK&_8|#46Fg#2dVLnhj%enGud2K+FIcSr29avz6bV?i+Gl z-HUNew!h@0`%+9I63HAygkPl$;{??y-YEOkYM`bo-PsM&WNf2bm z&1ESAJzBy!p$jBoi@-QWA($f5H46afPRq0T!u z0xBx1LQ1>2nj(DvP9e*q!nC%y%k4k6Z+__^N^5p)QfyOu4Z+JeX4KxU?Yeg-4K=}0 z<18F;dfuT3v@37cA=6mw1io)55%w*LF1CiS- zDfti8Q}8V|Hp47c6Z|#fq)>~~FR>u4|1gg)zb_Sc4sM81+dpx+E`f>E*zr$~Ey$Qx zUyDN5qE5giF_7nJD3N+yoYs_K&hLjZqyFtVOXzTWdD@ub1!a5S=Z~T9 z4A*`Q=Vp;2W{+uN$ZFcYa^i^aKSe|DjV*qcH(?`F1R!2|;vfla!y}P!d4705a>8)$ z^L0^*t{vqgX>9%jb3%uSu!2ich3o>>KCeWoTS*FFl#bf(UYMd49LFuWj)+7=N4u6l zn(U29CZ*;+Zmu_Id?EV*fdeMgWQORDTi(L`M?|Wie0B85qqr*BHk|oyB}~qA!`K5Y zRTu)6PvYCv_1HO$QH&hLn5Oe$G29DurGltGO})IKHvDHWhN=vP0u8g-H~B9iK)&#v;8 z@vvtH+7L6AB(&5Zxjm^)Fu_lj*Hbm?0q$rFWi67LbnWA}g^M1W=&YwRGP74kTK4Vp zW`GC56xsQ_r5JdoyHHCzKa|5GQc&4CG?CEQ^YU5gdHuF_{_K|n_1elU zof#X+`>U}~tQ$4&UXaISS?>VBCIjl|XV@Uy{ic^J*=V~|Xg{I^bMA~o4`7&Y_sfRUUWm)R1aXL5jNv?7%c9JEC% zP6*EPsltF;0y3_Ngj(mHde%1)B4&xPL(dV|NDyt5KVO81;z?UF^$YR=yfkS~SAs5tQX zK&dEvJ(NU`|IwLV}2hkQKJrc57`zswC`!)Q}_3!`9Ru|aqTrN2Ff<(J5N5{4c zLWlMn3ppj@rRlyr^8R9aqNQcrpJe=iwtTB1p{*FG%%m)_O*3j2hWJW}(z5HLnPa*` z{pVF?Im*?f%!JF6x7)&FUDMP@KWL$Mw$;1KNN)bb5@}+AlTUlLY(VIJ7 zB1NgAw_Tct7#ip$fJVAMINPA!CKULQR|nWTy5%VRXs|+zetu-k33&8k>=*p_mlA0X z+@TmojbiaE96ITV?nu0%V_b8P6cLR>Eej3K?139&Z#&1G{$U(HV;n$cZEZdK)2+Nh zY7>~^#9-%7EP}v?>;2jhC(y?A-2%QdjL66n?rPbGJ;(1-Px$y3f(t)r{6g^^bd0=c zflZ92XAyW4fh|sd?g)%Ay*xj@#RCOk6IK9hyxFU>EJ092d65W^^P`Q*_uSyF(u}%0 zgt^ypt73g)^XYoNS@A5}U z033`UuRk0KgYZ3YQDVc1fPJ>eX>YkrNYLZ(`5K8mi2Q$!96TuGxGW4yrq7|jTy!1j zyA9D{rlRU(fxdk05Cc;GGmvya)F!xNpY0=W4eG2Q186G7Z}Grn+XXh+PKFj0dk_ya zGlMiNef>D_f^@dsc`lq5aa0+Tz+9 zd{yG+0G4X~%60uetCipz-vqzG2^!QK`LIGAaAdP-m+M%cQ`NqK?Vat0H3uxPg;hB6 
zo^p>5SH`e}3hzFL<}E_6cb+Y`mj}#k2BuzXbG4R5v$)p30^yqe6=!b#9`ac6^C&{2 zN+pVVlL_!t^&owc?jtzl!>E=LbWBEV2B0mR0>_|bk_HYeo{n%M5{?(_vhKLSNo z+b`!_Dz^RMTYz{um;ZGbw0||l?QGS{wKQrscR~KK#Vf|)&%mdoK<@wM5%z6WxTY*V z56{tL{}wti2C^VYt|bn=(c>R&y<*<(bA#S}Ky!ICq;gAR%*dx<^20i$5;nu4+zQ?s zx+$PAjt#U(U51PiF=8v;;khsIqpE3hW(b=BlcfwW;cbo3Nn8$ckECCPM(#;!0zVrr z2pIW%o)66T#!aA!G$*N!QDtGoC+Aqx!n=cZc6M$$N`9oJp?MRZsteDU${R&?wNLAa7R`aeO$-`N!6r>$HPghF<_r&krSmzP)-)42X^2jB1T@E4AZ{>U zA@@C&(CawP=bQ_NjW=Csi0%g2r`JV+n8?N&jP~{6KdL)}W>^r7K68u`uFaD?gm~cg z#%%#{4w{>I)~mjGSf!TWMiR4m<;c-Ua3@_#xnS`wA@rRXp=64d(zLLdkjG)l09I(@ zUdPj8a*s4+EC^E>*aH^&oz_)pZerU9Mi+mbj&PR3JM55;(i9g%9GM;{8Sk5BU)KcCX(!`JRbfVF$ zXt3CbsmmNwuMx`k9unImy8P%WpjCNxRmX6!o#XCm25hAR;1ESR1S$vO@MF?H`aBr{ zo)}4D#F{_L2Qb{^zRO%5DJIr_unw)%T9nbgbbZqpjy-P9k+hvk#z6MkmjGV>yzZOS zxeR*uIL$4UbOv*>t`W>X-cK7ZsX7EQo2c~jSrswgg6I^^>i8j}$g4V9c|haj=xv(* z5Q@8K!T`6_qZi=dM%4$kiK!IV>NfqU~s?2TZ9 zblKIp{^orFwFNpMITds~z?;lkEK0=8DDb!HzoTPO6{@Cp0a35g0mvTK!lby#xDmPnHZKC$1&W<=-&`)7^fb+|)a|t$$I3J4 z3YKTxiCrn^#LF_?Ggv2utf;?V9QE(;I1Jz+d(&)Fe11PY?F z%tcd{c_gdlv|fp_FHdxH{R3gtQkx!2&sv3^CRZwK;Sgt1P*fRFQujcPDzgHO6p7boQnY7NkC^=$|#$-g>* z>Bq8!y~e-aSZOD`bM7HH^(DYz>`!^T-D8KvB7?v*kn7-Pd%WN3iUvkj&8-JhFApr( zlX5gv=CLFb+SCLolc|{Qgz`t6LO$-tb000~41bJDV9Odm`i2Mlz zv!l0tMPB_HnTEpKsjso5QMsH`!ce(1MVFkqFi-cTaXkNyew8|HdQoJiU&A151=7*N ze!u_q_;9ynaw(aIcJxJv{w+q0&tY@#I(9$UT{OtkGg4A$Mx=p2ANSbqzjw&EtxDm+ zpH96!*V<^;{FEd8G47{3e~{2(1h_Q1r{9&=E&qIcC+seq-eqxb0?8dwX5bLWx|>V_ zIUNNC1wIh=C%c5U2u zJ(8Zo|K9T^{wR)2XcI0S<;5%U4*v(3FaTO#0%)bAL<8kqQP!m#3V$mR^;99!&)}$2 z6-XXYq6wDIKM)CX<@iRzVqS{Sb%I!Ww8C(u|r~ zTWBJR8hJ!A98A3N+pP=!%77IXQ*hei;l$FKZ&0v#?(XWhj9D23aT8N;(J=EfN*r3- zB8^zz6gd_0m0Hz!LN^qymeGnyOd!J8=ak`53pctHw1MXnl7N3nr8<=)GtnGguKhj9 zr?a2F`c*aHVzz0cO|dfiq}WPItIkrb~e|#OAUnXcI_&?ga3a_a8ZcPkQ zN{2`{2qGaJk^)Ky2s%=NG$Jv83P^`Y4IxMfD2hd65RNF)DWP;DC|wRA%H0QjzwiCx z54d-&yVfig%sQNNezo)2&qfTNJ6Ah_41aQKb?UIpmAe+_GqBiTI4U;1PmB;>2O%Z^ zJ{WrQS6316pT|oU|Rbv~`64 zTjSZ1UnJWjufK4iMqkhjEH|XjQ{EI{*&aK2J0{Ak*_E&lN3whiHGLEFcz21tkCr=O 
zH)LVp`=uts=dUe#(@#%M7W0Pm>Tce{~RX$--TjMkOcs zT_J5}k+ajK%{hVF98J#o?o||`XlUS%w*-$BZ`V*Y920D%1c%D<0zZh=W&GNq;+Xy+ zL7s)R5S<^}H`6->m8cSNk*X<-W4N%|@gK8Nr|vl%Bkwd*Wl+z7eq)rUlOVWVX@qsV zZj`#|#*Z2xva2z9t%p_44BuUjnw)%ZtoC_RKacCjPLZS<{wtMTjrmv=m((F@)&Bl~ z&5YQELKj5p#oo3!XUP4!SqOy&v&}v@eoa>s&0s_q5#?HUi*^gboD$G=Y`f^!co=mH zf2@dEfp`?I_;4@6)8X}U7;<(akrBq)(jraa65$g_))i9|v9mZ(E=WN^0d6htofl+L z!X4Z+M(Db2lG=6YYfHnYFOR%*P20#PpTG||^2ACcXG49#x8^~%=YsS!wxZkBvMq~; zo(Hg@Ju>+moLUAy|~_5bH#Al%w89OxnHBId zM%BQ?kVZ#GlYz+<6n>G3v{nC}f~jA_=K?hGgc|+RCr`+r!$9hLRfqNrEwz~ZPIa^xz$_F6esRN3ycNfW<1w*A9>=D}9Ikhlo0ALKuBu5i`-s6U~8 zZ{_Bv_-+vp{!%b0jP;K=%EqhTs_PDVP8Um^BEY*!)2ITYx@Q9KMHE9l%2U3Q_4ds2?$|+eU=Wcp4oh`-P^i1JYn_`uGsCx$K7>c%c%Kb0} z=5&3E#1n1ZL8I*v5xD~@K7*6ClV;_m0rY+?1RRmW=Wgit@`w5HElQ-WGOHyqk0F-(GBK$t|4V91-F_@S~378Y3yZnZf1n>*eML$9gNq>1^QJX6}pm#abK zN6Ug2FN8rBXGJ``Q{J-3E!8D5yNa4 zJt428nhz`AkKj3C?EPUpOZuukp?bNDXQ=8cNhZbRx!-^;NYm3aXAsk;MVAK9MQKxZ z2|OrA9XL?+`cA(;7_oFuB7?0^=QImeZWY^}@<<)jblGxjM zrQkv@Pq1I4b}kP;U${ot`#JP>KcHmmdxV-*NHvy7Fx6UuL0Y4cx=d06+`;Al#e-qV+F9D=A@T_-BL#^h}O5y zNYD$7Su71*6`CFd99aLtsrb+!OgC(|@o%5yu3rYMW=r&L5+S z#}^K9klhYF{}j7^rh+15M4QLQ;c~fPyJ8hf?SYO1Z*D%{a1!<#y-k_;a)%}5A%|2S zv+@jTEy$l!D|)D%)I3I&t6`k8A*@I%$kv*ERH;)3EUU#Pu&mZIER?1KXCbZ(@6dT$ z9++RU^pG50|uM#+$+|E>|PCr(V4!vE2QNwY; zL!c(bvvi=x0RZd>I|Wn|?~lF(Xz-n;^b-;C52^PTu6x+{X_S8u8BbCp zt8wEO0%q6fo*1P7N}VZbm$=PLG_ zeQ};!d156DYs28(qfospUK!to3lL5U%BbO%dG=m(75mXhW%AMz|x6TFUQXfl%2yym|mG z;h}I`to)nccfbxG=>JWcl@UEG@)*1QJ-LZ_w4{4yQXsY3OzYgAa!+!j9=9qfQY`}rww_yPzjM|UM8Mt(>&)&OM60q$>MvXpwngTR) z6a-P3iAkyIAPD^hZ8tdxpw3d~9s%#UUUIN%>+I7k1?Fk&8xuIGyE>FNhr?t?uM-28 zZQ70c``t(x)#TI^WUx<^zA!sPlKxq=_7^tTl{|}`*}+JcVAbu({TdcT> zI$hRrpHH^F{|TXx-tM7vSL^a8HNOr*k5Wj@claZ^w@-z9pvsKQNgewWERnB$#hege zU0r-VW(zQ5FhdVRTVi4Pn|nVuuLJd(Czx!)O)6XC#Cq9EEC`z{w02DZUBtdV)2;!W zbTlk1(Eto4hbA{^55xd0^w{8@I$*+9fI;;>kTV;<(9e=H@~hXZ9t&8H+g0S_3lG&3 zk?QIA3A|bNLKL1V47Tg$`t-M@mrgL{QB`e-Z8f|Sw_cBPjSEign;drKX?Y+)UW}e* z#~dqqs&(GIoBZK%&X8T@=j~c`FGDrvJte;7ZV%Q^Ycvg24nG*&>XZ5;leZm!xJz}2 
zyOe5iB1oh>%vWICOAX&^Vu&=;?Z)uGF@693J&k|sGdjCkDLgFsX$JLM(KnrtA}a7( zxKkHTICrBCgsCk_)7R`lin_b?m3=D{+5rgI5ca&QxbJ3S=ojnZwmy893=!YSFd523 z=;RF($j@7qU8wRFfAWD#b5x$$QSc6hy=x2VCZrUi{hq zOY-GWqB1f}Ci!}aU&23D?aV!2t!K7w8fRndfrQ4R3-sSR)1?|G9hbN<#mz0Fa|r}m zk$#zoG&9EH7>w-CCXC~WA8jc}39FfjdYfJP5pwG&&S8p4vaZIL$k{qhGn>Zx(f3cs zuN6MqIr28qW@!9GR;E9@^c6PWI6$J?Bd^90xMjm)UkWPN%e@ckv4ly|D*}OHnfw&l zQwUD?i8j?qbHC8snNBub0f^OQgsO1he!n&o6O%yD5TMcM=;-W5A$-URhA_5oZ)=LW zGPd;E^5_P(^2%JP4Twa4C~7W`n`vg|5v4#``!es&rI?ppcr8t2ZW-1?9~Y|kFqev$ z_U5KVjH`>en>au1)7Ee<5%V&kqA#5NmrCtQg|=zD_DU_v`oHSKQE<8Ojh_?AL}yrR zc#(Q!2@pYMIlpAHmQo;c{nC)0+;+gjh$zvud|fh~uP#M(ihKuDCVV_yY)x+HUoQz&7>@l~ zJ=^5^%CG5&fqZ0&y&<>z=H|@K5k2|H$+mT&X5m9~gt_Be?mBxmx3zx8jh%k&=Fzq& zgH$jKoSUAMF}chBlAf>~2#qJug{&CQ8> zCd0s$`R(V?9LQ$z`Gs9n+R;w z%@1@MTn)K5PbT;2RldzH2XcdQF${!Eu)+S2Kf@gNFMZ9I3#6kgbM5*7 zeAd>yyazVHgjOp_fd#=^uU4p{C4;D40{MB0SmLKHXJr1g_!XzXgU371EDHps4%QqG zP^OA_?3>vtn|=eu!aBpd{MG%Dn$kg`W&^GF15nTNAgF!jdOTNo`|0eQOo(T?>4{~Z z+>}>8gYXs5RzdpA)bcL6FAO05GV7cyj3zVOBHuOBm5e8sv1dqmmVOP$$>iU-v(1GU z7fCsf!F^-ZPTUv}n%=EGV*15h1(;3p-16+M7SdljOKne?81vat?1!T}+#D~^vU36| zsP&Q^Y#t*LiYg&CnfC%f#Jam`#gU$9H_X zGkDzEI%ZS3QKP}pGh&Z%6Z(4yculZWGYp@svX$i>Pi1FvTlSRB^tq6&VU}e%L)fZVMl3w0|I+E%7bP&lR&m7QOIbu-*3Py4INZkpaaZ)rm@t8DGj|Dqh#dzAvfbM1?aZ*Z~L2 z82~VF5pMDTS-2J6)?HWiJW5EiH!~A7RZ{4c>sRi)mnUMqra7!EMbrC+b&Y;6;yzd3 z;oUqn*25I*MkxUnft!coB7*{0r`dPwEv(Nz=Ma)Br>-AxHXhrqmySrYCZI4_P8-7! 
zh0@;j5MGPljemH&fopI2YBJTi-Re)I%vV^mA~BfayvVo@MbP4+A{K1N){o_gRHrN9 zgoz!xu*At^?nan{6?zdcOB1iZ6c=jZZ}%&b_oSB1nkU~)d9^_5YHxzMlakD&+A%9z z4ZU-?%W~_hWOjQ$@^-B@vz=FBoMTPaIa0=mkG#_8IWIHyQR>*)tV}DnlY_fCvt(s1 zq|eI_B7g@Eudac(cN3wZvB|2;(A(e{rPRD17EDTqUq3(09S-A7ReU>O`{+LNI6GT} z*)m66SD#*O(z8OE+_8_roXI2{>G>q#R z=jC|VK6t{ zX4vY#C7j}U-m77@JPMQH7M_gZOu7N(ryAwDnI7ZcQlBZ=xiS02b4``^dkrEKrlvo* zY!6r>`ICrrd7ZbYidnYYk>Q@JOCRTP8dJ{}^BL@x+23{P-fTe)JmEj3hOgH|t1OiL zauWBZ!k;u{iYsj&=7q024J8e6Kk{4nW0*o0KE=t2Kf9x#hBq>PdDAEDAXs3>>cIk& zO0yXL67O(w5xU#9S@EH%LGq34CzOp5fcy0DPo3|fdefB77{;~upJ7R%76Yo6!epSv zM%$*_69OkEju3t~=h^(YJVS+gy$~5%PHKOaI&pU@qju;x!S>_L*5y&c_hc@9EM9o` z^Pj%qV}zQ{h1ep#6rG0&$b_pda=w|U4b@;C4cwjP)5;=wT+hv!Iz}%V^~{Jd?~Mj_ z-55IXcw%^X{)x?0i7QA&F@#-Ta^ngRkO!j{5H5F_)auAyY0@6?I|WZ_Uaa9h*n%PP z3f;Vp!8m7^#WmNj&#GuUf#8XiQvbKDwHMio=@02N_vRAKte`j=mT)TkBP*OE9NOaX6ripAk3hoEu)4I5?@}C6pC^fgm!M-!Ck+nd zeotn-ycM0tep$1-C`|utc}wl!_1^tD8*H#I^5{O91s3wJ*+?mlA>m(___JF!W+P2) zoREe!zHT3ohBeq=7nt5>MiRW_PLW8oAv4tCgp}3*tstUe=$vGxE)oTxeU1gaTDlIN zn_RYpIT&;FmsTk3A57aWr#I}pfc~9&^GvpT3+ga>vBPL`a-f#(y-ff(ldXN6iH}BU zNh1t%{jiA_mnkhc4gMX}gYC+T4bVbang}*ovxjr`njSZ^ipB=;#_PE=&0g1i7l$wi z;7XfdwP(E92|G;o3%6B?2BCdKzU5GfBVVpRVzi$m`RkG;AMgG8awOr;U^n>kkpC$} zK|VuLPN(W)`EptIK~of7U2P2Z+F8hI>E;f^x%i;{|~sWQwJngswyu*~J2<#qinA|5Rt4VxZR+LKE-6O|hzlWKgmi zodC_x3e02~DE$w@?GhyTNsSqS#HV6*cVGA{K-V5#h=?W~MB-KqUK3K9?r@V02haF4 z)bK52KeERBRf+$H4B+pJRfY6e`fd{Z*Cl_7_5Z%+F!Q-cwqD~j8?Dx+vRVjq%P)}7v|H*3D zmF1MD%+!%>@{$MGL-83QTC@+3R8d>klOuQvZhzC7Bwa z_G%^o-;`osw^<*<+}f!TdGP&1-Q?@{*iJ|R7v&Y|P9&&zK!BM#XN%2>9}aaELlu|m za)?-4!4K$`{Fql#(%4J<29Ms^3>mkUd4i_*a=aGpYMZjI!UV9Tv&pAx7600T_u|t+ z^&UsgmLPP{svl5@Y&xN&JRmIyzA1+1ip&=UctFu*>&#T=*MlD~qe1%=(j%#`f45E% zje_*IxGIE(PLXgr5IK4Z5xl+P3rz64gm{EbelUaG{=Pzl1wccQf9MfyS^aZ87*Ztr zRkrk%74;kTf{VaV-4-v7zT->&vFE|Qctf`O`TbDW4X>_w_WNU=P}JCmuQ~!$A4P5L z@1b!UHrSjd7bFlSCc4EyAq&pM2ikJNKQ)n8mWdY~6s1xTCCa%k`9!=tbL88y2czJ^ zn{s7LFhWu|0vLjGX8Y8z$g2WSpTRCel2lOxWL!1@3Ck?;KgNi3nqBv%#02~v6a38J 
zLr$OrH?zLMdEb^0I5KQxX{N4G1j@$-OG*5HPrxOuD$F9JDm7%O}}3W0QrI}$kp-w+5j$x*_J{r)K3gXcdszdMC6 z-Tl6nZ+{smE1kTm%1o$$@M}i3qTr$;>Z@~C{IP!e-bhC` zn~@jJ^gA2hOcpU`-B~K^j1cW7+$2Z(k9Yp_%R4Q&kp(Kd9a;B(`M?_e{cl9@H$QOD zdmk@{#Celd%USy9#yKPrq5o&$a#IPy?mYokpln2Gn}{}hor6`_wSF! zy+927AEkEsM-a90$F@RlayFhIP1H-_&w6&O{@Y7|F#&^xSNPXACI2ti2S<{_>muHT zJTx~+*C;77E~JImEkuX^ft_HYc|r{9-_&EZ#GVbBPiG$tP~{l7D zEVAel2Q2FF2ZIIvKWpmI%rhH2ie!Gi^`PFBonge8_=c`M#Q2%AiW z+VY3ev + ## Cluster configuration -The following example cluster configurations are provided: +The cluster configuration is specified via a yaml manifest file. If a cluster version is not specified in the manifest, then the default EKS API version will be used. For our examples we set the version to 1.27. This setting may be adjusted before creating clusters as needed. +The following example cluster configurations for distributed training are provided: -* [eks-g4dn-vpc.yaml](./eks-g4dn-vpc.yaml) - a cluster using an existing VPC with a nodegroup of two g4dn.metal instances -* [eks-g4dn.yaml](./eks-g4dn.yaml) - a cluster with a nodegroup of two g4dn.metal instances, created in a new VPC -* [eks-p4de-odcr-vpc.yaml](./eks-p4de-odcr-vpc.yaml) - a cluster using an existing VPC with a nodegroup of two p4de.24xlarge instances from an existing on-demand capacity reservation (ODCR) -* [eks-p4de-odcr.yaml](./eks-p4de-odcr.yaml) - a cluster with two p4de.24xlarge instances from an existing ODCR, that will be created in a new VPC +* [eks-g4dn-vpc.yaml](./eks-g4dn-vpc.yaml) - a cluster using an existing VPC with a nodegroup of two g4dn.metal instances. This instance type supports Elastic Fabric Adapter (EFA), usually does not require a capacity reservation, and is a good starting point when developing distributed training architectures. To use this manifest, edit the vpc id and subnets, and specify the desired private subnet for the nodes. 
+* [eks-g4dn.yaml](./eks-g4dn.yaml) - a cluster with a nodegroup of two g4dn.metal instances, created in a new VPC. This example shows that when a VPC is not specified, one is created for the cluster. The manifest can work without any modifications, however if you wish to change the cluster name, API version, region, avilability zones, etc. you can modify the file before using it to create the cluster. +* [eks-p4de-odcr-vpc.yaml](./eks-p4de-odcr-vpc.yaml) - a cluster using an existing VPC with a nodegroup of two p4de.24xlarge instances from an existing on-demand capacity reservation (ODCR). This is the most common configuration for distributed training workloads.Edit the file to specify vpc id, subnets, and capacityReservationID. Please note that the subnet of the nodeGroup should match the one of the capacity reservation. +* [eks-p4de-odcr.yaml](./eks-p4de-odcr.yaml) - a cluster with two p4de.24xlarge instances from an existing ODCR, that will be created in a new VPC. This cluster configuration is useful for distributed training when no VPC is already available. Note that you would have to match the AZ of your ODCR in the nodegroup section of the manifest. -To configure your desired cluster, edit the cluster manifest file that most closely matches your desired configuration or copy the file and customize it, following the [cluster manifest schema](https://eksctl.io/usage/schema/) ## Cluster creation +1. Create a cluster configuration + +To configure your desired cluster, edit the cluster manifest file that most closely matches your desired configuration or copy the file and customize it, following the [cluster manifest schema](https://eksctl.io/usage/schema/). Any of the values in the manifests can be changed and more node groups can be added to the same cluster. The minimal set of values to specify for each file are described above. + +2. Create cluster + Let's assume that your desired cluster configuration is stored in file `cluster.yaml`. 
Then to create the cluster, execute the following command: ``` $ eksctl create cluster -f ./cluster.yaml ``` -Cluster creation may take between 15 and 30 minutes. Upon successful creation your local `~/.kube/config` file gets updated with connection information to your cluster. Execute the following command line in order to verify that the cluster is accessible: +Example output: +``` +YYYY-MM-DD HH:mm:SS [ℹ] eksctl version x.yyy.z +YYYY-MM-DD HH:mm:SS [ℹ] using region +... +YYYY-MM-DD HH:mm:SS [✔] EKS cluster "" in "" region is ready +``` + +Cluster creation may take between 15 and 30 minutes. Upon successful creation your local `~/.kube/config` file gets updated with connection information to your cluster. + +3. Validate cluster + +Execute the following command line in order to verify that the cluster is accessible: ``` $ kubectl get nodes ``` -You should see a list of three nodes. One would be a system node instance of type c5.2xlarge, and the others will belong to the nodegroup of instances with your desired instanct type.w +You should see a list of three nodes. One would be a system node instance of type c5.2xlarge, and the others will belong to the nodegroup of instances with your desired instance type for distributed training. -## Delete cluster +## Cleanup When it is time to decommission your cluster, execute the following command: @@ -45,9 +69,20 @@ When it is time to decommission your cluster, execute the following command: $ kubectl delete cluster -f ./cluster.yaml ``` +Example output: +``` +YYYY-MM-DD HH:mm:SS [ℹ] deleting EKS cluster "" +... +YYYY-MM-DD HH:mm:SS [ℹ] waiting for CloudFormation stack "" +``` + ## References +For further information regarding EKS cluster infrastructure see the [aws-do-eks](https://github.com/aws-samples/aws-do-eks) project. More cluster configurations are available [here](https://github.com/aws-samples/aws-do-eks/tree/main/wd/conf/eksctl/yaml). 
+ +Related resources for further reading can be found at the links below: * [AWS CLI](https://aws.amazon.com/cli) * [Amazon EKS](https://aws.amazon.com/eks) * [eksctl](https://eksctl.io) * [kubectl](https://kubernetes.io/docs/reference/kubectl) +* [do-framework](https://bit.ly/do-framework) From aa8f922db6eeb82337f92a2bf67bacdbf11be72b Mon Sep 17 00:00:00 2001 From: Alex Iankoulski Date: Tue, 19 Sep 2023 23:41:57 -0700 Subject: [PATCH 101/648] Explain sys --- 1.architectures/4.amazon-eks/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/1.architectures/4.amazon-eks/README.md b/1.architectures/4.amazon-eks/README.md index 178b2e45..83046b61 100644 --- a/1.architectures/4.amazon-eks/README.md +++ b/1.architectures/4.amazon-eks/README.md @@ -16,6 +16,8 @@ The following digram shows a common architecture that can be used for distribute +The EKS cluster has two nodegroups. A `system` nodegroup is used to run pods like kube-dns, kubeflow training operator, etc. which provide internal cluster-scope services and can run on CPU. A worker nodegroup built with an accelerated instance type is used to run the distributed training workload. + ## Cluster configuration The cluster configuration is specified via a yaml manifest file. If a cluster version is not specified in the manifest, then the default EKS API version will be used. For our examples we set the version to 1.27. This setting may be adjusted before creating clusters as needed. 
From 4e8e329893f6e3e9bec161e2a7e7b119c9c6ea56 Mon Sep 17 00:00:00 2001 From: Alex Iankoulski Date: Tue, 19 Sep 2023 23:41:57 -0700 Subject: [PATCH 102/648] Explain sys --- 1.architectures/4.amazon-eks/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/1.architectures/4.amazon-eks/README.md b/1.architectures/4.amazon-eks/README.md index 178b2e45..83046b61 100644 --- a/1.architectures/4.amazon-eks/README.md +++ b/1.architectures/4.amazon-eks/README.md @@ -16,6 +16,8 @@ The following digram shows a common architecture that can be used for distribute +The EKS cluster has two nodegroups. A `system` nodegroup is used to run pods like kube-dns, kubeflow training operator, etc. which provide internal cluster-scope services and can run on CPU. A worker nodegroup built with an accelerated instance type is used to run the distributed training workload. + ## Cluster configuration The cluster configuration is specified via a yaml manifest file. If a cluster version is not specified in the manifest, then the default EKS API version will be used. For our examples we set the version to 1.27. This setting may be adjusted before creating clusters as needed. 
From b31557a37d06d370e71b6d5ec431a675f43fe4e5 Mon Sep 17 00:00:00 2001 From: Uros Lipovsek Date: Fri, 22 Sep 2023 15:51:18 -0500 Subject: [PATCH 103/648] Add param benchmark --- .../0.param-benchmark.Dockerfile | 6 ++ .../1.param-benchmark-comms.sbatch | 43 ++++++++++ .../2.param-benchmark-compute.sbatch | 52 ++++++++++++ 3.test_cases/5.param-benchmark/README.md | 84 +++++++++++++++++++ 4 files changed, 185 insertions(+) create mode 100644 3.test_cases/5.param-benchmark/0.param-benchmark.Dockerfile create mode 100644 3.test_cases/5.param-benchmark/1.param-benchmark-comms.sbatch create mode 100644 3.test_cases/5.param-benchmark/2.param-benchmark-compute.sbatch create mode 100644 3.test_cases/5.param-benchmark/README.md diff --git a/3.test_cases/5.param-benchmark/0.param-benchmark.Dockerfile b/3.test_cases/5.param-benchmark/0.param-benchmark.Dockerfile new file mode 100644 index 00000000..69b547c3 --- /dev/null +++ b/3.test_cases/5.param-benchmark/0.param-benchmark.Dockerfile @@ -0,0 +1,6 @@ +FROM 763104351884.dkr.ecr.us-east-1.amazonaws.com/pytorch-training:2.0.1-gpu-py310-cu118-ubuntu20.04-ec2 + +RUN git clone https://github.com/facebookresearch/param.git && \ + cd param && git checkout 6236487e8969838822b52298c2a2318f6ac47bbd + +WORKDIR /param/train/comms/pt diff --git a/3.test_cases/5.param-benchmark/1.param-benchmark-comms.sbatch b/3.test_cases/5.param-benchmark/1.param-benchmark-comms.sbatch new file mode 100644 index 00000000..02d9b86f --- /dev/null +++ b/3.test_cases/5.param-benchmark/1.param-benchmark-comms.sbatch @@ -0,0 +1,43 @@ +#!/bin/bash + +#SBATCH --exclusive # the job has exclusive use of the instances it uses +#SBATCH --gres=gpu:8 # reserve 8 GPU resources / instance / node +#SBATCH --gpus-per-node=8 # +#SBATCH --nodes=2 # how many nodes, you can override on the CLI +#SBATCH --wait-all-nodes=1 # wait for all nodes before running the job +#SBATCH --job-name=param_benchmark # name of your job +#SBATCH --output=%x_%j.out # declare output, merge both 
stdout and stderr + +set -ex; + +########################### +###### User Variables ##### +########################### + +# default variables for Enroot +: "${APPS_PATH:=/apps}" +: "${NCCL_TESTS_PATH:=/home/ec2-user/}" +: "${IMAGE:=/apps/param-benchmark.sqsh}" + +## Plenty of EFA level variables +export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d +export FI_EFA_FORK_SAFE=1 +# export NCCL_ALGO=Ring +export FI_LOG_LEVEL=1 +export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons +export FI_EFA_ENABLE_SHM_TRANSFER=1 +# https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 +# https://github.com/pytorch/pytorch/issues/68893 +#export NCCL_SOCKET_IFNAME=ens +export NCCL_ASYNC_ERROR_HANDLING=1 +export NCCL_DEBUG=INFO + +srun --container-image=$IMAGE -l torchrun \ + --nproc_per_node $SLURM_GPUS_PER_NODE \ + --nnodes $SLURM_JOB_NUM_NODES \ + --rdzv_id $SLURM_JOB_ID \ + --rdzv_backend c10d \ + --rdzv_endpoint $(hostname):0 \ + /param/train/comms/pt/comms.py --b=8 --e=2GB --f=2 --collective=all_reduce --num_iters=100 + + diff --git a/3.test_cases/5.param-benchmark/2.param-benchmark-compute.sbatch b/3.test_cases/5.param-benchmark/2.param-benchmark-compute.sbatch new file mode 100644 index 00000000..81e462ec --- /dev/null +++ b/3.test_cases/5.param-benchmark/2.param-benchmark-compute.sbatch @@ -0,0 +1,52 @@ +#!/bin/bash + +#SBATCH --exclusive # the job has exclusive use of the instances it uses +#SBATCH --gres=gpu:8 # reserve 8 GPU resources / instance / node +#SBATCH --gpus-per-node=8 # +#SBATCH --nodes=1 # how many nodes, you can override on the CLI +#SBATCH --wait-all-nodes=1 # wait for all nodes before running the job +#SBATCH --job-name=param_benchmark # name of your job +#SBATCH --output=%x_%j.out # declare output, merge both stdout and stderr + +set -ex; + +########################### +###### User Variables ##### +########################### + +# default variables for Enroot +: "${APPS_PATH:=/apps}" +: 
"${NCCL_TESTS_PATH:=/home/ec2-user/}" +: "${IMAGE:=/apps/param-benchmark.sqsh}" + +## Plenty of EFA level variables +export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d +export FI_EFA_FORK_SAFE=1 +# export NCCL_ALGO=Ring +export FI_LOG_LEVEL=1 +export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons +export FI_EFA_ENABLE_SHM_TRANSFER=1 +# https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 +# https://github.com/pytorch/pytorch/issues/68893 +#export NCCL_SOCKET_IFNAME=ens +export NCCL_ASYNC_ERROR_HANDLING=1 +export NCCL_DEBUG=INFO + + +# Matrix multiplication test +srun --container-image=$IMAGE -l python3 /param/train/compute/pt/driver.py \ + --steps=100 --device='cpu' gemm --dataset='A' +srun --container-image=$IMAGE -l python3 /param/train/compute/pt/driver.py \ + --steps=100 --device='gpu' gemm --dataset='A' + +# EmbeddingBag +srun --container-image=$IMAGE -l python3 /param/train/compute/pt/driver.py \ + --steps=100 --device='cpu' emb --dataset='A' +srun --container-image=$IMAGE -l python3 /param/train/compute/pt/driver.py \ + --steps=100 --device='gpu' emb --dataset='A' + +# MLP Linear +srun --container-image=$IMAGE -l python3 /param/train/compute/pt/driver.py \ + --steps=100 --device='cpu' linear --dataset='A' +srun --container-image=$IMAGE -l python3 /param/train/compute/pt/driver.py \ + --steps=100 --device='gpu' linear --dataset='A' diff --git a/3.test_cases/5.param-benchmark/README.md b/3.test_cases/5.param-benchmark/README.md new file mode 100644 index 00000000..641df325 --- /dev/null +++ b/3.test_cases/5.param-benchmark/README.md @@ -0,0 +1,84 @@ +# DDP PyTorch +[Param benchmark](https://github.com/facebookresearch/param/tree/main) is a PyTorch benchmark for computation ([GEMM, MLP, EmbeddingBag](https://github.com/facebookresearch/param/tree/6236487e8969838822b52298c2a2318f6ac47bbd/train/compute/pt)), communication ([NCCL, DLRMs, 
TraceReplay](https://github.com/facebookresearch/param/tree/6236487e8969838822b52298c2a2318f6ac47bbd/train/comms/pt)), workloads ([DLRM](https://github.com/facebookresearch/param/tree/6236487e8969838822b52298c2a2318f6ac47bbd/train/workloads), [training](https://github.com/facebookresearch/param/tree/6236487e8969838822b52298c2a2318f6ac47bbd/train/compute/python)). This guide only addresses communications but serves as an template for other tests. + +# 0. Preparation + +This guide assumes that you have the following: + +- A functional Slurm cluster on AWS. +- Docker, [Pyxis](https://github.com/NVIDIA/pyxis) and [Enroot](https://github.com/NVIDIA/enroot) installed. +- A shared directory mounted on `/apps` + +It is recommended that you use the templates in the architectures [directory](../../1.architectures) + + +## 1. Build the Squash file + +The [AWS Deep learning containers](https://aws.amazon.com/machine-learning/containers/) is used as a base for this project and the Param benchmark is built on top of it following the steps below. It is assumed that you copied the assets (`Dockerfile` and `sbatch` file) to your cluster. + +1. Login to AWS ECR with command bellow + ```bash + aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin 763104351884.dkr.ecr.us-east-1.amazonaws.com + ``` +2. Build the container image with the command below + ```bash + docker build -t param-benchmark -f 0.param-benchmark.Dockerfile . + ``` +3. Convert the container image to a squash file via Enroot + ```bash + enroot import -o /apps/param-benchmark.sqsh dockerd://param-benchmark:latest + ``` + The file will be stored in the `/apps` directory. + +> **Note**: We use specific commit due to lack of versioning of param benchmark, which would otherwise be used to pin the dependency. + +## 2. 
Running a communications test
+
+Ensure that the submission file `1.param-benchmark.sbatch` has been copied to your cluster and your shell points to the directory where this file is present. Run the Param benchmark on 2 nodes as follows:
+
+```bash
+sbatch -N 2 --export=NONE 1.param-benchmark-comms.sbatch
+```
+
+The command will return a job ID. If you stay in the same directory where `sbatch` was called, assuming that your job is executing (or has executed) you will find an output file for your job named `param_benchmark_<ID>.out` where `<ID>` corresponds to your job ID.
+
+We add the `--export=NONE` parameter to make sure any conflicting environment variables from the AMI are not exported to the container.
+
+> **Note**: the number of nodes used for the job is defined via the command line with the option and argument `-N 2`. An alternative is to set it in the `sbatch` file as the directive `#SBATCH -N 2`.
+
+You will see NCCL test outputs in the logs (in your local directory), refer to [NCCL-tests](https://github.com/NVIDIA/nccl-tests/blob/master/doc/PERFORMANCE.md) documentation to understand the difference between `AlgBW` and `BusBw`. Review the Param [documentation](https://github.com/facebookresearch/param/tree/6236487e8969838822b52298c2a2318f6ac47bbd/train/comms/pt) for other CLI parameters and other benchmarks in the [repository](https://github.com/facebookresearch/param/tree/6236487e8969838822b52298c2a2318f6ac47bbd) for more communication and computation tests (example [here](https://github.com/facebookresearch/param/blob/6236487e8969838822b52298c2a2318f6ac47bbd/train/compute/pt/README.md) or [here](https://github.com/facebookresearch/param/tree/6236487e8969838822b52298c2a2318f6ac47bbd/train/comms/pt)).
+
+## 3. Other Param tests
+
+Param contains several tests that you can use to evaluate your system. Run the command below to execute single node compute tests for GEMM (Matrix Multiply), EmbeddingBag and MLP. 
+ +```bash +sbatch -N 1 --export=NONE 2.param-benchmark-compute.sbatch +``` + +You should see an output similar to the sample below (MatMult): + +``` +0: Measuring the performance of gemm on device = gpu +0: Steps = 100 warmups = 10 +0: with matrix dataset A , Data type: float32 +0: +0: ---------------------------------------------------------------- +0: M N K Time(s) Rate(TF/s) +0: ---------------------------------------------------------------- +0: 128, 4096, 4096, 0.000326 13.158 +0: 256, 4096, 4096, 0.000610 14.077 +0: 512, 4096, 4096, 0.001197 14.347 +0: 1024, 4096, 4096, 0.001845 18.624 +0: 128, 1024, 1024, 0.000030 8.803 +0: 256, 1024, 1024, 0.000048 11.107 +0: 512, 1024, 1024, 0.000078 13.700 +0: 1024, 1024, 1024, 0.000130 16.558 +0: 4096, 4096, 128, 0.000238 18.022 +0: 4096, 4096, 256, 0.000462 18.584 +0: 4096, 4096, 512, 0.000912 18.829 +0: 4096, 4096, 1024, 0.001814 18.942 +0: 1024, 1024, 128, 0.000026 10.282 +0: 1024, 1024, 256, 0.000040 13.291 +0: 1024, 1024, 512, 0.000071 15.208 +``` From 8c7f57aa63de25198e43298d2a63a529706f5da3 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Fri, 22 Sep 2023 15:57:16 -0500 Subject: [PATCH 104/648] Add authors to param benchmark --- 3.test_cases/5.param-benchmark/README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/3.test_cases/5.param-benchmark/README.md b/3.test_cases/5.param-benchmark/README.md index 641df325..8b1aed60 100644 --- a/3.test_cases/5.param-benchmark/README.md +++ b/3.test_cases/5.param-benchmark/README.md @@ -82,3 +82,8 @@ You should see an output similar to the sample below (MatMult): 0: 1024, 1024, 256, 0.000040 13.291 0: 1024, 1024, 512, 0.000071 15.208 ``` + +## Authors / Reviewers + +- [A] Uros Lipovsek - lipovsek@ +- [R] Pierre-Yves Aquilanti - pierreya@ From 9c8ad6578d7f505a1af6d5029324931947e43045 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Mon, 25 Sep 2023 13:45:00 -0500 Subject: [PATCH 105/648] Fix megatron-lm doc for test case path --- 
3.test_cases/1.megatron-lm/README.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/3.test_cases/1.megatron-lm/README.md b/3.test_cases/1.megatron-lm/README.md index 0b1c7dc4..6bdb335a 100644 --- a/3.test_cases/1.megatron-lm/README.md +++ b/3.test_cases/1.megatron-lm/README.md @@ -28,11 +28,12 @@ It is recommended that you use the templates in the architectures [directory](.. You will also setup the following variables in your terminal environment. ```bash -export DATA_PATH=/fsx -export APPS_PATH=/apps +export DATA_PATH=/fsx # FSx for Lustre shared file-system +export APPS_PATH=/apps # this is were the squash file (Enroot file) will be stored +export TEST_CASE_PATH=${HOME}/1.megatron-lm # it is assumes that this test case is copied in your home directory +cd ${TEST_CASE_PATH} ``` - ## 1. Data Preprocessing Before running training jobs you need to retrieve input data and preprocess it. This section of the guide you will retrieve a container then you convert it into a Squash file via [Enroot](https://github.com/NVIDIA/enroot), you will then retrieve input data ans tokenize it using the GPT2 vocabulary. @@ -56,9 +57,9 @@ Below are the steps you need to follow: ``` 4. Create the squash file with the command below. ```bash - enroot import -o /apps/megatron-preprocess.sqsh dockerd://megatron-preprocess:latest + enroot import -o ${APPS_PATH}/megatron-preprocess.sqsh dockerd://megatron-preprocess:latest ``` - The file will be stored in the `/apps` directory. The output should look as below. + The file will be stored in the `/apps` directory (if left as default). The output should look as below. ```bash [ec2-user@ip-10-0-10-78 ~]$ enroot import -o ./megatron-preprocess.sqsh dockerd://megatron-preprocess:latest @@ -125,7 +126,7 @@ Now that the data is preprocessed, we will pretrain a GPT3 model MegatronLM. ``` 3. Convert the docker container to a squash file in `/apps`. 
```bash - enroot import -o /apps/megatron-training.sqsh dockerd://megatron-training:latest + enroot import -o ${APPS_PATH}/megatron-training.sqsh dockerd://megatron-training:latest ``` 4. You copy the file `3.distributed-training.sbatch` on your cluster then submit a training jobs with the command below: From d9008b4ea06076d79ce3a2a58ce541694428e969 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Mon, 25 Sep 2023 16:41:22 -0500 Subject: [PATCH 106/648] Update readme file for consistency on commands --- 1.architectures/4.amazon-eks/README.md | 55 ++++++++++++-------------- 1 file changed, 25 insertions(+), 30 deletions(-) diff --git a/1.architectures/4.amazon-eks/README.md b/1.architectures/4.amazon-eks/README.md index 83046b61..559405ea 100644 --- a/1.architectures/4.amazon-eks/README.md +++ b/1.architectures/4.amazon-eks/README.md @@ -1,6 +1,7 @@ # Amazon EKS distributed training architecture -This project module uses [eksctl](eksctl.io) and a cluster manifest to create your specified Amazon EKS cluster. + +This project provides several reference architectures to run distributed training on Amazon EKS for different use cases using `p4d.24xlarge` instances (you can replace them by `p5` or `trn1`. These examples use [eksctl](eksctl.io) and a cluster manifest to create your specified Amazon EKS cluster. ## Prerequisites @@ -31,35 +32,28 @@ The following example cluster configurations for distributed training are provid ## Cluster creation -1. Create a cluster configuration +### Edit the cluster configuration To configure your desired cluster, edit the cluster manifest file that most closely matches your desired configuration or copy the file and customize it, following the [cluster manifest schema](https://eksctl.io/usage/schema/). Any of the values in the manifests can be changed and more node groups can be added to the same cluster. The minimal set of values to specify for each file are described above. -2. 
Create cluster - -Let's assume that your desired cluster configuration is stored in file `cluster.yaml`. Then to create the cluster, execute the following command: - -``` -$ eksctl create cluster -f ./cluster.yaml -``` - -Example output: -``` -YYYY-MM-DD HH:mm:SS [ℹ] eksctl version x.yyy.z -YYYY-MM-DD HH:mm:SS [ℹ] using region -... -YYYY-MM-DD HH:mm:SS [✔] EKS cluster "" in "" region is ready -``` - -Cluster creation may take between 15 and 30 minutes. Upon successful creation your local `~/.kube/config` file gets updated with connection information to your cluster. - -3. Validate cluster - -Execute the following command line in order to verify that the cluster is accessible: - -``` -$ kubectl get nodes -``` +### Create a cluster + +1. Let's assume that your desired cluster configuration is stored in file `cluster.yaml`. Then to create the cluster, execute the following command: + ```bash + eksctl create cluster -f ./cluster.yaml + ``` + Example output: + ```console + YYYY-MM-DD HH:mm:SS [ℹ] eksctl version x.yyy.z + YYYY-MM-DD HH:mm:SS [ℹ] using region + ... + YYYY-MM-DD HH:mm:SS [✔] EKS cluster "" in "" region is ready + ``` + Cluster creation may take between 15 and 30 minutes. Upon successful creation your local `~/.kube/config` file gets updated with connection information to your cluster. +2. Execute the following command line in order to verify that the cluster is accessible: + ```bash + kubectl get nodes + ``` You should see a list of three nodes. One would be a system node instance of type c5.2xlarge, and the others will belong to the nodegroup of instances with your desired instance type for distributed training. @@ -67,18 +61,19 @@ You should see a list of three nodes. 
One would be a system node instance of typ When it is time to decommission your cluster, execute the following command: -``` -$ kubectl delete cluster -f ./cluster.yaml +```bash +kubectl delete cluster -f ./cluster.yaml ``` Example output: -``` +```console YYYY-MM-DD HH:mm:SS [ℹ] deleting EKS cluster "" ... YYYY-MM-DD HH:mm:SS [ℹ] waiting for CloudFormation stack "" ``` ## References + For further information regarding EKS cluster infrastructure see the [aws-do-eks](https://github.com/aws-samples/aws-do-eks) project. More cluster configurations are available [here](https://github.com/aws-samples/aws-do-eks/tree/main/wd/conf/eksctl/yaml). Related resources for further reading can be found at the links below: From 6cb7b66097c86bf00d54eb84ae4cf8dc5ba12ad7 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Mon, 25 Sep 2023 16:45:59 -0500 Subject: [PATCH 107/648] Update readme for EKS with table --- 1.architectures/4.amazon-eks/README.md | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/1.architectures/4.amazon-eks/README.md b/1.architectures/4.amazon-eks/README.md index 559405ea..e58754cb 100644 --- a/1.architectures/4.amazon-eks/README.md +++ b/1.architectures/4.amazon-eks/README.md @@ -24,14 +24,19 @@ The EKS cluster has two nodegroups. A `system` nodegroup is used to run pods lik The cluster configuration is specified via a yaml manifest file. If a cluster version is not specified in the manifest, then the default EKS API version will be used. For our examples we set the version to 1.27. This setting may be adjusted before creating clusters as needed. The following example cluster configurations for distributed training are provided: -* [eks-g4dn-vpc.yaml](./eks-g4dn-vpc.yaml) - a cluster using an existing VPC with a nodegroup of two g4dn.metal instances. 
This instance type supports Elastic Fabric Adapter (EFA), usually does not require a capacity reservation, and is a good starting point when developing distributed training architectures. To use this manifest, edit the vpc id and subnets, and specify the desired private subnet for the nodes. -* [eks-g4dn.yaml](./eks-g4dn.yaml) - a cluster with a nodegroup of two g4dn.metal instances, created in a new VPC. This example shows that when a VPC is not specified, one is created for the cluster. The manifest can work without any modifications, however if you wish to change the cluster name, API version, region, avilability zones, etc. you can modify the file before using it to create the cluster. -* [eks-p4de-odcr-vpc.yaml](./eks-p4de-odcr-vpc.yaml) - a cluster using an existing VPC with a nodegroup of two p4de.24xlarge instances from an existing on-demand capacity reservation (ODCR). This is the most common configuration for distributed training workloads.Edit the file to specify vpc id, subnets, and capacityReservationID. Please note that the subnet of the nodeGroup should match the one of the capacity reservation. -* [eks-p4de-odcr.yaml](./eks-p4de-odcr.yaml) - a cluster with two p4de.24xlarge instances from an existing ODCR, that will be created in a new VPC. This cluster configuration is useful for distributed training when no VPC is already available. Note that you would have to match the AZ of your ODCR in the nodegroup section of the manifest. +* [**`eks-g4dn-vpc.yaml`**](./eks-g4dn-vpc.yaml): A cluster using an existing VPC with a nodegroup of 2 * `g4dn.metal` instances. This instance type supports Elastic Fabric Adapter (EFA), usually does not require a capacity reservation, and is a good starting point when developing distributed training architectures. To use this manifest, edit the vpc id and subnets, and specify the desired private subnet for the nodes. 
+* [**`eks-g4dn.yaml`**](./eks-g4dn.yaml): Cluster with a nodegroup of 2 * `g4dn.metal` instances, created in a new VPC. This example shows that when a VPC is not specified, one is created for the cluster. The manifest can work without any modifications, however if you wish to change the cluster name, API version, region, availability zones, etc. you can modify the file before using it to create the cluster. +* [**`eks-p4de-odcr-vpc.yaml`**](./eks-p4de-odcr-vpc.yaml): It is a cluster using an existing VPC with a nodegroup of 2 * `p4de.24xlarge` instances from an existing on-demand capacity reservation (ODCR). This is the most common configuration for distributed training workloads.Edit the file to specify vpc id, subnets, and capacityReservationID. Please note that the subnet of the nodeGroup should match the one of the capacity reservation. +* [**`eks-p4de-odcr.yaml`**](./eks-p4de-odcr.yaml): A cluster with 2 * `p4de.24xlarge` instances from an existing ODCR, that will be created in a new VPC. This cluster configuration is useful for distributed training when no VPC is already available. Note that you would have to match the AZ of your ODCR in the nodegroup section of the manifest. + + +You will need to replace + ## Cluster creation + ### Edit the cluster configuration To configure your desired cluster, edit the cluster manifest file that most closely matches your desired configuration or copy the file and customize it, following the [cluster manifest schema](https://eksctl.io/usage/schema/). Any of the values in the manifests can be changed and more node groups can be added to the same cluster. The minimal set of values to specify for each file are described above. 
From 92300d040791580af03dbe561762902cc7f5431b Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Tue, 26 Sep 2023 18:09:06 +0900 Subject: [PATCH 108/648] tidied up scripts and added README --- .../3.MPT/{Dockerfile => 0.Dockerfile} | 81 ++++--- 3.test_cases/3.MPT/Makefile | 58 ----- 3.test_cases/3.MPT/README.md | 134 ++++++++++++ 3.test_cases/3.MPT/config.env | 33 --- 3.test_cases/3.MPT/report/.gitignore | 1 - 3.test_cases/3.MPT/scripts/build-image.sh | 4 - 3.test_cases/3.MPT/scripts/deploy-pcluster.sh | 13 -- 3.test_cases/3.MPT/scripts/deploy-s3.sh | 8 - 3.test_cases/3.MPT/scripts/deploy-vpc.sh | 8 - .../3.MPT/scripts/print-headnode-login.sh | 5 - 3.test_cases/3.MPT/scripts/run-benchmarks.sh | 38 ---- 3.test_cases/3.MPT/scripts/run-container.sh | 12 -- ...eprocess.sbatch => 1.c4-preprocess.sbatch} | 0 ... => 2.train-mpt-manual-distributed.sbatch} | 0 .../3.MPT/slurm-scripts/{ => logs}/.gitignore | 0 .../slurm-scripts/run-distributed-training.sh | 55 ----- .../train-mpt-single-node.sbatch | 150 ------------- .../3.MPT/slurm-scripts/train-mpt.sbatch | 200 ------------------ 18 files changed, 172 insertions(+), 628 deletions(-) rename 3.test_cases/3.MPT/{Dockerfile => 0.Dockerfile} (51%) delete mode 100644 3.test_cases/3.MPT/Makefile create mode 100644 3.test_cases/3.MPT/README.md delete mode 100644 3.test_cases/3.MPT/config.env delete mode 100644 3.test_cases/3.MPT/report/.gitignore delete mode 100644 3.test_cases/3.MPT/scripts/build-image.sh delete mode 100644 3.test_cases/3.MPT/scripts/deploy-pcluster.sh delete mode 100644 3.test_cases/3.MPT/scripts/deploy-s3.sh delete mode 100644 3.test_cases/3.MPT/scripts/deploy-vpc.sh delete mode 100644 3.test_cases/3.MPT/scripts/print-headnode-login.sh delete mode 100644 3.test_cases/3.MPT/scripts/run-benchmarks.sh delete mode 100644 3.test_cases/3.MPT/scripts/run-container.sh rename 3.test_cases/3.MPT/slurm-scripts/{c4-preprocess.sbatch => 1.c4-preprocess.sbatch} (100%) rename 
3.test_cases/3.MPT/slurm-scripts/{train-mpt-manual-distributed.sbatch => 2.train-mpt-manual-distributed.sbatch} (100%) rename 3.test_cases/3.MPT/slurm-scripts/{ => logs}/.gitignore (100%) delete mode 100644 3.test_cases/3.MPT/slurm-scripts/run-distributed-training.sh delete mode 100644 3.test_cases/3.MPT/slurm-scripts/train-mpt-single-node.sbatch delete mode 100644 3.test_cases/3.MPT/slurm-scripts/train-mpt.sbatch diff --git a/3.test_cases/3.MPT/Dockerfile b/3.test_cases/3.MPT/0.Dockerfile similarity index 51% rename from 3.test_cases/3.MPT/Dockerfile rename to 3.test_cases/3.MPT/0.Dockerfile index 987897ee..12f18215 100644 --- a/3.test_cases/3.MPT/Dockerfile +++ b/3.test_cases/3.MPT/0.Dockerfile @@ -1,30 +1,23 @@ FROM mosaicml/pytorch:2.0.1_cu118-python3.10-ubuntu20.04 -# FROM nvcr.io/nvidia/pytorch:23.05-py3 - -ARG EFA_INSTALLER_VERSION=latest -ARG AWS_OFI_NCCL_VERSION=v1.6.0 +ARG EFA_INSTALLER_VERSION=1.26.1 +ARG AWS_OFI_NCCL_VERSION=master ARG NCCL_TESTS_VERSION=master +ARG NCCL_VERSION=v2.12.7-1 ARG OPEN_MPI_PATH=/opt/amazon/openmpi -###################### -# Update and remove the IB libverbs -###################### + RUN apt-get update -y RUN apt-get remove -y --allow-change-held-packages \ - libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 \ - libnccl2 libnccl-dev libibnetdisc5 + libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 \ + libnccl2 libnccl-dev libibnetdisc5 libibmad5 libibumad3 RUN rm -rf /opt/hpcx \ && rm -rf /usr/local/mpi \ && rm -rf /usr/local/ucx \ && rm -f /etc/ld.so.conf.d/hpcx.conf \ && ldconfig +ENV OPAL_PREFIX= -###################### -# Add enviroment variable for processes to be able to call fork() -###################### -ENV RDMAV_FORK_SAFE=1 - -RUN DEBIAN_FRONTEND=noninteractive apt install -y --allow-unauthenticated \ +RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \ git \ gcc \ vim \ @@ -38,40 +31,32 @@ RUN DEBIAN_FRONTEND=noninteractive apt install -y --allow-unauthenticated \ gdb \ automake \ 
cmake \ - apt-utils && \ + apt-utils \ + libhwloc-dev \ + aptitude && \ DEBIAN_FRONTEND=noninteractive apt autoremove -y - -RUN mkdir -p /var/run/sshd && \ - sed -i 's/[ #]\(.*StrictHostKeyChecking \).*/ \1no/g' /etc/ssh/ssh_config && \ - echo " UserKnownHostsFile /dev/null" >> /etc/ssh/ssh_config && \ - sed -i 's/#\(StrictModes \).*/\1no/g' /etc/ssh/sshd_config && \ - sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd - -RUN rm -rf /root/.ssh/ \ - && mkdir -p /root/.ssh/ \ - && ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa \ - && cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys \ - && printf "Host *\n StrictHostKeyChecking no\n" >> /root/.ssh/config - -ENV LD_LIBRARY_PATH=/usr/local/cuda/extras/CUPTI/lib64:/opt/amazon/openmpi/lib:/opt/nccl/build/lib:/opt/amazon/efa/lib:/opt/aws-ofi-nccl/install/lib:$LD_LIBRARY_PATH +ENV LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu/:/usr/local/cuda/extras/CUPTI/lib64:/opt/amazon/openmpi/lib:/opt/nccl/build/lib:/opt/amazon/efa/lib:/opt/aws-ofi-nccl/install/lib:$LD_LIBRARY_PATH ENV PATH=/opt/amazon/openmpi/bin/:/opt/amazon/efa/bin:/usr/bin:/usr/local/bin:$PATH +RUN pip install awscli pynvml + ################################################# ## Install EFA installer RUN cd $HOME \ && curl -O https://efa-installer.amazonaws.com/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz \ && tar -xf $HOME/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz \ && cd aws-efa-installer \ - && ./efa_installer.sh -y --skip-kmod + && ./efa_installer.sh -y -g -d --skip-kmod --skip-limit-conf --no-verify \ + && rm -rf $HOME/aws-efa-installer ################################################### ## Install NCCL RUN git clone https://github.com/NVIDIA/nccl /opt/nccl \ && cd /opt/nccl \ - && git checkout v2.15.5-1 \ - && make -j$(nproc) src.build CUDA_HOME=/usr/local/cuda \ - NVCC_GENCODE="-gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_75,code=sm_75 
-gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_60,code=sm_60" + && git checkout -b ${NCCL_VERSION} \ + && make -j src.build CUDA_HOME=/usr/local/cuda \ + NVCC_GENCODE="-gencode=arch=compute_90,code=sm_90 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_60,code=sm_60" ################################################### ## Install AWS-OFI-NCCL plugin @@ -82,26 +67,36 @@ RUN export OPAL_PREFIX="" \ && git checkout ${AWS_OFI_NCCL_VERSION} \ && ./autogen.sh \ && ./configure --prefix=/opt/aws-ofi-nccl/install \ - --with-libfabric=/opt/amazon/efa \ - --with-cuda=/usr/local/cuda \ - --with-mpi=/opt/amazon/openmpi/ \ - --enable-platform-aws \ + --with-libfabric=/opt/amazon/efa/ \ + --with-cuda=/usr/local/cuda \ + --with-nccl=/opt/nccl/build \ + --with-mpi=/opt/amazon/openmpi/ \ && make && make install ################################################### +## Install NCCL-tests +RUN git clone https://github.com/NVIDIA/nccl-tests.git /opt/nccl-tests \ + && cd /opt/nccl-tests \ + && git checkout ${NCCL_TESTS_VERSION} \ + && make MPI=1 \ + MPI_HOME=/opt/amazon/openmpi/ \ + CUDA_HOME=/usr/local/cuda \ + NCCL_HOME=/opt/nccl/build \ + NVCC_GENCODE="-gencode=arch=compute_90,code=sm_90 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_60,code=sm_60" + RUN rm -rf /var/lib/apt/lists/* ENV LD_PRELOAD=/opt/nccl/build/lib/libnccl.so RUN echo "hwloc_base_binding_policy = none" >> /opt/amazon/openmpi/etc/openmpi-mca-params.conf \ - && echo "rmaps_base_mapping_policy = slot" >> /opt/amazon/openmpi/etc/openmpi-mca-params.conf + && echo "rmaps_base_mapping_policy = slot" >> /opt/amazon/openmpi/etc/openmpi-mca-params.conf RUN pip3 install awscli RUN pip3 install pynvml RUN mv $OPEN_MPI_PATH/bin/mpirun $OPEN_MPI_PATH/bin/mpirun.real \ - && echo 
'#!/bin/bash' > $OPEN_MPI_PATH/bin/mpirun \ - && echo '/opt/amazon/openmpi/bin/mpirun.real "$@"' >> $OPEN_MPI_PATH/bin/mpirun \ - && chmod a+x $OPEN_MPI_PATH/bin/mpirun + && echo '#!/bin/bash' > $OPEN_MPI_PATH/bin/mpirun \ + && echo '/opt/amazon/openmpi/bin/mpirun.real "$@"' >> $OPEN_MPI_PATH/bin/mpirun \ + && chmod a+x $OPEN_MPI_PATH/bin/mpirun ###################### # Transformers dependencies used in the model @@ -111,7 +106,7 @@ RUN mv $OPEN_MPI_PATH/bin/mpirun $OPEN_MPI_PATH/bin/mpirun.real \ COPY llm-foundry llm-foundry RUN cd llm-foundry \ && pip install -e ".[gpu]" \ - && pip install xformers nvtx + && pip install xformers nvtx 'flash-attn==v1.0.3.post0' RUN wget https://developer.download.nvidia.com/devtools/nsight-systems/NsightSystems-linux-cli-public-2023.2.1.122-3259852.deb RUN apt-get install -y libglib2.0-0 \ && dpkg -i NsightSystems-linux-cli-public-2023.2.1.122-3259852.deb \ No newline at end of file diff --git a/3.test_cases/3.MPT/Makefile b/3.test_cases/3.MPT/Makefile deleted file mode 100644 index a63d9b58..00000000 --- a/3.test_cases/3.MPT/Makefile +++ /dev/null @@ -1,58 +0,0 @@ -SHELL := /bin/bash - -default: help - -help: # Show help for each of the Makefile recipes. 
- @grep -E '^[a-zA-Z0-9 -]+:.*#' Makefile | sort | while read -r l; do printf "\033[1;32m$$(echo $$l | cut -f 1 -d':')\033[00m:$$(echo $$l | cut -f 2- -d'#')\n"; done - -################################################## -# Container Creation and Single node benchmarks -################################################## - -build-image: # Build docker image - bash scripts/build-image.sh - -run-container: # Run docker container locally for debugging - bash scripts/run-container.sh - -run-benchmark: build-image # Run single node benchmark locally - bash scripts/run-benchmarks.sh - - -################################################## -# Multi-node benchmarks with ParallelCluster -################################################## - -install-node: # Insatall node command - curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.38.0/install.sh | bash - chmod ug+x ~/.nvm/nvm.sh - source ~/.nvm/nvm.sh - nvm install --lts - node --version - -install-pcluster: # Install pcluster cli command using pip - python3 -m pip install --upgrade pip - python3 -m pip install --user --upgrade virtualenv - python3 -m virtualenv ~/deepspeed-on-aws-tutorials-venv - source ~/deepspeed-on-aws-tutorials-venv/bin/activate - python3 -m pip install --upgrade "aws-parallelcluster==3.6.1" - -deploy-s3: # Deploy S3 bucket using CloudFormation - bash scripts/deploy-s3.sh - -deploy-vpc: # Deploy VPC using CloudFormation - bash scripts/deploy-vpc.sh - -deploy-pcluster: # Deploy ParallelCluster using pcluster command - bash scripts/deploy-pcluster.sh - -print-headnode-login: # Print login command for headnode - bash scripts/print-headnode-login.sh - - -################################################# -# Multi-node job submisison on Slurm headnode -################################################## -enroot-import: # Enroot - enroot import -o /apps/llm-foundry.sqsh dockerd://llm-foundry:latest - diff --git a/3.test_cases/3.MPT/README.md b/3.test_cases/3.MPT/README.md new file mode 100644 index 
00000000..aa945dd2
--- /dev/null
+++ b/3.test_cases/3.MPT/README.md
@@ -0,0 +1,134 @@
+# Mosaic Pretrained Transformers (MPT) Test Case
+
+MPT models are GPT-style models in llm-foundry with some special features -- Flash Attention for efficiency, ALiBi for context length extrapolation, and stability improvements to mitigate loss spikes (https://github.com/mosaicml/llm-foundry/tree/main).
+This subdirectory contains:
+
+* AWS optimized llm-foundry container image
+* Slurm scripts for c4 dataset preparation and multi-node training
+
+## 0. Preparation
+
+This guide assumes that you have the following:
+
+- A functional Slurm cluster on AWS.
+- Docker, [Pyxis](https://github.com/NVIDIA/pyxis) and [Enroot](https://github.com/NVIDIA/enroot) installed.
+- An FSx for Lustre filesystem mounted on `/fsx`.
+
+It is recommended that you use the templates in the architectures [directory](../../1.architectures)
+
+
+## 1. Data Preprocessing
+
+Before running training jobs you need to retrieve input data and preprocess it. In this section of the guide you will retrieve a container then you convert it into a Squash file via [Enroot](https://github.com/NVIDIA/enroot), you will then retrieve input data and tokenize it.
+
+Below are the steps you need to follow:
+
+1. Clone this repository along with its submodules.
+2. Build the container image with the command below in this directory
+
+   ```bash
+   docker build -t llm-foundry -f 0.Dockerfile .
+   ```
+
+3. Once the image is built, you can check if it is present with `docker images`. You should see an output similar to this one:
+
+   ```bash
+   REPOSITORY    TAG       IMAGE ID       CREATED        SIZE
+   llm-foundry   latest    a964fb32cd53   2 weeks ago    23.6GB
+   ...
+   ```
+
+4. Create the squash file with the command below.
+
+   ```bash
+   enroot import -o /apps/llm-foundry.sqsh dockerd://llm-foundry:latest
+   ```
+   The file will be stored in the `/apps` directory. The output should look as below.
+ + ```bash + [INFO] Fetching image + + 36a8c752c28a2db543d2a632a3fc1fcbd5789a6f3d45b9d3a24632420dedcfa8 + + [INFO] Extracting image content... + [INFO] Creating squashfs filesystem... + + Parallel mksquashfs: Using 32 processors + Creating 4.0 filesystem on /apps/llm-foundry.sqsh, block size 131072. + [========================================================================================================================================================================================================================-] 291068/291068 100% + + Exportable Squashfs 4.0 filesystem, gzip compressed, data block size 131072 + uncompressed data, uncompressed metadata, uncompressed fragments, uncompressed xattrs + duplicates are not removed + ... + ``` + + It will take roughly 5 minutes. + +5. Go to `slurm-scripts` subdirectory and then run `1.c4-preprocess.sbatch` script + + ```bash + sbatch 1.c4-preprocess.sbatch + ``` + +It will create the streaming dataset for composer library using C4 dataset. + +7. You will see a new file in your current working directory called `c4-preprocess-XY.out` where `XY` is a number. This is your outputfile and will capture the `STDOUT` and `STDERR` from your job. You can check how it progresses via the command `tail -f slurm-XY.out` but with the relevant filename. + +Once the job completed, you will see the following data under `/fsx/my-c4-copy`. + + ```bash + /fsx/my-copy-c4/ + ├── train_small + │ ├── index.json + │ ├── shard.00000.mds + │ ├── shard.00001.mds + │ ├── shard.00002.mds + ... + │ ├── shard.00023.mds + │ └── shard.00024.mds + └── val_small + ├── index.json + ├── shard.00000.mds + ├── shard.00001.mds + └── shard.00002.mds + ``` + +## 2. Distributed training + +Now that the data is preprocessed, we will pretrain a MPT model with composer. + + +1. Go to `slurm-scripts` subdirectory. +2. Run the training script as follows: + ```bash + sbatch 2.train-mpt-manual-distributed.sbatch + ``` + +3. 
The training starts running and should produce an output similar to below if successful. + + +``` +... +0: [batch=1/300000000]: +0: Train time/epoch: 0 +0: Train time/batch: 0 +0: Train time/sample: 0 +0: Train time/batch_in_epoch: 0 +0: Train time/sample_in_epoch: 0 +0: Train time/token: 0 +0: Train time/token_in_epoch: 0 +0: Train memory/allocated_mem: 3.6287 +0: Train memory/active_mem: 3.6287 +0: Train memory/inactive_mem: 2.7844 +0: Train memory/reserved_mem: 20.9650 +0: Train memory/alloc_retries: 0 +0: Train trainer/device_train_microbatch_size: 8 +0: Train loss/train/total: 12.0000 +0: Train metrics/train/LanguageCrossEntropy: 12.0000 +0: Train metrics/train/LanguagePerplexity: 162754.5000 +0: Train time/train: 0.0037 +0: Train time/val: 0.0000 +... +``` + diff --git a/3.test_cases/3.MPT/config.env b/3.test_cases/3.MPT/config.env deleted file mode 100644 index 07ad47b5..00000000 --- a/3.test_cases/3.MPT/config.env +++ /dev/null @@ -1,33 +0,0 @@ -function set_options(){ - if [ "${DRYRUN:-}" == "" ]; then # Execute command when DRYRUN is unset - set -euxo pipefail - fi -} -# You can dry run each command like -# DRYRUN=1 make run-container -function run(){ - if [ "${DRYRUN:-}" == "" ]; then # Execute command when DRYRUN is unset - "$@" - else # Dry-run otherwise - { - set +x - echo -n "[DRYRUN]" - echo "$@" - } >&2 - fi -} -export NAME=llm-foundry -export ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) -export # S3 -export S3_BUCKET_NAME=${NAME}-${ACCOUNT_ID}-bucket -export # VPC -export VPC_NAME=${NAME}-${ACCOUNT_ID}-vpc -export REGION=ap-northeast-1 -export AZ=ap-northeast-1a -export # PCLUSTER -export COMPUTE_INSTANCE_TYPE=p4d.24xlarge -export PLACEMENT_GROUP_ID=p4d-placement-group -export CAPACITY_RESERVATION_ID=cr-075894134626f63cb -export COMPUTE_INSTANCE_MIN_COUNT=8 -export COMPUTE_INSTANCE_MAX_COUNT=8 -export SSH_KEY=mlkeita-user-admin-dev-machine \ No newline at end of file diff --git a/3.test_cases/3.MPT/report/.gitignore 
b/3.test_cases/3.MPT/report/.gitignore deleted file mode 100644 index 8dbd2b01..00000000 --- a/3.test_cases/3.MPT/report/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.nsys-rep \ No newline at end of file diff --git a/3.test_cases/3.MPT/scripts/build-image.sh b/3.test_cases/3.MPT/scripts/build-image.sh deleted file mode 100644 index cbc58219..00000000 --- a/3.test_cases/3.MPT/scripts/build-image.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/usr/bin/env bash -. config.env -set_options -run docker build -t llm-foundry . \ No newline at end of file diff --git a/3.test_cases/3.MPT/scripts/deploy-pcluster.sh b/3.test_cases/3.MPT/scripts/deploy-pcluster.sh deleted file mode 100644 index 979fb1c8..00000000 --- a/3.test_cases/3.MPT/scripts/deploy-pcluster.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash -. config.env - -export PRIVATE_SUBNET_ID=$(aws cloudformation describe-stacks --stack-name vpc-${NAME} --region ${REGION} \ - | jq -r '.Stacks[] | .Outputs[] | select(.OutputKey == "PrivateSubnet") | .OutputValue') -export PUBLIC_SUBNET_ID=$(aws cloudformation describe-stacks --stack-name vpc-${NAME} --region ${REGION} \ - | jq -r '.Stacks[] | .Outputs[] | select(.OutputKey == "PublicSubnet") | .OutputValue') -TMPFILE=$(mktemp) -echo ${TMPFILE} -cat ../../1.architectures/2.aws-parallelcluster/distributed-training-clususter-with-container.yaml | envsubst > ${TMPFILE} -set_options -run cat ${TMPFILE} -run pcluster create-cluster --cluster-configuration ${TMPFILE} --cluster-name pcluster-${NAME} --region ${REGION} \ No newline at end of file diff --git a/3.test_cases/3.MPT/scripts/deploy-s3.sh b/3.test_cases/3.MPT/scripts/deploy-s3.sh deleted file mode 100644 index d4b7042d..00000000 --- a/3.test_cases/3.MPT/scripts/deploy-s3.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash -. 
config.env - -set_options -run aws cloudformation create-stack --stack-name s3-${NAME} \ - --template-body file://../../1.architectures/0.s3/0.private-bucket.yaml \ - --parameters ParameterKey=S3BucketName,ParameterValue=${S3_BUCKET_NAME} \ - --region ${REGION} --capabilities=CAPABILITY_IAM \ No newline at end of file diff --git a/3.test_cases/3.MPT/scripts/deploy-vpc.sh b/3.test_cases/3.MPT/scripts/deploy-vpc.sh deleted file mode 100644 index 6b7b212b..00000000 --- a/3.test_cases/3.MPT/scripts/deploy-vpc.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash -. config.env - -set_options -run aws cloudformation create-stack --stack-name vpc-${NAME} \ - --template-body file://../../1.architectures/1.vpc_network/2.vpc-one-az.yaml \ - --parameters ParameterKey=VPCName,ParameterValue=${VPC_NAME} ParameterKey=SubnetsAZ,ParameterValue=${AZ} \ - --region ${REGION} --capabilities=CAPABILITY_IAM diff --git a/3.test_cases/3.MPT/scripts/print-headnode-login.sh b/3.test_cases/3.MPT/scripts/print-headnode-login.sh deleted file mode 100644 index c83894ec..00000000 --- a/3.test_cases/3.MPT/scripts/print-headnode-login.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env bash -. config.env - -set_options -pcluster ssh --region ${REGION} --cluster-name pcluster-${NAME} -i ~/.ssh/${SSH_KEY} --dryrun True \ No newline at end of file diff --git a/3.test_cases/3.MPT/scripts/run-benchmarks.sh b/3.test_cases/3.MPT/scripts/run-benchmarks.sh deleted file mode 100644 index 32d0e47a..00000000 --- a/3.test_cases/3.MPT/scripts/run-benchmarks.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash -. 
config.env -# Prepare dataset -set_options - -export PROFILE_FILE=/report/profile_file -run docker run \ - --gpus all --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 \ - --rm \ - -i \ - -v $(pwd)/report:/report \ - --name llm-foundry \ - llm-foundry \ - /bin/bash -s <' - -nsys sessions list -python -c "import streaming; streaming.base.util.clean_stale_shared_memory()" -# Train an MPT-7B model for 10 batches -nsys profile -w true -t cuda,nvtx,osrt,cudnn,cublas \ - --force-overwrite=true -s cpu --cudabacktrace=true -x true -o ${PROFILE_FILE} composer train/train.py \ - train/yamls/pretrain/mpt-7b.yaml \ - data_local=my-copy-c4 \ - train_loader.dataset.split=train_small \ - eval_loader.dataset.split=val_small \ - model.loss_fn=torch_crossentropy \ - max_duration=3ba \ - eval_interval=0 \ - save_folder=mpt-7b \ - device_train_microbatch_size=8 \ - global_train_batch_size=256 -EOF diff --git a/3.test_cases/3.MPT/scripts/run-container.sh b/3.test_cases/3.MPT/scripts/run-container.sh deleted file mode 100644 index 32695853..00000000 --- a/3.test_cases/3.MPT/scripts/run-container.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash -. 
config.env - -set_options - -run docker run \ - --gpus all --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 \ - --rm \ - --name llm-foundry \ - -it \ - llm-foundry \ - bash \ No newline at end of file diff --git a/3.test_cases/3.MPT/slurm-scripts/c4-preprocess.sbatch b/3.test_cases/3.MPT/slurm-scripts/1.c4-preprocess.sbatch similarity index 100% rename from 3.test_cases/3.MPT/slurm-scripts/c4-preprocess.sbatch rename to 3.test_cases/3.MPT/slurm-scripts/1.c4-preprocess.sbatch diff --git a/3.test_cases/3.MPT/slurm-scripts/train-mpt-manual-distributed.sbatch b/3.test_cases/3.MPT/slurm-scripts/2.train-mpt-manual-distributed.sbatch similarity index 100% rename from 3.test_cases/3.MPT/slurm-scripts/train-mpt-manual-distributed.sbatch rename to 3.test_cases/3.MPT/slurm-scripts/2.train-mpt-manual-distributed.sbatch diff --git a/3.test_cases/3.MPT/slurm-scripts/.gitignore b/3.test_cases/3.MPT/slurm-scripts/logs/.gitignore similarity index 100% rename from 3.test_cases/3.MPT/slurm-scripts/.gitignore rename to 3.test_cases/3.MPT/slurm-scripts/logs/.gitignore diff --git a/3.test_cases/3.MPT/slurm-scripts/run-distributed-training.sh b/3.test_cases/3.MPT/slurm-scripts/run-distributed-training.sh deleted file mode 100644 index 13d644c1..00000000 --- a/3.test_cases/3.MPT/slurm-scripts/run-distributed-training.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash -set -euxo pipefail - -export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d -export FI_EFA_FORK_SAFE=1 -# export NCCL_ALGO=Ring -export FI_LOG_LEVEL=1 -export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons -export FI_EFA_ENABLE_SHM_TRANSFER=1 -# https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 -# https://github.com/pytorch/pytorch/issues/68893 -# export NCCL_SOCKET_IFNAME=ens -# async runtime error ... 
-export CUDA_DEVICE_MAX_CONNECTIONS=1 - -export NCCL_ASYNC_ERROR_HANDLING=1 -export NCCL_DEBUG=INFO -export HEAD_NODE_IP=$1 -export NODES=$3 -export RANK=${SLURM_PROCID} -export WORLD_SIZE=1 -export LOCAL_RANK=${SLURM_LOCALID} -export LOCAL_WORLD_SIZE=1 # 1 gpu の検証から -export NODE_RANK=${SLURM_NODEID} -[[ ${NODE_RANK} == 0 ]] && export MASTER_ADDR=0.0.0.0 || export MASTER_ADDR=$(hostname) -export MASTER_PORT=$2 -export PYTHONUNBUFFERED=1 -#python -c "import streaming; streaming.base.util.clean_stale_shared_memory()" -nvidia-smi -# VISIBLE_DIVICES does not work -export CUDA_VISIBLE_DEVICES=${LOCAL_RANK} -echo "CUDA_VISIBLE_DEVICES: $CUDA_VISIBLE_DEVICES" -composer \ - --verbose \ - /llm-foundry/scripts/train/train.py \ - /llm-foundry/scripts/train/yamls/pretrain/mpt-7b.yaml \ - data_local=/fsx/my-copy-c4 \ - train_loader.dataset.split=train_small \ - eval_loader.dataset.split=val_small \ - max_duration=3ba \ - eval_interval=0 \ - save_folder=mpt-7b \ - model.loss_fn=torch_crossentropy \ - device_train_microbatch_size=8 \ - global_train_batch_size=8 -# bash \ -# /apps/reference-architectures/3.test_cases/3.MPT/slurm-scripts/run-composer.sh \ -# ${RANK} ${LOCAL_RANK} ${NODE_RANK} ${WORLD_SIZE} ${LOCAL_WORLD_SIZE} ${MASTER_ADDR} ${MASTER_PORT} -#$ if [ ${NODE_RANK} == 0 ]; then -#$ else -#$ ssh -p 2221 -q $(hostname) \ -#$ bash \ -#$ /apps/reference-architectures/3.test_cases/3.MPT/slurm-scripts/run-composer.sh \ -#$ ${WORLD_SIZE} ${NODE_RANK} ${MASTER_ADDR} ${MASTER_PORT} -#$ fi \ No newline at end of file diff --git a/3.test_cases/3.MPT/slurm-scripts/train-mpt-single-node.sbatch b/3.test_cases/3.MPT/slurm-scripts/train-mpt-single-node.sbatch deleted file mode 100644 index 17fa7ba4..00000000 --- a/3.test_cases/3.MPT/slurm-scripts/train-mpt-single-node.sbatch +++ /dev/null @@ -1,150 +0,0 @@ -#!/bin/bash -#SBATCH --nodes=1 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs -#SBATCH --job-name=train-mpt # name of your job -#SBATCH --output=logs/%x_%j.out -#SBATCH 
--error=logs/%x_%j.err -#SBATCH --ntasks-per-node 1 # Number of GPU per node -#SBATCH --gpus-per-node=8 # Number of GPU per node -#SBATCH --gres=gpu:8 # number of GPU we reserve -#SBATCH --exclusive -#SBATCH --wait-all-nodes=1 - - -# default variables for Enroot -: "${APPS_PATH:=/apps}" -: "${IMAGE:=$APPS_PATH/llm-foundry.sqsh}" -: "${DATA_PATH:=/fsx}" -: "${FSX_MOUNT:=$DATA_PATH:$DATA_PATH}" - -## Plenty of EFA level variables -export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d -export FI_EFA_FORK_SAFE=1 -# export NCCL_ALGO=Ring -export FI_LOG_LEVEL=1 -export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons -export FI_EFA_ENABLE_SHM_TRANSFER=1 -# https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 -# https://github.com/pytorch/pytorch/issues/68893 -#export NCCL_SOCKET_IFNAME=ens -export NCCL_ASYNC_ERROR_HANDLING=1 -export NCCL_DEBUG=INFO - -export PROFILE_FILE=/report/profile_file - -declare -a ARGS=( - --container-image $IMAGE - --container-mounts ${FSX_MOUNT} -) -NODES=( $( scontrol show hostnames $SLURM_JOB_NODELIST ) ) -NODES_ARRAY=($NODES) -HEAD_NODE=${NODES_ARRAY[0]} -HEAD_NODE_IP=$(srun --nodes=1 --ntasks=1 -w "$HEAD_NODE" hostname --ip-address) -srun -l "${ARGS[@]}" python -c "import streaming; streaming.base.util.clean_stale_shared_memory()" -srun -l "${ARGS[@]}" composer --verbose /llm-foundry/scripts/train/train.py \ - /llm-foundry/scripts/train/yamls/pretrain/mpt-7b.yaml \ - data_local=/fsx/my-copy-c4 \ - train_loader.dataset.split=train_small \ - eval_loader.dataset.split=val_small \ - max_duration=3ba \ - eval_interval=0 \ - save_folder=mpt-7b \ - device_train_microbatch_size=8 \ - global_train_batch_size=256 - -# #!/bin/bash -# #SBATCH -N 1 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs -# #SBATCH --job-name=train-mpt # name of your job -# #SBATCH --output=logs/%x_%j.out -# #SBATCH --error=logs/%x_%j.err -# #SBATCH --ntasks-per-node 8 # Number of GPU per node -# #SBATCH 
--gres=gpu:8 # number of GPU we reserve -# #SBATCH --exclusive -# #SBATCH --wait-all-nodes=1 -# -# -# set -euxo pipefail -# # default variables for Enroot -# : "${APPS_PATH:=/apps}" -# : "${IMAGE:=$APPS_PATH/llm-foundry.sqsh}" -# : "${DATA_PATH:=/fsx}" -# : "${FSX_MOUNT:=$DATA_PATH:$DATA_PATH}" -# : "${APPS_MOUNT:=$APPS_PATH:$APPS_PATH}" -# -# ## Plenty of EFA level variables -# export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d -# export FI_EFA_FORK_SAFE=1 -# # export NCCL_ALGO=Ring -# export FI_LOG_LEVEL=1 -# export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons -# export FI_EFA_ENABLE_SHM_TRANSFER=1 -# # https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 -# # https://github.com/pytorch/pytorch/issues/68893 -# #export NCCL_SOCKET_IFNAME=ens -# export NCCL_ASYNC_ERROR_HANDLING=1 -# export NCCL_DEBUG=INFO -# -# export PROFILE_FILE=/report/profile_file -# -# declare -a ARGS=( -# --container-image $IMAGE -# --container-mounts ${FSX_MOUNT},${APPS_MOUNT} -# ) -# -# export NODES=( $( scontrol show hostnames $SLURM_JOB_NODELIST ) ) -# export NODES_ARRAY=($NODES) -# export HEAD_NODE=${NODES_ARRAY[0]} -# export HEAD_NODE_IP=$(srun --nodes=1 --ntasks=1 -w "$HEAD_NODE" hostname --ip-address) -# export MASTER_PORT=$RANDOM -# # echo "Node ID "$SLURM_NODEID -# # srun -l "${ARGS[@]}" echo "nodeID" $SLURM_NODEID -# srun -l "${ARGS[@]}" python -c "import streaming; streaming.base.util.clean_stale_shared_memory()" -# # srun -l "${ARGS[@]}" bash /apps/reference-architectures/3.test_cases/3.MPT/slurm-scripts/run-distributed-training.sh ${HEAD_NODE_IP} ${MASTER_PORT} ${NODES} -# -# # This works -# srun -l "${ARGS[@]}" composer /llm-foundry/scripts/train/train.py \ -# /llm-foundry/scripts/train/yamls/pretrain/mpt-7b.yaml \ -# data_local=/fsx/my-copy-c4 \ -# train_loader.dataset.split=train_small \ -# eval_loader.dataset.split=val_small \ -# max_duration=3ba \ -# eval_interval=0 \ -# save_folder=mpt-7b \ -# 
device_train_microbatch_size=8 \ -# global_train_batch_size=256 -# -# # srun -l "${ARGS[@]}" composer --nproc 8 \ -# # --world_size 64 \ -# # --base_rank 0 \ -# # --node_rank \ -# # --master_addr $HEAD_NODE_IP \ -# # --master_port $RANDOM \ -# # /llm-foundry/scripts/train/train.py \ -# # /llm-foundry/scripts/train/yamls/pretrain/mpt-7b.yaml \ -# # data_local=/fsx/my-copy-c4 \ -# # train_loader.dataset.split=train_small \ -# # eval_loader.dataset.split=val_small \ -# # max_duration=3ba \ -# # eval_interval=0 \ -# # save_folder=mpt-7b \ -# # model.loss_fn=torch_crossentropy \ -# # device_train_microbatch_size=8 \ -# # global_train_batch_size=256 -# -# # srun -l "${ARGS[@]}" nsys profile -w true -t cuda,nvtx,osrt,cudnn,cublas \ -# # --force-overwrite=true -s cpu --cudabacktrace=true -x true -o ${PROFILE_FILE} composer \ -# # --nproc 8 \ -# # --world_size 8 \ -# # --base_rank 0 \ -# # --master_addr $HEAD_NODE_IP \ -# # --master_port $RANDOM \ -# # train/train.py \ -# # train/yamls/pretrain/mpt-7b.yaml \ -# # data_local=my-copy-c4 \ -# # train_loader.dataset.split=train_small \ -# # eval_loader.dataset.split=val_small \ -# # max_duration=3ba \ -# # eval_interval=0 \ -# # save_folder=mpt-7b \ -# # device_train_microbatch_size=8 \ -# # global_train_batch_size=256 -# \ No newline at end of file diff --git a/3.test_cases/3.MPT/slurm-scripts/train-mpt.sbatch b/3.test_cases/3.MPT/slurm-scripts/train-mpt.sbatch deleted file mode 100644 index 2c015ea1..00000000 --- a/3.test_cases/3.MPT/slurm-scripts/train-mpt.sbatch +++ /dev/null @@ -1,200 +0,0 @@ -#!/bin/bash -#SBATCH -N 1 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs -#SBATCH --job-name=train-mpt # name of your job -#SBATCH --output=logs/%x_%j.out -#SBATCH --error=logs/%x_%j.err -#SBATCH --ntasks-per-node 1 # Number of GPU per node -#SBATCH --gres=gpu:8 # number of GPU we reserve -#SBATCH --exclusive -#SBATCH --wait-all-nodes=1 - - -set -euxo pipefail -# default variables for Enroot -: "${APPS_PATH:=/apps}" -: 
"${IMAGE:=$APPS_PATH/llm-foundry.sqsh}" -: "${DATA_PATH:=/fsx}" -: "${FSX_MOUNT:=$DATA_PATH:$DATA_PATH}" -: "${APPS_MOUNT:=$APPS_PATH:$APPS_PATH}" - -## Plenty of EFA level variables -export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d -export FI_EFA_FORK_SAFE=1 -# export NCCL_ALGO=Ring -export FI_LOG_LEVEL=1 -export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons -export FI_EFA_ENABLE_SHM_TRANSFER=1 -# https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 -# https://github.com/pytorch/pytorch/issues/68893 -#export NCCL_SOCKET_IFNAME=ens -export NCCL_ASYNC_ERROR_HANDLING=1 -export NCCL_DEBUG=INFO - -export PROFILE_FILE=/report/profile_file - -declare -a ARGS=( - --container-image $IMAGE - --container-mounts ${FSX_MOUNT},${APPS_MOUNT} -) - -export NODES=( $( scontrol show hostnames $SLURM_JOB_NODELIST ) ) -export NODES_ARRAY=($NODES) -export HEAD_NODE=${NODES_ARRAY[0]} -export HEAD_NODE_IP=$(srun --nodes=1 --ntasks=1 -w "$HEAD_NODE" hostname --ip-address) -export MASTER_PORT=$RANDOM -# echo "Node ID "$SLURM_NODEID -# srun -l "${ARGS[@]}" echo "nodeID" $SLURM_NODEID -srun -l "${ARGS[@]}" python -c "import streaming; streaming.base.util.clean_stale_shared_memory()" -srun -l "${ARGS[@]}" bash /apps/reference-architectures/3.test_cases/3.MPT/slurm-scripts/run-distributed-training.sh ${HEAD_NODE_IP} ${MASTER_PORT} ${NODES} - - -# #!/bin/bash -# -# #SBATCH -N 1 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs -# #SBATCH --job-name=train-mpt # name of your job -# #SBATCH --output=logs/%x_%j.out -# #SBATCH --error=logs/%x_%j.err -# #SBATCH --ntasks-per-node 8 # Number of GPU per node -# #SBATCH --gres=gpu:8 # number of GPU we reserve -# #SBATCH --exclusive -# #SBATCH --wait-all-nodes=1 -# -# -# # default variables for Enroot -# : "${APPS_PATH:=/apps}" -# : "${IMAGE:=$APPS_PATH/llm-foundry.sqsh}" -# : "${DATA_PATH:=/fsx}" -# : "${FSX_MOUNT:=$DATA_PATH:$DATA_PATH}" -# -# ## Plenty of EFA level variables 
-# export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d -# export FI_EFA_FORK_SAFE=1 -# # export NCCL_ALGO=Ring -# export FI_LOG_LEVEL=1 -# export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons -# export FI_EFA_ENABLE_SHM_TRANSFER=1 -# # https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 -# # https://github.com/pytorch/pytorch/issues/68893 -# #export NCCL_SOCKET_IFNAME=ens -# export NCCL_ASYNC_ERROR_HANDLING=1 -# export NCCL_DEBUG=INFO -# -# export PROFILE_FILE=/report/profile_file -# -# declare -a ARGS=( -# --container-image $IMAGE -# --container-mounts ${FSX_MOUNT} -# ) -# NODES=( $( scontrol show hostnames $SLURM_JOB_NODELIST ) ) -# NODES_ARRAY=($NODES) -# HEAD_NODE=${NODES_ARRAY[0]} -# HEAD_NODE_IP=$(srun --nodes=1 --ntasks=1 -w "$HEAD_NODE" hostname --ip-address) -# srun -l "${ARGS[@]}" python -c "import streaming; streaming.base.util.clean_stale_shared_memory()" -# srun -l "${ARGS[@]}" composer --verbose /llm-foundry/scripts/train/train.py \ -# /llm-foundry/scripts/train/yamls/pretrain/mpt-7b.yaml \ -# data_local=/fsx/my-copy-c4 \ -# train_loader.dataset.split=train_small \ -# eval_loader.dataset.split=val_small \ -# max_duration=3ba \ -# eval_interval=0 \ -# save_folder=mpt-7b \ -# device_train_microbatch_size=8 \ -# global_train_batch_size=256 - -# #!/bin/bash -# #SBATCH -N 1 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs -# #SBATCH --job-name=train-mpt # name of your job -# #SBATCH --output=logs/%x_%j.out -# #SBATCH --error=logs/%x_%j.err -# #SBATCH --ntasks-per-node 8 # Number of GPU per node -# #SBATCH --gres=gpu:8 # number of GPU we reserve -# #SBATCH --exclusive -# #SBATCH --wait-all-nodes=1 -# -# -# set -euxo pipefail -# # default variables for Enroot -# : "${APPS_PATH:=/apps}" -# : "${IMAGE:=$APPS_PATH/llm-foundry.sqsh}" -# : "${DATA_PATH:=/fsx}" -# : "${FSX_MOUNT:=$DATA_PATH:$DATA_PATH}" -# : "${APPS_MOUNT:=$APPS_PATH:$APPS_PATH}" -# -# ## Plenty of EFA level variables -# 
export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d -# export FI_EFA_FORK_SAFE=1 -# # export NCCL_ALGO=Ring -# export FI_LOG_LEVEL=1 -# export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons -# export FI_EFA_ENABLE_SHM_TRANSFER=1 -# # https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 -# # https://github.com/pytorch/pytorch/issues/68893 -# #export NCCL_SOCKET_IFNAME=ens -# export NCCL_ASYNC_ERROR_HANDLING=1 -# export NCCL_DEBUG=INFO -# -# export PROFILE_FILE=/report/profile_file -# -# declare -a ARGS=( -# --container-image $IMAGE -# --container-mounts ${FSX_MOUNT},${APPS_MOUNT} -# ) -# -# export NODES=( $( scontrol show hostnames $SLURM_JOB_NODELIST ) ) -# export NODES_ARRAY=($NODES) -# export HEAD_NODE=${NODES_ARRAY[0]} -# export HEAD_NODE_IP=$(srun --nodes=1 --ntasks=1 -w "$HEAD_NODE" hostname --ip-address) -# export MASTER_PORT=$RANDOM -# # echo "Node ID "$SLURM_NODEID -# # srun -l "${ARGS[@]}" echo "nodeID" $SLURM_NODEID -# srun -l "${ARGS[@]}" python -c "import streaming; streaming.base.util.clean_stale_shared_memory()" -# # srun -l "${ARGS[@]}" bash /apps/reference-architectures/3.test_cases/3.MPT/slurm-scripts/run-distributed-training.sh ${HEAD_NODE_IP} ${MASTER_PORT} ${NODES} -# -# # This works -# srun -l "${ARGS[@]}" composer /llm-foundry/scripts/train/train.py \ -# /llm-foundry/scripts/train/yamls/pretrain/mpt-7b.yaml \ -# data_local=/fsx/my-copy-c4 \ -# train_loader.dataset.split=train_small \ -# eval_loader.dataset.split=val_small \ -# max_duration=3ba \ -# eval_interval=0 \ -# save_folder=mpt-7b \ -# device_train_microbatch_size=8 \ -# global_train_batch_size=256 -# -# # srun -l "${ARGS[@]}" composer --nproc 8 \ -# # --world_size 64 \ -# # --base_rank 0 \ -# # --node_rank \ -# # --master_addr $HEAD_NODE_IP \ -# # --master_port $RANDOM \ -# # /llm-foundry/scripts/train/train.py \ -# # /llm-foundry/scripts/train/yamls/pretrain/mpt-7b.yaml \ -# # data_local=/fsx/my-copy-c4 \ -# # 
train_loader.dataset.split=train_small \ -# # eval_loader.dataset.split=val_small \ -# # max_duration=3ba \ -# # eval_interval=0 \ -# # save_folder=mpt-7b \ -# # model.loss_fn=torch_crossentropy \ -# # device_train_microbatch_size=8 \ -# # global_train_batch_size=256 -# -# # srun -l "${ARGS[@]}" nsys profile -w true -t cuda,nvtx,osrt,cudnn,cublas \ -# # --force-overwrite=true -s cpu --cudabacktrace=true -x true -o ${PROFILE_FILE} composer \ -# # --nproc 8 \ -# # --world_size 8 \ -# # --base_rank 0 \ -# # --master_addr $HEAD_NODE_IP \ -# # --master_port $RANDOM \ -# # train/train.py \ -# # train/yamls/pretrain/mpt-7b.yaml \ -# # data_local=my-copy-c4 \ -# # train_loader.dataset.split=train_small \ -# # eval_loader.dataset.split=val_small \ -# # max_duration=3ba \ -# # eval_interval=0 \ -# # save_folder=mpt-7b \ -# # device_train_microbatch_size=8 \ -# # global_train_batch_size=256 -# \ No newline at end of file From d6569f2ba08e3bbfa0c21d4a2ded2f1e899ba6b5 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 26 Sep 2023 18:59:15 -0500 Subject: [PATCH 109/648] Remove local gitignore in favor of global for MPT --- .gitignore | 4 ++++ 3.test_cases/3.MPT/slurm-scripts/logs/.gitignore | 2 -- 2 files changed, 4 insertions(+), 2 deletions(-) delete mode 100644 3.test_cases/3.MPT/slurm-scripts/logs/.gitignore diff --git a/.gitignore b/.gitignore index ba3705fa..53a44a27 100644 --- a/.gitignore +++ b/.gitignore @@ -32,3 +32,7 @@ spark-warehouse *.h5 *.tfevents.* *.venv* + +# slurm outputs +*.out +*.err diff --git a/3.test_cases/3.MPT/slurm-scripts/logs/.gitignore b/3.test_cases/3.MPT/slurm-scripts/logs/.gitignore deleted file mode 100644 index 951d812a..00000000 --- a/3.test_cases/3.MPT/slurm-scripts/logs/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.out -*.err \ No newline at end of file From d97d398887199726c49cfbae0bedd4053b85083b Mon Sep 17 00:00:00 2001 From: Verdi March Date: Wed, 27 Sep 2023 16:11:46 +0800 Subject: [PATCH 110/648] Training cluster defaults to 
exclusive mode --- .../2.aws-parallelcluster/distributed-training-p4de-base.yaml | 1 + .../distributed-training-p4de_batch-inference-g5_custom_ami.yaml | 1 + .../distributed-training-p4de_custom_ami.yaml | 1 + .../distributed-training-p4de_postinstall_scripts.yaml | 1 + .../distributed-training-trn1_custom_ami.yaml | 1 + 5 files changed, 5 insertions(+) diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml index cceecd7b..ad01a75a 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml @@ -51,6 +51,7 @@ Scheduling: MaxCount: 4 # not scale down Efa: Enabled: true + JobExclusiveAllocation: true # GenAI training likes to gobble all GPUs in an instance SharedStorage: - MountDir: /fsx Name: fsx diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml index 7d7d9e36..b6630621 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml @@ -55,6 +55,7 @@ Scheduling: MaxCount: 4 # not scale down Efa: Enabled: true + JobExclusiveAllocation: true # GenAI training likes to gobble all GPUs in an instance - Name: inference-gpu CapacityType: ONDEMAND Networking: diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml index 3674df7d..031aaf29 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml @@ -52,6 +52,7 @@ 
Scheduling: MaxCount: 4 # not scale down Efa: Enabled: true + JobExclusiveAllocation: true # GenAI training likes to gobble all GPUs in an instance SharedStorage: - MountDir: /fsx Name: fsx diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml index ed5a5cec..34a9089e 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml @@ -56,6 +56,7 @@ Scheduling: MaxCount: 4 # not scale down Efa: Enabled: true + JobExclusiveAllocation: true # GenAI training likes to gobble all GPUs in an instance CustomActions: OnNodeConfigured: Sequence: diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml index 91df40cf..c99ac4d9 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml @@ -52,6 +52,7 @@ Scheduling: MaxCount: 4 # not scale down Efa: Enabled: true + JobExclusiveAllocation: true # GenAI training likes to gobble all accelerators in an instance SharedStorage: - MountDir: /fsx Name: fsx From 271300a07b1638216386ef63c91378d2469f39f4 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Wed, 27 Sep 2023 12:54:56 +0000 Subject: [PATCH 111/648] Add nemo-launcher gpt3-175b --- .../4.bmk-pretrain-gpt3-175b.sh | 86 +++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100755 3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh diff --git a/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh b/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh new file mode 100755 index 00000000..58ac2c4d --- /dev/null +++ 
b/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh @@ -0,0 +1,86 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -exo pipefail +[[ -z "${TARGET_PATH}" ]] \ + && { echo Please set environment variable TARGET_PATH ; exit 1 ; } \ + || echo TARGET_PATH=$TARGET_PATH + +################################################################################ +# 000: Modify this section to define pre-training configuration: model size, +# number of nodes, max. pre-training steps, job's max. runtime. +################################################################################ +## Pre-train gpt3-175b on 16 nodes for 5 steps. Number of nodes must be multiplies of 4. +export MODEL=gpt3 +export MODEL_SIZE=175b +export NUM_NODES=16 +export RUNTIME=4h +export MAX_STEPS=5 +export MBS=2 +declare -a MODEL_ARGS=( + training.model.micro_batch_size=${MBS} + + # Activation checkpointing + training.model.activations_checkpoint_granularity='full' + training.model.activations_checkpoint_method='block' + training.model.activations_checkpoint_num_layers=1 + + # Not applicable for A100 + training.model.transformer_engine=False + training.model.ub_tp_comm_overlap=False +) + + +################################################################################ +# 010: Advance users can modify this stanza to customize benchmarking behavior. +################################################################################ +declare -a BMK_ARGS=( + # Disable validation, as we're only interested to measure the training time. 
+ training.trainer.limit_val_batches=0.0 + + # Ignore checkpoints + training.exp_manager.create_checkpoint_callback=False + training.exp_manager.resume_if_exists=False + + # https://github.com/NVIDIA/NeMo/pull/6181/files + training.model.data.data_impl=mock + training.model.data.data_prefix=[] +) + + +################################################################################ +# 020: Internal settings. +################################################################################ +WORKSPACE_CONT=$TARGET_PATH +CONT_RESULT_DIR=${WORKSPACE_CONT}/results +CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe + +# Dev/test feature (off by default) to force each pre-training run outputs to a separate directory. +: "${UNIQUE_OUTPUT_DIR:=0}" +if [[ ${UNIQUE_OUTPUT_DIR} -eq 1 ]]; then + # For debugging: each run has its own output dir. + TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) + CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} + + BMK_ARGS+=(base_results_dir=${CONT_RESULT_DIR}) + + echo " + #################### + This run will write to directory ${CONT_RESULT_DIR} + #################### + " +fi + + +################################################################################ +# 030: Here we go... 
+################################################################################ +HYDRA_FULL_ERROR=1 python3 $TARGET_PATH/launcher_scripts/main.py \ + stages=[training] \ + training=${MODEL}/${MODEL_SIZE} \ + training.trainer.num_nodes=$NUM_NODES \ + training.trainer.max_steps=$MAX_STEPS \ + training.trainer.val_check_interval=$MAX_STEPS \ + "${BMK_ARGS[@]}" "${MODEL_ARGS[@]}" "$@" From 2469cf680096e29ecf230dd6c0cd3bc5be82b794 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 27 Sep 2023 08:43:24 -0500 Subject: [PATCH 112/648] Add documentation to NemoMegatron case --- 3.test_cases/2.nemo-launcher/README.md | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index de484c49..fcf781c9 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -1,4 +1,6 @@ -# Nemo Megatron on Slurm +# Train GPT3 NemoMegatron on Slurm + +This project provides a guide to run [NemoMegatron](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/megatron.html) on AWS using a container from Nvidia GPU Cloud (NGC). The test cases in this case cover NemoMegatron for different model sizes: [126M](1.bmk-pretrain-gpt3-126m.sh), [5B](1.bmk-pretrain-gpt3-5b.sh), [40B](1.bmk-pretrain-gpt3-40b.sh) and [175B](1.bmk-pretrain-gpt3-175b.sh) parameters. The test cases can be executed on Slurm and use Nvidia Enroot and Nvidia Pyxis. Table of contents: @@ -215,3 +217,14 @@ training.trainer.num_nodes=$NUM_NODES | └── key 'trainer -> num_nodes' in the `.yaml` file. ``` + + +## 8. 
References + +- Nvidia NemoMegatron Documentation: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/megatron.html +- Train Large Scale NLP with Nemo Megatron from Nvidia: https://docs.nvidia.com/launchpad/ai/base-command-nemo/latest/index.html + +## Authors / Reviewers + +- [A] Verdi March - marcverd@ +- [R] Pierre-Yves Aquilanti - pierreya@ From a5807542c972803eb734602d04aaab4d87b771ec Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 27 Sep 2023 08:45:48 -0500 Subject: [PATCH 113/648] Fix contributors section in main readme --- README.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/README.md b/README.md index 48aea13b..627cbcd2 100644 --- a/README.md +++ b/README.md @@ -51,10 +51,6 @@ Utilities scripts and micro-benchmarks examples are set under `4.validation_scri Thanks to all the contributors for building, reviewing and testing. -## 4. Contributors - -Thanks to all the contributors for building, reviewing and testing. - - Pierre-Yves Aquilanti - pierreya@ - Verdi March - marcverd@ - Uros Lipovsek - lipovsek@ From 9db3b7d14bc7aef473c60adc4e3373f54bee693c Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Wed, 27 Sep 2023 22:49:01 +0900 Subject: [PATCH 114/648] drop support for older GPUs --- 3.test_cases/3.MPT/0.Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/3.test_cases/3.MPT/0.Dockerfile b/3.test_cases/3.MPT/0.Dockerfile index 12f18215..6a2a3e4f 100644 --- a/3.test_cases/3.MPT/0.Dockerfile +++ b/3.test_cases/3.MPT/0.Dockerfile @@ -56,7 +56,7 @@ RUN git clone https://github.com/NVIDIA/nccl /opt/nccl \ && cd /opt/nccl \ && git checkout -b ${NCCL_VERSION} \ && make -j src.build CUDA_HOME=/usr/local/cuda \ - NVCC_GENCODE="-gencode=arch=compute_90,code=sm_90 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_60,code=sm_60" + NVCC_GENCODE="-gencode=arch=compute_90,code=sm_90 
-gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_80,code=sm_80" ################################################### ## Install AWS-OFI-NCCL plugin @@ -82,7 +82,7 @@ RUN git clone https://github.com/NVIDIA/nccl-tests.git /opt/nccl-tests \ MPI_HOME=/opt/amazon/openmpi/ \ CUDA_HOME=/usr/local/cuda \ NCCL_HOME=/opt/nccl/build \ - NVCC_GENCODE="-gencode=arch=compute_90,code=sm_90 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_60,code=sm_60" + NVCC_GENCODE="-gencode=arch=compute_90,code=sm_90 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_80,code=sm_80" RUN rm -rf /var/lib/apt/lists/* ENV LD_PRELOAD=/opt/nccl/build/lib/libnccl.so From 1af7ed3889d1e918fc057ce2ddf9bdebc8632c48 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Wed, 27 Sep 2023 21:53:02 +0800 Subject: [PATCH 115/648] Nitpicking: white spaces and eol --- 3.test_cases/3.MPT/0.Dockerfile | 8 ++++---- 3.test_cases/3.MPT/README.md | 20 +++++++++---------- .../2.train-mpt-manual-distributed.sbatch | 4 ++-- .../3.MPT/slurm-scripts/run-composer.sh | 2 +- 4 files changed, 16 insertions(+), 18 deletions(-) diff --git a/3.test_cases/3.MPT/0.Dockerfile b/3.test_cases/3.MPT/0.Dockerfile index 6a2a3e4f..58a90b6b 100644 --- a/3.test_cases/3.MPT/0.Dockerfile +++ b/3.test_cases/3.MPT/0.Dockerfile @@ -9,7 +9,7 @@ ARG OPEN_MPI_PATH=/opt/amazon/openmpi RUN apt-get update -y RUN apt-get remove -y --allow-change-held-packages \ libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 \ - libnccl2 libnccl-dev libibnetdisc5 libibmad5 libibumad3 + libnccl2 libnccl-dev libibnetdisc5 libibmad5 libibumad3 RUN rm -rf /opt/hpcx \ && rm -rf /usr/local/mpi \ && rm -rf /usr/local/ucx \ @@ -90,8 +90,8 @@ ENV LD_PRELOAD=/opt/nccl/build/lib/libnccl.so RUN echo "hwloc_base_binding_policy = none" >> /opt/amazon/openmpi/etc/openmpi-mca-params.conf \ && echo "rmaps_base_mapping_policy = slot" >> 
/opt/amazon/openmpi/etc/openmpi-mca-params.conf -RUN pip3 install awscli -RUN pip3 install pynvml +RUN pip3 install awscli +RUN pip3 install pynvml RUN mv $OPEN_MPI_PATH/bin/mpirun $OPEN_MPI_PATH/bin/mpirun.real \ && echo '#!/bin/bash' > $OPEN_MPI_PATH/bin/mpirun \ @@ -109,4 +109,4 @@ RUN cd llm-foundry \ && pip install xformers nvtx 'flash-attn==v1.0.3.post0' RUN wget https://developer.download.nvidia.com/devtools/nsight-systems/NsightSystems-linux-cli-public-2023.2.1.122-3259852.deb RUN apt-get install -y libglib2.0-0 \ - && dpkg -i NsightSystems-linux-cli-public-2023.2.1.122-3259852.deb \ No newline at end of file + && dpkg -i NsightSystems-linux-cli-public-2023.2.1.122-3259852.deb diff --git a/3.test_cases/3.MPT/README.md b/3.test_cases/3.MPT/README.md index aa945dd2..2e11a514 100644 --- a/3.test_cases/3.MPT/README.md +++ b/3.test_cases/3.MPT/README.md @@ -1,6 +1,6 @@ # Mosaic Pretraind Transformers (MPT) Test Case -MPT are GPT-style models in llm-foundry with some special features -- Flash Attention for efficiency, ALiBi for context length extrapolation, and stability improvements to mitigate loss spikes (https://github.com/mosaicml/llm-foundry/tree/main). +MPT are GPT-style models in llm-foundry with some special features -- Flash Attention for efficiency, ALiBi for context length extrapolation, and stability improvements to mitigate loss spikes (). This subdirectory contains: * AWS optimized llm-foundary container image @@ -10,13 +10,12 @@ This subdirectory contains: This guide assumes that you have the following: -- A functional Slurm cluster on AWS. -- Docker, [Pyxis](https://github.com/NVIDIA/pyxis) and [Enroot](https://github.com/NVIDIA/enroot) installed. -- An FSx for Lustre filesystem mounted on `/fsx`. +* A functional Slurm cluster on AWS. +* Docker, [Pyxis](https://github.com/NVIDIA/pyxis) and [Enroot](https://github.com/NVIDIA/enroot) installed. +* An FSx for Lustre filesystem mounted on `/fsx`. 
It is recommended that you use the templates in the architectures [directory](../../1.architectures) - ## 1. Data Preprocessing Before running training jobs you need to retrieve input data and preprocess it. This section of the guide you will retrieve a container then you convert it into a Squash file via [Enroot](https://github.com/NVIDIA/enroot), you will then retrieve input data ans tokenize it. @@ -43,6 +42,7 @@ Below are the steps you need to follow: ```bash enroot import -o /apps/llm-foundry.sqsh dockerd://llm-foundry:latest ``` + The file will be stored in the `/apps` directory. The output should look as below. ```bash @@ -65,7 +65,7 @@ Below are the steps you need to follow: It will take roughly 5 minutes. -5. Go to `slurm-scripts` subdirectory and then run `1.c4-preprocess.sbatch` script +5. Go to `slurm-scripts` subdirectory and then run `1.c4-preprocess.sbatch` script ```bash sbatch 1.c4-preprocess.sbatch @@ -73,7 +73,7 @@ Below are the steps you need to follow: It will create the streaming dataset for composer library using C4 dataset. -7. You will see a new file in your current working directory called `c4-preprocess-XY.out` where `XY` is a number. This is your outputfile and will capture the `STDOUT` and `STDERR` from your job. You can check how it progresses via the command `tail -f slurm-XY.out` but with the relevant filename. +7. You will see a new file in your current working directory called `c4-preprocess-XY.out` where `XY` is a number. This is your outputfile and will capture the `STDOUT` and `STDERR` from your job. You can check how it progresses via the command `tail -f slurm-XY.out` but with the relevant filename. Once the job completed, you will see the following data under `/fsx/my-c4-copy`. @@ -98,15 +98,14 @@ Once the job completed, you will see the following data under `/fsx/my-c4-copy`. Now that the data is preprocessed, we will pretrain a MPT model with composer. - 1. Go to `slurm-scripts` subdirectory. 2. 
Run the training script as follows: + ```bash sbatch 2.train-mpt-manual-distributed.sbatch ``` - -3. The training starts running and should produce an output similar to below if successful. +3. The training starts running and should produce an output similar to below if successful. ``` ... @@ -131,4 +130,3 @@ Now that the data is preprocessed, we will pretrain a MPT model with composer. 0: Train time/val: 0.0000 ... ``` - diff --git a/3.test_cases/3.MPT/slurm-scripts/2.train-mpt-manual-distributed.sbatch b/3.test_cases/3.MPT/slurm-scripts/2.train-mpt-manual-distributed.sbatch index 50d6ae70..61f8c7e9 100644 --- a/3.test_cases/3.MPT/slurm-scripts/2.train-mpt-manual-distributed.sbatch +++ b/3.test_cases/3.MPT/slurm-scripts/2.train-mpt-manual-distributed.sbatch @@ -60,7 +60,7 @@ function run_compose() { } NODE_RANK=1 for (( NODE_RANK=1; NODE_RANK<${NNODES}; NODE_RANK++ )) -do +do NODE=${NODES[$NODE_RANK]} echo "Run compute node ${NODE} for rank: ${NODE_RANK}" run_compose & @@ -69,4 +69,4 @@ NODE_RANK=0 NODE=${HEAD_NODE} echo "Run master node ${NODE} for rank: ${NODE_RANK}" run_compose -wait \ No newline at end of file +wait diff --git a/3.test_cases/3.MPT/slurm-scripts/run-composer.sh b/3.test_cases/3.MPT/slurm-scripts/run-composer.sh index 17c68f27..6fbb5e7e 100644 --- a/3.test_cases/3.MPT/slurm-scripts/run-composer.sh +++ b/3.test_cases/3.MPT/slurm-scripts/run-composer.sh @@ -52,4 +52,4 @@ nsys profile --duration 300 --delay 30 --force-overwrite=true -w true -t cuda,n max_duration=3ba \ eval_interval=0 \ save_folder=${MODEL} -echo "Elapsed Time: $(($end-$start)) seconds" \ No newline at end of file +echo "Elapsed Time: $(($end-$start)) seconds" From 3b949142672bace410f8c69355f745300cff61a8 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Wed, 27 Sep 2023 23:16:34 +0900 Subject: [PATCH 116/648] add end --- 3.test_cases/3.MPT/slurm-scripts/run-composer.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/3.test_cases/3.MPT/slurm-scripts/run-composer.sh 
b/3.test_cases/3.MPT/slurm-scripts/run-composer.sh index 6fbb5e7e..87930404 100644 --- a/3.test_cases/3.MPT/slurm-scripts/run-composer.sh +++ b/3.test_cases/3.MPT/slurm-scripts/run-composer.sh @@ -52,4 +52,5 @@ nsys profile --duration 300 --delay 30 --force-overwrite=true -w true -t cuda,n max_duration=3ba \ eval_interval=0 \ save_folder=${MODEL} +end=$(date +%s) echo "Elapsed Time: $(($end-$start)) seconds" From 9bf0baf7b7c826b8c67e8a46830038c6ed905a5d Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Wed, 27 Sep 2023 23:17:28 +0900 Subject: [PATCH 117/648] update to SECONDS --- 3.test_cases/3.MPT/slurm-scripts/run-composer.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/3.test_cases/3.MPT/slurm-scripts/run-composer.sh b/3.test_cases/3.MPT/slurm-scripts/run-composer.sh index 87930404..06154eb3 100644 --- a/3.test_cases/3.MPT/slurm-scripts/run-composer.sh +++ b/3.test_cases/3.MPT/slurm-scripts/run-composer.sh @@ -37,7 +37,7 @@ PROFILE_FILE=/fsx/logs/profile mkdir -p /fsx/logs MODEL=mpt-7b PROFILER="nsys profile --force-overwrite=true -w true -t cuda,nvtx,osrt,cudnn,cublas -s cpu --capture-range=cudaProfilerApi --cudabacktrace=true -x true -o /fsx/logs/profile-mpt30b-rank-${NODE_RANK}" -start=$(date +%s) +start=${SECONDS} nsys profile --duration 300 --delay 30 --force-overwrite=true -w true -t cuda,nvtx,osrt,cudnn,cublas -s cpu -x true -o /fsx/logs/${MODEL}-p4d-worldsize-${WORLD_SIZE}-rank-${NODE_RANK}.profile composer \ --world_size ${WORLD_SIZE} \ --nproc ${N_PROC} \ @@ -52,5 +52,5 @@ nsys profile --duration 300 --delay 30 --force-overwrite=true -w true -t cuda,n max_duration=3ba \ eval_interval=0 \ save_folder=${MODEL} -end=$(date +%s) +end=${SECONDS} echo "Elapsed Time: $(($end-$start)) seconds" From 3ac2c430b4330012de10a8f2188fd32e0d37ee25 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 27 Sep 2023 09:18:36 -0500 Subject: [PATCH 118/648] Update documentation and rename dockerfile. 
--- ...{0.Dockerfile => 0.llm-foundry.Dockerfile} | 0 3.test_cases/3.MPT/README.md | 27 ++++++++++--------- 2 files changed, 15 insertions(+), 12 deletions(-) rename 3.test_cases/3.MPT/{0.Dockerfile => 0.llm-foundry.Dockerfile} (100%) diff --git a/3.test_cases/3.MPT/0.Dockerfile b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile similarity index 100% rename from 3.test_cases/3.MPT/0.Dockerfile rename to 3.test_cases/3.MPT/0.llm-foundry.Dockerfile diff --git a/3.test_cases/3.MPT/README.md b/3.test_cases/3.MPT/README.md index 2e11a514..9c96495e 100644 --- a/3.test_cases/3.MPT/README.md +++ b/3.test_cases/3.MPT/README.md @@ -1,32 +1,34 @@ # Mosaic Pretraind Transformers (MPT) Test Case -MPT are GPT-style models in llm-foundry with some special features -- Flash Attention for efficiency, ALiBi for context length extrapolation, and stability improvements to mitigate loss spikes (). -This subdirectory contains: +MPT are GPT-style models in [llm-foundry](https://github.com/mosaicml/llm-foundry/tree/main) with some special features -- [Flash Attention](https://arxiv.org/abs/2205.14135) for efficiency, [ALiBi](https://arxiv.org/abs/2108.12409) for context length extrapolation, and stability improvements to mitigate loss spikes. -* AWS optimized llm-foundary container image -* Slurm scripts for c4 dataset preparation and multi-node training +This project contains: + +* AWS optimized [llm-foundry](https://github.com/mosaicml/llm-foundry/tree/main) container image. +* Slurm scripts for the [c4 dataset](https://huggingface.co/datasets/c4) preparation and multi-node distributed training. ## 0. Preparation This guide assumes that you have the following: -* A functional Slurm cluster on AWS. -* Docker, [Pyxis](https://github.com/NVIDIA/pyxis) and [Enroot](https://github.com/NVIDIA/enroot) installed. -* An FSx for Lustre filesystem mounted on `/fsx`. +- A functional Slurm cluster on AWS. 
+- Docker, [Pyxis](https://github.com/NVIDIA/pyxis) and [Enroot](https://github.com/NVIDIA/enroot) installed. +- An FSx for Lustre filesystem mounted on `/fsx`. It is recommended that you use the templates in the architectures [directory](../../1.architectures) + ## 1. Data Preprocessing -Before running training jobs you need to retrieve input data and preprocess it. This section of the guide you will retrieve a container then you convert it into a Squash file via [Enroot](https://github.com/NVIDIA/enroot), you will then retrieve input data ans tokenize it. +Before running training jobs you need to retrieve input data and preprocess it. This section of the guide you will retrieve a container then you convert it into a Squash file via [Enroot](https://github.com/NVIDIA/enroot), you will then retrieve input data and tokenize it. Below are the steps you need to follow: 1. Clone this repository along with submodule. -2. Build the container image with the command below in this directory +2. Build the container image with the command below in this directory. ```bash - docker build -t llm-foundry -f 0.Dockerfile . + docker build -t llm-foundry -f 0.llm-foundry.Dockerfile . ``` 3. Once the image is built, you can check if it is present with `docker images`. You should see an output similar to this one: @@ -42,7 +44,6 @@ Below are the steps you need to follow: ```bash enroot import -o /apps/llm-foundry.sqsh dockerd://llm-foundry:latest ``` - The file will be stored in the `/apps` directory. The output should look as below. ```bash @@ -98,15 +99,16 @@ Once the job completed, you will see the following data under `/fsx/my-c4-copy`. Now that the data is preprocessed, we will pretrain a MPT model with composer. + 1. Go to `slurm-scripts` subdirectory. 2. Run the training script as follows: - ```bash sbatch 2.train-mpt-manual-distributed.sbatch ``` 3. The training starts running and should produce an output similar to below if successful. + ``` ... 
0: [batch=1/300000000]: @@ -130,3 +132,4 @@ Now that the data is preprocessed, we will pretrain a MPT model with composer. 0: Train time/val: 0.0000 ... ``` + From 5bf79ba7f35cc9955cc4fb0427ff02affb9be23e Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Wed, 27 Sep 2023 23:20:33 +0900 Subject: [PATCH 119/648] add pmix dependencies --- 3.test_cases/3.MPT/0.llm-foundry.Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/3.test_cases/3.MPT/0.llm-foundry.Dockerfile b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile index 58a90b6b..a59988cb 100644 --- a/3.test_cases/3.MPT/0.llm-foundry.Dockerfile +++ b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile @@ -9,7 +9,8 @@ ARG OPEN_MPI_PATH=/opt/amazon/openmpi RUN apt-get update -y RUN apt-get remove -y --allow-change-held-packages \ libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 \ - libnccl2 libnccl-dev libibnetdisc5 libibmad5 libibumad3 + libnccl2 libnccl-dev libibnetdisc5 libibmad5 libibumad3 \ + libpmix-dev libpmix2 RUN rm -rf /opt/hpcx \ && rm -rf /usr/local/mpi \ && rm -rf /usr/local/ucx \ From 70dfc75a2543ad4532cda02deb79830ee99f9289 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Wed, 27 Sep 2023 23:23:10 +0900 Subject: [PATCH 120/648] remove redundant pip --- 3.test_cases/3.MPT/0.llm-foundry.Dockerfile | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/3.test_cases/3.MPT/0.llm-foundry.Dockerfile b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile index a59988cb..d4761705 100644 --- a/3.test_cases/3.MPT/0.llm-foundry.Dockerfile +++ b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile @@ -9,8 +9,7 @@ ARG OPEN_MPI_PATH=/opt/amazon/openmpi RUN apt-get update -y RUN apt-get remove -y --allow-change-held-packages \ libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 \ - libnccl2 libnccl-dev libibnetdisc5 libibmad5 libibumad3 \ - libpmix-dev libpmix2 + libnccl2 libnccl-dev libibnetdisc5 libibmad5 libibumad3 RUN rm -rf /opt/hpcx \ && rm -rf /usr/local/mpi \ && rm -rf /usr/local/ucx \ @@ 
-34,6 +33,8 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \ cmake \ apt-utils \ libhwloc-dev \ + libpmix-dev \ + libpmix2 \ aptitude && \ DEBIAN_FRONTEND=noninteractive apt autoremove -y @@ -91,9 +92,6 @@ ENV LD_PRELOAD=/opt/nccl/build/lib/libnccl.so RUN echo "hwloc_base_binding_policy = none" >> /opt/amazon/openmpi/etc/openmpi-mca-params.conf \ && echo "rmaps_base_mapping_policy = slot" >> /opt/amazon/openmpi/etc/openmpi-mca-params.conf -RUN pip3 install awscli -RUN pip3 install pynvml - RUN mv $OPEN_MPI_PATH/bin/mpirun $OPEN_MPI_PATH/bin/mpirun.real \ && echo '#!/bin/bash' > $OPEN_MPI_PATH/bin/mpirun \ && echo '/opt/amazon/openmpi/bin/mpirun.real "$@"' >> $OPEN_MPI_PATH/bin/mpirun \ From cde7893dfd3a3ecf3334725f62eef5f2fe498b82 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 27 Sep 2023 09:47:01 -0500 Subject: [PATCH 121/648] Change apt to apt-get in MPT dockerfile --- 3.test_cases/3.MPT/0.llm-foundry.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/3.MPT/0.llm-foundry.Dockerfile b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile index d4761705..4675f27e 100644 --- a/3.test_cases/3.MPT/0.llm-foundry.Dockerfile +++ b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile @@ -36,7 +36,7 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \ libpmix-dev \ libpmix2 \ aptitude && \ - DEBIAN_FRONTEND=noninteractive apt autoremove -y + DEBIAN_FRONTEND=noninteractive apt-get autoremove -y ENV LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu/:/usr/local/cuda/extras/CUPTI/lib64:/opt/amazon/openmpi/lib:/opt/nccl/build/lib:/opt/amazon/efa/lib:/opt/aws-ofi-nccl/install/lib:$LD_LIBRARY_PATH ENV PATH=/opt/amazon/openmpi/bin/:/opt/amazon/efa/bin:/usr/bin:/usr/local/bin:$PATH From d80c33e34a5a7201624cb542f157ef19f903f8e2 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 27 Sep 2023 15:41:52 -0500 Subject: [PATCH 122/648] Set versions for MPT dockerfile EFA packages --- 
3.test_cases/3.MPT/0.llm-foundry.Dockerfile | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/3.test_cases/3.MPT/0.llm-foundry.Dockerfile b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile index 4675f27e..9a0a3fc9 100644 --- a/3.test_cases/3.MPT/0.llm-foundry.Dockerfile +++ b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile @@ -1,11 +1,11 @@ FROM mosaicml/pytorch:2.0.1_cu118-python3.10-ubuntu20.04 ARG EFA_INSTALLER_VERSION=1.26.1 -ARG AWS_OFI_NCCL_VERSION=master +ARG AWS_OFI_NCCL_VERSION=1.7.2-aws ARG NCCL_TESTS_VERSION=master ARG NCCL_VERSION=v2.12.7-1 +ARG LLM_FOUNDRY_VERSION=0.2.0 ARG OPEN_MPI_PATH=/opt/amazon/openmpi - RUN apt-get update -y RUN apt-get remove -y --allow-change-held-packages \ libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 \ @@ -73,7 +73,7 @@ RUN export OPAL_PREFIX="" \ --with-cuda=/usr/local/cuda \ --with-nccl=/opt/nccl/build \ --with-mpi=/opt/amazon/openmpi/ \ - && make && make install + && make -j && make install ################################################### ## Install NCCL-tests @@ -101,11 +101,16 @@ RUN mv $OPEN_MPI_PATH/bin/mpirun $OPEN_MPI_PATH/bin/mpirun.real \ # Transformers dependencies used in the model ###################### - -COPY llm-foundry llm-foundry -RUN cd llm-foundry \ +RUN git clone https://github.com/mosaicml/llm-foundry.git llm-foundry \ + && cd llm-foundry \ + && git checkout $LLM_FOUNDRY_VERSION \ && pip install -e ".[gpu]" \ && pip install xformers nvtx 'flash-attn==v1.0.3.post0' + +###################### +# Install Nsight Systems +###################### + RUN wget https://developer.download.nvidia.com/devtools/nsight-systems/NsightSystems-linux-cli-public-2023.2.1.122-3259852.deb RUN apt-get install -y libglib2.0-0 \ && dpkg -i NsightSystems-linux-cli-public-2023.2.1.122-3259852.deb From da669ea345c541723d9e4f749cca97eaa86ffb5b Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 27 Sep 2023 15:42:40 -0500 Subject: [PATCH 123/648] Update MPT for environment variables 
instead of fixed path --- 3.test_cases/3.MPT/README.md | 49 +++++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 15 deletions(-) diff --git a/3.test_cases/3.MPT/README.md b/3.test_cases/3.MPT/README.md index 9c96495e..18f86d41 100644 --- a/3.test_cases/3.MPT/README.md +++ b/3.test_cases/3.MPT/README.md @@ -15,16 +15,24 @@ This guide assumes that you have the following: - Docker, [Pyxis](https://github.com/NVIDIA/pyxis) and [Enroot](https://github.com/NVIDIA/enroot) installed. - An FSx for Lustre filesystem mounted on `/fsx`. -It is recommended that you use the templates in the architectures [directory](../../1.architectures) - +It is recommended that you use the templates in the architectures [directory](../../1.architectures). You will need to setup the following environment variables before running the scripts: + +```bash +export APPS_PATH=/apps +export ENROOT_IMAGE=$APPS_PATH/llm-foundry.sqsh +export FSX_PATH=/fsx +export DATA_PATH=$FSX_PATH/c4-dataset +export TEST_CASE_PATH=${HOME}/3.MPT # where you copy the test case or set to your test case path +cd $TEST_CASE_PATH +``` -## 1. Data Preprocessing +## 1. Build the container -Before running training jobs you need to retrieve input data and preprocess it. This section of the guide you will retrieve a container then you convert it into a Squash file via [Enroot](https://github.com/NVIDIA/enroot), you will then retrieve input data and tokenize it. +Before running training jobs you need to retrieve input data and preprocess it. This section of the guide you will retrieve a container then you convert it into a Squash file via [Enroot](https://github.com/NVIDIA/enroot). Below are the steps you need to follow: -1. Clone this repository along with submodule. +1. Copy the test case files to your cluster. You will need `0.llm-foundry.Dockerfile`, 2. Build the container image with the command below in this directory. ```bash @@ -42,9 +50,9 @@ Below are the steps you need to follow: 4. 
Create the squash file with the command below. ```bash - enroot import -o /apps/llm-foundry.sqsh dockerd://llm-foundry:latest + enroot import -o ${ENROOT_IMAGE} dockerd://llm-foundry:latest ``` - The file will be stored in the `/apps` directory. The output should look as below. + The file will be stored in the `/apps` directory (default). The output should look as below. ```bash [INFO] Fetching image @@ -64,22 +72,33 @@ Below are the steps you need to follow: ... ``` - It will take roughly 5 minutes. -5. Go to `slurm-scripts` subdirectory and then run `1.c4-preprocess.sbatch` script +It will take around 5 minutes to convert the container image from Docker to the Enroot format. Once done proceed to the next stage. + +## 2. Run the processing job +Before running training jobs you need to retrieve input data and preprocess it before running the training job. + +1. Run a preprocessing job by submitting the script `1.c4-preprocess.sbatch` to Slurm. The command will return the Slurm Job ID. You can use `squeue` to consult the status of your jobs. ```bash sbatch 1.c4-preprocess.sbatch ``` + It will create the streaming dataset for composer library using C4 dataset in `/fsx/c4-dataset` (default). -It will create the streaming dataset for composer library using C4 dataset. - -7. You will see a new file in your current working directory called `c4-preprocess-XY.out` where `XY` is a number. This is your outputfile and will capture the `STDOUT` and `STDERR` from your job. You can check how it progresses via the command `tail -f slurm-XY.out` but with the relevant filename. - -Once the job completed, you will see the following data under `/fsx/my-c4-copy`. +2. You see a new file in your current working directory called `c4-preprocess_XY.out` where `XY` corresponds the Slurm job ID. This is your output file and will capture the `STDOUT` and `STDERR` from your job. 
You can check how it progresses via the command `tail -f c4-preprocess_XY.out` with the correct job ID instead of `XY`. If running successfully, the job will generate an output similar to the except below. + ```console + Downloading (…)okenizer_config.json: 100%|██████████| 156/156 [00:00<00:00, 1.09MB/s] + ... + Downloading metadata: 100%|██████████| 2.40M/2.40M [00:01<00:00, 2.05MB/s] + ... + train_small: 32%|███▏ | 31745/100000 [01:51<00:19, 3538.83it/s] + ... + val_small: 100%|██████████| 10000/10000 [00:19<00:00, 514.19it/s] + ``` +3. After the job completed, check `/fsx/c4-dataset` (default) which will contain a structure similar as below ```bash - /fsx/my-copy-c4/ + /fsx/c4-dataset/ ├── train_small │ ├── index.json │ ├── shard.00000.mds From c8326102e1f271d99678a6ae8cd25d125cf41efc Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 27 Sep 2023 15:43:15 -0500 Subject: [PATCH 124/648] Change MPT preprocessing sbatch file name --- 3.test_cases/3.MPT/1.c4-preprocess.sbatch | 24 +++++++++++ .../slurm-scripts/1.c4-preprocess.sbatch | 42 ------------------- 2 files changed, 24 insertions(+), 42 deletions(-) create mode 100644 3.test_cases/3.MPT/1.c4-preprocess.sbatch delete mode 100644 3.test_cases/3.MPT/slurm-scripts/1.c4-preprocess.sbatch diff --git a/3.test_cases/3.MPT/1.c4-preprocess.sbatch b/3.test_cases/3.MPT/1.c4-preprocess.sbatch new file mode 100644 index 00000000..1cff351b --- /dev/null +++ b/3.test_cases/3.MPT/1.c4-preprocess.sbatch @@ -0,0 +1,24 @@ +#!/bin/bash + +#SBATCH -N 1 # number of nodes to use +#SBATCH --job-name=c4-preprocess # name of your job +#SBATCH --output=%x_%j.out # stdout output +#SBATCH --ntasks-per-node 8 # Number of processes per node +#SBATCH --exclusive + +# default variables for Enroot, if these variables are defined then use them +: "${APPS_PATH:=/apps}" +: "${IMAGE:=$APPS_PATH/llm-foundry.sqsh}" +: "${FSX_PATH:=/fsx}" +: "${DATA_PATH:=$FSX_PATH/c4-dataset}" +: "${CONTAINER_MOUNT:=$FSX_PATH:$FSX_PATH}" + +declare -a 
ARGS=( + --container-image $IMAGE + --container-mounts $CONTAINER_MOUNT +) + +srun -l "${ARGS[@]}" python /llm-foundry/scripts/data_prep/convert_dataset_hf.py \ + --dataset c4 --data_subset en \ + --out_root ${DATA_PATH} --splits train_small val_small \ + --concat_tokens 2048 --tokenizer EleutherAI/gpt-neox-20b --eos_text '<|endoftext|>' diff --git a/3.test_cases/3.MPT/slurm-scripts/1.c4-preprocess.sbatch b/3.test_cases/3.MPT/slurm-scripts/1.c4-preprocess.sbatch deleted file mode 100644 index d2e6a7cb..00000000 --- a/3.test_cases/3.MPT/slurm-scripts/1.c4-preprocess.sbatch +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash - -#SBATCH -N 1 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs -#SBATCH --job-name=data-preprocess # name of your job -#SBATCH --output=logs/%x_%j.out -#SBATCH --error=logs/%x_%j.err -#SBATCH --ntasks-per-node 8 # Number of GPU per node -#SBATCH --gres=gpu:8 # number of GPU we reserve -#SBATCH --exclusive -#SBATCH --wait-all-nodes=1 - - -# default variables for Enroot -: "${APPS_PATH:=/apps}" -: "${IMAGE:=$APPS_PATH/llm-foundry.sqsh}" -: "${DATA_PATH:=/fsx}" -: "${FSX_MOUNT:=$DATA_PATH:$DATA_PATH}" - -## Plenty of EFA level variables -export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d -export FI_EFA_FORK_SAFE=1 -# export NCCL_ALGO=Ring -export FI_LOG_LEVEL=1 -export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons -export FI_EFA_ENABLE_SHM_TRANSFER=1 -# https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 -# https://github.com/pytorch/pytorch/issues/68893 -#export NCCL_SOCKET_IFNAME=ens -export NCCL_ASYNC_ERROR_HANDLING=1 -export NCCL_DEBUG=INFO - - -declare -a ARGS=( - --container-image $IMAGE - --container-mounts $FSX_MOUNT -) - - -srun -l "${ARGS[@]}" python /llm-foundry/scripts/data_prep/convert_dataset_hf.py \ - --dataset c4 --data_subset en \ - --out_root /fsx/my-copy-c4 --splits train_small val_small \ - --concat_tokens 2048 --tokenizer EleutherAI/gpt-neox-20b --eos_text 
'<|endoftext|>' From f6655696085e913858fb1f87b3133536c8544d67 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 27 Sep 2023 16:05:14 -0500 Subject: [PATCH 125/648] Add authors and reviewers to MPT test case --- 3.test_cases/3.MPT/README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/3.test_cases/3.MPT/README.md b/3.test_cases/3.MPT/README.md index 18f86d41..792e70f3 100644 --- a/3.test_cases/3.MPT/README.md +++ b/3.test_cases/3.MPT/README.md @@ -152,3 +152,8 @@ Now that the data is preprocessed, we will pretrain a MPT model with composer. ... ``` +## Authors / Reviewers + +- [A] Keita Watanabe - mlkeita@ +- [R] Pierre-Yves Aquilanti - pierreya@ +- [R] Verdi March - marcverd@ From e3110c618c63e4d8059c63a4caa343dd1f9d4049 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Wed, 27 Sep 2023 16:17:58 -0500 Subject: [PATCH 126/648] Update MPT training file location and start updating readme --- .../2.train-mpt-manual-distributed.sbatch | 0 3.test_cases/3.MPT/README.md | 13 ++++++------- 3.test_cases/3.MPT/slurm-scripts/run-composer.sh | 2 +- 3 files changed, 7 insertions(+), 8 deletions(-) rename 3.test_cases/3.MPT/{slurm-scripts => }/2.train-mpt-manual-distributed.sbatch (100%) diff --git a/3.test_cases/3.MPT/slurm-scripts/2.train-mpt-manual-distributed.sbatch b/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch similarity index 100% rename from 3.test_cases/3.MPT/slurm-scripts/2.train-mpt-manual-distributed.sbatch rename to 3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch diff --git a/3.test_cases/3.MPT/README.md b/3.test_cases/3.MPT/README.md index 792e70f3..c47e9333 100644 --- a/3.test_cases/3.MPT/README.md +++ b/3.test_cases/3.MPT/README.md @@ -114,21 +114,20 @@ Before running training jobs you need to retrieve input data and preprocess it b └── shard.00002.mds ``` -## 2. Distributed training +Preprocessing is done, you will run a training job in the next stage. 
-Now that the data is preprocessed, we will pretrain a MPT model with composer. +## 3. Distributed training of MPT +Now that the data is preprocessed, we will pretrain a MPT model with composer. -1. Go to `slurm-scripts` subdirectory. -2. Run the training script as follows: +1. Run the training job by submitting the script `2.train-mpt-manual-distributed.sbatch` to Slurm via `sbatch` as shown below. ```bash sbatch 2.train-mpt-manual-distributed.sbatch ``` -3. The training starts running and should produce an output similar to below if successful. +2. The training starts running and should produce an output similar to below if successful. - -``` +```console ... 0: [batch=1/300000000]: 0: Train time/epoch: 0 diff --git a/3.test_cases/3.MPT/slurm-scripts/run-composer.sh b/3.test_cases/3.MPT/slurm-scripts/run-composer.sh index 06154eb3..2f875c38 100644 --- a/3.test_cases/3.MPT/slurm-scripts/run-composer.sh +++ b/3.test_cases/3.MPT/slurm-scripts/run-composer.sh @@ -46,7 +46,7 @@ nsys profile --duration 300 --delay 30 --force-overwrite=true -w true -t cuda,n --master_port ${MASTER_PORT} \ --verbose /apps/reference-architectures/3.test_cases/3.MPT/llm-foundry/scripts/train/train.py \ /llm-foundry/scripts/train/yamls/pretrain/${MODEL}.yaml \ - data_local=/fsx/my-copy-c4 \ + data_local=/fsx/c4-dataset \ train_loader.dataset.split=train_small \ eval_loader.dataset.split=val_small \ max_duration=3ba \ From 5796b19a01075957599dcb85524fa77840102350 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Wed, 27 Sep 2023 23:59:38 +0000 Subject: [PATCH 127/648] Merge branch 'p5-experiments' of ssh.gitlab.aws.dev:smml/reference-architectures into p5-experiments --- 3.test_cases/3.MPT/0.llm-foundry.Dockerfile | 7 ------- 3.test_cases/3.MPT/slurm-scripts/run-composer.sh | 2 +- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/3.test_cases/3.MPT/0.llm-foundry.Dockerfile b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile index 9a0a3fc9..bcbbb274 100644 --- 
a/3.test_cases/3.MPT/0.llm-foundry.Dockerfile +++ b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile @@ -107,10 +107,3 @@ RUN git clone https://github.com/mosaicml/llm-foundry.git llm-foundry \ && pip install -e ".[gpu]" \ && pip install xformers nvtx 'flash-attn==v1.0.3.post0' -###################### -# Install Nsight Systems -###################### - -RUN wget https://developer.download.nvidia.com/devtools/nsight-systems/NsightSystems-linux-cli-public-2023.2.1.122-3259852.deb -RUN apt-get install -y libglib2.0-0 \ - && dpkg -i NsightSystems-linux-cli-public-2023.2.1.122-3259852.deb diff --git a/3.test_cases/3.MPT/slurm-scripts/run-composer.sh b/3.test_cases/3.MPT/slurm-scripts/run-composer.sh index 2f875c38..fac427c0 100644 --- a/3.test_cases/3.MPT/slurm-scripts/run-composer.sh +++ b/3.test_cases/3.MPT/slurm-scripts/run-composer.sh @@ -38,7 +38,7 @@ mkdir -p /fsx/logs MODEL=mpt-7b PROFILER="nsys profile --force-overwrite=true -w true -t cuda,nvtx,osrt,cudnn,cublas -s cpu --capture-range=cudaProfilerApi --cudabacktrace=true -x true -o /fsx/logs/profile-mpt30b-rank-${NODE_RANK}" start=${SECONDS} -nsys profile --duration 300 --delay 30 --force-overwrite=true -w true -t cuda,nvtx,osrt,cudnn,cublas -s cpu -x true -o /fsx/logs/${MODEL}-p4d-worldsize-${WORLD_SIZE}-rank-${NODE_RANK}.profile composer \ +composer \ --world_size ${WORLD_SIZE} \ --nproc ${N_PROC} \ --node_rank ${NODE_RANK} \ From a01457ebae573b7f1f2a05f9d8a29ccd975de320 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Wed, 27 Sep 2023 23:59:48 +0000 Subject: [PATCH 128/648] Remove Nsight from MPT example --- 3.test_cases/3.MPT/slurm-scripts/run-composer.sh | 9 --------- 1 file changed, 9 deletions(-) diff --git a/3.test_cases/3.MPT/slurm-scripts/run-composer.sh b/3.test_cases/3.MPT/slurm-scripts/run-composer.sh index fac427c0..979f5607 100644 --- a/3.test_cases/3.MPT/slurm-scripts/run-composer.sh +++ b/3.test_cases/3.MPT/slurm-scripts/run-composer.sh @@ -25,18 +25,9 @@ export NCCL_DEBUG=INFO echo "Sanity 
check" nvidia-smi -PROFILE_FILE=/fsx/logs/profile -# if [ ${NODE_RANK} -eq 0 ]; then -# # Use nvidia-nsight only on RANK 0 -# mkdir -p /fsx/logs -# PROFILER="nsys profile -w true -t cuda,nvtx,osrt,cudnn,cublas -s cpu --capture-range=cudaProfilerApi --cudabacktrace=true -x true -o /fsx/logs/profile-rank-${NODE_RANK}" -# else -# PROFILER="" -# fi mkdir -p /fsx/logs MODEL=mpt-7b -PROFILER="nsys profile --force-overwrite=true -w true -t cuda,nvtx,osrt,cudnn,cublas -s cpu --capture-range=cudaProfilerApi --cudabacktrace=true -x true -o /fsx/logs/profile-mpt30b-rank-${NODE_RANK}" start=${SECONDS} composer \ --world_size ${WORLD_SIZE} \ From 5865eb0a61fd1027a152fed424f85efd0134f17f Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Thu, 28 Sep 2023 04:37:27 +0000 Subject: [PATCH 129/648] use train script inside container --- 3.test_cases/3.MPT/slurm-scripts/run-composer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/3.MPT/slurm-scripts/run-composer.sh b/3.test_cases/3.MPT/slurm-scripts/run-composer.sh index 979f5607..4f846b22 100644 --- a/3.test_cases/3.MPT/slurm-scripts/run-composer.sh +++ b/3.test_cases/3.MPT/slurm-scripts/run-composer.sh @@ -35,7 +35,7 @@ composer \ --node_rank ${NODE_RANK} \ --master_addr ${MASTER_ADDR} \ --master_port ${MASTER_PORT} \ - --verbose /apps/reference-architectures/3.test_cases/3.MPT/llm-foundry/scripts/train/train.py \ + --verbose /llm-foundry/scripts/train/train.py \ /llm-foundry/scripts/train/yamls/pretrain/${MODEL}.yaml \ data_local=/fsx/c4-dataset \ train_loader.dataset.split=train_small \ From df8a865f0691317fa300ec090f11a6d023a19a56 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Thu, 28 Sep 2023 04:42:09 +0000 Subject: [PATCH 130/648] remove llm-foundry submodule --- .gitmodules | 3 --- 3.test_cases/3.MPT/llm-foundry | 1 - 2 files changed, 4 deletions(-) delete mode 160000 3.test_cases/3.MPT/llm-foundry diff --git a/.gitmodules b/.gitmodules index 7df5b04e..1653d443 100644 --- a/.gitmodules 
+++ b/.gitmodules @@ -7,6 +7,3 @@ [submodule "2.test_cases/4.MPT/llm-foundry"] path = 2.test_cases/3.MPT/llm-foundry url = https://github.com/mosaicml/llm-foundry.git -[submodule "3.test_cases/3.MPT/llm-foundry"] - path = 3.test_cases/3.MPT/llm-foundry - url = https://github.com/mosaicml/llm-foundry.git diff --git a/3.test_cases/3.MPT/llm-foundry b/3.test_cases/3.MPT/llm-foundry deleted file mode 160000 index 2f49eb7f..00000000 --- a/3.test_cases/3.MPT/llm-foundry +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 2f49eb7fa0486acf1d6c09ed90c8856ccdc31b4a From e66158874b7e290bc5490cf1be64c563e1e1be96 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Thu, 28 Sep 2023 04:43:55 +0000 Subject: [PATCH 131/648] remove nccl-test and mpi --- 3.test_cases/3.MPT/0.llm-foundry.Dockerfile | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/3.test_cases/3.MPT/0.llm-foundry.Dockerfile b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile index bcbbb274..6f5e1d62 100644 --- a/3.test_cases/3.MPT/0.llm-foundry.Dockerfile +++ b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile @@ -33,8 +33,6 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \ cmake \ apt-utils \ libhwloc-dev \ - libpmix-dev \ - libpmix2 \ aptitude && \ DEBIAN_FRONTEND=noninteractive apt-get autoremove -y @@ -75,28 +73,12 @@ RUN export OPAL_PREFIX="" \ --with-mpi=/opt/amazon/openmpi/ \ && make -j && make install -################################################### -## Install NCCL-tests -RUN git clone https://github.com/NVIDIA/nccl-tests.git /opt/nccl-tests \ - && cd /opt/nccl-tests \ - && git checkout ${NCCL_TESTS_VERSION} \ - && make MPI=1 \ - MPI_HOME=/opt/amazon/openmpi/ \ - CUDA_HOME=/usr/local/cuda \ - NCCL_HOME=/opt/nccl/build \ - NVCC_GENCODE="-gencode=arch=compute_90,code=sm_90 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_80,code=sm_80" - RUN rm -rf /var/lib/apt/lists/* ENV LD_PRELOAD=/opt/nccl/build/lib/libnccl.so RUN echo "hwloc_base_binding_policy = none" >> 
/opt/amazon/openmpi/etc/openmpi-mca-params.conf \ && echo "rmaps_base_mapping_policy = slot" >> /opt/amazon/openmpi/etc/openmpi-mca-params.conf -RUN mv $OPEN_MPI_PATH/bin/mpirun $OPEN_MPI_PATH/bin/mpirun.real \ - && echo '#!/bin/bash' > $OPEN_MPI_PATH/bin/mpirun \ - && echo '/opt/amazon/openmpi/bin/mpirun.real "$@"' >> $OPEN_MPI_PATH/bin/mpirun \ - && chmod a+x $OPEN_MPI_PATH/bin/mpirun - ###################### # Transformers dependencies used in the model ###################### From d5624c097d1e19b38291ebef3690031749d92ab0 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Thu, 28 Sep 2023 18:16:04 +0800 Subject: [PATCH 132/648] Update MPT readme --- 3.test_cases/3.MPT/README.md | 53 ++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 24 deletions(-) diff --git a/3.test_cases/3.MPT/README.md b/3.test_cases/3.MPT/README.md index c47e9333..af299834 100644 --- a/3.test_cases/3.MPT/README.md +++ b/3.test_cases/3.MPT/README.md @@ -1,4 +1,4 @@ -# Mosaic Pretraind Transformers (MPT) Test Case +# Mosaic Pretraind Transformers (MPT) Test Case MPT are GPT-style models in [llm-foundry](https://github.com/mosaicml/llm-foundry/tree/main) with some special features -- [Flash Attention](https://arxiv.org/abs/2205.14135) for efficiency, [ALiBi](https://arxiv.org/abs/2108.12409) for context length extrapolation, and stability improvements to mitigate loss spikes. @@ -7,15 +7,15 @@ This project contains: * AWS optimized [llm-foundry](https://github.com/mosaicml/llm-foundry/tree/main) container image. * Slurm scripts for the [c4 dataset](https://huggingface.co/datasets/c4) preparation and multi-node distributed training. -## 0. Preparation +## 1. Preparation This guide assumes that you have the following: -- A functional Slurm cluster on AWS. -- Docker, [Pyxis](https://github.com/NVIDIA/pyxis) and [Enroot](https://github.com/NVIDIA/enroot) installed. -- An FSx for Lustre filesystem mounted on `/fsx`. +* A functional Slurm cluster on AWS. 
+* Docker, [Pyxis](https://github.com/NVIDIA/pyxis) and [Enroot](https://github.com/NVIDIA/enroot) installed. +* An FSx for Lustre filesystem mounted on `/fsx`. -It is recommended that you use the templates in the architectures [directory](../../1.architectures). You will need to setup the following environment variables before running the scripts: +We recommend that you setup a Slurm cluster using the templates in the architectures [directory](../../1.architectures). Before creating the Slurm cluster, you need to setup the following environment variables: ```bash export APPS_PATH=/apps @@ -26,20 +26,20 @@ export TEST_CASE_PATH=${HOME}/3.MPT # where you copy the test case or set to yo cd $TEST_CASE_PATH ``` -## 1. Build the container +then follow the detailed instructions [here](../../1.architectures/2.aws-parallelcluster/README.md). -Before running training jobs you need to retrieve input data and preprocess it. This section of the guide you will retrieve a container then you convert it into a Squash file via [Enroot](https://github.com/NVIDIA/enroot). +## 2. Build the container -Below are the steps you need to follow: +Before running training jobs, you need to use an [Enroot](https://github.com/NVIDIA/enroot) container to retrieve and preprocess the input data. Below are the steps you need to follow: 1. Copy the test case files to your cluster. You will need `0.llm-foundry.Dockerfile`, -2. Build the container image with the command below in this directory. +2. Build the Docker image with the command below in this directory. ```bash docker build -t llm-foundry -f 0.llm-foundry.Dockerfile . ``` -3. Once the image is built, you can check if it is present with `docker images`. You should see an output similar to this one: +3. Once the Docker image is built, you can check if it is present with `docker images`. You should see an output similar to this one: ```bash REPOSITORY TAG IMAGE ID CREATED SIZE @@ -47,11 +47,12 @@ Below are the steps you need to follow: ... 
``` -4. Create the squash file with the command below. +4. Convert the Docker image to a squash file with the command below. ```bash enroot import -o ${ENROOT_IMAGE} dockerd://llm-foundry:latest ``` + The file will be stored in the `/apps` directory (default). The output should look as below. ```bash @@ -72,20 +73,22 @@ Below are the steps you need to follow: ... ``` - It will take around 5 minutes to convert the container image from Docker to the Enroot format. Once done proceed to the next stage. -## 2. Run the processing job +## 3. Run the processing job -Before running training jobs you need to retrieve input data and preprocess it before running the training job. +You need to retrieve input data and preprocess it before running the training job. 1. Run a preprocessing job by submitting the script `1.c4-preprocess.sbatch` to Slurm. The command will return the Slurm Job ID. You can use `squeue` to consult the status of your jobs. + ```bash sbatch 1.c4-preprocess.sbatch ``` + It will create the streaming dataset for composer library using C4 dataset in `/fsx/c4-dataset` (default). 2. You see a new file in your current working directory called `c4-preprocess_XY.out` where `XY` corresponds the Slurm job ID. This is your output file and will capture the `STDOUT` and `STDERR` from your job. You can check how it progresses via the command `tail -f c4-preprocess_XY.out` with the correct job ID instead of `XY`. If running successfully, the job will generate an output similar to the except below. + ```console Downloading (…)okenizer_config.json: 100%|██████████| 156/156 [00:00<00:00, 1.09MB/s] ... @@ -97,6 +100,7 @@ Before running training jobs you need to retrieve input data and preprocess it b ``` 3. 
After the job completed, check `/fsx/c4-dataset` (default) which will contain a structure similar as below + ```bash /fsx/c4-dataset/ ├── train_small @@ -114,18 +118,19 @@ Before running training jobs you need to retrieve input data and preprocess it b └── shard.00002.mds ``` -Preprocessing is done, you will run a training job in the next stage. +Once preprocessing is done, you will run a training job in the next stage. + +## 4. Distributed training of MPT -## 3. Distributed training of MPT +Now that the data is preprocessed, we will pretrain a MPT model with [Mosaic Composer](https://github.com/mosaicml/composer). -Now that the data is preprocessed, we will pretrain a MPT model with composer. +1. Run a training job by submitting script `2.train-mpt-manual-distributed.sbatch` to Slurm via `sbatch` as shown below. -1. Run the training job by submitting the script `2.train-mpt-manual-distributed.sbatch` to Slurm via `sbatch` as shown below. ```bash sbatch 2.train-mpt-manual-distributed.sbatch ``` -2. The training starts running and should produce an output similar to below if successful. +2. When the training job completes successfully, it should produce an output similar to below. ```console ... @@ -151,8 +156,8 @@ Now that the data is preprocessed, we will pretrain a MPT model with composer. ... ``` -## Authors / Reviewers +## 5. 
Authors / Reviewers -- [A] Keita Watanabe - mlkeita@ -- [R] Pierre-Yves Aquilanti - pierreya@ -- [R] Verdi March - marcverd@ +* [A] Keita Watanabe - mlkeita@ +* [R] Pierre-Yves Aquilanti - pierreya@ +* [R] Verdi March - marcverd@ From 7806b911ad9dc0befc3373e1c0c0410c959d3044 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Thu, 28 Sep 2023 18:42:23 +0800 Subject: [PATCH 133/648] Add remark on tokenizers --- 3.test_cases/3.MPT/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/3.test_cases/3.MPT/README.md b/3.test_cases/3.MPT/README.md index af299834..da03842a 100644 --- a/3.test_cases/3.MPT/README.md +++ b/3.test_cases/3.MPT/README.md @@ -99,6 +99,8 @@ You need to retrieve input data and preprocess it before running the training jo val_small: 100%|██████████| 10000/10000 [00:19<00:00, 514.19it/s] ``` + Please be aware that this job downloads the tokenizer on demand (if it's not available under `./EleutherAI/gpt-neox-20b`), after which the tokenizer will be cached under `$HOME/.cache/huggingface`, and the `$HOME` directory is an NFS filesystem shared by the head node. Please consult the [HuggingFace cache management](https://huggingface.co/docs/datasets/cache) document to learn more about fine-grained control of the HuggingFace cache. + 3. 
After the job completed, check `/fsx/c4-dataset` (default) which will contain a structure similar as below ```bash From fe0405fb8bb4e1cc9cb4be658b5d338d1c4526db Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Thu, 28 Sep 2023 13:42:19 +0000 Subject: [PATCH 134/648] clean up slurm scripts --- 3.test_cases/3.MPT/0.llm-foundry.Dockerfile | 5 +- .../2.train-mpt-manual-distributed.sbatch | 19 ++++++-- 3.test_cases/3.MPT/README.md | 4 ++ .../3.MPT/slurm-scripts/run-composer.sh | 47 ------------------- 4 files changed, 21 insertions(+), 54 deletions(-) delete mode 100644 3.test_cases/3.MPT/slurm-scripts/run-composer.sh diff --git a/3.test_cases/3.MPT/0.llm-foundry.Dockerfile b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile index 6f5e1d62..0a7a2a83 100644 --- a/3.test_cases/3.MPT/0.llm-foundry.Dockerfile +++ b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile @@ -1,9 +1,9 @@ FROM mosaicml/pytorch:2.0.1_cu118-python3.10-ubuntu20.04 ARG EFA_INSTALLER_VERSION=1.26.1 -ARG AWS_OFI_NCCL_VERSION=1.7.2-aws +ARG AWS_OFI_NCCL_VERSION=v1.7.2-aws ARG NCCL_TESTS_VERSION=master ARG NCCL_VERSION=v2.12.7-1 -ARG LLM_FOUNDRY_VERSION=0.2.0 +ARG LLM_FOUNDRY_VERSION=v0.3.0 ARG OPEN_MPI_PATH=/opt/amazon/openmpi RUN apt-get update -y @@ -63,7 +63,6 @@ RUN git clone https://github.com/NVIDIA/nccl /opt/nccl \ RUN export OPAL_PREFIX="" \ && git clone https://github.com/aws/aws-ofi-nccl.git /opt/aws-ofi-nccl \ && cd /opt/aws-ofi-nccl \ - && env \ && git checkout ${AWS_OFI_NCCL_VERSION} \ && ./autogen.sh \ && ./configure --prefix=/opt/aws-ofi-nccl/install \ diff --git a/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch b/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch index 61f8c7e9..53125d93 100644 --- a/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch +++ b/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch @@ -12,7 +12,7 @@ #SBATCH --wait-all-nodes=1 set -euxo pipefail - +MODEL=${1:-mpt-7b} # default variables for Enroot : "${APPS_PATH:=/apps}" : 
"${IMAGE:=$APPS_PATH/llm-foundry.sqsh}" @@ -54,9 +54,20 @@ function run_compose() { # else # OPTION="exclude" # fi - srun --nodelist=${NODE} --ntasks=1 -l "${ARGS[@]}" bash \ - /apps/reference-architectures/3.test_cases/3.MPT/slurm-scripts/run-composer.sh \ - ${WORLD_SIZE} ${NPROC} ${NODE_RANK} ${MASTER_ADDR} ${MASTER_PORT} + srun --nodelist=${NODE} --ntasks=1 -l "${ARGS[@]}" composer \ + --world_size ${WORLD_SIZE} \ + --nproc ${NPROC} \ + --node_rank ${NODE_RANK} \ + --master_addr ${MASTER_ADDR} \ + --master_port ${MASTER_PORT} \ + --verbose /llm-foundry/scripts/train/train.py \ + /llm-foundry/scripts/train/yamls/pretrain/${MODEL}.yaml \ + data_local=/fsx/c4-dataset \ + train_loader.dataset.split=train_small \ + eval_loader.dataset.split=val_small \ + max_duration=3ba \ + eval_interval=0 \ + save_folder=${MODEL} } NODE_RANK=1 for (( NODE_RANK=1; NODE_RANK<${NNODES}; NODE_RANK++ )) diff --git a/3.test_cases/3.MPT/README.md b/3.test_cases/3.MPT/README.md index c47e9333..8aab0f4c 100644 --- a/3.test_cases/3.MPT/README.md +++ b/3.test_cases/3.MPT/README.md @@ -124,6 +124,10 @@ Now that the data is preprocessed, we will pretrain a MPT model with composer. ```bash sbatch 2.train-mpt-manual-distributed.sbatch ``` +by default it runs `mpt-7b` model. You can specify model to be trained as: + ```bash + sbatch 2.train-mpt-manual-distributed.sbatch mpt-30b + ``` 2. The training starts running and should produce an output similar to below if successful. diff --git a/3.test_cases/3.MPT/slurm-scripts/run-composer.sh b/3.test_cases/3.MPT/slurm-scripts/run-composer.sh deleted file mode 100644 index 4f846b22..00000000 --- a/3.test_cases/3.MPT/slurm-scripts/run-composer.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash -set -euxo pipefail - -export WORLD_SIZE=$1 -export N_PROC=$2 -export NODE_RANK=$3 -export MASTER_ADDR=$4 -export MASTER_PORT=$5 - -echo "Hello, I am $(hostname), processing rank ${NODE_RANK}." 
-echo "Set Environment variables for distributed training" -export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d -export FI_EFA_FORK_SAFE=1 -# export NCCL_ALGO=Ring -export FI_LOG_LEVEL=1 -export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons -export FI_EFA_ENABLE_SHM_TRANSFER=1 -# https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 -# https://github.com/pytorch/pytorch/issues/68893 -#export NCCL_SOCKET_IFNAME=ens -export NCCL_ASYNC_ERROR_HANDLING=1 -export NCCL_DEBUG=INFO -export NCCL_ASYNC_ERROR_HANDLING=1 -export NCCL_DEBUG=INFO - -echo "Sanity check" -nvidia-smi - -mkdir -p /fsx/logs -MODEL=mpt-7b -start=${SECONDS} -composer \ - --world_size ${WORLD_SIZE} \ - --nproc ${N_PROC} \ - --node_rank ${NODE_RANK} \ - --master_addr ${MASTER_ADDR} \ - --master_port ${MASTER_PORT} \ - --verbose /llm-foundry/scripts/train/train.py \ - /llm-foundry/scripts/train/yamls/pretrain/${MODEL}.yaml \ - data_local=/fsx/c4-dataset \ - train_loader.dataset.split=train_small \ - eval_loader.dataset.split=val_small \ - max_duration=3ba \ - eval_interval=0 \ - save_folder=${MODEL} -end=${SECONDS} -echo "Elapsed Time: $(($end-$start)) seconds" From 23597d19353be8c337dca528b722e397ef31f730 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Fri, 29 Sep 2023 01:34:14 +0000 Subject: [PATCH 135/648] simplify sbatch script Co-authored-by: Verdi March --- 3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch b/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch index 53125d93..7fd0e66d 100644 --- a/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch +++ b/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch @@ -40,13 +40,13 @@ declare -a ARGS=( ) NODES=( $( scontrol show hostnames $SLURM_JOB_NODELIST ) ) -NNODES=${#NODES[@]} NODES_ARRAY=($NODES) HEAD_NODE=${NODES_ARRAY[0]} 
-MASTER_ADDR=$(srun --nodes=1 --ntasks=1 -w "$HEAD_NODE" hostname --ip-address) +MASTER_ADDR=$(hostname --ip-address) MASTER_PORT=$RANDOM -NPROC=8 -WORLD_SIZE=$((NNODES * NPROC)) +NNODES=$SLURM_JOB_NUM_NODES +NPROC=$SLURM_GPUS_PER_NODE +WORLD_SIZE=$(( $NNODES * $NPROC )) srun -l "${ARGS[@]}" python -c "import streaming; streaming.base.util.clean_stale_shared_memory()" function run_compose() { # if [ ${NODE_RANK} -eq 0 ]; then From 4e3b6e9db7e67d15d1db2d82032337c40ee1a2fb Mon Sep 17 00:00:00 2001 From: Alex Iankoulski Date: Thu, 28 Sep 2023 19:07:40 -0700 Subject: [PATCH 136/648] Process feedback --- 1.architectures/4.amazon-eks/README.md | 13 ++++++------ .../4.amazon-eks/eks-g4dn-vpc.yaml | 21 +++++++++++++------ 1.architectures/4.amazon-eks/eks-g4dn.yaml | 16 ++++++++++---- .../4.amazon-eks/eks-p4de-odcr-vpc.yaml | 17 ++++++++++----- .../4.amazon-eks/eks-p4de-odcr.yaml | 12 ++++++++++- 5 files changed, 56 insertions(+), 23 deletions(-) diff --git a/1.architectures/4.amazon-eks/README.md b/1.architectures/4.amazon-eks/README.md index 83046b61..22cd0c70 100644 --- a/1.architectures/4.amazon-eks/README.md +++ b/1.architectures/4.amazon-eks/README.md @@ -23,11 +23,10 @@ The EKS cluster has two nodegroups. A `system` nodegroup is used to run pods lik The cluster configuration is specified via a yaml manifest file. If a cluster version is not specified in the manifest, then the default EKS API version will be used. For our examples we set the version to 1.27. This setting may be adjusted before creating clusters as needed. The following example cluster configurations for distributed training are provided: -* [eks-g4dn-vpc.yaml](./eks-g4dn-vpc.yaml) - a cluster using an existing VPC with a nodegroup of two g4dn.metal instances. This instance type supports Elastic Fabric Adapter (EFA), usually does not require a capacity reservation, and is a good starting point when developing distributed training architectures. 
To use this manifest, edit the vpc id and subnets, and specify the desired private subnet for the nodes. -* [eks-g4dn.yaml](./eks-g4dn.yaml) - a cluster with a nodegroup of two g4dn.metal instances, created in a new VPC. This example shows that when a VPC is not specified, one is created for the cluster. The manifest can work without any modifications, however if you wish to change the cluster name, API version, region, avilability zones, etc. you can modify the file before using it to create the cluster. -* [eks-p4de-odcr-vpc.yaml](./eks-p4de-odcr-vpc.yaml) - a cluster using an existing VPC with a nodegroup of two p4de.24xlarge instances from an existing on-demand capacity reservation (ODCR). This is the most common configuration for distributed training workloads.Edit the file to specify vpc id, subnets, and capacityReservationID. Please note that the subnet of the nodeGroup should match the one of the capacity reservation. -* [eks-p4de-odcr.yaml](./eks-p4de-odcr.yaml) - a cluster with two p4de.24xlarge instances from an existing ODCR, that will be created in a new VPC. This cluster configuration is useful for distributed training when no VPC is already available. Note that you would have to match the AZ of your ODCR in the nodegroup section of the manifest. - +* [eks-g4dn-vpc.yaml](./eks-g4dn-vpc.yaml) - a cluster using an existing VPC with a nodegroup of two g4dn.8xlarge instances. This instance type supports Elastic Fabric Adapter (EFA) and is a good starting point when developing distributed training architectures. To use this manifest, edit the vpc id and subnets, and specify the desired private subnet for the nodes. +* [eks-g4dn.yaml](./eks-g4dn.yaml) - a cluster with a nodegroup of two g4dn.8xlarge instances, created in a new VPC. This example shows that when a VPC is not specified, one is created for the cluster. 
The manifest can work without any modifications, however if you wish to change the cluster name, API version, region, instance type, avilability zones, etc. you can modify the file before using it to create the cluster. +* [eks-p4de-odcr-vpc.yaml](./eks-p4de-odcr-vpc.yaml) - a cluster using an existing VPC with a nodegroup of two p4de.24xlarge instances from an existing on-demand capacity reservation (ODCR). This is the most common configuration for distributed training workloads. Edit the file to specify vpc id, subnets, and capacityReservationID. Please note that the subnet of the nodeGroup should match the one of the capacity reservation. +* [eks-p4de-odcr.yaml](./eks-p4de-odcr.yaml) - a cluster with a nodegroup of two p4de.24xlarge instances from an existing ODCR. A new VPC will be created for this cluster. This configuration is useful for distributed training when no VPC is already available. Note that you would have to match the AZ of your ODCR in the nodegroup section of the manifest. Nodegroups in this and previous examples are fully-managed and can be accessed via the EKS console. If you are using an instance type that is not yet supported in managed nodegroups by EKS, you can define a nodegroup in a self-manged nodegroup section as shown at the end of this example. ## Cluster creation @@ -65,7 +64,7 @@ You should see a list of three nodes. 
One would be a system node instance of typ ## Cleanup -When it is time to decommission your cluster, execute the following command: +To remove your cluster, execute the following command: ``` $ kubectl delete cluster -f ./cluster.yaml @@ -87,4 +86,4 @@ Related resources for further reading can be found at the links below: * [eksctl](https://eksctl.io) * [kubectl](https://kubernetes.io/docs/reference/kubectl) * [do-framework](https://bit.ly/do-framework) - +* [aws-do-eks](https://bit.ly/do-eks) diff --git a/1.architectures/4.amazon-eks/eks-g4dn-vpc.yaml b/1.architectures/4.amazon-eks/eks-g4dn-vpc.yaml index 891497e7..a4661f0f 100644 --- a/1.architectures/4.amazon-eks/eks-g4dn-vpc.yaml +++ b/1.architectures/4.amazon-eks/eks-g4dn-vpc.yaml @@ -1,26 +1,31 @@ apiVersion: eksctl.io/v1alpha5 kind: ClusterConfig +# Update cluster name, region, and eks version if needed metadata: name: eks-g4dn-vpc version: "1.27" region: us-east-1 +# Substitute vpc and subnet ids below vpc: id: vpc-xxxxxxxxxxxxxxxxx subnets: public: - endpoint-one: + public-one: id: subnet-xxxxxxxxxxxxxxx11 - endpoint-two: + public-two: id: subnet-xxxxxxxxxxxxxxx12 private: - p4de-1a: + private-one: id: subnet-xxxxxxxxxxxxxxx21 - p4de-1c: + private-two: id: subnet-xxxxxxxxxxxxxxx22 +# Fully-managed nodegroups managedNodeGroups: + + # Nodegroup for system pods - name: sys instanceType: c5.2xlarge desiredCapacity: 1 @@ -28,8 +33,12 @@ managedNodeGroups: withAddonPolicies: autoScaler: true cloudWatch: true - - name g4dn - instanceType: g4dn.metal + + # GPU nodegroup + # Update capacityReservationID below + # specify the subnet id corresponding to the capacity reservation + - name: g4dn + instanceType: g4dn.8xlarge instancePrefix: g4dn-vpc privateNetworking: true efaEnabled: true diff --git a/1.architectures/4.amazon-eks/eks-g4dn.yaml b/1.architectures/4.amazon-eks/eks-g4dn.yaml index a20cc5b1..c5cde94f 100644 --- a/1.architectures/4.amazon-eks/eks-g4dn.yaml +++ b/1.architectures/4.amazon-eks/eks-g4dn.yaml @@ 
-1,16 +1,21 @@ apiVersion: eksctl.io/v1alpha5 kind: ClusterConfig +# Update cluster name, region, and eks version if needed metadata: - name: eks-g4dn-vpc + name: eks-g4dn version: "1.27" region: us-east-1 +# List availability zones where cluster subnets will be created availabilityZones: - us-east-1a - us-east-1c +# Fully-managed nodegroups managedNodeGroups: + + # Nodegroup for system pods - name: sys instanceType: c5.2xlarge desiredCapacity: 1 @@ -18,9 +23,12 @@ managedNodeGroups: withAddonPolicies: autoScaler: true cloudWatch: true - - name g4dn - instanceType: g4dn.metal - instancePrefix: g4dn-vpc + + # GPU nodegroup + # List availability zones where instances in from this nodegroup will be launched + - name: g4dn + instanceType: g4dn.8xlarge + instancePrefix: g4dn privateNetworking: true efaEnabled: true minSize: 0 diff --git a/1.architectures/4.amazon-eks/eks-p4de-odcr-vpc.yaml b/1.architectures/4.amazon-eks/eks-p4de-odcr-vpc.yaml index a08081ee..17b9ed24 100644 --- a/1.architectures/4.amazon-eks/eks-p4de-odcr-vpc.yaml +++ b/1.architectures/4.amazon-eks/eks-p4de-odcr-vpc.yaml @@ -1,26 +1,31 @@ apiVersion: eksctl.io/v1alpha5 kind: ClusterConfig +# Update cluster name, region, and eks version if needed metadata: name: eks-p4de-odcr-vpc version: "1.27" region: us-east-1 +# Substitute vpc and subnet ids below vpc: id: vpc-xxxxxxxxxxxxxxxxx subnets: public: - endpoint-one: + public-one: id: subnet-xxxxxxxxxxxxxxx11 - endpoint-two: + public-two: id: subnet-xxxxxxxxxxxxxxx12 private: - p4de-1a: + private-one: id: subnet-xxxxxxxxxxxxxxx21 - p4de-1c: + private-two: id: subnet-xxxxxxxxxxxxxxx22 +# Fully-managed nodegroups managedNodeGroups: + + # Nodegroup for system pods - name: sys instanceType: c5.2xlarge desiredCapacity: 1 @@ -29,7 +34,9 @@ managedNodeGroups: autoScaler: true cloudWatch: true -nodeGroups: + # GPU nodegroup + # Update capacityReservationID below + # Specify the subnet id matching the capacity reservation - name: p4de-odcr-vpc instanceType: 
p4de.24xlarge instancePrefix: p4de-odcr-vpc diff --git a/1.architectures/4.amazon-eks/eks-p4de-odcr.yaml b/1.architectures/4.amazon-eks/eks-p4de-odcr.yaml index 50b2637f..a906203c 100644 --- a/1.architectures/4.amazon-eks/eks-p4de-odcr.yaml +++ b/1.architectures/4.amazon-eks/eks-p4de-odcr.yaml @@ -1,16 +1,21 @@ apiVersion: eksctl.io/v1alpha5 kind: ClusterConfig +# Update cluster name, region, and eks version if needed metadata: name: eks-p4de-odcr version: "1.27" region: us-east-1 +# List availability zones where cluster subnets will be created availabilityZones: - us-east-1a - us-east-1c +# Fully-managed nodegroups managedNodeGroups: + + # Nodegroup for system pods - name: sys instanceType: c5.2xlarge desiredCapacity: 1 @@ -19,7 +24,9 @@ managedNodeGroups: autoScaler: true cloudWatch: true -nodeGroups: + # GPU nodegroup + # Update capacityReservationID below + # Specify the availability zone matching the capacity reservation - name: p4de-odcr instanceType: p4de.24xlarge instancePrefix: p4de-odcr @@ -40,5 +47,8 @@ nodeGroups: ebs: true fsx: true +# Self-managed nodegroups +#nodeGroups: + iam: withOIDC: true From 9555fcad8303f604494e280b074926bef192e622 Mon Sep 17 00:00:00 2001 From: Alex Iankoulski Date: Thu, 28 Sep 2023 19:32:15 -0700 Subject: [PATCH 137/648] Remove $ from cmds in README --- 1.architectures/4.amazon-eks/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/1.architectures/4.amazon-eks/README.md b/1.architectures/4.amazon-eks/README.md index 164bf9d3..9caf0631 100644 --- a/1.architectures/4.amazon-eks/README.md +++ b/1.architectures/4.amazon-eks/README.md @@ -63,7 +63,7 @@ You should see a list of three nodes. 
One would be a system node instance of typ To remove your cluster, execute the following command: ``` -$ kubectl delete cluster -f ./cluster.yaml +kubectl delete cluster -f ./cluster.yaml ``` Example output: From e3e6244fb3d8b49fddfe371bc7b3a31e5c4a0ce3 Mon Sep 17 00:00:00 2001 From: jianyinglangaws Date: Thu, 28 Sep 2023 22:53:38 -0700 Subject: [PATCH 138/648] Fix the Neuron image build --- 2.amazon_machine_images/README.md | 4 +- 2.amazon_machine_images/packer-ami.pkr.hcl | 44 ++++++++++++++++++---- 2 files changed, 39 insertions(+), 9 deletions(-) diff --git a/2.amazon_machine_images/README.md b/2.amazon_machine_images/README.md index 5c85e608..5c2b71a2 100644 --- a/2.amazon_machine_images/README.md +++ b/2.amazon_machine_images/README.md @@ -16,7 +16,7 @@ Assuming that GNU Make, Packer and Ansible installed, you can build AMIs by typi Here is an example to build a AMI for training or inference on GPU with AWS ParallelCluster: ```bash -make ami_pcluster_gpu +AWS_REGION=us-west-2 make ami_pcluster_gpu ``` The list of arguments you can use is shown in the table below with the AMI origin (what are we starting our custom AMI from) and notes regarding their content. @@ -27,7 +27,7 @@ The list of arguments you can use is shown in the table below with the AMI origi | `ami_pcluster_gpu` | [ParallelCluster AMI](https://docs.aws.amazon.com/parallelcluster/latest/ug/pcluster.list-official-images-v3.html) | Creates a custom ParallelCluter AMI for GPU based workloads, training or inference | | `ami_base` | [EC2 AL2 AMI](https://aws.amazon.com/amazon-linux-2/) | EC2 AMI with updates, Docker, Lustre, EFA, Pyxis and Enroot (everything) | | `ami_dlami_gpu` | [DLAMI](https://docs.aws.amazon.com/dlami/latest/devguide/appendix-ami-release-notes.html) | DL AMI with updated drivers, Pyxis, enroot, Lustre module client and Docker. 
| -| `ami_dlami_neuron` | [DLAMI](https://docs.aws.amazon.com/dlami/latest/devguide/appendix-ami-release-notes.html) | DL AMI for Neuron, same as above without the Nvidia stack | +| `ami_dlami_neuron` | [DLAMI-Neuron](https://docs.aws.amazon.com/dlami/latest/devguide/appendix-ami-release-notes.html) | DL AMI for Neuron, same as above without the Nvidia stack | | `ami_eks_gpu` | [EKS AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html#gpu-ami) | EKS GPU AMI with Lustre, EFA | | `ami` |AMI dependent| Build all the images | diff --git a/2.amazon_machine_images/packer-ami.pkr.hcl b/2.amazon_machine_images/packer-ami.pkr.hcl index 895240d7..e9a9a762 100644 --- a/2.amazon_machine_images/packer-ami.pkr.hcl +++ b/2.amazon_machine_images/packer-ami.pkr.hcl @@ -13,7 +13,7 @@ packer { variable "ami_name" { type = string - default = "pcluster-gpu-efa" + default = "pcluster-efa" } variable "ami_version" { @@ -92,7 +92,7 @@ data "amazon-ami" "eks-al2" { owners = ["amazon"] } -data "amazon-ami" "dlami-al2" { +data "amazon-ami" "dlami-gpu-al2" { filters = { virtualization-type = "hvm" name = "Deep Learning AMI GPU PyTorch 2.0.1 (Amazon Linux 2) *" @@ -103,6 +103,17 @@ data "amazon-ami" "dlami-al2" { owners = ["amazon"] } +data "amazon-ami" "dlami-neuron-al2" { + filters = { + virtualization-type = "hvm" + name = "Deep Learning AMI Neuron PyTorch 1.13 (Amazon Linux 2) *" + architecture= "x86_64" + root-device-type = "ebs" + } + most_recent = true + owners = ["amazon"] +} + source "amazon-ebs" "aws-pcluster-ami" { ami_name = "${var.ami_name}-pcluster-${var.ami_version}-${local.timestamp}" instance_type = "${var.instance_type}" @@ -166,11 +177,11 @@ source "amazon-ebs" "aws-eks-ami" { } } -source "amazon-ebs" "aws-dlami-ami" { +source "amazon-ebs" "aws-dlami-gpu-ami" { ami_name = "${var.ami_name}-dlami-${var.ami_version}-${local.timestamp}" instance_type = "${var.instance_type}" region = "${var.aws_region}" - source_ami = data.amazon-ami.dlami-al2.id + 
source_ami = data.amazon-ami.dlami-gpu-al2.id ssh_username = "ec2-user" launch_block_device_mappings { device_name = "/dev/xvda" @@ -181,10 +192,29 @@ source "amazon-ebs" "aws-dlami-ami" { delete_on_termination = true } run_tags = { - "Name" = "packer-builder-dlami-al2" + "Name" = "packer-builder-dlami-gpu-al2" } } +source "amazon-ebs" "aws-dlami-neuron-ami" { + ami_name = "${var.ami_name}-dlami-${var.ami_version}-${local.timestamp}" + instance_type = "${var.instance_type}" + region = "${var.aws_region}" + source_ami = data.amazon-ami.dlami-neuron-al2.id + ssh_username = "ec2-user" + launch_block_device_mappings { + device_name = "/dev/xvda" + volume_size = 100 + throughput = 1000 + iops = 10000 + volume_type = "gp3" + delete_on_termination = true + } + run_tags = { + "Name" = "packer-builder-dlami-neuron-al2" + } +} + build { name = "aws-base-gpu" sources = ["source.amazon-ebs.aws-base-ami"] @@ -235,7 +265,7 @@ build { build { name = "aws-dlami-gpu" - sources = ["source.amazon-ebs.aws-dlami-ami"] + sources = ["source.amazon-ebs.aws-dlami-gpu-ami"] provisioner "ansible" { user = "ec2-user" @@ -247,7 +277,7 @@ build { build { name = "aws-dlami-neuron" - sources = ["source.amazon-ebs.aws-dlami-ami"] + sources = ["source.amazon-ebs.aws-dlami-neuron-ami"] provisioner "ansible" { user = "ec2-user" From 72bdb2b6b5374fc071346fb3a7305a55bdcc0048 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Fri, 29 Sep 2023 17:28:19 +0800 Subject: [PATCH 139/648] Explain reason for launching composer one after another --- 3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch b/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch index 7fd0e66d..5cce5457 100644 --- a/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch +++ b/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch @@ -48,6 +48,10 @@ NNODES=$SLURM_JOB_NUM_NODES NPROC=$SLURM_GPUS_PER_NODE WORLD_SIZE=$(( 
$NNODES * $NPROC )) srun -l "${ARGS[@]}" python -c "import streaming; streaming.base.util.clean_stale_shared_memory()" + +# [20230929, mlkeita@] Launch composer on each node one-by-one. Unclear why, but +# when I try to run composer with single command with appropriate world size and +# etc. it fails to establish communication between nodes. function run_compose() { # if [ ${NODE_RANK} -eq 0 ]; then # OPTION="nodelist" From c8f3b4082db013744c2e2a1a6ec0542e4d9ae666 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Fri, 29 Sep 2023 11:47:22 -0500 Subject: [PATCH 140/648] Update readme file for custom AMI for AWS_REGION --- 2.amazon_machine_images/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/2.amazon_machine_images/README.md b/2.amazon_machine_images/README.md index 5c2b71a2..1ec714eb 100644 --- a/2.amazon_machine_images/README.md +++ b/2.amazon_machine_images/README.md @@ -16,9 +16,11 @@ Assuming that GNU Make, Packer and Ansible installed, you can build AMIs by typi Here is an example to build a AMI for training or inference on GPU with AWS ParallelCluster: ```bash -AWS_REGION=us-west-2 make ami_pcluster_gpu +make ami_pcluster_gpu ``` +> **Note**: If you encounter an error because Packer could not find the source AMI with the error `InvalidAMIID.NotFound` then prepend by `AWS_REGION` with the target region. For example, `AWS_REGION=us-east-2 make ami_pcluster_gpu`. + The list of arguments you can use is shown in the table below with the AMI origin (what are we starting our custom AMI from) and notes regarding their content.
| Argument | Source AMI | Notes | From 1a5e8a230e21e0c9f4279b7c87ebf70d7dac3c4e Mon Sep 17 00:00:00 2001 From: Maxime Hugues Date: Fri, 29 Sep 2023 14:22:52 -0500 Subject: [PATCH 141/648] Change aws_cli v2 install --- 2.amazon_machine_images/packer-ami.pkr.hcl | 2 +- .../roles/aws_cliv2/tasks/main.yml | 34 +++++++++++++------ 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/2.amazon_machine_images/packer-ami.pkr.hcl b/2.amazon_machine_images/packer-ami.pkr.hcl index e9a9a762..44566ad6 100644 --- a/2.amazon_machine_images/packer-ami.pkr.hcl +++ b/2.amazon_machine_images/packer-ami.pkr.hcl @@ -269,7 +269,7 @@ build { provisioner "ansible" { user = "ec2-user" - ansible_env_vars = ["ANSIBLE_SCP_EXTRA_ARGS='-O'"] + # ansible_env_vars = ["ANSIBLE_SCP_EXTRA_ARGS='-O'"] playbook_file = "playbook-dlami-gpu.yml" inventory_directory = "${var.inventory_directory}" } diff --git a/2.amazon_machine_images/roles/aws_cliv2/tasks/main.yml b/2.amazon_machine_images/roles/aws_cliv2/tasks/main.yml index 907395d5..1dbf34d4 100644 --- a/2.amazon_machine_images/roles/aws_cliv2/tasks/main.yml +++ b/2.amazon_machine_images/roles/aws_cliv2/tasks/main.yml @@ -1,17 +1,29 @@ --- - name: "Download AWS CLI v2" ansible.builtin.get_url: - url: "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" - dest: "/tmp/awscli-exe-linux-x86_64.zip" + url: https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip + dest: /tmp/awscli-exe-linux-x86_64.zip mode: '0755' +- name: Install packages required unzip, sudo + ansible.builtin.package: + name: + - unzip + - sudo + state: present + +- name: "Extract AWS CLI v2" + ansible.builtin.unarchive: + src: /tmp/awscli-exe-linux-x86_64.zip + dest: /tmp + remote_src: yes + +- name: "Install AWS CLI v2" + ansible.builtin.command: /tmp/aws/install + become: yes + - name: "Install and configure AWS CLI v2" - ansible.builtin.shell: | - unzip /tmp/awscli-exe-linux-x86_64.zip - sudo /tmp/aws/install - aws configure set default.s3.max_concurrent_requests 1 
- aws configure set default.s3.max_queue_size 10000 - aws configure set default.s3.multipart_threshold 64MB - aws configure set default.s3.multipart_chunksize 16MB - args: - chdir: /tmp + ansible.builtin.command: aws configure set default.s3.max_concurrent_requests 1 + # aws configure set default.s3.max_queue_size 10000 + # aws configure set default.s3.multipart_threshold 64MB + # aws configure set default.s3.multipart_chunksize 16MB From d15163713d6c32eae11e711eb675ab96da00f3b4 Mon Sep 17 00:00:00 2001 From: Maxime Hugues Date: Fri, 29 Sep 2023 14:23:08 -0500 Subject: [PATCH 142/648] Add molecule test for cliv2 --- .../aws_cliv2/molecule/default/converge.yml | 7 ++++ .../aws_cliv2/molecule/default/molecule.yml | 36 +++++++++++++++++++ .../aws_cliv2/molecule/default/prepare.yml | 14 ++++++++ .../aws_cliv2/molecule/default/verify.yml | 9 +++++ .../roles/aws_cliv2/tasks/main.yml | 2 +- 5 files changed, 67 insertions(+), 1 deletion(-) create mode 100644 2.amazon_machine_images/roles/aws_cliv2/molecule/default/converge.yml create mode 100644 2.amazon_machine_images/roles/aws_cliv2/molecule/default/molecule.yml create mode 100644 2.amazon_machine_images/roles/aws_cliv2/molecule/default/prepare.yml create mode 100644 2.amazon_machine_images/roles/aws_cliv2/molecule/default/verify.yml diff --git a/2.amazon_machine_images/roles/aws_cliv2/molecule/default/converge.yml b/2.amazon_machine_images/roles/aws_cliv2/molecule/default/converge.yml new file mode 100644 index 00000000..646a2cc5 --- /dev/null +++ b/2.amazon_machine_images/roles/aws_cliv2/molecule/default/converge.yml @@ -0,0 +1,7 @@ +--- +- name: Converge + hosts: all + tasks: + - name: "Include aws_cliv2" + include_role: + name: "aws_cliv2" diff --git a/2.amazon_machine_images/roles/aws_cliv2/molecule/default/molecule.yml b/2.amazon_machine_images/roles/aws_cliv2/molecule/default/molecule.yml new file mode 100644 index 00000000..94852275 --- /dev/null +++ 
b/2.amazon_machine_images/roles/aws_cliv2/molecule/default/molecule.yml @@ -0,0 +1,36 @@ +dependency: + name: galaxy +driver: + name: podman +platforms: + - name: AmazonLinux2 + image: amazonlinux:2 + groups: + - amzn2 + pre_build_image: true + # - name: CentOS7 + # image: centos:7 + # privileged: true + # pre_build_image: true + # - name: Ubuntu2004 + # image: ubuntu:20.04 + # groups: + # - ubuntu + # privileged: true + # pre_build_image: true + # - name: Ubuntu1804 + # image: ubuntu:18.04 + # groups: + # - ubuntu + # privileged: true + # pre_build_image: true + # - name: Ubuntu1604 + # image: ubuntu:16.04 + # groups: + # - ubuntu + # privileged: true + # pre_build_image: true +provisioner: + name: ansible +verifier: + name: ansible diff --git a/2.amazon_machine_images/roles/aws_cliv2/molecule/default/prepare.yml b/2.amazon_machine_images/roles/aws_cliv2/molecule/default/prepare.yml new file mode 100644 index 00000000..e5bbc44e --- /dev/null +++ b/2.amazon_machine_images/roles/aws_cliv2/molecule/default/prepare.yml @@ -0,0 +1,14 @@ +--- +- name: prepare Ubuntu + hosts: ubuntu + gather_facts: no + pre_tasks: + - name: install python3 + raw: test -e /usr/bin/python3 || (apt -y update && apt install -y python3) + +# - name: prepare Amazon Linux 2 +# hosts: amzn2 +# gather_facts: no +# pre_tasks: +# - name: install pam +# raw: yum install -y pam diff --git a/2.amazon_machine_images/roles/aws_cliv2/molecule/default/verify.yml b/2.amazon_machine_images/roles/aws_cliv2/molecule/default/verify.yml new file mode 100644 index 00000000..a82dd6fd --- /dev/null +++ b/2.amazon_machine_images/roles/aws_cliv2/molecule/default/verify.yml @@ -0,0 +1,9 @@ +--- +# This is an example playbook to execute Ansible tests. 
+ +- name: Verify + hosts: all + tasks: + - name: Example assertion + assert: + that: true diff --git a/2.amazon_machine_images/roles/aws_cliv2/tasks/main.yml b/2.amazon_machine_images/roles/aws_cliv2/tasks/main.yml index 1dbf34d4..ad012945 100644 --- a/2.amazon_machine_images/roles/aws_cliv2/tasks/main.yml +++ b/2.amazon_machine_images/roles/aws_cliv2/tasks/main.yml @@ -22,7 +22,7 @@ ansible.builtin.command: /tmp/aws/install become: yes -- name: "Install and configure AWS CLI v2" +- name: "Configure S3 parameters for AWS CLI v2" ansible.builtin.command: aws configure set default.s3.max_concurrent_requests 1 # aws configure set default.s3.max_queue_size 10000 # aws configure set default.s3.multipart_threshold 64MB From e24675956b1fd4bb524f3e2f3d2851ce499aa989 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Sat, 30 Sep 2023 19:29:05 +0900 Subject: [PATCH 143/648] add Neuron SDK AMI --- 2.amazon_machine_images/packer-ami.pkr.hcl | 12 ++++ .../playbook-pcluster-neuron.yml | 19 ++++++ .../roles/aws_efa/tasks/main.yml | 18 +----- .../roles/neuron_driver/defaults/main.yml | 1 + .../roles/neuron_driver/tasks/main.yml | 41 ++++++++++++ .../roles/nvidia_nccl/tasks/main.yml | 17 ++++- .../roles/pytorch_neuron/defaults/main.yml | 1 + .../roles/pytorch_neuron/tasks/main.yml | 63 +++++++++++++++++++ 8 files changed, 154 insertions(+), 18 deletions(-) create mode 100644 2.amazon_machine_images/playbook-pcluster-neuron.yml create mode 100644 2.amazon_machine_images/roles/neuron_driver/defaults/main.yml create mode 100644 2.amazon_machine_images/roles/neuron_driver/tasks/main.yml create mode 100644 2.amazon_machine_images/roles/pytorch_neuron/defaults/main.yml create mode 100644 2.amazon_machine_images/roles/pytorch_neuron/tasks/main.yml diff --git a/2.amazon_machine_images/packer-ami.pkr.hcl b/2.amazon_machine_images/packer-ami.pkr.hcl index e9a9a762..e3ee9019 100644 --- a/2.amazon_machine_images/packer-ami.pkr.hcl +++ b/2.amazon_machine_images/packer-ami.pkr.hcl @@ -251,6 
+251,18 @@ build { } } +build { + name = "aws-pcluster-neuron" + sources = ["source.amazon-ebs.aws-pcluster-ami"] + + provisioner "ansible" { + user = "ec2-user" + ansible_env_vars = ["ANSIBLE_SCP_EXTRA_ARGS='-O'"] + playbook_file = "playbook-pcluster-neuron.yml" + inventory_directory = "${var.inventory_directory}" + } +} + build { name = "aws-eks-gpu" sources = ["source.amazon-ebs.aws-eks-ami"] diff --git a/2.amazon_machine_images/playbook-pcluster-neuron.yml b/2.amazon_machine_images/playbook-pcluster-neuron.yml new file mode 100644 index 00000000..4020824c --- /dev/null +++ b/2.amazon_machine_images/playbook-pcluster-neuron.yml @@ -0,0 +1,19 @@ +--- +- name: "Configure Parallel cluster AMI" + hosts: default + become: true + gather_facts: true + gather_subset: ["kernel"] + vars: + ansible_python_interpreter: /usr/bin/python3 + roles: + - base + - packages + - aws_cliv2 + - docker + - aws_efa + - nvidia_enroot_pyxis + - aws_efa_ofi + - aws_lustre + - neuron_driver + - pytorch_neuron diff --git a/2.amazon_machine_images/roles/aws_efa/tasks/main.yml b/2.amazon_machine_images/roles/aws_efa/tasks/main.yml index d3838d42..8903dd99 100644 --- a/2.amazon_machine_images/roles/aws_efa/tasks/main.yml +++ b/2.amazon_machine_images/roles/aws_efa/tasks/main.yml @@ -3,7 +3,7 @@ ansible.builtin.get_url: url: "{{ aws_efa_url }}" dest: "{{ aws_efa_work_dir }}/{{ aws_efa_archive }}" - mode: '0644' + mode: "0644" - name: "Extract EFA installer" ansible.builtin.unarchive: @@ -17,19 +17,3 @@ cmd: "bash efa_installer.sh -y -n {{ '-g' if enable_gpus else '' }}" args: chdir: "{{ aws_efa_work_dir }}/aws-efa-installer" - -- name: Adding the NCCL preload profile file - ansible.builtin.blockinfile: - path: /etc/profile.d/nccl.sh - create: yes - block: | - export FI_PROVIDER=efa - export NCCL_PROTO=simple - -- name: Adding the NCCL preload profile file - ansible.builtin.blockinfile: - path: /etc/profile.d/nccl.sh - create: yes - block: | - export 
LD_PRELOAD=/opt/nccl/build/lib/libnccl.so:/opt/aws-ofi-nccl/lib/libnccl-net.so - diff --git a/2.amazon_machine_images/roles/neuron_driver/defaults/main.yml b/2.amazon_machine_images/roles/neuron_driver/defaults/main.yml new file mode 100644 index 00000000..ed97d539 --- /dev/null +++ b/2.amazon_machine_images/roles/neuron_driver/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/2.amazon_machine_images/roles/neuron_driver/tasks/main.yml b/2.amazon_machine_images/roles/neuron_driver/tasks/main.yml new file mode 100644 index 00000000..b53a2922 --- /dev/null +++ b/2.amazon_machine_images/roles/neuron_driver/tasks/main.yml @@ -0,0 +1,41 @@ +--- +- name: "Install git" + ansible.builtin.yum: + name: + - git + state: present +- name: "Install Neuron Driver" + block: + - name: "Configure Linux for Neuron repository update" + ansible.builtin.blockinfile: + path: /etc/yum.repos.d/neuron.repo + create: yes + block: | + [neuron] + name=Neuron YUM Repository + baseurl=https://yum.repos.neuron.amazonaws.com + enabled=1 + metadata_expire=0 + - name: "import rpm" + ansible.builtin.shell: | + rpm --import https://yum.repos.neuron.amazonaws.com/GPG-PUB-KEY-AMAZON-AWS-NEURON.PUB + - name: "Update yum" + ansible.builtin.yum: + name: "*" + state: latest + - name: "Install OS headers" + ansible.builtin.shell: + cmd: yum install -y kernel-devel-$(uname -r) kernel-headers-$(uname -r) + - name: "Install Neuron Driver, Runtime, and Tools" + ansible.builtin.yum: + name: + - aws-neuronx-dkms-2.* + - aws-neuronx-collectives-2.* + - aws-neuronx-runtime-lib-2.* + - aws-neuronx-tools-2.* + - name: "Set PATH" + ansible.builtin.blockinfile: + path: /etc/profile.d/neuron.sh + create: yes + block: | + export PATH=$PATH:/opt/aws/neuron/bin diff --git a/2.amazon_machine_images/roles/nvidia_nccl/tasks/main.yml b/2.amazon_machine_images/roles/nvidia_nccl/tasks/main.yml index e8fe335a..2035559e 100644 --- a/2.amazon_machine_images/roles/nvidia_nccl/tasks/main.yml +++ 
b/2.amazon_machine_images/roles/nvidia_nccl/tasks/main.yml @@ -14,7 +14,7 @@ ansible.builtin.git: repo: https://github.com/{{ nccl_repo }}/nccl.git dest: /opt/nccl - version: '{{ nccl_tag }}' + version: "{{ nccl_tag }}" - name: Build NCCL ansible.builtin.shell: | @@ -30,6 +30,21 @@ export LD_LIBRARY_PATH=/opt/nccl/build/lib:$LD_LIBRARY_PATH export NCCL_PROTO=simple +- name: Adding the NCCL preload profile file + ansible.builtin.blockinfile: + path: /etc/profile.d/nccl.sh + create: yes + block: | + export FI_PROVIDER=efa + export NCCL_PROTO=simple + +- name: Adding the NCCL preload profile file + ansible.builtin.blockinfile: + path: /etc/profile.d/nccl.sh + create: yes + block: | + export LD_PRELOAD=/opt/nccl/build/lib/libnccl.so:/opt/aws-ofi-nccl/lib/libnccl-net.so + - name: Clone Nvidia NCCL tests git repository when: install_nccl_tests block: diff --git a/2.amazon_machine_images/roles/pytorch_neuron/defaults/main.yml b/2.amazon_machine_images/roles/pytorch_neuron/defaults/main.yml new file mode 100644 index 00000000..ed97d539 --- /dev/null +++ b/2.amazon_machine_images/roles/pytorch_neuron/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/2.amazon_machine_images/roles/pytorch_neuron/tasks/main.yml b/2.amazon_machine_images/roles/pytorch_neuron/tasks/main.yml new file mode 100644 index 00000000..4b55ccc4 --- /dev/null +++ b/2.amazon_machine_images/roles/pytorch_neuron/tasks/main.yml @@ -0,0 +1,63 @@ +--- +- name: "Enable Python38" + ansible.builtin.shell: | + amazon-linux-extras enable python3.8 +- name: "Install python and gcc" + ansible.builtin.yum: + name: + - gcc-c++ + - python38 + - python38-devel + state: present + +- name: "Install PyTorch Neuron" + ansible.builtin.shell: | + # Create Python venv + python3.8 -m venv aws_neuron_venv_pytorch + + # Activate Python venv + source aws_neuron_venv_pytorch/bin/activate + python3.8 -m pip install -U pip + + # Install Jupyter notebook kernel + pip install ipykernel + python3.8 -m ipykernel install --user --name 
aws_neuron_venv_pytorch --display-name "Python (torch-neuronx)" + pip install jupyter notebook + pip install environment_kernels + + # Set pip repository pointing to the Neuron repository + python3.8 -m pip config set global.extra-index-url https://pip.repos.neuron.amazonaws.com + + # Install wget, awscli + python3.8 -m pip install wget + python3.8 -m pip install awscli + + # Install Neuron Compiler and Framework + python3.8 -m pip install neuronx-cc==2.* torch-neuronx torchvision + args: + chdir: /home/ec2-user + +- name: "Clone neuronx-nemo-megatron" + ansible.builtin.git: + repo: https://github.com/aws-neuron/neuronx-nemo-megatron.git + dest: /home/ec2-user/neuronx-nemo-megatron + +- name: "Install neuronx-nemo-megatron" + ansible.builtin.shell: | + # Activate Python venv + source aws_neuron_venv_pytorch/bin/activate + cd neuronx-nemo-megatron + pip install wheel + ./build.sh + pip3 install ./build/*.whl + pip3 install -r requirements.txt torch==1.13.1 protobuf==3.20.3 + args: + chdir: /home/ec2-user + +- name: "Build the Megatron helper module" + ansible.builtin.shell: | + # Activate Python venv + source aws_neuron_venv_pytorch/bin/activate + python -c "from nemo.collections.nlp.data.language_modeling.megatron.dataset_utils import compile_helper; compile_helper()" + args: + chdir: /home/ec2-user From 75facdd80ec5d0696590e419f850de241eb280eb Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Sat, 30 Sep 2023 19:30:21 +0900 Subject: [PATCH 144/648] add pcluster template --- .../distributed-training-trn1_custom_ami.yaml | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml index c99ac4d9..9d686f98 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml @@ 
-31,8 +31,6 @@ Scheduling: Networking: SubnetIds: - PLACEHOLDER_PRIVATE_SUBNET - PlacementGroup: - Enabled: true ComputeSettings: LocalStorage: EphemeralVolume: @@ -43,16 +41,19 @@ Scheduling: # with a targeted ODCRs. You can also use a capacity resource group and # CapacityReservationResourceGroupArn if you want to regroup # multiple reservations - CapacityReservationTarget: - CapacityReservationId: PLACEHOLDER_CAPACITY_RESERVATION_ID ComputeResources: - Name: distributed-ml InstanceType: trn1.32xlarge - MinCount: 4 # if min = max then capacity is maintained and will - MaxCount: 4 # not scale down + MinCount: PLACEHOLDER_NUM_INSTANCES # if min = max then capacity is maintained and will + MaxCount: PLACEHOLDER_NUM_INSTANCES # not scale down Efa: Enabled: true - JobExclusiveAllocation: true # GenAI training likes to gobble all accelerators in an instance + CapacityReservationTarget: + CapacityReservationId: PLACEHOLDER_CAPACITY_RESERVATION_ID + Networking: + PlacementGroup: + Enabled: true + Id: PLACEHOLDER_PLACEMENT_GROUP SharedStorage: - MountDir: /fsx Name: fsx From f4b654618f4b6d85f0b46daaed5ccbc59bdeca45 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Sat, 30 Sep 2023 09:47:15 -0500 Subject: [PATCH 145/648] Add comments and fix language in MPT --- .../3.MPT/2.train-mpt-manual-distributed.sbatch | 16 ++++++++++------ 3.test_cases/3.MPT/README.md | 4 ++-- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch b/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch index 5cce5457..8e6ba806 100644 --- a/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch +++ b/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch @@ -2,17 +2,19 @@ #SBATCH --nodes=2 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs #SBATCH --ntasks=2 #SBATCH --job-name=train-mpt # name of your job -#SBATCH --output=logs/%x_%j.out -#SBATCH --error=logs/%x_%j.err +#SBATCH --output=logs/%x_%j.out # logfile for 
stdout +#SBATCH --error=logs/%x_%j.err # logfile for stderr, remove it to merge both outputs #SBATCH --ntasks-per-node 1 # Number of GPU per node #SBATCH --gpus-per-node=8 # Number of GPU per node #SBATCH --gpus-per-task=8 # Number of GPU per node #SBATCH --gres=gpu:8 # number of GPU we reserve #SBATCH --exclusive -#SBATCH --wait-all-nodes=1 + set -euxo pipefail +# use MPT 7B by default MODEL=${1:-mpt-7b} + # default variables for Enroot : "${APPS_PATH:=/apps}" : "${IMAGE:=$APPS_PATH/llm-foundry.sqsh}" @@ -20,7 +22,7 @@ MODEL=${1:-mpt-7b} : "${FSX_MOUNT:=$DATA_PATH:$DATA_PATH}" : "${APPS_MOUNT:=$APPS_PATH:$APPS_PATH}" -## Plenty of EFA level variables +## EFA settings export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d export FI_EFA_FORK_SAFE=1 # export NCCL_ALGO=Ring @@ -33,7 +35,7 @@ export FI_EFA_ENABLE_SHM_TRANSFER=1 export NCCL_ASYNC_ERROR_HANDLING=1 export NCCL_DEBUG=INFO - +# variables for Enroot declare -a ARGS=( --container-image $IMAGE --container-mounts ${FSX_MOUNT},${APPS_MOUNT} @@ -73,6 +75,8 @@ function run_compose() { eval_interval=0 \ save_folder=${MODEL} } + +# run the composer NODE_RANK=1 for (( NODE_RANK=1; NODE_RANK<${NNODES}; NODE_RANK++ )) do @@ -82,6 +86,6 @@ do done NODE_RANK=0 NODE=${HEAD_NODE} -echo "Run master node ${NODE} for rank: ${NODE_RANK}" +echo "Run main node ${NODE} for rank: ${NODE_RANK}" run_compose wait diff --git a/3.test_cases/3.MPT/README.md b/3.test_cases/3.MPT/README.md index 2f018c41..2455ff68 100644 --- a/3.test_cases/3.MPT/README.md +++ b/3.test_cases/3.MPT/README.md @@ -103,7 +103,7 @@ You need to retrieve input data and preprocess it before running the training jo 3. After the job completed, check `/fsx/c4-dataset` (default) which will contain a structure similar as below - ```bash + ```console /fsx/c4-dataset/ ├── train_small │ ├── index.json @@ -136,7 +136,7 @@ by default it runs `mpt-7b` model. You can specify model to be trained as: sbatch 2.train-mpt-manual-distributed.sbatch mpt-30b ``` -2.
When the training job completes successfully, it should produce an output similar to below. +2. When the training job completes successfully, it should produce a log output similar to the below in the `logs/` directory of `$TEST_CASE_PATH`. ```console ... From 9a24b70e139bff2b49e737716c214245db75dbee Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Sat, 30 Sep 2023 10:05:13 -0500 Subject: [PATCH 146/648] Add and fix documentation in trn1 PC test case --- .../2.aws-parallelcluster/README.md | 7 +++++ .../distributed-training-trn1_custom_ami.yaml | 8 ++++-- 2.amazon_machine_images/README.md | 26 +++++++++---------- 3 files changed, 26 insertions(+), 15 deletions(-) diff --git a/1.architectures/2.aws-parallelcluster/README.md b/1.architectures/2.aws-parallelcluster/README.md index b24ecf48..dc89ff12 100644 --- a/1.architectures/2.aws-parallelcluster/README.md +++ b/1.architectures/2.aws-parallelcluster/README.md @@ -95,6 +95,13 @@ The templates contain placeholder variables that you need to replace before use. - `PLACEHOLDER_SSH_KEY`: ID of the SSH key you'd like to use to connect to the head-node, use the name of the key. You can also use AWS Systems Manager Session Manager (SSM). - `PLACEHOLDER_CAPACITY_RESERVATION_ID`: if using a capacity reservation put the ID here (`cr-12356790abcd`). +In some of the templates you may need to update these placeholders: + +- `PLACEHOLDER_MIN_INSTANCES`: the minimum number of instances you want in your cluster at any point in time. +- `PLACEHOLDER_MAX_INSTANCES`: the maximum number of instances you anticipate to scale to. + +If `MIN` = `MAX` then you keep a fixed amount of instances at any point in time. If `MIN` < `MAX` then the cluster will keep a `MIN` number of instances and scale up to `MAX` if capacity beyond `MIN` is required to run jobs. 
Update these values by updating your cluster ([documentation](https://docs.aws.amazon.com/parallelcluster/latest/ug/using-pcluster-update-cluster-v3.html)) + ## AWS ParallelCluster must know ### Compute diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml index 9d686f98..d991f52c 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml @@ -1,6 +1,8 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 +# For additional examples please refer to this [Github repository](https://github.com/aws-neuron/aws-neuron-parallelcluster-samples/blob/master/examples/jobs/neuronx-nemo-megatron-llamav2-job.md) from aws-neuron. + Imds: ImdsSupport: v2.0 Image: @@ -44,10 +46,12 @@ Scheduling: ComputeResources: - Name: distributed-ml InstanceType: trn1.32xlarge - MinCount: PLACEHOLDER_NUM_INSTANCES # if min = max then capacity is maintained and will - MaxCount: PLACEHOLDER_NUM_INSTANCES # not scale down + MinCount: PLACEHOLDER_MIN_INSTANCES # if min = max then capacity is maintained and will + MaxCount: PLACEHOLDER_MAX_INSTANCES # not scale down Efa: Enabled: true + # assumes you are using a capacity reservation.
+ # If not comment or remove the 2 lines below CapacityReservationTarget: CapacityReservationId: PLACEHOLDER_CAPACITY_RESERVATION_ID Networking: diff --git a/2.amazon_machine_images/README.md b/2.amazon_machine_images/README.md index 1ec714eb..60829c32 100644 --- a/2.amazon_machine_images/README.md +++ b/2.amazon_machine_images/README.md @@ -44,19 +44,19 @@ You will find below the list of images you can build and which roles are deploye | Ansible Roles | `ami_pcluster_cpu` | `ami_pcluster_gpu`| `ami_base` | `ami_dlami_gpu` | `ami_dlami_neuron` | `ami_eks_gpu` | |-----------------------|--------------------|-------------------|------------|-----------------|--------------------|---------------| -| `base` | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | -| `packages` | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | -| `aws_cliv2` | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | -| `aws_lustre` | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | -| `nvidia_enroot_pyxis` | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | -| `docker` | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | -| `nvidia_docker` | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | -| `nvidia_driver` | ❌ | ✅ | ✅ | ❌ | ❌ | ✅ | -| `nvidia_cuda` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | -| `nvidia_gdrcopy` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | -| `nvidia_nccl` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | -| `aws_efa` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | -| `aws_efa_ofi` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | +| `base` | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | +| `packages` | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | +| `aws_cliv2` | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | +| `aws_lustre` | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | +| `nvidia_enroot_pyxis` | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | +| `docker` | ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | +| `nvidia_docker` | ❌ | ✅ | ✅ | ✅ | ✅ | ❌ | +| `nvidia_driver` | ❌ | ✅ | ✅ | ❌ | ❌ | ✅ | +| `nvidia_cuda` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | +| `nvidia_gdrcopy` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | +| `nvidia_nccl` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | +| `aws_efa` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | +| `aws_efa_ofi` | ❌ | ✅ | ✅ | ❌ | ❌ | ❌ | ## Customizing your AMIs From 596e023d429a11148f3c0588b487a7023c3ac916 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Sat, 30 Sep 2023 10:16:20 -0500 Subject: [PATCH 147/648] Add 
neuron ami to makefile --- 2.amazon_machine_images/Makefile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/2.amazon_machine_images/Makefile b/2.amazon_machine_images/Makefile index bab77d1c..1c4c0f7b 100644 --- a/2.amazon_machine_images/Makefile +++ b/2.amazon_machine_images/Makefile @@ -1,6 +1,6 @@ IMAGE=registry.gitlab.aws.dev/smml/benchmarking/tools/preflight DEPLOY_IMAGE=deploy -AWS_REGION=us-west-2 +: "${AWS_REGION:=us-east-1}" container_build: docker build -t ${IMAGE} ./preflight container_run: @@ -12,6 +12,8 @@ ami_pcluster_cpu: packer build -only 'aws-pcluster-cpu.*' -var aws_region=${AWS_REGION} -var "ami_version=1" packer-ami.pkr.hcl | tee aws-pcluster-cpu_ami.log ami_pcluster_gpu: packer build -only 'aws-pcluster-gpu.*' -var aws_region=${AWS_REGION} -var "ami_version=1" packer-ami.pkr.hcl | tee aws-pcluster-gpu_ami.log +ami_pcluster_neuron: + packer build -only 'aws-pcluster-neuron.*' -var aws_region=${AWS_REGION} -var "ami_version=1" packer-ami.pkr.hcl | tee aws-pcluster-gpu_ami.log ami_base: packer build -only 'aws-base-gpu.*' -var aws_region=${AWS_REGION} -var "ami_version=1" packer-ami.pkr.hcl | tee base-gpu_ami.log ami_dlami_gpu: From 2be7d79d42c9433281ffb4a5778b10ed52a29cc6 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Sat, 30 Sep 2023 10:36:07 -0500 Subject: [PATCH 148/648] Change doc for custom ami to report updates on Neuron --- 2.amazon_machine_images/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/2.amazon_machine_images/README.md b/2.amazon_machine_images/README.md index 60829c32..5bd146c0 100644 --- a/2.amazon_machine_images/README.md +++ b/2.amazon_machine_images/README.md @@ -19,7 +19,7 @@ Here is an example to build a AMI for training or inference on GPU with AWS Para make ami_pcluster_gpu ``` -> **Note**: If you encounter an error because Packer could not find the source AMI with the error `InvalidAMIID.NotFound` then prepend by `AWS_REGION` with the target region. 
For example, `AWS_REGION=us-east-2 make ami_pcluster_gpu`. +> **Note**: If you encounter an error because Packer could not find the source AMI with the error `InvalidAMIID.NotFound` then prepend by `AWS_REGION` with the target region. For example, `AWS_REGION=us-east-2 make ami_pcluster_gpu`. By default the script use `us-west-2`. The list of arguments you can use is shown in the table below with the AMI origin (what are we starting our custom AMI from) and notes regarding their content. @@ -27,6 +27,7 @@ The list of arguments you can use is shown in the table below with the AMI origi |--------------------|------------|------------------------------------------------------------------------------------| | `ami_pcluster_cpu` | [ParallelCluster AMI](https://docs.aws.amazon.com/parallelcluster/latest/ug/pcluster.list-official-images-v3.html) | Creates a custom ParallelCluter AMI for CPU based workloads | | `ami_pcluster_gpu` | [ParallelCluster AMI](https://docs.aws.amazon.com/parallelcluster/latest/ug/pcluster.list-official-images-v3.html) | Creates a custom ParallelCluter AMI for GPU based workloads, training or inference | +| `ami_pcluster_neuron` | [ParallelCluster AMI](https://docs.aws.amazon.com/parallelcluster/latest/ug/pcluster.list-official-images-v3.html) | Creates a custom ParallelCluter AMI for [Neuron](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/) (Trn, Inf) based workloads, training or inference | | `ami_base` | [EC2 AL2 AMI](https://aws.amazon.com/amazon-linux-2/) | EC2 AMI with updates, Docker, Lustre, EFA, Pyxis and Enroot (everything) | | `ami_dlami_gpu` | [DLAMI](https://docs.aws.amazon.com/dlami/latest/devguide/appendix-ami-release-notes.html) | DL AMI with updated drivers, Pyxis, enroot, Lustre module client and Docker. 
| | `ami_dlami_neuron` | [DLAMI-Neuron](https://docs.aws.amazon.com/dlami/latest/devguide/appendix-ami-release-notes.html) | DL AMI for Neuron, same as above without the Nvidia stack | From 8a136444b4f11a3e9c55e72bf4807315b0d72dd1 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Sat, 30 Sep 2023 11:01:21 -0500 Subject: [PATCH 149/648] Update EKS archs documentation and use placeholders for config --- 1.architectures/4.amazon-eks/README.md | 31 +++++++++++++------ .../4.amazon-eks/eks-g4dn-vpc.yaml | 16 +++++----- 1.architectures/4.amazon-eks/eks-g4dn.yaml | 10 +++--- .../4.amazon-eks/eks-p4de-odcr-vpc.yaml | 18 +++++------ .../4.amazon-eks/eks-p4de-odcr.yaml | 12 +++---- 5 files changed, 50 insertions(+), 37 deletions(-) diff --git a/1.architectures/4.amazon-eks/README.md b/1.architectures/4.amazon-eks/README.md index 9caf0631..ceeabc62 100644 --- a/1.architectures/4.amazon-eks/README.md +++ b/1.architectures/4.amazon-eks/README.md @@ -3,7 +3,7 @@ This project provides several reference architectures to run distributed training on Amazon EKS for different use cases using `p4d.24xlarge` instances (you can replace them by `p5` or `trn1`. These examples use [eksctl](eksctl.io) and a cluster manifest to create your specified Amazon EKS cluster. -## Prerequisites +## 0. Prerequisites To deploy the architectures you must install the dependencies below. You are advised to go through the fist two steps of the [Getting started with Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html) guide from the AWS Documentation. @@ -11,7 +11,7 @@ To deploy the architectures you must install the dependencies below. You are adv 2. [eksctl](https://eksctl.io) command line tool to manage EKS clusters. 3. [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) command line for Kubernetes. -## Architecture +## 1. Architecture The following digram shows a common architecture that can be used for distributed model training on EKS. 
@@ -19,7 +19,7 @@ The following digram shows a common architecture that can be used for distribute The EKS cluster has two nodegroups. A `system` nodegroup is used to run pods like kube-dns, kubeflow training operator, etc. which provide internal cluster-scope services and can run on CPU. A worker nodegroup built with an accelerated instance type is used to run the distributed training workload. -## Cluster configuration +## 2. Cluster configuration The cluster configuration is specified via a yaml manifest file. If a cluster version is not specified in the manifest, then the default EKS API version will be used. For our examples we set the version to 1.27. This setting may be adjusted before creating clusters as needed. The following example cluster configurations for distributed training are provided: @@ -30,14 +30,27 @@ The following example cluster configurations for distributed training are provid * [**`eks-p4de-odcr.yaml`**](./eks-p4de-odcr.yaml): Cluster with 2 * `p4de.24xlarge` instances from an existing ODCR. A new VPC will be created for this cluster. This configuration is useful for distributed training when no VPC is already available. Note that you would have to match the AZ of your ODCR in the nodegroup section of the manifest. Nodegroups in this and previous examples are fully-managed and can be accessed via the EKS console. If you are using an instance type that is not yet supported in managed nodegroups by EKS, you can define a nodegroup in a self-manged nodegroup section as shown at the end of this example. -## Cluster creation +## 3. Cluster creation - -### Edit the cluster configuration +### 3.1 Edit the cluster configuration To configure your desired cluster, edit the cluster manifest file that most closely matches your desired configuration or copy the file and customize it, following the [cluster manifest schema](https://eksctl.io/usage/schema/). 
Any of the values in the manifests can be changed and more node groups can be added to the same cluster. The minimal set of values to specify for each file are described above. -### Create a cluster + +You will need to replace the following placeholders to deploy your clusters: + +- `PLACEHOLDER_AWS_REGION`: region in which to deploy the cluster, replace by `us-east-1` for example. +- `PLACEHOLDER_AZ_1`: We use 2 AZs for the cluster, replace by `us-east-1a` for example. +- `PLACEHOLDER_AZ_2`: This AZ is where your compute capacity is located, replace by `us-east-1c` for example if that's where your capacity is located. +- `PLACEHOLDER_VPC_ID`: ID of the VPC in which you deploy the cluster, it should take the form `vpc-12356790abcd`. +- `PLACEHOLDER_SUBNET_PUBLIC_1` and `PLACEHOLDER_SUBNET_PUBLIC_2`: change to the id of a public subnet (`subnet-12356790abcd`). +- `PLACEHOLDER_SUBNET_PUBLIC_2`: change to the id of a public subnet to host the compute nodes (`subnet-12356790abcd`). +- `PLACEHOLDER_SUBNET_PRIVATE_1`: change to the id of a private subnet to host the compute nodes (`subnet-12356790abcd`). +- `PLACEHOLDER_SUBNET_PRIVATE_2`: change to the id of a private subnet to host the compute nodes (`subnet-12356790abcd`). This subnet holds your compute capacity, ensure it is in the right AZ. +- `PLACEHOLDER_CAPACITY_RESERVATION_ID`: if using a capacity reservation put the ID here (`cr-12356790abcd`). + + +### 3.2 Create a cluster 1. Let's assume that your desired cluster configuration is stored in file `cluster.yaml`. Then to create the cluster, execute the following command: ```bash @@ -58,7 +71,7 @@ To configure your desired cluster, edit the cluster manifest file that most clos You should see a list of three nodes. One would be a system node instance of type c5.2xlarge, and the others will belong to the nodegroup of instances with your desired instance type for distributed training.
-## Cleanup +### 3.3 Cleanup To remove your cluster, execute the following command: @@ -73,7 +86,7 @@ YYYY-MM-DD HH:mm:SS [ℹ] deleting EKS cluster "" YYYY-MM-DD HH:mm:SS [ℹ] waiting for CloudFormation stack "" ``` -## References +## 4. References For further information regarding EKS cluster infrastructure see the [aws-do-eks](https://github.com/aws-samples/aws-do-eks) project. More cluster configurations are available [here](https://github.com/aws-samples/aws-do-eks/tree/main/wd/conf/eksctl/yaml). diff --git a/1.architectures/4.amazon-eks/eks-g4dn-vpc.yaml b/1.architectures/4.amazon-eks/eks-g4dn-vpc.yaml index a4661f0f..5d44640d 100644 --- a/1.architectures/4.amazon-eks/eks-g4dn-vpc.yaml +++ b/1.architectures/4.amazon-eks/eks-g4dn-vpc.yaml @@ -4,23 +4,23 @@ kind: ClusterConfig # Update cluster name, region, and eks version if needed metadata: name: eks-g4dn-vpc - version: "1.27" - region: us-east-1 + version: "1.28" + region: PLACEHOLDER_AWS_REGION # Substitute vpc and subnet ids below vpc: - id: vpc-xxxxxxxxxxxxxxxxx + id: PLACEHOLDER_VPC_ID subnets: public: public-one: - id: subnet-xxxxxxxxxxxxxxx11 + id: PLACEHOLDER_SUBNET_PUBLIC_1 public-two: - id: subnet-xxxxxxxxxxxxxxx12 + id: PLACEHOLDER_SUBNET_PUBLIC_2 private: private-one: - id: subnet-xxxxxxxxxxxxxxx21 + id: PLACEHOLDER_SUBNET_PRIVATE_1 private-two: - id: subnet-xxxxxxxxxxxxxxx22 + id: PLACEHOLDER_SUBNET_PRIVATE_2 # Fully-managed nodegroups managedNodeGroups: @@ -47,7 +47,7 @@ managedNodeGroups: maxSize: 10 volumeSize: 500 subnets: - - subnet-xxxxxxxxxxxxxxx22 + - PLACEHOLDER_SUBNET_PRIVATE_2 iam: withAddonPolicies: autoScaler: true diff --git a/1.architectures/4.amazon-eks/eks-g4dn.yaml b/1.architectures/4.amazon-eks/eks-g4dn.yaml index c5cde94f..85f65da8 100644 --- a/1.architectures/4.amazon-eks/eks-g4dn.yaml +++ b/1.architectures/4.amazon-eks/eks-g4dn.yaml @@ -4,13 +4,13 @@ kind: ClusterConfig # Update cluster name, region, and eks version if needed metadata: name: eks-g4dn - version: "1.27" - region: 
us-east-1 + version: "1.28" + region: PLACEHOLDER_AWS_REGION # List availability zones where cluster subnets will be created availabilityZones: - - us-east-1a - - us-east-1c + - PLACEHOLDER_AZ_1 + - PLACEHOLDER_AZ_2 # Fully-managed nodegroups managedNodeGroups: @@ -35,7 +35,7 @@ managedNodeGroups: desiredCapacity: 2 maxSize: 10 volumeSize: 500 - availabilityZones: ["us-east-1c"] + availabilityZones: ["PLACEHOLDER_AZ_2"] iam: withAddonPolicies: autoScaler: true diff --git a/1.architectures/4.amazon-eks/eks-p4de-odcr-vpc.yaml b/1.architectures/4.amazon-eks/eks-p4de-odcr-vpc.yaml index 17b9ed24..c9b35367 100644 --- a/1.architectures/4.amazon-eks/eks-p4de-odcr-vpc.yaml +++ b/1.architectures/4.amazon-eks/eks-p4de-odcr-vpc.yaml @@ -4,23 +4,23 @@ kind: ClusterConfig # Update cluster name, region, and eks version if needed metadata: name: eks-p4de-odcr-vpc - version: "1.27" - region: us-east-1 + version: "1.28" + region: PLACEHOLDER_AWS_REGION # Substitute vpc and subnet ids below vpc: - id: vpc-xxxxxxxxxxxxxxxxx + id: PLACEHOLDER_VPC_ID subnets: public: public-one: - id: subnet-xxxxxxxxxxxxxxx11 + id: PLACEHOLDER_SUBNET_PUBLIC_1 public-two: - id: subnet-xxxxxxxxxxxxxxx12 + id: PLACEHOLDER_SUBNET_PUBLIC_2 private: private-one: - id: subnet-xxxxxxxxxxxxxxx21 + id: PLACEHOLDER_SUBNET_PRIVATE_1 private-two: - id: subnet-xxxxxxxxxxxxxxx22 + id: PLACEHOLDER_SUBNET_PRIVATE_2 # Fully-managed nodegroups managedNodeGroups: @@ -47,10 +47,10 @@ managedNodeGroups: maxSize: 10 volumeSize: 500 subnets: - - subnet-xxxxxxxxxxxxxxx22 + - PLACEHOLDER_SUBNET_PRIVATE_2 capacityReservation: capacityReservationTarget: - capacityReservationID: "cr-xxxxxxxxxxxxxxxxx" + capacityReservationID: "PLACEHOLDER_CAPACITY_RESERVATION_ID" iam: withAddonPolicies: autoScaler: true diff --git a/1.architectures/4.amazon-eks/eks-p4de-odcr.yaml b/1.architectures/4.amazon-eks/eks-p4de-odcr.yaml index a906203c..53a77534 100644 --- a/1.architectures/4.amazon-eks/eks-p4de-odcr.yaml +++ 
b/1.architectures/4.amazon-eks/eks-p4de-odcr.yaml @@ -4,13 +4,13 @@ kind: ClusterConfig # Update cluster name, region, and eks version if needed metadata: name: eks-p4de-odcr - version: "1.27" - region: us-east-1 + version: "1.28" + region: PLACEHOLDER_AWS_REGION # List availability zones where cluster subnets will be created availabilityZones: - - us-east-1a - - us-east-1c + - PLACEHOLDER_AZ_1 + - PLACEHOLDER_AZ_2 # Fully-managed nodegroups managedNodeGroups: @@ -36,10 +36,10 @@ managedNodeGroups: desiredCapacity: 2 maxSize: 10 volumeSize: 500 - availabilityZones: ["us-east-1c"] + availabilityZones: ["PLACEHOLDER_AZ_2"] capacityReservation: capacityReservationTarget: - capacityReservationID: "cr-xxxxxxxxxxxxxxxxx" + capacityReservationID: "PLACEHOLDER_CAPACITY_RESERVATION_ID" iam: withAddonPolicies: autoScaler: true From dc3c6d68c576c85927ab9f5c33ce031c29aa10f3 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Sat, 30 Sep 2023 11:28:40 -0500 Subject: [PATCH 150/648] Update main readmefile --- README.md | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 627cbcd2..8c7d964d 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,7 @@ reference-architectures/ |-- 1.architectures # CloudFormation templates for reference arch |-- 2.amazon_machine_images/ # Scripts to create AMIs |-- 3.test_cases/ # Reference test cases and/or benchmark scripts +|-- 3.validation_scripts/ # Tools to measure performance or troubleshoot `-- ... ``` @@ -24,6 +25,7 @@ Architectures are located in `1.architectures` and consists of utilities and ser | `1.vpc_network` | Network | Create a VPC with subnets required resources | | `2.aws-parallelcluster` | Compute | Cluster templates for GPU & custom silicon training | | `3.aws-batch` | Compute | AWS Batch template for distributed training | +| `4.amazon-eks` | Compute | Manifest files to train with Amazon EKS | More will come, feel free to add new ones (EKS, Ray?) 
@@ -35,12 +37,13 @@ Custom machine images can be built using [Packer](www.packer.io) for AWS Paralle All test cases are under `3.test_cases/`. You can go in each test case directory to learn how to run it. -| Test cases | Slurm | EKS | AWS Batch | -| ----------------- | ----- | --- | ---------- | -| `1.megatron-lm` | ✅ | ❓ | ❓ | -| `2.nemo-launcher` | ✅ | ❌ | ❌ | -| `3.MPT` | ❓ | ❓ | ❓ | -| `4.DDP` | ❓ | ❓ | ❓ | +| Test cases | Slurm | EKS | AWS Batch | +| ------------------- | ----- | --- | ---------- | +| `1.megatron-lm` | ✅ | ❓ | ❓ | +| `2.nemo-launcher` | ✅ | ❌ | ❌ | +| `3.MPT` | ✅ | ❓ | ❓ | +| `4.DDP` | ✅ | ❓ | ❓ | +| `4.param-benchmark` | ✅ | ❓ | ❓ | ## 4. Validation scripts @@ -59,3 +62,5 @@ Thanks to all the contributors for building, reviewing and testing. - Alex Iankoulski - iankouls@ - Tom McDonald - tjm@ - Sean Smith - seaam@ +- Jianying Lang - langjian@ +- Maxime Hugues - maxhaws@ From a7e964a4bba91d5008723e5740061f54eaf58cc7 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Sat, 30 Sep 2023 12:30:32 -0500 Subject: [PATCH 151/648] Fix numpy version in ddp test case requirements --- 3.test_cases/4.DDP/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/4.DDP/requirements.txt b/3.test_cases/4.DDP/requirements.txt index 25b77953..b63d01a0 100644 --- a/3.test_cases/4.DDP/requirements.txt +++ b/3.test_cases/4.DDP/requirements.txt @@ -1,7 +1,7 @@ catalyst==22.4 imageio==2.16.1 matplotlib==3.5.1 -numpy==1.21.5 +numpy==1.22.0 opencv_python==4.5.5.64 pandas==1.5.0 Pillow==9.4.0 From 410b4d803efdec2cbc9360d4e0bbd1e8a4e0766a Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Sun, 1 Oct 2023 08:02:47 +0900 Subject: [PATCH 152/648] Update README.md fix typo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8c7d964d..58e580c3 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ All test cases are under `3.test_cases/`. 
You can go in each test case directory | `2.nemo-launcher` | ✅ | ❌ | ❌ | | `3.MPT` | ✅ | ❓ | ❓ | | `4.DDP` | ✅ | ❓ | ❓ | -| `4.param-benchmark` | ✅ | ❓ | ❓ | +| `5.param-benchmark` | ✅ | ❓ | ❓ | ## 4. Validation scripts From 156331918d013ada6e059a6ce218156beb5a053e Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Mon, 2 Oct 2023 13:23:01 -0700 Subject: [PATCH 153/648] Update versions for Megatron-LM * Latest NCCL Version `v2.18.5-1` * Latest `aws-ofi-nccl` version to `v1.7.2-aws`, this contains a memory leak patch, see https://github.com/aws/aws-ofi-nccl/commit/8b62baf18190ab3781c294169f7570da7a24220b * Update docker image to `pytorch:23.08-py3` Signed-off-by: Sean Smith --- 3.test_cases/1.megatron-lm/0.data-preprocessing.Dockerfile | 2 +- 3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch | 1 - .../1.megatron-lm/2.distributed-training.Dockerfile | 7 ++++--- 3.test_cases/1.megatron-lm/3.distributed-training.sbatch | 1 - 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/3.test_cases/1.megatron-lm/0.data-preprocessing.Dockerfile b/3.test_cases/1.megatron-lm/0.data-preprocessing.Dockerfile index 4e780fa3..82a49439 100644 --- a/3.test_cases/1.megatron-lm/0.data-preprocessing.Dockerfile +++ b/3.test_cases/1.megatron-lm/0.data-preprocessing.Dockerfile @@ -3,7 +3,7 @@ # Container file for data prep # This could be reduced in the future -FROM nvcr.io/nvidia/pytorch:23.05-py3 +FROM nvcr.io/nvidia/pytorch:23.08-py3 RUN apt-get update -y && apt-get install wget xz-utils git -y RUN apt-get install python3 python3-pip -y RUN pip3 install nltk diff --git a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch index 8a557abf..3e1e0fae 100644 --- a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch +++ b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch @@ -5,7 +5,6 @@ #SBATCH -N 1 # number of nodes we want #SBATCH --exclusive # job has exclusive use of the resource, no sharing -#SBATCH --export=NIL 
# do not export env vars from the host env ########################### ###### User Variables ##### diff --git a/3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile b/3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile index 5b4220be..93d164d3 100644 --- a/3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile +++ b/3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile @@ -1,11 +1,12 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -FROM nvcr.io/nvidia/pytorch:23.05-py3 +FROM nvcr.io/nvidia/pytorch:23.08-py3 ARG EFA_INSTALLER_VERSION=latest -ARG AWS_OFI_NCCL_VERSION=v1.6.0 +ARG AWS_OFI_NCCL_VERSION=v1.7.2-aws ARG NCCL_TESTS_VERSION=master +ARG NCCL_VERSION=v2.18.5-1 ARG OPEN_MPI_PATH=/opt/amazon/openmpi ###################### @@ -71,7 +72,7 @@ RUN cd $HOME \ ## Install NCCL RUN git clone https://github.com/NVIDIA/nccl /opt/nccl \ && cd /opt/nccl \ - && git checkout v2.15.5-1 \ + && git checkout ${NCCL_VERSION} \ && make -j$(nproc) src.build CUDA_HOME=/usr/local/cuda \ NVCC_GENCODE="-gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_60,code=sm_60" diff --git a/3.test_cases/1.megatron-lm/3.distributed-training.sbatch b/3.test_cases/1.megatron-lm/3.distributed-training.sbatch index a1d23a60..762e0c5c 100644 --- a/3.test_cases/1.megatron-lm/3.distributed-training.sbatch +++ b/3.test_cases/1.megatron-lm/3.distributed-training.sbatch @@ -9,7 +9,6 @@ #SBATCH --gres=gpu:8 # number of GPU we reserve #SBATCH --exclusive # job has exclusive use of the resource, no sharing #SBATCH --wait-all-nodes=1 -#SBATCH --export=NIL # do not export env vars from the host env set -ex; From 26cf148c2784ca6ffa03ed802f9d77966dcfb07c Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Mon, 2 Oct 2023 16:16:06 -0700 Subject: [PATCH 154/648] Add more information on placement group usage
Signed-off-by: Sean Smith --- .../2.aws-parallelcluster/README.md | 48 ++++++++++++++++--- .../distributed-training-p4de-base.yaml | 2 +- ...ted-training-p4de_postinstall_scripts.yaml | 6 ++- 3 files changed, 48 insertions(+), 8 deletions(-) diff --git a/1.architectures/2.aws-parallelcluster/README.md b/1.architectures/2.aws-parallelcluster/README.md index dc89ff12..7a2d79d2 100644 --- a/1.architectures/2.aws-parallelcluster/README.md +++ b/1.architectures/2.aws-parallelcluster/README.md @@ -29,7 +29,7 @@ pip3 install awscli # install the AWS CLI pip3 install aws-parallelcluster # then AWS ParallelCluster ``` -> **Note**: you can use virtual environments to test different versions of AWS ParallelCluster by setting the version during the installation. For example to use 3.6.1, change the command `pip3 install aws-parallelcluster==3.6.1`. +> **Note**: you can use virtual environments to test different versions of AWS ParallelCluster by setting the version during the installation. For example to use 3.7.1, change the command `pip3 install aws-parallelcluster==3.7.1`. ### Create your EC2 Keypair (if needed) @@ -121,17 +121,53 @@ Storage comes in 3 flavors: - **High performance filesystem**: An [FSx for Lustre](https://docs.aws.amazon.com/fsx/latest/LustreGuide/what-is.html) filesystem can be access from every cluster node on `/fsx`. This is where users would store their datasets. This file system has been sized to 4.8TiB and provides 1.2GB/s of aggregated throughput. You can modify its size and the throughput per TB provisioned in the config file following the service [documentation](https://docs.aws.amazon.com/fsx/latest/LustreGuide/performance.html). -#### Network +#### Network {#efa} -Applications will make use of [Elastic Fabric Adapter (EFA)](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) for distributed training. In addition, instances will be placed to one another through the use of placement groups or assistance from AWS. 
+Applications will make use of [Elastic Fabric Adapter (EFA)](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) for enhanced networking during distributed training. To achieve optimal network latency instances should be placed in a placement group using either the `PlacementGroup` flag or by specifying a targeted [On-Demand Capacity reservation (ODCR)](#odcr). -Placement groups are only relevant for distributed training, not inference. You may remove the placement groups declaration in the config file if requested. In which case you will need to delete these lines +When using a targeted ODCR you'll want to disable placement groups in your ODCR. The placement group option creates a specific placement group that may conflict with the placement group assigned in your ODCR leading to instance launch failures. + +Placement groups are only relevant for distributed training, not inference. + +In both cases just set the following parameter to `false`: ```yaml PlacementGroup: - Enabled: true + Enabled: false ``` +#### On-Demand Capacity Reservation (ODCR) {#odcr} + +On-Demand Capacity Reservation (ODCR) is a tool for reserving capacity without having to launch and run the instances. For capacity constrained instances like the `p4de.24xlarge`, this is typically **the only way** to launch these instances. + +1. AWS ParallelCluster supports specifying the [CapacityReservationId](https://docs.aws.amazon.com/parallelcluster/latest/ug/Scheduling-v3.html#yaml-Scheduling-SlurmQueues-CapacityReservationTarget) in the cluster's config file. If using a capacity reservation put the ID i.e. `cr-12356790abcd` in your config file by substituting the variable `PLACEHOLDER_CAPACITY_RESERVATION_ID`.
It should look like the following: + + ```yaml + CapacityReservationTarget: + CapacityReservationId: cr-12356790abcd + ``` + +If you have multiple ODCR's you can group them together into a [*Capacity Reservation Group*](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-cr-group.html), this allows you to launch instances from multiple ODCR's as part of the **same queue** of the cluster. + +1. First create a group, this will return a group arn like: `arn:aws:resource-groups:us-east-2:123456789012:group/MyCRGroup`. Save that for later. + + ```bash + $ aws resource-groups create-group --name MyCRGroup --configuration '{"Type":"AWS::EC2::CapacityReservationPool"}' '{"Type":"AWS::ResourceGroups::Generic", "Parameters": [{"Name": "allowed-resource-types", "Values": ["AWS::EC2::CapacityReservation"]}]}' + ``` + +2. Next add your capacity reservations to that group: + + ```bash + aws resource-groups group-resources --group MyCRGroup --resource-arns arn:aws:ec2:sa-east-1:123456789012:capacity-reservation/cr-1234567890abcdef1 arn:aws:ec2:sa-east-1:123456789012:capacity-reservation/cr-54321abcdef567890 + ``` + +3. Then add the group to your cluster's config like so: + + ```yaml + CapacityReservationTarget: + CapacityReservationResourceGroupArn: arn:aws:resource-groups:us-east-2:123456789012:group/MyCRGroup + ``` + #### Installing applications & libraries You can chose to use a custom image or post-install scripts to install your application stack. @@ -154,4 +190,4 @@ You can chose to use a custom image or post-install scripts to install your appl A common issue we see customer face is a problem with the post install scripts or issue to access capacity due to a mis-configuration. This can manifest itself through a `HeadNodeWaitCondition` that'll cause the ParallelCluster to fail a cluster deployment. 
-To solve that, you can look at the cluster logs in CloudWatch in the cluster loggroup, otherwise use the option `--rollback-on-failure false` to keep resources up upon failure for further troubleshooting. +To solve that, you can look at the cluster logs in CloudWatch in the cluster log group, otherwise use the option `--rollback-on-failure false` to keep resources up upon failure for further troubleshooting. diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml index ad01a75a..0760f482 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml @@ -31,7 +31,7 @@ Scheduling: SubnetIds: - PLACEHOLDER_PRIVATE_SUBNET PlacementGroup: - Enabled: true + Enabled: true # set this to false if using a targeted ODCR ComputeSettings: LocalStorage: EphemeralVolume: diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml index 34a9089e..5ee53619 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml @@ -25,6 +25,8 @@ HeadNode: Sequence: - Script: 'https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-post-install-scripts/main/docker/postinstall.sh' - Script: 'https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-post-install-scripts/main/pyxis/postinstall.sh' + Args: + - /fsx # cache enroot images on /fsx Scheduling: Scheduler: slurm SlurmSettings: @@ -36,7 +38,7 @@ Scheduling: SubnetIds: - PLACEHOLDER_PRIVATE_SUBNET PlacementGroup: - Enabled: true + Enabled: true # set this to false if using a targeted ODCR ComputeSettings: LocalStorage: EphemeralVolume: @@ -62,6 +64,8 
@@ Scheduling: Sequence: - Script: 'https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-post-install-scripts/main/docker/postinstall.sh' - Script: 'https://raw.githubusercontent.com/aws-samples/aws-parallelcluster-post-install-scripts/main/pyxis/postinstall.sh' + Args: + - /fsx # cache enroot images on /fsx SharedStorage: - MountDir: /fsx Name: fsx From 063833a6a57af8361fae0ad7b7aa5aa59ceb555d Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 3 Oct 2023 22:30:23 +0800 Subject: [PATCH 155/648] More clarification on ODCR and PG; other changes to readme --- .../2.aws-parallelcluster/README.md | 127 +++++++++--------- 1 file changed, 61 insertions(+), 66 deletions(-) diff --git a/1.architectures/2.aws-parallelcluster/README.md b/1.architectures/2.aws-parallelcluster/README.md index 7a2d79d2..8b2df6a9 100644 --- a/1.architectures/2.aws-parallelcluster/README.md +++ b/1.architectures/2.aws-parallelcluster/README.md @@ -1,18 +1,17 @@ -# AWS ParallelCluster Distributed Training Reference Architectures +# AWS ParallelCluster Distributed Training Reference Architectures -## Architectures +## 1. Architectures Clusters in AWS ParallelCluster share similar components: a head-node, compute nodes (typically P or Trn EC2 family of instances) and one or multiple shared filesystems (FSx for Lustre). You will find below a section on the architectures themselves and how to deploy them. After this section, you will be brief on key elements of these templates (or things you wanna know to avoid potential mistakes). -## Initial setup +## 2. Pre-requisites -Before deploying a cluster, let's ensure you have AWS ParallelCluster (PC) accessible and that you have generated an EC2 key pair that you can use to connect to your head-node. If you have both PC installed and the key pair generated then skip this section and go deploy a cluster. 
+Before deploying a cluster, let's ensure you have installed the AWS ParallelCluster (PC) CLI, and that you have generated an EC2 key pair for the head node later on. If you have both PC installed and the key pair generated then skip this section and go [deploy-a-cluster section](#3-deploy-clusters). -### Install AWS ParallelCluster +### 2.1. Install AWS ParallelCluster CLI Run the script below to install AWS ParallelCluster in a Python virtual environment and access this environment. - ```bash #!/bin/bash @@ -31,8 +30,7 @@ pip3 install aws-parallelcluster # then AWS ParallelCluster > **Note**: you can use virtual environments to test different versions of AWS ParallelCluster by setting the version during the installation. For example to use 3.7.1, change the command `pip3 install aws-parallelcluster==3.7.1`. - -### Create your EC2 Keypair (if needed) +### 2.2. Create your EC2 Keypair (if needed) The EC2 key pair enables your to connect to your cluster on the head-node through ssh or [AWS Systems Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-sessions-start.html). We will cover for SSH here. @@ -40,7 +38,6 @@ You can list your public keys on your [AWS Console](https://console.aws.amazon.c If you do not have a keypair that you can use then we will create one with the command below (see [this documentation](https://docs.aws.amazon.com/parallelcluster/latest/ug/set-up-keypair.html)). - ```bash #!/bin/bash @@ -53,16 +50,13 @@ aws ec2 create-key-pair --key-name pcluster-workshop-key \ --region $AWS_TARGET_REGION \ --output text > $KEYPAIR_NAME.pem -# the private part of your key pair is located in the current directory -# we change the access rights to the current user only +# The private part of your key pair is located in the current directory. +# We must change the access rights to the current user only, otherwise the ssh +# client refuses to use this key. 
sudo chmod 600 $KEYPAIR_NAME.pem ``` -## Deploy Clusters - -We will show you how to - -### How to deploy a cluster +## 3. Deploy a Cluster To create the cluster use the command below and replace `CLUSTER_CONFIG_FILE` by the path to the cluster configuration file (see next section) and `NAME_OF_YOUR_CLUSTER` by the name of your cluster (`realpotato` is a cool name). @@ -72,22 +66,22 @@ pcluster create-cluster --cluster-configuration CLUSTER_CONFIG_FILE --cluster-na You can follow the [documentation](https://docs.aws.amazon.com/parallelcluster/latest/ug/commands-v3.html) to review the list of all AWS ParallelCluster commands. -### Cluster templates +### 3.1. Cluster templates -Each reference architectures provides an example of cluster for different use cases. The architectures most commonly used are: +Each reference architectures provides an example of cluster configuration (`.yaml`) for different use cases. The architectures most commonly used are: -- `distributed-training-gpu`: base template, uses the default AMI with no software installed. -- `distributed-training-p4de_custom_ami`: base cluster with a custom AMI to install custom software. -- `distributed-training-p4de_postinstall_scripts`: same as above but uses post-install scripts to install Docker, Pyxis and Enroot. +- `distributed-training-gpu.yaml`: base template, uses the default AMI with no software installed. +- `distributed-training-p4de_custom_ami.yaml`: base cluster with a custom AMI to install custom software. +- `distributed-training-p4de_postinstall_scripts.yaml`: same as above but uses post-install scripts to install Docker, Pyxis and Enroot. Alternatively you can refer to these architectures for more specific use cases: -- `distributed-training-p4de_batch-inference-g5_custom_ami`: multi-queue template with p4de for training and g5 for inference. It assumes a custom AMI. -- `distributed-training-trn1_custom_ami`: uses Trainium instances for distributed training. Assumes a custom AMI. 
+- `distributed-training-p4de_batch-inference-g5_custom_ami.yaml`: multi-queue template with p4de for training and g5 for inference. It assumes a custom AMI. +- `distributed-training-trn1_custom_ami.yaml`: uses Trainium instances for distributed training. Assumes a custom AMI. -### What to replace in the templates +### 3.2. What to replace in the templates -The templates contain placeholder variables that you need to replace before use. +The `.yaml` templates contain placeholder variables that you need to replace before use. - `PLACEHOLDER_CUSTOM_AMI_ID`: if using a custom AMI then replace with the custom AMI ID (`ami-12356790abcd`). - `PLACEHOLDER_PUBLIC_SUBNET`: change to the id of a public subnet to host the head-node (`subnet-12356790abcd`). @@ -102,9 +96,11 @@ In some of the templates you may need to update these placeholders: If `MIN` = `MAX` then you keep a fixed amount of instances at any point in time. If `MIN` < `MAX` then the cluster will keep a `MIN` number of instances and scale up to `MAX` if capacity beyond `MIN` is required to run jobs. Update this values by updating your cluster ([documentation](https://docs.aws.amazon.com/parallelcluster/latest/ug/using-pcluster-update-cluster-v3.html)) -## AWS ParallelCluster must know +## 4. Anatomy of AWS Parallel Cluster + +![AWS ParallelCluster diagram](../../0.docs/parallelcluster-arch-diagram.png) -### Compute +### 4.1. Compute Compute is represented through the following: @@ -112,47 +108,23 @@ Compute is represented through the following: - **Compute-gpu**: is the queue (or partition) to run your ML training jobs. The instances are either [p4de.24xlarge](https://aws.amazon.com/ec2/instance-types/p4/) or [trn1.32xlarge](https://aws.amazon.com/ec2/instance-types/trn1/) which are recommended for training, especially for LLMs or large models. The default number of instances in the queue has been set to *4* and can be changed as necessary. 
- **Inference-gpu**: is an optional queue that can be used to run inference workloads and uses [g5.12xlarge](https://aws.amazon.com/ec2/instance-types/m5/). -#### Storage - -Storage comes in 3 flavors: - -- **Local**: head and compute nodes have 200GiB of EBS volume mounted on `/`. In addition, the headnode has an EBS volume of `200GiB` mounted on `/apps` The compute nodes have NVMe drives striped in RAID0 and mounted as `/local_scratch`. -- **File network storage**: The head-node shares `/home` and `/apps` to the whole cluster through NFS. These directories are automatically mounted on every instance in the cluster and accessible through the same path. `/home` is a regular home directory, `/apps` is a shared directory where applications or shared files can be stored. Please note that none should be used for data intensive tasks. -- **High performance filesystem**: An [FSx for Lustre](https://docs.aws.amazon.com/fsx/latest/LustreGuide/what-is.html) filesystem can be access from every cluster node on `/fsx`. This is where users would store their datasets. This file system has been sized to 4.8TiB and provides 1.2GB/s of aggregated throughput. You can modify its size and the throughput per TB provisioned in the config file following the service [documentation](https://docs.aws.amazon.com/fsx/latest/LustreGuide/performance.html). - - -#### Network {#efa} - -Applications will make use of [Elastic Fabric Adapter (EFA)](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) for enhanced networking during distributed training. To achieve optimal network latency instances should be placed in a placement groups using either the `PlacementGroup` flag or by specifying a targeted [On-Demand Capacity reservation (ODCR)](#odcr). - -When using a targeted ODCR you'll want to disable placement groups in your ODCR. The placement group option creates a specific placement group that may conflict with the placement group assigned in your ODCR leading to instance launch failures. 
- -Placement groups are only relevant for distributed training, not inference. - -In both cases just set the following parameter to `false`: - -```yaml -PlacementGroup: - Enabled: false -``` - -#### On-Demand Capacity Reservation (ODCR) {#odrc} +### 4.2. On-Demand Capacity Reservation (ODCR) -On-Demand Capacity Reservation (ODCR) is a tool for reserving capacity without having to launch and run the instances. For capacity constrained instances like the `p4de.24xlarge`, this is typically **the only way** to launch these instances. +On-Demand Capacity Reservation (ODCR) is a tool for reserving capacity without having to launch and run the EC2 instances. ODCR is practically **the only way** to launch capacity-constrained instances like `p4d.24xlarge`, `p4de.24xlarge`, or `p5.48xlarge`. In addition, the CRs for these instance types are typically *created by AWS*, not by users, which affects how to correctly configure the cluster networking (section [4.3](#43-network-efa-elastic-fabric-adapter)). -1. AWS ParallelCluster supports specifying the [CapacityReservationId](https://docs.aws.amazon.com/parallelcluster/latest/ug/Scheduling-v3.html#yaml-Scheduling-SlurmQueues-CapacityReservationTarget) in the cluster's config file. If using a capacity reservation put the ID i.e. `cr-12356790abcd` in your config file by substituting the variable `PLACEHOLDER_CAPACITY_RESERVATION_ID`. It should look like the following: +AWS ParallelCluster supports specifying the [CapacityReservationId](https://docs.aws.amazon.com/parallelcluster/latest/ug/Scheduling-v3.html#yaml-Scheduling-SlurmQueues-CapacityReservationTarget) in the cluster's config file. If using a capacity reservation put the ID i.e. `cr-12356790abcd` in your config file by substituting the variable `PLACEHOLDER_CAPACITY_RESERVATION_ID`. 
It should look like the following: - ```yaml - CapacityReservationTarget: - CapacityReservationId: cr-12356790abcd - ``` + ```yaml + CapacityReservationTarget: + CapacityReservationId: cr-12356790abcd + ``` If you have multiple ODCR's you can group them together into a [*Capacity Reservation Group*](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-cr-group.html), this allows you to launch instances from multiple ODCR's as part of the **same queue** of the cluster. 1. First create a group, this will return a group arn like: `arn:aws:resource-groups:us-east-2:123456789012:group/MyCRGroup`. Save that for later. ```bash - $ aws resource-groups create-group --name MyCRGroup --configuration '{"Type":"AWS::EC2::CapacityReservationPool"}' '{"Type":"AWS::ResourceGroups::Generic", "Parameters": [{"Name": "allowed-resource-types", "Values": ["AWS::EC2::CapacityReservation"]}]}' + aws resource-groups create-group --name MyCRGroup --configuration '{"Type":"AWS::EC2::CapacityReservationPool"}' '{"Type":"AWS::ResourceGroups::Generic", "Parameters": [{"Name": "allowed-resource-types", "Values": ["AWS::EC2::CapacityReservation"]}]}' ``` 2. Next add your capacity reservations to that group: @@ -168,25 +140,48 @@ If you have multiple ODCR's you can group them together into a [*Capacity Reserv CapacityReservationResourceGroupArn: arn:aws:resource-groups:us-east-2:123456789012:group/MyCRGroup ``` -#### Installing applications & libraries +### 4.3. Network: EFA (Elastic Fabric Adapter) + +Applications will make use of [Elastic Fabric Adapter (EFA)](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) for enhanced networking during distributed training. To achieve optimal network latency instances should be placed in a placement groups using either the `PlacementGroup` flag or by specifying a targeted [On-Demand Capacity reservation (ODCR)](#42-on-demand-capacity-reservation-odcr). 
+
+It is important to note that the targeted ODCRs for `p4` or `p5` are typically not created by users. Instead, AWS will create the CR with a placement group assigned, then deliver (i.e., share) the CR to users. Users must accept the CR (e.g., via their AWS console) before they can use it to launch the `p4` or `p5` instances.
+
+When using the AWS-assisted targeted ODCR, we strongly recommend that you disable the `PlacementGroup` setting for AWS ParallelCluster, otherwise this placement group option creates a specific placement group that may conflict with the placement group assigned in your ODCR, causing instance launch failures with an *Insufficient Capacity Error* (*ICE*).
+
+Placement groups are only relevant for distributed training, not inference.
+
+In both cases, for `p4` or `p5` with AWS-assisted CR, the rule-of-thumb is to set the following parameter to `false`:
+
+```yaml
+PlacementGroup:
+  Enabled: false
+```
+
+### 4.4. Storage
+
+Storage comes in 3 flavors:
+
+- **Local**: head and compute nodes have 200GiB of EBS volume mounted on `/`. In addition, the headnode has an EBS volume of `200GiB` mounted on `/apps`. The compute nodes have NVMe drives striped in RAID0 and mounted as `/local_scratch`.
+- **File network storage**: The head-node shares `/home` and `/apps` to the whole cluster through NFS. These directories are automatically mounted on every instance in the cluster and accessible through the same path. `/home` is a regular home directory, `/apps` is a shared directory where applications or shared files can be stored. Please note that none should be used for data intensive tasks.
+- **High performance filesystem**: An [FSx for Lustre](https://docs.aws.amazon.com/fsx/latest/LustreGuide/what-is.html) filesystem can be accessed from every cluster node on `/fsx`. This is where users would store their datasets. This file system has been sized to 4.8TiB and provides 1.2GB/s of aggregated throughput.
You can modify its size and the throughput per TB provisioned in the config file following the service [documentation](https://docs.aws.amazon.com/fsx/latest/LustreGuide/performance.html). + +### 4.5. Installing applications & libraries You can chose to use a custom image or post-install scripts to install your application stack. - **Custom images**: the image needs to be pre-built before creating a cluster. They are preferred for drivers, kernel modules or libraries regularly used and seeing little to no updates. This option is preferred to ensure repeatability. You can use custom images as follows: + ```yaml Image: Os: alinux2 #system type CustomAmi: PLACEHOLDER_CUSTOM_AMI_ID #replace by custom imageAMI ID ``` - If not using a custom image, remove the `CustomAmi` field. -- **Post-install scripts**: these scripts will be executed at instance boot (head+compute). This option is recommended for quick testing and will increase instance boot time. You can run post-install scripts through `CustomActions` for the head node and the compute nodes. -### Diagram - -![AWS ParallelCluster diagram](../../0.docs/parallelcluster-arch-diagram.png) + If not using a custom image, remove the `CustomAmi` field. +- **Post-install scripts**: these scripts will be executed at instance boot (head+compute). This option is recommended for quick testing and will increase instance boot time. You can run post-install scripts through `CustomActions` for the head node and the compute nodes. The `distributed-training-p4de_postintall_scripts.yaml` uses the post-install scripts from this [repo](https://github.com/aws-samples/aws-parallelcluster-post-install-scripts) to enable the container support. -### Troubleshooting +### 4.6. Troubleshooting A common issue we see customer face is a problem with the post install scripts or issue to access capacity due to a mis-configuration. This can manifest itself through a `HeadNodeWaitCondition` that'll cause the ParallelCluster to fail a cluster deployment. 
From 12674f000d429b494ab066d764f0c334e108e304 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 3 Oct 2023 22:38:06 +0800 Subject: [PATCH 156/648] Update readme --- 1.architectures/2.aws-parallelcluster/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/1.architectures/2.aws-parallelcluster/README.md b/1.architectures/2.aws-parallelcluster/README.md index 8b2df6a9..4571feee 100644 --- a/1.architectures/2.aws-parallelcluster/README.md +++ b/1.architectures/2.aws-parallelcluster/README.md @@ -50,9 +50,9 @@ aws ec2 create-key-pair --key-name pcluster-workshop-key \ --region $AWS_TARGET_REGION \ --output text > $KEYPAIR_NAME.pem -# The private part of your key pair is located in the current directory. +# The above command will also generate a private key in the current directory. # We must change the access rights to the current user only, otherwise the ssh -# client refuses to use this key. +# client refuses to use this private key to open an ssh connection. sudo chmod 600 $KEYPAIR_NAME.pem ``` From e8a35bab4102e9f4db7d87d47485575595560894 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 3 Oct 2023 22:41:17 +0800 Subject: [PATCH 157/648] Fix typo --- 1.architectures/2.aws-parallelcluster/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/1.architectures/2.aws-parallelcluster/README.md b/1.architectures/2.aws-parallelcluster/README.md index 4571feee..534afbe6 100644 --- a/1.architectures/2.aws-parallelcluster/README.md +++ b/1.architectures/2.aws-parallelcluster/README.md @@ -179,7 +179,7 @@ You can chose to use a custom image or post-install scripts to install your appl If not using a custom image, remove the `CustomAmi` field. -- **Post-install scripts**: these scripts will be executed at instance boot (head+compute). This option is recommended for quick testing and will increase instance boot time. You can run post-install scripts through `CustomActions` for the head node and the compute nodes. 
The `distributed-training-p4de_postintall_scripts.yaml` uses the post-install scripts from this [repo](https://github.com/aws-samples/aws-parallelcluster-post-install-scripts) to enable the container support. +- **Post-install scripts**: these scripts will be executed at instance boot (head+compute). This option is recommended for quick testing and will increase instance boot time. You can run post-install scripts through `CustomActions` for the head node and the compute nodes. The `distributed-training-p4de_postinstall_scripts.yaml` uses the post-install scripts from this [repo](https://github.com/aws-samples/aws-parallelcluster-post-install-scripts) to enable the container support. ### 4.6. Troubleshooting From 14eee57cf8d957467b1542abcf3bab7157bde753 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Oct 2023 01:26:54 +0000 Subject: [PATCH 158/648] Bump pillow from 9.4.0 to 10.0.1 in /3.test_cases/4.DDP Bumps [pillow](https://github.com/python-pillow/Pillow) from 9.4.0 to 10.0.1. - [Release notes](https://github.com/python-pillow/Pillow/releases) - [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst) - [Commits](https://github.com/python-pillow/Pillow/compare/9.4.0...10.0.1) --- updated-dependencies: - dependency-name: pillow dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- 3.test_cases/4.DDP/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/4.DDP/requirements.txt b/3.test_cases/4.DDP/requirements.txt index b63d01a0..b88c3ad8 100644 --- a/3.test_cases/4.DDP/requirements.txt +++ b/3.test_cases/4.DDP/requirements.txt @@ -4,7 +4,7 @@ matplotlib==3.5.1 numpy==1.22.0 opencv_python==4.5.5.64 pandas==1.5.0 -Pillow==9.4.0 +Pillow==10.0.1 PyYAML==6.0 scikit_image==0.19.2 scikit_learn==1.2.1 From 773d1ed3dbfd6bd5c0a69613d32a22ece1841619 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Thu, 5 Oct 2023 18:06:06 -0500 Subject: [PATCH 159/648] Update aws-ofi-nccl version to the latest Also pin efa installer instead of using latest --- 3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile b/3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile index 93d164d3..c9feb9e1 100644 --- a/3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile +++ b/3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile @@ -3,8 +3,8 @@ FROM nvcr.io/nvidia/pytorch:23.08-py3 -ARG EFA_INSTALLER_VERSION=latest -ARG AWS_OFI_NCCL_VERSION=v1.7.2-aws +ARG EFA_INSTALLER_VERSION=1.27.0 +ARG AWS_OFI_NCCL_VERSION=v1.7.3-aws ARG NCCL_TESTS_VERSION=master ARG NCCL_VERSION=v2.18.5-1 ARG OPEN_MPI_PATH=/opt/amazon/openmpi From b0736a3a8e9217d102136ad82734b1853e51f871 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 3 Oct 2023 22:29:01 +0800 Subject: [PATCH 160/648] Enable simple, text-based Slurm accounting --- .../distributed-training-p4de-base.yaml | 11 +++++++++++ ...d-training-p4de_batch-inference-g5_custom_ami.yaml | 11 +++++++++++ .../distributed-training-p4de_custom_ami.yaml | 11 +++++++++++ ...distributed-training-p4de_postinstall_scripts.yaml | 11 +++++++++++ .../distributed-training-trn1_custom_ami.yaml | 11 +++++++++++ 5 files changed, 55 
insertions(+) diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml index 0760f482..def47596 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml @@ -24,6 +24,17 @@ Scheduling: Scheduler: slurm SlurmSettings: ScaledownIdletime: 60 + CustomSlurmSettings: + # Simple accounting to text file /home/slurm/slurm-job-completions.txt. + # + # Must be disabled should you prefer to setup Slurm accounting to database + # (https://docs.aws.amazon.com/parallelcluster/latest/ug/slurm-accounting-v3.html). + # + # NOTE: JobCompType entry will be duplicated, hence will cause a harmless + # warning message in `systemctl status --no-pager slurmctld`. + - JobCompType: jobcomp/filetxt + - JobCompLoc: /home/slurm/slurm-job-completions.txt + - JobAcctGatherType: jobacct_gather/linux SlurmQueues: - Name: compute-gpu CapacityType: ONDEMAND diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml index b6630621..6c2d5468 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml @@ -28,6 +28,17 @@ Scheduling: Scheduler: slurm SlurmSettings: ScaledownIdletime: 60 + CustomSlurmSettings: + # Simple accounting to text file /home/slurm/slurm-job-completions.txt. + # + # Must be disabled should you prefer to setup Slurm accounting to database + # (https://docs.aws.amazon.com/parallelcluster/latest/ug/slurm-accounting-v3.html). + # + # NOTE: JobCompType entry will be duplicated, hence will cause a harmless + # warning message in `systemctl status --no-pager slurmctld`. 
+ - JobCompType: jobcomp/filetxt + - JobCompLoc: /home/slurm/slurm-job-completions.txt + - JobAcctGatherType: jobacct_gather/linux SlurmQueues: - Name: compute-gpu CapacityType: ONDEMAND diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml index 031aaf29..e3172e99 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml @@ -25,6 +25,17 @@ Scheduling: Scheduler: slurm SlurmSettings: ScaledownIdletime: 60 + CustomSlurmSettings: + # Simple accounting to text file /home/slurm/slurm-job-completions.txt. + # + # Must be disabled should you prefer to setup Slurm accounting to database + # (https://docs.aws.amazon.com/parallelcluster/latest/ug/slurm-accounting-v3.html). + # + # NOTE: JobCompType entry will be duplicated, hence will cause a harmless + # warning message in `systemctl status --no-pager slurmctld`. + - JobCompType: jobcomp/filetxt + - JobCompLoc: /home/slurm/slurm-job-completions.txt + - JobAcctGatherType: jobacct_gather/linux SlurmQueues: - Name: compute-gpu CapacityType: ONDEMAND diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml index 5ee53619..6cf9fcdf 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml @@ -31,6 +31,17 @@ Scheduling: Scheduler: slurm SlurmSettings: ScaledownIdletime: 60 + CustomSlurmSettings: + # Simple accounting to text file /home/slurm/slurm-job-completions.txt. 
+ # + # Must be disabled should you prefer to setup Slurm accounting to database + # (https://docs.aws.amazon.com/parallelcluster/latest/ug/slurm-accounting-v3.html). + # + # NOTE: JobCompType entry will be duplicated, hence will cause a harmless + # warning message in `systemctl status --no-pager slurmctld`. + - JobCompType: jobcomp/filetxt + - JobCompLoc: /home/slurm/slurm-job-completions.txt + - JobAcctGatherType: jobacct_gather/linux SlurmQueues: - Name: compute-gpu CapacityType: ONDEMAND diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml index d991f52c..92230246 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml @@ -27,6 +27,17 @@ Scheduling: Scheduler: slurm SlurmSettings: ScaledownIdletime: 60 + CustomSlurmSettings: + # Simple accounting to text file /home/slurm/slurm-job-completions.txt. + # + # Must be disabled should you prefer to setup Slurm accounting to database + # (https://docs.aws.amazon.com/parallelcluster/latest/ug/slurm-accounting-v3.html). + # + # NOTE: JobCompType entry will be duplicated, hence will cause a harmless + # warning message in `systemctl status --no-pager slurmctld`. 
+ - JobCompType: jobcomp/filetxt + - JobCompLoc: /home/slurm/slurm-job-completions.txt + - JobAcctGatherType: jobacct_gather/linux SlurmQueues: - Name: compute-gpu CapacityType: ONDEMAND From e97e95a49da63767fa0f094233bbf6f3b0ef93bf Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Wed, 4 Oct 2023 10:46:32 -0700 Subject: [PATCH 161/648] Stable Diffusion Authors: Ankur Srivastava Sean Smith --- 3.test_cases/6.stable-diffusion/README.md | 179 ++++++++++++++++++ .../6.stable-diffusion/multi-node/0.Makefile | 0 .../multi-node/1.Dockerfile | 104 ++++++++++ .../multi-node/2.train.sbatch | 74 ++++++++ .../6.stable-diffusion/single-node/Dockerfile | 12 ++ .../6.stable-diffusion/single-node/build.sh | 3 + .../calculate_number_of_parameters.py | 70 +++++++ .../single-node/create-conda.sh | 22 +++ .../6.stable-diffusion/single-node/run.sh | 3 + .../single-node/sd_p4de_p5.png | Bin 0 -> 48155 bytes 10 files changed, 467 insertions(+) create mode 100644 3.test_cases/6.stable-diffusion/README.md create mode 100644 3.test_cases/6.stable-diffusion/multi-node/0.Makefile create mode 100644 3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile create mode 100644 3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch create mode 100644 3.test_cases/6.stable-diffusion/single-node/Dockerfile create mode 100755 3.test_cases/6.stable-diffusion/single-node/build.sh create mode 100644 3.test_cases/6.stable-diffusion/single-node/calculate_number_of_parameters.py create mode 100644 3.test_cases/6.stable-diffusion/single-node/create-conda.sh create mode 100644 3.test_cases/6.stable-diffusion/single-node/run.sh create mode 100644 3.test_cases/6.stable-diffusion/single-node/sd_p4de_p5.png diff --git a/3.test_cases/6.stable-diffusion/README.md b/3.test_cases/6.stable-diffusion/README.md new file mode 100644 index 00000000..0a95481b --- /dev/null +++ b/3.test_cases/6.stable-diffusion/README.md @@ -0,0 +1,179 @@ +# Stable Diffusion Test Case + +DISCLAIMER: The scripts presented in this 
repo work but I believe there is room for optimization to further accelerate distributed training
+
+We will follow MosaicML's stable diffusion benchmarking scripts provided [here](https://github.com/mosaicml/diffusion-benchmark/tree/main). It uses the `'stabilityai/stable-diffusion-2-base'` model. You can check the number of parameters by executing:
+
+```bash
+python3 calculate_number_of_parameters.py
+Model has 1289.952427 M parameters and 865.910724 M trainable_params
+```
+
+
+Just for simplification of testing, we have separate scripts for Single node and Multi node Distributed Training. We will also present a comparison of throughput (images/second) achieved with P4de (A100 80GB) and P5 (H100 80GB) nodes.
+
+## 0. Conda and Docker
+
+Make sure you are able to create conda environments and docker containers. For example, to install Miniconda, please follow the steps below:
+
+```bash
+# Get the appropriate Miniconda_version from https://repo.anaconda.com/miniconda/
+wget -O miniconda.sh "https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-Linux-x86_64.sh" \
+  && bash miniconda.sh -b -p ./.conda \
+  && ./.conda/bin/conda init bash
+
+# Amazon Linux 2 instance
+source /home/ec2-user/.bashrc
+
+# Ubuntu instance
+source /home/ubuntu/.bashrc
+```
+
+## 1. Single Node Setup
+
+When testing the latest version of MosaicML's Composer, we found different ways to set up the environment: with a PyTorch Nightly conda environment or an Nvidia PyTorch Docker container. For single or multi-node testing, you could use either to run distributed training. Next, we present both approaches.
+
+The `single-node` folder has the shell script [`create-conda.sh`](https://github.com/aws-samples/awsome-distributed-training/blob/stable-diffusion/3.test_cases/6.stable-diffusion/single-node/create-conda.sh) which installs the PyTorch nightly distribution for Cuda 12.1 and the `diffusers, transformers` and weights and biases libraries and also clones the Composer repository.
Before installing Composer, we need to avoid some torch and torchvision version checks by commenting out those lines in [`composer/setup.py`](https://github.com/mosaicml/composer/blob/dev/setup.py) so it looks like: + + +```bash +install_requires = [ + 'pyyaml>=6.0,<7', + 'tqdm>=4.62.3,<5', + 'torchmetrics>=0.10.0,<1.1', + 'torch_optimizer>=0.3.0,<0.4', + #'torchvision>=0.13.1,<0.17', + #'torch>=1.13.1,<2.1.1', + 'requests>=2.26.0,<3', + 'numpy>=1.21.5,<1.27.0', + 'psutil>=5.8.0,<6', + 'coolname>=1.1.0,<3', + 'tabulate==0.9.0', # for auto-generating tables + 'py-cpuinfo>=8.0.0,<10', + 'packaging>=21.3.0,<23', + 'importlib-metadata>=5.0.0,<7', + 'mosaicml-cli>=0.5.8,<0.6', +] +``` +Once this change is done, you can install composer as `pip3 install -e .` + +The `single-node` folder also has the Dockerfile and `build.sh` and `run.sh` commands to build the image and run the container. + +```bash +docker build -t mosaicml-stable-diffusion . + +``` +```bash +docker run --gpus all --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 -it mosaicml-stable-diffusion /bin/bash +``` + +### 1.1 Single Node Training + +Once you are in the conda environment or the container, run the following to kickstart training. In all these tests, we are using synthetic data generated by `diffusion-benchmark/data.py` + +```bash +composer benchmark.py --use_ema --use_synth_data --device_train_microbatch_size 4 +``` +And you should see training starts as: +```bash +Namespace(batch_size=2048, image_size=512, remote=None, local='/tmp/mds-cache/mds-laion-2/', use_synth_data=True, model_name='stabilityai/stable-diffusion-2-base', use_ema=True, wandb_name=None, wandb_project=None, device_train_microbatch_size=4) +wandb: Tracking run with wandb version 0.13.11 +wandb: W&B syncing is set to `offline` in this directory. +wandb: Run `wandb online` or set WANDB_MODE=online to enable cloud syncing. 
+wandb: WARNING URL not available in offline run +****************************** +Config: +enabled_algorithms/EMA: true +node_name: unknown because NODENAME environment variable not set +num_gpus_per_node: 8 +num_nodes: 1 +rank_zero_seed: 3179589898 + +****************************** +train Epoch 0: 38%|█████████▍ | 18/48 [03:28<05:29, 10.99s/ba, loss/train/total=0.1463] + +``` + +To see GPU utilization, start another shell on the EC2 instance and run either `watch nvidia-smi` or `nvidia-smi -l 2` if you get a segmentation error which happens frequently if you launched the EC2 instance with a DLAMI. You can also run nvidia-smi in docker as: + +```bash +docker run --rm -it --gpus all nvidia/cuda:12.2.0-devel-ubuntu20.04 watch nvidia-smi +``` + +### 1.2 Single Node Training Benchmarks + +We ran tests on P4de (A100 80GB) and P5 (H100 80GB) machines and here is a comparison. + +

+
+ +|Micro BS|num_workers| EMA | P4de Throughput| P5 Throughput | Ratio | +|:------:|:---------:|:---:| :------------: | :-----------: |:------:| +| 4 | 8 | Yes | 142.69 | 189.28 | 1.32x | +| 8 | 8 | Yes | 173.89 | 289.19 | 1.66x | +| 12 | 8 | Yes | 185.18 | 329.55 | 1.78x | +| 16 | 8 | Yes | 192.85 | 353.81 | 1.83x | +| 20 | 8 | Yes | 197.47 | 358.68 | 1.82x | +| 20 | 8 | No | 197.90 | 361.15 | 1.83x | +| 32 | 8 | Yes | 204.22 | 385.31 | 1.89x | + +#### Scaled Dot Product Attention + +HuggingFace Diffusers has a set_attn_processor method that you can use to plug and play different attention processors. A list of attention processors can be found [here](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py) + +You can try a different Attention Processor like below: + +``` +from diffusers.models.attention_processor import AttnProcessor + +self.unet = UNet2DConditionModel.from_pretrained(''stabilityai/stable-diffusion-2-base'', subfolder='unet') +self.unet.set_attn_processor(AttnProcessor()) +``` +AttnProcessor2_0 which is a Processor for implementing scaled dot-product attention is enabled by default if you're using PyTorch 2.0. + +The older self.unet.set_attn_processor(AttnProcessor()) gives Cuda OOM error with a batch size of 32 while with `AttnProcessor2_0()` is able to run with a batch size of 32 and yield 385 images/sec throughput + +More details on this can be found here: https://pytorch.org/blog/accelerated-diffusers-pt-20/ + + + + +## 2. Multi Node Tests + +### 2.1 Multi-Node Training + +For the multi-node training we've created a `Dockerfile`, and Slurm submit script and a `Makefile` to build the docker image and convert it to an enroot image. To get started please follow the guide [AWS ParallelCluster Distributed Training](https://github.com/aws-samples/awsome-distributed-training/tree/main/1.architectures/2.aws-parallelcluster). 
Before starting this section make sure you have the following setup:
+
+* AWS ParallelCluster >= 3.7.0
+* Pyxis
+* Enroot
+* FSx Lustre Filesystem
+
+1. To get started, clone this repo and cd into the multi-node directory:
+
+```
+git clone https://github.com/aws-samples/awsome-distributed-training.git
+cd awsome-distributed-training/3.test_cases/6.stable-diffusion/multi-node
+```
+
+Next build the docker image and convert it to an enroot sqsh file:
+
+```bash
+make # this will build the docker image and convert it to enroot
+```
+
+Now we can start training
+
+```
+sbatch 2.train.sbatch
+```
+
+### 2.2 Multi Node Results
+
+
+## 3. What's Next?
+TODO:
+1. Investigate why single node performance on A100 80 GB instances is sub-par.
+2. Implement distributed training following original implementation of stable diffusion
+3. Explore the impact of MosaicML's Exponential Moving Average implementation on training performance.
+4. Test the impact of xFormers
diff --git a/3.test_cases/6.stable-diffusion/multi-node/0.Makefile b/3.test_cases/6.stable-diffusion/multi-node/0.Makefile
new file mode 100644
index 00000000..e69de29b
diff --git a/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile b/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile
new file mode 100644
index 00000000..393fe5e3
--- /dev/null
+++ b/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile
@@ -0,0 +1,104 @@
+FROM nvcr.io/nvidia/pytorch:23.08-py3
+
+ARG EFA_INSTALLER_VERSION=latest
+ARG AWS_OFI_NCCL_VERSION=v1.7.2-aws
+ARG NCCL_TESTS_VERSION=master
+ARG NCCL_VERSION=v2.18.5-1
+RUN apt-get update -y
+RUN apt-get remove -y --allow-change-held-packages \
+    libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 libnccl2 libnccl-dev
+
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \
+    git \
+    gcc \
+    vim \
+    kmod \
+    openssh-client \
+    openssh-server \
+    build-essential \
+    curl \
+    autoconf \
+    libtool \
+    gdb \
+    automake \
+    python3-distutils \
+    cmake \
+    apt-utils \
+    devscripts \
+ debhelper \ + libsubunit-dev \ + check \ + pkg-config + +RUN mkdir -p /var/run/sshd +RUN sed -i 's/[ #]\(.*StrictHostKeyChecking \).*/ \1no/g' /etc/ssh/ssh_config && \ + echo " UserKnownHostsFile /dev/null" >> /etc/ssh/ssh_config && \ + sed -i 's/#\(StrictModes \).*/\1no/g' /etc/ssh/sshd_config +ENV LD_LIBRARY_PATH /usr/local/cuda/extras/CUPTI/lib64:/opt/amazon/openmpi/lib:/opt/nccl/build/lib:/opt/amazon/efa/lib:/opt/aws-ofi-nccl/install/lib:/usr/local/lib:$LD_LIBRARY_PATH +ENV PATH /opt/amazon/openmpi/bin/:/opt/amazon/efa/bin:/usr/bin:/usr/local/bin:$PATH +RUN curl https://bootstrap.pypa.io/get-pip.py -o /tmp/get-pip.py \ + && python3 /tmp/get-pip.py \ + && pip3 install awscli pynvml + +################################################# +# Install NVIDIA GDRCopy +RUN git clone https://github.com/NVIDIA/gdrcopy.git /opt/gdrcopy \ + && cd /opt/gdrcopy \ + && make lib_install install \ + && cd /opt/gdrcopy/tests \ + && make \ + && mv gdrcopy_copylat gdrcopy_copybw gdrcopy_sanity gdrcopy_apiperf /usr/bin/ + +################################################# +## Install EFA installer +RUN cd $HOME \ + && curl -O https://efa-installer.amazonaws.com/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz \ + && tar -xf $HOME/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz \ + && cd aws-efa-installer \ + && ./efa_installer.sh -y -g -d --skip-kmod --skip-limit-conf --no-verify \ + && rm -rf $HOME/aws-efa-installer + +################################################### +## Install NCCL +RUN git clone https://github.com/NVIDIA/nccl -b ${NCCL_VERSION} /opt/nccl \ + && cd /opt/nccl \ + && make -j src.build CUDA_HOME=/usr/local/cuda \ + NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_90,code=sm_90" + +################################################### +## Install AWS-OFI-NCCL plugin +RUN apt-get install libtool autoconf cmake nasm unzip pigz parallel nfs-common build-essential hwloc libhwloc-dev libjemalloc2 libnuma-dev 
numactl libjemalloc-dev preload htop iftop liblapack-dev libgfortran5 ipcalc wget curl devscripts debhelper check libsubunit-dev fakeroot pkg-config dkms -y +RUN export OPAL_PREFIX="" \ + && git clone https://github.com/aws/aws-ofi-nccl.git /opt/aws-ofi-nccl \ + && cd /opt/aws-ofi-nccl \ + && git checkout ${AWS_OFI_NCCL_VERSION} \ + && ./autogen.sh \ + && ./configure --prefix=/opt/aws-ofi-nccl/install \ + --with-libfabric=/opt/amazon/efa/ \ + --with-cuda=/usr/local/cuda \ + --with-nccl=/opt/nccl/build \ + --with-mpi=/opt/amazon/openmpi/ \ + && make && make install + +################################################### +## Install NCCL-tests +RUN git clone https://github.com/NVIDIA/nccl-tests.git /opt/nccl-tests \ + && cd /opt/nccl-tests \ + && git checkout ${NCCL_TESTS_VERSION} \ + && make MPI=1 \ + MPI_HOME=/opt/amazon/openmpi/ \ + CUDA_HOME=/usr/local/cuda \ + NCCL_HOME=/opt/nccl/build \ + NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_90,code=sm_90" + +RUN git clone https://github.com/mosaicml/diffusion-benchmark.git +RUN pip3 install -r diffusion-benchmark/requirements.txt +RUN pip3 install mosaicml==0.15.0 --force +RUN pip3 install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu121 --force +RUN pip3 uninstall transformer-engine -y +RUN pip3 install protobuf==3.20.3 + +RUN rm -rf /var/lib/apt/lists/* +ENV LD_PRELOAD /opt/nccl/build/lib/libnccl.so + +WORKDIR /workspace/diffusion-benchmark \ No newline at end of file diff --git a/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch b/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch new file mode 100644 index 00000000..ba2afc27 --- /dev/null +++ b/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch @@ -0,0 +1,74 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +#SBATCH --nodes=2 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs +#SBATCH --job-name=mosaicml-stable-diffusion # name of your job +#SBATCH --gpus-per-node=8 # Number of GPU per node +#SBATCH --gres=gpu:8 # number of GPU we reserve +#SBATCH --gpus-per-task=8 # Number of GPU per node +#SBATCH --exclusive # job has exclusive use of the resource, no sharing +#SBATCH --wait-all-nodes=1 +#SBATCH --output jobs/slurm-%j.out + +# default variables for Enroot +: "${APPS_PATH:=/fsx}" +: "${DATA_PATH:=/fsx}" + +# default variables for Enroot +: "${IMAGE:=$APPS_PATH/mosaicml/stable-diffusion.sqsh}" +: "${FSX_MOUNT:=$DATA_PATH:$DATA_PATH}" + +## Plenty of EFA level variables +export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d +export FI_EFA_FORK_SAFE=1 +export FI_LOG_LEVEL=1 +export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons +export FI_EFA_ENABLE_SHM_TRANSFER=1 +export NCCL_DEBUG=INFO +export WANDB_MODE=offline + +declare -a ARGS=( + --container-image $IMAGE + --container-mounts $FSX_MOUNT +) + +NODES=( $( scontrol show hostnames $SLURM_JOB_NODELIST ) ) +NNODES=${#NODES[@]} +NODES_ARRAY=($NODES) +HEAD_NODE=${NODES_ARRAY[0]} +MASTER_ADDR=$(srun --nodes=1 --ntasks=1 -w "$HEAD_NODE" hostname --ip-address) +MASTER_PORT=$RANDOM +NPROC=8 +WORLD_SIZE=$((NNODES * NPROC)) + +srun -l "${ARGS[@]}" python -c "import streaming; streaming.base.util.clean_stale_shared_memory()" +function run_compose() { + # if [ ${NODE_RANK} -eq 0 ]; then + # OPTION="nodelist" + # else + # OPTION="exclude" + # fi + srun --nodelist=${NODE} --ntasks=1 -l "${ARGS[@]}" composer \ + --world_size ${WORLD_SIZE} \ + --nproc ${NPROC} \ + --node_rank ${NODE_RANK} \ + --master_addr ${MASTER_ADDR} \ + --master_port ${MASTER_PORT} \ + --verbose \ + benchmark.py \ + --use_ema --use_synth_data --device_train_microbatch_size 4 +} +NODE_RANK=1 +for (( NODE_RANK=1; NODE_RANK<${NNODES}; NODE_RANK++ )) +do + NODE=${NODES[$NODE_RANK]} + echo "Run compute node 
${NODE} for rank: ${NODE_RANK}" + run_compose & +done +NODE_RANK=0 +NODE=${HEAD_NODE} +echo "Run master node ${NODE} for rank: ${NODE_RANK}" +run_compose +wait \ No newline at end of file diff --git a/3.test_cases/6.stable-diffusion/single-node/Dockerfile b/3.test_cases/6.stable-diffusion/single-node/Dockerfile new file mode 100644 index 00000000..0964e1c9 --- /dev/null +++ b/3.test_cases/6.stable-diffusion/single-node/Dockerfile @@ -0,0 +1,12 @@ +FROM nvcr.io/nvidia/pytorch:23.08-py3 + +RUN git clone https://github.com/mosaicml/diffusion-benchmark.git /wd +RUN pip3 install -r /wd/requirements.txt +RUN pip3 install mosaicml==0.15.0 --force +RUN pip3 install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu121 --force +RUN pip3 uninstall transformer-engine -y +RUN pip3 install protobuf==3.20.3 + +WORKDIR /wd + + diff --git a/3.test_cases/6.stable-diffusion/single-node/build.sh b/3.test_cases/6.stable-diffusion/single-node/build.sh new file mode 100755 index 00000000..859c8a44 --- /dev/null +++ b/3.test_cases/6.stable-diffusion/single-node/build.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +docker build -t mosaicml-stable-diffusion . 
diff --git a/3.test_cases/6.stable-diffusion/single-node/calculate_number_of_parameters.py b/3.test_cases/6.stable-diffusion/single-node/calculate_number_of_parameters.py new file mode 100644 index 00000000..f2da4532 --- /dev/null +++ b/3.test_cases/6.stable-diffusion/single-node/calculate_number_of_parameters.py @@ -0,0 +1,70 @@ +import composer +import torch +import torch.nn.functional as F +from composer.utils import dist, reproducibility +from diffusers import AutoencoderKL, DDPMScheduler, UNet2DConditionModel +from torch.utils.data import DataLoader +from torchvision import transforms +from transformers import CLIPTextModel + +try: + import xformers + is_xformers_installed = True +except: + is_xformers_installed = False + +class StableDiffusion(composer.models.ComposerModel): + + def __init__(self, model_name: str = 'stabilityai/stable-diffusion-2-base'): + super().__init__() + self.unet = UNet2DConditionModel.from_pretrained(model_name, subfolder='unet') + if is_xformers_installed: + self.unet.enable_xformers_memory_efficient_attention() + self.vae = AutoencoderKL.from_pretrained(model_name, subfolder='vae') + self.text_encoder = CLIPTextModel.from_pretrained(model_name, subfolder='text_encoder') + self.noise_scheduler = DDPMScheduler.from_pretrained(model_name, subfolder='scheduler') + + # Freeze vae and text_encoder when training + self.vae.requires_grad_(False) + self.text_encoder.requires_grad_(False) + + def forward(self, batch): + images, captions = batch['image'], batch['caption'] + + # Encode the images to the latent space. + latents = self.vae.encode(images)['latent_dist'].sample().data + # Magical scaling number (See https://github.com/huggingface/diffusers/issues/437#issuecomment-1241827515) + latents *= 0.18215 + + # Encode the text. 
Assumes that the text is already tokenized + conditioning = self.text_encoder(captions)[0] # Should be (batch_size, 77, 768) + + # Sample the diffusion timesteps + timesteps = torch.randint(1, len(self.noise_scheduler), (latents.shape[0], ), device=latents.device) + # Add noise to the inputs (forward diffusion) + noise = torch.randn_like(latents) + noised_latents = self.noise_scheduler.add_noise(latents, noise, timesteps) + # Forward through the model + return self.unet(noised_latents, timesteps, conditioning)['sample'], noise + + def loss(self, outputs, batch): + return F.mse_loss(outputs[0], outputs[1]) + + def get_metrics(self, is_train: bool): + return None + + + +model = StableDiffusion(model_name='stabilityai/stable-diffusion-2-base') + +total_params = sum( + param.numel() for param in model.parameters() +) + +trainable_params = sum( + p.numel() for p in model.parameters() if p.requires_grad +) + +print(f'Model has {total_params/1e6} M parameters and {trainable_params/1e6} M trainable_params') + + diff --git a/3.test_cases/6.stable-diffusion/single-node/create-conda.sh b/3.test_cases/6.stable-diffusion/single-node/create-conda.sh new file mode 100644 index 00000000..d4c28d9c --- /dev/null +++ b/3.test_cases/6.stable-diffusion/single-node/create-conda.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# + +conda create -n pt-nightlies python=3.10 + +conda activate pt-nightlies + +# Install PyTorch Nightly distribution with Cuda 12.1 +pip3 install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu121 + +# Install Diffusers and Transformers +pip3 install diffusers["torch"] transformers + +# Install Weights and Biases +pip3 install wandb + +# We will install Composer from source. 
First clone the Repo +git clone https://github.com/mosaicml/composer.git + + + + diff --git a/3.test_cases/6.stable-diffusion/single-node/run.sh b/3.test_cases/6.stable-diffusion/single-node/run.sh new file mode 100644 index 00000000..ebbad77c --- /dev/null +++ b/3.test_cases/6.stable-diffusion/single-node/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +docker run --gpus all --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 -it mosaicml-stable-diffusion /bin/bash diff --git a/3.test_cases/6.stable-diffusion/single-node/sd_p4de_p5.png b/3.test_cases/6.stable-diffusion/single-node/sd_p4de_p5.png new file mode 100644 index 0000000000000000000000000000000000000000..2b707c889b90043fe6d19a952ee219daffbe7e75 GIT binary patch literal 48155 zcmb5WbyQVr)IAJ}paLRTlwc4d-5@23N~53x(kR{CAxK?NB&16zkp?+*mo$Rpp}U(y z9OAdO_rC8p#y5W7A0K0!d%0ZLXP@U;&suZMIoA$&F8_@9BIQLK92{aPNeM+9oU;o! zIJi6K@!=;q_Xt?wUxKzzRBV+j^lcqz>dCn-VZKv!BSGy1_y^k2YKOs7fUz7!72Bbk`RCC7`HNN zV0%@M%5T%8*X7njNl%=w7tcsr^kjPRFyHgKmvrVIFYVN!$uD>A^QC;hcmE$ABk5}y zUbntqU93zEK}pRW=e-O2EX}18ws9IC7RJ=oJ|ZMUSh{C)y`FcJtCF}$cpCry zY6{MCFSdVwRT1a^xh&5A&&x`RKCqrSe?9PAU}N3NP@#=m!cd_By31zfSBNbZySE

^Sbmz14*&i-Q@%2WjcZ+_VDc|;PP923 zdSJgo?oB|FW7!O-2icmBqW8!n}Jc^pbwO6H@oU9Rqu z?qba!spc1+OGp&VOfBHRXWUZryL3~f(wSF5WcMnq>&E-!`Vgv_!Th&>aGo`Czqpe6ZMp_6ECVLw&vX=g*hl6d5;8)Owe$ zqR`YgfB63*PNlRZEnU1?G(1zml^Ic;^iNRGyW_)MoDsLZQ72KY7u8x%T9$LGJADXm zSXx<4#ygBn?u~mGbR~$uBKg^`4iB|`BsJ|_c#Bk(|-G=%CYFYLO{ z!_$-F?H!*-nlVprNSF-dbXW~tV}5;^mNvM!nBQ#pXG?X`jPDPXd|gxkdZ{lHcj0$3 zLGsg&t$FSFZ<_@yCvb4sD|g7;uozzc=V9ty+*;&u!cJtK_h0?6>=N1Qu5hw}dwnb` zOJvfYr32rpSGh(9Tw$DAS!s{8s9K!xe3ae%Xx#y>wLQ`vNYZHbuPo?G0 zWWLIbFpFMk4Jf7LHEw`ySRBm1dp(f8qe^J4#9P$uu=}TxQfH!ASBg}ay}e;-=A}M2G#%f!MY0<>%W6K`>dYFi zY}CEY!}F)=WFa{?t(>tXnPwnUnIy3B(ML+&g37VRz#CQ$4$Zpx9EROVJlLt}Y1Ilx z&c}Y2Qg!Nmud-`WD=RBYhu;&QZu&^UZ?2B=cAmABSni1Bx0$Rv2S-bz%vP${qxsWK zVa(v$mzl4LEi5dsIInh>2Mm6FzRmLH=RG;@rR7qsmJ{r{*qGa%##$BD{Qm1K|MYam zgoK2b-=DuHpmFq^ZHqKGpv<{;<3{u8$(|UNdwqWX8NBDggg1#S#SF`X2QjdMJ!x`O z(vfT*V`Hh_zkhGLG8m#-s2}~{^YD80X=ml$n9TF%muYBdKKS}_8Ft?~Io=yL4el^y z@%)qc<=u31u);PCy#obg0HZRIpGLh5a-}odnq4Jlhdx@7UccT%Wl2CJ3Z zXCL!2@TZ?*&=y7mIX6?Kztq95K-hB||GCGeQ5LR8q%UL5Qqf-m=9=`M7CjT5lC(>C3{FFcQ-kc{~JxdwuvbUCN|o>CeSgmpNJYAD1bm)+7! 
z-M4RZvF`qoxVrxSR~45Ba`}&rj)-oxCBlwU`Rb5ozbR@(nP$Gu?om&ECLjBCPzI}GkJB%tfQ9UHt#20C@(WbdV zZihRz&H8)oj0_Bvk9~<+wkqIkBFUIr*u5i$7h|2NoXdB(vlzDM^aC=(Vzrb0Fr>`m zn0jC0w%$@j4^hbfk&7nV+uPjz*=muKg2>LzQg9H}3pj2hy(rZ0Uu2%dLlf^ zPHIKHD}E#75fdBRCjwg6&;I`BUESQAwptjCdeev@q*bzCf1(j-MZ($Z9N|iHdkk+N zvvN*%h7zNE{DUWZYokuby92#(18B5RhqJEk9az;g$hv>v&3jU1uF%qUP1gGzY_+kg zR=ElaIjqgp6418WUrr|xJNj&fM*aT&;%Lm#}3CK%yiDY`}@@c zD;t|+rF41Qh3@w;ye4FqFW02XL=IMah(SE!!kx~xu~(-krb-9Viix%ja64{j+pi4P zo+S}}2=RQLi0B=ZE7@o+A6ScgtLdjS&I`A%v%aw%-7|(N2|2+RR{JtJ`L8eqQK`V| ztoL4g!<+AnfBXB}6IUn?rlVzF)<(+FE2@H=_&Xj+y!q(wd%k$h38F6Y`@Gm+0$EAFgvZDTEtvhIY{EpWryyO?-Buv>!M z+6e`xIX%%`T0#O(qugE(Rt-}4?T&nX*M#jYqBv2vn7NKv8@MTSfF(*u>tB46VKRm@om0Nnny0T<_u&GD3${SESghK(JNF}J`EV_Kz6Q;< zt2b}P>qN&3IShC0Ri7TSkDdUuqm+`8iq?D!r^q)rI2e~aw_;1$SI=6}JK@j{N-=Xs zm9mywpQcjmD>F+Km8g)YsHjD!!g8$_FJ3gkf#4cV3=e;|vpzmfBcr0CQpPpVf4=%) zE_SgMZWf7343|sMqH24 zR4=n-8pUKK9Kgvzk4FMXq=;nKl2>Nfbs6~nBH0$JG)_?C2w8$VDEn>mCw%Rak`i)= z;kF&SnZ7T~%0XMRZB(9byVzq=hbV2}i< z1z-EahY#d{YFjr3;Uv~?U@)Cq;|ioqgMSD#Brc|M8*~mvs-aAyjRbGph!pO7s+|3X zx9vDs^zfeDa{oE)$vPj^2s;Od`CbLF#zuO#>C{Lq_h`6nTn?MLcAm4!*XV-9P6Tiu z%O7pjlTbt)9Bp^uqCM3p&>j)v{Kl>97MzIe2;(Wgrk z+pd{>3?)ZXIZh^$O%Eahr#)UMGL&9MUnR$8z7tu5htkXe%SD!xkK_|YiEA=*t70aj z@QaN5re-6xI{OX?f5e@{>gB7f@E!^BzHK6c42H80ppWTw7K8>w9;*22V3{2=8fDhD z;|kSmJ~dLal>;@O;BqOi=)(bklU(M`WlrZEL%^)~HJ@)kzNd!DGMYbuG_kR{DOFr) zF@F$3IF79nLUC<^-K+Ry*hEx9|J3_2QcelYjxut#S&VCSvzs@C?0-g9Mudf3f$rg~>Dk`n&1Tv; ze2Y*>h0iOU?QTtDX`qr+IBnxBzzTngN!J3@4VW^%UO^ZC7S@{!u+%qWL>Kp@( z;u+EcD6vr5>|*+70NOaH_uD*{xO`BIa?Z3|%*ZqFlJ* z3@Ne3w)tI(LZWE&81`^?869~F@3&0M`>g74iFwT9)UBppZtf%h7{}^Jk;#DkHf7G2 zBl@S09z8;hM&#^m$3+_4I^bTKo&7TD8JR)v@l+xHLF49hbMEqkJ5!EJ-T>HpZaR#J z1AyCyT1+nLDxl@Idkyel%B#%RNJ8i5=kLuavRzb!#;6wVARs8H{inM9-Qe^!bW8}; z!=cDi*sB;mGiSH=W4D4uFyv4ar(h>$Ay*q68nM0k=^uE5{S1@Gg8n*LC^XU?p&4V<_jz%xg^ zG<4Dmu#Vs%b*@HvL^F*GQQpUE8*m<`7Z;;*;vlZ9f1W~4G7xxLw|gQK_uI|}>(trC z{rQOE>YtH1VFm(%!zakC&)kd!pujdz>Kmv)Q-0x&YgM~fKox?7 
zKGz=oHZhZ2G3F&c0RcMpA59bZiSTB_6*@Y)nFZ&e(Se{Wm3*)LeaDK*%3j&N@RbNP z^GRsG_2KeJmr5@aF&G;Rr!^FIiyQh8SVF7O0r^!}c%*fKQ#kXzB01q}R+??Aw5{z= zH$kU|s33sgO`_-3w`{!xrTU@S(RfE@>k0jM3?#H**u8jw2KmsDYr3rRs%vN%4gX{U z+?ECa6IhFx{kV*DgZ3Zg_}Yn`8_+wbiqfjm|C6C+sgCxV`u&Vnjyq4y7|Mbi|GCK` zl*PDwvH7TfU|>BO9jn(CfdE#bwgh1(t~P_Ujmi3#St^{Pmg?05B2E^TbG;lb9d@J{z*9`g}_0gpk~lY@E8%n`Ih2JpK_ zu*E=;eE$5|Q-_?~S7+V8EG33r=ggoQQ znuXHovC`tCt$izuK~5LCP4x=L5LkLhM#rg==4&$61*2ad6O4?Ga2*a(1mQ@=v)N<4cO2IbwOvV~Xu zle2H=m23bq(3vMtpT6tYyUWNqmy%YM=gMIH!*r;?bogfg0&4-ySuJN5>*lKfRg$Gu zoxqsr8VVtY;C=P@YL4+<*qfeG8-qJ{??RhxfD}W%fv)U|pPx9Vx8u-A8lt46bk<1x zJwrh(k+SiexXk~mac5^I0@Y|loN-V_hVCZ^2BJqBj}asbxszPj(Xwo@zN!*I z80fs1>h&W&qwtxT;dVQ$J(k$X6#4J6`qOwESzTvQsezV2wb=Y7;5z-aky4D<9d45` zQ5TFU85tRz34MS*6qhdJ3s$F7NJ&fU4i)GpxjaDbnDtSPH8SLkJ{3k>6NOymaG5=W4MK6fRi}Lwj%4=!kD-LN6C7j`!7qtFT0~a8gTi^w*$de#h64_$Pc~LK|Rs4WxScO6Of1 zH8bO-G+)7N@u6}Db{xQ2T|9#PM0cCdmg(^tZxvx}V(Dg|nj41$V<-s$|318SDwIIP zc&x%HP3qm2NF@+*pNO~)#t;hx^+s}$UnU3|l9(mw;JU3+`310&iy?v6E! z;S#LWBDh4IM`F1LT!;RfH8|t+_wV0-Hs8L;Jp(0b@m?2&R6>9@bS}_M@iaHjKD92K zsrMs6H#7hGb+A1z2>nx4Q>ao^RTXA7D>N6%ioVdNnj)ZWkj|%uz4}PkBD%n2fX$_P zYo(;T9NkPgSfD2pq35tRV&uVKF;mT{Drhx@2fKg}1OkGBoCclLWe)2J76Wao)%6FP zPVy09e`6KO3GkKn8uK?o>Yf0>dsQkYC+DYe-^IK>N*^Dep*{M&)i(3{g)^c(s=JeF z5H#uX@vSdpp{$BZ9-l78a+9K%$9$1Ke@o z+<~w|Ll15{Q4<>&7+^~-d=^Wb2a16x<)r6MwOI06|4Xp=r1EaCNP4 z%ezBMOY3r~D;d2s*ZuXes^;C&*T%pIQ}oB&d|>OJp5T1HnX|dEk<)Qwf+#)B8M|7% znyf}b&T*^Pz4zcNoF~kS^}=$-3}SK)97aaQZ^_B4EoH&MBzvO{T4~L6d`$)SOJL$xSi$IFFM9g1p@WS=RKuOzAK8F~gX4vfog# zIDit7?Xt>-!^mmWlVXd$LPka|V4375u?VS+t9c$z&Esh1Ix)9?`!9R#hU$~UHuuC_ z`l|=aRuO1Bvf;dkv2akxK7Fx?tU

ju|(L!W<4HUMP(N*#*o|)p7bozMHMggMk~B|=aw^7{Y7zc0Wzo2 z60su3z~*0ei1WmzR|m#3{iON1UYL4-`_!5H7oTxtHI_U!i_|eE3YU#)W*nDnjcL0>g7}0&z1IShupQmq>MpT`V5AK zeB^JvX+x&NQcmU*E7->l=k)MOzgTY+Zd0QjRAA37f7^K^GgR8fht8SZz0$GU?bz|D z6>G)@mjmNusfB24cXm}f9dD}cw&0eBJABUag%bfYw4&fGfJ661Rf{6I^zuASPpVGr z`P*r;BVYQ4$IN@T#R#kQQE=QU;Xd@U8f~jXJq4~}Pn*K@^_5pA$T#@(4L5lim$GUy zJ#lV$Uz+SZ2ch19Q`P*i^7^N5t$LrK5RH{E_5;dZ+`HzxIdH|)Z1Zd0q4m3@!;bMJ zp-x~He+_?}`>#H?qY3{_Xpbz3GNBn+;t_+s{2q2mz zoD^T+|G5&8i$A4GFKPQhEfd>eq&O5&&KZQbh=R;LdXwn(>HoW8=hf%QmyYdUd~K!Q zHIn;C=0#DEY2v@L4k7`I$iDdU$h(RBS&KDK*9kKr;6hp37F-Soo9NGf@4tI}qtGG4 zum-8vFV#P}X5JAgGY2b7{rdH*+)uUp4aEU0Ju^*O4Tev?i(-@%MW44g2!4_br`*Abs@O1Mn4 za01IFVEwooU`+mHOgx@#^m|CiMje}F?_1rX^?{i6fD}RwLb(X(YEm*O!5v9auY+7x z%u>K?uI7?;$$4dQrpg60)bPyLU6x~DZGL-D;t15b7hiV%Q;mKZS0oh#LL_VqcuYOX zH6mt6JlFgOS$kRc4nO%zml5C9uaeL1*QMa=#b42iVyEm5+%!Gk4Oxa+0zzYt@20P3 zVFSay3-F@19F~`VMq%KT%2-R>jSFgz#-2EQ6D_rj4X&PF0yHtA`G|~NEfdw>pAJK$ z5$@2p6@{qH_EQOEp=VQ6PPslI@*0`y^^rc$&e>r}mGGEBa$k}}qfeiSl%}&0vfxFr zum^8P!f^rv@JZtlH!3~885Y}fFL%Z4oQp0v1w|+}tk{S(AUOPx#i-WGq^i!A)j^y_ z2W(JSF_H6ci(OP`pB1|6qHZ5?QFq@gb;in03E46LPdns*?2wU65F7QB zv?JjGzd3U5v4bkFMWJa@n+Aiiy#qZ5s1G&7UXzkOLQiz^xi@L3)j788XL^43niB$l z1KMb<_T<~hB%f=Wfy1|zQ4t+3yWy|X;cb-l^!LB#Hz)tebkq@Mw)RK7EsYq#lt1^c z9oqX={g%@Gf}D`ZmpmmM7s ze%iF`*S{4KS4bp!UpBCpt3Z~ynT%2>Lq?i|RQ25v+?*9PGZzoek zUT6*ozTD|IE*1AdApp-;yxf6nG+$E$pE&sE>NB*SEPkWp^FEMs@Z#m=9YW0|G1ox|7RP2BUFtr_DF3AM2xOE<~ppS3pN&&NAe_KloDiZW46*p4pncF2bnD6-*h(1~@ zwC{Dcwobk&*LSRjS7yGOe5|~@T|$(2!lX%es+L!E4V&%Ob9r1st-YL1{-yEBeOdJz z!V+rlgo~(4BS_BgH~&?N1QFMYhwpiX&71)5E=F60=H+I{AqkLU6vx-oKgn?+VP-s# zEs&kh7Y& zda);PTY=TDHSZ6H<|myX?R9#3XOrr-y-m%8@ygw|AFhP|=t$A}sCnl$7*ikfO!u># zNPr<>a&4o3K(N_yS9z&Ns|_%WJcB_`ds5{8V^FWsni7DUx1sSm2<^wBQZHRkT;=`@ zT03>NhOH|i*eNs4C{1627iqlCRPub6C~Nzw7_yiwEcn6ao@4yifC=A}0U>6mwFZujubxHOmF&HAJ@I_T_?&7P1|) zi)6gr#zJ&uotNaQc9EuPSKM8DSvFu1bNJGmdCuj5f^1-(0>CcwFE_Lc4Y`%$(C_bv z{ke{25(x#uVqw=w5FRbeEs7WDfj 
zpxQ068E^lAFFCv`nj;60KvYUCIvD=7xgikF=qF7>Ch{Gyo8DP1=WMr~Cg;9)r17XrxCP$mxjeDe^tbLQ zA(hXu1TDrA_4Gj@C7{dE?{_>I2n*_TRSu3BSQOFyVwQDU^(+=RIigX)PF&a^SZ6cE z&(q5*&@47EDmCkK85&>o|0$l9W_|;^X9AutPsO11(`KND5qR8+{a;0r-@T;fYvTg6_L)pQgd|VA?n0LR>5I#$LAE!ac6)ry2F?=f4m?blxTPM8P=Ge`O5|p z4X>b5YVKkA-=P?wKP1_k$}K)qn~(WFh!y8~8oYem zcDo*3YCf$B8m?7tPSvR$ZiWboz6x+!9WaSTiQI~LEG9UAu(Nv2nf$tn^0&LXd`bN_ zpOxUrqJU+R3@&*(HUpm2rLIIt$r%>p`Gt06hky{+uRAC1vM(=Pmunzx|JnuzJl*hj zC27ZPi*Y{!53Vstgp&vV2AThj+o-66jY~K@+TGdvi-Z4_F#Pvfqy%uFJDtDX{Zkb3 z?+|<_7Q{F%2q!20KYaHu#E=Q3M@{yUMTmd1j9*7W{w~bYemsVC$upP!BwYXgy-sov zmq@Vo{lSO(#fvZ)US#5s_BdGmN7QNQ)kKzVL_~z}v;wCbn0!7&NhlofYxVlgPBbPm zkkK*pkdfc!{hymzm$~s(gp^}APF^XtDDRiU~RbRV6sU{X2W4h%@QhX z(tKM1yD)4YlYq8Fa< zNava~d+;XS>r9bM0%~}>j>ggW|GvfN5|t_L52F57GhA}LYnB2RniEV32n`bkv=}Hv zZKkPLx|3LE7oW@i<#+1%gVOPk3SO$gj{*Kffkp@!JM&F;vg(C2eYyu$mIZd92!A}{ zCADJt8S1~>5Y;Cj6Pr!$$bggE)O4x-uvJL$8k@A0Y#@vDHG0_BG9Ia?>Q6J(WkB2v zpKT)fgCqEFVa(L`2W8_4P;r|nD$B^w>o^yhuN6wHz6d0^d{e?}=@mX+#1zDS7I3-9 z(!;$yot1y|ByNcr|G~Taw`l<$n-|_?da1kO?pvS8%NCmg!Txxt2L#eExY&i<4kEA%CMs{``iG<=kU#t^RSXyl)2s2?M4tZh)R2VqRZy@eKXc^-{O~!{5bR+7E>9SMJ z6LL7c)JRr!JDw7|@vsb%tMACtQ=-`($##~!dOGL08^iz9!t>v@%0HKUqTDv(rkMBR+kWZkO@vJ{Bvf9DG9=g9T3ZtzrV-=Q zH4vqRHGH2%ew9+BdK?AL7&3s&d=>!oAO<5?IYsD3R$ z{h_yaZ6_8!J7IO&lT7V4|Fkq){bcpn0fQFFHZ7(RW%Hf{8KR|rB>fy;{NFi_3q4d5 z^`<#r+v#_){TK^hlY~(-m1$k%4}4Z*%7UPtp{HZeoRhvYz$vSLt)?-pQecIEgM2-! 
zJkaFHEr5i^19e_w27%ANjq@L#J->?m=@S8y+ZjeW$K;XZzzA6xW3x2gJ#q?xahD5C zrrbCyYZRNFGz=poB)BiRqOUUuL<}p~+%Q$SjNcr1sBsvgeY+=#wK7jBf%i+a;NQwm zwqGZ*vuGCjY*fZQqk?{{D}d&9Po&I;z)lj<6Fq9y1D*=b(Agj~V{@Zd=y=9NzDTZr zlOn9=zDt9x`AtHKYK{=IzT^j>c}pzM?t<^wzqR6zD#UOnO|*6vdf2tt;@I@wLIo#4 ztahx`0zE-VASiI{=mQ>hz{bT0el;^E*y1PwxHW@s1>?5_1X}QMN#{n7AR50Ty!>BV zcE$YT^rM4Ix72L#(eFwie_^nH9@GJV@~cHF&4FBU*E}vGm-J$JKAZK&#qy+Q@cz}}`$R5~ zkia3&2Q+U;B&(OGUBn*6E`M%(56+#ro|fWRFpwrrCBp6GF+fL5O19poXwtl-dCMhY zwB43{5Jp?Wy?6_*=T(10PmkBME%YthBbLq>-T*@zCBA|D$@i@QV*HD@@h@XeYdtzp z1FrtpR)VCVFH1t6e>fT48kbI26?$GL9jWdhMRsR|-|!Vr37jsczvb5^?svP!P)LaT!tb@S zBNu;;GBswnAbDfT7Rfx5DPELhKg-#I#H)y)rz=|tA|P}H6sDCiw6cz}YkvT1PP~|hJO1kPx+GHaHYrJQ>nPYX%b(Tsu zSq^CT057y%>@Sbvej^?(dfyc_qY+`fIbes1?=&W@P&$OPNfuA9*d?W)M#7foi9jIInhhBipE{4~bKR5nn69`Wb7tYHPSBn>$@$~1c z%Zk3SI4)Y1zw^8Pg1@+h&V>XUo2%A6gO2OYD&0zhYNl&tf5SoXhO3Ks3HP;|E+};O zj_d503BAJ$H^%FAB`VMim~keev3|fN@I=}jz6*H#TjVEn8x^xdhTV3bf6IsXr93jc z!!?-T_Eud)p{MQ9U*no~(xAywmKq!OQgDyZi%UU3#R3Uy?j0USuo|iPM)I6iosK(^ zhn4lyn*Iq&{I?E*@aIco3M>WV6E!wl-_iZ#xq9&$Xz*1HwLhymBjCbX3KKMt@WTS1 N2lrLwitasm@jnoe&Y}PS literal 0 HcmV?d00001 diff --git a/0.docs/vpc-template.png b/0.docs/vpc-template.png new file mode 100644 index 0000000000000000000000000000000000000000..2f3c93e81e25558237eea06974a34fc1e4584adb GIT binary patch literal 330494 zcmeFZXH-+$7B&nh7ErLzl&XkG3q|QoMA_~%p^p+ry zgx-4#Bvk30(DLn^d(XXheD4_V&H4L|%NQ}+Bx~)p=9+V^XU_So9rjdH?L6I8Iw~rv z^N$|>p-n|aCq_ken(oXg;FJEvYE|HaTw4{Dr;k)rc%HgKt!*8wsHh%>#m1k0`f->g zXqA)|`X=VhnTKa?Vz3I9i&meXG63?d#H22VNTkv!Rw9FTuGMRqARl#zXh5hRg z3oZM=`JeA@En5D1+j0R#|R(iE9S=L;$Lve0Qezzl;o9d^8fv{$1EOpa`nTPd9 zHEal%^>br9JXCaFXifypZu~^&6&3LWHI*Dy*P!UXob_w9ygrB`k<1^2h&IhqQ-xeh zZt9%1;_;{HW>rAkI8D`O)>w$Tp{DSQ^GxrJhp*4@#KXJ`}3<* z-k0Kq9-Rtmyn6^YF_VZ1oenE1eJ9FvO;~kEk(c*l^xCsG8t>T})jl_MJ~;$wZQxvQ zwz#=gHoK;dw0@hc4H?F}ulyu!${d%!_@h$T}$s3 zp4P%>xrX;C+kN&KUfQ#rb*nuwW8bUtDpU5+eCvm~=BYn4i+M)*EkE-mPHh-Hx+G=) zxiTiPs;A^urSgNi4|ARBVerIRljrVDcWkmAoa|^Unt_GgF_}Q!ot~4_88krd!k1Qe 
zgr=JYXTSZi-@4n8vS7|***@JC6}yXJ^*(58{G*wGmNnZj!f%QB)bNaJt#o!ozAtRQ z2byjh5hGs7bUjg@d8O#y&4+5bG;BO)h%@K!Mp@W)G^!3I;;k+{cYVz8I;2L7`ZX`! zu;clV(dY=Z*t%I(L4)!BwEUZS0~`OvcT^Q&7%XX{Vb_6qggidBu33r$09>xFnQMhF4Y;@4?GOQGz#x&B;I$#2Qgcm zzjYFAK|e&@a*uh4(XQ^I^SL{(rJT7;Xa?&voKGv9ln|pbdv$S*3x4myo7YVjn6*RQ zc~qJ>eugBxm!CdCdvEF~$0sU2mFo}b@`DEMp`V`BWmtY~mu~(=r^{!auJ8PK=>C^$ z^XIp&sQ$WTDRClrK65I~)C# z>M`{LwS}+3PoFTcGlz$(@ulDRq4(puu0YP!{vf*t9$%f(U;KF9$A5=KC#_YyCvsux}V->sbW-ob^P`k7KSf=kYn@Y2|m^ytgi&I}3XAlqRhRUXj-O%x)-MN9sL&+NI;HpOD3)+hlM{M<&PFFkNfva@c(d zUc@8%huS?id#?4!_5}18OC&n{v2z3a(XA<|sdSotntwVvT=#+cgQ)A~E~S|CMo6lQ**E09HB>BC`q8exseAbcbGBs8 z__Kq&j;FiNv~+Me6M4tr6Zy|G#kC+|7_P{Os}04r#ill!)~xMHQU}2|^K3J(6jYkH zp=LeWpNTk%D2pHlK>74W4I}Q3TFbi2%Of@=q3%EAR^;xdEX&z=n2G6L5muGUfOr35 z>+bK{9*q6H(!1Ql*?W5+qQ^LQ@W(DV?MG*yRu8TKSDJcD=pW>{y;VeBv*1O?UY+J0Ve>P@mPN_=e? zZWuCr-fd{C+vFv3w8oFYI%<=nJNdWluJA(^;Rurm-v}l3Dy#Q1My=MZ2KuM;*Y%C_ zzk*>N>n9-BREMtAdJXA)Y?3s?$l z3p@%s=dN}#w<(I1<1{)W7t1;|W;>jCWZ}7%T^(illTHk}3=&kTE4x(7gjmKB}x$>E6d8(xLRp4Gv z9>IkN1U;>mN@XHto|~vEfRyBpEe_}X(}?o3jKa>~rxlYc_X#2DtY(Ja5Vr!l7toI* zDkGx#EiPGJ`h5xMtWw!O*8PX3r4{~6bo*$tU-RxXbdK@t*h#)uziyO&i}^O!?9@CO z6RNGC{aEL1h{N;BHIJM?JH6kbC0!*dwuA-og~%&=@HcRs>*2RF`aWc*RNg5zb^w8x zT7R1S^n4NdkcRJcar#oou!!uP7h+W)$q&ao%dH-r`AtU^H+-AdzYD5AP%jj{ZCv(z z*di4=(b?MF>eD)EFkb*J{tD%@pF!$%zqmp(VemPxqtgP;b*lUD6eq{EDR5t0TF?wY*q6{^P@W#e!AN1s; zG^X^V{QUXlXPoJ&>awMMz<T|y$w2NI;WMWK8@nK#2M57X3)J)u76;_cEGV9bn*MPN+>G_ z(x>s@q5A6w4M_vU0tr(H`Gkhu+$T3wo#U9X(lY@Rz9Wi3~(x!^bD ziLHq$hzMpCHQ%UpPyg8Ov-5Lwrc4NAplNDl=7^m2&R?i%*#3x;J@ow9ipX7f+Z!2| z)FEwLJ?ELWus$yv*m^MVeKdJ=aOKU|0+QXMe0SoN-%m5x>gK`1uDNnkfY^>0>Ei6O zZpaMwyVjjFU6RwjUq;(A&#KPq)&1UgS&RohN1~*rV?yiQU$-}kd+w$D8ksBW9DVB_ z>V!-nSx$c6n;CMRN)rs!M@sm3;6*!81<{RB=W60=xB`pU^Y=$pR+n(4(QhM_eA3oo zODf%J+tHF~32Ac5Sbr`K;|(Ik~-rx<9qaj{!b7 z>-^Brjf#qekNl#3qT1O!1rh^^U00&x;o(tmePJ!9 z{fFv5n*;w-y#CVN-C0gl)XU2Y!uaMqr)K^`q(UWPxrEE&gk+1PS$ND}j-`v>vL`JD+!M40K)5VN1k$$J;R ziyp3wZJRia&VYnl3zYalHr5@7n 
z&=X`#@7W7HcY~;?X-=N`FBf+s=u7Me3-GBk5JTnBtjoVgw_WO5*7wz=?wI*E;APQ- z;;-W%)-%c3RFKl%R&4lzq5SCF5M6qsgYak;tJ4otSRO~#Ud;w%|G(brukl!T(%)fW z5mwe8D;(&+{NDY353>aJ#o;|mVJZ;Yn#%G-Ix&6vvnei>_-?qI>q1I6a zm)(4Cg@1fMl-M*TosQnRU&&z_df3ym?8@sa)9@wn~B{~R|y|RSeDyC z&{&(2@o#`0ez8~w4}iuE^i748%*Hg;A7sG-f)Gf&Ws7j8NF@K8H{%EhcD^fJrL7yG zg5kf=cO&1|95-%r)G7Poz#8{I!L-x3Htr}x8=rkGx3ROT)H~+Q$){$oTh}b zPuaPh1K;T)OW$5GR(pq-1;lly4Ye*!Vv4p}#s|3U6}i`V+s^OT;rqrmx^!ImpS0_l zKXKEMe}1z=R<8Y`>Fg4RTbsDr`f@47wp!>H zSuOLRS(ZWDkh6S&OG&cmqy3$dmI(AOth=z|FgT#_7)O>~(nx_oT|x+Bgs7Q9Tq&^L zQ|KOuKnd8~QQWk+05qscQlCOYog}avoEi4ZTCLNwlq0GYqqs3VTGrc%;Z8IQ-!v#zdj(cKy zfN|hTiq|&+Tw674wcm-GX2_PQo`4&QA@)>zb9dqkD-w)v`nL3$c#D^7 z8T)=H(RGg>ON*pf1Q!y-gIy}EzW4U@7$OS;oD3IsmbLjXJu}~uu3F}?fb*mV`m&s~ zgT(!^?GB9UBNl3OeZ*OzZKv7fJ(n&h`nC1;*+`{Pe!G`57RCvRdA`IUVYUmEzAj*ji&R%Z?Va z|I~6p?esto5z6_mr;-$R<-&>Q#4F*BPvcUhg zMgLnE!G`>6+e0qmPGftA=SKpI;qC_v%eq?@wCN-Ox9elgq72*s&d+zoRU|IW8H(s%PfVvMV0{@816lT4?)pH>XiXiq-Oo z9-Gdjm`ym$8e>a`=XSA zr=cIk&iC$@kdqiVQ4XV(aADc?8irw6J7_&g3=+z4UibME>>@XaULbC@SgjLN49h== zp@SPf6qm28NEwrrbW*_?B4icMM?hNLe2-1!>|OHc1FZs*?DuhD#( z#f^xbe@`!J9m}S32C-4-F0g?AV(B7ZtLlS_GsqRF179`CL%O?{IkPEZgn745;AV%T1U3;I9Q-Du+DQ2(`5vndqf|5p<$|?fp@XwaD{sP zae!~*+mTWw7;u9SrsB)RuyrJId?o1uBp?A%v0*8fdLQmlS(?$i$KmD<=h}4d1lej) z42jOPSqDGMK*7oTgAmBGDW5L|m?i@#&cHHRPme8$!=lZKNu35RP?A8)x_MyYjDQJS zPNZ&5n>$MxS1bM(?feUP0dil9yes%rW+S4zvdc1n8LbO=Ahy=m@ZS58{kdW+1{P1^ zL91Gf3fvY?f*bH6iv}y|4H5mhqM1~X>0Hr{UI3L@QM~O1Q3dPsi0!wE69wS{jR`bx zU{Bo;gxi{p{CsaoL>|wV5DZa>fOfzet;=q$j|@(+j?Sn=7?xr-CM^d#&A!*9MH-B1N!Ni*LJ*n1e;erBDA z;`D;n2TL9;2gBZ_0S|%@GDQiRO*z7uIpNu+97b*XL^?R`4QnqLxaG`!xSQ_Zz3r3F zcLY=;xjr)TT3^F3nFuqAS|91=BaFNeG9}=#)Cr-o))2-l7OltBjU~*hI`ZcSSF=R8 zQ$nuvB3F{EzV%8B*7|O@nKvl^4clL2W)sIpm8|XlB;l#=b(5kkeqa)pKBd zSr=VxN+KrWqCwIFlussKe{yq7qNnnc1*l}`S~+~Hq{!0e9YU!@=E&-sjV*akn#~m* z=tqCj4G}j8MyQ1Dh(kNM@>zyoU!;6K-)VSJY}1@DFh@PJ;yj z=`;`*XGBFx>m8Q*ux;imZYL`96^g(Ug*zLPC)wB;L}!J>G!sA5i_hkW&F+|1dxllI 
z21@-6YjZr~ujpw4Z$B#S6y-KW!QJV2SWdG>i~`=J%ou*N_~W9!l?r(p7TmhmaREh0lWqLa(}6U8Khr zKaLny@*CcONtxA%>RRM63Jq|#yb9kwuw`Z|UKg%Q0IY4>Z)cv1Q{Gb;iIk$;M|XwO z-qcFvFo*9;a6f($jAkG{hHqcPYydk_KN?WJh}YrgOWA$5!vQq@qOCr>vOYYdCBjyi zUdVb`HyGF%z7EzXZt(H4S0%dq4dHqK0m0Jlby(1WZG z@CeL=`&(egeUDJ81EXrT{A=eNm&~I6hHB91YjMCj7*aR_1hFheZ8@TuwNwyD4dwj@4~TXFu?y{GbrpDT+pi@~PnOV~)UplO(R@juJ%gBaIvW2wf(G8pQ%bQl-8E)TFbfh7xd=>P8~Pbj{Kc}Y z^Bj2xDPy&0yRq~)Vqx7c8i02Mg7cP7`+Qj<*N!-kW_%qfRfdM0$f;VXauh z2Vz=9QTppKHmRQeZMU^{!(C2LI=U{YlSX=f=53~PLDv1OD0*f#)c)^9@6{tl!uYPz zJ5ek=jCjz-O5cX#pG^xy&fhj)y`SA$w*Jv>B5MpbmxeVZAyN3RX*DEQ;flILX2K~- zHSvq%nLB)djXStcZ@O5iCQjQu`HLJt@_av3`zY`2z%Gn|*IyDnH9g8Z#kA5sM4>b#!a4MK#V_UCw2G0j=W zvY=XryMP-;Jn}MsPMpE_-;0)9NG?+Xv@<{OUMGdi68xLS5mYB*^}8=Y!s_t~)b2QP zc2N1jI)Un)%oKa+4mKKD~}_EsWzJ&8M)Ay7#M6 zV`4h9RVbPVRWLO2t_#_OcDj+K)ejF!&bgO$#zhq1I%oO5jk*S{8ys+6if63wd>g0` zNz2x|F*@!idc{0Yv21#uSBX=&4t)J9g}0)n$-hSy8nRxaeC&$y@j_wFgkwQv6^3Ed z@p7wh?Tf9IaUmdL6nx7_Qg}OadIAET>{r9;(2u4nly@-a4l{P;1=l z)eTYEGWoJJ@a%m6jB^SWj9)r2x_Y=-v54C>82kNk*F1`rH34N=BYEygq68o6*w;-_ zU45^!>2_z6?D@oRd|kb91KH)2ZD(ua7cN*h1g|Q%;2Y-$?3)wr*lIVA%XzIZ5mbTc z4fJ(PH-9Ab4^F@dnhLdcU8|oyMnx5vR(~=nBefji{gRw?0U2~EN#AlpZKfXn>{oev zF7jF$L!?$HEY5GGnOHPw83E!@pRSKPINX>+EMVAmlW{|LCA<%tvk0Ywr?HELs6-s& zkpp~89n*RT@M&<-+NBn{MfYymx#ZhB2mM?cbgY?HHHDQcvoSWm ztH&e|?fOZq7;URI`IRbZGoP7lZ({$T+v<+B`^uPXPmPAOnQ|17klvj?Iu=4aFZsEb z^i>@c{UX(ILAnT62>akc*p@zp-IktUDV$F8a;^dV0HUd$9y4N}&$U=s?uqQNad|0u z)X{Uue_H9I{BHgsu|b*p10$|rs1VBGvj>ILdJG7g1o}pn-0`K$MDG7)UvII8PPg~S zHVccyA~jcbi%3X63^@&4t1hcak=5O0XC^NSW4vI$$1(!^=!CqT9_U32fL@XQMIqR?hWbp{1b$(xWdLDyF}>Stfpu zPhP1_gUu56`U@GGPm8c8CDWt?D&XJCObzkU^?tr;aoBz>@Ey~H5Kh|)wOFvPwp7)+ zGZT7sS$X#`0x__U6HP3x-)KeWzUGP$MY1vTr+I3cc&{txp1N85k;UxguZrbNY6RwY z?hBQWqr8pP3BTsO-?@eKyN@DuOI@d%p1DjvuQXCvUqi8*n90%hMJX?AnOH2xJZ$9|{JC5-KH#z#d4f(S z>5|1ER=E&v&g~7J8pC6*WO1c24lzgf_Y|;LJef;zKjg;OpiDp0pM*VYwX&Rv)qgj4 zN-2TyBukoD?qM!1oAmI@BY);LT@6H>NIS`4Xw93@UT&9|n&!4@D%}`o>|o>pqib40 z3{tNTUDnc5G;{u~n?-#<&KB$m&TB_TZuSv^7cZ4b3ub4r4iJzGNICJ;wu 
z#{D%8D@zPwnenolC71E}b*iO&Lk6H?X z%K98%jNLzkKr+q`M)Jjjh4;4-MQ1yYJbw2Hy!Ds6cuhn|pF(kFs76LzIV%P`Y<>^6uU(zX zrH#YcFN&a8y~bSi9?Uds_?+L)Vzn@_I95XZ+PBA&x9N11P_#%nF2&raYGj+?U_97m z=L&xu$7j7`@`idF`g47s2z}2rTgC1a!KiHKPh2ZG+|D^N2!Ft8Dl^d*kUP4v=LIDE zkkh8xHc_&(?+Q_SU+NFuCOJiq2O^>9L#BC!`5^NLMsWu~(jMPYy&9bnrC4c6T-06P zsL04Q7|g5qL1FMP@&-VK^sr1721Pkg#XQLlN$_3FyQ0S7>5LSF_?$r;UT1gOZ}7I9 zygkz(j_>_NcIDyg4i-y8E-nr2kSDFz~qm8KI-o$-xfh`B=8U+clR+WjX%w|Y_EG`xm{{l%)HjM(kdP# zyW>X)8T}mKmMc(dELOt@*=syBgZ`C{u#=I@Na$Zczz1EHZ zU=^Mn<@vR672Z%Jv!ZXC4jXE7<-ju3bm!JqF{Y;ScMKG@`s_Cq z!qP@Nu@a^m>}p;1mIkg&YL2$7n_<yL<`mu?pFxF?J0BLlcT&mbu#QotDFw6rL$RR`BkL( z=6J7mN1*&Fe8^@s%|J1Wj|T3A+q~;g-rFg-LOM3?!~%Fjn@xEe?nbD%tR)JF!Z0h1 zEtG`3(-g;P{}KldS_k}J6mPT~0^e6TmnvlLM1ZaDEaXE?tU(R@g$U@;!ALu5HQyyq-l;s{42Q4G4arw7&~YW3moyLf$6+h*3$9 zE1Wg;*!-K-FGtFvLVx*_C)tdDdHL@r&JkKnCRYL97I~2u^>QmL&eS^FdnQCpC~m?r ziVBib@3kr7g;9xC;7VgS5(eZiYJuBQBXX%N zwK_y8yCHcpdf%8*JvdLxHkVxN2Ip2z$B#vTWaM{nUY+6VS2$lbQ$gH_SzeAB)ELQu zaRoEBtqnNrRlUz^^8mp(L*M56=C3JSIuV|_p>QPb&DqO@{UoP|O;E+R?z%zKa?{Jo zL%tC){Ar26Ox(*?k5Lh)NxLtbCGD31_3;y_F!5hU|A^_W_DGUfWehBTkr$^4ESlXM zsAQ66eOBlM1c-n1lF*!x42yXNkYW}0>pgO48R*(H^+4kwg#ip5qV}e+TBemJ*E(m1tUZ8E zcayG=wkF6^AnMCwNnEhJX}3cF|3K2rk857!AvEJbZmKBeeJLq4h2@r!1Yi#pI<~-a z1llFT27H*cyz3Eaf9wlQSOXSGjxsld3Rwk&4QswT1Cgeu=lWA*0PX}S!m(^L-?9c+ zi_XG8R?^|1B=aWMe&_OsS3VlUSLyZwHjZ0eHuJsE(7-_5{?_kvTC3v4qj~4VIs{2u z9po91?uBB%$DhANWRjn)N0g7P6~>5jQkF|Y8Ux!>{+=Icixm~M=u}aPKf78nlQXn) z+BgmlEg3Fb-}|T)5C5*|Q&HqyUpk1pq&(_I!l(Nm5VMZia|?cIqrLN~F-%oYrdl@~ z<5S8a*$aDK zIVcazpeSN}y=(t9y&I@r~(f90o2+&1$QW z9s@?TmNf%Z1hRv|jk;!qZ}!%;51L<;UtW8igUvawDO#7{FLiJnwLY!Wsh_OWsW47h z4_!&k{vK*$7388%cOsiw#uOc@;`c=a6_kp;|aNa2*Y z0{ku7^h(3{|WeH)B|mQzb7-VdLAQkAkTM zKp1b?b(>KSNT7@fxL}6o4B(gm!6MhccluQ0cq##1;+Q7T?wR7HzZ`psaLd!*;4B4>5fy0$qn)iO^k# zs+JjEu}F?R1{5G+jD26xp{)7sI+mBQSyW`A(k&}SYYW)Mh`7-@6B&=OiuHfM=fP`~ z`9sG)BgEZS8u)Fu>Xq?ov`(d%Sq>hIH1%nZM9hgY3UU{WbcV7*t8Xgptr5Vkl`W4W z$eb>R+L>L;xv`kehS2DDKyrZ7Ys|x3$ppOos-Ff9tvzzetJ%ls2|d)WR+Wm2E6`L2 
z+Zc;bW9~@P3P~RwGmxYArM`ODk;h>8>D6kyo`zfxGYwo{IX}S zQK7oAcS4>VY%OqRs6~f8ZwSS{Ioq2PoeA9o7ktj7j3pEdjs8_p&syn>uYI7FG?52De#K ze`=DnKLFbZfESOdUiIMtvawH}q{w+xd6~VK3c=4SA1lEs=d`{8n`m!J+Rd8X<4~7I zZmlL!qJq!Hbc~xeGxk_$H{xh#_%_23w6Gpg@@ajQL0--YRqV@$Vd6SCz#Vf46_(sz z+FZn;l}R2NQ)7KEff`*@G}n<8hudMc%OHxoDU>@GKtE1y*aCSd_ralEG}$Bc*b?&x z_`Ver0Vs}W_pE+E=cYeeP;>Zo;ncl|&ZK@oIE98&IHMErg#aC}R0vyP^?DgOA9&Ty z*%-O{bDhAwe1UAtc}0h==R4deN@W2YQ2swL{4ikPGb(UfW%v1_J>hb0BKilI90fN5 zb{<1Sp&>vZohMrXKE5hYvp1!d)>yq7@Lu_j`KlSB={o%bAjpr?64^@g*$H9W>JeI> zO@gWcOa(~0q<%!{ppl|BZPV-s076)y3@~}KK!`J(U0I2om^#EssEb}Pt!f)KDYyMN zXnU44aPlK8V_}zwNrKM6F0VH7r=1HktDTLcWldIQF>($D`)2cfU3G0O$yD1Brbuni z+{pm+_O@je6Jnfs;NOQZ@}T zz2@$1x-()@yqW5MV_>B~ZfRil1qvAo`~M<6$8#E?R}!Sl5HUe zY{$LSb_ES-=h?ZG$ENt#59vkw-J8XEC!#B7e;Y~c0$CQ!#}jEH@FUm4v{^yv4&(XG zg11zJr5Gqz?oidp8XX15;2DgGqT75STE7L-!?CdQRAhvqcG345JapzmC zeId#aY*b|w&{74OZdmpcb_#7Fmu#O&Ew8|`bomTBk?d=eKvX{BO;>XyB<*x!OiHqP|NO{_`KOgsMfTNG00D%>z*1 zeVt}-X$P9LXK?Q7mh16T;G~=DTyipM)ylwxlks?W%0#c_9&vYZz;R{Bop2t%r4yB3 zRpMz>e~@~>1t(YVrLO=Ph>%f{pKJFK{kzf;4X{89VVu`+y|>+sY8}l3_2)|m#tv7j zmr5KDchl7fJtX$`<&Op*@8Y~0w((63j2)xj>hM9ih0*dWHPUAO3;p%Coq!UR3nm+j zZ4t_UT4C)&I36pn-l<9=gs_@-9Apv}s`HBnuDXnghd%H%W2ewFjQk{r!v3|Kp2NH= zH3!HT`EmF08N_CkvhbDMfy$M)#)K@?o=2$)Gi3N@Zzz|G$>H`Zb-}23#x20Ml^zaf zU={d6fn#m>)p)ausH3B2e1w^lXq=w_ImTHy#fF2;)q)}8?U1&F!p9(&-Q_`yl(D0k zgns_VIGkpyvyCyG*@W-G_y?M}^Ny=XqKWVadSmO+{Bn}K=dfd8-sn7RGGmualrT%s z;p#H6Yeu^bnhA4T2tN_D=Qs+*p@VXrZk>&a(+IQEllj;UB$@@ZUlt>lcajR>%diSP zhzzzncs+(PN`sHJH>tz2CSw2X=T=T5rydN8Uv^#zqR5iI&|A8s!$zZmXP~m$+E{nXGtEFwjn{(CZ8Ir+%^^jlMYl69f zsq%QQPAPWxv@-4`ZTo^*<4MYLsb|_$$zH8ai}_5vu}jLS)lc;={YZFz1WpO{1H4a; ze&3-|16|-8vV!E4UbEzYkz1(=Z6lE$GH6DOPH^k!_&@_UFmr>zWM@1E4R5owwI@oK z>u#-esx?;5nQ|1?9$QZ(Zxl@!Ls9Gj21e!13ib{~6C;R`93gCD*vAd^$Ed9q`wuxB zJDeO{1EJk80fs=tB;(mZ8 z22A}Tz40IX=u`}Y#A~rYF^DY!ROP(2(k}#%P~My0JXrDLy49u;sio{MIlFq>9F~_( z1p#v87#7^948RB;v#_|Mh`q^P$PKWC+5bUaU)d~ce!!;e(x6d}eU$Yi_f-J()v-bU&_jNRD#@E4Dt+5w#XQ$yx4|vHv+lwXA-1n?4dwMi{7oaGC)F76J4vGCrw 
zZ%*@-;n-g7k!&AVEwg_qxwfG{dMuL)WC&xjd;s@kIbJ&06x|Oj7c?HBl z?Ez%WuuaiJtov%L#>)jNLX$~I+XVe~yHgr&*Eqez+*NFF_<40cp--7pM=6h@L@{uh zOzIkd2p=o;l1yQGOI~&9d|I1ziqhEGB*#zOxsr8Y$BCd;(Qt03+6QE^_1=r7ECf(M zjUD5s7t8$RGi#jl?dtS8mV=9*%j-q+;f7`@6#<}~aR(~QYVqkDv<=Fq`W_yG70O?@ z>68Hpe@)7(b( zBFMd^W&ba?*@XjFhc8-~1QHxACwn`sqM5Lq0pMhSl}EW(@J}Ms6(;R7AHNAiZG{g3 zP|$MRV*rk3HNT@RSs%qI-{{tfVq#Q01e1xGd|Ew!IUb0LGCK>s2vR#e?P_k}fHK1v zDV|(=M6XSyVLRlV*a24tb-_2=FwcIc!WTv3-T}ZKP393Kqs*Adbj~SnJEqfYB-aTX zVNE&tsjGr1NsIqjd|w1*0vsW7I0suyVN=*kkvZ&oZTX9Kw$lJ);*qbR-tmYUGjjl9 zCs$!B0I;YYvGU;Fgt;QG<&P00D5sOI#*59|K4h$EWP#%?ARZ>Szfcf6NoiC zIL!REF}-v~Ouv|P1l^6U-95AwE}O3x2Z|JIhdb3iBKMuH52v7|cNxlsb66U*Y7GmC;PEATj?&5rvQUg}dd)+I3@f?q@VM+(U zBB?$rAvtrbr`dFxT+n*b@4RpaU#742%L!N{zKT1qXJK-U)tf`!(-TyRw5+DUq- z?lDRkSBs~M<tKrB&7<@Xql}I-o}41p>1Apyv=U$ zWKbC>Vd3$ChxWeZf*fg_fS1}V8QYc`UDkiD=Uw#mEfwVA+nbabtfMA*i&q*ZSF2e~ z_M#uEGfS5p?WxH(x~Oq;3GFNzdl(trml1i85*Nfpb{EMd93Dmt#2mm)*3K?pfuc6e z4((7Xcp$WAnf<2v*$ZG_pihu|ZjsM7OaUNToRdv)zg1?Xv6l>x&0PzZzHQ|Jbgj1e zar+4}vGprJk$MrjaZvKcJ*O_gFf`03^_>c((WP6%CKD0 zz=)q+EWAa=geViXK$-n-^+Vd~ATB8C_(?jc-@sgrF6Nk!X262^i4oxEPcV2TZ)HHjpyVGAEBNj(gQ;^Fv`^K)g_Y1T?F zqK=6tx<3T5f!UQBzZE6>dlxZ$lRb{Rhr-j5caz9``d^)QfYbZlN0GnQfifi#RfytZ zrw54z3Ijz=#2!9kwjrogsRIiXo~9bq8ox$>Otx!ErnkPZnm3E8DG0Foo0O6sBm8kz zUxEBU>BLttC~({iPlyg}tXL;NkQ23*A3|8eL8i0998mR0uZaL<^AriacoPI}qdV|B zth>}cUY|&JnO_sWJ3$iTe#rTOUh?Jv?d8bj8oA zE>J$Or@26reC^r4G5dC)OgH543G@eLDJ$|={bVj)-|UTFU^U*!T%q`}2RORm?LjlXvN88ssRIxVym+ejdrn08_$G=? 
z!hO%7qT2Wpg&Ehn@&^{_w6j27dpJ)+AHrneFl9a!${l!haNHLP=af#zcht@;UliIh z6^t`+Pd|*dEhsR(9(zzism0cLX;}bkvJO0@~ z5)a|BrdP~ZqfK%i0+zDHTZOM06{?RAsHkY(w{L7YigWyx#S5m7AOmx5^j|2Fe&_W%vzgXCB8)(zb^pF0jsh4ZJv5YtZV`Q^Y9MY>1$AaWFT&U5z* zOYmv#06DIJ4Ps38VqxuLhsp(Cpz~6oa~kuTBAfx;Gt!^N1oX_toN_fDQ_7Mj6BTq$ zuGFkfg+GR$UO6I2AwB^QI?+LHo$IJ2OoZZz@e{X_$NeU_T)GN9iyhqW{XrR*7f^re zKX^p=1E#D7v2(m?%$nx4qz%N;KoZM-e(AM3nFv(do3P6S_$DZNi?_MVv}{~Kf2G`E zpp-&@0PtOJ3$Jb1L(%PCieMQO@bG|jtjbPQM~WOX#t5Pk{n88XQM@%+>rX;SUZ#(jF)8IoW?QS}V{K-hajBwpGU`6OE%DJ4FKhcDn~?*l zXfF>RN)>PqbXH++zIT28V<)RwrHC-ENF8|3q&U-MAt(vb)HM|nLBeB4G1}_F+Vl35 zSEv%1y?2kIA$8}ym_fx|G@ED`YUq0xH1a$(#_&+DqiS-4P2TDL6eyH?DaLyWGR}4ZtoGCp64Zdc59(LbA+YXNIjtM;NdBIUUSiCNBs19Fy}e;D9O9 z$W;oR%u7&kS)9RT3x@(xT%CteFQX+;(ZIKgLo5NHOlI1hS3pe7vg`o`*7@SKjgZ!X z$OB<=J&j^T{UUqPNXINcZug=^-UHj4vu!Kfjlfy!i@W0_ocB^#mgSo9D$~R_ohQTTmPO_>JPN$75k)|@EkXBlIgp@dGhB>OT+ zwi#yZyCFiB5XLsfgcyt&yD`S_y`A^x{rNq9|9npUGk?u}&)l#3x?bD!`FdW}!$7N? zbIa^mZA(yQ!1yt*&)J;(J11oKw&>l{)lDGL5CsmUNcgTgFxPt{_Rijr0;_2K+}&b3 zCDx>l%4$K1V;1eoOk3u`81R8_s5A?Rp` zXz;zS&;%C16I$W-p`s;5t=*FKyc4NRBR zfA80uc{jNKdk4~2y>rSl2tY`d`sjBrVt|q5-JpEbtFWwq>m-jQBrtRQ544Kt^5h+t z*Pm^#;0GETC0++joVffD^_X*zQ-A$$&pahTR)5VGM3AtCY$>$0NQ;P!N8#5 z?wdolS8ac$^|%5NNPz@Yo`ninXFRDr*Ox8~dbRfWrG{|8!}B3Mrx>CEZE=}%%9}mw z4EwNTOP9Z=)!fSQ}rjU8^}1$nMlRmKZ9mdvhYli zU_Q-FNRHHgA;8+M9JUZAN%BZ#JzF{zzxTTO8z2c*zQ0zjK<#Nd6M=2o*?&3$sgeY0 z`U7;5ljS%5JO0Lde;QKv?-)4?`2&dZ00TxK%%p>X&DH)Y{<7`Ni4n2dh@WCsJYW9M zyz~YAHM;!JOwT<28Et9%{sg@5a4As-)D0s3(>-Y1@;%VRjmUQ=7@XshqV3IOPN%V* zkj#buFUmOE#Ld=jNR<-5oVhgMw}_1Wo6q0fNf8){o+-H;*C(Qrc#`DL=xiAR#_P)e z{%0;bklD0OT=SH9`a^&bYPY3y03=)Zbp{+vBQ5#Uq z4*>r^Be1>JYvS*GvPj*q{qt_pGdqcge!vLj>&OcTB>;5~+kHEDFd|jgbH@=kJt=4I zd5XEjDxU|)AB;lA-n$zH(`#%a?X9)zNWpC_gd4z|{VHzTcz>^9f0rKbPh$wI&qT6l z%?EmOuPMhFzD^c?4E$;r8cQbYWGLJ?Kw^p-hZk4t!$O3eQ>Ukk0Y#ly$NM$FH1qcU z5|Jar5WnBPDtYPg!?4%Zu=k_lelU8Udg$qeWu_wt|XU$6ec_m{V2=VRvehIl9GH`ytQ+Wx>)K 
zx7rC?Yhs?GuRNPzS99EYQoIllu>S;V#$lN8GN%~aeP*^Q8MC6`vpCB2e8AW$q>KUNErS7ixHTe-T>EqXs!7gDS;YE5qfQqJDk_A`PSt*u$D8GH%A+7Hh(Za8uXP?JnLHH ztO8d9{OY-nIVfT>{S=!m{TC|jh+2oav=J)X+U6*tMN=?aJJ~82?>EV6w)0ukXJjdQ zKD3BLsPyot?D`9B;aBWZH=2ns3(v^&6`s;}CdrmTOK#WBhrBCXvpMSP@=4=DP3zXA zj{wtcK6%(CtoSXI-`eoaEh3JI_*h&sex&n9{N*FbihZhwqp=NJN$%ByDxH1~EQ2%b z_?O_}iBTvwD-)SqD70vI<;c$({N%m&FPdL`L4y@u{c-iA^-Xs4y9WZFIaaAK(CA~d zXUcgGijg=K5Uh~m7+V}JPS@&_d;d6WGyNUM3g2%Q@XACuzY-X{`#tB=qh>7%y>Ulc zk@4BK%UzWAhlz}#y>8;3Vev{PUFBwVk@^5&A4~r>zp}JijG8rx_{G5iRWKo@9A>EUE4{ zW7!8_T<|)T?zZwKyR!j_V-9Tu7Ut4ZZ}OdZc_`;<@i&|GKQ9KXhLcUI*L$OrZVD~I zHxDV28=_cWhK1#;laV5Z9;v_!K=lM{c-a#TJNQ-cnb1ac`wr}hrj3b<1PAojZfe-6 zpI-4Nzeto^Tyq(VySjg#J(>|S=lnMZ^!{fed*o@oH>;m@G#HQJ-ICQ>DHc-!TT5d& z_78OyM%EG@T(`3P<`~2K5rkd4@IN?}{T%@u785mdLXN0%c9}$fthl;;Il&96O+@!W zA0@Y?o#GVae`=5-VK`;5qE8t?fxA7`@WP57)scmw+2c99MEf8bEG_}Y9_t9QCb z!}G??@j-;Ca>3(V6d2_U%FO)Ag~lK&*>mdRJjOr%jdd{1*AZ^A^nX?VV7NlzV+zmd zhybpOeiMFiCC326%jT@#fGkC%FK=|8pQ}s6W$%NB-`@u&pM2)DC_{{!q{@n`8HmT% zD1+!oWb$PG)e8WMQ5D7wiWorvLtWJoCxo_Vv5mB=iZe?d2nAwsIdcBOuw|3&v`RlX zSm`!uyTG37nAGAX#B;_yfeOr|+xQJ`!gnS3&L@UAwKn;aj4W(wrMhI;k6sKt^iv-{ zPMPQhR4PR#LgfIB%iD=Ew)Y_?xz1SzGVsH!x0YvhKhB&<9}j@&R=ZbE{xP#_aT>Hg zEoSc8D9-w0p~U<=msX_EjQgLvk>(E@h#tN zKP+x5$E&hg3jjGTuBBe1RcHE*wsX$&eK9b=x14HKsR$RqB+E3<3-qq`f5dw~68M@E zvNLeTmDnYj$#Yt?x!`e9y4#a84MVETrAjnWekJcC!AYA5-4JxGcMCePgNg&i3O?0N zms_2gve`%}t@p`n2Izqp9c^#x{unv) zEJO4*f{EAY`~!s%sFOti5(91{nPekdtux+-$JnElEtH z^j2z$?e_*BopMb{6-m=SfPpNPfQno($W?BcpIoO0>zV(zGrpF^Eo`%?0Q;4DGxx$Bcw9t0P2HK)!aF>ONnat8S7{<1=I)Y`YLzPBE!iY?~5o8Q36#Ny46qJs24#7%s4(>?!W=za^MuM!% zP=6BiQm=iIhaR*#}&YZpTv_u?_ahJZpYsW3+fx}~F z>jYy*I&TNhIbSnd7OP{L0_ICg#n8yc&v@fN(*1rUXoplWEoJ&D&=v-F0`bzYO|@!hVzD; zWdyW$z48iul08a_&x999130ZuGDpYb>^B$)D@$F54bj>W~8&y<)2=2 z_xzqOrWki4Dja?j^jR=$X;=)rAAMG_cl>6fx_5+tm47T$aajwr2ekH0JKX6H=bWus zn|HStqd+yNFe_e>c*B!GmSyI#>W99P-4Qnl?;M6Wifd!~%QysK1j@;vx!UC2H58_% zeKA!`A>xDwZcII|B*P4IC}(cDq#Lm{Am-`RUmYW7GBO!3Gv*CGR1UT_v~Sy@xW#9r 
z%f0up@+4NePt=g$iCxGLMx|RT?Ur%qElhJk{}ftM$>D3dyyozU!*ERFWG`);7`JFL z_`JEq!rwBmu*$EWEr|IIu&G(80lEAwtzAu^4`8KNLi|_x5%mHYK@poZ1!xf+&}YD9 z3k)sTm&bJQE8(>(>(F_Ze%^vM?08vh5_SHVltV({#|1p&#jDmAnX$?(m?lg$h|;*F zsI>6GbgKE9NMlEutZUkfHg%`X;$3=zp|)gU42y9D;d#Xzv!oEfQ~+tvsG-X zXHcn5mGoWCbVh2a{ahUYgp233?D#uaIr?=n4?v}IeR4(Zz*zOJxRu>z# zkX-V!yt0t_D<4#&!>fYX)ZHByheF(dV$2t@M9Xsnf+%hYb>QfBlil~ z)Enixi)v!SUEaz=NF6YF47_+SyYWVFYB5+nbd@1sRhbDxnnPFH*^I{kT0)k4vX;ir zo9E&CyJY-_R;?q)q^|OTWlAneyaHK-e@>NgGy_uh@~j~uUD-|JJpNAV{uZTg^r}$t za>tBE@G>A|r1;}KJYn|s${N~pd`mG|TN6+bH@-9Bh=sV{YqH!kap+00S;ps>fq3Ii^rHDoUv9m3iLL!OIx+nnumtD#bxN2Q1b-weZxys^ zeZtkm?0UU(|AA6Op`9y%@XBvp1V$U((J0PY4_+({Dn_s#+;-XDUB?zz>_j|D(PJ~< z(B^i|JtA01y`YROqa|;kO49e>*oxC}pt6W#8wqe#5s{^K`6ZUrT)v#SZbmZet+H{);a0WyPQeu~tD z=zPv;si%2|a*{yQp7ml%iv zMB~lw(L^s3_2-T2by*%_H=Bx(0N1XZL#(xKEUO%PsdyH&>r^5Pohamgk$6z~ZPw3u z3rGh~2(LpCgI=M4X6B<|w^IJMkO+y73CBFX&k>H8<@fs3N2)Bm%wfs3N^|927+k0* z!sd(0WCa8|68tFQyQ1MsGH!~4fKtoE%;A?dlmAm=8VG*fJ`acjZ7Gm?sopN_vs{@4 zD|1xLG#PSOq$Jzwc(*@|Ymin>#tbvNqKY4^JA`|r&Hi*lV_TOC=pl_a-22@+L!gm; z81hEobM#r6rEZ&6cc2i$r9(J&4V|A5c_o`Bq=yThv}QO;1+4Kg1GkR~ZLVVxnE+bw zZ4r-7_Q_Ora7jlOR~A~`w(8d-ORaT0k((wgQ>#h)D-c0RoQbQsSNQDOg^)`f`b*Mx zvpm&<9Z7J|fLVo;hig&Qr#r08YAv!m4GR80-9`}Aw7x%we|(3NJvz65zf?!N z{^Dze%C`d4|F$o7>ZxU6%D3ieZ>!s0|FoMt*rfLCvf^0v(9+k~i8R0ahDRdnVl7?2 z;)`uskMPUo2Pjhg?hOd9G{T5ZoWdor@L53h=cj7z|J3Aq8TYii(Wk6$(l*5d7l0q& zgTFXyWYR!%!F4ajn!CBU7U5o3dJwySaSAszkch)U04R%?Yg%i`F+v*^+nqc7OHY$~ zY^b8)99#Q8oIVz(AJXOW>l}r9>~gTr2~nm0$j|*5#_CSaOL2?1|Ysblr9GPzvBod&1PS5 z=^h$D$ftk7c>;g$-U549;SKDWw7_<-W$|5k_Gs|#;zk{fCtZ4zWGCqeRhc*^>-OfC zY9HXX%?(_;$^ow-vewpRsYf;iswA$7q(&QeupQhVHKFwE4eRb)wVh<-auZs-`Z)3G zwTs99^EQ9-)%TP)(=UfqJ?B-f%bhDTz+{v?1USX`~F4ee{2wO_|-zPXOycMIovw`lkG?VjmxhCvm zE#3PoYZiFQbHa6fCa-f{z-9J5*=lUEfB62N9lv$6<0*lQmiztN}<1L$uR|ZiBmSU`+KGw$@ z@wfMCh*;M>bo!;`di0&>`{}^L9zi7*n{8?R@QU8#L$-z3z*7{UF4wK!i;e6mE~S~F z$2}C?Og+wB{Sml4jy-I4-P2j$C^&H9j1?eOLD3I3#u5N(V-T4>zXdUXQ*vs 
z+XoAt*Qs;aYBxIdlVHw2*4{(2FUke>Wmch{Bnq^;-VeR?@Mb9azbk;F#L@5XU$K_n zAcx$TwKxGc(=NMp`>2$}_hcQsAZg#O-~=PMrf}lgiC<_(0!5}9d!-A$jK>w@CR9wo zsT)C}`vByrsPs_eex}^lh!DjA^}P*r%vpM*c8Wr0-kwa1-VA%N_NeSH7`x;m2cJ<< z(?ey~@sFkAlXcL&Rr2w{YC$v%5hZcjA)zpuWinY zdd}>>35vzMJ@NB>i+^6xy=>q*?!>2eemX1p&weYo4Frxv_0EW2+I|2sr3YLW{1tiQ>(Jp+Vz;${zl77Ai^Pe#E_%uL_cYfz~^cU^I#SdIpz}Nk&%PjZU zN$aB9UHfwp4qwykWgL1ot{Gz|RrkLp{3vpH*}ODX?!L2ecF#n>tN?>Yfze%YK?te= zITwyEG$63h2D9W%E5fI^97JV2i1oD6b)-yNZa&oJN<*`ZLvMwtjAOuH9h73!at4h! zZ56WK7{j9ShV-PnSI~lMH4Is%;NA-^(-hO5R9@E8ni!-)=Q+vo{<{@c#VKGip)99k z&w&)IIIxUWV}gmLa%rB-EGtFS62%PxKavj@WhxC2uqdy9M%LanIoDw~a}@RWs-*#U z?s0%R0(SvY1COt1a#dSstVRFbn=k+GcYn{G3$TUOSN7^$5p@k9285U9nGfb^7X_4G zHL6aFNSX{>wymF;)L^A%jxipZ!}0vwN}6Ywl3dX~Q}5$ItQI3H`v>NAkaR}i>yT)q zwIhr&ORsGmP9I|nTDF*FiqM(J;YubL0aEm=&ZX9O@PNzu40>ya_Og`Pk}RIys{gTT zfVLCa$H$14cqP?K0H+u_ZBirwR!hKh?h^bSHj5Ulh~2B6a18@72bPzI5y`_x2aQR= zBwE1yqmZ6!=izUPNj6^@b6_IOx0OmKXoP|2kpY9U%37CJ_8$v@Y~E%DeB$5p9X54^ zDlUwwNoS2)0f;@oiE!1tIoy?Y+w}H~bgs@2d_MV`E&L0P_%_NsuOG`EO}6rmA+^m3 zVI4Itl)(z?(Nz@i7TosWHQaYyWX^`^MvQC2l)medR}RxFV$pf) zNSJn?j~I&;@0G9h*Z=K!FaCb9SDChA_JWW^B)DY+)I^VSoAQ87#XEEskT(lPZn zwQ78bXWTJabj%Tus`bFPU~1-jM8`TppgCcYJmq`xq#0d6gc??nSw;L~rf~1BLk1++ ze&5b=`2u9FkbS(FGphuqvi;|nCGI(!F~rb4Y%w)fndcAU0R8Q`CwTotE#1X|*2pjZ zl8GVUnR~-0jA7N`6uel|y%QG(gH|`IK`V~^jg(H@nnmzkU1d6|abu@m<d8%R@ySwkVJz+?k+%8Qd!4z7?M5l7>Q=x+Gvx5dLuwze~-FX3p}1cMSbb z-WXItJsrQS+hE;?+lV0D28WTkMuzSUPsn^x`q)*OpBa?8Z(-(8c!k;`k(t1;X$qw@ zM^agFAkDm7PN0ugh932IdbIrYmTgFN6lUI7q9$b$LAK>7g^pM%rxtni25r{kS@Po0Z37hU;@kVP9js2xMnO(0T zNg4@T6+y|mBV2HHyLkpv)*m_i!PI~Pf0X=M2s?e_H$k*uO@upQKDm|J$QwC&j#g&(xEm^Y?U=ILkQizw@p(m~zK5U=W z9~jZ2(Pxu?552kj(De8JJBU7ieUO`_8ScD`yb#oz6*4Q{MBLzAV)*5`h@w!cp|_BH z>-!YoJ5fR5fO}W9I(ySN^a&9)xh#4bh5vlW7C51190uC`5!ck0OExLb)82iyGu4e% zH$k$&wmZ2L;9G4Yh?&;H!8OF8(9K}qk-r{|6~U?^JuxC_+Kpmw*k8+^*aNYS=AOGz}W9h z=s}0YB<>FKnen(!e{L#rjZ;wxEuej5(ie?9h$R+Pj?lC4w#w@5vDB8qxZ19p9 z{&;aur{!R&QIC4&DNr5Vg8(S7`yMZM+4Zp}$pe|L-=ko8?{}LGi$|JPt_t~<*z%CD 
zCHy(Ld>bHEL$~&&XC#hFw39`(2hr)+k$)*t3ho{>GaS?182a>_X*#Xrfh_CQWF0ul z&N7dU^-5te`Bt6)nZ}7+0bNCz57nsZG4aM#x#JR3#>TSQpO7ckDbYx&Z+kmIW#WsX z*m;ef7-G{AS_i{T`%~5_(*As1jF9{Ia+jEds~EPVNSug?+5ZMI=1>pkv&qn&+0MH+Y+tFX2 z(=KjA|54-Kp&jgWAN00~qRMeMwsZyo!{16DpQ8!NTU5hqsS)u_nX9fC>Aa+U=!S_% zmI=yTh!8*!-+g9+=_dzc!>VKuEq^k9luJ^B1V(p!cK=G3T97pV!m4T5N<*iqx_Bw5 zb?;Bdx_;@$?n*DB^s`abnV-HO5pv+YB4f`>=FSXzH(s(hQZ>y|k}(47?s$G2>@Xx- z;ZI4bvW%1sPR1=(t>@p#0$`mgKyR8@HTE>8=u&*}ky`fX)0yFxRb1F{|&SykJkXm z$rjK~(q&by(=O+SJ5klwwyc^EyYXTS0v24Z%ggC-8BJ}44HHm3!C z@yv5iZ*vcuzdOvd?*ib$Xo?@B&6reW)7rG&O|6 zeZFLhw!>(PM2y$i4|{dH)l8brCTU_6jRUlf)aeGVbUM=D(!Hc)ze!vX!2v84MEy#q za>IBB8ZqJA0>uq2(!CyM%{@~6W<`?{37<0J&te8r>uwu&3#`yW)|K-X_3Prgj4z$2 zEGBOH&+pYUr@DfVe)4fQAdDcmoY%dPVq00~Vl~(p@)M0(#Ajk(a_O;?gs`mOZ*WZT zfoFE9zl;1ogj+u(DTj)sb_%C9!JwIeDoXeiritErX(*j zyae=uy>$}XwAk$7lPJ}k@wc?~A8*%nwdKzC4LUxdW3F9Ue|)xJfqo*>=LYgqTUPa! z!8}=~mfnpgx=(*>q6!l^mEIvYmjvpY;~+Rr;5r@PCR&2U7oL zHPKOom(e2m{?KQ(AZiBOMqY#M2o#ov7Oq&vRAq9LPIzT!P%IR#87uDh=U$Uo)q?b7yyXQ+MtgQ0?f4LypScQ2My2b}6NHyi08KZ00^$EiSbx-`VSs z`(=SZ{q1`l@r8x1BMNT>>MVLHit>u9*4F%nJ2kGA&%hT()q_C;#yOg2Qe;ZL?YJ^S zt+qeODepHo!b+*tlNA$o9Egpy#g27fgWP5<#QN>ONDdz4e|Nkc5IdZ$(I=o6dI{e0W_O@7pD z_(I@tK*Y!$gV5Cl0n2I)WZ*5!D`$-!l+YkLqSyP=B%4j<5z`Xc35Oq@xbEgdJpmhA z4vE(NnyQGBpy`5ELl7@|_l=CjmlAZlX*NE|qCx{f%RLFPk6YANp|J5|8ru@6d|l*} z!0_g+W-xRBdVSrk;|(HafH|}+Pl~A!!_wh+zbSaAtA_fO(@a-Ex_Bt!d%90o2Kr(zco3^|55LGQD0vS-%2B#;jtD6=*@QR@z z{a_@zrw{WiZn#s-Nu`}*b5I*R((Ao!&vA4nj}{B<`%)2`LinMVx|T2k7X2z9T^^-yB&^yTmI!BR=Q7Z&r0r z;N*Fm_7GYik{Ij`9F5J1|G|v7`IvQFVyhvFG4VG^a%U#$ zPf) zTHxCpFl%p7&sae1l}T+0MSbSyJf(WnIuD&OTu*ZE+~T*^capR^dY#AN<%}Lj8=wG1 zK`m>oLJ$#Bt^ch|O?rm-e{BDq5d8i={t}p_4Y`Ng8D>6`dUF2~N5s$obD*?PKfBJ0 zwFQs|09aYKs+f)AIA5>ZP|0&%XxAmJxUfLf57-=s@lL>a9(x!U>c)g@@Mm5`U6mKK z5A#S?3UsSmEgP#=F#2(3h*Db=kE;+R zT2S@*X|dZT*k}bmhw~o!qKg^VcR~#ZlyLJzo-qy>Wa^Hz_zEn6uk@YVcoMD(-s&0w zs6ef#a!)P*G-6-~H8A%*XY}Yvpg+qNPQjJB8 z7Otd<&fT!cKr1ufa+@hVIaBe_igz{8vtd1+2;H|u=vSBP-UdfSsYT@b-JS5&#YE>Z 
z+Zo9gGxAqp;fv?w3anQ0$eSwp!u)}{V1H#_ba7A^`}KEJu0hav8$WG)rE39A2J7l&2n^-lSO4<$LCXLM@XAFV=ZrXc)WHP_IQ&hs-OHMz~Vi>OoG{vrfNyrFlHYphH`Gm^{m(P)5$<(*+KO%n*=?Wu6XI8W{0sL;ZIj(@2_}kJlNaVV|=4$_yMYgW6?wBQ)rEI9ZF$Qaa$(#3-#C8b+o%m}7Jjr{+`sanN|>{skI$e)voHIE&|Wcxr|f1;;hT8&1&aD{Ht68ziBR0$y|9XoEO+KpH$Kb(+MG<;-pl-@8cS*^YJdK?pYbzD1Jie+H1c{iNHr}C*KXJZ!E>}T@HDq-UnTx{ZBqpTF6;qZC3pm#FC z^S4H3zl+&Jcu>sm2i*oYX^IcPmI|X2zE({Jw|d*ea7$9isyA$R{pzz28Ov7*0^I8{(j3z-l}Zi&c@Q~tB?&w{_b{H1uLL_ecFUg4?bEm4g@zBlL>qU&Rf zp~d}`#iS<@kpN{)pMILtXQ*-e6el&&1}l*=SC`7eHoAwAe!8=BPG*b#k(&xM7FzF$ z_&h5UhPI6@Y(xe3i_`v)#nJe76ndc9MbDlc9d)jV z7`L_SacI+P5E3c5XIlKuc{H_m`I+TkzOA6EeSy4LbGJ}*Y_q}^F4d3{)7uTlj0U*` z$5AFX?dYZ@e|Z-yKDEG$w+2-K6MQ%51$L1Sc$}=@I~1RJ^QJu7ow~RVah?r6@7>k9 z)n~!V0zM+*n^}A$vgrE3j>!cf`Tj7tLhi$Dk0W>3XzO~!CUhJ9A@o}jCQZ#?WHK27 z&k+IVJ_X-2iux)|JUmTDq6nEkX!W_b8%e4rO2YclAT+jdS1AOkv#ho}HlstGyVh-n z)DEwzhQL~?Nits~G1l~252C{R6gjGov$4SUEP`@1j`ETs&kE!VJZ@8)7EF4D#XeOD zgVO%9Lspr&Da*H4p~hcepW`BGXf#hT3QdKz$A$I8Ifm~a)aFeodsgq?yp9>GDg?c# zSy@9Ms#FXD{S=i6AO4N#_g)`>c8o+FX)y~LEI`p=GBRuW%4QC|m*75i*bacKUOSos ze!Di@;BxgO>zRJ-Vmn&03P6+AG%cV+_eY1GB%TGwOPWW89nFlp^La9ue%XNT@4_Xy z7)ieyAnFtD-tj5g_mFn?XPXzQAqrr6tF%6$ox_C3>;9R_U}j(^*rARDcGB-QgqJjN z`yBTp`k*0$gWxT1@EN@7^{c{j14>{_!}kGK+4->Z3z&RX^n7;8$R)GlmhVi7H>l8} z@f#f?G%+6+1q5{fUwGS!|1y|EQ=Es)IA(eQ7}}V8R*7ae#R3!d1?l$Cu%bMP2VgJV zV!<#AtaG7F_O|kelTYiWM`}p>6&NCRt>YZ*k}&KE`q_Kc7hX|VtY)tdB)h7U$nR1G zz}C#dqC>V5nRXKoJQYSx{uZltnt@R)7HXm`tXPct;mAUsAYGEr9 zJ@2;_-njc?L-vAksL(U~1EY}m@IkbItjfysYr*y!#UEWV76%&3%nIqXZuBAlAfckc z$-BZ(>u|N0Fokg6OAwpG-(I@W3!LIV91#he2*+AUa9ic0HzQ885xd*zD^g8B#XRCu z#dIp9f3%?A5%vl8yfJsMn(gZ}(CJR8g@vMvck(Y#B3qe%-Tw=^e?gw(kz%G-JT~hb zEIF(1rloGkmuv|f|8J62;mo{Y11GUPwOHVgm5OD=cBiMb@vGf}gx>Y;0&{A&giCBEQ8t;twLkDLkY0)i(5fOwdROp*XP+eI7*G039y=8=mMZ$3A2^Dax!ft& z1+HPT;#O6TmYATEw#Eq}qng;p8F>*-K|DtpVkKHlV@hzK&+Fv#dKu^DPwx|if2UO)n9nC{MyBphPR$Ve&Y&Kd5339=IV2?)K&0q9!NmaeL=xqe) zLxp1=?R&S|SfPe1k|-^TL_R$*0U8;L`Qyjd!q{wkRDf{H>qScoVje&xj1)J-Viur# 
ztiIK}=3KK|Z0&AS4JBBuK7gcZ6~a^#+P-I++gdYjc&BZmVUC2(fjQe7JpGzUWKLO5u2XPppmGCs%E^Yoy9Tdy+aDb6X}S$crHn7qc(3+9;ilmB-%0#5*x;(WGZg(-H!0B zVBUc{H?B7fR;3fpq!(E|E1(NNYZ0bax*tF{?^1r`9F7Yys?=jW*Uqc*t-k_ak;@WL zO$pxxsWx0_{TE=lCkn`I0=}hpK>6hw)^Qr$9A^h@@&Tqf#~;fg8_~Rf&E3sVXciRZ zDO`(3p=MpYPF}j1UlJ;Ms}0f;5m1}8&LLCbkyjFvhemg$)BKwwFUn>W@*CI`ygT{u z{KL+QeSfn{&tC#5h`Tv993>Zi(z&5Y%3_c`x=rv$9b$S!{nHbUhcjD1Kdji8WX}3n zQ$Mw$2A|;YZm_I+Hek@#2o!qj-VIK}HqcK6_dGC3fX2xp0${fa8sF-M?t1lK!Z`wS zD=y}!FNLB;UIq^rUtZ?DSwNXpu>L*r=?pcTnV%OP`ZQ%fj`Eg8nQg5>cZ#RM%?=o?6tIwx=ki=mWOwk{L9d{7r)|J~+x9g7OET)Wt9} zVyoZvNfZk-SxJMfhorNkhzchKY2VV2=(dp1_&zPdHwxC4i)a+-j#hL4|0e!JnU9uk zCQsD%pJ)rNIz;*WC}eDb457gqzXiIEdF?AUtFU%m1rX;2dGcGTTF6D( z<-Vr1q?p}RMk+_cMxm z3Clvch$GzvqwB+W!x6L}K}~jCQtitDR59bIrpt>Lq`xNxu|)DeQwr++;8w@*gEIj@ z;?wtK19BCkE`-gW0z@F&bf~`m(syu@j?C6rJDY!-=q>o`fMoH?bTM!G{THAVc0JH$ zDl0tM{4I{rD7o-fi}$csb#7YTYeQ7lZJX&o_idey*0-^>>s1!Kp-oL+e4%=ROAA%_ zVPXu^6_v+7J}qJ@0}z9$c3cFfMjwef(Xr~-pannoT@NT*+*x|P2xbw8xeqE(vc*IM zxQV32D?E;MSnQHC7Mp_#?u4UP{A^9b<$+eDN4dXi`dS}oi!$%^NIppxSZvYvF_ddO zJbb6EkMnnwH8z!nCro=-;Ncab*Bo34Zlzw6hg6SFe-x8S`o~j7NPK{Lq>4&K_!#xy zhcHlW^k@q6f=Rlbxv$t3%os zL#}*1E8{TAq1=r!wKscqfbwiA7A-gy?u^)bh-9P3XCT}9#4kw$sA-k_nJ>P&;E~ET z1I5+(F!x3t*X^n8OM4cgAGsHRE@_(rrG`(=;G_mW`(~}r0!RLALq|kV)bLk)c*z{u zg@GZv(O>B=KdPnq-0ejjmx^HO?Gi6404U-|2UpaOyhTqJM$fc+L13Y%K3detDG-h? 
z_N^OmP7%qPu{OGU9ium;7N<-WP^SJi(zd(EU!M|Zl>0-drydszE+x4T+KugF%Ibst$NbfBi*JhOR;9Tlyj-l zM|vUo8zyZgqpvA|8hdx_`oEDe`2f|b(nB9nmOolQsZrYrA}jS{!{9A_!D|)3MB7;m zvsEYG>PHxS=Tj3YQQ#d~J(m`Z3}R?E6#_u|-j$Kj`O+rFLiJYtZO5axc>XxQj5@8n zJ}so;`!NwuMT=JRoqO5?M1MJ%aQ6$aL|T?F|MC#u`&S`u_zJyCDAeI-u8H%o@$m87 zLg`+h8d`|dgkPV7zN%N#Hr8S;rY{%4nh3i#Lx7IfKfzn!)mt7m?yKe;?cY(ZCYSOz zDl^10_+Xz1j1)CDVx_U0v7WGh#GDgK(}N;#i}VQA@)5kHVVYUTyGSad$aLaNJ&eiNt` z8v!Bf$Sc1!AIxu-u26aRo>c6?1cR5e-`IpY^@3m81HOf&JJM2 z7D!}ss;t!9c!^CCx4VGUEwC$eUZu1AQ&l4(j|)4D5QF2a3_}V&qE>GIOJ98skN|II zWM?>v>}2Z`zZKr#%NJ2m17uEiHU(CRa*+#xdoz82XX}koNci<%t#3kl~|u;hjQSnW&m9?mDbf8$MN*Fb@zC7cF1aJduv+*PqwsCo|KL@kJK64H74-l{Lb-aB_t(r3-*R|}9dZOGqQ7kS86AE4@&T7383F)kMU;h( zM04zD{K`(=fqN7Iqq!kDnEI`Ad&+4GDbsI|bqU7RYhFR?#5fCidK}|Xs;a_jnT-Ox zPfvD0)jU{<1(DwztK%Hr!tJ#~xhJtj<|`Ynn2V3D8OJ~7x5bNZ$}0DL^*_2OvHrPS zVyl~<0LiP1pducGG(o6pp9TQ}Np}rW^{P=DT!(preiLeo5rw*N(aytM-q3e~~n}tQo8sp^MI?s1IXDzw{ z9Lsw)YEOsAcMi=4=1F|CtquTsWE6e6VWnzI!_LhC$G78G$eAt&>=a%B=1F4-@VpU{ znY~aq%)G%;zSKkh7crFQ0jGddB!P)E40C85JBGvv{g5?+bi!6AK6+gyK^6t9Vpg)F zc~#D)xfPQ5$>urgClXe-H6DR4>&w3iseSz5gwSwxUW`DcE%~M1hHWAH?L`$cqB`1T z9N5)A!tRWlSj5|Jq6)E-EuaE{XjJNiUS(T6O6er*jgWkN_}w@^=^o| zhR753bOH1blZ44AnK@U+i?e2+!MHiElRlBf^6TJ%jaD3@2>gAT8K1L@LD#Y8_^nN& zmq!oBsm##xFV1@H6G*VxqGLNY#))5jmC|co>#lXDwLiJ8A?_Pl7~1~5FJerY z7nIs?Mz)w8@W}f$&40HzxNzBK22a?iWaObjy{vUdB19{S%ci|?zY{o-!E<7p3!6_A zosbZ>`r@t^5?)^(QdrC18!cqpWVHb1m~zVn!HuFA0HDWK1E}}XcQ|abHE>?$I}dZ9 zF%r)gJQ0G;)Vib~8=h7{6(MfEUto*}C}XxD*%ZOLO!$W&5qX^!x!fg``P&7 zHll;5UiKjeCz0UYRH83A3~}4{gwR|rl3DL!5C*~DkOimf;|qheB_MTlokYB_sh4blgAr`*Ye zHMowEdgv3M)}2=z8kHd5|wDXo$K2=u>3{lb-;hEMP-}gU=rPG*_jtJfWWl^eoOh ziW2k&^#yg~WWYo`=cu#?a#3~LZ$!JIt{Y6i6X{!%?m-}n`7o8sn8qmyfdhE~! 
zQ@s%RDfgcW5vcE@Sh4d>*P?gY#%udpsw8|1PBy5R^Y@(Kk634A$4eyDn5{kP5O$&P ztwtX1Y$G3`)$QL3n#qjY{aEvsmg3b!;GnIWb|fvN#a4fk`WFZ*_b8yc^Pu<()T3^O zElTb6n31snaGH^nhnIZ7>IR_fI>DJU{VZV){Z_9)ieD1wi^wX|?%uMLI4%PFy!Zdu z`>wF2)^1&jfPxf7db2D+=^dpD0xDg4AP{=*y@LuU1Q3zlK|@dIy(!Xr4OPTQ?_CIi zGgzIk4>!AcAMo z9{8CGn5Q`Y5c}`XEJ5SRNjM#B}LDOiXHE*@_v z%&ZC!#uz-`=KUIGm;f{%$EA4qp7(t3Gh+K=q`JGGt!gZ zv$>F)ou~N(=2gWKb;J^e+xlV!4gK5zObUtLNWV5d=9-sWVBDxOWqA3i4JbE-th793D@nD4)2>ukj}S7sd0L;=y7 zS*qCj^EBN2IimxR;SOtuVj&+yg;z|2kRes)8Vp0S_)dy8p@BR zDNc+4I>L3JYT)3)z|^OsY#)8Hnl+N)SV(`rY3Nc;B z*5reGsRNv3>^8nW=WCMmfv|(74-w={p%`a%hVC9J~q=EX3tsg zrf<{;>QGY}k(7O60F+HNsE63hst(W3MTh!1h|G92&!yEl>fnf0JkVq&>im4V@2cGF zxa{}r_`<6PlB@THTJqMi6SrPR*GdTvvpfuey)D6mPemoasMay`)9&YOj*lo*@&v#r zn#Ea-*a(OD^p9)aEm}0JS|MX&bxno&<^`9H_M>!he^Zq99`L1ZJkf#%2>x_s*dL8} zTHUka&-u0PBa=@Hb5(L-=zE6~T*EQHiparzSu{|*V0RMpe2f-d%d*^zp3mi-mU1jJ zCIS@3k>B)e^v@l)AazPD_jMJT{mZu=E009SD^`L(pa0D7mf8y7{o>GwHtutw+9Ip0 z5z`I%SqB~$xuUe$G%{Yb#65jtE+CO-f?8QJ+O*6EO=qFE42??%Nv&sQ^QoWT{0>L6 zs0NIV3A+3HjNU>|_`gg)tO5vE=9+%wv*eBOYZbmJ9Q;sd!fv)xvgY%1LFDV+S}x0t z_=n>_B@+1n&?0O|q~%+O{>1hU-rnj#)%2hjEGFSYx*hu+#}tlKAvu2-kexQR@vPZU z)C{Fg6Xs#01-}@rn6R@{I?lE^~;19rmw23dU z%+EhD-&%9vpeYz~3wc5|Y5F<@IjU_f#yQ{vYdOy|H?)y>ctZII&l}t$2sM_I- z77eBBU*Bxim};(b?bEuip7Lf+Oix(f*yYx(mhPs3m78U=8bYe`VzW9xixGGi)G2*5 ze3#lb$?d*DXK8{<^f2EK+Ymz9dQXYTK#%*UIj-Yt$!CnmD@#N0>8&#O$JT>)j^{v3 z{S)+LPC>DjO>4|jr$%87K>r_xTt^KA_#*~cZkdsgS=I@^OxuhjRtnkT4M|+t6Tvjf zO^;w6$2lRlN-QS)`j^nm0qn<|lIZt2n4@$r0&v2S#BN37;W$y35cA?%v|yC;kr#Nc zt;(sf`FOK~s*0m0Z5CrCa+G#YV}vDexv2anbtjlh|K{hSXk?wPiqT10}6MmJ`n(^kJ%4#L>wo9i4t zu8kL?IGPtowr@tP$FW+~HH{hOwpa9{b6zBE51}7_SYZ z{5^}8Aq`}!gnF}w<8W|~XGran7%4RxS8)=PNtvr|2CMA`j}rFXCGDr&7*^^`PeomYXoI(nSXM$BfiAULj! 
zh2Vs>{z!?E#_%e5{Uq%%IWg)LcU4Lae5G87YY%{vguiM*?APn+9O1TK-Ic-J60*0* z8$Hk5v;^D@x%36A?}BNPlQtbEqZCGD+wL{?V|>B772a@rp4Z-GiE~E}LofkOxZgq# z*6NRxBiLM0&<<$w!r?!$sjB zm;*w#ZB(XiFe>^Z0O@?Ly@94PSohW$Us8GS)nK5_PXPd2G31k%juft)IONz?OrS?P zWSg7;R zw$8_s3wW4xf8C=F(V%xSUf|JZ3vV8gWI2qSdzztG4=RU6#xkj-^N*=`AQ4m6X;CGI zwneGErZ~ZVc*HB|&Y!4Y6nQdlfo_vWpKgIee6@`A?aL#_D}ex+ZEiC$`ggVM7wt2e zD#D=`|HmsuaAWF0rLF256P!H*ydm?**~&hEnFI|g!TWdJaUjL&ng0kW^1i1u4LBgJ z(Ba_}*^F<(hn_tb65i)`2sDH1Y>x_0OAGavQBf1kp!Bc;L66$JYN;wxq`hMYi#Mbe zb^0%e$&YH1;lz#SfuFX+9q4H@wOaMmDzo8+?vdogL?a8awaDyFkUvlj)ocYcbQH<$ z!9t4K`G0fh|9fv6i45fDYnJttE%_6$WAgzFP+mSH-+3pN@Th4|7M#$|qX@~Cx;+nl z&C5ZvK1Z$ai1Fs4kr*P~J4gc2jpjR<+p8HUwyEq$`k(NmLR3N9m`Iey(FWO zF(Kxa-22Q>6<^p;Gpn7V-J$UYPMnTSDQA-~BC<|$p^)%jgjzxlskdrdQM0^cLxE5)|?O%o;^Ny6LjVJQW$52gqy z%32hIgpUTqvFcj~I_~?fy~c+EBb-V>n}YzWu04>8f0iyJev~2C3aYc4A@B7bEUN_- z>$whxm0Ko+P#pYu$$VsXHy}CCL!Y%JF(LswW`WC^fxs(|oeO=x{F|coU)Jj9z!l9x zoE$f;_k5ZLSGDL`A5hVryCPZGe$#aJ-_O+cxn^b}j;S7U*jz13(K_;DJyaz+*Sg%} zRx!a9URW*dKj$pE@G`$O1M>rRnvarQ*Ap)kI$P=cT6^?zT)U%vd;j|&6Q$B;hE}D$w!LVPE7#pe?4nhb#(*lA@ms1 z4y6+g`268R&+=3y*mEc!$S=iJ)WvI0s2AJS*gbA_R*df06Ex`2Qgy4O>?689GC+K{ z?)jODZUD4u zkfR1o#EVi*VBbs_b)jM9*KSZjpf4lLXXOW~lDHcl*p4f)xD^=%Ub7u7c~s@k$MpAM z)L*U^B(==N=1u^ZQH@%5-CH;c*zA%AA}zX-4Z7O?&cTGVvWa|}5I1*v|Dz$Gmkn-F z!ASNSJmJ?DG^0%9b$*x6V6CGwL_pia&f5-1d<|!ft{fGudk`^OKPG`$r>#vdpefz5 z)*Mq__$3eDai~GB%GFF1%Ufv*1WTPUZn*@R(@BR=*)A0|=>k$QqvcjLZ`U>UwF41ypgeyBQdk}3H4a!)gM}&*=ML2L*>p$;G za+$Y4Y~}P;dl@^l+#YE6FLFl7??}?h|9!*huUCU@?(td=&mC@6SWCX9)iHwN1DfG; z#I4l4C9emPjl2@+@yz}7kkJ=eyC#f~(Fzyvllo`SRrxx*WU}5OIoG-v6`MSPeG zknhc*F!4$kGZ2=5H_(RBY6bH_IXY_wW!#IcW7`A7u(sp}f1V8G49Qj^8rhF>0F&fB zK4@6U_nDoty~_a7B?FzmI)XCxIjL>I$-xVEt$!c&5^C(_ATn&;Q)TsaG(*}a4EoQ> zGyh!QmKRiNhg(4VR?W7t9*#P0o6>w+?I$?`hNV!dLh+V+%v#! 
z$|hATwUIa{Uj4OtvGGg7@yt)RvDW~)PIwxyQ&XUKa)&5$I~y;owty$J#N;w>XpqUt z!&LP|=5|+!rDEWHm$c1*S@BnA6~feVSTf2_dz0X|b4d7CA)qR&_%+%@Y}p_79(_BK zYG<2l5_LCNkW1K6)G->ZPy&>>*R9ON0J1PtJ(p*+MXAh-rZRnkq&utA~+40NO zaSIw^3MChd525RM5pFyYMnh$=|EQyk8zyA8+3Z8YWu~0WnY`uoyQu9KO#~2LaPm5T zHxjl#ag)RGPZ)Y?#V6~;$7H5?nU@pMO8ls8n~_XNf*EG%s{+usO5=VRtFJ_T}?zm1m9kWA|VI|q=s6YWrYjGtWv1}moTq;k> zaKp=s6I<`(ROe5fTA#M452P#Ri=*g zZ2MgnB+Dw{r5FEBDJYXqCxPA(A?m zd2QM#w0nh%j?H^C*PqpK=F!$grjLBwPqh#AHlG$M)X1$8avayp`CY}puU0GXQm|Bq zY6;-k*R~i>ws(iznEgaa->I`$XQc z_j&C`Z*gy)#z?q1+gN6RzwIC-#w04m97~zIc|DPTfhC3C-9~S5AcO|oCr2aB*|QSj zkOP?DqrI6Hlgr$&j8=!%eO|*}*GB-8EwAtEQovftL^+79%7ePv%6N0C48}DhOP`|T zz*_C1#@7daUFyFVf}3v@Ad9_8!p_4+O{*rlnxEuCsHsf%XBBKal{wdqCbeZA04C)y zYl)X9U3=N79VbVdEV!*C%S-&~TCLp!SDj(wfZC;p)7nAm#hio7M3`0?lCjG62;pRW z5l>NU8G3kiq#rf_&HeTxxdVw}L!|dh^5WgdH$q8BxNRCg*F}vVF5PWdMtaW6#69kv z7%ugz!dY;Xa5o~YU}TK9YAPVQ^trJFijKaN?+EQUXVh2!X z37fX}K4d=c_zK3+Rj&+W44n9=E<=Bb1fPn5M66Wp>f=&gUbU;7YMj#7yfWYHD7bX^pU8Fr47sz{%l;SVpf-3$0Xi2lUo;BQ0B@XS-CDqO0P> z&w;rN)@sen#_|MNSh4-+u3QqVsDaGiMpU)kHIy7+ZZgu()@IDp6Wf>p(YTAJCDUMz z*=61c7twC6`BjyVta}M#CRiDhzcK%t3*f^>x1AcF&5VjGnwnKBNF5oMXeh~ekHhE< zI6f3Gtmw1O%xs9UybsJL2_>J6tTp$T*q9#7il1bu=N4m5Z5FRs zwo}EAB-Z9@mRUJV6!Jcwpva;Axwmpd6w4=x4PxQnFLyc%K(`7Dm}3VVYC}QZt36rSYr|| zvnL;Si~Q)fOgKUlXH^Q@Y(y*wh)L9^YJ9ngU63euhk}Z5bO!wNjrQ`Az6IJx9JG|dT+y2)aD@&`S@^eXE>HBlF%z;M zKETqm)vQNA>rRvXBG5r^w8V(IC~^p#02Cx|^%u5IO?qQW!vj&>9E%X_XuyK0*eGbo zt|ru@JRXz>$axKM=PEFX*vZYTRB>itihy^fRKNA8n)ATvu^|lDr6Tq<880kgnkITmSy&fm-GCdGi1|E3h#8oxXLlx zC$qcTf=r(E+YOt7q1efg?hszS%!_$zpbxQxYY*MimqpNXnp0_hIMIp%F>6^Y4OmJo z;pA*88R!mjA^d7?cP_(6!2MiE3VhJe z^yA?@>~02WBhjR$zp8rFfk~!z-^$5Fcf{F%es;F*h33+jh$8@P^r`#v%lVbKNH&B( zZD_kD@BBe-a)R~WN&8=q#tqQT*%C;b-%haHEHeD~R03fq?&mju+&V%clzxPhGdraD z=~JHdO8m-6&f-D!S{TEIin}0ZoawuASAc!_*w)){F09hvmMFl5PUJ-`2V+fQB-MB)2HGjJ+G; zz|N{Wx5d)EVJtP$#HHaY)Uaf>F+ty=m0r^&F1O}qc+ky9A1DQtESbTU zb$hkbmb8B>HovR`POu5h6A`P#e!rF8^4sDCz%vphS#R@>9P3>AWBNB-Yrp(f<{(~D 
z=rw_~=0WrpJP=^HpJnP4PEFi!*{q_&E`rU}B`70hv8XrD5-Zxq$&rBIz0rPwrJ6}1 z(MtE=?8{;aWm3)y+${Xq_$UR7_!)%!DjHPXVDQ&>Z2X7E% z)EJe#4uL+;x{BTQQ2jwcqY33OdQ_jZXAvRX@}w{PVTQgoR(OxvOM(L5%u*{n=p|~~ z?fO@;SoxXvYNXvs1mW8_rd6NX>=HPBas-k7CJ_1M>E03r+u^^N_B=9I7C9K1=fd|BS{_oiY(aelL_S$HmgLNbj^|VQkXz=TJ<321u#05fCL3I z2c_SCK64@fCf~yjf_(ezrcv;xngcTs98F)XRDinorQ!tvtwzYD9N0&F*k$#-lLwW< z!r?{=Q-u(D?3U7mfS-hzJK8{XMl`#7gsYq(Jb6x~Fg)NZJ~WO1zjcIzLFd&s@qj0M zy40$1n|?dC5Mu1i{4wPdkq5ebMshW!*+nLSpUvYUbvReZLzc)@c}Beq@uB&jFRXHn zSP>CDBZ7*aTvZizg*!t$?K*ZZ68!yjpV2<%9m!_WX@VOP3OVA-LSSu<6ydy(|5oGZ_`*9XX-)Nps|cbQ^*ahMo^>#NkRkd$S8SAW-5fb>{hDsFx8Q52ukYJ;D;W^y6PG zs|<)-U&Bg+NCTE(-YIMG`?F`yuBW)?PS1~Ge!@O(xYW6LPB?HD^s4meNUhqaeBtu8-F!K-+{Q7TT`jp{CM3GNE#9o)d8X2p?es{SgOIQXSPYq zZV6cy?h<3(liW-aPpcaD1%ANN~j)%$81o|~|BVm-CFG+ru ziIicAMa%&${CfEiGapWolZM;~3v)LRt7i`6Y(BSOgj58}c#6#Mi!rVf*@E42FnUX#iOiJV&%X)yuOMRVoO%t98H9x&)?e?NSc#@F*$=Damu^;4@2%MNo^cMW zx&7X%^-Y!I(pb}UEl_S^7dAr7JGGrve+>wp>oHDVd=HdmBLeIr>iWz09d*w~woQ)Z zfk8%G0E0YR#d0=w4^}cvVwLE8_9CRrtn0aH78QZbYxQ#ut^-?((UPnxyCQ!E@rqU| z6o>E^UF)DBLh@OG&G<>Okn|I2 z0k8A^6rPAi5MAXSe)v>F1&g5)6@)XC!W4je7z9UOR<=4waW~}cEk_HJ7Qc}zb{`U? z0~^AHxP=Yfsa`}7D`JT~CF9YXr3xGwY23*vHdmL0!i42X1>%vPt#WJqXJ3gtR8!0` zf*pvSq>+lXM^5i5q3`2Eg%g+Iqw#VFLMeG^DiJ4qLfk%mk9qVrLueYCJbWNinr`@8 zVc1fRmr?3|6XGaVR-E=TdV?%-ds=)QVe$#$I_wZM6iTTPLc1ZpX($(0J2FDof3L-g zoV_4J#Y;V5B$=ksE9VL?P_)SokWfFhlu9DP$|b?QG)$5B(6)*b46*>ywOTO_^ERfixx?EC+NRpXDt4lU~tQvdVARLHK5l za!<~utR3{~TCE9aV`U?H`c5im&{9>0ZRYfl{bcAkMGl!Q5}0*s4RCnTEwW&F*L$fg z4aX#ApI(Gm;NM;7dy?=z{x#5r%zMHn*+5Ii8QLN6@ZB>&vKsO@Dq91b)bZa8DJx8a zXetqX@@uD4Zf>%qJkR$`SeL@QvBNbwpGw=Bq;7@KR#MB8GTOKfSKYUsCMTk6`~tZ> zlgHrsGTgZ}Y18aEAY?Z-!q>fAq?#7n%F4*8K2 zcmmm+@h)>3x}CoeLaS(N9aJjbs|!lL%pNKWH5?yu6E30MoCa4fc!v<-@831p=DNGqA&q=xFV0Ev~}tzFx%E>KP4*uF#u76KdQ@G z4`v34qCbmc7oXfzlMW^zq3&+pTTG$9!GQ-2u!qviD4p%t>Q~^(O(Vfj1{KdENsH5hPfze5v!@T}9={ zh>wQ)!&C1o7!0l=zv$+HXa&f4edyYxgVbdsglnu$44sB}DD{fx6YLqkPJ#IVXb?NOEtCta? 
z;4lnMj+6ml=&WbF;%YY3u?wJZ@Z@guTJ|@WS?!o;teND~m|R7+&|c+VtfpHL#D@lW z4UuwBZD$`=9s+9D92P2{9L@bshpIjpyV&zC-n_*JK*tw90`!<$2Zm}@yb2~aU7bQ4 zJ-zEBMFDZMH5jFHpE-99!|*d$6y^Ds_UdQHS7aV9m++ym(>gbbTig(I);xEYL#P!S zEk1C34xgvUVF492(5<{6q_y`N8g0_mX6>n3y zli2n`!e}VAzQH|E@TBWvxbR(Yq4v{kLV1yzWisES_Qwx~5HUj>+jUM5K5=IEZR<<4 zsMZMJ5SaHgQsn56psbO>dHc= zs04x7=(BFmXjxwCVMlRDX>VbOdP?-Tb)=5{9|x;rdq>bf>^uSK%w+;LY=j0F><;*mTMii+^embKaBSG8eVR&q}!xHKVUZwF@M5l};Zv5*mSr#`S6@WHJXCDF(;&=1k(cu1aAlrD}vbW@Y`T{7W>?_t1 z{QrJ-1wvY(#5}OU-jMKx0eZRus1S<)8;Ic5)0f!kkIL(VI5{34KL+d#-34qcDK{ao zH-O1&?l*lIQH5BT3hBTMhX;<3@lFk9>!WYuwi7?Wa{5yq950T5RtQRamG4BMU_m&~ z3O#;P+@Z%l11Bdg^EZ7zzr0P$Gj=sDe5kp(d6`|(+z&zJCx{OS12P6|LZ3_!$pnt+ z)2Cbrx|QcwL~a#V-iN=Xr@fOBZCIrwSoI{R6x6&p;fHvu_!lv)Q>B~JKjKZTLG)S zj}n(F9~dB`ASw(!oj(gxwGh`-QyfY3w97Q2f>548cXZI?cFmKZp_3sj6HZeX^F|JH z-{Sv$S@5q{Gf%GE&2d1cN!kbo~TaVOfwfB_&BCWss*v6{1fd}o0=ScvH z+`XlGBF%fK3#n49*U!utAQc>TGhSM?7GKO=hIbUEZ=VS1LI&Gp7hw7wm!h^m`6?(z zW9o^zX|YMw7UTpmKsfK;kGYO0liINT7+8g9rIG^3_g3N_w&d~fO5AsBJS=jXc34~e z>0_E9KrwNid1s+5!H|@5=gS?`hoxXNA+~q?bJc8}g?Mdf)nC4;N|CUPI(;y2saN9^ z!$rI=R$LzMcifdSTw?rE`r#cR#Y~MQ_-+q^5}!yeh_B_-<$s=5&@=^_tF*j!zvDsA z&jk4HLv^*aS*xR%Ii{f7r^h7k4tL9LE%n@`NlVdGdDKTSxV_3=SZtiE=o$SQC#UeY z^bY@_nl&x|ywf2dfK#(|o9)x)-;NU|h8_oIG+5u_vRsHKd2 zYhi6|{YfOZV{_a|Z;}v5f>NxZfv!}ua(Mk8#iA^@?S3pq!zvbPYFdcks~&Ik{<(?I z48H_)cV4S-U`@e60TG$m*rILM_qGUp-#Q<($1Zxnh_Ku3@sjUGr00*12}ZGRnpNL5 z!<6E>Vskr3tpk0HKV2VM5cSXn3$Hh%r+T+{W%sFfxR0ErbCXM+K8Ea`thrw&Rh@D; z-d)HxuCJcMNH^`A^reWz7KW0R#wrJn7ftT2lkoIRJ2vygo+p1l_d*MzGLp)=*&`b; z){ckPDaFKllr42WTk)QKYCs&Xr@yU3~sNlS3Z@;OHEV(JAR!Bm`jQS@?j8CbLsC za;z(kmORn4LBhF~NX2IxzOE~Q3V(o6%x}+q7f6o}s^9ERf00t!$Od|r6Am|~lab$p ziPrBBtyfojgC`c2|9SW;-FE|+xfik|lmb+rd<{+Uh~1G=dNMVVq!IXzRhd4;g|rdO zw@y!aW3&Y#nikp{b%^ILGUw!OCcRZY(9EBu=m`c3Z*l_M%<=bqPP>-OE)-(-sP!8? 
zdSSbpQ-4%>(0^z+JkehmL(Wl?n!rY^y_~lREPHA#GpHY;0~-#~Jm<}Wt$`5;?P8(V zTu@w|-JR+pZwPr%qNGZa=pUG}9+g?8Acy5`b(gu9(OttI6;aeNl}M0my6-CPO2a3g z(!J$ZlK@QcaEUag^hn!KzyeTMDgNWq`5W-VL(HnW$g{eo0q)#Soxl|Ofhg9K+D%mx zxv$bFYTK^n^M1t}z(hbr80Mj2vhVy;(h}x5#~`bk(G5wTBlDHIu|?Q&fBjZ$51iH- zuX0mdz4c%X84HXt));+_1q5pY|5H6xPZH9K%-UEUF9y!BZLGYPYKqn7(ob|Dr92ef zef7b6`2GXmmixOIF*<+r(F2rV-=_Avq&i>QI=r=oH{P|T6KU~k*Q4p?s-8;|R23yq zl?re)3*usLnW$2>wH`0Z&zIm!bGNk*CpwtW8ReXXm{)I9#a9~z_6xQ$RYr=;1*k?z zoVgvF4ri#x-sy46XvXY<0h1D!6!7fabs?&LGcr{~%UCmrj|Gi#5CHmutW_mE5LT|E z3XHQnYGOIyenQ%g*`pH1z+oKyRPFQrX+$h#)LQI#LoXJd!k7jy#gZz$+71so^?0wK z4=Jd;QCDBkx2ck*dCjKxZj0{o=eM^S53=o0LZ#~->bUZ0O_(b10)|}5*;4uH5=Tp3 zR@2U$-_6KISi&qfd&{y4&krYqyyq#O4IOjr%@%w0=p4^FzlL>UfVb#b;AlxiV4TTW z0T)-<#kM%Yxl+d_sMKM$uFSo%5byn8K7@0^h>0$fl<{otgylsDG%~})B=zUC*w>yQl7u9Z~w|e8E|s9 zG?j^#-z(D~po_p`bRX#C3r>=`1H3Gy$zJmHuO@rSrx@$m=O95N<;7l%P7~(P7cu?` zeY-Uf(X{_+y{acFnCk;jz)zuJ`)sCU?_{{iU;lU+HSl9W=j&_IiIwomE1R>Pv+Cmp zIXQwhql2T}KQTX9@TV*8BpCXoFRRw9NqzIH-H37y{_b~_hCD$`X^vW%JQXjg`PWG163E55Xs(rOit9)E+ zmw>>Ou8jm}1`VY#3l-tE>lpefayRXFf9O(Ra?WG?CUkAK!CPQN@L+Dlvh%~!*uID8 zZ#k;wqhkpg_@?jZ3I4#R6*DdO@WD7@snS z+)8E2&MC%SDBXD{rz0k#;XGV#02r=enr!gZW3Bjciha~tg>VOx$0U#SE|*Budkr)b z&An)V^!BF1&%uZft*aaOd(HcSh@K`1AENH{&PG$mxK^^wTlB(i^*QKtR&w$E8hx|S z=m)0|Z-6Zxe*u^znr2tKvH4tEJN_P$Vub-AZZGrZ&F9_VCbsljM>FS7GJn2s9e4{^ zz$@>qm$$kL5#;YzIw_-}vFSvcDVh!>#FbL{cu=2)6VX9fgMOurG2WX%L3k4s+`}k#%19Ge|Qp48r!#^)0 z2#(YLQ+J)$;F!Ydx0hdfc4ZNVg~iCOPyVMhZ_z>IqbX>gemfHRd45(+(_-2{Be=2KC1QTI*ybP4{J8WbGe z3;! 
z{E(sTUi-(7AK#c?D1tliT0TBFz7`#1l=VgBq%o#+_E2o&4luSTiAf0KM?hQYZDS7; zooOSW1tu$c>RG-l{#*{g3Da&9y!j6|Z@naXhCZ~9jYqy{i%94UDkma<3E59QK1<@i zr50}w3>`+t+6V2#A-}$BJQCw{vh#R6aPCmQelvG}jf04JP(qC)KP?Q4e8Q-~pDM$_ z^$M@UtUF2VN?qaOlsV-wE`ypU%)YyN-m5E13wu0GG5I4fz{fBjhTAjf<)Bu}7U`LO zbYuj?L`xlumn6#&(&j3qStJ()EKZ(x13q7TbMCdQ{?*= zNh;qU=l;~%;fgWQVE=xL5B~L4yB$161k`=3ExMhUE@7a?(Or5#a`$tf(G^A`uMhSq z<1XVt8XwN@cPtg_?QRQW!IKdD9T+$ZcyV{QTPBPT4uhvISDMHGL*AMV+4Kg`D`n1> zla^bv_Gs z{CS#sYH(1^=Ssb`y+L$4y3&=Oa z`L4*K=mBo8iCYpsVu;!&)JOjJvssa^SBiX%QRHE{Hkn$TrhuN(JV_iv_BbmXc|>4Q z02)kxF*BQ3)hY`QNl0f;)n^J-6LzuEdId26YwYXs&nq62z;Mu7spHGMQz-QM9^yxT z{bp74r_gLnOiXrniMlf}0OQNi{_WzX&$ueT!2W>sv7y4}stX@@c7QfHiMhU-A(^(1MX z9h;QO&xa**$Vhrt%k-~2YVj*ign>))fDX>UmABp5YToJjj(!V#*H(%7NZ)w&2(G zXhahnjcjVTlk#o`4#vRb6@3|}wwjO*yWh*n>*eUkZAYf3E_v240M^4rDZ_C7`xmdQ z!(0kn`TCLAvs6yo;_8o9r0*O&cQRdnge&s6MPO0X6iolepCV)dh>!%in0&hT=Es&8 zt03p@t+}U-KJNGnqR**t+bxR4y4ELM@upR%)(hdd?LA@OJ13Nz?a(H}D5=9vC+8{D z1-g)%;r(d%RDyg)C*8};;CU_Az0tE9fT_5E?XK)|y|XcHu5^NTlFh*a?G#e_ z^9&Bk5YP_PZ}T4y98|M1y&;?x++OsEme?su_1|pcKStblDE(1?WhKwF7+ha=wqG~F0MQ_y*N1Om}o z9Z-UmoCptYoIC%iA^CHga<6;^#=sWBWE8MKI=K||V=)%s7vqp6B!6#TJ^5@EiC_c9 z$0h)2_Pg;sqZfFUcnz|&>}%axXOx&7o0wg5q*aOVj0EsYE~C!5p%Kbl@$^0)v9RdL z_b)bSz*aL)Rpfk2it?jzE1}+2m&B3OZGTnqJJ!blpwM}XXoIJIt18w{>P*3V+~tU*^GZz-#tXFJki5pl0)~e5Q*c9V_HjN7@rY%%w>~MAP2NToL&gN zY4jG0_f=5de4B^;wAXy2miftk;E$~p0ldDu4+kj!D&q38T_vzbDf5r{~Lq@GVkd*#u!jmNR z@G4OGW4hnbuK$PGS1Z53cjk{dHG|YsY`&WHq{SwG>-#ad%LF<7F~S2J_9N0u2mF&P zY1N{mEqUntTdirHTer5F&SRo1X5@f|nY;AQ#++=-!4Fz=Ut_uyf=bI!PPuc=)0HkJ zb2&4&vB;2ThF-bwHYq`QI62Ip3k^5IeYZSyKMEZR9obZfp?ftJzAGZGGI}=w4%Dt9 zH_NS7c%5)bT3$?~$dt6ltT$2Z$~a$e*VsjqOL3nOd85mFz%$dHlg?EyQ(8xmfX zw8Lt<+P=-Feg0uc8ZzG1eZMVLEkatNeW|DL^MUPcV}2~su>boA5z-#;RuHzF;rAyv zo*nrQt6xA1Q7a>g@Y)j>>mfg(mC@S5(_LG+^41Wb3t2hHbj$w>8n6qM+|;;IT>xb4K0*b)zXG^Ks=H;epNG*ZKx~rzJRS;T z2j7Ql0_rp-oz{copWh2e>jo!t0&0jg10022cX9A-D!}pkz!o|y^FI|r{2%|8$$GJQ zU6u)^|6EmGyin#7faH&Q<&AMQcDd-hE&wyJ-JM*R!0vnZo&$cnoR7%_7k1n5|F`eg 
zY}lWo@c*~(e>@TY4^Lla=w(hWj@Y}3@rgP4`ngf>;cx<+Q%{rS68K^Y6Pyf!0s+0Va0Kg@-?HKYZSJw(9=4 z{^Vfwsg?l^4UqN#GJg@X7Um;;Fa9fS|F#q-qn<97fLa$QVCUQks3=7FJLZFfwzz^x z`CkCcYBCVH8As&h8hR%RKSVw*Ta4^qq$#)oK!kcp?r=Q936>#Fm9Wb51DJ~~D3{0{ zJgwU*bu|?6oCcG3QVyJoSf`=aYa_0TqaN!uc)ju4-dP&XZnG&HB3xtb(^)5&Bz^K0 z@oz4G6E34BgQn9(-z2HPV7n1_0p=EJ^c#2<`Us%N?0RkMD4xGytlG7^bDU1aA2v5=?OzO;X^KhI{= z&B}n_*_f(mzl3|qM4l zZEov+i2JbePfh>fHvLzRSxfju5=HJrB)UnV>3nCYv)J0+g-OSe!`5T{rkMLQ2vo`G z$9vO7;of851geNxzCQgqg0R@SaITt>b1^Z<=qT~sMvrOreyKIHhdcWu3)ORMN!}`= z@M^|@?X#FY8e8M403a(CBz{)GvOeQC68R%J>t*->#Uz5BQ~Iq*TgHrp4`bZK6+Gw_ zV#0!$Ai^EreaZW!9o0rsy`vSW_aK1aU&r&?WW85Lf3qbKV4S!GROA=EMsLJ1H$K4u zlzP`2h{#`*GbH4~PGoC1{kUS;UD0g$p|Y~cpDgZdr>$m=Gfrg zLtVC&Q@ENhDFXy!qW>Z}G6S+)EIY68s?`%$sS zY=k?_e2q(_SiK{D^V1E{Cs`sgs#52H7M^umwdHPBbHcM=ed|U2Yvh_7zHfCV_v03S zXvX4$hmd8_Qs`%dMght)YhO&s6268pzokXo$LAPnZ0peRF~g;@_2NZw zTACr!Rk#gqhkf=1D`k%?yUC{Al|k#NpIR5z&ix&`_rUmO!|lXsZ;R(z0cJ4MP5&?J zLvS$%HkveKGmdWuJL(cOr@=jPdXJIZXsxsx%5EDjALuIc+PRL;dst;_W2WnE zUWZ(m0@ob5WJ!6FR4=uaS^zWToS7#`VoX(;?**O-Y3(1W@<>Wjzq?IyZxrLfHxouh z;$I5dZiML?-dZR6OPamN>H?T^Bb>oO4Mq&aY*c-{U9uo`cB&|E;G1&FJD>2_TiL%R z`9TXibIo$N`tDG!QF8HhNRi1Yqey~14x+4&8&IE{64A=NhhO}wpT)99Qld6n2Aa>f zFG|_C^fjAVzq;Xm7ZFAsiQfYcCQsyI|ECeRSWgI3dpRgN#;^!(};N7s*( z0-?Y6dCu5sD+KX@D%Hp}bE+2;v=470%2x8n`zi{k?#0_q0y)S9027Q8YFvBLN_bF6 zofTeH#Xq~|vcWwQ0bmafRhMX8_e@t#uUK*5(a*58Bub6Q!1L9PrrTUtw4cSNU1~vLK^we=bNU2MO=0L$6VZ8kxk&=;IAuHs&=U$0Z?4#RQh%*a1dPdYO?Z z9Mb@$LL08yOWV&kazg3qSo{o3;@MbgW=ZbD-(3iH$Zp|%$u=KGBfo>!b`L{bEF|Yne>f9%@t)t-f_yRl@2hiVjN^y!Jc8#rhBai&89n0 zA|axW2dzCWbqlx@&!x}m%j5>tqr+`KO{DhW3aOi0lq~w*OI1iPXUpDy?XMg?B`$wz zk-ZERAP`>Iy(ZErwiRe{*ll0t4&?iH`>Ih3Y2=b|R}44!M$VJ&pZBIN2uU<5B?}sA z34e8xp!6_6jDVgheCTWGPZiR?K;0V2C8XWJj*aLTzr9&wS! 
zxJv$fAK?MYw&NI3v~pB$Y&f?CoKy5uA_6OF)Imgc9HL`U8ccDY@f5l?5csV zeeyQnTrhg;K45}WpldCo)*}l$mS<~R=P9SVYb@F1xS~a0wS`w3(wF(*!+FM#08)js zIa|k+vI`Rd#5ANy{2Pwm3kRNZV8v&OKYBHyVnaz861|(4q#iMQ{jfgXIC{#D%Fr9` zlFB6*d8P^Buc->Xi{A5?Q4gcvJNG}&u`2h|fSuVZ?Hkm9>kc>h<46mO zB{}^ULv+lBGc?S)hVL#O44bzeEOZ(Y{V%rOJ)8;ue;=%iqRuRgn zl0;6G^BBU$W>m^5B+2xF7fP$GdKeY+?#Z6}87VNa^=#iN~Rck+4l>Q`8{V(R#6VRQ_R4ikMjT z%MCbh-}+oV@K^HH*e*I0_<9V_fG_{bLrzE8+MET9Js>LXpsH+;h%7z@S3KxvA$RZl#zVwhH!VO6#0|WafNn_@s*RI}f*PY_9K_${F`6lC z-he+{uNOXdiitooyFFCxw3%5PI_Z0x2}d98j|(ho)*q>iA&qbYzAsHs`^UzCD;&CY zFaR_0nT4$ms}I`2YF_OVqeT7rl8gLAf!{!Dg3#kP^v5GbF8q0f|GVdUydLqDyg8o( zJ~;s)8~f*vQzGWtI`j4Bdb6X5j;emB-kV9dtxz3X^x?mgzSahfgsK0%zWY144@Okh zDGbVgQGxg3@w0ADY?sPBcPTCzB%@QYRSTLRxiSy#niI)t2GXfh+j9pK@j#EBj)p30jS#Xf4hmT?4xTwJ)P)wsxD1J!X1WUeg&0_T|$`o$7p)zk|&4Xo{ux3)4*H^>WAoqOc z6Yeq*C#Mj=wnLRI6vGtm72yl?RF6$UPZ64?SVvnzozOC#&$n#K1`CRlprm^W<9=}w zB181&eLC|L%MM&hs>>g-$jYR37^w;Fu9H>~fC{!s@l9z(2``o^7~ObdHH z{K=uSXVJZ$?)!pb0Hl*sHx@<`Q3X(#{Xm&_*kWDp?Qd4?$G|cTGApL_M}&%LvhiwD zqw4wyZtD7wL=*94v*en)a#=UW_}4?U0tNscpI{=+PQkd(q}O0V&0~EZxz^fufUkpy zT@W-u#u+$VWjG@ttP$eN-~3!~k7IcEIpHd?O>#>otd1)W>w9$#9mmy~l0LYruk|Z% z^Q{aX)B@qu+(xo7t0*pcRhr2k`_)n$HD%!Q!tYU`G(Kn9E04wVMPblW1L`A_Wf`_8f!hyE#b^N3Ki zr4jQFAU1sq)*{l+*TTpCP3>!}ThN>9kK^c32m3&Lp0%1FzeMP@SO2x2i8(MrW=%^M za&>ufQ8vst=_y>%e3F_q7we0bSloEk0ahF0hgX`cw%a$aG!vpVTHty$<#91e{v`!h z?*?M4pqe4C8#C%A`c;ZlVO$K)4LT{Z2|buMpS?)+^kCv*|KP6cGc9rCTY|v8M*es% z9eRdb;)-@+#EijrB9a#H?_y*IX@N-2CiD5L6^hI!7tr^)7^56oR2}!;Qu-puG2`lp zH_uId1Uu;ui&>*^@lrrG!=X55N_i2w`Kr90#aOM^nd-od(rwfqu1n4`Jg~&nAI-!D zqNCf^5enCl%gtx#|F8?8}pN-m4-P4AV1ZJwT@8+tRZ0D=uWh$-7O(*aqSky@S;LqRUZ+6@;G|3UpqF_`E#AIgk4)c9Rd?04%80D1AKC7Oe z!b#MI7PVVD9_30br^XDTnFm5srRU^%#_Z(;>Dj*=?5VQ!uD(ZbH*v1~;5~UskBK3FV6)(3lNF6dy^4^j9G84Le@GDB@A)ylj?B%$*;eLV z2jnkCoB-zwn)d%37bHVvSOG^&)mJH#Eqw6r4QsSmF;1B$KR|pr(TK*Bs(h$v6kXzb zpheOm7}OViIg6O$YBp^x8qr)Zw6#;tgmYXo_otZlmx)p0lJYEA`;t2}+w_kS=T}%S z;7Y?BS zT%8Jc`4J3jAqO$$e2?AN)Y|6pulj 
zf6*DK_Y^L*=nygP-d%mc_YQ|KL!%l8OMWZRm=wR=*5%D!O=9Zk#Pl6PEb?^XxirM3a#NPc8FrD z$xs|4oS9IAhfsfShM@JH@p5Y=!5$t!v7<5fg`X<6e?WEKuyD$Glm8>fCse}513I!_ z`Xz<>x|wlC8Ob&xHcX(dx>|=9|1gDy;hhG97W95{a7tRSy6VX7llUz|!K8KXGSAY4 zU}C7!R4P|*t3tCfM2kRAWw~=FL1txBRSWvnczY=G)FdJT$=(Dn2PsR(CXRD!S$N=J z&T`+u3xTnk@1<)HKnv8^>7Kbch#X5kteKKGHD1;2np zOc3T`?ruFgHS}*w9dEu(54B*|tfP)#@N`$t&ky;Kk3JZm&*LR2%wFi}d6Be>L|jzTXgICTl;#Y#wO_ z49U2|B2I)C>A5dzu3&KMui(;Bk!zP6#L>m3f{u}W=sHF2OyTAZ{vy&sEI>0nwowUV ztI*%y!XSzXNdBIe0LuYMJ4m&d(dM6~&xHb>wbui*h3bT`%?4&MtQCS4g$bm#>-`qV zzG)UoHtPEz&UBjDiMz8IuV@~ZxEEF{%=cd+wmfq(%O2G&j2)mGj(KAs_|Ae&v_^h) zn8+0#am@^kiqtCR`vUp{dY`&TKY^&T9{Z?${o>Ms*!pjV|0)&)* z&}+uI1x^HTQe%Rci&Az1LV;h}g>#BLxAwkp^i~<>ZahH6ICQGAP*2uzR1yCB;sD94 z*64e=BYQ116>xqb?gHYXFRl|GQQp{mo1YKK|I{^PH$?m@uQ&lmKa+b*jB6%*d5nrS z{Z&07gx)mUNEfVB&e|PzCZ>B%Aje*R$GlW|)jaMUgWpMZ!VE>##=BQmj?lksuP+T+ zxZpogoL0Z_rL}R8?RWjQNKIWBx>9Jl3Pe3Fh~C6}8Xo~GqtF+Sx>dYnr6bvhuW!5-9NF>ig*89$_En= zZdYuH@@p`z;{JxsC2HMhqRuK8C~}D)>8`H+AXw2~(hMzac{JAN=j$(zO)DR>_iMhT zL12ml@^6Dm^p^el+l2tH^A(a03l8mCc}aLyqI6Wf95wil@!jbfYtSuY>Y8+qF$3PJ z@u740U5p7!J_Hz7D_ZPeZ~UXrhu0WS*rBEd=$dy)r(Z}Q#oTf>+-?EA0M8rZ*M#im z2t_E-fRRL;l=~2Wd4M$7&edV8*G%*XTVNO(R4+F|loyo(GSTaP-7fK`I14)2v{ZgpD0uXDhwyoKZd1mw*Ub;vSX=^Dp80&`EXL znA*h+7wI&zXQn!de9y!CX5%-5)S%}pMCY4DY=e3rh} zJfDSRYPH`WisZec^)rdjHo(*QMm0KfTfb&ECnelcKQ`*FK31m|{^(o|&3zct#z1rE%sUSo?SYKpuwF#95I)jrE~yX` zCR8m|W$SQ5EM#ki{Xs|c;|4i_Y{{jZ%HbBd*_Xv=P$wlMzXd6eK+oW8P{i^ELBCcJ z#c3%*bpz3b!SGYM1rU8AE0EFtFshO?o4AB4Xy+JaGMI8jqrTnC{PtI%g#AG-_4_N= zNt+tSQ@0ht^PnC$-g-_K(<2O@-!KJIvClfAD%J4MsvMeo|EUaJuH-*mB6>GkH&K;e zLEe^XUZ+=GVuqXRw)yi7U00R+Le$L;g4MN+l_2GFf>WG_>cbwd$D1Yjz?i6xKMz?% zo*^o>4d25NIA(gtXQv%n2QKcEWoI~au9cV4yDQJxfldv1yb2g9;cfduudS<>wr|n8 z;RTBQylch-1?qhuwp@ez6lFskA@NUnD4;J~LHx7z!s$Vlet{hL;5EQ)CFZkd%i=1o zIj{NeECADMCOoZi0*EoJPmnRyM|cjD5HAVI@HfYt@BzD=V#E1gVF#sbFc%hda;M+u zCqd}y(5xu7%r}EqO~X$IY#wuy_2FeLe#3o&xuVl6o1uVxQ;2S>p^CyhA3iUE;@-Jt zZ#+CSyF9CRFVJI{_u0OZE~2gQc6hExN!C9oO#$A!yeJ}uc!X~x8rPcc8$#YET^U&^ 
zemrp1J;WZgY*o@tj2a4Bj9P5mq7;uFlz1@A{USwr8-e14cD|RoZg1pkb-#L)0-68R zc`5)oND@_I?IqPkbuTo>7!mR=&%LJLHzn0&sEm%1(;sURTaI+~$T>(vejqKf&CSL+CVm->-5`sR z(U7vD3lU&QHW(7Lv}v z`f}CE(48fnpYlw3rJ-YM&6e^Ci zM>Q-2j#G@>CmJXE#VGRc9Hb`}sqGihqK5qS!ij#;!14v?zs=Vb^%yG!rbAu2e_e9d z8590@Vs? zMbJMJ{GZ_{@_J-!12OwD!tPt{-aWCgFRKMVY0O_u325E&eh`_JO!0Py!rY?9LD^A7 z5{6Ol*C3B=7jz?tqe;)w#ykG0lxk`wQKp@GycrGMtm`89mPTApqo}vih2kfpe|%M z3Hrkt-Rx~a1~!DE#SE*?v?w%C?dtOgeAY@-0?Z_a-OJTpDV)LjhiU$vRDRHdQQmMF z%?=;{Ydi|)?FA;SjWSa9Pk-ym7IU9)+-v&(j&C>vb1B(?0X^F|Swe%}5rE5LZT!{4 z?~)p6BdbpNjhdmhqoG`Qp}T^6bacfL)rc8ZHiLi8|H}I(u7s#tAzC@iTaE7MFZ}nt zoP5aOKp_`eH`I@b&!U@T*I5;|)O>bBaVHvG2@!xnyi}Nn!}vyXuTpSB0S$^-;f1@l z7+_<+$~RMQ)U*`arD7wNvZv8*me?bW(a0_~?7k3<0tkM`#z#?T;*$w90F2V(MEnSq=#HK^Y#! z$aaJnyf<>3booom9U^-{^R(23rQxr}JZo4)Z5}^FZ$sS9D4Fkh_UKw^CRLp|GoI&j zOC(c(_wWQ!O|=G*0rhS`-GR5MS?@OrgkriKg}cX>xCRM#( zh4(T-N^Iq5pa}>=q+i(d8hPV_7qk)uVU%9lqM_dBs#vu2!Ez~N^g-#%A~3~atz^XR zwSpO$u3|SsvH7Kl<;54RukMyt!tvrj<7%}?0bR;QdI&Npk@Aaw+zI>M7N-dYlEuz8 zHUZr<`Jca%ks+zxaRi|+kY1Z-l+eftkbYSQA%*zWRtGKom_$|>GO&%rQDIGKNE1I| zMVGW0RA1Le=ZHdx*!SmTd|Fg? 
zt6Ou&3f*%)xjW4uvq`$8`Um70wH$Q`6Htj%K#)T}BjO?`XVj%6$Y3M;?yn@M>F1*2 zmRXM5+@QFCa6&yq#2Jxn*t2om-1G7C16&yD(u(k>bR@GI4{j{Tn0Z~L6erP-Q|m`I zaf*SHhsxlwhYni}un!pu3Gs!nWIga0La|ftcx89I*)a%$Cw7)+X-4vY&i~1>t54lw z`TJIj53`)>n1f@Gk!wL?KuWN3x8oGOdRJoDr^RpE{75E7V$0U@gUoc zS^Ep`w|4p5Vh)hA=g1&0p}xtwTx~hlaS#!tLJ~cT^mvH!^IKTYaL*9-@QI!p=L`w& z;gz-*>os!c;r;#Q4xeXKuw=LzjuFNyIL3#yc}+C38uZ>^sUON(6mCHn;ZrquAkmx5 z7Zu>Y?CD$^{Ao^&L4A8>sFzzh_3TStyeyns>GuP^(mG@w_lk5_yET&@LBQ*O;3+2X z4ZG%{vO+7|B%Xs6?v{l(+t~5Zxe0C&4sfHL!9SK0rX(qeosuZ5lW|I?vClLCLb9Q;oigmToqtw1?;aDX6+CTx z9n3UVl)sBJ1BxHKQnVIzGs6f!07|AlO8LbCc_aqFE6m4 znm+vgn13?8UW7*+E6Jx5zZB_h`umw15mg<(&p*rX$e@8&0nPsOW7PbpH_NqxQRipZ z=%yc+aQw91sBR=;ax)L;^)z~OWUJ>_seH-uLq_{ugVu%7jrn|WTrPbg3=l*AWz;Im zB}oA%AtDzWo8}*~YfM80u0ms|d(gad4t%uF1X_Ho1IW-jP@(*md(&t~iO&cn!Eojh4cs|&9 zZhE784!0`ZbNpG{$;Ci3neh5qR69QYBepI($#P>>1JEqGw9KH!7%H^u*I)3E_!^)!3SodQFx)Rv@vEHB-^&T;?2a!~H2}5E&2kct zB2!D7`C>BiyCY{-aUyz?SJaEV84w+Wgg4wHw+~|kU?$%p%F||IV!T-w{E5;dP_xXL z{jWFQcWy@(WbW=L#=b%l>G2QOq|kw^PQwCzqa6F+g~Q$Mpg;U-`p@Y;IUftP~T%Rtc|`SbB}QYL-? 
zwEm>2PlmXhXAcLP(T6!sP>=6srDBXLN0Hum15d{5@{ z?Og0(wwwC>0}fuyt_X8xx1GsM>BXP~VR=6|fN^c;!S~_*r@U{wTO=c{`$Vb#@++a< z0__2l%dSKk{I}bnfZTsY{*OZOgxxkg{aA1X)Sj@kCNto%72M?K0Oa)tupvQSY);Ux zff9F{rZey$|GrEJwe0iHJ`%MfG~cZngj*?Xv=<7*nyjS21pq;{P0r>k_??s; zvUCrR2`LEu(J92cGPiR`HR}xY$`jFesU`L7D+7o4Qk^MEOQh{Ph4yE~j-e7Em z!q%mLjU%L_6rY>@_qS&BCfiM+$EhufM$ks7j-GbKN}`wtsgf%}bY+C69KqVMNPg|N zFn6_=mFe{YwXN;;rUzG>k`J|>A6ayFsY&p^(QZWs(*SQUJ9S#UoP^JM7!8WNFz;(L z7lgMIrg`nuvIxNWO^ifw%<4V&cdVR~&P8=hL7eXS);Y7gUHnJR6{-2>$z=-f35PQ8`JUTrn)%9G;o52lA;ulbB)NS; zSz1>RMnF~+q$lbp)JYi0Q@`dV36pEDBn4~75p4vO3r8EcLkl}$AV zs`kRZNa*#`MX7?|>YK?&H$E(I_o+jsgByA(v57ttIjnKxdeB^NAGB#UH?W2$SbeU% z_odH;;e-1kyOzTY2H+oQOM2WVlWVx)<54Uu|K3(TL|`%C`B0s73K!Ede1FPmsg|Pb zKI64G#E-X{5(#e%nRBAT!HTNwEvMz5GekeMu-@;3nHJ&P5P(mql;H8UB}m3+j!6Pa zEuGnc%|+N&$E7W^^c|-xgE}iVTCG})M!;hv5Kw}r%YaPVt>8YB=LH4PcgmbTmF_&= z(Pt4T=JHYd(K!lDcr_Dm&Kx>y#dKW5Zw~$N@S?{j^-;HG7=mwC?PR`@3yv;et~6~= zgiLs`1+s&WZa>3QB1w~yKZZsuaP}B|1?}tw_8B$dw|dm!^~!c$05K+p?Rqh1dRC4a zusi162?{gd@knGUXKJU-$IaG-c?HzLUdf{x$`8&S@o_WkS%PMIx!Nz&rJMXfTK{?L zuqCx)r?ZG+${h%ObA1I&l|ix(6hgIOv*{wWEdlIq=!~0 zK?lp~z{h;imVod6@18`7kXRbc?~20%CvLsE!21Vh_%pI>LS2qC1EFlDwe-b?nzwv* z1Ji^}V|>}h0_*D2!8ma%qj%5t#MS(;u9fZ{ny2tnH2aI{vR1I2X58=V)Z9fzwV<`% zGns}$~J-lqR#U}NfaV{S*4D=u|)^EssQoW_e7yKJ_( zJSP@h+a#cLG6Id%+>%QQ(#FGD9@6LosIu&r*DE5b3>8<{AJ-Hd&ni(o*w0W2- zron?rFMxgX>*D@|XPJx0(pk5O8O|@Le7_IO+gU-@R8R<*>wwQEQTI3GdxS-T={qSZ%Ok!h=yX zWjbfLQ%p z#l@kJ38UGyvcm13T(se1GSPU^cX<9xV7*$KV>yxMLldd#nicIFKIQUnwKd1_r=I2$ zA@k+;L<&}RaY*Q@iH^R5_kGRIEP3r6K{XVm-M^C1@c18}tla&ret^5T`x?Fe$RVxT z;}nG{a>FWjtZM!-Uj2^051mo!si^Z?gt}{`xT{hS>dM`Y4YM^OfLeHKasAX!jr2UJ z#xL{?H<@mcbn~f#s%X=b@|g#^_17Bo+6OA^Tf+B%v72WMUf)&K#?`4jlo)1DP%mr2 zKlBmbEMQa~sK4Lx?WE!mug2tLGYs8grs&~<%2PGm1jRqzzAyHg6pHA5GW|Op6$2l_ zFh2(Coh&TAiol_sJ*8caWN)n`8+L2$>xn)twzoQnZ>L#av}ahS+ehf>`HsK2vgz0| zw76-hX5b2s`ktJ-Rui=GYw>sQms7_z5h&*D&8q0}*SwI!@}JAMtGd)8AMu&s*@n81 
zmIpaD<=rh?Hw-CLWVhm=go(Lg8tNfR&`b6oQeu4F+Dw>(S7`)zq%IR~^P75qKt9fJUtoB;Cl**2MM)+bEspt$Sm2h@4#0(5GAz{jvFhX4ZIt zOj@q~(dF8Y>9<6+zQM@}b0p8HxKGx%H{+{<_>%sU#R;0*oTtjYyWfVx!^f}@I3nw* ze1~+aY+z30R5BKU+euWg&+$DK(ocbe7^l$EP-?rLEF;g7rd#yH=XU=a^7cfY*oJ7YA$DRE83rLD!QT6}#{Xw* z>R6h?Waa%Gs!X-^a(Q|nVz*xj=!?+&PRbFi2%udbW%6jYNQJIf5bJnE1A>U1he8gU zK-9@NGTqC~XhKnHQ)cmaPF~s%#zW&jJgcD##~+{M=_#hgSbu;XZp2q| zk6p%b6j+05?oOPxK(jV6n_{(#VaA;PK87DOAy-(p%Yr=yO8>8JjiYUKwUpC?7FMAR z+#BmL9oI6xc-0KO@FR|zqyG<({gSr)C%PVBMS6Us?JNpX|FsQ~bja&~YhS({``_Ui zrXqXNhgszQAi%(H&e1kZ+W_G04X$I7b*C$@PQb~ufx{W@$H>@PsE2PK0MU9rXoK~T z6%;fHqjMZY_Sd*rBKT>z zEDFKpiT?$$3gJ~HbdXyEtJi&0slGlZG3=jo{(Tl@Nbo$Ike|r4tOX^Vvsw!qtWYKO zH&cfaV{_^Q+0{|*VRNSnr=;ljeUA*&rfUe{8AY$oE~{6E5y4zbuuDqb_C&17#PH4l z5W@IMjTinmX`;~0FsC&wMA+T&-JuO1SMwNwg2d=zUju*IM#%MUk-UkgbJ^J5 zNjB*EMr8yltVVZ9I|AI~Vpt2VC~~hN7}0qyxK>H0WzZYN*39ov5x$H3^0#pc(rT)5 z*}E{Q9?!qT>`WsVufCJ^;%4|3$`N#}3*46V4IaC3qIFPmW>dLYDM@!luCd)ctj~zZ z&f*#4org{*M4jL+YT9I~XxhSh`ydA2@UbqrQRMYvN&HG0Df?g*w0|rKe34A`YWloSx z#R7Xa5d!+)ITY}>J@ zadF!NeavUUe&cdESvq4kyX~dvF+!?paxg(!!n0P#df{cew}fHl_QW>e?&Qe2`nRVY zzxci$e<}!x7!I?)`stu5vgs>B`=*YXDf^Ge@ow|7=j_KfJ`h28!Mn>itszuTVcN1^ zz1Ntfp(wvePmc3NUEyz`4P4ZDv;c4KGnEFrLEP+id)oZUwH_wa?KX({EFv3G05j1s zH_c}?;^4gfVPgv!-duD`%8dw6Cb~vd@m=^WYz2d>srp=) zBS9+i@{ct2IlBp|M%{AipA#xuwEcRo%$TRblP%lf1=q&QcTMaE?fN4WU%S z9zysIdBC(TWZLs*?teg{Ux@mAtxgg$<*rIw!$@RtXIW>(DH29+0Q3$uCHZ8J&tm_& zi;#kPRh(Me5|*xzC6~e<0RJ$?>OT3+cZZz1`T9zOx2;=QvN|d1aF4!hbZIbWmCiLy z*(4-ARE=YLSJvtTyGM*a>oXfj1iUm3j}>cNE@Vv-ZH0v2FWz6ho)Vp>U|uw>= zvD869)HYirK%_r$6J2y4+2G#m^T(E7!3Im)dKt#7si#hq5UxXsGFf@a7+3DxJ6)P} z)V2zpQ!MVNL)e#kxK(!1CA~slhpzzkE9ewcdo-|V8q#e`yv9$5M&hy)A)ko{7Tt*x zo|y;uE+sXVk})@Rcb}=bA8e3Cy`O#7t_cWb_^kY(OyzM;QW%4*l9a7lQ4CrU>m=Xkrh5=QFk7d{_X6I)JMmi$cgyfJ_sy3g}cM*rt1!X-OuSPZ&E-eX6G6@&slF zOg5p#N$lnl6>I)F&7DEw6G@Z+iN2Uu)?^)6|7;yTmh=O5duxeOW*Jd)9gH?Hb_|TmeKIU56W=$H+rVO%=6{ z6;g>$CTdG8@mkI*wmCDmcZuvWdP={e6j^PbPN2*GdW_X`=N9zN#6DLN42`AKT3 
z$E?e0JYxBnVy|}bxaM9N!!Jg+5U>AymloiWoASp#kTEo~?dq^3I3w0=&(d1Crc_wm zlW^CBW_PkJG_LJkiI+*>P|&!WpCw$j;5%O@{ofiLn`oHH?w;$rorEBQ^$lwgh>oXm<|`AqlFQXhG(cmV>6iXWQSt?mi3y;19Z0% z;T3`Fw>>f0kokH>W zcqHgZX+??DetL z!D=6}s6S;h23}{RRPOLyJa2C=wsPE~c8rKWt+_AdLGpzkl?GemZf?Q}dU1G~e^Pc5 z@j!5!LS4W(xt-xcj9;hSBsNA&NH%iYRK3XYwuX!1XF}Dq*ENNxx|(#Bgc{qv?bANb z;e8iiTalAhoqr%y;i*abrkMpiqRr-6mml&DBLc6uTc>b5VM&j1OeHKu$=X8|=hF5B ziFD>&bkybuT1k?LS8$rI=16&oT%!Dq{ zJ$s6eTs!$_HRqXi?zWDSCXt9ZcQy<=T zj6UcP+X%+04h9`f-1cocP8{Jk{HqQN_Q_OEK6^5|4a6o%{LW%s1Pdk2w^}0Fl}ng!oaGgW>bXY8*c!ZJF=IDFm3{?h z1j}e{j`u%}K1(l2C5jF7) z9y!hOkERjH)*!DG@stcr#zb^(MP539&GqR~bE>OdhJI^ts~sj**7em3XTPEB{&#Ja zY%tc&PQ!h;3-;Q+E@)~|lpApS>0f?*=`7Ds{Zh-cNK)cQ^JQeT{Mh^LO2sF6uWu5i znHNf^*9v3i&wr5!yDSWD%|Hlr`4&L8$nrxa zYzsBQp@J3XmhJz#)>KRP3Sj@_O}_kvs_(0IPEjr2!&O@cqC=Ln+mOGyU}MF!O<0@o zy8(-DmEXM7XN=F@VC<~i;!Oni!H*Y}lgudvRYKJ7BTQ&XaF0coM|{drzlMw1XzdE% z4iU?El*aO9Tla+fIo${F( z3sn)Z58purM9wJcq+Tojo4@0vTUTI=mh`K91=2`~s5<&?%Rp~r;Eq`~qJrGLD^4WB z4D#-S9a8)V_R~ED&1vh57xjEUL}mJPGbm39C%TSQgJk3wdQ=K?e$RI07@*sv zZ+{&A(Kza%dFNLi!O#P(lWf}Gb#_TIbwIL;LgVQl&HNlk3{Q8jLAT6n4+LJ6ANy0p zDpHlOS{O!sT1XD6Jz33MrO0%`=yq#@6vcv6jr&3x=q8@ip&U^VR3Hu`gGOo}^YLaa zYW~|gtC_bm9iN6DSEPqIp%1*EsQI@(M5xlL5fY;WQdwFzZmKs%7{oVT>t8+*g!joh zI*3NTJwJDnYiZBJ-_jA@khxxZ1!Tx_O#@foFulGE8#GNH7v%P%-aSxngBu|7L?2Ji zd8z1o&(5n4mX4VC3@R=O-ur+h)8p33$3~e&9B9row0wHiJX=Z2E3t_Uwty0R-xJq|bvsF!Ez{Td%us&CZA8!M;{cjck zwY66C@JJ;B1D6=QXwo|h);7*2j$N`Y))>^v|Er*jlHHrQi^{ngUL{SO^!o1X6lVfL zfYO08oC=+_=m+%6gKmBXNZxwkMXw+5%z;#xjhnV_0D#^cNWMvYr6bji&l!-G2R>`mb zhh=aXuPU@SXlfRH*xYR&+kfvzYl%`Q5YVe@_MbT?$LjZFo=+6MD%L=_z%n$Erm^hk z@5lB*+q5*iSoFLe_)cf#;JY-TT=`quV7z-*Co_xZMekkK#-`Z-22fj*s_9N8bI05;yf40=46%*heG=yDEl_ zS-OjYvBx&fZNSpMN?9b~f(gZ3P?$CMcH1{kpIj^Q;p_(k$WOfMdNqZY45cQA`s(j! 
ziDw4SZ>g;D?kk;G5E;7_alDf1+diLn!acz>@NxM;|Z9P7*0y+QXxu~1} z^<>JUv5w7sdhFoyQbl`}V(wfnaKoae+YM`C6mO&M*yYn%nRnB^W4^hUnB*%KFb8(X zMtD4t`n@a=H&6m?dr;VUWS`FvVqjl>9y4%;aodL#aop(nSHz8{TTd?4aPGHld*WH& zf(_Iv{paR3B%b0o8&Gm7MFsa+L*dh1_q>ueQ$=R1&D!>;+CnK4{slKn&}|c(pHnHK zK+)gnS>gl{Um>dH^RIp{KEA%?=mp*O;Dex_poK>*;Pc(y{M$T86A}iL8iJ>$8n|KI z#n< zAK3N~=91{QvF<*P-~BVZ*Yy%Y|;er!?Z7`X-eY0z%)B?n}tNh0HATq zoAie|SE?*N;YexNJXXkxg++4vDT%GmMUf`Nddu(J={qQEPE=mnHVg8zJEOrK+-27B zK}_3G7h~=sOmcFdEX;;^fNUoOHMA$dH$#RFYhhN*Fw9q0j4thkzRic;xW{qA)sqSg zzn5WIJ3A63KU~q)+xF(W08&U~RZJo~!cd~gdgt@LPnAERX%9gRIK>*1TA5kKM-t3i z+81I@vS0^$G|CIci}K-vy{^VHX$D?B)(%Z(Qc7rpxC8&C95Z$cfxoSUH*hCxX;ti3 zh5Sf$?OgBOx~l}?UStlq@e7`3n#kxct~&HjNa-5P=ed!lY^z_e5eY!V;8J~s+Yn~c z){9^T-m!OmNtj9CSIX#5uuvj@R5dkygF-uDQd)(3Tobo{IcYrhkvO>hZ2fST?C+;6 z&yn1wV#+?fZYwux^0)damx)`AW)h|Y`;b@({=F>ILqStZk_O6;qaKT@CdnEM^j^Qe z^u$aeen{-*$7{LEec5_k>yji<-0Vl|{1eJzfbTzjT5jV$Wa%S{HH$u)cqHGg`Lq+K zRGU%N=|Bc^>V9JCi3h+NooVI27Pq(K_htFW1?9jEYeR78qifL?7X!2&4V*0}-&)j$ zOk_tH4)-L=m%=tL_`*hbVu42RyC1e$;EV`c;r^OV?y$%8ukQ3A+|HBCuH^v;7fR4W zZ}zn$>aA}Pd&;xNFIsEey;VVlYb(7Dh#w?L+=Uy-EVjL_f230=C#VIP52a<8*o%bf zhMXH?=Q>*e4jKz3yW@}JVCognAJFJ5llP2?@>6?vNPpUlCY%*UAW(%%tFAC*_cib2 z0a)*wK}^_PZd)eApzM^a*UW8;Kbk2Ih9HOv?nMKU3}^gi|3&nyi*`=1dy$+AZ=XCP zfPDQ{H_JTpL8wz`5om3_SLt#2+P9CQmEs6w8xtN~_F5IT&u+RHT;0!F`^|6mn) zKp2N9p-%6q{I#pfmv_MFY?|-X*s6o~V67YkV}RnG<*k2o_K&;5Ocpf)-XGX^xvh|U zUfAO_S}tUH02t$hyu6WZ7Kj_SQQj}=oL6LfYRS8b$j`znh(U;p72MY@wbo&D(&Ql{ z&#N_=LP5gQDn4A$)PIbQg4RbcCwf7>T5|N)X>hk1vXz-crtJCfz%jlff?yo(A$HnP zdsgXu3c~(Z^Z8K_^aL|``A=@*u;|PZU9g9evnzuLC^i8GM1RLu`GgL&((T-ExT%sa zyZ+^2^pg+KUPAG=R(G56VDjrV%JlC9LWWV^qhs`MuH36rNXV#9jfUb9zFD-wBf2RrmA!eI>=B3u@%$(^6zW9#=y2~% z?uVgyR$gpqKf2c^WI9jfYTVMXF3-wU z_YG3Vz7zKcyHq7;e_Bfgi#z8>zeP&FcN{t9zf__UzO!IWPW zD3?HPRJs8H%)pXb)DX_|g@4f!XAuzaW1 zZ4@;%of{lQ1~c{=k+*q&%&Z#`+e^}GfGCH53GKv^jXUJ`XQ$CW-XH(h;&U_9xa$rz zAIP~z7p-h^#G8hiTireq?Bi)p7)dK1{jS*;QTUuiQNN4*FrgmeS@rrVqQ7waY|D9K zrgHAZw-5U;*U>U!G?!AMnn?*;{=-&b+ATb=C`gB7Z1n%J_nz@^XzLzuM34|^qJ$tK 
zV(SsTr%545bVf_G=+Q=R+ZKr~g6N4FQATfLkmzM}Mi+Hr#^?+N^RC(Fo_pVWcFwuZ z@BQ$8Ip3JNX050EpZ`;qf1m7+p-u1YL|6M~gYUSnvTG#$6yJJb!Nx6kql>vD zB~Gh9x!KCT(pdF+wZ3aH>2P30K}Y)A+oVeS`1jpD_pWgFa&asCU=d$5PuTZ-2rlCC znf%tidsVsbPJx&zWAWI+VVd6&S5^vvnpS+kWV$P4D#n{Qr!-paTuGQaE7pNQzqS}} z>nP`!C{JK*c;(xcv6`!rR9QpzGKiYqMWr#N@op(dXI4LK2J;Ihd|&JLa3aKQM{AIW z2RzzkZ&|)vG*Hc<8^u3Z2{lDPxi^kZyVX{IR$cw9EsYAKWvQ&fH|9v=Mf9pG9uX!~ zh7s>mb37;e33!AT(FOiyc(rOD@wk4DBGiH%wF#~;!mXS?4=w`5b%>R&`hVM=k6U_B z$CA0vggen9|-&j62XnsmtPYXKiM%AA_Dmg)(3d;<-~*1p2NB? z0XyGJ27cI-NT_5vl7R$?pKm7-dNtc{g zPwMrk!;|}Ww&Nb+5c$zUXIIg(VRFI*w?710_%8_h3okO3UIXk*6b*%FUW$c2DqT<3 zI89R4+QdLm_5t(?88^+kVD5BTAoq)tObF_Se;IT2=P2J!4^N6He8fS8YR8uZ-G_6GYYYvSW~>+;0npD{J^tL zynm*u=6?&AI(*ads8pV2GzU#3%dyZAN1dB<-I502%+m2-gv{>A@ruQL*&_MRPXKQ-NWSGxyA}M>}jy} zJkzgE&nXmaSanv`h#F~4#0Oy3FUtHo-@pQ_{s=_8BqQ`&Vaj+2^qn%8{zrZ?(XIkFc|N3?2W6`*0Xi?r>>T0qJ!!;--@_Jk19)Hv zFrG)Ub+Ud8WyrH12?h+F&Wve5(J&~l*4=-gP+E5NdC{jb`{8HyujOICozBc_08G15 zE1||KSi)&7<~_F>n?vF+fH+C|9;f5oaJBb9+;``c3Qb*O7HZ2y_FKxBTAwx z?=YYKN#5Uhp6|{HHRdv|U@+z{f##spA?;wrh1+K^I!MI$BiG+HRPyvm=H4v779RKA z7gR5>wwM0d+}CS{fb@Kdid(4q)k*yG$J)LDQCa-a@&fhGU-K&&d#nS@#6Y>Zl;B@u zm|sr8{I7*O6#7e{=G!3q7c_FoeX- z{d<#duThcIDv%}E@ZXz!`=8_crO*WY=lFi9mjBVdU)iKP|Ixl*yNCZ6&tGcMe|+Ds zjqZPZ-!HMr|99vTU?8|gSoJbP7aFS1|1rnqcXUvF@M^^8mtXfwkv&a)p@Hf03+4cR zYEg%w=iqWqwAauf9{Ryri*FC}vCZJeoL*W-jN17R3)2Nr+brT8M~4J1wNzR4il;o) z*~1I{#DvuMLdNo=bw0sQQ>6{F5z_aS;(0k}F{o%hp?b+p+M)+ar(Q*VC@N7u-m?Ip$V>3botE&I7s?>7PNvD_5beXUEuG=%z+&?FEg9;}Ae|d`fvY?UC42j~6^REV~`X{T!zm#e}|aP4nx{L!i68 zzWp_drpByvxs1IP8FK2YT@1h6q`&xv08-WL`n5_-i%OFD$XmyaZ^5NzQwI+BPtJ6$ zSgp+lq0>0G?Q;b{U4~Hzy_UkT?Pptm|PH|C1XdRe}CFAgzV z5;}kAmK`*x5v?DLq6`7cV6{$JM${deJ@6~O&-cdhl26(hKr)Z0#7$|Y{A zX?kO!APLv;p29ivWssAeCo4_PLjZZ347L%^3Y}C&iySpZ=;;Lw9{Un~B41@I(3%W> zn%3Y;VJ*oAwBZg*42Z|t5*x&GyjChLeSdg8PZ>i!`F!uuE~e`bi|{PDFv?%>Yroe2 zd{PR6zTk!c91}05jvl^Q^183T3RSg+)l)C9&Z;u*p@|^{hcDww6%Gvq5gXZ2v1c*U z5PdO+8^P?pAD71~)hg`ASD;yiG)9iDhI?i^24F%=I6KcaHmI<*@&a7~b*=cJU3J!7 
zk!dHsI3F%hY`rm*@a~X+A7G;l{6$U2-zK5|16y$?juft6(wU`M9Vyo6mP9-4nQxE% zq5bfL={@f!KEh;S?;PuH`IZ1p>8pItoTC%7XYBnmuax`fU{lBgJ$c@I%tjQE08cRhxzEBQ=HBWqH@{nAf@oXoTI%X#}NwCLQ@8WBh1LCU}|kOyc7 zMM&PWw|RzwY^~N4p^JWerD$(KI289@#vVR|LKbxs*XE=)`dUlPC-=ikjEHt4zhSZo zpIS3o9WCJu>Ia!Gr#u5WbI+L^U}5$1G~9&6k_Ck}rySaNKYZYPXME3S{j7Sm6GaYT zJ}K+xJH_AO3o5IVgq+&_{iJTGwG=P5pU=q6^CV#f>ecC*ZpjWc>&#N~QTJ<6ryi-h z%I6!EYD-TOqH2#?Wa?B>pJ&vR*-Z}WxGf8}wh6e}zs6?QN#BPqPb?+*ee)2T-0ab@ zQaLD^<#v?IL4Mj~cS&YUeI@NPy<_DTNNmvsrZ!aH`i8peUoPMKPIkN97rZ`<+n(VxX zPSa^uWK89jF!C!;SU89FEA2(D)O2{LEfF+&76c#_?dzb>noCt`6lV?h7#Ls37g56_ zx}P4bw;JhHx#t@A92S64VuRSVcdhIn=u{=U3*XBm+ZI#Mu&oR?53ucj4+@RzXf2hG zHSgYj($fjLKB`eSti!BZ9Rn&61ax(VEOSI>9 zj{>m@Rj;pOc#}054=vlAg79$uQe*zxVE@mvJQsoRa^-*K8#du}4pvct0U z@Ee4Kruq4LAGdR+c@lgfe#nNiPFjDlwWsbQH<;dsSS=s885v_^Z7AqNbv9c5kg{{N zesvuwK^M&C`9)UdQ)QXeXp?5pS^Vm>(EZ@ZSSmJegPr9)PP4jP$?DxF-8HF~aq5z~ zo(gi$*nIOVby6f8j7BSFs3h8))_R|>)K2>nCXNx{TlC=+7@xvXvb~wC(#R##O+<)H9QiEO)_LSbfa}Zo zt|KN26wu2eX3Jl`*}t}_qKNU44`Y^@vLdjhlld0M0kqNn9cEb_s#D+?%@PIX|E@LU zO!$k!yTj^%VYeD92^>KOCgj}rVUM`7m;Z`!RpDr>RD#byXIMbr?Y*Or%0^sY^i8+H zc*l&i&7q|~D7#XiaSA8Yu@W$XYJ}U#-&D?^Wqihz?W58uM=R`6qi6CBOc~-CRIkl` znIWIF!ufhlgwpwJA}AWe2J2>+ub|7e`yR0`AQ7{en%jhd9`orsA0tBxP8)5(t&?7* zV{Cp$d-1Zvb398DYkObD2k}#Lkw>j^_1lN6g|8a2C{q*F5&GtP&?)Zp)f7Q+J)1R4 zZnCew*52#IJf|+S0j$J5e5gyt2`PtW^D(P zaKG4^?WA{W?nRqGwKIjDvp&TAv0Ijf41YVFwBl|;H zJ@zOHU&7j;oBcgOm`W(o)Mo5U085c|w(q;)|~-0tz$xw0M<1 z?k0YegEMfb#B?CiY4G`@H-oJZ;kg30g=`3OaIJyJ6>HSjv4Um1!oklZ*p-92L_+GI zBqC9VUC;!XbY-sXP*ta24e!fuW<^`Htnkyr7_rirYp&e+q&cL9kVCLy#0h$gey?_F zx=4Y*ye124W%CNNovv$yw6MM7@_%0X$T}W56Jn7M!Ee6o`|%W>dEMV}q>XXDI|4tR z^iG-$oLIh(j-i+GAnK2#Ttf{%nboy~=dDMb0F~ zXOej~wipZrkcr=RM|XQdx@FM(_})?x|MmBL)}6`K#t*JX>iT&#Ld*^%iLUn<&r#lb zXmIzBE6i(EHl0d5nm^y3R(=-#55Vc4{Tsy#NL>vs7F~zQeU*i6i33ALn>!Ob;WO}^ za4zSMlv5*$Fotf(*o{RjU9I_biu{G1 zG-G>*XFB#SHJ=M~XEZBAG4Eos+jrk-AW(_XCeKoaZPv`TMTDa3iAUr7Hxu<74ys

r+z;+n?|$E$7iuLQo|nH!dt-0`mwx50 zn8+8y_dfEk+?q=*{1)wJ3*4sM;Dqu@ooaOotJhd0H1;x&Q*Au!?P0;ta@t1`>z3Jr z4RvTrxrJKpLQ)qNzs+!Ayu`Gx8l~jja;OBdWUpG|Y}`5iS^G9W8F@P>91H35Oaogj~L(^HiqFw`}m zn|1t+OKHAq4Xr?NMQc+#tbU9QsdL>&jOXEZ-%X;*9C}@>c zNaRR7Gb{*N&BaUz-B5vvhGn`4g>I`ky!mdMBgeU#LYM;aPWPARlSTq_|KkP!^5C6d_t-eu#HUj&bb0OnVRG zMC1fTpAjsF$)eR6bud-#GRW9WhS3^EY;$HTjnQKu`sA>$fxM7yWiRvV%GuV~DXeSL z$_A?Ii?a<~g1l*V!ev~A+YG97qJaz6f2Hq)Ds71V(d2m}*IVQeAVIVkS!~z9!oCgc zJ#Q?ZQeV2_jrtg^*K^rO^>QUHw`teOZ$H!=IENo6?HHO3j;w2kw>8B-{NvOxmyEx# zdha9)EFxZC#|Sf?_L}bgoABs}6yjV`RsyBi+?13h0p6XuGnV9pwG;5}#QXt$R6zD6 zI}_vGyN#{G$d6~OAW@xzPfT&V)9(Ca&WX(=8}0M0q~`t1eZmtg!)HAuF%xknQ7_Q5 z)hTWG7+dWfF%VRe+WCkCqLMhh1j#hSX5V%xQh2N@ zq8-PJR(Q0fNQ$Itgrb%Bai1rh)AW~Y^(uV>9mL90OnzF$w1!x`5Hs&dRhZw%!~Suq z!aC|RCCeC>0kAvU^47Zz7vj&T7P)3v;BfKqInTl+sYiEz86E!BD7P5_t5Mnxe~9cf zn@P{AE1)Q4OH2S|bCiMPB^ryRWPv=iU{B~-dhhHwLaKr2OorQk8 zR0w&6Os_Qic6BJ?>uR_6iwoyFq7S)ae1R1UBI`hP`6Cg;AfTD#%KXx%|K2La=Hof{Zc z&-$Qc@0;{Ab%y+wsa`=2xh#cwT+Ekk;z1j+pnh#v`&rLigp}LCrj*G2lQym`gF2Rv z562;I_+ffBV*UdkfV-HinG!&F%9=5(zZv6li;m%Iuk#_)O0y;$Z2VTe9XAHzs=qk5 ze{u%uLO{td;gbRet~h~>nbsqdy3HOri2UJvL6qEE34h^_6e^{-a8@x%vqSyQqu0b} zG7Rfd@b5HX+M@*>)(vCMJ&|z$C6?-Ne>?FIdDShQPSz|QK33@h_bA79xlbS6H|wII z>&hGt!d1{x-M*bI?eEz1lQGLMm{5zuJ}zRhDNwdJF}E^V-_fVofwA^6?>iV(rECwW zt9eW})KfcxdxJ_*NjKcD6Z1*ei4EEOmM^PC7S9X$|Pl~^p?oAymO zgR`jn(Z_Y~ciQ^bC)sTD6CRXa0{x)!Gnv$&E&W{S6}QJHG9m-d91pf#8z+$oH?3(i z`~7+0r{H$tn>F}lPE%va3$oeUW32YKY(7o~l|ebZnuNI?z^%e7`44wAay5%<2^*TH z@)h+h^9hjoKjzHm63wS(-%;MO*6B~Aw?FzlsKn&k^*(;rM8guZcQIL-xtBKP)D${9 zRZ(#06W0@SsLlG=M8?4qiKPf)ja}?j#|Ph{_DzCPse&Q|c)ajOOG#OyjAAsVRhI;x zX~1cQ?E_Wb4ML-DpK7_(YdxIu^+ZeIWR3DP*?pwN#EU~uiT2EOP9Il`>y=cu3ir*! 
zy9k~E-gC)4(~{^-;4cUNjyq>&>ZjX7ivuwagLH!4`{pv&k zh?s6-haJ|mz}%PU+c|qiQSE+m}TZ3jZ~9{|xq zlZy&j60rz@D%i&Z;_<=JNcqE!g-*0~&aC&Xab>L&8L2j&-J@9^G2>=iZ|_25B~p{f znY`r0?y&hB@brC}&{YNlW-6@5N8_Js;Y)XDQEgZaN^71+7Y3m z;0b`ciAo>^*e{G7XCCJ_lN@(|2isuK$%iC5E`Q}-2RtfuA9FU>&r5mQmlaIz*dRm< zk<<^+6mh1ivSMZxP_074xM04R?An`^pq)F(70@K+=*6_!8qMVM77Mx5Cf>WnQ|aip zhK2Pzyb0@LousW`0bev=kq1ptBsVVAFOl&t?mUcaYxvga1NRuBk8guJAZ7uJ#;c`Z zTt}Q~aD9T_#tz(HSsLty04^x<_kU7N@sG6ZHPoN`p|aB$l9oy0RV#+)Bld6GZ#i>- z2I?+Q8ox)U`t=$pPFXYhdIg_OLFGXs%^5t8HxSj1L%^XQyYhe`Q~E5MoW^2 z`Ru%oPiK}ic?PA{@E4B+ha~A2maS(Ku6+5;Q^|a^);tde1*8{=`~qrqFR(+js!Px^ z)iJS)u~Jk=#5#f&Ij=9N|Kz(E4)m0`8hZ=MUksr{+|E9GY;0B+G}D!+fm+0!W}GyGQCJ? zvapJC#{^+_R!AgU(wGehg6&NbDc482V04nZ+i3Q;l-i1XAf8C|TQ3K|auYH13N;9? zJmTNmMB_0pS)V92jIsMI$KGp=+j4dG8s%c_2wvR}JY_oB3VP)lDc)PNLleh}39 zUb%EJ!}tS~b!BrXOWOZAyh701fM;6V_2pucM&@vA{F}kxysDoiEM_a<0u>8azsx+@ z{V1Yt*GcC2oNvC+>Vmt59(zGAjI?;WsY*9ADQ@}7Vl}Ek_--Mx@(gZA^lfK!vL5ZP z^GrF-c=pTP0+_1Dw@Ta4;S{q8NwhxjfH2&*N7IFLAF)ObDk{YRcgcUM;z(Nlbs`)A@Q$nW1Za$m-5} z%9{!4cjJtaKGVo13vfbU=7FS7%hO!vyy-b7ZtN_T$bI+^l^g!T`YnxJ5kdyf#wgH3 zAEemp6GoLM_X@_Kal%4R*97vuB`64{aWgkqGSYsO&DGS}+eS?nJShtnW5<^2S`NoDiu_Fc^JqwT3`;McO+bCOCYeB_=fOMxNjPFkV09 zbiAu3Hs)djQz85{?Wu{HBPtRnSaSu|#N1G2waLJvogRXh7D-PqD2JmTXIY1bnvHI( z`jk?g_g0cFvzZyPLgBd73T9N2qJ=rrVlBd15}!DXWavys7V;z}1upnD4@=j5kFNSI+@B`D{V}YeeKN~?nwShQd$ed8&3M$Lbj+A5iwCwhdX}3$#@N^3heuKZ zo|psQVv!ttTGROY3Q(o^3WtHGsVrMA51gl!$mwRnT%yNWrmm9{wl~aI20U@%-iQ0) zMi+k8!Wi4C84c$;U2;2JF}?bfTRz{%bLx-wm6>Jk!eCFm?pt~-tTmpGF{e?8dC#S~ zg8}^TPfB%qfQ42@;c0;MvSQti1?18rjHIEp&5zHigaM-@4uE8rArHAhox8Q$_!tga z)OGag%}-C2(PGma*-^_+TdY46f?a%XgW&Mz*3awa_s2Z;(t9%R?P(zGs~WF3AowIE z`$u#b1<5l6Kk2D&Cz>$g-g`Fn9@W_|;`V@z!EEeL4d6c+s`6@UP9+GB%J{jaLe~ok za4$S;C*8?%d~t=)QFz0@Z`kDOU!#hD^cJL(ZZiZ-kA<^W0i^ye$@};caQH6}`K{jj zS-O4KSDa*0o;rO^gJOF=>yg>D8>vvNRE{2P!-l^y9^ ze+De-)#@H3;%^G8PG6LLg~+Q7rUY~73L5}HHlJ0T&cwC$D2nTv4@COAJeiZVL2|b8u#VsxWII`5NQ$>{Artwx4HzD# z=#(odN(`Wl{SE~ZFK&UPMwfE!i(7>B?#CpRRW(16LAe*3`#PItK6kvs`V3=(r0qP7 
zlVvOUC=XJ4J9)^ts%2^Dfe4%R1*2P~S$g7E!2B0MxeCy?BAn+MT<%cykVL9`2x)zw zpkeGBaip6FOJ+W87ypkfkD1%Aia9Xm_9| zz0-Ov-*782bF$q=e2hyu9uDAz-+p|bi5>2Upv(6Pg1UPmL!e!fRQUMMc&Gn)A_@*r zxa?`1*d`)!zM{qMUQGoOQwXBI1CY1Lb=s%+GgYc@xNRL<3K%yxYv8;WBC>HMWqviU zfvCF5<)r!K^6S?=edMD^-uwGqqdJP@ccj?MYv&?vMO@MA1w?AmqQbS`UI;%pGD+daxv`ma6j!L*Ooe0$pc*f1BNDbNK*XA27{(6vDX zNyX%@4}r^C&7$)2qw?tqDuF>KT0Wg7Ra*bwTjPIc1!N!Aua(DIbPw=*Ff+vsrH@{L zA@-dT)@I*E-4QWFP4v{i7stWdHdcM#dJK(TY4{=|<~h;XUp_vUKDpeY{~p+cE8I3l z&yVOZfggeN55hl?9&Jd7i?z1m!B+asOl^ZK}&he^g&qhMN7Yyq#0`4-Q zGtN&4330oGQ2&liz7b+k9%XXrtag?Zje^w21cd4s*FFG7$zoHs48KuUWY zytDLfHG-{#-v(Je;}q30?CZ2Jro)$y5r&`1qR&sD+L6CWnqyqNwAE{(hUu+u>d}?9wZBuazO2V>}kecMlD(}E(nAK2`?rMWLLBbS2GxSeF`L%a8@j`1&u-5fO?dv$0@d z{^-`@a~>YxIaAXn-WkhdsE$FwpCpRg|AfA`iZd$087=J6YqOlW#ED$#d# z_I~r3e;?W%&nzb73?C5}X#^eWvb0e)14nFqJECNf%_j%z_JLej%^g;F3=aj&30+9R6d<^O}^oNvI2ujmxiLTE|&yQtc5pL}o z2)6RP2}k7MNIAr>J$ON7lXQi2jtWm&s}6d6~silX7#<{0*Fv7 zpU#KU9@)t=y}W#5DK4@LkqSFCt)FmOHJb0LP;~a=q~L5dS^S{y%=dxqG=qa1ZY&8D zrI?y6+1_iNS)Fyxva$8^*F&VZ@_+%yMCi)wdheK(+yG$oR8w1#^5z{BY=ta=;O~FHLP>oF`b#sTG@L(Athm*s&!wq)cQIh9gTbw zo-}bb@)4^?Cfywa@z2{qB7OYt69rsyHyz5IBf#Cu`d>A_UZdyxK$G-Bd_wxmaf_JS zQb<+g-B455#XHyDolMLv0rA|#9euBIXG??tI>kB8u=$uCfMf}8JOLcSjk_hxz~y_K z1RMyD*Us`q9Iu?d$I4z`|5B=bogy`(;0+sJeI~aZjp>e$zd{SYek|YuU@PxG{=)8q zHTwH3fIBAwTrT$759B5LejbnA0GaM|QL&tp5Y*-Z_tF z#Hs%EZf%Zj&10GEKP~$mCsLzC1g8yXX}J2^C$g~EuW4xuax#Q`=8e5&w;K*qwR*k+ zfYkey4FBq}fD!P4VaRt`7w%kRq;8^X(9M16ksxgq_A`pq0&&ch<_MY>&^#9+!`WlK< zG-i_+!m9Z9Uwo|d_|NhEa=ZR>e81L`|7hP|G@Yd0|Nm*922T-m(sK`_YBoACCu9^ViM2ww)!<6i8i>6Xig^!o( zP8%prScuIXKzv>wz@dlMxS1whJSG8ZNVU z>j|K?%mY%{E!JC_i{AUy4ykgE`I~h-#z&gD zx}TE`{BW^OKR*{!TsF@N0N040cf7Lx(pB1XGB^MEL*8jGi&4?Qdtgz(i4-D-&|ki4HG_pA zlq_m@oH9oTk1rZ`-jlriB{PdWKyiG5>4C~9ZY(4yDjv*bzW08Mn)ZU0B|MB-z86`S zV@QogCh3HvX?CR|-A@IO3ZHTa=XQ`jR?E+-=PK67(*HY&y9X^uk`8#=Q{}36#zd5F zZ!4y9{=m6MJZ5SQab7+e;$U;}(w$|23rt{+@rmy@j*Y8$Nc_8E(YK+8)F!o<5as#F zN~1G^E0n|00m6T9<++owz}&iT5f{{Q#m%24tE5sW=ijUT#yV`NgwP 
zXdpvk;7Z@wXf7~|rDozhpRbeU#fDcj!hgMr;nzrU&3I=;W+kthNWLq>Swi!?MC*?W zH8?p3FWC50Qj}RN0+i2*+uKhJ_uRaF|DuxilY8RP@ArKaxTog`h=<_9#NXt4=K8h| z^PRiVvOO~})Z8Y=GryB3!?rCU&;`uNFsoi4x9Lj|+luF-yJPtcA1fAE#W3-&hHYD3 z^=CdA-9EB4-})&Wt<8}<@;pXZE5&$x^J|TEbA^I4E?i=qgUxF;;ouUSXcJtuUbCTf zYO|Y?|6m^r6P|P#I9zZW@z?0KFW>FwLvu9DBK-DV=!)9u@5$(!hlkHC$ioC?A{ zBW<$sRQVf5Eb4~Wr(|GFVItj8X%e|7OsT8o_O_hYAH49IKoRN^C}3}Vq-7QA=lWS+ z4*1z$c*N@XOw>XC(Nrq68Xm*yHtFLx1??sCYQ7X&8SXc|KE1mtcis%evc8}by;$CY z;cJz$OubqFL9!V89I6=jE^MzaI}5d752v~+=2iFrHL#~JSEf9z;Wyf+R~W^n!Py;u|&QUlXhFV?1igyHWCszOI+Su zK5(3CK_kcQ;EHLer+vwI?k{rN+YU!+8 zRTe6LaJ7nPH0t2{eKqK^w%o=~x{gHrxMk(fGK1Dy9Oy#m(w4p|vr}1%`}rNg$k)Ze zx|Ae^3!~Ynqa8MushDz#*?rj*zus^^(qeTV``v5RqrbS%qgi#Y>MOMs%~Dyc%hLMR z9KKoKCSQN-L%|!#)dSX!u?V^SUdhcey3L9w6ifpRj7Au`JQ%{tSC!YmX(qw3Ex^mt zj(pljD9!*{UUH;)WrW!LYTCY73CH|@iNxQ;=9MlPSqtSxs;SQf~t;tM`hWgt(wR?)xL6>iN;|tyc-vqtw$ZG z&v(P`1oJoPEsxkIA?Fk-4R!J@>OncOVb|zaLp{h6{K#x1r+7NRutWQIewG8Tg)NPU zo_mp2<}I_)-`_cx8I)zeG-NFrURN2D?5mweD?0Jh&Nj9c?QclO#`Cdezdki&6!iJLxe zbk#2C({iB~o96J&ey3WqQ>S1`<*TG~j}SClvw?>$k0B&|L9`{%_ZdL;i zI^CvRjhAc3kLXx6mY?Ryo%4V>R9rGQ1cFx{iSbl7aBtUY9yhC3@WVeDRZ7H968(yi z=e$0IUorUD`8!S7yRwkQ{*>W19Rq$AO+~ICrbw)(`;-PdSa?JITsE@N-WGBNpuQiO^S)nHnvudth(9y44UdiG+A{|KutLpe> z;Z`Jlx>lY?y5ftxpF;Kf(t@?SUzu*be&&k{AuA?1j?P&zlrGm4@kd4#yeQU1tF^ICmlD+v=W@?$J%8&BEm8`T9SL=i9zM$I1&q;56 ztip^XZF=hCHrgMpwqxRr91X8dWqD1-Q(IwI?JCCj>j;2sooO2+u2UVpJQCl`digYh ztR)WympUh#=^fPAFObw4c7@Klr_w#aYfM_&j>FLNDR0AWC%9^0WJQkiN*vqyDL061 z$@;$DKuZ$tnU#5`V}00ftc6dH=ZiS0Pu~*1;M;s+4xM)PEyMxN7qK*)VM{3_kAIv$ z=&|=v(|;Wx;CE6MX#z@>mj#V}4x#p&6}0)LSKPYVX7)zDo($ux-;=aR%+*`JRQ$aI zt2pg@xamoKw{8;V<{Mt-`NN=KJf5NtI^hd$9*@f_b^fZqwB|=daEVWEMf#g~?raLx zT(57{ice!C;1CcaSmCpw;Rd$}PnG4O$+$g4<+v5BiumnOr{MZ%uiMMiIA^pSJ-PjV zxqs!&qr}U+()5AvmnO=Pu6Y!3{No{e!z{43hR`u_8uf~gm39fnV=Mck-0W`?EK_;l z+nqi}ky^teRkqAo9DV!Lx3b+3`(OE*OM(PaNN(I4)cxwc5-VVq%H<4FLZ{T@ z-kfLa`7$9wIZky> 
zEw*yR0_DCo)4JiTg!Y~|lhZPaKD>BICC_)qck#2gZ85O1y0&Q}s{%o&QA`LVYrhjbw+Jt08T4Ntz0%V)Uu$Bsj5FF$qhf-y`YF8#MR#u4XwsYHGimbC-jd&3 z-Yi~u13{xvnXdr|Xysyz>w$mW95!&<{CW7j(?e%zc)Yo#FQfHA>1e0U*{6kG%QdEs zk2TLy4O>vOlnpuSKPle&N!)iX9(*5a3AvMV?bZ5-SES+WSe{_lSFOFXo`eP4=Em9G zt)zSTR$|?=(RB%Bo|e)#Q77j7R*sC9%do!9JeKv--bcP&!mZ220QW~pOxkVn>%WRDQp8N(OxTB!RBJZWWTl~Y~TKlGpW z>St)S%A1{iKG_jYk5rSN`S_R^^2qQMaBDlG#J5#5i{qj3;iWy2QV~l|uWhT^q3cZ= zJ&QO*YOTVsuH8buQEZP+Bb8z(19Edc_t}tjf`X5XXd);dEup;?adl6lOC0sxyvGsG zDq)r-F!InMYj3zKt?n57=%D>$^LWKFZeIy1>!GG&qHl*=N0@Ka*?m5G%jvBVYN zZynKw_=RoF-wszl<=+oF9txl&p@3@}AunpmLX3o?w!Z}jX1pxQn=Ml!534`if$-P& z*Ypt!&hV{kU4>{NM60y4ek+`1==T!?j_-o%x2}H9AmmMGvXfKgh-c@9U0NP<{c?y+ ztQ>)iu)?Ne;AubFYbJWwRnRfkkBP8~ZJm9tL)P+l{JCBdUBP=)Pqh-Z)gAh1Oy9GO z;InC0n76UF8|W+Sm$Vm>V^(hkUns*lEv=7#5lVj&b2G-yUl{3%N1Tg*IMmCeLG|~X zg1+zk48H1XJ5@1ylcnVlKaq%1rU9Os#i7bx)|<$hb^oTGDxXfAePgQXw7jGp-1$>e%N(T&$dM zSF|nZ^Asj5mp11#DuhxnLJITiKa+wM(`>y1skvubY)TA_2(fY4<(x-ZRY$CkDXUpbYT0r4pm3kVWl^W1!M zW$Y0*p(FO4wMP11LQl-G600Skqlf&yA&I>XF93|LHqx<$(_gip3du92M6Z4KdWwvYH$$<)oPoOEGq!KTt;! 
zr~OP3u1BZE;#`m3DfJ9=hi}2Q9?!I0R^?;kIh%Ozb5?7^k#UrI43UOzQ-bz& zGaLE(;Ie8v!<335LAG#ulY?!Pv1#Eju5-~SSz{gF3JY%P66w**k2@Sz^mhb7fAM@t zUdy9E1<-A)&mltbRJlH=pRC=rV$?#wPW|S;jH~~ytDo3Tonpe0m48)MS(7ZS-qU;1uE3ln)*u`I;z~M;p7q$a8t>%B|nxRy6B8Uw|OX;&Yky zW;dOh{Gsht_cr0IoF=7|_{g%2AEEfXA4s z*Pd!;6U5>CTj!qo{C+X!mlQ3c-$aR_f`}U1qh=+m3#B94`w-xoT@YJ`S*t}~IVrBh zFBiF&S&ON86L4tIW~GCOEdMwh&n z_gkSI&18-8tZX#=`tmt1jkW5N>`Got`1P=lKgkge6+$?T4i9Oi=oA-xJ{cy&_}xD* zzu2*P+;Y4BnuQiQ1_(qUqFEf?5_q0ie9uY?pEq)|LHGHCl)nqlze3G-CTRe1gRqL# z$$#Q6oHDW+Uioq_cDTSL9H1(p`mhLkZ6hJdvsX2(3|wi$FPc1c1_iBu2IvL$`sO&l zn++eeyT+ZyCwU75b)ny;`K*)|%F3OZGYZMWNa)PAq*;~Tfo|G&{ce{$!8UeGqp?YQ zl3(s~<0ahOXW}Fqjew+|m9v3ov#X6zG*-b;yA%b>M1#HO&G zi`|&nq;{pnuho|d+D=e^F(Jg+f#FVbf+l;m?0G<1vt*iBU^^Yy+@eN7LenJvpjd1p zV%&kga$4P^^T*|~rx0nKiF4IkFPA1nROLRa-+{S2ZQcOMY6j3s>3r4mL1l=co*WX4 zBoykd&?vON(;U0EGwxMch1uBr%B-JJQEb}!FeymJ0&;;-?c<=StA5 zm3cikRDrjK-OH>((OvJ+=l9<|9e!igKY?9~wMc&JjjaKB<~HqEie&%Og& z&f_h*J7;UNjW%}ATCQ*Ix5*;${qO;!?%a^ zG9Fec%daAcFLoT_^eiH{7#!j$q-{3jW0qvdDVv4r58kJs>Ly=mQL@>HM(3LJ_9#S# zTz+*`c7ATTK3)nU8W|^mKGGl?%k^*PymTB}^F;>)K%NV(>$y*Uq$|TB01x(2!AT$K zF!JLl#_p47+Lx2Z+p<&4cklQYU+kUSSmn6oG_U4}N1Qjydm_I#SZY3QmBTHI?qYKU z?B<~y+pxy*5Y3%W5fs4BFbytbvjxb*cEV$|r@wSirMeF5z5Ge#(6n6IqitUQ@K}0w z>hx0_Fsd2^#vgk&0=J4!%w?RPq-V*m)GmPG9vt%{4kp2Rh${{wn?p$rRP5&>4$m!L zg5kYHd;v~?rjeLa%Vg-i{5p)%Hq@B$c+S+nVH)h*`sx~k-2Dt1?@eT-@VISh3rWO& zItw3j^G`oLL;7{!A-5kL^S4~MwkAP$!Q>zb@htNGzp}~y%Wh|%0^-6mBapy(+=X4( zO*(V>))CU^ODo5*zy)b78o&FWnES96*A@N0zWrat^an_QNr)CggLB6eF8>n&JYeG; zmE4sVA0K-Ijx4}F`;{_c9RJl~{?`&3K3u!jAjwtB<$UAV8{j7b_PKtPCWV+aXA zYGuFZT=6lZL*_iw)(ZpgA*7yheBe&ljvZs6Q*z5&w`t;U-POA_$=63!WpJXxI-!e> z`|lnt;xv?BY<(1VY;v@0s&brr`6gRs?z>1t(odxiR^IV*Js~7#Q+mZREVO$$^|>q7 z!xunDeC){kRaxgA`cQsRKilf|8sYK$QcY%!v0m(+&TN*vLzAVk_Z3dReW_Jz`PO9+ zv3Zz=hNHrR7;R%J`To~o{Zo0nf4LPK6E;huOG=rDv|p-jQHdTm?E`gbWjDy0$C>l>Z?g?L+uYht+{jt;zzmSjG^sH zZj2`R57g}%J>sV>kW>0ZZ5zAZ?RhokvTxn0D!_^`_7t$M#Z4_c%Furg4ZeIYY={+Y z=;`xqwK1Pi#fcBC2o?Mg8wxm{@3Zw4)3P=!>~nJ&9BEB7LU^Q(MzZhz((J)NO*6kp 
z&xi0Qe*H?J&05z&!-=|erd=q{)K^mY=a=a=gaKFB{mLj4{dR0)6F4Cl9 z14Wwj4ubS1HS~xsu+T(AIzeet0)*ZYPz0n#dM8MS5CQ}UHGDH`y=DK{`&xO=xz71< z&i<#DCV8GY$DCt~`@Y8*?&e0>o_Q{hOv;;t<98GEtO}yjR0YQXf=)DTyf+iZZg9 zW3cXKjDHyrqc|Yf5`8GF!~864?qhqgu8{nJUoo?iqBZJU>Z7W_)vW`;uz_=`%{pPi z-Sef6?#*63q-iWuljgVAY%CMTC#s0uN%YULGoIk0(9!DA zU$82?u0yv4cKE#Mk$5`)i7k}_&d+k$`%ijuw(57y3utac@7a0-C>R}NrVk3j$FECS zxJT6&c+36jPLoxEp$-YC-aBWHkXzjuen*)xk5UMpF>bH)&Wb5+8WmR)zqS&@vcu?m zS9-SIJ!p`WIfWuV$@)3*<<4saNJ|ys#>)FNQ+)6~4~Y5UbFLhtqKtr(Gz1K>{LZ4d z2j;HUQ;jjV$3_az4wVe(gre&Zr7_KwdC7078QxMaRZf zIalM~3Vpj<4TQ1=F7^q$Su(QAYo1vs%;kM2dt*opqZf@#0|Ay1UUu>g~O_I9mzR*8+6cA|lw zyvvb!+4d%1*uKsWQI4`M*+-1Y2RsKqp;zD_ZlA~mMrl!2y z*v73WXN>PJm$~N0nJTRv#mI{dBv~oB;Ep@YZiGA6MK}`73HTk4S*;PLsAf|6hRwQB z{o&_<@P&{akEiS&<18aTKYF2A?9+nu*2`CjRhW>7!x(v)6fH+6xE-c@zXeCAp8Z40 zUD5Q(wGMZ}zR`rE>jB4Sgq6rY2A<`d&U`XKN3kb{ln=Kbd&Z8 zE_JiknAhL7e@q0VCg}GK_B+O-ytAHTVkgj@2B+13HG63-vG!>xnGYtAQhg`m*L&V+ zmF`J0rQK|UXBF4}>JGB~fvXz5C1nEHIxw+>63kPd?cMjkfPc|fXB*l!{k%-1o_BKp z*#iUQuhKK0zFT9yhltis=lD4{29CU=w^F~|JpGtePkk&fcYv!Es9VgVl>~Km#5l5B z@+@knpNzWgZC;8|?;3dwu%&Vhf033H4T_%dID3X$9xwnd5_V6xGEx148 zqfhC{5diQ+{$s5F?iL3e?Yk8xAn;V*pbava*lA0|! 
zghbdTSjY<|oMNUEE)1_Wd!qlnH&fyVWJ!jvrBhr|5Ep$sW;wP?^dy6*tk<{7E-vWJ z`U_TdLTpw0nx+huH!Tn=seETY)ySRwRpXsHdbpNYyd`gY$RN1S?t6#vLaOb2()4s$ zbTR$Uz(s`pSyO$j;SGPhc~zADQ4fE^Jz%}rp-mc6MuL?iK`#%I9R;x{cU0VfsA!?t zuuBVzwZoJ64o-UwzxlZYbAj`d_6tEl2yNK}f)y9cP1UJ#iO^V{5)q|gji zxr&De1=AoVDeItzAvTkNy1)eK(bmk=QHMHu^7I@V$sc86*F11HMkomFc)!crEch3{ z?bpEr?z5D|&lh&gLFRl+F$}EnQf2nnrt8`@zisbu;g|t`VW=e}w3}k-cxMv4#4~EN#rOmeXARva5kRZSUm_ zEY(f{3=;Kg-azV#>s z8XZ^5$xb7D!?J!5kE@>E8`Fh&D+Ixe>T%r0HJeE>z&Ab+WHpet2+FoJgJ36&y_P;s zMX^`~tt>6fC=p4PQn}LxXnwPRjx?gZD}R0Bok)fl>*r$dMx)wkTN8fo$#45+-k){} zuI-P|NZ_WY2T<;|8gXZfKc?qWrblS(=LZTADtxf@v6hRLA!+ar2{}DM(IYdKWA2Oz z76iPD0icUW?sRV>ijpt5sHYJsQw2@XV>d%ca|z6(k=HxFT4P6Kwm7VWCb(r?xzZ|c z6|EXUqTe z79w=M1`|4^WfI(y(oV_mt~~PCpP;j|Az#u)AtTqyJeOd(us|7_gq+Ib+oNK#kmW=l zX}jkL@34;ZIHo9o-^kH*=G?G0i5Cl_7vuyswAa#oEALVBsuPxfIQd)p(t{`c^lCo; zsly?!jCV@;*0a$Uw>TWHHI}=2$^vo&q>!%X0-(>bxBG7Z_3F&{qayU)vRvcN;B(^i zH$k`dRekeBQEcH?7y>}2`y3(GQl-ITVL;YmxPBdJIjSh@%4051QUhZzW)3>sBO#|y zT;iQSB}8O11GAN6XvW z!2$nFBYL5g%STxeWpsTzSD&MAIl6h~&%mo%^H@)rregl^K`szm4n)ylv(O z{8&V2Wx4gFPeQ5z`};$wr_T|a#-qZu+TL=W4W%^)BWk0ZvaXlXqG>eoj9#6JrX$yM zjBcKs>3zH$y9}6GBi3qD*X+5O-2_XmgeV$NYH9}y9xO~H1rZPFdggY@N&`%;5f1vI zyY_XigmLOFx_dv_$9tRkgY4~-V_#uC`U&6DL&xT(0+B@)jbw?o(FiEmpu|dVO;fqt zObx8R&F~t!%o9CxK5BO%l!2#9JuMuu&2;k>nH?b}=# z&QN4LiR}P;Vw(y}2q*cD+Qk_`aqXtq^URyKQz1*Vwq^XZD``RV+3Rh&Fxx{__Ub@# z8XpNr(>;{TWl}Klxc_e@C_^jtOV^9c65IKAstygWYC>zH76=0zdBrYKG{Ma4HS%VWfyEcbiAZsJlzTv&Ipqw zV(|%PT|cof<_Oh25h)}43Wte&TM5xWbXyrZUooI&V#3dLj7kY@%1{=2B+3G?m2C<+ z1Sqf^j}ghEXw>29S#bd%2O(|Jcq}047J?n`H_&es_(|M*4z7SYnCETd>mpqXRhX9e ziiu|d#=2`|*bkVX^=_7oqve|(W~8i(|9jH8fy(2e~Q5Lj_<*uN4j z`{Wzr_W?Lqfmnm(N2Ux+O@ZKcrr)gFQX;ArFYJHlx)rwPJMkpq)?8{w85u-~DBg^~ zx*_G9mAo6E&7K@`AJR-&i|Rng-thwBI3>oHe#sKLZ?0G2>KTyZ>_*Ze-)SR~y4Oyg zmxGwdZjXVcRKfa^Sqn(wSH9_^biYa+Vi0`(jRdh=z@|Bq*{aaV6!+Gqpdi<3njACN z>Rw)NCj>qv6gwS&7$Mr(9VLkA1v>0=U ze7oy#)wbjNKWElg1G$=SJ`(19#Z_X8kh822Qy?1iam7~+<*fah`sA^ zVr7Ih%q@rCL28c|$&bJS4t_DzIqPW|R84FtZ;HAcDLD0u+AdsOFd&pi`V8)<79!f; 
z+6D_y7KEuQkn!{3I?XGsPnsI8|FM$%{SN5r%8k9~PqKfI!5m)^7lM2q zj{*TibDRLt3GM{w>x0`3IU2de+vW6%9I`8AJ1Y^QHqm#UJtIlMt>=@H_2o$TK{30| z;*qkWO#)gBQ%?Ythap$`L7hU~0%c%&MgTdBa@|xvNnJH@XeGb3@@OP3-9slKv1-mt z_p5ForPPN63RvY#!*Ov3t58cQM+)3(cOGD#rOu?ZiQw3<`It-5iY1};)4r4~_l3j& zaJq9RAxZ;^yB4p!i#Jy$>ew$t2$w2n`_fYQ9hbG0z@}}@Ea%4|8WmrqU6^T!>ae>t zl-gy3)fkG2H!$!t-5j+#v?$raSyj&0oK>`?~n^(q^CP*&DKyU09cu zGk%e;tjTc>%G?Iv=Nk1y0Wv=TrKY0vH=Zju0gha%zDABBevixnhUQrSo+JP01QkWz z?9Y$2Ib=pq4Uqjz+MiW1uKum3{}UVeCOewA|iC5LR!d8A8Jl*K7zG@li` zJmOc^1m#qc`W7&&xHi5U0iB7=n_u5C-}LqiQk=%_tnPDJ_lw$CAMC8hwVl(TOu`wgMs!)0~+1&4Y0_J;1F);>U zm%kenvLrpG@GvAgoz(f_QIkE^s@4IC{ODDc_?ZJ38pzgX0#zd7Vqmrd+pkqO&_aMhWS%Lsp;2J%pS=JE3Edo~ zZuQ&rHLRI{gn)+KeHF|InIt+K$UAvw%b^FXqm4cvawFCb;AJt6;2OMdvf6i>(jU`> zHSVm>%;dsU@0&=tukkh6Ol8D?APQA*LQJ=e#kv(P^%lg>`Tf!|(%=+LjXmV*(SS@^ zf>i58gmY18kkG^@*W=wSa7vG(mD(kM?Ch_vL=+pIy_s`ihEvW>>^}Va^~T}nmKe#Z zab3r;#kmAyZm9_?T0vo{slaD~6avTUZPcKO;}s`V!~*JNHDZ&6%OZ}srj%B;)o=ri zzx%?vg=lj`@T5@P*s>txt&btzY%u2zr7Ly9iF-{Cuh_Gnjk3LKxKyw1QZm?X>l){L zA7Bxa>Z7uIZ`tqyb-E`09%ZycLe9FBIUMrOLsh1}wx+|&f1kM>=%#ev<(hEgj{T&! 
zaI4~)L-FF+(Zs<@jY;Rl(|F`d*G#XACn;Qy3tys?M>Y30ut%3Hg9+v#0lCo;t@CW~ z_#wWb7gRP14M{yfPhrf2VP4|l=NxmXbt7*kVmX0ALp||ZzsVH*7`|13P9YwK$$A*K z=)03HcDFCbe{9wWfcOhlz0k%$5By!N1<1n!QO;6yw7+>M#ovCQAKFn=SLVXd0+%-D zll65V8j6}0FVNf=6m-AWj03|+YIF%zZZx)6qe6R!zg;#4xoon0>U+bT% z9PcH96bs>$lx^tfIgMv5>y+2EFr@Ht$;P2?HLIo6>YsplZ)ulfCw_0}F0m6EXLI{G zA|0cK_9~8EpJ`oy$m}K_W`axeKV0Asx{v%Trei&w_=}}vxIl1l;2V(Yr9Z+E;H{8R zSVZ~hSiWPW)=_Zg#$dB4?1yAdIh{3k%x5rfl`>o9wF%`HE(W?cG*{n;?c4q0uPEM8 zWrZ{9j{cHBl(To?Z9-bF;_K=9LrPtJ!rG*ryr+QSyKAp#L^Qv<@L$n6hbY@A4Ri^Xi<6-ZC(AxGcVn}Zc&L)e4U<~ z#umK4h0f{mTm||*!R|i=3Af;HNZWxk@I{YhU>vIx(UH)?Vg6#e0Q}3v05f#qq|>DT zbbbfAW8X>F4~q-+_||4Wn`_}sT0*Sdp&=Jrz|J2Dod;bJCxct^_`bjA7*$<5*yw)$ zEmb(VbvmLdLbJms(r3s*Az8vvr`OcSV`UiOfjK;4a&Hj7u}`!+f=p+AfxdUEvZ0cU~&;n-kWTwmab!)mGFV{vR@+ey4sOGi>baOp-29|4lP&$MTXZKx0#TvM& zs{%|fV0(I$cvx8#zmp&v4fetwom%Gz;DK$OWkNYdWyf z)$reLC~gevBH9?`=7=2ss6DN>K-`#}(Xx)+?pWw5mOnBotp4U{jFf1sUT#mzB#P6` z&FDPo1d5g+1-kEUft(+zxICvOJ=((T=eP70j|QI``8HRyk80vP%*fSPh{gi~&{t@b zp};lRN-iX7gkU1dKhI>{`E1LiN1+o|<~So^U;(e}1CkE7872Gn=qpG1aBJb9Lq|+js7G;ZP^Lg{J$G}rTlO$(5tOU$et8HmboQO4 zHMBM9{zra;nX*s#@AAS6-OERTQ?bJV`x6AzX?&fq)QI09#jY~6O(O%b;Z)%Rf`fl9 zv&0l+U)Zn};2W^6TX2b6E19I&kVi3fG^PUhk`6`VK3QitCvH^ace1$F!2Ju=Ov)kV z&75p;nidupf-JjiGr1P{t0^Y@-q`sx!u%CHQP^c^oVR#L;?J5yn;e;WG&&HM&6nBd z$x*iVM3s{%!Q4OCAbnUMdbf>I%E{qrXiIy+z&7F}v$LRK6M+@`60q7KJFCtk%i;W4 ziP7X5;8uv-P3Q~I8dH*7-9gsY7UJe6V_Gz-#(Sm!Mo!57XsQ6z*dMww7aq2@yqSn; zZ_kEyBd0cQRR*F9`9a}m&}Mq?Dy#dI!+(g=+tG)fI~j?9RWC&S;vH)S<-95Jf7K|l zQ<~_co@5m)HZZ+(tYx%)V6DDo@3O!t&>otTgDe|{`8XQk!&BU2g142pv14`x8Z)bM zZm(aSSIBux9m!-K;NvYb|J_9d>;NkswWXuDVEtr-VW_>T7B$x*IBhBCYP!-M;#^BT zb5WIpE5R(p)mJlR*QE6)9p*=|I4DxzaIGqCn4cc@s!H2Xq%?F@kN)$LgopjxutD7q zjcB^#;?NA+pP9(k-{R9u5^Gqf))eaeVmOec?7`BaX!vno3NJh(b;z~N^er!Ys8C? 
zzXqpmr&q)^qit&0QwN|Vf4$CL>#y~qX$`BR1q(j^H{Jrst&~`{9fhigJ$E~mb{D&L z@JPCrSXJ1&3LLUk!~>_#WH;Ah5-K(k6esNaR+TeQAVswqmx2az?^~p-zA}`8?}V6J z_4fNhv%Or2Mx#*+bN_*OxmIzl1JRrmw_2AUhL+Gv!V9MIT%@4lS2pqS2Ax<+tvr$a zFH-?E?f*2Dd{B+b&jtCG=dtadU{{9m*oRO5QcZu&1!YefINdNL&1jaPVc^zDH8Vk! zzaY0#swEY3pV8umBJ+-ufhP2$iQ5*rrpch#&Gx&}C94$=>!EM$5wGr**@@2+nUz9< z)W=-X>q)S&q6l2&hOy5JPkM*KdJ|XS6aPApUqnDP1CyEeI>W|K>vgeO_7JO7 zh%ef@qNnnm=A|#yVFawG5<`b@oj0SIIyjEysewg1-akymaOHh=TfIu)>Xcah!o%l6 zYtG*HxtYBO2**bZM!YH=EL=qP&7aI|eRgjkfffSWy|yp)D0KHXSpM=XC;p4e1VxK| zA`{^?JIpZX&C4_=8L?3{)HcaY$4+x`44FY6Nu{l}a$d~cTH(x1^S_50xGdPMe7LBW8dM@78l8U*BEF^L{TPoL*}oiI`vR}If!ED4mcHvF9zRKejDXX?as!eqVD)4|D`FyWe#jD(1M-~Y|Fb2FfyoXDz+#{y zNlcQRlfh^qWi&KZ=&6f;8a$Wgb3~=5q7B1+hm{ zO<#N5+MpjQvfG)OPPL4Cbs6U&hqVolwUDpMY&ke57cHsUZ)rsuPF3T#Uupc#10ZxG zm4m?+xRj{6`F0v>n8K01lm%t=f5)h4ulyBSJ*b(EB4Q@ zu--AEOX}_ZD(YIXq5hWwDx2DEIx>NzB`;hAR1x`Z`gt195x>7VMOiAv`rt1smjAaE z1Ma!$6ftHHJ`evD$#eT}5K&hdl{VgaG|8 zzy$4Ki&)6LF7%s7dSjr9jov;z`0*ylpqN4D8vXP-auF)<*>4#>T2{7)L{aE|zi9mx zIVAn#q5`S$=kORweY$;M2HdXhhka-z`%j(ZT9hzGv&(jgpO*A+)4wQ7jU3GWnKxrP z{(qM@E1;3GEiqg$%-pQab_}mZ@+`%k85DCEp48D4o*Q_p+MbtPKRO!(>gWh#G7G(I zGYgM133cqjYi?p4o#J!<*>|dtHoa0IyTZG+c1}`vy%oIC^@=i#WPAZ`=GCD<27?K8 zE0u>ih#z-cH)<(W==^qJr+}J^mW8oud`mkdiHr&F<0>^QVC*zx%*KGzuiS7GcjOk5 zFkrY!k1d*%q1cfEx}!nhUznJ!tdE(z6vhGmugBixTC)JWW8bd%ypxzJ`~M{jPaL&3 zUw3fR)V)^Id*!D{u~zg+fY*cwX0}GYDHB17{ke-kXF3#>Emz(}akv5qq)KXcT0f;# z|HZt6O<)1{s+x$AEXh(`W4vVW?Rb6s+p!0^`pk5bfv+BLy`h+Z_^sdIvTzY(ck77X zG}uFJSW#A?7~E}`J=iUGGU31=J*n^8ve6-QF~zmoj^buB;L6w$r>0Zfb^!wQ#!}t$ zqh9+CGLQ%WQ6v6@sOSElgQzi`09?P+q;yoe676`cMMK)C$+y>5fI_oAHu0ddgG2$N+@F>1G{T!wszRuD6cy59$Tn%~geV7$h<2w5yaPj~Ng!s)Hm ztF&&nTpJbrod%7O-G6PDo|hEQ!nTO4rlk~jlnyQLoz?t}MkD(VBKIgoJ^|yP z2gu1BofZl?YVsHR@B&Hh|NhGVt{8=P!vEUX|7+(ZwDdQfPsZ_-W~{%d4^Vrd2R>`U z8WM+6?2>uO-JiZoXz(6L@$YKPDUt3!ta&Jv&~F%^SD3J!?HLDJIYw!Ptr%r+mdEquQoaI4gG<-PlCoWevi> za0bvb$jBW+p{A9*YiEJ*L-9D}%a`$$Eci(sr=={t`7<{^rh=i^KrUSVIRzexR&zC+ z!t@OM1lSjM%018phiBx#qkB&vaS>}6ct70Rvaa^oT!MM%b9 
zkHKmN-00V~iFcH*>;cg7yhG(2**I@=~))#|F%v4|M*3r66DzzU2QT~D0!Lx(uDa!;}7xw?6s(< z9YvI>Y?9-=?)>*X^Z&<3zx+Jb{y%%IV<&pez&HvCzJ)*j%QI8556V_X_FFc0kBL)Hn*V%4iw#?n)!PgJPL5I?+)aKOY6#Vm$h$l1BH=i!PR<00@^1q|0 zxI3#H!p%}P-P-UO(`%8;`)U*_hv`Lihnp4Hwa_90*mqQd4tgf`f#BkZx=p=+6k+<<}r8U zuwAX*lhs_@n4r4*d$d8?jH1T87`I7*qXR6N@$hrYJlDEq#g~ZO$keGB zFy;VBRN9uvecD#`)T*IQ-rwb_$PJh84#pKuQ45WaOw6w0Mk_aF5utx;Zdp0tbm!$g zG>>kq#K~ppRGixA5N(k4&rS7wE0j*WfBE%#f8X2J(E@XtaeOEq4N-@cZndo~83Lo) zVT&;J7HOB@+f&Kuw&(Eesbg?+MS9btkJm*yn9%QpjBm>AzDc!IjVRDB&J)@VsPU>V zk|%CHs6h}U)8)YuO}%R@n!vO%82T~>V?JjmOXjy77{eZN9~zX`{?Jl5w)*OaI65YN zl6dZ&5@|bAC*49?!GSm>3_dwA$PG{G~^XkD+ZZ=nrB{C8wUeZ+%>P5*xFNl>dEJ#o z<*4$Sr|9&qIEaL_9oqadZFk-5>lAI|=t!wU5z!>3T1ViP$O-RubB-Fl+;`i#;H;}v zx){SBTjQu&?(GRfgI#ikN&>oMjaOLjOPLN9g0^W>*QwspE;KGn zjEy)#sE8^Kf&tl6M=+u`-{Nm95>KlasJ-Ue`GM}>y@Z>Q zp{DH8Rhef~qPw8r?W*OZ{QCzDb9o=4s!zBzloiXb_cQ0Z^l*pSVso;)E1`4uP+UjJ z&ryXnZ;g+=bxY7RtV(!7NBiMmcf$l|dDb$YcBg{qux&-ex3kcBpFl4#^w#ou*SUcNm~W-=-wmS*oh1)EVIQh?GB0`d zCUtCTI?;AW2*zXE@!_p8FLJg2F%^bO>2{8K8ZYhg5P}+amhlId!zwsOUzn;r`l@cO zrmdGcZJ9U@lqQ)4v^9U`v8&PL1~V;FPniVojWB(5To*^b+cqr!2xc|b6nGDEv68C6 zV3+=p!T1@;=j*A<`cHqQ@~1o79zVvg7&PO0bGyQWGg8j2&RFF<*9v&Q?_=&YF;5f} zLnA~T`CuvN<8s+xrsbSw|31PN>nl^x`RQVyYZ({O z^PZ;I95y~rs+q4ZPhSgM6`w;Z(cPLqaYL5A@Lvx5zqhe>dSuILXRGr(a?pE{eR>lo z{BjS&QHiZ?ljA(jJX0g{=G2WA-CXneTDE{61@84W?j{|ZjASiOL;G8OXL;Ai3NhC` zmnG@7ugb9rE%H}}%)M+c!_J*xwfw1O*O?4D2)TSe-dm<>sKv?V$fI5F?K4vATIW>? zhKQzp;~Q=_?6Wt{T&)!~Ri&F~2z)7rO|*pW4phyC7bz%^XT!eY(U8`hrD}1Pc8`A# zR8{q7tTvyO{Eh!m?j_HIVRc=3>l5%pTum=_0)z87D^>U9jryTyxPAK%oond#TLtrS zw$ZH&)XYf878+L7!JnHt-KGAl!1pi$4B9DHP|dEQQqh{x8U9Jk3V;(7@DZGzhZP*N z%?n&wzZ`r)Z@Ksa=^L3DMFca_OpN07Dqs9`n+?w{HE8KuH~S;-<2~++3s1@05E@TZ zOuURMs{I-VlOrybLUM`z+w`|iNrUEf`?$yA7wLnFkxLxMw22k26FQno3JK<&CY+k! 
zQZAMuWokXejnW#7l?T`P2J?-S+QwVk(^rg+d@~!GgNw$3w%UH(_U_+VkAYT#ksB@5 zMBD9^-Ot^D#ML+_eZfajUil9r?3mg|;su8XPJARsBB-7AKss^zZlNKwIV1+r%>|C= zF=3^Hoy>)xt>ymrXXiR({h`)W`Jo`4@c@_pGA#y_81Ah5?c66m>qT)19+TA}?O5aHm zGOzrWuX-F$+QW+S8&;O<8+JSr*+}gRUinpYwAK`J6H~tsz%^88?5+`sgtg$T^Lb)CC@@rp)=}DloMhD@#x*!hVDD!`m0RIpcL3eV@ zt|M8$sCL71ff)(iQ-m8KMp+SF=yc@ZVZMhZLk+PgJ(P5huHWA7FkGYepjYtz{#i(E zskyUEu}3iEqd%j%DH`sm!NKa@Fjr)?*C}&zU~`F8qP}JG;a-pvcfPKs#{O@DqWuKK z*!DCkv_$O$*9bMYfP^cXL2ElKWQL5K+>;yRYzYD5-H7#Bx*Y35#z76bJ`KOG=AZ(4Y2pw-GrFf)32h=Mz)yd z`qCyT1i!Da+%Kb3BK6l2Yf%t9iGKeaR(S~8TdFj;un?ADQMPBvaZ#k#F0{R7do)wx`jSue^QngfIrGBK$2gYuX2pSZ9d~tHa6r-Cp}>M8hUkfzRqm{D;Ka) zjyko~y){b?DdY*`9R|=0qIN^fd|TYucoX$t86;^}NTK+#g3Ru0oI>m0hwn#c*rTXa zXn%XXs6;qkxrIKle~&y<`t_N?H|)3848NZ^bx-}W!7LGT^g8iz3irHf+w0v}dFzn= zsFkB%tv@D%_jv+Q82dwUrJG!FOM4(du3d}`UMBKu!+y>#OL3XW$J`iZNG{O-0O@P@!d+# z-W8KBkB%=G3}|0!;UA$n-JU!Zr!3QT<91N2$yV~I_PVh-i(->9k6CQC;2rGE+j5__ z5}y}7$adPgOLUxACLBZ*hY#i@;p>Tyr-p;VI!?8#$~BzvJo{lF@~!?$*MN;ArnJ%p zX;+S(g&)C({BU;+qE8m}IPTH$R>_(#QM!=fw=8pF&(zTt#4rYh&-JL|;itO2Owy+> zUQ)kUW`R@fT(sa%e%fX%aWiN(&cKo${w=3#EMQbSTZO0K%Ndi%e&_!2GPJt|e9-^m zrFZmjCqc{!q)MlEs~EN?psRn%K8xPnoYXwO}lP)^GwB*Aq$Mc=dlpoz&hogjf=?BtDt@97*Bu}?&i9X zdtGLX{(bJlkDM7W8#@z1;zDnr9rXulTa~Dg?unHj^TK1)w&l1zmp`&Q#w?kxV=@;M zLO+x2iK$*GMz)(?TRE$cQG0U2qC)rU)GD^i^wY=$g`^8UPK zt@8NMdg}eRhS3g|T!OnwDdo_?n{l|Pp0($`FlE$8~e&N%B#I5iVoH*vqIDb;G zWj#8kPJ!eTY+ssYA@m6wG_h&uyFp}e(y4k<_9Mq#$~3s;q4C*!aMS%rf0SZV(U3)s zKJ?&lGB{QWC)>O-v$7xjq^tusZ`@J7EV_9x;f@sS#TJo1VFp&VmliU1@BXgEHo?{w zlA!)BFw1Xg82@z8%>EuHi1W6NSbfJ7lh+)tx}Sx;+?E{e04;Z$6vy`_Gce-iZh_G9 z?DW-~j)rpl`%28+fhn1r)_8Z-fa{W1cJL*cm(VasDGaDMo7Hu0^$@!nr zO64;badV$Wn}%*W+>U}t)WL74nRD(DElHnnuO+G9yQR#`W*`rW&$cB<6QQ3HTYo}n zbLfQ(ZhOyi%Pn2E6-T9Bo>z6`f4t2uH)gVYU3v^n|;W`cy>X^DD$v`k}2F0iEi1 zi;K^Ar&@Fsb#v59?e|&5^Nr4Hq1+PB-&Ln7u=l>%WhsxRah{bi*NV!@Y^`va=vfk6 zk;VAU887`rMIl*?mKOuz#fwi>noPNdnS^J|#HXiD<5AX80&}7>T=8nE`OafG*^6Hi z_e63&c$fFu!&onxxQxrqWEZf6sjK$C{<$Itf`X@NBG^a<-M}V@|>r{;B`eM1gk4` 
z+}u{xWQ6|J5-rs)tHYn*@mz!Ui;Ewihg(&NqF+L74p~TwO-Lp9o(+ypUHfyQlB@?z zak;kAe|P#Wk6f-k^kT*BD{|gf@n}9I(@u2m{kUC`EtGwR{Py=Uk6=b;(*W&7e2?iO#8Oi@sVU=E6iJM z?0`2zJD1m+k2bkH*Ds2(xM7FQ(~uPQ%5zP!jd!x(%evpIC+J=ix2~=iOB2x z;`V5o+1wAGbeS3yx095`pcjlXl+Uv4oj=?>lzy)eD5do-)aKIOWE_ndy^E~~qpqfeE2O4i&5r}0UnpQN*&X|&{U@`h82#IQy#|M^G!CvWg> z_`myjERt0Zj@RM-8nF#}g8YTSZReDo+jsHwZ9(Ytsl(-<1_z<2yo-&AAmEZmYYC-; zl&VG6a##@pdxF>MAPq-yvm|nlyQvNq8mnKPS4y|)w7K&9@fg&jk6_!|`c>)X!@;_& z&`r1q9jfkr1bt`5UmV5wL^IbjDdDCc{6BWlN(;%J96Sj92%d#iMGu zN2rN_Q|~!a3{R-U($GXci}B88z+Ml!%N3xZ-ojgpyT`=u z*t%U>%RY7&JLvvi4!_2}^<(bHhY~ls(A$D1`~%CIh=WwK$-XscBAA|1k?o_Jkm*yRJ@K^$ zO&A>GdeEvVT~}7PWyd5L>n3e3YO9t|{(@RnelIIA@80pb@!QDlZii!$AeSmhXK+nG z@m=E6h+SxjWDq}@7yPBnOahXxSL|C^1{aiF&PG9edx=ESG2#y-WP+FVL%~B;J#pQ- zh*K{|_*J9(?x~uWbqw$F3pb=n>55eEY$%Iyr0E^67D&3>PfkcxHq5HrUKW~HJuY$> zQ!A=jK8t0>c?lT6`DEV~%H=gRhCqyms$BOq{hJ0h%x(YK3qYQ26Kj&w9zXlo= z2C@4dXkJLX*pQ^de*Dm6WqYATnaBl))N7CI`lL6{_4n*Yo8BX4+hUj;z8{INEbxtn zbRDhDXmxQp7FNDpT==N8Sgzj@p<$`hta~4{&`rfZ$eXYq=2HWwRvqW2l9cuYHfnNy zam-ZK5)waf+Bux`_09Gb*6URrC#ILUYKQrSRjv9P_sM9Xuc4V;@Io)%BqBj5p>CDK zAy}z&?5U`|#{ShCqi#1RuZdr+AE3iRB;=MBT4>PG?+548s`_$a!B3)o6wcxL52k7E zy)^17$IW}~dkOA0#qb%A`LdWB$kcr+NVq!%YYpMCtNo?T_iwIzJ@zp! zL^0*l3LLF_SR@v>9n^ysz%f1e@3FLhGlNqMw^)xw%Hmw^9VKzyhC^k!D!L!R3A$MM z0a?q6Zc{kH5Gy|@3#;gMh7(M&@Xiw8bYEr2%3i~h~iTaqvWTAgjV<`m&> zZW&r6K@j?|qgnT-ODg1-ZD=W&efY7#=)1ZX({&;kQVOz|!w39J!GgL7-wZ%q;MUol zUW8NIZklvDc&2;SdZ=xG3oV6d>D@5Icu@M{O@SGQ}vu%$&RdJ#D(0A^V89onP6bRczdEVk*Vd=2hn{nV5 z-beYR!yIs%3klZ)H5+kZ36?}Af(u_J`T>?yJ;lNxKLpPSpqtTf9I+f9Itu-1w**=L zW^b@z_DYcmCh_WL1d&gPtM}CSDEg0Um!?+uO|$&wl*JAYDln#j{#`y3)N?D5g2gB? 
z?GS+GY`cB@J41i>{5i+zFAaB{TrygQ91AmAq8)-_S}r&VeIga*Bf?m1z97O`X>3}# zoX)-6w^Ykj`UI$vov)-P{T6#KHR;Q=5pP31NmSRv&eNQAgbdJz22Rn<_E^QZjzLbe zH^+VYYUMkI%Rbdmqgx-!7oa{&H`YVVJhAY)B3<0_j24?#hpxP~d=J%~U>OOj4wPM> zqlzJ+Ke+JJjph4#1wIy2dv;w(_!?A+a%;)+ljz%LT>dz@`eiGSOZ@@jqiu)b$?B%IPu z4UM1+19!i^g56VSY2bl;?G2UX44FLx6>}a_pJ3@}G$7V^2Op@$$VaxxKPC#)0;ghn+Uj8n)MW zIthBk%@RWQKcVwO{LehvOTt3K^9NnnuP6Vok302Z58IC`nVM{TbLZx_y&R)skxQTO zG&Pa*Wjcb-*B|UhmaoPX^gGb|S)8I}aSQ&cd9jQB0o}34(QY!>P;~dG@hj~8x`_GU z;J&%U;QlA*@C;Px@b(5k{I^$pEHk>nv{Q~AJk`~|iJdPz05wxB-|M5RH%~HEojI6t z%&DU351i=&dzh4$dwfo!T|#xTOx{*YRJ)zP<2Ot_dY5VCH*@bDXWrT=QLe1pKewS5EOh!tbjV&)fhaF_P1HQ$?KCZz}N#k4m6s ztOHf%_RH-WoUO^}8OgwPpT5yT@XuguJ7;x{4M$jJ=E)6SABb!O zWkkOn&K!fNU0rl)ZkG&|-P%SwfOCk_owNF!FZ^Wnt7qL8+Ss~~W(mBMX)NlkJDagL5=8Ae5K*w zR41w(SCULD(x}rkU^BJ!fHN#^FV3J(82SBrA5zCsc^_VfQb{! zK4%Ws=mN4{e%~jn>|PGaduYyLXhBo;winEFXv@D(xyeswSOUY^e?f5frTJEBF><$P z;T?$n!v~fcwvEO}u}TWG=STbie?nck=hm7s-)YKx%VLbl^@AK-TBsThDC9s*f1y)1 z@>7l`zt?iLdQ#aq1#vFYYT1G0WOg6_N@;EM@F(1~K}_2sh8Uxk8kr#ok+X)sI;q;s z`a}e|8H_06cd-mOW2KHI`C%(@p-PG#N=>=x23LE3B^OelrKJZ83T@HB4*bV!k ztq%|`HdlrJ!`^!aHMz$7q81cE0UM%J6$R-Xq+97lL3-~sf^-N)79uK5dQBoV^xnH5 zAT>ZB)Bw@~gn*O~Nk}*^YtKG=&Yg4jTKD6f+21k)fp^}g{r|uA2%NvotqEn8QM0J2 za|#BwlEXF!%Z_}lut<5riXpw__~p^Kf#c-E0xg-^s*oR@+x7FSjg&6*NQdN!zl5do zd5I-WtDR`AcPxD2ni-|K{!O}u?vW%m&y)$gpOWR3%X7OHAtk_F@Z^z@nbcXcW<2d5 z=ihEE8xvCVG_2)0c0Fpo2ir`K^LHdf7i@)0fb_|T{b}9h7x)V!648!};xzv~*>;wy6VOXt@l{nTqrAU^ zix~IY7jpIScbx6lD_br!39Ax!`eqH_f_{hI*iqRUdb;_M_mzILOM)r-K%<+u#PH3t zo^vJu7>y#x2`iJu*zmG{3AJ#rG{}CD8Rq94^iS8xO1 z^p7JH2&m_XPvUDEgS+xm6R$eg0=aYmkN$_g2KdM}i%ieF>?dIE8}2C$0~6qnI6MWfb9^#!rPx6GT~gdpAU zGd+iF%~a58pw_*C^Lf!`-gN(!In3_lk#8ZJ7vm|icAd^rLnz4NXWoxNH6cJriRK`^ zrX26TXQX068BCG4KlRdIP=%*lyPYCva)&6mF+R4P80Fh{B3=9xx*Ht4y>JrN?2jV; zx+vVlH?~tKV{KovQEF1Ry)HN`^G^z(V~bOrXu!?wkj1zW4u2d+{tYANyYY%n71<%S z&*)b6W1DUXSZ*{jnbXv0!&k3dj&A}kN@du? 
z25GkjEC5QPG2}-Z`S-7d)&Lcj++<;Bw&KaLdalZDKQxdoQ$9ZT>~ky5yONK0;(r2H z)j-@QblXz5onZ5@twu!D@!{-FmN#g&jT1NxcD3=TQD=G-$RDYO<%8BTM||>?)_$nM zsE1P?I*fkOg0pCkoykb3x_N2*TJaE|%b`u2Z0mu!x(7sFTa z?HsYUzSj7lFG{6ULrbnr98eXK{l9s0iM_8}=%7ANcpkQslLrW!q+XdTiXHRZd1^rM zd@-vf{$2z)fkw-?jf4N=1bUnvv4(vVBqLLH0?s<*^idNccc1o4j_}|G8TZBa=r=BD zQND$zdprj9?Rz_gIN&+|LQ;qG{ip&gX07-;kpfQTG0Ey8zO|Vgxc=Yob)n08r(@Jq zZS~Z65#p-?u4@dE}*^wh5JE(-h$k*Ho1DTmMv% z9ar(BbDZ8XFy#10M`Kq$4MfD{?YB((2b}L;pRYYuRKgwto3L zLZ5^AL8ggRw&up4qj9Ius!`F-bJo;!ceWdY>FkkbIyhC?$~HC&b>;V&c$lW!Y%o@O z&EB(zPlAjw1LbXu#ukQ_l^2cwgx!q?R&CBlB8)qzK0ZnO5i&#pjan@COSmXO!j1&< z^S=aJczxjYmAQ5PHhd_R&HYtM#YUmTZ(Ze!`_0`f9go-YCg)6tn?im(E>)Ds|1fbQ z_Yf0`k_I$DXV24po9I|~f15$MA}zL^zXtCJ$a+947N;BR2!vCY5`phvZ?(NI7JZT zS&P~8h*ik6!UhnJaxAWKKv+hUInrs!DOX;`rgej~5<=R(hp=EG25I+ebRE{cW_$pppw zGBVI+b2G*GGd-t0dqFv=)c2(quZ=HlMiX&b#0LGc#G9^YU;A?A&r4Cvj{5*z@3*P9 zQUZ*LZlnco!d7}ew=*5b=ksQ=>6N%F-^y3O5pSHN+z`>l^Y2_l-hO*akQ zpvouoNYr|tphV}rKMg>N6_%Vi+-CulV#;dpea+QeT^+pV5{^f~f7zg82N5IIA| zKmG#FFmX3~a_CWrOi_kn>Wt4mweY4&>Y)NbN7;IxdffS{q>-tUmC%( zNLqWn2$Sp5X#p1Y-L6a)=z{287lUFzQSia8!?W36uq87FHKmaxxQK}4F$_c0SmAlY z)pIRQC19OoEh%j4u&-WzWsZT|br}cF&a*wZ<222I{fW|T&%6dqy4JVku1mg(->^)z z4W0K?2PjLO_9@EZmSeDkSkq(BI4;H9A88@ZvlCBw{qf|Oh;{&4(Q^v}b97_Z(!4ia zLdM#DTEH$oLuASe)wE4}GQBux{G5E?*>Z{?csrAhVWFcj+#LU~IowcPbfgltAL+O) z()R@0X}n1cIXRxaCbC)@)FgfnggQJa{u=1-Y2uxWIZ3s=D)RIOl9BbCeb-yQqB=t>A_W1WF2^lD{ z`}=(rmoCpregEXhZ^_o*@y{a>#aH0|joRa;kVtN!^wL{n`+CL3MzoClmJ`kHyOW_Z zwR0{)iUMr1H4}Q=X&pD57AhkFVER-yE-#A;*gFWe8`2vLfXd4bDuwUaigZoFm2g&;%Dgnsf z`JG?0ZY_5WOoI!}ei)wu5!Oo0`zsqtltBO0?8}IRY(Nb*-{Sv+s`w{V9&86_gsLn@ zuVano>Rnnhg9FMsDl0`b7IVgnCdCiZ29o&TiDC$)xclKNK!wvCHRT9_8zA0_T2GPn zYweBQ3UzWSQ=Ova;iJJNR%96uEj+LFxlgMD<@OiLEExXm?eC5?Sji(Q?VCJAa-Ib2 zZH|}StA=autxU5Rc&+o>f6xabjeXiT0{r)Ot)Oyf;T(&;qKSFmwOwV7M3ZLlf;Oq+ z1+mnS9ARcrvTDQ0{uL_U6!7IHum0<{+5J{-ZK=I9hbJ($8&^5ilLeV>M6o=|xiiD$ z2wT+ptk4|b>KwxE2q8A+?z}XV0VHI(rqg44z&%R?ebeQ3*_;BN+W02dmXS*jn<)qALkSdl 
z9|Nx;&r{#XMe9lVeQ|29In@aIFsU#E!J6fFOBQ^TIYc;VB5qCWoPk4c7;~k z-yLAK;!~N!2X2U0{J7}Cu3D0uIz?oGV~}ILj0Ud-#Lf1=9uh z_^lLX%$vB~oi<*3FQqgG;!QHUar;{kkA6vkH$!bZY9|M#MS0eiDT&mDBxmfdQUN;A ze}WzF?zJ8XXzBJu-6@&MkK&?#X_G0=-fg}~%nDgDJ`p<9A~Yt1%(iMzY8L<^q$Ym- zsv4?t{HRbC7F~)UEEM9wjQ_9WI`hr;S?_b>3O4qIkr0B5R zptHOp|5->k>{q^4u;SGZ%)rT~?~pIRC`MOTzIGys;5FBfEHzu%Qh)G%RD)5FaLBgB z$^x>RAMAU5{`SzLr>JU|{HiOgP|jYbDwhG?zW9fpmKH5{y44Ms#FDIAx^t3&1IYO) zz=6@&2FY!TjQYsof4Fa;@J$`tFTy&O=q%sZh)D$8{V6!|{%h1}59Rz8xUw_#+PUY> zR}db+cRwTY10UMI>G<6#;xStbdbn)PapO^3iRAM_$$L|F^GbuZ{)nC&PBs%L3J8vXLclbmIqG5}3N$r%6n2ZCKp=KK zr~ZldJ*=pTpK!?43vs0uZYmeb^QLpQc5EU-Ww>t2Dz^TADnf|51VUtYfq~K!2sEtKrYr+$zoT7!$Dgct~=MA(KP1;c4U+3 z{sy$8Rfp1Z11S|@q^*>Acx zXYfra5Bs6Zg$k}}=3*X6@TgX+a}VxYAu&zZNpA>BLVt|Mo!q-#r;wnWKFqHtP_tdJ z-`$VoVJ54RZr$y30*r;*UVFgeqt4-aH+vvb~Zs@@1 zAo(*?i9fuD0VZQnVa`~J6QVS@Zt;#SL?`*&HL-r%ai_`z5w;TaVukPB+WfC|h^(L% z8bi9;k$vnz4#v#S$TDQAxp^aFD2`t`@?t*`W&)v=D}kw&^5o)YboTc<>O?s|ebjQ> zX;Xj{&1>yy-2gorK6S+$@Fg~a1bTQwoTohYUk6@`OFU* z0Cv9OmR(CGX6)n}#%h7f`l#mf4N*ZMcv+f%L?Jpa{GUp$p|Po_Vm5X!Y-olSxwt4e zA-=q?{8b)=*U|u2Cl@>qxhbKm z8^P|R@~#n2iH3asEd;BMDxYV6Vjkf z6I$nsWlWJ0OZ4>k%&ij^X7?4Jnn{}rWT1UYP8mF-L<+PwHsJf&K{lP9{H7ihGvxRe zkIVyr(teAb-3h)eo%15;m0R-=vxbLLMlq*-lPOlZHda1!wP?bH)84jE1%QQ~BMNt5 z7jKmsHgeSj;Hb+azgt`FWTVaLcttp)KC-(xaEaaK5jb}nFC%`pPot38AGo`ivvqsZ z?ZU~1Z1(r;^i#&sQqF1bjSK8VafUi=W8(bkM9rI{^lL#{Te(k%>uLpc#ooV0#2Le= zeZby@L9G|FT|#hgn-6t2z4irKAxi)^GDDrtW5-!ZWFlXY%waUK@Yu5HOtwPQ-3b*h zDZoP;%6rWxT^?uJDlHxSaSZYwyQ= z8ouEX}!jH1!WoiZkb~Io$24xne1omH+jS-p}9FHSLkUh~i@Z zj5o{bq(w;5=0s7d)5l^}z&#Kjd{$;?6A;$Cgg-*@B<2h$zx~VdM-;o`Ov85#M7ohP zs(@Fb$e=bxeMo70h7931X;ag9m76H2_snbaEree3PG1x|*Qw0^3nL$|jG3+TL*(dZ z^9U3~Oqrg?BtJLPqo3!UUU_f&6D<+854vr(pO>nv@aG&zhK3^FGpkN5T6Po=y2(r} zSx%idS;z&A;7q$HduOr?xwh$9y7RpbB>TX38=2>x^_Nb;tFwisl`SiSX`B<<#faYY zTC82~Fgjb;gGoR~MJ-cLuFL`bqGo_dw~*>oF=-4MhAEZO6C%K&M8Q(i+Me`mbkqSKDZ@Jyo0W^N!gmMM;zcUlseRSqZ(g|73qn z=@?CYmVQQQlFpJb^nl+|>GO3lViNo_U^WssPdq&Q+Y$Qf9J4D(N$p^3{O-C1!7UE( 
z;RWnX|G)av|IZ(2i35y@c8%;X{{B_?`xu2c^{>T@+N^6mKg1Zs%KQE z-dO6vgRj)|tY7POC0Pi{O{P!1yeX#EgI+;Q+IP_{<1ogu} zR_cE+iT&pz^yUD}VehipYWmg3S3(od-KJ(}F(BeMyl+c*UOLwc>-a?_T<7SM6$K88 zvK>gq|2hBb%fIuZhjv}P`+iq=0flRJoX z87sej#SV}-v=S|Xk0k8bWJ+t=6aho=7o!-+KlKd%Ic!{~!{*c5Xik-VX)sm7z#d_A z*9BFQPzxAZhgspO)1rB~>KKLWQbY zrgy$=e`25KPk>M4Jhd=z3%Q{9p&;;b1V?bop4wZ5rsZMR1(^cNhKv}y~ zVO)x6Rv`HFCvL2C?Az|hqEb#Mq;kLmmx2)m?C$)3W8rK=(c_yOF>4Y(JhyAsU6!4c z19%fu+9A*xO_P6hfG#Qm2ZR50otskt^k6#Elrv~jF~tkskA|#ljK)6h6|kr@283f< zuGs#`%Wd&MS$eR-)~7Ta_;4k~BG|j=w*015gQTPzR>aSheyu$iIsp*>E z+EJy4+roF$er{-qTN^QRyvRsC>u{pLGxYDk4UKXr+V)2U34&X8T20V~tElf&nvn$A zNmb|^ytz7NGr;+PK^cE;NzUK+p!S{QL-fta`UNd1y8L_p>g-#*_O~nEhs8q3>|WT% zD}+p8!baoDDw7g;Z>+1Q;#y9gcKbbOeu<%|E)c!qvYPkum8LV*BMFyjb3g%*ym)N zN8=eC*Za6LJs;Rp-|%MM3Ig#;0Pc;?@CB{v9{2nIq@?@1q*XBlc-IeoI+*tWmU1^% ze%JL|Rrh||SHKZXy%%fW)A7B__GcKUyyl;koci49mzpIlHJ6H10UbsYU50dy$_!Jr z+hl+=nkvTm3-IWhDPp-!`*nxEPasntd*u%jG0C}haeP$<@(=k2N04J30 z0ihqbA+O50id_~SlMPe2fDCSXhSBpBkFMJryI;U1aqhMSc;C#0)`|4Yi?}X=P_TFF zVeBX%l$ox|pv1=yL$Uh931fL`QyHi#fY-7`5WuD0kTN2Cr+s(JVR@Zq4!~LVmZqdK zFShKB>3R(589bidEn#&!vkZhBZ~}aHm-IxncJ$XWDO0Q~91es8B!oyU1+kB1mupfc z^0)8Az|SZtNa+x!ze)b{sD|1+2gWqV9oDI|Tb+@GWZs!-d3KtgAj9J50-IK0!onyfL}U3L?iy|ApYM64@rnD9Ei@{0UfcNF}zOzCUx1uZYwC zQ_2QF83H*fmn`LI>nj22-GF*|%N_h6GC%kM9d`0_lVYupFOLZigk z2L)>Ye^NCJ;nB@cg=_LWQN_Na*6S(Z?;jqg{b00*_ZU4RCCdh=Qvw!9v{O5G4RT z%Y&aKUU3eI1I`D66-_xuSuykx)L~0SLF#}RPaJEq(6}9U{@-lTmw*jPgz90Mk&Ls& z;haN&);oY*SRwD;fV8|dD^Gw!Z*RM5!=#nxnF*l)br?jNyXSCIV5x5w*w7DHet+XG z$E~Iq=TJGn4~sc|Y_}{IY4G54* znYhGrRyB*jaB{Wo+*xPM;X}J^^KaJMUp5iu8 z50kpLDcsON^8<7}86lm#*-W(W`@}UKFRj(NtIme9Vg4m=3KQWr-ASFYr zl*&vS91HPV8}e>*70@)%R@fV1PHQD~Scb!)!156u;KvZ@T=+7xeb$Wyf)o79>7-8$6UFe;Z|?R=>#>wvNo0~8;oAt->P#w zGM8`e`(#C!jrb@i@y8>y&~u|IX$-z_6*E2Ac^Xjxu+h@JXdr*ue%3|QWf)*nC*D;B zt~z7WCObD@<$1hpoU1t*Qy27!(e`?I{=ITcKu^GMoA?f#tCf35(E0vqf6^_8&n%>E zp;2pZMF9;kS6Hw892m2WyclqJSL|&$td+^SG2%VBabfI!zh84>J1;YvU|1!H(nn)a zf1K-I#=4AGlpQpZeEQ;+#o;ZjOFJBZ&(`p*$c~*M5zutL#KNRP;*h@U)diTx@BqdQ 
zJ7PzRzlUr@&+Liwg1ay5Dc7Rt66KEtF2ZV|XklNd#d<;nNVCEw?lw>kkceTid@?*(VPn02#@8HO2{A=ymF_E_{~Y*s z4m=6cXAz(B0y3hKA;C}=a{2))@d_3b%whu(bD-ygo%|bEZ6)L0fM+Nv?^(j8|E+4Epu`1q=HoOseR`$z$>I8)TY%iN+`FfmBQyz$KW?Ski}P`gWHj z=4hQQ@Z99DvVr5QCof<&1ZgsMCIU-f)!aRJ7tlg(%p#Np^$Ln$C-=C1qtX1Rf0Jmb zQh&~e^;=JlBjCNcFW_CXvisnTQeY+r0guGev zm#nw{I&kAwL>(vUW_A-OCL!!_?iFdsGc zSQXk>+35eU>!t$a@fiCD;8qc3d|d9mJUKZ5K?f8Uz-(>`4)XOUml85tHYbLYUwXog zXXOLtIc0S86CQboN96^bnsHRVao+n#3h~uv&v#!N_#SYFzoP_ zvh_zPN0F?|JlgwP;0ZFS`Gqov2G53yo2PvsFPDm%hHy0ePl*%EkTZNbjVo1|ma*6o z$gz?#_t%%Haa-=&cdG#c0+?}QFb)uSV$4XV&Ge27p~1IjKyPyXW@!Ppg|g&qOYmoh zajhGsoHEX~K%jwVV88(<^m*BiM{&y@aTeNA5nUnqA*p)*Z%vOSa=u2XD{wuOGJH5V zy`KvNe?V}4`b&30-_0JfdDs^^@4n%NX$K&fcpb$49U~)fZ6_SUwE6UH2 z?CRxR3ptZfWgH4Kxy^yGZ>2uPd@J4nsQs_EN@W^+2ev`QrEyfa!HT*j(rQKUtb*js z*vXv~<1)ZuT%%B&1DS*g97agNI{5mfRr@y}ZP&@17uOC}rN+}IxtnjPEr{`hPTV4}MDFav=Sf$kJXfKRL z!|ZsL!Q`QO@}T}5NX#2-9o!bBL3at+A*{&^c=?m6^S9*dedu`J6ue(ikWWdyc3~B8 zQsb$-Hg2Z57!20NAWw2n%^(~bZ$|Tmq`jI360~Gg*HR0W@Wi=|)^|ec83V?TjVNax zqO;C^l0kdLuu2c!oV$@(sEZ4m3l&XI*#aacxoM(!J~07oJ-fENZ*g)&K0yG;ZAz1D zY+tWwJ<<@)B|-7+J6eq)9r6L6k~;sQ37M`psqh3#h43H~V9w0>^HQkUGeOz97QfHV+4= ziz6x@>EC?*_No4TL?MU+o66&8P-@I6lUFM*Q5F;fM6}_}urs*!!c;uOgBx;@l(`vk zE|B`5RlP{B)K!yUUcDU>I_TqHJH8R8EsuZWkBjy_2l79`RgN|3c{ckE=4r(S&WES? 
z_pgJek#oA5$ibT}_TnmtV6;1+qx5R*R@1}F^~%@e05ooxh4Yzin za(enkqXb(Yxr+DG9tY%BP-oQT?~!wv3S*TSf-H;uLTePZtcd*mCxX%G$DCGDt^N zxRB3+6hSPj*VOkp1A+o5EM6A#mK|4YsA(=Hd%8}EcW`|gn=U8pR@F`V&E(=1$Y^$W zR0yq8s}Pqk@B=;>#tKrCcfX$R9;{38WwjOyPN5CRNI^&Rni8}dvCwBSTV@TIHqv?g z#?9*OebOw`L`~mH*P0{&(r0*yuyK5B`m_SrcuF!Pegd zWFEF;ZCUM5_ zi6ZdD;msD~2}h)8z2Zq3&?kat*Me{QYiEdz;lF6HpDY^Qe9Ww?0;@P1r77sVOyhpr z{iJJHtlED-5}`mB>`tBR^c}En;&_(n)5n_TzH@%2^1=aT4UA!_GKQgE*O^Lq#`~p- z^Vig7@>>>pr7-9miF{VgSm``&&dv+1(OlYGk_d!9PL`N)I9A{39&#=pMsBxn1Fk^I ziSu;}xyP<=7EMoUpu%+^&?%rlk>8*zop0XkXH>^JPtLIYj2--w;``TOzmNMu+R105 zDBoHhS_s8Xd5&YpqV8jy!W12cLjX_|SVnXg_Y|Kq%WY{hA8!g`f|Bo6ZOPZ7H|>x0W>dke_a~9ASRr>aPJ>815Vd9 zYvgJ2_o&%rmX{jmzt8n4$|S_ZtZ4yk@(PXV$ong@M`2ZiLGGXcZcg_}1#l(Fw2&yhaU`OmCB!03>WjE91^1413-v&$(IUN#)8NCo#*Dne~!?%c^G6N zVwweHstgSG6eF1bIKTmD?@C6@Bs#ys+ZS*(#5fKBzHsI#Y?h6n ztzwanu1=k&+a%;aUObKogx%ZCep0Y)()t=T1`T?-RHHX6GUoaZHA1(X&bX2%i)1TrT2epIo$mc(#o$)8>q zSTu?@+t`@8ZTgB>DPU)qB6Cg=v6G`5AxEnQ8EFsy%}d@e{tw4N;mz&`oH>_W3b7~T zeK%#VekT{0AVazbYOkFah#{WSXWJkvN^AmmH!{zC$T%8Ii2bQ)CL`UO_FmK<@Ly!& z0IW?gO%%d!Z~Th8+yZ7U3gl)F*U|W0z2W{42x#!x(=k5sKDl*@n(1xj&?MyH87Bki zr-5+VmHs4TKQf>gU{a<}@PC!(_D?fHS_r+`i9lkMPo1vuoss(bE1Mzpy8I4p*R9Wq zX5vP3*pp4(9-`_2cdia#Q-ep94ML=ioF_}(uBJH!cT;_SOe!{zHMune*>uftvdmLP zfHOg6(m-E>s7sSE*kj_s^koQB=$uBko^iX4uVa2tsmsis5;MU>3X8*R`yRHu{WDF+ zo~?`jjh~O~6ScTQmea#^mP*7+A&Q7r01uS;%~IAvD=uHH5S=E zWDdt}A`-&`WOhJXxY@xfK&aODeiy*NkottnAq1;Gv`Vh;m6>g_&Rvuv-_~VwNO-~QIW)##0 zb7~VA8BVD8n`Z*#Mt0iJkmX*EkodU^>fkC1@R8F7kS)f&b4nr&pxEU77j{g!{XJlB zSC3$6K)SRVU{R1;MYA-W3?O<{`Te;$JCBsp+S9}I4JmM$!U^qI7Ug@ zZpr8~#(yMWX;(Bf*#pR0fg;w7uwKHh0dWOj-*#5*_EPV(GtWhIf6PwwpG?VK7A`M3h;9FHY4iHU zyAQt@P6^z)HH9{FxqtD?r|i3bJ>vB#FDrGKq)gN{S2DfMeVF*F^7|2N=esB2c>6fO zKJ}@>&i>#WSbG8SSr*ZiLO^HrN*=D{5dL5`7t_scoEt_O@jp0yO?p>nNFs?uv>&0B zVXHGzND7lSJ*j;nvs`lnmwgYDm*D^L4AtE;XD>ZDbMET<|L)_s^tpvV*{;~aU~+(d zdvH*(h1-0sxz_~wB$&Uc9E0T*sspa&#Oyq@9FLWw*-(a-uAtZJ7lMob;3pS&SDaq` z(p{ZagEpqK|lSeNgd5!O~`rF%2~~Q=)jnj 
z{wkX|$fnmW;P|G4zYai`S#!HtIt?&AGqyvr5iY#r{>vJ@l&mE4320;gIDmusj`TrRmo4k(M`PN+KtgP6`Jd zwGC*^l^4E6`?R#@KvQl1Q&(iM2C*bDVz@@Cg~>cX@1=UxpDkpNW`Jc$e_+y{&WseT zzo2%+27yJ)r^SsogIdXdELziQ2#H@lb}4Ir=ob8_xo>UOA<^^M_*cExgEIUpp}S2i zCuyUUU3qQo8KJd&Lx!l0Ixj%y_$OY?As;1r@n4Mg>2xr>dWnkdn|SEhMK!|xp_Cfx zu?p4akAl*~!^du^*%XBIrmP9CGla2=m|kwJmwP1m=A6>!%VWT0RStB&JDEOPEWv7o z)$nc1VuE2#b)s28pT)=4T0Ytxz;2y5Ry`-XmlnMTF8y6l`IvR#<_z7W;qUyvt|xqu zGaI-j%n2V{*EA1{bmC+?5=7TY0HC&HeCZ-~A zf^XG{Q{9xY{+>uqXEa8PW@<^MiGw^b6~#LYw$2m1zHzab{;Z9$tJii%R@M=(#Xik# zNN>N|4wseA%oupYpP<3iN6NG>5xbkA+QFJK0gzCv89zGS-5K(%OukiJSa#^*8cr4E z^e*d1-G)dHuA9((sW+I6pHzEhN)v3Oe);ypuhBTb*gS0c!2J5V-ot(uH+|my*pPWf zrd`7I=-!rP@aLWbEB=WkNiUd-{LQZ7uOnr=`+A$;!WN37O62W_YKe9?PLpAf3PQ(4 z^kzT2Lc8AYvg6^rQ67J0i~*#Xwi+F5l!iZ=mt%iJ4oy!`boBwRrY^ax&OJe zU!`0c|M>o07VSM8*Q`u=U6%AU>!r^g*tZ$8)!Oo@a=lVOx!E9n)-V%@FNC=S1rRjG zGQag}ZQ}E*w2X^Js_$8UG+$94h%PSD?v})3fLehcP~L|Ko$BCZ2~M zYWCIa*<=xyk*v(G*C%pf<$grOy&Y|76qwI`{&R2g@k+&w)hR1wNb0qwhF6!I!r!AE z%FRHtpeCbzdre#!n7!a;)xl2L2}@b4_(nbO8yM-~%uAv?sHsyf_|=A zKT~nIoKjl3-HIuz`*A&T)(s%9p0hi6t+0<}MK2#4A(c#!`5}=B9`i0kp!WT?Lm0-q z|51*wkpoUbY^<}Zho#hEpzf_5Ecmby_%mzwrY#IM=laU|RuxId>OO%8G0{D#d3Sj* zzMH%CvVq+ji4Qe1uTs6j)njqv=DpL`-hzA`7f?63e8*EPrVE6WW`D-zxGqPd>dk!T z6$GLC3PaNL_i#V-Q4LPbYTgkR9-*L3ROmjdL#*O%B6V7g(X>6{ z1oAvr_7!AYO?!FXDLf|sYqwUS(=f?V=?-HXCWZwRhM(N^eD~+IJZMI0feW<0hJ6P! 
z#OkO0YJ(^ndz!x#>kp?6Z=IahD`ISBui9naJ-kfGqzk%5d(SNCh&IT!nYX2mI3HLY z4JwsrGK{=66eKc4ZejsJuJtH~UlZF9pTcG^hu z#x=$wa>U{nS7A|qVDBA>QE7D=8A)ROA0qDPwpFYRWrQn-1g*z$gLcTVKUESeox|{` zO5b^wxO@CG;cC}}?QaZz@Oj%5+`#7j=V|h-^+zFSZ&b}iI_4?aotR7A2`=}}+UN-P zM!#U;Xd_wQ&wgF`>|D>}LSS!ZZ*z$Ng^)9;X&q-x6$J~F8AF-6MV8!^Q$r+ubyM(|@IU_5tdT8dl&)JQbB~OhgjV7q-?$9DFWr7v z;?4d*>_u=E+0}q1@Zo_#&mPTfy@l}B+MHPVywv=7(LCDQ1jRo)hvn4lnwxO9?Iv7+ ztYy1=>s^hd4uW!EqF_9}aaB}(mI>Ahnpx;hePJL2dq5@7ZGARplT2>V9rEYnFQUPb zd>8ysDMMVWeB+;fj55rDizkW=gm{{|1W9T8?^EO=GQt?9X5Y5sjZc7}OYLcP5-W2R zt+^13jVZO&2OehmI9?;f&}&$m9iNYJn-1g|O?O#@Yl|AAurvSaut6JP%B8&v z_e=Q%^7;jK#Q)Eb3i7$3qM&I_t%*W&L6c72qpYv*MR?QkhhGLhPN{OULRR#c3}`YK z_{O$2Glr#mZw>`|;6TDJY+TK@})KU;$S7oMt zO}MD-GQza)eD+w_``6}k^l~*-w^i(_jPx!vn z`cdy+dyn038;L1vq>5_%P`HQUDTj0$1K-#?(*V?B>6Z&lDh47yW~TTdFNY&{1iw#* z5y)b;q~goZL4t{(_~hu%RC(9)?QT4I$avCHsC70cUGAtR*k__v3f|mzWLg9~iSp#M zdvPZ2!r2_!f-ONX>zlqX)26B))=MooRAYDbhD-p(Ga}q>OUjd*&5TPT0JR{F$nSrU`Y-RG2&RAG>Jt?o*<_$&jLli;p7bfWT{ zy|{M_|8{KosGOg7IBs(s@f|Ev*-(0E%6NlM$OKxT zUKnp?MSiD0ZE>muw)x8)F$cZJ*Nk3%3NlMsdgs8RPGUqA71Xbb_G!cQPzZf~PFzpO z95j)^JHphbbNFQwvtOO#_A8&k6(Ndv zS0!lA%8R%6W-TC?pR|}-!JN!_^5+o6ghWrPP_ezPX6aZxMj(IDTxs4PW05$H)I{c` zXzxvw-Kcr9)ke6dYV24%Fq5IzFw%F&yDRK+QpW4~&y3IYJs7z$;UYPiGnvBg;)QY~ zZWC!I|FAuO4p`w!+^yR9k|vHh7WNNbm5fbFd@tUs5TNb zC^#=$CX$#baW^NEI3d$=u;;>;Sw9#h#~o8Tu9QWEvtmL$&SAr6;W5v!_HQOBV@;~< zU_T#reLgPjwDi~T#E8jxzvlZxFF|g#=xs)hki7-k(yoz|GMO-BzL=@&lE((PG{eQM zL85T;130O_xdO$=n^K1DT-JZA9jA)^qqLB4aFpH1?DxH*A+>$-=o>!ytp%cJ`B|Sq zQG(W=ou99hHf0fz4eN*UOx5gFcoL94XnT+VI+1mQ-LX4(H5{nt{6Ni}U6{{jp4_2k zdd{H~PvQvVFA~{7h3g$+`4cZsR!klco&myw;D3z8h3!L62_8WPweGg6EI1@75 z;NVyfUgWz*`nus6mnj{ie!Zep&Zoajm4hjH8%^Un>N6f}Ge%^?#OQ{jywIv;)Bml+KH+?flco+W_q+! 
zS8j*=>T=+d+wOQWA4b#gD)RIdC^2?bWvU(r$x2Bri<(CYKN0`j2kiP zPF-N{Y7r=?`@B!~Ll&vv>;)mVnfjqi(X1)RR#YP@0o8&^?o4;lDymXiRuGSQx6Eyr z&=ZZG9%)>EHU5spSZptq(4As4_!(K!m;uv&TQ*bgVgpJBu`%K2wI=7knJ5|mepKKO zeZ1>J0&e4)Ga8*o_$qzpXV}Y@`99$^RS`lYTpf`}k0 zMHEDl-Vu>V z$KCMWZ;bDbJKldBs65ZJ)?Bmx=3I03TM1hAzLv}!WmZl+;|<%V7+haR+MMtw<_dBsdK_Al@2 z6#97fp|>|LHyt0tixQ%>n&a#E0q_8HIp1v+1*PM`>_1w38$gF1jtZZM5_3B5Q-9m% z!qacNa|^ulKOl+72NL_k$0O{B+IZeuAmm20UFr8~8Y{80eE%ksw|5cG14SvI-lHEh z#%GD`kFo;vk|$ToW=6$jx7)e>}%T)cV7$P^pC%R4Y2+K$ zLSBI)C{Klg{qcow(kkZ&W~A%Rxx@Y*H~VucH~muv}yYnZ=9aOf4-wI;)c|> z$_VarhG#gQKOcTUd*V@EblE=(gnau=3%Ncw%woW@K)rJRb1Xgl(AKLojZ9!;@TbRx%r+UGj9k&cpbt?-C;~b8ujk6kf1gJ&mv! z6WsUqb|6T|OSrwH&s+_5`^k(0I^T>mEhEfBxJFyt1Yq+VqN&G?M|I7Ei6frSHSKf; zI8fY%swwHmTJ~qpAlDCw_1~0Du1?oD zw#bVlvw9_a)%hQ`^<%_8fo%OYtaB%&XL8mKmhG#WC&0&MlPjbRT|&h5ML9QG%7!*i zs4+*YR)yy`N}L_w@S!q-0Wsjt^+|;kLpd}Xd;W`(yS*Z(vpz4`0md5$E~#vqrCBO< z@*h=|JX~SF<23A9J~3(#oU4C6ecJ0_vRqF~^eq++<1RY)dYMzO{Qb^&nPP48{fwv& z!o!0)PGw(5qu%CXzbU~+%=I|U-s4je`bkWqV%s^)WxAH(X(0|>67P}sRD!g~M29GC zdLOr9GAu67>MAvEQ4)nN3Zq_Cu)*f6ojlVpx+Q!21qIvsYjhn#<)$aJFm2L=o0}5M zxWthbKBeuv@m|Z4p2SSA3J0kJy{toeTx}mo*&BU;m?!?ZJkx*UT7`h7UiQ3b`lZv! 
zLzW=nW$Ii~^L0|#)*{Ar&!ytwtyxZa!}r$h$*@Jz8Xz4F+c6N*Z~h?ROz&k>aE@^Z z8qIEcThucW?zJvZH)7FwWYKUbCC2%qlI_=s$mS%+f$z=NUYyo zB5X?dteAKzti#|dF@p4ReFTbbptY_r~{5=BA2UqMLmADV6x zp9II>jHi9DziF@j^-}F};S(6&hC54&R6*Ix=OF+@lH04rJCmKww>M~8C$X6i3_+Ym z=B#|Lgm1yV9KO^z%xG3acaa2puLJkI%5JF8FF3&8_WP6{iR2CmRwEBFqsx1BYI_?S zE}Cmc9Yga!qO}${vWO`!Q`@{_vy=Y2OwWnI_~ZpBe3ay1U8eTq;=!d{M(L(Q+;9)X zwq(SEPWfo94cMr48I$buxnN~~BfKhpw?yqSwUe?YXIdPaah@N8rc&Ho!N}4Ak_q>@ z>D3=6Ht;=71g-soDAY(b-U6flw#sY!!jo^GY(D&<#^$V57t5+7H^Xe6w6Z%>_Wy+5 zGtMKTbD>?PS4@ z8*IC5GS|kXoyslZV#c-SM>8MPZ&~FcMn^DohRZSvLTsZFCregMnk!rH_m7(!32DE4gH0J?qOsfE|Z^If* zrI-KSP0qt~vbstK(n>kl+6mps7c7&&kd~aN?^wICP|`Io>#WGlgj|HXefdIl9cR(5 zsV5AW{rdRW-f6yL+s0$(ZpAvD%kmPJ84wnLRpD%Xt|PJKsDxben?n0{{w(h1%ESG5bn_mDwKkbw#F5MtkHXXA-I5mOu^?3H zG8;UuB1VK0l&{CR8j`v5D~H{sIL^?kxhWL>CQ2XvER51&PIT1%gR)r#XhWw6pDj~k zewZ`~8Iq~1I?>o|i#x!;uZnO9{Mb$PBcKsViJ-m2pvz`a&%VtZtwlQWIz4P21-~xn zGFstzj3_mg>ypQW4+cNCRDXQF)IZ_KE-2Ks?yb$@CU?_rsXdC_HY<+780qY7MEgME z2%-FT`E}4k%92}gMkx4GT1(#&tp@`|7R;VN2u4M}y9ac-j-AB9wX{)?H{(^Hz?DlC z&CD?I0SRZ!4`5)-Qr|304X>XeJUIxD`5tU?tF1{Ze=c0&?p)O1O0jycCrhd;&>noA ze!vKlFOESCri|0jq8jy~;*&ybMuyaKiN$+duBwTq(&nxrIVb);*ZGl;0WlFa_ ztK}1FdEq1`(W%vAaeOc)XaDXfw+(H6MeH%EeKU3C4@;FaWPbH@4l?B(z)WE__2Htk zic#IN_kV<3N;V;rT&IVQ`GI0dMews%3HlqgCTt(b!R9viQyio<*Ja4{H~@#N21RV) zdARB(y50r&U#77}_?5ca)nBq!-)Dnf7)HImgssW7t8HvLl1MoxI~}Qx4{Fq)XjXU` z*LiKsGVbd2FyAY3J0i_gF_%k*iUJr1e&8){c;o0j{HV+oW*G!2Aijm!3g@#E&*LUW zfgN&DXc?Ja?JZ} z3=^0FF@f)nYIX9^08;Lv2j9ePOTvaiqHdUsUAc?KZy}mgT3X1+n(ve&?2z9_r+SRa zE*2a%j~i0Qew?D~pR{78Gs(>#V}-w36dux5e-fncf33cU@klXo5R0wvT5oNU;yR)#74PV+h@}rXjeV5O)S7h9TpTYM;nX5qZ(XO0{-n6Q zWp8EEgN!E?J-#Zrvxxkyzv4Mspf?nHsBT?P!9L;94AALPif;jbfo@zalx}gv;`M8A zM4k<{r)5E@*uxU_bMX)i5u`i>{ zvziuU$$XPkb3g$9vyXdJCS3AHLVM*>-rerZOe%(e-*q$>zFrDj^`FAvqfK?54n*k@ zOEu6`f3LO~UYDbbm|bK_LRU+!FTA&{Evs1=MCW1aD*8}#OW1~_n9{_*l`U0iz71#83|6$%Jh z?LkIfLr^{aW$Kdq_phHuE~MZ9u;3t}()9?lhroI(d4xk(knq*wLru!e)*CTDq2Emj zo(K{&y(S~`S1ERtZgvUi3fSeUp7LY@n6o9AU+eZQJt?ln0rg-o(F>yU@&fTb 
z<4RP_gKMmrDcWM`o^u1w4VPzOHeWaSwimtXUldntb}hxYzsutym;tGbffolVc5&|m zRqhw7cwIS)E_#2%4=|@}Oi$#=-y66%cM=V^m9_hRUaut_K|$IY zqB%Ni!W(GDNL{n_jvw~XeIllCs@g_3gi;w!1q{ri9n4hVKU5J#LG!N?7yW)_Md8xc z2S+Z|uHyppVA)AxHro=I`{R)B2|x2G=H+=PksXVNT(o0C>$CGDmU}TaoY@QK!zJvF zkGjNE6DVxkLbTpr+_u|@1|8hK+W9Keo41rz)w$ix))GMbUW4lJCih3tW8f08>CfUa zMR`NHplbFhiL_&@AR(Ds*Gv#D^j%+)XKL^6Xsn6NQSexkDzMP_299cNpfbQi)qMvS zr_3Q_)`v=r86~D2mBfZC*+uP)B2l{>aWTayIlUl`v?y)s8_acZ+^RFPtVb7grGNL@ zxR(%S%05+JW_z(HOXcgUvJ&kmrX$l(M{}MS*js|!_&t>iwGGL@DNlR-L z9FLmF9jm%7_MpjB)actGn`>v=a)xgdUuP{2i*=eGUPf)&kiTftFjvLwsZSUid9L2Y z#tS8ZM3@t*AjOU25^++JD1i1Nxblqu0Q)6|J{B zTnc`U^-i{Ufa})p)c4{w=xorX_KV_4Z6rC!pKZ|-XyTgp3ZKU@VqXFeT3XmSF+>8#rIa?E$3&SGym@QdN=I3+*xZmzbh`IV^CIM z9(PToF9nmnmbxRc+h0(%$5o#(D$1R67rryeq)aIQetUIN0#1wBSvCjcW$taSm+Jz* zvG#q(q4ft5RHi z?7k4y{lML-uaMC_;<9XV6yt97DDmR#cg|1JYV5X!9G7z$uSK~1Bp>CC9@q|E1_}9< z#x&C1ZNDB6-KWZ7x>VF@QQl>bX1-4nwkU~m>K7*y`+~meAn!s<4Quc^x{|yeYuz*E zPr7sixCr6SQKY44esMix?R@~xc$_H(d|ULmSoS3~61iwF2jg(R8j4X^Rhk7a3RLVD z^h(#?Lw*#>F>JmQd7Vv{Flt6cl$&N+vfy@7Ub^;Yq2VB#SQk^carhBlxGy}9x=XWa zP@Q8ws$2DIoc2w&aQ(xJq&qeehlVB`i#7#yrcc60Bi||w8K=OG(hVF3#29js1i=mx z&`I$9C^s3M6+3><%ooQSB>0C@{>Pg?Y+SqEZzA6$gEuDa%&Aa1ntzDFQ3IGcnn#)G z`s8=~T4WKzqba3+TD-CcVl(<3zZ%O+b_^K~$~fBEW%4QW9c z%ggz-)tiHE#(YV_D4B5wFGzFrCxvA4O&yM3!CjNKE;%yaK230sqBq`9p?)3W#(DzW zG>_*Bbij+EWs`nKcRsqSVbc%kR|uKklF>rcd8rm1f>U%NwAASK;K5u6u%VSAa+64(Wc3T7)6X z?4oFrCHh0!gGW@vt49P;wCRH~g^sw7b?bm*+^MY!#6KIa^XirQCp)za$*b3gq%EHa zCryK1?nConF3qrT@Ko+a0IW6>asS^I>ivoL{fmDu$Mq6&`rUt(C9k8qML`n0p9~4j z-?AL;(Uv7g-n#|41x~J6PubZst^FY0553(lBRn190h?Q5rsRds%9<}fMi>_@!XYBq zj~j1qkvsrWm(&3*YuyE5qzh<%nxzm#-73-JO)=lr^KoJhF{TiD;*h2fDYLnEp+Aq9 zc;ju6o6*9?3j;?IoWmPIYg#LZ>%AoVB3)j}FimZvksnsM-}b1CqPD;@x8CQOyuVnN z{KLuS`N%#V()LIc^R68x9AwW}*J=qcERyMim^(so3Nc{udpLH2zG}aHK#OBRAta$c z))&;AG&U)u_uR-SNvLK-o<=GTQ#cDr&ziq8IsLN0SEb_F!C;A6StjsvVxG zPuRrcwInwo-GJPGlID49^!-L9xaZd9k5c<(X-WD8=^QFEKUt+Y93E?FSbT{NI!eai z`y;HG|6tT>e^cpx1^NDCR5dpa%v^bo7%U7Q({Op 
zCpbc870vb>H7#UM%%oeQ283UfgPEd=PWh5(Q6euPVd!s3v-7(A6)AHZtF`idZ;uf0VS{ zl&AuifTh3x8sJFOdTvzt4WYN@(QOFZ6gRTA#Bkbl8B{Y1OgMgd1_C_U<0L!7nDu}7 zhFv$1$vw*~^h~;oe!fCQRJn;s-bFq)c#e4Ra-vmO<5xS=!)5xrE2ZPdEgBN6;)Bk@ z;uwnC)1r>vBgzJWGI!t(ahdIpy0`wQO5@lng4#xtomsqkQMZmfvDOyiy}>N zi-jSUW1<4WWtsevJJGx7CFrvs&UodOV+{~PpU1UfSyg5tZ&7f@pg}7J>KF?kLf_hC z?AQYwXa!44v1V*W=pfe}1ag2?7P?aQ0xa;*_$9h|hwfMHP$NTg$n`JgdLYEyqP1DrEE!z6vXJi3 zG|XfI@FokbJwhqGMiti7oy52=hU*7g zW=oHKLc`|~2pAM-0egafWbjFF%w~@eY^9QsPe#ix^rFiXizFlsxXiL;Be9}-* z20$;6k6}LvuROR*M7;4btFR9M*eQY&e@eR@E%S0;RiMxi0z{LeEJgZ06X|+6lukkV ziU04UvWLcZ#4V7dbA@kOd_DADjhyKwOr;Q%L#pP_*xXmVNB%;wQL_9OcQ+P8CGC~w8 zLgg&c!f4zFX|V(tg+o7=gz(v_i+>qciPfS0dQVjIXO z#Ag@T!mBot*h4N@6fFn?^};cd0E9m=W4dBqJ|;jLu3?8M`F^OXES=qKDPKlpZ0zgM0sWenZNeU0hBq?#hHFl_OExn2ZaTEb#eq6b zxB~8079%>c$b5G$QMA~y-lLNVJc2r8-0Y$S7errDihummi|jy$vFhe#^4!V;xZRG} zZkjkotaH9Z{X%HEV`Qc&m@IaC7{VVV*H+44{QSlg&1oW=Z||N0**A|*++gy^Ax>h# z;G%|W!{dI0heqyw!|j8>daQDt4Zl}py4Kj2ez*&3kx(e1hiz1`%3AV(X>9tc(+{`W zxpCXGR=7j55|}1X*)+GMAupEh(~AF$ByXE8{CXvdT?v}i!7SLyw7U^jY2+u?CDe~7qc_ZR#upvY?b zAnNFaeWJ3nr_Can=*@fW`Z(Bc&URHm>s3!)M#QA9>K;d2_d*-(S|J&L4QDXt|n3JBLc zbevb%F3!g&L}l(f&w*B?wdkFO>#94g0QNDs!Q)Tf1NmZ0N8J#@1)y><$Zmh+)+@`; zxCmL*DY2I{#O#jLc?Fx?_v3(8ExR&CsH1+2tp@?4X8?Evha*wz!g^ z5R;`#vHQ2?nUnUiZR>}iDg6D+%}^CX!foY2WE;B;mtFx(FX}gMO5?JG$0es2?s@SU zT=2MD#U%dCbgc%AyIx;3YN%Uh5{9i6T{bWWaB`)&n*jzxx3#ji{u}&*5SrNgW?#K( zRL4v#q+8K;pD>pPdboPM?m51^JpoB}F%ihQ!QJKJ||h|ZQmbs?d+dy<&XPTAk2mR zyYJ35Ti#q(`cZiFo_oiMMGftTNsjeR`;gndtn&$7>$Ykl>N?4X!tHDX-}>QpZ2=nT z@}?qBmHQ7}6Do68=@I+7?JIy`Nnd~TCgdT5t`{KHRObdSbJ9zC>rNO<{z*1ZytD6N66#bLQt*b@JYZl0Mn|XEAt(l zFN?E;yV~CNp?WasiG2Vx3cyS&#>k(_c|PYO0<=E{v`p}0kD1>LmO3RFUhSi~KN#8c zzphOCSe~Q<jV!d{=yR@o?-#u2GyO|?YzI2l(7Kr(hm%tt0%L_Ia zsL;{ble+a!;`Y+Y+sik;=xo1;B5eA#$4)2baopEcuieFu+EDWgE90-z{ zo;pn&?=OSaYW<-_AFiZ7&2)GQt`J#W0Z-J#I$qs(s~J!i3Z7rjV)to)0If$SZDoEsoo-LO*Br z*FB>2^VVW}f1gWD}R~L`o zIjOAEX}P3yPK&!oqTg@i(C7hrvFN*7`p~uNw{aWFU9B@cc?%46Y@mXSP z*UMw`icW5KS<$VB#vK&A9vPqvx&5tr 
zpE%#ZMb*_WVsnEHfIFhNBe|5l+eO-Iqh4cn*blouGgDTPO0JH_I$!idc9ceG&8s_2 zCrbKZk6=w4qBCP5FsX8=ll{4vutSWh0}N3ZXWmK71epz|xdlocyusfnvqN;_$`cb0@0 z=?aHb%EvVm{pGyEj=vm7&ybY})a2Q27;hZ~q>R9KBxBugvp1ceIqCJq{J1NeFGxv6 zWS%@~M;QEs9f0Pd8H&aXYqknW2Mrp6zEf88m@&JXw5$LHjE9Xg?aan2Zb#;V=A#vc z!>m{*&IFAIbNbZtwz0@+R9|XU*>Hh3PN20ru z?-;jQvGK*T4`>me8|Q2Jq2Udp9<}7U>jQ;gwQi?uFkXzdL+9t-t=vH=XgqP($noaI z@ns7@I;QYU_a75WGUgn%b*c`94VJ|!amR1Ptug`n>_>W^{_sEw{N78ZC&M_j?HV}T zVO4T>p-<3yOFKnN>q02xlHa>WNPjAtjLhDdf%)1FtM`e)U z5I7VK2?;yDpEu<8%)|C1O+}|v*2Eayx7!&sd+r=5L_zk+OB%PZM1HVD%msos9KSV& zo#ApDN%-*{$>9?2x~bZ(Y%d+;#e$kSIwm{JL3MSohE5SV zY9Lq%a@4%_K)U^5bxiXh&rzqKVHa-P?2?AOT|80tG(T#UH~mtGjA;7|RxMz~E`)pV zr^Vy&zzs*OE4kek1vV?MG|ino+iX%$Vk35_^YcoG+8OM&65*j?GhL+EGSg61~CxnlQ{2eHy5lQMoegB4~pZbzt3dvN7Xk4KKmf0~x}2Dekq z)dDh;cF2eb;OiXf-h|+P_gKqKc+o?qB|Y!5ORoA9b?dyimI<^N9aw|zPA;La@i#t* zeA9eO_uR4u^c9zbobrztoWjz@3`|};$q3WuqQU@I`e`fm>8dpPd4b%Ijk|QR`~Fi* ztBv7;{^3m3(RJ}Sg?kPFj4h!@c-hY`bajkDOXNE$nsIT-1Y_ar^70wSv%Zp_m!{}( z+jlr9FV9m1WUTZS z0v$V2q$ARNIE0Ftp9iJwUqe1x*UXq+1o!VpFy{>GnUYULvl-ts+GYmXe}9yGk#xy2 z4=%w>ZwI)t3^!XLofWZ7qCH{A(2G-AgBSHp4S-t;$c_6nL}?T(h8X0^1^h_R)XgPv z?E)8d0DIHzqF_;a#Axz@tLET`r{V4m?03K-QI4v}~iuq@W0+GN<+Q8_UP9zGLgd z&W0Uuyxgu3f`ZkOa@KXDHvN+YM$x#1$!g&MBiylx=)cnZE_E zb}OlMJ)HDX32blLd5{#>oSRh9tj`qwjY5I4#I^rfHlBsDDaRAi#i|@j$wMhZ@-p*| zN)bk>>|?trffo~02MTT*#sW)_TAkPUy@;Kif`v0LR}jH331=kfB-3vsY`1ndd`PNk9*XNJ`~l^yZ1JU zV%7M-e$j3%j{89k;cUKH(rZm$*@L9ZSIdK_PW=F?#a1%VSXE?tb}`Xy3QI-z*mA&&iuU!U>+b@O+wgZS z8&>P@SKR@ub41}%2Ks2uPjpdmu56luk?)?8c?$s{Dg*%@gZ0B z@yFv5OC!hwd%mi;7Z@yxe+Gzan5{wP!vF_-V7NspMaan@-) z$!;>YlcXTmTkV~pIAzTB0<(bf3A1Eh-6DB)r+XjMeq=QRSwT@Cd@8`=SAaDA&(3{l z7+A*5;*QgikC$W|1JM8t z%SZhJkoRCG5DQ1DF_$7rbf(>4nZdkZ)mmWG5#Q0wyQOS6yc)Flv;y#TyR}&H@^oGZ zlWfi?jXXbOQal>IF*e^x^^>1P5O)T@N5n) zwL4*hjaBvCm3$f!DkYlC(T@uEUIr@|?;h@y9M-Tta^)f+Y;qp%)}PB|H^2&*^xoti zi>#EN3Z@m%O{-2o^nY*%dK=}lzOI`8`ZUDCFe&|a@qm>?+LEt=VPGFpr*Fv?@&%*> zb!_Vg*(xxX_9n!qXs&p&xjlzeAedE4;wNDssEyOQ^;Y 
z4lH$}Qf--A#dzU2tzG{@(Gh3jBP&}y*YR(F3l&hmtG5X`U1sm!Wyt?5^Aix~G$zXE zN-~!`+$sTzURDSN9sNm8Q+W0lv+{WjD9+a2!317ERfXMO_6GcTk_S{$}(6AZ-0u^oI0ViFXgU-FkAbU_9l)v&&~B7C^V%cA#c& zH!e}#@bYP3zyIXG7dym2c_7~D=jH!KUAUJpK(Av*jW&|gN!x#XdPxGP40ZAPK+SME z7xgdV23%Q@1L$=(zSqbL-zjnIm=|{UxJ)@-aNGj6Q+s&O4$@RpQ)E0 zu_r~A79ttA+uf2NWCPs9;W7sR`l1-El{X=R){i6u)XZQE#(BJ@jRIn;i8Q!*DE$bK z7njS^zQd<@Rw`(}A(rp2MBQdbH!bH|amML5`pg6Bh%yr!Pc>OMGiTbT7IPLO9acyVhaUV9hpJjZc}CWu}2dqV+3dxFabqX+1Jjm zcXxPTL?`3HC`U2630S>@*YTm7d?@QpKFfLixUM5%y`wIOeCYnXApF>_pC4nH7yUFjXtZJnZdpzdt35`E-uhRZ*VAp^2wEtzq{rN5dov5dDX&>@#%}#4mCwi`XDz$DI zmGCgi*&Z5plU=>LVpX5(-Z$!onpCo$Vd6fIsi&k|f_WMSY)x?=rbgbf{KqT*=KDTd zOojFhn(_7Se%vit3{CcMz0BNXYGR(I--}SogV&RVJp=4E4vF8J%|K^2% z9cSHJ;*INgJQH%E*tGN6kom&f-rDL?w-|UkI}x>+vs>M#3Rz08nkJXh{jywzsC@wg z^HVit{NvRA_=EhzwO-11x8YFU zm)y`>XCVv$QRD+27|z2;HMF+I?KzcP=ojCbJ{r=|$1+u#WYS->cSYr+I)rGBGlpjD3^$mb?ofCCh+Oh&$byN2Q z{7e|Fu)Tk4MoBNX<21dB|MdlO`J!*tq}^FfaI*{PbEf4@G{2&^alB?1J~c-!K9*zdi4y$&Y4Bi z5#^3-;iR-Nop=7KJN{0d?!k!DDv-B<(~~q>T=fa$yVQBA5q^JK23duBwQ~cPd7dpk zDNIlXk@>nzoK3=dCf!_Pt@$u`tYKjEsgJ<>3{1S`D@sTl4zu z01R2u`--pI9JJ|8#dih`Dsa=ve2FMs9)6XfoMN7`F-SV8m>t=;dN!^`{DXd56W=c* zGAl0K{}GpxGykZ?&5+U!@qT@-UD0p1m~S}*F$eU#d|OuUIUwD$x(K_?yKj)X(|Acg z?umC#|#>96lc{+gc zSP{_utx~8^f7>X2DwE%$eRqecWu6oLdMCM7{i{3gW|u%`f_!f5ko`kK0GsNUe;^(-Fem$*n@3bt)=J5jbSxV6wKF~ zM$ZL9I#&!Y+_*v`SYHqYLkdm0D?D1DLiJZ4a`GLPj93hm=ObcTZ#4UtBqC6YeVGq8 z{hoTvO}x_;$ZbCmPHDS9rEhFMT7zX~)h#Zr(0#WoX{*omP)UCpUFW{&m_3x`r<2SL zq8q7nEh@$K!_TFv1{0oM{N?5Eac)=>!<8P-5m7g&DL#dly5Cc$HSRY&S0*H7)7zbl zOUe?buvQJe4JjoI9hEVen!c5vQw-a@!s@A@20a8CZ8~-Mq zoU23Qhpxc+xZB9*)*`Mem9v$Vl?HZzhU#4|=fXi8DOKPioH|@0z=zCiK9xuvd29ev zh1!ef*v)|^!mdv54WUog*2Ww3RqeeU&>7ecLypy^P(LHDLuEEin|n~)8h=QAlCVy~ zqXKR%K|4S`h#UD%FCUC-z^RwXssRV1bJK)t{Q z>Y7qrX4Cegihso}h*NBTw`61En@}g4u;c4ExSo~*Dv9c0xzandbIS^AiU-rH+?QM1 z5r7Mhr(0l_nYeAfMsKegetRNM4P!l0XO>x&Qk@m=fjN?TR2QYvC%%d@pc(ovQisIknC@JxH0cylFR)#dz2#pk z{%oy!=eR@U_Va%4uL4%TLBG4N59SEhmuLymi0r1&(P3BdpOC_~X;pSDpX1VZR>uvt 
z-)lbN$^Ncjb9)a*>u57Xn?DfPHIzTe62lF8Q=ZuUVg&U*S=g!RsyXV&u|P+^-kx)B zdNcf?Mn)*gdSg9$v3I=*-BG-|VQ#)R&3!U%U>?x5RJYhqyE|assIQdn>_Ft>bXg(5 zWshdwrtO>dk+%bGrH+%e{=Pob5ds2+5MT8?d|H(!T}8y}ykJHuuG`l-iIZ;%wR{hW zi$$2=q_S!suZ`fNgc*yk^&oA~*16SH2mz0cwN;xmU^Wu2{4>x*6xU|{YPlV_=xSnb z-*aFcFSuvW8o?m*VmmO}3K6w;B}ITmXPRQqWM)Ga4f{34Jh%P5p;H7>%O2bL5eepU zT}>U*|Dq_47tw@?YO!hM7!NUgd!teKFPHW2=i-=LJ~_+KKE=W-1WiO%_e%h^b#j&% zZhek0g#8R*_dT;NwEf4yA%VC&os!D(Ma9D=&bh>glL>R17!S75nO_Ct&@}f&Ym1R# zHJR6wb%z_nH@f1N(Sk7Mhrv`)oqC=T8!=0t%&WuK5{I`=)Nf4ZXj|JWn@;Od+UZ`u z62gEFbdP55#tT4I-gPdzJooTQOY%C}bA{hvRtd%>>^79&-RM)rKLkyP&4UfyO|Dt> z>|ZP~R3~WanURQ~tC+N`sBG&=V(Al=^6fO*dmO@(iNzq3=m({q6^l<`T(RDNanSxxJ;!^J9(idlA6m{sdHgZ;71LgY4T_?w;P z6n%i(s82_BpG2G}7V_fS9kJJT*OHhijr++n{e|fXk9zIxa9ltd`B2G1r|?bCqw1Wp z+@RZWS^`j`vGp0Qky3E(-TCJ%-zuGk=(1nd`WRqi;`ZCm?^How^`E~Z>Zd8dVD@xo%3b3f&n)z{RWwJH6Q(%V3LD&BuV-FhL#-Cze{{GbIeP?Mi_p3x zW)|yKSC#pE-#y{YIfpJ>nWNf7NiFjd`M@ZNNq1TU=MHd932fQ#4&D3;PEmmC3ao7R zbCs@kKRt1r1vM&MmG_+6fYMbhm6{PCcsQBP*tz}r+zh?fohV3{Uf}4mGoVAwXVIWf z?Gwj$#P@S&xn7Lw-5G>*7d4E2R6M;_{EeWJ4?*f$zaZwxc*Y$H19alsi`3gUy~HcW zuU>_yRsgOIv~t#koeJ=~O9;e4;+Sgwg`K0yDMy)1SKR!co|`@viiK~Cey->+$MCe7 zHF>dAY*!C)cpb%i_F`KxopUCjy8CWhE7n}1$2YF{&DLGo54xsir%ZxDk(1Iqb8L$c zP8lCdBYW1my;@K*Q!^$})0F{Z z=}Z(T8geli-w(40_ZgM>T198!UG#LB_UG-5y?h2l%c}Sh@!g|ByMXBIw|*nZo034+ zt0!F53(_SmPrH^BqNrYJ~cJ?t!VbpUz36l zUhgi3>TLAPs;;h98*hJeUx;f^N2rlhWh}<@c2f`P>~uykSFB8wsMaO$8zp<}KZMp} zwYVdJq3pxU)i2VYJ%Cc18w^ccE^C&>I9u)AE`~L$)UbS+N|g)8OF_mwQw&rae3TK^ zuMw9xqxn&o?|A4XJH^sGoses|-@_-t-Z3*>EMWCJHt(1vHTPSkpz)&TLxCRnkxZRO zi+;s2cujR+VqZ;x69X5Q%G%06=QSshiVK`KjIXMHxq5nvP*9vmfi#9fMVO%JZM3=? 
zb3R=lbj|CfKT?3LhrjFNgPQT>&$o=l055-`yH`QE!lg#x*;TJ`t<`j`i|uBrVwQ1| z=wr3Tot8tu*F90z;F->f)#53cONt0|_3-lGyAn&8T#jsTNiJ=B$Ks8soO!f_<(hO= zH5h7C_3Brt<3;M`dP#-qao4z4oUMDsPfOC9=3YhRW!}2X}A%Aq-c4(x+srF_| zN|#{;r8k4l{-LK9qeGr54=_<6SeCw%($`SnBp&SG#o*#^)CK%uHQ zJ^lHjI-o0>7`qGuzDyv^6Awy{Ek}+w30jj_qi)uxWeLA_F1OW*8k4Nmd<=viG(Y+o zN|bJLTj+zVT0ikxA+{j3NkXUgoG%|hiF!p4vHA*xp{=)+EDnK4o^f*{>kQ5}kZ$x&)osR)(Dkk1{jBWlrGx z003UY|ErVN@!%&bo`npzcz>kCyzIY$PJ7)EZkzdf`h-~y_e1>>*j z)Bgd2D?{NIPA}AdO5NZis zMBF(uMJ$95CkiA1uhR0jmdS0m&COL?ke)f66miFAu>{i|EI?~ev$Zy%wlUYa$|g#s zb(yDWp}>O|6vT1l8n*mq+eRyjY1^i{?Nn2w{t}to@ewAekMUE|tj-zC*3n52%mH4n zb@KY(C?vmUF_6COc9C5TDl+f8c$Ilo-Q>dQwI#XKJqCiif{-RE5)W~1%75@A(Afi{ z4;VV?35vUlkQHC66QLSX+nhzQk}kiD3tmq*m^)+2?bl813X|LpBcwEtyISIi&C}si zs6gfSHFoz52Hh|&Wi)*>S%zVb=W&VU{+SgpbkA(B_+h$ikMz7uG^Y(E#3Hl@HpTN- z4e&SN?zbwY&j3Pkk`vwhGbalB{#4wOl-u}~Xj&gM^iuf#RJLls|1mumtF0E-V30gYc{~YKK3!1T|m^g21R8O3uH`*Pj#Px(o(ut-e~hG&4)&AekDnl zstZYXFOpWr^-f2!nl+V*Y>R zrXX{MEu5fW_GljpLL1hZ{+5`>|W|85s>T(jr2e3Dd-arhE6G}k)83-?=u0* zG>v}b`(M`acYmF_LT-FjD;RRtL+1sSc|A3a;f$wnS^n`y5#2|J$Os@TF8rtTFHYnb`Vw3S~R&vtV6$ejRf z#mkKGKI3yz-XAT`xH=+JEbOMo@Pyd z;naKcfPpYmK@ptE*Cdw$gsba}tGoO^!u20o<$r|hU#jJQgzG=F%KvY|Wy6ol@5)ugyjxhph*IjLj&8J^iX%i5PNyKdT>4Ro+xYP-0I zf98P8ruZc4^%1$dL0jz5r#Z6I=LtYXU+YPM>WDa-1VdIt?MB{70{BNs6x)0tw&O7H zF)Qic8w4)EBA%o<#f1>HE8{`+k1UbzMiVf1l&H|8e+(l23z9?&Seg5`f0wRA2q#!zDzocIU;vmY~|0)Q9;s%BIqInZ)D% zG>1QAykux=%+$=Tbe~GM?YHdhV0jg^9`aChC#F>~%3X&q6ZSCCrZLu6z6NuR`A;$G zF#Y)O_QLuSCxnvq%ZQ35vEM^q{uq+Ccl=g6_&oc5O1svSoW{fTm0`V1!ck#KWMcJk zXsL&si?T}J0n7NUo?SUwJ25f4GBXHSGU#`#ogR7aJXW|gbJveCF;V4cf7p;)`y0Bd zK!fC**jhv>FTWT?%}8=It#OO@xa>mJ&UrFV*<9@DM0qeC&Ya9`M~Q-nbt;_^BPZrF zk<_<7aS0#I=`f4q#-6wTuPy%Hum1nUqFv%~rZ?Wl3R*fk+glwOAtxMznXKx2|+y06`$PQDqyiZmmzR*_1TH^1^mT)oOl zEvl3!3LgGVC!4$(N0v8ch}N|uEb5gz0g#^A^1^y69_DHndz=k^+OKx#b(?w3u?RZ7(d?wW)Lq7h=%i80&I#iFs6R(ZM&hR)16TM zzC>Sn*J?Sz|0|^Zt3>A*FR$jDiY%t9nv$B6o)|5k%(o04QzL z>C<}~uS#|W(S|Ryd<%g)WS+3S zVcN#XVL1v_2aPB8F9YB)D3s=$+|0l+OTqL$LZ%X 
z{5HVp95#|7N*3vLtE?5eJuNvgAAQ~TNs&h~USRz1FMSQPp>I6>$jEc~6?HzwHcrw~ zz%#`F_u)lTX;YK*ORHH(foeT4SMJ3XXw?fwP7oTj-(ucp-n$6!VSXpdGfC}w=%J?; zsm$rHA48w_MkV}yu}g}rO#Z%Z{$T`q7_-UycH+lVL&?@H1AxDh^l)wIAdBlpzmm#; zcZ*C}T6e;edEm@xQ+wxoOQ!GIpWD3Z(LlDl;P~aUn;cF1>Zo8hfEw}w@)Q)$7yWk| zTgH}8x@BY=`od4-Mzq#G09c}Nzk=22rY>-_djqVwJm3r$P;PGX8L_rj#7++{2f2rp zm>lDi{MVa>QYGxX5|5G%Z+T}Ob%$YGQ~;pNET!Q6^0~UPhwldC{+iier*g;Uv_5WR z8HZ~z?Y;fZZg^E_kZl(B7fSr`{~Q?q?l;|AKn23(1J%*Xl zQ~18n4dSyny@JE5T_&}o#eVgUqF~HQ!GTJ5WPkG~X0?7_e3T0oKoA%DvkV0jM1?js z7W1?T9J=LwmoYPD_!4w!@}iKzgdG4z#d9hks?bOKThDB%&c+q5KkNemy4}xLEc`Dm zfTp-w-M=^>+B{A$VKN*k6ag5rrX|+>fd?enz3r-rO8-5L=iyN5PB$uRhZD&fmggAz zK)2NBe38?1t3|DMEI`vbRYTnh3fTl8dAKVK?#{zzk>}u z4w`vEEC?bazq!Tg0nCi6@8UrWZADY zQn|1dZ>sjR@rp%BGY!)>fKbn>^bp+G&ryg02&q&P4NH?Df9-0k)Qz*fGAI>*tDY$B z(wuxe6+EiDq-(J~tB-5)c1dmbgk@hMG1AvmxEWi~`L;PwGHZpgy*4*k{HzxqU>3v8 z^4h9kH%ut`IN`d>M4osfQ(=df*^&cds(_GTSz?4}pRm4= zVS<9U^T)$y3=|j@AjIvx!HGa>e>`^b-|~|z znzl_1-@r5E7b!mI`Yv6Hsk8LwW`yUGNJ2SIL<|!+PhJabcA^l5oBNmDV-Az9iB_;o#0wr43^X+U4(Rhp+&Mea%dctNy6gZn@p zZuM2G3EZOO$xrqjImhHv-H{a!3jre0gH(^*ETIx}T+_Y5-2sM+d<->gUd(#Rdp-XA zQFA(~6P}hA-eM+E+((4i`IA!!#;AKnKbI#YzsXd?Tugko-3;*50L)5(aM0MFpa)wV_v7*Czu+GMiKD5IFgDaJf2(!2*_H!Qv zo_gv|t_NsUjc|lUl-iQwG`0e5&58b`Eiqrwxzv$2Odg*4(Tg@nK>sXO}oR zggSuHT`nHkg2e>HBjd?wf|i(pQk(ZzRKb`&{BIG%vI)7Dr>fTY$ZIofg3t3!K2zUL z-Qltv##pjN>eTx|F<^}M!NCwtR65>b-cJ8OyU!Mug}AqT#0^pSEvEKc{5R*^v}Ll( zsLK_J;MgEnN9#On$oX6>^P-53f-yNqmqfp_7fM|-Qe3W`ph6a;3eAr=wkRYE#qKGe zWgBVh{*cTpfKggetH>6NDgmn?^)v#Xl88`>B3B==-oh{`;^S^+qI%QCPULlpuMyOq zG~o?~B$NCZniSghAr*0SwS zOFu0<;x3tZ(97;TrnBW}a5&J#W3rV87uObYHW@B-43qIb?9ZxNJSq*B`t9h3-ovVy zdj`~;=>$61CZ|;5p*`8$>ryFvqy2>At?C71Jp+^ zMC^FiQ!Pl;Guqthbegy`UkAk(+Xzu*6YxHV?Wn{*Mm7@@oC!!Bktv%U`oB=LOq)$ zlS|;tGYAL@o%K{JZ#9SYrgX;!utt8WWw#E%zPTVYkog|%FN-a`uwIp$$beGTjI1z= z>;K4NTn=(kDEP1JGUJhM-F1MmUt$Dd8*y*;)L3yZ z+BxZZN)V$!*|{n5kKwR=2(AGV(*>0Iat?{sm8MhJx%ui z51`dHlN-WdtJ^KR&5hHG`O!m|)5X`yMsx3Ocrs`{jP|GOSa5be9}c6N`FTCOx5oN$UgP<%a=-a67k 
zn3grSHD*U5Ot1N?Cxqt`^oBDX5*}%%fgbA2K#laL zBGtInPji$^MSdtf>R^ef$mGjs^9;;-m28i#rFsgJ!uG~-DLg83VPAKr(xj77)Fi42 zw5Gq}S3>24Pkmv*!t zbSI#=nR{REhEblLA%GR6NUMvU(AKvATl+8lch8SH{*t)t{DGyFOUkLjp!M?^EvKti zXK}SsUr*S!M_#?yw4Na^8i!OI`#`^0&5(mW&yn^SPr<%gnJo%w3uhAJ!}7RpU%j802R4rYsEi_j)@Z zyAlP}8)bxHkL>RhFCw&hB?!!`iRiF~8sF6cHDzE1l3}X&U0W;BM;v_A0Mac(b6fUD zd#iXlcqJgaO%mc9mF))PX-@@T6yfo`I)EYxbogn4I=>F0E*!8bGp>O)CJtjXnjcRi zwdv{D&G~1I&&akETF18=*2eO7qdXDWb7@zz{BLmF(d^7aK2UbwRv)Vf;Bp$sjyAhu zslAEoccI^uYO+5&AzaQ0%0yDy#SFJNcI29{Gn_ULg1DK{nDHW*8G4f=7v28!5iQ?! z?UcJj=SlK~9&4L{D5yk(~AdtaDTQQ}hLpGq`NA7~&kjd;;*#vT4b_|ZGs&c# z!XR`nKKUc+ai75S01zdd6e)yN*Z0-ZuZufUHujP?cmDc%%q_-Q_t=$$n-V}n?D>;- zO{;v6qIXJZRv6atgh|VgmmKu6Vb7B&Q8QSQ&7+8)9T(NogH)hCB;z5B$S67eM|YlU z`W5A;hVwK+A3`4?aGhXpmsIi~yFKRYob$6XjvfSnLj zw=J3F>UxzYgvijw4xgNebgXa(-t^l^E^=cfRye20b?1zGWOe$R~ zcGY1FUc1dQ7K?MpSd-0DUV=2LsY%SX z4yzmxGez8TMQ=$5-|>MZ++C(7jN~P$v8JNt@dvN9@1wJEj^?>la1jQCaS@o-9r0<< zC7U?DC^#mAb+@$3odwwZP*T?Nk&Vvi?e1;J&}R}eYA2%~=2>rppxorS#Ux2hA8dV+}a1jfE`N9Q31GHAW}nBIZ-av z;}T2np@*AMxUMSEd*wj#q&4b*#7QVC!MDGjgEt%(y*GSksDUU?M{H3#eixJYIunMZ z1~BeJLe965WE|j}swULFT-R_B;6k1uP~hAS=xj*=T}XKJ?@?r_Gsto#;V@~Hs4u72 zueeTzb-aI~2W{jd!zwsV;{uj}0l^7{3yQ18 z8+7O2b)x7sESQ>_Y0J3msIZBI|a8cA>E8b3IwozN@LPQR+!%dy(rVX~)s zAZw4y$`b)vYv;2E&V^pKdLhQl0}i>u!KmoHny_-!?cE?J+zI`)q&{#U@R-z(Pmx|V zBa|coC#~{Xj)YB%m@v6Ar(ch=>S`cI;@IWRpT!)LKtH7fUQ>cw%knH-_bKBXBb;Rf|5j0R66lLK6qTjf_gW2GIQTLOE)f6BNQajg?*zLj2wQ?L6g ziB&9t0$t2izfqoRItv;UeBu-{%6T`kh#oiIdJKcdWmjsY`L>s4*!gR zJo@EX%j*g%E;QIZZ9pVN-0fs`x!T%5ofPJ>l&pR;e8qCANZ^i9X)sZayaT@`O|d2?1h7G$Pl@VCDIYgD#^9Z4 zkX9)~WM&iKv@6W1{2g18N$*;E9J?3Z`)zYvY(S|T%GsZP9}Iqpl#jKr$Mu-cLZT52 zhRn+f{6y#wmD`Oq@ccZTOt5dF(I{?r zKuI;X<~BB%`5cK$F(M+KDIdg@A0o;W8l#g3JmYk%6f=l0?{|)L?BPgIyRSKOdUYiW zKK&@2Hqt%j8m_A1aDCzZU-s|B3u~VdV#RZwK(4Ve^0>tG^$FBtn_f4w_SsfM{<$B8 z0NUs5GeLO(=D}f$h0RhQ??!AUNrlU>@VBJsoNndZQFok~l1$Ci)1ZyCgc4Zy96zHg$={WDdqnP+5dfhe&_$8v8wp8UXS?)kcn%8FyGKv4nBjZn@aB~HwbtxDs2TasD2OAawLUglZf 
z7PKb`Wh_kBM=a{>CUc9VdAEDjwO$LQk>%^Hxw$swf7%_8)9(iBR9)fn_vT(9M;zBgc}k5S1eMxb~+Qnze8ltu-fB4Xy(bpT`}8xvX@*u&}o58@;y44uf0mV~QL zDa4o}>}{*|vRJQqC8`V_J*t}K+#cfo<~4MXL2!%-?Q`0v#+ywc;PfgN(}RN*qTtSi zN!~abHQb(JGQzN3U0dbV)OkV6qUioszn<(xqf_kCDRZD27QeLdON0}-TjL5NI4yJH zYdR+wuB7#IffplV51%*Teg`8ZTCQI_!wz~>DgZduIAU-MVSZk9Yd0=(1)$S30M()4 zCf{ z=yebfbNvnACg}qrInR_~D^rhKHz}NUT7z#{OKWlg{OffC`EZ%&{Xh@QEaqH?I1aWh zB#TC0&=`%Hd(0SExoip>H$V{UMh4anLbsKHLnUAfAe57g)cyq&{Qr)pLEFc0j+(FF zows>3r-*s?B?x8xdoddg%3iyUhN*67?v#vSK)^>`m799vEWdnxN{_>8WNQc$3+Va= z{$fciYlSIvdpqjFxZmzRF|4Q0k!qSR!WjYKLqH)8NH zFW)^U=7b5t&}H4pW+tO+*24`+BR$gh7MY{KG06j96FPS9)3~;Ey}PuLNe3SjT^?{0 zGgc1$Hqpp`4<=CG;MQMC`!4((Y}sN|Kxa$1J4$eTE6YA%o44-i(W(T1#OIa~FDHC! zAUc1kV-McvXYCwa8_$=0n9NNZIVSB=UUPq}&q1TPn?qf*FAtfZFg37v+)bT%_+aii zH2Pv1kaPd=)pTzGaS(&Nm4VlI2eRph^+vq%%=I?HDuL+Pr7KJTjU?6^oRyPJGGbs4 zIV?C04Zo1yrC&;DlyQHnundZ9JGb=##y0goDayb3Ey(DYrx^RT4tRNC*_r;q;L#0w zcEDm`ncDFiNN{f-pYzrSgP5)+J?E2=_rE=07TT%DSyPpy$GreH$yUX1ej}tyH++wY zZOU6ZFS|7y+^-kj#>^VcWxBX+++4wu0T~aXK)T-1VziCiNtF*=dDKOA?4<1)@w=$vy1t_ zLHBG407B?8@o5Mw|6BM6*8vQs>Dx=Bk9#@v1H#&MD}brk{EeLvu{MAKaD=guw(ZM3 zKm8HlY`CR74u>&=m%JRET!yTDk>2a|N6%fV+)+S%ZwZ~jQq48`Y1HtaWnD2GLqr?Q z13TLCdO*xfrMeR0$CYxZb8DBSz8-j+0-ymDC*ijmdCK<~46}qD&?^);HZd4bfkzg7 zZ7Y-5P-+ofW~+!>5pO9(-hyCzce}h*IcA5OpE|O(U*ywbrX#)P#Gjb2sO{M8LjWD= zKGRv#v3vDoN)srZkDQJ>fDY`C0y(VzEFtK#J7$13D0TuVLkiFG^%~^S00DSaPxo=g z4V^G@&L26&PV%qBq1s5S+Z>-?(?eEen#Pnt%!cy)%`Ycb?RWe9W-t~Pym9gT&q9NP zgE#^-9R-1YySD{6vi5Pbr+$pryXTy&&U!I3b77_z2_FW$ZD@qR>)QP0V>xfU)s2Xk zF75pF=$-f-s}kJa55ujd0>|5Hz z0It;6^QUQUY446wJ^GT{J*R2NMJQUo`lTJUuG4tHy)?k~SmKXfYNErwpGnv$bTO90 zae$e9i-9Ow9tC*S^L_&e0s&b-Awmn&1Cqg#10Z9NZo)~8*d3_39#}VeJEE1OY1-#_ z+8ZV-4d_|9<`l!bi^r;Drna)NNCoD!si}}F!Qiq{>>feUhf>UNaQH)^r7%c?creg> zWONh~M$S8qizVe$ytiV3BlLPkht<_T6GdG9evGuhV_X%WBXKMqbi8qY_nbWZ?qgME zSo#=zDmz_QYQ_&80^g~2byyZFQj<=>DP1SN{)EXnhnzwp>7t>&4`y#lm*SM7Meygw z?#I^}K6X)YcYiBz#(a4r_`4WKQ$>EXEM`g35X4X(s;`paH}E?Vpc06^MprY6t?CTY 
zMd<%FhfuX=PWJ0D4GXrgUs4c+7?+*umn>qAQ9djsHXz8)nZtN}8@^1zP&G|+Q?IAa zf(=Nv{J?MQun6238N@vCXLn-;r`D45F(#olE;8~K*Zx=PdQ$BfCW^;$N-`QSEz>MiU7 z#})7_UKL7pVxMjOpl4Q4?Lb7ey#9-Y^meJ|s^JWmuXr0#3oOzorbd|%*&s;r=0k;j9LY$mB%kfz{V<5hU%S-D+Xlg6- z9bu}^quKgZSuZnZ_$BBGXIH(T0c7YIkKl{{&83rk zprH%5ah(9T6^8ZjV*-a@TXN#}P^KqlW*OI%-m32$R9gaeYdq~0@gC2zGB?{x`WtuB zfzAy(@=ilnX`)%ZhsM{>p^F1KtsZ`)E(ucZ#uy*O?QFf!UPODuV96(Qd*Ne_rpE&A z`fY&UW)OSZr>O+~>V~EN;a78Xz11n@FPn=|ZR~Mq<5w1dClcZd7|~^OcH)j@zxkc! zYs$ZhbAeT+^iIU2y07B#!8JBdSxx*e9BCJmNy^p zZC$yWRFE0)1ZX187Z(V1U+GN(8li~p5cqxB%zf9Gmpv(ia0g*_)%lO-l@SKLe*Sxv zm4H#8;RHb3$kT{w|4i;yOhi@ZZCukACr*$#WjcZU15%QLk5QR4%=BB0ZE-zwnS+K` z`~e8wOPO8-;J{5>w~KNJkQLrgD~H&;Z!{x$@dAb;m0ojq2VE-pc&vETvb#+Z?;1;R z5>?L&nB;w0=<$nPo2Ysw!_l9MqQyMPIv^(b^8&mdh-@eV1GhB%9C>(i6mv^DZGVsHB zqsu&zxH7)QuB0S{&{70Nk>A$HaRuyh-66If;3d*HO&4aGWhf@I&+!}A_yzCVs8K^cT}kyX0LKzHRC zGCM<_*aHlB+Lob1WF@&NjaT(rB})=^Lvn_fd3HrEs!-bW^M%0flN`;w^^xvVVNs?p zzkTLi!v1KwOyaC|`$BEZio@z2z$CyS+cKgtN?tM3n5y&zF;pi-8TC@Bs{Kf3u7teF z^sG}AIz#mGz@8}?d`%1w@T`WDiHa2JV4XE+3ubb3ptd!k^l`;a;bzaIFRxsq^GHDiN6Ko!gpF6x3%r*Z%I!lj*m1MU)*~&86Ko>QjNMFX+iX# zavm)eiD0W!HBw_Wi)&W*vosfG2gVgoPn4L>$_Pt=Jul@xE<9DsU!5~gDzp0z;qgz` zyU;M824o&UhiPp1oz}36;$MTFS@6ktdiErC5sqah-=Wv}gTwibaz7PUzDjSWk6!}V zqnRP%8I|nF7G58P4{i}J-iZ%}yp_?WSS6TY*A)zlWLuUeQ`)+g&xd@O5{AVWCjMUH zQdy5<)L*3!6TXOf+SBs#F3_G~8LaESZ=Kl&N7rkqMo1PsOW=#bD@OF%F)uo`=woLV zG&wSkX#_=YQ;*UwyDbYWr`-ao4ZQmD6uwD~8N;nu+07!RP|J%bwXS5#geny|qX^eE zQ|`7`w4pZNGd9tv<#t$!hk3D{W#AtPOS050{p*!c)%aOXMLZp}${XTLJ2j~$MH@MZ zI*j&env=0)w*Wl9nO{KyuLqxuY{hW%sFla|0S)(SKy2Y2SuQdHvb|j~+{4R6bt)p!QtDhIm_txU&At7hRGGGOA?XvFBb zAux)9A~($kVCCid^^&~{&7kn6MS-giz=OZ+P(Dor`Z8RPp`a9CO{#4VG0s9{LoWz* z157L(jqo$ItozM$$o#ypRC4Ci#cs}qv=2)I^~bxWwdjhlv3wq}l57IQD`f0<=;b%U zXBs_rf5#38&#Y@!b2+4cnRf|`%)~bJj^poR#d8aEA&rQ0URts24@{=e)&V4#Z)UBp zarld|CowBCN#hNubP8wY(35ak5uk%wYHLtF+UCoin>=<58MRJYmbab`4vozG1SL4$ zm%@fPvfOkvk7UW=mfEhes111hI0}lrwu*tML}3!k_}1zV&F?Nxkk(C`yhaBAR6|w4 z*DYcMSdIq@3rJLB-%d!9`~7RcM;{NFa7BFxPuCT$_7v@2oG! 
zDfV6!6TFI9y2#U3W8fHP0C8?#|8C19w)!#O`PbE9u7%V{++~&q)WuVGOuw#Q)e|N} zuD~T1HXL>Mjg0cFzJ7E_Gn*Nr-*`~gnjrUK!_U-;iloo6PQ}Pp`vH z_vAMimVRSk4oDRd3n^RIe){o5?s|_`ILEv{Y0?a{t+q7)dmY0cN5fp4;$|D-*8d_? z!B&twVFeJsg#>6L`}|TwtomORW5n9O+IJeSMXs<=Ba7Po>{lui#g`smw30GkoP=zr z;Y}Z3y4$Ir-E(yqx_gxw5$a5_8PUlIbC7!fOX=A+<%mSMPrKJw*Q`#RsR;W9%yq!P zSuvi+vdD;>g?*pG?TB9NncidaZ+y}-n^cwizNs(gyVBDnct!tHN-3SK+%oiNIBv0T z?)`z3xo4JucaBQJA~K&tbEu*=;C}nqEoM-tLV?cI*u6CJTQ8&5cR6WcQzqDrdyzyM zq=QI9-bCsS7kFIXcWv4(#E|12(=^Jr>E%S4%sDq7-GQ$(#l-(MzY#BSOIgC8~c0Z| zE*RF1g36I|17gw{6B@>^>Xi-FY8IZz72#Zw%;|QH*fv&f+@8p4E;aGYx1L`T4^Hg+ ziubK3Vuz|7)$!IOjT6JicY2lnqzAZnqX;nZZ+WZja}zfHMap&x^#s-x7L{(-J4v!R z$x_q(>+>U~r+`E!&#UNyZjsI2xBOg!mUluKGl}V0acY_^Y5}IT z&o#e@KrnPnS3U_Q3TY&2T)qvMS_-V_T--L&WHi=XZnYrtJdRc;m`LXZAWe3_{L9f7 ztZbJa@i)Wwv^2jwAA~$De-^rF_o$JoRqC_w)3Wkyr^E!pCB_lG4Cy)I%Ms?)B3IgJ zBugA6SI_w3e#U}+sfs1q+WqLVd`XQqUpj(S_Irh9@;`H<`UA{wgVnJjfk}dsjkNc4 zgKK#G5`GQo7M!d*u7ssTvPt%XqgM5A>*Z^HKu{~Dv(zZk|?Cb^$XU|L=RjwLt9U8eb3c_DuI441F6HFQrK3uTLv@_IBz%P<;UR zL$bmf_(xyQP_%3wL{$#&m*XHo^6nrq*xUI@(%saH``cPSjmA=l1th$z2EgbBTtq(a zu#@B&(c6bZp2NOKi*i6ZqCP5^xJLT8LkXX9a87qWOu3Jt&e~$Fn<+z%^+%ag(BX{p z(xY$$>FRi+Zx&S6u-sq*Lz}bC=`i#Da0G(h{}!|dH{w_9p&}~xG7B9rMHIKF{;zF| zsekQcWcUsVZfUwolN~22?be7lE(;|@3?}+jQ?35&(s2X9bo(*e5a0_*HE-QG&gD99 zoD*qDIUC+4a$bGoWWHI=i&CGj2debMZvnDLdiA@X8!%7K{yFIboM{&TeOP=(2G`$m zxQ9GIADu&b>`BQ<6ZxIx!Q41eON`O8jUPuy!F(nNRzlu$>60s`!MJlW>T2X4D5-xd zeLl4ve?EH#Fx3=*#2)ekpRXW0rjdTqMJstq-`V2UBw}kZit`C!QW{VlRGD{V0mh{= zZj&|*QJH*tn5-+bR@;R+Lo0&}wwezA(fora!oYVF{g}9Z94qd<0KPug#`cSpU^Y#= z#wcg|-rDMD8Lx`}R%#yLW#5Od{Et5L%<+fjJev61B;(#^0PoSfWFV8KLHGHLm_f0T zg>r_(lkNCFv(SMn(l~dcCFKJAWXIp@bIt&ObB%6YkqbW+eQob)n3AFf#*fScxGOu9 zI%ER!s+D29_{vOG)l0zgbj&XG!ykb~;45?im7J!A>(1Y*%yWpANaHHIDW5={53Vl|53NsBsPVQ}g+GNXzgAID2Y_Y#!;kZp4MLJxLwn z)uR@9?EkUzaN?xXu}F=hsW9Wh9|QH@cmIzc0_MQ~X*55$_V+{k4{q_l{^WnZ?_aOg z|E}l1UYGyfFaLU7{&&Cp>vj46<9<1HSq@66&X9XEBr6K^QR76+6|mI_5|yr0md;e3 z@CCr4@yoRf9YO#4j{o_PzgKx2O$-i?d=C3nj(*Oy23~NZ#4KcRQnvbej)||Gal8>3 
zmN%LgdQ#^zB>(C@(@$AravI#!#tJqQ&4l*07D-x9*JGhn)}PEva%>_dHmqL`RLNbt zUt8m;72_)Zaj57#wv9;@2?|TY_)|f~NO0MH)2gBI{;&k(Hit=xVL{T1asy*QeL>OM z>Q^`Yww9jc=f14{AYAckuYYq}l4r5Iy!kw{7&slnq?Im} zotabiX?+)I#`D#1!xzl}&(Df|?|uT~923OnZ?RtL+uK$(jT-#Q({d*cxDfo9djg|A zr(CkO$Q?b6vHks!pFwP{{nk;1tw$M))5mNN>{$9m*S zcWl71pSyq6EQ4v0q*#j_uG>%I?a#m1rv}al1sZeZHsDlUiAf*k5A;ujz%xGb0Z%GWw^xQc{77exB~m`C zE2c!GuSKk>>C9|;K2O23tR(#sv_|Z$(1rbzd$v8S85N#rH>c^m&{xydKVbUhfNWV3 zPumGoOjZ=04BDMm!9dljxDW#_%P&MC0sOepYz)uVD?Q@*Vd+{E{hO;SN@Od1JGq{z zMl1h6cAWc^0;J;?_0UxW$du2gTE4*9)LTRALBI&KaqII8h1FAY0Fh(dH9=7S0cCsD zM)gHj&%PN-I*~G#)?%AJ<(5Px*3Znu&}Ugd54iTezgRP^*>~W4)txFC{z80|yIlP8 z5!j1x0|!g)HOE{v0#1g#S38EAt>1stT$pvo%g9yb0hQm-TCq?b1pWAnO{mAZ7xSdB zIZD^!+R?{liuUkgIWoUUQm_~9c1zg3X^aa^L$LQPUC}j;*=W3eWx{WA)D5U$^Bl<9 z0QSIz-JDaCvf5olW5>5FD!oEM;Q*4~P@)eUL-6Ju=as|p+&EcxZr=tLe-P2s?8_~&Q2n~#T;wW_;z0`yONRB`;hf8gR)uGw>vUL~#u|9s z1G;{)Rs{a%w~+7!-HR4p(OT$o^O6RC>xL{Ar4IF4uHJ)On%>1F1?Q((%CN|5GUJ$e z@bv%^$=}ocE*tU+___tAE>>yF%W`lv_uFJ=Jipxn(~%?|YFi#5lH2W!==ylMyHl0W zNHL&7aVDJrr5|=>O<}1w-Bc}-gP#d?^gJmIU)V8FaUWEF*kHe@-bl59Ua5UGBVwy5 zXGk36?KT(}Ozs1L_w16@sp0F)95Q2z{2UkEIi>w8sMAG1hE*TeSCu=1>#Lppkfr;6 zh^Fl>&Yi!50WKNn8cc%T#(06Z@B3PA_^nUFB>qtt{Bt<^|Aei3634aJt?g-?8 zFhq}YZ2vahCN5(4^vT+~rw0^BwT#mK%*7NpGR2_%CoyJffrEykf zE2#1ow7%liRo|WJl`M^tsddi|Th8A;Z=lv+z^JakoH0E4q3_j^qc@7wtiZ$ABjk32 z7r4~7@CU(*sPI)}m)PS5qINRb@`ssM&+gH^?Cn=k&s3_PvFmu|uchE45Aj#{3Y^zz zA~4oE8U^3`{VmjZ%S2N*x4F~{;NN@V7xN(HhbX!R!1tmx^Xj>phA-~C{XZ=Oxe{IA z?u14*sk}YXno}GRdoRmj<1hMLWT0E)*8RPfQy^GAd7*lx$_6Pb12@0*Me7cFDnNF| z5$iE^@DJaedO^&Kj7(Wxzn<374+xNW4?&XTGN8Sg; z`hpaDt*nju>LwC5wwksPT~E%Szw9#BI1MXDYpEu3oMIkTNYf#t;Ih6}<@&8Za7|wM z&`b zxsQA17nnBvroS~Pf}RU)wYh{%q6k{FOSiClwch7*epe4TTRaE(-k0h4qn*uva~R`} zJq0g(av_%k!In&TQc~b_8BteVcZ&V7l8ObGbamOyLWnsUI%h0kTv_M8H|l>?#-p_S z)`F~M|EH%M7B7>5CFHo$^lS5J-oparX7li&u2b9V4F^~D=3`oQH@~N$>=32|hA9lMX)WPXvL#f{K2RKOUf#Kw zoTh_$y#DqTt;zl8jca2fA34ol7=FAYaC=YFaL{CwxHovwf95VkHKn0fho)U|vRY!4 
zB-pumS*4qL{Sf`5+VfgC*){H(e5GRUO%((L?~LG#s5M=033Ye%4pDk}H%-Rrnq0Rm ze3Zt$xzRjwer0U&{M4h}A4}6!4l4b%+P8T&>H~uTcGb8q(Y*G_;O9p#qwDjxP&fLwJWvBBH9s}I&_ zH?n+0=k_4OD!UJuO(4p7ZkkF}#(Hqn-F^PK&fhtcuAtk~A*iEA+?j1qJ3^+VkW96U z;!Io3qCN^i1s?jfP2?SnbQKw=90~-`UK6)vEZtENFaHabmtEN+>Vo3|5}-b^Tda${SI|+#N|H*`Fvn zB_0(K3qeCmNJRgjnHv&W=BPe6m7!Cv z9l_VKI|wO{@Nptk7hUgbCbleNrdnl(U>wCh6q6u9iV5O<`HeN22O4LzF?Gzt#E!=!3S z?kR_vi1e-@JE56JC;NGaZj83Z_#`LuBNyEshk%1{!*x%XEJGWiis<$th@J0zW>5$N z>l1)E3ewa3f}}N_Zm|cRg-QpRlEga3yd5U3a@?_lcK?v6bURQRys_X=)%?*_T(Q5X zhON%3?nG|YJ_AuZ&Oo@_SIzbylKA(c-^irjY$_wiKT8$N7Hx)tim&rRDcgOw z_X6f@>K8Wg({H$b8%Ra*wbZ>DG%I?J{MguQMZQa;mK^?18UMej41!(=9=|doW)<0^ zC8ANs86Zd-5+l_jhxRIm*j1&^R}-vgzFHcT#bzamB!78o_!F2H;!08^1cM`sGM1T> zA8}eWt#xrOOcAfNugKN9n+QCLxD@TkRTf<^<11>p5djK6U|`W$(dEsv`nL52doPe( z1P(Bn-9Q2E5l`vVh+vbbH{aXP3sJS$E$tm<7v4uR0ap5&pvQy<3J%fNjz=X8W~ci& zH38xwEQP~nW`9bz)1_gM1^o+h^zee!wav9Hl*=r$7&kCDsuD+HxX+JNQd@6qj3N$% zfDGR-i>=ZV>u}bon}<@qQzqh@Q}vRBx3i#CW?2Oxppe(CTohT4oP0*y-~+6?jPx~6 zMk;%#{hRgrr*+lMIS#&cj>i~_zKi+ll-?0P6! 
z5tIo~upR@Iq(Dd}_qr+Hhr-ldvvN~|VF%x>fVA{2Tu;XLLRTt?y-8iEz zD8~2l-Jm=Rp@G`!B>8e-YyJ^2wMB+>AiI#ynZ}?UEe=N$R~*zjTfVjWZ=EcUch7gY zjUqqur;R*DeKz$)Hm}95gvqNX6B?)YD*sCf)%Vosr^%wW4zNY_&y*CYn@^9aVOHqjfB4p1t)*0*A#-LR`IF3^<{T6rQ{N-ehpafUlaGG0aLQTf8Y`dy6cy;bD-L$*69(!&TQpw5x za9HRo1+{snTv|Z~{a~}a>ow2NZd{$>d1r$RA8P4Hts4K491mQmJDkhYg@Ge=BXOcm z+bFOFFdcx89k*h=_t_k64%A9l*ptB!xS=-koaI`t@Qc6Qu3yz>p7SX@@6Ayg3y!gI z`nJI?6CmTeH^4lTCS$q-YnC_8T5FE))752VCvl zE2+|VujOb_dZqXJZVc^w3GSs~!H%s*1H2F~J4w;4&ewe~Z%xSE(ctT_jwL_(;#D{T zM=*zIKRgUGmxpUKH9(I#B;~II*K^}w5r`+wNqm((tEwQ~?SAw*bmU`?i{#ErQp>j% zMr+V+CME9y7p)e(NyFmJ7Tm2_;;EZ=#JZ`a)5UnI*1|>i?*7NSvh%bumxs}7Kt-wf zWUuQ;E$G{a4XppUzqJ60T@oOH_DA$XqDKK@39tXJH6Z`t<4ypYi1QS+ic&q?Y9y=N zu#scOM-l6Y0H>1#KvphBz|=@asPD+sq0^qS1$5kGW)WTMT#N8;cvfoWu{EL*(KQ_c z9mGzgiv{*x?|izV+mZ5sb`0EMN-E6|unQhV7If&!`!7V)rUM8BCzLe!hS_Ldu>Wmn z!SV8E@vo=JBa_zCuLE^aZwCuaH3Q9rvF!4I>jD(|BV-WA)Gt}~3#RF&?V6U^cK(o| zA+@PuY`?*)s@8R4)PR@t3UU^2SBT?Xuf3Ja*?fk0{^ZCfI71c%hf(GAVN#BiWRLbf&DGd zA+ygkF5yR6=6}a(el6%AK;sm%;7x$x{$Ay6f<}mD@rnQhL@u0mFL!P}HFM2$u&4|O zAUz|NTvWak+1`y2{E*Hnsz`H&W7gHUJK*qw2WoawfZSq)zm+b|(R&UIM>ldU=3zhaA5zQ~C>1GHJd9SGPOT*=R#;BA?C$*nIN(;*yq>ZTVKfb8( zHgtCbDEp@eZsf?S+R5%2gaE}?H9GTO!%Yc2yWK2zvb>>MRoHBrP6iBXezDWmoJ=DJOFn=uS|T-dO7#fT(Rz3;>OSRB~MyJD4ncG(@Oz~pWKlk2njGhM%-cR=DP+k(iF2jBxuR^ImO*IQVd z521{B-B%BjA`goLZvr0kF%JLdt}*DdQGSYuxB{#UX#RmMEURF6iL&c zn!4%P&t_@Z>r=B#t&UO4`rPSux^PgIha1>*rKON)=r+DK19jCOI&h#iYy;~n>1HE80WqZ84j~^-f zhhHwEO-a86JqHZp1@%K#4c# zzm=`E5EFA%6->%pVI6FY58Yl1qCb6+*Q@>l?<2i$phx#b?Yb^|Vg0mNEioEz40(+| zEE@84_|DnT)jH?BR*m8)pNj|Lrru~1m{y{q0Pi7g!Fjy&;UIkcnhcm2bGODbaf<&Y z&>Z;PO%|4FD1Fbhq0_U?>Ehkn|fzp|e%ZF4H_^nYnKXALS!T6lV&X=ip9$cH=KTLWa zN*g;rE>CiLB>1$a2i0xKbnSL{0p%6m0`(5)SidmyrOf72N&1D+hvxyTd${Po}d!2dLu20+BP;e<5+RJwm000X|dEARILDgi*K;(*)2 zz4Z?|Y6MRMJ)b_-?Ju+c_R9~KCfF0GL`_Ngjz~HD_DcQrWnzqhDPP?#hy2-NUFyJO z&rRFRW&d^x;;%HKn)%@(Ep_An?6EsMfIx9tY(i@6Z?EWIOUC63Ja?m59;*7Q75ooh zeE#5^2=wqg&U!ov)>v3J))AkrC%lx+c2U~NT 
zQr?RdpgW$kTJPZ>sN$v9TCGNET@rS^mp@+M({J0g&;vrvXsSql0_T@RF}rC}gYF1p z=Z9Na*_sK0gmiJcaD^+h^X1vy2BD`He8aBCh$O50#*6}pK%ndq%I5>LMjWXONTAi& z6YyP~j`s%)?tLo9GOroO#LdwQk_xkCNu^EO>Ko)TsjPEBfjdpQtD`ax39C4nF(E_V zxVJvIuO`n(q@9`EnFh7EH#6A=uQNr!V%{y66Q!VOZhrf9zE3wN%a|{N z`Y{tTe%JJNq2Zf5;nk4wqYJXnUg9>9#-JO31S9_jd(KMDBymVQaQi_zzSp{yAY&ea z@%5M!KbU*7$r0g~d+S;A8QJt0>lVe3`D0LsR^`>ZF7=*4CL3=r%e(8d_QTJ4-;^8C zdCs1DD>x+V%v+ji&!_T2+NsPuA-@c4m4S#>#H8ltAVvWf(LMLAe(!{J5s3Ig-$RLf z_!YS%cfnB4AlF!5b^Nt+VEDWY%~IB7+0V%RK~`-y`2hG>tkOE+3x zn7l5sF8(ahlZ6d(_X&~`GBtu2I}N&gu*$M+hVHWRBM(R?lpI&}ebL9^F`xXwf!j5#UrcxC%7qPT~`K=iR~Cy{#{| zv&x40?jleecjFz~@Y&s^*zWyv$`jwD%Ip$;sL-ir(IA2Lpo_uf0aGRqwwo5xJ792` zWl~TVNow1?hqr^N1H=V4m7u_zex2}sCJ1yR(|S<|zxRm(3*hyv-xnN2FUWr!AJ`6k zl<@i~%t*uxX^S>4XYaPd!rzuCaHe+(z!E5l53Z4G^YrkP&T zoSmywCz>#g2z z*M?*KYTi$o#~L;u%26&5Hzx3v;{yigyd-us2RNg)2mZ0?66Qwijt=yuNUC35sjL!#qCRX4&7{TrMZ zr@4mJ^n%9+-D2!4d`7zXd;$Y~!3va59a4-axvhxZmDY5y*E9xw-7U>b^Z?bo+JeHQ zs3-;k?!bLNupd_o1-Vb8%+!%ONkQo~<2#=vVmf!Zo9>WL(2M^E^ZDOdh6%I7d+oS! zATG3X^*dQ;MAo4>wS|639rWvCYmA-)&DfA#rC?LG@j<7?XQnfAW%t*o z@mSLq2T#_oz7?%psIDxAF>Y@s!*`hNUctLNh13$$`AZN$iDQY8P?@r>Z}R*>l+#QB za0HB#Vr?Q8CCZj3(I8*{oL`LG-(LQraW#j>M`@3yZhr$!^)clJ)`v3rlLNax9mUFAf~-O)u5XiGi!WX z0@N|5el~8?f9OWr(USL%sHNriD@^IvM}lSj2XxVVuGmsdn!93yI=l}Hs@Imb^4K?M zSdcHKsB<)j7gVFzV^uVbl1NMRMB6p!jh_tnU!Pw;6NBn!U@X0&JB9D7^&B-zLo zy7;<__G0zh?N6CzA~^s=`tEK?Ve=eHV!7op7oLjKf42rJutwjR+@?Kro4&S_0t@6V zZ!ou9!^=v9MDN|$Y2{n! 
z%kjLi@*Ak-pT&QrshQF^R^&&jeUovqVCy{GrIs9asnvwZ<2 zQkxBHg&TYw%cBw^#MB~PqD}~v#zcDQ9^}-Y^v5OkM6;1r+ z2V9P2R-IH^-H54WVYBBC3ONHdW+clT$Y4|2eC3rZ`t{BZx9s+X9u$=vr=b`BwAPe4 zvmt$1c=F{7YBHAWJUAT_&bPZ);#0FM|E$gEA!^k^7l}o=H7xw_g4xgC!*&WFz&c8A zF5bF{EHnnUqK~zE<5H}9z8Pv77gZW&pW)b6TJsUF%fVHcJMYVPA1|qJ!PhDUZdGZ{ z@iGaWX4M;b^L%umuu}B;+pSt5pgAtYv_`*hWID{yaDn>j3KHc&1>~2^niC4ua*3I z;I_kvkVIQ0lq7e{w({}*m%fd(Z>fB_APYWLS6qFE(L}++@;8^ar%L$Ex65U7Pt6Dl zck3`gIiy#wNQI##V=?9dB>%ee`roGeFq?-)MDRt$TBVm4$w=@WiU0b z$z}}MP3rPJEv%7)ey^Dm!4(f89;hJ1H-fw0BZ}wt26kVS1!69G3Piq=AS@#A(xG}SB) zwDBUw1L+%Bp*Sf*md#+A3td}UL`b`Rj)IBeMNGXP%?0cc6h}(j3+?kNOD+xZ#%%}0 z0-SaTJ4wN;0lJph<5UEPZ5^XR*NpTAr$>#8eU!^Lwe`R%Zq z^w!Z)6+f(KUBab99u@8fI3da<&@^~R6hvyyWC03lNKU;tWIbMCgDMgw@wjY{IoLdK zGqp==KrTs)sN=^CQy}j=BTCU?Xe`FcjSE!hdWLh1@7XySqJd-o*&o&C_O5Hreh2#j zHAlf|f}cqh$$;W-^@WW{5;u9g6VH}B;QE%pp~=PEsS+3QI{+_`W|ay z&bA^KXug*`zs;&Z>%1LBz)6n@#qoWJyWU~kUTuu0HTx>L?={|LYrkx zHz)&7%g=oFVgbgqh$^ue44a6>KTPFXeQH%khoQe*84~A;0_ye=+0Qel;mF z-nE(EjvHh?gt^GVI(Q`A97=CQErAy7D!&M%`4kx={ajWJOt-|JC9_g-45NU=X@fdx zv@tXEjD|C7~RK2 zXM>LwPGT%whCij9dv!BaBc%2naW>T~N5950`K@;nM~G2_>=P=ap9O<|T^3u#Tkq!Q ziGBz5tAk;)GLs3M-+& zKAl=(ud7F+aTKUp>sV^#I-QjrF3%(TdHd?6{46ftv#*D)LD#lcim}>b1KTvcdgYb& z#8;bvL!<0V_1bwJYcC~n-%`80<{mb+vVY%cWLtYX>yt{L4onlpIaHA(dd!{BMyz!M zJ6+f{7U%}2EsyUwckg8sxD)|`J1vbDhJ29V_IBHukChU2>bfh_zO%Pf=k(?9Sgf9b z7G^ny8PfVyS~>e_sNV`B)I8xFeUi2O2~Wb|MOPmw}}?_aGBC5!Xte!h^a{~&UC zu04+%Z9_>5SRI67X51JxdV->dV3V0YV5d?r9Q6(FraO|cm(~LUDO37ev#93EvW$n+ zddH5{9Kd5f;FfdAO%Sa2{^peK0-hu#Ih{FBwq_Q|E@M?tA^=h1^NSpHv3Ay0qBlu& zBkdo{#16pXmQvpMj&ybKMj;!9jHs9Tol5AB+S{nZdvXm~t3Fz$gtn%@xKH!koS>ie za)cz%n(P@j|E%d1LWR;V4t$`~NQgdmqbTE^3|tz9dn6OLSD}~|db7geqjGVGbdcNk zl#)0jwPpK_a|mj>muP#%=d1-7jd-1jR@2XAnY5RYr+e3JJV27Nx^wkLjPOs!qXx>~ z;|1QrrDwce!G|8@_l&v>H;{Q>qYy2*?!?#%lTg=5Ml$W_4Q9(`_YMv^2HtbNLN%-b2Xdr(MN zAZN!BQlQ^x2b*WoXu%CMG7JGj{dxMI?<(t_gb*d^`LJ-5JRzZZA%$e+F)ck`-)32n)Nn6+}qz&3(}Az0mVa?W%pMvDy? 
zDffFLS7)1N_h3xs$k8;$3w61j?_i>HDqu36lC9ITXIyAzzBYoZut}!(8Kv%wQXRz$ zt~(lg-k)h&DA#WOz6CY|2*6>;Hxme411du>HNX3L6;&3arI$o@urB?O(`{AwhxyC7 z9I8XAB+rZG6ff~{+4sj;Qd3`|m@E3+{p+2)1D)h7N~%#)KPk|~ zZ%_O+RzT}PMp*1efE`U2IWh={ z7JYa2beFT2k^#9N*CIxqAmJwXZXvfDy`!`M;Ib7PqguC7B)U1Mrc#?NK+uLipOKl0P=>|u(P z4~Aez8R-4&F+DM0hdgZ*v|qu%EXhb8Is<1|<~4b(#&@2&k~S9BmYKsxm>v{JerrVV zNHVfrl$oYBZPp19W<{(SMSH~4?XJ;W7r*-mX5c=4g*B;%H$tbRSLR{iBe*bb&XJ5* z0=O5GbfEhvwE^L53AX_m8SaTN8pm3>l?eD7*qq32zPZGmN&Z;^$8HXD+9_)_fTFK_7AiFr)`pCJ^^av>D8_Rk< zokr{3;MYABX=&5MLEB9LPW<8S=fZ3ab%$ENY4u;-GiUp{yH&7*jEIS z8_sfh5FQI-+_vDnq*n!-{SaZXJX2tll%_@%;f`khK?W7#?(6#PT!rdv9XreA%@w?Z9+Tw#7b5mYXW# zGd{7aG<3V4*dnvx|E+6q2S`vv?OgQ!Qo-67(0hCdJ$_=6DgsXu-`ksbWtUchHFv93 z;*@rL!5+C5#pZva`8RSvk%z(V7dphf_Y89TW1+}8)PP#0<8`1|0By^rSZ05TunKFxK31B$Hl z=wJC^naTAD5cw1b9{Rv9UNWGyhBpcLnUqPrVBysC;iD~It6jT%sRJEP8j-HMFy5z+ z0h)TfHcG@;W>q#?ew4J&#o0Jjjo@8#Lu8NDvKNoEuFvaxdVa7CvXY3yrCVu3)Hxf1y{Ya zsxvei9I0ck#mC^tObT|vzfj2pr8=QzLTx<=0|_IO_TyH-5uWlB`Z|XK4}Ua%`YwO7 zfB+<1vMq!C!NXe_e>t$?1_cAL$m=cWrI+v4DbXueX@((4V#ki4v+awDDx5!DC;p$C zbK}#O-DgB7=sQ++J+u@1G(5kpP&+8BZ#<{26R6BWhiMs|di=w-*tJMUbA1n@zwS8C zmlt*m|FR?MSp}{NAfV`xcrB{RSLuLkEY`X$?ba-SOEh}i=cqYap*bmt4jZ%i9dR3Aba`NPZ-L-b8&mFUtLB*p_FLQK!K3inJO&7z-Bzxl)^2J?yW8b!2d_7cX`<7NHD2;TwbAn5jV* zH!;w4CrXvcD~;lIGwUA)eV$YiwMCMfuz`Y<#T37W`G6_<=KboYIa>ZMZxFVql>q=l zr>*3{8jl$)Cm++#-yI>!1D*|g5WuLJr%INrU{rtJYg)PjB&uH{7){A(K_$XE{zOUo zS;vF{lUKTh)l9q}$Bin1@D@NEt+itqJP5W*I&N;ywBlO}C{f5(lHqk}(;;ow$>mgJaDqqbyEgYk)5 zpt6r{rl0Sd2P`S|3VuOSLrDlTq>*@ZUZ-#COFH3DzZ-Sk!{MK!H0$eX2w=3lqLeh{ z9-z`u6_bs52l2;S0SL5pGsRRTz+Xshbj+hf6NhM>;%^!$)2uIK$9@5zV~KjLh2$5v zN$WxL_`%8!cMx&vu>~M78sK8Q63U+?yD@!QSkSb_q-B3c=F4qup}0ZG+f{QpAWbJ9 z^JK6)f?LInOv8f4pqp4}>T^4_>*f15*Q@A7{l6E2OZ6Ife5*bS|FSIm3~^Y#X^~Ml zW>Ku0wn!%`&3(ts=F_vVdHUwBJO&v%D>UH%-W}b_RP^M%YZLVJWUc++3{nNibbfk8 zb4EOcsXztTV_eyx?J0=dRDwf7F#%ioe%F^gtd8$_Rj!rSZFw}j(I)jsD!6^C;r+3I zaz?Snk1RvlP1U48@eUfKKhJsLAN0ZqY{}y^5s;0OCvVuk6_x3zc2(uY&SbTYrKC}- 
zVzlIOJG=dzCTN3WxUv##v~H3Y{u6fZmW$5KHtZ`z>Ix}J&q{zKil4lc-W}&4)=&~F zeb*_q?x1QIPQr=OISQ18xCum*z!kvUhK==tS>e`M5Bu;V{A2h-W8=@0}x*LdRV` zQ`K+=h^pFlCfZS^;4v)3R0rdeWTl3}wae>fsXr@1(_}VS`CGM^F|Pm}mW(}A^Y9zs zGHPJEAYj%5x)DWdwFE8lXr>DQ%OEAc?Mw>Hgc) zsP+!l0ZDAbt(IBJe!Ybs1FJ)>%m9#0xFF1J_S}6&$Z6&kty{H|!-PW9an&f!gjCeV z=%=u*2=T^wXEnYUhud28)pxW*?u^1KXc+21%7wX%FFoVe1IOeCz zg^lz4#c>BIZ(#US{;8w5>)Qc8Tr$#Li!hGI%)s=sE19($iFX+*=$ru)#K0lVkK2)R zk{tSEuI*sQd#nXeHS42_V1N+Zn!H#)Fl=!2F+%W$+dAx9Yg2<)N{P53d?K$Or#+iY zQTm=-fX7cVcsCg3wqnQ}u`wJg)|khW35s#DdE{c!yIQ@);bYscEW=UYhmdS!Y2Rsc z{l44tZ2DMgT16JC(clrslIO+cchi4K&uXkWBvez69eo>*_&^)m1&+h6mAzHr<4u$H z))b&IjUf#R0eVhV4E^5aXwL+vZr7M=l-Qm-kY5E{0Vqmko5 zyn)8u28dAe&dMmBRB5uNWc1uidGy?QUgbIGt1;cvr-W~;d1DAjl5}I}toX+aoCx%o z)+l<*&LYrS9mw9KcO?pO28v?4FqU0;?!_IEAgD71tFtbYTC+IhE$T7m1gJh8I%QWUU$|R?PIJYFvzBY2_zF4)c<8JP0LZ`ZNqo&jMNU(lD3Rp4 z=x|meB+5j@Ao<~K!Dp!8Dg2z(SoqNx0jK!CZhOETar8C0t!vtl7BzarbD|{+P|cqC zW@T+&n(V2#x8fV-n_h@A8Rybwl`O$VcB9XxlScQIT8D}Fb=kGm`Ok+Oea=*)ldj_~ z&r@d&KJ~xfyj<*d;^ zKHL8!4_bsox%rc0!7e zHW8LrZ8SAW2ttZV*0xsB5o>*mDu9lm-o4*cbgy}pS*^H>Vf*eAI$_606`mw+urGRO zE~u*Q%n6!}iWuuEo^B&6vJtl@IQytjz^J09QYO3q4XCVjEk+)0`viN#)PwAi(T~*YjFy8)0n42w@aqtW3r6+ zE65C$VSszVNVi?^#0GRmRV}jM+f^4gNRz3FyI!H6blk)h+HDn2BHjshPlCRJT4D}o zn%%RN3G!~uT?p8INh{`HW8%_%W|^CsZki6&-rNxwe}LXbfDpDw4Os0m&T43DcBgrD zd)fh9lWb~seMk~@Wmy_9CC0@#uB$Eej#b5Rx=1QVOS+l)x^BAZZoVd(_0wkB=Kc#V zk83@UsYG;8<{nWUs8LVK=PWUH4_ikpK?rG!-dMqO1AJ7BSK%0c+m2DsEX1Z);kzMC{m_rcr?iy zwmL9h9#<^h_6etID=(CMS@>YS81&d_huE64x@gKdYDLhm!DdlL5OK&3OIc%jUi-J2 zo2_iFCyhFt8pW(#s=n5OWOEk{d52wD6A+IC+=70-rZ=6x>qgze)KK)kml5CJE|CWV zc>)bQP0u7*j$N@fFHjb_^|+{HX%7q3=9hum{EU+U{3h$CKA7PVBKZWMC_B-e(3F7T8PT7cf}K3*5#mJ1qe2v0)Ncp zocrQ;p)tXC$AI%M<*S5{<|s(+d|j;Vp3XpDhl8|sR`yM$-#DI67FW)JFU-)-M4BS1 znzpYR0NJepAjv`SU=2kBRfNpRtAth0fh2FynJitGMiu6hV=&OIHU_t0jr2t5=-AEX z&VZcBk1kk9yG3hc4ahW%Zhv8R%CEtwj3qT%e;Z)EaZtYch< zX73g1>$$WX4ooruCma;?NHQ+l_5F4#$>7)`N=bc3fsvpcesar24Kz+*6&&!yP--gG z_`Re8_Bo(W@-B;Yjf&rd83x=oD8!Ds@)8zG=gCV3+!2QiPKYF?v%FjgO+nPG>bIxK 
zCOeCgnxbIc;BmX)acTf!9rYk&67K4JBd2p`Gtbwk!lN*d;y|}7Ed!)pzMd3rnL`b- zeHf(J&4D4BGjEtjJ-ChorUrz?;(2U9vlRb@;@s6uKsn_d{!GVCG)_ zBO;ZZ!C^gKvSM0C{FtmX5_#Z5Oucx=e|M91q0cVdZKq4LIFkKIVUEMFrjjM=C$A)q z1V*_Y2WbROIsTU6*yC%1x^5+0kAQw;v;FiA^GZ$9!H3biYc&=EXdDQ_PbS{-ydjyF zx8&rb6NN*eCqtmGjjZ}%!S8s|SZA=2#DlekAb-piTK3RWFBbxTFj{IwF3r-9F02%0 zJ*0oontGAHah||ol7L!#bKe76xRhczrb}NnFE)X5oj?xtgc-S$jcx)rK|g)B>CY_s zjT#wRk+TlZR~kRdXLmom8bFDH#NvrTS81odt_PCA?+SM4HzIo$f{(VS5fk+&PSxa- zz{68L8ZS-W_w3a7ER3$1NOQNSDMEWw04eUqy5PEUyYu8+^Mrc=(lwv?-H@FhH4h}O z8^pC8#Q$X2+r*qo+}wuNzYgw((fmM~6$p};eoJ_5WT)zcK zGR@`Giv)=76CfS>5lHnd5kwzX<;<~dYR$_$7=Ndu837#{sRWzu*+<5eHn z*~2P1Q2ev2HcX8AC2l3#@8}`Pm6lm(@3CBxNGm~6{8@s@yLklGwE}dBrVFEoI|0L* z67)qMVV<=r!9=qdD={|WEMm4lOktygt`H{pL^jAhp7%k2>P1I|CYvc9^peqF-;DV8 zpD7F5;;9RR0E1ZoK)NU%0&UnSK%Z)Lz{|ENeCM0nDzQut5Rzz&L-^d#lDL=MU0}&& zXU$_V5H=b5UV9)K_clvN-ybn{ZLX5Taa0H*{AbJpwiXb`x0UB?xT~+d?rYAxnab&Odx5^czUI5 zAwY(b%;};cW>;&}iaEPdoiJX?q;nPEd2R=>%i5xd18)lOu6?8~M+O5N zF}xursd{{Qj%w>K`mSf+!zEQZ0!-JSCkeEtZLzQK1%6#0LvzFQCKY(1Sy<}m%Ahjp zVlPX`(A7;NFmppr&IxsuCGzPskT;?td z#)VSQ6V4ACNJ)||`~w9aAO|Iu_6M$&+b}^!z`#25^mTJz<*6QjoM3KdOl%;}Mi=@O zaxZeq1Hv&+vK&kd$sr5r*zry(rGo901o1fNI^_f$On|?KWj5d_A!Cv4X81 zkoy@CR7D$7o?vqkxwu;Cqgo)N5O>tOeEO}AX13gnf_*=2@*K5lBaNtl>T9c6YZ1U% z5X|>X1R8^ywyV$OzKri5(&nQfN>CI8F}M9_G2R}|fh#Kyb^ur#b}1(qAcQ4q-|pNL zr$0^@sT#*ujg;BG?uISbvVQL;j#Aj~*rS}INabe}b~;yjU41kYPeEBDURS7ibSu}w zH~9gKLAf*YbnUyju(j8lmw$K?SETTb-s`weLkH0dF!SX4ho_jO#|cz-p7{c>gJpzl z!&@3^fZQR}o*-29xcK?B$WZrA{i#aVY6m|MggDA~qH|?hl+@tAjkb#9smIb+TA*o6 zD+wjsod5-d+VpGcEYJw8Cm-aviJU7W?Zfsq8 zVf=(Wg^ZV13wLRtl_en|u+iuYSIBZnVCjBfMu)QJGOPd4VrOe4XTo_EI_!9`_*Wl< z_+EfgI%LoFrel4RXc7^3EV*93FY(~V>KTSHnyFuJ5J#Y=%f}?d#nv}~#m#FA47D^8_-zXaHsx`*d4ldV8QEf9x!U8EU274yo;U!k66c)EYm$L_a@U=rE(ARz& z>|cK@7kn5txYe8T?=WzzYIg&mdU3bWVIzxv!E6Ty<4=@N@5|f{?%guVjy}DJxkX5` zKP6wDSU%U@f5~(C!J?Qk#*01R00h6b<2$0i`QFp#H-_M#4txib+N2aZU{azrjvuC` zlp?nC4Vh`Rb7JUPtzfr;fkYD1o=ORT^NSHt3KjQuO*#D=Gq`vt3oDIaRQ(OCN`qDhkd4P*xklc4wLIlFkSk(r}nGy(|-` 
z52*geDn>>({ZlIFeQ`SS<-32sMgKPr!QX$=61%bRe21Tvv_I-Fc) z^Ik%r_&rcs`1&y+hUc&@UYI4>yYvv&dQ!;o*-BZccecrlhq{mW@~K~gO#a0K;6l>w z*7>D=KL}Kh;K~YRAvt+k7ZiL|10+*O^$GahoA;}Fy&Vl=mWo}qa+uQ zNpzo0aeLbS#v&j$1E*ev%8hd~(cs{2{HSbf*Hy9>j4;+d;c2T zZ~Vk{KDf9ZbZvaUq`%7kA8Ws%t(kunSgna~_Cvi>7SQo39(dhw=7A?41V?Sw)3~%tF1HS@T)71cEsc@E31@+@sCgfI$0_5 z5ZEk(+{Ycsn1 zYGdEZ%Bqhv!gI~H?CD1B<{hQyBB7*jhXwcTWtEKw727|=*iD8zzW>?0KP#uo;_;s> z_%fV*Pv90Pv#k@%mbJrZ8S%jU9O-B8Kw*5s=m1~y&j$+}f3n#pjLtu*cFy59t#7HH zekmLwe)RqhxcU3{&0Co1iAihloJ*2qG}V^J$p683Wgw)4TNa&_sP^{)d#-cvHcw@{4bMT{WzE$1d5)th7IGgH4@FX!O$=N z`gqah09poys;E_ube;1TFWL%;{hH|Z@5|BHdCFc#o9sB4Ep@vSAaT4tVtsl%^`2X# zC%cv2co)Lv;-8K72=6r<*}h*-|uQjr; zBe^zScbv%FM$=Ev&F}#jeJu~4@ma2}B-kI^u8r`k!mfDkOOm7sZHu3&)NZj|-eqqC zTbeH<_#)@cgpRh%=TVpHn#r9V@kofyYK>&HJpCiE_rmX=cURfqGbEQ-Lz%8u(_e1a z-I-jt!o9#Ztn)*(_~LSnBa|wl_W|9L^lCpY1PFCQ{h^&?_lHvv_r`>iep`^@d7}W2 z-bT#;H_8@}H+y*A^7I(5iGa0CW3?SAaLcn*aHPx|<+aq?EjRaB=s+eN=&-jif{wkW zoz(9v>AlTg4#Kq11Cp{=JSHZ%6Gh0-Epm5i%pLb9zBUu&m>b%TzMfz0YdmJ&TvLxx z_VZf0c1!n^f`}uzYN~kA@}8UtWHkr+N>{nm)YWIr2IGY zKHM}n&`99r?q=Pa?M`TQ?f8vI;-8Ct7DJW6s%X+(?wNXdR=7_jpsOCBNz^43d!Dpq zKiXnhk=ZLz$r*0q&Ds%@Bd_WU3)^swDS9mWmgWqbd|!*RyyG;R$I6fE3p-!9hW3rH zUWs&VHJ)>wE;3b!L!vQibU|?^x3*(CszG?DLDKJ|*J97$fNc{gIma+rr)(<gs9nJ~7ie{v&(Vvt3|zqaB9qZNR1uPVEY17w>k5E+vWkXr zb?aRjdR|A%-l1u}#^EZ8n1|gfj~Gc^!5Z*htwFxQ=dpELK4CBcZ;WS?A6YK+E>xXH z_cmQOEch_HigX&v_8gk~o{{6N(oIaY`I;Qk>9YG*?~Z?3#D6ml|IPn>4Q=c+u!`u& z$P!4d{#N+7m0=_Hh;_-6dzDdzC$2~Xz52qdefA||WQ=vvjeW(Awg9|pO~h_aXP_HO z;N9{_y-{DT?woX2Mw99_yL$cE+9ZlWtP0t-y0ZMgdE5Uumai2GUWyU!vR)N7=Z$@D zFKvDf1WuF@f7=EyQOZ1xK0yRlQ3|Sf&wR4;I7m=0JD73ot8QGB5P%% z+kJD9MZc@BeYj$Z*Um2M^&P&{d&E7?xIB_`nFrY1%`coejTw~v>4bH5=fxI!^273t zv!C6|H!H2vZI6;io%__!`_3+sur?AJ_Xk!_LF?4$D-kk?RmZ`V zkUhX=7d?t5Of3e7yB%#gcePa`JupNI%^Sz7mKC~wCGLANPeO%%g?hHj@L~Gpe_Wsc z^oy_A3Ij`Zq&*5rOXNa$+4u^~&^6T!=kr?6OU;{9 zZHT+gw&_*4`1^oTwIXYOF<)zBYP5~KKtk0xyB(*lT?&B2+FxCA%eGmHly=hbCN&sK 
z%MNs1eq++TG+#QE8F#f|CorwXbve(<=%L&ngz&+47y;%%Avi_s;A<U-$0A+18RdP3NMMQob*2CF0jQ=1Jce1h*v5E3*KYO#!N`3}Aw?FdWA5V+&i6Gh zj!|>`fg((c)yYU|*NUi^f1syH-p*uIl)ih{%-q-e<7p1*yDwDXNdq%M#q=M_=epE^ zD$;iXQ0qZrhA{PUXHubu_afX7p$q>$jIuL6+_#8z{y!SzCUba4UD84MYQw7|SwYFs z&aW95UY_AWce6-HKQ7CT5mN=z($S&vov?sv(Qf?;gOvN~hYNxR3U2_?vjYZy(IA~Q z^sd)txHGn1X@e-kwa^EPa~>)Cn4tmx(f0BW%mUZh6b1jbLTb+zz_9hcxsN@J0H)55 zQ2)U$R^Z||`M3XT|Nq_p1xD~_uJex^UWG5`)$Dz5Q$>jE`d_a&aV1o0{!jc51+LJ) zBM8}(MpZxzQPP$C`%+CnEEN3mdUVgK<6#835N)o1~{>n7)P33CEhJ;0ps5)Tkf-kfsbo?KTdbkGKpta>#iPMgB zPRy}RhHZHGr*Jp2G(l#tQkqXYLj^V-6c`H#0W|iqgv38elqgKtQePA_z10z43@DUZ z)P5BA`vgN@=ClLA8b3iJCBDg8>F72&4Oqm&OyU`V|C6A-m$5#2ibofV6L-+l)JVAb zM5wj(60oKGjRh@guDI2Ssefr|B3!-gqGzfRchmV#19t^4>cH_j5UkVOp2TtS;*Gtu z0L2&{P21wPe`Ovf5k6t>2YDkv>HY?5+*W@Sfh%y;Qkj^!eGjv!EfBi#ax!b(k5>3T z1#zNuyw{q?W;wgq3~F%mp&9ESeA1-#w_4!+%dCF>f7pBTcqre#eY{fIP%7D}6h&nV zAykSGiVVh{ec#tHma?>1k}d0yeVeh3eT)cM$37TllHFjkjKLUwm;1TD&;7if=kxvE zeg69W^Za*>7v{Ro>pb7*`#j#qalB7;CsVVEjSs@@6Ypx?OT3*VUN_R(e&Sd}nTPJ& z9sRrLVndP)m|QrU<3(15d;Yyhc;)=Nre;e3kzSX8=X&c1@&Rl^&ytUbv%peX>y3bk zsWb7&&&hlRy>@sp&l48wRZ#kOlGH0dH6MmYrpr0ohID?EF1JjGa!lUE-NBa#G_ifG zeiHsUeXsR-Eda<8^OMAK6FIzfK55-JQ$O;zGT|#hLE489+ibfDsB0}YFRkc?PUp-3 zyjAh^>jfL)_tPTa3uT+N-UnBAUbm&5x~e4CK+g`?S6ta~pBLf%I#i&gl_Z+5dN%y; ztcYJJK_KuRvH5BLFW>qt?l^_2)rj+kL+&HLUw>>n_blCTY#MjtiJ$y&Hon>SNs+rE z(2bT~r&?6nyZaYd{!Tdhi5ARgQBp7@;g9S1_Sdl8;@T<-_&bh3=?&O42vC>dPxevdU^(n74lMpuW}!YLt&e+Qs)y_I3s z8h7in%C3j)4TA*S4U=>(t;!ejD(ScN4bwqCqSS*#E#f@>)~*ZOc`(pui{aG=uYawy zKe{?E29R^EW@YpJjr{u%sXBF7`pwX)RQwlg^(R-sp8!=K1N#l`zf~Inx>9BUOE}|ycfJ1>YX9-^;s1}j-h|&V*1LutMjq=k9oJodeTRFl zSxx~xD%P=~XMyrZ)V7;y>dC(hhyNqtaZv+Q$`Jz92fJjV+t#wQ3QXSUfi$1Sp=Z}S z!-sUi)z-ql>^6IbZVlvrqXWLkER&`^X}R8R@+D8J#O_9t#I!cPdTpxBh3S9%?|&su z|GN5*?*pt>_r9wnt#>4r`)+`Cpo6^YxI*My(u9`*M}b~uYTR9&#~)aQuk~<9#WAoC zwD;?~>6sjBA@B%w1%OxWW3Q`T;s;b#QY53Yuc0{>YhOdM{Z(66I{{IdDs~k^m9qh| z_DBfm)dyCgYk-5@z5Pp{OG`!2D_KjzM&mf32hg5Y^MZDZKREt3`u+_-Q6(%C5L<6Q 
z;ywB1pYV6xNZG&c`u}}(z8**?&9S%#)Fqe=JYixx|;836IC!A(;rw?TJIuFQ7D zdzEYrn-D?K&vKtvca2rd&lK5;^F27}*AN?Ct@i8tdn4$Lnb436rmxHyKMCt?W*OGF zyj(5vj0Sw68wO zbPP@Qo_*=jw%@;9YW>KCuEh&!dFYRW0b~vjJuI*=0(-t1l1iHNfa=-UI@Jw=28xGW z0xpefO!J>vE}(74`9eX}kN1G>^a^uZJ8rP9MNL_Z*3E~yJQe7|XI zj1Dl`e2}$~dUE#x^$eE=?e*t-zBP@$OS^^+qc!?2SzL;tBAbRP7y0V_0~U zto)c#Ta=&p&m%9R@w7-g*Tf$A^|_uq!kf?$hTs0Okf_^K+gh9a!qzPHnfH=+R;F!4 zmRmYtoqH$L0O<*#N7bF9@Rsc>2LB8vM_;(Lu1dUM&Llp0$8i|E1C$>08-GcSAt8s~ z*fMSrR_XC<1}|4%)2x>w=^QUXl>D(!cJT?NnxD~AZo3JcnGW-M7bl|=)1d=4?{bP_D zH`X=MeHhL{+mL$NU!Rz*_ZlENVzSz2hOzi^P#29d9G4r(`6G-V)wO+^H|bFWi;!*# zPQqfWOp$YOBu6nMEr%RzuzTpjbT9=fh+AVev*$r$3}tv?xJ;zOwTD)b$+49$LX}8 z%F50HO5L3$4A5aIOf8aR(L?1ar}VRzG!?|( zf15VPP9R{=U-<2@2TpjCNfUJ;-Qp{ByOgT8u^{xvgxj%*#&p{|s@Oc(B}Ns@t`q#Z zM0(z^QlADXSc8RhTY!$LAV4NgI#*MA{nv&WcE~$y7@*mc-*X{MUAP=4uz+3o$7HiJRjU7QGhoXLoN~cYpadVMMoth8yw?qLc2x z*s*h|n{v+TJL|JC0nj>wkAK=DtGeMZ!ulN_u)O<|Lkpy+3sjHS*{jyQRq^s$d$k}i za`igsrzU0aA-+_KKFJ7Z|79?)Fl>}SYiKsyA}5Bm&2{gqo?+>Bc1{O~G`71ovJmNV z_WASJ&=0y19yg7PU`K_w5_IY1o$uO_QIs~#1Sh#&{S;_`3}tPssiIsLcGRk`r$VOr zYgkOB-)o=^$2EtI>T7*RvQ`P(F_L2LBV4+=f%|JI`H+^j=niism%il9r2DQm1|gqJ zXX}aMR}~x~|70liTrn8?AW<^eBQj#7Blscg3{4%w_NdC65HN|{iE{b_ zJ>bkRZT_gp^A|Q*?r#F>!hm}V2AQgkMQR%rP#j2XiWajaV?y(#i#W?F97`oiASwMx zC7im?rXHU#BwjOk(LQhDe`5erNX4XXmE9{g!LHgZQ)45}e3EK3uhSxRtxW^2$dUW} z&d;=Qwn$~heUyF3N4m5Iz&i?}MpwqMN|sopQ5`y|@-1;v?g!DZ`?r8rYwp}`BN3WL z?Q7}tpB@SS2zsE_NgE3qO)n za+lyweXGBMsa?yvL3kf_zN72t(%9E0=a2!OyUzCKoo=rTpj3;KIFdq<2nG|qp-ZEq4l6xK$9E;coisvPlwk`6k zXNw9B0EwV#_Bj8S#rI5;Gg+FA5pJxE`z!dWHaPzhU?VXW-2YG!$pQD&P$c-s4xEQo zlt}RE1ohjoj^zz%;U<@9(HcRbhVn4bUDxrV&`YeEiv!?*OQZ}K)bcOXXk3dt&vr} z3KC8EieC1}YQcC?-11n3Y8%7i?C9FX^)1&Y%a>UhEHBLn@Vg*3L0|*_c@CL~VBwA{DQK%{3OvY2#1YYT{U>#iKm+kpn zt_`Xrp;WS0fL-AFYKZd*2XE^`iNwjTj;W-^*H6!O5fMy%!_WV0{T?|M_R#P+TPvI7eZvF8 zdas^6a|UgW0P@nX3tu0L(<>$hw8x^`cI(H)W?{n`Hm6r&XQhtmO<7~o&jKwmpKo_) z=e0zNsBQyA$8LqG)23o?Jf_S##OwEgWJ#mXI$csbK$lnFo#mX=F^ee#dPie>nKU2D 
z$wF(6?2ENsl*4!HE7U|;Gn^$Z5oxD+$8f{oNvOJ?NyEGu!^)D~cz(w;ywRX)bz$7; z&fyMUWRxUpub0xT$>*>z4sU1l7=V(K)+I{z93zWNkn7kH?0yMOZe<+3z^&siA&yTJkOy>($@NuI&SZ;_6^*V~0nOSfqBKX#O5Ls=$_g4H**ot#xf{t` z<-LN(4!4(9w>`1RBAm1zFZYCmtk_LEFDba#glzfU8JQ_kAVIzEMhB1-?Xf`$yiOzI zo=hkzlgCOC>HNHCDxpGjn!m2fwjMaj)@=n=VLvjD1KqZ;PSYsLczC0AYAk|EgkpUN ztm6px0q@R938M0SRwIP=%~dqT=ob;(FkgV*t>{SJg#6kZ*N*rhFEbdM_tlvuZgAMJ*u9Em^{rT$4quepYUwQleAtvL;rrVJZ$8 zx5*h`JxX~3?(7#xA0@sRmm$b?)bLft zYF2qKHf)IVcF@1EvoXs&eU_iSg2hXxiRwC4tu>i1lOKz?nvI1U-nNVo{4g>@1m_)c zi<&COpKR?+p4-SVn{Hp`$7jZ&SlBofHZz`6-O4A8tygdAPU^(Nu@#zKzYkEatNTgr zpI?nFTc(v2zFKMkHA0FVt<#WslQ`_N^ofHy4q?HL^y=)cPW_lZ-PZ3v1Vxy5WDMyU zeFx>!d-dX#Jn_1Lb@Jr}0MpcB58})@y!ZL(NH;zq&s1OZaroBy63f{fha}l$%aYX7QX+n&^cqPMJW$0BwowZ-Qv-HCP^K-ZiCCa<<*ZxNS+Y?vv}~Z5-9R1eY{hX<{#=#zZIxK&$z(( zN;#OGO(1YDRA27Sm)^AbES9yf2Fb?&p-%6ik=`S|BG(KSwu;%iQ)eE7smfdIHhnMX zd`n;pzY34De+WQEn+l5TZ+0QR>ReA7EM|wP()=j6qKK<$IgV8LmK&-LIa{9u`zU&^ zhE0(jM?8t1&@}eTOhj=N0*-OiC50g^uT{X#XvLZTF5qIT8q}-$ws&I_^wuLCz&ezi zUzGUQ4wT#N9HY84?&9jr(`pNlw+5`TUo*0P6M=J(5wTqOfkh7H+t@I?`LZ&oq0L z6`+S|q92L9p3%cjNy3fzd#SGbdnTU`ci|NAcCXI2k>DCZ@%^24XRRv-Xw1u-$080z zC+qR{wPD6yY45DRv40*^$ME#^cN%)sc;x}BNw8ud3mY_a zv7se{x`Y9y_{ANsEiT_0eNJksht*tVbdDLV4(^E$ja5SSJ(r#!=+BE!J0iNyN%%hu zV_J1~Is*(agpmHvlauEHFe{-aBol>G_~L~U5?@RN9*cnE+WwHTbl&;+uWL;=l|s=j zH1p#<-3o0Y)l<3%V@9;wtm^h`mu|DLQ5}~PLnepQ@q^|_L940x-n4U4f=i(=c_{zh zOu^uk65EYE0C$;#{XQ->d-nzZgWJAVSXi|Dj#z$$)MF<&O&Lwe#Lxl>+u=n<-(Gv~ zI?UVW;OR)NeiUvtCx%c5Ni6lnTrUTFe_nX6e^@7rKf1mO=pxyVoCJ$BJHx$Tfzp%X z9@?Xk6*WYC`)6(UHrZ>b=xCpS{F_TZ`Q8*MujUa$0 z>7n=EuEC^_mneGOJH)FI zuBc~xi_`PBoRVNjAoq&Nz`JC*!)GQ8IZ(Sn6^ zok~|)P$o#og#Oy+l?}M|_@Dv}v)*#1vHb8If#bdpYldsc6;JkPk|-)vi$5hz{1*N% z35(Mc@wds$13veLVwD_3UUsv0Vp4~1264g zaO7Ded>z6B9!%&RklbqMd3siMeJ5?~1Mmxv#jHEooICZEyspU8qO|zI&|-``4A?MIj1l`l#2LaY6>2 zhEnEplmU2)IQ(ijO7WQjj`);a!s|^@Gyr?4A?pRZhKmB#svPvQRddd~4%y09C7(20 zS;E$vq)Uxo76R*O4#&EFxa-}Q4exfIJB&9x z1r-~v_`ypW8~6Rc-gBI20&n67-_PMC@XAtQ4+-_8Ab;@XNk@k@)=J8n5WM@+LLBO(WZ 
zgKaXhhWTdYxSdP!>rl6K+!9h=(N5~3X=N1iSROrkNRQ5YpFhhk9S|D|H_uf?D+tE& zM|>!izjRc|#7vv<%WQYdB#T2O6AvrUt$XXA>XhQack3G^W{TO$)f!&1z(R+ZUl(7A zEd6+8!ri!~s?BA^>S2E^p5RZwV}5Wdo=i>wg3QqQ zT6)DuyTpB_MRzEW*T7_@L3SAsTH%e)K&vRoN)P@p6gd4;>n86^iZI6Exrq%$`hVCp z)=3}WbCvlMr2wyfz*qRPdH!5b6xSrs&U{tq`wS@q6!fwh=y9(2E?5K5Rfuv`uqiev z<-fBg&ohYd01_J@p&KZcv~{FV*lmLP+0N*#7W=^nc42^`vEzt&(Pj17v})s6oVe9* zPaDYOVLtC3^>wRUqQbQIUV%{`?Ie3a3lIHw+yZ}m$MfMABRyaPqU79y7} z_1L|K1>Dp4d_&Euno-gvKNuS=&&796s?eG;m3q2LVbXj1z&OSjBfqlD-|1WOMh`KS zC9hbGPgCSJ9ok3kn)Oh5*Y%}GApZ6{R&){IoL6Wfe$i?*(BT9dIh7Y${MKJ?#elS! z(si5-T3&kLk9ciPX27Vqx@>sfUpyTvAM-v|;p08FY@Ksdbf20X0m9cX$NfR#lbw@H z+}a>!b4_0zQ@44!hqKFG(fBhU&`c@ng&n8?X~@mPbFba&K)Vbv$X4;XMZ5KiCos%T zeL;!zQFFZ}4%LI~+)Dc4MXN)ZB`D+Y=3&!h?;6AUZwgdTzrwT!lVxe{NP;ip+@iEh zxbjFF__S+xz4Je+T6^Kz%QOJEQ__*p5@G}0G*MDbxXJIWQ*xa$ylxWHBOe@S#p6t= z^xtZ|2SKKjf+x|3HPIE%kqD+Dqp!~;?ECi&>)p!YZW#?2=V8DDLt`tJX*=tKr{~T$ zj&z>*n4k)1OqJQb>K8qY;*^`+@=5=Nf9PQYm?d^`tVm;1wO5X0mXLGE#$kpbkZQ z_}3+F#Le#JWWg;>Sj;G-@hd@|7y;u%m*lW#<_s~Sr4o@r%upIbPIS< zCp^nr;|2pIhA&KM7y5pcrrp~(2pP)oz3`u2aO*$RwA91{SaI9hB-RXkj$8iPVNO5YuF27o zn=39zwu}Qre$Rj^v7(n3ko~eUk{5Xy8LWDe^~?Jz_Wmfwi_+T0=7Ld-hZL2In z!PQe?I(3z{Ifi3eUKtW?=XfIN7Q_p#j!iX5Rocp=>Ws?S3FjEwa>N^jMCG_;1DYA4mV7VWGVJpSv|w8JwEre}w|nsg1vm)A<04(G z@egXRzx?{-C^_l;D!S2{%w4D$%zFEirdFN?OCPfT51Gd`@xU<{uj-v*ABjCy1VY5B z4lmX8u0=DVzv%243h6(*a`g1}SrPsWwV$v4R8sf)8Njt*_%?9oSnw(T9kP_$SiTK~ zh$xzg#pHbebynv3dD?e*iz`%L-g$E65x^gR{;4|hPg{AvehU2aViEWA$NkSg z92VLC;!XS&uk-)>Z0{7{bxzk}_ctEx@K4`K1Ggz^t%mtCOz1yv{HHQNqu^x&{moH% z*bS%*BBTxc{xHG#Pacp0@HS~prGKxz{L$mDHGs9yJvJczXVkd=oU+@@SDF@1h7G&& zxvULXj#ZF?Qt$hM%Pl%@`nt`2n(2srO0m=v?vgr1!J^ zI-i$rRv5dzj%)8P3PgR>uAPly=i#}li`aKG_8!Pq0mOpK3F1HBFh9Jqe`17>17(!-L#;ej+dK4owGKOiNt=rv6;Kjf^I6BQP&&L+E(edC*JDr#?&gh;EC~&@ z?oaM_Z|ZMYY)#yNldG+t3Aco8dX|`9bqhw{HH@1@m&D>CNuV*URyw9@JaPeAyNn!8 zUi&|364_&lo_q>@f&TWdFvP&U_1Xu=e;H=j!@eDwO^A0ZP4`Mr7|BVE}r3jed5v{T%!FBrXm%G+3=3V1Lx&${20=MgFue#q*>tCgEN~q+B`Brh#^yd6S4?@A|Z(rOu?}(Uh`6pGl 
zB-NI=zNO9vj}Jqo_N-T=2zNg5UcN1n$m6(|F;tiLVTboLf1ua2!e<|EjaV7vOlOQv zq0TMg_|C*FC$Bs9I;#cZ^FQFIlih)e1_z*_^I4$6yP%q&d(MU!Bn^OtceN)budQ}#!lc&f~Cb6PpC$AUDbKxaji zwz{8?8|aTGH*rWSf@9MBTGw;3jf(g`i*bBg`mu`|uF{`s$cNXfw8&_oH6l;NzalHp zZ9;wZUe&8jg1x)4)OjMxjr<)<*;_q>MC(XH`OojUg;capnR3NvCWmSJ(QGg{$*pBF06H$d|+?{D78(X1BW+e=YCpf#9w z2)Cg#14|E9W{N7}+5+j>&G+)o^Z&bO{_M*?z~)9rLYaYaAf&~@9a<`ptmvLC>(coC^4mOGtDw{X{kBAvU=X@xt|{zFg3 z8pxXN0TTv@!`ht@j?ds-5%uzzbn9#tpfmm;ue~@p$*LOV@s#-`l-L z-xO}Y59f4z(&l*<8Z0ki0S}`_ZNB>6hxkV^E10S;tPA^lMQ;0uvva|Cw#`(Fj=28$ zM}Q6Pbz!u8=R_`x=E=>)*S)g9@$XyN!^VlZT>z)+RGpTs16mZ+E70^;Xqr;m*1hqh z`2(Bkj)WVrabkjkM9QP^XnDwL!?LW49?>f^;Su<@QuKJi{KKp_zX<&Rc=v27M^`y{o;BrY+n76R7Z9&|rk8{p6-8&P{TW1* zyru~vkKrIw>t74lYxW#w28NT>rvyGX=>lV~l%DkNT#XI2?eIVPLf3sKtwz2fAGf(##Nou*;rMzYUO~0n$M-WGSHr`@2VSgcY4;T z(DC|lrD?G#=HPX+3ZRYW=$9|+jyb;iGz1;YRp;|{pFhuitPa?lKo!pvG}C4H z!XsYv?ePWzm}hmeBhqbVgUy9Dm0HuXl`K)^ zIUiEYdY)GkM%!gf0qmw^Vx#3vPPsiLpyC7I{yCQO?3=&V=yt5Eb=9es_*Dg;(fst9 z-iC7mktW5~9d7l$X)+INGg$T$4yrUXQXe=n$r(_2HIcG@dm;W?WCvNc9K~F)cGR+d zw*TUVk*`E?_-cyh1+b@W&u)*^iELnZdY6uIwAJb|itkj>nbs6E*HcdOBFSChJYXNF zwJ3WmPge+zwEq^})w8v;0eD-R1T6dzt#Hc$2@WSFjL-L^X|ERe<*gNGOHo{bGNy?W zuB_cKN3p_S-VE7K8ZtD0M!AFY?le*IEZ`ce;9z-iR;8@@#BA$T=cJy^TKloeMxi)fEYqPB{bG9okZafTyD1C2CY39}a0juBov9rPY1Pz&0WlcQOCQrba z*rnUY%XYn^$T|YW1O6}e-ph0Dl0d0)HJh^4`b{dGnQ@|FLn%-tO)$roH=0YBmzhy4!7(t7KI2&K@9kFTM^mDk4Tr{#(-0Q26Eg)~j$29wFEow*k^A%VDBn@&O1qd*|3> z?yYBtzN#?BJe{U)53cLXVeUe$wAqQOobl$K_0=Xs8RVN~nd#ll3G%!moJ;=(>}BK% zzFvr0eA7O31MufTnphuE4dh{5qC|M!uNWu=z*@wSEKto z6Ho_Cmhy$5Pwq$)uit^L(=V4$3l*Z5Mj8$F>s+lV5jH{5YwC--2=cBRl@0Ar_x+gQt*CppLYZ~RiCB@LvxURnk$i4fDFHb zCP69(;YjvGE!0o`bq~da}|5_w-w!ZBhrR|}s@`ON0 zDQZ93QPyJfBrN&c8Ay(~(%XyClD>V;KFxMA9^dbenH|4CQwKfeVI1_JS=-nLI`aB% zA1$yBktw-V4!q{oB}(Thh|f!VUZVT61M8k6$;bzg)eBjVXCB!1lM8|e9zkJcE!ptM z^w6V<1ZKt;HcdZK%x}9(R$#vUgPkb7uQpeG;RX;&XJEpwrN+f6q|A8!8f^Q4u)XC0 zKCS2X1fF)%vX3*XXP9GV5nGWp{C(d8FHDPC@P3--tJL@VriKOt9J|`?m@bTaiJ9^ydP1 
z0A0&X?7UHYmrD3Zbj`?OUsH>+=B91VRdL2sOEbdDzm~f7PR)&@sD6obKx+y45i+qs zVun^M{n4^*nN!C?!X_)2lQc6yif64$3cfG2ZO&L?7dnZedO6?>3t!!~N9_MIecdYe zRwDJ3euElK84>k3Eb8253%S2|trp=5L4rbla-dno`K`}Nrts=D_jYMplUN1V)m^#Y z5{oW4KLyk`G(ZI5xV}7mz$k73heq>Ck1|Cl>jb-oukoG?<2VR@nD=2;jlJbn^}YG5 zT{W~CAA$NK;4*3jIgmxqU(&I&I$-8D&mPZ9Za`^=Z7xPY29!5Zmg*c$gG=?e$c$|7 z0a*QrTpv};S_MLr(H<@C z^nsm&0u)Opj3T`Yts`Jv3i*fXC^&x`-t^5&k@A%xltC5N5$JY~xr zrdhEW}Qiu@bvo{YSI0v7wiGu-y?G9?`}9-St{E5yrNlp*98Ww!N%`vP&7(` zGSw17U{9j#_q`KUpoF0_P;icG^jepnwT%+p@e$$Lv`3-95m)LB=l~?TN08+m4FeiE z{q9rYv&lg3VL#~0yeiNc(=6h#J@Qb1@$mxm#k>iFxR65*%LSNd6!Y+m(0R%6Q%CLc zHH7rRW-Qhxx?U@77ni}zIH!U2>j}mnlWk6*{FR=5kGmC{!V+7YSxPo$3@EGb_{ESM z>E|A#Kijit87l6PhMeU5T>PrB63{}8r#=I_;X;3wJr`r`MSB{>8KKID&hKY7ar z-q9$Eqm?-VjebisNaBKeNV^fBP)RLd*$9i??Zd+{2+;2aK=4$gbBWBxfgk8N^Lg|z zAT+%Pl{nza%h+8QwtNT8BsJM}7|El%Mni>ZyK=*Y4kO|@nSciE#76hFpE1-ZywSch z?zUZk0G75N&OU&IuNh@MHcJjHWVY)o!EGomyAP1kloL85X2vb(Ak7;9>u~H%= zxrDIy8}nV30kF+RUq$cT3PFRB=W~?S-J~p<-3>*?6mYEg^8yl~uuWE{Xu$416q`=7 z5{I@Cd{OA4V`f)?wo>{n>mw9sKJ$0XzUFy_!w{ZC{T*vtz6k3?XtE!DA&1E4XS}q< zeZM>4G8s}3ELgHo=SHT-{n+6>ma1T18A$Ur>dXffkkCeut@n+5#VU$%9J!Ub04l(t zqa1jTl?=&l9~68O&cs`xXjJ!-z}I*z0;I4zIdYJZfk^^sAdf}*@ozBz4vfc-U3PY$thmwOfd~9s>w9X8AKB8Ph(j4p>gM@dGQB-YJwqaT-Rp#v(kjq2d!tZpBu%QlpaSfp(&~G7U6U{wDCEGU3!?Ukas~iL3J~xce^bEbiV_ zLOw6CSYQAz{O;rM=qmt@-VvF4LMa=&iQ>f>nYS1bwx!?neS3VJJ$V`VBiY1NtocG9 zd!df0-^3Zy>^$$zpKtFiwF9P`T)Vu|Xvdi@pB9z54(WY2dNzr;we76P>jmY+s%GkS@X`P+U1cuXJdlXXmar@Z=NDI$?w4-S*^79$Pkjg7QoCUhfcj*lsF# z5Y~3S2Z167ZmrU3d@k?h{av4AH1HWKD`r42{E{y_ln^d-CJvv+} zL}+L3g-pNa%~v>Ee}auq#IM;^Ip@^x*&Fhjgmw5iiG!C9H#uQaWf5d*JU~S*qW+`Z&cxC?nla;^m3|8tfjj$H=0k{>r3Bi+R7T~FEX1hZ=zxmNSsV=UBE1nU`%p4r=@JUQ@h^3 z#48X7Q^`8P)}_GueZKAFL5b)!&mChTnfIMXeE|9RsqP#Z$D2!p^-@@grupcjp&VqIc#Kn$#!zeHb~EN{_d7T~1~x>vh*6l9k80u`lUA_$58w?JPp5 zJ5#Hphr+#+!BUhItmSXgGzp<})@W4b#@rJH=h^%q>Z_?TR+gzU<7UOlbZq}BU#>y4 zZK=7hCril3nEuz&YpA??icfh1Fe%#pVR34{>L_})uIaAzXB6Wqp)TM9qC`HBaBq7j zg|50^lf4kS+X%^_gGt}h(q})iH%i4PTt-yn%X=~%#AS&kU@vJ`%rPr!()!-p21tMx 
zY}7(Vr|UCNn3!IjfV?O9`=403``nV<19!VTc3B%=Vp}VJipu%i7WFfYxW=ZIhxxIc z3Y{%;MkqK>lcTV5!iRVBN&7*|+U<5NdNy}h+6Z%U#4Q{ST_l6-$=Xo*A%4tyRx7|e zjzn=HNbHQVp~QA0a~PBo2}Z(@wcth9=&?Jz1BTXFPDk6eu=ja=Ry8pQV?^7{OpLFU zM9xm5X1;eM8ItiIbN_CxKiZpm^5RawI933aW)8W>vYqbk0Tg{(wM66 z6&EsC!?tsmFfwnmAc=#OiJAC`N9?8%*7x_jS=2(v2)MlRw7a12tU}u@0_V!N`3ZdT z|7G(3V_gJ8qJxf1UqUB{zBY)s?4u#UVI*B*VW<>w{u1Ab)(hHa&`UMmBeQHziJA2e zYVIbYuQ9NzN#_tOPO_1T;lzb+#1FP2=HF|#ZZu0W*S)`yYY?zn0UO6#jCvsei^`bW zkIk_J7k6-}OF$hdHzhm8saB%(UU%6h(EHOX&wqDgQ5U^$bctw*XkRf7f4y$fIlpTH zn);#16vmg;liT{vvY$U!EXq{#_Q7tUZuS=qNOq3FmD^FbT6`|b9hhkB&I2t2CfoSTQ4`Q0GGM(@_`8|s7s1CmD{3J# z*KV_R5u*cq<;YHP^`VaCfsjvvQSWmYQh}Sht?c0cEm%|2wfmWx??Kv|>(jpYYlbY^ zpmvXSW{!H26{UdAVaMG>50wvhH(Tq)p-DXLkX_SOIC%b0)6c`1u zPnTh5Px212sffY(8u+q}qxiivC1&@S5ps<@DhTKh)?whzI}knZ0qTs%T))hQ+;{3- zrQnZERv~Y{!p=eWHh1-HNbcG0@xd64qix#_5ttsm&c!e0mm1g@g%c?0dZBNmz~IC# zlUZwAp8Q_a+%sS4RO|}v3-u;l84$d_R^nRrGx0j~Y|{`ihm8x`QYJqAH+mL3&1Y&) zsVDL+%vH_Ggey$5Pgb{3cb4r!`nJ<2op2=s2GMfm@*b5h468Bm>+VZXfqnaNpDe~M zwSum(e8LfPf2M}D?-VR_2J8NSKsSp_$od#X#!okQQXuG~ZCKQf9Qj8zpPa1p&6*yN zLH0$w%PU9GbYh9@!tbKx3V&yRn!!PA<4zjykn+r@Ss&xKiwq(Od_UyLDv;cHIjSug z8b8?HerbL!H9?Kt^fs}}twCT~ymGJo?(EI#$^ByetxUih?yqC};q-1=zI@&^wGAuW z%nGCJ=^DI=kUp*(WhXvA%r7yQN<H_l)v5VzLI2-K!H~YLfkj~wF1qVz-4~7ZX#rkBc|F6< zbB`cbG2`k})B6;5LSakI9jSq;m-KAH^?Qg#LSaFuna5e&%tN)~I_8%OwsH*m@=->3`Hgyo=O(g5D%py? zxRZ**9&UONCxt3#y+8hSUC${9-k(^$W!1Bibl%i|Oo*Uoq8ZZh8jRb@@t1=zibT!g zahQ~9;3^?FXX~fk@q}wNm(-3=Tw4Hz9YjHgGZ(+fI>0o?QUj234qrGu&WhZvU)kC$ z7(`F3(_*4Kh+Fakd`({zbzFPJQokUsj^ypPCk_bjQ=~j~nA}>GnC2rdJ^tu5OU>n? 
zl>lZSlsJLhFZ?LOYcF>9wdD<>BiPBzxzR6Z&(s2xlKgQEJXr_LJ)BzUUH_O`dorHX+tK?bv@q}C zZ+N$RJ!^)}n`5@`6=L@`h#W^}x0M;z=4N1~P z+r~g6*THJ^2p_BRmEWzP8uK8T3@K&xA z!69}NUft+5nrRs%y6=yiolB9x-7b0Dyk=Co@s@W~%+v2)&FlznK-Ae+bFzxztkgNs z|2_2prsq%N8&{g{(;4@QWvnj?Hjujozw2ns*S2g?)Xw2N^WeI>zm2@%2kZAa;-zWA;I!-X8rV;SxF@R+IHSL!D@*%Y&-zCdrA~Pr|K_ zzp1-JeGm{mHxx5D{#^GTv-yjLWVAdxg>&$+bEQYirEi67H^fh>cO5aW?BYwEJ&&UV z{2ttFMFhxteLUwp=RD8%Jf}1Ley`u3nr80hTJHDt zzOL)OiDf1DPhrG946iz#!CN!H05tm24ru@Te%N^hNVJO$iX&ziN2Gup&R$F{Nx)1` zFev1n2gjg*qb*x4@(8lgK-r|qLN;p%iWURN*nO*H_o`f(?uR>|A(Zx>UF2xt4daka zz*mcq>yk)P+aSY$UQ{$-)PpV|9vUjKioIRsr{J~_ok8nZFO}46QpMA*y!kX)v=Q=I zLJk7d%6o){Ms*LH9WD0Ra;wxQ7gV|{oSBiP(q*TSM;p!br7iShL`mK45n5~2%PG-Y z*F6%AB1rc}omkN7!`43gKjy1PjRAi+Z2=!9Yi@kOWG4F#1+*Trn~YCvvRN7cMzzjE z&MZ|GUYo1zH0i%d+m}O&Q#fRb3NCw97_!nPenMu^p+sZ#az?hM*OozJ4$rowwj+C>y$?mh}LZMybyKK}XW zjZmK2spfrsH8z{iNau6}M7mw%fO>|i_~d8m5NynGNSWm=c+w`0u64_!lCzrJHf<_s zJ42M$5Esbsl+~KPM!mBU9+%vu@gRl&Z!z;9aC$b4l@%fX4W_a6DZ2NhCMNj|?W)0` zL`!58m^f)y6ygGh;U7FmlYxI=L(ul@dDmo3O{&uEG30$}4w@GSMxEgeS0q3}N3QkNR`2QL)56g8BDG`x^#CEgpo z!h-gCwX)K)0$lIBNsY=g(lZp-m6fO_x`>_znT}c{vlSCFCKg>N#QdK zTKOu{@YP9gcC<2y&(!Mqm`F)8*T_N~%hA^$T|vD<$(^Aw3XPx$63srLJsu6kUHs{R zUZ;EhzUR)`bYhMo_^ll~KlIDTN2}3u`xv;CURAP`5cRHyWcE1zkEqQs?zC{K>9wV` zxqhFFlIF=6-XAVDi>K}y+QcR&;a*61l}GILPFbM>RxGmkMDD!c{U;$Ojy5J2oitE+ z*(B+3{!S~WYf@}br(N|_^1yRG+tg_+Wx^sc3)?__H~j6RntMj9#`;(OxOi3sFTch* zfR!oZx3+q^yNcQ=VRfJ{3*{bn5r<7;U%xOo6jtmXcPa8{qSxIHAPSR(Gt{PPww9iw z812@w}T(ykOO5Lp44D%zKV@1wbUEhF2v%riDA9_py{km}mRFFbPS>P4rV zh4i|b^$5iUhQ-cH?c^tm{;UWGE~QP8;0{TfMBkiEk%9ID65qQDE*5~#6g>I~DBThE zQWiM(If{B-5*T)kY!_i&v5C?(H$?gK2c}H zH>55*9ZuvrF1pf7>c^aD_1z%4BBZFK2J4q%^hup}or*)s@5#IiJR0krFqO?uO4&*= z-?Z7gj;bG!co0*zvGCz;U-mu)h+9;{!?@giSgQ}p_zyD`ckcagY^arkuY23%);#&z z>5_L^1uAN^v$YNa0w=CCUbY${Vzr5Ht-4l8LywpDU=Wy~46=^G_WR{gcu)I#hT zc0u~l9f#VVuLQaVe)nFc1?E(zF8@1Il4)_@Gl1_>5JpY&Yj6Yl=KQ`~h>?|XOyUapj4O;} z)lPPavH6E(c0MoTYuf=$4Q<*xV9=h2mDSW5FW>lR5Hb{8DA9P3Wp-_eR7rg{O`0+` 
zzEWA6vBC>kx&YlJm-vPPDwkz#UJ#4g5rmjbVtY<`^BJH@^x<_Ru{gbm+R6LDPpIqL zk#^KQ8>yn4Y>Cm#SBc7rTn8FvrRSXMv%##Bnn9HK3DXOsd4=^dX~AvIo$$5N8qMPM z7fk`Yn{bAqY*C&^1Eb5QsS~(#AkXkb`y6^vUPatyCrBc-%7eN=I-XskwPh-xpdDkc&Xed5`5fWYP@Fh8G=8kPgZIqh3YxPdP90$5zw&v)c9pBHbZM7qRiwkU)F}7U{ zo2pO2BiBnC$JXhpqj3udGjtcyrQx_OyGx}K96tqDcM`T)nOYGCE|+(h%VzW9d2;sF zW3q%eweLR;9f_%Jwn2BNC{^<8o2F|%B}dsD8Yo3@c7A*I4_*&@fum9RW3?8;L3Bpg_d`XLH5;OGB+w$T31{FQ5Ykp? z|LO-QKsM3(!7+xW2eB;OGgd!?*N`9vfY_H)E48t{;&`BCN>pjY-#ZYhkace@fR|d* z?DA4O^dCkRQ@VG{Y9}aSPW1hg(=+?>KAk1U$)QYD>R=*6(pR2`J5lVKf-CeTg|Mq;rWUFpKCr4viwHO4vJ|RD&W5>8#f8QYZ!ENg?OJwbw zThnMtz?MH70&Kt3V4CTMcITj~-ro)+WPxxFmrE zJs7(}SnX2j1E1uMS1dF&OcUa7UQ zAZw(@l9mM6W8^AyYtx3xg#@D5@zt8dq8#LM46kwf@lij|7}>yC`RQpd_5!aq@?!ES z;Ly2zilntkCwG3+TjnM4q;zL^nlOeiVzvmyOpSjYJ9~D$z+llT&?m)~h^ImlIin^q zLRD~uYjaSswc^0O&&i8Qi7U3gz#)e) zqrD!2$AgYR&cVmhYxbq^YgES3zL4MZ?x~UH6^k|8m9f-;1TK85^{BifN>YaF0IOB z_b>pe0tdt#1jiuFm3S&wumjiGX=ro1K20O>_cH{zO{qsS{DV#!T$6px38wrh~bHUK1 zMzQniz*!MsRhK{rSUmZPKu?3XNYhX0Xd^<;ANW33A_aawk<_q&U+=CYdW*qZ=)t(@ z(xaPu_$3}EN!aRDr|mc*<4#IWy#%M%yW^eb}S;jgKx$v0wAO1&!MRUqkt z+BmKmOVgJFMmtTbU(rq8Bye?hbi&%Fvc|{Y&T^L z#vg;sTOZ`QgZ61s!h5-BIMDqGyBX>pLnpJJ*4Zc!gCD8}S_KB&38c8FQ#R=ckA9=4 zI^y8F4n;J%^dRwWYOIfIGQ?Ic@uY@b-14W7`^i^YmuqA-Gm5HL9{WHQh>QIF3Frku zlMpl&N4QiLM~|JVFg2OON-Gk|Hq)mMjig3;U-K1TV+2#nu}Hpt+7v|C{pnax(D*dn z?`)!f?3ORFA2#pf3@eOJ6xr-J(qAL?zPv-qK#MG{vPZC-QC~_(@{zF8c}93u4i8!vKkC-<#h0q~c}H z=2-U*taT`q;n*pir^VUqLj}?qDx{lR)OFF`kXpB&LW^t@AcDSImlm__)>XNh6XuHE zvxb7Nd9aV7&vH9QxFZXu8|s7as5;*JG2&N8Er&hz7?wdlnS-%XSkOWEy zUtwV^(IQ=_GqAB?+NBp-H#R*?d})AF)1Xfoyn;c%F>VMRI_~ao-{R8OGpN+os#DobC;jj;brB7k)-848rsB z{93zEJ+Sq1M1Zd}(w}|!cZC3dKPb?*==d|G`9Gs4du>+}obQZNo=KaMlT9z^#R$=2UAD^WUI@aM(9?8rV0N|zrAn#KH-JpJ?Vp`olU*uram(l+t-UuzpI z1Fo$%WrYH+2z*7A;#X#h4@s?Ad)561kAYso`Q0(|oE?`H=x37nuQe9iGICp^Sz- z2(7}-p4>K^$fG|I;h#QgbmfnX7wlH0w@y7EY`!5X+N(}A@n{xjxu5XhlGE?W^IoYY zWo2Y+;Sx(J)vXe}K)coG4ae|5Z^=*n!t&93T?SnSS}Jp&60_s*j5p4Nx)rC~lw4uJ 
zsm!-E_w^uCSF#f2TRum3`?nqZ-(LUUBZq~scWsQuJ&tK!m5osMFwr{>kh79Wgs`>O~odn_l?Q4>}) zv7#ppY|El#>0Rpd^6b8}#IYk^+QaCS=Rp1{qSl#dt!GC|-CH=GlqKJ#iVWD;He1gk z)nn4;RmHcI4(?_}Jl}Y4>GLI{*R*^6!jcqteN1wnMox9XpCtG1efV27LQBr~7{K2+ z9740L;M6FnrPy(b?~OPC+tiz*3B$;^QSUgNckq=F__OZ!LCpHy)u02)q?06MMyj9P zQlewixn5R8XOfE-H!?j0f|@`Hr=*E*)G~TkaBldKpXYG@;vUBNR+nswenW;%<=;`- zwqVtnZ)_0`GcLD|#HKzGV8_dxM zr#jserK}TTHKv~ef|S}8WS-ZjAgz}Jg;qHAcARyn|0^J2<82Z4KQ{N@!yrr#d)5}PL)+x1)0)>@HsyR?#lY{*CN#pAb_ zUVq(nTh*nz(C>$AMWW5cTlGk6C8UEiT`je_jj|!2Zi`%4`z|hTAc4u*g=(>8DAR|R zpOh8}()MaCvXGGyEBK==fvSm99eFXt5Drl5+ksXvX6?LdeP`FyV_}a2wlm|@Ugib} zYtRbQ*4jia=e5rzKNXIfA46^iwCU;({Bjoor9`llNQYxOiwM)43n1KfTsS2r=)gn7WRd)n7#ZTcmwhK)v$r7V9F#q?t>Q|eb@Jdj)T1}$X3 z(nQw5(e)zdI_ki+lW7}0Wr5V`bD^6X%hQ=W2ITzGoS{&e;PH;R?C4Nza^boPr@Dm3 z>IJnQR-M$g7kRx%ExOIrgWVy|Wi~o5Poc=}SJdW~Uzd?()R3mejM2~J+^`OEdA4?K1-TjtAfK zEFdf!)Y3CmnnAy4wY3meuOu2Wx#i;Q-P4lvPCzW!AN6BjQbk{qaE2NsVfn3qIx_zT za0LuA>el#c(4o>XAsF*FWdTalt~r&mJkP6~9pf-8;b}!JLHiYQXmgXMYk>EgJ0x*&a}`O-+8qiv^d|5nZJltd>rd=- z`#D?x@2%FWX!J)u+w9YtOFr7HY}I~~)Q=9zU`34ci4a$(0uP7K=vvuUenG)-81GF) zqdhXq=>EbWw(hKkS7ii}?p6a6GAliyafgPV)^1f}9go)$PtgpSb|lwZFMc9r$AvD# zoxeP5p{_JGo>v|F44ux+${bVB5s%l`oT5-Q8~#0$p9`IjbzJa~rz$eGOA+(Jw4F5P z2VjmOvkVH790t<84~AUY_)cp`uNVrX(ishe@*U6ydw4}g zA*Vf16mfEH>EUvn2fUgr8hf(QAJD(VZAC=Qf#3^TnoA|Iq%S0pbKmD!&i4SNz?8DE z?^;%SvFu|Yn&uINTD-fq!6@Va(fP=w48T>$OE<2*+*%D*U7k0^2CB`@-(Q~8(RiEr zgQQ8|x{f{-Q;7#|kaN3v;3whGpFXlZ%drk@^9eoCH0f-~qQq{(oca9{n-Wc&0hSGn zVMVlN_}>Vms}ZoJqlGmysUb@Zq?HkCZ63{x9*vm6=y{8cJ&eM}KN>%~o!J8pi zRsbWLs@Y>IOikl`KzZmI=F#bF2X~u#&j~-wX17LFVaTGU z!4(31APJXpuCfx|*wkxmYGdRfPFv5!)aVC$d)pi;Z1|2|P9=#y3-KLYgTaCH`L;uk z-I|}ASZHk^4lWNUiDJBJ_cAG&j!R)Hx$hi@`+A04|3jPo2yj31vQ3lDJb0gkcskF> zmZ0-CoAf@f@M=*|XTv+`%0YGNLEGSA`Sgr>8bf>lUf8f1IAz15b&Nt=M!pmFs8wP7 z0Nmle;MIejOPAjOH6JSi4I9+u2T3CQEr9`T&oh#FLR)mnxjEw~_o{{LLlQb6WSk=w zh7A5uSDmZypW^)6f62af7-F~4PxR~4lJ}gR+#~zGx7{gX0sX#wT4rv31^ z9Z(SV_{c-eF-jAy(yTfZ2mVp^tWfsYpt-CPff|B 
zD8Q|5g9iY`Bfj&CGR;iLpZ^0Azj^up1rm2va2~a(WL5*YI%L9z5={@a`PFxvNBq=qVv)i40VbWzVY!+=f|iV;gQN5>WxkZ*iCd#9>rEISntp}j zX7-29+t#wi8%5WeTOUR82b908{XH!7?$y!PZ8y!on@&uY*awuj*1Y?*==&Z}pn{=m z`UaM!9FG>rg2t)7tJe5uWS<(iE;#jy?CzRQ{6?ZARS?|CzZlSFYy}#tGISjkYKFX) zu3_vr#Ro^ypKxqPpT_lTcyG)j0Y*tcq6W?X(y(JUKIPZ8gS7EuWBK`#jH~Yw&}x>N zzSdXw>R-Y5<)n?%aB9Bzx$X~N(7Kracf7GF@%x4v_@w%ltp6~rCqWh)%Nm1H+?cTSKcBjC8=My-Jexsl3 z1#^$b%Z7$TMKm*QU+m~J`@_lb8H|mqAly??Ph6BH*{D2VS{FACc<@A@HfNi=D)yBT zvkB=72`9_?hW-R-{>O>U;7VPq6VKWmLXSw)-XuOHWpygzs{jtp{dW$|>;F#Zk%mG*=PoN-TxBS|%Gb%T8 z>CtYvJ5NrV+j#buPXU+4&lc~8rfNPwsA)LSfeQ7CeDSxiBq_zS^G^tJzoEdJ>? z!d{-(FCjfz9(+0KyHTrCMYku8c7PNE7l>IrhjpaSVb!tme_OH;~?5JHGfbD1mCvj%A&Qy@|MPMbh zvMlHuP@#6!;cO@a-^rl{(Km0we*^N?F55_qU`w}Sq-%UB7}iC@}MIW0-Sj>K=9D)*ZxFY}gE$IT_TawbcZS*!x2x9ouQ z)7h@jV3(NFzU}ERpE=w(n3v$o~J$W_vncoysbkIw6d8VKyKJb|7p>;`&Yy38=Q7 zxS6EsH#>69u`6{#wM|t&z_qS=L!ovmGHO61>!XbIZVDLQJFCWBzt8;8CAr=&&#aY8sbZMg8jv<|pU6ALy6>=)r+EEw>KZ<|CgM z?zF4zM(dJyXY3s;jAwSMxb|P6o`0-F_>eE;30C+N?VY!OTu&&h#NkKF-M*H<8nLhv zFgMf{=;jIsXvg59lT3;LScsf1`)oA{c%|S2F2xAaon~=O0MBnLg?tv9nklJcun~-cHb5sy*Il=8&ZnTeL&9@CBP~`3@#42D%C5e2R7% z*wc5RwIv#RbsPTTs{5EF#evh~lYY1Ee#;oDXHL<(b69>+44u0MNG4q^_ z%#^K&1zOzr@C73j-xQ!HBb-Zm0>&c;d3%=u5*RCJZaArtAVgGW>SFZ z;gUI6w`)*fFI@7RGfCM2OF}@$++`p7WONDM3733FHG+RhmEcbIEtJJd0C#uT-4;^Y zDX?@35f1d_MS>HvgbOqHrOd7;F_=vMZg2RV!bZFwAuSQ_s{;>%`Fq}DM{^xc{ND1x zlJY*u>3MimK-ydxTn(E<76Y!aPFtQlf<~W0xL^QCjoB%jZOvM=q|`3|L^H1NYS3W9VcE%1}Tcu@|nW z7~`W>=El7EFu#j0@M+WM&CAz4-HlIeD>|>4D?7;hd~+wyY$oZkt%0annvzH`{XDdiXB zW*zo<6Cd3E;Y{!#*I~%{5+i@lQ&pgk=r_eIr>Cm|-T8wtcWyaG#`BDSvVLeC9B zNZOE>FVmv>YJ51Z9`vSJwbl%1CQls{vUEkXC9@#{9=5E&G;bAl!&Y9Q@*#O+Z2X*ZFySF*vwL4bccV8VM9gsp8Hx=+?&CQZGbTX@;m;c7%Bt zn0@}+fJ0?g(5zri(_@%_i4eR*D6+(%uSLYXBdkSz-QLo=u$^c|a<~ZCfU}qzc;{V_ zGk)Taz{8gS^9F*LtGbE#M!#g<*#@?2i!Y`MiS6Xwk@NQKJUT*fa0H+CeK6-C92R(z z8$K%LtT7FE&H1m9hyuXjz0`?jQinp?;0~SgdpMGWGx5L!tY9$NIs^4>qp`X~lYx&s2 z4|PFg$_{8Ks3Pp;`^-F+SMz}W_1NwsXfOP!%GggJ7_SKewkBVEqQVp(feR8|7 
zLQK{`{ufrB&+0c?u3lhP@(?=Er|MD0nq5{27r*0!=Gpq z-=J!XEU{te_n!ICTSgIr!V>C2PfAWKL^G{ddaurl1%UPU+ckp%z=^6k$yj4Jj~tK9 zzL@-ETEFPc2KySz;mUS6zD*h!^w5!JuP?YJES?P^oeAby5o}F5f_}7{|J`0C%Lerq zXaA}`|2Jg&Gx(HaVXXwtM{_t04>RDELCo9p9Dyv$C8@=rl4(SQy=!Z4eW7IZdM%zD zXm%gW(_z=*P8=jS$p@dAf?Nk12x};3E!$=U*ku0Rmztso=(a1<%rF3%ckKsmNN{kr zwY|ot?CLXc;70!Y$E;DjuBo^WX*A=jpYFhypEX5GynxzhwdVkrZ|C!x%+$uR2t&7W zw2Za1CFIcsEnzV!<{$h$fhQKyLJBP`m>2Ww{bu#S4giC5Sz^ibkw6|;An%)<1TqO3 z)5X#fkJS!q5wUc&Az>zf^>?FyUJjo@jpjk=L-HSsM z&WzVuIxt=8!On#^DkmrR)V+og8`#$}a_&JiuQ>t);6|T>_idvgLUljx0UkR;Vj#fg z7S^R27?-CjtYyToI<39LJweUjcp9xTpOYR7-1!}I9%_{zDs3jGE^gpoqFK)kyo((vnzUjS zP=isW9iT%Pxm}5D2+y%MJ)rR$Lid(P-MVC%TdDjqmY{1=%unF_@nWFf!3Vp@HR6YC4UcJ4q!7_yd{cCFw;Ad9dU9me9MCa(juQww3tiX` z3MUQhIc;*ad4N$Mac+Qdv*9ts-qlu#712}CW7AQfQMQNW65$}n*}jGYOuLO_)v)ik zs=mLhhWOqC;@s4H*vWO}I zweg(ZhAW_OYDc%M9sLTj)uynWDMZfZuD{Sm81^^`1Tn8Qwu-yXU3?rPWjT%#CZ@iO zhVdMRWXp7$TVnT)I*p2_R09_t!D1+o8o)PKCPCE_Cq3RKrUSs|gNUM(f#m?vtJ-}Z zcp~KOHJ2LI_`yAA1}HlXZdSCnDh;skW-0=~TzE{hv1Vb;4JIoI0B0``d2Ixe^YZg1 zq3?&pAS^9i-IZK5{7)L%%(}={dMK^WjCurgeKhLJ8!@=0l1ve4bkq)eAc(`a8FSx*JQQfE2b58y@AP3aV85;RiOL6`L)XeyQYyMA z_M*D(*g@{z5@`wpQ6Qkhbd(VG_&m?P6>@xd)LPYt41J=dX<@t9@dm+9u_?sC z*yA+p;HyCblv@sc+=qMf1<;BagBs`owbw6#@dFOHQ?1W%iWQGT{PM)_zG0rb%rQ3^qHa*s<>1AbI zNwXQKoQtM-IeS>l8Zkc%Ye{r3R2MIP}lQ|+}De~TfYmWyZ zM>aIct!@3wDFdJ>OeWtVvbZkf9R*#lLWL(%+Pt(#@6{HWhfp2&!rVW8Tv_)D${B&b zpRA(1@!k7yg`$Tk(45Pdq(0;Tc#`4p$HbP3ImycQ#rY6(ojW4+4e9_vHACw|?=okYr|;`_Eeq@s;Xa*M=SR*of)~N6xAZAX^|@vS=J&xi z8&@uTbf>Qubd#oG_<}9#IBYD_fOh$)oZ*p=?u=y%aqI{s*?QLTC{8aB5OYOR1YFDK@iytTovhp2Zfo2Nk}2~_ z30N8y_#!3HSzc7k2xyc|nNji|^q^1O_hGgQ3gAA6SQ(Fb>26v>Seq3s0GcndqJ{KD z$ezUN7fkcUje2|_m(_dut=Gz%%77w}y}OUy+2z7Civ8Nq=(zcdT5c%ppx!6NeGZjUI^^kZJmEfT@%=`l5 z*-xC28!X5OA&;Bah|2fE_0aWKcLP6oV5Y1Hi#k8qR}m(Ubj+NEFONKW%yg{qI`4ew zg^%T;^r-Boi0I`=Zl8wug8l@$v8q)QOMT}eFRz|mgd98}!PU_tsgU;5_v za|NyY2t|*X_KY3uVLhM1DRf~Ro-M|K{y~H`TyHhHb%oyMd+=6z@eXL^SKA=Q6%o`aVzX!LW2(eF#RK+R#3HRSWk}_FX(ObcWO+{oQchF!tcj< 
zoLb}HY4tx)ifbo;2;0e3-`$BTaEda~lR|f8UX7;sL8qk#>x)Ka0Z0@Bb9oA$i>6Dr zr&ZMSUuImA9p{$w<=&MDlSr!J6j<^M5LL2D#OB|U-%~Q8)pKQ?`IGRu^j9$%gyqOc zwRZ|qnnqguOrAB;C5^vb`jBcc0D!oZ94bKCrm==QSRuVK3u)i%XC*lK0iyP%ef(WL zn0}dgrR>_^N+6DDRkl6B<+Ep($xzi9QGwh~suBWjiv~Z&yk3LFak#)!pObM)`i2>~ z*{8_lsPzMnSAK_>K-9a&D~D2B%=|Xqbx^5{L%mr~7|6FWf*snzHVBdZbG@E= z9(7n|vZ5A{Zua%pvAZGnN`7nY2czF7W4%co1@bYTD!hsvS^;WB-up`|mD;qAphqhH z5loF3Kwa!ZX`;3rVV@SywxKNDttw+tX0zm*)}<#PH!V|kzR!KjSsE2{7>Xz}Bk@5) zGrJqVz!R|Ve%=xOXGOb{a=xp%{2>qd^N6hz*gdDMr!xi zjF2Edy*-{B?27+ls|5so)kU=$hGgJ*fwTt?fVi`=Fd`0^mux%O*vAzJdt-P7BuYWL zeVi95`wCEv!*>HG75Z6R@_Ga^-4ezhhkJkg#NoW48r=&tlC)tYnKzz>-C;l`MGfFj z&*1CUXOjo3x`sMq5LO#EkDwtcJK0?OIDV`1D85#WKeg}C0dHk+kvGmPNw>NoU)-e{ zpu2d)6HlHddsSOjMhIBYE<2TghwYbDJX<$*$H2 zM9PgX@=0QVN0^ zkhLqGz$G`JZDkUZ2_J1J7IO2fgC!*&K(YS<>~mv{|5&qBNOXoNoB?RF`}TUWcrMvk zprdxz5p+EpLd6NBl+sQ_S@?s8n+^2#iau!p!9Z1f9)H{gP?CN&uvimfIbzG8vJ0>c zFrTXqJQR6&oZ>ffEADf$&Mm}#;HDtxLqSd*yAFCAEcNX${_JniOc^xHRZ3-M&`k|X zs}TbO8}P&dSzU-x{qUruxn5t38X4VxuE%A-=B07FOs%GsA0= zqJVD;CXC!QpQ`qrFk#zWp??y%0?Aw1^WA=^yOFgq66F;tO9YH)EnBr9XqPP76TE28 z=aJ8b*yi|zrSZi{brIjzjDw%wGU9X(-T3` zmEI}?F^j3O5;7AEV8jH@_b{;lPoT%!nL)*N;+f|pSXd7pyVok}mt~x1imhwmLM%;m z6|hB1JOT;7X)f=yg^F12t{6`e99ax zbj=NHnU6vDC(DJ^%$ld005l>EYXIaz?GeX0`El5ox){PgSX|_e@%gV~!>FyCqRlzg z^|4o6awbL)ok(z7iIjD%zPaC*G1Lt6nf1eR0j%3S+d#L~tqj}QZ|8cwNuAdWW9RKi z8MEJ8RvPW;zFK}~@7@PT2xMX`-Pr{so(={~PpVJb=R=1loy3iFWS`8xjTfK?z{`&R z73cosBpW{v84pS_J%R>30<%Vd(|po@Q!T=UBaH2f1>F(PlK@yx43YH$QU(Ais1p#h zilTpx1}d>Szm7EypJ_CEB!InDcB-IZ6IE@&e5iBYHi{i6j4v$n>VJO@$qnvGImm+k zC^udk(D~XvD6*tYbwHO#s-kEqi?p4wKph9}rjA|UaI z$68_J2ax9Mz%w6#valLS-6L-~&S`$jJAqY>)w=$8=Ctt8;h-;KM-OXXYK~zu9QweU zPaGi0)4+6$_L79LYPX*LW;JMoX!>EpNc z-S!F>(9N}VERZD^40~&wxF_G36Q)hNT@cib|9+X!FQCwP>JGKV#~Gts zPy73vsLKSQA@(j?Ll(46m=U99pH*12U>wWGU; z$?o8IUd`IEk*jQxswyN@erL~8UXzZ69VAU>QwdSVfRmpj*&B++E zx1cvIm(bUHO`D=koO-yWAl;dHa^^=acUQxbOlrz!J;dZ1Cwaiuz}uG}FXIc_dw^6F z@yC~ESBJ}T8EJ98zb4`RktbP!YS-t$YsIgO1tbvrGXcf*dkXfAi03)m2okmS*(p#k 
z|F)}q-TwCNgj;Xozh{nrv1oBPe1FIPeuDmA|7CypeUegIhF6tx?Ogyjm ztwNnsLKjapO(vbLu_V4YI~2HN%F#7YRi@J$V<;&e$Uj1xux@mLNeup4v%t+ zC_Ro>yzu}q%ugW5`dzagdBDS%BLYBE#XCX3dTznNXK%bM~z!a+{9_4?58);0{`=+!~Y}JV|&6Hwp3)dKsHZ0 zzh`Df#1HIvD_=EfIj}!d-)^rQ#MDmjw#ltN2XgzZN8sL^PdQCGPg9eZNI#6mjjZA=aw@E?2s(ji+pkH>N91!<1c&Hl15^C67ff_{GC&VY|*@T zj-w|5djMW>pC0Rw!tB^a+c4w>;b~Wtmjm|lc{x|qGZ04vG3$v1L~H4J!dRL>^@RK#^w%^x@Z`Fj;mJ@yKo`(CTaOcB z5s0%iBx~lEMy+2nf@7yx_R38k;^ox{_OX{Jo7^LV0xHdnL9kGKE4Pffw;T40w+4zj z-EaYb7+>F(t{-JV=RK7=@=N~e>2(f!;01M1kElJO-pH-;LDay=*?G&#;Q|#Fi0%WG zUvPcExRp8Q9{>-oLmYHoC(k~PkKNyW2Aoz*w%fq-{{1_4`4V>fn9RuAsxTwX0FV$vV#t8Nc`A&k#}_h z_wwvi>1nSYli<@gv4ETsy|j%tc`Z~ z%%p=aE2mX%myuA&2*Qzf9xK2d#f5dZ>9JPnV%B8tWQicf)0S1Q4}t4{OFVUEZHc%r z9QXJud(?}<6BZTjG}RpH?oNl18V~>S)`-VN^_v;zg0joIbycby>uz@^_LCHaenALf zt1{<-8mElwkKWVT+KgSaH8Xb#Ww@_I?g@q8zuo@o_>Xaw!{&#*V~=``HD8sDf=aP~QbbS$q@y(H3Iftmq<3jT=p8}`h=2-Gq<59xrGyqb3Q|K45Fk$;isO*P8R1*SzMOj?Rd*`%O$Jq<6__#iHOU zyWt-{EXZ?4@d$nOUir(fMzWI+cZUT`l^R5AqqbKkCW@qGgb&O8<6jgEwqTzR<*4$6 z5uM^tPO2NQ59j`7CATRalAqk^gl~%Fvgx2fSx{+@UbEA$%R@D8fzQnDLGCq^9>^E! 
z6eL}qDf99CAnCP{5c+{7F&$1AN9xpgsV8r)-`JF(7>5oBGDsXc3e^_f$ly@7-?N&N z9IEPq5FlUw=ac#WJpNr_V7R#J?zcl7^AW)>=dP8kY$pp|NI{kI3Amm>vA$fW`Ubi?;1@lzWw;|)Ytj2bEpV+c@L9`H^nfHm*s8a z@E`Rgss7ID-97XBXU53^rXx|rzW3qEOl3379>FwL##WgNwJSScd6|xGv2dHd_r7Z# zBA!RDx4c_k9P^=;lPXd1$&)v3$)*P#iY%t-=O!*E+LXJNGTG7H<%0jYhiMU^xNP2V zt_mc*U`Q5`$ZzsdIC-n{Phobtfu3;n`|2 za@Uu0M z)A2O+r3gFS+uNF%9mlVJp`JgQN50X z8t`moZNjN6Q`7RH(O#EKOiqSWy*ywTuoJwfYFylP0gvdM9$i02cToVCKB@P%v!zs+ zZl`=JUbC{5>fFpJZOX80zcJpF6A=K)fAw_(W|3vJ%Vhb4+;7aoEVzE_=$Iq5h(>C= z%Ky0iCMZ_?_~p>U0&kr31M>mFy;nn4$nsL=5AkCQHemXd`=gi;4q6b!Bz`KxOxIIo&g=ePekh>iXd%DnO!ZdYzx0)vP@A@OipmV zI<-Cj*HSJ}a%3)cUB3NefskJqsXLK}uLpWO;mMDPf5!sl7ig4v)So7ft?Pni+txVu zQHhMdHPPHvB==btJN#*84}a{pjw^c{Y#zP@#fkV=0`j=zA$F_52Ao3syQa1`Kncx` zPstfyz~ev@=k1Z*Zw*_W`=`#95+3n1`gtWF&k;8#+2c;lb7rc?V2nJsR@t=L-?(P2 zt;W`yIc&QEq7XKvs4jOt$!<->(H~kIB^-MzojB#i_luBQOFS*(pQkTv^&BeLz@B8D z_Z7Au?Vu`x#y0r6!K>4x!5W3=;U5_A#^hl6Rc*TGou#~Y{@8XulA>h=<|xrD0@lRv z9lv^R!h^+4f|dt7kHoJ zcJ&{_$;-`WPc9a|4*0S}GYF5SG-?dXjjMy#A8rlTc!8~~vg3+gpo(6bM6OL(jSrBX zm#8QV4$SBF8RcjmBYO*d39TKksmU80=UG19miL2C0E?Shw!J+L%sihPp3S(lfZ$mf z&WkmWCGqm;mTnG%Jlr!R-IRCi;}tLxahq`k=&o|vr~@e*65==(4tn9~cuBP7p~o(2 z$h$=gSzf>8QK^(&e*cL_t{Oux{@C|WK-}eL^q~diXyZZ@-B!o1$6jYU#+T(04EBM~ z!q&!ClbUjea^9;_wF@{-3NMKujy=ntC@A=@Pv%%M`GOWMmk}PFR)l1%LTqE?KHgP+ zN@`0`^$IOX}yDcQq@)$np zXgmm=2s^hTtc4N(a8p#vuzA230KDsyBY-z&Aa&^LrIrF9Ql}|JYLL=`LnZFVOeyhM zzP`c)92Yok!?!H5ES{$Q^>haZjVzmx?a_kNi;V|`8vG{00HvH$rW_n7;ox2Pf~&8u zd@#(5RwZz=EhPN;7YEN1`0Pv74+Si)0GlbxPeu;^NpEz|FyG|+TbMLpi|>FE5Mr4* zmD75G&w@jw;oi}a0@V-5EBcQ*{|z%89whG6Ws=+zrWlyZhMLd+o%Iyp^u zY(R-}uzyA?j7FB84~5A<+}TBP3Vm;SpCGF3fVfMto<>RNJ?Dt(guGQ`bh zcL)?bH~>c%J#bi{FH`5s1RKiu9P{bH0kitaB28o@Spt|g0RYF+0FT8jS*10>{v5JdU$g1&els5#(E9az9tHM?83a_ggiS+@<5+Lq{U}*PN9y^k-sd1 z>@2-(im-uU^~#)Q@6H)|R27c@n#_K0k){aCKusR4fbKU?s;xX=_8_6ayvc`FZzu!! 
z>i2W-eq_>RCU9bk4sV%p-JI07Vz0VaBw}ebaFP66Vy?zpA4C?7UN4~rCNRo}MLjL` z&nZ?X7?an_pWFGkq$$nc70X$PGWdfy_}^~{N|pb;1)W%c{`mEMTB1fIl|mWiM^x^Yj7$uiZ7gP=#>jJ9AMm z;BX~zOAgyVR!TVZBa4td-YOb#-p-#+bblQ7wbZ1jzRK|^P1tU#MMr$N4)AZEk3z1R zkF16ph77t-R&Ua9>9b}6&O0N|++YIw^HXE#r5fA*yGk?;UN942_MufBV}7hCX#>v!0Q0-lp6{m=3&(;Lj) zT2IjVWpHj64$ z5q=uwgG%>Kcfbor;&qCUzNVpR;!Q@+^E4JmokU&~nwlk)={uH8;;$)HtL|^FtnIj* zqrV|wz~RXxb=OdR1f6}_l=*qR8E!+SndXpLN%GIBoNt=HqYvZ5v-IwmZZ5+KM2g3K z0I@yKc6rR@;2+QX>;>ak+5BYZq#h=#BO-P~`am+Z)UM7wZ0y162fcMm=fzW5=T;1> z)>tJ*ywqZ;Jpr#?)B8T6A7Ysm1zgjWO4?{4+kOdw$HMx)8{bUzQZetFKQ|>FlPco` z^d@Jd(@rZ&cfAe?U!3jCjE&{goWN|?d}{ZFJ`AV=l5Ue|{;EIr(SeVoZu~|bKg

io$sv!1zYAg1&3MByjF-zSXMxM z&TD5jAE~tW*(@2v^(1+7{+Ou5G?aEtXytlb*z??=l#9@yP55In;^`xR44VIC5yI0_ z`>Tfid6CmKWcmq8zutFj`jP&0wRs-d9=<-5lB2~($7!(;_F%RhvCvb1-lrN{ap{3q z7rQW+u0LKd+^1k~(Z@9@S#30g^lPW|gb|#g9SkacU`ABz-DA<}sWpa0OHy`2*;~Y} z#xJ-Sk?#tDgA$3MH%1NnM*GmhpPXq11vvhkK6vW$d*R8z!hLpsJnKrX$wsCZn~IvL zByw+z-f|#6t>|3fjv*x=IG}v(iV+D%=c)bHe{Fs%-&z}v0>dPkM$%XEFxWpB&6D=q6Sa$s1(e{adN86No#;Pvp|7|lltBg_JJN-8Ie z`%^h<)+XzRiqijr_5dj2lMQD8)}3Dy&7p0&cYuQ$d*kT3~sk9n_*j8 ziwu zi{l0i82_@`72z!F4MXRj%JRIjF0mcGd5zPcXvsKigM=B8Q3ilhQTktyY5#Z8LM{4m zQ*DE}kA0PGvz0c=Sr2&d4n`6be$WA+`63zFo&bk$kP& zS5yLBM^`<+;F1W~v`dl$8zcb=&*NjNMwPT)1*Uq{IZ0KOlDb1yna&*Hacz-62tIFfIwP^edOcV#FC zrd}MFiS{sUw&aWCBXU`3^sY|pBFRl!t;svVfCN|+{Y?NqHiuT<2jL+x_wR(-t- zKtNBERjraSNCgmo=z()CE?RYcnJ5+2{quYTs|CQaM6_dQ`++=)VvnfFLY8vu(afM@ z4ZB#;FLvOHM;`eaYh*|*UX&sUV?lK_{nLca8vDS_NHXguVcuiPVfXqIM8d85{NPr7 zZg7TGqiX;7o8o@GUKQ6p=;!5uD5)cY;jZZZK!7}z;4NuF9xc~110W&>nIwQp<1Yd* zV`BV~wHEB_+=z3}KaILLN6PcIUuU+f_w?mD+1GEE265R2OG3ex_dnSrVr$`Gr3 zp8~F&IRv=ImM3(}3V;OpL#0F<-7?hc#B@P~uQyhHToz;m?!z zSAibp9S{9&W~l{NOKwVej8m3D#E!zEaS=e-_yp_?jb!7v%1i)W1SJJ*TYo8gOMYix zh7h9IJ|8D97^{o9IdV+5GLw^T;5mf6zsc2-B(ANv=h({Yp6_giRAKrHKMVcsXCvnB zw7i;*JVy^Fu5p5a8KNov{o262-2iFn-GbDtmt{ez{t{J$6P9}MdMUc7(8;Q#;dcy`7Y0LH$n`PkGY zeWM!nQFZRyI}^v^H=5?hLBSt6JnfY;WlVN8+=BgYkZDJ!5k3vyfpg>WX=|Xay7e#taC|Hewsf zlu*m!?N!Ndo~N;Td>`k&zptIVx^)q!Cl}yn{HH|OBmduv)XaarvtQx=-TuHqJ#~t6 zxM}~JDG@f%XbWW!u}MSb#;ZbODy&uf0FH$vU@N4`9yMwR!<;7;CjybxB(~|gRGBt$ zWnkkhgV3s{865pfl+335n1Zl2Euh_;?Bwn5oDPRHIg_8GNxUtOt|AVH7z{VZg)08s zu}Su6c1qyvg8a{7g@3t|Z%QZe#`X)T;2T!SnL zS(o%u1_h*H(`%asCo!UQI>1nq+TQrp z88{($4b=hpYa3?t;F92Ke z*@#fSv(cz@%kOKH^^!oucQ@PDESllTyz8>*ORO3K4Tr+YPckw{BsBMz6-@a*23u12t})djd!M9*F?! zbcn=%NL`=ye$>%`4=OgTG}SFN1yQq=whJZl>loWlo0v=j1_sb6uX+Z}bEth3>9{Y! 
zu95b1=H~q;W2NYOxTu4tB>kpt;nCG}Z)7nyLi!+EIA0I7A>W?XqgyPx4&Gs&W2`sM ze7yP6d-A}`zdXL&^jkyIW`}tCun}9>)9yH~H1(w|*v#;W9a%*Vuait2h_Cl<^``B_ z`U^RR&Q@xOI?mWTokW)_XbijE$S+ec6l787>bxHyU{G)Dg`2Q#Hmf(u^o7hzug}05 zr+~QI^Q}0|&XF5)@5MilOwOF-&Z9XAqCk3Qpi7XHAm7qLhGXpZ5or$ntlRssKvQXhR)`}oFX0HmeQ?X|=j+j)!*dj7w z6TviBsrJsMq#@~EHM=+rU(VB=b{MS&wQTgn^RZ{D`75c=HZyXMkl%t9=J-3l*)J{t zxCxoCGd0y%_mv~J8~mA7xA*e#eGlhwOWJ4e!hh z|5sY~0K68%;dZ!tVbDCQ+3vp*YpKntq+l$#bLOX|1t3c-HFx}qON=Ug^%vYD{fRE- z6s=LJ`~^XKt!MIjW7+b%GMm6Oq0F4XQLBF20AdKj-aX@VM_b6nTJC;3x~6HAuJU`@ zn&hKb`a=fOgYL)J3xf6hCe_XV$FGZB6BEN*vgLl>(YR)qWf=jKHlDGHt z2(F~-!u0i>U%R4jH#VEJjmcQAdT&|bMIZ0ruoBYb5v?BEeQ{BJ7n1=F&J-u@dyYQ1 zjkYD_u^wUrFb);TTLJ{lTAuNua)tRn~XlKZQlt^xMrc}lU4q@qE3_c zXDC49>x@oo=$f#iRTF@)wM3`hONZ}NBg!sGWaHb_nR*Je^Q=dbTuUA6E4(}HyV7~` zCT^*&dc(QN^W<7X7HFO0`-@#nqCT-B6UtWqo$ppMs;HfQq zvC8FHy9`vnC(DBbOe=al>Gq91f7pPw=*&lEV_5t@eO?ffgFxx2wVUfCZdTsXD}Hsw z0RA#4O>hQ2w*G}j7N|f#tq!GuSQdNYD^UBymD|=maS`QYmJU;~&8M=O4%pw{5ShX5 zm=d#Z_$}=^DW^-s`}XMq5{R~@JYCOrp**YpIAwn0T2?dvy$`()LixNkPGmyG22NtB zuYH@-Ix8anAsG2@>~iZRfWnwrttU{5w(NGd*04g(bJWbkn&e6jP zO+MJi=R{ob;{k-hX)=(Go4I0o{c9iZ8{KyIvBi>k_$-)V`lA7^Ek(VlEzCp`7B8km zDOz*>n&7ZSWcp_*W*MWf!Rp-P%7Bl6a>25s`&yJpd{<^<4RZeF?6fP3awmUTt;Wux zEz6JjD4AE)CQbemH+83cS?XIEs_{VD^zyXf%8>Yq;3ON`4>@u7(vpT)dz!B09I8S; ze94$6usWbs`bCw~0O*478xLUn%TCRcOhD{FI*}mawKAjB(IDAsiVQI^(o>m{s_PSF zmXW%1NA7v9+U%r~9_fhq+N#fFWQUqmZJzL=#JF`zEQ#1tq|NwG`2L??W4#BUp2zo1 zjx}1((PKq@^q2VFQuD4F9PDiH2k$pKn6}J7&fhhgbt?_u12k)PgkAYkQM1#EB_n!( zH~((%T{G-u0OZ@N1AQvFTa%TKn@C=Y#pmet{-x}#L0Iywp`eepuDhY5&Yc%|w4BB$ z8ekl-@7px};zZLX2~Md4uD}{V*e?M2BElw5BI%B`O5#_f$j_yl8Ad#hN)4J?W;UEH zu+_|^KU2$&gd37Y9U0A1jU64${FP;Pe`&+j!yeB7`D|rC4>e6V7+QSZsLKX6xpBQe zm20#}_W??ZJmSB==akJ+7WL|En#`ejl@zied79SfbjNrtC(v~~9~w1*EWBj+xZHI( z&)8A2ra0Q)7v}=oGdj_X7{L6}8-4p}&`+l)Mf!HkX9Nx&UH|7T{ipDN^)9f@w_QyR z`diP?)7+Bat-~jJrGGP^(h!R)&{7qCVXgjwafU~z`Jl~E1Oq5SPNY13MCrLnWZi?g zouIAfNWz@K*)2aF!Tn2u{l`0E1=?ulrm1?10)w1EUz1=nu>3h{1`DwjL8KBviRl}y 
zpd^%JrW}8}mNnIrjEWi1(|t4CkwgxpVScq|NMf}{r{PsZ2ZCWUyq~<{)#h{fQ8Fh> zyfRB254<%d_X%J=VCaODyX2YTJj}x;nK}KJ`>Fd)dyeAG%IP2IIGDb z%3U>q-!!U@SWHW!qc0&)G1dHduLH6gEreBo5i*tU=b1YO)+}fCT0}g=fZB_QKd-%* zr2`DJsTrJTPWFGTY1Ckdy$-j^iTbGe?4=v1p0U@Vd%`2OCo-D)M*F_dFYOP&8itU~ zmeK>m=(hjr1>m{lGU7^SzAY7HI=m@w#BSL^S=xiAdwFoMAakO066!7ZtbtjeRNDY@ z2p4Zem1-?daSl|Z2&5cCKAH470zWbAUrHN#OS1{L8}1>?!rS??N{B8Y|A)Ubzv)1& z1CR=O?|^C=-l%shvR!N}XItMHzbM$zA-(5E$e^PKdfX`j!DsYm=k8B>WiwfjN}1~b z0DbiNj}tUOf+wsyd0`oU@C89XYHMr}i<#rUz0lYU?Y)-WdClghSsZ#{m#V25C9=yJ zfLNt&m4TO1?`+3^nK6^s;%Go-w$L9xJSuwlq5MRWt!K2bm-X@9voG}I5u6%nalPX2 zYa{t|M;bRKUlju)l1HXYQ=OF)AZbzZh>Umee! zO8=G4OqE1r2_Q6_(gu2*!#!|pmNV(*)20#bK=1ae<>Y^i?8;9#cwKtSjk~$0=Eoc* zvtJby-uGUxq(Qt70%8ljQsWpPA^HQDslJ|3wLjOy47zJlUu9!te3o9!afTa6+J7yR zzj3`conNF#&lLu)G?+C3mCl`&U&X^C8V3eJ`?z z1X}k$X2#oifK6#=^1=Z@s^?qO>EJNb0roYubg|Ak>AX7p^==?xWZd^}>m`T+)KQ*c zKyot4;$cy61jB=;n=kc>u6F1IZXWJI*>ysfLDi*;A=fp#03B?xj*xi*50uN@Nkj&} zko90C%ezkpu#9HVJI@Ml8Mo~B*nK_AO2!!3QpW2zg)UFKx0y~LoA-?iq&3usa zlrW^H1->4g^K|yLjgjDKMXJ~KtRv#OUN>%;-_uSw1VlcLbxbm=}*Z~ha#rtezU zvPE!-7gdtofLdM>S;W|V(h^`gM)0!JU((ew=9)CAZpaXxUOOrASsBa9?V>tmCm<-7CjUVaS`~b;-@>=`o zi1O}E;6r7*0lCVc1EyL64Y3oUo)(o|f+ru~KBg2E8sDh6I4Yb-yVntvY;%6!sH?OlY^%+*wXDANYkFff| zXyS>m=h3Zt>!9EG!&n{Y%1eHpri=M(w|>o)`3Bdl{RfcA(b;Sf|0G^d>da0%cBPVK zAAld=f*_|$wK5RoB+#~=tKh)>@^YX)pY*`KxAqgo}F=<`+r`%95ir}3tIB*1E0Sr*fa%>j_~L4duoL})BicBL=@BTEY@ZVu+R@vt@^gk zJ};yL%kG>pRoZ8mv$zq#cPO(||5USIi#}&6(11LQLj=eJ1a{-I0!xqY`Wa7>g>KmxHNO$$pyV?y`{Ga1$Ja0tFvDEz3w>#0xaXEFoYu zHJ&TuiZhJj-%O+IEV@T<3xF|?+0pJ@;A)ZfPLG-rk_ET*T$($zj#LYrO13;+GAX?SDY)oFonC+?YZ__27y5P5A zH`ke&iVLZ12k@l0-eS~ZQML*dF&cf|t4t`22FdJs&#mN40a_?yxG309m9S>87&4Z= zt`0=e=4b&p`QKp5e?j+hZQ~0-`9VKrpChVC8&E*9E8ut?bdA*jk})Fjle_|M0!yoA*=EF-RZfl77=MCfO&kLX2S5AJQ5Umt0PU|a((;Hx} zVEbcCP*V$-bECtvKMBsaP6MLgR19_9wLf;Amur)O`#Gbg84Iq<0l2@Jp{UH%Kfc|p ztw6S2%_S=0l445@Z~*|J<)4cm9vz<0RI-_u^gdwhSjXB@v_~*yyWC#DhdVrx(pt_q3T+d>j zeSGM8NBQI3cTv)>-Yc_mbF1*1MGAYXCh*#+!7Go>Q5e^`DF97a95Tf1hVGt36%Xs) 
z%PU28wYDp<$80uBx?zB-QtP*s?ww0i9NCBqO)Qr9u3=;!Z}fm{CK-hP?4*~FtoFwG z(v_1=KmdcFPm+O}y4fJ?{jFo(zI~gDIah|)a3bJb0}?MBovnM;j13FOxf%2y&X<@R zFthK2mUo)=@hPvl1v}+b4%qHRWaIH*Vuk;xAO7uT*17Odc#_;T{VJ;v|HJP!OOL0` z{59U+;G;D>zsBqp)U1*5@z#-I5~jcmUN1!&fLKUlzK17T_3mm~509mn;-^eXzDS|g z9_twa_20g$5qn3O5;`hnswz)2uU!f-;>gt4%k-s@mW0JgUcwOTSs3MRj8|I%0$lJT z?sx%qpzXz_M)JtFVq_OK@m)7eKOu4+q5O9gj9kb%Nr$Ss#S^UGalChZBeZ0+ea{dq0*rX(BqPz zk5UE?z|iZjuep$7$}O>ZISvIcTbb#5zcMzi-Ie{jZTk*6>kGftL)BB1wV+}3I-eIOCj7&zMjdf;MazpVXOsewU8 zz}o49$+mJq$BvuxV3(k2*tuY~4=Q;u(2I+&99b@(zXVNx$c|Ra`BSWKF^S}R+!u7eY5x9>tB@md zV2CI9%X)=vqlu}I40+Pk(noT;{udR+1~AV*Iks;1$;^-$P8Z7~Zo82Vo0mGgBQmjE z+EYu7YHZ&zz>E+c7*CQo$DzM$MYTn~U36+gBQhT=O z^P7CHC=}4KWlaZz)O`CAo)_7YzO-tmUme1{0a)WuVK6oQ3Rbsq(y7BHA#%u;hE__qMS_>qtF6OW9P4L_i{l@MboBP{T0;b+E2De)kGGfhqzsLWoyDbz7Y zCXvh$k7QZDGkrjvsG7*HoO(S9tP?vH4aV{xt!FYhHxk}wYbIW=*1x{%ze+iXuu9*S zvFQ;@2L*{gOP3d@Vg2EVgB>0w-q`_ zp1=@J$HlsbUrKN=a7w(Z%(k%5B~*c;ClfXr>)kTXs$U8?j6pAL!9TSR+TZI&|AK5T zMnwP&Ds<2D9fODG%&&01%#T~v?ew`(fESwTTu(@nncoh@^cwMk2`q8oLcKV-vs)di z!SNu`Zuqrk!LkplE+m#`{v`h`=;(~mZ)3YOeT|R%7q0n6_K|9^S_DBripx8mZ}x1Z z)4Z-hg##>3kQ-$f-_YpwWw!pz4E;fj!(@H?n&p&H@Lhw7Y~(1q&(B4UMzCYsU99O4 z8m={y$#f(HtnZ#?IOM+<#ntYd;E+L~W6MPehT~0IyaS`$syU^#@x!;#7oZDKd#6A6 z;jS4p+o9C2nXeHiUy1wd=HLJ3K`rjTo%GCcrGl2{xt8hbZi6Nzu*DnOKgf3$oBX^F zrl8~bG*+m7=~72P3tp~8k{T*9VoLn}!Pu4Ls#i8YVMC*$Zb`A3)aLiEeat#Xi;^OK z``}NzxjJXX1h**IW!q;sYwscf8>@F-I?sCsYX!x-Sv zl+k)<(c3^N(6fupfq3uys2K)E(lw3&p}X6sEY)Paue}W1R6M5iB1Ia(ZLEFw%gn1rO$Vn#WCY8WJ~4s0|t+7}E*(arBcXcN3)Q+<&BpY^mnJRSskd2L1Rt z?W}8AF#K`TVCp2DIX6tVQ>ZYEQGX+CqM;Pm@|jIB=aa~iXZk~5l?tn_yNu)c&M!<= z0f&zU4O96+3_~=`ej=JuoH8`S*c@M!B|wb1vh1gsJ4Ac{SMX|BEkE`lO2( z6ts&ySp5NL_G_ZN`IlXKu%eE1%!N8(WQZ8)@HieF-cO(LJ|@(;WLeap;`XrBZ0Pu~ zZ7#BGc>K!IU7c>)r3yR?kZ@r3^6UNDWGY|fB=$PIj0qruxW-S-zebay-JzL@R~0V3 z)9o6@8adad-Jj`M)!BH^SkeTO8%*&`?SpveuNjM1sF+%guYn_GOQdx9pyyCuv`R2|hv6j%O8!S!4)li}(Zt_!bdU4@Ro4V!=osl?QwS3K4Y zs*HCc-@Y9{RdO=7xsFb%FJX{f+Ifp>bZ;07*~$4F2vO8jW|!Hnj|80f7<{Lo}Gag`B7Pd(h=gaHvYJq_nQ)z 
zdJp>6lxjqrT%zu(Gn&-#=56{Kj_pc$YJ4^=!|P+Vb{aa*M`YfYWk43oasrq#j%;ru zy6r4J1uc*)WbKmCVH}Ce)Etv!?#0v~`xvNNo8V*Rzdc4}B+xi<5fiSCmn-tgL(;k@zTb0*6x{m#Q% zNHp7+!}y)l>NP>xYde*!L%V4s=pcIrofkVMl^jPA0|2DyAfh#!4{bz2I|@fpH$8^X zMyoT-NA3Wn;p$Sgd9 zzL1VA=-G$VOztwb?K5N_&whU}FMDoCWiciuR3}Jk36m=$3)TLLjwRGZpwqX#wIE6) z{W@K0W*M6R8LVxcOt59=@y9MRsKCBtxCV_#scq)ue&c{W#SU@#Q)z4a6ExEivzGd{ zH|rwjVMMIz*ES^5`2UI#euc%Y@6^P!Zf-CelF6hrZe!L=+FodwL7ik=W-F&=dQ5zWJIdPM2Cy%N z8VsXNJK(QsU0>Y1VHfAHwwa4Hu6VDBCQ#WPINIXE(W-(^DZvx$9DSCn^ zizOYuXbkvIwRW|hC(fm{a!hug#z)%khv|_#sQK1X7k86^VX%;_;}8bA8ybhLMk+qX0bVu@g;UyU#fi8GP>Pv@)k;A`Y&f$df7eGT6ex9|apnX!_ z*{$_n@v%Zs>F0_dY001z_Bxk*M{vWi56*6 zF{mA`F&WssN%U8jp!6Y9Bfud-kaZ@tK-X1o+3q#7>&amt^JmAON=5@b>3weRR~AGdqcV+x*!VB=&kQDTh< zYV1mMgLYH;qPy&oyhAu=s>)9Hq6w-5hvcoXeIWncOat!M5OC}z*n}oi*Ig7@Kc0>` zY9z}iMZ)@vP-rOV6Tk+FIit7?sk$yc0~4}l6FXVz5p;q+>wT^^%Zg>zb4hyAH1|Vq zLpyibc5dTMhKQZwcCTKJqpqc?6Dpd(hB`0u#wi3%d)zNBNmmmF1q(wBid)`8*g9T2 z6+JJH=uMMGH*sPJbBY5s2jPebb;bwu2dx-FeOZXiG%95_M+?9{cr`S0y0Dcw6`#`% zR9X=LaiZ`+m)x>~oysWX#f#Sdjuj|BYI?hKFV@dRou#72!p+r@ZjEE*8F&yRDqSa5 z`e4L+e6OD~4#9~{A43E{Gtn7D`;xG&4a3ee`^Fcm`XpwM#?3d%Fqm*s=fw=Vwy0+pfO;RyfcQYAt61!qHK}4Y+l-0r}^D2$7-U` z(-ircim}+(`HZcI+7gIySLJ-!Wy^cGx=~Co3#QAva>=l$w2ri4epJc~NMQV?8Al3k zyDte;Bd`suX3g`GHCVTMvfk;4IYkmQJgRwjj6{;$no^v2*nvOY6$!{_(YQjYIc;9@A`l z-kSUmd306_KpqkijLCzV-y{yw;?)eh)a0MHYK*Z&l^pHR6y$v9ex3ZlCRzk5mU>zY zgi5l2+=V?`SK=)39|Zj}BCz}r(aNE_uoLypa8R+RdO2GsKv>XH9qBjqYw(&CmN0E2 z#z;0W&~t(9QNd}>lE5#Y)r(%H>wX(L7*aldnggYPd+b$ghHdH)SoUG{pQ$U?orf;B zr{esroT^EWilDrFzs$xJ#e{{q$L;7CF)R!)6XLM!>SA|HU%p`_x|AXpH7Si&5hh{?Oq)_ znPPylb&-4fpRd%8sKoeXI@7FMH(%TDgfd^%MBMn z^I#w{o}tVUBbP%`>=e{X9_8q~_84mUzKlE=rxPGQdZtZ``ham+Sa!|Iw3|#nQi3!_ zHq^PNIPPm6)r<6Yr({?#Q(XjFOO55bcns0q*_bXVDUyr85Q|33Gn9DNq*28MqYENP z^ZauW9z)*lE;tR-gI$nI1`QLW=b1)DxP?Zm zHC;?^G&vWXt`2^S-f`)S3jiJr`3Q+v0- z8(FP+{=V+XxWE8{VJjt8zr-(z4`hP}BldYm9QUP*&~S+o@R|k?TF#aC_QOHeFKpC zg++W9KE1jWy*Fdzn1R=tF~N@ijt~qw4^qrMp*A;$-;M}8ALz98!X=-V2ruCLRD=a_Teca({^t(QW<5H 
z$^aMv*bjFW9{rTT+74@=lREFul6#Yaq&rVly~6@MYVNV$X0-^sN6(s1K|yxs>7_5M z-1?vB$I0&8RtfV+%zoqH{qEOZpHSk2BxXvR=rW(ygWt!m^{%b;_d*G48`c^4rq$_5 z6WY}K5DBi&8Lfhcd@nbeH{^)RrkIB;yUqC78&6lU@b_D*)QEoDQBO+EWP*@e+Ez=_ zw?3wFnSuScTu!^CXZIt4`o-8m-;H{Ng2&{Cjh&Sl&Dc)Ko#-npE4)%{a}Y`E_q3C$ zvQm-gl2XmX+Is`Gf(?3Jy3nwnbzwN|fJOCdQ0F#m^HJ8_w^`3RllEL`w|4t!b#E08 z=c#8ky4}*9sReW27nO3KBq|xT?1o=$)zE&aPv3;OJf5rFIs7#6%zyrK5+I`maEQNV zy13>qSzfL3)RlxrpQ-&pj^}ViYunp5J@;vt)_wHXrb~jULn7SXd>|dBQj}l6zV&mn zpZAmHtmiIx{^tFO9t6*oXYVMkJQM6pJ}Xd#Mf6Nj(RI`qWs1Re&xXHXJ4YM}H+guS z5V{p@D+T!gcK(Xi8hW_2hzzy6LU!F=iE<_6&W*|&47b06E3CB>wR)?O@^3^OXLe6R zq|P`)LL-Lk-b}2IG{M|W->?NVp-weEx^((ApN%Vb9sA)0`WoC6n{yONjb*3z8edhS zyp=<1$3Vbz6;kYAWP?nR;(J8iV{p)bkH1REH)4h|7MW&f9@k3059b zT6fe;a6xRIM8W3EbmpL58>%vBZLY=4b&+y`T+C^Va0c}UEn4f2ugf!yI2VZeXUaJv z$tO9DuV`reldf3;9k0ThHLLxfhTBiKQ>5+_2xZa-*5mn_9x<|~%OE$0yx_n>-sXKs z7xC*7CL~X3o@;Dc==ylT(Y!F-mRtPBYq8U8 z4P|n?7$dY_q&Hf0LSV_Y3QwB^*; zZYh24J0<7P%SlN__2y0oS+nCk=rdpG*GR$MV0fo0l=$%LuZ5ud9Q2>dOeCsYFVIoN zbmFJaKa{7JwaIs23n~kFyJOR!WaEO03e6AnNyE;L%K%pE3(aLbX zDLm?4StJF>IlM{@{gVzw(Jy;!p2X!<-P)iu|MStlIWj2G(U@&%`W9w+yrh|}bZvxm zfy*rS;d|Hwn&s{EPb%8$y`GWt5<557DDIr+cJ}p+yN9Niiw}<7JjL=B=r&fXfMG^& zkD|uV?#PVrdRN5n#nBheT#57hKK{H20`F*y7T*Nqx0Kl+SiD4@!Z*m z;{wo;MG6o)3tuVW0ONN#_=<~K!d*k*IX7Zy1?nGLCj3t~m+kk>wX$rTcrzUweL16^ zOa#0+Zj$uIot0API@G+Ho1_15qZC$Y@cNu-`Xa0uB>2ueuCfS{~vfxUuRG&a;Orjsv81mwqxGc1vNoL#dmt1jgH3WG- zR<-3-xWQCNQLbin;B2~>9}kWCm5i5b;@jI0kxuYrsaAN)yc0heYEDDbbGpYL2I{yt zgX0k>d3TlFP29hu`0T381Es`d5P7os8?%Sh8Q7oemo5!n?yj$?ap~fI{Pcy1-i3Fp zUqdHu(R&7k(Y!rd?m`WZ_kAG8?~J_Pk1L{lPs&?s85$PzX&xfQc zot@P=N4wHjevUm3VLkGq9YZ=p|(aM5gQym0;PLj@$B&AcI=v>NHNOR*#&^JS?9 z*S8l!93?oiY;xz)G8fhNi81H&6k;7uE~jc8%JUYhvTV>rwc*U0 zVo8$~2d`w0r7zG2LyoDlX-KO{#OK>L!2+Wun1CckX%#BI_XObGP;m zE3ZK1$J5quFIaD9TZX6Bc>BZ4GKNSQaMDtn5xmtnGh;uj0*yJky=!r~*s;i_o5`O& zoj{n?Y*}7^7>B~;n@CiT-r9I3H? 
zbi*+4-MIJjzI%Po^X~or{C<3E@q;z6X70JJGmi5(%S`jhhD?hohRu>0#=>U`2H&{8 zYLtty?rwi?Tpi(M36w|H@uoxbyPaMx-+T{SJl^4_z(P7tK}!uAGZ$+mXSM!T>bm!@;3&qsU3gj}> zO{=0aK!|a2+a5j{m?dhMFSkIT+Sja_eB&W{7NLMfgqx1<#CY-~FuY%lRI}k-#wTQ3 zLbes1K=2F9v~k>t;a3CwKJ8MKruf0}Sk$0MSOj?z+nX{2XN_$_{tvtaTNOf6J zy*59o9BtL==3M|`M09ynr=hcY=~jMu*Kq}Sg=0rKgCrkyhazq33z~{m^VDk3j;@o= z3U2FRyDO&^hvd#i!i!?P1vPiU{Q`^ksKnC~1X6EAk-@z@sneoJqC zD$2FIsk+O|HGbeS(JW&U?k@64GG-?8s1%H}YqUPK)>-@lE)XtsS#UCqbmp6vNICo% zd(_$jx9<65F0&ry27h;$Ges=K6|C@OpYugkjVri&GWp_2f1&>2(f+=S9`wh&UXgrYS}8PPq_FL>RUmDn3c2TV&I@3k5+nbE8VGMzSUL6ZgA+8UKMXA415 zqEqO_L2Lp@4a-*Gs*xBgPl`1L(8lzfCA%Sf=0dig0DC52tAeLBm|3r)u}zY%_2*~J zgd7owR?`j=qaH-$3f9|<-l5dQ^k3o^GI_fXMyK>(gr9YB#zToVKeMfgJ;fq@fi=5d ztRn+%mHT!ouWd1CJ$V#e)l8RiK5JC!crTH&dtg}<`#raM*5w<(jD8|FdIgPlYl^R$ zu)0(hy4lrC_9%1}GgHj*pPJ0b+q5+QEuOmucQfU>80+sA6FZ~5k5p40-fr~H!a={B zMXe2_YT^49&Gl2=?q_h;GSh7aZ3y|w5(?kBMz#m1|B-g%{?bU&nE4BF5B4}@`RJY&%=d1&+ zsI+ftW9~MKUd0eG{)xB02O4A}A9_)rm{P-E=_T41M_&Z{(Y}dq^8~#zd>BB#pO@in5>5!g34$K8u*OS@OI@Ed_L#O9L@RJ2QW-sv>97p zl-YE=R)RFQSqBe#Ki7u-bUjRlNKMyb#MPq`+YXWNt-Xi4qyOQ(L#Ah6#hXvgdZ1Ze z&TRg`{!{FB$#LUL4q6+EFWq7jsuVZJ5vWL8aT}H?4Y@H{*QLTz$AHbP|7``K2)Wo^ z2axQPoFBraWT8IQhAviqjRL3KGKuFbd&IvQd>>s8;z(=O;hq8BmS`93K;dzyMn{BT z061?r*6GLk38gd(G~e!N$qQm!%{fuv4i>u+8-+-7b?{v1T9cMwffc1AH2V=nL)F|L znunI;G@W|DsqX7z0aD%1r^|ZQ3)I#RBUT4%e~aF1L5@9QHN?ZKqn-wG#O9{w@UWBV zpFDQGVM zqk-uY2CZ!y?WZ5J(-ZU$?ITOd!7GvSRuAHp`RG2_kJ6y#8z^pRi}Po|BY$a zq!8>D^6KUm=X*f5TMy8vJL9B;lmW$6GdgvaVF>zYL(;F05r!fn3rCjqZ_jmFp zT)2YifrkX4-29BHhSd@mOuK>UxL|^o@SroB5LYZp%M|5Sce;mW7?v$S$nNVA_8V#< zQQf=d>PBWB>l<$_Get(k&@C_2pC?J%x!SST2W;Ce1pAz@B`*cLG&v{{U zPBYqCroqwZ1vdJP@25&Mxkvp;(PxEnnvr#_gA;?}e#EjiC#EZmr}sV_sqq{`2KHBb znOP%B3b^Zb=V;2Vqh|@G>}+n^8v?P1+ajkzg-wGH;A-7K!#~@maM+3z(~3$((Ce#Z zU0u(61FWkEO8Fet;!mGlHw{jaQO*-`r@HS_Zq~%7pd?MjiJ&eV)?Ivk5>0ZDnJtF1 z>A1tE9xU_DIN;0qg$MWMVby$-eOQCwHD}|qWoB?LOq@dhOl!u&QBVAB}C7kCfldl5`6CVMrIV>{&A51$qz93Sv!C7n!LiL`q}<)5-Wr 
z6GPG&bIPpd21_K{hy(T#^QhCM=(h-J;wRWnWOG;1)vDo1b9DH?yj8+VWoy6im2_fkvD0b;-a_i&2Pw$mUm1u3Lc*<(|QVSiA5@ycQ`J zT`pK78c|%qCNzg9wfN8~*!58HDL!n4$ z7yJcR?wiNdzZ`w(EZK>ZuuKex-#qvB^noSwi}J>GWc%dVboC_YyneC7Rho)JDy|bi z12_i@(=IF=X}7ra#G8-L+Ve~~`PK>vIt-Z%m&HKW%ZurD_$4jELYhrVTqRwO8}{z) zHdTtEaD&g`OrmR@@Qg=WSPc$385yxkpH`y>okA8c;WQnWTRxQQjPH4AQ&W8QT-Y1F7sBLbu^u(k{$} zJs=)>6ftY?^BI5edsDCCkB1kxVs%<-$0zy_m0W)bxKd|u5Ar$!$NT&z{1B&-;1JZ! zYr9na(A+emV3bO!hK?Y7b0?G-DVEbS|Kq|(#Xh$KiQdBm1T|ot@*%T;O$pxp+eZHF z`7{9Ja-YdIS3hrk4tg{GZqgO6b9X5%<9JX?mlvkvxlAvpM$|E0aMu$o_-Ok`WNG$; zb}=e)!V6apQglJuO@yyNboa5DG!`!nH`Quh6x`qL=;xv7yer9lMp{JsgWRle^z!h+ z!;#@yI^_9DE&rXa{MRq*va+QwoZn0^5x*x_>PaZyJar5(Kd8+i8(k!YH2A+Heq?KY z1O|#7%>=j@M5~;=j?75wNGC5&7YTXO)osXUiW5hR8gsQIfu(DOI@pBLkN$-|*+g5E6EH zQcJA?^mYyUO|pK0Y07+Z(xZ1$$Qq7%e)Tcf*v1O=Zf88dXg@Cj>EOApJLmLK;r9L+ zTipSplji;cl%<_0 zRUxuh;YQoC$_!jP`_!g)v&pv{=U-b*_&cHC%aX-;n<0zgniLfVT_s^PCq_x~5SV1} z7u(!DT$cEQ%=w8Q=tRD*VO>Os+rG{GRj6!=2vjqn`;MUoyUtHh-BUl!YnA1j?3Ci? ztXz{W_^Tq5s`B7f@dK!zU-6kA`NTudHIMY31z~7WWPkNz=(JK_eAS zDtU^M9tz56;$0|Mduq%Bqr;`p3$J*O1N1TcBB5~d zzt?WyuliEnh+Vu%W%uDLQRNbPS}1FIqX$yCi^ZTNt4bOsh9=H~DStl@)7CV>{HjIT zi@}FO!lo#}4qcL%+gY8E9)owqLrd>iJX%*yzdpOOr$1Sn?n)Y3m{}&;JAUN&$rRTR z&`{pVKJI5myK%xZIpeEhFHH;%Q;=9?ZwI2hhq2n#%GC9@vpbBM3NOApBZ*4J>yyYL zN}l#(Nd#&6O>d!BW(?VR9a%@U2CAFn_DE&%|Tt9lB!K&pbt^5T@16Kho-r< z=gY}8tSLQI*!S1LC>lPul$WCvrs^UijrT%UN1Jt%z+l$(CA(Oq6g3l;FLM#M!TyO4 zs4aVAD6wA!mnp;xbPJ|CU`AjuSR~yod)5}|m{?U8?b;WQY_|(^O%UyZkYB7FzX#AV zRhMb7czAc?NK=>MBYQ%&>-z3xPbdJ)2A^t%00`{G&2eHzSGp#PG1%a7RWl1+1qZ-_ zC`ZRXSxI&^)PR)uRSO~>HP8}nk;#CT4MMX@JsXg-ngVe`(Un6w0m9; zh^0dSIlok`S-aq(R6VR$$ zQcCikaf`e%zMDGicr#Ae9(ar5G=dzVJZ5ds--GeIyP;lzi@M z0u5weO+Fky%)NFCBRmy|WR7N5tE*`^ox@-FrqA~e@j8+o;9b z^ser_5Kj0`$T&ZQcEI!)xB0;}n z?#CZ-iI({(<865cM#~jIg~goU)aHW%?MYpd6eT)&xHbkX#HR20!-HtoXcQKADV!I0 zE78SXN!dkymDly5wGdR62=rQ%_M>=fmvDO?E!ImNJ^A^)U~4a@qjU(oW@Gs)?y@K4 zD;wHSahWDoi;K5v4LSH0ozK-VILGXt-jzr3*+7_C?9$W zLp4r8tPS-TFBL-rqss&zmZ0KefN!T&cRW>PbCz52Q~c)+Vu-AV+mMZJCZik#oe?sc 
z!Ou4)QHhq>pHNa+20xapD%n&ZjApZ9HlR(`h zJLtRT)%=q%;a4peSsB+0>q-m$mKXuQDRQ#l90#_^qGDKNu}8{Gb$>d{y;Q<)gV?;8 zb!iT#wefos&pxGdDJ%a0BaVG*0SDkfaAAl0{@~0jvhSE==0{prR>zodF=W8e?z9xl zNTqrmxcEzc$PU?F|KTHe3SR)+YwE3yxYw=|Q~bX?kKO`Wvb+WwY|h1MsmV>z)t=KI zi~>qn)gA&t2ZSs#PLfYAQcP1PA>op+)UTfH3+s`ggJ3Q!%zLGrx~L12cje}aKHOo!qvt;O~@}1%<^IN<<+*!{d4#)mzru@_b`tin=SGq$>fv{s0 zAGYaNH#J#u{n;G%S((y$Z(P62;l8+?WZWTZgBpV4wLTV0O>Sje6uX^faN|6AD>(kz z$M>pK&d0sBGH@8Hc8|WI#w=J9j{@Oue|!$<0HzZ(NJ#+0`SduYo1@^D25RGm8Sc4i zeIRlke*V{XsnCLxU;>>M-)Q0BeOvVk(Hb`9Q{_E95Dxefuc`$`BsxMS;Hku1L93>3 zJ(?>q#DoyEJqPpB_M;T^x=klgPxxi9?`TDXU2LxBB8gT#k}01hc519lIs4II&?+l6 z&4DSNl-EVPDBpE3g}K~NnNpzi`*#1556%jP^`xBE(8HqPhqB>8P!dsGtx=h%g>7?G z_9kpgay}=~fZ)NjnG1q1xEIm=am6(*IOPJqiX~E^Dd8DAFn=5nD&l5b}&An_&J1rQEl%PgrHHjT7@j&IK1W ziuw=}jHd3A3bTDX^6aRv-rGm^q+*x;ahc_^TESqYiGc*_M7;IOGKi-a`o^EJ5TVH~ z#_82Z45F{OIEyq8A&JZP?kG{zN32WTXGF>ej9bmMO{}`LFI;2jOi)K3a)*%a9>Rn6 z1*F-Ad_-VIlNcMx0DVZn_KiHX1-jVU-aU4~o`4nntY>QVTEHxfvhHB`y!0pr3!u#_ zYt&p_<$;;b=s!K7Ui}cfYE6&*>%?x&Mxj>p3mQ~fR;`-*>Fp6o(`AAUmS-BCe9(XUWm`=@srZGEm7X=v59Nj|M*saaPlibz#ASRrTmQi3nkFOf`CqB z5NBQ&1vXxPYrV-GB(b_=^$*BHU+ZA9T3zA_E`vAX)Bz_gA}EPks8KD`<#81yq!t%ynW=L0 zg56Fh1@aGx<3F7xHdZ+C- z*ZaoJj~Bh}{G_p`AK$Bm^b``%z)ejbNf!f9Rnk>V#5B4o&z08p-6NUIMj(PV)x$@w zYmXp*)uF#==6|~?pV`<4lVMhtJFGf&FX-fxV@6Ge`hmemYi(KmvM5JkTJ`6}b2Uf8 zX(THXm$Z-!{JhuH02-&3bIZ{vkfU|ZlBG8r&5Xp{9+BP%*`{{Pe}bkT|NJ3ZxHmZ< zme&YRX6wUJd6^j}tRl}6ofQj-W=mW>@4rDHEyB$^jVeRjJf)^+!`>vyZ&CqjjzVE= z=1o)o>`N~xZ3KRhBa2<;j)?3EyS&9B`@<7NHC)Yo61U7Zeh~!wB7V!!I7N8>HRTsyt}~MV<~)5A(;y03Riq zcyDppgb(e``mHh6fa=zxU;K&Gzb^(gRwUFSA?riC23prMs#K{~9W+qrKiiZSsMHl_ z8CwI45TF2IFIL!jHwMZ;&Gi*&<+BLhQ@c8sh#RFc)7R+4tBAa_6h6EZ_t{pFs>CU; z!@wK1b>~|5b@F=k98M!?I*H&X;WxGRrnYbMqv?_MbIkq-s!J7?n(uC--ozE7ihF>u zpT}AXoc92E-wduK4hask@dh6|&4 z;NyL7dO!NDl<-)5`F*M_*(2xXinJtEujCcBY%1H z|9bK8Ek#-pxht9F*vP1?R+SxGmCtThWh|NJ4f@(o$}2mgC_w&{bGKsfoG+;!=&!a? 
z(HSnbW|VCnwCfa@41IgY5XVeQ94#FyiAas}n{jVuf(Gr;|4c z<(0%eb2&`utvm5EzEOR`YHq`hs13ov6jt))Lyh4j2t6+0qwqgke$faKQ7vI~$Vuot z%xHRt`8YM8&*p(rp`sAI7F#v7LhP8>V}b~gL!_cxdpW0lAqq|Bz2FrtOcM!<(N9m` zNJo5j{cH`90vve9GXNm0qQZCm`i-XZzq0%o&()CM8OvQ$>cadDOhioa;xD=xt_-gD z;|AXssA%kr7w#w#><|luiF)`?3ID~b|EGhc@Ufu`SDXk#TyFdXe>GiZU>%(wguh5> z6igVY)QK)R-HnJB_!tprWQslW+K+t3b4$2>O7mqcPSz+`lHLRXxPH_7xEX5$C5>PB zdW9}7cFDC=1`gBii>LhkfvA>R)UT?O~^136-V63{Larh7yc0fK*)V;bb&*> zOkaOySRH`Vg!)vw^i(jC!RS~-zMY>M#{h{%yNRKI$LfMeqwaeh%R#XQZP(075a4k? zrUI&8I;5TrZ&IAFoY}94rWfPi9xI}0mlP0>zZWZ?Lke%32jro5@8na2pbj-!dEm9Q zN8HKoFX@0=UZ2)q{J$OdzhQ~C2$7m%@+$)&uU`IX=R?!zPvgWzJm~lY9fAlDZr%L0 zZ}pl@TGN^5jq7v433Lc}sPFC(+HuE?{oXfBuRDy^H^HR=&n6=xJJA!Es{hb`eQ~{$ z32hG7E>KQ4P$L$(pq(0F2xzELT0EfePfYwg{)+kjOF{nni!>@yn zfAOB4>H|mCi8Jp-lt>Z|ujg?glTPXDLEVPk5i&vcx+Oh-B++m82L=+n?^sJU8F1F{ zx#*0WskS?Q`v)QdE(2Q#*4l|-5ZqOnrBd2PDTo6|n48-a-#L22i*7o)#j)?U3)tiaFpwUgOI^}d}&pef?+&9FWz=5RQ^N8vx z_!lNS4-QJ2u6!alX;LrxEA@y%+*NQSbeOfo}Ol*#AchL_?7Rja|_gQO>K>$XeZlip0VO_L<| zm$ZP8cYe@M{YN3kL-(=K5@XMVT0N%^9)G z_or}Ya239v&I%z0)aw@|;QE+`I=|x@tT*2Qeb{W@O{8R^QLn8?{)aO)nD7@i+sE${PNAZ;wyR$9)o%zHU#`D-YWrJxA&>4uOjokDD`cA3;ltDW z1IC#^YTLUdvw}Z{2Ky!hmg%qNM-7p(iZx8#mYOT;38D2)H(bJr#M~Lv^ zPi2bNnn&|as^ifNSdJI6G_sLU%tZod4e$!FbT71Y6+aSVC@9=ttiA%p-2TDx#9~Ra z2=9Y`_)rR;*-(+1qHc?YDk)X#x%~mPd+B{Y%K!VwO%R3OCez(ZJZ9R*ZE&XHFyj5| z6h4>t5hByj>zbzT9UQSGd~mNjI319#b-YgGFhZvTfN6Nkp4^|$F}uBUy*a}jarZ(p ziEBIgek=bUZ< zI(;u_yd!K;sl#pVgWi67;v&|Ur6DvH@?OVdX1$RoWI4?|MbF=qwBebA-&-vt@%x8L zn^?SqA9&fVZK2gFqn&dAK+$>5R!U?JUxUWRbaCC$ME#&~T54o2W~P@ex?_)H5W=DR z9OW+@#b098KdqpM4U(v(AZ_@&=YqBx#)F#6N|lZ$b-=6=r8o{_X$r01CeNM6sOZfF zd7-jInbF|VZ*(DYb59M#PIpgY^aZ54vy^W#{mKC6%Q2b_7e)+4r106RrdTab=Z&7G z`5*5p|444g=>V`Rqh*Tg@R6c=!A9e;-DYS?z8e#4117^^JECH;$fN7YWe68 z;&$(dZK!Z`FPy8bkdhwz-I579DQz(8un64E(4kWIC0tatj09lnJiU4qZ2S1U#zTFkg(UNrDFcHGOv)%rv%J}< zvwe~&e8Q=L6S~mT?{N0IFTg!;KlE$CPi5`@v-(jj2C35esBHXr-C6lG9{p(#W3eOl zO~8X?q!lM(6W*U_FxkH~syHV|D_}v|c--26up;foq_zHJ=6L1KY4&D@u!!#HO=+)K 
zAT>>|n;-b3FEvV3(yIjT^40?-wC^Shn)Hq%q{8$%_7=B#C~Q|*2t8?|oWlfA|!*)IcfH-ygz!bLvQ9QN)~ z9ry0`=vE^M{6a@Qh}?u45n8OauRw3fcGXwQoBLIZ;)a@1wj4;8(;1`bDL+JOo}2e2 z9smWhhS1RFwMDPue)u?JuW}fV{SNJE2jSu^lNXb2M!<*W^Tv^~PG2;;p6Y&H{IxY( z#s5)A?#&|#QjO;mlyE^V+oO@ES7vVFhEnDAdsJt@g2fKnXOXia$RXJ$K#f5}9XCTf zQ*||^50HWFfyk6=N#3eQX)thTq2H^PH`2;|p+o6_U=tG6FfQ9|CAjzr3)%bJsWffP ztF`tB9W)@9mVR6%*Pki9T;E8xxj+6(CDVI2d?^+Q@0HIv`9U;yJ!?k~1*Oi-u3sCw zUC~wwb<_UA@Lo7zhT=yaKVZ9shep|}6(G`Cc)8A0kehanDg{bv3mX@l>o8x`&rd+7*-Bd;xa01^E zC0!c&)N+%agP!0Ho}CVBACL4muGf>(nsY3A3Ce2-?eInC)O#Nl)hl82>6xnyM3ot( zyG8IjTn4VOC~8dF-UWtw+n&$)l>yt_J-z^z=husVM<`Ekl56l<3$Vk7$C_+=x-sev ze&Q?Zm{eN$i^OV+sIzCUpay-w{)H@Ag1GY`p*(RaVyzvj)qUpUt{8)RSNQTw(FWMU zOMKT^UA8`CdB3%^)@e`oet@e=zPh~5{@lA3I{sJZ<;80qLCTTu&Y*r+3I!+HQRH-T zF)@sjJza8MeL%NJ)~|WwOt*2_5ugL4_Zv-l|28Gj2Vg1B05BBmmF2B67AgM=@cAvX zT9Ho7&DHVN;SwpZc^%mPv19j~W8X4W*y}Z|J1nYDqf&K(-l=D@q4-;c>4Nw-!7)X0 zuZ@8ZSE|}P(6OmW%PDH9FtRwj&fq>^KLLY+;PeP%b-@lH+iLTT>O|4}()0Vd|J&KJn-X4;Y3f`_tN`yHV0E>zYvnCN6gl#u84D?%KaR>0>Q+@?e>w?8zGi zFm9n=naLsopW6)tT{m1D@XV}y_J%Qi>u+R&(*3AyS9-!9Z4PROC)g80&p)Xaznm^N zde>Ydz+4cWYd7B|d|%7C6W^xvLy4GPGe3T`*f$`wPovTco^6>wXe*P-@?-4B2f3X& zHJZLgi_LvtNo;#+qgigL&*f_ z@4FR^a2!2fT|bV4I~gA=q8Lpy(3E*hlJOO%ROY5~f>i-_D(wm5$~?5x1506>hH;_~ zfqNljdsL=zX1p@jZt+&k?d|;_if`Nuy@Gz|ya6kX`bTBlWgpKTaih?kaniiW4pE}e zwK5xCL)d#gn&-}m`(;&pvIY2VKgx9AUbia}z1b-lKgXWWOn!!`hhuj_f1TnF-Eu#x z>bdD;G`wr2vXg*6eB6*sz!I;J;!+WWKASSEvi`SCXQE&34Uh@{2I~hp7?$LhoBaYM zjr~Ew>xeR-p7Zs|R1oJev|*h^Pm`yZv3EWllGX;4i$sY@Df9sMFn&jf^u-$(T7dyl zGVaHFFXinO22|hG*zW`1TQ)zscik)B{VvVG?s~wL4}6=RDV_56hf!7xqO1K@CG~Uf zEhE@?smXiIO;#OM$4FV#arxKpyW;?OcGnBUfsS{-q}E2W@)fV~%_uP>pV9oTUkhr?yUC>K0ij!4~ZPM76)oMfo4*qDwfoC>#a_Wzut5Uw?ml zd-NvZ7RqFHZSzcfqQsN*%5mp{jD8xi3s{OHU0pQP&)9ekt@}zML@}D<{%pM_uC9rJ z{3~2Eu9-bI%)JO=jx*kxPNgRqq6oox>| zk4tNpawVOOTNMr;%^5OfO_6NbvEzu@KH2I2eC5OwD63h$w-qw#A$+Xsbrb*yd)JKFZZ-CDgz3>DpKUM7PdsH3t6N9O&v_M_fCtUP4nU-dTK59n>~o0BN= zGOSCb`dUZW)}B!cOa|(!0U9KyO;SuL^P*9Du)91Aoa!yW4puo3Z{+n0SFaugWl#HQ 
z6Aorg(91LPMe{N@*z_d6(R5$nFLWyZ(?~PU&3^L!$(pg(v_QN4@j{7^-Da{(k_Qe# zC~Su=S!!zcwo%LPB6}rTceC$waU;$Gd|cE3d; zwSm9f&r?XQ6b~MG+y51+s{Eqbf%aM1tO!!b)(rr4)~?T<4e|nH0EtA0^YJMk^C#b> zUs04DHkIf!(fGjv$@7yjkM!F*0?$~VwAmZ^pPCuMHB_}f=9Bp{SyrB=E zC<+Db@~1@#c`EU}NvI##mY3uN3>NZUwHtxkaVtI#?Ekvz%LpoUHIIJDAC8b+%BW-JltTWl;QStv!>bU zAf~O85ehxcc3uG{%%6{ur*1LB`4YzopI2!Yc|L^_Cc3GO44?q|C2`6~PB5F){aZ6_;7*3U0l zG-IWCZU^PD(E@)-F}|DF_#u$oh{Ld;O;Oz93*GmCow1IW;OjiNZv<98s1F(?9i}>O zwWq>dcr==)3)x$!U(C~F^CIg_=1gSLDwB~^d7>aJJMqK3S}fvP+HPGIuQ2wbDM zFC-jiflfeZv@Z|e6!CL4VIB6^`0(P|j+*yjooOPPdE-fO<8jqDDNeG$r^_Gks?0AI z<@s(eJ)F19ueCNnXseDoMZ>R)9bbKDe0&ByJWa4a z-pDQ8A!ixfx&7!V`-G92K8h~xJ@l2p$!O6}Dy2paFPSZl+JV2JDj`o0ipV?n9B$Yg zQ#6JyyUi-r#VRZ5UguDV;{zqP%RV-J2VONHOQp1x(aHfmY+j+_ow6}X!?CLUKAx#=sGly8_HQS!M%P#!?IZt#=TvyloL(x_&_;gWS>ofvbNFT zD*uMpvT2mZ|EaSg+iS!l`QLok=X*tZq1WJk^(AueiqYb2B_AWsh3di|5^DcLR?Wzu;Zr zoOZ>>5L6<{BIxaLuu_9ME~zY^#Fr=h9>BV}Ua<3r9BkC}n~9R!zK5zE#Z;krIGJIB z3-U`rc#k5M`)JCOa58}g6`VMc-D-O^NHtXmv@>*ww^@T1e4aGs{yh(U`8lP~)m2V$ zQh2|7P6M?CE+vTf918nV;gOzoYlMX56JDYp6&w1i`1WDtTJ<(=N>|C<&<_EXRey()(WRXMS>rISrgAQ>VaWu=RgM&7Fj5!uy@mBV{<-8{lIg5M(FEMn){!f z3Cf$>6F;*g$I%Bp&VhdCzQU_Fr=Ad3E4Z{l4od^t45D~o9ly}%$kEH1PxG|QI1X5f zXsCp`7LyvXPg&DvMO?P9g@bDd)5zLDt01MVIrUc#?6mz9UQL`wT=!8be2jWLZApRH_5N8^Z|oj8nLYD8 zWf-}YLvvJf+JviTO07CeF54iRi#ij)mS7Qk$hsQM)j_TKYc;q9Tpu>3_b`r68(?GVN4-1sb zmbf~Ulmz0J)~MWHTZcQSK7{~txxYAS8bJp=jH{#RJ*%S`iHT(}D2tn6uAy4W`kDH? 
zo2zormpNj%ZeJuA=8mnnv52}*6rxO@$#XdE3Q>SBq|nJ*iyTTn*IDIyw&Wo<*pa+R z@0vXyeni{P3?%kIc=z<*a(A2_H4ZQkLX(ZTHf2 z3B!K3NnKjK06K(>`tR7wIk>UJh)o<%&38^-hg6$G8JIhQ(SWy*^c1qHz5DgLd-a)s-j6iWTp0&_EDwo_9z?AJmI{j-7_Iw1n8OzBv2M*+E z0wFOhwqaN_KR9=&#=3&czXRJG$H@<5*PSoAQ@pf-bTM#u2XK=P25 z(U(Q`R5~5rhol^2Z&Q^gW!^hB{`$`t^AFC6A{y-;X&P?^lN;{xb|M)f$8~sVHo2X5KRTF|MDg%U5Jf!QF(6v63`3B}luDl@^ zYalkIDA%Kd2<-6!mfjdeTz}7-S6*4czsl~bIchE5HQoV)z-PAQE+FPF2;;e;>Q>lZ zm0LLHa+Q_v`o?dbH%)V8XA(8@CCzdX&Y{M48ffDk!Kq1`r&}J`C_p`$T426F1j!zP z^m1EY4=|%qtNbQQ2fF@HtTr;L8}@;D&ilF##ss@3wr$d4mnaC|ErG9atp!9@VHIwY zYRvD;;tMtTbjhr)D&8jF*0YrRS+G;;J%6?4xIJGUJkL$1U9V$L_x4mR)$8Tq>1J<= zM^Ui*)fu+i+hkuds0WTxm{K`E0q8Sba?C~sW4_6CInk|RXD6R9n$ zfyV=cQ2?R6WsXDkDvT`9oqZJLn5q>g(CVf4B^Ie=OZOyvzJh$-(*1&P0ezEMgC#2a zr=7T;F(wY-`Ob(u`eO3p6Ej?{4nJW3O8v`0m5NT=c3?^F2M&|5dK}qmljdqUZc;oV zMA6xQQ5_On58K3m5%?dwkD_w2E?Oe$LhDQR!q&}^R)o~uXxhl;a)N_haM$0c6-6EC zDu^n9Vb3O->*Z(RVza4D!W~BpLFiQ!CFRFk>QyT29?ylA_>O*D2-}My1M>uAj@Ns) zcd@zj!7h-=PQsM0*Os|auJ|Ep11yb;HciR~K!=~*<2rEc4N%E_);~?{W^wcH zxwij)#Yd60mug5yBIqQuhf)MToDxu*yZrKqbnu?@mhh#JPGDBI`!&6gCm}0lO z--tw}$~!gIfWyx9y=i}y=2&-l^hFlYMAGBpsK%x(7-P)a|8$80M zvycOEUbM}1Mq$sXy|&xzSs9b~SbF>mRUwKkkP$asVC*YmmtrdA`GM2@s0pSacXu>N zfUB{@b`h~TI$e~%)bY)HQ$4xn8`cfAxF}-n>8XB{pzQ{gYYFVUWdN3!?Qr8_%Rp3a zO?L^eiCpaGO5A+PUC3q;yDVyh`N$sNl8~f@3Qh;UBkwElc+qv?kf?fmm4i!G34F+2 z$E4q zdTqMleswH;>+Wi_o1@FW(grlHIX(k1oe)a8Y@E{SRTPT>W6rMB7G+?exG&zF*4s%; z>V6`kpk3o_;7Sp|y;?Cmw@+k472i~INW|%_?rQYW1;R8f3jq<{Ltj z2`Lrn;Q3@~O~-Tu1S~Pq2g-tPQBkC@y|lsmmE9g1@Z$oh=4ywCdQ=?!9Ncq4f3{Xh z-dqeT&qAH%4?<*jjf0d-$1Mx2)Af#S^VvU+TWhR;H-vIzTC3#@sx2x+NW4*l-uPnA zv4(N|IN0S3O3fLHB`zn}v4{}8S|2YO+O8n|HUn>v3QZ1g5?dHQV>=I9B+k7OdCMnf zt69Zx9QI4{>{DMm_V}oySBj9-?OfBFoJNldx0;A23^+&7*Qg*$Hi#n_XdQH$g7ZR3 z{D0y43l|O(Di{azSuUi#l6Pj$CC2EeT$E2LD(?&?h*XR2!6n$4Oq)LdqZ2BME}d)K zz$O>q$G*qE%mNfIUUj>D|9cg{q57=?;SfC0nP<#_gv+HkEBwxh$Ft;i8*|9oA>qCI zh*G<}`JgAa_ItY8bP9t^O4X9c$ynkoE~gYU`4&iL9{2ah1~DcQHgMcX0UMQ 
zOPtnU!XY^_sLWqqi=(k;u{%?~5LKa4;Rf0;g!O^E`Xr(_`L4a>eSaC7{gT!WO@}61^1I-}aLz;&HpkIgj_6#Pl{!FxE z#nnghLa&fR;>LaX9sN-w9+Aw|+b6I6#3w$V+J?xwpTMcjU<1K*sA=DD%ZN7=T1>%| zVH9xwbEOFXW;)lv4^^$>RTrvj?Uzi|~Z|9mEKbG%!QhEJNN^ELnJ zS^}MhIB^-Xk^}b8&1m!@(!!?RI73{_xn;WBQZkog>_Aev^yJY*S2VG`;q?Ko*J*94 zNlT)^T1y($^<`XiT!&q3oVa$bkYp$^E6~|G@d46rwfHKXTo|5y&BK%Kb)ibnIwVAV zHK0{%oDl*)mhyPJqeAZ(xOMidn*XENu6M~%ME2t~pKs$1g6<9gbCy(p6;P$T6-xy4 zu^JA2`%t9Y%tETOf3Qm`*if8#j?)E8OwH~EXlMsPh|WH&?_`kO32{X@a9i)tZYslG zAQ`LDwn<-IC!!ZDDhB6=I4ihZL43e<6=sHeUBcQOt%onC zoA#)bDr{bZd;WIniAWDVkU`p27{*rK>|JNP<9%G|o?Asw#)nM#%?6uNyGF!noti3Z ze~;j40oQ4#cQ2|f@ek9awI*`LHG_csoteBCvYhehh?F7Fg_oclh_ zX-ZIpV|MKFX_OtR<*8#CG=b=T2&26@I+Nq4#HrlNz`IOZ)zrXB|W{LHQNG>Gw6$D_1t!BdZ zU#Y;%!3dBCIHDrzsjV4f#WV6d4nR{w*4tjN!_2Kn&qoN0|9C@nWQFDUz_ew6p4DuA z-i+?_vsfW(`}=MzWm&oGQJutWc0(S zz_dBrYlEFx&70#r4;qyY*3G2^>z=@{)BPv(kr<*F;Id(Z=Ge!H)!W*R`^5|jpqUP3 zZsO}fx-v~yxUUbcdaI~eQWHq}92nd1oZs&4#WX>N|Ft8p%_U0N-ID&i^#yjIF1h7| z2t_z|bF4#gJLvB!NFe<8Jk&m>-1}ueg%8Vq&nAClcX)1ptR5!ft*`ek_53OOQIy;o zAaj{C%R9|zl>IAaFfx>FIFI(CYeU7nf!2BO>B#@7>`S1bZrlG6QOZ(bgpeXj7)5rH zrL2`Kg`t^HcG;IPRF*Pm!Pr%{vX5=DD~x@Qu?yLav5jGD|F53+dC&iS-lzBH={P5c zWA6LD?rXn3*L6SU)-SpZ5lr6;u8;Cxk?Yl>^zdJRYDd1zP;?b=lkF-<8Km1cQTC$}%0x}wo z^WOH{`FTGU>=E1Z4*AMbzdrH+>AU}{&MXrm_BBL|aY?h%vt)s6CJyqT@B&8YQQfiNvgh4x96J>z{G21K4>6qvl^|t=mnd7+G$z zf4z1p?3%{hMAC>{lS%E-*&-!M7%6G@XC)ZsW+tavu)X!XX$T&X>)! zc)84omnYQi^`UP9EJjbLv_%C!94{9Gu@VW#Kne9`}_8l@b zkOF)ij!*6;0v>Jy@LI1cepCZz-$T9=5CTqL1V+ko`>mF``jujqG{;qFEQiu9?^KA@ zXZSns2DRO-(yY_c&rOhdgzGa9O{#;w;_W!?M=ih0eAWiA`MrPI{NBC6hY=j#b9f^p zLp`VEx9`?fvP=S4!30^Cq#p>_4YGSw!a(8tV(A)NDllQUO>}F0m-m@@`+)oS#KpNm zpHKQfKeq#rpVY!to5yy;KF=>UwV7w-7*}3DnCLn>C0Ha8j+eA}F$qVzzVu05ShIKA zMSK^V;z3@S9^C-i+M9C~m?2N){ur?3kcsVQg*SjKfQhV;tUB{v@+8`1KjDRYH=)Kr zTS3}Ts2;$&D+4&8VNx@(j1Iz}bXGy3*Rb?hTFKAWU9 zjk9Bfi72NxrwZ_f&!k|>b+i$_0mi1sK>Nm}x7_gxv#xVVG)`ZevsJ!c^GR2b0N#Bb{P^8A+NkU1iYX>h-XxRi5G^EjImi|5j?i*j`K;9^3? 
z(}9n7O}W_GOa2vqU3t-6e>^jPASuODHtGOC7jpVn;c6PlxA%Xm{1^1BldsYpANQ!; zD?-_qE?x#tdr-~M^&u;RrhNmOo4(!%CNAkIatd+{s?mu9-T2Bl$d4>t%&;5Fb$ct|QCGY5eeTLz8I7Mu7FyBO7wiWcQPE16rPpt# zY@{37ci{!!bJ=4(+^OGuVWFJNNJd=dGWS)(JOIi`mzj!cHK0omfXeNQ>Tp{|BpuA` zi>KvG+1dU))H-2^k4{tm0`Dm&MjECA#$5X|u?__HvR4JX`Q401?0qPk!Lv#TPpSDKe8 znMpUcDLkcLWZW=5XsyOyS8xN&{OscwuY&+UkS;KJVrp^EVZt?oK(sMJ8D2gsv%ReG z!-N1AlJg++ST4s%L!HC~>ed5pXgmVE+4U-*d>>63MV_(!*twy3A7dr?1jwbE;r70G z&J}RVIcpxDZn0;hO;}Qx<(hi96`4DBKM*gP_07YlK(M+!YOcBsn54-NB5lw3df}RV z6nL`AnE}4^xgpi2UP6}JX0qtU4LL5umubSdrp-KoxsOz@W)>9AkH+3+n#YX`=T%>{ zZcplgBenuFicq?VXvKr-Rsm9g(7c|gU%za4QZ)jT1C!Bdt zqc?%+f3nrrKCf33Qn+7bk;bXWie`Q0+w@U0;lAwMge}afyo{dL`>WAXIyL}eQu_=u z-}&2L<6%#y>c(^9d7~#aYdnp2Ira(khOweH!CY>(ksjYHlNEg+1Wc{AQJ%5Nt23>W zoUl_&GAfI7TG@Rkk)B`xP&Wum;d*-3?Vy+A9l|WN){-w-RffK_+U7}9Rt4+eQJp4W zwB7m^Q?6X3{d5J()AhP18KK3QQBgZy1#({eKyq>7pjQ{&s+dc|XC*yKjl5#j9v%EZ z{&Utyn(Ao*276TMt#@&?@(%drW)|Vds3z6%VKx1?-lQU z+*BY_lDD(HB<6P`5}g@6D>Hr|hV0&4{Px)Bv+4Exz!_!ZKdUoW6@Sd3A3iU8dreQ2 zt8Zl%Jy`&qIvXoB=`lB0no^?=<8c?J!deO&wp2>;)nCoqm=$5X=J>Hv2Ri2e)lufW z;rhi~8sy`(hL_cillHPapL2ksQtG6Oo)(aaaFy8_2?R@NEqIdTsg9S|rVxS%d5%8wf;sR)!Iy~B ziF0+S6BjAHFDl?D0K5UjzZ+CZIKz9@H5AMESTFKmD9+f--gp7ycuH8{pftX-Gd-eT zck^K;P))weR!XmaU={@C15RBoeQ4`1IsZF+m|&v|{6A2Ayi;bB<^b)p{Y_LlDhT8SZVj9t7ozix9f zpC`|YK2^Ohd-0KW+D9!FTZ-`sOBT0NhQ&Yz`@y9=-G(TC=CJFJj-k`+XZaG?5@dY7 zvU6`(YbOKa_l??|$q%_F&)EzvmdlyzUY~W+D;e*oel#+jC|Z6uOa9){ zK>BQTCdr-;rsUngIpJ3RWX(`eADvz}<_SJQOUWl&>D40T^?tmzx##;W>k~<*Br}cP zuvhyEFv$#Vko_M@EarrCzD?gDu`AuGER%HG=wCT~H^Yy9pv*pa0PnWFm60+vCv`_n zB8dSo4t(>EAb;z}?_(~;uLc(eA6T|NFr?ByZ1J@oX+wCjmC8OyoK ziMaD_83m|107GmvQU6NRXN8sk4C*?jA~Lk`fE0fBlQQIDel_oMw#QyN=23ZgB9?pb z2n2tj)J2_udRStuN_yS2v86-)SH3~JZ<3Wt#*^YIZ^<1&Q>@It0x$*95HntHNgy{i z-Zd$Z28=pNFNa`0>!*EBQnV+;LWEuZ4@iF}^@O(q|7`dJ+C6y}H0ylq$HCr~TL6ws zTj~M^zy@?VlbBiogffe#_|>D7ql;=;#dIB!UPD-v|H{Oau!Qx>*~^COeb*v03_ofi zFF%hE@4qrFikcMQ<3j%U5>za#wIRa(1_@p6mbPB8VbhDgBQzpH=SDeMbwtB*ulr_1 
zJX1i)?PrESGmrA)U#%W9yj76fb{{5UNI_Z|Gs#iH^C-&C-5O=bOVm8~#E10@G8czg zc#pCx7FDl%PhG@+6|1F0kTAWzRFBGbPV$S8Y7>%ihm<MwuZ6v{Y$-> zs>kp0sCe1_e9R7Hyh!?TULnim^(B4}wpK#qmgc=GOjS0d5Mp#6$T6TcTAi~D^WFie z=UKZ0|9N}5@pg7_1ZVFkZ-(yv$UU5iXE>)MG-Ts(U+UKBvR1Pi(9IzLog-Q;$QUi| zh;<* zE%vsUqr?+0A&-^x$qx_pP|;^UKLfDvg{(e%2{mteMt`oT#zkPVVK`fDNlu(5Fn>^v zSVn~Fw|E?1tAQ`+JC)(CA9FU?%20OW3RRmjh;v!g?3Lj^TF_Eqs&ya;@1LerR~8&-@^D7 zzn}rknGQGZv-l{idh=__d)9CDfal%>?-lY);C5kf_}^Vl&>&^gyoNf}Jfoa3eiQda zH(djuF@sBUK#=aM>H$JpFX1Ym#0iO@?)Qcln*jLO6(Q+pV~i#006rG}7(lqh`d@1v zmMORy3-!<Iy*xx}JQ-Dj4_AHpoqSjHvuXOL@KH;GiyomL zt{j~^x_D+Rt&jPf{oS4>A1&bZ;oF9e{i5!lZ1k_avElyE5X#mhz!VacdD*n;`o<2| zcqbKHf{SFd0klX_70Kets5?ZXQbY%R=#9d6!NnreyAQ6 ziD5`m;^qyy^$zP2qr>U3yN@4%zc!nKb1z$aq(v2-di$D*E|UrJ4E%!aNFc>!uvD%s zCRKO7&vK9*v!o?q-#z!J+g&zst|(DABevv9qIR720{6!ASYg2<3*q9p11etU6Zp;_UTaYAuO3aF<&xMw655$NSH}lOXy%J|3+$A)7Hyi zLu@|v-k|Ns*mdN(>C--xxB+qUE1PH#)f#$b8Ht+++4{LWSBx`_5*?QFL_7&`YPHR) zzSF*zhT50L_g5+4FPFrJ<)n0s_3X7wKG-Z32!K%4Zmhv`raAVZn-7?=p}io!O_ciG zK?6JCN`l^^&bH`-o$67axhPz7j#I}7TEWa9dzDT2XfAOjI70D_-|jU;d4f9i8N?jh zo>i%i<4&5;H^*(!viF;8{akPzm(|Yk3F}cAGt_DXf_?WodY8}=c%6ERv^1i>!{#&- zMVe*(LRbj0245=K5AWp|8%Df0y}b5qV#Zb8R>_4ueP&VpY6ya1VGW6Bhi1+8Llg|Z zR($F=RM@MiQ?gw_a7xoL@hJOKP*PF<$2S&k)P3`7F@;`bI1;z^{`OvbV$9xrhwml( z(W`z^D6TSGJ8SZNZo!fpN%Fo$bn}TfO{5$(Pn$@wBv0Bc^sCuj<`z*jQZj8_!9#f? 
zHaX{pC$6M(4PkNz&mYyu2+uGsUTy69d{wauz5EiNDsXhKLz7wzu}yD|_gvT;Hp4`J zZ3)*sNk!eKt?cg{I2XJkc1c)!Vink=G21Va_fN|;B5o>@0>#qA#`cwE>dRl@Kkq+4 z>l8o#ufzO#Jn`)0hO3x2d+%^&vAN~sr;e8b+<2Ea-*u!Z`R>g174|EZseMqfu%M;} z8Exi(`l{b&i-)$H=25nor3u2%Z0EOmyx#ZUSw(ERdQX?KwFvGdR`0!Z9cm+R{G5y9 z_$v+nTsB{ms+R24N=oupRM_b!nP{R{e<*O5XZ0HLinmgc{8sj^x-F_MFgsN3Rn84& ziP{z$0LS#9q{OoLF$NKVlehjlImkQU;r5oI-5S?VB_0F`wWn5^=F+-izF%mr&Bi)J zl>Ltz{B!kRmwT{&lYYrcW`=8}^4Zl4|d@-QU1 z`TMbKrC)371^?aJ-<5cR8LHp;tt9WoKlM&YNtvtOX|C4f^Vii*HcUursE`7%>J{2Z&yY2{3!xHu!2gG3AIvV*LQ1I8uF^%?DbeLUCzJ|KGJu0 zz~_^$CuVvv{~cc+>?KP?yfwL@P6)Y=_ozby45V{$9!0ghgiB= zb1hkWs6@ADQ0PFK#eJzX!gbRS`?ksUI=;}B^!14Sy^II=>)D&|t09C}GQXwJf|>^%zNW%~DcWWql+78O1ONX@9nL(g{ z6ySj_RMiIFb+hrFtuCJ$9>!hDhadFP&C(0}H#bcp{I)pe-!{wZk z;|DT)BDrRlQ9kmV=dxVR%Q4RppC0qg#>AWr9>X2QvvnTz3BQxG0OEOZWHL9gfKal# zlw>B?oeVrP|70zF1tQj?XQ4wAo-7@^N4UnV3%rCr9}{ffDJOlE?nJ6T#pLno%MnUj z+GK7LJjV^S=8o5{Ud9lf94G=n`)pG0O*#(8kIG69#BG{aY(JkX%NpxBG|9SDHW(Gi z_q-?O*kprLLsp(6$o#@!_fL;cJX6lD*eo7HJc4+x#0iV(V1}J~d9=GY0rjH{+#_Ye zl(gH#&!+X3>eX0Rva3(PRKytEsBB|iIC^q5_xc~gv|K5WtOJZn-0L&bP*?9(+M-R? zjK|V$S4*vJem?^-Muw~U!2A9 zj0YMr(-)5^f;`fVT#QKN*in8?E@W_ji|7qub?cTeDb5CWB( z%?oQm1JNGmUWQ9D&6bRKX-~d|`-JwYIGR3ZFTF@fWqqEf#*^Ph%eO6ln=##dzja0y z;?_enN=qge5(u+B2kIx_E9>US{#x65BTUatdzAp6KRvgjP%cY8T0BnbdS7*ATJ8Va z!G9Q#6InI+2*2+UKA0%bq#KGwczDEj*T-IPgz+P|MTp5nT?w!yJ!@esDwF(%5K}`N zHoklzKqX{-OXxPLTCpJa-H31-#)l6%XnTw}YiT4O{l(kq|K@G3cjdd$3q8SvikuY3 zP7XirlVrI^e!nA6O+H;Vsw0BD;y2rs(S2I4THJBAzyERBcthSzoEdzp!bO%Y`$f&9 zo+9epZi;nJYwKb~i@D2G+FUe8ZX7P}K##Kmdc2-A)04dG>rEu?a(N)5K!m+zy+Pfw z8)^r4_MDHYWp9iWH5QLqolx^3r6%wGY@oINs1@^aOhclz;PM1d?89@E$XyW*R*F}-j=pOA?>C)Xh%(pFQ`p_9_yL_&^;}6x-nv=lITm;$Y>L!%yBf|9 zSl$0%&wopG_OqID{8~!dYsIbB(77&wBpm_WBw*yL4{1@;$DN=(!#ZAoA1X=DIUV1V zz7qBQ9B(Ms`ialcA}%}B+m3ru3yJR`jal< z6als?&x$ie?g-<5T1I>Q_0K}#S?o70q=5^? 
zZNgvm=2h)BPR?}^6cV9ZPiTKha(GQ+NV>m>%NnEdy>2})2`3VBkoVN zA{K8V6_JrBSCUaVL(oN;`rcVFCz!sK3X5R;VQ1u^>BpUukRuGZ)_;FlwHJS^r4;&9 zhjS`KZ*a$G(+V(V+z5_H_XyWzkqJ3H_p?b~Z3!lIe`0RY7gw95@hTW3T=L=^_n5J- zVYy&pq1$;NT$98PmMY)}DzDfNm02ptr&|N2lsUlL-`UPiq`o7ltE=lwb+8LNal4G`d}Md6l-<^^!g7p54O-_rK4!eLQ)4Db{& z9^cpgEh+!kHvpr9K2g9tSwg|ALpO|sMW+qeU51V$Z!8NmpE>q_{q>(4Q0Au6Iwo#t zqG`m|W}>N)Rch&4IFg{=aXA(Co&xqx>-XyDuXph)3?Kqw%lW?qhkt)(2c!{SraHoI zfy_x+E9oMZT~4Ll>v#urg8%iCe{+_ADr)jcal;{vh%-ZKMkuqc%c%<=>b=eU(Rrlr zJpX7D92VKMYzxEzs2esmHWnI)?YX#=wB~+NzToiNp`=QrYNMEZjIM)b=k`OK(S=nu z!s2~5LW$ngAlOCVg9lb`QG?{hZeKq9-cC@-)JRK7JqThA=C3h!5HJ(@_QeO+{&b?& z-#+}%+~uz<#3mu{b*6eN$N6J-fmOe^EGC?+_&&Xf05NRo0fHf2%*+GF5dS%w6~-yqeq(=l8~4v_&nUC#7-B8d)CWP ze*a*(8tQP)M9qe9E-{st=)iSjW(y<>>kuWjeHmIzo`^dNa)|-w)fi{@7&~xRx-@K1 zrbu!f{^Q@HYJPsG6)brp+Gij_h3q|!Bz8h8AvA18k7=+m7B5Z@nYX!Te~~sxig;QA zGKsF&M1ogF%G>coB>B+7P7y$PTH0wDf1cLZl7cT=y6ne%Xd=L)m{&^%1mc$*Ke_hqlUk;6+cy2VZD|{w^un0NK19>5BK*^{qY+!e;~3oOmr1)Kk(4# zj8OX&jh=pE7K>IB)oVuPNO#GSK`Yr{U?pzMK>IH7=~oKa38Xd*_X7$EbatS@(t)vP zmj;<9lh`oJk!y#iqJaSYq#QU1@T8-|P?*v+Spm1Wnw*?v! zu?%GpP`{st;}#>~w&RFNlvq%V5hR-yl#&OdpMze+_N$l8hrUcJ@NyerDs7U1TefDJ8> zx1kO*`OtDR1&yT$Lk94)fE{pMg^O2!i>%*>?k4iUaxs`Ux!9%A;zD2l>669(fccAA zLKepYp1)Ju%sayMf_ekZc+dUqU~-X||zdDyX{l}EV>IcH&tWR2n>Oi=H<1qSr2YI2xvBink$^hR%b?VP&8~gUEgtje19W|(AIAtzP?5{u)8v}TD-7qlEls`-OIhOSr?7WQtPnH}JP)(>2Rm{n-UzewaoB0qMxTcNO7t8=FuIptKr!>*b{}3zpy6Oaj5! 
zRxEDVu-m!Pv(ap3sTE~-XjkSS%LwS@rZb_Zv2QTp;k|5pt|0TZ8=+gKB>R;3hxJxI z=mwLg{^%HWcK7UWNQ2v8T+1?)nyuwMthnCwyZ%Zw5P$T9bIpiIanJk%k+5WycAZUh zUH$$!gd*BR9itLz_AIoG2lj$uZ8OG@L11Sh?>*D6i^PZqEYpI z=9giLJrch=AV1nU@CAL5X^#szhnVl<(mtM z4!}a4BAr{KA7X6+HH5*@7pi`n4*ly!0$5Hr3X5S+o=1X!|Lz=Rak;-fLovBiP+0&~ zG=jX}WW|%a>~I~#Ob@_Oy#0b{LBpX#%!d{vX9!Cux2mW*2jv|50><|{>+TxP|Mk;< z&($>&beyv5y0TvL_q@{RfX*FoFcY75%)*GFS?kPGb?~6`)s}+7zT~0Z=rtYqq|*jtuvVm=~tzs;@lIFMDapJ5G|t>(-Skv>zAXTb)#VvZb%M_$pR@HJ2d zQK=T+q2c5qEy3;M;#eA=V9@pnarH*@7hO9(uBLjSR*+F{xN(?s!-mT%1NMfesJDmK z%S^_q7FJgLT(Ud5rSs5pXTR3dT~JWhO>)voFY5Q8d-TM!8b&2xNWxzeY zkSHrTvLs@|Y>Z4PR&c#T5c>kkTX0;M{jxXkf~^I|HvY}{6nddcK5A&B**8iWGi&6G z;j}<yPjr%k#yH$6`{|0WjuJ1z{YX?iEn1LTXIczav5r!E6zJ!GwaO~F=r5*E+|_a2xe>XIrFf#>Iib>nS6ywY~Gm})@`9YNsmfSt8B;^ zq!)~(ZUl(X6Wtd7g8fL{uT=731c4wMKzA8&Inf|`TJ=8BaB^m7 z*OK*Ay^X0{WK8X=HVNz$UvdlnilC1JZ=zWA^H(E-Tc(w*;`B72$p<-WF2wMv($Jr_ z^`*Xb-O(@@>>yNnQrrLd{!vd~nL}SnP5$vZsQ-$jf>qmtMpmhi>xLQ(MLjw z{?G)Gq0$77d)z(QMtO5wkX9U0g4;Q4N;(?2jreK6;Y-@iw17$$8{S0UJnx=FF#Uk8 z&|S%?JOv9h1JsqBTV=mVIlPE+Vz7m#yQ=E!-`&dJlFe8lVC(*ut=Li_IUsTcThFYz zcY%kjZh|A8Y#6~%tiV%&C*UN-jDh5 ziTsUC|5sa)MnY9nOfc7blM~3_*H`~)@lU){W|7D)aO$nd@@{Wci#-zZ=5tFqD+=zy z7}ue31?Mubm^Ik@1<=M)3ti!SxfoLob4u)&`wSd?j;4ebqW8cF@bp4mOuMt?c{IyC zk%qRBPXHpNO6bUWLwjT|+sskitDUk3D4|1>1*i@=JgjLP2@A(Q^D|Hv<&SS@6P`)B zKwg-MBU+W0SrHcc2r?d_1&z4Rq(q%c0tIY1)#Oq>K%GQOADVjKF!C0J*w_E5m5Ycj z6+>;AItU-yI0J=6S9-eXDpTxQT^yp+Ol&)^;afm&b3j``g&)d*X6=h~wQ0j!Dx`Cd z!}YJ;=D8lxczj|?wO=lz1LwFgwO4?2bkR768Y*4vP44&y>+;a8A3xf+y~bb91uv$6 zF6E&r=hcoo0p8cc>Yo&_iccoW2PZ7GtG3#+L2j9z8_H^({D|a>$a-+y+e%|i*4mvV z@3)oR{}PobWd=skUOrluwS{@yoM8u?SOAGwP%WRgrDX#mHM_nPE{!Tk?}JeS5ZxdJ zUFr9Fx6697s@i)&3%KUIIfK3ib%k*G5}V>`(!68D?Zf+)wgR2Poc)V=7#H{xOSy^| zb2hz^tR%Jh8JDrA(bj;>-EyTr*0;@?OX7ZHH=5-)g6zr>OV&TMgA;I_ZI%_3O>)bc zsamp>`vU4nY7n{Cp$_nZV`)nvV0Ipr(a^@jiSD^G1w2p8_()R)Uc26iHPfO64HvuJ z1ldD#sQ!$2>7d$sV|P}f+FScG}|YW5K%I2Q8otM~o{ z(fu{hp+sJo-@VhJ;iA1{boetpJT=RD&TeDMU379Q)7#n!Jz{>WmD`+%tHw2a-!}SK 
zB|?dfG0tOpScv%G?&gr{+fM%C4HqN{iTyl$WC(H!fPDRGg{U0KC5;Nno-0?#Re4-; zMJI31*RQn2l3pBJ7k9mu{3w2A9`6m`bCWf)N#lzxfj6POo14sD*9sfGLRX@0vCxZyY23o}7%pYg} zgT!tc4f!N}Ndu8V1Zz4pnHekA=b&ir;=8t^S&UMsi4iEf?UX-M>bE^X9>E%ta=wLm zG0;a<=Ak>Y5)RMFyT?Q>gZdc+`|?H?&W@0`@hv!llwz_;*wcC{q}M7v4UJEti4CI4 zsM<1bfwb*Cd1wkLD^azuoLY2COxtKk@Tp8sp1I!+7CvWzY}z;tGJV{tG9#g`1Y0!* zDxAL~@?ZQW(A5CHXMyx#_h(K0`uwmz(ZXiAn*QiLrd{fj+w7-tiyo%Cz!Qmw-64Q^ zAfG(DaV0kyY&(tyl8T;ECCTvx{o()`nE$+ Date: Tue, 5 Dec 2023 14:11:58 -0600 Subject: [PATCH 240/648] smhp/easy-ssh: support custom controller group name --- .../5.sagemaker-hyperpod/easy-ssh.sh | 37 ++++++++++++++++++- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh b/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh index 0d98a828..db61325e 100755 --- a/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh +++ b/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh @@ -3,10 +3,43 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -(( ! $# == 1 )) && { echo "Must define cluster name" ; exit -1 ; } +declare -a HELP=( + "[-h|--help]" + "[-c|--controller-group]" + "CLUSTER_NAME" +) -cluster_id=$(aws sagemaker describe-cluster --cluster-name $1 | jq '.ClusterArn' | awk -F/ '{gsub(/"/, "", $NF); print $NF}') +cluster_name="" node_group="controller-machine" + +parse_args() { + local key + while [[ $# -gt 0 ]]; do + key="$1" + case $key in + -h|--help) + echo "Access a HyperPod Slurm controller via ssh-over-ssm." 
+ echo "Usage: $(basename ${BASH_SOURCE[0]}) ${HELP[@]}" + exit 0 + ;; + -c|--controller-group) + node_group="$2" + shift 2 + ;; + *) + [[ "$cluster_name" == "" ]] \ + && cluster_name="$key" \ + || { echo "Must define one cluster name only" ; exit -1 ; } + shift + ;; + esac + done + + [[ "$cluster_name" == "" ]] && { echo "Must define a cluster name" ; exit -1 ; } +} + +parse_args $@ +cluster_id=$(aws sagemaker describe-cluster --cluster-name $cluster_name | jq '.ClusterArn' | awk -F/ '{gsub(/"/, "", $NF); print $NF}') instance_id=$(aws sagemaker list-cluster-nodes --cluster-name $1 --region us-west-2 --instance-group-name-contains ${node_group} | jq '.ClusterNodeSummaries[0].InstanceId' | tr -d '"') echo "Cluster id: ${cluster_id}" From 2690993184de18c2b2b563289db80022b22fbaa4 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Fri, 1 Dec 2023 14:21:30 -0800 Subject: [PATCH 241/648] Update lifecycle_script.py install pyxis and enroot by default --- .../LifecycleScripts/base-config/lifecycle_script.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py index e26f6a5f..e20df155 100644 --- a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py @@ -152,8 +152,8 @@ def main(args): ExecuteBashScript("./start_slurm.sh").run(node_type, ",".join(controllers)) # Note: Uncomment the below lines to install docker and enroot - # ExecuteBashScript("./utils/install_docker.sh").run() - # ExecuteBashScript("./utils/install_enroot_pyxis.sh").run(node_type) + ExecuteBashScript("./utils/install_docker.sh").run() + ExecuteBashScript("./utils/install_enroot_pyxis.sh").run(node_type) print("[INFO]: Success: All provisioning scripts completed") From 
8d3e3272a7b80edff984626c4de6e0be362e20e9 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Tue, 5 Dec 2023 15:53:03 -0800 Subject: [PATCH 242/648] Update easy-ssh.sh Fix typo --- 1.architectures/5.sagemaker-hyperpod/easy-ssh.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh b/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh index db61325e..859bffab 100755 --- a/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh +++ b/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh @@ -40,7 +40,7 @@ parse_args() { parse_args $@ cluster_id=$(aws sagemaker describe-cluster --cluster-name $cluster_name | jq '.ClusterArn' | awk -F/ '{gsub(/"/, "", $NF); print $NF}') -instance_id=$(aws sagemaker list-cluster-nodes --cluster-name $1 --region us-west-2 --instance-group-name-contains ${node_group} | jq '.ClusterNodeSummaries[0].InstanceId' | tr -d '"') +instance_id=$(aws sagemaker list-cluster-nodes --cluster-name $cluster_name --region us-west-2 --instance-group-name-contains ${node_group} | jq '.ClusterNodeSummaries[0].InstanceId' | tr -d '"') echo "Cluster id: ${cluster_id}" echo "Instance id: ${instance_id}" From fee27f43f424bbe58b0474da8cd54addb3eb93f7 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Wed, 6 Dec 2023 17:44:54 -0800 Subject: [PATCH 243/648] Update install_docker.sh Tested on CPU and Trn1 instances --- .../LifecycleScripts/base-config/utils/install_docker.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh index 59d3d8dc..f162096f 100755 --- a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh @@ -15,3 +15,10 @@ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null sudo apt-get -y update 
sudo apt-get -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin + +# install nvidia docker toolkit +curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \ + && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \ + sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ + sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list +sudo apt-get install -y nvidia-container-toolkit From 96a79d415b6b382259583d4bec728deb10ae45c8 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Wed, 6 Dec 2023 19:32:30 -0800 Subject: [PATCH 244/648] remove redundant dockerfile Signed-off-by: Sean Smith --- .../0.data-preprocessing.Dockerfile | 10 ----- ...file => 0.distributed-training.Dockerfile} | 0 .../1.megatron-lm/1.data-preprocessing.sbatch | 5 +-- ...g.sbatch => 2.distributed-training.sbatch} | 3 +- 3.test_cases/1.megatron-lm/Makefile | 10 +++++ 3.test_cases/1.megatron-lm/README.md | 41 ++++++------------- .../1.megatron-lm/test_megatron_lm.py | 7 +--- 7 files changed, 28 insertions(+), 48 deletions(-) delete mode 100644 3.test_cases/1.megatron-lm/0.data-preprocessing.Dockerfile rename 3.test_cases/1.megatron-lm/{2.distributed-training.Dockerfile => 0.distributed-training.Dockerfile} (100%) rename 3.test_cases/1.megatron-lm/{3.distributed-training.sbatch => 2.distributed-training.sbatch} (97%) create mode 100644 3.test_cases/1.megatron-lm/Makefile diff --git a/3.test_cases/1.megatron-lm/0.data-preprocessing.Dockerfile b/3.test_cases/1.megatron-lm/0.data-preprocessing.Dockerfile deleted file mode 100644 index 82a49439..00000000 --- a/3.test_cases/1.megatron-lm/0.data-preprocessing.Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 - -# Container file for data prep -# This could be reduced in the future -FROM nvcr.io/nvidia/pytorch:23.08-py3 -RUN apt-get update -y && apt-get install wget xz-utils git -y -RUN apt-get install python3 python3-pip -y -RUN pip3 install nltk -RUN git clone https://github.com/NVIDIA/Megatron-LM.git diff --git a/3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile b/3.test_cases/1.megatron-lm/0.distributed-training.Dockerfile similarity index 100% rename from 3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile rename to 3.test_cases/1.megatron-lm/0.distributed-training.Dockerfile diff --git a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch index 3e1e0fae..d8c91693 100644 --- a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch +++ b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch @@ -10,11 +10,10 @@ ###### User Variables ##### ########################### -: "${IMAGE:=$APPS_PATH/megatron-preprocess.sqsh}" -: "${FSX_MOUNT:=/fsx/gpt2:/fsx}" +: "${IMAGE:=$(pwd)/megatron-training.sqsh}" +: "${FSX_MOUNT:=$(pwd)/gpt2:/fsx}" # default variables for Enroot -: "${APPS_PATH:=/apps}" : "${DATA_PATH:=/fsx}" declare -a ARGS=( diff --git a/3.test_cases/1.megatron-lm/3.distributed-training.sbatch b/3.test_cases/1.megatron-lm/2.distributed-training.sbatch similarity index 97% rename from 3.test_cases/1.megatron-lm/3.distributed-training.sbatch rename to 3.test_cases/1.megatron-lm/2.distributed-training.sbatch index 762e0c5c..923225fe 100644 --- a/3.test_cases/1.megatron-lm/3.distributed-training.sbatch +++ b/3.test_cases/1.megatron-lm/2.distributed-training.sbatch @@ -32,11 +32,10 @@ set -ex; : "${GLOBAL_BATCH_SIZE:=288}" # default variables for Enroot -: "${APPS_PATH:=/apps}" : "${DATA_PATH:=/fsx}" # default variables for Enroot -: "${IMAGE:=$APPS_PATH/megatron-training.sqsh}" +: "${IMAGE:=$(pwd)/megatron-training.sqsh}" : 
"${FSX_MOUNT:=$DATA_PATH:$DATA_PATH}" ########################### diff --git a/3.test_cases/1.megatron-lm/Makefile b/3.test_cases/1.megatron-lm/Makefile new file mode 100644 index 00000000..9a040dda --- /dev/null +++ b/3.test_cases/1.megatron-lm/Makefile @@ -0,0 +1,10 @@ +all: build clean import + +build: + docker build -t megatron-training -f 0.distributed-training.Dockerfile . + +clean: + -rm megatron-training.sqsh + +import: + enroot import -o megatron-training.sqsh dockerd://megatron-training:latest \ No newline at end of file diff --git a/3.test_cases/1.megatron-lm/README.md b/3.test_cases/1.megatron-lm/README.md index 370e0457..6818ee91 100644 --- a/3.test_cases/1.megatron-lm/README.md +++ b/3.test_cases/1.megatron-lm/README.md @@ -8,7 +8,7 @@ To run a test case you will go through a series of steps described below: 1. Build the data preprocessing container. -2. Preprocess the data using a tokenizer and the preprocessing container. +2. Pre-process the data using a tokenizer and the preprocessing container. 3. Build the container for distributed training 4. Train! @@ -29,9 +29,6 @@ You will also setup the following variables in your terminal environment. ```bash export DATA_PATH=/fsx # FSx for Lustre shared file-system -export APPS_PATH=/apps # this is were the squash file (Enroot file) will be stored -export TEST_CASE_PATH=${HOME}/1.megatron-lm # it is assumes that this test case is copied in your home directory -cd ${TEST_CASE_PATH} ``` ## 1. Data Preprocessing @@ -40,29 +37,27 @@ Before running training jobs you need to retrieve input data and preprocess it. Below are the steps you need to follow: -1. Copy the file `0.data-preprocessing.Dockerfile` or its content to your head-node. +1. Copy the file `0.distributed-training.Dockerfile` or its content to your head-node. 2. Build the container image with the command below ```bash - docker build -t megatron-preprocess -f 0.data-preprocessing.Dockerfile . 
+ docker build -t megatron-training -f 0.distributed-training.Dockerfile . ``` 3. Once the image is built, you can check if it is present with `docker images`. You should see an output similar to this one: ``` [ec2-user@ip-10-0-10-78 ~]$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE - megatron-preprocess latest a33c9d5bcb6e 9 seconds ago 20.7GB - 36b0f224fb00 25 minutes ago 20.5GB - nvcr.io/nvidia/pytorch 23.01-py3 9eda6061497d 5 months ago 20.5GB + megatron-training latest a33c9d5bcb6e 9 seconds ago 20.7GB ``` 4. Create the squash file with the command below. ```bash - enroot import -o ${APPS_PATH}/megatron-preprocess.sqsh dockerd://megatron-preprocess:latest + enroot import -o megatron-training.sqsh dockerd://megatron-training:latest ``` - The file will be stored in the `/apps` directory (if left as default). The output should look as below. + The file will be stored in the current directory (if left as default). The output should look as below. ```bash - [ec2-user@ip-10-0-10-78 ~]$ enroot import -o ./megatron-preprocess.sqsh dockerd://megatron-preprocess:latest + [ec2-user@ip-10-0-10-78 ~]$ enroot import -o ./megatron-training.sqsh dockerd://megatron-training:latest [INFO] Fetching image e19aa13505c1710876982dc440226dc479da5177dc4770452cc79bedc8b5b41d @@ -71,7 +66,7 @@ Below are the steps you need to follow: [INFO] Creating squashfs filesystem... Parallel mksquashfs: Using 32 processors - Creating 4.0 filesystem on /home/ec2-user/megatron-preprocess.sqsh, block size 131072. + Creating 4.0 filesystem on /home/ec2-user/megatron-training.sqsh, block size 131072. 
[==========================================================/] 299550/299550 100% Exportable Squashfs 4.0 filesystem, gzip compressed, data block size 131072 @@ -84,14 +79,13 @@ Below are the steps you need to follow: ```bash #!/bin/bash - mkdir -p ${DATA_PATH}/gpt2 - cd ${DATA_PATH}/gpt2 + mkdir -p gpt2 + cd gpt2/ wget https://huggingface.co/bigscience/misc-test-data/resolve/main/stas/oscar-1GB.jsonl.xz wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt xz -d oscar-1GB.jsonl.xz - cd ${TEST_CASE_PATH} # return to original testcase directory ``` 6. Now you copy the file `1.data-preprocessing.sbatch` or its content on your cluster then submit a preprocessing jobs with the command below: @@ -100,7 +94,7 @@ Below are the steps you need to follow: sbatch 1.data-preprocessing.sbatch ``` -7. You will see a new file in your current working directory called `slurm-XY.out` where `XY` is a number. This is your outputfile and will capture the `STDOUT` and `STDERR` from your job. You can check how it progresses via the command `tail -f slurm-XY.out` but with the relevant filename. The file content will be similar to the below: +7. You will see a new file in your current working directory called `slurm-XY.out` where `XY` is a number. This is your output file and will capture the `STDOUT` and `STDERR` from your job. You can check how it progresses via the command `tail -f slurm-XY.out` but with the relevant filename. The file content will be similar to the below: ``` 0: Opening /fsx/oscar-1GB.jsonl @@ -119,21 +113,12 @@ Voilà! You have executed the preprocessing job. You will go through the steps t Now that the data is preprocessed, we will pretrain a GPT3 model MegatronLM. - -1. Copy the file `2.training-batch.Dockerfile` or its content to your cluster. -2. Build the container with the command below. 
This will build a container with all the required dependencies to train your models with MegatronLM. - ```bash - docker build -t megatron-training -f ./2.distributed-training.Dockerfile . - ``` -3. Convert the docker container to a squash file in `/apps`. - ```bash - enroot import -o ${APPS_PATH}/megatron-training.sqsh dockerd://megatron-training:latest - ``` -4. You copy the file `3.distributed-training.sbatch` on your cluster then submit a training jobs with the command below: +1. Copy the file `2.distributed-training.sbatch` to your cluster then submit a training jobs with the command below: ```bash sbatch 3.distributed-training.sbatch ``` + 5. The training starts running and should produce an output similar to below if successful. diff --git a/3.test_cases/1.megatron-lm/test_megatron_lm.py b/3.test_cases/1.megatron-lm/test_megatron_lm.py index 4462d7de..5be8c0fd 100644 --- a/3.test_cases/1.megatron-lm/test_megatron_lm.py +++ b/3.test_cases/1.megatron-lm/test_megatron_lm.py @@ -2,11 +2,8 @@ import pytest import os -def test_img_data_processing(docker_build): +def test_img_megatron_training(docker_build, docker_run): print(f"module file {os.path.dirname(__file__)}") print(f"cwd {os.getcwd()}") - docker_build('megatron-preprocess', '0.data-preprocessing.Dockerfile') - -def test_img_megatron_training(docker_build, docker_run): - img = docker_build('megatron-preprocess', '2.distributed-training.Dockerfile') + img = docker_build('megatron-training', '0.distributed-training.Dockerfile') docker_run(img, ['python3', '-c', 'import torch']) From fd344b171a54dae93a20894e38ec70d4fd437f87 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Thu, 7 Dec 2023 21:56:58 +0900 Subject: [PATCH 245/648] Add region spec in easy-ssh.sh --- 1.architectures/5.sagemaker-hyperpod/easy-ssh.sh | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh b/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh index 
859bffab..421691a3 100755 --- a/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh +++ b/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh @@ -6,6 +6,7 @@ declare -a HELP=( "[-h|--help]" "[-c|--controller-group]" + "[-r|--region]" "CLUSTER_NAME" ) @@ -26,6 +27,10 @@ parse_args() { node_group="$2" shift 2 ;; + -r|--region) + region="$2" + shift 2 + ;; *) [[ "$cluster_name" == "" ]] \ && cluster_name="$key" \ @@ -39,13 +44,13 @@ parse_args() { } parse_args $@ -cluster_id=$(aws sagemaker describe-cluster --cluster-name $cluster_name | jq '.ClusterArn' | awk -F/ '{gsub(/"/, "", $NF); print $NF}') -instance_id=$(aws sagemaker list-cluster-nodes --cluster-name $cluster_name --region us-west-2 --instance-group-name-contains ${node_group} | jq '.ClusterNodeSummaries[0].InstanceId' | tr -d '"') +cluster_id=$(aws sagemaker describe-cluster --region ${region} --cluster-name $cluster_name | jq '.ClusterArn' | awk -F/ '{gsub(/"/, "", $NF); print $NF}') +instance_id=$(aws sagemaker list-cluster-nodes --cluster-name $cluster_name --region ${region} --instance-group-name-contains ${node_group} | jq '.ClusterNodeSummaries[0].InstanceId' | tr -d '"') echo "Cluster id: ${cluster_id}" echo "Instance id: ${instance_id}" echo "Node Group: ${node_group}" -echo "aws ssm start-session --target sagemaker-cluster:${cluster_id}_${node_group}-${instance_id}" +echo "aws ssm start-session --region ${region} --target sagemaker-cluster:${cluster_id}_${node_group}-${instance_id}" -aws ssm start-session --target sagemaker-cluster:${cluster_id}_${node_group}-${instance_id} +aws ssm start-session --region ${region} --target sagemaker-cluster:${cluster_id}_${node_group}-${instance_id} From 22c6f9700e808576f3b30d42d83ea4fb91882a6d Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Thu, 7 Dec 2023 12:15:17 -0800 Subject: [PATCH 246/648] Update install_docker.sh add `ubuntu` user to docker group. 
--- .../LifecycleScripts/base-config/utils/install_docker.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh index f162096f..7dc15548 100755 --- a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh @@ -22,3 +22,6 @@ curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dear sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list sudo apt-get install -y nvidia-container-toolkit + +# add user to docker group +sudo usermod -aG docker ${USER} From 4c81320e128f619b88260ddb71198a211601eb53 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Thu, 7 Dec 2023 15:54:46 -0600 Subject: [PATCH 247/648] Update readme --- 3.test_cases/1.megatron-lm/README.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/3.test_cases/1.megatron-lm/README.md b/3.test_cases/1.megatron-lm/README.md index 6818ee91..2bee3a49 100644 --- a/3.test_cases/1.megatron-lm/README.md +++ b/3.test_cases/1.megatron-lm/README.md @@ -24,13 +24,14 @@ This guide assumes that you have the following: It is recommended that you use the templates in the architectures [directory](../../1.architectures) - You will also setup the following variables in your terminal environment. ```bash export DATA_PATH=/fsx # FSx for Lustre shared file-system ``` +Make sure that your current directory is under a shared filesystem such as `/fsx/` or the home directory when using [Parallel Cluster](../../1.architectures/aws-parallelcluster). + ## 1. Data Preprocessing Before running training jobs you need to retrieve input data and preprocess it. 
This section of the guide you will retrieve a container then you convert it into a Squash file via [Enroot](https://github.com/NVIDIA/enroot), you will then retrieve input data ans tokenize it using the GPT2 vocabulary. @@ -45,15 +46,19 @@ Below are the steps you need to follow: ``` 3. Once the image is built, you can check if it is present with `docker images`. You should see an output similar to this one: + ``` [ec2-user@ip-10-0-10-78 ~]$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE megatron-training latest a33c9d5bcb6e 9 seconds ago 20.7GB ``` + 4. Create the squash file with the command below. + ```bash enroot import -o megatron-training.sqsh dockerd://megatron-training:latest ``` + The file will be stored in the current directory (if left as default). The output should look as below. ```bash @@ -108,7 +113,6 @@ Below are the steps you need to follow: Voilà! You have executed the preprocessing job. You will go through the steps to run your training job. - ## 2. Distributed training Now that the data is preprocessed, we will pretrain a GPT3 model MegatronLM. @@ -121,7 +125,6 @@ Now that the data is preprocessed, we will pretrain a GPT3 model MegatronLM. 5. The training starts running and should produce an output similar to below if successful. 
- ``` 1: iteration 25/73242187 | consumed samples: 50 | elapsed time per iteration (ms): 87.0 | learning rate: 1.638E-08 | global batch size: 2 | lm loss: 1.086954E+01 | loss scale: 4294967296.0 | grad norm: 0.000 | number of skipped iterations: 0 | number of nan iterations: 0 | 1: iteration 26/73242187 | consumed samples: 52 | elapsed time per iteration (ms): 86.5 | learning rate: 1.704E-08 | global batch size: 2 | lm loss: 1.086217E+01 | loss scale: 4294967296.0 | grad norm: 0.000 | number of skipped iterations: 0 | number of nan iterations: 0 | From 7766dfaedab81f08fd5c96705afa89eccd3cba72 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Thu, 7 Dec 2023 16:05:26 -0600 Subject: [PATCH 248/648] Make --region optional; support --profile --- 1.architectures/5.sagemaker-hyperpod/easy-ssh.sh | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh b/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh index 421691a3..c5d83279 100755 --- a/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh +++ b/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh @@ -7,11 +7,13 @@ declare -a HELP=( "[-h|--help]" "[-c|--controller-group]" "[-r|--region]" + "[-p|--profile]" "CLUSTER_NAME" ) cluster_name="" node_group="controller-machine" +declare -a aws_cli_args=() parse_args() { local key @@ -28,7 +30,11 @@ parse_args() { shift 2 ;; -r|--region) - region="$2" + aws_cli_args+=(--region "$2") + shift 2 + ;; + -p|--profile) + aws_cli_args+=(--profile "$2") shift 2 ;; *) @@ -44,13 +50,13 @@ parse_args() { } parse_args $@ -cluster_id=$(aws sagemaker describe-cluster --region ${region} --cluster-name $cluster_name | jq '.ClusterArn' | awk -F/ '{gsub(/"/, "", $NF); print $NF}') -instance_id=$(aws sagemaker list-cluster-nodes --cluster-name $cluster_name --region ${region} --instance-group-name-contains ${node_group} | jq '.ClusterNodeSummaries[0].InstanceId' | tr -d '"') +cluster_id=$(aws sagemaker describe-cluster 
"${aws_cli_args[@]}" --cluster-name $cluster_name | jq '.ClusterArn' | awk -F/ '{gsub(/"/, "", $NF); print $NF}') +instance_id=$(aws sagemaker list-cluster-nodes "${aws_cli_args[@]}" --cluster-name $cluster_name --instance-group-name-contains ${node_group} | jq '.ClusterNodeSummaries[0].InstanceId' | tr -d '"') echo "Cluster id: ${cluster_id}" echo "Instance id: ${instance_id}" echo "Node Group: ${node_group}" -echo "aws ssm start-session --region ${region} --target sagemaker-cluster:${cluster_id}_${node_group}-${instance_id}" +echo "aws ssm start-session "${aws_cli_args[@]}" --target sagemaker-cluster:${cluster_id}_${node_group}-${instance_id}" -aws ssm start-session --region ${region} --target sagemaker-cluster:${cluster_id}_${node_group}-${instance_id} +aws ssm start-session "${aws_cli_args[@]}" --target sagemaker-cluster:${cluster_id}_${node_group}-${instance_id} From 0ebe80a60423b081bc5fba2ed87d9eaf3dc4d8fa Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Fri, 8 Dec 2023 16:34:17 -0800 Subject: [PATCH 249/648] Update README.md --- 3.test_cases/1.megatron-lm/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/1.megatron-lm/README.md b/3.test_cases/1.megatron-lm/README.md index 2bee3a49..32628eb2 100644 --- a/3.test_cases/1.megatron-lm/README.md +++ b/3.test_cases/1.megatron-lm/README.md @@ -120,7 +120,7 @@ Now that the data is preprocessed, we will pretrain a GPT3 model MegatronLM. 1. Copy the file `2.distributed-training.sbatch` to your cluster then submit a training jobs with the command below: ```bash - sbatch 3.distributed-training.sbatch + sbatch 2.distributed-training.sbatch ``` 5. The training starts running and should produce an output similar to below if successful. 
From 4ca9e4d7b581783b061e8b1e3e92913fe742f829 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Mon, 11 Dec 2023 16:18:29 -0800 Subject: [PATCH 250/648] Add SSM Proxy command Signed-off-by: Sean Smith --- 1.architectures/5.sagemaker-hyperpod/easy-ssh.sh | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh b/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh index c5d83279..136e85e3 100755 --- a/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh +++ b/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh @@ -57,6 +57,17 @@ echo "Cluster id: ${cluster_id}" echo "Instance id: ${instance_id}" echo "Node Group: ${node_group}" -echo "aws ssm start-session "${aws_cli_args[@]}" --target sagemaker-cluster:${cluster_id}_${node_group}-${instance_id}" +echo -e "aws ssm start-session "${aws_cli_args[@]}" --target sagemaker-cluster:${cluster_id}_${node_group}-${instance_id}" + +echo -e "Add the following to your ~/.ssh/config to easily connect:" +echo " +Host ${cluster_name} + User ubuntu + ProxyCommand sh -c \"aws ssm start-session ${aws_cli_args[@]} --target sagemaker-cluster:${cluster_id}_${node_group}-${instance_id} --document-name AWS-StartSSHSession --parameters 'portNumber=%p'\" + +Add your ssh keypair and then you can do: + +$ ssh ${cluster_name} +" aws ssm start-session "${aws_cli_args[@]}" --target sagemaker-cluster:${cluster_id}_${node_group}-${instance_id} From 70b2833457ec7b8823fadb57c126f08bf9fcdca6 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Wed, 13 Dec 2023 12:26:12 -0800 Subject: [PATCH 251/648] Update easy-ssh.sh --- 1.architectures/5.sagemaker-hyperpod/easy-ssh.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh b/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh index 136e85e3..d190ff24 100755 --- a/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh +++ b/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh @@ -61,9 +61,11 @@ echo -e "aws ssm 
start-session "${aws_cli_args[@]}" --target sagemaker-cluster:$ echo -e "Add the following to your ~/.ssh/config to easily connect:" echo " +cat <> ~/.ssh/config Host ${cluster_name} User ubuntu ProxyCommand sh -c \"aws ssm start-session ${aws_cli_args[@]} --target sagemaker-cluster:${cluster_id}_${node_group}-${instance_id} --document-name AWS-StartSSHSession --parameters 'portNumber=%p'\" +EOF Add your ssh keypair and then you can do: From 6a7623405aa8ca1c9ff4047d376c01db1d327278 Mon Sep 17 00:00:00 2001 From: Suhit Kodgule Date: Thu, 14 Dec 2023 09:40:39 -0800 Subject: [PATCH 252/648] Fix arg parsing for tokenizer and max_context_width --- 3.test_cases/10.FSDP/train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/3.test_cases/10.FSDP/train.py b/3.test_cases/10.FSDP/train.py index ce061ca5..9aec5687 100644 --- a/3.test_cases/10.FSDP/train.py +++ b/3.test_cases/10.FSDP/train.py @@ -45,7 +45,7 @@ def create_streaming_dataloaders(dataset, val_batch_size=1, max_context_width=4096, workers=4): - tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b") + tokenizer = AutoTokenizer.from_pretrained(tokenizer) data = load_dataset(dataset, 'en', streaming=True).shuffle(42+global_rank) train_concat_dataset = ConcatTokensDataset(data['train'], tokenizer, max_context_width, True) val_concat_dataset = ConcatTokensDataset(data['validation'], tokenizer, max_context_width, True) @@ -252,7 +252,7 @@ def main(args): global_rank=global_rank, train_batch_size=args.train_batch_size, val_batch_size=args.val_batch_size, - max_context_width=4096, + max_context_width=args.max_context_width, workers=4) train(model, From 567d858cea75c17858c823ca1a91793508d03a56 Mon Sep 17 00:00:00 2001 From: Suhit Kodgule Date: Thu, 14 Dec 2023 09:40:59 -0800 Subject: [PATCH 253/648] Use llama tokenizer for llama model to fix nan loss --- 3.test_cases/10.FSDP/1.distributed-training.sbatch | 1 + 1 file changed, 1 insertion(+) diff --git 
a/3.test_cases/10.FSDP/1.distributed-training.sbatch b/3.test_cases/10.FSDP/1.distributed-training.sbatch index 2d5fc8ca..5ed8ece9 100755 --- a/3.test_cases/10.FSDP/1.distributed-training.sbatch +++ b/3.test_cases/10.FSDP/1.distributed-training.sbatch @@ -51,6 +51,7 @@ declare -a TRAINING_ARGS=( --num_layers=32 \ # 7b: 32 13b: 40 70b: 80 --num_heads=32 \ # 7b: 32 13b: 40 70b: 64 --model_type=llama_v2 \ + --tokenizer="hf-internal-testing/llama-tokenizer" \ --checkpoint_freq=50 \ --validation_freq=500 \ --checkpoint_dir=./checkpoints \ From 1b5da78142782f4a6541428ed5daa3a415097f1e Mon Sep 17 00:00:00 2001 From: johnbensnyder Date: Thu, 14 Dec 2023 10:39:24 -0800 Subject: [PATCH 254/648] Removed docker from default config --- .../LifecycleScripts/base-config/lifecycle_script.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py index e20df155..e26f6a5f 100644 --- a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py @@ -152,8 +152,8 @@ def main(args): ExecuteBashScript("./start_slurm.sh").run(node_type, ",".join(controllers)) # Note: Uncomment the below lines to install docker and enroot - ExecuteBashScript("./utils/install_docker.sh").run() - ExecuteBashScript("./utils/install_enroot_pyxis.sh").run(node_type) + # ExecuteBashScript("./utils/install_docker.sh").run() + # ExecuteBashScript("./utils/install_enroot_pyxis.sh").run(node_type) print("[INFO]: Success: All provisioning scripts completed") From a75f0178bb728b306c379015fd95ec0d3bb767e3 Mon Sep 17 00:00:00 2001 From: Ben Snyder Date: Thu, 14 Dec 2023 10:41:25 -0800 Subject: [PATCH 255/648] Revert "Update lifecycle_script.py install pyxis and enroot by default" This reverts commit 
2690993184de18c2b2b563289db80022b22fbaa4. --- .../LifecycleScripts/base-config/lifecycle_script.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py index e20df155..e26f6a5f 100644 --- a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py @@ -152,8 +152,8 @@ def main(args): ExecuteBashScript("./start_slurm.sh").run(node_type, ",".join(controllers)) # Note: Uncomment the below lines to install docker and enroot - ExecuteBashScript("./utils/install_docker.sh").run() - ExecuteBashScript("./utils/install_enroot_pyxis.sh").run(node_type) + # ExecuteBashScript("./utils/install_docker.sh").run() + # ExecuteBashScript("./utils/install_enroot_pyxis.sh").run(node_type) print("[INFO]: Success: All provisioning scripts completed") From 772ff13426a9de575721e94651e25bfe20041e61 Mon Sep 17 00:00:00 2001 From: Andrew Tian Date: Wed, 13 Dec 2023 15:03:01 -0800 Subject: [PATCH 256/648] starting scripts for smmodelparallel --- 3.test_cases/11.modelparallel/README.md | 70 ++ 3.test_cases/11.modelparallel/arguments.py | 333 +++++++++ 3.test_cases/11.modelparallel/conda_launch.sh | 34 + 3.test_cases/11.modelparallel/data/README.md | 29 + .../11.modelparallel/data/__init__.py | 0 .../11.modelparallel/data/dataset/__init__.py | 0 .../data/dataset/dummy_dataset.py | 21 + .../data/dataset/gpt_dataset.py | 76 ++ .../data/pipelines/__init__.py | 44 ++ .../data/pipelines/data_pipeline.py | 105 +++ .../data/pipelines/dummy_data_pipeline.py | 34 + .../data/pipelines/gpt_data_pipeline.py | 160 ++++ .../data/pipelines/hf_data_pipeline.py | 51 ++ .../nemo_megatron_gpt_data_pipeline.py | 144 ++++ .../prep/_prepare_nemo_megatron_dataset.py | 392 ++++++++++ 
.../data/prep/prep_hf_dataset.slurm | 25 + .../data/prep/prep_nmt_dataset.slurm | 13 + .../data/prep/prepare_hf_dataset.py | 186 +++++ .../prep/prepare_nemo_megatron_dataset.py | 39 + 3.test_cases/11.modelparallel/data/utils.py | 10 + 3.test_cases/11.modelparallel/data_utils.py | 10 + 3.test_cases/11.modelparallel/fsdp_utils.py | 60 ++ .../11.modelparallel/scripts/model.sh | 142 ++++ 3.test_cases/11.modelparallel/train.py | 21 + .../11.modelparallel/train_external.py | 14 + 3.test_cases/11.modelparallel/train_lib.py | 685 ++++++++++++++++++ 3.test_cases/11.modelparallel/train_utils.py | 281 +++++++ 27 files changed, 2979 insertions(+) create mode 100644 3.test_cases/11.modelparallel/README.md create mode 100644 3.test_cases/11.modelparallel/arguments.py create mode 100644 3.test_cases/11.modelparallel/conda_launch.sh create mode 100644 3.test_cases/11.modelparallel/data/README.md create mode 100644 3.test_cases/11.modelparallel/data/__init__.py create mode 100644 3.test_cases/11.modelparallel/data/dataset/__init__.py create mode 100644 3.test_cases/11.modelparallel/data/dataset/dummy_dataset.py create mode 100644 3.test_cases/11.modelparallel/data/dataset/gpt_dataset.py create mode 100644 3.test_cases/11.modelparallel/data/pipelines/__init__.py create mode 100644 3.test_cases/11.modelparallel/data/pipelines/data_pipeline.py create mode 100644 3.test_cases/11.modelparallel/data/pipelines/dummy_data_pipeline.py create mode 100644 3.test_cases/11.modelparallel/data/pipelines/gpt_data_pipeline.py create mode 100644 3.test_cases/11.modelparallel/data/pipelines/hf_data_pipeline.py create mode 100644 3.test_cases/11.modelparallel/data/pipelines/nemo_megatron_gpt_data_pipeline.py create mode 100644 3.test_cases/11.modelparallel/data/prep/_prepare_nemo_megatron_dataset.py create mode 100644 3.test_cases/11.modelparallel/data/prep/prep_hf_dataset.slurm create mode 100644 3.test_cases/11.modelparallel/data/prep/prep_nmt_dataset.slurm create mode 100644 
3.test_cases/11.modelparallel/data/prep/prepare_hf_dataset.py create mode 100644 3.test_cases/11.modelparallel/data/prep/prepare_nemo_megatron_dataset.py create mode 100644 3.test_cases/11.modelparallel/data/utils.py create mode 100644 3.test_cases/11.modelparallel/data_utils.py create mode 100644 3.test_cases/11.modelparallel/fsdp_utils.py create mode 100755 3.test_cases/11.modelparallel/scripts/model.sh create mode 100644 3.test_cases/11.modelparallel/train.py create mode 100644 3.test_cases/11.modelparallel/train_external.py create mode 100644 3.test_cases/11.modelparallel/train_lib.py create mode 100644 3.test_cases/11.modelparallel/train_utils.py diff --git a/3.test_cases/11.modelparallel/README.md b/3.test_cases/11.modelparallel/README.md new file mode 100644 index 00000000..e293db8a --- /dev/null +++ b/3.test_cases/11.modelparallel/README.md @@ -0,0 +1,70 @@ +## SMP v2 Examples +In this directory we have example scripts for training with SMP Pytorch. We assume you have already setup a conda environment with SMP Pytorch. Below we first describe the files in this directory, and then go over how to run some jobs. + +### Files +- `train_lib.py` : Main training script +- `train.py` : Entrypoint to launch `train_lib.py` +- `scripts/model.sh` : Main script which passes the config and launches `train.py`. This is used by `conda_launch.sh` and scripts in convergence_jobs folder. If you want to define your own model configuration you might want to modify this. +- `arguments.py` : Parses arguments for the job. Please refer to this file for all the options the script supports. +- `checkpoints.py` : Handles saving and loading of checkpoints +- `data_pipeline.py`: Creates dataloaders for the job. Modify this file to load your own dataset. +- `delayed_param.py` : Delayed parameter initialization to init large models without OOM +- `learning_rates.py`, `train_utils.py`, `fsdp_utils.py`, `utils.py`, `memory_tracker.py` have utilities used by the main script. 
+ +#### Launch scripts +- `conda_launch.sh` : This is a slurm script which launches a job using the activated conda environment. It expects to be run on the master node of the Slurm cluster. See below section for instructions. By default it runs with synthetic data to make it easy to test the scripts. +- `convergence_jobs/neox_7b/neox_7b_4Mtokens.sh` : This is an example for launching a convergence job with slurm, an extension of `conda_launch.sh` + +## Note on paths +These scripts need to be put on a directory that can be accessed on all nodes, such as FSX. +We also recommend setting all paths (for input data and checkpoints) as shared directories using FSX. +These paths can be set in scripts as shown in `convergence_jobs/neox_7b/neox_7b_4Mtokens.sh`. + +## User Guide + +1. Launching a job with synthetic data on 16 nodes. The default config in the script launches a 7B GPT NeoX model with synthetic data. +``` +conda activate /PATH/TO/CONDA/ENV +sbatch -N 16 conda_launch.sh + +# or + +sbatch -N 16 conda_launch.sh /PATH/TO/CONDA/ENV +``` + +2. Changing arguments taken by the script. +`model.sh` takes certain arguments from the launch script, and uses them to pass args to the training script. You can refer to `model.sh` if those are the arguments you would like to change. For example, it takes the model size and sets the appropriate hidden_width,num_layers etc for the training script. + +If model.sh doesn't take the argument but is taken by train_lib (arguments.py), you can still pass it to model.sh and the script will forward the arg. This is how the above script passes `--use_synthetic_data 1`. + +3. To run with your own data +With the current dataloader in the script data needs to be prepared as json or json.gz (needs the arg `--zipped_data 1`) files, where each file has a json line with input_ids and attention_mask in them. Please refer to data_pipeline.py for more. You can always replace with your own dataloader. +``` +# 2a. 
modify the conda_launch.sh script with path to data +# 2b. start training +sbatch -N 16 conda_launch.sh /PATH/TO/CONDA/ENV +``` + +4. Running a convergence job or experiment +We have put together an example of a convergence script using the above referenced `scripts/model.sh` script. The script sets the model type, size, checkpointing directory, tensorboard directory for metrics, and other hyperparameters. This is a slurm script, used with sbatch similar to above. + +``` +sbatch -N 64 convergence_jobs/neox_7b/neox_7b_4Mtokens.sh +``` +or +``` +sbatch -N 64 --job-name neox_7b_4M_trial1 convergence_jobs/neox_7b/neox_7b_4Mtokens.sh +``` + +5. Resuming convergence job from a checkpoint +Modify the --resume_from_checkpoint arg with the path of the checkpoint. Then the job is started same as before. +``` +sbatch -N 64 convergence_jobs/neox_7b/neox_7b_4Mtokens.sh +``` + +6. Running a finetuning job or experiment +In order to run a finetune experiment `--finetune 1` needs to be set. Either pretrained model name `--pretrained_model_name ` arg or a checkpoint file name `--pretrained_checkpoint_file` arg needs to be provided. + +If `--pretrained_model_name ` is provided pretrained model config will be used for finetuning. If `--pretrained_model_name` is provided `--finetune_checkpoint_load_dir` also needs to be provided. + +If `--finetune 1` is set together with `--resume_from_checkpoint`, training will resume from the provided checkpoint. diff --git a/3.test_cases/11.modelparallel/arguments.py b/3.test_cases/11.modelparallel/arguments.py new file mode 100644 index 00000000..5de56f80 --- /dev/null +++ b/3.test_cases/11.modelparallel/arguments.py @@ -0,0 +1,333 @@ +"""FSDP binary script arguments.""" + +import argparse +import os + + +def parse_args(): # pylint: disable=too-many-statements + """Parse args.""" + parser = argparse.ArgumentParser() + + # hyperparameters sent by the client are passed as command-line arguments to the script. 
+ + ### OPTIMIZATION + opt_grp = parser.add_argument_group( + title="optimization", description="arguments for optimization" + ) + opt_grp.add_argument( + "--train_batch_size", + type=int, + default=2, + help="batch size per dp rank, for tensor parallelism degree 8 with pipeline parallel degree 1 this means 8*this batch size per node", # pylint: disable=line-too-long + ) + opt_grp.add_argument("--max_steps", "--max_training_steps", type=int, default=5000) + opt_grp.add_argument( + "--epochs", type=int, default=3, help="times of iterating over the training dataset" + ) + opt_grp.add_argument("--seed", type=int, default=12345) + opt_grp.add_argument("--same_seed", type=int, default=0) + opt_grp.add_argument("--bf16", default=1, type=int, help="automatic mixed precision training") + opt_grp.add_argument("--grad_clip", default=1.0, type=float, help="gradient clipping") + opt_grp.add_argument("--weight_decay", default=0.2, type=float, help="weight decay") + opt_grp.add_argument( + "--beta1", default=0.9, type=float, help="beta1 parameter for Adam optimizer" + ) + opt_grp.add_argument( + "--beta2", default=0.95, type=float, help="beta2 parameter for Adam optimizer" + ) + + # Learning rate + lr_grp = parser.add_argument_group( + title="lr", description="arguments for learning rate schedule" + ) + lr_grp.add_argument("--lr", type=float, default=0.0001, help="Initial learning rate.") + lr_grp.add_argument( + "--lr_decay_style", + type=str, + default="cosine", + choices=["constant", "linear", "cosine", "exponential", "plateau"], + help="Learning rate decay function.", + ) + lr_grp.add_argument( + "--lr_decay_iters", + type=int, + default=47683, + help="number of iterations to decay learning rate over," " If None defaults to train iters", + ) + lr_grp.add_argument( + "--min_lr", + type=float, + default=1e-05, + help="Minumum value for learning rate. 
The scheduler" "clip values below this threshold.", + ) + lr_grp.add_argument( + "--warmup", + type=float, + default=0.0032, + help="Percentage of total iterations to warmup on " + "(.01 = 1 percent of all training iters).", + ) + lr_grp.add_argument( + "--plateau", + type=float, + default=0.0, + help="Percentage of total iterations to keep at max if using plateau lr", + ) + + ### MEMORY USAGE RELATED + mem_grp = parser.add_argument_group(title="memory usage", description="arguments for memory") + mem_grp.add_argument( + "--activation_checkpointing", + type=int, + default=1, + help="enable gradient checkpointing to reduce memory consumption", + ) + mem_grp.add_argument("--offload_activations", type=int, default=0) + mem_grp.add_argument("--activation_loading_horizon", type=int, default=2) + mem_grp.add_argument("--patch_neox_rope", type=int, default=1) + mem_grp.add_argument("--delayed_param", type=int, default=1) + mem_grp.add_argument( + "--enable_memory_profiling", type=int, default=0, help="Enable memory profile" + ) + mem_grp.add_argument( + "--clean_cache", + type=int, + default=0, + help="Clean torch reserved memory at he end of every step", + ) + + ### LOGGING + logging_grp = parser.add_argument_group( + title="logging", description="arguments for logging metrics" + ) + logging_grp.add_argument( + "--logging_freq", type=int, default=1, help="number of iterations between logging" + ) + logging_grp.add_argument( + "--logging_freq_for_avg", + type=int, + default=50, + help="number of iterations between logging the running avg", + ) + logging_grp.add_argument( + "--log_reduced_training_loss", + type=int, + default=0, + help="to log training loss after reducing across all data parallel ranks with logging_freq frequency", # pylint: disable=line-too-long + ) + logging_grp.add_argument("--tensorboard_dir", type=str, nargs="+", default=None) + + ### CHECKPOINTS + ckpt_grp = parser.add_argument_group(title="checkpoints", description="checkpointing arguments") + 
ckpt_grp.add_argument( + "--num_kept_checkpoints", + nargs="+", + type=int, + default=[2], + help="how many checkpoints to keep before deleting", + ) + ckpt_grp.add_argument( + "--checkpoint_freq", + nargs="+", + type=int, + default=[1000], + help="number of iterations between checkpointing", + ) + ckpt_grp.add_argument( + "--checkpoint_dir", + nargs="+", + type=str, + default=["/opt/ml/checkpoints"], + help="Saves partial checkpoints (model, optimizer) to this dir, and loads latest checkpoint from this if load_partial is specified.", # pylint: disable=line-too-long + ) + ckpt_grp.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help="Checkpoint folder name to load from", + ) + ckpt_grp.add_argument( + "--checkpoint_type", type=str, default="sharded", choices=["local", "sharded", "use_pg_with_util"] + ) + ckpt_grp.add_argument( + "--model_dir", + type=str, + default=None, + help="If not passed, saves it to checkpoint_dir/model. Only saved when save_final_model is 1", + ) + ckpt_grp.add_argument("--save_final_model", type=int, default=0) + + ### I/O + input_grp = parser.add_argument_group(title="inputs", description="location for data") + + input_grp.add_argument( + "--dataset_type", type=str, default="gpt_jsonl", choices=["gpt_jsonl", "hf"] + ) + input_grp.add_argument("--data_num_workers", type=int, default=0) + + input_grp.add_argument("--data_type", type=str.lower, default="gpt", choices=["gpt", "bert"]) + # dummy dataset + input_grp.add_argument("--use_synthetic_data", type=int, default=0) + + # gpt dataset + input_grp.add_argument("--zipped_data", type=int, default=1, help="input data is zipped files") + input_grp.add_argument("--training_dir", type=str, default=os.getenv("SM_CHANNEL_TRAIN")) + input_grp.add_argument("--test_dir", type=str, default=os.getenv("SM_CHANNEL_TEST")) + + ### MODEL + model_grp = parser.add_argument_group( + title="model", description="arguments to describe model configuration" + ) + model_grp.add_argument( + 
"--hf_pretrained_model_name_or_dir", + type=str, + default=None, + help=( + "For finetuning, pass the pretrained Huggingface model name or path where the model is downloaded. " + "Example: EleutherAI/gpt-neox-20b. or /path/to/downloaded/model. " + "This flag is used for loading both config and weights. " + "When this config is used, flags such as vocab_size, hidden_width etc are ignored in creating the model. " + "For finetuning you need to set this flag even when resuming from a checkpoint. " + ), + ) + model_grp.add_argument("--max_context_width", type=int, default=2048) + model_grp.add_argument("--vocab_size", type=int, default=50432) + model_grp.add_argument("--hidden_width", type=int, default=768) + model_grp.add_argument("--num_layers", type=int, default=12) + model_grp.add_argument("--num_heads", type=int, default=12) + model_grp.add_argument("--resid_pdrop", type=float, default=0.1) + model_grp.add_argument("--embd_pdrop", type=float, default=0.1) + model_grp.add_argument("--attn_pdrop", type=float, default=0.1) + model_grp.add_argument("--summary_first_pdrop", type=float, default=0.1) + model_grp.add_argument("--initializer_range", type=float, default=0.02) + model_grp.add_argument( + "--model_type", type=str, default="gpt_neox", choices=["gpt_neox", "llama_v2", "gpt2"] + ) + model_grp.add_argument("--rotary_pct", type=float, default=0.25) + model_grp.add_argument("--rotary_emb_base", type=int, default=10000) + model_grp.add_argument("--use_smp_flash_attn", type=int, default=1) + model_grp.add_argument( + "--llama_intermediate_size", + type=int, + default=11008, + help="intermediate_size for Llama v2, a dimension associated with MLP", + ) + model_grp.add_argument( + "--num_key_value_heads", + type=int, + default=None, + help="num_key_value_heads for Llama v2", + ) + model_grp.add_argument( + "--use_smp_implementation", + type=int, + default=0, + help="Whether to use SMP optimized implementation of model. " + "All models may not be supported." 
+ "When using tensor_parallel_degree, this is automatically enabled.", + ) + model_grp.add_argument( + "--tensor_parallel_degree", + type=int, + default=1, + help="Whether to enable tensor parallelism. If degree > 1, then " + "--use_smp_implementation is assumed to be 1.", + ) + + ### FSDP args + fsdp_grp = parser.add_argument_group( + title="fsdp", description="arguments for fully sharded data parallel" + ) + fsdp_grp.add_argument("--limit_all_gathers", default=1, type=int) + fsdp_grp.add_argument("--forward_prefetch", default=1, type=int) + fsdp_grp.add_argument( + "--sharding_strategy", + type=str, + default="hybrid_shard", + help="options: no_shard, shard_grad_op, hybrid_shard, _hybrid_shard_zero2, full_shard", + ) + fsdp_grp.add_argument( + "--use_orig_params", + default=0, + type=int, + help="This flag needs to be set when you need multiple param groups for optimizer, such as for weight decay", + ) + # Note that `shard_degree` might rewrite `sharding_strategy`: + # + # 1. When there is no explicit `shard_degree` or `0`, will fall back to native PyTorch, for all + # `sharding_strategy` cases. + # + # 2. When there is explicit `shard_degree` and it's in `[1, world_size]`: + # - Will rewrite `sharding_strategy` to `HYBRID_SHARD`, when and only when it's not either of + # the two native hybrid strategies, i.e. `{HYBRID_SHARD, _HYBRID_SHARD_ZERO2}`. + # + # - Will use hybrid sharding implementation by SageMaker: + # - 1: Should be equivalent to native PyTorch's `NO_SHARD`. + # - Might have some issues when exporting checkpoints to the disk in native PyTorch. + # - 8: Should be equivalent to native PyTorch's `HYBRID_SHARD`. + # - $world_size: Should be equivalent to native PyTorch's `FULL_SHARD`, though throughput + # might be worse with unnecessary communications. + # - Other values e.g. 2, 4, 16, etc, as long as $world_size is divisible by them: + # - Newly supported sharding implementation by SageMaker. 
+ fsdp_grp.add_argument( + "--shard_degree", + type=int, + default=None, + nargs="?", + help="Sharding degree for partial shard strategy", + ) + fsdp_grp.add_argument( + "--backward_fetch_policy", + type=str, + default="backward_pre", + help="options: backward_post, backward_pre", + ) + fsdp_grp.add_argument( + "--auto_wrap_policy", + type=str, + default="transformer_auto_wrap_policy", + help="options: size_based_auto_wrap_policy, transformer_auto_wrap_policy", + ) + + ### VALIDATION + validation_grp = parser.add_argument_group( + title="validation", description="arguments for validation" + ) + validation_grp.add_argument( + "--validation_freq", + type=int, + default=None, + help="number of iterations to print validation loss", + ) + validation_grp.add_argument( + "--validation_batches", + type=int, + default=10, + help="number of batches to estimate validation loss", + ) + validation_grp.add_argument( + "--preserve_np_state", + type=int, + default=0, + help="Perserve the numpy random state between validation", + ) + validation_grp.add_argument( + "--fast_validation", + type=int, + default=1, + help="Running validation only with the last data file for faster speed", + ) + validation_grp.add_argument("--val_batch_size", type=int, default=4) + + ### OTHERS + parser.add_argument( + "--distributed_backend", + type=str, + default="smddp", + choices=["smddp", "nccl"], + help="Distributed backend to use for collectives", + ) + parser.add_argument("--profile_nsys", type=int, default=0) + parser.add_argument("--framework", type=str, default="fsdp") + + return parser.parse_known_args() diff --git a/3.test_cases/11.modelparallel/conda_launch.sh b/3.test_cases/11.modelparallel/conda_launch.sh new file mode 100644 index 00000000..000e75f9 --- /dev/null +++ b/3.test_cases/11.modelparallel/conda_launch.sh @@ -0,0 +1,34 @@ +#!/bin/bash +#SBATCH --output=logs/%x_%j.out # Redirects outputs to file in current_dir/logs +#SBATCH --error=logs/%x_%j.out # Redirects err to same file in 
current_dir/logs +#SBATCH --job-name=fsdp_smp + +# has to be shared dir +CONDA_ENV_PATH=${1:-"$CONDA_DEFAULT_ENV"} +SHELL_SCRIPT=${2:-"scripts/model.sh"} +shift 2 +SCRIPT_ARGS=$@ + +if [ -z $CONDA_ENV_PATH ]; then + echo "Conda env path needs to be passed. Exiting" + exit 1 +fi +if [ -z "$SCRIPT_ARGS" ]; then + SCRIPT_ARGS="" +else + SCRIPT_ARGS+=" " +fi + +SCRIPT_ARGS+="--use_synthetic_data 1 " + +# Replace with real data like below +# SCRIPT_ARGS+="--training_dir /fsx/datasets/train_ids_wsvocab_redo_2048_smaller " +# SCRIPT_ARGS+="--test_dir /fsx/datasets/val_ids_wsvocab_2048 " + +SCRIPT_ARGS+="--model_type gpt_neox --model_size 7b " +# SCRIPT_ARGS+="--max_steps 10 " +# SCRIPT_ARGS+="--train_batch_size 1 " + +HOSTFILE=hosts_${SLURM_JOB_ID} +scontrol show hostnames | sort > $HOSTFILE +srun -l -D `pwd` conda run -p $CONDA_ENV_PATH --no-capture-output $SHELL_SCRIPT --hostfile $HOSTFILE $SCRIPT_ARGS diff --git a/3.test_cases/11.modelparallel/data/README.md b/3.test_cases/11.modelparallel/data/README.md new file mode 100644 index 00000000..27fd83e7 --- /dev/null +++ b/3.test_cases/11.modelparallel/data/README.md @@ -0,0 +1,29 @@ +## Installation +### When using HF dataset + +``` +pip install datasets +``` +### When using nemo megatron dataset + +``` +conda install torchvision torchaudio --override-channels -c pytorch -c conda-forge +pip install Cython +pip install nemo_toolkit['all'] +``` + +## Preparation of datasets +``` +sbatch prep/prep_hf_dataset.slurm +``` +or +``` +sbatch prep/prep_nmt_dataset.slurm +``` + +## Using prepared datasets +1. Using HF dataset: +You will need to pass at least `--dataset_type hf` and `--training_dir` and `--test_dir` args. + +2. Using NMT dataset: +Currently there's a limitation in NMT to only use upto 255 files. That said, refer to the args for `# megatron dataset` in arguments.py. 
diff --git a/3.test_cases/11.modelparallel/data/__init__.py b/3.test_cases/11.modelparallel/data/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/3.test_cases/11.modelparallel/data/dataset/__init__.py b/3.test_cases/11.modelparallel/data/dataset/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/3.test_cases/11.modelparallel/data/dataset/dummy_dataset.py b/3.test_cases/11.modelparallel/data/dataset/dummy_dataset.py new file mode 100644 index 00000000..92f1fc6d --- /dev/null +++ b/3.test_cases/11.modelparallel/data/dataset/dummy_dataset.py @@ -0,0 +1,21 @@ +import torch + + +class DummyDataset(torch.utils.data.dataset.Dataset): + """Dummy Dataset.""" + + def __init__(self, vocabulary_size=1024, seqlen=2048, length=100000, data_type="gpt"): + self.vocabulary_size = vocabulary_size + self.seqlen = seqlen + if data_type == "gpt": + self.mask = torch.ones((seqlen,)) + elif data_type == "bert": + raise NotImplementedError + self.length = length + self.input_paths = None + + def __getitem__(self, index): + return torch.randint(self.vocabulary_size, (self.seqlen,), dtype=torch.long), self.mask + + def __len__(self): + return self.length diff --git a/3.test_cases/11.modelparallel/data/dataset/gpt_dataset.py b/3.test_cases/11.modelparallel/data/dataset/gpt_dataset.py new file mode 100644 index 00000000..69c1d96d --- /dev/null +++ b/3.test_cases/11.modelparallel/data/dataset/gpt_dataset.py @@ -0,0 +1,76 @@ +"""Data pipeline.""" +import gzip +import json +from io import BytesIO +from typing import List, Tuple, TypeVar + +import numpy as np +import torch +import torch.distributed as dist +from logging_utils import get_logger + +logger = get_logger() +T_co = TypeVar("T_co", covariant=True) + + +def chunks(l, n): + """Yield n number of striped chunks from l.""" + for i in range(0, n): + yield l[i::n] + + +###### Load GPT pretraining data ###### +class GPTPretrainingDataset(torch.utils.data.Dataset): + """GPT Pretraining Dataset.""" + + 
def __init__( + self, + input_paths: List[str], + max_sequence_length=None, + zipped=True, + ): + self.input_paths = input_paths + self.max_sequence_length = max_sequence_length + self.zipped = zipped + self.drop_last = True + self.input_data = [] + self.num_replicas = dist.get_world_size() if dist.is_initialized() else 1 + self.rank = dist.get_rank() if dist.is_initialized() else 0 + self.__read_examples(self.input_paths) + + def __read_examples(self, paths: List[str]): + for path in paths: + self.input_data = [] + # 1 below: each item of an S3Dataset object is a pair + # The 0th element is a string for S3 object address + # The 1st element is binary data + if isinstance(path, tuple): + filepath = path[0] + fileobj = BytesIO(path[1]) + else: + fileobj = path + + if self.zipped: + with gzip.open(fileobj, "rt") as f: + self.input_data = [ln for _, ln in enumerate(f, 1)] + else: + with open(fileobj, "r") as f: + self.input_data = [ln for ln in f] + if dist.get_rank() == 0: + logger.debug(f"Read {len(self.input_data)} sequences from file") + + def __len__(self) -> int: + return len(self.input_data) + + def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]: + obj = json.loads(self.input_data[index]) + iids = torch.tensor(obj["input_ids"], dtype=torch.long) + attns = torch.tensor(obj["attention_mask"], dtype=torch.long) + self.actual_sequence_length = len(obj["input_ids"]) + + if self.actual_sequence_length > self.max_sequence_length: + s_idx = np.random.randint(0, self.actual_sequence_length - self.max_sequence_length) + e_idx = s_idx + self.max_sequence_length + iids = iids[s_idx:e_idx] + attns = attns[s_idx:e_idx] + return iids, attns diff --git a/3.test_cases/11.modelparallel/data/pipelines/__init__.py b/3.test_cases/11.modelparallel/data/pipelines/__init__.py new file mode 100644 index 00000000..63ad41ba --- /dev/null +++ b/3.test_cases/11.modelparallel/data/pipelines/__init__.py @@ -0,0 +1,44 @@ +from data.pipelines.data_pipeline import 
DataPipeline +from data.pipelines.dummy_data_pipeline import DummyDataPipeline +from data.pipelines.gpt_data_pipeline import GPTDataPipeline +from data.pipelines.hf_data_pipeline import HFDataPipeline + + +def create_data_pipeline( + args, start_train_path_index, resume_from_sequence_number, dp_rank, dp_size +): + if args.use_synthetic_data: + data_pipeline = DummyDataPipeline( + vocabulary_size=args.vocab_size, + train_batch_size=args.train_batch_size, + sequence_length=args.max_context_width, + ) + elif args.dataset_type == "gpt_jsonl": + data_pipeline = GPTDataPipeline( + dataset_train_path=args.training_dir, + train_batch_size=args.train_batch_size, + dataset_val_path=args.test_dir if args.validation_freq else None, + val_batch_size=args.val_batch_size if args.validation_freq else None, + start_path_index=start_train_path_index, + use_last_file_only_for_valid=args.fast_validation > 0, + sequence_length=args.max_context_width, + zipped_data=args.zipped_data, + seed=args.seed, + num_workers=args.data_num_workers, + resume_from_sequence_number=resume_from_sequence_number, + dp_rank=dp_rank, + dp_size=dp_size, + ) + elif args.dataset_type == "hf": + data_pipeline = HFDataPipeline( + dataset_train_path=args.training_dir, + train_batch_size=args.train_batch_size, + dataset_val_path=args.test_dir if args.validation_freq else None, + val_batch_size=args.val_batch_size if args.validation_freq else None, + seed=args.seed, + num_workers=args.data_num_workers, + resume_from_sequence_number=resume_from_sequence_number, + dp_rank=dp_rank, + dp_size=dp_size, + ) + return data_pipeline diff --git a/3.test_cases/11.modelparallel/data/pipelines/data_pipeline.py b/3.test_cases/11.modelparallel/data/pipelines/data_pipeline.py new file mode 100644 index 00000000..4f4feff5 --- /dev/null +++ b/3.test_cases/11.modelparallel/data/pipelines/data_pipeline.py @@ -0,0 +1,105 @@ +from abc import abstractmethod + +import torch +import torch.distributed as dist +from torch.utils.data import 
DataLoader + + +# Adapted from accelerate's SkipDataLoader to skip certain number of sequences instead of batches +# https://github.com/huggingface/accelerate/blob/80da9cfb09bb3cc9f1b385cb55d6b90d025a5fd9/src/accelerate/data_loader.py#L858C1-L878C28 +class SkipDataLoader(DataLoader): + """ + Subclass of a PyTorch `DataLoader` that will skip the first batches. + + Args: + dataset (`torch.utils.data.dataset.Dataset`): + The dataset to use to build this datalaoder. + skip_batches (`int`, *optional*, defaults to 0): + The number of batches to skip at the beginning. + kwargs: + All other keyword arguments to pass to the regular `DataLoader` initialization. + """ + + def __init__(self, *args, resume_from_sequence_number=0, **kwargs): + super().__init__(*args, **kwargs) + self.resume_from_sequence_number = resume_from_sequence_number + + def __iter__(self): + cur_seq_index = 0 + for batch in super().__iter__(): + num_seq = int(self.batch_size) + if cur_seq_index + num_seq > self.resume_from_sequence_number: + yield batch + else: + if dist.get_rank() == 0: + print( + f"Dataloader skipping {num_seq} sequences in this batch as starting from {self.resume_from_sequence_number} sequences" + ) + cur_seq_index += num_seq + + +class DataPipeline: + def __init__( + self, + train_batch_size, + val_batch_size=None, + seed=1234, + num_workers=0, + resume_from_sequence_number=0, + dp_rank=0, + dp_size=1, + shuffle=False, + collate_fn=None, + ): + self.seed = seed + self.num_workers = num_workers + self.resume_from_sequence_number = resume_from_sequence_number + self.dp_rank = dp_rank + self.dp_size = dp_size + self.shuffle = shuffle + self.collate_fn = collate_fn + + self.train_batch_size = train_batch_size + self.val_batch_size = val_batch_size + + self.train_dataset = None + self.val_dataset = None + self.train_dataloader = None + self.val_dataloader = None + + def _create_dataloader(self, dataset, batch_size): + # TODO: set sampler.epoch to correctly shuffle across epochs, else same 
order will be used for + # all epochs not relevant now as we have no epochs + sampler = torch.utils.data.DistributedSampler( + dataset, + shuffle=self.shuffle, + seed=self.seed, + rank=self.dp_rank, + num_replicas=self.dp_size, + drop_last=True, + ) + + kwargs = { + "sampler": sampler, + "batch_size": batch_size, + "num_workers": self.num_workers, + "collate_fn": self.collate_fn, + "pin_memory": True, + "drop_last": True, + } + + if self.resume_from_sequence_number > 0: + dataloader = SkipDataLoader( + dataset, resume_from_sequence_number=self.resume_from_sequence_number, **kwargs + ) + else: + dataloader = torch.utils.data.DataLoader(dataset, **kwargs) + return dataloader + + @abstractmethod + def get_batch(self, data): + pass + + @abstractmethod + def get_val_batch(self, data): + pass diff --git a/3.test_cases/11.modelparallel/data/pipelines/dummy_data_pipeline.py b/3.test_cases/11.modelparallel/data/pipelines/dummy_data_pipeline.py new file mode 100644 index 00000000..1773e378 --- /dev/null +++ b/3.test_cases/11.modelparallel/data/pipelines/dummy_data_pipeline.py @@ -0,0 +1,34 @@ +from data.dataset.dummy_dataset import DummyDataset +from data.pipelines import DataPipeline + + +class DummyDataPipeline(DataPipeline): + def __init__( + self, + vocabulary_size, + train_batch_size, + sequence_length, + val_batch_size=None, + data_type="gpt", + ): + super().__init__( + train_batch_size=train_batch_size, + ) + self.vocab_size = vocabulary_size + self.seq_length = sequence_length + self.train_dataset = DummyDataset( + data_type=data_type, vocabulary_size=vocabulary_size, seqlen=sequence_length + ) + self.train_dataloader = self._create_dataloader(self.train_dataset, self.train_batch_size) + + if val_batch_size: + self.val_dataset = DummyDataset( + data_type=data_type, vocabulary_size=vocabulary_size, seqlen=sequence_length + ) + self.val_dataloader = self._create_dataloader(self.val_dataset, self.val_batch_size) + + def get_batch(self, data): + return data[0], data[1], 
data[0] + + def get_val_batch(self, data): + return self.get_batch(data) diff --git a/3.test_cases/11.modelparallel/data/pipelines/gpt_data_pipeline.py b/3.test_cases/11.modelparallel/data/pipelines/gpt_data_pipeline.py new file mode 100644 index 00000000..68480cce --- /dev/null +++ b/3.test_cases/11.modelparallel/data/pipelines/gpt_data_pipeline.py @@ -0,0 +1,160 @@ +"""Data pipeline.""" +import os +from typing import List, Union + +from data.dataset.gpt_dataset import GPTPretrainingDataset +from data.pipelines.data_pipeline import DataPipeline +from data.utils import is_s3_source +from logging_utils import get_logger + +try: + from awsio.python.lib.io.s3.s3dataset import S3Dataset +except ModuleNotFoundError: + S3Dataset = None + +logger = get_logger() + + +class GPTDataPipeline(DataPipeline): + def __init__( + self, + dataset_train_path, + train_batch_size, + dataset_val_path=None, + val_batch_size=None, + start_path_index=0, + use_last_file_only_for_valid=False, + sequence_length=2048, + dataset_type="gpt", + zipped_data=False, + seed=1234, + num_workers=0, + resume_from_sequence_number=0, + dp_rank=0, + dp_size=1, + shuffle=False, + ): + super().__init__( + train_batch_size, + val_batch_size=val_batch_size, + seed=seed, + num_workers=num_workers, + resume_from_sequence_number=resume_from_sequence_number, + dp_rank=dp_rank, + dp_size=dp_size, + shuffle=shuffle, + ) + self.sequence_length = sequence_length + self.train_paths = self.get_train_paths( + dataset_type, dataset_train_path, zipped_data=zipped_data + ) + self.cur_train_path = start_path_index + self.zipped_data = zipped_data + self.start_path_index = start_path_index + # needs to be called explicitly + # self._create_train_dataset() + if val_batch_size and dataset_val_path: + self.val_paths = self.get_val_paths( + dataset_type, dataset_val_path, zipped_data=zipped_data + ) + self.use_last_file_only_for_valid = use_last_file_only_for_valid + self._create_val_dataset() + + def _create_val_dataset(self): + 
self.val_dataset = GPTPretrainingDataset( + self.val_paths if not self.use_last_file_only_for_valid else [self.val_paths[-1]], + max_sequence_length=self.sequence_length, + zipped=self.zipped_data, + ) + self.val_dataloader = self._create_dataloader(self.val_dataset, self.val_batch_size) + + def increment_path_in_epoch(self): + self.cur_train_path += 1 + if self.cur_train_path >= len(self.train_paths): + self.cur_train_path = 0 + return False + # returns if cycled through to next epoch + return True + + def create_train_dataset(self): + self.train_dataset = GPTPretrainingDataset( + self.train_paths[self.cur_train_path : self.cur_train_path + 1], + max_sequence_length=self.sequence_length, + zipped=self.zipped_data, + ) + self.train_dataloader = self._create_dataloader(self.train_dataset, self.train_batch_size) + + def get_train_paths( + self, data_type, training_dir, zipped_data=False + ) -> Union[List[str], "S3Dataset"]: + if data_type == "bert": + if is_s3_source(training_dir): + raise ValueError("Unsupported BERT data from s3") + train_paths = sorted( + [ + os.path.join(training_dir, p) + for p in os.listdir(training_dir) + if os.path.isfile(os.path.join(training_dir, p)) and "training" in p + ] + ) + elif data_type == "gpt": + if zipped_data > 0: + file_extension = ".json.gz" + else: + file_extension = ".json" + if is_s3_source(training_dir): + assert S3Dataset, "awsio package needs to be installed" + train_paths = S3Dataset(training_dir) + else: + train_paths = sorted( + [ + os.path.join(training_dir, p) + for p in os.listdir(training_dir) + if p.endswith(file_extension) + ] + ) + else: + raise NotImplementedError + + return train_paths + + def get_val_paths( + self, data_type, test_dir, zipped_data=False + ) -> Union[List[str], "S3Dataset"]: + if data_type == "bert": + if is_s3_source(test_dir): + raise ValueError("Unsupported BERT data from s3") + val_paths = sorted( + [ + os.path.join(test_dir, p) + for p in os.listdir(test_dir) + if 
os.path.isfile(os.path.join(test_dir, p)) and "testing" in p + ] + ) + elif data_type == "gpt": + if zipped_data > 0: + file_extension = ".json.gz" + else: + file_extension = ".json" + if is_s3_source(test_dir): + assert S3Dataset, "awsio package needs to be installed" + val_paths = S3Dataset(test_dir) + else: + val_paths = sorted( + [ + os.path.join(test_dir, p) + for p in os.listdir(test_dir) + if p.endswith(file_extension) + ] + ) + else: + raise NotImplementedError + return val_paths + + def get_batch(self, data): + input_ids, mask = data + return input_ids, mask, input_ids + + def get_val_batch(self, data): + input_ids, mask = data + return input_ids, mask diff --git a/3.test_cases/11.modelparallel/data/pipelines/hf_data_pipeline.py b/3.test_cases/11.modelparallel/data/pipelines/hf_data_pipeline.py new file mode 100644 index 00000000..8ed66aa2 --- /dev/null +++ b/3.test_cases/11.modelparallel/data/pipelines/hf_data_pipeline.py @@ -0,0 +1,51 @@ +"""Data pipeline.""" +import logging + +from data.pipelines import DataPipeline +from datasets import load_from_disk +from transformers import default_data_collator + +try: + from awsio.python.lib.io.s3.s3dataset import S3Dataset +except ModuleNotFoundError: + S3Dataset = None + +logger = logging.getLogger(__file__) + + +class HFDataPipeline(DataPipeline): + def __init__( + self, + dataset_train_path, + train_batch_size, + dataset_val_path=None, + val_batch_size=None, + seed=1234, + num_workers=0, + resume_from_sequence_number=0, + dp_rank=0, + dp_size=1, + shuffle=False, + ): + super().__init__( + train_batch_size=train_batch_size, + val_batch_size=val_batch_size, + seed=seed, + num_workers=num_workers, + resume_from_sequence_number=resume_from_sequence_number, + dp_rank=dp_rank, + dp_size=dp_size, + shuffle=shuffle, + collate_fn=default_data_collator, + ) + self.train_dataset = load_from_disk(dataset_train_path) + self.train_dataloader = self._create_dataloader(self.train_dataset, self.train_batch_size) + if 
val_batch_size and dataset_val_path: + self.val_dataset = load_from_disk(dataset_val_path) + self.val_dataloader = self._create_dataloader(self.val_dataset, self.val_batch_size) + + def get_batch(self, data): + return data["input_ids"], data["attention_mask"], data["labels"] + + def get_val_batch(self, data): + return data["input_ids"], data["attention_mask"] diff --git a/3.test_cases/11.modelparallel/data/pipelines/nemo_megatron_gpt_data_pipeline.py b/3.test_cases/11.modelparallel/data/pipelines/nemo_megatron_gpt_data_pipeline.py new file mode 100644 index 00000000..dbabc711 --- /dev/null +++ b/3.test_cases/11.modelparallel/data/pipelines/nemo_megatron_gpt_data_pipeline.py @@ -0,0 +1,144 @@ +"""Data pipeline.""" +import os +from pathlib import Path + +from data.pipelines.data_pipeline import DataPipeline +from logging_utils import get_logger + +logger = get_logger() + + +def make_file_list(dir_path, pattern): + files = list(Path(dir_path).glob(pattern)) + files = list(set([os.path.join(dir_path, i.stem) for i in files])) + files.sort() + files = files[:254] + proporations = [1 / len(files) for _ in range(len(files))] + return [val for pair in zip(proporations, files) for val in pair] + + +# This is still untesed end to end in a convergence run + +# Below arguments need to copied to arguments.py to run +# # megatron dataset +# input_grp.add_argument("--data_impl", type=str, default="mmap") +# input_grp.add_argument("--data_split", type=str, default="970, 30, 0") +# input_grp.add_argument("--mmap_warmup", type=int, default=0) +# input_grp.add_argument("--skip_warmup", action="store_true") +# input_grp.add_argument("--tokenizer_type", type=str, default="HFLlamaTokenizer") +# input_grp.add_argument("--tokenizer_vocab_file", type=str, default=None) +# input_grp.add_argument("--tokenizer_merge_file", type=str, default=None) +# input_grp.add_argument("--make_vocab_size_divisible_by", type=int, default=128) +# input_grp.add_argument("--data_dir", type=str) +# 
input_grp.add_argument("--data_file_regex", type=str) + +# Also need to add dataset_type "megatron" as a choice for the arg. + +# Below snippet needs to go into data/pipelines/__init__.py +# elif args.dataset_type == "megatron": +# from data.pipelines.nemo_megatron_gpt_data_pipeline import MegatronGPTDataPipeline + +# data_pipeline = MegatronGPTDataPipeline( +# args, +# seed=args.seed, +# num_workers=args.data_num_workers, +# resume_from_sequence_number=total_steps, +# dp_rank=dp_rank, +# dp_size=dp_size, +# ) + + +class MegatronGPTDataPipeline(DataPipeline): + def __init__( + self, + args, + seed=1234, + num_workers=0, + resume_from_sequence_number=0, + dp_rank=0, + dp_size=1, + shuffle=False, + ): + super().__init__( + train_batch_size=args.train_batch_size, + val_batch_size=args.val_batch_size, + seed=seed, + resume_from_sequence_number=resume_from_sequence_number, + num_workers=num_workers, + dp_rank=dp_rank, + dp_size=dp_size, + shuffle=shuffle, + ) + eval_iters = (args.max_steps // args.validation_freq + 1) * args.validation_batches + + train_valid_test_num_samples = [ + args.max_steps * args.train_batch_size, + eval_iters * args.val_batch_size, + 0, + ] + logger.info(f"{train_valid_test_num_samples}, {args.max_steps}, {eval_iters}") + from omegaconf import OmegaConf + + file_list = make_file_list(args.data_dir, args.data_file_regex) + assert len(file_list) > 0, "Please check your regex" + model_cfg_dict = { + "data": { + # "data_prefix": { + # "train": make_file_list(args.data_dir, args.data_file_regex), + # "test": make_file_list(args.data_dir, args.data_file_regex), + # "validation": make_file_list(args.data_dir, args.data_file_regex), + # splits_string ignored if data_prefix is a dict + # }, + "data_prefix": file_list, + "data_impl": args.data_impl, + "splits_string": args.data_split, + "seq_length": args.max_context_width, + "delay_data_mmap": False, + "validation_drop_last": True, + "skip_warmup": args.skip_warmup, + }, + "seed": args.seed, + } + 
model_cfg = OmegaConf.create(model_cfg_dict) + + from nemo.collections.common.tokenizers import AutoTokenizer + + tokenizer = AutoTokenizer("hf-internal-testing/llama-tokenizer") + + from megatron.core.parallel_state import initialize_model_parallel + + initialize_model_parallel() + from nemo.collections.nlp.data.language_modeling.megatron.gpt_dataset import ( + build_train_valid_test_datasets, + ) + + self.train_dataset, self.val_dataset, self.test_dataset = build_train_valid_test_datasets( + model_cfg, + None, + model_cfg.data.data_prefix, + model_cfg.data.data_impl, + splits_string=model_cfg.data.splits_string, + train_valid_test_num_samples=train_valid_test_num_samples, + seq_length=model_cfg.data.seq_length, + seed=model_cfg.seed, + skip_warmup=model_cfg.data.get("skip_warmup", True), + tokenizer=tokenizer, + ) + self.train_dataloader = self._create_dataloader(self.train_dataset, self.train_batch_size) + self.val_dataloader = self._create_dataloader(self.val_dataset, self.val_batch_size) + self.test_dataloader = self._create_dataloader(self.test_dataset, self.val_batch_size) + + logger.info( + f"Lengths of dataloaders {len(self.train_dataloader)}, {len(self.val_dataloader)}" + ) + + def get_batch(self, data): + tokens = data["tokens"].long() + labels = data["labels"].long() + mask = data["attention_mask"] + return tokens, mask, labels + + def get_val_batch(self, data): + tokens = data["tokens"].long() + mask = data["attention_mask"] + return tokens, mask diff --git a/3.test_cases/11.modelparallel/data/prep/_prepare_nemo_megatron_dataset.py b/3.test_cases/11.modelparallel/data/prep/_prepare_nemo_megatron_dataset.py new file mode 100644 index 00000000..2574a40c --- /dev/null +++ b/3.test_cases/11.modelparallel/data/prep/_prepare_nemo_megatron_dataset.py @@ -0,0 +1,392 @@ +# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Processing data for megatron pretraining. + +It can be used to convert the text data into indexed dataset for BERT, GPT, T5, RETRO models etc. + + +Example script to preprocess the loose JSON file for BERT model + +```python +python scripts/nlp_language_modeling/preprocess_data_for_megatron.py \ + --input=PATH_TO_THE_RETRIEVAL_DB_LOOSE_JSON_FILE \ + --json-keys=text \ + --vocab-file=PATH_TO_VOCAB_FILE \ + --dataset-impl=mmap \ + --output-prefix=YOUR_DATA_PREFIX \ + --tokenizer-library=megatron \ + --tokenizer-type=BertWordPieceCase \ + --split-sentences \ + --workers=48 +``` + +Example script to preprocess the loose JSON file for GPT model + +```python +python scripts/nlp_language_modeling/preprocess_data_for_megatron.py \ + --input=PATH_TO_THE_RETRIEVAL_DB_LOOSE_JSON_FILE \ + --json-keys=text \ + --tokenizer-library=megatron \ + --tokenizer-type=GPT2BPETokenizer \ + --dataset-impl=mmap \ + --merge-file=YOUR_MERGE_FILE \ + --vocab-file=YOUR_VOCAB_FILE \ + --output-prefix=YOUR_DATA_PREFIX \ + --append-eod \ + --workers=48 +``` + +Example script to preprocess the loose JSON file for retrieval DB Dataset + +```python +python scripts/nlp_language_modeling/preprocess_data_for_megatron.py \ + --input=PATH_TO_THE_RETRIEVAL_DB_LOOSE_JSON_FILE \ + --json-keys=text \ + --tokenizer-library=sentencepiece \ + --dataset-impl=retmmap \ + --tokenizer-model=tokenizer.model \ + --output-prefix=retro_db \ + --need-pad-id \ + 
--append-eod \
+ --retrieval-db \
+ --chunk_size=64 \
+ --workers=64
+```
+
+Example script to preprocess the JSON file for retrieval training dataset
+
+```python
+python scripts/nlp_language_modeling/preprocess_data_for_megatron.py \
+ --input=PATH_TO_THE_RETRIEVAL_TRAIN_VAL_TEST_LOOSE_JSON_FILE \
+ --json-keys=text \
+ --tokenizer-library=sentencepiece \
+ --dataset-impl=retmmap \
+ --tokenizer-model=tokenizer.model \
+ --output-prefix=retro_data \
+ --need-pad-id \
+ --append-eod \
+ --chunk_size=64 \
+ --workers=64
+```
+"""
+
+import argparse
+import gzip
+import json
+import multiprocessing
+import os
+import pathlib
+import sys
+import time
+
+import ftfy
+import torch
+from nemo.collections.nlp.data.language_modeling.megatron import indexed_dataset
+from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
+
+try:
+ import nltk
+
+ nltk_available = True
+except ImportError:
+ nltk_available = False
+
+# https://stackoverflow.com/questions/33139531/preserve-empty-lines-with-nltks-punkt-tokenizer
+class CustomLanguageVars(nltk.tokenize.punkt.PunktLanguageVars):
+
+ _period_context_fmt = r"""
+ \S* # some word material
+ %(SentEndChars)s # a potential sentence ending
+ \s* # <-- THIS is what I changed
+ (?=(?P<after_tok>
+ %(NonWord)s # either other punctuation
+ |
+ (?P<next_tok>\S+) # <-- Normally you would have \s+ here
+ ))"""


+class IdentitySplitter(object):
+ def tokenize(self, *text):
+ return text
+
+
+def get_tokenizer(args):
+ tokenizer = get_nmt_tokenizer(
+ library=args.tokenizer_library,
+ model_name=args.tokenizer_type,
+ tokenizer_model=args.tokenizer_model,
+ vocab_file=args.vocab_file,
+ merges_file=args.merge_file,
+ delimiter=args.delimiter,
+ )
+ if args.need_pad_id:
+ if not hasattr(tokenizer, "pad_id"):
+ tokenizer.add_special_tokens({"pad_token": "<pad>"})
+ elif hasattr(tokenizer, "pad_id") and (tokenizer.pad_id is None or tokenizer.pad_id < 0):
+ tokenizer.add_special_tokens({"pad_token": "<pad>"})
+ return tokenizer
+
+
+class 
Encoder(object): + def __init__(self, args): + self.args = args + + def initializer(self): + # Use Encoder class as a container for global data + Encoder.tokenizer = get_tokenizer(self.args) + + if self.args.split_sentences: + if not nltk_available: + print("NLTK is not available to split sentences.") + exit() + splitter = nltk.load("tokenizers/punkt/english.pickle") + if self.args.keep_newlines: + # this prevents punkt from eating newlines after sentences + Encoder.splitter = nltk.tokenize.punkt.PunktSentenceTokenizer( + train_text=splitter._params, lang_vars=CustomLanguageVars() + ) + else: + Encoder.splitter = splitter + + else: + Encoder.splitter = IdentitySplitter() + + def encode(self, json_line): + if not self.args.text_file: + data = json.loads(json_line) + ids = {} + for key in self.args.json_keys: + text = data[key] + if self.args.apply_ftfy: + text = ftfy.fix_text(text) + doc_ids = [] + for sentence in Encoder.splitter.tokenize(text): + sentence_ids = Encoder.tokenizer.text_to_ids(sentence) + if len(sentence_ids) > 0: + doc_ids.append(sentence_ids) + if len(doc_ids) > 0 and self.args.append_eod: + doc_ids[-1].append(Encoder.tokenizer.eos_id) + ids[key] = doc_ids + else: + data = json_line + ids = {} + text = data.strip() + if self.args.apply_ftfy: + text = ftfy.fix_text(text) + doc_ids = [] + for sentence in Encoder.splitter.tokenize(text): + sentence_ids = Encoder.tokenizer.text_to_ids(sentence) + if len(sentence_ids) > 0: + doc_ids.append(sentence_ids) + if len(doc_ids) > 0 and self.args.append_eod: + doc_ids[-1].append(Encoder.tokenizer.eos_id) + ids["text"] = doc_ids + return ids, len(json_line) + + +def get_args(): + parser = argparse.ArgumentParser() + group = parser.add_argument_group(title="input data") + group.add_argument( + "--input", + type=str, + required=True, + help="Path to the input json or json.gz file. 
If preprocessing an entire folder, set the --preproc-folder flag and provide the path to the folder in this arg.",
+ )
+ group.add_argument(
+ "--json-keys",
+ nargs="+",
+ default=["text"],
+ help="space separate listed of keys to extract from json",
+ )
+ group.add_argument(
+ "--split-sentences", action="store_true", help="Split documents into sentences."
+ )
+ group.add_argument(
+ "--keep-newlines",
+ action="store_true",
+ help="Keep newlines between sentences when splitting.",
+ )
+ group.add_argument("--text_file", action="store_true", help="Use text file instead of json.")
+ group = parser.add_argument_group(title="tokenizer")
+ group.add_argument(
+ "--tokenizer-library",
+ type=str,
+ required=True,
+ choices=["yttm", "sentencepiece", "megatron", "huggingface", "tabular"],
+ help="What tokenizer library to use.",
+ )
+ group.add_argument(
+ "--tokenizer-type",
+ type=str,
+ default=None,
+ help="What type of tokenizer to use.",
+ )
+ group.add_argument(
+ "--tokenizer-model",
+ type=str,
+ default=None,
+ help="Path to tokenizer model.",
+ )
+ group.add_argument("--vocab-file", type=str, default=None, help="Path to the vocab file")
+ group.add_argument("--files-filter", type=str, default="**/*.json*", help="files filter str")
+ group.add_argument(
+ "--merge-file", type=str, default=None, help="Path to the BPE merge file (if necessary)."
+ )
+ group.add_argument(
+ "--delimiter", type=str, default=None, help="delimiter used for tabular tokenizer"
+ )
+ group.add_argument(
+ "--append-eod", action="store_true", help="Append an <eod> token to the end of a document."
+ ) + group.add_argument("--retrieval-db", action="store_true", help="Dataset used for retrieval.") + group.add_argument( + "--need-pad-id", action="store_true", help="Whether we need the pad id for the tokenizer" + ) + group = parser.add_argument_group(title="output data") + group.add_argument( + "--output-prefix", type=str, required=True, help="Path to binary output file without suffix" + ) + group.add_argument( + "--dataset-impl", type=str, default="mmap", choices=["lazy", "cached", "mmap", "retmmap"] + ) + + group = parser.add_argument_group(title="runtime") + group.add_argument( + "--workers", type=int, default=1, help="Number of worker processes to launch" + ) + group.add_argument("--chunk_size", type=int, default=64, help="chunk size used for retrieval") + group.add_argument( + "--chunk_stride_size", + type=int, + default=64, + help="the stride size for neighbor chunks used for retrieval", + ) + + group.add_argument( + "--log-interval", type=int, default=100, help="Interval between progress updates" + ) + group.add_argument( + "--preproc-folder", + action="store_true", + help="If set, will preprocess all .json or .json.gz files into a single .bin and .idx file. Folder path provided via the --input arg", + ) + group.add_argument( + "--apply-ftfy", action="store_true", help="If set, will apply ftfy to the input text" + ) + args = parser.parse_args() + args.keep_empty = False + + if args.tokenizer_type is not None and args.tokenizer_type.lower().startswith("bert"): + if not args.split_sentences: + print("Bert tokenizer detected, are you sure you don't want to split sentences?") + + # some default/dummy values for the tokenizer + args.rank = 0 + args.make_vocab_size_divisible_by = 128 + args.tensor_model_parallel_size = 1 + args.vocab_extra_ids = 0 + # TODO: There are dependencies b/w libraries and model files / tokenizer type strings to check. 
+ assert args.tokenizer_type is not None or args.tokenizer_model is not None + return args + + +def main(): + args = get_args() + startup_start = time.time() + if args.preproc_folder: + print("Searching folder for .json or .json.gz files...") + assert os.path.exists(args.input), f"Folder does not exist: {args.input}" + json_files = (str(f) for f in pathlib.Path(args.input).glob(args.files_filter)) + json_files = [f for f in json_files if f.endswith(".json") or f.endswith(".json.gz")] + if len(json_files) == 0: + raise FileNotFoundError("No .json or .json.gz files found in folder.") + else: + print(f"Found {len(json_files)} .json or .json.gz files.") + else: + assert os.path.exists(args.input), f"File does not exist: {args.input}" + json_files = [args.input] + + if nltk_available and args.split_sentences: + nltk.download("punkt", quiet=True) + + encoder = Encoder(args) + + if args.dataset_impl == "retmmap": + assert args.need_pad_id, "retmmap need --need_pad_id flag" + tokenizer = get_tokenizer(args) + + level = "document" + if args.split_sentences: + level = "sentence" + + print(f"Vocab size: {tokenizer.vocab_size}") + print(f"Output prefix: {args.output_prefix}") + output_bin_files = {} + output_idx_files = {} + builders = {} + for key in args.json_keys: + output_bin_files[key] = "{}_{}_{}.bin".format(args.output_prefix, key, level) + output_idx_files[key] = "{}_{}_{}.idx".format(args.output_prefix, key, level) + builders[key] = indexed_dataset.make_builder( + output_bin_files[key], + impl=args.dataset_impl, + chunk_size=args.chunk_size, + pad_id=tokenizer.pad_id if hasattr(tokenizer, "pad_id") else 0, + retrieval_db=args.retrieval_db, + vocab_size=tokenizer.vocab_size, + stride=args.chunk_stride_size, + ) + + startup_end = time.time() + proc_start = time.time() + total_bytes_processed = 0 + print("Time to startup:", startup_end - startup_start) + + pool = multiprocessing.Pool(args.workers, initializer=encoder.initializer) + + for idx, json_file in 
enumerate(json_files):
+ print(f"Processing file {json_file} {idx + 1}/{len(json_files)}")
+ if json_file.endswith(".gz"):
+ fin = gzip.open(json_file, "r")
+ else:
+ # open the current file from the loop, not args.input, so --preproc-folder
+ # actually processes each discovered file instead of repeating the first
+ fin = open(json_file, "r", encoding="utf-8")
+
+ encoded_docs = pool.imap(encoder.encode, fin, 25)
+
+ for i, (doc, bytes_processed) in enumerate(encoded_docs, start=1):
+ total_bytes_processed += bytes_processed
+ for key, sentences in doc.items():
+ if len(sentences) == 0:
+ continue
+ for sentence in sentences:
+ builders[key].add_item(torch.IntTensor(sentence))
+ builders[key].end_document()
+ if i % args.log_interval == 0:
+ current = time.time()
+ elapsed = current - proc_start
+ mbs = total_bytes_processed / elapsed / 1024 / 1024
+ print(
+ f"Processed {i} documents",
+ f"({i/elapsed} docs/s, {mbs} MB/s).",
+ file=sys.stderr,
+ )
+
+ for key in args.json_keys:
+ builders[key].finalize(output_idx_files[key])
+
+
+if __name__ == "__main__":
+ main()
diff --git a/3.test_cases/11.modelparallel/data/prep/prep_hf_dataset.slurm b/3.test_cases/11.modelparallel/data/prep/prep_hf_dataset.slurm
new file mode 100644
index 00000000..7d3da423
--- /dev/null
+++ b/3.test_cases/11.modelparallel/data/prep/prep_hf_dataset.slurm
@@ -0,0 +1,25 @@
+#!/bin/bash
+#SBATCH --output=logs/%x_%j.out # Redirects outputs to file in current_dir/logs
+#SBATCH --error=logs/%x_%j.out # Redirects err to same file in current_dir/logs
+#SBATCH --job-name=prep_hf_data
+#SBATCH --ntasks-per-node=1
+#SBATCH -N 1
+
+## Below examples for llama tokenizer
+
+## WIKICORPUS
+python prepare_hf_dataset.py --dataset_name wikicorpus \
+ --dataset_config_name raw_en \
+ --val_split_percentage 20 \
+ --hf_tokenizer_name meta-llama/Llama-2-7b-hf \
+ --seq_len 4096 \
+ --output_dir /fsx/datasets/temp/wikicorpus__raw_en/llama/4096/
+
+## C4
+# Had to delete a file which was incomplete and crashed the job
+# rm /fsx/datasets/.cache/datasets/downloads/extracted/741a4aaf04e7748f791ce4525c5876f13a45e8115d76b099c818cf7970972c48
+python 
prepare_hf_dataset.py --dataset_path /fsx/datasets/c4/en/hf \ + --output_dir /fsx/datasets/temp/c4/en/hf-tokenized/llama \ + --hf_tokenizer_name meta-llama/Llama-2-7b-hf \ + --seq_len 4096 \ + --val_split_percentage 20 diff --git a/3.test_cases/11.modelparallel/data/prep/prep_nmt_dataset.slurm b/3.test_cases/11.modelparallel/data/prep/prep_nmt_dataset.slurm new file mode 100644 index 00000000..e8c1f9ef --- /dev/null +++ b/3.test_cases/11.modelparallel/data/prep/prep_nmt_dataset.slurm @@ -0,0 +1,13 @@ +#!/bin/bash +#SBATCH --output=logs/%x_%j.out # Redirects outputs to file in current_dir/logs +#SBATCH --error=logs/%x_%j.out # Redirects err to same file in current_dir/logs +#SBATCH --job-name=prep_nmt_data +#SBATCH --ntasks-per-node=1 +#SBATCH -N 1 + +CONDA_ENV_PATH=${1:-"$CONDA_DEFAULT_ENV"} +if [[ -z "${CONDA_ENV_PATH}" ]]; then + echo "Conda env not set, exiting" +fi + +srun -l -D `pwd` conda run -p $CONDA_ENV_PATH --no-capture-output python data/prepare_nemo_megatron_dataset.py diff --git a/3.test_cases/11.modelparallel/data/prep/prepare_hf_dataset.py b/3.test_cases/11.modelparallel/data/prep/prepare_hf_dataset.py new file mode 100644 index 00000000..6b6ce43a --- /dev/null +++ b/3.test_cases/11.modelparallel/data/prep/prepare_hf_dataset.py @@ -0,0 +1,186 @@ +import argparse +import functools +import logging +import os +from itertools import chain + +import torch +import transformers +from datasets import load_dataset +from transformers import AutoTokenizer +from transformers.testing_utils import CaptureLogger + +# Either set token here or in the env +# login(token="", add_to_git_credential=True, new_session=False) + + +logger = logging.getLogger(__name__) + +""" +Example commands +---- +1. Wikicorpus for llama +python prepare_hf_dataset.py --dataset_name wikicorpus \ + --dataset_config_name raw_en \ + --val_split_percentage 20 \ + --hf_tokenizer_name meta-llama/Llama-2-7b-hf \ + --seq_len 4096 \ + --output_dir /fsx/datasets/wikicorpus__raw_en/llama/4096/ + +2. 
C4 +# Had to delete a file which was incomplete +# rm ~/.cache/huggingface/datasets/downloads/extracted/741a4aaf04e7748f791ce4525c5876f13a45e8115d76b099c818cf7970972c48 +python prepare_hf_dataset.py --dataset_path /fsx/datasets/c4/en/hf \ + --output_dir /fsx/datasets/c4/en/hf-tokenized/llama \ + --hf_tokenizer_name meta-llama/Llama-2-7b-hf \ + --seq_len 4096 \ + --val_split_percentage 20 +""" + +parser = argparse.ArgumentParser() +parser.add_argument("--dataset_name", type=str, default=None) +parser.add_argument("--dataset_config_name", type=str, default=None) +parser.add_argument("--dataset_path", type=str, default=None) +parser.add_argument("--val_split_percentage", type=int, default=20) +parser.add_argument("--hf_tokenizer_name", type=str, default="meta-llama/Llama-2-7b-hf") +parser.add_argument("--output_dir", default=None, type=str) +parser.add_argument("--num_proc", default=64, type=int) +parser.add_argument("--seq_len", type=int, default=4096) +args, _ = parser.parse_known_args() + +if args.dataset_path is not None and (args.dataset_name is not None and args.dataset_config_name): + raise ValueError("Set either (dataset_path) or (dataset_name, dataset_config_name)") +elif args.dataset_path is None: + if args.dataset_name is None or args.dataset_config_name is None: + raise ValueError( + "If dataset_path is not set, then both dataset_name and dataset_config_name need to be set" + ) +do_train = True +do_eval = True + + +def tokenize_function(tokenizer, text_column_name, examples): + tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base") + + with CaptureLogger(tok_logger) as cl: + output = _tokenize_function(tokenizer, text_column_name, examples) + # clm input could be much much longer than block_size + if "Token indices sequence length is longer than the" in cl.out: + tok_logger.warning( + "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits before being passed to the model." 
+ ) + return output + + +def _tokenize_function(tokenizer, text_column_name, examples): + return tokenizer(examples[text_column_name]) + + +# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. +def group_texts(block_size, examples): + # Concatenate all texts. + concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} + total_length = len(concatenated_examples[list(examples.keys())[0]]) + # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can + # customize this part to your needs. + if total_length >= block_size: + total_length = (total_length // block_size) * block_size + # Split by chunks of max_len. + result = { + k: [t[i : i + block_size] for i in range(0, total_length, block_size)] + for k, t in concatenated_examples.items() + } + result["labels"] = result["input_ids"].copy() + else: + result = {} + return result + + +def tokenize_dataset( + dataset_name, + dataset_config_name, + dataset_path, + hf_tokenizer_name, + output_dir, + val_split_percentage=20, + sequence_length=4096, + num_proc=64, + overwrite_cache=False, +): + cache_dir = "/fsx/datasets/.cache/datasets/" + if dataset_path is not None: + raw_datasets = load_dataset(dataset_path, num_proc=num_proc, cache_dir=cache_dir) + else: + raw_datasets = load_dataset( + dataset_name, dataset_config_name, num_proc=num_proc, cache_dir=cache_dir + ) + + os.makedirs(output_dir, exist_ok=True) + train_split_percentage = 100 - val_split_percentage + if "validation" not in raw_datasets.keys(): + raw_datasets["validation"] = load_dataset( + dataset_name, + dataset_config_name, + split=f"train[:{val_split_percentage}%]", + cache_dir=cache_dir, + ) + + raw_datasets["train"] = load_dataset( + dataset_name, + dataset_config_name, + split=f"train[:{train_split_percentage}%]", + cache_dir=cache_dir, + ) + + tokenizer = AutoTokenizer.from_pretrained(hf_tokenizer_name, 
trust_remote_code=True) + + column_names = raw_datasets["train"].column_names + text_column_name = "text" if "text" in column_names else column_names[0] + + # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function + # tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base") + + tokenized_datasets = raw_datasets.map( + functools.partial(tokenize_function, tokenizer, text_column_name), + batched=True, + num_proc=num_proc, + remove_columns=column_names, + load_from_cache_file=not overwrite_cache, + desc="Running tokenizer on dataset", + ) + + assert tokenizer.model_max_length >= sequence_length + + lm_datasets = tokenized_datasets.map( + functools.partial(group_texts, sequence_length), + batched=True, + num_proc=num_proc, + load_from_cache_file=not overwrite_cache, + desc=f"Grouping texts in chunks of {sequence_length}", + ) + if do_train: + if "train" not in tokenized_datasets: + raise ValueError("--do_train requires a train dataset") + train_dataset = lm_datasets["train"] + train_dataset.save_to_disk(f"{output_dir}/train/", num_proc=num_proc) + + if do_eval: + if "validation" not in tokenized_datasets: + raise ValueError("--do_eval requires a validation dataset") + eval_dataset = lm_datasets["validation"] + eval_dataset.save_to_disk(f"{output_dir}/val/", num_proc=num_proc) + + torch.save({"arguments": args}, f"{output_dir}/args") + + +if __name__ == "__main__": + tokenize_dataset( + dataset_name=args.dataset_name, + dataset_config_name=args.dataset_config_name, + dataset_path=args.dataset_path, + hf_tokenizer_name=args.hf_tokenizer_name, + output_dir=args.output_dir, + val_split_percentage=args.val_split_percentage, + sequence_length=args.seq_len, + num_proc=args.num_proc, + ) diff --git a/3.test_cases/11.modelparallel/data/prep/prepare_nemo_megatron_dataset.py b/3.test_cases/11.modelparallel/data/prep/prepare_nemo_megatron_dataset.py new file mode 100644 index 
00000000..243b49ca --- /dev/null +++ b/3.test_cases/11.modelparallel/data/prep/prepare_nemo_megatron_dataset.py @@ -0,0 +1,39 @@ +import os +from concurrent.futures import ThreadPoolExecutor +from pathlib import Path + +SRC_DIR = "/fsx/datasets/c4/en/hf/" +OUT_DIR = "/fsx/datasets/c4/en/nmt-tokenized-2/llama" + +if not Path(OUT_DIR).exists(): + os.makedirs(OUT_DIR) + + +def process_file(idx): + file_idx_str = str(idx).zfill(5) + file_stem = f"c4-train.{file_idx_str}-of-01024" + file_name = f"{file_stem}.json.gz" + cmd = f"python data/_prepare_nemo_megatron_dataset.py \ + --input {os.path.join(SRC_DIR, file_name)} \ + --output-prefix {OUT_DIR}/{file_stem} \ + --tokenizer-library=huggingface \ + --tokenizer-type hf-internal-testing/llama-tokenizer \ + --dataset-impl mmap \ + --append-eod \ + --workers 32" + os.system(cmd) + output_partition_files = list(Path(OUT_DIR).glob(f"{file_stem}_[0-9]*")) + # Running with 2 partitions creates some extra files we don't need + for a_file in output_partition_files: + a_file.unlink() + input_partition_files = list(Path(SRC_DIR).glob(f"{file_stem}.json_[0-9].gz")) + for a_file in input_partition_files: + a_file.unlink() + + +pool = ThreadPoolExecutor(max_workers=32) + +# import os +# node_id = int(os.getenv('SLURM_NODEID')) +# num_nodes = int(os.getenv('SLURM_NNODES')) +threads = [pool.submit(process_file, idx) for idx in range(95, 256)] diff --git a/3.test_cases/11.modelparallel/data/utils.py b/3.test_cases/11.modelparallel/data/utils.py new file mode 100644 index 00000000..79f41a2d --- /dev/null +++ b/3.test_cases/11.modelparallel/data/utils.py @@ -0,0 +1,10 @@ +_S3_PREFIX = "s3://" + + +def is_s3_source(src): + return src.startswith(_S3_PREFIX) + + +def parse_s3_address(address): + address = address[len(_S3_PREFIX) :] + return address.split("/", 1) diff --git a/3.test_cases/11.modelparallel/data_utils.py b/3.test_cases/11.modelparallel/data_utils.py new file mode 100644 index 00000000..79f41a2d --- /dev/null +++ 
b/3.test_cases/11.modelparallel/data_utils.py @@ -0,0 +1,10 @@ +_S3_PREFIX = "s3://" + + +def is_s3_source(src): + return src.startswith(_S3_PREFIX) + + +def parse_s3_address(address): + address = address[len(_S3_PREFIX) :] + return address.split("/", 1) diff --git a/3.test_cases/11.modelparallel/fsdp_utils.py b/3.test_cases/11.modelparallel/fsdp_utils.py new file mode 100644 index 00000000..c6d0f026 --- /dev/null +++ b/3.test_cases/11.modelparallel/fsdp_utils.py @@ -0,0 +1,60 @@ +"""FSDP utils.""" + +# pylint: disable=fixme,import-error,import-outside-toplevel,no-name-in-module +from torch.distributed.fsdp import BackwardPrefetch, ShardingStrategy +from torch.sagemaker.logger import get_logger + +_logger = get_logger() + + +def get_sharding_strategy(strategy: str): + """Get sharding strategy.""" + sharding_strategy = getattr(ShardingStrategy, strategy.upper()) + _logger.debug("Translating %s to %s.", strategy, sharding_strategy) + return sharding_strategy + + +def get_backward_fetch_policy(policy: str): + """Get backward fetch policy.""" + backward_fetch_policy = getattr(BackwardPrefetch, policy.upper()) + _logger.debug("Translating %s to %s.", policy, backward_fetch_policy) + return backward_fetch_policy + + +def get_transformer_layer(model_type="gpt2", use_smp_implementation=False): + """Get transformer layer.""" + if use_smp_implementation: + # We can't checkpoint transformer.TransformerLayer class + # as it takes a tuple as input + from torch.sagemaker.tensor_parallel.transformer import TETransformerLayer + + transformer_layer = TETransformerLayer + elif model_type == "gpt2": + from transformers.models.gpt2.modeling_gpt2 import GPT2Block + + transformer_layer = GPT2Block + + elif model_type == "gpt_neox": + from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXLayer + + transformer_layer = GPTNeoXLayer + + elif model_type == "bloom": + from transformers.models.bloom.modeling_bloom import BloomBlock + + transformer_layer = BloomBlock + + elif 
model_type == "flash_gptneox": + from flash_attn.modules.block import ParallelBlock + + # TODO: Add support for Block + transformer_layer = ParallelBlock + elif model_type == "rubik_gpt_neox": + from smpv1.transformer import DistributedTransformerLayer + + transformer_layer = DistributedTransformerLayer + elif model_type == "llama_v2": + from transformers.models.llama.modeling_llama import LlamaDecoderLayer + + transformer_layer = LlamaDecoderLayer + return transformer_layer diff --git a/3.test_cases/11.modelparallel/scripts/model.sh b/3.test_cases/11.modelparallel/scripts/model.sh new file mode 100755 index 00000000..bdd8117f --- /dev/null +++ b/3.test_cases/11.modelparallel/scripts/model.sh @@ -0,0 +1,142 @@ +#!/usr/bin/env bash + +parse_inputs() { + while [[ $# -gt 0 ]]; do + key="$1" + case $key in + --hostfile) + hostfile=$2 + shift 2 + ;; + --model_type) + model_type=$2 + shift 2 + ;; + --model_size) + model_size=$2 + shift 2 + ;; + --shard_degree) + shard_degree=$2 + shift 2 + ;; + --nsys_path) + nsys_path=$2 + shift 2 + ;; + *) + shift 1 + ;; + esac + done +} + +parse_inputs $@ + +if [ -z "$hostfile" ]; then + echo "Hostfile needs to be passed" + exit 1 +fi + +num_nodes=$(cat $hostfile | wc -l) + +export NCCL_PROTO="simple" +export NCCL_SOCKET_IFNAME="^lo,docker" +export RDMAV_FORK_SAFE=1 +export FI_EFA_USE_DEVICE_RDMA=1 +export NCCL_DEBUG_SUBSYS=off +export NCCL_DEBUG="INFO" +export SM_NUM_GPUS=8 +export MASTER_ADDR=$(head -n 1 $hostfile) +export GPU_NUM_DEVICES=8 + +if [[ "$@" == *"--use_smp_implementation 1"* ]] || [[ "$@" == *"--tensor_parallel_degree"* ]]; then + # When using SMP implementation not setting NVTE_TORCH_COMPILE=0 causes a crash + export NVTE_TORCH_COMPILE=0 + # When using SMP implementation, there's a message asking to set this for better perf +fi + +if [ "$model_size" == "7b" ]; then + HIDDEN_WIDTH=4096 + NUM_LAYERS=32 + NUM_HEADS=32 + LLAMA_INTERMEDIATE_SIZE=11008 + DEFAULT_SHARD_DEGREE=8 +elif [ "$model_size" == "13b" ]; then + 
HIDDEN_WIDTH=5120 + NUM_LAYERS=40 + NUM_HEADS=40 + LLAMA_INTERMEDIATE_SIZE=13760 + # Reduce for better perf on p4de + DEFAULT_SHARD_DEGREE=64 +elif [ "$model_size" == "20b" ]; then + if [ "$model_type" == "llama_v2" ]; then + echo "Llama V2 is only configured for 7b, 13b and 70b, please add the configuration if you wish to run 20b" + exit 1 + fi + HIDDEN_WIDTH=6144 + NUM_LAYERS=44 + NUM_HEADS=64 + # Reduce for better perf on p4de + DEFAULT_SHARD_DEGREE=64 +elif [ "$model_size" == "65b" ]; then + if [ "$model_type" == "llama_v2" ]; then + echo "Llama V2 is only configured for 7b, 13b and 70b, please add the configuration if you wish to run 65b" + exit 1 + fi + HIDDEN_WIDTH=8192 + NUM_LAYERS=80 + NUM_HEADS=64 + # Reduce for better perf on p4de + DEFAULT_SHARD_DEGREE=128 +elif [ "$model_size" == "70b" ]; then + HIDDEN_WIDTH=8192 + NUM_LAYERS=80 + NUM_HEADS=64 + LLAMA_INTERMEDIATE_SIZE=28672 + # Reduce for better perf on p4de + DEFAULT_SHARD_DEGREE=128 +fi + +if [ -z "$shard_degree" ]; then + SHARD_DEGREE=$DEFAULT_SHARD_DEGREE +else + SHARD_DEGREE=$shard_degree +fi + +if [ -z "$LLAMA_INTERMEDIATE_SIZE" ]; then + LLAMA_ARGS="" +else + LLAMA_ARGS="--llama_intermediate_size $LLAMA_INTERMEDIATE_SIZE " +fi + +TORCH_CMD="torchrun --nnodes=${num_nodes} --nproc_per_node=8" + +# If nsys path provided, profile using Nsys, but only on 1st node. 
Requires job to be launched using sbatch
+if [[ -n $nsys_path ]]; then
+ profile_nsys=1
+ # SLURM_PROCID of the first task is 0, matching the "only on 1st node" intent above
+ if [[ $SLURM_PROCID -eq 0 ]]; then
+ NSYS_CMD="nsys profile -w true -t cuda,nvtx,osrt,cudnn,cublas -s cpu --capture-range=cudaProfilerApi --cuda-memory-usage=true --cudabacktrace=true -x true -o $nsys_path --force-overwrite=true"
+ TORCH_CMD="$NSYS_CMD $TORCH_CMD"
+ fi
+else
+ profile_nsys=0
+fi
+
+$TORCH_CMD \
+ --rdzv_endpoint=$MASTER_ADDR:29400 --rdzv_id=100 --rdzv_backend=c10d \
+ train.py \
+ --train_batch_size 2 \
+ --max_steps 100 \
+ --checkpoint_freq 200 \
+ --hidden_width $HIDDEN_WIDTH \
+ --num_layers $NUM_LAYERS \
+ --num_heads $NUM_HEADS \
+ ${LLAMA_ARGS} \
+ --shard_degree $SHARD_DEGREE \
+ --model_type $model_type \
+ --profile_nsys $profile_nsys \
+ $@
+
+# $@ forwards other args given to model.sh to train.py script
+# if any arg is repeated second value is used automatically by argparse, so overrides the value here
diff --git a/3.test_cases/11.modelparallel/train.py b/3.test_cases/11.modelparallel/train.py
new file mode 100644
index 00000000..043afb6b
--- /dev/null
+++ b/3.test_cases/11.modelparallel/train.py
@@ -0,0 +1,21 @@
+"""
+Internal Train.py.
+
+Duplicate of train_external.py with the dummy SageMaker environment enabled.
+"""
+# Set dummy SageMaker env var if not set to pass guardrail
+# for Rubik and Herring cluster scripts. 
+from sm_env_utils import enable_dummy_sm_env +enable_dummy_sm_env() # needs to be called before torch sagemaker is imported +import train_lib +from arguments import parse_args + + +def main(): + """Main function to train GPT.""" + args, _ = parse_args() + train_lib.main(args) + + +if __name__ == "__main__": + main() diff --git a/3.test_cases/11.modelparallel/train_external.py b/3.test_cases/11.modelparallel/train_external.py new file mode 100644 index 00000000..09e5207b --- /dev/null +++ b/3.test_cases/11.modelparallel/train_external.py @@ -0,0 +1,14 @@ +"""Train.py.""" + +import train_lib +from arguments import parse_args + + +def main(): + """Main function to train GPT.""" + args, _ = parse_args() + train_lib.main(args) + + +if __name__ == "__main__": + main() diff --git a/3.test_cases/11.modelparallel/train_lib.py b/3.test_cases/11.modelparallel/train_lib.py new file mode 100644 index 00000000..9818d53f --- /dev/null +++ b/3.test_cases/11.modelparallel/train_lib.py @@ -0,0 +1,685 @@ +"""Train lib function.""" +import datetime +import functools +import math +import re +import time +from contextlib import nullcontext + +# pylint: disable=fixme,import-error,import-outside-toplevel,invalid-name,no-name-in-module,wrong-import-order +import numpy as np +import torch +import torch.distributed as dist +import torch.sagemaker as tsm +import torch.utils.data +import transformers +from accelerate import init_empty_weights +from checkpoints import ( + _CHECKPOINT_DIR_REGEX, + _DEFAULT_STATE_DICT_TYPE, + CheckpointingMethod, + get_coordinator_rank, + is_action_rank, + load_checkpoint, + save_checkpoint, +) +from data.pipelines import GPTDataPipeline, create_data_pipeline +from fsdp_utils import get_backward_fetch_policy, get_sharding_strategy, get_transformer_layer +from logging_utils import ( + create_args_table, + get_logger, + log_and_write_eval_metrics, + log_train_metrics, + show_env_vars, + write_metrics_train_step, +) +from memory_tracker import memory_status, 
memory_status_cpu +from packaging import version as pversion +from torch import optim +from torch.distributed.elastic.multiprocessing.errors import record +from torch.distributed.fsdp import FullyShardedDataParallel as FSDP +from torch.distributed.fsdp import MixedPrecision +from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy +from torch.sagemaker import transform +from torch.sagemaker.delayed_param import DelayedParamIniter +from torch.sagemaker.distributed.fsdp.fully_sharded_data_parallel import ( + register_tp_processing_pre_comm_hook, +) +from torch.sagemaker.grad_norm import clip_grad_norm_ +from torch.sagemaker.utils import utils as tsm_utils # pylint: disable=no-name-in-module +from train_utils import ( + apply_activation_checkpoint, + compute_num_params, + compute_tflops, + create_model, + get_learning_rate_scheduler, + get_model_config, + get_param_groups_by_weight_decay, + patch_neox_rope, +) +from transformers import set_seed + +logger = get_logger() + + +def finetune_with_pretrained_weights_check(args): + # returns True for start of finetuning only + return args.hf_pretrained_model_name_or_dir is not None and args.resume_from_checkpoint is None + + +def finetune_check(args): + # returns True for start of finetuning as well as resuming + return args.hf_pretrained_model_name_or_dir is not None + + +def eval_model(model, data_pipeline, num_batches): + """Eval step.""" + model = model.eval() + n_batches = 0 + loss = 0.0 + + with torch.no_grad(): + for batch_idx, input_data in enumerate(data_pipeline.val_dataloader): + input_ids, mask = data_pipeline.get_val_batch(input_data) + + if batch_idx >= num_batches: + break + + loss += model(input_ids=input_ids, attention_mask=None, labels=input_ids)["loss"] + n_batches += 1 + + if n_batches > 0: + detached_loss = loss.detach() + torch.distributed.all_reduce(detached_loss) + loss = detached_loss.item() / dist.get_world_size() + loss /= n_batches + ppl = math.exp(loss) + 
else: + loss = -1.0 + ppl = -1.0 + + return loss, ppl + + +def reduce_loss(loss): + loss_detached = loss.detach() + dist.all_reduce(loss_detached) + loss_scalar = loss_detached.item() / dist.get_world_size() + return loss_scalar + + +def train_step( + args, batch_idx, nvtx_warmup_iters, data_pipeline, input_data, model, optimizer, lr_scheduler +): + if batch_idx >= nvtx_warmup_iters: + torch.cuda.nvtx.range_push(f"iteration{batch_idx}") + + input_ids, mask, labels = data_pipeline.get_batch(input_data) + + if batch_idx == 0: + # checking only on batch 0 to reduce checks during runtime + assert ( + input_ids.shape[1] == args.max_context_width + ), f"Input data passed {input_ids.shape} does not respect max_context_width set. Note that this is not strictly necessary, but added to prevent mistakes. If you intend to do this, please remove this check." + assert ( + input_ids.shape[1] <= args.max_context_width + ), "Input data passed is larger than max_context_width for model. You need to change max_context_width so model can expect larger sequences" + + optimizer.zero_grad(set_to_none=True) + + torch.cuda.synchronize() + step_start = time.time() + + if batch_idx >= nvtx_warmup_iters: + torch.cuda.nvtx.range_push("forward") + + # uses default causal mask + loss = model(input_ids=input_ids, attention_mask=None, labels=labels)["loss"] + + if batch_idx >= nvtx_warmup_iters: + # for forward + torch.cuda.nvtx.range_pop() + + if args.enable_memory_profiling > 0 and batch_idx < 5: + memory_status_cpu("After_forward_cpu") + memory_status(msg="After_forward") + + if batch_idx >= nvtx_warmup_iters: + torch.cuda.nvtx.range_push("backward") + + loss.backward() + + if batch_idx >= nvtx_warmup_iters: + # for backward + torch.cuda.nvtx.range_pop() + + if args.enable_memory_profiling > 0 and batch_idx < 5: + memory_status_cpu("After_train_step_cpu") + memory_status(msg="After_train_step") + + if batch_idx >= nvtx_warmup_iters: + torch.cuda.nvtx.range_push("opt_step") + + grad_norm = 
clip_grad_norm_(model, args.grad_clip) + optimizer.step() + lr_scheduler.step() + + if batch_idx >= nvtx_warmup_iters: + # for opt step + torch.cuda.nvtx.range_pop() + + if args.clean_cache > 0: + # empty the cache to avoid OOM + torch.cuda.empty_cache() + + if batch_idx >= nvtx_warmup_iters: + # for step + torch.cuda.nvtx.range_pop() + + torch.cuda.synchronize() + step_time = time.time() - step_start + + if args.enable_memory_profiling > 0 and batch_idx < 5: + memory_status(msg="After_opt_step") + + batch_num_sequences = input_ids.shape[0] + batch_seqlen = input_ids.shape[1] + return loss, step_time, batch_num_sequences, batch_seqlen, grad_norm + + +# pylint: disable=no-member,too-many-arguments,too-many-branches,too-many-locals,too-many-statements +def train( + model, + optimizer, + lr_scheduler, + writers, + model_config, + start_epoch, + start_train_path_index, + resume_from_sequence_number, + num_params, + total_steps, + args, + global_rank, + world_size, + checkpointing_pg_metadata, +): + """Train.""" + if args.enable_memory_profiling > 0: + memory_status_cpu(msg="before train step") + + model.train() + dp_rank = global_rank + dp_size = world_size + + if args.tensor_parallel_degree > 1: + dp_rank //= args.tensor_parallel_degree + dp_size //= args.tensor_parallel_degree + + if global_rank == 0: + logger.info("Creating train dataloader") + + throughputs = [] + # Set the same seed for computation + set_seed(args.seed) + + data_pipeline = create_data_pipeline( + args, start_train_path_index, resume_from_sequence_number, dp_rank, dp_size + ) + cur_seq_index = resume_from_sequence_number + epoch = start_epoch + while total_steps < args.max_steps: + nvtx_warmup_iters = 3 + if global_rank == 0: + logger.info(f"Starting training with epoch {epoch}") + + # additional loop around is for GPTDataset as there can be multiple dataloaders + if isinstance(data_pipeline, GPTDataPipeline): + # with new path if incremented at the end of this for loop + 
data_pipeline.create_train_dataset() + + for batch_idx, input_data in enumerate(data_pipeline.train_dataloader): + if total_steps >= args.max_steps: + break + + if args.profile_nsys > 0 and batch_idx == nvtx_warmup_iters: + torch.cuda.cudart().cudaProfilerStart() + + loss, step_time, batch_num_sequences, batch_seqlen, grad_norm = train_step( + args, + batch_idx, + nvtx_warmup_iters, + data_pipeline, + input_data, + model, + optimizer, + lr_scheduler, + ) + total_steps += 1 + cur_seq_index += batch_num_sequences + sample_processed = batch_num_sequences * dp_size + throughput = sample_processed / step_time + throughputs.append(throughput) + + tflops_per_gpu = compute_tflops(throughput, num_params, world_size, batch_seqlen) + + if not total_steps % args.logging_freq and args.log_reduced_training_loss > 0: + loss_scalar = reduce_loss(loss) + else: + loss_scalar = loss.item() + + current_lr = lr_scheduler.get_lr() + display_step = total_steps - 1 + if global_rank == 0: + write_metrics_train_step( + writers, + display_step, + loss_scalar, + throughput, + tflops_per_gpu, + current_lr, + grad_norm, + ) + if not total_steps % args.logging_freq: + log_train_metrics( + args, + total_steps, + display_step, + loss_scalar, + throughput, + tflops_per_gpu, + current_lr, + grad_norm, + throughputs, + num_params, + dp_size, + batch_seqlen, + ) + + # evaluate on validation + if args.validation_freq and not total_steps % args.validation_freq: + cur_state = np.random.get_state() + torch.cuda.empty_cache() + val_loss, val_ppl = eval_model(model, data_pipeline, args.validation_batches) + if global_rank == 0: + log_and_write_eval_metrics(writers, display_step, val_loss, val_ppl) + model = model.train() + if args.preserve_np_state > 0: + np.random.set_state(cur_state) + + # checkpoint + if not total_steps % args.checkpoint_freq[0]: + + if isinstance(data_pipeline, GPTDataPipeline): + save_train_path_index = data_pipeline.cur_train_path + else: + save_train_path_index = 0 + 
save_train_seq_index = cur_seq_index + # technically we have processed save_train_seq_index sequences in this file + # and so index to start from is save_train_seq_index + user_content = { + "cli_args": args.__dict__, + "model_config": model_config, + "num_params": num_params, + "total_steps": total_steps, + "epoch": epoch, + "start_train_path_index": save_train_path_index, + "resume_from_sequence_number": save_train_seq_index, + } + + subdir = f"{args.model_type}-{total_steps}steps" + if global_rank == 0 and not re.match(_CHECKPOINT_DIR_REGEX, subdir): + raise ValueError( + f"Please double check hard-coded checkpoint subdir pattern: `{subdir}` " + f"not matching `{_CHECKPOINT_DIR_REGEX}`." + ) + + if args.enable_memory_profiling > 0: + msg = f"({_DEFAULT_STATE_DICT_TYPE})" + memory_status(msg=f"Before ckpt @{display_step} {msg}") + save_checkpoint( + model, + optimizer, + lr_scheduler, + user_content, + get_sharding_strategy(args.sharding_strategy), + args.checkpoint_dir[0], + subdir, + args.num_kept_checkpoints[0], + checkpointing_pg_metadata, + tensor_parallel_degree=int(args.tensor_parallel_degree), + checkpoint_type=args.checkpoint_type, + ) + if args.enable_memory_profiling > 0: + msg = f"({_DEFAULT_STATE_DICT_TYPE})" + memory_status(msg=f"After ckpt @{display_step} {msg}") + + if isinstance(data_pipeline, GPTDataPipeline): + incremented_in_epoch = data_pipeline.increment_path_in_epoch() + if not incremented_in_epoch: + # path index set to 0 + epoch += 1 + else: + epoch += 1 + # Using median throughput across all steps, could be more robust. + return total_steps, np.median(throughputs) if throughputs else 0 + + +@record +def main(args): + """Main function to train GPT.""" + # Sanity check for args. + # - Checkpoints. + # TODO(sliuxl): Supporting one single checkpoint dir now, and multiple dirs support is missing. 
+ ckpt_lens = ( + len(args.checkpoint_dir), + len(args.checkpoint_freq), + len(args.num_kept_checkpoints), + ) + if len(set(ckpt_lens)) != 1: + raise ValueError(f"Len mismtach for checkpoint dir, freq vs num to keep: {ckpt_lens}.") + + if args.distributed_backend == "smddp": + import smdistributed.dataparallel.torch.torch_smddp # pylint: disable=unused-import + + dist.init_process_group(args.distributed_backend, timeout=datetime.timedelta(seconds=7200)) + global_rank = dist.get_rank() + device = global_rank % torch.cuda.device_count() + world_size = dist.get_world_size() + + if args.tensorboard_dir and global_rank == 0: + from torch.utils.tensorboard import SummaryWriter + + logger.info("Writing metrics for tensorboard to %s.", args.tensorboard_dir) + writers = tuple(SummaryWriter(log_dir=tb_dir) for tb_dir in args.tensorboard_dir) + table_str = create_args_table(args.__dict__) + for writer in writers: + writer.add_text("Arguments", table_str) + else: + writers = () + + smp_config_dict = { + "activation_loading_horizon": args.activation_loading_horizon, + "sm_activation_offloading": args.offload_activations > 0, + } + if args.shard_degree is not None: + smp_config_dict["hybrid_shard_degree"] = args.shard_degree + + smp_config_dict["tensor_parallel_degree"] = args.tensor_parallel_degree + if args.tensor_parallel_degree > 1 and args.use_smp_implementation < 1: + args.use_smp_implementation = 1 + if global_rank == 0: + logger.info( + f"Tensor parallelism (TP) is enabled as tensor_parallel_degree is set to {args.tensor_parallel_degree} (>1). Switching use_smp_implementation to 1 so we can use SMP optimized implementation for TP." 
+ ) + if args.use_smp_implementation: + # For our Mem usage fix to TE, this needs to be True + args.use_orig_params = 1 + + if args.use_synthetic_data and args.validation_freq is not None: + # Overriding validation freq to None as synthetic data + args.validation_freq = None + + tsm.init(smp_config_dict) + show_env_vars(0) + + if global_rank == 0: + for index, (key, value) in enumerate(sorted(args.__dict__.items()), 1): + logger.info("Arguments [%03d/%03d] %-30s: %s", index, len(args.__dict__), key, value) + logger.info("Transformers version: %s", transformers.__version__) + logger.info("World size = %d: # nodes = %d.", world_size, world_size / 8) + + gbs = ( + world_size + * args.max_context_width + * args.train_batch_size + / args.tensor_parallel_degree + ) + logger.info("Global batch size in tokens: %10d (%5.2fM).", gbs, gbs / 1e6) + + set_seed(args.seed) + + if args.enable_memory_profiling > 0: + memory_status_cpu(msg="before model creation") + + if args.bf16: + dtype = torch.bfloat16 + else: + dtype = torch.get_default_dtype() + + if finetune_check(args): + from transformers import AutoConfig + + # Using config for finetune mode, else uses args to create model + model_config = AutoConfig.from_pretrained(args.hf_pretrained_model_name_or_dir) + else: + model_config = get_model_config(args) + + if pversion.parse(transformers.__version__) >= pversion.parse("4.34.0"): + model_config._flash_attn_2_enabled = True + + delayed_param_initer = None + with tsm_utils.timeit(True, "Model creation", global_rank): + if args.delayed_param: + if finetune_with_pretrained_weights_check(args) and dist.get_rank() == 0: + # create model with pretrained weights on one rank even if we want to use + # delayed param, param init on other ranks will still be delayed + model = create_model( + args, + model_config=model_config, + dtype=dtype, + pretrained_model_weights=args.hf_pretrained_model_name_or_dir + if finetune_with_pretrained_weights_check(args) + else None, + ) + num_params = 
compute_num_params(model) + else: + with init_empty_weights(): + model = create_model( + args, + model_config=model_config, + dtype=dtype, + ) + num_params = compute_num_params(model) + if finetune_check(args): + dist.barrier() + else: + model = create_model( + args, + model_config=model_config, + dtype=dtype, + pretrained_model_weights=args.hf_pretrained_model_name_or_dir + if finetune_with_pretrained_weights_check(args) and dist.get_rank() == 0 + else None, + ) + num_params = compute_num_params(model) + + if args.use_smp_implementation: + load_state_dict_from_rank0 = ( + True if finetune_with_pretrained_weights_check(args) else False + ) + model = transform(model, load_state_dict_from_rank0=load_state_dict_from_rank0) + + if args.delayed_param: + # param init fn for delayed param creation + if finetune_check(args): + if dist.get_rank() != 0: + delayed_param_initer = DelayedParamIniter(model) + else: + delayed_param_initer = DelayedParamIniter(model) + + assert set(x.dtype for x in model.parameters()) == set( + [torch.float32] + ), "Model parameters should be in fp32 for FSDP mixed precision" + + start = time.time() + + if global_rank == 0: + logger.info( + "Created model with total parameters: %d (%.2f B)", num_params, num_params * 1e-9 + ) + + transformer_layer = get_transformer_layer(args.model_type, args.use_smp_implementation) + + if args.auto_wrap_policy == "transformer_auto_wrap_policy": + gpt_auto_wrap_policy = functools.partial( + transformer_auto_wrap_policy, + transformer_layer_cls={ + transformer_layer, + }, + ) + elif args.auto_wrap_policy == "size_based_auto_wrap_policy": + gpt_auto_wrap_policy = functools.partial( + size_based_auto_wrap_policy, + ) + + torch.cuda.set_device(device) + if args.bf16: + # buffer set to fp32 as some models in HF such as llama hard code buffers to fp32 + # to be similar with that we set this to fp32 + buffer_dtype = torch.float32 if args.use_smp_implementation else dtype + mixed_precision_policy = MixedPrecision( + 
param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=buffer_dtype + ) + else: + mixed_precision_policy = None + + if args.enable_memory_profiling > 0: + memory_status_cpu(msg="before fsdp wrapper") + + sharding_strategy = get_sharding_strategy(args.sharding_strategy) + + with ( + delayed_param_initer.validate_params_and_buffers_inited() + if (delayed_param_initer and not finetune_with_pretrained_weights_check(args)) + else nullcontext(), + tsm_utils.timeit(True, "FSDP constructor", global_rank), + ): + model = FSDP( # pylint: disable=unexpected-keyword-arg + model, + auto_wrap_policy=gpt_auto_wrap_policy, + mixed_precision=mixed_precision_policy, + sharding_strategy=sharding_strategy, + backward_prefetch=get_backward_fetch_policy(args.backward_fetch_policy), + forward_prefetch=args.forward_prefetch, + limit_all_gathers=args.limit_all_gathers, + device_id=torch.cuda.current_device(), + use_orig_params=args.use_orig_params > 0, + param_init_fn=delayed_param_initer.get_param_init_fn() + if delayed_param_initer + else None, + post_param_init_fn=delayed_param_initer.get_post_param_init_fn() + if delayed_param_initer + else None, + sync_module_states=True if finetune_with_pretrained_weights_check(args) else False, + ) + # Barrier is a workaround to reduce extra memory usage with SMDDP backend + # after the broadcast that happens when we use sync_module_states + # This can be removed once the SMDDP issue is fixed + dist.barrier() + memory_status(msg="After FSDP") + + if global_rank == 0: + logger.info("Wrapped model with FSDP") + + if args.enable_memory_profiling > 0: + memory_status(msg="after fsdp wrapper") + + if args.activation_checkpointing > 0: + apply_activation_checkpoint(args, model=model) + + if args.offload_activations > 0: + from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import offload_wrapper + + model = offload_wrapper(model) + + # Patch RoPE for GPT NEoX where they are created on Host to move them to Device + if args.model_type == 
"gpt_neox" and args.patch_neox_rope > 0: + patch_neox_rope(model) + + param_groups = get_param_groups_by_weight_decay(model) + + optimizer = optim.AdamW( + param_groups, betas=(args.beta1, args.beta2), lr=args.lr, weight_decay=args.weight_decay + ) + + if global_rank == 0: + logger.info("Created optimizer") + + lr_scheduler = get_learning_rate_scheduler(optimizer, args) + + checkpointing_pg_metadata = ( + model.process_group, + get_coordinator_rank(model.process_group), + is_action_rank(global_rank), + ) + + if args.resume_from_checkpoint: + ( + model, + optimizer, + lr_scheduler, + epoch, + total_steps, + start_train_path_index, + resume_from_sequence_number, + ) = load_checkpoint( + args, + model, + optimizer, + lr_scheduler, + args.resume_from_checkpoint, + sharding_strategy, + checkpointing_pg_metadata, + tensor_parallel_degree=int(args.tensor_parallel_degree), + checkpoint_type=args.checkpoint_type, + ) + + else: + total_steps = 0 + epoch = 0 + start_train_path_index = 0 + resume_from_sequence_number = 0 + + start = time.time() + # total_steps, throughput, loss + total_steps, _ = train( + model, + optimizer, + lr_scheduler, + writers, + model_config, + epoch, + start_train_path_index, + resume_from_sequence_number, + num_params, + total_steps, + args, + global_rank, + world_size, + checkpointing_pg_metadata, + ) + time_to_train = time.time() - start + + dist.barrier() + + if args.save_final_model: + save_checkpoint( + model, + None, + None, + {}, + None, + args.model_dir if args.model_dir is not None else args.checkpoint_dir[0], + "" if args.model_dir is not None else "model", + 1, + None, + int(args.tensor_parallel_degree), + checkpoint_type=CheckpointingMethod.FULL, + ) + + if global_rank == 0: + logger.info( + "FSDP training finished successfully %fs (%fmin).", time_to_train, time_to_train / 60.0 + ) + dist.destroy_process_group() diff --git a/3.test_cases/11.modelparallel/train_utils.py b/3.test_cases/11.modelparallel/train_utils.py new file mode 100644 
index 00000000..13992087 --- /dev/null +++ b/3.test_cases/11.modelparallel/train_utils.py @@ -0,0 +1,281 @@ +"""Train utils.""" + +import functools + +import numpy as np +import torch + +# pylint: disable=import-error,import-outside-toplevel,invalid-name,no-member,no-name-in-module,protected-access +import transformers +from fsdp_utils import get_transformer_layer +from learning_rates import AnnealingLR # pylint: disable=wrong-import-order +from logging_utils import get_logger +from packaging import version as pversion +from torch.nn import LayerNorm +from transformers import AutoModelForCausalLM +from transformers.models.llama.modeling_llama import LlamaRMSNorm + +_logger = get_logger() + + +def compute_num_params(model): + """Get num params.""" + num_params = 0 + seen = set() + for p in model.parameters(): # pylint: disable=invalid-name + if p not in seen: + seen.add(p) + if hasattr(p, "ds_shape"): + num_params += np.prod(p.ds_shape) + else: + num_params += np.prod(p.size()) + + return num_params + + +def compute_tflops(throughput, num_params, dp_size, seq_len): + """ + Compute TFLOPs by using the 6 factor which gives us model tflops. + This makes it easier to compare with frameworks such as megatron + which may not use activation checkpointing. + Using factor 8 gives us hardware tflops when using activation checkpointing. + + Based on the formula in + https://developer.nvidia.com/blog/scaling-language-model-training-to-a-trillion-parameters-using-megatron/ + """ + return 6 * throughput * num_params / dp_size * seq_len * 1e-12 + + +def get_learning_rate_scheduler(optimizer, args): + """Get learning rate scheduler.""" + use_checkpoint_lr_scheduler = args.resume_from_checkpoint is not None + + # Add linear learning rate scheduler. 
+ if args.lr_decay_iters is not None: + num_iters = args.lr_decay_iters + else: + num_iters = args.max_steps + num_iters = max(1, num_iters) + init_step = 0 + warmup_iter = args.warmup * num_iters + plateau_iter = warmup_iter + args.plateau * num_iters + lr_scheduler = AnnealingLR( + optimizer, + start_lr=args.lr, + warmup_iter=warmup_iter, + plateau_iter=plateau_iter, + total_iters=num_iters, + decay_style=args.lr_decay_style, + last_iter=init_step, + min_lr=args.min_lr, + use_checkpoint_lr_scheduler=use_checkpoint_lr_scheduler, + override_lr_scheduler=False, + ) + + return lr_scheduler + + +def get_param_groups_by_weight_decay(module): + """Get param groups.""" + weight_decay_params = {"params": []} + no_weight_decay_params = {"params": [], "weight_decay": 0.0} + param_ids = set() + + for module_ in module.modules(): + # if isinstance(module_, FusedLayerNorm) or + if isinstance(module_, (LayerNorm, LlamaRMSNorm)): + for p in list( + module_._parameters.values() + ): # pylint: disable=invalid-name,protected-access + if p is not None and id(p) not in param_ids: + no_weight_decay_params["params"].append(p) + param_ids.add(id(p)) + else: + for n, p in list( + module_._parameters.items() + ): # pylint: disable=invalid-name,protected-access + if p is not None and n != "bias" and id(p) not in param_ids: + weight_decay_params["params"].append(p) + param_ids.add(id(p)) + for n, p in list( + module_._parameters.items() + ): # pylint: disable=invalid-name,protected-access + if p is not None and n == "bias" and id(p) not in param_ids: + no_weight_decay_params["params"].append(p) + param_ids.add(id(p)) + return weight_decay_params, no_weight_decay_params + + +def create_model(args, model_config, dtype, pretrained_model_weights=None): + """Create model.""" + if pretrained_model_weights: + _logger.info("Loading pretrained weights from %s.", pretrained_model_weights) + model = AutoModelForCausalLM.from_pretrained(pretrained_model_weights) + else: + model = 
AutoModelForCausalLM.from_config(model_config) + + if args.use_smp_flash_attn: + if args.model_type == "gpt_neox": + layout = "b h s d" + layers = model.gpt_neox.layers + attn_name = "attention" + elif args.model_type == "gpt2": + layout = "b h s d" + layers = model.transformer.h + attn_name = "attn" # Note: Only self attention is referenced + elif args.model_type == "llama_v2": + layout = "b s h d" + layers = model.model.layers + attn_name = "self_attn" + else: + raise ValueError(f"Unsupported model type {args.model_type}") + + def new_attn( + self, q, k, v, attention_mask=None, head_mask=None + ): # pylint: disable=too-many-arguments + del attention_mask + del head_mask + attn_weights = None + return ( + self.flashmod((q, k, v), causal=True, cast_dtype=dtype, layout=layout), + attn_weights, + ) + + if args.model_type == "llama_v2": + if pversion.parse(transformers.__version__) < pversion.parse("4.34.0"): + # pre 4.34 we use rubik's class + from torch.sagemaker.nn.huggingface.llama_flashattn import LlamaFlashAttention + + flash_attn_class = LlamaFlashAttention + else: + # 4.34 has flash attn already + from transformers.models.llama.modeling_llama import LlamaFlashAttention2 + + flash_attn_class = LlamaFlashAttention2 + # we still create it again here because for pretrained models + # flash attn wouldn't be enabled even for 4.34 + for layer in layers: + prev_layer = getattr(layer, attn_name) + setattr(layer, attn_name, flash_attn_class(model.config)) + attn_layer = getattr(layer, attn_name) + attn_layer.pretraining_tp = model.config.pretraining_tp + with torch.no_grad(): + attn_layer.q_proj.weight.copy_(prev_layer.q_proj.weight) + attn_layer.k_proj.weight.copy_(prev_layer.k_proj.weight) + attn_layer.v_proj.weight.copy_(prev_layer.v_proj.weight) + attn_layer.o_proj.weight.copy_(prev_layer.o_proj.weight) + else: + from torch.sagemaker.nn.attn import ( # pylint: disable=no-name-in-module + FlashSelfAttention, + ) + + for layer in layers: + getattr(layer, 
attn_name).flashmod = FlashSelfAttention(attention_dropout_prob=0.0) + getattr(layer, attn_name)._attn = functools.partial( + new_attn, getattr(layer, attn_name) + ) + + return model + + +def get_model_config(args): + """Get model config.""" + if "gpt_neox" in args.model_type: + from transformers import GPTNeoXConfig + + model_config = GPTNeoXConfig( + vocab_size=args.vocab_size, + hidden_size=args.hidden_width, + num_hidden_layers=args.num_layers, + num_attention_heads=args.num_heads, + hidden_act="gelu", + intermediate_size=4 * args.hidden_width, + rotary_pct=args.rotary_pct, + rotary_emb_base=args.rotary_emb_base, + max_position_embeddings=args.max_context_width, + layer_norm_eps=1e-05, + initializer_range=args.initializer_range, + use_cache=False, + tie_word_embeddings=False, + use_parallel_residual=True, + attention_dropout=0.0, + hidden_dropout=0.0, + ) + elif "gpt2" in args.model_type: + from transformers import GPT2Config + + model_config = GPT2Config( + vocab_size=args.vocab_size, + n_positions=args.max_context_width, + n_embd=args.hidden_width, + n_layer=args.num_layers, + n_head=args.num_heads, + n_inner=None, + activation_function="gelu_new", + resid_pdrop=args.resid_pdrop, + embd_pdrop=args.embd_pdrop, + attn_pdrop=args.attn_pdrop, + layer_norm_epsilon=1e-05, + initializer_range=args.initializer_range, + summary_type="cls_index", + summary_use_proj=True, + summary_activation=None, + summary_proj_to_labels=True, + summary_first_dropout=args.summary_first_pdrop, + use_cache=False, + bos_token_id=50256, + eos_token_id=50256, + return_dict=True, + ) + elif "llama_v2" in args.model_type: + from transformers import LlamaConfig + + model_config = LlamaConfig( + vocab_size=args.vocab_size, + hidden_size=args.hidden_width, + intermediate_size=args.llama_intermediate_size, + num_hidden_layers=args.num_layers, + num_attention_heads=args.num_heads, + num_key_value_heads=args.num_key_value_heads, + hidden_act="silu", + 
max_position_embeddings=args.max_context_width, + initializer_range=args.initializer_range, + rms_norm_eps=1e-5, + use_cache=False, + pretraining_tp=1, + tie_word_embeddings=False, + rope_scaling=None, + ) + else: + raise NotImplementedError + return model_config + + +def apply_activation_checkpoint(args, model=None): + """Apply activation checkpoint.""" + from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( + CheckpointImpl, + apply_activation_checkpointing, + checkpoint_wrapper, + ) + + transformer_layer = get_transformer_layer(args.model_type, args.use_smp_implementation) + check_fn_gpt = lambda submodule: isinstance( # pylint: disable=unnecessary-lambda-assignment + submodule, transformer_layer + ) + # flash attn v2 does not work with no_reentrant + # our activation offloading for 2.0 also does not work with no_reentrant + entrant_wrapper = functools.partial( + checkpoint_wrapper, checkpoint_impl=CheckpointImpl.REENTRANT + ) + apply_activation_checkpointing( + model, checkpoint_wrapper_fn=entrant_wrapper, check_fn=check_fn_gpt + ) + + +def patch_neox_rope(model): + """Patch neox rope.""" + device = torch.cuda.current_device() + for layer in model.gpt_neox.layers: + layer.attention.rotary_emb.sin_cached = layer.attention.rotary_emb.sin_cached.to(device) + layer.attention.rotary_emb.cos_cached = layer.attention.rotary_emb.cos_cached.to(device) From df64561f0b61f8df08442b617ef76361d02461dc Mon Sep 17 00:00:00 2001 From: Andrew Tian Date: Wed, 13 Dec 2023 16:23:10 -0800 Subject: [PATCH 257/648] adding more scripts needed --- 3.test_cases/11.modelparallel/checkpoints.py | 514 ++++++++++++++++++ .../11.modelparallel/learning_rates.py | 143 +++++ .../11.modelparallel/memory_tracker.py | 113 ++++ 3 files changed, 770 insertions(+) create mode 100644 3.test_cases/11.modelparallel/checkpoints.py create mode 100644 3.test_cases/11.modelparallel/learning_rates.py create mode 100644 3.test_cases/11.modelparallel/memory_tracker.py diff --git 
a/3.test_cases/11.modelparallel/checkpoints.py b/3.test_cases/11.modelparallel/checkpoints.py new file mode 100644 index 00000000..296b8fc2 --- /dev/null +++ b/3.test_cases/11.modelparallel/checkpoints.py @@ -0,0 +1,514 @@ +"""Export distributed checkpoints.""" + +import os +import pickle +import statistics +import time +import warnings +from enum import Enum, auto +from typing import Any, Optional + +import numpy + +# pylint: disable=import-error,no-name-in-module +import torch +import torch.distributed as dist +import torch.sagemaker.checkpoint.utils as tsm_checkpoint +from data.utils import is_s3_source, parse_s3_address +from logging_utils import get_logger +from torch.distributed import checkpoint +from torch.distributed._shard.api import load_with_process_group +from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict +from torch.distributed.fsdp import FullyShardedDataParallel as FSDP +from torch.distributed.fsdp import StateDictType +from torch.distributed.fsdp.api import FullStateDictConfig, ShardedOptimStateDictConfig +from torch.sagemaker.distributed.fsdp import checkpoint as tsm_fsdp_checkpoint +from torch.sagemaker.utils.process_group_utils import get_global_ranks + +logger = get_logger() + + +# How to remove extra checkpoints, `regex` and `sort_fn` need to match for correctness. +# - Sort subdir by the **last** int, right before `steps` as shown in the regex. 
+_CHECKPOINT_DIR_REGEX = r"^.*\d+steps$" +_CHECKPOINT_SORT_FN = tsm_checkpoint.SORT_BY_LAST_INT +_DEFAULT_STATE_DICT_TYPE = StateDictType.SHARDED_STATE_DICT + +_EXPORT_KEYS = ( + "resume_from_sequence_number", + "start_train_path_index", + "total_steps", +) + +_MAX_ATTEMPTS = 3 + + +class CheckpointingMethod(Enum): + SHARDED = auto() + LOCAL = auto() + FULL = auto() + USE_PG_WITH_UTIL = auto() + + +def backward_compat_get_resume_from_sequence_number(args, state_dict): + if "resume_from_sequence_number" not in state_dict: + return state_dict["start_batch_index"] * args.train_batch_size + else: + return state_dict["resume_from_sequence_number"] + + +def compute_stats_of_metric(metric: float, key: str, group: Optional[Any] = None): + """Compute metric stats.""" + times = [None for _ in range(dist.get_world_size(group))] + dist.all_gather_object(times, metric, group=group) + + if dist.get_rank() == 0: + logger.info( + "Time taken (min, max, mean, stddev, median, len) = " + "(%7.2f, %7.2f, %7.2f, %7.2f, %7.2f, %02d): %s.", + numpy.min(times), + numpy.max(times), + statistics.mean(times), + statistics.stdev(times), + statistics.median(times), + len(times), + key, + ) + + +def is_action_rank(global_rank): + from torch.sagemaker import state + return state.ranker.get_rep_rank(global_rank) == 0 + + +def get_coordinator_rank(process_group): + model_pg_ranks = get_global_ranks(process_group) + return min(model_pg_ranks) + + +def _retry_write_to_disk(func, max_attempts=_MAX_ATTEMPTS): + for retry in range(max_attempts): + try: + func() + return + except (RuntimeError, pickle.UnpicklingError) as error: + if isinstance(error, pickle.UnpicklingError) or ("unexpected pos" in str(error)): + # TODO(sliuxl): Sometimes writes to fsx fail, not sure why yet, retry for now. 
+ logger.error(error) + logger.error( + "Retry [%d/%d] failed to write to disk, in case it was due to transient error.", + retry, + max_attempts, + ) + if retry < max_attempts - 1: + continue + + raise error + + +def _save_with_util( # pylint: disable=too-many-arguments + model, + optimizer, + scheduler, + user_content, + sharding_strategy, + save_dir: str, + checkpointing_pg_metadata, +): + """Save FSDP checkpoint: With process groups.""" + # By default, it'll use process groups when exporting checkpoints. + tsm_fsdp_checkpoint.save_model_checkpoint( + model, + _DEFAULT_STATE_DICT_TYPE, + save_dir, + sharding_strategy, + checkpointing_pg_metadata, + log=dist.get_rank() == 0, + optimizer=optimizer, + scheduler=scheduler, + extra_exports=( + {key: user_content[key] for key in _EXPORT_KEYS} if user_content is not None else None + ), + ) + + +def _save_sharded( # pylint: disable=too-many-arguments + model, + optimizer, + scheduler, + user_content, + save_dir: str, + checkpointing_pg_metadata, +): + """Save FSDP checkpoint: Without process groups.""" + with FSDP.state_dict_type(model, _DEFAULT_STATE_DICT_TYPE): + with warnings.catch_warnings(): + warnings.simplefilter("ignore", UserWarning) + # pylint: disable=line-too-long + # torch/distributed/fsdp/_common_utils.py:291: UserWarning: + # An unexpected prefix is detected. This case should only happen when using DMP with FSDP. + # prefix = _checkpoint_wrapped_module.gpt_neox.layers.34., submodule_name = _fsdp_wrapped_module + # pylint: enable=line-too-long + # TODO(rubik) Not sure why this shows up + + optim_state_dict = FSDP.optim_state_dict(model, optimizer) + + state_dict = { + "model": model.state_dict(), + "optimizer": optim_state_dict, + "scheduler": scheduler.state_dict(), + } + # merge user content to state_dict + state_dict = state_dict | user_content + + if dist.get_rank() == 0: + logger.info("Processed state dict to save. 
Starting write to disk now.") + + process_group, coordinator_rank, action_rank = checkpointing_pg_metadata + with warnings.catch_warnings(): + warnings.simplefilter("ignore", UserWarning) + # torch/distributed/checkpoint/filesystem.py:157: UserWarning: TypedStorage is deprecated. + + if action_rank: + checkpoint.save_state_dict( + state_dict=state_dict, + storage_writer=checkpoint.FileSystemWriter(save_dir), + planner=checkpoint.DefaultSavePlanner(), + process_group=process_group, + coordinator_rank=coordinator_rank, + ) + + +def _save_full( # pylint: disable=too-many-arguments + model, + save_dir: str, +): + """Save FSDP checkpoint: Without process groups.""" + if dist.get_rank() == 0: + logger.warning("Full checkpoint only saves the model") + + with FSDP.state_dict_type( + model, + StateDictType.FULL_STATE_DICT, + FullStateDictConfig(rank0_only=True, offload_to_cpu=True), + ): + state_dict = model.state_dict() + if dist.get_rank() == 0: + logger.info("Processed state dict to save. Starting write to disk now.") + os.makedirs(save_dir, exist_ok=True) + # this name is needed for HF from_pretrained API to work fine + torch.save(state_dict, os.path.join(save_dir, "pytorch_model.bin")) + model.config.save_pretrained(save_dir) + dist.barrier() + + +def _save_local( # pylint: disable=too-many-arguments + model, + optimizer, + scheduler, + user_content, + save_dir: str, +): + """Save FSDP checkpoint: Without process groups.""" + os.makedirs(save_dir, exist_ok=True) + with FSDP.state_dict_type(model, StateDictType.LOCAL_STATE_DICT): + optim_state_dict = optimizer.state_dict() + + state_dict = { + "model": model.state_dict(), + "optimizer": optim_state_dict, + "scheduler": scheduler.state_dict(), + } + # merge user content to state_dict + state_dict = state_dict | user_content + + if dist.get_rank() == 0: + logger.info("Processed state dict to save. 
Starting write to disk now.") + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", UserWarning) + # torch/distributed/checkpoint/filesystem.py:157: UserWarning: TypedStorage is deprecated. + def write_fn(): + torch.save(state_dict, os.path.join(save_dir, f"{dist.get_rank()}.pt")) + + _retry_write_to_disk(write_fn) + + +def save_checkpoint( # pylint: disable=too-many-arguments,too-many-locals + model, + optimizer, + scheduler, + user_content, + sharding_strategy, + root_dir: str, + subdir: str, + num_kept_checkpoints: int, + checkpointing_pg_metadata, + tensor_parallel_degree: int, + checkpoint_type=CheckpointingMethod.LOCAL, +): + """Export checkpoint.""" + from torch.sagemaker import state + # seeing a NCCL crash during broadcast in checkpointing sometimes + # seems like that happens when cached memory usage is at the limit + # so clearing cache + torch.cuda.empty_cache() + + if not root_dir: + return + + save_dir = os.path.join(root_dir, subdir) + if is_s3_source(root_dir): + save_dir = os.path.join(f"/tmp/checkpoint_{dist.get_rank()}", subdir) + + if dist.get_rank() == 0: + logger.info("Checkpointing to %s ...", save_dir) + + if isinstance(checkpoint_type, str): + checkpoint_type = CheckpointingMethod[checkpoint_type.upper()] + + ckpt_start = time.process_time() + if checkpoint_type == CheckpointingMethod.SHARDED: + if tensor_parallel_degree > 1: + save_dir = os.path.join(save_dir, f"tp{tensor_parallel_degree}-{state.tp_rank}") + _save_sharded(model, optimizer, scheduler, user_content, save_dir, checkpointing_pg_metadata) + elif checkpoint_type == CheckpointingMethod.LOCAL: + if tensor_parallel_degree > 1: + raise NotImplementedError("Local checkpointing unsupported with tensor parallelism") + _save_local(model, optimizer, scheduler, user_content, save_dir) + elif checkpoint_type == CheckpointingMethod.FULL: + _save_full(model, save_dir) + elif checkpoint_type == CheckpointingMethod.USE_PG_WITH_UTIL: + _save_with_util( + model, optimizer, 
scheduler, user_content, sharding_strategy, save_dir, checkpointing_pg_metadata + ) + ckpt_time = time.process_time() - ckpt_start + dist.barrier() + + process_group = None if checkpointing_pg_metadata is None else checkpointing_pg_metadata[0] + compute_stats_of_metric(ckpt_time, "saving checkpoint (s)", process_group) + + if dist.get_rank() == 0: + logger.info("Finished checkpointing to %s.", save_dir) + + if is_s3_source(root_dir): + s3_start = time.process_time() + + bucket, bucketdir = parse_s3_address(root_dir) + bucketdir = os.path.join(bucketdir, subdir) + import boto3 + + s3_client = boto3.client("s3") + for fname in os.listdir(save_dir): + fpath = os.path.join(save_dir, fname) + bucketobj = os.path.join(bucketdir, fname) + s3_client.upload_file(fpath, bucket, bucketobj) + + s3_time = time.process_time() - s3_start + logger.info("Rank %d: saved to %s in %f sec", dist.get_rank(), bucketdir, s3_time) + dist.barrier() + + # Only limit subdirs when writing intermediate checkpoints, not the final checkpoint. + if not subdir: + return + + # Limit checkpoints after writing the latest one. + tsm_checkpoint.limit_num_subdirs( + # Need to access the **full** path. + os.path.abspath(root_dir), + num_kept_checkpoints, + sort_fn=_CHECKPOINT_SORT_FN, + regex=_CHECKPOINT_DIR_REGEX, + # Both log messages and do the actual remove as needed for one single rank. + log=dist.get_rank() == 0, + ) + + +# pylint: disable=too-many-arguments,too-many-locals +def _load_with_util( + model, + optimizer, + scheduler, + checkpoint_dir, + sharding_strategy, + checkpointing_pg_metadata, +): + """Load FSDP checkpoint: With process groups.""" + # By default, it'll use process groups when exporting checkpoints. 
+    return tsm_fsdp_checkpoint.load_model_checkpoint(
+        model,
+        _DEFAULT_STATE_DICT_TYPE,
+        checkpoint_dir,
+        sharding_strategy,
+        checkpointing_pg_metadata,
+        log=dist.get_rank() == 0,
+        optimizer=optimizer,
+        scheduler=scheduler,
+        extra_imports={key: 0 for key in _EXPORT_KEYS},
+    )
+
+
+def _load_sharded(model, optimizer, scheduler, checkpoint_dir, checkpointing_pg_metadata):
+    process_group, coordinator_rank, _ = checkpointing_pg_metadata
+    with FSDP.state_dict_type(
+        model,
+        _DEFAULT_STATE_DICT_TYPE,
+        optim_state_dict_config=ShardedOptimStateDictConfig(offload_to_cpu=True),
+    ):
+        state_dict = {
+            "model": model.state_dict(),
+            "scheduler": scheduler.state_dict(),
+            "epoch": 0,
+            "total_steps": 0,
+            "start_train_path_index": 0,
+            "resume_from_sequence_number": 0,
+            # cannot load the optimizer state_dict together with the model state_dict
+        }
+
+        def _load_from_disk():
+            # NOTE: `_{save, load}_sharded` need to be consistent using the `process_group`s.
+            checkpoint.load_state_dict(
+                state_dict=state_dict,
+                storage_reader=checkpoint.FileSystemReader(checkpoint_dir),
+                process_group=process_group,
+                coordinator_rank=coordinator_rank,
+                planner=checkpoint.DefaultLoadPlanner(),
+            )
+
+        try:
+            _load_from_disk()
+        except KeyError:
+            # when loading old checkpoints which had start_batch_index instead of resume_from_sequence_number
+            # replace the key in dummy state_dict, and retry
+            del state_dict["resume_from_sequence_number"]
+            state_dict["start_batch_index"] = 0
+            _load_from_disk()
+
+        if dist.get_rank() == 0:
+            logger.info("Loaded model state from disk")
+
+        model.load_state_dict(state_dict["model"])
+        scheduler.load_state_dict(state_dict["scheduler"])
+        optim_state = load_sharded_optimizer_state_dict(
+            model_state_dict=state_dict["model"],
+            optimizer_key="optimizer",
+            storage_reader=checkpoint.FileSystemReader(checkpoint_dir),
+            process_group=model.process_group,
+        )
+
+        if dist.get_rank() == 0:
+            logger.info("Loaded and sharded optimizer state from disk")
+ + with warnings.catch_warnings(): + warnings.simplefilter("ignore", UserWarning) + # UserWarning to replace all_gather_base with all_gather_into_tensor floods the logs + flattened_osd = FSDP.optim_state_dict_to_load( + optim_state["optimizer"], model, optimizer + ) + + if dist.get_rank() == 0: + logger.info("Converted optimizer state dict for FSDP") + + optimizer.load_state_dict(flattened_osd) + + return state_dict + + +def gather_and_log_param_buffer_norms(model): + with FSDP.state_dict_type( + model, + StateDictType.FULL_STATE_DICT, + state_dict_config=FullStateDictConfig(offload_to_cpu=True, rank0_only=True), + ): + sd = model.state_dict() + for k, v in sd.items(): + if dist.get_rank() == 0: + print(k, torch.linalg.norm(v), v.sum()) + for n, m in model.named_buffers(): + if dist.get_rank() == 0: + print(dist.get_rank(), n, torch.linalg.norm(m), m.sum()) + + +def _load_local(model, optimizer, scheduler, checkpoint_dir): + with load_with_process_group(model.process_group): + state_dict = torch.load(os.path.join(checkpoint_dir, f"{dist.get_rank()}.pt")) + + with FSDP.state_dict_type(model, StateDictType.LOCAL_STATE_DICT): + if dist.get_rank() == 0: + logger.info("Loaded model state from disk") + + model.load_state_dict(state_dict["model"]) + scheduler.load_state_dict(state_dict["scheduler"]) + optimizer.load_state_dict(state_dict["optimizer"]) + + return state_dict + + +def load_checkpoint( + args, + model, + optimizer, + scheduler, + checkpoint_dir: str, + sharding_strategy, + checkpointing_pg_metadata, + tensor_parallel_degree: int, + checkpoint_type=CheckpointingMethod.LOCAL, +): + """Load checkpoint.""" + from torch.sagemaker import state + if dist.get_rank() == 0: + logger.info("Loading checkpoint from %s ...", checkpoint_dir) + + load_start = time.process_time() + if isinstance(checkpoint_type, str): + checkpoint_type = CheckpointingMethod[checkpoint_type.upper()] + + if checkpoint_type == CheckpointingMethod.USE_PG_WITH_UTIL: + loaded = _load_with_util( + 
model, optimizer, scheduler, checkpoint_dir, sharding_strategy, checkpointing_pg_metadata + ) + elif checkpoint_type == CheckpointingMethod.SHARDED: + if tensor_parallel_degree > 1: + checkpoint_dir = os.path.join(checkpoint_dir, f"tp{tensor_parallel_degree}-{state.tp_rank}") + loaded = _load_sharded(model, optimizer, scheduler, checkpoint_dir, checkpointing_pg_metadata) + elif checkpoint_type == CheckpointingMethod.LOCAL: + if tensor_parallel_degree > 1: + raise NotImplementedError("Local checkpointing unsupported with tensor parallelism") + loaded = _load_local(model, optimizer, scheduler, checkpoint_dir) + else: + raise NotImplementedError + + load_time = time.process_time() - load_start + dist.barrier() + compute_stats_of_metric(load_time, "loading checkpoint (s)") + + if dist.get_rank() == 0: + logger.info("Checkpoint loaded from %s.", checkpoint_dir) + + if checkpoint_type == CheckpointingMethod.USE_PG_WITH_UTIL: + model = loaded[tsm_fsdp_checkpoint.EXPORT_KEY_MODEL] + optimizer = loaded[tsm_fsdp_checkpoint.EXPORT_KEY_OPTIMIZER] + scheduler = loaded[tsm_fsdp_checkpoint.EXPORT_KEY_SCHEDULER] + state_dict = loaded[tsm_fsdp_checkpoint.EXPORT_KEY_IDENTITY] + else: + state_dict = loaded + + resume_from_sequence_number = backward_compat_get_resume_from_sequence_number(args, state_dict) + if dist.get_rank() == 0: + logger.info( + "Loaded state from disk: epoch %d, start_train_path_index %d, resume_from_sequence_number %d.", + state_dict["epoch"], + state_dict["start_train_path_index"], + resume_from_sequence_number, + ) + + return ( + model, + optimizer, + scheduler, + state_dict["epoch"], + state_dict["total_steps"], + state_dict["start_train_path_index"], + resume_from_sequence_number, + ) diff --git a/3.test_cases/11.modelparallel/learning_rates.py b/3.test_cases/11.modelparallel/learning_rates.py new file mode 100644 index 00000000..ccdf91dc --- /dev/null +++ b/3.test_cases/11.modelparallel/learning_rates.py @@ -0,0 +1,143 @@ +# coding=utf-8 +# Copyright (c) 
2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Learning rate decay functions.""" + +import math + +import torch.distributed as dist +from logging_utils import get_logger + +logger = get_logger() + +# pylint: disable=invalid-name +class AnnealingLR: # pylint: disable=too-many-instance-attributes + """Anneals the learning rate.""" + + def __init__( # pylint: disable=too-many-arguments + self, + optimizer, + start_lr, + warmup_iter, + plateau_iter, + total_iters, + decay_style, + last_iter, + min_lr=0.0, + use_checkpoint_lr_scheduler=True, + override_lr_scheduler=False, + ): + + # Class values. + self.optimizer = optimizer + self.start_lr = start_lr + self.min_lr = min_lr + self.warmup_iter = warmup_iter + self.plateau_iter = plateau_iter + self.num_iters = last_iter + self.end_iter = total_iters + assert self.end_iter > 0 + self.decay_style = decay_style + self.override_lr_scheduler = override_lr_scheduler + self.use_checkpoint_lr_scheduler = use_checkpoint_lr_scheduler + if self.override_lr_scheduler: + assert not self.use_checkpoint_lr_scheduler, ( + "both override and " "use-checkpoint are set." + ) + # Set the learning rate + self.step(self.num_iters) + self.rank = dist.get_rank() + + def get_lr(self): + """Learning rate decay functions from: + https://openreview.net/pdf?id=BJYwwY9ll pg. 4""" + + num_iters_ = min(self.num_iters, self.end_iter - self.warmup_iter) + # Warmup. 
+ if self.warmup_iter > 0 and self.num_iters <= self.warmup_iter: + return float(self.start_lr) * num_iters_ / self.warmup_iter + + num_iters_ = num_iters_ - self.warmup_iter + if self.decay_style == "linear": + lr = self.start_lr * (self.end_iter - num_iters_) / self.end_iter + elif self.decay_style == "plateau": + if self.num_iters <= self.plateau_iter: + lr = self.start_lr + else: + lr = ( + self.start_lr + * (self.end_iter - self.num_iters) + / (self.end_iter - self.plateau_iter) + ) + elif self.decay_style == "cosine": + lr = self.start_lr / 2.0 * (math.cos(math.pi * num_iters_ / self.end_iter) + 1) + elif self.decay_style == "exponential": + # exp(-0.693) = 1/2 + lr = self.start_lr * math.exp(-0.693 * num_iters_ / self.end_iter) + else: + lr = self.start_lr + return max(lr, self.min_lr) + + def step(self, step_num=None): + """Set lr for all parameters groups.""" + if step_num is None: + step_num = self.num_iters + 1 + self.num_iters = step_num + new_lr = self.get_lr() + for group in self.optimizer.param_groups: + group["lr"] = new_lr + + def state_dict(self): + """State dict.""" + state_dict = { + "start_lr": self.start_lr, + "warmup_iter": self.warmup_iter, + "num_iters": self.num_iters, + "decay_style": self.decay_style, + "end_iter": self.end_iter, + "min_lr": self.min_lr, + } + return state_dict + + def _check_and_set(self, cls_value, sd_value, name): + """Auxiliary function for checking the values in the checkpoint and + setting them.""" + if self.override_lr_scheduler: + if self.rank == 0: + logger.info(f"Overriding {name} value to {cls_value}") + return cls_value + + if not self.use_checkpoint_lr_scheduler: + assert ( + cls_value == sd_value + ), f"AnnealingLR: class input value and checkpoint values for {name} do not match" + if self.rank == 0: + logger.info(f" > using checkpoint value {sd_value} for {name}") + return sd_value + + def load_state_dict(self, sd): + """Load state dict.""" + self.start_lr = self._check_and_set(self.start_lr, 
sd["start_lr"], "learning rate") + self.min_lr = self._check_and_set(self.min_lr, sd["min_lr"], "minimum learning rate") + self.warmup_iter = self._check_and_set( + self.warmup_iter, sd["warmup_iter"], "warmup iterations" + ) + self.end_iter = self._check_and_set( + self.end_iter, sd["end_iter"], "total number of iterations" + ) + self.decay_style = self._check_and_set(self.decay_style, sd["decay_style"], "decay style") + + self.num_iters = sd["num_iters"] + self.step(self.num_iters) diff --git a/3.test_cases/11.modelparallel/memory_tracker.py b/3.test_cases/11.modelparallel/memory_tracker.py new file mode 100644 index 00000000..aac5ffe3 --- /dev/null +++ b/3.test_cases/11.modelparallel/memory_tracker.py @@ -0,0 +1,113 @@ +"""Memory tracker.""" + +import os + +import psutil +import torch +import torch.distributed as dist + +try: + from py3nvml import py3nvml +except ImportError: + py3nvml = None + +# pylint: disable=global-statement,no-member +dtype_to_bit = { + torch.float32: 32, + torch.float64: 64, + torch.float16: 16, + torch.bfloat16: 16, + torch.uint8: 8, + torch.int8: 8, + torch.int16: 16, + torch.int32: 32, + torch.int64: 64, + torch.bool: 1, +} + +process = psutil.Process(os.getpid()) +base_mem_usage = process.memory_info().data +last_mem_usage = base_mem_usage + +_GB = 1024**3 +_FORMAT = "7.4f" + + +def memory_status(msg="", reset_max=True, sync=True): + """Memory status gpu.""" + rank = dist.get_rank() + local_rank = rank % torch.cuda.device_count() + + if rank > 0: + return + + if sync: + torch.cuda.synchronize() + + if py3nvml is not None: + py3nvml.nvmlInit() + handle = py3nvml.nvmlDeviceGetHandleByIndex(local_rank) + info = py3nvml.nvmlDeviceGetMemoryInfo(handle) + total_used = info.used / _GB + total_used_str = f"Totally used GPU memory: {total_used} GB." + else: + total_used_str = "" + + # Convert to GB for printing. 
+ alloced = torch.cuda.memory_allocated(device=local_rank) / _GB + max_alloced = torch.cuda.max_memory_allocated(device=local_rank) / _GB + cached = torch.cuda.memory_reserved(device=local_rank) / _GB + max_cached = torch.cuda.max_memory_reserved(device=local_rank) / _GB + + print( + f"[MEMORY] (torch, rank, device) = ({torch.__version__}, {rank}, {local_rank}), " + f"(alloc, max_alloc, cache, max_cache) = ({alloced:{_FORMAT}}, {max_alloced:{_FORMAT}}, " + f"{cached:{_FORMAT}}, {max_cached:{_FORMAT}}) GB. " + f"{total_used_str} [{msg:10s}]", + ) + + if reset_max: + torch.cuda.reset_peak_memory_stats() + + if py3nvml is not None: + py3nvml.nvmlShutdown() + + +def memory_status_cpu(msg=""): + """Memory status cpu.""" + rank = dist.get_rank() + local_rank = rank % torch.cuda.device_count() + + if rank > 0: + return + + import gc # pylint: disable=import-outside-toplevel + + global last_mem_usage + global base_mem_usage # pylint: disable=global-variable-not-assigned + + gc.collect() + gc.collect() + gc.collect() + objects = gc.get_objects() + tensors = [obj for obj in objects if isinstance(obj, torch.Tensor) and not obj.is_cuda] + torch_usage = 0 + for t in tensors: # pylint: disable=invalid-name + torch_usage += t.numel() * dtype_to_bit[t.dtype] + # total_usage = psutil.virtual_memory()[3] # This will get the total usage for all processes + current_usage = process.memory_info().data + total_usage = current_usage - base_mem_usage + usage_change = current_usage - last_mem_usage + last_mem_usage = current_usage + + torch_usage /= _GB + total_usage /= _GB + usage_change /= _GB + base_usage = base_mem_usage / _GB + + print( + f"[CPU MEMORY] (torch, rank, device) = ({torch.__version__}, {rank}, {local_rank}), " + f"(torch tensor, mem, change since last measurement, base) = ({torch_usage:{_FORMAT}}, " + f"{total_usage:{_FORMAT}}, {usage_change:{_FORMAT}}, {base_usage:{_FORMAT}}): " + f"{msg}" + ) From 4298f639cdf63a20ecb21b7fa272c3f267bcb6c9 Mon Sep 17 00:00:00 2001 From: 
adtian2 <55163384+adtian2@users.noreply.github.com> Date: Wed, 13 Dec 2023 16:04:25 -0800 Subject: [PATCH 258/648] Update README.md --- 3.test_cases/11.modelparallel/README.md | 114 +++++++++++++--- 3.test_cases/11.modelparallel/checkpoints.py | 37 +++-- 3.test_cases/11.modelparallel/conda_launch.sh | 6 +- 3.test_cases/11.modelparallel/data_utils.py | 10 -- 3.test_cases/11.modelparallel/hosts/README.md | 1 + .../11.modelparallel/logging_utils.py | 129 ++++++++++++++++++ .../11.modelparallel/scripts/model.sh | 2 +- 3.test_cases/11.modelparallel/train.py | 21 --- .../11.modelparallel/train_external.py | 2 +- 3.test_cases/11.modelparallel/train_lib.py | 2 +- 10 files changed, 259 insertions(+), 65 deletions(-) delete mode 100644 3.test_cases/11.modelparallel/data_utils.py create mode 100644 3.test_cases/11.modelparallel/hosts/README.md create mode 100644 3.test_cases/11.modelparallel/logging_utils.py delete mode 100644 3.test_cases/11.modelparallel/train.py diff --git a/3.test_cases/11.modelparallel/README.md b/3.test_cases/11.modelparallel/README.md index e293db8a..8dc19d4a 100644 --- a/3.test_cases/11.modelparallel/README.md +++ b/3.test_cases/11.modelparallel/README.md @@ -1,5 +1,5 @@ -## SMP v2 Examples -In this directory we have example scripts for training with SMP Pytorch. We assume you have already setup a conda environment with SMP Pytorch. Below we first describe the files in this directory, and then go over how to run some jobs. +## Using Model Parallelism with Simple Llama 2 Training Job +In this directory, we have example scripts for training with SMP Pytorch. We assume you have already setup a Hyperpod instance. Below we first describe the files in this directory, and then go over how to run some jobs. ### Files - `train_lib.py` : Main training script @@ -7,37 +7,106 @@ In this directory we have example scripts for training with SMP Pytorch. We assu - `scripts/model.sh` : Main script which passes the config and launches `train.py`. 
This is used by `conda_launch.sh` and scripts in convergence_jobs folder. If you want to define your own model configuration you might want to modify this. - `arguments.py` : Parses arguments for the job. Please refer to this file for all the options the script supports. - `checkpoints.py` : Handles saving and loading of checkpoints -- `data_pipeline.py`: Creates dataloaders for the job. Modify this file to load your own dataset. -- `delayed_param.py` : Delayed parameter initialization to init large models without OOM -- `learning_rates.py`, `train_utils.py`, `fsdp_utils.py`, `utils.py`, `memory_tracker.py` have utilities used by the main script. +- `data/pipelines/data_pipeline.py`: Creates dataloaders for the job. Modify this file to load your own dataset. +- `data/utils.py`, `fsdp_utils.py`, `learning_rates.py`, `logging_utils.py`, `memory_tracker.py`, `train_utils.py` have utilities used by the main script. #### Launch scripts -- `conda_launch.sh` : This is a slurm script which launches a job using the activated conda environment. It expects to be run on the master node of the Slurm cluster. See below section for instructions. By default it runs with synthetic data to make it easy to test the scripts. -- `convergence_jobs/neox_7b/neox_7b_4Mtokens.sh` : This is an example for launching a convergence job with slurm, an extension of `conda_launch.sh` +- `conda_launch.sh` : This is a slurm script which launches a job using the activated conda environment. It expects to be run on the head node of the Slurm cluster. See below section for instructions. By default it runs with synthetic data to make it easy to test the scripts. ## Note on paths These scripts need to be put on a directory that can be accessed on all nodes, such as FSX. We also recommend setting all paths (for input data and checkpoints) as shared directories using FSX. -These paths can be set in scripts as shown in `convergence_jobs/neox_7b/neox_7b_4Mtokens.sh`. 
+ +### Conda Environment Setup +All commands should be run on a compute node. Also the cuda version should be decided here between versions 11.8 and 12.1. We recommend using Miniconda and installing it in `/fsx` so that it can be sourced on any node. Instructions here: https://docs.conda.io/projects/conda/en/latest/user-guide/install/linux.html + +``` +SMP_CUDA_VER=11.8 or 12.1 + +source /fsx//miniconda3/bin/activate + +export ENV_PATH=/fsx//miniconda3/envs/ +conda create -p ${ENV_PATH} python=3.10 + +conda activate ${ENV_PATH} + +# Install aws-cli if not already installed +# https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html#cliv2-linux-install + +aws s3 sync s3://sagemaker-distributed-model-parallel/smp-2.0.0-pt-2.0.1/2023-12-11/smp-v2/ /tmp/local_smp_install_channel/ + +conda install pytorch="2.0.1=sm_py3.10_cuda${SMP_CUDA_VER}*" packaging --override-channels \ + -c file:///tmp/local_smp_install_channel/ \ + -c pytorch -c numba/label/dev \ + -c pytorch-nightly -c nvidia -c conda-forge + +# Install dependencies of the script as below +python -m pip install packaging transformers==4.31.0 accelerate ninja tensorboard h5py datasets \ + && python -m pip install expecttest hypothesis \ + && python -m pip install "flash-attn>=2.0.4" --no-build-isolation + +# Install SMDDP wheel (only run for cuda11.8) +SMDDP_WHL="smdistributed_dataparallel-2.0.2-cp310-cp310-linux_x86_64.whl" \ + && wget -q https://smdataparallel.s3.amazonaws.com/binary/pytorch/2.0.1/cu118/2023-12-07/${SMDDP_WHL} \ + && pip install --force ${SMDDP_WHL} \ + && rm ${SMDDP_WHL} +``` +### cuDNN Installation for cuda11.8 and cuda12.1 +We recommend that you install cuDNN for your desired cuda version using from the NVIDIA Developer page: https://developer.nvidia.com/cudnn. Once you visit the link you will need to: +1. Make a developer account. +2. Click on "Download cuDNN Library". +3. Agree to the terms. +4. 
Download the Local Installer for Linux x86_64 (Tar) for cuda11 or cuda12 (we recommend version 8.9.5 and will use that version in the example going forward). + +Once you have the tar file downloaded you can run the following commands to finish the installation: +``` +# cuDNN installation for TransformerEngine installation for cuda11.8 +tar xf cudnn-linux-x86_64-8.9.5.30_cuda11-archive.tar.xz \ + && rm -rf /usr/local/cuda-$SMP_CUDA_VER/include/cudnn* /usr/local/cuda-$SMP_CUDA_VER/lib/cudnn* \ + && cp ./cudnn-linux-x86_64-8.9.5.30_cuda11-archive/include/* /usr/local/cuda-$SMP_CUDA_VER/include/ \ + && cp ./cudnn-linux-x86_64-8.9.5.30_cuda11-archive/lib/* /usr/local/cuda-$SMP_CUDA_VER/lib/ \ + && rm -rf cudnn-linux-x86_64-8.9.5.30_cuda11-archive.tar.xz \ + && rm -rf cudnn-linux-x86_64-8.9.5.30_cuda11-archive/ + +# cuDNN installation for TransformerEngine installation for cuda12.1 +tar xf cudnn-linux-x86_64-8.9.7.29_cuda12-archive.tar.xz \ + && rm -rf /usr/local/cuda-$SMP_CUDA_VER/include/cudnn* /usr/local/cuda-$SMP_CUDA_VER/lib/cudnn* \ + && cp ./cudnn-linux-x86_64-8.9.7.29_cuda12-archive/include/* /usr/local/cuda-$SMP_CUDA_VER/include/ \ + && cp ./cudnn-linux-x86_64-8.9.7.29_cuda12-archive/lib/* /usr/local/cuda-$SMP_CUDA_VER/lib/ \ + && rm -rf cudnn-linux-x86_64-8.9.7.29_cuda12-archive.tar.xz \ + && rm -rf cudnn-linux-x86_64-8.9.7.29_cuda12-archive/ +``` +### TransformerEngine installation +``` +# TransformerEngine installation +export CUDA_HOME=/usr/local/cuda-$SMP_CUDA_VER +export CUDNN_PATH=/usr/local/cuda-$SMP_CUDA_VER/lib +export CUDNN_LIBRARY=/usr/local/cuda-$SMP_CUDA_VER/lib +export CUDNN_INCLUDE_DIR=/usr/local/cuda-$SMP_CUDA_VER/include +export PATH=/usr/local/cuda-$SMP_CUDA_VER/bin:$PATH +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-$SMP_CUDA_VER/lib + +pip install --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@v1.0 +``` ## User Guide +1. **Launching a job with synthetic data on 16 nodes** -1. 
Launching a job with synthetic data on 16 nodes. The default config in the script launches a 7B GPT NeoX model with synthetic data. +The default config in the script launches a 7B GPT NeoX model with synthetic data. ``` +source /fsx/PATH/TO/CONDA/bin/activate conda activate /PATH/TO/CONDA/ENV sbatch -N 16 conda_launch.sh - -# or - -sbatch -N 16 conda_launch.sh /PATH/TO/CONDA/ENV ``` -2. Changing arguments taken by the script. +2. **Changing arguments taken by the script** + `model.sh` takes certain arguments from the launch script, and uses them to pass args to the training script. You can refer to `model.sh` if those are the arguments you would like to change. For example, it takes the model size and sets the appropriate hidden_width,num_layers etc for the training script. If model.sh doesn't take the argument but is taken by train_lib (arguments.py), you can still pass it to model.sh and the script will forward the arg. This is how the above script passes `--use_synthetic_data 1`. -3. To run with your own data +3. **To run with your own data** + With the current dataloader in the script data needs to be prepared as json or json.gz (needs the arg `--zipped_data 1`) files, where each file has a json line with input_ids and attention_mask in them. Please refer to data_pipeline.py for more. You can always replace with your own dataloader. ``` # 2a. modify the conda_launch.sh script with path to data @@ -45,7 +114,8 @@ With the current dataloader in the script data needs to be prepared as json or j sbatch -N 16 conda_launch.sh /PATH/TO/CONDA/ENV ``` -4. Running a convergence job or experiment +4. **Running a convergence job or experiment** + We have put together an example of a convergence script using the above referenced `scripts/model.sh` script. The script sets the model type, size, checkpointing directory, tensorboard directory for metrics, and other hyperparameters. This is a slurm script, used with sbatch similar to above. 
``` @@ -56,15 +126,17 @@ or sbatch -N 64 --job-name neox_7b_4M_trial1 convergence_jobs/neox_7b/neox_7b_4Mtokens.sh ``` -5. Resuming convergence job from a checkpoint -Modify the --resume_from_checkpoint arg with the path of the checkpoint. Then the job is started same as before. +5. **Resuming convergence job from a checkpoint** + +Modify the `--resume_from_checkpoint` arg with the path of the checkpoint. Then the job is started same as before. ``` sbatch -N 64 convergence_jobs/neox_7b/neox_7b_4Mtokens.sh ``` -6. Running a finetuning job or experiment -In order to run a finetune experiment `--finetune 1` needs to be set. Either pretrained model name `--pretrained_model_name ` arg or a checkpoint file name `--pretrained_checkpoint_file` arg needs to be provided. +6. **Running a finetuning job or experiment** + +In order to run a finetune experiment `--finetune 1` needs to be set. Either pretrained model name `--pretrained_model_name` arg or a checkpoint file name `--pretrained_checkpoint_file` arg needs to be provided. -If `--pretrained_model_name ` is provided pretrained model config will be used for finetuning. If `--pretrained_model_name` is provided `--finetune_checkpoint_load_dir` also needs to be provided. +If `--pretrained_model_name` is provided pretrained model config will be used for finetuning. If `--pretrained_model_name` is provided `--finetune_checkpoint_load_dir` also needs to be provided. If `--finetune 1` is set together with `--resume_from_checkpoint`, training will resume from the provided checkpoint. 
diff --git a/3.test_cases/11.modelparallel/checkpoints.py b/3.test_cases/11.modelparallel/checkpoints.py index 296b8fc2..9de56b6f 100644 --- a/3.test_cases/11.modelparallel/checkpoints.py +++ b/3.test_cases/11.modelparallel/checkpoints.py @@ -6,7 +6,7 @@ import time import warnings from enum import Enum, auto -from typing import Any, Optional +from typing import Any, Dict, Optional import numpy @@ -78,6 +78,7 @@ def compute_stats_of_metric(metric: float, key: str, group: Optional[Any] = None def is_action_rank(global_rank): from torch.sagemaker import state + return state.ranker.get_rep_rank(global_rank) == 0 @@ -182,6 +183,7 @@ def _save_sharded( # pylint: disable=too-many-arguments def _save_full( # pylint: disable=too-many-arguments model, save_dir: str, + user_content: Dict, ): """Save FSDP checkpoint: Without process groups.""" if dist.get_rank() == 0: @@ -198,7 +200,7 @@ def _save_full( # pylint: disable=too-many-arguments os.makedirs(save_dir, exist_ok=True) # this name is needed for HF from_pretrained API to work fine torch.save(state_dict, os.path.join(save_dir, "pytorch_model.bin")) - model.config.save_pretrained(save_dir) + user_content["model_config"].save_pretrained(save_dir) dist.barrier() @@ -249,6 +251,7 @@ def save_checkpoint( # pylint: disable=too-many-arguments,too-many-locals ): """Export checkpoint.""" from torch.sagemaker import state + # seeing a NCCL crash during broadcast in checkpointing sometimes # seems like that happens when cached memory usage is at the limit # so clearing cache @@ -271,16 +274,24 @@ def save_checkpoint( # pylint: disable=too-many-arguments,too-many-locals if checkpoint_type == CheckpointingMethod.SHARDED: if tensor_parallel_degree > 1: save_dir = os.path.join(save_dir, f"tp{tensor_parallel_degree}-{state.tp_rank}") - _save_sharded(model, optimizer, scheduler, user_content, save_dir, checkpointing_pg_metadata) + _save_sharded( + model, optimizer, scheduler, user_content, save_dir, checkpointing_pg_metadata + ) elif 
checkpoint_type == CheckpointingMethod.LOCAL: if tensor_parallel_degree > 1: raise NotImplementedError("Local checkpointing unsupported with tensor parallelism") _save_local(model, optimizer, scheduler, user_content, save_dir) elif checkpoint_type == CheckpointingMethod.FULL: - _save_full(model, save_dir) + _save_full(model, save_dir, user_content) elif checkpoint_type == CheckpointingMethod.USE_PG_WITH_UTIL: _save_with_util( - model, optimizer, scheduler, user_content, sharding_strategy, save_dir, checkpointing_pg_metadata + model, + optimizer, + scheduler, + user_content, + sharding_strategy, + save_dir, + checkpointing_pg_metadata, ) ckpt_time = time.process_time() - ckpt_start dist.barrier() @@ -457,6 +468,7 @@ def load_checkpoint( ): """Load checkpoint.""" from torch.sagemaker import state + if dist.get_rank() == 0: logger.info("Loading checkpoint from %s ...", checkpoint_dir) @@ -466,12 +478,21 @@ def load_checkpoint( if checkpoint_type == CheckpointingMethod.USE_PG_WITH_UTIL: loaded = _load_with_util( - model, optimizer, scheduler, checkpoint_dir, sharding_strategy, checkpointing_pg_metadata + model, + optimizer, + scheduler, + checkpoint_dir, + sharding_strategy, + checkpointing_pg_metadata, ) elif checkpoint_type == CheckpointingMethod.SHARDED: if tensor_parallel_degree > 1: - checkpoint_dir = os.path.join(checkpoint_dir, f"tp{tensor_parallel_degree}-{state.tp_rank}") - loaded = _load_sharded(model, optimizer, scheduler, checkpoint_dir, checkpointing_pg_metadata) + checkpoint_dir = os.path.join( + checkpoint_dir, f"tp{tensor_parallel_degree}-{state.tp_rank}" + ) + loaded = _load_sharded( + model, optimizer, scheduler, checkpoint_dir, checkpointing_pg_metadata + ) elif checkpoint_type == CheckpointingMethod.LOCAL: if tensor_parallel_degree > 1: raise NotImplementedError("Local checkpointing unsupported with tensor parallelism") diff --git a/3.test_cases/11.modelparallel/conda_launch.sh b/3.test_cases/11.modelparallel/conda_launch.sh index 000e75f9..1f3b105d 
100644 --- a/3.test_cases/11.modelparallel/conda_launch.sh +++ b/3.test_cases/11.modelparallel/conda_launch.sh @@ -29,6 +29,8 @@ SCRIPT_ARGS+="--model_type gpt_neox --model_size 7b " # SCRIPT_ARGS+="--max_steps 10 " # SCRIPT_ARGS+="--train_batch_size 1 " -HOSTFILE=hosts_${SLURM_JOB_ID} +echo $CONDA_ENV_PATH + +HOSTFILE=hosts/hosts_${SLURM_JOB_ID} scontrol show hostnames | sort > $HOSTFILE -srun -l -D `pwd` conda run -p $CONDA_ENV_PATH --no-capture-output $SHELL_SCRIPT --hostfile $HOSTFILE $SCRIPT_ARGS +srun -l -D `pwd` $SHELL_SCRIPT --hostfile $HOSTFILE $SCRIPT_ARGS diff --git a/3.test_cases/11.modelparallel/data_utils.py b/3.test_cases/11.modelparallel/data_utils.py deleted file mode 100644 index 79f41a2d..00000000 --- a/3.test_cases/11.modelparallel/data_utils.py +++ /dev/null @@ -1,10 +0,0 @@ -_S3_PREFIX = "s3://" - - -def is_s3_source(src): - return src.startswith(_S3_PREFIX) - - -def parse_s3_address(address): - address = address[len(_S3_PREFIX) :] - return address.split("/", 1) diff --git a/3.test_cases/11.modelparallel/hosts/README.md b/3.test_cases/11.modelparallel/hosts/README.md new file mode 100644 index 00000000..f403ddb1 --- /dev/null +++ b/3.test_cases/11.modelparallel/hosts/README.md @@ -0,0 +1 @@ +## Placeholder for hostfiles diff --git a/3.test_cases/11.modelparallel/logging_utils.py b/3.test_cases/11.modelparallel/logging_utils.py new file mode 100644 index 00000000..f61a9532 --- /dev/null +++ b/3.test_cases/11.modelparallel/logging_utils.py @@ -0,0 +1,129 @@ +import logging +import os +from typing import Dict, Optional + +import numpy as np +import torch.distributed as dist + +_logger = None + + +def create_args_table(args: Dict) -> str: + table_str = "" + table_header = "|" + "#" + "|" + "Arguments" + "|" + "Value" + "|" + "\n" + separator = "|-----" * 3 + '|' + "\n" + table_str += table_header + separator + for idx, (key, col) in enumerate(sorted(args.items())): + table_row = f"| {idx} | {key} | {col} |\n" + table_str += table_row + return 
table_str + + +def get_logger(): + global _logger + if _logger is None: + logging.getLogger("torch.distributed.checkpoint._dedup_tensors").setLevel(logging.ERROR) + logging.getLogger("torch.distributed.distributed_c10d").setLevel(logging.ERROR) + _logger = logging.getLogger(__name__) + _logger.setLevel(logging.INFO) + _logger.handlers = [] + ch = logging.StreamHandler() + formatter = logging.Formatter( + "%(asctime)s %(levelname).1s " "[%(filename)s:%(lineno)d] %(message)s", + "%Y-%m-%d %H:%M:%S", + ) + ch.setFormatter(formatter) + _logger.addHandler(ch) + _logger.propagate = False + return _logger + + +def show_env_vars(rank: Optional[int] = 0): + """Show env vars.""" + my_rank = dist.get_rank() + + env_var = os.environ + if rank is None or my_rank == rank: + _logger.info("Env variables (len = %d):", len(env_var)) + + count = 0 + for key, value in sorted(env_var.items()): + _logger.info( + "[%03d] env [%03d/%03d] %-20s: `%s`", my_rank, count, len(env_var), key, value + ) + count += 1 + + keys = ( + "HOSTNAME", + "SLURM_PROCID", + ) + values = tuple(str(env_var.get(key)) for key in keys) + if my_rank % 8 == 0: # Print from each node exactly once. 
+ _logger.info("[%03d] env from all nodes `%s`: `%s`", my_rank, keys, values) + + +def write_metrics_train_step( + writers, display_step, loss_scalar, throughput, tflops_per_gpu, current_lr, grad_norm +): + for writer in writers: + writer.add_scalar("Loss/train", loss_scalar, display_step) + writer.add_scalar("Perf/SeqPerSec", throughput, display_step) + writer.add_scalar("Perf/ModelTFLOPs", tflops_per_gpu, display_step) + writer.add_scalar("LR/learning_rate", current_lr, display_step) + writer.add_scalar("Norms/grad_norm", grad_norm, display_step) + + +def log_train_metrics( + args, + total_steps, + display_step, + loss_scalar, + throughput, + tflops_per_gpu, + current_lr, + grad_norm, + throughputs, + num_params, + dp_size, + batch_seqlen, +): + _logger.info( + "Batch %d Loss: %s, Speed: %.2f samples/sec, Model TFLOPS/GPU: %.2f, lr: %.6f, gradnorm: %.4f", # pylint: disable=line-too-long + display_step, + loss_scalar, + throughput, + tflops_per_gpu, + current_lr, + grad_norm, + ) + + # Compute average throughput and tflops after 30 steps to remove + # high variance in initial steps + if len(throughputs) > 30 and not total_steps % args.logging_freq_for_avg: + avg_throughput = np.average(throughputs[30:]) + from train_utils import compute_tflops + + avg_tflops = compute_tflops(avg_throughput, num_params, dp_size, batch_seqlen) + _logger.info( + "Batch %d Running Avg Speed: %.2f samples/sec, Running Avg Model TFLOPS/GPU: %.2f", # pylint: disable=line-too-long + display_step, + avg_throughput, + avg_tflops, + ) + + +def log_and_write_eval_metrics(writers, display_step, val_loss, val_ppl): + for writer in writers: + writer.add_scalar("Loss/val", val_loss, display_step) + writer.add_scalar("Loss/perplexity", val_ppl, display_step) + + _logger.info( + "Batch %d Validation loss: %s", + display_step, + val_loss, + ) + _logger.info( + "Batch %d Validation perplexity: %s", + display_step, + val_ppl, + ) diff --git a/3.test_cases/11.modelparallel/scripts/model.sh 
b/3.test_cases/11.modelparallel/scripts/model.sh index bdd8117f..83b4a031 100755 --- a/3.test_cases/11.modelparallel/scripts/model.sh +++ b/3.test_cases/11.modelparallel/scripts/model.sh @@ -125,7 +125,7 @@ fi $TORCH_CMD \ --rdzv_endpoint=$MASTER_ADDR:29400 --rdzv_id=100 --rdzv_backend=c10d \ - train.py \ + train_external.py \ --train_batch_size 2 \ --max_steps 100 \ --checkpoint_freq 200 \ diff --git a/3.test_cases/11.modelparallel/train.py b/3.test_cases/11.modelparallel/train.py deleted file mode 100644 index 043afb6b..00000000 --- a/3.test_cases/11.modelparallel/train.py +++ /dev/null @@ -1,21 +0,0 @@ -""" -Internal Train.py. - -Duplicate of train_external.py with the dummy SageMaker environment enabled. -""" -# Set dummy SageMaker env var if not set to pass guardrail -# for Rubik and Herring cluster scripts. -from sm_env_utils import enable_dummy_sm_env -enable_dummy_sm_env() # needs to be called before torch sagemaker is imported -import train_lib -from arguments import parse_args - - -def main(): - """Main function to train GPT.""" - args, _ = parse_args() - train_lib.main(args) - - -if __name__ == "__main__": - main() diff --git a/3.test_cases/11.modelparallel/train_external.py b/3.test_cases/11.modelparallel/train_external.py index 09e5207b..e7b6167d 100644 --- a/3.test_cases/11.modelparallel/train_external.py +++ b/3.test_cases/11.modelparallel/train_external.py @@ -5,7 +5,7 @@ def main(): - """Main function to train GPT.""" + """Main function to train.""" args, _ = parse_args() train_lib.main(args) diff --git a/3.test_cases/11.modelparallel/train_lib.py b/3.test_cases/11.modelparallel/train_lib.py index 9818d53f..d06a2f34 100644 --- a/3.test_cases/11.modelparallel/train_lib.py +++ b/3.test_cases/11.modelparallel/train_lib.py @@ -668,7 +668,7 @@ def main(args): model, None, None, - {}, + {"model_config": model_config}, None, args.model_dir if args.model_dir is not None else args.checkpoint_dir[0], "" if args.model_dir is not None else "model", From 
cffbd24ca91e7888f6e2970a4bca6a08f17dc111 Mon Sep 17 00:00:00 2001 From: rohithn1 <35045363+rohithn1@users.noreply.github.com> Date: Fri, 15 Dec 2023 16:49:16 -0800 Subject: [PATCH 259/648] Updates to README and addition of conda_env_setup script --- 3.test_cases/11.modelparallel/README.md | 27 +++++---- .../11.modelparallel/conda_env_setup.sh | 55 +++++++++++++++++++ 2 files changed, 68 insertions(+), 14 deletions(-) create mode 100644 3.test_cases/11.modelparallel/conda_env_setup.sh diff --git a/3.test_cases/11.modelparallel/README.md b/3.test_cases/11.modelparallel/README.md index 8dc19d4a..6d743fb2 100644 --- a/3.test_cases/11.modelparallel/README.md +++ b/3.test_cases/11.modelparallel/README.md @@ -1,4 +1,4 @@ -## Using Model Parallelism with Simple Llama 2 Training Job +## Using SageMaker Model Parallelism with Simple Llama 2 Training Job In this directory, we have example scripts for training with SMP Pytorch. We assume you have already setup a Hyperpod instance. Below we first describe the files in this directory, and then go over how to run some jobs. ### Files @@ -17,8 +17,18 @@ In this directory, we have example scripts for training with SMP Pytorch. We ass These scripts need to be put on a directory that can be accessed on all nodes, such as FSX. We also recommend setting all paths (for input data and checkpoints) as shared directories using FSX. +### cuDNN Download for cuda11.8 and cuda12.1 +We recommend that you install cuDNN for your desired cuda version using from the NVIDIA Developer page: https://developer.nvidia.com/cudnn. Once you visit the link you will need to: +1. Make a developer account. +2. Click on "Download cuDNN Library". +3. Agree to the terms. +4. Download the Local Installer for Linux x86_64 (Tar) for cuda11 or cuda12 (we recommend version 8.9.5 and will use that version in the example going forward). +4. Sync it with your cluster to the root directory. 
+ +Once you have the tar file downloaded you can run the following commands to finish the installation: + ### Conda Environment Setup -All commands should be run on a compute node. Also the cuda version should be decided here between versions 11.8 and 12.1. We recommend using Miniconda and installing it in `/fsx` so that it can be sourced on any node. Instructions here: https://docs.conda.io/projects/conda/en/latest/user-guide/install/linux.html +All commands below should be run on a compute node. You can run it as a script using ```awsome-distributed-training/3.test_cases/11.modelparallel/conda_env_setup.sh``` or manually run the script as individual commands which are listed below. Also, the cuda version should be decided here between versions 11.8 and 12.1. We recommend using Miniconda and installing it in `/fsx` so that it can be sourced on any node. Instructions here: https://docs.conda.io/projects/conda/en/latest/user-guide/install/linux.html ``` SMP_CUDA_VER=11.8 or 12.1 @@ -50,16 +60,7 @@ SMDDP_WHL="smdistributed_dataparallel-2.0.2-cp310-cp310-linux_x86_64.whl" \ && wget -q https://smdataparallel.s3.amazonaws.com/binary/pytorch/2.0.1/cu118/2023-12-07/${SMDDP_WHL} \ && pip install --force ${SMDDP_WHL} \ && rm ${SMDDP_WHL} -``` -### cuDNN Installation for cuda11.8 and cuda12.1 -We recommend that you install cuDNN for your desired cuda version using from the NVIDIA Developer page: https://developer.nvidia.com/cudnn. Once you visit the link you will need to: -1. Make a developer account. -2. Click on "Download cuDNN Library". -3. Agree to the terms. -4. Download the Local Installer for Linux x86_64 (Tar) for cuda11 or cuda12 (we recommend version 8.9.5 and will use that version in the example going forward). 
-Once you have the tar file downloaded you can run the following commands to finish the installation: -``` # cuDNN installation for TransformerEngine installation for cuda11.8 tar xf cudnn-linux-x86_64-8.9.5.30_cuda11-archive.tar.xz \ && rm -rf /usr/local/cuda-$SMP_CUDA_VER/include/cudnn* /usr/local/cuda-$SMP_CUDA_VER/lib/cudnn* \ @@ -75,9 +76,7 @@ tar xf cudnn-linux-x86_64-8.9.7.29_cuda12-archive.tar.xz \ && cp ./cudnn-linux-x86_64-8.9.7.29_cuda12-archive/lib/* /usr/local/cuda-$SMP_CUDA_VER/lib/ \ && rm -rf cudnn-linux-x86_64-8.9.7.29_cuda12-archive.tar.xz \ && rm -rf cudnn-linux-x86_64-8.9.7.29_cuda12-archive/ -``` -### TransformerEngine installation -``` + # TransformerEngine installation export CUDA_HOME=/usr/local/cuda-$SMP_CUDA_VER export CUDNN_PATH=/usr/local/cuda-$SMP_CUDA_VER/lib diff --git a/3.test_cases/11.modelparallel/conda_env_setup.sh b/3.test_cases/11.modelparallel/conda_env_setup.sh new file mode 100644 index 00000000..8e050080 --- /dev/null +++ b/3.test_cases/11.modelparallel/conda_env_setup.sh @@ -0,0 +1,55 @@ +SMP_CUDA_VER=11.8 or 12.1 + +source /fsx//miniconda3/bin/activate + +export ENV_PATH=/fsx//miniconda3/envs/ +conda create -p ${ENV_PATH} python=3.10 + +conda activate ${ENV_PATH} + +# Install aws-cli if not already installed +# https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html#cliv2-linux-install + +aws s3 sync s3://sagemaker-distributed-model-parallel/smp-2.0.0-pt-2.0.1/2023-12-11/smp-v2/ /tmp/local_smp_install_channel/ + +conda install pytorch="2.0.1=sm_py3.10_cuda${SMP_CUDA_VER}*" packaging --override-channels \ + -c file:///tmp/local_smp_install_channel/ \ + -c pytorch -c numba/label/dev \ + -c pytorch-nightly -c nvidia -c conda-forge + +# Install dependencies of the script as below +python -m pip install packaging transformers==4.31.0 accelerate ninja tensorboard h5py datasets \ + && python -m pip install expecttest hypothesis \ + && python -m pip install "flash-attn>=2.0.4" --no-build-isolation + +# 
Install SMDDP wheel (only run for cuda11.8) +SMDDP_WHL="smdistributed_dataparallel-2.0.2-cp310-cp310-linux_x86_64.whl" \ + && wget -q https://smdataparallel.s3.amazonaws.com/binary/pytorch/2.0.1/cu118/2023-12-07/${SMDDP_WHL} \ + && pip install --force ${SMDDP_WHL} \ + && rm ${SMDDP_WHL} + +# cuDNN installation for TransformerEngine installation for cuda11.8 +tar xf cudnn-linux-x86_64-8.9.5.30_cuda11-archive.tar.xz \ + && rm -rf /usr/local/cuda-$SMP_CUDA_VER/include/cudnn* /usr/local/cuda-$SMP_CUDA_VER/lib/cudnn* \ + && cp ./cudnn-linux-x86_64-8.9.5.30_cuda11-archive/include/* /usr/local/cuda-$SMP_CUDA_VER/include/ \ + && cp ./cudnn-linux-x86_64-8.9.5.30_cuda11-archive/lib/* /usr/local/cuda-$SMP_CUDA_VER/lib/ \ + && rm -rf cudnn-linux-x86_64-8.9.5.30_cuda11-archive.tar.xz \ + && rm -rf cudnn-linux-x86_64-8.9.5.30_cuda11-archive/ + +# cuDNN installation for TransformerEngine installation for cuda12.1 +tar xf cudnn-linux-x86_64-8.9.7.29_cuda12-archive.tar.xz \ + && rm -rf /usr/local/cuda-$SMP_CUDA_VER/include/cudnn* /usr/local/cuda-$SMP_CUDA_VER/lib/cudnn* \ + && cp ./cudnn-linux-x86_64-8.9.7.29_cuda12-archive/include/* /usr/local/cuda-$SMP_CUDA_VER/include/ \ + && cp ./cudnn-linux-x86_64-8.9.7.29_cuda12-archive/lib/* /usr/local/cuda-$SMP_CUDA_VER/lib/ \ + && rm -rf cudnn-linux-x86_64-8.9.7.29_cuda12-archive.tar.xz \ + && rm -rf cudnn-linux-x86_64-8.9.7.29_cuda12-archive/ + +# TransformerEngine installation +export CUDA_HOME=/usr/local/cuda-$SMP_CUDA_VER +export CUDNN_PATH=/usr/local/cuda-$SMP_CUDA_VER/lib +export CUDNN_LIBRARY=/usr/local/cuda-$SMP_CUDA_VER/lib +export CUDNN_INCLUDE_DIR=/usr/local/cuda-$SMP_CUDA_VER/include +export PATH=/usr/local/cuda-$SMP_CUDA_VER/bin:$PATH +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-$SMP_CUDA_VER/lib + +pip install --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@v1.0 \ No newline at end of file From 32e40b6a4fd7acdf251d3f30b0978b80b945de1e Mon Sep 17 00:00:00 2001 From: ruhanprasad Date: Sat, 
16 Dec 2023 01:19:53 +0000 Subject: [PATCH 260/648] Add more info about SMP library in README.md --- 3.test_cases/11.modelparallel/README.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/3.test_cases/11.modelparallel/README.md b/3.test_cases/11.modelparallel/README.md index 6d743fb2..eb096c25 100644 --- a/3.test_cases/11.modelparallel/README.md +++ b/3.test_cases/11.modelparallel/README.md @@ -1,4 +1,8 @@ ## Using SageMaker Model Parallelism with Simple Llama 2 Training Job +The Amazon SageMaker model parallelism library (SMP) is a capability of SageMaker that enables high performance and optimized large scale training on SageMaker accelerate compute instances. Its core features include techniques and optimizations to accelerate and simplify large model training, such as hybrid sharded data parallelism, tensor parallelism, activation checkpointing, and activation offloading. You can use SMP to accelerate the training and fine-tuning of large language models (LLMs), large vision models (LVMs), and foundation models (FMs) with hundreds of billions of parameters. + +The latest release of Amazon SageMaker model parallelism (SMP v2) aligns the library’s APIs and methods with open source PyTorch Fully Sharded Data Parallelism (FSDP), allowing users to easily enable SMP’s performance optimizations with minimal code change. Now, you can achieve state-of-the-art large model training performance on SageMaker in minutes by migrating your existing FSDP training scripts to SMP. + In this directory, we have example scripts for training with SMP Pytorch. We assume you have already setup a Hyperpod instance. Below we first describe the files in this directory, and then go over how to run some jobs. ### Files @@ -115,21 +119,21 @@ sbatch -N 16 conda_launch.sh /PATH/TO/CONDA/ENV 4. **Running a convergence job or experiment** -We have put together an example of a convergence script using the above referenced `scripts/model.sh` script. 
The script sets the model type, size, checkpointing directory, tensorboard directory for metrics, and other hyperparameters. This is a slurm script, used with sbatch similar to above. +We have put together an example of a convergence script using the above referenced `scripts/model.sh` script. The script sets the model type, size, checkpointing directory, tensorboard directory for metrics, and other hyperparameters. This is a slurm script, used with sbatch similar to above. Note that you will need to provide your own path to your dataset within the launch script below. ``` -sbatch -N 64 convergence_jobs/neox_7b/neox_7b_4Mtokens.sh +sbatch -N 16 convergence_jobs/neox_7b/neox_7b_4Mtokens.sh ``` or ``` -sbatch -N 64 --job-name neox_7b_4M_trial1 convergence_jobs/neox_7b/neox_7b_4Mtokens.sh +sbatch -N 16 --job-name neox_7b_4M_trial1 convergence_jobs/neox_7b/neox_7b_4Mtokens.sh ``` 5. **Resuming convergence job from a checkpoint** Modify the `--resume_from_checkpoint` arg with the path of the checkpoint. Then the job is started same as before. ``` -sbatch -N 64 convergence_jobs/neox_7b/neox_7b_4Mtokens.sh +sbatch -N 16 convergence_jobs/neox_7b/neox_7b_4Mtokens.sh ``` 6. 
**Running a finetuning job or experiment** From 05314631a5f37c48e0fde04fa2ec4ed6e863fb81 Mon Sep 17 00:00:00 2001 From: ruhanprasad Date: Sat, 16 Dec 2023 01:23:33 +0000 Subject: [PATCH 261/648] Add convergence jobs and instructions for how to set dataset path --- .../neox_7b/neox_7b_4Mtokens.sh | 59 +++++++++++++++++++ .../convergence_jobs/neox_7b/sync_logs.sh | 2 + convergence_jobs/neox_7b/neox_7b_4Mtokens.sh | 59 +++++++++++++++++++ convergence_jobs/neox_7b/sync_logs.sh | 2 + 4 files changed, 122 insertions(+) create mode 100644 3.test_cases/11.modelparallel/convergence_jobs/neox_7b/neox_7b_4Mtokens.sh create mode 100644 3.test_cases/11.modelparallel/convergence_jobs/neox_7b/sync_logs.sh create mode 100644 convergence_jobs/neox_7b/neox_7b_4Mtokens.sh create mode 100644 convergence_jobs/neox_7b/sync_logs.sh diff --git a/3.test_cases/11.modelparallel/convergence_jobs/neox_7b/neox_7b_4Mtokens.sh b/3.test_cases/11.modelparallel/convergence_jobs/neox_7b/neox_7b_4Mtokens.sh new file mode 100644 index 00000000..e53edc7e --- /dev/null +++ b/3.test_cases/11.modelparallel/convergence_jobs/neox_7b/neox_7b_4Mtokens.sh @@ -0,0 +1,59 @@ +#!/bin/bash +#SBATCH --output=logs/%x_%j.out # Redirects outputs to file in current_dir/logs +#SBATCH --error=logs/%x_%j.out # Redirects err to same file in current_dir/logs +#SBATCH --job-name=neox_7b + +# has to be shared dir +CONDA_ENV_PATH=${1:-"$CONDA_DEFAULT_ENV"} +SHELL_SCRIPT=${2:-"scripts/model.sh"} + +set -ex + +if [ -z $CONDA_ENV_PATH ]; then + echo "Conda env path needs to be passed. 
Exiting" + exit 1 +fi + +# To keep track of which job used which node for identifying node causing crash if any +HOSTFILE=hosts_${SLURM_JOB_ID} +scontrol show hostnames | sort > $HOSTFILE +NUM_NODES=$(cat $HOSTFILE | wc -l) + +## DATA +## CHANGE TO YOUR OWN CUSTOM DATASET PATH +SCRIPT_ARGS="--training_dir /fsx/datasets/train_ids_wsvocab_redo_2048_smaller " +SCRIPT_ARGS+="--test_dir /fsx/datasets/val_ids_wsvocab_2048 " + +## MODEL +model_type=gpt_neox +SCRIPT_ARGS+="--model_type $model_type --model_size 7b " + + +## BATCH SIZE +if [ $NUM_NODES -lt 16 ]; then + echo "Can't use 4M tokens with less than 16 nodes" + exit 1 +else + GLOBAL_BATCH_SIZE=4194304 +fi +max_context_width=2048 # seqlen +train_batch_size=$(python -c "print($GLOBAL_BATCH_SIZE//($NUM_NODES * 8 * $max_context_width))") + +if [ $train_batch_size -le 2 ]; then + SCRIPT_ARGS+="--activation_checkpointing 0 " +fi + +SCRIPT_ARGS+="--train_batch_size $train_batch_size " +SCRIPT_ARGS+="--val_batch_size $train_batch_size " +SCRIPT_ARGS+="--max_context_width $max_context_width " +SCRIPT_ARGS+="--max_steps 143000 " +SCRIPT_ARGS+="--validation_freq 200 " + +## ARTIFACTS +SCRIPT_ARGS+="--checkpoint_dir checkpoints/$SLURM_JOB_NAME/ " +SCRIPT_ARGS+="--tensorboard_dir tensorboard_logs/$SLURM_JOB_NAME/ " + +## RESUME +# SCRIPT_ARGS+="--resume_from_checkpoint checkpoints/$SLURM_JOB_NAME/$model_type-400steps " + +srun -l -D `pwd` conda run -p $CONDA_ENV_PATH --no-capture-output $SHELL_SCRIPT --hostfile $HOSTFILE $SCRIPT_ARGS diff --git a/3.test_cases/11.modelparallel/convergence_jobs/neox_7b/sync_logs.sh b/3.test_cases/11.modelparallel/convergence_jobs/neox_7b/sync_logs.sh new file mode 100644 index 00000000..50d0f556 --- /dev/null +++ b/3.test_cases/11.modelparallel/convergence_jobs/neox_7b/sync_logs.sh @@ -0,0 +1,2 @@ +aws s3 sync s3://rubik-convergence-jobs/logs/neox_7b_4Mtokens/ ./logs/ +# aws s3 sync s3://rubik-convergence-jobs/tensorboard/neox_7b_4Mtokens/ ./tensorboard/ diff --git 
a/convergence_jobs/neox_7b/neox_7b_4Mtokens.sh b/convergence_jobs/neox_7b/neox_7b_4Mtokens.sh new file mode 100644 index 00000000..e53edc7e --- /dev/null +++ b/convergence_jobs/neox_7b/neox_7b_4Mtokens.sh @@ -0,0 +1,59 @@ +#!/bin/bash +#SBATCH --output=logs/%x_%j.out # Redirects outputs to file in current_dir/logs +#SBATCH --error=logs/%x_%j.out # Redirects err to same file in current_dir/logs +#SBATCH --job-name=neox_7b + +# has to be shared dir +CONDA_ENV_PATH=${1:-"$CONDA_DEFAULT_ENV"} +SHELL_SCRIPT=${2:-"scripts/model.sh"} + +set -ex + +if [ -z $CONDA_ENV_PATH ]; then + echo "Conda env path needs to be passed. Exiting" + exit 1 +fi + +# To keep track of which job used which node for identifying node causing crash if any +HOSTFILE=hosts_${SLURM_JOB_ID} +scontrol show hostnames | sort > $HOSTFILE +NUM_NODES=$(cat $HOSTFILE | wc -l) + +## DATA +## CHANGE TO YOUR OWN CUSTOM DATASET PATH +SCRIPT_ARGS="--training_dir /fsx/datasets/train_ids_wsvocab_redo_2048_smaller " +SCRIPT_ARGS+="--test_dir /fsx/datasets/val_ids_wsvocab_2048 " + +## MODEL +model_type=gpt_neox +SCRIPT_ARGS+="--model_type $model_type --model_size 7b " + + +## BATCH SIZE +if [ $NUM_NODES -lt 16 ]; then + echo "Can't use 4M tokens with less than 16 nodes" + exit 1 +else + GLOBAL_BATCH_SIZE=4194304 +fi +max_context_width=2048 # seqlen +train_batch_size=$(python -c "print($GLOBAL_BATCH_SIZE//($NUM_NODES * 8 * $max_context_width))") + +if [ $train_batch_size -le 2 ]; then + SCRIPT_ARGS+="--activation_checkpointing 0 " +fi + +SCRIPT_ARGS+="--train_batch_size $train_batch_size " +SCRIPT_ARGS+="--val_batch_size $train_batch_size " +SCRIPT_ARGS+="--max_context_width $max_context_width " +SCRIPT_ARGS+="--max_steps 143000 " +SCRIPT_ARGS+="--validation_freq 200 " + +## ARTIFACTS +SCRIPT_ARGS+="--checkpoint_dir checkpoints/$SLURM_JOB_NAME/ " +SCRIPT_ARGS+="--tensorboard_dir tensorboard_logs/$SLURM_JOB_NAME/ " + +## RESUME +# SCRIPT_ARGS+="--resume_from_checkpoint checkpoints/$SLURM_JOB_NAME/$model_type-400steps 
" + +srun -l -D `pwd` conda run -p $CONDA_ENV_PATH --no-capture-output $SHELL_SCRIPT --hostfile $HOSTFILE $SCRIPT_ARGS diff --git a/convergence_jobs/neox_7b/sync_logs.sh b/convergence_jobs/neox_7b/sync_logs.sh new file mode 100644 index 00000000..50d0f556 --- /dev/null +++ b/convergence_jobs/neox_7b/sync_logs.sh @@ -0,0 +1,2 @@ +aws s3 sync s3://rubik-convergence-jobs/logs/neox_7b_4Mtokens/ ./logs/ +# aws s3 sync s3://rubik-convergence-jobs/tensorboard/neox_7b_4Mtokens/ ./tensorboard/ From b7f731aa7fd6d7d6b4250fd9fb9c4e09319828bd Mon Sep 17 00:00:00 2001 From: ruhanprasad Date: Sat, 16 Dec 2023 01:36:59 +0000 Subject: [PATCH 262/648] Add if condition for cudnn installation based on cuda version --- 3.test_cases/11.modelparallel/README.md | 37 ++++++++++--------- .../11.modelparallel/conda_env_setup.sh | 37 ++++++++++--------- 2 files changed, 40 insertions(+), 34 deletions(-) diff --git a/3.test_cases/11.modelparallel/README.md b/3.test_cases/11.modelparallel/README.md index eb096c25..3f90258e 100644 --- a/3.test_cases/11.modelparallel/README.md +++ b/3.test_cases/11.modelparallel/README.md @@ -32,10 +32,11 @@ We recommend that you install cuDNN for your desired cuda version using from the Once you have the tar file downloaded you can run the following commands to finish the installation: ### Conda Environment Setup -All commands below should be run on a compute node. You can run it as a script using ```awsome-distributed-training/3.test_cases/11.modelparallel/conda_env_setup.sh``` or manually run the script as individual commands which are listed below. Also, the cuda version should be decided here between versions 11.8 and 12.1. We recommend using Miniconda and installing it in `/fsx` so that it can be sourced on any node. Instructions here: https://docs.conda.io/projects/conda/en/latest/user-guide/install/linux.html +All commands below should be run on a compute node. 
You can run it as a script using ```awsome-distributed-training/3.test_cases/11.modelparallel/conda_env_setup.sh``` or manually run the script as individual commands which are listed below. Also, the cuda version should be decided here between versions 11.8 and 12.1. We recommend using Miniconda or Mamba and installing it in `/fsx` so that it can be sourced on any node. Instructions here: https://docs.conda.io/projects/conda/en/latest/user-guide/install/linux.html ``` -SMP_CUDA_VER=11.8 or 12.1 +# specify which CUDA version you are using +SMP_CUDA_VER=11.8 #or 12.1 source /fsx//miniconda3/bin/activate @@ -65,21 +66,23 @@ SMDDP_WHL="smdistributed_dataparallel-2.0.2-cp310-cp310-linux_x86_64.whl" \ && pip install --force ${SMDDP_WHL} \ && rm ${SMDDP_WHL} -# cuDNN installation for TransformerEngine installation for cuda11.8 -tar xf cudnn-linux-x86_64-8.9.5.30_cuda11-archive.tar.xz \ - && rm -rf /usr/local/cuda-$SMP_CUDA_VER/include/cudnn* /usr/local/cuda-$SMP_CUDA_VER/lib/cudnn* \ - && cp ./cudnn-linux-x86_64-8.9.5.30_cuda11-archive/include/* /usr/local/cuda-$SMP_CUDA_VER/include/ \ - && cp ./cudnn-linux-x86_64-8.9.5.30_cuda11-archive/lib/* /usr/local/cuda-$SMP_CUDA_VER/lib/ \ - && rm -rf cudnn-linux-x86_64-8.9.5.30_cuda11-archive.tar.xz \ - && rm -rf cudnn-linux-x86_64-8.9.5.30_cuda11-archive/ - -# cuDNN installation for TransformerEngine installation for cuda12.1 -tar xf cudnn-linux-x86_64-8.9.7.29_cuda12-archive.tar.xz \ - && rm -rf /usr/local/cuda-$SMP_CUDA_VER/include/cudnn* /usr/local/cuda-$SMP_CUDA_VER/lib/cudnn* \ - && cp ./cudnn-linux-x86_64-8.9.7.29_cuda12-archive/include/* /usr/local/cuda-$SMP_CUDA_VER/include/ \ - && cp ./cudnn-linux-x86_64-8.9.7.29_cuda12-archive/lib/* /usr/local/cuda-$SMP_CUDA_VER/lib/ \ - && rm -rf cudnn-linux-x86_64-8.9.7.29_cuda12-archive.tar.xz \ - && rm -rf cudnn-linux-x86_64-8.9.7.29_cuda12-archive/ +if [ $SMP_CUDA_VER == "11.8" ]; then + # cuDNN installation for TransformerEngine installation for cuda11.8 + tar xf 
cudnn-linux-x86_64-8.9.5.30_cuda11-archive.tar.xz \ + && rm -rf /usr/local/cuda-$SMP_CUDA_VER/include/cudnn* /usr/local/cuda-$SMP_CUDA_VER/lib/cudnn* \ + && cp ./cudnn-linux-x86_64-8.9.5.30_cuda11-archive/include/* /usr/local/cuda-$SMP_CUDA_VER/include/ \ + && cp ./cudnn-linux-x86_64-8.9.5.30_cuda11-archive/lib/* /usr/local/cuda-$SMP_CUDA_VER/lib/ \ + && rm -rf cudnn-linux-x86_64-8.9.5.30_cuda11-archive.tar.xz \ + && rm -rf cudnn-linux-x86_64-8.9.5.30_cuda11-archive/ +else + # cuDNN installation for TransformerEngine installation for cuda12.1 + tar xf cudnn-linux-x86_64-8.9.7.29_cuda12-archive.tar.xz \ + && rm -rf /usr/local/cuda-$SMP_CUDA_VER/include/cudnn* /usr/local/cuda-$SMP_CUDA_VER/lib/cudnn* \ + && cp ./cudnn-linux-x86_64-8.9.7.29_cuda12-archive/include/* /usr/local/cuda-$SMP_CUDA_VER/include/ \ + && cp ./cudnn-linux-x86_64-8.9.7.29_cuda12-archive/lib/* /usr/local/cuda-$SMP_CUDA_VER/lib/ \ + && rm -rf cudnn-linux-x86_64-8.9.7.29_cuda12-archive.tar.xz \ + && rm -rf cudnn-linux-x86_64-8.9.7.29_cuda12-archive/ +fi # TransformerEngine installation export CUDA_HOME=/usr/local/cuda-$SMP_CUDA_VER diff --git a/3.test_cases/11.modelparallel/conda_env_setup.sh b/3.test_cases/11.modelparallel/conda_env_setup.sh index 8e050080..b319a8ad 100644 --- a/3.test_cases/11.modelparallel/conda_env_setup.sh +++ b/3.test_cases/11.modelparallel/conda_env_setup.sh @@ -1,4 +1,5 @@ -SMP_CUDA_VER=11.8 or 12.1 +# specify which CUDA version you are using +SMP_CUDA_VER=11.8 #or 12.1 source /fsx//miniconda3/bin/activate @@ -28,21 +29,23 @@ SMDDP_WHL="smdistributed_dataparallel-2.0.2-cp310-cp310-linux_x86_64.whl" \ && pip install --force ${SMDDP_WHL} \ && rm ${SMDDP_WHL} -# cuDNN installation for TransformerEngine installation for cuda11.8 -tar xf cudnn-linux-x86_64-8.9.5.30_cuda11-archive.tar.xz \ - && rm -rf /usr/local/cuda-$SMP_CUDA_VER/include/cudnn* /usr/local/cuda-$SMP_CUDA_VER/lib/cudnn* \ - && cp ./cudnn-linux-x86_64-8.9.5.30_cuda11-archive/include/* 
/usr/local/cuda-$SMP_CUDA_VER/include/ \ - && cp ./cudnn-linux-x86_64-8.9.5.30_cuda11-archive/lib/* /usr/local/cuda-$SMP_CUDA_VER/lib/ \ - && rm -rf cudnn-linux-x86_64-8.9.5.30_cuda11-archive.tar.xz \ - && rm -rf cudnn-linux-x86_64-8.9.5.30_cuda11-archive/ - -# cuDNN installation for TransformerEngine installation for cuda12.1 -tar xf cudnn-linux-x86_64-8.9.7.29_cuda12-archive.tar.xz \ - && rm -rf /usr/local/cuda-$SMP_CUDA_VER/include/cudnn* /usr/local/cuda-$SMP_CUDA_VER/lib/cudnn* \ - && cp ./cudnn-linux-x86_64-8.9.7.29_cuda12-archive/include/* /usr/local/cuda-$SMP_CUDA_VER/include/ \ - && cp ./cudnn-linux-x86_64-8.9.7.29_cuda12-archive/lib/* /usr/local/cuda-$SMP_CUDA_VER/lib/ \ - && rm -rf cudnn-linux-x86_64-8.9.7.29_cuda12-archive.tar.xz \ - && rm -rf cudnn-linux-x86_64-8.9.7.29_cuda12-archive/ +if [ $SMP_CUDA_VER == "11.8" ]; then + # cuDNN installation for TransformerEngine installation for cuda11.8 + tar xf cudnn-linux-x86_64-8.9.5.30_cuda11-archive.tar.xz \ + && rm -rf /usr/local/cuda-$SMP_CUDA_VER/include/cudnn* /usr/local/cuda-$SMP_CUDA_VER/lib/cudnn* \ + && cp ./cudnn-linux-x86_64-8.9.5.30_cuda11-archive/include/* /usr/local/cuda-$SMP_CUDA_VER/include/ \ + && cp ./cudnn-linux-x86_64-8.9.5.30_cuda11-archive/lib/* /usr/local/cuda-$SMP_CUDA_VER/lib/ \ + && rm -rf cudnn-linux-x86_64-8.9.5.30_cuda11-archive.tar.xz \ + && rm -rf cudnn-linux-x86_64-8.9.5.30_cuda11-archive/ +else + # cuDNN installation for TransformerEngine installation for cuda12.1 + tar xf cudnn-linux-x86_64-8.9.7.29_cuda12-archive.tar.xz \ + && rm -rf /usr/local/cuda-$SMP_CUDA_VER/include/cudnn* /usr/local/cuda-$SMP_CUDA_VER/lib/cudnn* \ + && cp ./cudnn-linux-x86_64-8.9.7.29_cuda12-archive/include/* /usr/local/cuda-$SMP_CUDA_VER/include/ \ + && cp ./cudnn-linux-x86_64-8.9.7.29_cuda12-archive/lib/* /usr/local/cuda-$SMP_CUDA_VER/lib/ \ + && rm -rf cudnn-linux-x86_64-8.9.7.29_cuda12-archive.tar.xz \ + && rm -rf cudnn-linux-x86_64-8.9.7.29_cuda12-archive/ +fi # TransformerEngine installation 
export CUDA_HOME=/usr/local/cuda-$SMP_CUDA_VER @@ -52,4 +55,4 @@ export CUDNN_INCLUDE_DIR=/usr/local/cuda-$SMP_CUDA_VER/include export PATH=/usr/local/cuda-$SMP_CUDA_VER/bin:$PATH export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-$SMP_CUDA_VER/lib -pip install --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@v1.0 \ No newline at end of file +pip install --no-build-isolation git+https://github.com/NVIDIA/TransformerEngine.git@v1.0 From d6123290ad2b41afea093cd4458a9c0cc59d1d31 Mon Sep 17 00:00:00 2001 From: ruhanprasad Date: Sat, 16 Dec 2023 01:55:53 +0000 Subject: [PATCH 263/648] Minor indentation and README fix --- 3.test_cases/11.modelparallel/README.md | 2 +- 3.test_cases/11.modelparallel/checkpoints.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/3.test_cases/11.modelparallel/README.md b/3.test_cases/11.modelparallel/README.md index 3f90258e..8b29d3f9 100644 --- a/3.test_cases/11.modelparallel/README.md +++ b/3.test_cases/11.modelparallel/README.md @@ -1,4 +1,4 @@ -## Using SageMaker Model Parallelism with Simple Llama 2 Training Job +## Using SageMaker Model Parallelism with Simple GPT-Neox Training Job The Amazon SageMaker model parallelism library (SMP) is a capability of SageMaker that enables high performance and optimized large scale training on SageMaker accelerate compute instances. Its core features include techniques and optimizations to accelerate and simplify large model training, such as hybrid sharded data parallelism, tensor parallelism, activation checkpointing, and activation offloading. You can use SMP to accelerate the training and fine-tuning of large language models (LLMs), large vision models (LVMs), and foundation models (FMs) with hundreds of billions of parameters. 
The latest release of Amazon SageMaker model parallelism (SMP v2) aligns the library’s APIs and methods with open source PyTorch Fully Sharded Data Parallelism (FSDP), allowing users to easily enable SMP’s performance optimizations with minimal code change. Now, you can achieve state-of-the-art large model training performance on SageMaker in minutes by migrating your existing FSDP training scripts to SMP. diff --git a/3.test_cases/11.modelparallel/checkpoints.py b/3.test_cases/11.modelparallel/checkpoints.py index 9de56b6f..f76c537b 100644 --- a/3.test_cases/11.modelparallel/checkpoints.py +++ b/3.test_cases/11.modelparallel/checkpoints.py @@ -490,7 +490,7 @@ def load_checkpoint( checkpoint_dir = os.path.join( checkpoint_dir, f"tp{tensor_parallel_degree}-{state.tp_rank}" ) - loaded = _load_sharded( + loaded = _load_sharded( model, optimizer, scheduler, checkpoint_dir, checkpointing_pg_metadata ) elif checkpoint_type == CheckpointingMethod.LOCAL: From 954e26cd79e97d0ebae2e4b2af9957446b5706f5 Mon Sep 17 00:00:00 2001 From: ruhanprasad Date: Tue, 19 Dec 2023 04:14:12 +0000 Subject: [PATCH 264/648] Updated README.md --- 3.test_cases/11.modelparallel/README.md | 41 +++++++++++++++---------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/3.test_cases/11.modelparallel/README.md b/3.test_cases/11.modelparallel/README.md index 8b29d3f9..60ff9a91 100644 --- a/3.test_cases/11.modelparallel/README.md +++ b/3.test_cases/11.modelparallel/README.md @@ -1,38 +1,47 @@ ## Using SageMaker Model Parallelism with Simple GPT-Neox Training Job -The Amazon SageMaker model parallelism library (SMP) is a capability of SageMaker that enables high performance and optimized large scale training on SageMaker accelerate compute instances. Its core features include techniques and optimizations to accelerate and simplify large model training, such as hybrid sharded data parallelism, tensor parallelism, activation checkpointing, and activation offloading. 
You can use SMP to accelerate the training and fine-tuning of large language models (LLMs), large vision models (LVMs), and foundation models (FMs) with hundreds of billions of parameters. +The Amazon SageMaker model parallelism library (SMP) is a capability of SageMaker that enables high performance and optimized large scale training on SageMaker accelerated compute instances. Its core features are hybrid sharded data parallelism, tensor parallelism, activation checkpointing, and activation offloading. You can use SMP to accelerate the training and fine-tuning of large language models (LLMs), large vision models (LVMs), and foundation models (FMs) with hundreds of billions of parameters such as [Llama2](https://huggingface.co/docs/transformers/model_doc/llama2) and [GPT-NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox). -The latest release of Amazon SageMaker model parallelism (SMP v2) aligns the library’s APIs and methods with open source PyTorch Fully Sharded Data Parallelism (FSDP), allowing users to easily enable SMP’s performance optimizations with minimal code change. Now, you can achieve state-of-the-art large model training performance on SageMaker in minutes by migrating your existing FSDP training scripts to SMP. +The latest release of Amazon SageMaker model parallelism (SMP v2) aligns the library’s APIs and methods with open source PyTorch Fully Sharded Data Parallelism ([FSDP](https://pytorch.org/docs/stable/fsdp.html)), allowing users to easily enable SMP’s performance optimizations with minimal code change. Now, you can achieve state-of-the-art large model training performance on SageMaker in minutes by migrating your existing FSDP training scripts to SMP. In this directory, we have example scripts for training with SMP Pytorch. We assume you have already setup a Hyperpod instance. Below we first describe the files in this directory, and then go over how to run some jobs. 
### Files +**Training Scripts** - `train_lib.py` : Main training script -- `train.py` : Entrypoint to launch `train_lib.py` -- `scripts/model.sh` : Main script which passes the config and launches `train.py`. This is used by `conda_launch.sh` and scripts in convergence_jobs folder. If you want to define your own model configuration you might want to modify this. -- `arguments.py` : Parses arguments for the job. Please refer to this file for all the options the script supports. -- `checkpoints.py` : Handles saving and loading of checkpoints +- `train_utils.py`: Implements several key functions in the central training script for model initialization, activation checkpointing, and more. + +#### Launch Scripts +- `conda_launch.sh`: Slurm sbatch script which launches a job using the activated conda environment. It should be run on head-node, and it uses synthetic data by default allowing training to be tested easily. +- `scripts/model.sh`: Main script which passes the config and launches training. This is used by `conda_launch.sh` and scripts in `convergence_jobs` folder. If you want to define your own model configuration you might want to modify this. + +**Dataset and Dataloading Scripts** - `data/pipelines/data_pipeline.py`: Creates dataloaders for the job. Modify this file to load your own dataset. -- `data/utils.py`, `fsdp_utils.py`, `learning_rates.py`, `logging_utils.py`, `memory_tracker.py`, `train_utils.py` have utilities used by the main script. +- `data/utils.py`: Utility file to facilitate using datasets stored in AWS S3. + +**Miscellaneous Utility Scripts** +- `arguments.py`: Parses arguments for the job. Please refer to this file for all the options the script supports. 
+- `checkpoints.py`: Handles saving and loading of checkpoints +- `learning_rates.py`: Utility file for implementing learning rate annealing during training +- `logging_utils.py`: Implements several helper functions for logging key information during training such as loss, training throughput speeds, and environment variables +- `memory_tracker.py`: Implements functions for monitoring CPU and GPU memory usage -#### Launch scripts -- `conda_launch.sh` : This is a slurm script which launches a job using the activated conda environment. It expects to be run on the head node of the Slurm cluster. See below section for instructions. By default it runs with synthetic data to make it easy to test the scripts. ## Note on paths -These scripts need to be put on a directory that can be accessed on all nodes, such as FSX. -We also recommend setting all paths (for input data and checkpoints) as shared directories using FSX. +These scripts need to be put in a shared file system that can be accessed by all nodes, such as [FSx for Lustre](https://docs.aws.amazon.com/fsx/latest/LustreGuide/what-is.html). +We also recommend setting all paths for input data and checkpoints as shared directories using FSx for Lustre. ### cuDNN Download for cuda11.8 and cuda12.1 -We recommend that you install cuDNN for your desired cuda version using from the NVIDIA Developer page: https://developer.nvidia.com/cudnn. Once you visit the link you will need to: +We recommend that you install cuDNN for your desired cuda version using from the [NVIDIA Developer page](https://developer.nvidia.com/cudnn). Click on the link and: 1. Make a developer account. 2. Click on "Download cuDNN Library". 3. Agree to the terms. -4. Download the Local Installer for Linux x86_64 (Tar) for cuda11 or cuda12 (we recommend version 8.9.5 and will use that version in the example going forward). -4. Sync it with your cluster to the root directory. +4. 
Download the Local Installer for Linux x86_64 (Tar) for cuda11 or cuda12 (we will use version 8.9.5 in the example going forward). +4. Move the tar file from your local machine to your cluster root directory. -Once you have the tar file downloaded you can run the following commands to finish the installation: +The next section will walk through how to finish the cuDNN installation. ### Conda Environment Setup -All commands below should be run on a compute node. You can run it as a script using ```awsome-distributed-training/3.test_cases/11.modelparallel/conda_env_setup.sh``` or manually run the script as individual commands which are listed below. Also, the cuda version should be decided here between versions 11.8 and 12.1. We recommend using Miniconda or Mamba and installing it in `/fsx` so that it can be sourced on any node. Instructions here: https://docs.conda.io/projects/conda/en/latest/user-guide/install/linux.html +All commands below should be run on a compute node as some of the setup steps are compute intensive. You can run it as a script using `conda_env_setup.sh` or manually run the script as individual commands which are listed below. Also, the CUDA version should be decided here between versions 11.8 and 12.1. We recommend using Miniconda or Mamba and installing it on the shared file system, which in our example is FSx for Lustre mounted at `/fsx`. 
Instructions for conda installation can be found [here](https://docs.conda.io/projects/conda/en/latest/user-guide/install/linux.html) ``` # specify which CUDA version you are using From c0d42219c2dc85f1751a4a9f65d7328cd025044e Mon Sep 17 00:00:00 2001 From: ruhanprasad Date: Tue, 19 Dec 2023 16:48:29 +0000 Subject: [PATCH 265/648] SageMaker data parallel examples --- .../0.create_conda_env.sh | 27 ++ .../12.SM-dataparallel-FSDP/1.prepare_data.sh | 1 + .../2.run_training.sbatch | 10 + .../12.SM-dataparallel-FSDP/README.md | 46 +++ .../12.SM-dataparallel-FSDP/exec_torchrun.sh | 28 ++ .../prepare_dataset.py | 67 ++++ .../scripts/__pycache__/utils.cpython-310.pyc | Bin 0 -> 1521 bytes .../scripts/requirements.txt | 6 + .../12.SM-dataparallel-FSDP/scripts/train.py | 301 ++++++++++++++++++ .../12.SM-dataparallel-FSDP/scripts/utils.py | 59 ++++ .../0.create_conda_env.sh | 23 ++ .../1.run_training.sbatch | 9 + .../13.SM-dataparallel-deepspeed/README.md | 45 +++ .../code/__pycache__/utils.cpython-310.pyc | Bin 0 -> 1587 bytes .../code/dsconfig.json | 19 ++ .../code/requirements.txt | 7 + .../code/train.py | 215 +++++++++++++ .../code/utils.py | 38 +++ .../exec_torchrun.sh | 28 ++ 19 files changed, 929 insertions(+) create mode 100755 3.test_cases/12.SM-dataparallel-FSDP/0.create_conda_env.sh create mode 100755 3.test_cases/12.SM-dataparallel-FSDP/1.prepare_data.sh create mode 100644 3.test_cases/12.SM-dataparallel-FSDP/2.run_training.sbatch create mode 100644 3.test_cases/12.SM-dataparallel-FSDP/README.md create mode 100755 3.test_cases/12.SM-dataparallel-FSDP/exec_torchrun.sh create mode 100644 3.test_cases/12.SM-dataparallel-FSDP/prepare_dataset.py create mode 100644 3.test_cases/12.SM-dataparallel-FSDP/scripts/__pycache__/utils.cpython-310.pyc create mode 100755 3.test_cases/12.SM-dataparallel-FSDP/scripts/requirements.txt create mode 100644 3.test_cases/12.SM-dataparallel-FSDP/scripts/train.py create mode 100755 3.test_cases/12.SM-dataparallel-FSDP/scripts/utils.py 
create mode 100755 3.test_cases/13.SM-dataparallel-deepspeed/0.create_conda_env.sh create mode 100644 3.test_cases/13.SM-dataparallel-deepspeed/1.run_training.sbatch create mode 100644 3.test_cases/13.SM-dataparallel-deepspeed/README.md create mode 100644 3.test_cases/13.SM-dataparallel-deepspeed/code/__pycache__/utils.cpython-310.pyc create mode 100755 3.test_cases/13.SM-dataparallel-deepspeed/code/dsconfig.json create mode 100755 3.test_cases/13.SM-dataparallel-deepspeed/code/requirements.txt create mode 100755 3.test_cases/13.SM-dataparallel-deepspeed/code/train.py create mode 100755 3.test_cases/13.SM-dataparallel-deepspeed/code/utils.py create mode 100755 3.test_cases/13.SM-dataparallel-deepspeed/exec_torchrun.sh diff --git a/3.test_cases/12.SM-dataparallel-FSDP/0.create_conda_env.sh b/3.test_cases/12.SM-dataparallel-FSDP/0.create_conda_env.sh new file mode 100755 index 00000000..07a48835 --- /dev/null +++ b/3.test_cases/12.SM-dataparallel-FSDP/0.create_conda_env.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT-0 + +set -ex +MAMBA_VERSION=23.1.0-1 + +curl -L -o ./mambaforge.sh https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-x86_64.sh +chmod +x ./mambaforge.sh +./mambaforge.sh -b -p ./conda +rm ./mambaforge.sh +./conda/bin/mamba clean -afy + +source ./conda/bin/activate + +conda create -n smdataparallel python=3.10 +conda activate smdataparallel + +# Install pytorch and SM data parallelism library +conda install -y pytorch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 pytorch-cuda=11.8 -c pytorch -c nvidia +pip install https://smdataparallel.s3.amazonaws.com/binary/pytorch/2.0.1/cu118/2023-11-07/smdistributed_dataparallel-2.0.2-cp310-cp310-linux_x86_64.whl +pip install -r scripts/requirements.txt + + + + diff --git a/3.test_cases/12.SM-dataparallel-FSDP/1.prepare_data.sh b/3.test_cases/12.SM-dataparallel-FSDP/1.prepare_data.sh new file mode 100755 index 00000000..1689d0a7 --- /dev/null +++ b/3.test_cases/12.SM-dataparallel-FSDP/1.prepare_data.sh @@ -0,0 +1 @@ +python prepare_dataset.py diff --git a/3.test_cases/12.SM-dataparallel-FSDP/2.run_training.sbatch b/3.test_cases/12.SM-dataparallel-FSDP/2.run_training.sbatch new file mode 100644 index 00000000..9c0541d1 --- /dev/null +++ b/3.test_cases/12.SM-dataparallel-FSDP/2.run_training.sbatch @@ -0,0 +1,10 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT-0 + +#SBATCH --nodes=2 +#SBATCH --job-name=smddp-fsdp +#SBATCH --exclusive + +srun -l exec_torchrun.sh diff --git a/3.test_cases/12.SM-dataparallel-FSDP/README.md b/3.test_cases/12.SM-dataparallel-FSDP/README.md new file mode 100644 index 00000000..116a12f3 --- /dev/null +++ b/3.test_cases/12.SM-dataparallel-FSDP/README.md @@ -0,0 +1,46 @@ +# How to Use SageMaker Distributed Data Parallel Library (SMDDP) with PyTorch FSDP + +## What is SMDDP? 
+The SMDDP library provides fast GPU collective communication algorithms on P4d/P4de instance types and serves as a drop-in replacement for the Nvidia Collective Communications Library (NCCL). Specifically, SMDDP implements an optimized AllGather communication routine, which is the main source of GPU communication overhead in sharded data parallel training jobs. With just two lines of code change, you can enable the SMDDP Library's optimized AllGather algorithm in your PyTorch FSDP training jobs and speed up training by up to 20% compared to NCCL! This examples shows how you can use SMDDP when training the Falcon model with FSDP. + +## 0. Prerequisites +You will need a slurm cluster with an FSx for Lustre file system. See the sagemaker-hyperpod section in the [1.architectures](https://github.com/ruhanprasad/awsome-distributed-training/tree/main/1.architectures) folder for setup instructions. + +### Required Dependencies of SMDDP Library +* Python==3.10 +* CUDA==11.8 +* PyTorch==2.0.1 + +Additionally, SMDDP must be used on AWS P4d or P4de instances. This example also uses mamba as a package manager. Mamba is a drop-in replacement for conda and it is recommended over Miniconda or Anaconda with SMDDP (see Known Issues section for more details) + +## 1. Create Environment +1. On your cluster head node, navigate to your shared FSx filesystem, which should be located at `/fsx` +2. Clone this repo +``` +cd /fsx +git clone https://github.com/aws-samples/awsome-distributed-training/ +cd awsome-distributed-training/3.test_cases/12.SM-dataparallel-FSDP +``` +3. Run the `0.create_conda_env.sh` script. This will install [Mamba](https://github.com/mamba-org/mamba) and create an environment called `smdataparallel`. Since the environment is created on the shared FSx filesystem, all compute nodes will have access to it. Activate this environment via `conda activate smdataparallel`. + +## 2. 
Prepare Dataset +To run this example, we will use part of the [glue/sst2 dataset](https://huggingface.co/datasets/glue) from HuggingFace. To download and prepare a subset of this dataset for our example, run `./1.prepare_data.sh` + +## 3. Launch Training +To launch the distributed training job, run `sbatch 2.run_training.sbatch`. By default the number of nodes in the job is 2, but this can be changed in the `#SBATCH --nodes=...` argument in the sbatch script. + +Launching the job will create a log file in the current directory (`slurm-`) which you can tail to monitor the progress of the training job. You can also see the underlying launch script in `exec_torchrun.sh` and the training script in `scripts/train.py` + +This example only runs training for one iteration and exits immediately. You should see output similar to below +``` + 4%|▍ | 1/25 [00:04<01:43, 4.32s/it] +0: ******epoch=0: train_ppl=tensor(653844.6250, device='cuda:0') train_loss=tensor(13.3906, device='cuda:0')****** +0it [00:00, ?it/s] +0: *******epoch=0: eval_ppl=tensor(nan, device='cuda:0') eval_loss=tensor(nan, device='cuda:0')******* +0: Training done! +``` +## 4. Known Issues +When using SMDDP in your own conda environment, you may encounter the following error after importing SMDDP in your training script: ``version `GLIBCXX_3.4.30' not found`` + +If this occurs, firstly ensure that you are installing PyTorch via conda before pip installing SMDDP (i.e. install PyTorch through `conda install` before installing SMDDP in your environment). If this still does not resolve the error, please use [Mamba](https://github.com/mamba-org/mamba) as your package manager rather than Miniconda or Anaconda. Mamba is a drop-in replacement for conda with improvements in dependency resolution, and creating an environment with Mamba is known to resolve this issue. 
+ diff --git a/3.test_cases/12.SM-dataparallel-FSDP/exec_torchrun.sh b/3.test_cases/12.SM-dataparallel-FSDP/exec_torchrun.sh new file mode 100755 index 00000000..56f8ae41 --- /dev/null +++ b/3.test_cases/12.SM-dataparallel-FSDP/exec_torchrun.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT-0 + +export OMP_NUM_THREADS=1 +export GPUS_PER_NODE=8 +MASTER_NODE=$(scontrol show hostname | head -n 1) +export MASTER_ADDR=$(scontrol show node=$MASTER_NODE | awk -F= '/NodeAddr=/{print $2}' | awk '{print $1}') +export NNODES=$SLURM_NTASKS +export NODE_RANK=$SLURM_NODEID +export MASTER_PORT=9001 +export WORLD_SIZE_JOB=$SLURM_NTASKS +export DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT " + +echo "Launching torchrun..." +torchrun $DISTRIBUTED_ARGS \ + scripts/train.py \ + --gradient_checkpointing True \ + --bf16 True \ + --optimizer "adamw_torch" \ + --per_device_train_batch_size 1 \ + --epochs 1 \ + --max_steps 1 \ + --dataset_path "processed/data" \ + --fsdp "full_shard auto_wrap" + + diff --git a/3.test_cases/12.SM-dataparallel-FSDP/prepare_dataset.py b/3.test_cases/12.SM-dataparallel-FSDP/prepare_dataset.py new file mode 100644 index 00000000..762ffdcc --- /dev/null +++ b/3.test_cases/12.SM-dataparallel-FSDP/prepare_dataset.py @@ -0,0 +1,67 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT-0 + +model_id = "tiiuae/falcon-7b-instruct" +dataset_name = "glue" +dataset_config = "sst2" + +from datasets import load_dataset +from transformers import AutoTokenizer + +# Load Tokenizer + +tokenizer = AutoTokenizer.from_pretrained(model_id) + +# Load dataset from huggingface.co +dataset = load_dataset(dataset_name, dataset_config) + +# downsample dataset to 10k +dataset = dataset.shuffle(42) + +if "validation" not in dataset.keys(): + dataset["validation"] = load_dataset( + dataset_name, + split="train[:5%]" + ) + + dataset["train"] = load_dataset( + dataset_name, + split="train[5%:]" + ) + +from itertools import chain +from functools import partial + +def group_texts(examples,block_size = 2048): + # Concatenate all texts. + concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} + total_length = len(concatenated_examples[list(examples.keys())[0]]) + # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can + # customize this part to your needs. + if total_length >= block_size: + total_length = (total_length // block_size) * block_size + # Split by chunks of max_len. 
+ result = { + k: [t[i : i + block_size] for i in range(0, total_length, block_size)] + for k, t in concatenated_examples.items() + } + result["labels"] = result["input_ids"].copy() + return result + +column_names = dataset["train"].column_names +text_column_name = "text" if "text" in column_names else column_names[0] + +lm_dataset = dataset.map( + lambda sample: tokenizer(sample[text_column_name]), + batched=True, + remove_columns=list(column_names), + desc="Running tokenizer on dataset", +).map( + partial(group_texts, block_size=2048), + batched=True, +) + +training_input_path = f'processed/data/' +lm_dataset.save_to_disk(training_input_path) +print(f"Saved data to: {training_input_path}") + diff --git a/3.test_cases/12.SM-dataparallel-FSDP/scripts/__pycache__/utils.cpython-310.pyc b/3.test_cases/12.SM-dataparallel-FSDP/scripts/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b174ca46ac966b10e08167ab0697767217579792 GIT binary patch literal 1521 zcmZWp&2J+$6t_JenM{+m>O!@Ai3n*ASd3ljg5ublV~P%rR2(>B#&Yu@-hzwi0Ik=<^LV156`&pe0_`pYp77Xju= zc=U513^6Rw)V+zo8JQC7&Ytixf9ls~EP`pkd=~tQrXdSi3*Ie;$EY9u4Ia>GAKQ3D z4Ly;RaRuDFjL&H)Ou|5#%%l+1NHuFY?c22lZ%7Uu!J`=vgO+$nmR+X0}LlddY2Eq43+{aL!v)Qs;qbN9Xx|$o?;jgK<<5UPb zHolNbusWy;oM5*xd63IaZM6|hZ@ua!;I#32%iY<#kfQ#2wz1y`{zU3AM7rU_x&CQb zYOeIKP*hW<$|1ebGUEqqQ`bSAP@10&pA8JxCV_5pJv=-djK4i_UJI(I5L_G_k4GVGQQfX2n(*sHH=V&uR0aE#u)v8Sm+}k{o=teh2#J6he*G;06n*V zgvaUhTbG}H%|*dgFXLt*SDf;P+e!@yLv5=Fgyh{3+pdsnu8;dCWYcP%sA_}ZBXZB+eV)A9HtS* zRF=0.21 +bitsandbytes +einops +scipy diff --git a/3.test_cases/12.SM-dataparallel-FSDP/scripts/train.py b/3.test_cases/12.SM-dataparallel-FSDP/scripts/train.py new file mode 100644 index 00000000..020407dc --- /dev/null +++ b/3.test_cases/12.SM-dataparallel-FSDP/scripts/train.py @@ -0,0 +1,301 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT-0 + +import os +import argparse +import math +from transformers import ( + AutoModelForCausalLM, + AutoTokenizer, + set_seed, + get_scheduler, + SchedulerType, + FalconConfig +) + +from transformers.models.falcon.modeling_falcon import FalconDecoderLayer +from datasets import load_from_disk +import torch +import torch.distributed as dist +from utils import create_dataloaders,save_model +import time +from tqdm import tqdm + +from torch.distributed.fsdp import ( + FullyShardedDataParallel as FSDP, + MixedPrecision, + ShardingStrategy, + BackwardPrefetch, + CPUOffload, + +) +from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( + checkpoint_wrapper, + CheckpointImpl, + apply_activation_checkpointing) + +from torch.distributed.fsdp.wrap import ( + transformer_auto_wrap_policy, +) +import functools +try: + backend = "smddp" + import smdistributed.dataparallel.torch.torch_smddp +except ModuleNotFoundError: + backend = "nccl" +print(f"using backend: {backend}") + + +def parse_arge(): + """Parse the arguments.""" + parser = argparse.ArgumentParser() + # add model id and dataset path argument + parser.add_argument( + "--model_id", + type=str, + default="google/flan-t5-xl", + help="Model id to use for training.", + ) + parser.add_argument("--dataset_path", type=str, default="lm_dataset", help="Path to dataset.") + # add training hyperparameters for epochs, batch size, learning rate, and seed + parser.add_argument("--epochs", type=int, default=1, help="Number of epochs to train for.") + parser.add_argument("--max_steps", type=int, default=None, help="Number of epochs to train for.") + parser.add_argument( + "--per_device_train_batch_size", + type=int, + default=1, + help="Batch size to use for training.", + ) + parser.add_argument("--lr", type=float, default=3e-5, help="Learning rate to use for training.") + parser.add_argument("--optimizer", type=str, default="adamw_hf", help="Learning rate to use for training.") + 
parser.add_argument("--seed", type=int, default=42, help="Seed to use for training.") + parser.add_argument("--num_train_epochs", type=int, default=1, help="Total number of training epochs to perform.") + + parser.add_argument( + "--gradient_checkpointing", + type=bool, + default=True, + help="Path to deepspeed config file.", + ) + parser.add_argument( + "--bf16", + type=bool, + default=True if torch.cuda.get_device_capability()[0] == 8 else False, + help="Whether to use bf16.", + ) + parser.add_argument("--fsdp", type=str, default=None, help="Whether to use fsdp.") + parser.add_argument( + "--fsdp_transformer_layer_cls_to_wrap", + type=str, + default=None, + help="Which transformer layer to wrap with fsdp.", + ) + parser.add_argument( + "--max_train_steps", + type=int, + default=None, + help="Total number of training steps to perform. If provided, overrides num_train_epochs.", + ) + parser.add_argument( + "--learning_rate", + type=float, + default=5e-5, + help="Initial learning rate (after the potential warmup period) to use.", + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.", + ) + parser.add_argument( + "--lr_scheduler_type", + type=SchedulerType, + default="linear", + help="The scheduler type to use.", + choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], + ) + parser.add_argument( + "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." 
+ ) + parser.add_argument("--limit_all_gathers", type=bool, default=False) + parser.add_argument("--forward_prefetch", type=bool, default=False) + parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") + parser.add_argument("--model_dir",type=str,default="/opt/ml/model") + parser.add_argument("--cache_dir",type=str,default=None) + + args = parser.parse_known_args() + return args + + +def training_function(args): + # set seed + set_seed(args.seed) + + dataset = load_from_disk(args.dataset_path) + # load model from the hub + config = FalconConfig(vocab_size=65024, + use_cache=True, + parallel_attn=True, + num_hidden_layers=16, + num_attention_heads=71, + new_decoder_architecture=False, + multi_query=True, + layer_norm_epsilon=1e-05, + initializer_range=0.02, + hidden_size=2272, + hidden_dropout=0.0, + eos_token_id=11, + bos_token_id=11, + bias=False) + + + model = AutoModelForCausalLM.from_config(config) + + + tokenizer = AutoTokenizer.from_pretrained(args.model_id) + + train_dataset = dataset["train"] + eval_dataset = dataset["validation"] + + train_dataloader,eval_dataloader = create_dataloaders(train_dataset,eval_dataset,args.rank,args.world_size,args.seed,args.per_device_train_batch_size,args.per_device_train_batch_size) + + auto_wrap_policy = functools.partial( + transformer_auto_wrap_policy, + transformer_layer_cls={ + FalconDecoderLayer + }, + ) + + torch.cuda.set_device(args.local_rank) + + dtype = torch.bfloat16 + + mixed_precision_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype) + + model = FSDP( + model, + auto_wrap_policy=auto_wrap_policy, + mixed_precision=mixed_precision_policy, + sharding_strategy=ShardingStrategy.FULL_SHARD, + backward_prefetch=BackwardPrefetch.BACKWARD_PRE, + forward_prefetch=args.forward_prefetch, + limit_all_gathers=args.limit_all_gathers, + device_id=torch.cuda.current_device(), + ) + + non_reentrant_wrapper = functools.partial(checkpoint_wrapper, 
offload_to_cpu=True, + checkpoint_impl=CheckpointImpl.NO_REENTRANT) + check_fn_gpt = lambda submodule: isinstance(submodule, FalconDecoderLayer) + apply_activation_checkpointing(model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn_gpt) + + # Optimizer + # Split weights in two groups, one with weight decay and the other not. + no_decay = ["bias", "LayerNorm.weight", "layer_norm.weight"] + optimizer_grouped_parameters = [ + { + "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], + "weight_decay": args.weight_decay, + }, + { + "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], + "weight_decay": 0.0, + }, + ] + + optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) + + # Scheduler and math around the number of training steps. + overrode_max_train_steps = False + num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) + if args.rank==0: + print(f"Number of update steps per epoch {num_update_steps_per_epoch}") + if args.max_train_steps is None: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + overrode_max_train_steps = True + + lr_scheduler = get_scheduler( + name=args.lr_scheduler_type, + optimizer=optimizer, + num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, + ) + + start = time.time() + device = torch.device(f"cuda:{args.local_rank}") + + for epoch in range(args.num_train_epochs): + + model.train() + total_steps=0 + fsdp_loss = torch.zeros(2).to(args.local_rank) + + for _, batch in enumerate(tqdm(train_dataloader,disable=not (args.rank==0))): + + batch = {k: v.to(device) for k, v in batch.items()} + output = model(**batch) + loss = output["loss"] + loss.backward() + fsdp_loss[0] += loss.item() + fsdp_loss[1] += len(batch["input_ids"]) + + optimizer.step() + lr_scheduler.step() 
+ optimizer.zero_grad() + total_steps += 1 + if args.max_steps is not None and total_steps > args.max_steps: + break + + + torch.distributed.all_reduce(fsdp_loss, op=torch.distributed.ReduceOp.SUM) + train_loss = fsdp_loss[0] / fsdp_loss[1] + train_ppl = torch.exp(train_loss) + + if args.rank==0: + print(f"******{epoch=}: {train_ppl=} {train_loss=}******") + + + model.eval() + eval_loss = 0 + fsdp_eval_loss = torch.zeros(2).to(args.local_rank) + for steps, batch in enumerate(tqdm(eval_dataloader,disable=not (args.rank==0))): + batch = {k: v.to(device) for k, v in batch.items()} + with torch.no_grad(): + outputs = model(**batch) + loss = outputs["loss"] + + fsdp_eval_loss[0] += loss.item() + fsdp_eval_loss[1] += len(batch["input_ids"]) + if args.max_steps is not None and steps > args.max_steps: + break + + torch.distributed.all_reduce(fsdp_eval_loss, op=torch.distributed.ReduceOp.SUM) + eval_loss = fsdp_eval_loss[0] / fsdp_eval_loss[1] + eval_ppl = torch.exp(eval_loss) + + if args.rank==0: + print(f"*******{epoch=}: {eval_ppl=} {eval_loss=}*******") + + if args.max_steps is not None and total_steps > args.max_steps: + break + + #save_model(model,tokenizer,args.model_dir,args.rank) + if args.rank == 0: + print("Training done!") + dist.barrier() + + + +import torch.distributed as dist +def main(): + dist.init_process_group(backend="smddp") + args, _ = parse_arge() + args.local_rank = int(os.environ["LOCAL_RANK"]) + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ["WORLD_SIZE"]) + training_function(args) + + +if __name__ == "__main__": + main() + diff --git a/3.test_cases/12.SM-dataparallel-FSDP/scripts/utils.py b/3.test_cases/12.SM-dataparallel-FSDP/scripts/utils.py new file mode 100755 index 00000000..52ad85de --- /dev/null +++ b/3.test_cases/12.SM-dataparallel-FSDP/scripts/utils.py @@ -0,0 +1,59 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT-0 + +import torch +from torch.utils.data import DataLoader +from transformers import ( + default_data_collator +) +import os + +def create_dataloaders(train_dataset,eval_dataset,rank,world_size,seed,train_batch_size,eval_batch_size): + + train_sampler = torch.utils.data.DistributedSampler( + train_dataset, + shuffle=True, + seed=seed, + rank=rank, + num_replicas=world_size, + drop_last=True, + ) + + eval_sampler = torch.utils.data.DistributedSampler( + eval_dataset, + shuffle=True, + seed=seed, + rank=rank, + num_replicas=world_size, + drop_last=True, + ) + + train_dataloader = DataLoader( + train_dataset, sampler=train_sampler, collate_fn=default_data_collator, batch_size=train_batch_size, pin_memory=True,drop_last=True + ) + + eval_dataloader = DataLoader( + eval_dataset,sampler=eval_sampler, collate_fn=default_data_collator, batch_size=eval_batch_size, pin_memory=True,drop_last=True + + ) + + return train_dataloader,eval_dataloader + +def save_model(model, tokenizer, output_dir,rank): + """Helper method to save model when using FSDP.""" + + from torch.distributed.fsdp import ( + FullyShardedDataParallel as FSDP, + FullStateDictConfig, + StateDictType, + ) + save_policy = FullStateDictConfig(offload_to_cpu=True, rank0_only=True) + with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, save_policy): + cpu_state_dict = model.state_dict() + """ + To save optimizer states as well in case you want to resume training + on the same dataset, see: https://pytorch.org/docs/stable/fsdp.html + """ + if rank==0: + torch.save(cpu_state_dict,os.path.join(output_dir,"model_weights.pt")) + tokenizer.save_pretrained(output_dir) diff --git a/3.test_cases/13.SM-dataparallel-deepspeed/0.create_conda_env.sh b/3.test_cases/13.SM-dataparallel-deepspeed/0.create_conda_env.sh new file mode 100755 index 00000000..e911f148 --- /dev/null +++ b/3.test_cases/13.SM-dataparallel-deepspeed/0.create_conda_env.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Copyright 
Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT-0 + +set -ex +MAMBA_VERSION=23.1.0-1 + +curl -L -o ./mambaforge.sh https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-x86_64.sh +chmod +x ./mambaforge.sh +./mambaforge.sh -b -p ./conda +rm ./mambaforge.sh +./conda/bin/mamba clean -afy + +source ./conda/bin/activate + +conda create -n smdataparallel python=3.10 +conda activate smdataparallel + +# Install pytorch and SM data parallelism library +conda install -y pytorch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 pytorch-cuda=11.8 -c pytorch -c nvidia +pip install https://smdataparallel.s3.amazonaws.com/binary/pytorch/2.0.1/cu118/2023-11-07/smdistributed_dataparallel-2.0.2-cp310-cp310-linux_x86_64.whl +pip install -r code/requirements.txt diff --git a/3.test_cases/13.SM-dataparallel-deepspeed/1.run_training.sbatch b/3.test_cases/13.SM-dataparallel-deepspeed/1.run_training.sbatch new file mode 100644 index 00000000..2714852a --- /dev/null +++ b/3.test_cases/13.SM-dataparallel-deepspeed/1.run_training.sbatch @@ -0,0 +1,9 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT-0 + +#SBATCH --nodes=2 +#SBATCH --job-name=deepspeed-smddp +#SBATCH --exclusive +srun -l exec_torchrun.sh diff --git a/3.test_cases/13.SM-dataparallel-deepspeed/README.md b/3.test_cases/13.SM-dataparallel-deepspeed/README.md new file mode 100644 index 00000000..f270a71f --- /dev/null +++ b/3.test_cases/13.SM-dataparallel-deepspeed/README.md @@ -0,0 +1,45 @@ +# How to Use SageMaker Distributed Data Parallel Library (SMDDP) with DeepSpeed ZeRO + +## What is SMDDP? +The SMDDP library provides fast GPU collective communication algorithms on P4d/P4de instance types and serves as a drop-in replacement for the Nvidia Collective Communications Library (NCCL). 
Specifically, SMDDP implements an optimized AllGather communication routine, which is the main source of GPU communication overhead in sharded data parallel training jobs. With just two lines of code change, you can enable the SMDDP Library's optimized AllGather algorithm in your DeepSpeed training jobs and speed up training by up to 20% compared to NCCL! This example shows how you can use SMDDP when training the Llama2 model with DeepSpeed.
+
+## 0. Prerequisites
+You will need a slurm cluster with an FSx for Lustre file system. See the sagemaker-hyperpod section in the [1.architectures](https://github.com/ruhanprasad/awsome-distributed-training/tree/main/1.architectures) folder for setup instructions.
+
+### Required Dependencies of SMDDP Library
+* Python==3.10
+* CUDA==11.8
+* PyTorch==2.0.1
+
+Additionally, SMDDP must be used on AWS P4d or P4de instances. This example also uses mamba as a package manager. Mamba is a drop-in replacement for conda and it is recommended over Miniconda or Anaconda with SMDDP (see the Known Issues section for more details).
+
+## 1. Create Environment
+1. On your cluster head node, navigate to your shared FSx filesystem, which should be located at `/fsx`
+2. Clone this repo
+```
+cd /fsx
+git clone https://github.com/aws-samples/awsome-distributed-training/
+cd awsome-distributed-training/3.test_cases/13.SM-dataparallel-deepspeed
+```
+3. Run the `0.create_conda_env.sh` script. This will install [Mamba](https://github.com/mamba-org/mamba) and create an environment called `smdataparallel`. Since the environment is created on the shared FSx filesystem, all compute nodes will have access to it. Activate this environment via `conda activate smdataparallel`.
+
+## 2. Launch Training
+No dataset preparation is needed as this example uses synthetic data for simplicity. To launch the distributed training job, run `sbatch 1.run_training.sbatch`.
By default the number of nodes in the job is 2, but this can be changed in the `#SBATCH --nodes=...` argument in the sbatch script.
+
+Launching the job will create a log file in the current directory (`slurm-<job-id>.out`) which you can tail to monitor the progress of the training job. You can also see the underlying launch script in `exec_torchrun.sh` and the training script in `code/train.py`
+
+This example only runs training for one iteration and exits immediately. You should see output similar to below
+```
+Processing
+Processing training batch 1
+******epoch=0: train_ppl=tensor(71973.6484, device='cuda:0') train_loss=tensor(11.1841, device='cuda:0')******
+Performing validation on training batch 1
+Performing validation on training batch 1
+*******epoch=0: eval_ppl=tensor(70934.4062, device='cuda:0') eval_loss=tensor(11.1695, device='cuda:0')*******
+Training done!
+```
+## 3. Known Issues
+When using SMDDP in your own conda environment, you may encounter the following error after importing SMDDP in your training script: ``version `GLIBCXX_3.4.30' not found``
+
+If this occurs, firstly ensure that you are installing PyTorch via conda before pip installing SMDDP (i.e. install PyTorch through `conda install` before installing SMDDP in your environment). If this still does not resolve the error, please use [Mamba](https://github.com/mamba-org/mamba) as your package manager rather than Miniconda or Anaconda. Mamba is a drop-in replacement for conda with improvements in dependency resolution, and creating an environment with Mamba is known to resolve this issue.
+ diff --git a/3.test_cases/13.SM-dataparallel-deepspeed/code/__pycache__/utils.cpython-310.pyc b/3.test_cases/13.SM-dataparallel-deepspeed/code/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..314ab5bcd8e0662427309e57a651ec4f331b0cd6 GIT binary patch literal 1587 zcmZ8hOK&7K5Vre0J)PMQ9(!QLf!iQuMhhGfT8Wjoz+A$qK{8I;lXT*KV7uAfZ10J^ z@((z$7bO0YublE15Ui+jPm&>Sxm;DQa#ww>a(8f$6BzuDm-3&OkiSsa9}x&oV45F+ z2qLH=<+Rc!C_{>TprR%&W6y^wY0@(Fd<6Qe%sd~fS(BGJC0`Sfi1alPsmy)}%elzJ z>@_J1K~KmszlOh%&62wODYyKajti+ldSA#bACzSRw5;ls;`>&Kt2PQE=xP`xyw&gS6zCLJ+u?S`*Cx| z;1+5t8^#`h!k}M77r}xqX!7sX!~f%OYr9#-E@Q0ajbzN_j5VDY6!HaQKMh<>JNf{Q z(;pzgJKZDrcbx!13xiBXGFnits}O*jfsPqWkF)^Bk_&nfT!bTFK1O3OhFGV+Ca==l z9eQskv(ZS#)W%~Bwp4^*Jp&ybGtecsx_q3C`8I%?i8kLsim5ptwp%5!GNo`4;Ctzc zc4(NEy{e(q+)U`MXNnu^?Wgu~`P7M$ZCik!+v?oI@AAHe%acu~U+9k@mt~@lU~#dp zMraHuSPicY(O7I6t86)U2foIAi$h1dLjKIvjij+*U4GX``a@8Zhm*_QugeGCYfrH| z)l4Cm)2Tbj?ffww<=+}N`QIt-U8Xy#wB%O$xF#3#5F#-j1IdD%1~hq_h0_{D35D-% z5Ke0r>QBIIc`%{zjxXocUYEQ1o}jTDAVRG9)zrowH-yX;ka%mhoo?Xa@BnIjsokUd q=KIG_kYg44IQCNWW$Rld`(mo`FR?`pbS=rk0xA~1prD6gk^cwqv7uf7 literal 0 HcmV?d00001 diff --git a/3.test_cases/13.SM-dataparallel-deepspeed/code/dsconfig.json b/3.test_cases/13.SM-dataparallel-deepspeed/code/dsconfig.json new file mode 100755 index 00000000..e3211fee --- /dev/null +++ b/3.test_cases/13.SM-dataparallel-deepspeed/code/dsconfig.json @@ -0,0 +1,19 @@ +{ + "bf16": { + "enabled": "auto" + }, + "zero_optimization": { + "stage": 3, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": 5e8, + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": false + }, + "gradient_accumulation_steps": 1, + "steps_per_print": 2000, + "train_micro_batch_size_per_gpu": 4, + "wall_clock_breakdown": false +} diff --git a/3.test_cases/13.SM-dataparallel-deepspeed/code/requirements.txt 
b/3.test_cases/13.SM-dataparallel-deepspeed/code/requirements.txt new file mode 100755 index 00000000..b844e817 --- /dev/null +++ b/3.test_cases/13.SM-dataparallel-deepspeed/code/requirements.txt @@ -0,0 +1,7 @@ +transformers==4.31 +datasets +accelerate>=0.21 +bitsandbytes +peft +deepspeed==0.9.2 +Pydantic==1.10.13 diff --git a/3.test_cases/13.SM-dataparallel-deepspeed/code/train.py b/3.test_cases/13.SM-dataparallel-deepspeed/code/train.py new file mode 100755 index 00000000..edc8c40c --- /dev/null +++ b/3.test_cases/13.SM-dataparallel-deepspeed/code/train.py @@ -0,0 +1,215 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT-0 + +import argparse +import math +from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed, get_scheduler, SchedulerType +from datasets import load_from_disk +import torch +import torch.distributed as dist + +from utils import create_dataloaders, StubDataset +import functools +import deepspeed +try: + backend = "smddp" + import smdistributed.dataparallel.torch.torch_smddp +except ModuleNotFoundError: + backend = "nccl" + print("Warning: SMDDP not found on this image, falling back to NCCL!") + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--model_id", + type=str, + default="meta-llama/Llama-2-7b-chat-hf", + help="Model id to use for training.", + ) + parser.add_argument("--epochs", type=int, default=2, help="Number of epochs to train for.") + parser.add_argument("--max_steps", type=int, default=None, help="Number of epochs to train for.") + parser.add_argument( + "--batch_size", + type=int, + default=1, + help="Batch size to use for training.", + ) + parser.add_argument("--lr", type=float, default=3e-5, help="Learning rate to use for training.") + parser.add_argument("--optimizer", type=str, default="adamw_hf", help="Learning rate to use for training.") + parser.add_argument("--seed", type=int, default=42, help="Seed to use for training.") + 
parser.add_argument("--num_train_epochs", type=int, default=1, help="Total number of training epochs to perform.") + + parser.add_argument( + "--gradient_checkpointing", + type=bool, + default=True, + help="Whether to use gradient checkpointing to save memory.", + ) + parser.add_argument( + "--bf16", + type=bool, + default=True if torch.cuda.get_device_capability()[0] == 8 else False, + help="Whether to use bf16.", + ) + parser.add_argument( + "--max_train_steps", + type=int, + default=None, + help="Total number of training steps to perform. If provided, overrides num_train_epochs.", + ) + parser.add_argument( + "--learning_rate", + type=float, + default=5e-5, + help="Initial learning rate (after the potential warmup period) to use.", + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.", + ) + parser.add_argument( + "--lr_scheduler_type", + type=SchedulerType, + default="linear", + help="The scheduler type to use.", + choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], + ) + parser.add_argument( + "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." + ) + parser.add_argument( + "--deepspeed_config", type=str, help="Path to deepspeed config json" + ) + + parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") + parser.add_argument("--model_dir",type=str,default="/opt/ml/model") + parser.add_argument("--cache_dir",type=str,default=None) + args = parser.parse_known_args() + return args + +def training_function(args): + # smddp example specifically tailored for p4d(e) instance types. 
+ local_rank = dist.get_rank() % 8 + seed = args.seed + set_seed(seed) + torch.cuda.set_device(local_rank) + + dataset = { + 'train': StubDataset(), + 'validation': StubDataset() + } + + dtype = torch.bfloat16 + + from transformers import LlamaConfig + configuration = LlamaConfig(use_cache=False) + from transformers.models.llama import LlamaForCausalLM + with deepspeed.zero.Init(dtype=dtype, enabled=True): + model = AutoModelForCausalLM.from_config(configuration) + model.gradient_checkpointing_enable() + + train_dataset = dataset["train"] + eval_dataset = dataset["validation"] + train_dataloader, eval_dataloader = create_dataloaders( + train_dataset, eval_dataset, dist.get_rank(), dist.get_world_size(), + seed, args.batch_size, args.batch_size) + + no_decay = ["bias", "LayerNorm.weight", "layer_norm.weight"] + optimizer_grouped_parameters = [{ + "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], + "weight_decay": args.weight_decay, + },{ + "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], + "weight_decay": 0.0, + }] + + optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) + + # Scheduler and math around the number of training steps. 
+ overrode_max_train_steps = False + num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) + if dist.get_rank()==0: + print(f"Number of update steps per epoch {num_update_steps_per_epoch}") + if args.max_train_steps is None: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + overrode_max_train_steps = True + + lr_scheduler = get_scheduler( + name=args.lr_scheduler_type, + optimizer=optimizer, + num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, + ) + + model, optimizer, _, _ = deepspeed.initialize( + model=model, + optimizer=optimizer, + model_parameters=model.parameters(), + config=args.deepspeed_config + ) + device = torch.device(f"cuda:{local_rank}") + for epoch in range(args.num_train_epochs): + model.train() + total_steps=0 + ds_loss = torch.zeros(2).to(local_rank) + + for batch_idx, batch in enumerate(train_dataloader): + batch = {k: v.to(device) for k, v in batch.items()} + output = model(**batch) + if dist.get_rank() == 0: print(f"Processing training batch {batch_idx}") + loss = output["loss"] + loss.backward() + ds_loss[0] += loss.item() + ds_loss[1] += len(batch["input_ids"]) + optimizer.zero_grad() + lr_scheduler.step() + total_steps += 1 + if args.max_steps is not None and total_steps > args.max_steps: + break + + torch.distributed.all_reduce(ds_loss, op=torch.distributed.ReduceOp.SUM) + train_loss = ds_loss[0] / ds_loss[1] + train_ppl = torch.exp(train_loss) + + if dist.get_rank()==0: + print(f"******{epoch=}: {train_ppl=} {train_loss=}******") + + model.eval() + eval_loss = 0 + ds_eval_loss = torch.zeros(2).to(local_rank) + for steps, batch in enumerate(eval_dataloader): + batch = {k: v.to(device) for k, v in batch.items()} + + if dist.get_rank() == 0: print(f"Performing validation on training batch {batch_idx}") + with torch.no_grad(): + outputs = model(**batch) + loss = 
outputs["loss"] + ds_eval_loss[0] += loss.item() + ds_eval_loss[1] += len(batch["input_ids"]) + if args.max_steps is not None and steps > args.max_steps: + break + + torch.distributed.all_reduce(ds_eval_loss, op=torch.distributed.ReduceOp.SUM) + eval_loss = ds_eval_loss[0] / ds_eval_loss[1] + eval_ppl = torch.exp(eval_loss) + + if dist.get_rank()==0: + print(f"*******{epoch=}: {eval_ppl=} {eval_loss=}*******") + + if args.max_steps is not None and total_steps > args.max_steps: + break + + if dist.get_rank() == 0: + print("Training done!") + dist.barrier() + +def main(): + deepspeed.init_distributed(dist_backend=backend) + + args, _ = parse_args() + training_function(args) + +if __name__ == "__main__": + main() diff --git a/3.test_cases/13.SM-dataparallel-deepspeed/code/utils.py b/3.test_cases/13.SM-dataparallel-deepspeed/code/utils.py new file mode 100755 index 00000000..628222f3 --- /dev/null +++ b/3.test_cases/13.SM-dataparallel-deepspeed/code/utils.py @@ -0,0 +1,38 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT-0 + +import torch +import torch.distributed as dist +from torch.utils.data import DataLoader +from transformers import default_data_collator + +from torch.utils.data import Dataset +from torch.utils.data.distributed import DistributedSampler + +# dummy dataset for this example +class StubDataset(Dataset): + def __len__(self): return dist.get_world_size()*2 + def __getitem__(self, index): + block_size = 4096 + return { + 'input_ids': torch.randint(1, 31580, (block_size,)), + 'attention_mask': torch.randint(0, 2, (block_size,)), + 'labels': torch.randint(1, 31579, (block_size,)) + } + +def create_dataloaders(train_dataset, eval_dataset, rank, world_size, seed, + train_batch_size, eval_batch_size): + train_sampler = torch.utils.data.DistributedSampler( + train_dataset, shuffle=True, seed=seed, rank=rank, num_replicas=world_size, + drop_last=True,) + eval_sampler = torch.utils.data.DistributedSampler( + eval_dataset, shuffle=True, seed=seed, rank=rank, num_replicas=world_size, + drop_last=True,) + + train_dataloader = DataLoader( + train_dataset, sampler=train_sampler, collate_fn=default_data_collator, + batch_size=train_batch_size, pin_memory=True,drop_last=True) + eval_dataloader = DataLoader( + eval_dataset,sampler=eval_sampler, collate_fn=default_data_collator, + batch_size=eval_batch_size, pin_memory=True,drop_last=True) + return train_dataloader,eval_dataloader diff --git a/3.test_cases/13.SM-dataparallel-deepspeed/exec_torchrun.sh b/3.test_cases/13.SM-dataparallel-deepspeed/exec_torchrun.sh new file mode 100755 index 00000000..5390fb13 --- /dev/null +++ b/3.test_cases/13.SM-dataparallel-deepspeed/exec_torchrun.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT-0 + +export OMP_NUM_THREADS=1 +export GPUS_PER_NODE=8 +MASTER_NODE=$(scontrol show hostname | head -n 1) +export MASTER_ADDR=$(scontrol show node=$MASTER_NODE | awk -F= '/NodeAddr=/{print $2}' | awk '{print $1}') +export NNODES=$SLURM_NTASKS +export NODE_RANK=$SLURM_NODEID +export MASTER_PORT=9001 +export WORLD_SIZE_JOB=$SLURM_NTASKS +export DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT " + +echo "Launching torchrun..." + +torchrun $DISTRIBUTED_ARGS \ + code/train.py \ + --gradient_checkpointing True \ + --bf16 True \ + --optimizer "adamw_torch" \ + --per_device_train_batch_size 1 \ + --epochs 1 \ + --max_steps 30 \ + --deepspeed_config "code/dsconfig.json" + + From 2aa5f4b9fee6e6a79601649a22f1d78b00ac1a45 Mon Sep 17 00:00:00 2001 From: mhuguesaws <71357145+mhuguesaws@users.noreply.github.com> Date: Tue, 19 Dec 2023 13:57:28 -0600 Subject: [PATCH 266/648] Change folder for nccl tests Cose #28 --- .../1.amazon_machine_image/roles/nvidia_nccl/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_nccl/tasks/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_nccl/tasks/main.yml index 2035559e..eb76f9d8 100644 --- a/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_nccl/tasks/main.yml +++ b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_nccl/tasks/main.yml @@ -51,10 +51,10 @@ - name: Clone Nvidia NCCL tests repository ansible.builtin.git: repo: https://github.com/NVIDIA/nccl-tests.git - dest: /tmp/nccl-tests + dest: /opt/nccl-tests - name: Build NCCL tests ansible.builtin.shell: | make MPI=1 CUDA_HOME=/usr/local/cuda MPI_HOME=/opt/amazon/openmpi NCCL_HOME=/opt/nccl/build args: - chdir: /tmp/nccl-tests + chdir: /opt/nccl-tests From ab59027676c1d3d9df8d6209d950e0db5157adcb Mon Sep 17 00:00:00 2001 From: mlopezr Date: Wed, 20 
Dec 2023 17:03:16 +0100 Subject: [PATCH 267/648] Update doc to add "sagemaker-" prefix to S3 bucket required by IAM policy --- 1.architectures/5.sagemaker-hyperpod/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/1.architectures/5.sagemaker-hyperpod/README.md b/1.architectures/5.sagemaker-hyperpod/README.md index e96eb8a8..35c6bae3 100644 --- a/1.architectures/5.sagemaker-hyperpod/README.md +++ b/1.architectures/5.sagemaker-hyperpod/README.md @@ -43,7 +43,7 @@ Next, we'll need an S3 bucket. This bucket will be used to store the lifecycle s ``` # generate a unique name for the bucket -BUCKET="lifecycle-$(python3 -S -c 'import uuid; print(str(uuid.uuid4().hex)[:10])')" +BUCKET="sagemaker-lifecycle-$(python3 -S -c 'import uuid; print(str(uuid.uuid4().hex)[:10])')" # create the bucket aws s3 mb s3://${BUCKET} @@ -317,4 +317,4 @@ When you're done with your cluster, you can delete it down with aws sagemaker delete-cluster --cluster-name ml-cluster --region us-west-2 ``` -Your FSx for Lustre volume will retain anything saved to it, and can be reattached to a future cluster. \ No newline at end of file +Your FSx for Lustre volume will retain anything saved to it, and can be reattached to a future cluster. From f2af7adc5c57128022efb22ab5d325f08d281235 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Thu, 4 Jan 2024 10:37:40 +0800 Subject: [PATCH 268/648] Dockerfile template: clarify efa_installer flags, bump versions --- .../2.docker/0.nvcr-pytorch-aws.dockerfile | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile index 70b78c14..0e0e0cbc 100644 --- a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile +++ b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile @@ -19,13 +19,13 @@ # # Load image to local docker registry -> on head node, or new compute/build node. 
# docker load < /fsx/nvidia-pt-od__2310.tar #################################################################################################### -FROM nvcr.io/nvidia/pytorch:23.10-py3 +FROM nvcr.io/nvidia/pytorch:23.12-py3 ENV DEBIAN_FRONTEND=noninteractive # The three must-be-built packages. # Efa-installer>=1.29.0 required for nccl>=2.19.0 to avoid libfabric NCCL error. -ENV EFA_INSTALLER_VERSION=1.29.0 -ENV AWS_OFI_NCCL_VERSION=1.7.3-aws +ENV EFA_INSTALLER_VERSION=1.30.0 +ENV AWS_OFI_NCCL_VERSION=1.7.4-aws ENV NCCL_TESTS_VERSION=master RUN apt-get update -y @@ -63,7 +63,13 @@ RUN apt-get update && \ curl -O https://efa-installer.amazonaws.com/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \ tar -xf aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \ cd aws-efa-installer && \ - ./efa_installer.sh -y -g -d --skip-kmod --skip-limit-conf --no-verify && \ + # Anything from --skip-kmod onwards are used ONLY for container image. Do not use these + # flags on the host, unless you want to fine-grained control the installation process. + # + # NOTE: --skip-kmod and --no-verify in Dockerfile causes docker build to hard fail. The + # skip-limit-conf can be retained in Dockerfile, but it's redundant as the host already has + # these limits set by efa_installer. 
+ ./efa_installer.sh -y -g -d --skip-kmod --no-verify --skip-limit-conf && \ ldconfig && \ rm -rf /tmp/aws-efa-installer /var/lib/apt/lists/* ENV LD_LIBRARY_PATH=/opt/amazon/efa/lib:$LD_LIBRARY_PATH From 2880c55cb073a114141320d5f0c67f2a1d44471e Mon Sep 17 00:00:00 2001 From: Verdi March Date: Thu, 4 Jan 2024 11:22:07 +0800 Subject: [PATCH 269/648] Change example tag to latest to future-proof version bumps --- .../2.docker/0.nvcr-pytorch-aws.dockerfile | 8 ++++---- 2.ami_and_containers/2.docker/README.md | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile index 0e0e0cbc..cff7eab7 100644 --- a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile +++ b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile @@ -7,17 +7,17 @@ # # Sample build instructions: # -# docker build --progress=plain -t nvidia-pt-od:2310 -f 0.nvcr-pytorch-aws.dockerfile . -# rm /fsx/nvidia-pt-od__2310.sqsh ; enroot import -o /fsx/nvidia-pt-od__2310.sqsh dockerd://nvidia-pt-od:2310 +# docker build --progress=plain -t nvidia-pt-od:latest -f 0.nvcr-pytorch-aws.dockerfile . +# rm /fsx/nvidia-pt-od__latest.sqsh ; enroot import -o /fsx/nvidia-pt-od__latest.sqsh dockerd://nvidia-pt-od:latest # # Compute nodes (aka build nodes) are transient, so we need to keep the docker image on shared fs, # which head node can load into its local registry. # # # Build node: save image to file -# docker save nvidia-pt-od:2310 > /fsx/nvidia-pt-od__2310.tar +# docker save nvidia-pt-od:latest > /fsx/nvidia-pt-od__latest.tar # # # Load image to local docker registry -> on head node, or new compute/build node. 
-# docker load < /fsx/nvidia-pt-od__2310.tar +# docker load < /fsx/nvidia-pt-od__latest.tar #################################################################################################### FROM nvcr.io/nvidia/pytorch:23.12-py3 ENV DEBIAN_FRONTEND=noninteractive diff --git a/2.ami_and_containers/2.docker/README.md b/2.ami_and_containers/2.docker/README.md index de328c9b..79d65795 100644 --- a/2.ami_and_containers/2.docker/README.md +++ b/2.ami_and_containers/2.docker/README.md @@ -58,7 +58,7 @@ docker images # Convert to enroot format. Attempt to remove an existing .sqsh, otherwise enroot refuses to # run when the output .sqsh file already exists. -rm /fsx/nvidia-pt-od__2310.sqsh ; enroot import -o /fsx/nvidia-pt-od__latest.sqsh dockerd://nvidia-pt-od:latest +rm /fsx/nvidia-pt-od__latest.sqsh ; enroot import -o /fsx/nvidia-pt-od__latest.sqsh dockerd://nvidia-pt-od:latest ``` Tips: when building on a compute node (or a build node), you save the built Docker image on a shared From 13760fb1e6dfbd7ec1451dcd64ba94eb6d5f643f Mon Sep 17 00:00:00 2001 From: Pierre-Yves <62559210+perifaws@users.noreply.github.com> Date: Thu, 4 Jan 2024 15:22:07 -0600 Subject: [PATCH 270/648] Delete convergence_jobs directory --- convergence_jobs/neox_7b/neox_7b_4Mtokens.sh | 59 -------------------- convergence_jobs/neox_7b/sync_logs.sh | 2 - 2 files changed, 61 deletions(-) delete mode 100644 convergence_jobs/neox_7b/neox_7b_4Mtokens.sh delete mode 100644 convergence_jobs/neox_7b/sync_logs.sh diff --git a/convergence_jobs/neox_7b/neox_7b_4Mtokens.sh b/convergence_jobs/neox_7b/neox_7b_4Mtokens.sh deleted file mode 100644 index e53edc7e..00000000 --- a/convergence_jobs/neox_7b/neox_7b_4Mtokens.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash -#SBATCH --output=logs/%x_%j.out # Redirects outputs to file in current_dir/logs -#SBATCH --error=logs/%x_%j.out # Redirects err to same file in current_dir/logs -#SBATCH --job-name=neox_7b - -# has to be shared dir 
-CONDA_ENV_PATH=${1:-"$CONDA_DEFAULT_ENV"} -SHELL_SCRIPT=${2:-"scripts/model.sh"} - -set -ex - -if [ -z $CONDA_ENV_PATH ]; then - echo "Conda env path needs to be passed. Exiting" - exit 1 -fi - -# To keep track of which job used which node for identifying node causing crash if any -HOSTFILE=hosts_${SLURM_JOB_ID} -scontrol show hostnames | sort > $HOSTFILE -NUM_NODES=$(cat $HOSTFILE | wc -l) - -## DATA -## CHANGE TO YOUR OWN CUSTOM DATASET PATH -SCRIPT_ARGS="--training_dir /fsx/datasets/train_ids_wsvocab_redo_2048_smaller " -SCRIPT_ARGS+="--test_dir /fsx/datasets/val_ids_wsvocab_2048 " - -## MODEL -model_type=gpt_neox -SCRIPT_ARGS+="--model_type $model_type --model_size 7b " - - -## BATCH SIZE -if [ $NUM_NODES -lt 16 ]; then - echo "Can't use 4M tokens with less than 16 nodes" - exit 1 -else - GLOBAL_BATCH_SIZE=4194304 -fi -max_context_width=2048 # seqlen -train_batch_size=$(python -c "print($GLOBAL_BATCH_SIZE//($NUM_NODES * 8 * $max_context_width))") - -if [ $train_batch_size -le 2 ]; then - SCRIPT_ARGS+="--activation_checkpointing 0 " -fi - -SCRIPT_ARGS+="--train_batch_size $train_batch_size " -SCRIPT_ARGS+="--val_batch_size $train_batch_size " -SCRIPT_ARGS+="--max_context_width $max_context_width " -SCRIPT_ARGS+="--max_steps 143000 " -SCRIPT_ARGS+="--validation_freq 200 " - -## ARTIFACTS -SCRIPT_ARGS+="--checkpoint_dir checkpoints/$SLURM_JOB_NAME/ " -SCRIPT_ARGS+="--tensorboard_dir tensorboard_logs/$SLURM_JOB_NAME/ " - -## RESUME -# SCRIPT_ARGS+="--resume_from_checkpoint checkpoints/$SLURM_JOB_NAME/$model_type-400steps " - -srun -l -D `pwd` conda run -p $CONDA_ENV_PATH --no-capture-output $SHELL_SCRIPT --hostfile $HOSTFILE $SCRIPT_ARGS diff --git a/convergence_jobs/neox_7b/sync_logs.sh b/convergence_jobs/neox_7b/sync_logs.sh deleted file mode 100644 index 50d0f556..00000000 --- a/convergence_jobs/neox_7b/sync_logs.sh +++ /dev/null @@ -1,2 +0,0 @@ -aws s3 sync s3://rubik-convergence-jobs/logs/neox_7b_4Mtokens/ ./logs/ -# aws s3 sync 
s3://rubik-convergence-jobs/tensorboard/neox_7b_4Mtokens/ ./tensorboard/ From 8cc238c56faee16664ba32855a04738b043eef45 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Fri, 5 Jan 2024 10:07:58 +0800 Subject: [PATCH 271/648] Update 2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile Co-authored-by: mhuguesaws <71357145+mhuguesaws@users.noreply.github.com> --- 2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile index cff7eab7..a65cbc06 100644 --- a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile +++ b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile @@ -63,7 +63,7 @@ RUN apt-get update && \ curl -O https://efa-installer.amazonaws.com/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \ tar -xf aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \ cd aws-efa-installer && \ - # Anything from --skip-kmod onwards are used ONLY for container image. Do not use these + # ONLY add `--skip-kmod`, `--skip-limit-conf` and `--no-verify` flags to container image. # flags on the host, unless you want to fine-grained control the installation process. # # NOTE: --skip-kmod and --no-verify in Dockerfile causes docker build to hard fail. 
The From c8190c719b193f2851b757c7c2f4df44b6083df0 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Fri, 5 Jan 2024 10:08:09 +0800 Subject: [PATCH 272/648] Update 2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile Co-authored-by: mhuguesaws <71357145+mhuguesaws@users.noreply.github.com> --- 2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile index a65cbc06..5359e707 100644 --- a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile +++ b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile @@ -64,7 +64,7 @@ RUN apt-get update && \ tar -xf aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \ cd aws-efa-installer && \ # ONLY add `--skip-kmod`, `--skip-limit-conf` and `--no-verify` flags to container image. - # flags on the host, unless you want to fine-grained control the installation process. + # Those three flags must NOT be used on the host. # # NOTE: --skip-kmod and --no-verify in Dockerfile causes docker build to hard fail. 
The # skip-limit-conf can be retained in Dockerfile, but it's redundant as the host already has From bb0e33c5710654a9fac945f995bf30cac15d6746 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Fri, 5 Jan 2024 10:11:23 +0800 Subject: [PATCH 273/648] Incorporate sean's comment --- .../2.docker/0.nvcr-pytorch-aws.dockerfile | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile index 5359e707..d4de6ee3 100644 --- a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile +++ b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile @@ -63,12 +63,15 @@ RUN apt-get update && \ curl -O https://efa-installer.amazonaws.com/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \ tar -xf aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \ cd aws-efa-installer && \ - # ONLY add `--skip-kmod`, `--skip-limit-conf` and `--no-verify` flags to container image. + # ONLY add `--skip-kmod`, `--no-verify` and `--skip-limit-conf` flags to container image. # Those three flags must NOT be used on the host. # - # NOTE: --skip-kmod and --no-verify in Dockerfile causes docker build to hard fail. The - # skip-limit-conf can be retained in Dockerfile, but it's redundant as the host already has - # these limits set by efa_installer. + # Explanations: + # - to build EFA in the Dockerfile, we added --skip-kmod and --no-verify. Without these flags, + # the Dockerfile will fail to build. If installing EFA on the host and not in a container, + # please remove these flags. + # - The --skip-limit-conf can be retained in Dockerfile, but it's redundant as the host already + # has these limits set by efa_installer. 
./efa_installer.sh -y -g -d --skip-kmod --no-verify --skip-limit-conf && \ ldconfig && \ rm -rf /tmp/aws-efa-installer /var/lib/apt/lists/* From 0588cc7874e4573fe2732765da6a43d715f030f5 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Fri, 5 Jan 2024 19:19:54 +0800 Subject: [PATCH 274/648] Do not version Python runtime files --- .gitignore | 119 +++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 116 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 278a3fae..daf3b51d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ .env.unversioned + *.out *.err *.log @@ -9,6 +10,14 @@ inventory/packer-provisioner-ansible* .vscode/* !.vscode/launch.json +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + # MS-Office temporary files ~$* @@ -19,6 +28,106 @@ inventory/packer-provisioner-ansible* .*.sw[po] *.sw[po] +# Python distribution / packaging +MANIFEST +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuffs +instance/ +.webassets-cache + +# Scrappy stuffs +.scrapy + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Sphinx documentation +docs/_build/ + +# mkdocs documentation +/site + +# Database +*.db +*.rdb + +# PyBuilder +target/ + +# Mypy cache +.mypy_cache/ + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# IDE +.c9/ +.idea/ +.spyproject +.spyderproject +.ropeproject +.vscode + # pyspark derby.log metastore_db @@ -34,9 +143,13 @@ spark-warehouse *.venv* .ipynb_checkpoints -# slurm outputs -*.out -*.err +# Neuron-SDK >= 2.14 +log-neuron-cc.txt +all_metrics.csv +neuronxcc-*/ + +# WanDb temp files or offline artifacts +wandb/ # Enroot container image *.sqsh From 3a5a247e01b9769ef9de7e8adec88764c6c4055e Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Fri, 5 Jan 2024 10:37:42 -0600 Subject: [PATCH 275/648] Add EFA node exporter for Prometheus --- .../3.efa-node-exporter/Dockerfile | 33 ++ .../3.efa-node-exporter/Makefile | 10 + .../3.efa-node-exporter/README.md | 73 +++++ .../3.efa-node-exporter/amazon_efa_linux.go | 170 ++++++++++ .../3.efa-node-exporter/class_amazon_efa.go | 308 ++++++++++++++++++ .../3.efa-node-exporter/docker-compose.yml | 18 + 6 files changed, 612 insertions(+) create mode 100644 4.validation_scripts/3.efa-node-exporter/Dockerfile create mode 100644 4.validation_scripts/3.efa-node-exporter/Makefile create mode 100644 4.validation_scripts/3.efa-node-exporter/README.md create mode 100644 4.validation_scripts/3.efa-node-exporter/amazon_efa_linux.go create mode 100644 
4.validation_scripts/3.efa-node-exporter/class_amazon_efa.go create mode 100644 4.validation_scripts/3.efa-node-exporter/docker-compose.yml diff --git a/4.validation_scripts/3.efa-node-exporter/Dockerfile b/4.validation_scripts/3.efa-node-exporter/Dockerfile new file mode 100644 index 00000000..9d4671c1 --- /dev/null +++ b/4.validation_scripts/3.efa-node-exporter/Dockerfile @@ -0,0 +1,33 @@ +FROM ubuntu:20.04 + +ARG EFA_INSTALLER_VERSION=latest +ARG NODE_EXPORTER_VERSION=v1.6.1 +ARG PROCFS_EXPORTER_VERSION=v0.12.0 + +# install go +RUN apt update && apt install curl git build-essential -y +RUN curl -OL https://go.dev/dl/go1.21.1.linux-amd64.tar.gz && \ + tar -C /usr/local -xvf go1.21.1.linux-amd64.tar.gz +ENV PATH=$PATH:/usr/local/go/bin + +# Install EFA +RUN cd $HOME \ + && curl -O https://efa-installer.amazonaws.com/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz \ + && tar -xf $HOME/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz \ + && cd aws-efa-installer \ + && ./efa_installer.sh -y -g -d --skip-kmod --skip-limit-conf --no-verify \ + && rm -rf $HOME/aws-efa-installer + +# Install ProcFS +RUN git clone -b $PROCFS_EXPORTER_VERSION https://github.com/prometheus/procfs.git /workspace/procfs +COPY class_amazon_efa.go /workspace/procfs/sysfs/ +RUN cd /workspace/procfs && make test + +# Install Node Exporter +RUN git clone -b $NODE_EXPORTER_VERSION https://github.com/prometheus/node_exporter.git /workspace/node_exporter +COPY amazon_efa_linux.go /workspace/node_exporter/collector/ + +WORKDIR /workspace/node_exporter +RUN go mod edit --replace=github.com/prometheus/procfs=/workspace/procfs +RUN go mod tidy && go build -o node_exporter +ENTRYPOINT ["./node_exporter"] \ No newline at end of file diff --git a/4.validation_scripts/3.efa-node-exporter/Makefile b/4.validation_scripts/3.efa-node-exporter/Makefile new file mode 100644 index 00000000..c40432a1 --- /dev/null +++ b/4.validation_scripts/3.efa-node-exporter/Makefile @@ -0,0 +1,10 @@ +IMAGE=node_exporter_efa 
+push: + docker build -t ${IMAGE} . +run: + docker run -d \ + --net="host" \ + --pid="host" \ + -v "/:/host:ro,rslave" \ + ${IMAGE}:latest \ + --path.rootfs=/host diff --git a/4.validation_scripts/3.efa-node-exporter/README.md b/4.validation_scripts/3.efa-node-exporter/README.md new file mode 100644 index 00000000..936284f6 --- /dev/null +++ b/4.validation_scripts/3.efa-node-exporter/README.md @@ -0,0 +1,73 @@ +# EFA node Exporter for Prometheus + +Scripted fork of the [Prometheus Node Exporter](https://github.com/prometheus/node_exporter) and [ProcFS](https://github.com/prometheus/procfs) repositories to export Amazon EFA metrics counters on compatible instances including c5n, hpc6ad, P5, P4. + +# How to run the collector + +To create the docker image run: + +```bash +git clone https://github.com/aws-samples/awsome-distributed-training.git +cd awsome-distributed-training/4.validation_scripts/3.efa-node-exporter +make +``` + +Then you can execute it, i.e. + +```bash +# alternatively you can run: +# make run +docker run -d \ + --net="host" \ + --pid="host" \ + -v "/:/host:ro,rslave" \ + node_exporter_efa:latest \ + --path.rootfs=/host +``` + +Now you can make sure the metrics are being correctly exported by running: + +```bash +curl -s http://localhost:9100/metrics | grep amazonefa +``` + +You should see a bunch of metrics like the following: + +``` +# HELP node_amazonefa_tx_pkts Number of packets transmitted +# TYPE node_amazonefa_tx_pkts counter +node_amazonefa_tx_pkts{device="rdmap113s0",port="1"} 1.664737e+06 +node_amazonefa_tx_pkts{device="rdmap114s0",port="1"} 1.664737e+06 +node_amazonefa_tx_pkts{device="rdmap115s0",port="1"} 1.664738e+06 +node_amazonefa_tx_pkts{device="rdmap116s0",port="1"} 1.664746e+06 +node_amazonefa_tx_pkts{device="rdmap130s0",port="1"} 1.6642e+06 +node_amazonefa_tx_pkts{device="rdmap131s0",port="1"} 1.664199e+06 +node_amazonefa_tx_pkts{device="rdmap132s0",port="1"} 1.6642e+06 +node_amazonefa_tx_pkts{device="rdmap133s0",port="1"} 
1.664208e+06 +node_amazonefa_tx_pkts{device="rdmap147s0",port="1"} 1.663841e+06 +node_amazonefa_tx_pkts{device="rdmap148s0",port="1"} 1.66384e+06 +node_amazonefa_tx_pkts{device="rdmap149s0",port="1"} 1.663842e+06 +node_amazonefa_tx_pkts{device="rdmap150s0",port="1"} 1.66385e+06 +node_amazonefa_tx_pkts{device="rdmap164s0",port="1"} 1.65972e+06 +node_amazonefa_tx_pkts{device="rdmap165s0",port="1"} 1.659707e+06 +node_amazonefa_tx_pkts{device="rdmap166s0",port="1"} 1.65973e+06 +node_amazonefa_tx_pkts{device="rdmap167s0",port="1"} 1.659725e+06 +node_amazonefa_tx_pkts{device="rdmap181s0",port="1"} 1.658642e+06 +node_amazonefa_tx_pkts{device="rdmap182s0",port="1"} 1.658642e+06 +node_amazonefa_tx_pkts{device="rdmap183s0",port="1"} 1.658642e+06 +node_amazonefa_tx_pkts{device="rdmap184s0",port="1"} 1.658651e+06 +node_amazonefa_tx_pkts{device="rdmap198s0",port="1"} 1.655952e+06 +node_amazonefa_tx_pkts{device="rdmap199s0",port="1"} 1.655953e+06 +node_amazonefa_tx_pkts{device="rdmap200s0",port="1"} 1.655953e+06 +node_amazonefa_tx_pkts{device="rdmap201s0",port="1"} 1.655961e+06 +node_amazonefa_tx_pkts{device="rdmap79s0",port="1"} 1.667468e+06 +node_amazonefa_tx_pkts{device="rdmap80s0",port="1"} 1.667512e+06 +node_amazonefa_tx_pkts{device="rdmap81s0",port="1"} 1.667507e+06 +node_amazonefa_tx_pkts{device="rdmap82s0",port="1"} 1.667491e+06 +node_amazonefa_tx_pkts{device="rdmap96s0",port="1"} 1.664917e+06 +node_amazonefa_tx_pkts{device="rdmap97s0",port="1"} 1.664916e+06 +node_amazonefa_tx_pkts{device="rdmap98s0",port="1"} 1.664917e+06 +node_amazonefa_tx_pkts{device="rdmap99s0",port="1"} 1.664925e+06 +node_scrape_collector_duration_seconds{collector="amazonefa"} 0.084817395 +node_scrape_collector_success{collector="amazonefa"} 1 +``` diff --git a/4.validation_scripts/3.efa-node-exporter/amazon_efa_linux.go b/4.validation_scripts/3.efa-node-exporter/amazon_efa_linux.go new file mode 100644 index 00000000..d18c7105 --- /dev/null +++ 
b/4.validation_scripts/3.efa-node-exporter/amazon_efa_linux.go @@ -0,0 +1,170 @@ +// Copyright 2022 Amazon Web Services +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collector + +import ( + "errors" + "fmt" + "os" + "strconv" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +type AmazonEfaCollector struct { + fs sysfs.FS + metricDescs map[string]*prometheus.Desc + logger log.Logger + subsystem string +} + +func init() { + registerCollector("amazonefa", defaultEnabled, NewAmazonEfaCollector) +} + +// NewAmazonEfaCollector returns a new Collector exposing Amazon EFA stats. +func NewAmazonEfaCollector(logger log.Logger) (Collector, error) { + var i AmazonEfaCollector + var err error + + i.fs, err = sysfs.NewFS(*sysPath) + if err != nil { + return nil, fmt.Errorf("failed to open sysfs: %w", err) + } + i.logger = logger + + // Detailed description for all metrics. 
+ descriptions := map[string]string{ + "alloc_pd_err": "Number of allocations PD errors", + "alloc_ucontext_err": "Number of allocations UContext errors", + "cmds_err": "Number of commands errors", + "completed_cmds": "Number of completed commands", + "create_ah_err": "Number of create AH errors", + "create_cq_err": "Number of create CQ errors", + "create_qp_err": "Number of create qp errors", + "keep_alive_rcvd": "Number of keep-alive packets received", + "lifespan": "Lifespan of the port", + "mmap_err": "Number of mmap errors", + "no_completion_cmds": "Number of commands with no completion", + "rdma_read_bytes": "Number of bytes read with RDMA", + "rdma_read_resp_bytes": "Number of read reponses bytes with RDMA", + "rdma_read_wr_err": "Number of read write errors with RDMA", + "rdma_read_wrs": "Number of read rs with RDMA", + "rdma_write_bytes": "Number of bytes wrote with RDMA", + "rdma_write_recv_bytes": "Number of bytes wrote and received with RDMA", + "rdma_write_wr_err": "Number of bytes wrote wr with error RDMA", + "rdma_write_wrs": "Number of bytes wrote wrs RDMA", + "recv_bytes": "Number of bytes recv bytes", + "recv_wrs": "Number of bytes recv wrs", + "reg_mr_err": "Number of reg_mr errors", + "rx_bytes": "Number of bytes received", + "rx_drops": "Number of packets droped", + "rx_pkts": "Number of packets received", + "send_bytes": "Number of bytes send", + "send_wrs": "Number of wrs send", + "submitted_cmds": "Number of submitted commands", + "tx_bytes": "Number of bytes transmitted", + "tx_pkts": "Number of packets transmitted", + } + + i.metricDescs = make(map[string]*prometheus.Desc) + i.subsystem = "amazonefa" + + for metricName, description := range descriptions { + i.metricDescs[metricName] = prometheus.NewDesc( + prometheus.BuildFQName(namespace, i.subsystem, metricName), + description, + []string{"device", "port"}, + nil, + ) + } + + return &i, nil +} + +func (c *AmazonEfaCollector) pushMetric(ch chan<- prometheus.Metric, name string, value 
uint64, deviceName string, port string, valueType prometheus.ValueType) { + ch <- prometheus.MustNewConstMetric(c.metricDescs[name], valueType, float64(value), deviceName, port) +} + +func (c *AmazonEfaCollector) pushCounter(ch chan<- prometheus.Metric, name string, value *uint64, deviceName string, port string) { + if value != nil { + c.pushMetric(ch, name, *value, deviceName, port, prometheus.CounterValue) + } +} + +func (c *AmazonEfaCollector) Update(ch chan<- prometheus.Metric) error { + devices, err := c.fs.AmazonEfaClass() + if err != nil { + if errors.Is(err, os.ErrNotExist) { + level.Debug(c.logger).Log("msg", "Amazon EFA statistics not found, skipping") + return ErrNoData + } + return fmt.Errorf("error obtaining AmazonEfa class info: %w", err) + } + + for _, device := range devices { + infoDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, c.subsystem, "info"), + "Non-numeric data from /sys/class/infiniband/, value is always 1.", + []string{"device"}, + nil, + ) + infoValue := 1.0 + ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, device.Name) + + for _, port := range device.Ports { + portStr := strconv.FormatUint(uint64(port.Port), 10) + + //c.pushMetric(ch, "state_id", uint64(port.StateID), port.Name, portStr, prometheus.UntypedValue) + //c.pushMetric(ch, "physical_state_id", uint64(port.PhysStateID), port.Name, portStr, prometheus.UntypedValue) + //c.pushMetric(ch, "rate_bytes_per_second", port.Rate, port.Name, portStr, prometheus.UntypedValue) + + c.pushCounter(ch, "alloc_pd_err", port.Counters.AllocPdErr, port.Name, portStr) + c.pushCounter(ch, "alloc_ucontext_err", port.Counters.AllocUcontextErr, port.Name, portStr) + c.pushCounter(ch, "cmds_err", port.Counters.CmdsErr, port.Name, portStr) + c.pushCounter(ch, "completed_cmds", port.Counters.CompletedCmds, port.Name, portStr) + c.pushCounter(ch, "create_ah_err", port.Counters.CreateAhErr, port.Name, portStr) + c.pushCounter(ch, "create_cq_err", 
port.Counters.CreateCqErr, port.Name, portStr) + c.pushCounter(ch, "create_qp_err", port.Counters.CreateQpErr, port.Name, portStr) + c.pushCounter(ch, "keep_alive_rcvd", port.Counters.KeepAliveRcvd, port.Name, portStr) + c.pushCounter(ch, "lifespan", port.Counters.Lifespan, port.Name, portStr) + c.pushCounter(ch, "mmap_err", port.Counters.MmapErr, port.Name, portStr) + c.pushCounter(ch, "no_completion_cmds", port.Counters.NoCompletionCmds, port.Name, portStr) + c.pushCounter(ch, "rdma_read_bytes", port.Counters.RdmaReadBytes, port.Name, portStr) + c.pushCounter(ch, "rdma_read_resp_bytes", port.Counters.RdmaReadRespBytes, port.Name, portStr) + c.pushCounter(ch, "rdma_read_wr_err", port.Counters.RdmaReadWrErr, port.Name, portStr) + c.pushCounter(ch, "rdma_read_wrs", port.Counters.RdmaReadWrs, port.Name, portStr) + c.pushCounter(ch, "rdma_write_bytes", port.Counters.RdmaWriteBytes, port.Name, portStr) + c.pushCounter(ch, "rdma_write_recv_bytes", port.Counters.RdmaWriteRecvBytes, port.Name, portStr) + c.pushCounter(ch, "rdma_write_wr_err", port.Counters.RdmaWriteWrErr, port.Name, portStr) + c.pushCounter(ch, "rdma_write_wrs", port.Counters.RdmaWritedWrs, port.Name, portStr) + c.pushCounter(ch, "recv_bytes", port.Counters.RecvBytes, port.Name, portStr) + c.pushCounter(ch, "recv_wrs", port.Counters.RecvWrs, port.Name, portStr) + c.pushCounter(ch, "reg_mr_err", port.Counters.RegMrErr, port.Name, portStr) + c.pushCounter(ch, "rx_bytes", port.Counters.RxBytes, port.Name, portStr) + c.pushCounter(ch, "rx_drops", port.Counters.RxDrops, port.Name, portStr) + c.pushCounter(ch, "rx_pkts", port.Counters.RxPkts, port.Name, portStr) + c.pushCounter(ch, "send_bytes", port.Counters.SendBytes, port.Name, portStr) + c.pushCounter(ch, "send_wrs", port.Counters.SendWrs, port.Name, portStr) + c.pushCounter(ch, "submitted_cmds", port.Counters.SubmittedCmds, port.Name, portStr) + c.pushCounter(ch, "tx_bytes", port.Counters.TxBytes, port.Name, portStr) + c.pushCounter(ch, "tx_pkts", 
port.Counters.TxPkts, port.Name, portStr) + } + } + + return nil +} diff --git a/4.validation_scripts/3.efa-node-exporter/class_amazon_efa.go b/4.validation_scripts/3.efa-node-exporter/class_amazon_efa.go new file mode 100644 index 00000000..44bc73cb --- /dev/null +++ b/4.validation_scripts/3.efa-node-exporter/class_amazon_efa.go @@ -0,0 +1,308 @@ +// Copyright 2022 Amazon Web Services +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sysfs + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "bytes" + "syscall" + "github.com/prometheus/procfs/internal/util" +) + +const AmazonEfaPath = "class/infiniband" + +// AmazonEfaCounters contains counter values from files in +// /sys/class/infiniband//ports//hw_counters +// for a single port of one Amazon Elastic Fabric Adapter device. 
+type AmazonEfaCounters struct { + AllocPdErr *uint64 // hw_counters/alloc_pd_err + AllocUcontextErr *uint64 // hw_counters/alloc_ucontext_err + CmdsErr *uint64 // hw_counters/cmds_err + CompletedCmds *uint64 // hw_counters/completed_cmds + CreateAhErr *uint64 // hw_counters/create_ah_err + CreateCqErr *uint64 // hw_counters/create_cq_err + CreateQpErr *uint64 // hw_counters/create_qp_err + KeepAliveRcvd *uint64 // hw_counters/keep_alive_rcvd + Lifespan *uint64 // hw_counters/lifespan + MmapErr *uint64 // hw_counters/mmap_err + NoCompletionCmds *uint64 // hw_counters/no_completion_cmds + RdmaReadBytes *uint64 // hw_counters/rdma_read_bytes + RdmaReadRespBytes *uint64 // hw_counters/rdma_read_resp_bytes + RdmaReadWrErr *uint64 // hw_counters/rdma_read_wr_err + RdmaReadWrs *uint64 // hw_counters/rdma_read_wrs + RdmaWriteBytes *uint64 // hw_counters/rdma_read_bytes + RdmaWriteRecvBytes *uint64 // hw_counters/rdma_read_resp_bytes + RdmaWriteWrErr *uint64 // hw_counters/rdma_read_wr_err + RdmaWritedWrs *uint64 // hw_counters/rdma_read_wrs + RecvBytes *uint64 // hw_counters/recv_bytes + RecvWrs *uint64 // hw_counters/recv_wrs + RegMrErr *uint64 // hw_counters/reg_mr_err + RxBytes *uint64 // hw_counters/rx_bytes + RxDrops *uint64 // hw_counters/rx_drops + RxPkts *uint64 // hw_counters/rx_pkts + SendBytes *uint64 // hw_counters/send_bytes + SendWrs *uint64 // hw_counters/send_wrs + SubmittedCmds *uint64 // hw_counters/submitted_cmds + TxBytes *uint64 // hw_counters/tx_bytes + TxPkts *uint64 // hw_counters/tx_pkts +} + +// AmazonEfaPort contains info from files in +// /sys/class/infiniband//ports/ +// for a single port of one Amazon Elastic Fabric Adapter device. 
+type AmazonEfaPort struct { + Name string + Port uint + State string // String representation from /sys/class/infiniband//ports//state + StateID uint // ID from /sys/class/infiniband//ports//state + PhysState string // String representation from /sys/class/infiniband//ports//phys_state + PhysStateID uint // String representation from /sys/class/infiniband//ports//phys_state + Rate uint64 // in bytes/second from /sys/class/infiniband//ports//rate + Counters AmazonEfaCounters +} + +// AmazonEfaDevice contains info from files in /sys/class/infiniband for a +// single Amazon Elastic Fabric Adapter (EFA) device. +type AmazonEfaDevice struct { + Name string + Ports map[uint]AmazonEfaPort +} + +// AmazonEfaClass is a collection of every Amazon Elastic Fabric Adapter (EFA) device in +// /sys/class/infiniband. +// +// The map keys are the names of the Amazon Elastic Fabric Adapter (EFA) devices. +type AmazonEfaClass map[string]AmazonEfaDevice + +// AmazonEfaClass returns info for all Amazon Elastic Fabric Adapter (EFA) devices read from +// /sys/class/infiniband. +func (fs FS) AmazonEfaClass() (AmazonEfaClass, error) { + path := fs.sys.Path(AmazonEfaPath) + + dirs, err := ioutil.ReadDir(path) + if err != nil { + return nil, err + } + + ibc := make(AmazonEfaClass, len(dirs)) + for _, d := range dirs { + device, err := fs.parseAmazonEfaDevice(d.Name()) + if err != nil { + return nil, err + } + + ibc[device.Name] = *device + } + + return ibc, nil +} + +// Parse one AmazonEfa device. 
+func (fs FS) parseAmazonEfaDevice(name string) (*AmazonEfaDevice, error) { + path := fs.sys.Path(AmazonEfaPath, name) + device := AmazonEfaDevice{Name: name} + + portsPath := filepath.Join(path, "ports") + ports, err := ioutil.ReadDir(portsPath) + if err != nil { + return nil, fmt.Errorf("failed to list AmazonEfa ports at %q: %w", portsPath, err) + } + + device.Ports = make(map[uint]AmazonEfaPort, len(ports)) + for _, d := range ports { + port, err := fs.parseAmazonEfaPort(name, d.Name()) + if err != nil { + return nil, err + } + + device.Ports[port.Port] = *port + } + + return &device, nil +} + +// Scans predefined files in /sys/class/infiniband//ports/ +// directory and gets their contents. +func (fs FS) parseAmazonEfaPort(name string, port string) (*AmazonEfaPort, error) { + portNumber, err := strconv.ParseUint(port, 10, 32) + if err != nil { + return nil, fmt.Errorf("failed to convert %s into uint", port) + } + ibp := AmazonEfaPort{Name: name, Port: uint(portNumber)} + + portPath := fs.sys.Path(AmazonEfaPath, name, "ports", port) + content, err := ioutil.ReadFile(filepath.Join(portPath, "state")) + if err != nil { + return nil, err + } + id, name, err := parseState(string(content)) + if err != nil { + return nil, fmt.Errorf("could not parse state file in %q: %w", portPath, err) + } + ibp.State = name + ibp.StateID = id + + content, err = ioutil.ReadFile(filepath.Join(portPath, "phys_state")) + if err != nil { + return nil, err + } + id, name, err = parseState(string(content)) + if err != nil { + return nil, fmt.Errorf("could not parse phys_state file in %q: %w", portPath, err) + } + ibp.PhysState = name + ibp.PhysStateID = id + + content, err = ioutil.ReadFile(filepath.Join(portPath, "rate")) + if err != nil { + return nil, err + } + ibp.Rate, err = parseRate(string(content)) + if err != nil { + return nil, fmt.Errorf("could not parse rate file in %q: %w", portPath, err) + } + + counters, err := parseAmazonEfaCounters(portPath) + if err != nil { + return nil, 
err + } + ibp.Counters = *counters + + return &ibp, nil +} + +// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. +// https://github.com/prometheus/node_exporter/pull/728/files +// +// Note that this function will not read files larger than 128 bytes. +func SysReadFile(file string) (string, error) { + f, err := os.Open(file) + if err != nil { + return "", err + } + defer f.Close() + + // On some machines, hwmon drivers are broken and return EAGAIN. This causes + // Go's ioutil.ReadFile implementation to poll forever. + // + // Since we either want to read data or bail immediately, do the simplest + // possible read using syscall directly. + const sysFileBufferSize = 128 + b := make([]byte, sysFileBufferSize) + n, err := syscall.Read(int(f.Fd()), b) + if err != nil { + return "", err + } + + return string(bytes.TrimSpace(b[:n])), nil +} + +// Parse string to UInt64 +func parseUInt64(value string) (*uint64, error) { + // A base value of zero makes ParseInt infer the correct base using the + // string's prefix, if any. 
+ const base = 0 + v, err := strconv.ParseUint(value, base, 64) + if err != nil { + return nil, err + } + return &v, err +} + +func parseAmazonEfaCounters(portPath string) (*AmazonEfaCounters, error) { + var counters AmazonEfaCounters + + path := filepath.Join(portPath, "hw_counters") + files, err := ioutil.ReadDir(path) + if err != nil { + return nil, err + } + + for _, f := range files { + if !f.Mode().IsRegular() { + continue + } + + name := filepath.Join(path, f.Name()) + value, err := util.SysReadFile(name) + if err != nil { + if os.IsNotExist(err) || os.IsPermission(err) || err.Error() == "operation not supported" || err.Error() == "invalid argument" { + continue + } + return nil, fmt.Errorf("failed to read file %q: %w", name, err) + } + + //const base = 0 + //vp := util.NewValueParser(value) + + switch f.Name() { + + case "lifespan": + counters.Lifespan, err = parseUInt64(value) + case "rdma_read_bytes": + counters.RdmaReadBytes, err = parseUInt64(value) + case "rdma_read_resp_bytes": + counters.RdmaReadRespBytes, err = parseUInt64(value) + case "rdma_read_wr_err": + counters.RdmaReadWrErr, err = parseUInt64(value) + case "rdma_read_wrs": + counters.RdmaReadWrs, err = parseUInt64(value) + case "rdma_write_bytes": + counters.RdmaWriteBytes, err = parseUInt64(value) + case "rdma_write_recv_bytes": + counters.RdmaWriteRecvBytes, err = parseUInt64(value) + case "rdma_write_wr_err": + counters.RdmaWriteWrErr, err = parseUInt64(value) + case "rdma_write_wrs": + counters.RdmaWritedWrs, err = parseUInt64(value) + case "recv_bytes": + counters.RecvBytes, err = parseUInt64(value) + case "recv_wrs": + counters.RecvWrs, err = parseUInt64(value) + case "rx_bytes": + counters.RxBytes, err = parseUInt64(value) + case "rx_drops": + counters.RxDrops, err = parseUInt64(value) + case "rx_pkts": + counters.RxPkts, err = parseUInt64(value) + case "send_bytes": + counters.SendBytes, err = parseUInt64(value) + case "send_wrs": + counters.SendWrs, err = parseUInt64(value) + case 
"tx_bytes": + counters.TxBytes, err = parseUInt64(value) + case "tx_pkts": + counters.TxPkts, err = parseUInt64(value) + + if err != nil { + // Ugly workaround for handling https://github.com/prometheus/node_exporter/issues/966 + // when counters are `N/A (not available)`. + // This was already patched and submitted, see + // https://www.spinics.net/lists/linux-rdma/msg68596.html + // Remove this as soon as the fix lands in the enterprise distros. + if strings.Contains(value, "N/A (no PMA)") { + continue + } + return nil, err + } + } + } + + return &counters, nil +} diff --git a/4.validation_scripts/3.efa-node-exporter/docker-compose.yml b/4.validation_scripts/3.efa-node-exporter/docker-compose.yml new file mode 100644 index 00000000..4960062c --- /dev/null +++ b/4.validation_scripts/3.efa-node-exporter/docker-compose.yml @@ -0,0 +1,18 @@ +version: '2.1' + +services: + + node_exporter_efa: + build: . + container_name: node_exporter_efa + volumes: + - /proc:/host/proc:ro + - /sys:/host/sys:ro + - /:/rootfs:ro + command: + - '--path.procfs=/host/proc' + - '--path.rootfs=/rootfs' + - '--path.sysfs=/host/sys' + - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)' + restart: unless-stopped + network_mode: host From 7cbdb151d7330354a5e7ae64c8d4091c4d44e66b Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Fri, 5 Jan 2024 10:43:14 -0600 Subject: [PATCH 276/648] Update authors and reviewers in EFA Node Exporter readme --- 4.validation_scripts/3.efa-node-exporter/README.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/4.validation_scripts/3.efa-node-exporter/README.md b/4.validation_scripts/3.efa-node-exporter/README.md index 936284f6..6a8198be 100644 --- a/4.validation_scripts/3.efa-node-exporter/README.md +++ b/4.validation_scripts/3.efa-node-exporter/README.md @@ -2,7 +2,7 @@ Scripted fork of the [Prometheus Node Exporter](https://github.com/prometheus/node_exporter) and 
[ProcFS](https://github.com/prometheus/procfs) repositories to export Amazon EFA metrics counters on compatible instances including c5n, hpc6ad, P5, P4. -# How to run the collector +# 1. How to run the collector To create the docker image run: @@ -71,3 +71,9 @@ node_amazonefa_tx_pkts{device="rdmap99s0",port="1"} 1.664925e+06 node_scrape_collector_duration_seconds{collector="amazonefa"} 0.084817395 node_scrape_collector_success{collector="amazonefa"} 1 ``` + +## 2. Authors / Reviewers + +* [A] Pierre-Yves Aquilanti - pierreya@ +* [A] Sean Smith - seaam@ +* [R] Maxime Hugues - maxhaws@ From cf9a53ef22eedbfff7b92611912d75f26fe97d2e Mon Sep 17 00:00:00 2001 From: Pierre-Yves <62559210+perifaws@users.noreply.github.com> Date: Fri, 5 Jan 2024 11:06:00 -0600 Subject: [PATCH 277/648] Update EFA installer and golang version for EFA node exporter --- 4.validation_scripts/3.efa-node-exporter/Dockerfile | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/4.validation_scripts/3.efa-node-exporter/Dockerfile b/4.validation_scripts/3.efa-node-exporter/Dockerfile index 9d4671c1..957318bd 100644 --- a/4.validation_scripts/3.efa-node-exporter/Dockerfile +++ b/4.validation_scripts/3.efa-node-exporter/Dockerfile @@ -1,13 +1,14 @@ FROM ubuntu:20.04 -ARG EFA_INSTALLER_VERSION=latest +ARG EFA_INSTALLER_VERSION=1.30.0 ARG NODE_EXPORTER_VERSION=v1.6.1 ARG PROCFS_EXPORTER_VERSION=v0.12.0 +ARG GOLANG_VERSION=1.21.5 # install go RUN apt update && apt install curl git build-essential -y -RUN curl -OL https://go.dev/dl/go1.21.1.linux-amd64.tar.gz && \ - tar -C /usr/local -xvf go1.21.1.linux-amd64.tar.gz +RUN curl -OL https://go.dev/dl/go${GOLANG_VERSION}.linux-amd64.tar.gz && \ + tar -C /usr/local -xvf go${GOLANG_VERSION}.linux-amd64.tar.gz ENV PATH=$PATH:/usr/local/go/bin # Install EFA @@ -30,4 +31,4 @@ COPY amazon_efa_linux.go /workspace/node_exporter/collector/ WORKDIR /workspace/node_exporter RUN go mod edit --replace=github.com/prometheus/procfs=/workspace/procfs RUN 
go mod tidy && go build -o node_exporter -ENTRYPOINT ["./node_exporter"] \ No newline at end of file +ENTRYPOINT ["./node_exporter"] From 775f7381203bc905952ad57dfa02e178cd518ea6 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Fri, 5 Jan 2024 11:20:21 -0600 Subject: [PATCH 278/648] Rename 4.validation to include observability --- .../0.nccl-tests/0.nccl-tests.Dockerfile | 0 .../0.nccl-tests/1.nccl-tests.sbatch | 0 .../0.nccl-tests/2.nccl-3collectives.sbatch | 0 .../0.nccl-tests/3.nccl-validate.sbatch | 0 .../0.nccl-tests/README.md | 0 .../0.nccl-tests/nccl-test-eks.yaml | 0 .../0.nccl-tests/test_nccl_tests.py | 0 .../1.pytorch-env-validation/0.pytorch-screen.Dockerfile | 0 .../1.pytorch-env-validation/1.torch-screen.sbatch | 0 .../1.pytorch-env-validation/README.md | 0 .../1.pytorch-env-validation/pytorch-screen.py | 0 .../3.efa-node-exporter/Dockerfile | 0 .../3.efa-node-exporter/Makefile | 0 .../3.efa-node-exporter/README.md | 0 .../3.efa-node-exporter/amazon_efa_linux.go | 0 .../3.efa-node-exporter/class_amazon_efa.go | 0 .../3.efa-node-exporter/docker-compose.yml | 0 README.md | 2 +- 18 files changed, 1 insertion(+), 1 deletion(-) rename {4.validation_scripts => 4.validation_and_observability}/0.nccl-tests/0.nccl-tests.Dockerfile (100%) rename {4.validation_scripts => 4.validation_and_observability}/0.nccl-tests/1.nccl-tests.sbatch (100%) rename {4.validation_scripts => 4.validation_and_observability}/0.nccl-tests/2.nccl-3collectives.sbatch (100%) rename {4.validation_scripts => 4.validation_and_observability}/0.nccl-tests/3.nccl-validate.sbatch (100%) rename {4.validation_scripts => 4.validation_and_observability}/0.nccl-tests/README.md (100%) rename {4.validation_scripts => 4.validation_and_observability}/0.nccl-tests/nccl-test-eks.yaml (100%) rename {4.validation_scripts => 4.validation_and_observability}/0.nccl-tests/test_nccl_tests.py (100%) rename {4.validation_scripts => 
4.validation_and_observability}/1.pytorch-env-validation/0.pytorch-screen.Dockerfile (100%) rename {4.validation_scripts => 4.validation_and_observability}/1.pytorch-env-validation/1.torch-screen.sbatch (100%) rename {4.validation_scripts => 4.validation_and_observability}/1.pytorch-env-validation/README.md (100%) rename {4.validation_scripts => 4.validation_and_observability}/1.pytorch-env-validation/pytorch-screen.py (100%) rename {4.validation_scripts => 4.validation_and_observability}/3.efa-node-exporter/Dockerfile (100%) rename {4.validation_scripts => 4.validation_and_observability}/3.efa-node-exporter/Makefile (100%) rename {4.validation_scripts => 4.validation_and_observability}/3.efa-node-exporter/README.md (100%) rename {4.validation_scripts => 4.validation_and_observability}/3.efa-node-exporter/amazon_efa_linux.go (100%) rename {4.validation_scripts => 4.validation_and_observability}/3.efa-node-exporter/class_amazon_efa.go (100%) rename {4.validation_scripts => 4.validation_and_observability}/3.efa-node-exporter/docker-compose.yml (100%) diff --git a/4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile b/4.validation_and_observability/0.nccl-tests/0.nccl-tests.Dockerfile similarity index 100% rename from 4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile rename to 4.validation_and_observability/0.nccl-tests/0.nccl-tests.Dockerfile diff --git a/4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch b/4.validation_and_observability/0.nccl-tests/1.nccl-tests.sbatch similarity index 100% rename from 4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch rename to 4.validation_and_observability/0.nccl-tests/1.nccl-tests.sbatch diff --git a/4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch b/4.validation_and_observability/0.nccl-tests/2.nccl-3collectives.sbatch similarity index 100% rename from 4.validation_scripts/0.nccl-tests/2.nccl-3collectives.sbatch rename to 4.validation_and_observability/0.nccl-tests/2.nccl-3collectives.sbatch diff 
--git a/4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch b/4.validation_and_observability/0.nccl-tests/3.nccl-validate.sbatch similarity index 100% rename from 4.validation_scripts/0.nccl-tests/3.nccl-validate.sbatch rename to 4.validation_and_observability/0.nccl-tests/3.nccl-validate.sbatch diff --git a/4.validation_scripts/0.nccl-tests/README.md b/4.validation_and_observability/0.nccl-tests/README.md similarity index 100% rename from 4.validation_scripts/0.nccl-tests/README.md rename to 4.validation_and_observability/0.nccl-tests/README.md diff --git a/4.validation_scripts/0.nccl-tests/nccl-test-eks.yaml b/4.validation_and_observability/0.nccl-tests/nccl-test-eks.yaml similarity index 100% rename from 4.validation_scripts/0.nccl-tests/nccl-test-eks.yaml rename to 4.validation_and_observability/0.nccl-tests/nccl-test-eks.yaml diff --git a/4.validation_scripts/0.nccl-tests/test_nccl_tests.py b/4.validation_and_observability/0.nccl-tests/test_nccl_tests.py similarity index 100% rename from 4.validation_scripts/0.nccl-tests/test_nccl_tests.py rename to 4.validation_and_observability/0.nccl-tests/test_nccl_tests.py diff --git a/4.validation_scripts/1.pytorch-env-validation/0.pytorch-screen.Dockerfile b/4.validation_and_observability/1.pytorch-env-validation/0.pytorch-screen.Dockerfile similarity index 100% rename from 4.validation_scripts/1.pytorch-env-validation/0.pytorch-screen.Dockerfile rename to 4.validation_and_observability/1.pytorch-env-validation/0.pytorch-screen.Dockerfile diff --git a/4.validation_scripts/1.pytorch-env-validation/1.torch-screen.sbatch b/4.validation_and_observability/1.pytorch-env-validation/1.torch-screen.sbatch similarity index 100% rename from 4.validation_scripts/1.pytorch-env-validation/1.torch-screen.sbatch rename to 4.validation_and_observability/1.pytorch-env-validation/1.torch-screen.sbatch diff --git a/4.validation_scripts/1.pytorch-env-validation/README.md 
b/4.validation_and_observability/1.pytorch-env-validation/README.md similarity index 100% rename from 4.validation_scripts/1.pytorch-env-validation/README.md rename to 4.validation_and_observability/1.pytorch-env-validation/README.md diff --git a/4.validation_scripts/1.pytorch-env-validation/pytorch-screen.py b/4.validation_and_observability/1.pytorch-env-validation/pytorch-screen.py similarity index 100% rename from 4.validation_scripts/1.pytorch-env-validation/pytorch-screen.py rename to 4.validation_and_observability/1.pytorch-env-validation/pytorch-screen.py diff --git a/4.validation_scripts/3.efa-node-exporter/Dockerfile b/4.validation_and_observability/3.efa-node-exporter/Dockerfile similarity index 100% rename from 4.validation_scripts/3.efa-node-exporter/Dockerfile rename to 4.validation_and_observability/3.efa-node-exporter/Dockerfile diff --git a/4.validation_scripts/3.efa-node-exporter/Makefile b/4.validation_and_observability/3.efa-node-exporter/Makefile similarity index 100% rename from 4.validation_scripts/3.efa-node-exporter/Makefile rename to 4.validation_and_observability/3.efa-node-exporter/Makefile diff --git a/4.validation_scripts/3.efa-node-exporter/README.md b/4.validation_and_observability/3.efa-node-exporter/README.md similarity index 100% rename from 4.validation_scripts/3.efa-node-exporter/README.md rename to 4.validation_and_observability/3.efa-node-exporter/README.md diff --git a/4.validation_scripts/3.efa-node-exporter/amazon_efa_linux.go b/4.validation_and_observability/3.efa-node-exporter/amazon_efa_linux.go similarity index 100% rename from 4.validation_scripts/3.efa-node-exporter/amazon_efa_linux.go rename to 4.validation_and_observability/3.efa-node-exporter/amazon_efa_linux.go diff --git a/4.validation_scripts/3.efa-node-exporter/class_amazon_efa.go b/4.validation_and_observability/3.efa-node-exporter/class_amazon_efa.go similarity index 100% rename from 4.validation_scripts/3.efa-node-exporter/class_amazon_efa.go rename to 
4.validation_and_observability/3.efa-node-exporter/class_amazon_efa.go diff --git a/4.validation_scripts/3.efa-node-exporter/docker-compose.yml b/4.validation_and_observability/3.efa-node-exporter/docker-compose.yml similarity index 100% rename from 4.validation_scripts/3.efa-node-exporter/docker-compose.yml rename to 4.validation_and_observability/3.efa-node-exporter/docker-compose.yml diff --git a/README.md b/README.md index 366342f7..14cda0e2 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ reference-architectures/ |-- 1.architectures/ # CloudFormation templates for reference arch |-- 2.ami_and_containers/ # Scripts to create AMIs and container images |-- 3.test_cases/ # Reference test cases and/or benchmark scripts -|-- 4.validation_scripts/ # Tools to measure performance or troubleshoot +|-- 4.validation_observability/ # Tools to measure performance or troubleshoot `-- ... ``` From 0be703d471c9c07572daae5647a4f86bec35207d Mon Sep 17 00:00:00 2001 From: ruhanprasad Date: Tue, 9 Jan 2024 22:33:39 +0000 Subject: [PATCH 279/648] Fixing links and wording in READMEs --- 3.test_cases/12.SM-dataparallel-FSDP/README.md | 4 ++-- .../scripts/__pycache__/utils.cpython-310.pyc | Bin 1521 -> 0 bytes .../13.SM-dataparallel-deepspeed/README.md | 4 ++-- .../code/__pycache__/utils.cpython-310.pyc | Bin 1587 -> 0 bytes 4 files changed, 4 insertions(+), 4 deletions(-) delete mode 100644 3.test_cases/12.SM-dataparallel-FSDP/scripts/__pycache__/utils.cpython-310.pyc delete mode 100644 3.test_cases/13.SM-dataparallel-deepspeed/code/__pycache__/utils.cpython-310.pyc diff --git a/3.test_cases/12.SM-dataparallel-FSDP/README.md b/3.test_cases/12.SM-dataparallel-FSDP/README.md index 116a12f3..2199fd5b 100644 --- a/3.test_cases/12.SM-dataparallel-FSDP/README.md +++ b/3.test_cases/12.SM-dataparallel-FSDP/README.md @@ -1,10 +1,10 @@ # How to Use SageMaker Distributed Data Parallel Library (SMDDP) with PyTorch FSDP ## What is SMDDP? 
-The SMDDP library provides fast GPU collective communication algorithms on P4d/P4de instance types and serves as a drop-in replacement for the Nvidia Collective Communications Library (NCCL). Specifically, SMDDP implements an optimized AllGather communication routine, which is the main source of GPU communication overhead in sharded data parallel training jobs. With just two lines of code change, you can enable the SMDDP Library's optimized AllGather algorithm in your PyTorch FSDP training jobs and speed up training by up to 20% compared to NCCL! This examples shows how you can use SMDDP when training the Falcon model with FSDP. +The [SMDDP](https://docs.aws.amazon.com/sagemaker/latest/dg/data-parallel.html) library provides fast GPU collective communication algorithms on P4d/P4de instance types and serves as a drop-in replacement for the Nvidia Collective Communications Library ([NCCL](https://developer.nvidia.com/nccl)). Specifically, SMDDP implements an optimized AllGather communication routine, which is the main source of GPU communication overhead in sharded data parallel training jobs. With just two lines of code change, you can enable the SMDDP Library's optimized AllGather algorithm in your [PyTorch FSDP](https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html) training jobs and speed up training by up to 20% compared to NCCL! This examples shows how you can use SMDDP when training the [Falcon](https://falconllm.tii.ae/falcon-models.html) model with FSDP. ## 0. Prerequisites -You will need a slurm cluster with an FSx for Lustre file system. See the sagemaker-hyperpod section in the [1.architectures](https://github.com/ruhanprasad/awsome-distributed-training/tree/main/1.architectures) folder for setup instructions. +You will need a slurm cluster with a shared parallel filesystem such as [Amazon FSx for Lustre](https://docs.aws.amazon.com/fsx/latest/LustreGuide/getting-started.html). 
See the sagemaker-hyperpod section in the [1.architectures/5.sagemaker-hyperpod](https://github.com/aws-samples/awsome-distributed-training/tree/main/1.architectures/5.sagemaker-hyperpod) folder for setup instructions. ### Required Dependencies of SMDDP Library * Python==3.10 diff --git a/3.test_cases/12.SM-dataparallel-FSDP/scripts/__pycache__/utils.cpython-310.pyc b/3.test_cases/12.SM-dataparallel-FSDP/scripts/__pycache__/utils.cpython-310.pyc deleted file mode 100644 index b174ca46ac966b10e08167ab0697767217579792..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1521 zcmZWp&2J+$6t_JenM{+m>O!@Ai3n*ASd3ljg5ublV~P%rR2(>B#&Yu@-hzwi0Ik=<^LV156`&pe0_`pYp77Xju= zc=U513^6Rw)V+zo8JQC7&Ytixf9ls~EP`pkd=~tQrXdSi3*Ie;$EY9u4Ia>GAKQ3D z4Ly;RaRuDFjL&H)Ou|5#%%l+1NHuFY?c22lZ%7Uu!J`=vgO+$nmR+X0}LlddY2Eq43+{aL!v)Qs;qbN9Xx|$o?;jgK<<5UPb zHolNbusWy;oM5*xd63IaZM6|hZ@ua!;I#32%iY<#kfQ#2wz1y`{zU3AM7rU_x&CQb zYOeIKP*hW<$|1ebGUEqqQ`bSAP@10&pA8JxCV_5pJv=-djK4i_UJI(I5L_G_k4GVGQQfX2n(*sHH=V&uR0aE#u)v8Sm+}k{o=teh2#J6he*G;06n*V zgvaUhTbG}H%|*dgFXLt*SDf;P+e!@yLv5=Fgyh{3+pdsnu8;dCWYcP%sA_}ZBXZB+eV)A9HtS* zRFSxm;DQa#ww>a(8f$6BzuDm-3&OkiSsa9}x&oV45F+ z2qLH=<+Rc!C_{>TprR%&W6y^wY0@(Fd<6Qe%sd~fS(BGJC0`Sfi1alPsmy)}%elzJ z>@_J1K~KmszlOh%&62wODYyKajti+ldSA#bACzSRw5;ls;`>&Kt2PQE=xP`xyw&gS6zCLJ+u?S`*Cx| z;1+5t8^#`h!k}M77r}xqX!7sX!~f%OYr9#-E@Q0ajbzN_j5VDY6!HaQKMh<>JNf{Q z(;pzgJKZDrcbx!13xiBXGFnits}O*jfsPqWkF)^Bk_&nfT!bTFK1O3OhFGV+Ca==l z9eQskv(ZS#)W%~Bwp4^*Jp&ybGtecsx_q3C`8I%?i8kLsim5ptwp%5!GNo`4;Ctzc zc4(NEy{e(q+)U`MXNnu^?Wgu~`P7M$ZCik!+v?oI@AAHe%acu~U+9k@mt~@lU~#dp zMraHuSPicY(O7I6t86)U2foIAi$h1dLjKIvjij+*U4GX``a@8Zhm*_QugeGCYfrH| z)l4Cm)2Tbj?ffww<=+}N`QIt-U8Xy#wB%O$xF#3#5F#-j1IdD%1~hq_h0_{D35D-% z5Ke0r>QBIIc`%{zjxXocUYEQ1o}jTDAVRG9)zrowH-yX;ka%mhoo?Xa@BnIjsokUd q=KIG_kYg44IQCNWW$Rld`(mo`FR?`pbS=rk0xA~1prD6gk^cwqv7uf7 From 1a578c00ef02bb284676576d727dd0e487d10ed4 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Thu, 11 Jan 2024 10:29:37 -0800 Subject: [PATCH 280/648] 
Fix Docker & new Pyxis Version Signed-off-by: Sean Smith --- .../base-config/utils/install_docker.sh | 36 ++++++++++++------- .../base-config/utils/install_enroot_pyxis.sh | 2 +- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh index 7dc15548..70da80a4 100755 --- a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh @@ -1,20 +1,30 @@ #!/bin/bash -set -e +set -exo pipefail -sudo apt-get -y update -sudo apt-get -y install ca-certificates curl gnupg -sudo install -m 0755 -d /etc/apt/keyrings -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg -sudo chmod a+r /etc/apt/keyrings/docker.gpg +echo " +################################### +# BEGIN: install docker +################################### +" +apt-get -y update +apt-get -y install \ + ca-certificates \ + curl \ + gnupg \ + lsb-release +mkdir -m 0755 -p /etc/apt/keyrings +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg echo \ -"deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ -"$(. 
/etc/os-release && echo "$VERSION_CODENAME")" stable" | \ -sudo tee /etc/apt/sources.list.d/docker.list > /dev/null -sudo apt-get -y update - -sudo apt-get -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin +"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ +$(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null +apt-get -y update +apt-get -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin +chgrp docker $(which docker) +chmod g+s $(which docker) +systemctl enable docker.service +systemctl start docker.service # install nvidia docker toolkit curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \ @@ -24,4 +34,4 @@ curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dear sudo apt-get install -y nvidia-container-toolkit # add user to docker group -sudo usermod -aG docker ${USER} +sudo usermod -aG docker ubuntu diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_enroot_pyxis.sh b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_enroot_pyxis.sh index 2d2ef61b..836ada4a 100755 --- a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_enroot_pyxis.sh +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_enroot_pyxis.sh @@ -24,7 +24,7 @@ fi rm -fr $SLURM_INSTALL_DIR/pyxis mkdir -p $SLURM_INSTALL_DIR/enroot/ $SLURM_INSTALL_DIR/pyxis/ $PYXIS_TMP_DIR -PYXIS_VERSION=v0.15.0 +PYXIS_VERSION=v0.16.1 ENROOT_VERSION=3.4.1 arch=$(dpkg --print-architecture) cd $PYXIS_TMP_DIR From 24ed5988f2ec7529bf50ee2893fa853e7fdaa39a Mon Sep 17 00:00:00 2001 From: ruhanprasad Date: Fri, 12 Jan 2024 00:25:05 +0000 Subject: [PATCH 281/648] pinning datasets in requirements.txt --- 
3.test_cases/12.SM-dataparallel-FSDP/scripts/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/12.SM-dataparallel-FSDP/scripts/requirements.txt b/3.test_cases/12.SM-dataparallel-FSDP/scripts/requirements.txt index 23795c2e..8fef9c29 100755 --- a/3.test_cases/12.SM-dataparallel-FSDP/scripts/requirements.txt +++ b/3.test_cases/12.SM-dataparallel-FSDP/scripts/requirements.txt @@ -1,5 +1,5 @@ transformers==4.33.0 -datasets +datasets==1.16.0 accelerate>=0.21 bitsandbytes einops From 1821202c1e21a63dd1e684c883815d2117bd1922 Mon Sep 17 00:00:00 2001 From: johnbensnyder Date: Sat, 13 Jan 2024 01:08:09 +0000 Subject: [PATCH 282/648] Updated FSDP for G5 --- .../10.FSDP/1.distributed-training.sbatch | 3 ++- 3.test_cases/10.FSDP/README.md | 18 +++++++++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/3.test_cases/10.FSDP/1.distributed-training.sbatch b/3.test_cases/10.FSDP/1.distributed-training.sbatch index 5ed8ece9..5c7a58c9 100755 --- a/3.test_cases/10.FSDP/1.distributed-training.sbatch +++ b/3.test_cases/10.FSDP/1.distributed-training.sbatch @@ -18,6 +18,7 @@ set -ex; ########################### ## Plenty of EFA level variables +## Comment out for non-efa instances (G5, G4d, P3) export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d export FI_EFA_FORK_SAFE=1 export FI_LOG_LEVEL=1 @@ -33,7 +34,7 @@ declare -a TORCHRUN_ARGS=( --nnodes=$SLURM_JOB_NUM_NODES \ --rdzv_id=$SLURM_JOB_ID \ --rdzv_backend=c10d \ - --rdzv_endpoint=$(hostname):0 \ + --rdzv_endpoint=$(hostname):29500 \ ) export TORCHRUN=./pt_fsdp/bin/torchrun diff --git a/3.test_cases/10.FSDP/README.md b/3.test_cases/10.FSDP/README.md index 4983cc84..a9743b11 100644 --- a/3.test_cases/10.FSDP/README.md +++ b/3.test_cases/10.FSDP/README.md @@ -36,7 +36,23 @@ If you'd like to instead use your own dataset, you can do so by [formatting it a ## 3. Launch Training -The script to launch a Slurm batch training job can be found in `1.distributed_training.sbatch`. 
You can adjust the number of training nodes by modifying `#SBATCH --nodes=4`. You can also adjust the training parameters in `TRAINING_ARGS`. Additional parameters can be found in `model/arguments.py`. Note that we use the same directory for both `--checkpoint_dir` and `--resume_from_checkpoint`. If there are multiple checkpoints, `--resume_from_checkpoint` will automatically select the most recent one. This way if our training is interupted for any reason, it will automatically pick up the most recent checkpoint. +The script to launch a Slurm batch training job can be found in `1.distributed_training.sbatch`. You can adjust the number of training nodes by modifying `#SBATCH --nodes=4`. + +If you are using a non-EFA enable instance, such as G5, comment out lines 21-25. + +``` +## Plenty of EFA level variables +## Comment out for non-efa instances (G5, G4d, P3) +# export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d +# export FI_EFA_FORK_SAFE=1 +# export FI_LOG_LEVEL=1 +# export FI_PROVIDER=efa +# export NCCL_DEBUG=INFO +``` + +Also, make sure `--nproc_per_node` to match the number of GPUs on your instance type (8 for P4d/P5, 4 for G5.12xlarge, 1 for G5.xlarge). + +You can also adjust the training parameters in `TRAINING_ARGS` (for example, to train Llama 2 70b). Additional parameters can be found in `model/arguments.py`. Note that we use the same directory for both `--checkpoint_dir` and `--resume_from_checkpoint`. If there are multiple checkpoints, `--resume_from_checkpoint` will automatically select the most recent one. This way if our training is interupted for any reason, it will automatically pick up the most recent checkpoint. 
``` declare -a TRAINING_ARGS=( From 6fc69961e3943d47dd9387b83ce7f19c24e84341 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Mon, 15 Jan 2024 11:32:28 +0800 Subject: [PATCH 283/648] nvidia-pytorch docker template: remove unused IB libraries --- 2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile index d4de6ee3..a877055f 100644 --- a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile +++ b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile @@ -36,6 +36,7 @@ RUN apt-get remove -y --allow-change-held-packages \ # complains about missing libuc?.so. RUN rm -rf /opt/hpcx/ompi \ && rm -rf /usr/local/mpi \ + && rm -rf /opt/hpcx/nccl_rdma_sharp_plugin && ldconfig ENV OPAL_PREFIX= RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \ From b37c1e69826bba6b30b486ef94995587d2f362d0 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Mon, 15 Jan 2024 11:38:58 +0800 Subject: [PATCH 284/648] Fix missing trailing backslash --- 2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile index a877055f..ac2c18f7 100644 --- a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile +++ b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile @@ -36,7 +36,7 @@ RUN apt-get remove -y --allow-change-held-packages \ # complains about missing libuc?.so. 
RUN rm -rf /opt/hpcx/ompi \ && rm -rf /usr/local/mpi \ - && rm -rf /opt/hpcx/nccl_rdma_sharp_plugin + && rm -rf /opt/hpcx/nccl_rdma_sharp_plugin \ && ldconfig ENV OPAL_PREFIX= RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \ From 52e08498191ffdcdf06ee79974bfa83a8a2add08 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Mon, 15 Jan 2024 14:53:14 +0800 Subject: [PATCH 285/648] Update to nemo-launcher 23.11 - update Dockerfile - backport env vars (in conf/config.yaml) from NVidia upstream repo - .sh do not repeat the default MBS (micro batch size) from .yaml - default activation checkpointing no longer OOM on small nodes - fix llama2-7b to work out-of-the-box on 2x p4de.24xlarge (80GB/GPU) --- .../0.NemoMegatron-aws-optimized.Dockerfile | 25 +++++++++---------- .../2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh | 9 ------- .../3.bmk-pretrain-gpt3-40b.sh | 10 +++----- .../4.bmk-pretrain-gpt3-175b.sh | 9 ++----- .../5.bmk-pretrain-llama-7b.sh | 11 -------- .../6.bmk-pretrain-llama-70b.sh | 6 ----- 3.test_cases/2.nemo-launcher/README.md | 8 +++--- .../2.nemo-launcher/conf.template/config.yaml | 3 +++ 8 files changed, 24 insertions(+), 57 deletions(-) mode change 100644 => 100755 3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh mode change 100644 => 100755 3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh diff --git a/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile b/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile index cb6daafd..1e3b347f 100644 --- a/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile +++ b/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile @@ -3,11 +3,11 @@ # DOCKER_BUILDKIT=1 docker build --progress plain -t aws-nemo-megatron:latest . 
-FROM nvcr.io/ea-bignlp/ga-participants/nemofw-training:23.08.03 +FROM nvcr.io/ea-bignlp/ga-participants/nemofw-training:23.11 ENV DEBIAN_FRONTEND=noninteractive ENV EFA_INSTALLER_VERSION=latest -ENV AWS_OFI_NCCL_VERSION=1.7.3-aws +ENV AWS_OFI_NCCL_VERSION=1.7.4-aws ENV NCCL_TESTS_VERSION=master RUN apt-get update -y @@ -40,15 +40,15 @@ RUN apt-get install -y --allow-unauthenticated \ # Uncomment below stanza to install the latest NCCL # Require efa-installer>=1.29.0 for nccl-2.19.0 to avoid libfabric gave NCCL error. -#ENV NCCL_VERSION=2.19.3-1 -#RUN apt-get remove -y libnccl2 libnccl-dev \ -# && cd /tmp \ -# && git clone https://github.com/NVIDIA/nccl.git -b v${NCCL_VERSION} \ -# && cd nccl \ -# && make -j src.build BUILDDIR=/usr/local \ -# # nvcc to target p5 and p4 instances -# NVCC_GENCODE="-gencode=arch=compute_90,code=sm_90 -gencode=arch=compute_80,code=sm_80" \ -# && rm -rf /tmp/nccl +ENV NCCL_VERSION=2.19.4-1 +RUN apt-get remove -y libnccl2 libnccl-dev \ + && cd /tmp \ + && git clone https://github.com/NVIDIA/nccl.git -b v${NCCL_VERSION} \ + && cd nccl \ + && make -j src.build BUILDDIR=/usr/local \ + # nvcc to target p5 and p4 instances + NVCC_GENCODE="-gencode=arch=compute_90,code=sm_90 -gencode=arch=compute_80,code=sm_80" \ + && rm -rf /tmp/nccl # EFA RUN apt-get update && \ @@ -87,8 +87,7 @@ ENV OMPI_MCA_pml=^cm,ucx \ OMPI_MCA_btl=tcp,self \ OMPI_MCA_btl_tcp_if_exclude=lo,docker0 \ OPAL_PREFIX=/opt/amazon/openmpi \ - NCCL_SOCKET_IFNAME=^docker,lo \ - FI_EFA_USE_HUGE_PAGE=0 + NCCL_SOCKET_IFNAME=^docker,lo # NCCL-tests RUN git clone https://github.com/NVIDIA/nccl-tests.git /opt/nccl-tests \ diff --git a/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh b/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh index 9f6249c1..28149c73 100755 --- a/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh +++ b/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh @@ -21,15 +21,6 @@ export MAX_STEPS=30 export MBS=2 # setting for A100 80GB (p4de, p5), 
reduce to 1 for A100 40GB (p4d) declare -a MODEL_ARGS=( training.model.micro_batch_size=${MBS} - - # When node_count < 8, needs full activations checkpointing. These're settings found on - # Nemo repo's Jenkin script. - # - # Below settings is similar to 22.09, except that 22.09 funnily didn't OOM with - # activations_checkpoint_num_layers=0. - training.model.activations_checkpoint_granularity='full' - training.model.activations_checkpoint_method='block' - training.model.activations_checkpoint_num_layers=1 ) diff --git a/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh b/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh index c4b8bea0..46a10039 100755 --- a/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh +++ b/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh @@ -18,16 +18,12 @@ export MODEL_SIZE=40b export NUM_NODES=2 export RUNTIME=4h export MAX_STEPS=5 -export MBS=1 # setting for A100 80GB (p4de, p5), reduce to 1 for A100 40GB (p4d) +export MBS=1 # Down from the default value (=4) to run on 2x p4de (16x A100-80GB GPUs) declare -a MODEL_ARGS=( training.model.micro_batch_size=${MBS} - # Activation checkpointing - training.model.activations_checkpoint_granularity='full' - training.model.activations_checkpoint_method='block' - training.model.activations_checkpoint_num_layers=1 - - # Not applicable for A100 + # TE is not applicable for A100 (p4d or p4de). + # On p5 instances, comment or remove below stanza. 
training.model.transformer_engine=False training.model.ub_tp_comm_overlap=False ) diff --git a/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh b/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh index 58ac2c4d..c1deb027 100755 --- a/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh +++ b/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh @@ -18,16 +18,11 @@ export MODEL_SIZE=175b export NUM_NODES=16 export RUNTIME=4h export MAX_STEPS=5 -export MBS=2 declare -a MODEL_ARGS=( training.model.micro_batch_size=${MBS} - # Activation checkpointing - training.model.activations_checkpoint_granularity='full' - training.model.activations_checkpoint_method='block' - training.model.activations_checkpoint_num_layers=1 - - # Not applicable for A100 + # TE is not applicable for A100 (p4d or p4de). + # On p5 instances, comment or remove below stanza. training.model.transformer_engine=False training.model.ub_tp_comm_overlap=False ) diff --git a/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh b/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh old mode 100644 new mode 100755 index 0e307998..ef359484 --- a/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh +++ b/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh @@ -18,19 +18,8 @@ export MODEL_SIZE=llama2_7b export NUM_NODES=2 export RUNTIME=4h export MAX_STEPS=30 -export MBS=2 # setting for A100 80GB (p4de, p5), reduce to 1 for A100 40GB (p4d) declare -a MODEL_ARGS=( - training.model.micro_batch_size=${MBS} - training.model.tokenizer.model=${TARGET_PATH}/data/llama2/tokenizer.model - # When node_count < 8, needs full activations checkpointing. These're settings found on - # Nemo repo's Jenkin script. - # - # Below settings is similar to 22.09, except that 22.09 funnily didn't OOM with - # activations_checkpoint_num_layers=0. 
- training.model.activations_checkpoint_granularity='full' - training.model.activations_checkpoint_method='block' - training.model.activations_checkpoint_num_layers=1 ) diff --git a/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh b/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh old mode 100644 new mode 100755 index 5ee47452..3e34ba2b --- a/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh +++ b/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh @@ -18,7 +18,6 @@ export MODEL_SIZE=llama2_70b export NUM_NODES=16 export TIME_LIMIT="7-00:00:00" export MAX_STEPS=100 -export MBS=1 declare -a MODEL_ARGS=( training.model.micro_batch_size=${MBS} @@ -31,11 +30,6 @@ declare -a MODEL_ARGS=( training.model.tokenizer.model=${TARGET_PATH}/data/llama2/tokenizer.model - ## Activation checkpointing - #training.model.activations_checkpoint_granularity='full' - #training.model.activations_checkpoint_method='block' - #training.model.activations_checkpoint_num_layers=1 - # ## Not applicable for A100 #training.model.transformer_engine=False #training.model.ub_tp_comm_overlap=False diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index 6d27230d..6d5d705e 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -19,13 +19,13 @@ Table of contents: The following pre-requisites are needed to run this example: - You are using p4de.24xlarge instances with A100 80GB or newer, with at least 80GB of memory per GPU. -- You have access to the base image [`nemofw-training`](https://registry.ngc.nvidia.com/orgs/ea-bignlp/containers/bignlp-training) is available through NVIDIA's open-beta [here](https://developer.nvidia.com/nemo-framework-open-beta). +- You have access to the base image [NeMo Framework Training](https://registry.ngc.nvidia.com/orgs/ea-bignlp/teams/ga-participants/containers/nemofw-training). 
To gain access to this image, go to [Get Access to NeMo Framework](https://developer.nvidia.com/nemo-framework) to enroll to organization/team `ea-bignlp/ga-participant`. - Docker, [Enroot](https://github.com/NVIDIA/enroot) and [Pixys](https://github.com/NVIDIA/pyxis) installed on the cluster and available on all nodes. It is assumed you are using a Custom AMI ([example](../../2.ami_and_containers/1.amazon_machine_image)) You will need to setup the following environment variables before running the scripts. : ```bash -export NEMO_VERSION=23.08.03 +export NEMO_VERSION=23.11 export REPO=aws-nemo-megatron export TAG=$NEMO_VERSION export TARGET_PATH=/fsx/nemo-launcher-$NEMO_VERSION # must be a shared filesystem @@ -76,10 +76,10 @@ cd $TARGET_PATH enroot start --mount $TARGET_PATH:/workspace/mount_dir \ --env NVIDIA_VISIBLE_DEVICES=void \ $ENROOT_IMAGE \ - cp -a /opt/NeMo-Megatron-Launcher/launcher_scripts /opt/NeMo-Megatron-Launcher/auto_configurator /opt/nemo-data-curator /opt/nemo-rlhf /workspace/mount_dir/ + cp -a /opt/NeMo-Megatron-Launcher/launcher_scripts /opt/NeMo-Megatron-Launcher/auto_configurator /opt/nemo-data-curator /workspace/mount_dir/ ``` -The `NVIDIA_VISIBLE_DEVICES` variable is set to void to prevent the process to check for the Nvidia driver presence (since we don't need GPUs here). +The `NVIDIA_VISIBLE_DEVICES` variable is set to `void` to prevent the process to check for the Nvidia driver presence (since we don't need GPUs here). 3. Install the NemoMegatron requirements in a Python VirtualEnv by running the set of commands below. 
diff --git a/3.test_cases/2.nemo-launcher/conf.template/config.yaml b/3.test_cases/2.nemo-launcher/conf.template/config.yaml index d85985cb..8c55a56f 100644 --- a/3.test_cases/2.nemo-launcher/conf.template/config.yaml +++ b/3.test_cases/2.nemo-launcher/conf.template/config.yaml @@ -40,6 +40,9 @@ env_vars: TRANSFORMER_OFFLINE: 1 NCCL_LAUNCH_MODE: parallel NCCL_ASYNC_ERROR_HANDLING: 1 + NCCL_NVLS_ENABLE: 0 + NCCL_AVOID_RECORD_STREAMS: 1 # torch<2.2 + TORCH_NCCL_AVOID_RECORD_STREAMS: 1 # torch>=2.2 # GPU Mapping numa_mapping: From 2332a9b184f7a3a901568a1876c728b4d2fe3229 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Mon, 15 Jan 2024 18:26:35 +0800 Subject: [PATCH 286/648] Transformer engine (fp8), and other fixes - backport upstream nemo-launcher's rlhf setting to template config.yaml - update transformer engine (fp8) flags - llama2-7b.sh: do not re-declare default settings - llama2 examples: reduce number of steps to facilitate quick runs - keep comments and actual code in-sync. --- .../1.bmk-pretrain-gpt3-126m.sh | 2 +- .../2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh | 6 +++++- .../3.bmk-pretrain-gpt3-40b.sh | 7 +++---- .../4.bmk-pretrain-gpt3-175b.sh | 9 +++------ .../5.bmk-pretrain-llama-7b.sh | 6 +++++- .../6.bmk-pretrain-llama-70b.sh | 19 ++++++------------- .../2.nemo-launcher/conf.template/config.yaml | 2 ++ 7 files changed, 25 insertions(+), 26 deletions(-) diff --git a/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh b/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh index 922bf886..2e2a7ba5 100755 --- a/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh +++ b/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh @@ -12,7 +12,7 @@ set -exo pipefail # 000: Modify this section to define pre-training configuration: model size, # number of nodes, max. pre-training steps, job's max. runtime. ################################################################################ -## Pre-train gpt3-126m on 2 nodes for 40 steps. 
+## Pre-train gpt3-126m on 2 nodes for 100 steps. export MODEL_SIZE=126m export NUM_NODES=2 export RUNTIME=30m diff --git a/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh b/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh index 28149c73..9c66c1a0 100755 --- a/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh +++ b/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh @@ -12,7 +12,7 @@ set -exo pipefail # 000: Modify this section to define pre-training configuration: model size, # number of nodes, max. pre-training steps, job's max. runtime. ################################################################################ -## Pre-train gpt3-5b on 2 nodes for 5 steps +## Pre-train gpt3-5b on 2 nodes for 30 steps export MODEL=gpt3 export MODEL_SIZE=5b export NUM_NODES=2 @@ -21,6 +21,10 @@ export MAX_STEPS=30 export MBS=2 # setting for A100 80GB (p4de, p5), reduce to 1 for A100 40GB (p4d) declare -a MODEL_ARGS=( training.model.micro_batch_size=${MBS} + + ## Uncomment below to enable fp8 training (Transformers Engine) on p5 instances (H100 GPUs) + #training.model.transformer_engine=True + #training.model.fp8=True ) diff --git a/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh b/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh index 46a10039..5caaf60c 100755 --- a/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh +++ b/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh @@ -22,10 +22,9 @@ export MBS=1 # Down from the default value (=4) to run on 2x p4de (16x A100-80 declare -a MODEL_ARGS=( training.model.micro_batch_size=${MBS} - # TE is not applicable for A100 (p4d or p4de). - # On p5 instances, comment or remove below stanza. 
- training.model.transformer_engine=False - training.model.ub_tp_comm_overlap=False + ## Uncomment below to enable fp8 training (Transformers Engine) on p5 instances (H100 GPUs) + #training.model.transformer_engine=True + #training.model.fp8=True ) diff --git a/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh b/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh index c1deb027..4ea1c300 100755 --- a/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh +++ b/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh @@ -19,12 +19,9 @@ export NUM_NODES=16 export RUNTIME=4h export MAX_STEPS=5 declare -a MODEL_ARGS=( - training.model.micro_batch_size=${MBS} - - # TE is not applicable for A100 (p4d or p4de). - # On p5 instances, comment or remove below stanza. - training.model.transformer_engine=False - training.model.ub_tp_comm_overlap=False + ## Uncomment below to enable fp8 training (Transformers Engine) on p5 instances (H100 GPUs) + #training.model.transformer_engine=True + #training.model.fp8=True ) diff --git a/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh b/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh index ef359484..591ccb6f 100755 --- a/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh +++ b/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh @@ -12,7 +12,7 @@ set -exo pipefail # 000: Modify this section to define pre-training configuration: model size, # number of nodes, max. pre-training steps, job's max. runtime. 
################################################################################ -## Pre-train llama2-7b on 2 nodes for 5 steps +## Pre-train llama2-7b on 2 nodes for 30 steps export MODEL=llama export MODEL_SIZE=llama2_7b export NUM_NODES=2 @@ -20,6 +20,10 @@ export RUNTIME=4h export MAX_STEPS=30 declare -a MODEL_ARGS=( training.model.tokenizer.model=${TARGET_PATH}/data/llama2/tokenizer.model + + ## Uncomment below to enable fp8 training (Transformers Engine) on p5 instances (H100 GPUs) + #training.model.transformer_engine=True + #training.model.fp8=True ) diff --git a/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh b/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh index 3e34ba2b..ac5cef5e 100755 --- a/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh +++ b/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh @@ -12,27 +12,20 @@ set -exo pipefail # 000: Modify this section to define pre-training configuration: model size, # number of nodes, max. pre-training steps, job's max. runtime. 
################################################################################ -## Pre-train llama2-7b on 2 nodes for 5 steps +## Pre-train llama2-7b on 16 nodes for 5 steps export MODEL=llama export MODEL_SIZE=llama2_70b export NUM_NODES=16 export TIME_LIMIT="7-00:00:00" -export MAX_STEPS=100 +export MAX_STEPS=5 declare -a MODEL_ARGS=( - training.model.micro_batch_size=${MBS} - training.model.tensor_model_parallel_size=4 - training.model.pipeline_model_parallel_size=4 - training.model.virtual_pipeline_model_parallel_size=20 - training.model.overlap_p2p_comm=True - training.model.batch_p2p_comm=False - training.model.gc_interval=0 - training.model.tokenizer.model=${TARGET_PATH}/data/llama2/tokenizer.model + training.model.gc_interval=0 - ## Not applicable for A100 - #training.model.transformer_engine=False - #training.model.ub_tp_comm_overlap=False + ## Uncomment below to enable fp8 training (Transformers Engine) on p5 instances (H100 GPUs) + #training.model.transformer_engine=True + #training.model.fp8=True ) diff --git a/3.test_cases/2.nemo-launcher/conf.template/config.yaml b/3.test_cases/2.nemo-launcher/conf.template/config.yaml index 8c55a56f..5f001a5a 100644 --- a/3.test_cases/2.nemo-launcher/conf.template/config.yaml +++ b/3.test_cases/2.nemo-launcher/conf.template/config.yaml @@ -64,3 +64,5 @@ ia3_learning_config: ${hydra:runtime.choices.ia3_learning} evaluation_config: ${hydra:runtime.choices.evaluation} conversion_config: ${hydra:runtime.choices.conversion} export_config: ${hydra:runtime.choices.export} +rlhf_rm_config: ${hydra:runtime.choices.rlhf_rm} +rlhf_ppo_config: ${hydra:runtime.choices.rlhf_ppo} From 3972704012020ad1acf531f8a719aa135a5fa10d Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 16 Jan 2024 12:54:17 +0800 Subject: [PATCH 287/648] Improve benchmark mode - enable bmk mode with BMK_MODE=1 (was UNIQUE_OUTPUT_DIR) - remove job dependency when running in benchmark mode --- .../2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh | 9 ++++++--- 
.../2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh | 9 ++++++--- .../2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh | 9 ++++++--- .../2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh | 9 ++++++--- .../2.nemo-launcher/5.bmk-pretrain-llama-7b.sh | 9 ++++++--- .../2.nemo-launcher/6.bmk-pretrain-llama-70b.sh | 11 ++++++----- 3.test_cases/2.nemo-launcher/README.md | 2 +- 7 files changed, 37 insertions(+), 21 deletions(-) diff --git a/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh b/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh index 2e2a7ba5..0002d575 100755 --- a/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh +++ b/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh @@ -45,13 +45,16 @@ CONT_RESULT_DIR=${WORKSPACE_CONT}/results CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe # Dev/test feature (off by default) to force each pre-training run outputs to a separate directory. -: "${UNIQUE_OUTPUT_DIR:=0}" -if [[ ${UNIQUE_OUTPUT_DIR} -eq 1 ]]; then +: "${BMK_MODE:=0}" +if [[ ${BMK_MODE} -eq 1 ]]; then # For debugging: each run has its own output dir. TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} - BMK_ARGS+=(base_results_dir=${CONT_RESULT_DIR}) + BMK_ARGS+=( + base_results_dir=${CONT_RESULT_DIR} + training.run.dependency=null + ) echo " #################### diff --git a/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh b/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh index 9c66c1a0..f6ab6489 100755 --- a/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh +++ b/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh @@ -53,13 +53,16 @@ CONT_RESULT_DIR=${WORKSPACE_CONT}/results CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe # Dev/test feature (off by default) to force each pre-training run outputs to a separate directory. 
-: "${UNIQUE_OUTPUT_DIR:=0}" -if [[ ${UNIQUE_OUTPUT_DIR} -eq 1 ]]; then +: "${BMK_MODE:=0}" +if [[ ${BMK_MODE} -eq 1 ]]; then # For debugging: each run has its own output dir. TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} - BMK_ARGS+=(base_results_dir=${CONT_RESULT_DIR}) + BMK_ARGS+=( + base_results_dir=${CONT_RESULT_DIR} + training.run.dependency=null + ) echo " #################### diff --git a/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh b/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh index 5caaf60c..f0f7710a 100755 --- a/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh +++ b/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh @@ -53,13 +53,16 @@ CONT_RESULT_DIR=${WORKSPACE_CONT}/results CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe # Dev/test feature (off by default) to force each pre-training run outputs to a separate directory. -: "${UNIQUE_OUTPUT_DIR:=0}" -if [[ ${UNIQUE_OUTPUT_DIR} -eq 1 ]]; then +: "${BMK_MODE:=0}" +if [[ ${BMK_MODE} -eq 1 ]]; then # For debugging: each run has its own output dir. TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} - BMK_ARGS+=(base_results_dir=${CONT_RESULT_DIR}) + BMK_ARGS+=( + base_results_dir=${CONT_RESULT_DIR} + training.run.dependency=null + ) echo " #################### diff --git a/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh b/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh index 4ea1c300..ba383988 100755 --- a/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh +++ b/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh @@ -50,13 +50,16 @@ CONT_RESULT_DIR=${WORKSPACE_CONT}/results CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe # Dev/test feature (off by default) to force each pre-training run outputs to a separate directory. 
-: "${UNIQUE_OUTPUT_DIR:=0}" -if [[ ${UNIQUE_OUTPUT_DIR} -eq 1 ]]; then +: "${BMK_MODE:=0}" +if [[ ${BMK_MODE} -eq 1 ]]; then # For debugging: each run has its own output dir. TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} - BMK_ARGS+=(base_results_dir=${CONT_RESULT_DIR}) + BMK_ARGS+=( + base_results_dir=${CONT_RESULT_DIR} + training.run.dependency=null + ) echo " #################### diff --git a/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh b/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh index 591ccb6f..1436de60 100755 --- a/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh +++ b/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh @@ -55,13 +55,16 @@ CONT_RESULT_DIR=${WORKSPACE_CONT}/results # Dev/test feature (off by default) to force each pre-training run outputs to a separate directory. -: "${UNIQUE_OUTPUT_DIR:=0}" -if [[ ${UNIQUE_OUTPUT_DIR} -eq 1 ]]; then +: "${BMK_MODE:=0}" +if [[ ${BMK_MODE} -eq 1 ]]; then # For debugging: each run has its own output dir. 
TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} - BMK_ARGS+=(base_results_dir=${CONT_RESULT_DIR}) + BMK_ARGS+=( + base_results_dir=${CONT_RESULT_DIR} + training.run.dependency=null + ) echo " #################### diff --git a/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh b/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh index ac5cef5e..0efadc24 100755 --- a/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh +++ b/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh @@ -43,8 +43,6 @@ declare -a BMK_ARGS=( training.exp_manager.create_checkpoint_callback=False training.exp_manager.resume_if_exists=False - ################################ - # https://github.com/NVIDIA/NeMo/pull/6181/files training.model.data.data_impl=mock training.model.data.data_prefix=[] @@ -59,13 +57,16 @@ CONT_RESULT_DIR=${WORKSPACE_CONT}/results-v2 CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe # Dev/test feature (off by default) to force each pre-training run outputs to a separate directory. -: "${UNIQUE_OUTPUT_DIR:=0}" -if [[ ${UNIQUE_OUTPUT_DIR} -eq 1 ]]; then +: "${BMK_MODE:=0}" +if [[ ${BMK_MODE} -eq 1 ]]; then # For debugging: each run has its own output dir. 
TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} - BMK_ARGS+=(base_results_dir=${CONT_RESULT_DIR}) + BMK_ARGS+=( + base_results_dir=${CONT_RESULT_DIR} + training.run.dependency=null + ) echo " #################### diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index 6d5d705e..69a77a72 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -164,7 +164,7 @@ This section assumes that you went through the previous sections and 1/ retrieve └── nemo_log_globalrank-*.txt # Log of each rank ``` - Please note that except for `log-nemo-megatron-gpt3_126m_.out`, the other files will be overridden when you launch another pre-training of that same model size. To completely separate the output among jobs, edit `TEST_CASE_PATH/bmk-pretrain-gpt3-126m.sh` and uncomment the `#export UNIQUE_OUTPUT_DIR=1` line to produce this output dir instead: + Please note that except for `log-nemo-megatron-gpt3_126m_.out`, the other files will be overridden when you launch another pre-training of that same model size. To completely separate the output among jobs, run the script in benchmark mode: `BMK_MODE=1 $TEST_CASE_PATH/bmk-pretrain-gpt3-126m.sh` which produces output dir `$TARGET_PATH/results--utc-/gpt3_126m/`. 4. You can use Slurm command `squeue` to monitor the job status in the queue. The ample output below shows a `nemo-megatron` job with job id `1234` is in running state (`ST` = `R`). A queued job will have state `ST` = `PD` (pending). Please refer to the complete of job states in this [Slurm documentation](https://slurm.schedmd.com/squeue.html#SECTION_JOB-STATE-CODES). 
From 56a386e43db297130ce2f175ea9a4926dd6885cb Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Mon, 15 Jan 2024 07:38:40 +0000 Subject: [PATCH 288/648] update mpt docker image --- 3.test_cases/3.MPT/0.llm-foundry.Dockerfile | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/3.test_cases/3.MPT/0.llm-foundry.Dockerfile b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile index 12e46dbb..7fffb25c 100644 --- a/3.test_cases/3.MPT/0.llm-foundry.Dockerfile +++ b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile @@ -1,10 +1,10 @@ -FROM mosaicml/pytorch:2.0.1_cu118-python3.10-ubuntu20.04 +FROM mosaicml/llm-foundry:2.1.0_cu121_flash2_aws-latest ARG EFA_INSTALLER_VERSION=latest ARG AWS_OFI_NCCL_VERSION=v1.7.3-aws ARG NCCL_TESTS_VERSION=master ARG NCCL_VERSION=2.18.5-1 -ARG LLM_FOUNDRY_VERSION=v0.3.0 +ARG LLM_FOUNDRY_VERSION=v0.4.0 ARG OPEN_MPI_PATH=/opt/amazon/openmpi RUN apt-get update -y @@ -71,6 +71,7 @@ RUN echo "/usr/local/lib" >> /etc/ld.so.conf.d/local.conf && \ ################################################### ## Install AWS-OFI-NCCL plugin RUN export OPAL_PREFIX="" \ + && rm -rf /opt/aws-ofi-nccl \ && git clone https://github.com/aws/aws-ofi-nccl.git /opt/aws-ofi-nccl \ && cd /opt/aws-ofi-nccl \ && git checkout ${AWS_OFI_NCCL_VERSION} \ @@ -83,7 +84,6 @@ RUN export OPAL_PREFIX="" \ && make -j && make install RUN rm -rf /var/lib/apt/lists/* -ENV LD_PRELOAD=/opt/nccl/build/lib/libnccl.so RUN echo "hwloc_base_binding_policy = none" >> /opt/amazon/openmpi/etc/openmpi-mca-params.conf \ && echo "rmaps_base_mapping_policy = slot" >> /opt/amazon/openmpi/etc/openmpi-mca-params.conf @@ -96,4 +96,5 @@ RUN git clone https://github.com/mosaicml/llm-foundry.git llm-foundry \ && cd llm-foundry \ && git checkout $LLM_FOUNDRY_VERSION \ && pip install -e ".[gpu]" \ - && pip install xformers nvtx 'flash-attn==v1.0.3.post0' \ No newline at end of file + && pip install flash-attn==1.0.7 --no-build-isolation \ + && pip install 
git+https://github.com/NVIDIA/TransformerEngine.git@v0.10 \ No newline at end of file From 1681a7f44d7e61a3894b80c14a03d8dd9ae92818 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 16 Jan 2024 16:04:49 +0800 Subject: [PATCH 289/648] Fix fp8 mode for llama2 examples --- 3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh | 1 + 3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh | 1 + 2 files changed, 2 insertions(+) diff --git a/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh b/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh index 1436de60..5faf4738 100755 --- a/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh +++ b/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh @@ -24,6 +24,7 @@ declare -a MODEL_ARGS=( ## Uncomment below to enable fp8 training (Transformers Engine) on p5 instances (H100 GPUs) #training.model.transformer_engine=True #training.model.fp8=True + #training.model.fp8_hybrid=True ) diff --git a/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh b/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh index 0efadc24..e51e1eb1 100755 --- a/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh +++ b/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh @@ -26,6 +26,7 @@ declare -a MODEL_ARGS=( ## Uncomment below to enable fp8 training (Transformers Engine) on p5 instances (H100 GPUs) #training.model.transformer_engine=True #training.model.fp8=True + #training.model.fp8_hybrid=True ) From 010c04b77329d1c10316f9d716931fcc6545e9b5 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 16 Jan 2024 21:06:26 +0800 Subject: [PATCH 290/648] llama2: remove redundant flags --- 3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh | 1 - 3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh | 1 - 2 files changed, 2 deletions(-) diff --git a/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh b/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh index 5faf4738..9d1b9161 100755 --- 
a/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh +++ b/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh @@ -22,7 +22,6 @@ declare -a MODEL_ARGS=( training.model.tokenizer.model=${TARGET_PATH}/data/llama2/tokenizer.model ## Uncomment below to enable fp8 training (Transformers Engine) on p5 instances (H100 GPUs) - #training.model.transformer_engine=True #training.model.fp8=True #training.model.fp8_hybrid=True ) diff --git a/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh b/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh index e51e1eb1..461cf759 100755 --- a/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh +++ b/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh @@ -24,7 +24,6 @@ declare -a MODEL_ARGS=( training.model.gc_interval=0 ## Uncomment below to enable fp8 training (Transformers Engine) on p5 instances (H100 GPUs) - #training.model.transformer_engine=True #training.model.fp8=True #training.model.fp8_hybrid=True ) From 0bffae1a4ba0bbc8332aaed4e182ea86c67cd493 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Wed, 17 Jan 2024 14:12:42 +0800 Subject: [PATCH 291/648] Fix wrong sockets in Parallel Cluster Ref: https://github.com/aws/aws-parallelcluster/issues/5797 --- .../distributed-training-p4de-base.yaml | 5 ++++- ...uted-training-p4de_batch-inference-g5_custom_ami.yaml | 9 ++++++++- .../distributed-training-p4de_custom_ami.yaml | 5 ++++- .../distributed-training-p4de_postinstall_scripts.yaml | 5 ++++- 4 files changed, 20 insertions(+), 4 deletions(-) diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml index def47596..4c6273a3 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml @@ -55,6 +55,7 @@ Scheduling: # multiple reservations CapacityReservationTarget: CapacityReservationId: 
PLACEHOLDER_CAPACITY_RESERVATION_ID + JobExclusiveAllocation: true # GenAI training likes to gobble all GPUs in an instance ComputeResources: - Name: distributed-ml InstanceType: p4de.24xlarge @@ -62,7 +63,9 @@ Scheduling: MaxCount: 4 # not scale down Efa: Enabled: true - JobExclusiveAllocation: true # GenAI training likes to gobble all GPUs in an instance + CustomSlurmSettings: + Sockets: 2 # p4, p5 + CoresPerSocket: 24 # p4=24, p5=48 SharedStorage: - MountDir: /fsx Name: fsx diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml index 6c2d5468..80eb13e5 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml @@ -59,6 +59,7 @@ Scheduling: # multiple reservations CapacityReservationTarget: CapacityReservationId: PLACEHOLDER_CAPACITY_RESERVATION_ID + JobExclusiveAllocation: true # GenAI training likes to gobble all GPUs in an instance ComputeResources: - Name: distributed-ml InstanceType: p4de.24xlarge @@ -66,7 +67,9 @@ Scheduling: MaxCount: 4 # not scale down Efa: Enabled: true - JobExclusiveAllocation: true # GenAI training likes to gobble all GPUs in an instance + CustomSlurmSettings: + Sockets: 2 # p4, p5 + CoresPerSocket: 24 # p4=24, p5=48 - Name: inference-gpu CapacityType: ONDEMAND Networking: @@ -83,6 +86,10 @@ Scheduling: InstanceType: g5.12xlarge MinCount: 0 # if min = max then capacity is maintained and will MaxCount: 10 # not scale down + CustomSlurmSettings: + # Below are for g5.12xlarge. For another instance type, please update accordingly. 
+ Sockets: 1 + CoresPerSocket: 24 SharedStorage: - MountDir: /fsx diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml index e3172e99..8eafbf36 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml @@ -56,6 +56,7 @@ Scheduling: # multiple reservations CapacityReservationTarget: CapacityReservationId: PLACEHOLDER_CAPACITY_RESERVATION_ID + JobExclusiveAllocation: true # GenAI training likes to gobble all GPUs in an instance ComputeResources: - Name: distributed-ml InstanceType: p4de.24xlarge @@ -63,7 +64,9 @@ Scheduling: MaxCount: 4 # not scale down Efa: Enabled: true - JobExclusiveAllocation: true # GenAI training likes to gobble all GPUs in an instance + CustomSlurmSettings: + Sockets: 2 # p4, p5 + CoresPerSocket: 24 # p4=24, p5=48 SharedStorage: - MountDir: /fsx Name: fsx diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml index 6cf9fcdf..7c163eda 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml @@ -62,6 +62,7 @@ Scheduling: # multiple reservations CapacityReservationTarget: CapacityReservationId: PLACEHOLDER_CAPACITY_RESERVATION_ID + JobExclusiveAllocation: true # GenAI training likes to gobble all GPUs in an instance ComputeResources: - Name: distributed-ml InstanceType: p4de.24xlarge @@ -69,7 +70,9 @@ Scheduling: MaxCount: 4 # not scale down Efa: Enabled: true - JobExclusiveAllocation: true # GenAI training likes to gobble all GPUs in an instance + CustomSlurmSettings: + Sockets: 2 # p4, p5 + CoresPerSocket: 24 # p4=24, p5=48 
CustomActions: OnNodeConfigured: Sequence: From 34a9a72ee0b3820308e2cb99ad21ed8aa014fa75 Mon Sep 17 00:00:00 2001 From: Arun Lokanatha Date: Wed, 17 Jan 2024 14:20:15 -0800 Subject: [PATCH 292/648] changes 1) Updated the pytorch version to use 2.1.2 ( latest release) 2) Removed the port from Rendezvous address. --- 3.test_cases/10.FSDP/0.create_conda_env.sh | 2 +- 3.test_cases/10.FSDP/1.distributed-training.sbatch | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/3.test_cases/10.FSDP/0.create_conda_env.sh b/3.test_cases/10.FSDP/0.create_conda_env.sh index 38cce21f..ae34afe4 100755 --- a/3.test_cases/10.FSDP/0.create_conda_env.sh +++ b/3.test_cases/10.FSDP/0.create_conda_env.sh @@ -15,7 +15,7 @@ conda create -y -p ./pt_fsdp python=3.10 source activate ./pt_fsdp/ # Install PyTorch -pip install torch==2.0.1 torchvision torchaudio transformers datasets +pip install torch torchvision torchaudio transformers datasets # Create checkpoint dir mkdir checkpoints diff --git a/3.test_cases/10.FSDP/1.distributed-training.sbatch b/3.test_cases/10.FSDP/1.distributed-training.sbatch index 5ed8ece9..0118f67d 100755 --- a/3.test_cases/10.FSDP/1.distributed-training.sbatch +++ b/3.test_cases/10.FSDP/1.distributed-training.sbatch @@ -33,7 +33,7 @@ declare -a TORCHRUN_ARGS=( --nnodes=$SLURM_JOB_NUM_NODES \ --rdzv_id=$SLURM_JOB_ID \ --rdzv_backend=c10d \ - --rdzv_endpoint=$(hostname):0 \ + --rdzv_endpoint=$(hostname) \ ) export TORCHRUN=./pt_fsdp/bin/torchrun From 169a76b80c40886eaf6d56e9b3797952a4b94904 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Wed, 17 Jan 2024 16:26:43 -0800 Subject: [PATCH 293/648] megatron changes Signed-off-by: Sean Smith --- 3.test_cases/1.megatron-lm/2.distributed-training.sbatch | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/3.test_cases/1.megatron-lm/2.distributed-training.sbatch b/3.test_cases/1.megatron-lm/2.distributed-training.sbatch index 923225fe..8844417e 100644 --- 
a/3.test_cases/1.megatron-lm/2.distributed-training.sbatch +++ b/3.test_cases/1.megatron-lm/2.distributed-training.sbatch @@ -3,10 +3,8 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -#SBATCH --nodes=24 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs +#SBATCH --nodes=2 # number of nodes to use, 2 p4d(e) = 16 A100 GPUs #SBATCH --job-name=megatron_gpt # name of your job -#SBATCH --gpus-per-node=8 # Number of GPU per node -#SBATCH --gres=gpu:8 # number of GPU we reserve #SBATCH --exclusive # job has exclusive use of the resource, no sharing #SBATCH --wait-all-nodes=1 @@ -68,11 +66,12 @@ declare -a ARGS=( ) declare -a TORCHRUN_ARGS=( - --nproc_per_node=$SLURM_GPUS_PER_NODE \ + # change this to match the number of gpus per node: + --nproc_per_node=8 \ --nnodes=$SLURM_JOB_NUM_NODES \ --rdzv_id=$SLURM_JOB_ID \ --rdzv_backend=c10d \ - --rdzv_endpoint=$(hostname):29501 \ + --rdzv_endpoint=$(hostname) \ ) declare -a MEGATRON_ARGS=( From db441ac30d528560e81f8a84be4b429a89708608 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Wed, 17 Jan 2024 17:41:25 -0800 Subject: [PATCH 294/648] change gpt2 data path Signed-off-by: Sean Smith --- 3.test_cases/1.megatron-lm/2.distributed-training.sbatch | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/3.test_cases/1.megatron-lm/2.distributed-training.sbatch b/3.test_cases/1.megatron-lm/2.distributed-training.sbatch index 8844417e..0644693d 100644 --- a/3.test_cases/1.megatron-lm/2.distributed-training.sbatch +++ b/3.test_cases/1.megatron-lm/2.distributed-training.sbatch @@ -101,9 +101,9 @@ srun -l "${ARGS[@]}" python -m torch.distributed.run "${TORCHRUN_ARGS[@]}" /work --log-interval 1 \ --eval-iters 40 \ --eval-interval 1000 \ - --data-path "${DATA_PATH}/my-gpt2_text_document" \ - --vocab-file "${DATA_PATH}/gpt2-vocab.json" \ - --merge-file "${DATA_PATH}/gpt2-merges.txt" \ + --data-path "$(pwd)/gpt2/my-gpt2_text_document" \ + --vocab-file 
"$(pwd)/gpt2/gpt2-vocab.json" \ + --merge-file "$(pwd)/gpt2/gpt2-merges.txt" \ --split 98,2,0 \ --clip-grad 1.0 \ --weight-decay 0.1 \ From 18a290f27b5152a1d970df6259dbbd371a8135a3 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Thu, 18 Jan 2024 09:09:39 -0800 Subject: [PATCH 295/648] EFA Versions Script Signed-off-by: Sean Smith --- .../efa-versions.sh | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 4.validation_and_observability/efa-versions.sh diff --git a/4.validation_and_observability/efa-versions.sh b/4.validation_and_observability/efa-versions.sh new file mode 100644 index 00000000..5d12c34b --- /dev/null +++ b/4.validation_and_observability/efa-versions.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +# Fetch software versions related to EFA. +# Currently only tested on Ubuntu 20.04 + +# EFA Version +cat /opt/amazon/efa_installed_packages | grep "EFA installer version:" + +# NCCL Version +sudo apt install mlocate +locate nccl| grep "libnccl.so" | tail -n1 | sed -r 's/^.*\.so\.//' + +# libfabric Version +fi_info --version | grep "libfabric:" + +# NCCL OFI Version +strings /opt/aws-ofi-nccl/lib/libnccl-net.so | grep Initializing + +# CUDA Driver +nvidia-smi --query-gpu=driver_version --format=csv,noheader | head -1 + +# CUDA Version +nvcc --version | grep "release" \ No newline at end of file From afc8df6bc5e63943035a4e00cc075ed07b642d1f Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Thu, 18 Jan 2024 09:48:45 -0800 Subject: [PATCH 296/648] MIT-0 Signed-off-by: Sean Smith --- 1.architectures/0.s3/0.private-bucket.yaml | 2 +- 1.architectures/1.vpc_network/1.vpc-multi-az.yaml | 2 +- 1.architectures/1.vpc_network/2.vpc-one-az.yaml | 2 +- .../2.aws-parallelcluster/distributed-training-p4de-base.yaml | 2 +- ...distributed-training-p4de_batch-inference-g5_custom_ami.yaml | 2 +- .../distributed-training-p4de_custom_ami.yaml | 2 +- 
.../distributed-training-p4de_postinstall_scripts.yaml | 2 +- .../distributed-training-trn1_custom_ami.yaml | 2 +- .../3.aws-batch/0.aws-batch-distributed-training.yaml | 2 +- 1.architectures/5.sagemaker-hyperpod/2.SageMakerVPC.yaml | 2 +- 1.architectures/5.sagemaker-hyperpod/easy-ssh.sh | 2 +- 2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile | 2 +- 3.test_cases/1.megatron-lm/0.distributed-training.Dockerfile | 2 +- 3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch | 2 +- 3.test_cases/1.megatron-lm/2.distributed-training.sbatch | 2 +- .../2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile | 2 +- 3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh | 2 +- 3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh | 2 +- 3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh | 2 +- 3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh | 2 +- 3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh | 2 +- 3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh | 2 +- .../0.nccl-tests/0.nccl-tests.Dockerfile | 2 +- 4.validation_and_observability/0.nccl-tests/1.nccl-tests.sbatch | 2 +- .../0.nccl-tests/2.nccl-3collectives.sbatch | 2 +- .../0.nccl-tests/3.nccl-validate.sbatch | 2 +- .../1.pytorch-env-validation/0.pytorch-screen.Dockerfile | 2 +- .../1.pytorch-env-validation/1.torch-screen.sbatch | 2 +- .../1.pytorch-env-validation/pytorch-screen.py | 2 +- 4.validation_and_observability/efa-versions.sh | 2 +- 30 files changed, 30 insertions(+), 30 deletions(-) diff --git a/1.architectures/0.s3/0.private-bucket.yaml b/1.architectures/0.s3/0.private-bucket.yaml index d07d4050..4518205b 100644 --- a/1.architectures/0.s3/0.private-bucket.yaml +++ b/1.architectures/0.s3/0.private-bucket.yaml @@ -1,5 +1,5 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 AWSTemplateFormatVersion: '2010-09-09' Description: This CloudFormation template to create S3 Bucket diff --git a/1.architectures/1.vpc_network/1.vpc-multi-az.yaml b/1.architectures/1.vpc_network/1.vpc-multi-az.yaml index 74bc05f7..0020bcc3 100644 --- a/1.architectures/1.vpc_network/1.vpc-multi-az.yaml +++ b/1.architectures/1.vpc_network/1.vpc-multi-az.yaml @@ -1,5 +1,5 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 AWSTemplateFormatVersion: '2010-09-09' Description: > diff --git a/1.architectures/1.vpc_network/2.vpc-one-az.yaml b/1.architectures/1.vpc_network/2.vpc-one-az.yaml index be1fb81c..6caff6ec 100644 --- a/1.architectures/1.vpc_network/2.vpc-one-az.yaml +++ b/1.architectures/1.vpc_network/2.vpc-one-az.yaml @@ -1,5 +1,5 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 AWSTemplateFormatVersion: '2010-09-09' Description: > diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml index def47596..69814c1e 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml @@ -1,5 +1,5 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 Imds: ImdsSupport: v2.0 diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml index 6c2d5468..9aa862c0 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml @@ -1,5 +1,5 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 Imds: ImdsSupport: v2.0 diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml index e3172e99..2728aad0 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml @@ -1,5 +1,5 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 Imds: ImdsSupport: v2.0 diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml index 6cf9fcdf..3a403ec6 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml @@ -1,5 +1,5 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 Imds: ImdsSupport: v2.0 diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml index 92230246..a91624ca 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-trn1_custom_ami.yaml @@ -1,5 +1,5 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 # For additional examples please refer to this [Github repository](https://github.com/aws-neuron/aws-neuron-parallelcluster-samples/blob/master/examples/jobs/neuronx-nemo-megatron-llamav2-job.md) from aws-neuron. diff --git a/1.architectures/3.aws-batch/0.aws-batch-distributed-training.yaml b/1.architectures/3.aws-batch/0.aws-batch-distributed-training.yaml index eab126bb..9bebbdf2 100644 --- a/1.architectures/3.aws-batch/0.aws-batch-distributed-training.yaml +++ b/1.architectures/3.aws-batch/0.aws-batch-distributed-training.yaml @@ -1,5 +1,5 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 AWSTemplateFormatVersion: '2010-09-09' Description: > diff --git a/1.architectures/5.sagemaker-hyperpod/2.SageMakerVPC.yaml b/1.architectures/5.sagemaker-hyperpod/2.SageMakerVPC.yaml index 7ae825a8..399ab8b3 100644 --- a/1.architectures/5.sagemaker-hyperpod/2.SageMakerVPC.yaml +++ b/1.architectures/5.sagemaker-hyperpod/2.SageMakerVPC.yaml @@ -1,5 +1,5 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 AWSTemplateFormatVersion: '2010-09-09' Description: > diff --git a/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh b/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh index d190ff24..2ec49d50 100755 --- a/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh +++ b/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh @@ -1,7 +1,7 @@ #!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 declare -a HELP=( "[-h|--help]" diff --git a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile index ac2c18f7..4310b79e 100644 --- a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile +++ b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile @@ -1,5 +1,5 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 #################################################################################################### # This is a sample Dockerfile, with optional stanzas. Please read through this Dockerfile, diff --git a/3.test_cases/1.megatron-lm/0.distributed-training.Dockerfile b/3.test_cases/1.megatron-lm/0.distributed-training.Dockerfile index 6ba0ec9c..15086e5d 100644 --- a/3.test_cases/1.megatron-lm/0.distributed-training.Dockerfile +++ b/3.test_cases/1.megatron-lm/0.distributed-training.Dockerfile @@ -1,5 +1,5 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 FROM nvcr.io/nvidia/pytorch:23.09-py3 diff --git a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch index d8c91693..fb9d6632 100644 --- a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch +++ b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch @@ -1,7 +1,7 @@ #!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 #SBATCH -N 1 # number of nodes we want #SBATCH --exclusive # job has exclusive use of the resource, no sharing diff --git a/3.test_cases/1.megatron-lm/2.distributed-training.sbatch b/3.test_cases/1.megatron-lm/2.distributed-training.sbatch index 0644693d..d9bc8d3b 100644 --- a/3.test_cases/1.megatron-lm/2.distributed-training.sbatch +++ b/3.test_cases/1.megatron-lm/2.distributed-training.sbatch @@ -1,7 +1,7 @@ #!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 #SBATCH --nodes=2 # number of nodes to use, 2 p4d(e) = 16 A100 GPUs #SBATCH --job-name=megatron_gpt # name of your job diff --git a/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile b/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile index cb6daafd..cecab9b5 100644 --- a/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile +++ b/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile @@ -1,5 +1,5 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 # DOCKER_BUILDKIT=1 docker build --progress plain -t aws-nemo-megatron:latest . 
diff --git a/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh b/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh index 922bf886..e70905a4 100755 --- a/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh +++ b/3.test_cases/2.nemo-launcher/1.bmk-pretrain-gpt3-126m.sh @@ -1,7 +1,7 @@ #!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 set -exo pipefail [[ -z "${TARGET_PATH}" ]] \ diff --git a/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh b/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh index 9f6249c1..24ce0355 100755 --- a/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh +++ b/3.test_cases/2.nemo-launcher/2.bmk-pretrain-gpt3-5b.sh @@ -1,7 +1,7 @@ #!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 set -exo pipefail [[ -z "${TARGET_PATH}" ]] \ diff --git a/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh b/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh index c4b8bea0..4448cff4 100755 --- a/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh +++ b/3.test_cases/2.nemo-launcher/3.bmk-pretrain-gpt3-40b.sh @@ -1,7 +1,7 @@ #!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 set -exo pipefail [[ -z "${TARGET_PATH}" ]] \ diff --git a/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh b/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh index 58ac2c4d..a804d6ac 100755 --- a/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh +++ b/3.test_cases/2.nemo-launcher/4.bmk-pretrain-gpt3-175b.sh @@ -1,7 +1,7 @@ #!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 set -exo pipefail [[ -z "${TARGET_PATH}" ]] \ diff --git a/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh b/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh index 0e307998..19f2819f 100644 --- a/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh +++ b/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh @@ -1,7 +1,7 @@ #!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 set -exo pipefail [[ -z "${TARGET_PATH}" ]] \ diff --git a/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh b/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh index 5ee47452..6e08d0f2 100644 --- a/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh +++ b/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh @@ -1,7 +1,7 @@ #!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 set -exo pipefail [[ -z "${TARGET_PATH}" ]] \ diff --git a/4.validation_and_observability/0.nccl-tests/0.nccl-tests.Dockerfile b/4.validation_and_observability/0.nccl-tests/0.nccl-tests.Dockerfile index 3a7a55dd..8ff95e1b 100644 --- a/4.validation_and_observability/0.nccl-tests/0.nccl-tests.Dockerfile +++ b/4.validation_and_observability/0.nccl-tests/0.nccl-tests.Dockerfile @@ -1,5 +1,5 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 ARG EFA_INSTALLER_VERSION=1.28.0 diff --git a/4.validation_and_observability/0.nccl-tests/1.nccl-tests.sbatch b/4.validation_and_observability/0.nccl-tests/1.nccl-tests.sbatch index 7654e21e..a0baff7c 100644 --- a/4.validation_and_observability/0.nccl-tests/1.nccl-tests.sbatch +++ b/4.validation_and_observability/0.nccl-tests/1.nccl-tests.sbatch @@ -1,7 +1,7 @@ #!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 #SBATCH -N 2 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs #SBATCH --job-name=megatron_gpt # name of your job diff --git a/4.validation_and_observability/0.nccl-tests/2.nccl-3collectives.sbatch b/4.validation_and_observability/0.nccl-tests/2.nccl-3collectives.sbatch index b645fb8b..03f8e0f3 100644 --- a/4.validation_and_observability/0.nccl-tests/2.nccl-3collectives.sbatch +++ b/4.validation_and_observability/0.nccl-tests/2.nccl-3collectives.sbatch @@ -1,7 +1,7 @@ #!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 #SBATCH -N 2 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs #SBATCH --job-name=megatron_gpt # name of your job diff --git a/4.validation_and_observability/0.nccl-tests/3.nccl-validate.sbatch b/4.validation_and_observability/0.nccl-tests/3.nccl-validate.sbatch index 81242ecf..c41122f8 100644 --- a/4.validation_and_observability/0.nccl-tests/3.nccl-validate.sbatch +++ b/4.validation_and_observability/0.nccl-tests/3.nccl-validate.sbatch @@ -1,7 +1,7 @@ #!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 #SBATCH -N 2 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs #SBATCH --job-name=megatron_gpt # name of your job diff --git a/4.validation_and_observability/1.pytorch-env-validation/0.pytorch-screen.Dockerfile b/4.validation_and_observability/1.pytorch-env-validation/0.pytorch-screen.Dockerfile index 424c2fc1..707cecc7 100644 --- a/4.validation_and_observability/1.pytorch-env-validation/0.pytorch-screen.Dockerfile +++ b/4.validation_and_observability/1.pytorch-env-validation/0.pytorch-screen.Dockerfile @@ -1,5 +1,5 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 ARG AWS_REGION=us-west-2 diff --git a/4.validation_and_observability/1.pytorch-env-validation/1.torch-screen.sbatch b/4.validation_and_observability/1.pytorch-env-validation/1.torch-screen.sbatch index a3147074..e5453af1 100644 --- a/4.validation_and_observability/1.pytorch-env-validation/1.torch-screen.sbatch +++ b/4.validation_and_observability/1.pytorch-env-validation/1.torch-screen.sbatch @@ -1,7 +1,7 @@ #!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 #SBATCH -N 2 # number of nodes to run the scrip on, use 2 here #SBATCH --job-name=megatron_gpt # name of your job diff --git a/4.validation_and_observability/1.pytorch-env-validation/pytorch-screen.py b/4.validation_and_observability/1.pytorch-env-validation/pytorch-screen.py index 52fdb68a..962ecaf5 100644 --- a/4.validation_and_observability/1.pytorch-env-validation/pytorch-screen.py +++ b/4.validation_and_observability/1.pytorch-env-validation/pytorch-screen.py @@ -1,5 +1,5 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 import torch diff --git a/4.validation_and_observability/efa-versions.sh b/4.validation_and_observability/efa-versions.sh index 5d12c34b..5b042c84 100644 --- a/4.validation_and_observability/efa-versions.sh +++ b/4.validation_and_observability/efa-versions.sh @@ -1,7 +1,7 @@ #!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 # Fetch software versions related to EFA. # Currently only tested on Ubuntu 20.04 From c9982646cd0f6f9eec0197d30bc3157422d9a208 Mon Sep 17 00:00:00 2001 From: ruhanprasad Date: Fri, 19 Jan 2024 23:13:51 +0000 Subject: [PATCH 297/648] Revision --- .../0.create_conda_env.sh | 5 +--- .../12.SM-dataparallel-FSDP/1.prepare_data.sh | 1 - ..._training.sbatch => 1.run_training.sbatch} | 0 .../12.SM-dataparallel-FSDP/README.md | 25 +++++++++++-------- .../12.SM-dataparallel-FSDP/exec_torchrun.sh | 2 +- .../prepare_dataset.py | 7 +++--- .../12.SM-dataparallel-FSDP/scripts/train.py | 7 +++--- .../0.create_conda_env.sh | 1 + .../13.SM-dataparallel-deepspeed/README.md | 22 ++++++++-------- .../code/train.py | 3 ++- .../exec_torchrun.sh | 2 +- 11 files changed, 39 insertions(+), 36 deletions(-) delete mode 100755 3.test_cases/12.SM-dataparallel-FSDP/1.prepare_data.sh rename 3.test_cases/12.SM-dataparallel-FSDP/{2.run_training.sbatch => 1.run_training.sbatch} (100%) diff --git a/3.test_cases/12.SM-dataparallel-FSDP/0.create_conda_env.sh b/3.test_cases/12.SM-dataparallel-FSDP/0.create_conda_env.sh index 07a48835..744ba8b2 100755 --- a/3.test_cases/12.SM-dataparallel-FSDP/0.create_conda_env.sh +++ b/3.test_cases/12.SM-dataparallel-FSDP/0.create_conda_env.sh @@ -4,6 +4,7 @@ # SPDX-License-Identifier: MIT-0 set -ex +# Mamba to be installed in shared FSx MAMBA_VERSION=23.1.0-1 curl -L -o ./mambaforge.sh 
https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-x86_64.sh @@ -21,7 +22,3 @@ conda activate smdataparallel conda install -y pytorch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 pytorch-cuda=11.8 -c pytorch -c nvidia pip install https://smdataparallel.s3.amazonaws.com/binary/pytorch/2.0.1/cu118/2023-11-07/smdistributed_dataparallel-2.0.2-cp310-cp310-linux_x86_64.whl pip install -r scripts/requirements.txt - - - - diff --git a/3.test_cases/12.SM-dataparallel-FSDP/1.prepare_data.sh b/3.test_cases/12.SM-dataparallel-FSDP/1.prepare_data.sh deleted file mode 100755 index 1689d0a7..00000000 --- a/3.test_cases/12.SM-dataparallel-FSDP/1.prepare_data.sh +++ /dev/null @@ -1 +0,0 @@ -python prepare_dataset.py diff --git a/3.test_cases/12.SM-dataparallel-FSDP/2.run_training.sbatch b/3.test_cases/12.SM-dataparallel-FSDP/1.run_training.sbatch similarity index 100% rename from 3.test_cases/12.SM-dataparallel-FSDP/2.run_training.sbatch rename to 3.test_cases/12.SM-dataparallel-FSDP/1.run_training.sbatch diff --git a/3.test_cases/12.SM-dataparallel-FSDP/README.md b/3.test_cases/12.SM-dataparallel-FSDP/README.md index 2199fd5b..b783349e 100644 --- a/3.test_cases/12.SM-dataparallel-FSDP/README.md +++ b/3.test_cases/12.SM-dataparallel-FSDP/README.md @@ -1,21 +1,24 @@ -# How to Use SageMaker Distributed Data Parallel Library (SMDDP) with PyTorch FSDP +# How to use SageMaker Distributed Data Parallel Library (SMDDP) with PyTorch FSDP ## What is SMDDP? -The [SMDDP](https://docs.aws.amazon.com/sagemaker/latest/dg/data-parallel.html) library provides fast GPU collective communication algorithms on P4d/P4de instance types and serves as a drop-in replacement for the Nvidia Collective Communications Library ([NCCL](https://developer.nvidia.com/nccl)). Specifically, SMDDP implements an optimized AllGather communication routine, which is the main source of GPU communication overhead in sharded data parallel training jobs. 
With just two lines of code change, you can enable the SMDDP Library's optimized AllGather algorithm in your [PyTorch FSDP](https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html) training jobs and speed up training by up to 20% compared to NCCL! This examples shows how you can use SMDDP when training the [Falcon](https://falconllm.tii.ae/falcon-models.html) model with FSDP. +The [SMDDP](https://docs.aws.amazon.com/sagemaker/latest/dg/data-parallel.html) library provides fast GPU collective communication algorithms on [p4d.24xlarge](https://aws.amazon.com/ec2/instance-types/p4/)/p4de.24xlarge instance types and serves as a drop-in replacement for the Nvidia Collective Communications Library ([NCCL](https://developer.nvidia.com/nccl)). Specifically, SMDDP implements an optimized AllGather communication routine, which is the main source of GPU communication overhead in sharded data parallel training jobs. With just two lines of code change, you can enable the SMDDP Library's optimized AllGather algorithm in your [PyTorch FSDP](https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html) training jobs and speed up training by up to 20% compared to NCCL! This examples shows how you can use SMDDP when training the [Falcon](https://falconllm.tii.ae/falcon-models.html) model with FSDP. + +Enabling SMDDP in your FSDP script is seamless. The only required changes are the following two lines: +1. Importing SMDDP: `import smdistributed.dataparallel.torch.torch_smddp` +2. Initializing process group with SMDDP backend: `torch.distributed.init_process_group(backend="smddp")` ## 0. Prerequisites -You will need a slurm cluster with a shared parallel filesystem such as [Amazon FSx for Lustre](https://docs.aws.amazon.com/fsx/latest/LustreGuide/getting-started.html). 
See the sagemaker-hyperpod section in the [1.architectures/5.sagemaker-hyperpod](https://github.com/aws-samples/awsome-distributed-training/tree/main/1.architectures/5.sagemaker-hyperpod) folder for setup instructions. +You will need a Slurm cluster with a shared parallel filesystem such as [Amazon FSx for Lustre](https://docs.aws.amazon.com/fsx/latest/LustreGuide/getting-started.html). See the sagemaker-hyperpod section in the [Sagemaker Hyperpod](https://github.com/aws-samples/awsome-distributed-training/tree/main/1.architectures/5.sagemaker-hyperpod) folder for setup instructions. The required dependencies to use SMDDP in this example are listed below. Note that CUDA is already provided on HyperPod. ### Required Dependencies of SMDDP Library * Python==3.10 * CUDA==11.8 * PyTorch==2.0.1 -Additionally, SMDDP must be used on AWS P4d or P4de instances. This example also uses mamba as a package manager. Mamba is a drop-in replacement for conda and it is recommended over Miniconda or Anaconda with SMDDP (see Known Issues section for more details) +Additionally, SMDDP must be used on AWS P4d or P4de instances. This example also uses [mamba](https://github.com/mamba-org/mamba) as a package manager. Mamba is a drop-in replacement for [conda](https://conda.io/projects/conda/en/latest/index.html) and it is recommended over [Miniconda](https://docs.conda.io/projects/miniconda/en/latest/) or [Anaconda](https://www.anaconda.com/) with SMDDP (see [Known Issues](#4-known-issues) section for more details) ## 1. Create Environment -1. On your cluster head node, navigate to your shared FSx filesystem, which should be located at `/fsx` -2. Clone this repo +On your cluster head node, navigate to your shared FSx filesystem, which should be located at `/fsx`, and clone this repo ``` cd /fsx git clone https://github.com/aws-samples/awsome-distributed-training/ @@ -24,12 +27,12 @@ cd awsome-distributed-training/3.test_cases/12.SM-dataparallel-FSDP 3. 
Run the `0.create_conda_env.sh` script. This will install [Mamba](https://github.com/mamba-org/mamba) and create an environment called `smdataparallel`. Since the environment is created on the shared FSx filesystem, all compute nodes will have access to it. Activate this environment via `conda activate smdataparallel`. ## 2. Prepare Dataset -To run this example, we will use part of the [glue/sst2 dataset](https://huggingface.co/datasets/glue) from HuggingFace. To download and prepare a subset of this dataset for our example, run `./1.prepare_data.sh` +To run this example, we will use part of the [glue/sst2 dataset](https://huggingface.co/datasets/glue) from HuggingFace. To download and prepare a subset of this dataset for our example, run `python prepare_dataset.py` ## 3. Launch Training -To launch the distributed training job, run `sbatch 2.run_training.sbatch`. By default the number of nodes in the job is 2, but this can be changed in the `#SBATCH --nodes=...` argument in the sbatch script. +To launch the distributed training job, run `sbatch 1.run_training.sbatch`. By default the number of nodes in the job is 2, but this can be changed in the `#SBATCH --nodes=...` argument in the sbatch script. -Launching the job will create a log file in the current directory (`slurm-`) which you can tail to monitor the progress of the training job. You can also see the underlying launch script in `exec_torchrun.sh` and the training script in `scripts/train.py` +Launching the job will create a log file in the current directory (`slurm-`) which you can tail (via `tail -f slurm-`) to monitor the progress of the training job. You can also see the underlying launch script in `exec_torchrun.sh` and the training script in `scripts/train.py` This example only runs training for one iteration and exits immediately. You should see output similar to below ``` @@ -40,7 +43,7 @@ This example only runs training for one iteration and exits immediately. You sh 0: Training done! ``` ## 4. 
Known Issues -When using SMDDP in your own conda environment, you may encounter the following error after importing SMDDP in your training script: ``version `GLIBCXX_3.4.30' not found`` +Ensure that you are installing PyTorch via conda before pip installing SMDDP (i.e. install PyTorch through `conda install` before installing SMDDP in your environment). Additionally, we suggest to use [Mamba](https://github.com/mamba-org/mamba) as your package manager rather than Miniconda or Anaconda. Mamba is a drop-in replacement for conda with improvements in dependency resolution. -If this occurs, firstly ensure that you are installing PyTorch via conda before pip installing SMDDP (i.e. install PyTorch through `conda install` before installing SMDDP in your environment). If this still does not resolve the error, please use [Mamba](https://github.com/mamba-org/mamba) as your package manager rather than Miniconda or Anaconda. Mamba is a drop-in replacement for conda with improvements in dependency resolution, and creating an environment with Mamba is known to resolve this issue. 
+Not following these suggestions may result in the following error after importing SMDDP in your training script: ``version `GLIBCXX_3.4.30' not found`` diff --git a/3.test_cases/12.SM-dataparallel-FSDP/exec_torchrun.sh b/3.test_cases/12.SM-dataparallel-FSDP/exec_torchrun.sh index 56f8ae41..74f27cc7 100755 --- a/3.test_cases/12.SM-dataparallel-FSDP/exec_torchrun.sh +++ b/3.test_cases/12.SM-dataparallel-FSDP/exec_torchrun.sh @@ -4,7 +4,7 @@ # SPDX-License-Identifier: MIT-0 export OMP_NUM_THREADS=1 -export GPUS_PER_NODE=8 +export GPUS_PER_NODE=8 # p4d/p4de instances have 8 gpus per node MASTER_NODE=$(scontrol show hostname | head -n 1) export MASTER_ADDR=$(scontrol show node=$MASTER_NODE | awk -F= '/NodeAddr=/{print $2}' | awk '{print $1}') export NNODES=$SLURM_NTASKS diff --git a/3.test_cases/12.SM-dataparallel-FSDP/prepare_dataset.py b/3.test_cases/12.SM-dataparallel-FSDP/prepare_dataset.py index 762ffdcc..d6a58670 100644 --- a/3.test_cases/12.SM-dataparallel-FSDP/prepare_dataset.py +++ b/3.test_cases/12.SM-dataparallel-FSDP/prepare_dataset.py @@ -7,6 +7,8 @@ from datasets import load_dataset from transformers import AutoTokenizer +from itertools import chain +from functools import partial # Load Tokenizer @@ -29,9 +31,6 @@ split="train[5%:]" ) -from itertools import chain -from functools import partial - def group_texts(examples,block_size = 2048): # Concatenate all texts. 
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} @@ -61,7 +60,7 @@ def group_texts(examples,block_size = 2048): batched=True, ) -training_input_path = f'processed/data/' +training_input_path = f"processed/data/" lm_dataset.save_to_disk(training_input_path) print(f"Saved data to: {training_input_path}") diff --git a/3.test_cases/12.SM-dataparallel-FSDP/scripts/train.py b/3.test_cases/12.SM-dataparallel-FSDP/scripts/train.py index 020407dc..f0494721 100644 --- a/3.test_cases/12.SM-dataparallel-FSDP/scripts/train.py +++ b/3.test_cases/12.SM-dataparallel-FSDP/scripts/train.py @@ -21,6 +21,7 @@ import time from tqdm import tqdm +import functools from torch.distributed.fsdp import ( FullyShardedDataParallel as FSDP, MixedPrecision, @@ -37,7 +38,9 @@ from torch.distributed.fsdp.wrap import ( transformer_auto_wrap_policy, ) -import functools + + +# This is the only change needed to enable SMDDP in an FSDP script try: backend = "smddp" import smdistributed.dataparallel.torch.torch_smddp @@ -121,7 +124,6 @@ def parse_arge(): parser.add_argument("--limit_all_gathers", type=bool, default=False) parser.add_argument("--forward_prefetch", type=bool, default=False) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") - parser.add_argument("--model_dir",type=str,default="/opt/ml/model") parser.add_argument("--cache_dir",type=str,default=None) args = parser.parse_known_args() @@ -279,7 +281,6 @@ def training_function(args): if args.max_steps is not None and total_steps > args.max_steps: break - #save_model(model,tokenizer,args.model_dir,args.rank) if args.rank == 0: print("Training done!") dist.barrier() diff --git a/3.test_cases/13.SM-dataparallel-deepspeed/0.create_conda_env.sh b/3.test_cases/13.SM-dataparallel-deepspeed/0.create_conda_env.sh index e911f148..353c90f5 100755 --- a/3.test_cases/13.SM-dataparallel-deepspeed/0.create_conda_env.sh +++ b/3.test_cases/13.SM-dataparallel-deepspeed/0.create_conda_env.sh 
@@ -4,6 +4,7 @@ # SPDX-License-Identifier: MIT-0 set -ex +# Mamba installed in shared FSx directory MAMBA_VERSION=23.1.0-1 curl -L -o ./mambaforge.sh https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-x86_64.sh diff --git a/3.test_cases/13.SM-dataparallel-deepspeed/README.md b/3.test_cases/13.SM-dataparallel-deepspeed/README.md index 22805a73..92e19bd0 100644 --- a/3.test_cases/13.SM-dataparallel-deepspeed/README.md +++ b/3.test_cases/13.SM-dataparallel-deepspeed/README.md @@ -1,21 +1,24 @@ -# How to Use SageMaker Distributed Data Parallel Library (SMDDP) with DeepSpeed ZeRO +# How to use SageMaker Distributed Data Parallel Library (SMDDP) with DeepSpeed ZeRO ## What is SMDDP? -The [SMDDP](https://docs.aws.amazon.com/sagemaker/latest/dg/data-parallel.html) library provides fast GPU collective communication algorithms on P4d/P4de instance types and serves as a drop-in replacement for the Nvidia Collective Communications Library ([NCCL](https://developer.nvidia.com/nccl)). Specifically, SMDDP implements an optimized AllGather communication routine, which is the main source of GPU communication overhead in sharded data parallel training jobs. With just two lines of code change, you can enable the SMDDP Library's optimized AllGather algorithm in your [DeepSpeed](https://github.com/microsoft/DeepSpeed) training jobs and speed up training by up to 20% compared to NCCL! This examples shows how you can use SMDDP when training the [Llama2](https://ai.meta.com/llama/) model with DeepSpeed. +The [SMDDP](https://docs.aws.amazon.com/sagemaker/latest/dg/data-parallel.html) library provides fast GPU collective communication algorithms on [p4d.24xlarge](https://aws.amazon.com/ec2/instance-types/p4/)/p4de.24xlarge instance types and serves as a drop-in replacement for the Nvidia Collective Communications Library ([NCCL](https://developer.nvidia.com/nccl)). 
Specifically, SMDDP implements an optimized AllGather communication routine, which is the main source of GPU communication overhead in sharded data parallel training jobs. With just two lines of code change, you can enable the SMDDP Library's optimized AllGather algorithm in your [DeepSpeed](https://github.com/microsoft/DeepSpeed) training jobs and speed up training by up to 20% compared to NCCL! This examples shows how you can use SMDDP when training the [Llama2](https://ai.meta.com/llama/) model with DeepSpeed. + +Enabling SMDDP in your DeepSpeed script is seamless. The only required changes are the following two lines: +1. Importing SMDDP: `import smdistributed.dataparallel.torch.torch_smddp` +2. Initializing process group with SMDDP backend: `deepspeed.init_distributed(dist_backend="smddp")` ## 0. Prerequisites -You will need a slurm cluster with a shared parallel file-system such as Amazon [FSx for Lustre](https://docs.aws.amazon.com/fsx/latest/LustreGuide/getting-started.html) file system. See the sagemaker-hyperpod section in the [1.architectures/5.sagemaker-hyperpod](https://github.com/aws-samples/awsome-distributed-training/tree/main/1.architectures/5.sagemaker-hyperpod) folder for setup instructions. +You will need a Slurm cluster with a shared parallel file-system such as Amazon [FSx for Lustre](https://docs.aws.amazon.com/fsx/latest/LustreGuide/getting-started.html) file system. See the [Sagemaker Hyperpod](https://github.com/aws-samples/awsome-distributed-training/tree/main/1.architectures/5.sagemaker-hyperpod) folder for setup instructions. The required dependencies to use SMDDP in this example are listed below. Note that CUDA is already provided on HyperPod. ### Required Dependencies of SMDDP Library * Python==3.10 * CUDA==11.8 * PyTorch==2.0.1 -Additionally, SMDDP must be used on AWS P4d or P4de instances. This example also uses mamba as a package manager. 
Mamba is a drop-in replacement for conda and it is recommended over Miniconda or Anaconda with SMDDP (see Known Issues section for more details) +Additionally, SMDDP must be used on AWS P4d or P4de instances. This example also uses [mamba](https://github.com/mamba-org/mamba) as a package manager. Mamba is a drop-in replacement for [conda](https://conda.io/projects/conda/en/latest/index.html) and it is recommended over [Miniconda](https://docs.conda.io/projects/miniconda/en/latest/) or [Anaconda](https://www.anaconda.com/) with SMDDP (see [Known Issues](#4-known-issues) section for more details) ## 1. Create Environment -1. On your cluster head node, navigate to your shared FSx filesystem, which should be located at `/fsx` -2. Clone this repo +On your cluster head node, navigate to your shared FSx filesystem, which should be located at `/fsx`, and clone this repo ``` cd /fsx git clone https://github.com/aws-samples/awsome-distributed-training/ @@ -26,7 +29,7 @@ cd awsome-distributed-training/3.test_cases/13.SM-dataparallel-deepspeed ## 2. Launch Training No dataset preparation is needed as this example uses synthetic data for simplicity. To launch the distributed training job, run `sbatch 1.run_training.sbatch`. By default the number of nodes in the job is 2, but this can be changed in the `#SBATCH --nodes=...` argument in the sbatch script. -Launching the job will create a log file in the current directory (`slurm-`) which you can tail to monitor the progress of the training job. You can also see the underlying launch script in `exec_torchrun.sh` and the training script in `code/train.py` +Launching the job will create a log file in the current directory (`slurm-`) which you can tail (via `tail -f slurm-`)to monitor the progress of the training job. You can also see the underlying launch script in `exec_torchrun.sh` and the training script in `code/train.py` This example only runs training for one iteration and exits immediately. 
You should see output similar to below ``` @@ -39,7 +42,6 @@ Performing validation on training batch 1 Training done! ``` ## 4. Known Issues -When using SMDDP in your own conda environment, you may encounter the following error after importing SMDDP in your training script: ``version `GLIBCXX_3.4.30' not found`` - -If this occurs, firstly ensure that you are installing PyTorch via conda before pip installing SMDDP (i.e. install PyTorch through `conda install` before installing SMDDP in your environment). If this still does not resolve the error, please use [Mamba](https://github.com/mamba-org/mamba) as your package manager rather than Miniconda or Anaconda. Mamba is a drop-in replacement for conda with improvements in dependency resolution, and creating an environment with Mamba is known to resolve this issue. +Ensure that you are installing PyTorch via conda before pip installing SMDDP (i.e. install PyTorch through `conda install` before installing SMDDP in your environment). Additionally, we suggest to use [Mamba](https://github.com/mamba-org/mamba) as your package manager rather than Miniconda or Anaconda. Mamba is a drop-in replacement for conda with improvements in dependency resolution. 
+Not following these suggestions may result in the following error after importing SMDDP in your training script: ``version `GLIBCXX_3.4.30' not found`` diff --git a/3.test_cases/13.SM-dataparallel-deepspeed/code/train.py b/3.test_cases/13.SM-dataparallel-deepspeed/code/train.py index edc8c40c..457ff796 100755 --- a/3.test_cases/13.SM-dataparallel-deepspeed/code/train.py +++ b/3.test_cases/13.SM-dataparallel-deepspeed/code/train.py @@ -11,6 +11,8 @@ from utils import create_dataloaders, StubDataset import functools import deepspeed + +# This is the only change needed to enable SMDDP in an DeepSpeed script try: backend = "smddp" import smdistributed.dataparallel.torch.torch_smddp @@ -84,7 +86,6 @@ def parse_args(): ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") - parser.add_argument("--model_dir",type=str,default="/opt/ml/model") parser.add_argument("--cache_dir",type=str,default=None) args = parser.parse_known_args() return args diff --git a/3.test_cases/13.SM-dataparallel-deepspeed/exec_torchrun.sh b/3.test_cases/13.SM-dataparallel-deepspeed/exec_torchrun.sh index 5390fb13..7454bae4 100755 --- a/3.test_cases/13.SM-dataparallel-deepspeed/exec_torchrun.sh +++ b/3.test_cases/13.SM-dataparallel-deepspeed/exec_torchrun.sh @@ -4,7 +4,7 @@ # SPDX-License-Identifier: MIT-0 export OMP_NUM_THREADS=1 -export GPUS_PER_NODE=8 +export GPUS_PER_NODE=8 # p4d/p4de instances have 8 GPUs per node MASTER_NODE=$(scontrol show hostname | head -n 1) export MASTER_ADDR=$(scontrol show node=$MASTER_NODE | awk -F= '/NodeAddr=/{print $2}' | awk '{print $1}') export NNODES=$SLURM_NTASKS From 2696f1bb608d0611ca8fe42769c951d8d3857d38 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Mon, 22 Jan 2024 09:44:12 +0800 Subject: [PATCH 298/648] Apply suggestions from code review --- 3.test_cases/12.SM-dataparallel-FSDP/README.md | 2 +- 3.test_cases/13.SM-dataparallel-deepspeed/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/3.test_cases/12.SM-dataparallel-FSDP/README.md b/3.test_cases/12.SM-dataparallel-FSDP/README.md index b783349e..05aa9970 100644 --- a/3.test_cases/12.SM-dataparallel-FSDP/README.md +++ b/3.test_cases/12.SM-dataparallel-FSDP/README.md @@ -30,7 +30,7 @@ cd awsome-distributed-training/3.test_cases/12.SM-dataparallel-FSDP To run this example, we will use part of the [glue/sst2 dataset](https://huggingface.co/datasets/glue) from HuggingFace. To download and prepare a subset of this dataset for our example, run `python prepare_dataset.py` ## 3. Launch Training -To launch the distributed training job, run `sbatch 1.run_training.sbatch`. By default the number of nodes in the job is 2, but this can be changed in the `#SBATCH --nodes=...` argument in the sbatch script. +To launch the distributed training job, run `sbatch 1.run_training.sbatch`. By default the number of nodes in the job is 2, but this can be changed either by (i) editing the sbatch script to change the `#SBATCH --nodes=...` argument, or (ii) run `sbatch --nodes=... 1.run_training.sbatch` to override the number of nodes in the sbatch script. Launching the job will create a log file in the current directory (`slurm-`) which you can tail (via `tail -f slurm-`) to monitor the progress of the training job. You can also see the underlying launch script in `exec_torchrun.sh` and the training script in `scripts/train.py` diff --git a/3.test_cases/13.SM-dataparallel-deepspeed/README.md b/3.test_cases/13.SM-dataparallel-deepspeed/README.md index 92e19bd0..68c2d234 100644 --- a/3.test_cases/13.SM-dataparallel-deepspeed/README.md +++ b/3.test_cases/13.SM-dataparallel-deepspeed/README.md @@ -27,7 +27,7 @@ cd awsome-distributed-training/3.test_cases/13.SM-dataparallel-deepspeed 3. Run the `0.create_conda_env.sh` script. This will install [Mamba](https://github.com/mamba-org/mamba) and create an environment called `smdataparallel`. 
Since the environment is created on the shared FSx filesystem, all compute nodes will have access to it. Activate this environment via `conda activate smdataparallel`. ## 2. Launch Training -No dataset preparation is needed as this example uses synthetic data for simplicity. To launch the distributed training job, run `sbatch 1.run_training.sbatch`. By default the number of nodes in the job is 2, but this can be changed in the `#SBATCH --nodes=...` argument in the sbatch script. +No dataset preparation is needed as this example uses synthetic data for simplicity. To launch the distributed training job, run `sbatch 1.run_training.sbatch`. By default the number of nodes in the job is 2, but this can be changed either by (i) editing the sbatch script to change the `#SBATCH --nodes=...` argument, or (ii) run `sbatch --nodes=... 1.run_training.sbatch` to override the number of nodes in the sbatch script. Launching the job will create a log file in the current directory (`slurm-`) which you can tail (via `tail -f slurm-`)to monitor the progress of the training job. You can also see the underlying launch script in `exec_torchrun.sh` and the training script in `code/train.py` From 618dc0d535da9dc57e33ad9ae93548aa47f2d3e1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 22:01:44 +0000 Subject: [PATCH 299/648] Bump pillow from 10.0.1 to 10.2.0 in /3.test_cases/4.DDP Bumps [pillow](https://github.com/python-pillow/Pillow) from 10.0.1 to 10.2.0. - [Release notes](https://github.com/python-pillow/Pillow/releases) - [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst) - [Commits](https://github.com/python-pillow/Pillow/compare/10.0.1...10.2.0) --- updated-dependencies: - dependency-name: pillow dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- 3.test_cases/4.DDP/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/4.DDP/requirements.txt b/3.test_cases/4.DDP/requirements.txt index b88c3ad8..cd6f68d1 100644 --- a/3.test_cases/4.DDP/requirements.txt +++ b/3.test_cases/4.DDP/requirements.txt @@ -4,7 +4,7 @@ matplotlib==3.5.1 numpy==1.22.0 opencv_python==4.5.5.64 pandas==1.5.0 -Pillow==10.0.1 +Pillow==10.2.0 PyYAML==6.0 scikit_image==0.19.2 scikit_learn==1.2.1 From 0d11df9a3165b47270f556ed7351116d7f69e433 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 Jan 2024 00:39:58 +0000 Subject: [PATCH 300/648] Bump transformers in /3.test_cases/13.SM-dataparallel-deepspeed/code Bumps [transformers](https://github.com/huggingface/transformers) from 4.31 to 4.36.0. - [Release notes](https://github.com/huggingface/transformers/releases) - [Commits](https://github.com/huggingface/transformers/compare/v4.31.0...v4.36.0) --- updated-dependencies: - dependency-name: transformers dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- 3.test_cases/13.SM-dataparallel-deepspeed/code/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) mode change 100755 => 100644 3.test_cases/13.SM-dataparallel-deepspeed/code/requirements.txt diff --git a/3.test_cases/13.SM-dataparallel-deepspeed/code/requirements.txt b/3.test_cases/13.SM-dataparallel-deepspeed/code/requirements.txt old mode 100755 new mode 100644 index b844e817..287a1ffc --- a/3.test_cases/13.SM-dataparallel-deepspeed/code/requirements.txt +++ b/3.test_cases/13.SM-dataparallel-deepspeed/code/requirements.txt @@ -1,4 +1,4 @@ -transformers==4.31 +transformers==4.36.0 datasets accelerate>=0.21 bitsandbytes From 6bf09c538b8e7e610ae2f24ee6f40d74a7826bf1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 Jan 2024 00:40:08 +0000 Subject: [PATCH 301/648] Bump transformers in /3.test_cases/12.SM-dataparallel-FSDP/scripts Bumps [transformers](https://github.com/huggingface/transformers) from 4.33.0 to 4.36.0. - [Release notes](https://github.com/huggingface/transformers/releases) - [Commits](https://github.com/huggingface/transformers/compare/v4.33.0...v4.36.0) --- updated-dependencies: - dependency-name: transformers dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- 3.test_cases/12.SM-dataparallel-FSDP/scripts/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) mode change 100755 => 100644 3.test_cases/12.SM-dataparallel-FSDP/scripts/requirements.txt diff --git a/3.test_cases/12.SM-dataparallel-FSDP/scripts/requirements.txt b/3.test_cases/12.SM-dataparallel-FSDP/scripts/requirements.txt old mode 100755 new mode 100644 index 8fef9c29..68d40ce0 --- a/3.test_cases/12.SM-dataparallel-FSDP/scripts/requirements.txt +++ b/3.test_cases/12.SM-dataparallel-FSDP/scripts/requirements.txt @@ -1,4 +1,4 @@ -transformers==4.33.0 +transformers==4.36.0 datasets==1.16.0 accelerate>=0.21 bitsandbytes From b929813190c4901d8c5c429f6283cbf72edac6d3 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Tue, 23 Jan 2024 12:39:10 -0800 Subject: [PATCH 302/648] Delete default Provisioning Parameters Signed-off-by: Sean Smith --- .../base-config/provisioning_parameters.json | 12 ------------ 1.architectures/5.sagemaker-hyperpod/README.md | 4 +++- 2 files changed, 3 insertions(+), 13 deletions(-) delete mode 100644 1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/provisioning_parameters.json diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/provisioning_parameters.json b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/provisioning_parameters.json deleted file mode 100644 index d3eff3c4..00000000 --- a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/provisioning_parameters.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "version": "1.0.0", - "workload_manager": "slurm", - "controller_group": "controller-machine", - "login_group": "my-login-group", - "worker_groups": [ - { - "instance_group_name": "compute-nodes", - "partition_name": "dev" - } - ] -} diff --git a/1.architectures/5.sagemaker-hyperpod/README.md b/1.architectures/5.sagemaker-hyperpod/README.md index 35c6bae3..dd2b886e 100644 --- 
a/1.architectures/5.sagemaker-hyperpod/README.md +++ b/1.architectures/5.sagemaker-hyperpod/README.md @@ -136,7 +136,9 @@ Add both to your `provisioning_parameters.json` file. For example, } ``` -And copy the updated `provisioning_parameters.json` to S3. +Make sure the `instance_group_name` matches the instance group name `InstanceGroupName` in your cluster config which we create next. + +Copy the updated `provisioning_parameters.json` to S3: ``` aws s3 cp LifeCycleScripts/base-config/provisioning_parameters.json s3://${BUCKET}/LifeCycleScripts/base-config/ From 7ee1654967d6aad8eec257d7225167e4c9d775a2 Mon Sep 17 00:00:00 2001 From: Ankur Srivastava <101727556+awsankur@users.noreply.github.com> Date: Tue, 23 Jan 2024 16:09:04 -0800 Subject: [PATCH 303/648] Update efa-cheatsheet.md Added note on NVLinks for P5s --- 1.architectures/efa-cheatsheet.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/1.architectures/efa-cheatsheet.md b/1.architectures/efa-cheatsheet.md index ec0f4c59..a031d7aa 100644 --- a/1.architectures/efa-cheatsheet.md +++ b/1.architectures/efa-cheatsheet.md @@ -26,6 +26,9 @@ versions of your libfabric. Use cuda>=12.0, nccl>=2.18.0 (recommend at least 2.18.5), aws-ofi-nccl>=1.7.2 (recommend at least 1.7.3). +The dcgm command to validate the NVLinks is `sudo dcgmi diag -r 2 -p pcie.gpu_nvlinks_expected_up=18` where the 18 is H100 specific. +`nvidia-smi nvlink -s` is the command to get the status for all NVLinks for each of the GPUs. for H100 there are 18 + ## 3. Sample Presets ### 3.1. 
libfabric>=1.18.0 and aws-ofi-nccl>=1.7.0 From d46a64a851557b74cd2ba7fdb2f7cbad762b8ad5 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Tue, 23 Jan 2024 18:01:07 -0800 Subject: [PATCH 304/648] Script to Validate Cluster Config Signed-off-by: Sean Smith --- .../5.sagemaker-hyperpod/validate-config.py | 98 +++++++++++++++++++ 1 file changed, 98 insertions(+) create mode 100755 1.architectures/5.sagemaker-hyperpod/validate-config.py diff --git a/1.architectures/5.sagemaker-hyperpod/validate-config.py b/1.architectures/5.sagemaker-hyperpod/validate-config.py new file mode 100755 index 00000000..73864fc6 --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/validate-config.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python3 + +import json +import argparse +import boto3 + +# This function checks that all instance group names match +# between the cluster config and provisioning parameters. +def validate_instance_groups(cluster_config, provisioning_parameters): + for group in provisioning_parameters.get('worker_groups'): + instance_group_name = group.get('instance_group_name') + if not [instance_group for instance_group in cluster_config.get('InstanceGroups') if instance_group.get('InstanceGroupName') == instance_group_name]: + print(f"❌ Invalid instance group name in file provisioning_parameters.json: {instance_group_name}") + return False + else: + print(f"✔️ Validated instance group name {instance_group_name} is correct ...") + return True + +# Check if Subnet is private +def validate_subnet(ec2_client, cluster_config): + if cluster_config.get('VpcConfig'): + subnet_id = cluster_config.get('VpcConfig').get('Subnets')[0] + response = ec2_client.describe_subnets(SubnetIds=[subnet_id]) + if 'Subnets' in response and response.get('Subnets')[0].get('MapPublicIpOnLaunch'): + print(f"❌ Subnet {subnet_id} is public which will fail cluster creation ...") + return False + else: + print(f"✔️ Validated subnet {subnet_id} ...") + else: + print("⭕️ No subnet found in cluster_config.json 
... skipping check") + return True + +# Check if Security Group supports EFA. +# See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa-start.html#efa-start-security +def validate_sg(ec2_client, cluster_config): + if cluster_config.get('VpcConfig'): + security_group = cluster_config.get('VpcConfig').get('SecurityGroupIds')[0] + ec2_client = boto3.client('ec2') + response = ec2_client.describe_security_groups(GroupIds=[security_group]) + + ingress = response.get('SecurityGroups')[0].get('IpPermissions') + egress = response.get('SecurityGroups')[0].get('IpPermissionsEgress') + + + for rule in ingress: + if rule.get('IpProtocol') == '-1': + user_id_group_pairs = rule.get('UserIdGroupPairs')[0] + if not ('GroupId' in user_id_group_pairs or security_group == user_id_group_pairs.get('GroupId')): + print(f"❌ No EFA egress rule found in security group {security_group} ...") + return False + else: + print(f"✔️ Validated security group {security_group} ingress rules ...") + + for rule in egress: + if rule.get('IpProtocol') == '-1': + user_id_group_pairs = rule.get('UserIdGroupPairs')[0] + if not ('GroupId' in user_id_group_pairs or security_group == user_id_group_pairs.get('GroupId')): + print(f"❌ No EFA egress rule found in security group {security_group} ...") + return False + else: + print(f"✔️ Validated security group {security_group} egress rules ...") + else: + print("⭕️ No security group found in cluster_config.json ... 
skipping check.") + + return True + + +def main(): + parser = argparse.ArgumentParser(description="Validate cluster config.") + parser.add_argument("--cluster-config", help="Path to the cluster config JSON file") + parser.add_argument("--provisioning-parameters", help="Path to the provisioning parameters JSON file") + args = parser.parse_args() + + with open(args.cluster_config, "r") as cluster_config_file: + cluster_config = json.load(cluster_config_file) + + with open(args.provisioning_parameters, "r") as provisioning_parameters_file: + provisioning_parameters = json.load(provisioning_parameters_file) + + ec2_client = boto3.client('ec2') + + # check instance group name + valid = validate_instance_groups(cluster_config, provisioning_parameters) + + # Validate Subnet + valid = validate_subnet(ec2_client, cluster_config) and valid + + # Validate Security Group + valid = validate_sg(ec2_client, cluster_config) and valid + + if valid: + # All good! + print(f"✅ Cluster Validation succeeded") + else: + print(f"❌ Cluster Validation failed") + +if __name__ == "__main__": + main() From de2464dc669062cf1af4abc7f77050993ebd8904 Mon Sep 17 00:00:00 2001 From: Pierre-Yves <62559210+perifaws@users.noreply.github.com> Date: Thu, 25 Jan 2024 08:16:40 -0800 Subject: [PATCH 305/648] Fix typo in MPT README.md --- 3.test_cases/3.MPT/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/3.MPT/README.md b/3.test_cases/3.MPT/README.md index 6142b5b8..ae4daa09 100644 --- a/3.test_cases/3.MPT/README.md +++ b/3.test_cases/3.MPT/README.md @@ -1,4 +1,4 @@ -# Mosaic Pretraind Transformers (MPT) Test Case +# Mosaic Pretrained Transformers (MPT) Test Case MPT are GPT-style models in [llm-foundry](https://github.com/mosaicml/llm-foundry/tree/main) with some special features -- [Flash Attention](https://arxiv.org/abs/2205.14135) for efficiency, [ALiBi](https://arxiv.org/abs/2108.12409) for context length extrapolation, and stability improvements to mitigate loss 
spikes. From 5553c0ba1600ec391d3e12e111c415ce507614f4 Mon Sep 17 00:00:00 2001 From: Ankur Srivastava <101727556+awsankur@users.noreply.github.com> Date: Thu, 25 Jan 2024 16:33:10 -0800 Subject: [PATCH 306/648] Update 1.Dockerfile Updated PyTorch installation following PyTorch best practices --- 3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile b/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile index ec47b9aa..bd84b916 100644 --- a/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile +++ b/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile @@ -89,7 +89,7 @@ RUN export OPAL_PREFIX="" \ RUN git clone https://github.com/mosaicml/diffusion-benchmark.git RUN pip3 install -r diffusion-benchmark/requirements.txt RUN pip3 install mosaicml==${MOSAICML_VERSION} --force -RUN pip3 install --pre torch torchvision torchaudio --index-url ${PYTORCH_INDEX_URL} --force +RUN pip3 install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 --index-url https://download.pytorch.org/whl/cu121 RUN pip3 uninstall transformer-engine -y RUN pip3 install protobuf==3.20.3 From f85f1da3ccc854ea8200b148cab6f19b41048138 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Tue, 23 Jan 2024 18:01:07 -0800 Subject: [PATCH 307/648] Script to Validate Cluster Config Signed-off-by: Sean Smith --- .../5.sagemaker-hyperpod/validate-config.py | 21 ++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/1.architectures/5.sagemaker-hyperpod/validate-config.py b/1.architectures/5.sagemaker-hyperpod/validate-config.py index 73864fc6..165d4075 100755 --- a/1.architectures/5.sagemaker-hyperpod/validate-config.py +++ b/1.architectures/5.sagemaker-hyperpod/validate-config.py @@ -40,25 +40,32 @@ def validate_sg(ec2_client, cluster_config): ingress = response.get('SecurityGroups')[0].get('IpPermissions') egress = 
response.get('SecurityGroups')[0].get('IpPermissionsEgress') - for rule in ingress: if rule.get('IpProtocol') == '-1': - user_id_group_pairs = rule.get('UserIdGroupPairs')[0] - if not ('GroupId' in user_id_group_pairs or security_group == user_id_group_pairs.get('GroupId')): + user_id_group_pairs = rule.get('UserIdGroupPairs') + if not user_id_group_pairs: print(f"❌ No EFA egress rule found in security group {security_group} ...") return False else: - print(f"✔️ Validated security group {security_group} ingress rules ...") + if not ('GroupId' in user_id_group_pairs[0] or security_group == user_id_group_pairs[0].get('GroupId')): + print(f"❌ No EFA egress rule found in security group {security_group} ...") + return False + else: + print(f"✔️ Validated security group {security_group} ingress rules ...") for rule in egress: if rule.get('IpProtocol') == '-1': - user_id_group_pairs = rule.get('UserIdGroupPairs')[0] - if not ('GroupId' in user_id_group_pairs or security_group == user_id_group_pairs.get('GroupId')): + user_id_group_pairs = rule.get('UserIdGroupPairs') + if not user_id_group_pairs: print(f"❌ No EFA egress rule found in security group {security_group} ...") return False else: - print(f"✔️ Validated security group {security_group} egress rules ...") + if not ('GroupId' in user_id_group_pairs[0] or security_group == user_id_group_pairs[0].get('GroupId')): + print(f"❌ No EFA egress rule found in security group {security_group} ...") + return False + else: + print(f"✔️ Validated security group {security_group} egress rules ...") else: print("⭕️ No security group found in cluster_config.json ... 
skipping check.") From de24d8020d5a6f794820b7bf04e2e78f22cfd0a5 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Thu, 25 Jan 2024 17:41:41 -0800 Subject: [PATCH 308/648] use aws pytorch 2.0.1 version Signed-off-by: Sean Smith --- 3.test_cases/10.FSDP/0.create_conda_env.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/3.test_cases/10.FSDP/0.create_conda_env.sh b/3.test_cases/10.FSDP/0.create_conda_env.sh index ae34afe4..64a0a7f5 100755 --- a/3.test_cases/10.FSDP/0.create_conda_env.sh +++ b/3.test_cases/10.FSDP/0.create_conda_env.sh @@ -14,8 +14,8 @@ conda create -y -p ./pt_fsdp python=3.10 source activate ./pt_fsdp/ -# Install PyTorch -pip install torch torchvision torchaudio transformers datasets +# Install AWS Pytorch, see https://aws-pytorch-doc.com/ +conda install -y pytorch=2.0.1 pytorch-cuda=12.2 torchvision torchaudio transformers datasets fsspec=2023.9.2 --strict-channel-priority --override-channels -c https://aws-ml-conda.s3.us-west-2.amazonaws.com -c nvidia -c conda-forge # Create checkpoint dir mkdir checkpoints From 0f0a9598635af14d27c4f2b8fccff42db0ba8ac7 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Fri, 26 Jan 2024 10:58:34 +0800 Subject: [PATCH 309/648] Fix vpc name --- 1.architectures/1.vpc_network/2.vpc-one-az.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/1.architectures/1.vpc_network/2.vpc-one-az.yaml b/1.architectures/1.vpc_network/2.vpc-one-az.yaml index 6caff6ec..8795b65f 100644 --- a/1.architectures/1.vpc_network/2.vpc-one-az.yaml +++ b/1.architectures/1.vpc_network/2.vpc-one-az.yaml @@ -102,7 +102,7 @@ Resources: CidrBlock: !FindInMap [Networking, VPC, CIDR0] Tags: - Key: Name - Value: HPC VPC + Value: !Ref VPCName VpcCidrBlock: Type: AWS::EC2::VPCCidrBlock From 3f3569bb105b5677688092e9da16fec9190e7997 Mon Sep 17 00:00:00 2001 From: Pierre-Yves <62559210+perifaws@users.noreply.github.com> Date: Fri, 26 Jan 2024 13:22:49 -0600 Subject: [PATCH 310/648] Update README.md --- README.md | 27 
+++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 14cda0e2..859a69e1 100644 --- a/README.md +++ b/README.md @@ -38,21 +38,24 @@ Custom machine images can be built using [Packer](www.packer.io) for AWS Paralle All test cases are under `3.test_cases/`. You can go in each test case directory to learn how to run it. -| Test cases | Slurm | Kubernetes | AWS Batch | -| --------------------------------------------------------------------- | ----- | ----------- | ---------- | -| [`1.megatron-lm`](./3.test_cases/1.megatron-lm) | ✅ | ❓ | ❓ | -| [`2.nemo-launcher`](./3.test_cases/2.nemo-launcher) | ✅ | ❌ | ❌ | -| [`3.MPT`](./3.test_cases/3.MPT) | ✅ | ❓ | ❓ | -| [`4.DDP`](./3.test_cases/4.DDP) | ✅ | ❓ | ❓ | -| [`5.param-benchmark`](./3.test_cases/5.param-benchmark) | ✅ | ❓ | ❓ | -| [`6.stable-diffusion`](./3.test_cases/6.stable-diffusion) | ✅ | ❓ | ❓ | -| [`7.tensorflow-distributed`](./3.test_cases/7.tensorflow-distributed) | ✅ | ❓ | ❓ | -| [`8.neuronx-nemo-megatron`](./3.test_cases/8.neuronx-nemo-megatron) | ✅ | ❓ | ❓ | -| [`10.FSDP`](./3.test_cases/10.FSDP) | ✅ | ❓ | ❓ | +| Test cases | Slurm | Kubernetes | AWS Batch | +| ----------------------------------------------------------------------------- | ----- | ----------- | ---------- | +| [`1.megatron-lm`](./3.test_cases/1.megatron-lm) | ✅ | ❓ | ❓ | +| [`2.nemo-launcher`](./3.test_cases/2.nemo-launcher) | ✅ | ❌ | ❌ | +| [`3.MPT`](./3.test_cases/3.MPT) | ✅ | ❓ | ❓ | +| [`4.DDP`](./3.test_cases/4.DDP) | ✅ | ❓ | ❓ | +| [`5.param-benchmark`](./3.test_cases/5.param-benchmark) | ✅ | ❓ | ❓ | +| [`6.stable-diffusion`](./3.test_cases/6.stable-diffusion) | ✅ | ❓ | ❓ | +| [`7.tensorflow-distributed`](./3.test_cases/7.tensorflow-distributed) | ✅ | ❓ | ❓ | +| [`8.neuronx-nemo-megatron`](./3.test_cases/8.neuronx-nemo-megatron) | ✅ | ❓ | ❓ | +| [`10.FSDP`](./3.test_cases/10.FSDP) | ✅ | ❓ | ❓ | +| [`11.modelparallel`](./3.test_cases/11.modelparallel) | ✅ | ❓ | ❓ | +| 
[`12.SM-dataparallel-FSDP`](./3.test_cases/12.SM-dataparallel-FSDP) | ✅ | ❓ | ❓ | +| [`13.SM-dataparallel-deepspeed`](./3.test_cases/13.SM-dataparallel-deepspeed) | ✅ | ❓ | ❓ | ## 4. Validation scripts -Utilities scripts and micro-benchmarks examples are set under `4.validation_scripts/`. +Utilities scripts and micro-benchmarks examples are set under `4.validation_scripts/`. The EFA Prometheus exporter can be found in this [directory](./4.validation_and_observability/3.efa-node-exporter) ## 5. CI From 34fa41bfa9e10d932d1530020c59f8a0d394c374 Mon Sep 17 00:00:00 2001 From: Pierre-Yves <62559210+perifaws@users.noreply.github.com> Date: Fri, 26 Jan 2024 13:30:05 -0600 Subject: [PATCH 311/648] Update README.md for hyperpod in product list --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 859a69e1..b24e970e 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # ML Training Reference Architectures & Tests -This repository contains reference architectures and test cases for distributed model training with [AWS ParallelCluster](https://docs.aws.amazon.com/parallelcluster/latest/ug/what-is-aws-parallelcluster.html), [AWS Batch](https://docs.aws.amazon.com/batch/latest/userguide/what-is-batch.html), and [Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html). The test cases cover different types and sizes of models as well as different frameworks and parallel optimizations (Pytorch DDP/FSDP, MegatronLM, NemoMegatron...). 
+This repository contains reference architectures and test cases for distributed model training with [Amazon SageMaker Hyperpod](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod.html), [AWS ParallelCluster](https://docs.aws.amazon.com/parallelcluster/latest/ug/what-is-aws-parallelcluster.html), [AWS Batch](https://docs.aws.amazon.com/batch/latest/userguide/what-is-batch.html), and [Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html). The test cases cover different types and sizes of models as well as different frameworks and parallel optimizations (Pytorch DDP/FSDP, MegatronLM, NemoMegatron...). The major components of this directory are: From d27b7e218e79e46174cd059f87aa9c5cfc37f2bb Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 30 Jan 2024 17:55:00 +0800 Subject: [PATCH 312/648] smhp/easy-ssh: dry-run mode to just print the ssh config --- 1.architectures/5.sagemaker-hyperpod/easy-ssh.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh b/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh index 2ec49d50..33601829 100755 --- a/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh +++ b/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh @@ -8,12 +8,14 @@ declare -a HELP=( "[-c|--controller-group]" "[-r|--region]" "[-p|--profile]" + "[-d|--dry-run]" "CLUSTER_NAME" ) cluster_name="" node_group="controller-machine" declare -a aws_cli_args=() +DRY_RUN=0 parse_args() { local key @@ -37,6 +39,10 @@ parse_args() { aws_cli_args+=(--profile "$2") shift 2 ;; + -d|--dry-run) + DRY_RUN=1 + shift + ;; *) [[ "$cluster_name" == "" ]] \ && cluster_name="$key" \ @@ -72,4 +78,6 @@ Add your ssh keypair and then you can do: $ ssh ${cluster_name} " +[[ DRY_RUN -eq 1 ]] && exit 0 + aws ssm start-session "${aws_cli_args[@]}" --target sagemaker-cluster:${cluster_id}_${node_group}-${instance_id} From 5a866949c54f46baa0c5dceab9383d2ee69db8cd Mon Sep 17 00:00:00 2001 From: Sean 
Smith Date: Mon, 29 Jan 2024 17:32:30 -0800 Subject: [PATCH 313/648] NCCL Test for HyperPod Signed-off-by: Sean Smith --- .../0.nccl-tests/4.dl-ami.sbatch | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 4.validation_and_observability/0.nccl-tests/4.dl-ami.sbatch diff --git a/4.validation_and_observability/0.nccl-tests/4.dl-ami.sbatch b/4.validation_and_observability/0.nccl-tests/4.dl-ami.sbatch new file mode 100644 index 00000000..cff10cef --- /dev/null +++ b/4.validation_and_observability/0.nccl-tests/4.dl-ami.sbatch @@ -0,0 +1,27 @@ +#!/bin/bash +#SBATCH -o nccl_all_reduce_perf.out +#SBATCH -e nccl_all_reduce_perf.err +#SBATCH -N 2 +#SBATCH --exclusive + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT-0 + +# This script is designed to run on the Deep Learning AMI, Ubuntu 20.04 +# See https://aws.amazon.com/releasenotes/aws-deep-learning-base-gpu-ami-ubuntu-20-04/ +set -ex + +# Get Hostname to Instance ID mapping +mpirun -N 1 bash -c 'echo $(hostname) -> $(cat /sys/devices/virtual/dmi/id/board_asset_tag | tr -d " ")' + +# run all_reduce test +mpirun -n $((8 * SLURM_JOB_NUM_NODES)) -N 8 \ + -x FI_PROVIDER=efa \ + -x FI_EFA_USE_DEVICE_RDMA=1 \ + -x RDMAV_FORK_SAFE=1 \ + -x LD_LIBRARY_PATH=/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/opt/aws-ofi-nccl/lib:/usr/local/lib:/usr/lib:$LD_LIBRARY_PATH \ + -x NCCL_DEBUG=INFO \ + --mca pml ^cm \ + --mca btl tcp,self \ + --mca btl_tcp_if_exclude lo,docker0 \ + --bind-to none /usr/local/cuda-12.2/efa/test-cuda-12.2/all_reduce_perf -b 8 -e 2G -f 2 -g 1 -c 1 -n 100 \ No newline at end of file From 7dfe787eeb55bb9fc3d55fe50018f0d70aabb890 Mon Sep 17 00:00:00 2001 From: mhuguesaws <71357145+mhuguesaws@users.noreply.github.com> Date: Tue, 30 Jan 2024 10:37:28 -0600 Subject: [PATCH 314/648] Update 4.validation_and_observability/0.nccl-tests/4.dl-ami.sbatch --- 4.validation_and_observability/0.nccl-tests/4.dl-ami.sbatch | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/4.validation_and_observability/0.nccl-tests/4.dl-ami.sbatch b/4.validation_and_observability/0.nccl-tests/4.dl-ami.sbatch index cff10cef..8a755e43 100644 --- a/4.validation_and_observability/0.nccl-tests/4.dl-ami.sbatch +++ b/4.validation_and_observability/0.nccl-tests/4.dl-ami.sbatch @@ -16,7 +16,7 @@ mpirun -N 1 bash -c 'echo $(hostname) -> $(cat /sys/devices/virtual/dmi/id/board # run all_reduce test mpirun -n $((8 * SLURM_JOB_NUM_NODES)) -N 8 \ - -x FI_PROVIDER=efa \ + -x FI_PROVIDER=efa \ -x FI_EFA_USE_DEVICE_RDMA=1 \ -x RDMAV_FORK_SAFE=1 \ -x LD_LIBRARY_PATH=/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/opt/aws-ofi-nccl/lib:/usr/local/lib:/usr/lib:$LD_LIBRARY_PATH \ From a670a3ce5fd2b04ebba750373279232624b342df Mon Sep 17 00:00:00 2001 From: mhuguesaws <71357145+mhuguesaws@users.noreply.github.com> Date: Tue, 30 Jan 2024 10:37:40 -0600 Subject: [PATCH 315/648] Update 4.validation_and_observability/0.nccl-tests/4.dl-ami.sbatch --- 4.validation_and_observability/0.nccl-tests/4.dl-ami.sbatch | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/4.validation_and_observability/0.nccl-tests/4.dl-ami.sbatch b/4.validation_and_observability/0.nccl-tests/4.dl-ami.sbatch index 8a755e43..af83f2a2 100644 --- a/4.validation_and_observability/0.nccl-tests/4.dl-ami.sbatch +++ b/4.validation_and_observability/0.nccl-tests/4.dl-ami.sbatch @@ -16,7 +16,7 @@ mpirun -N 1 bash -c 'echo $(hostname) -> $(cat /sys/devices/virtual/dmi/id/board # run all_reduce test mpirun -n $((8 * SLURM_JOB_NUM_NODES)) -N 8 \ - -x FI_PROVIDER=efa \ + -x FI_PROVIDER=efa \ -x FI_EFA_USE_DEVICE_RDMA=1 \ -x RDMAV_FORK_SAFE=1 \ -x LD_LIBRARY_PATH=/opt/amazon/efa/lib:/opt/amazon/openmpi/lib:/opt/aws-ofi-nccl/lib:/usr/local/lib:/usr/lib:$LD_LIBRARY_PATH \ From a0b85cb14cfd2ff7e166bfd1411be48baf4d5724 Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Wed, 31 Jan 2024 13:04:58 -0800 Subject: [PATCH 316/648] Added stable diffusion EKS yaml script --- 
.../multi-node/3.stable-diffusion-eks.yaml | 76 +++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 3.test_cases/6.stable-diffusion/multi-node/3.stable-diffusion-eks.yaml diff --git a/3.test_cases/6.stable-diffusion/multi-node/3.stable-diffusion-eks.yaml b/3.test_cases/6.stable-diffusion/multi-node/3.stable-diffusion-eks.yaml new file mode 100644 index 00000000..ce30b604 --- /dev/null +++ b/3.test_cases/6.stable-diffusion/multi-node/3.stable-diffusion-eks.yaml @@ -0,0 +1,76 @@ +apiVersion: "kubeflow.org/v1" +kind: PyTorchJob +metadata: + name: stable-diffusion +spec: + elasticPolicy: + rdzvBackend: etcd + rdzvHost: etcd + rdzvPort: 2379 + minReplicas: 1 + maxReplicas: 96 + maxRestarts: 100 + #metrics: + # - type: Resource + # resource: + # name: cpu + # target: + # type: Utilization + # averageUtilization: 80 + pytorchReplicaSpecs: + Worker: + replicas: 64 + restartPolicy: OnFailure + template: + metadata: + labels: + app: stable-diffusion + spec: + volumes: + - name: shmem + #emptyDir: + # medium: Memory + hostPath: + path: /dev/shm + #nodeSelector: + # node.kubernetes.io/instance-type: "p5.48xlarge" + containers: + - name: pytorch + image: 159553542841.dkr.ecr.us-west-2.amazonaws.com/mosaicml-stable-diffusion:0.15.0 + imagePullPolicy: Always + resources: + requests: + nvidia.com/gpu: + vpc.amazonaws.com/efa: 32 + limits: + nvidia.com/gpu: + vpc.amazonaws.com/efa: 32 + env: + # for P5 FI_* should be commented out + - name: LOGLEVEL + value: "DEBUG" + #- name: FI_PROVIDER + # value: efa + #- name: FI_EFA_USE_DEVICE_RDMA + # value: "1" + #- name: FI_EFA_FORK_SAFE + # value: "1" + #- name: FI_LOG_LEVEL + # value: "1" + #- name: FI_EFA_ENABLE_SHM_TRANSFER + # value: "1" + - name: NCCL_DEBUG + value: "INFO" + - name: NCCL_ASYNC_ERROR_HANDLING + value: "1" + #- name: NCCL_IGNORE_DISABLED_P2P + # value: "1" + - name: WANDB_MODE + value: "offline" + command: + - bash + - -c + - "composer -n 8 --world_size 512 --node_rank $(hostname | cut -d- -f4-) 
--master_addr stable-diffusion-worker-0 --master_port 80 benchmark.py --use_ema --use_synth_data --device_train_microbatch_size 4" + volumeMounts: + - name: shmem + mountPath: /dev/shm From 4148b53a1409fa4c84e4009a9e107db7dd0d928d Mon Sep 17 00:00:00 2001 From: Maxime Hugues Date: Thu, 1 Feb 2024 16:44:36 -0600 Subject: [PATCH 317/648] Update software version --- .../1.amazon_machine_image/roles/aws_efa/defaults/main.yml | 2 +- .../1.amazon_machine_image/roles/aws_efa_ofi/tasks/main.yml | 2 +- .../roles/nvidia_driver/defaults/main.yml | 2 +- .../1.amazon_machine_image/roles/nvidia_nccl/defaults/main.yml | 3 +-- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/2.ami_and_containers/1.amazon_machine_image/roles/aws_efa/defaults/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/aws_efa/defaults/main.yml index c482ed61..bb2bc5a9 100644 --- a/2.ami_and_containers/1.amazon_machine_image/roles/aws_efa/defaults/main.yml +++ b/2.ami_and_containers/1.amazon_machine_image/roles/aws_efa/defaults/main.yml @@ -1,5 +1,5 @@ --- -aws_efa_version: "1.26.0" +aws_efa_version: "1.30.0" aws_efa_archive: "aws-efa-installer-{{ aws_efa_version }}.tar.gz" aws_efa_url: "https://efa-installer.amazonaws.com/{{ aws_efa_archive }}" aws_efa_work_dir: "/tmp" diff --git a/2.ami_and_containers/1.amazon_machine_image/roles/aws_efa_ofi/tasks/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/aws_efa_ofi/tasks/main.yml index e5334773..a7a1085e 100644 --- a/2.ami_and_containers/1.amazon_machine_image/roles/aws_efa_ofi/tasks/main.yml +++ b/2.ami_and_containers/1.amazon_machine_image/roles/aws_efa_ofi/tasks/main.yml @@ -10,7 +10,7 @@ ansible.builtin.git: repo: https://github.com/aws/aws-ofi-nccl.git dest: /tmp/aws-ofi-nccl - version: v1.7.2-aws + version: v1.7.4-aws - name: Build AWS EFA OFI ansible.builtin.shell: | diff --git a/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_driver/defaults/main.yml 
b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_driver/defaults/main.yml index 21180e8c..b8fc5d0c 100644 --- a/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_driver/defaults/main.yml +++ b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_driver/defaults/main.yml @@ -1,3 +1,3 @@ -nvidia_driver_version: "535.54.03" +nvidia_driver_version: "535.129.03" install_nvidia_fabric_manager: true allow_reboot: true diff --git a/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_nccl/defaults/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_nccl/defaults/main.yml index cf24624b..05b1d429 100644 --- a/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_nccl/defaults/main.yml +++ b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_nccl/defaults/main.yml @@ -2,7 +2,6 @@ dependencies: - role: nvidia_cuda -nccl_tag: inc_nsteps -# nccl_tag: v2.12.10-1 +nccl_tag: v2.18.6-1 nccl_repo: NVIDIA install_nccl_tests: true From da5718e71d9e2f1a36d725b476877517a8d514fc Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 6 Feb 2024 21:31:07 +0800 Subject: [PATCH 318/648] SMHP: Docker and enroot to use /opt/dlami/nvme if available. 
Also increase apt timeout on these lcc scripts --- .../base-config/utils/enroot.conf | 2 +- .../base-config/utils/install_docker.sh | 30 ++++++++++--- .../base-config/utils/install_enroot_pyxis.sh | 42 +++++++++++++++++++ 3 files changed, 68 insertions(+), 6 deletions(-) diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/enroot.conf b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/enroot.conf index 02b4d98a..a150acf9 100644 --- a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/enroot.conf +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/enroot.conf @@ -14,7 +14,7 @@ ENROOT_DATA_PATH /tmp/enroot/data/user-$(id -u) #ENROOT_ZSTD_OPTIONS -1 # Options passed to mksquashfs to produce container images. -ENROOT_SQUASH_OPTIONS -noI -noD -noF -noX -no-duplicates +ENROOT_SQUASH_OPTIONS -comp lzo -noI -noD -noF -noX -no-duplicates # Make the container root filesystem writable by default. ENROOT_ROOTFS_WRITABLE yes diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh index 70da80a4..cc792d82 100755 --- a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh @@ -8,8 +8,8 @@ echo " ################################### " -apt-get -y update -apt-get -y install \ +apt-get -y -o DPkg::Lock::Timeout=120 update +apt-get -y -o DPkg::Lock::Timeout=120 install \ ca-certificates \ curl \ gnupg \ @@ -19,8 +19,8 @@ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/ echo \ "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null -apt-get -y 
update -apt-get -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin +apt-get -y -o DPkg::Lock::Timeout=120 update +apt-get -y -o DPkg::Lock::Timeout=120 install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin chgrp docker $(which docker) chmod g+s $(which docker) systemctl enable docker.service @@ -31,7 +31,27 @@ curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dear && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \ sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list -sudo apt-get install -y nvidia-container-toolkit +sudo apt-get install -y -o DPkg::Lock::Timeout=120 nvidia-container-toolkit # add user to docker group sudo usermod -aG docker ubuntu + + +# Opportunistically use /opt/dlami/nvme if present. Let's be extra careful in the probe. +# See: https://github.com/aws-samples/awsome-distributed-training/issues/127 +# +# Docker workdir doesn't like Lustre. Tried with storage driver overlay2, fuse-overlayfs, & vfs. 
+if [[ $(mount | grep /opt/dlami/nvme) ]]; then + cat <<EOL >> /etc/docker/daemon.json +{ + "data-root": "/opt/dlami/nvme/docker/data-root" +} +EOL + + sed -i \ + 's|^\[Service\]$|[Service]\nEnvironment="DOCKER_TMPDIR=/opt/dlami/nvme/docker/tmp"|' \ + /usr/lib/systemd/system/docker.service +fi + +systemctl daemon-reload +systemctl restart docker diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_enroot_pyxis.sh b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_enroot_pyxis.sh index 836ada4a..2a2696af 100755 --- a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_enroot_pyxis.sh +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_enroot_pyxis.sh @@ -3,6 +3,7 @@ set -e BIN_DIR=$(dirname $(readlink -e ${BASH_SOURCE[0]})) + ################################################################################ # Install enroot & pyxis ################################################################################ @@ -13,6 +14,15 @@ if [[ -f /opt/slurm/etc/cgroup.conf ]]; then || echo "ConstrainDevices=yes" >> /opt/slurm/etc/cgroup.conf fi +apt-get -y -o DPkg::Lock::Timeout=120 install squashfs-tools parallel libnvidia-container-tools + +## These are needed for `enroot start xxx.sqsh`, but on SMHP, `enroot start xxx.sqsh` hangs, hence +## not needed. +## +## The hang behavior may be the same as https://github.com/NVIDIA/enroot/issues/130 and the solution +## is to `enroot create xxx.sqsh ; enroot start xxx ; enroot remove xxx`. 
+#apt-get -y -o DPkg::Lock::Timeout=120 install fuse-overlayfs squashfuse + SLURM_INSTALL_DIR='/opt/slurm' PYXIS_TMP_DIR='/tmp/pyxis' @@ -45,3 +55,35 @@ ln -fs /usr/local/share/pyxis/pyxis.conf $SLURM_INSTALL_DIR/etc/plugstack.conf.d mkdir -p /run/pyxis/ /tmp/enroot/data /opt/enroot/ chmod 777 -R /tmp/enroot /opt/enroot ################################################################################ + + +# Opportunistically use /opt/dlami/nvme if present. Let's be extra careful in the probe. +# +# Note: ENROOT_TEMP_PATH on Lustre throws "Unrecognised xattr prefix lustre.lov". +# See: https://github.com/aws-samples/awsome-distributed-training/issues/127 +if [[ $(mount | grep /opt/dlami/nvme) ]]; then + sed -i \ + -e 's|^\(ENROOT_RUNTIME_PATH *\).*$|\1/opt/dlami/nvme/tmp/enroot/user-$(id -u)|' \ + -e 's|^\(ENROOT_CACHE_PATH *\).*$|\1/opt/dlami/nvme/enroot|' \ + -e 's|^\(ENROOT_DATA_PATH *\).*$|\1/opt/dlami/nvme/tmp/enroot/data/user-$(id -u)|' \ + -e 's|^#\(ENROOT_TEMP_PATH *\).*$|\1/opt/dlami/nvme/tmp|' \ + /etc/enroot/enroot.conf + + mkdir -p /opt/dlami/nvme/tmp/enroot/ + chmod 1777 /opt/dlami/nvme/tmp + chmod 1777 /opt/dlami/nvme/tmp/enroot/ + + #mkdir -p /opt/dlami/nvme/tmp/enroot/data/ + #chmod 1777 /opt/dlami/nvme/tmp/enroot/data/ + + # mkdir -p /opt/dlami/nvme/enroot + # chmod 1777 /opt/dlami/nvme/enroot + +fi + +# Use /fsx for enroot cache, if available. Let's be extra careful in the probe. 
+if [[ $(mount | grep /fsx) ]]; then + sed -i -e 's|^\(ENROOT_CACHE_PATH *\).*$|\1/fsx/enroot|' /etc/enroot/enroot.conf + mkdir -p /fsx/enroot + chmod 1777 /fsx/enroot +fi From 54f2e96722455b4ec7a312b7ee568a03f69eb3bb Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Tue, 6 Feb 2024 11:40:26 -0800 Subject: [PATCH 319/648] Closing Soon Bot Signed-off-by: Sean Smith --- .github/workflows/closing-soon.yml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 .github/workflows/closing-soon.yml diff --git a/.github/workflows/closing-soon.yml b/.github/workflows/closing-soon.yml new file mode 100644 index 00000000..66742a98 --- /dev/null +++ b/.github/workflows/closing-soon.yml @@ -0,0 +1,22 @@ +name: Close inactive issues +on: + schedule: + - cron: "30 1 * * *" + +jobs: + close-issues: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: actions/stale@v5 + with: + days-before-issue-stale: 30 + days-before-issue-close: 14 + stale-issue-label: "stale" + stale-issue-message: "This issue is stale because it has been open for 30 days with no activity." + close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale." 
+ days-before-pr-stale: -1 + days-before-pr-close: -1 + repo-token: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file From 0b60fa3da773087130382f9d787d8ed2cc8cc91b Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Tue, 6 Feb 2024 16:05:00 -0800 Subject: [PATCH 320/648] Updated efa-cheatsheet with PR comments Signed-off-by: Ankur Srivastava --- 1.architectures/efa-cheatsheet.md | 56 +++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/1.architectures/efa-cheatsheet.md b/1.architectures/efa-cheatsheet.md index a031d7aa..75947a7a 100644 --- a/1.architectures/efa-cheatsheet.md +++ b/1.architectures/efa-cheatsheet.md @@ -29,6 +29,62 @@ Use cuda>=12.0, nccl>=2.18.0 (recommend at least 2.18.5), aws-ofi-nccl>=1.7.2 (r The dcgm command to validate the NVLinks is `sudo dcgmi diag -r 2 -p pcie.gpu_nvlinks_expected_up=18` where the 18 is H100 specific. `nvidia-smi nvlink -s` is the command to get the status for all NVLinks for each of the GPUs. for H100 there are 18 +The table below shows number of NVLinks for `p4de.24xlarge` and `p5.48xlarge` instances: + +| Instance | GPU | # NVLinks | Generation | +|:-----------:|:----------:|:---------:| :---------:| +|p4de.24xlarge| A100 80GB | 12 | 3rd | +| p5.48xlarge | H100 | 18 | 4th | + +`nvidia-smi nvlink -s` is the command to get the status for all NVLinks for each of the GPUs. Below we see this data for GPU 0 of a `p4de.24xlarge` instance + +```bash +ubuntu@ip-172-31-35-99:~$ nvidia-smi nvlink -s +GPU 0: NVIDIA A100-SXM4-80GB (UUID: GPU-370ec676-e407-3115-836a-8ebcb3c4f62a) + Link 0: 25 GB/s + Link 1: 25 GB/s + Link 2: 25 GB/s + Link 3: 25 GB/s + Link 4: 25 GB/s + Link 5: 25 GB/s + Link 6: 25 GB/s + Link 7: 25 GB/s + Link 8: 25 GB/s + Link 9: 25 GB/s + Link 10: 25 GB/s + Link 11: 25 GB/s +``` +The dcgm command to validate the NVLinks is `sudo dcgmi diag -r 2 -p pcie.gpu_nvlinks_expected_up=<# NVLinks>`. 
For `p4de.24xlarge` instance, this diagnostic looks like: + +```bash +ubuntu@ip-172-31-35-99:~$ dcgmi diag -r 2 -p pcie.gpu_nvlinks_expected_up=12 +Successfully ran diagnostic for group. ++---------------------------+------------------------------------------------+ +| Diagnostic | Result | ++===========================+================================================+ +|----- Metadata ----------+------------------------------------------------| +| DCGM Version | 3.3.3 | +| Driver Version Detected | 535.104.12 | +| GPU Device IDs Detected | 20b2,20b2,20b2,20b2,20b2,20b2,20b2,20b2 | +|----- Deployment --------+------------------------------------------------| +| Denylist | Pass | +| NVML Library | Pass | +| CUDA Main Library | Pass | +| Permissions and OS Blocks | Pass | +| Persistence Mode | Pass | +| Environment Variables | Pass | +| Page Retirement/Row Remap | Pass | +| Graphics Processes | Pass | +| Inforom | Pass | ++----- Integration -------+------------------------------------------------+ +| PCIe | Pass - All | ++----- Hardware ----------+------------------------------------------------+ +| GPU Memory | Pass - All | ++----- Stress ------------+------------------------------------------------+ ++---------------------------+------------------------------------------------+ +``` + + ## 3. Sample Presets ### 3.1. libfabric>=1.18.0 and aws-ofi-nccl>=1.7.0 From f888588922a78ffe4d7eaab62b00679b4207080c Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Tue, 6 Feb 2024 16:08:06 -0800 Subject: [PATCH 321/648] Updated efa-cheatsheet with PR comments Signed-off-by: Ankur Srivastava --- 1.architectures/efa-cheatsheet.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/1.architectures/efa-cheatsheet.md b/1.architectures/efa-cheatsheet.md index 75947a7a..a3bb14f7 100644 --- a/1.architectures/efa-cheatsheet.md +++ b/1.architectures/efa-cheatsheet.md @@ -26,9 +26,6 @@ versions of your libfabric. 
Use cuda>=12.0, nccl>=2.18.0 (recommend at least 2.18.5), aws-ofi-nccl>=1.7.2 (recommend at least 1.7.3). -The dcgm command to validate the NVLinks is `sudo dcgmi diag -r 2 -p pcie.gpu_nvlinks_expected_up=18` where the 18 is H100 specific. -`nvidia-smi nvlink -s` is the command to get the status for all NVLinks for each of the GPUs. for H100 there are 18 - The table below shows number of NVLinks for `p4de.24xlarge` and `p5.48xlarge` instances: | Instance | GPU | # NVLinks | Generation | @@ -54,7 +51,7 @@ GPU 0: NVIDIA A100-SXM4-80GB (UUID: GPU-370ec676-e407-3115-836a-8ebcb3c4f62a) Link 10: 25 GB/s Link 11: 25 GB/s ``` -The dcgm command to validate the NVLinks is `sudo dcgmi diag -r 2 -p pcie.gpu_nvlinks_expected_up=<# NVLinks>`. For `p4de.24xlarge` instance, this diagnostic looks like: +The [dcgm](https://github.com/NVIDIA/DCGM?tab=readme-ov-file) command to validate the NVLinks is `sudo dcgmi diag -r 2 -p pcie.gpu_nvlinks_expected_up=<# NVLinks>`. For `p4de.24xlarge` instance, this diagnostic looks like: ```bash ubuntu@ip-172-31-35-99:~$ dcgmi diag -r 2 -p pcie.gpu_nvlinks_expected_up=12 From a70670a716cf72eced30e86f8965ded9c3f96c42 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Wed, 7 Feb 2024 11:44:11 +0800 Subject: [PATCH 322/648] Undo changes to yaml files; instead, document to README --- .../2.aws-parallelcluster/README.md | 41 +++++++++++++++++++ .../distributed-training-p4de-base.yaml | 3 -- ...ng-p4de_batch-inference-g5_custom_ami.yaml | 7 ---- .../distributed-training-p4de_custom_ami.yaml | 3 -- ...ted-training-p4de_postinstall_scripts.yaml | 3 -- 5 files changed, 41 insertions(+), 16 deletions(-) diff --git a/1.architectures/2.aws-parallelcluster/README.md b/1.architectures/2.aws-parallelcluster/README.md index 534afbe6..84966a13 100644 --- a/1.architectures/2.aws-parallelcluster/README.md +++ b/1.architectures/2.aws-parallelcluster/README.md @@ -186,3 +186,44 @@ You can chose to use a custom image or post-install scripts to install your appl A common 
issue we see customer face is a problem with the post install scripts or issue to access capacity due to a mis-configuration. This can manifest itself through a `HeadNodeWaitCondition` that'll cause the ParallelCluster to fail a cluster deployment. To solve that, you can look at the cluster logs in CloudWatch in the cluster log group, otherwise use the option `--rollback-on-failure false` to keep resources up upon failure for further troubleshooting. + +## 5. Appendix + +Parallel Cluster, as of this writing (v3.8.0), does not automatically set the correct number of sockets and cores-per-socket in Slurm partitions. For ML training on multiple GPUs, this should not have adverse impact on performance since NCCL does the topology detection. However, it might have impact on the training performance for single-GPU training or inference, or in the rare case to support Slurm affinity/binding feature. + +Using the example from [here](https://github.com/aws/aws-parallelcluster/issues/5797), a `p5.48xlarge` should have just two sockets, but Slurm shows the partition to have more than that. + +```bash +$ ssh p5-st-p5-1 /opt/slurm/sbin/slurmd -C +NodeName=p5-st-p5-1 CPUs=192 Boards=1 SocketsPerBoard=2 CoresPerSocket=48 ThreadsPerCore=2 RealMemory=2047961 UpTime=2-00:08:02 + +$ sinfo -o '%9P %4c %8z %8X %8Y %8Z' +PARTITION CPUS S:C:T SOCKETS CORES THREADS +p5* 192 192:1:1 192 1 1 +``` + +Should you want to correct the partition configuration, edit your cluster configuration file (e.g., the `distributed-training-*.yaml` file) and a new entry `Scheduling` / `SlurmQueues` / `ComputeResources` / `CustomSlurmSettings` similar to below. + +```yaml +Scheduling: + SlurmQueues: + - Name: compute-gpu + ... + ComputeResources: + - Name: distributed-ml + InstanceType: p5.48xlarge + ... + CustomSlurmSettings: + Sockets: 2 + CoresPerSocket: 48 +``` + +Each instance type has its own `Sockets` and `CoresPerSocket` values. Below are for instance types commonly used for distributed training. 
+ +| Instance type | `Sockets` | `CoresPerSocket` | +| ------------- | --------- | ---------------- | +| p5.48xlarge | 2 | 48 | +| p4de.24xlarge | 2 | 24 | +| p4d.24xlarge | 2 | 24 | + +For other instance types, you'd need to run an instance to check the values. diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml index 4c6273a3..1bc9c6e5 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de-base.yaml @@ -63,9 +63,6 @@ Scheduling: MaxCount: 4 # not scale down Efa: Enabled: true - CustomSlurmSettings: - Sockets: 2 # p4, p5 - CoresPerSocket: 24 # p4=24, p5=48 SharedStorage: - MountDir: /fsx Name: fsx diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml index 80eb13e5..6c511f84 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_batch-inference-g5_custom_ami.yaml @@ -67,9 +67,6 @@ Scheduling: MaxCount: 4 # not scale down Efa: Enabled: true - CustomSlurmSettings: - Sockets: 2 # p4, p5 - CoresPerSocket: 24 # p4=24, p5=48 - Name: inference-gpu CapacityType: ONDEMAND Networking: @@ -86,10 +83,6 @@ Scheduling: InstanceType: g5.12xlarge MinCount: 0 # if min = max then capacity is maintained and will MaxCount: 10 # not scale down - CustomSlurmSettings: - # Below are for g5.12xlarge. For another instance type, please update accordingly. 
- Sockets: 1 - CoresPerSocket: 24 SharedStorage: - MountDir: /fsx diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml index 8eafbf36..bdded656 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_custom_ami.yaml @@ -64,9 +64,6 @@ Scheduling: MaxCount: 4 # not scale down Efa: Enabled: true - CustomSlurmSettings: - Sockets: 2 # p4, p5 - CoresPerSocket: 24 # p4=24, p5=48 SharedStorage: - MountDir: /fsx Name: fsx diff --git a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml index 7c163eda..ee184430 100644 --- a/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml +++ b/1.architectures/2.aws-parallelcluster/distributed-training-p4de_postinstall_scripts.yaml @@ -70,9 +70,6 @@ Scheduling: MaxCount: 4 # not scale down Efa: Enabled: true - CustomSlurmSettings: - Sockets: 2 # p4, p5 - CoresPerSocket: 24 # p4=24, p5=48 CustomActions: OnNodeConfigured: Sequence: From 5ead5abdc842230062969e7e5f4104aac3a174ba Mon Sep 17 00:00:00 2001 From: Verdi March Date: Thu, 8 Feb 2024 12:38:15 +0000 Subject: [PATCH 323/648] Megatron-LM Llama2 example Make the example work on as low as 2 nodes (16x GPUs each with 80GB memory) --- .../0.distributed-training.Dockerfile | 25 +-- .../2.distributed-training.sbatch | 19 +- .../1.megatron-lm/3.data-preproc-llama2.sh | 32 ++++ .../1.megatron-lm/4.pretrain-llama2.sh | 162 ++++++++++++++++++ 3.test_cases/1.megatron-lm/README.md | 41 ++++- .../conf.template/cluster/bcm.yaml | 2 +- 3.test_cases/2.nemo-launcher/haha-envvar.sh | 8 + 3.test_cases/2.nemo-launcher/llama2-70b2.sh | 89 ++++++++++ 3.test_cases/2.nemo-launcher/llama2-70b4.sh | 89 ++++++++++ 
3.test_cases/2.nemo-launcher/llama2-70b8.sh | 89 ++++++++++ 10 files changed, 521 insertions(+), 35 deletions(-) create mode 100644 3.test_cases/1.megatron-lm/3.data-preproc-llama2.sh create mode 100644 3.test_cases/1.megatron-lm/4.pretrain-llama2.sh create mode 100644 3.test_cases/2.nemo-launcher/haha-envvar.sh create mode 100755 3.test_cases/2.nemo-launcher/llama2-70b2.sh create mode 100755 3.test_cases/2.nemo-launcher/llama2-70b4.sh create mode 100755 3.test_cases/2.nemo-launcher/llama2-70b8.sh diff --git a/3.test_cases/1.megatron-lm/0.distributed-training.Dockerfile b/3.test_cases/1.megatron-lm/0.distributed-training.Dockerfile index 15086e5d..e033c123 100644 --- a/3.test_cases/1.megatron-lm/0.distributed-training.Dockerfile +++ b/3.test_cases/1.megatron-lm/0.distributed-training.Dockerfile @@ -1,10 +1,10 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 -FROM nvcr.io/nvidia/pytorch:23.09-py3 +FROM nvcr.io/nvidia/pytorch:24.01-py3 -ARG EFA_INSTALLER_VERSION=1.28.0 -ARG AWS_OFI_NCCL_VERSION=v1.7.3-aws +ARG EFA_INSTALLER_VERSION=1.30.0 +ARG AWS_OFI_NCCL_VERSION=v1.7.4-aws ARG OPEN_MPI_PATH=/opt/amazon/openmpi ###################### @@ -16,14 +16,9 @@ RUN apt-get remove -y --allow-change-held-packages \ RUN rm -rf /opt/hpcx/ompi \ && rm -rf /usr/local/mpi \ - && rm -rf /usr/local/ucx \ + && rm -rf /opt/hpcx/nccl_rdma_sharp_plugin \ && ldconfig -###################### -# Add enviroment variable for processes to be able to call fork() -###################### -ENV RDMAV_FORK_SAFE=1 - RUN DEBIAN_FRONTEND=noninteractive apt install -y --allow-unauthenticated \ git \ gcc \ @@ -88,8 +83,8 @@ RUN rm -rf /var/lib/apt/lists/* RUN echo "hwloc_base_binding_policy = none" >> /opt/amazon/openmpi/etc/openmpi-mca-params.conf \ && echo "rmaps_base_mapping_policy = slot" >> /opt/amazon/openmpi/etc/openmpi-mca-params.conf -RUN pip3 install awscli -RUN pip3 install pynvml +RUN pip3 install awscli +RUN pip3 install pynvml RUN mv 
$OPEN_MPI_PATH/bin/mpirun $OPEN_MPI_PATH/bin/mpirun.real \ && echo '#!/bin/bash' > $OPEN_MPI_PATH/bin/mpirun \ @@ -99,7 +94,7 @@ RUN mv $OPEN_MPI_PATH/bin/mpirun $OPEN_MPI_PATH/bin/mpirun.real \ ###################### # Transformers dependencies used in the model ###################### -RUN pip install transformers==4.21.0 +RUN pip install transformers==4.21.0 sentencepiece ##################### # Install megatron-lm @@ -108,9 +103,5 @@ RUN cd /workspace && git clone https://github.com/NVIDIA/Megatron-LM.git \ && cd Megatron-LM \ && python3 -m pip install nltk \ && python -m pip install . - -WORKDIR /workspace/Megatron-LM - - - +WORKDIR /workspace/Megatron-LM diff --git a/3.test_cases/1.megatron-lm/2.distributed-training.sbatch b/3.test_cases/1.megatron-lm/2.distributed-training.sbatch index d9bc8d3b..fa3f8382 100644 --- a/3.test_cases/1.megatron-lm/2.distributed-training.sbatch +++ b/3.test_cases/1.megatron-lm/2.distributed-training.sbatch @@ -15,8 +15,8 @@ set -ex; ########################### # Parallelism decomposition variables -: "${TENSOR_PARALLEL:=8}" -: "${PIPELINE_PARALLEL:=4}" +: "${TENSOR_PARALLEL:=4}" +: "${PIPELINE_PARALLEL:=2}" # Model parameters, defaults to 39B model # Refer to page 8 of this paper on how to tune models parameters @@ -24,6 +24,7 @@ set -ex; : "${NUM_LAYERS:=36}" : "${HIDDEN_SIZE:=4096}" : "${NUM_ATTENTION_HEADS:=32}" + : "${SEQ_LENGTH:=2048}" : "${MAX_POSITION_EMBEDDINGS:=2048}" : "${MICRO_BATCH_SIZE:=1}" @@ -40,13 +41,6 @@ set -ex; ## Environment Variables ## ########################### -## Plenty of EFA level variables -export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d -export FI_EFA_FORK_SAFE=1 -# export NCCL_ALGO=Ring -export FI_LOG_LEVEL=1 -export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons -export FI_EFA_ENABLE_SHM_TRANSFER=1 # https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 # https://github.com/pytorch/pytorch/issues/68893 #export 
NCCL_SOCKET_IFNAME=ens @@ -101,9 +95,9 @@ srun -l "${ARGS[@]}" python -m torch.distributed.run "${TORCHRUN_ARGS[@]}" /work --log-interval 1 \ --eval-iters 40 \ --eval-interval 1000 \ - --data-path "$(pwd)/gpt2/my-gpt2_text_document" \ - --vocab-file "$(pwd)/gpt2/gpt2-vocab.json" \ - --merge-file "$(pwd)/gpt2/gpt2-merges.txt" \ + --data-path "${DATA_PATH}/gpt2/my-gpt2_text_document" \ + --vocab-file "${DATA_PATH}/gpt2/gpt2-vocab.json" \ + --merge-file "${DATA_PATH}/gpt2/gpt2-merges.txt" \ --split 98,2,0 \ --clip-grad 1.0 \ --weight-decay 0.1 \ @@ -112,4 +106,3 @@ srun -l "${ARGS[@]}" python -m torch.distributed.run "${TORCHRUN_ARGS[@]}" /work --init-method-std 0.006 \ --fp16 \ --recompute-activations - diff --git a/3.test_cases/1.megatron-lm/3.data-preproc-llama2.sh b/3.test_cases/1.megatron-lm/3.data-preproc-llama2.sh new file mode 100644 index 00000000..3e14f3d5 --- /dev/null +++ b/3.test_cases/1.megatron-lm/3.data-preproc-llama2.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT-0 + +#SBATCH -N 1 # number of nodes we want +#SBATCH --exclusive # job has exclusive use of the resource, no sharing + +########################### +###### User Variables ##### +########################### + +: "${IMAGE:=$(pwd)/megatron-training.sqsh}" +: "${FSX_MOUNT:=/fsx:/fsx}" + +# default variables for Enroot +: "${DATA_PATH:=/fsx}" + +declare -a ARGS=( + --container-image $IMAGE + --container-mount-home + --container-mounts $FSX_MOUNT +) + +# runs in +srun -l "${ARGS[@]}" python3 /workspace/Megatron-LM/tools/preprocess_data.py \ + --input ${DATA_PATH}/llama2/oscar-1GB.jsonl \ + --output-prefix ${DATA_PATH}/llama2/my-llama2 \ + --tokenizer-type Llama2Tokenizer \ + --tokenizer-model ${DATA_PATH}/llama2/tokenizer.model \ + --append-eod \ + --workers 64 diff --git a/3.test_cases/1.megatron-lm/4.pretrain-llama2.sh b/3.test_cases/1.megatron-lm/4.pretrain-llama2.sh new file mode 100644 index 00000000..18ad72fb --- /dev/null +++ b/3.test_cases/1.megatron-lm/4.pretrain-llama2.sh @@ -0,0 +1,162 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT-0 + +#SBATCH --nodes=2 # number of nodes to use, 2 p4d(e) = 16 A100 GPUs +#SBATCH --job-name=megatron_llama2 # name of your job +#SBATCH --exclusive # job has exclusive use of the resource, no sharing +#SBATCH --wait-all-nodes=1 + +set -exuo pipefail + + +################################################## +###### Model architectures (example presets) ##### +################################################## +# Feel free to choose one of the sample presents, or completely define your own +# custom model size. 
+ +## llama2-7b-hf +#declare -a MEGATRON_ARGS=( +# --num-layers 32 +# --hidden-size 4096 +# --num-attention-heads 32 +# +# --tensor-model-parallel-size 1 +# --pipeline-model-parallel-size 1 +#) + +## llama2-13b-hf +#declare -a MEGATRON_ARGS=( +# --num-layers 40 +# --hidden-size 5120 +# --num-attention-heads 40 +# +# --tensor-model-parallel-size 2 +# --pipeline-model-parallel-size 1 +# --sequence-parallel +# +# --use-distributed-optimizer +# --overlap-grad-reduce +# --overlap-param-gather +#) + +# llama2-70b-hf. +declare -a MEGATRON_ARGS=( + --num-layers 80 + --hidden-size 8192 + --num-attention-heads 64 + --group-query-attention + --num-query-groups 8 + + --tensor-model-parallel-size 4 + --pipeline-model-parallel-size 4 + --sequence-parallel + + --use-distributed-optimizer + --overlap-grad-reduce + --overlap-param-gather +) + +# Required for Llama2-style architecture. Do not comment or remove. +MEGATRON_ARGS+=( + --untie-embeddings-and-output-weights + --position-embedding-type rope + --no-position-embedding + --normalization RMSNorm + --swiglu + --no-masked-softmax-fusion +) + +# Additional flags to make it possible to test with as few nodes as possible +MEGATRON_ARGS+=( + --no-rope-fusion + --use-flash-attn + --transformer-impl transformer_engine +) + + +########################### +###### User Variables ##### +########################### + +: "${SEQ_LENGTH:=4096}" +: "${MAX_POSITION_EMBEDDINGS:=4096}" +: "${MICRO_BATCH_SIZE:=1}" +: "${GLOBAL_BATCH_SIZE:=2048}" + +# default variables for Enroot +: "${DATA_PATH:=/fsx}" + +# default variables for Enroot +: "${IMAGE:=$(pwd)/megatron-training.sqsh}" +: "${FSX_MOUNT:=$DATA_PATH:$DATA_PATH}" + + +########################### +## Environment Variables ## +########################### + +# https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 +# https://github.com/pytorch/pytorch/issues/68893 +#export NCCL_SOCKET_IFNAME=ens +export NCCL_ASYNC_ERROR_HANDLING=1 +export 
NCCL_NVLS_ENABLE=0 +#export NCCL_DEBUG=INFO +export NCCL_AVOID_RECORD_STREAMS=1 # torch<2.2 +export TORCH_NCCL_AVOID_RECORD_STREAMS=1 # torch>=2.2 + +# async runtime error ... +export CUDA_DEVICE_MAX_CONNECTIONS=1 + + +######################### +## Command and Options ## +######################### + +declare -a ARGS=( + --container-image $IMAGE + --container-mounts $FSX_MOUNT +) + +declare -a TORCHRUN_ARGS=( + # change this to match the number of gpus per node: + --nproc_per_node=8 + --nnodes=$SLURM_JOB_NUM_NODES + --rdzv_id=$SLURM_JOB_ID + --rdzv_backend=c10d + --rdzv_endpoint=$(hostname) +) + +MEGATRON_ARGS+=( + --seq-length $SEQ_LENGTH + --max-position-embeddings $MAX_POSITION_EMBEDDINGS + --micro-batch-size $MICRO_BATCH_SIZE + --global-batch-size $GLOBAL_BATCH_SIZE + + # Example how to control training duration using steps rather than number of samples. + --train-iters 5 + + # Example how to disable all validations, hence only training steps performed. + --split 100,0,0 +) + +srun -l "${ARGS[@]}" python -m torch.distributed.run "${TORCHRUN_ARGS[@]}" /workspace/Megatron-LM/pretrain_gpt.py \ + "${MEGATRON_ARGS[@]}" \ + --use-mcore-models \ + --log-throughput \ + --lr 6.0e-5 \ + --min-lr 6.0e-6 \ + --lr-decay-style cosine \ + --log-interval 1 \ + --eval-iters 0 \ + --data-path ${DATA_PATH}/llama2/my-llama2_text_document \ + --tokenizer-type Llama2Tokenizer \ + --tokenizer-model ${DATA_PATH}/llama2/tokenizer.model \ + --clip-grad 1.0 \ + --weight-decay 0.1 \ + --adam-beta1 0.9 \ + --adam-beta2 0.95 \ + --init-method-std 0.006 \ + --fp16 diff --git a/3.test_cases/1.megatron-lm/README.md b/3.test_cases/1.megatron-lm/README.md index 32628eb2..1137d357 100644 --- a/3.test_cases/1.megatron-lm/README.md +++ b/3.test_cases/1.megatron-lm/README.md @@ -14,7 +14,7 @@ To run a test case you will go through a series of steps described below: We describe the steps below for Slurm users. EKS users may follow the sequence but details will vary. -## 0. Preparation +## 1. 
Preparation This guide assumes that you have the following: @@ -32,7 +32,7 @@ export DATA_PATH=/fsx # FSx for Lustre shared file-system Make sure that your current directory is under a shared filesystem such as `/fsx/` or the home directory when using [Parallel Cluster](../../1.architectures/aws-parallelcluster). -## 1. Data Preprocessing +## 2. Data Preprocessing Before running training jobs you need to retrieve input data and preprocess it. This section of the guide you will retrieve a container then you convert it into a Squash file via [Enroot](https://github.com/NVIDIA/enroot), you will then retrieve input data ans tokenize it using the GPT2 vocabulary. @@ -113,7 +113,7 @@ Below are the steps you need to follow: Voilà! You have executed the preprocessing job. You will go through the steps to run your training job. -## 2. Distributed training +## 3. Distributed training Now that the data is preprocessed, we will pretrain a GPT3 model MegatronLM. @@ -131,7 +131,7 @@ Now that the data is preprocessed, we will pretrain a GPT3 model MegatronLM. 1: iteration 27/73242187 | consumed samples: 54 | elapsed time per iteration (ms): 88.4 | learning rate: 1.769E-08 | global batch size: 2 | lm loss: 1.087129E+01 | loss scale: 4294967296.0 | grad norm: 0.000 | number of skipped iterations: 0 | number of nan iterations: 0 | ``` -## 3. What's next? +## 4. What's next? The example is based on the GPT3 example from MegatronLM's [repository](https://github.com/NVIDIA/Megatron-LM/blob/main/examples/pretrain_gpt.sh). You can modify `NUM_ATTENTION_HEADS`, `NUM_LAYERS`, and `HIDDEN_SIZE` based on the Table 1 (Page 8) of the document [Efficient Large-Scale Language Model Training on GPU Clusters Using Megatron-LM](https://arxiv.org/abs/2104.04473) to change the model size. 
You can also run the following commands to launch training for different model sizes before submitting a job as follows: `NUM_LAYERS=64 HIDDEN_SIZE=8192 NUM_ATTENTION_HEADS=48 sbatch 3.distributed-training.sbatch` @@ -145,3 +145,36 @@ The example is based on the GPT3 example from MegatronLM's [repository](https:// | 76.1B | `NUM_ATTENTION_HEADS=80 HIDDEN_SIZE=10240 NUM_LAYERS=60` | | 145.6B | `NUM_ATTENTION_HEADS=96 HIDDEN_SIZE=12288 NUM_LAYERS=80` | | 310.1B | `NUM_ATTENTION_HEADS=128 HIDDEN_SIZE=16384 NUM_LAYERS=96` | + +## 5. Appendix: Llama2 + +To pretrain Llama2, you must visit to download the tokenizers files (i.e., `tokenizer.json` and `tokenizer.model`). Registration required. Alternatively, you may train your own tokenizer but this is beyond the scope for this document. Either way, once you have the tokenizer files, you need to upload them to the FSx Lustre that your Slurm cluster mounts. + +The remaining steps are similar to the GPT3 example. For more information, please refer to the official Megatron-LM documentation on Llama2 [here](https://github.com/NVIDIA/Megatron-LM/blob/main/docs/llama2.md). + +### 5.1. Download and prepocess data + +```bash +mkdir -p /fsx/llama2 +cd /fsx/llama2/ +# Then, place `tokenizer.json` and `tokenizer.model` to this directory. + +# Download sample dataset +wget https://huggingface.co/bigscience/misc-test-data/resolve/main/stas/oscar-1GB.jsonl.xz +xz -d oscar-1GB.jsonl.xz + +sbatch 3.data-preproc-llama2.sbatch +``` + +### 5.2. Run pretraining job + +Edit `4.pre-train-llama2.sh` to choose the model size you want to train. Do this by commenting and uncommenting the related stanzas. Feel free to experiment with the hyperparameters such as parallelism, batches, etc. (for more details, please refer to the [Megatron-LM project](https://github.com/NVIDIA/Megatron-LM/) and the Megatron papers ([Shoeybi20](https://arxiv.org/abs/1909.08053), [Narayanan21](https://arxiv.org/abs/2104.04473)). + + +, or any other flag. 
Please refer to Megatron-LM for more details. + +```bash +sbatch 2.distributed-training.sbatch +``` + +Tips: the Llama2 example prints the estimated FLOPS/GPU (enabled via `--log-throughput` in the pretrain `.sbatch` file). You might want to look at [PR-682](https://github.com/NVIDIA/Megatron-LM/pull/682) and decide whether to patch your Megatron-LM to adjust the way FLOPS/GPU is calculated. diff --git a/3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml b/3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml index bb0f5703..388d28ff 100644 --- a/3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml +++ b/3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml @@ -1,4 +1,4 @@ -partition: null +partition: us-east-1e account: null exclusive: True gpus_per_task: null diff --git a/3.test_cases/2.nemo-launcher/haha-envvar.sh b/3.test_cases/2.nemo-launcher/haha-envvar.sh new file mode 100644 index 00000000..11ec68f4 --- /dev/null +++ b/3.test_cases/2.nemo-launcher/haha-envvar.sh @@ -0,0 +1,8 @@ +export NEMO_VERSION=23.11 +export REPO=aws-nemo-megatron +export TAG=$NEMO_VERSION +export TARGET_PATH=/fsx/nemo-launcher-$NEMO_VERSION # must be a shared filesystem +export TEST_CASE_PATH=/fsx/awsome-distributed-training/3.test_cases/2.nemo-launcher # where you copy the test case or set to your test case path +export ENROOT_IMAGE=/fsx/${REPO}_${TAG}.sqsh +export BMK_MODE=1 +env | egrep 'NEMO_VERSION|REPO|TAG|TARGET_PATH|TEST_CASE_PATH|ENROOT_IMAGE|BMK_MODE' diff --git a/3.test_cases/2.nemo-launcher/llama2-70b2.sh b/3.test_cases/2.nemo-launcher/llama2-70b2.sh new file mode 100755 index 00000000..daf4eec0 --- /dev/null +++ b/3.test_cases/2.nemo-launcher/llama2-70b2.sh @@ -0,0 +1,89 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +set -exo pipefail +[[ -z "${TARGET_PATH}" ]] \ + && { echo Please set environment variable TARGET_PATH ; exit 1 ; } \ + || echo TARGET_PATH=$TARGET_PATH + +################################################################################ +# 000: Modify this section to define pre-training configuration: model size, +# number of nodes, max. pre-training steps, job's max. runtime. +################################################################################ +## Pre-train llama2-7b on 16 nodes for 5 steps +export MODEL=llama +export MODEL_SIZE=llama2_70b +export NUM_NODES=2 +export TIME_LIMIT="7-00:00:00" +export MAX_STEPS=8 + +declare -a MODEL_ARGS=( + training.model.tokenizer.model=${TARGET_PATH}/data/llama2/tokenizer.model + training.model.gc_interval=0 + + ## Uncomment below to enable fp8 training (Transformers Engine) on p5 instances (H100 GPUs) + #training.model.fp8=True + #training.model.fp8_hybrid=True +) + + +################################################################################ +# 010: Advance users can modify this stanza to customize benchmarking behavior. +################################################################################ +declare -a BMK_ARGS=( + # Disable validation, as we're only interested to measure the training time. + training.trainer.limit_val_batches=0.0 + + # Disable wandb_logger + training.exp_manager.create_wandb_logger=False + + # Ignore checkpoints + training.exp_manager.create_checkpoint_callback=False + training.exp_manager.resume_if_exists=False + + # https://github.com/NVIDIA/NeMo/pull/6181/files + training.model.data.data_impl=mock + training.model.data.data_prefix=[] +) + + +################################################################################ +# 020: Internal settings. 
+################################################################################ +WORKSPACE_CONT=$TARGET_PATH +CONT_RESULT_DIR=${WORKSPACE_CONT}/results-v2 +CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe + +# Dev/test feature (off by default) to force each pre-training run outputs to a separate directory. +: "${BMK_MODE:=0}" +if [[ ${BMK_MODE} -eq 1 ]]; then + # For debugging: each run has its own output dir. + TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) + CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} + + BMK_ARGS+=( + base_results_dir=${CONT_RESULT_DIR} + training.run.dependency=null + ) + + echo " + #################### + This run will write to directory ${CONT_RESULT_DIR} + #################### + " +fi + + +################################################################################ +# 030: Here we go... +################################################################################ +HYDRA_FULL_ERROR=1 python3 $TARGET_PATH/launcher_scripts/main.py \ + stages=[training] \ + training=${MODEL}/${MODEL_SIZE} \ + training.run.time_limit=$TIME_LIMIT \ + training.trainer.num_nodes=$NUM_NODES \ + training.trainer.max_steps=$MAX_STEPS \ + training.trainer.val_check_interval=$MAX_STEPS \ + "${BMK_ARGS[@]}" "${MODEL_ARGS[@]}" "$@" diff --git a/3.test_cases/2.nemo-launcher/llama2-70b4.sh b/3.test_cases/2.nemo-launcher/llama2-70b4.sh new file mode 100755 index 00000000..8b50dd55 --- /dev/null +++ b/3.test_cases/2.nemo-launcher/llama2-70b4.sh @@ -0,0 +1,89 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -exo pipefail +[[ -z "${TARGET_PATH}" ]] \ + && { echo Please set environment variable TARGET_PATH ; exit 1 ; } \ + || echo TARGET_PATH=$TARGET_PATH + +################################################################################ +# 000: Modify this section to define pre-training configuration: model size, +# number of nodes, max. pre-training steps, job's max. runtime. 
+################################################################################ +## Pre-train llama2-7b on 16 nodes for 5 steps +export MODEL=llama +export MODEL_SIZE=llama2_70b +export NUM_NODES=4 +export TIME_LIMIT="7-00:00:00" +export MAX_STEPS=3600 + +declare -a MODEL_ARGS=( + training.model.tokenizer.model=${TARGET_PATH}/data/llama2/tokenizer.model + training.model.gc_interval=0 + + ## Uncomment below to enable fp8 training (Transformers Engine) on p5 instances (H100 GPUs) + #training.model.fp8=True + #training.model.fp8_hybrid=True +) + + +################################################################################ +# 010: Advance users can modify this stanza to customize benchmarking behavior. +################################################################################ +declare -a BMK_ARGS=( + # Disable validation, as we're only interested to measure the training time. + training.trainer.limit_val_batches=0.0 + + # Disable wandb_logger + training.exp_manager.create_wandb_logger=False + + # Ignore checkpoints + training.exp_manager.create_checkpoint_callback=False + training.exp_manager.resume_if_exists=False + + # https://github.com/NVIDIA/NeMo/pull/6181/files + training.model.data.data_impl=mock + training.model.data.data_prefix=[] +) + + +################################################################################ +# 020: Internal settings. +################################################################################ +WORKSPACE_CONT=$TARGET_PATH +CONT_RESULT_DIR=${WORKSPACE_CONT}/results-v2 +CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe + +# Dev/test feature (off by default) to force each pre-training run outputs to a separate directory. +: "${BMK_MODE:=0}" +if [[ ${BMK_MODE} -eq 1 ]]; then + # For debugging: each run has its own output dir. 
+ TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) + CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} + + BMK_ARGS+=( + base_results_dir=${CONT_RESULT_DIR} + training.run.dependency=null + ) + + echo " + #################### + This run will write to directory ${CONT_RESULT_DIR} + #################### + " +fi + + +################################################################################ +# 030: Here we go... +################################################################################ +HYDRA_FULL_ERROR=1 python3 $TARGET_PATH/launcher_scripts/main.py \ + stages=[training] \ + training=${MODEL}/${MODEL_SIZE} \ + training.run.time_limit=$TIME_LIMIT \ + training.trainer.num_nodes=$NUM_NODES \ + training.trainer.max_steps=$MAX_STEPS \ + training.trainer.val_check_interval=$MAX_STEPS \ + "${BMK_ARGS[@]}" "${MODEL_ARGS[@]}" "$@" diff --git a/3.test_cases/2.nemo-launcher/llama2-70b8.sh b/3.test_cases/2.nemo-launcher/llama2-70b8.sh new file mode 100755 index 00000000..65384c92 --- /dev/null +++ b/3.test_cases/2.nemo-launcher/llama2-70b8.sh @@ -0,0 +1,89 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -exo pipefail +[[ -z "${TARGET_PATH}" ]] \ + && { echo Please set environment variable TARGET_PATH ; exit 1 ; } \ + || echo TARGET_PATH=$TARGET_PATH + +################################################################################ +# 000: Modify this section to define pre-training configuration: model size, +# number of nodes, max. pre-training steps, job's max. runtime. 
+################################################################################ +## Pre-train llama2-7b on 16 nodes for 5 steps +export MODEL=llama +export MODEL_SIZE=llama2_70b +export NUM_NODES=8 +export TIME_LIMIT="7-00:00:00" +export MAX_STEPS=3600 + +declare -a MODEL_ARGS=( + training.model.tokenizer.model=${TARGET_PATH}/data/llama2/tokenizer.model + training.model.gc_interval=0 + + ## Uncomment below to enable fp8 training (Transformers Engine) on p5 instances (H100 GPUs) + #training.model.fp8=True + #training.model.fp8_hybrid=True +) + + +################################################################################ +# 010: Advance users can modify this stanza to customize benchmarking behavior. +################################################################################ +declare -a BMK_ARGS=( + # Disable validation, as we're only interested to measure the training time. + training.trainer.limit_val_batches=0.0 + + # Disable wandb_logger + training.exp_manager.create_wandb_logger=False + + # Ignore checkpoints + training.exp_manager.create_checkpoint_callback=False + training.exp_manager.resume_if_exists=False + + # https://github.com/NVIDIA/NeMo/pull/6181/files + training.model.data.data_impl=mock + training.model.data.data_prefix=[] +) + + +################################################################################ +# 020: Internal settings. +################################################################################ +WORKSPACE_CONT=$TARGET_PATH +CONT_RESULT_DIR=${WORKSPACE_CONT}/results-v2 +CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe + +# Dev/test feature (off by default) to force each pre-training run outputs to a separate directory. +: "${BMK_MODE:=0}" +if [[ ${BMK_MODE} -eq 1 ]]; then + # For debugging: each run has its own output dir. 
+ TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) + CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} + + BMK_ARGS+=( + base_results_dir=${CONT_RESULT_DIR} + training.run.dependency=null + ) + + echo " + #################### + This run will write to directory ${CONT_RESULT_DIR} + #################### + " +fi + + +################################################################################ +# 030: Here we go... +################################################################################ +HYDRA_FULL_ERROR=1 python3 $TARGET_PATH/launcher_scripts/main.py \ + stages=[training] \ + training=${MODEL}/${MODEL_SIZE} \ + training.run.time_limit=$TIME_LIMIT \ + training.trainer.num_nodes=$NUM_NODES \ + training.trainer.max_steps=$MAX_STEPS \ + training.trainer.val_check_interval=$MAX_STEPS \ + "${BMK_ARGS[@]}" "${MODEL_ARGS[@]}" "$@" From 1ac78cfae8556af7d63567a28c0db19cdddfd9c7 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Thu, 8 Feb 2024 21:23:05 +0800 Subject: [PATCH 324/648] Fail fast when enroot container not found --- 3.test_cases/1.megatron-lm/3.data-preproc-llama2.sh | 3 +++ 3.test_cases/1.megatron-lm/4.pretrain-llama2.sh | 1 + 2 files changed, 4 insertions(+) diff --git a/3.test_cases/1.megatron-lm/3.data-preproc-llama2.sh b/3.test_cases/1.megatron-lm/3.data-preproc-llama2.sh index 3e14f3d5..91e60351 100644 --- a/3.test_cases/1.megatron-lm/3.data-preproc-llama2.sh +++ b/3.test_cases/1.megatron-lm/3.data-preproc-llama2.sh @@ -6,6 +6,8 @@ #SBATCH -N 1 # number of nodes we want #SBATCH --exclusive # job has exclusive use of the resource, no sharing +set -exuo pipefail + ########################### ###### User Variables ##### ########################### @@ -22,6 +24,7 @@ declare -a ARGS=( --container-mounts $FSX_MOUNT ) +[[ -f ${IMAGE} ]] || { echo "Could not find enroot image: $IMAGE" ; exit -1 ; } # runs in srun -l "${ARGS[@]}" python3 /workspace/Megatron-LM/tools/preprocess_data.py \ --input ${DATA_PATH}/llama2/oscar-1GB.jsonl \ diff --git 
a/3.test_cases/1.megatron-lm/4.pretrain-llama2.sh b/3.test_cases/1.megatron-lm/4.pretrain-llama2.sh index 18ad72fb..dfa6eea2 100644 --- a/3.test_cases/1.megatron-lm/4.pretrain-llama2.sh +++ b/3.test_cases/1.megatron-lm/4.pretrain-llama2.sh @@ -142,6 +142,7 @@ MEGATRON_ARGS+=( --split 100,0,0 ) +[[ -f ${IMAGE} ]] || { echo "Could not find enroot image: $IMAGE" ; exit -1 ; } srun -l "${ARGS[@]}" python -m torch.distributed.run "${TORCHRUN_ARGS[@]}" /workspace/Megatron-LM/pretrain_gpt.py \ "${MEGATRON_ARGS[@]}" \ --use-mcore-models \ From 3c39b590a22fa6bf17aa3be388dcaa51ca5466df Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Thu, 8 Feb 2024 16:04:20 -0800 Subject: [PATCH 325/648] Updated Signed-off-by: Ankur Srivastava --- 3.test_cases/6.stable-diffusion/README.md | 147 +++++++++++++++++- .../multi-node/p5-model-scaling-eks.png | Bin 0 -> 57588 bytes ...5-model-scaling-stable-diff-throughput.png | Bin 0 -> 47851 bytes 3 files changed, 146 insertions(+), 1 deletion(-) create mode 100644 3.test_cases/6.stable-diffusion/multi-node/p5-model-scaling-eks.png create mode 100644 3.test_cases/6.stable-diffusion/multi-node/p5-model-scaling-stable-diff-throughput.png diff --git a/3.test_cases/6.stable-diffusion/README.md b/3.test_cases/6.stable-diffusion/README.md index e7b31e5b..641e5dd8 100644 --- a/3.test_cases/6.stable-diffusion/README.md +++ b/3.test_cases/6.stable-diffusion/README.md @@ -184,7 +184,7 @@ More details on this can be found here: https://pytorch.org/blog/accelerated-dif ## 2. Multi Node Tests -### 2.1 Multi-Node Training +### 2.1 Multi-Node Training with Slurm For the multi-node training we've created a [Dockerfile](https://github.com/aws-samples/awsome-distributed-training/blob/multi-node/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile), and Slurm submit script to submit the training job. To get started please follow the guide [AWS ParallelCluster Distributed Training](../../1.architectures/2.aws-parallelcluster). 
Before starting this section make sure you have the following setup: @@ -238,3 +238,148 @@ rain Epoch 0: 100%|████████████████ ``` +### 2.2 Multi-Node Training with Amazon EKS + +Next we will show how to train stable diffusion with Mosaic ML's [composer](https://github.com/mosaicml/composer/tree/dev) on [Amazon EKS](https://aws.amazon.com/eks/). To start we have created an EKS cluster following the steps [here](https://github.com/aws-samples/awsome-distributed-training/tree/main/1.architectures/4.amazon-eks). You can follow these steps to add a nodegroup of `p5.48xlarge` instances. First export these environment variables. + +```bash +export AWS_REGION=us-west-2 +export ACCOUNT=$(aws sts get-caller-identity --query Account --output text) + +## Docker Image +export REGISTRY=${ACCOUNT}.dkr.ecr.${AWS_REGION}.amazonaws.com/ +export DOCKER_IMAGE_NAME=mosaicml-stable-diffusion +export MOSAICML_VERSION=0.15.0 +export TAG=$MOSAICML_VERSION +export PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:23.08-py3 + +## Job parameters +export NUM_NODES=64 +export NUM_GPUS_PER_NODE=8 +export WORLD_SIZE=$((NUM_NODES*NUM_GPUS_PER_NODE)) + +``` + +#### 2.2.1 + +First we need to run the `do-eks` container which has all the necessary kubectl tools installed. Just run: + +```bash +git clone https://github.com/aws-samples/aws-do-eks.git +cd ./aws-do-eks + +# Build the do-eks Docker image +./build.sh + +# Run container +./run.sh + +# Execute in the container +./exec.sh + +cd /eks/impl/aws + +# Next we will edit the nodegroup.conf config file +``` + +#### 2.2.2 + +To add a managed P5 nodegroup, we will follow the steps listed in the [aws-do-eks](https://github.com/aws-samples/aws-do-eks/tree/main/Container-Root/eks/impl/aws) project. + +1. 
First we need to create a P5 launch template and to do that we need to fill out the nodegroup.conf config + +```bash +CLUSTER= +REGION=${AWS_REGION} +LAUNCH_TEMPLATE_NAME=lt-p5-odcr-eks-1-27 +LAUNCH_TEMPLATE_ID= +LAUNCH_TEMPLATE_VERSION=1 +NODEGROUP_NAME=p5-48xlarge +NODE_ROLE_ARN= +SUBNETS= +MIN_SIZE=0 +DESIRED_SIZE=64 +MAX_SIZE=64 + +EFA_VERSION=1.29.1 +AMI= +SSH_KEY_NAME= +CAPACITY_RESERVATION_ID= +PLACEMENT_GROUP_NAME= + +``` + +You can get the EKS optimized id for EKS version 1.27 and Amazon Linux 2 as: +```bash +aws ssm get-parameter --name /aws/service/eks/optimized-ami/1.27/amazon-linux-2-gpu/recommended/image_id --region $AWS_REGION --query 'Parameter.Value' --output text + +``` + +Next you can follow the steps given [here](https://github.com/aws-samples/aws-do-eks/tree/main/Container-Root/eks/impl/aws) to create a P5 nodegroup. + + +#### 2.2.3 + +Once the nodes are created, you can use `nv` to list the available nodes. `nv` is an alias to `eks-node-viewer` . You can see other aliases by typing `alias`. Below is a sample output with a cluster with 2 `c5.4xlarge` nodes. The status of `Ready` means that the node has joined the cluster. If a node is in a `Not Ready` state, you might need to manually terminate the node from EC2 console and EKS will restart it and the node will join the cluster again. + +```bash +2 nodes (700m/31780m) 2.2% cpu █░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ $1.360/hour | $992.800/month +13 pods (0 pending 13 running 13 bound) + +ip-192-168-70-41.us-west-2.compute.internal cpu █░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 2% (7 pods) c5.4xlarge/$0.6800 On-Demand - Ready +ip-192-168-120-65.us-west-2.compute.internal cpu █░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 2% (6 pods) c5.4xlarge/$0.6800 On-Demand - Ready +• +←/→ page • q: quit + +``` + +You can see additional details about the node as below. 
+ +```bash +kubectl describe node +``` +The following is the `Allocatable` section of a P5 node which shows that there are 8 GPUs available and 32 EFA devices available as well. + +```bash + +Allocatable: + cpu: 191450m + ephemeral-storage: 27356033509547 + hugepages-1Gi: 0 + hugepages-2Mi: 42242Mi + memory: 2052371068Ki + nvidia.com/gpu: 8 + pods: 100 + vpc.amazonaws.com/efa: 32 +``` + +#### 2.2.4 + +Next we need to build the Docker Image and push it to [ECR](https://aws.amazon.com/ecr/): + +```bash +docker build --build-arg MOSAICML_VERSION=${MOSAICML_VERSION} --build-arg PYTORCH_IMAGE=${PYTORCH_IMAGE} -t ${REGISTRY}${DOCKER_IMAGE_NAME}${TAG} -f 1.Dockerfile . +``` + +Before pushing the image you might need to login to ECR as: + +```bash +aws ecr get-login-password | docker login --username AWS --password-stdin $REGISTRY +``` + +And the push the image as: +```bash +# Create registry if it does not exist +REGISTRY_COUNT=$(aws ecr describe-repositories | grep ${DOCKER_IMAGE_NAME} | wc -l) +if [ "$REGISTRY_COUNT" == "0" ]; then + echo "" + echo "Creating repository ${DOCKER_IMAGE_NAME} ..." 
+ aws ecr create-repository --repository-name ${DOCKER_IMAGE_NAME} +fi + +echo "Pushing image ${REGISTRY}${DOCKER_IMAGE_NAME}:${TAG}" +docker image push ${REGISTRY}${DOCKER_IMAGE_NAME}:${TAG} + +``` + + diff --git a/3.test_cases/6.stable-diffusion/multi-node/p5-model-scaling-eks.png b/3.test_cases/6.stable-diffusion/multi-node/p5-model-scaling-eks.png new file mode 100644 index 0000000000000000000000000000000000000000..cb083f3d948f2002942ce901c7749f147e246428 GIT binary patch literal 57588 zcmbrmcR1DmA3sb>inNhcGE%aWt*MkKRLb7T9@(QwMHH3n3Mr9&>`^uuk?l~C?bv(W zk9VK%@4o-Mulu^YuFv)Dt80ad6VNGp4$v?_gtL?Ot#ufEfS& zTMiC3_ELg^mjCw)1gz~$1@}IXsKtlS+MK^(PesL`Py9#yQ9jO$it3lX;+d1`&fz0u zC+FQw9c2?^ALi<5xlgQX2va|KVIY6=$GayE9p;=*ySryk@>09>`^_KspBHS9=4|*p z-F5L0UyNny)M|>6V#S9O;#?1x?6tF7W$jJ+LTdDn*F}or8pBJouUp7)d z{@;D<71Y@%NRbxjK@4?=|N9 zf&&K*yiwvgb?OwwN{`d!e8XDGkLYv#4u=ALX2!ZXf-291h#1>R?cR}n5!cvx@LH0N z<Q%%UXl~J-{=_uO`-iREb$Nb5DT;S6&b;NzC;y=3trdQ9)vK#Z*2~MwLLo|7Sy|&t zV})_?_A85b%KX_bF3pbeU3tHOot-@^H&-poMt`U|vAXEq`h~%`D2Y2mKiPD1A6Jap zR1Q*|>8(9;_N=gJEvxGsCBiB{+rEG2jvcna?{~iFTSmQ+e_H`E&XmzR_BRo)HJn`P=kX?D%jgOre8h86xK) zB_}5*$=0r&;yQC%%5kJ(X1K-H)>ePAw^C-d)sSz=+271=&uXEQ zYonRnsv`&A%fF{EU>#*9D-QDCOy7LBuO@7&WS7e26oa#Py@cB=|Jw2tgQ&^(ti&GA zrO`~{c5$A4YBTYtbY!zOrO0h8|6H(uwnbf%c5d0?%<#fUT3wdI;7(sgL6cua_m&s> zqs-V1D+9TA3+l+!jB;Om?QlLpJICR7{#;jea%5CgHyfMR!sK5EN@vKMH%z$AA>x+% z0%Ru7Kb4|kV2`;JCVoaiVO@Q;y{YT;z)8H)*53XgKfhmuv{R}@o5;1qn|IFHK7QOu zi~T?Mwl+d~sz1t$_?Ox!d6g3+9=@uYBUkPVSF4 zGj2akRd}{A}Xf40A^8>`XVm$-mXzuiPT0{#FMczIN_L`mG)0 zoI3}_#rHZnIhlNabF$o@O|6MV(k|wxWR#HpeR78wKC<-jj)Pb{)f`=m$B!ifDJL1T zX)>(Hp?B|6?cB9%p(jATAz`kZ!wVbe#EBDBxZWJ!9Z%0IOSw)T;_#T>K~*QWWD(!H zeC^AZFCkKt`gfniUYI?i_JIyZD??T{)ktxhLQsBRU6e}87wsp{ zo|QH>24Oe1U#i`OUp1UmNcB-IPTAZ3&~5=OiBj`JoPiPlc2Rgl#9-5BMjQLhxw3_; zi$|#P-R8>L+OS;y0RjEQvAU6Fg7+{TKYo1Ox^=g@3uFt|R;Bm)au*wh%Xx&pJMDwx z;OFB*Q|ikYXWw7TuUjCaCd@_4z|ddJAm97jhl5nAnxG*pb!Vu=k6BcetTucLm)Ns1 z*By8A!KU82D30sbuWw-#Q2JZ-%BAQf>u^m^9u`Cvh9q$>6pKD<-4UhXb;x> 
ziFD_Ti=lmobEOcc9I<)x<{~mV;_ci0#ES+S<3wdza9dsezPkSL^1F4Bk&(NP-h7FT zDC{0qC7)s*qx~a1qIzbJU*$$ZV@b)H^?b>oIYmCmFrbV z)l>GY*h0Lgx3>f<9{IP2`uiK<2QE{6SCaKk$}RNn@?jONOccGUo9p-t=a%>`zeYzd z6}T_lYKWl?;#H~rIYCK3%5~hhYB#EKMU=ce>W6yb%~w~V6>LY_GdgleS3h6dv2Wi6 z8yg`V9UbD^Cf+=z`S<;5INYpgMH@OQ*K4u!u+?cA6h*McjPh+OG!;lb!Z;DV{5f9a53`b6?Vp z(o-u(J-fJ9qO{XY=&4YN6pvNv-i#Jboow?Q9-=*R{Y&qUa~#}4LfdgGYa9h#9vtAD zdx$G#zDv!ti$_U_WZ7ZjR$y8esXnu|G$!9MJKB-Sf`3Ss)y=s5ifqp+?fCtpM#}rd z$owcAL*`&XU1ibQ4D*(Ko}Qj&6nS(p!=E3okowJT6u4(&HNQF-nspcC?_g#&b6RVN zy~uJGixNs14tm%W!bq#gN7JuAIb%vL0%rCGW? z=CngoOs5yICO@hCP_#d^JC3xb>>eG-XwkVS>i_N0Ru;E8$|X^M?OaD@)Z3NZG~}Z8tYJmZ`p) z+@6-BE^eVh`lmu?_6q7)e4hI*S{LKum$1z7{c&{r#mTwMn7Y(YYH|An1Ozte&Cky> zp|f6^W5TkN*rIU*oLH+zHAogAS8yWe*4DZYFRVS&BrkToikz0)HP$;N!1ra^bEGu9 zscyD+ZLG_==b2K3TCYyDTm4ze6|BqttMTf;Q73w^%hTvhF9rTg)5*Af^>%YYr9Yc= zgRG9aIu2R-FZ5I@htc*|_tw*><~iHq-^z%77JAHl9;IjS=cir7g*6@Vn(S3X`#gW< z%m!s;Wto-f#_53{>%F|Z97o$r&vAH^;Dhjelyg!NbPFT#Lw)fuzMJWX>SX6wkKO+H z)_LMjxUAc2^ot36ZW^k~sne%JB<=ew3tX9pJ2C~aHA|bCUd2VaKE=CtY~Oyj)ZjNd zz%pJ=()p!{{TfjAjAhrJJsXP-2FRSkded&(#v?5J;sFgqDr$W05-%6mb9paMK%7%S zJeQn*-J=AV8B@&EZ85UBKI2&eR`}r>eQnxKMFjS zEVBE)XR#D^Ukny#TjjX^`P#Gjd1txh@zcOi=3$T8D=VMi4Wb4mn{)0Azy2+(>oIc` z4f!039J7#Kk!AVI8$W*ha2Wde1fM^JD%_B2e9_(Ay|utY+OW)zSU&eTvJ+lVX*Kro zVEZ?;!#99LRC^Aq|9W*@=bh`!&_f>|QIACj&&5GrcCY0LhooX5IsgNG76tMrHC}eN z5#jp}9@yI18DNRrXWLA}Wn2Q;WnGg|oank4(+uV)WK#LyrFF;kim0}+N}SNpU_!-s zdE~l3s=bI&#r6v?552IQFfQ$_45Fo{=N1;;AulhFL!tCw6MX|Y?!>)yF9ozickwDG z+xIKWt5!#)Q{(R^nl2^WNNde^JKhw3wWpL(H|BOz zd`U@((R`+Vh3tX2!UNl1e9z9HAq&y<02hc;9TXC(PuAck<)D0QrlS+1>RDSITSFBP z89B)zZ1B)>Da^Vcei& zb?qceb6`|5WteEH9m22iy>Gi+bFB|xl{CmK|8)(&Da-Efh4@K87v{UadwY$RC(1d@ zC>(qDnw&XzF7A*t?o?=t1ygh6_o1fGEP8A;dI8a{_0&C|;vz5nAO*@S^ahcn0DLcI ztUO}WNw!>@tQJ(WR5^cM2um0EN6V?f&1vG#+2ZQzYAU}!rM@h@Dv?T8P4N!z%*;$` zChiL_h5q#Pr2P#QE#6Vs?U$gFFLl;>>NJDwODpT9=Lb$V$aZymtyzM!6gM>;E4>vl zGH{ZH4s{?m`u;XHDI;fRXQsP9fP2(C*naHANj4t#NV@v@HCZQ5F^DJDd1YkgEpNVj zc23Sd-@Qju-D<9X(GreV|71W_hNBYRd@<x^Ix=3S(P^b@&3^c 
z7M3uI%UQ}V*sY6R0^*S zU-f(Va6_&z&Qpi?hpS(-BuhyEW6ZU+wVU)DMp}hyhFi024RdH0QHgFZJs8L84;JtM zEQnG?JB@XHA{ld^)GhSPKUx4J-ql=i@il+S=+d{Xf}OM3qK38Mk|g_wj~>OgQ$B9U zNAEIQok;Uq^VG~wDp)*RO6nc`WJS&~(<#S+u%vB?;2BWVRi!4rePaTk`B>Y~3YY^Q zN%Bu98((y{wSq}a=qt{Yb$%!T%zzJK@1vuieZHQwPwj(}qHxTy#Nj8tzM_Y(f3ct{ zLmdk>&~)S;(Dhom6FL*cX`5ZcDX^9<{KCRNm?UiU zLi+`7rhg>p1JQufmL6%FtlcGF>S2!ve?3beX{_X)b-eg0Mp@Cor-Day9aH0(sv)Z_I*|&`iYZO zhThk89X*UX&IhVf2)5c&9x#RDlww-PJ`2)qP-c0KZimXn0Crhm?abUn^a)@Cs#b3` z-nLX@-$dD&!A9Pi9>$|L#cCA8MUI&L`Rcwf87r;6RLL8qlo257@D4%Sl z>9@L}rPjOD>s6KN|Bj8R_-tddu>6Qx>=?Yo4ZK3aeO@TaeUcW$EgGBkXQE~>xK}v( zZx9x)t-aS*u;5niCC=lJ6sS;|TdjC2_<4DG%Lw=%d(o_ebW}iqZ)`Eo=Jz^uqi7tj zpP#SS%}VEMD@M_Fb#=+i{8V#t@EdvgsxG(V_dwmO4~M75u5XHwu^Y5|*|8?_R3^3I zib_g!Kx`le2TMtQfCuHXS?Fei&6f&f7d$J+Y!fsyt#ZoG%SDeTk}TTRX#}s!xuejk z=_R)^tw!au_LXV(?lWk?)boFQ1wVp>|yqpD!92;2H*ViY& zfcnSF)VjL5_wU~a&pnL(6O97zFi?NsEO|sV#h}z7vmYd^i1gYfuy>R*pNpnq(j!x` zAMNj2&c#Q2j^6w^Tdp-ZZ*?cG#a%jeX>R!U?-zd-q{s3e1P5P_cV&&@X+ukN^l*jsWW=^AdZ9?wE z(xnR;MWJsqpp?|)RGt&E=*nxGZ55*1#K5kkrNs&b<}o=VChfF|uGje$>5AK$S*|nj zqjR>I-NkqLc~6h!&#~jYnE+2=OZuQ?K?nN^2JJFcb6m}x*(NvAbJ0w5XHbklB^xKKq4r@{CE*Sx>tRlg}WrHPEN@uox-b}M*rblOj zCr>B6)1r%$H0w%R@E*0gLtPptP9&#i`;QuEsC#OpL3Pv zbLwGI^m2e`=)bm9xK?e)?-YOL$1O8QcP<0%~Hs|Z5Adt+c-Y{z}19K zr&{3d;u9_^Cl^`H?&b%O+A}m%WnU*tC|rjiQ=nO8(&x)TZGlF)v0^sk*VrEaTbXU6 zRx6%CbAf$$$JuX@I%EPMvxB!5lFlRmj4MS*9cwD690r~Iek~3q4g`IM)Z_TFbCP|N zkk{(65<_v#cgC%Q5FyWy@+oJZ?tZ8D^*-9CEqaWFWO>AGuzmZ!@9)rNN=i#7pt1e= z`}fu04kI&LCnr-NNT_S!K>O>OLyF6vu>@PD<@dkLop9+#QAxKd-Br02+D$L@%HoV6 zghPJq9I>-EOt?A3U4MUjLO4MFltHE6GO-^tde#QQ4>4GAMQvv?QlGdE~B3PQ!CcMe7-U4GleQv#3pu zlV6zao%>Z*TH3~5xI9_S9BB_FBSAZN?_H=Ge=F9=38Y8pBW>)a4t{=J#ep26$MXUt zPg$+*Oh5@zijs?i!%2$gfHqxt8!2 zeSiw8L9t`^VU01bjl-eiyvpH_SJ$9Kf}13!g)g(WbRJsZx{JboDbwm&689&1D{#Vx z4LTGW zeLd|tNkOQJfr)Ifnf|WIL#S#{j-c6vl$4xVoF42+zEc{cJ2mQg<;8SJvT^lZpRPuF zK5J7&PxBv%gs@4h)^KacLf(V#h3wf2Y1&+wSMCc0()^F*bdt%~g>ly11)0Y-B;ibx zx}pj2yl~0g$SB5g?=OJRxU9G@IC-Q4rl7hIK*Ek&D?0^#iILy%t++S@!o!E8-&Pk_ 
zH=du|K*)}y@=R&{E0ZytiCx=#OTXcGI%t;Xci_?u!}JERPAX|#IIkK|F<;lda`2jg z^7H1H`L)m)G+%2Lfyge|wzfg5RZo;CD>oU_+HkR9Zm*g^o~P?J)6!PjtV;qyZyVx` znl^_naKnF1S>pXsF4^vUH>VugN7!^1LpyLz&qBARW;$-A^?Hn4Lk~J{>Q)mfZW&tq zBW4u()!$%U&#y)zkPcj?f1JBPvbgcupP7xV233%H*kfq$lEE#N2Gdgx4r16O)vElN zRZ;SVqiNFlg0ml#BSK?4-Zd=dJqQa@<>9zvSeHME?ZyI|r}7{{D-8?{q00V+fRmHx zK_-(ei!J9y+KO|>3c}+3^_l2c#J|t}mVq6xyTft z8KDocu*(Cgb|kl@FGD}w7L|Sr!ZSAC!J1KT{Z^Aa@en0Gim+u{YOH17B(&}_N`9`Q zfNnv!!OrlQF6r}b*B0&q?>UDu6`H%!rBl5w_q9TZHx zep4&Y<9%}OS>z={CrYPT*t0woT}U;@F81fCIZe$;+x$AwAzK56`3DW-(-7Eg8Hvjf z=)wg8Zkl^nc<*D9kn^~nX27pT6?w}!-!M~?1q*#FKC?r( z!w+yvE@xV4LsvKhqks^xEyV3X?0A&k=ncOb#QWmgT)dV>kAs?@gS?jSI%9xpKz#bI zUwYsQlEA;{WX5HF%>Pg)jY^ofG-4s}1@}1#pgzf;U@%?wP&jQq@c^EYxuO~&0lAcIsmuKKkyH8f~ z(y>Xs1cN0^6&<(HWBbG{&YwE9?tg;Rfy0Nlz_Q7*?P<`ye?*#G)13sv5IpY~uAbOe z1V|?~EOe+{jEv$y-$dib%h zu#ha6B>Ly~@2~h)3d7Bb#76+AOpkU1<2R|ISpNC*r?t>a9u(K8E6;^__DuIsn`x9W z{to`Kedo?^kVRe4xriEqz3V>Hbe*trOifL5?LCcVroIqp=8e!jxO{` zJ`4@N!!G*YX^7N+Tp_EEL%m6ls_Y@{E-SY++Fd-Yv;S0kDbya(!`BjSeKsBIhiM~D z6dJ4Wf`S4w${KJW3yMRktV%URPrss~A_qgo#Sk$w)%X?cLQ5)zTek*A3)J~^K2LjM zSKIj&+ga;NTz>4kw3^UP$o;?1~VF>5C3Q>&cl3T*AMma;sv7s$f9TMmpd|J7r1FULSG`e323MS2T$tro=X=;n z-*CCORgMG{7J9|^aCv!M98Tk}BrflB7<6Mp*c6Z}6qJlpvZ0jxU!fDa@D4kuvq7Pg%Ra4^eGOS z5kHJSXE!%B(9cM()ddSPUZ+1+Yo1UMs!%1EM2t2AAQv|_?!s<~zn)ZkBq{IP*9SC~ znIC%h+_2mVi}riCWDpkl87`-1Y%Cx+m=UrZ(dhK>JvY$Mm@@f%{(J;bT2|VL(wp5M zN$~LbrKO&c5x>Gh`D5nIM)hw`5{{wA(yZhpcSSlDW(pfb)M4Nm+&gPB@fCOPUlc8c} z_oJdXuwGCW?zQN+Jp-PImELmTbCeVK1;NZ@kTO6CtaJ=hylFQ{v{ii(x})!paB@oa zKr1o=g@4I^BM>F3gRZ|uS+r&)K)p?AFMBgC3Fu#gT*61kj{|E}f|9EGCqA)*9B0xao= z3iPJNWXoRu^Dy?DA&w8WrSU2&=X6{^=hy&B_P-d2$_Y&F)CXrOKM0a@?7!9WOhFJT1fUn{XD}?J7HB@d1j(?JLX6 zltU-yCMUnu)r}S0ejd2=$?}3#EQv2Wq&=?p+U>AFP4bxKl(O}U=AYTbg5uBrkHpdc z2{PTJk!8VJ0rev8azI@CH5yX*HpP-M3-QmEU4}>r@Ej^{K{<6JK_L~shcN0;Zhj2;zO(ztIT-JH^SbV^@Z{NldIWnpY zgrC5_cdJPnGk6yF$-vJV{CiMNLz$TwxT(n!9Hp8p`yTiR#7Kb~j@sapuIKAc- z$&Zb;230=95yns6)bxdKgMo0Smm+*FZcT>a%6$rz;x0?GoxD3Ll@-T?LpxQ7Z_U!j 
zq%oI0qw7{T#YAuGw0|bj_q=0+n;)*O^ZNH6%|`B{yf!9B99H-C(LXqNB(Z~~aKjfJ zNtWmb#F6;73Z3OmTlX~yGnz@r@gxYEb~pe{2bs?z8^q}R5cY9@(5wTOMKt*)rOgba+o zmk{Z!(KNuln_#a{aG{Q3x%@fgw-q7Gw9vh_dQMMoKa6Dj6woqgg>l_WKDE9`)<6#c zC*;l0y)!eq^Ih}*Z7py8q9^p@ria{b&^s!Ty3FOQHL6^vya(AGUW_CoBt;;tTem)v zd(|2knVE@3A?ZY6#b;n!8;FG9hEe~TY0>S6`$dGsh!ho?ID~{}to}E#uxGfK?fFMS zs9C%oPUTv?cA95x_Ap`V(jmE2xz)FLd;q%VG6mc61E4@GC1v+ta96^mC14eSJexeB z%zQ<929|F}pd+>sK9p3R2Pk}$hKKkLW@#FFO-spl)}7HMz0P(%F;o7otMR#Dy=#(W zXI_6^|4Wuim##b-ZQ7g)*|%su*q(>3F2J;*-n?xaDYqMc#JZY_0u z|5hekUt{f9NKs{_6t&j0@i@c79FPMKJ)VPKV6C5M5Lmy zT?fl-(0Pf72t-<_nti}=CO}e|9!G&kqP^BuvF?8;6lLg|M74vz+-=FR^;hBAYBa2Q zBE17?dEarX@4)7b30}jp1B!EsKHlEzgiO9Ob(psldU=8BVAC*pe|@rxS2d4B1=Ftt z38R{&gvOly6%^IH*>-dFFIvp#tjjAae1OP=vIUKuZ|H-Q$}F1EKtt?S2n1!w?T8px zjYBZAgJYq7C#a6k2GIb7_b%b0C#I)kP&vLw$;;QY9F;}Z=RH&m0`VfSbGzkB4Ls6F ziX7B;KUT>=1b`5UBRW4e52u`5m(@zI{T8l_h4*KD|G=#P%Np@_B`R}e(D#&N!!3U1 zTWEq-$SjjmGP5kp)-+>JX_i#i9_`zURobWP}b-M%7A*z#)MS?ycK+i=1-|J;fL@C_KLx z6Fudk@S~Nqt5Ny}aQAS-eZ!p0mLG zpWqe%{=5g8pko%{QY**dJ0K?Z667$=bH(-c&yV_UHT#a;J_Bk296^WA_=bQMn6Hsk zK|0at3Ozj_+bf_xU@^aevO%a4xih2=Pal*^$Gdz1+i4Ix_Q{hcGGO=EYpIqU;$WOK zxQtRT*-sGZ;#W{@l<}R=Ok;t5hztNdi+Fv8`E}ybvAUujN5KH1!Dhncy&^$(Z;~u3 zfqn*Bk~oNbT&@ZNP?qiK`uGrR*(jI~+d(IJqujp%015{d(35(gY7y`j#Bv9S6VU68 zjN5zOy?X~pG)>|a+l_sYZF(mBK}6xV*R6PNos?>9c*q=^^xPK*<35p4fa-|kkUo=l3hci1LO?x@GR?S`eggvoyNz*03i#UeMQ^~W zo>~$)E2{NEpvtD`yd5ME1{N?;@8IBITL=NWm#B$wk=jeJrte6mh$E@tM`UwbNwHzNH2@kZn|cAW71tq_Kjf+P!m$N#!4}MAH=^oO+*Z&UWnfOY&4( z?gyxjhA7jU^u!ROqIk~M&Q%K*{?COUT|`+ZgLyUi_nu`OgTy=~!Ey5jv(atEsAucVk1=z2U%y2i@M$wPhj@tX|=H*Ft} zUFSG(xOwFH6w~1J29stWPpVru+wqn-xk|^{Znz>4aX-$q%1sGD!UcR?D!|?^mR-tqehIt zX2SmOLbi8zN2JH>jiHvh{OlYJ&M9>cF(Bkjbcc6eEN8<-35aqS%@^tnG zu&dZP7I(A=Dk7@0n_r{;nhuf(U9Q844t0YFMIP^Ve}JGzVgmeE$ThpM&7?^L9*a!2 z+$j*$er2+~@C`rh<@f_KQnZoX#`#{Ixq4;gSt^naTGMD_Pvhcv1LPKOeHf|>J?0Am z5SzWfLdY(H$)597w@zTJ;qGzAp0Vp;!oQ;o9Wzzt`4X5(-`MjHty+*})yT3rK%}bh!2p3BjI=0#sktiHIh9fJUh$JU 
z`L4p2?PAT)rGC4hl5aTydvL_cx-(0d01w!0e_otpuN-$)>>v3N?L*dIAapqB#mwxy zygvyqn}{Bx!w~!#x88b~NwH-O3GJBnJ_@B4@^IC4U5`=(uACvGs#!45(tfR}IeIq4 z*LUm5lP8HN8MHIgA>Sx;V3uUhuYVA~zwM^Fbe@8PMN~#GpVq!hS$CqQtyDhVNaI1+ zDmk~C;Ci1(fZ2%XpN;gHa{G0Uh3)1ZX#;)_-=MBzhgK_?wAP#VTe#0{_u&~p05J}Q zH$=G}Yf+2)I_Z9&viXrmXEy5|>tV^lt7nwfUH?+9)>Kr9#9?d7ZUQ4w5dYMUj-sA2y3?&#U%#41EK%RpRXMyQr1ZJH-;<9 zWycJw3`+%7)q#fJ)`ZkQN(q|gKH2c_9twmXlQ8Gid=&gZ+-&lXvHj5=fEty-f*c4K zq~(%#SY9EpV~b?|Msfc~YbFmqAJ0o??DQzg-A-FKreB`gw432$WaE`CHCDN_*3oSN zjwLojowbckA^BtJm6Ntw;o;*`4k=+VOD;K=OIjYZon|pvkcs^A$<5I^7)JyKy%9hS zph*;3OB!4?C>doq$Q8t`hEO3QQ=4wqI1lnnOGigU5eQ?PfG$wuz(wH5)HRD6rD#xD zq{YT@o84MOdH<)TjpAlyN>ALddgVaQT*Ui{Bq2O0V`S@zFeF(0gxA^}!gb{w4r5)i z7yB=nH#_ZbN?uII=tJ8I8*HeUH;*q9*qL(Tl*~wk;id0&7Wp@H(nZc_C-V$Ek2Ael zrz|NobG2{sUQOD5f=46Bry2?KdojPdaXCZQlU`MeeYIlq&c!orD-qOkzN- zSIYEkm?2V0pGc+U<Ul4Poh#kugikNGz(rgNE92Hf|0AF87SvhaKi4UD%`ep% z-S4wL8KC>?5xKtCc$_ah&9?94jUhW(&t(aurIfGi<=}{l=RZo+@|lkp`nYFaGEUpl z6dmo~bSlOS?hx?#LjYJpywuXtB0K>|j3rRdNhKPPe?KBeNrYa|y&xKIgjOQuL{UP| zC+K@Q82-ZQ(%6lB*JIf0{lG;;F{k5@3x`oFR>MU2JpTud>lVu4ZH6%L!RH8PrZ2{@ z@)hE?P`DouVl*sQT*$fD3$KXuGd_!GS;H+!Xg2-yT=eAv&*>+)GwL=GD8_iiE|3NY&9{EM-w0%lZreavAf~zy z$>Rf$yc8;SKt?7U%V7Wy8NnHJPGv{kR083Yy%zv)NH$)s(H{x(ysye|gG0XlaKAyW ziV@#Qba}-IcPPJz+X(>yi_IbO481D=1{Nxzfzv0x?793UN-oyZ=NZfKREQC13Vb(G zxrif#*zx(4-krwEHut5kd84OMo3=^8e*vrb%3c+gFl3@fnsq834}^W%od$lFHiu4k?~gW zXLY%5<%U4M2?{C^dE>v4`Vi@UV(bmj`2<+=D9-?HHH$n|Fs>I$5^40}tDDzq?w7C}Ikm9i1ey1p z;~FddsHdR_tHef9P|z-FYHAmg%C{&Lz!+fd_9dAyqWm-c>Hr013HYy@_7V>!)o}ovYvyXcJPSot~~lbotM93T$bNurEe;5 z=IyH9$Dhnv6!%kOOBt3X;yy~gr%m@DkM+j2gwR&coU!}-@uJP&iW>dlJ?&qg3SZ7X z^3!YIg`c%Q#wkwQF4aR)9<3-Q3KISt3@2}6DgPSD{7aiDvYWQ{%jNx>6<9YgtcCku zuIRf{Z)j+kjgE#{Izh7~NBLBps^?0|d*TN^G&lB@|)0Efsdl?2Aa`$I@ z7w?(|eL-@47591m0gjZZMz@hjZ1yJ7bQWHnmdEAQ#QOO|+^#$pj>bg4Gq_e8|K%cfNG zn|d9L0;%y1a8?1g*^#fXiTWem{E=9?j0qDht<*bQ`TyQqvBGD@wDiHYe(=a4E$Z;_ zccq7txlXG5bhS(`^_QM2;;GrkKQJ{U>*Ta&%>1V6GlWL`3NzCI%Cd 
z*q|ApTdR&7hvkd;iLk_!|5nxL^RRyM9fK;I`m&QgbqN8VXDU+adut`dPRc{Q2IZn- z5-NJ(lVOfNOAr#&vB2lgi-^Mq*3pK9D-H_DRYi7Y_gLPP|7bQsm!$H4LL_J4(b~~e zb$L*J}E7=t1?Qd!KZgkBbbGg?2C-YK?d^{@NhMmC<$B8gWdZ0V2lu! zT%-K`d-lX#O2$9?;e}ggyNcdlx8qTapJ^HBQzf5>1H)kA6=EC^JD!*W$?S4HQlm4CcqeoOi@JlHoK%=v8Wh-LDjp_Gx8Aq( zeyIVqm9l5Bxnd-F-m0l7nLE}VX%Q9w`l1T2rQ@~IE}uB2`VC?kLvrWDo!aRkW%l=$jFGU z1LRTy?lsKF#c6O30uuDZKN!fp8L0}Uz%b=2sW={M{4#Hnp@r<59RG)=`3$R#=(+7< zL&3#6F5E<-eUO}c9D30F0(o_;@*S;I5lZe?fK%ef z#Sb^EVKk=S`f(qJMGXmH^^Vb*w=|nJ?cBTfHN}?&#hEaPfM_k93;!8Pe};#B2g|&8 zUUZ(Yoa)+x7eM#}i^w~Yxsz;Zk`{t`{UiSB5e(znOR>J#{a+pVewv^5%{GBDvnh#i z+Bx;ZP3#(vYm{s?GR*t7vy<;Ovu~Z+}s{YALspI$r{(ihcbi2l-2ah>e{jkl}5WRv$~#oy1LKkr}CduwTKtRIQp55&|fY$dQS z?1X{lL>8Qx+-Sp4r4MvoFsZGay5W8_7I?UA*?s6PTql5%9`w%)O(*=#F<|uf?~}+* z61Fkq?!0Jo3A^6S2;pu9c}eAd1yENIZ5Ydq`WIB`P(DUfK+f#@m9))X#+Pbkx1Q+v zJ^szg*@5$=YlV2aHico^=t*L(_#i{@7pN5A&b#|+BYcyR1QFe)DeCUVID(2ltK>IW zFT{v0ZiW#`W*#!)IOq@2hPyK)xUJ3GQn?YHBdm2o14e})1`3EJf~ZJvPnX}17_Eli zK=w4VTEdjX=B-^2%r9AOocuF8QGz_xS+lkmXs^85^C(%Vp0yop_{n{7xc35Nl z$fhrm^~;xr`A4qiYDQIiRKKHjb$#+EdIjk(s!y&4ylCjab(kAB1h8=j;vr@kii%Fa z#i)fX2hr;Pxe!k z*Id5n>``zvY`GfkrwWwo*ut5ba%dz3>IQQpo+AK{qG~exgG!XLmR5M;b*XHKZXrMr z&=CzT)_!f5UL{CAF8CQndHEYN_YzPNWcu0{Etd|(S;BpyRBd?iZ=bb4%1<6V*D6WB z(;&e(a^qV^ua6#cQpf*xy<>jQMR00k`17chkDw&V$jTBb37~Dt%)ah;ueDW@TNp8( zi)}CtJ^uU)PGar>b4Tl&4|HG{@&lwC!j?jD&7C{ltw=XW*iBThCGtl-MhU^6dY~iI zI^GhZB_kEY_465t&;#(6WcLL-7{;O=1!z)Koy~SSXug=pSto>X_YRNI%0eb0V>Zy9 zaRhoQ;ROL8Vr!S1C*>=stE)fl;pX9)#AYOt^sshNzk31fc{w@1Q5J&gY|-Vu0ytWS zP2{P@ULX{yn@hx_IVk>n#=bGo>kXeVx6?`A2g_Bbee2P4_q-N&X;rWAX}Z>{M9J0Q zH`k&(7cyno9M4iM@IYS-V(z#m{TXlS`Vv# zJOLR{!fTbtMKGX}7-9qavCFno3ZRMfAp{x?wI|)O<2A@>FmY$GA{dXMBhq20n}Ga% zNaqod7g>~4mnjnvgTCiXBa+Cj-#tyqmvfoq!pB0ze}z*j(=>}Au65AI64g79;~_W} z=-+Q7-4Injrquhx zN;nK`NBa`4lbIqccTx{#fqZao5L`ct-wfG8AC4mqyBwKWr036{TU~jQXb$g?RKfTm z5BQf6Sc~DospAS#udS%Oq)(RmUtVKAM83SMK(UODMl;9Z z(+fdex#G&oO1OszSBao+s5X7p%=|)(Cc-;t`Efu(>Hpny@uxp5*DiD|&wQn&Yf5W_ 
z(1l=ui1*;khW{K3v9m538gEiCHc@Pl5_1TX<{ydYq!7PW0y@T1OVq+mdI*p)Z{5qE z{0h!K6GG`oprM2)AXZ95!a8O_SsSVRAeb9VdZ)9>xwj&BbexJvZ-un;%ED{CN6my= zqMUK`Q^bdsYq^f2x2ejIF|@F>M=^2qVuX||f~{_{n@_a_Ul%fmsMV3?e-re?VEb)N z95=cmitusEb|Y-r)<~5IsU|8v?BiHxG7A08sIAHxog!$`?&FBY;mZ9lekqNQ<7QAzxz z#DOl)h&xeV0Vr=t$x0vo`<#gFf)6TXx_`z%Q%S{(gDD#P zkVm^I1(@Lffc;PENB)JVEqCvTl@rwTedZk~$#0ZCsN3&0;`Z(ZGbY?_Y&{vo>maU6 z7kh{Vc8$;W7Jtjn8wrSk0@Uv}o-(MMp5!eyh4U!AYsG!1h4X$iqnced+@nD(U^z#V||yDU%t{o zdc$CT+gtYwaK8XILF%l$?>xAFALpwdN7$e4fVFh$PI33Gqw@Ex181rnSIcfjUbFLL z*v!BX2NfhiD`fUq>+s6PLzu;hPmNi6GirIU-4jD$KBdolaijYQJsN@zp%?%U#k3au zAOB$~AF%||Qo8l4pKhzja3cj_HFq-*lh8f={lW3IGsDe4c2v533~JRhq1QvlB*vei z;d?`!A;PDjw5~90M_(^3Ed`+x;-kuNk0Da%c?%O!vsSJ$Oc+;1D86B&k?EftI_Y)W&H=Dk=DT1bB&DX!DXZWG;b z@@#LPERAg3iJl#LTn53_HOF!Wr@0;G#nP^H?;&ImV)7EgFUpZOT23G?1DS&xISzbq z(-rN_@~2%-TF3c_9TBGTW0QW3GPn!F*u;1}z^`hPBsv}n*M+NH&~|cZU6Jk!HIL$* zn3^i??v5HCw?Q!9(y3-9Cbq8Og4QUr*sbjZYDC#wM#hJpuuIfi6?Y#=lo0kB+2#|6JL99}ed|cRvVf24DER%9Q2O z@9;9PTte)mlbs!>u<)}D6MHe!PgK#-(dsm-e)(iymS3+^zq8U0%Vo~K47Rk={-EC_ zG(2@D>SRx_h|@Mkr~P+SER;iK-NYgLeGfY>cs1;ND820Qav_8nytlDEMb?vmGbl6Z znt~WAB6F=>13>ZX|(5W9sQO5th7Ak31itP81hEASSpMJN4KGF+lKo z_vs~TUQajZV90W0j5a^SlL>?{Y5^EmiO&9gZKZpSc$f^jI$;JN%Hr(m`a!A|f>xu< zxtkq0Nm-7gp_a6=gjS_)P^`by%TnBG?Kevn-pKA+wwN_B9`1W-QpTt2E^eb4;rFi; zOI9Lj3b`qSV#%0r@zXK(ZI^UNvu~DylUF->jm;Ni^M1Bt>*2m(vbl!9C!Y-orxb!; zF;X4KIZgqbOY?t-`8^6ZLh+c3ww+>7Zw+Q#n@Hdy&>=43rdpn^5Ir*J`=qycfGrXD zGH-F{GZms&G*qBjh_EoUdX1tHg05X}kmF=I!M1!R-3aU52x2Sd%?oFbw5%}bPAQBF%g;rv!opt;2CJCq{zd(x~9f< zHOi3L+JbC6=FPP7U0-2q#~xaP{jPzhlYDumk1qtreSNXe8;k{!X@@E5nD!xTtC6~{ z$WA;$;|aVx!dz=kxIvmEV)Io;lBgirj(T)R<3V90EArE{w6x+c!;lq!;h2A_VieHY ztl?9{%M2<=1r&c_B32hqseq_t=iR?g*x<}lmrbAyWtgX3A0z7$i-X-6_uB8_*wDAW zs3Lc^E!Q9V>lTfB)c1K*=<>uJe7W@Sv`y4xY}E?`&7|WS9%*by=Bd0S{o1b(`%aASZ&4cxp%xw5%ztz$9U=RxuGz zS;2%dykJGCUcB#mmTwPvp!2?d(z-Td; z2J1+Q2tP1++g{%}E&b#-mf&uNlX(6fS_;=m6}dcFQG%i3r!JVborjW1lzV(DB8`ZL zk%`Ua;<+?WpLFB!6Jt5h)eTYJ5M3W38LCvHwiZLgYl9U6n`~V|exG3e^iKSz8h>LB 
zy45K}W|`zYWw0qfK)9Kvki&>1Hz;b13p++1P?WzDo)(xsSpC66haM9TyTQW%zUsF? zsN7W^;`@c9*v0ZA|+F5QpIc+Gvhvf6SA6lTN5jX#js z$B0ct<8O&@0+5zih!YRwfzofS*{s5qdHG)!M|2ne-r<-kmhzjX#ww~YA~N}lWgDvg z+=4XqorvK0%)aF{Yl`24NiIENoD?bp&S~JAH|OyjFv9Oak7(R*3D*T|;qm8vLRymb z;tm1cJFKB^-8F}WW`Au5w9w+!-046C^#=}t8N>?52qNtXJ)+>v=j8dTLj1odGz_zk zTvblp`xWl5#C56d_5V`doqN3X*~$v+4?8Su)9OsvRdkGL%ff}z&vbr8dQ_J^m3WGW zHdcyOz2rMBuJoCZce1Z)#T?`77Tfl1+s$hBTN|4=(&VgGbRYuQ(WK(p8)$97*Tzt7 zP?+|lwS}8#BYpeg;JMU;blAV>aJ~W@_iE&eXS)vwe7o7#x0PY`Uf>;jaq$=YMA^d_ zYoX@~>ejv!A6!$FLf;U&%9%4F=D(|x2YG)fe7Ta$`jdt3nA^QvYkzN^z^i}>kmL=q zZMU3&IEqq8k8oXS>DhyW|Gb0VZ6#;#Np5oAq8Y`=Ps4Qi%buTJo2tZpWJnGPorSES zk}9EMw75h-cf#>xb-DHLGP9$!Bu%g~qsKos?2+=aJet(TQboFZ>Mu6Zi;-XT8&>dC z5E#kCG}t-z*~oX5pU!Ye56UW>IWs9!d^h@YVo(8_#zRY{rI18D#SOfs!$OYq#=$aG zkb#i##3&QyNUC4>)XV^hP(xlu1MIUA;|#pceJLwsIud(1(6mI?N4rt4C&0#Rt>*t? z>%HT-e%tqPTe6a@tddA(_LdN3v}KEg?2$dAP>K+;NulgbHd&cv?@jjJ>vvvye}13G z_woJy;c<80H?P-oT-SA;=W!m#ajw~LS`t0AOwe1f(X}#Ww*>7qB7;l1^`j2~P!zZo ztm5KY5?X+#1FK71oDR0jKd=<;sQ!n|Im%LF<<=qB_~8y7Gs2CC=h}kGjm3kkbp75( z{id17J`zDdEO%lT8f0di0pc5n!xO&YJS0PKm9WoQ!77loEJ1-wKU=L6c;zRkH?)RW z0Yy4{`S#=s1y~~LPpPe3=@()M7bUYVCtJqNd$Ds#zovgAR6klw|6ZN*^ zfVfJvt8O>8^Scve3TLk6kBxkHv?86b!%%Q z&Fw>`0?3jQnlk4Dt`>vR+Yq*)N!0ck=q?{-0RMg1Zt;uV(3*M0jRgVIh}1&(H&&I% z`A7weiFO-Y1^*fVZ8m)KC$B=5WfBDs#Kxi71G7LGt47}~ zw&LC9(OTpkPor`=A|+-A8)L5{;40_XFKWOOMu<9OqzRD7L-V0DQRigs+eGacE4-_S zaNR$b%`#JUvAeIA(b!!3sB~ny|z&`ED56Rj!>J=rJ zBk~MOOrP3}^|?9}uTV=zd}mT;4Aw6^Uyw>8a7T14>*Q~O2dMF))ktaUtB)!yNS;dl zsQb}kfVF(1n~|*X9Q2}~=K@Cn7Az}y2GxI;Mrcd45Ni?h{;)bWYD!u;y{5OR5H6C* zW`OR;Da06t_7RY_u>-`@;EnnDO;RQc)s)VpH5U7`b~0eJ9T6t1$LqTcJ>?3h6|v<~dXOEPJcy-UzxaLit%iXMAP0 z3&8U>AR7VEK@de0w4eapA94fGcIxlH7T=^w#8bUCqWQzA83)a#tM;B}^V4g3T<$dW z?Z~cOfg6N$p`adi6#!OOYEWMQ66^pXnUh;xlHH-{_-vwLU_YN3(6*XC*hOh$U4t2H_J)fECz$L0)zCp zP2*@~{jS(KtnX9Cj!fHpZkJCwX~8`-c+vd^F&pM1;#q^Au;(g2M-e!eU%$Xpf8$(` znMylw)&MPL1>$xRtoS~stN+frM>pseBAx+XYOzRNXKxHLZLvu;W2Rt@0_ML!qIn9* 
zB&1m63K9?YA%8|jn-~yjp!kszt3GME6<5$}5!uFOOWLn`nn=VCKe?hRVKi};tR$Mw ztrDn?e#jRC+7?f!eHK}ZA)$n}VM*B(oedB|(j#SNk*X5XHNa)9pKnRAp0;$kCv<#rihR}fl%GIHu zizhE=kddivB5&46<4}ksJZ7w$*T@0RaMqdV_sX(MesDq1Il+eBOcv2RCA3&9EiHk+ zG2d|JWVNgb)_4}n==H=2Yc>mG`&eF6f|byJSXBak(pQ)q8WITh0MkND@Vg1kWo$(y zCFC?Tv;XtQv9BEGE4VD4@Jip{P-|AZ-EEYrNN=*`O8%HMGy45K*_`Ht?RaoWB&VlW zhGNvmF7fkcsF%(IG+||M>SQlF$xq^{9g?}KQ`jFn<(AHxu56f`l{p?aJK7svh_t=- z)~^Oi$xLtx1wuDS(b;Q$Ai)T*EpiQ&<$W%o53yMm%cU@sAl7~OvWxLXC+?2rR?Fh+ zCYe=yTP{hmMoL)sz|VQ7M~zw(v)$mU#-CZXbskC;ph}DJ;-JM9c(IsLg?X(F z<)-J=n(yW?ykI1iIBu9dCC3~E4P-_^Gh~_e1UeO5CeXQ|WcSN7|ppEDavPMIa7-nS00;n)7 zR9c|PI6zo48<-v&({nMS#)D^%ih!aYbM7?gDvL>DhDc`Yr59=GRlA%keU{UXyO&5M zrKH=XrNd3bc~Qh7bw06K>$j$DN$k%-gs5YhojK$_3sL zzI=V@!d$RjRFn|W&?#lzdp2A)aW=-J&cOiqBReL@o!g+>i|!6y0uBxiQpT5-=Ha|T z3vf_A!?&uN0Cv4>Y;KrH@r|jB?#mq@@)9{4>o-Z#7aY%u3@#wI?{hkR@~8*duVGG0 zk(MOO=tt)_mz?UPIc10EX?eZ1j+N=8MT5%gbWI^>hR{2yo&1T5{AU`a*3n6vR<1MF zb&=6jnYg6HFMnxQ)=DHNo(s}yfQ7aVk|MZbJQBJF08gNA{t)^5fEhJv+#y#O=n>I@ zsfk<+NJ9WdxQ#S8phyEREG?+6HQ^k9b+F&L$)mQ<=K2VmgtJ(Ec--9423Nf7^mnPG zNiEqwlkGS|AXWm5js=K&P-L@#8w_ILK+P2(eqz?D2Y4g$u>|T45d?0CEyeDKKhjT_ zFL0uu?h-`uF(|~Lebh3Rjhm)$4hxyYjyiZ>_+pMeC>63KsRVU-%ENDG5&s4XySrge(cu=f|4<)mK^FtmTp`DU9NelM01E!PBtjYK zH&_CzYYE(QA?sBGfhM$BCxJfw8NwEvYOp$-fUU%)Qz{G{O5}L}6{6U}AjBw)b64(S zctRHtNLtUqDI;A4%0&=loud^rv+8w1fpbza&zeJtG@oTgh3jrUK9L6AolKwe5(4kE zWtS8RTE=ytsiWgGMS*_M+6I=`FVx#doLnJG1}GOF`Ye!DOG#A>3`=SDNLPeH%OA;|fh>zSc>tI`+x-M&ny0V<03z!S zi^hgceTI>-4M64l8=rNHtX_Iz?9VHMM0_5>3I#Lp{h$^C(fRy?els|Ve?(w2S1-<5QXryQfPFre5q;~Ueme`s*hA4 zPqDnOE`Q)~8F?x`gLcUQ_!odzLo|q6f<>2LdpOD0^AmX^CJ~@rxgb;oJGJ{};F2x~ zrx`M$f|DB?Y3WpA#cu)mbb+B`Xo%inyK|cm*vrG#iNAi8Gxgp2U(N2iuq_dXGll&j z0l~Fy-b}B%E*MFo*w=Q$trl=pNWQ@P?mFxi%ymxhi;T|%4?UzT2R`aq3W{fnzo5uh z0ib@iCo^s_c6C4i1ma0b5!C=ULS)Pzdb|YsbI?kFrv+qWe&IUK$fgPTks$XgxH^9C z612Y0QtI#z24Rlb*JB*>Wgazgn)(Q%6 zXoz3`pN6>eJKsv@ZsxKU{BHF=MF|5pE+!i)h-{e;l1ZRRjGuO^7?ujCKT&*xCkz7i z#Vp%rh8AK#iZ0(@5<@?}F^^H|u%t(|Rh`7BHMaHg2H}YkUN_f(Num{>xGu7E6jqtJ^BE 
zIn&!rH(o*6?(LSa6!L+a$7q6_?3PRccW&6HPYrNE7+3DbV?e>v=XU$#dPdaFj*SLG zCfVbuOW^3vDqM2ow2ffl+CVo75SAKvixA_i4XDvd2&ST`Yq>QvSUR@C z3#t5<9J@b-CV^g2k6Y`&p&pkcj(G__5^=TwMTShG<)IxO@%f*?x1DWIz(z+tvOflQ zfd7h(wcuW>UP&6kW1O-i>9MuZNg{J8JO|w?9SXVG{tB_9?Jn)WZ}x697>D97FDjZ9 zaT5Ui0M}tS>8Adrk<6JzFU}mD$BcXAbvd&pG%?Grx@D1KK;|wm8Ub4n&ci&VkicprVU%3>_xF=JhS`;4Q&4RNra@Uy#02$87h_v-udHlNgbewM+`e~F zQJP6}kxDE8zeRX|}UfM^66Gm~=A^(8)PAsvz;U1ermgquzpN44DDw z6+r(Bn>S1O0#F+OxJ$^AvC^+;nR!(5vP6oX8t-_3q%7&Wm0z$eSur={s+K@k1~L>J z^w1mZNTULR7*L%3LG=yWH-P8_C$bUq$0gzP9x16Qw0lzJ-%ZnxdArkRY(Jr8|L%n= zMiVdtqzP6y@^6D1Uxx(j0T^wdTnu73bPhmqh(K&0?<+x93o-5xKL^^i|MU5&y{CG2 z5(U`e25Uek&C;M-C;GtUEN3VS(>9y$ur~u8T{AeSqh2B?uEv4HQJ~qzLj))ch+NKs z()oWrC+aVI*7Vc)`_ET{x_KQuQo5+-B)`SlkOk4AWD^0=*?Kj2u+Xs8LRKfG{-OV? zApBi}|4U?V<7Cwh6ggLCNWp=Zu+~1t=lDF+2O0~5)d?V8Murz4&w}fE+sA@BkgU3KlPRPnjLj6daRSlXd5qhg_Hxn4dT=fsU#m7lcKFl8gdi zZg{%O|IQo*3j=Da7om3Jh2Dyuw0zls$8E;lyAOwD6ucBF=utxgsict%#6(%PGL&sHu$DCjr?< z^OHCX?jlr)(Az=SkgzZ{>Ii{ZY9nARNYxFVH2}E`41p~y^3j*~IU&qB%^Y&zRua-3!!n z#6FQ_T@O&$ft>;ryBl?(D3~xi_*IaU9KE6sNoeCRJ-hS>L*xC+1L?>WW!XtCTVpR^ z&dmKIPAk|KIb#VP6Aie(?ltYsp?@%%U<4dR($?&Dh)G5eLr_M^9L`H5T;3`=JMi-L z6ScId-KIh6x?y(t8VExFCjD&tipV^fVHAqP7s}1hJ-W_rl_M3A3Dt~tpUs0HJ!<4G z15WoJfjeNi22;{g|)%>aV9 zLl5a%l9iq1Zu8I*HJ2ud^(kXNIK2g7okKc7kI=;;tX|jmCytdpCY8{9AiNB|nhF%atwL$-vrD+MWbVTVxO&QeP08)AHe;>#AnXAgjQg49Suh@jscJCchJ#j){p`U0JE@=;-)Ykxz1tCF`v__KK3}XQ(x8 zwZ5DSRA&@&yPV7fO9S5)XgA=0fXrCIY=|$-Wo1+v_&3NS;L-{FQ@jnxRWF8zPjz_L z*m7HXW3k7DJZJo0`OSb5$THKys`gmS@3FG8n*tKbW$0nGIHLhUptXTqc@5xekrTWj z$Yo^Xh$kUEKY38$74Q4>rmu&V_~HlRjuZ`J81O-2@Ifdm18XE!;8lEuaEYA5faM0x zoDcx$q}-#y8f*mw**VaLL^E+f^ZyT7YA9nJ=Xk{$6`uSM8&XY;!_kP%b`Q*la|YU6 z%5YqA_?9<`iH$^mVg|(pXf%0;_Mm9X*918suyU;ojM2(2_SdWYrkrKXu@D6V;i%re*b75>xP$Dc~vl=n1b`p+jF8w`k=8cR;URnmxKF$`!dS#c)Z z2XZgFM2doEu!F3&m$KAYijG(jG!~>S{OD`PjR~w*M$CkZsDB(|si>T}OFT#V^8RLmzPy!v%j^|+ zh~eO~iGJ})(2QOacX!-;c-k}4t>R(}r;9LbJVMc3N zs4S_biPEWKb}>K?OlCDw-9ZO)@TE0v 
zskiHc7qc8l_+3df0&*;%uIO|-?Rt8~*cHKJaDtYDg#-Y_Xg!firtGJ~Avwtqeui{^ zdhB4ClwTru%5b-~Oo-8iqG;dx4M@Zh(!oOnXJ2SETwz1mqVp#^maUeFi*w>C*FQJn zKImE!me?Trm=fAS_7)-u{5}#kAw@h?anK~7Kj}~fmCRIoWX-wFKaKm$+hyAkg z?_w&IjL02Wi#fhBmS>iMRR((mQ4E88DIkP3N`k^g0-LhbF_$BS;NddCP<;)dOBO$a zbabi*ihE4kxX_T4f=z{H?7&Wa027aZygAxXgH9J=mCnns!3*B`HrRIO;O}35(+?13 zA){rtK$I=BEG)|kSe6d<_J+_>2C1?aMBLwC*Z&VHviz}M2Da$`Qe0+pqr#dx>ntNY zVE;6g`d=TC9@V{S^c2jj_9JVkz}DDR1Sm3XZZf2IsBiyW2dEwo*mbC{_7Lr@+_S*<&Yh;EKgwh`u+-#8X9&{!Hzziu8 za=#BiU;~7z6^%v%e8N_g1HRAt%U^a#@P&?v$c)mSsc?1g|C*OdM46UN8*0>n5EHs+ zH=&guK+0+fcLT8l_1poOTdAY9YzML;Xp238D>CE;1+t&Nf35@sJ`6DRd|KWz^(c65 zWXYYWPx&8SQ2ZdNhTU5I8XiVX>X|T&gcG!fwrq-kKwZ-J|F`k_PYOd+V|AB931rnT zvGLb{%W%!O{enEbpbTg%x|ppC(Qq-y`;j~x=oHl!F+kh^D(9z%Q!sm>3R7|4e-X(W5|0g1_D{%b0rtkgkVPiG1P{-LtgNj63%u$6UzFt5 z;|+>v`-6av?$u9BtxujXjrA#ikoubut+V*g`7C{@rvOlBY4Cks5|(y=PzkpTCQ(+l zwi9nyW06l3Xm&)wz$>XZ5ZtbSqZdF_lL1#}3k1No6pAi3)Ud!xWY)E{wU>+TJ$t4b zw1AKoK-#`oxs#gFnZ~@AxY(^R{pi$#7|bGPD$kNZ)&I3yGmTO&Q-x&%9Ic-Qyg{98 z9^*d=AZ5`jw76d;rrZwA1!32nti@d*N)!LoG( zRXd@kz>M;Uc&Z{X@=EyAr8DGl4ON6ebi#~{jqJaP%vA#jj_GnW9jc}3sr%CgwF7we zPi)eHN?50_n(Y$o(g95x7mmIg(q=r1|?YG17qR z$k&`D3tF(=brzSeuI}@Y491?hWY*}I)~8?SE^R4vm2!Qms`B96s$G3sN3BKh(#)(^ zru=Xey`&<+9`t#0=_-db3?Tx^F7YZd(P&`hZp|?$x(nt7R;&etV*uOzA zkxdf`lA-$C>v4%-#Dyz*h&ATTU{=3{n*$Z=FRIP2H$1s1uwB1VkIKKs&^^6Byg(KR z5Bhpd{JHITR+@GmKOBEtq3E3XU>n)6vm0NYbuB$qMP`U(L0VVgVI9*8z6U~pgMA^( z^ziGn+&ogPkLdJnpwCewQqg{Ic)Y8y73V)$`TEpWT;QZ*uN}v?=@mgeUg>XGIuD6f z;QL438w`g3!7a14zbzHM@xpej??VnoWUT}rVDol8VUJF}Yiaz5HaGhYu5Dg^^CiYk z65Ne#FNo^Xzcw8;19x7o2Q5f3Ho`lWt!M2|j~i*~>L%AI_oLGDbs*bQFg+++cFp@( zlFA3J1AKYv#pmYZXKE+uaz4c~O1Te8qY9%zg8d>%lDJ{aONm=SSMPdc*-@87JwAze zH2wmtHaq$s7mrrlX3}o$);0*2Yz}EfJI4EwQee4$dvWPvrsGpb=ll2>z-t(Ltxcki z$2m)Vi@bU&UvIGyhPFTmD0a&p8=b~hX&tqhaF-4N%*GN*VWms$SH$2^!dxa{Th8ga zm5ozdBWiyZ)OzjCJ_x*ne<910mBNxp`OfX1P3Jc1YDJb=8bxmXi_pVgy@8VnY#V@bGgtm1^2Y#?&tEI zBKBvcQ5z;GecL-B!$RS9iQO`cQSAkvG`EI`kv=(Daa-+WW}C@zy8#39eakd9&@tzGaZ_N9N2mYc0B;+)w2;wkn1lT#+aiDPg4 
z2JaBnh>A-*&nft#vzi#55%K|U_Ux|sw};J`>*2c+@Gymd7(?% zQ$km45PC8QHya2RIar^$D!tv(p5^b?3>SBMm~P|^KZVqqggoxan6X-Ci_R)-ZgL!P zp_KSgSD$QOwPb@As2|6LbjM`wlvG?I7Hosu@ljC!AJWyKf7#v#_rH&&W|fYw=d zGS-M1Z>IXGh?QipTVpqclnvWy+s}t%v$cm3DQXMTXANPI5O7U;NBR3C&pbOB+PfsL z=bL_OH=ywh<1SD^KA*iaj6eE_ITw+ep26?@5E5o}0tU_K;ytf>^-$Wyu}j<}lfiUSrh|rnHx+ zJ!`n%DrYmT@os33ONGUY&}D$dRB?`iJE-kJO-Jobw3-xjs66Pk$|?=89CYX1ra#5I z(wgcs3=&228{C&&;s)~;VKEUh8q>O-nou|pxC=XH0c6nf^Lu*yo>}}k-sUBMWA5bj zO4xhG`R2=)_P&=bO<{cI?>MnE9z7I(dArC=`Tt-=kaO0;nQ!x0TT(yfjobcmy8|Uo zcP`5;eFa9_!QCt~)V>eCG~WE?own^qIIS;%Tb9%vDm;E7eFX*e0G+q`_1~;Wq64!o zPmEnNEU90$w^hl0rG7utQIfl`#8oc%v4NtOB=~mu|7 z8p0U3A#wm)jt+t!A>di&KOrnGeGEbXYQW2V^gIM_ zc>c%703i?XG4PgS~RInT3F6nXfyTvBz-E%!s zQ)bd%XiH&$fTrZiG$M6xpG%%ie+u|D0Hb4__-cY+?oOcv6up?e7(?SeJj)E9 zqEjh@wHm4}7<5`#c5z;jWoaQ*a0(>rx4b!UyYlX}OKZ04GVZ`xBiz!VD}N_^;*$M` zn1|XraCJ#JEm=hBfw=)W3MRFnnVOFsA=H|Mg+h=Z+MR$XPLB`ldPTZK-yQyOUt!0j z5cSOU&rf#!%QDXlCH;27rybdDy3&T~p&9k!W2Sr7LgXtqB$x8iH9l@V^4Ft=bmZ<^ zm-Y}aB$D;2^`h12yJ6!4B`!QGkn|`*g2?-51O$W7CO@HQ1aE6exIY!#8o3}i2vkaJ z3Nf!;QQ?tKW{&_T8#MYnepW62n8|-D`f1mZ;?GayFpA|s+*nhc*;{bX!uJx#>%&CaVR)1dH%nOplNn0cwH z?;JA<^7GKl&b}4(^v@{vo1tuf zA1UtW{hBmp;ubQ!`mnuzFE&+gF&F_$%P?~iEmB7Jor_S%8%1t{o}&fSn4s4C46TY2 z%zkqC#U_r~5kbKro_E;4o=i*y7{BTixuPS%=J9K}+U$)ALXLjh0FMAImftpfY#tDY z7r$$`{9?!3F%)H^YFvc!judFpp#KQ~yz>vt(n7uW0YNZHf(Ar{l!qh|CxUq3Gdv3L zi4jGhR*;LYUgfGClzYHGnyBMx(e8+^)$Fc+mhTR4S&B>cXU=5BI>(?}fO37t2M*Tc z``4sQ@9PM7;=eWddb$x|>-(Uh1)~tuxPl%+E7X?e1He)=3nZ>VLq!3e#((^n^C_UE z+Yvj)y@$yH*D8v(4W0U?g?zFLf1#ewQVw5_zFO&wxW9ee)A`0!>c-%;xyy&oVKv!5 z@+W)PAU5N5u2>Pi3>%qqf=m#mRw#f>jTG8756m2FZP9=}*Oe-K==BMobTEoX#(SO& zFwZ}IY&rh}5frN_Yy`QWa+v%%H4hImR?ek0cfB+J+<-~wFZq)1wP@*9V(EyG$19Ln zJ9?2o%disI*vR$=5z1j3!eq=KB&P*(N_hBDBGejg)jXp+BTuerx-?*#_*Q2Nbbl$G zChu+>1;noCQut@JynQ;@Au|=ZWCb=M1ao0**Gs-*=B1y%e$Ate?%jQ6qi2dZ!-n92WOzmrhjlz#$MFi!sMi8FxqqL{A1hu27QXBne+`eYFwd9f{FaeU_MkD;1Zo$rvao7LIT%(z@9Hm@xj)6vk=ClBp+On2JN- zpnzo@2<{dB$nOPcuU|E{VFd652xif9f0(BB`vhekbOYe-H+z;PCiR={9M`XDHNA`r 
z*s;5|fI$SA0sbv^$2Zq_TZD2+r2ZTGwE}aCRuz6WT|aowzjN;2J1FCq&QoxPH;u+4 z0K60q6j&p*LSU_;Q5g}prvXE;xvcovF5L!$)I+$SJ2DzyAiK3rr+l$x5pYevTK%lW z$w!n+l5OJ>Gs3j)w))1Tk6Ca0qDs)t$B2B);g#tdY!lQBuC&7;Dj_*&R06WsFkUp>6?E<9@K3q>(G$zhx=c<+K^C2zi~2Dp=fJ?t2CYfpAP zvkQIhAQ|u}+T+q@p^g1lm z|4)VqhVn0}x|&Cqd}MPF;~RX{hlhtFq>B9s!SwH6tO$4(@LGZ|?al7)kWf<-e*wwU z%yzCBNNyYPtO?$vNzv88L8ks=8w>J?fdW^ZjJ)p;Kf=NNJ4;J?Q2XOYf8k(w<8V$` zFAa*o%nBtybvn{r01^OXo$dY?zmucdI!#1ov_ieRo-6k5;k6Vg>bI(meMPdebXU5g z8zQ~K(T;A~*15pTRaKB4Ur3vtiql@MfkBE(^|MPo^{$@*6@+jg)1qk{2#o=Uuw*Du zUl9N|8*nLl#JOyU0e)^inNsT+wH^)CbpqisnmI{={JoctUm~{RPwCGSUa%C zJP1M`lr`3|`lRJBC~2FPAMQG#CR|8L3YUvzy;N%iU#x$ z-By^g4gJkEtvyKlAFmrL+m;9j#ejA(L))L@)?BML zry~)%n_jh9=7d}e%KQJ0%&WDBuv{jYNEkC8gXj)=WHrbGg+~ty+*Iq#3R(~Tf)5r* z`>8>>2rQ=BKJ{ElD3!JKsXuJUr$h-Y=z?%SQ6NucPc=Icn#>Ini~0r}ibuIyA1%f0 zU;i|?MUMl7FeUn!T>mLrF0YsAjtjIx=86Y+$hic09XdtN3y zKW@!lR){u1N=;iCSU}Y0{0jK5L%3e$ENAzvk*(HZ5rnX&h&AjL87|Wi-jQWySJ8fB zxEc#l(l%g`H50S7QUJpQXFBoleP53v#m29yD{_-6#vFjAOwyzF2c4ODEp&MVSyTw_J3zxo+GD!BX?g3vPn1_4W}12m--wT)>t;ZSX3KB&HE7E7Q}K9p|FpC)uM@ zNy|%eHvTeHOzL?v;)D)yPXVdWEQZL0O94r?K0mkahjfq3FK#t|pz}5TB#&wQ6v^>9 zFs&1UG2zFHLr!Xsx{m5sw<98OT`VKv`mn5S z>el1N0coJp6ep!W)(bK+5fL#}uS?XQqay36v#n zpoKZ3OK>Q~n<=o*%0F+kq|0^rSK~I19(ZBIl>GMKlBl1MD$g~=e*eWk8pJDE_A?jD zHA{T^=IwLsJPM8@YYY<O;{-j)ekTc34sFm2fP zFL;u#cFL}wz`C!wO@%fgqbANfM(_rm;DX>hjXN%rkx*RQlg%xD!}Ot+G&`5xh_H!@ z={(I@dkb@C-7_+Q>!078>ZT1~dUWghaWJy_g9GVc+2a^opBP@2=KJ|Qe1u38B6GYEjhcen5H`KLBpE{TbEl+)*-PIY+KIG0lsb#POhb{_#R&k&S_2?bCqmb zjk?QTXI#i5o692`&xwkQvA?11D}^uimVt4X#k0wxOR~jA^(tGTcaKq){ye5(`asPB zTw+M>K+xe{VU}uJ*ytsmJtZGZ&dt>E^SbnvZR%aH=`^1`t z*6rZXnQKxrE)8tLt-rR%HzR1dW}I9noz_G?P!w%xaMF>i`4b!k($#Wv?T}jc2d3yc zx^4w*u_ui5C43nqDYRD^4ZGE(p1l}fVfLx1_417T?-C~>?Y8$!z2DT6na+-4lmWbs z^qi@i3kPj5*Wy=rM@)bnWJuP6gKOS2uO0__>m;^}mg+>US&XaKPHe7D?43*EP&OQ^ zb30mZDr6X)R+(ApA5^aIr1`^!8K<|h5!!OQtWb}7(#6eSCgSCO(96=@v5V}>DQ&y4 zOjG{d*P5n^$}o0_8XDF=1kJpBSLz8Cvi7Dyo|3)l&3})A5|6mVE|So4(V8jbo(YM* z>FAf*5QQrp&>_~J`N}=Hj)zEG5<4?9bdF0iFdQheBJUY?#&yLD0E 
zZ*M${?omN*L)#-g3;Z}etNL2vo&AuJqP*usZd`t2pKs=$`co0-M|U|MbC2S&Ruz&} z0tivOQcZ~MWD0Mr**?9lV3Lz-f8v4J&8n0opUi~auIV27rK=SMF|X|zf9dt-EHlPR_b`ZMph~(3z?SJpFOYt3k`F);#+Lyt)I9riVFCTQxY&j9-~}c$DaMd^b7$ z3FYB*Dj@pN zfE)oC;&6qte<*~pJ->d=`)uk~{VCMjzkmAJV6(QMs(;Kvc=@f3`EXy|44+!jbH)&l zp}N6MwsUpN`PI={Jq4231NHZ$cz@~+8|GUZ_23N-l-&z@-Z3S?WSXmS%C@gXpr{MW zVZ=3XIpzA1Uz>adfz@Ob_7NIjE#}90<<){~Tm^}z-gVXT@HAI$7wjH`&#Abp>owYn z%6}am_xK#Fx9Il9g?c<}*W$ms>F5^yQg|~>Ki_l6q}C$$Nv=kgCTDM2w^tkGv6F9? z#$>NY*=Q-8ZSDrAp7(v7v2SSJyvwKLLF=zDi*-JJW_p^}dR*3`boC|-M`;1iOj#J3 z@l7>-<5&+YaIpG7xG6&G+DTf##a~e%x*|~aR&KZDuj8Vd8vQQ~E!BO#^&NvLk|Zu= zmfgy+#fY=yOvMQ@;$@MwABFWF3$JNV^9z3~z2u@&LXkXlYxBcIIA_4X;)z|*oj5Mx z(|gI8Px2~wK5XCVlDfJhW%J!5prq*Av0PJ0nIW^b#Ludu=7am*<%jA{22)Ob_CLF| zSbv#dmMr>JZlqlA(qljWfq_0n)T~<{yQk*m(!K2h3OAo2RV6D|*$-w?EXG_KOT7hD zc?qNCTZsYYs)U06xTykJO=s66%J18XJtX|IEI=kw?uqvo#9l|49e@55Efx#}Xx8gz zkFK6OX1w#9l0&Uhk-O|C6jgl?Wg$rufh1{DSBS_P8eeOWI0lD>)yYw5|Aev4azMkp zv!l+$bQYt;!#n;|igkdK>#)?5WnJ99Uf)sTx09m|BcZ0`ZIlpxT<4{eIEL!d@?9FX z_b^f@4QA%LH;WQv=-z+E5Gelq5jihtJ(3Of+6vRCs^Wh- zW*F0?Dz%4tiu7Av#=;lbjlJ(Jnb{VT$f!2a zqFb_4s|kMGwiS)*{;b@xiaweM^ydk1}ZX-rp zE8uO!fV6BY1153%#KZ))Q%OkJ$B#eJRGPlrAL*2 zy9OqjY$~g}D(5DK6Tfe_Nc3e1*Z#^WqqHfV?tc|?E=K%P zf3yV7Y2Pew%RPFuZ#c%W5MtMlwQ+FZipm}!_cr|cbya|75HJz!y#<&`gK_`Jb;oRG3mANRJKU-t>sF#vYjR`6cKrlx?ue`lptY#Fwt-QXI`c^<`dhN+_9hJrXdj2~dWNG^=WSps!{ zr=%h*_Z8>S%;jdYg#k)>NyZK=(Lv%>s!-j5R$iUBsy5pDXa}wz4J(fA_uT8!A~I&I zuiV=FMoy+x{vPJO1lf?aV+>8Kw*Q-G=m(~oa&keDk#D<9F2iIqIMRZmOalsFfaN`4 zzJ|D~8XB@d@x2Ch@?|ue9Pr>bU|kMg4&vK`w)_@@y%jAQW@ctVWq;&kzYq~&OBNB? 
zXYdDd3K~0KW*)}fB(M5RHk_lN-1e+)ZBa=xtT{ub_-I}Ge2@NJeY)^y#`iTz62VZ} ze$RpJ53r;p5Psreqz3XrgvS2))UMkw02Um~c3dUV2u2j2L4bw^0{XtTcg+5rf9n9+UYGQ&9hMfO#=LUL%jjQ>YY& zgjssvV*8e=Uy9~1r#L$lBf%~E5v4C^r4E9;>WxUkACp>JX%FlXnK8=WNN=~XK$a#C z&%t4ltQ_S7N+`&{lK~j1*DBOVN8^u>*099E(xP~ZvQ_H%K);CrIuJv*wR_M|U+_0^ zf}cA(!%R$%g4+l^T6lZHEwxX@_{y{eP3Nm@ONWn*ws+3wC^WSiP5+`-(vEBnbMp;% z$$yhsMNB{bcc_#^x}QoN(8qwY?HF!DqP9)prWwUa4NjRonv=bTx!1tv4P5Q3Ks(V4 z@D=t+)Fu+r0kTOT~=U|2u$+s+M=2{>p50UEk+g6Q4fnOi-ac z)aZG3S6_Ul=l3=mBXOIu9Yf;MC2zQICKn&E7!_VVzCgACu*!h9PzQ(zb|0YAY^m6q zqv1WHg|nlL*V=I9Z$NMuNs&2%JB9lcd+)TWF9{3ytZH71;8=LKR-l9!khniTnhQ~%uFu8#@7Xb-v8^Wxma4X zu{*sRCi5@>l~&^Og=cy04x$gjEGGQBO5nRNMsqZG<^9$0?EEIG_qwvLyab+~9~f}s z{RPtaQ*aOe^7ZSzGH1I=I<}EqBT}&1;L3^VhsIY!Q&S=olrPNy&N|1+O00UvM>};a za=XQrfJ$b3$m)pV%*d_2-lhvZi&G`}B@sFN5upPl+?d99*+0&cPZT97OMLegZpwa* znumgf#-77KjY6|A!0QB=)nI_k2x->PY$hCh{0{K3h1oVj;K3hItDk@>IP}eQYq2nGN`=g;6m6oCDb+-3(=&2{C^ zhR4?LwKP9p$ZumWOo~w+)M2#kN#@z@F`zW>pOMlnbmqYx=$ZLZu=(b6Pj?L5<5^+Y z2;df@a{vBW`~9&2Q92UT$AjTI$bYr5r3EZg+KX+CkhH>SW|mZrEXZ?pcXTo1Lr%AN zfm*^XMuk$s*4Z?8g<0&JzOzk*d~WKz-)NjXcg^Ra;s}J9NrVm`NIvYuK~~lnNRTZ^M44WB>VPb>j<|?|H2ilG*v%E{z64lH}bp^ zViUT37xlMmyF`>`gH0d5Ha2U=dA0Gh^_;e~`EPq4HhlO49A|iQZ(IMCbK94I2hCh` zqx@UHhbodIWF5U8K25RSD%%k~KO{)PB?(o^$ehLZc_Hhqg{*g=ZVOxFXr2DNEboL6y(0rqvlW) ztwa~4QAk_ZL&uc~IbfvVCiO>Z`NR~HsX-*B`u0{u({pLlxv$9j5UlC7~fYHmNM!y;9>?vE$Rm0;e>z@Cmd--yaI2BN8YT6}=HyiS4?(7_tv3B>8|4># zXhnZ^2$thfmK7t`FU7;_*c;G=BZJ`?Pz}z)8oOZ0$#h?2*EMUGm5+8mNyN9UN3?I-(x_G#y&{sQI-Hss05Roa(#pzRa*@%|h?> z^d`+g-o;Qh#lmaH%Vp=1uDu~~xE8NXhuznY)4{i1)s zeZyRbQBZ141c)t-f?kao2zIEj7Acv0R%zMMQt5hJ=M_S!usdNDvC?bvyuYs!it3za zQs`XwmAb#O(>%6z{vt_+xn|t_K)tmJr#T-Yy@$%fivr@U`@yQbMa7{#SJ5a|M)CA{ zp!&+^n|uu#bvsWw8G9~UTj*6v!vDa zmi{lwk}Q(Ir!$4#xsLq;>8pc-gKyZ|3o^QGkipNrZ~q~m-?RkVUh5JT$fow4I=k!@ zhRy%J>>Si|q9X4bqte7nT`_kV7%}v?#16UAnl|59N9(edMr)!OnKsVwyuo2>WET9m z)zzo{d;!gbNpHQ2m>!JRWxwb*G`$*0mg$!KyOrXUQH#;b%h*4NL+tGN$TU$gIAUhZ zzYB^j{)o+z4ovpZ#OEoN5Px7*g;M}-hg5tKod>JS{dj9dAv@K279 
zukXHHy!j1Ij($TGSS{Z_!7BwqLY;m#4_Ny`OPRbu)8Fr-RE_7CUBf?x6^59&F0OKd zueKaKB~rAa9^adsbjearH<{IICr{E88jLZE1Anm0VTA;xxnb;PqNhjGym3W!^qWl zyN4xTeI;{=Xz{mN6^il-*RVvrz|dlQeX(%F259Iom{A6-%CMbW$bvdpr3l@Rd$n}k zC;7J)6~_j3rh9^jekZ^(sVqq@RN>Eb@Q&3v3B!?k7Q_@i0|&Z@2=c>%6!S zb&=`5J$@mD!P8^L$)t!Gx`cLzLQ z|G}l7(q@|%Odi{kF_>`+Dv**EJm74w7LDRY+oT)PC`psfD<0F@H9W|t5e(x*EhKRN zGTpauKc=RvFm%_RpyRt@DR9awHQXpwg|>#LH^hC?cxS9lti$GZno+KXUm#I}^diss zZj!Hh@E@HdU*W$o$;5aTcAJNL6x18~Acc6EZc=qX+;^Hp$}^OrjN)ex1NzW&4W z>tHb6o*Ke{@Zwkeo+=fw?H8t}tduCXU497soJye^0W=(56@pzM^Tcujo@J%NfDX_5 zLzxDVQ_^HsW5l=-eWo?Ue6Z+PYk7$7|5-nnuNI11F?ReNwBjTG&L@r|yrwQjo7T#5 z`=;EM^kXZe$FXsRH-(hmnEypYgyMel8|)iB#kN}{PJ?HrNu0?>B5;2onFS9?; zRA|ncV#V)@-c2&4>u}VqKf@e3w`qt2PubI5qnBuA{1E<(|KQ;(;g(~K)8y69HIUw3 zp%2BpF3V;PC+sXv@ap}?}%u@v4 zE?hE(^Rg(n<%w&RVwUY?tzA4##c74z)=B%Hp(52E57AVs*fA53jGY@ewW{FRJl~8`Eu$d2{&|kxsmi6~e z%hwRfSGFk4PFoueXS=+lI!&5Gr6fa|sQ2&+vB~S-S8gYcp%wf{8yZCA0AlU{FUDyy zNZv{`X3)BxzIQQsEYOaO&0~pfXYc6qOu5q;JSE(yOP*~aCZdN@brEfBM8Eb+i7HA> zETZhsd)dHtiVCVvN=*7eM{ESA8gtYYHofa3G5T50(0+?uS8OW$HS{*)|@UrpCi9p6C3wp~AdkXZ|V8=|2D{~u+0Y*W>&0S!oCjzI2Q9_xA zOnt(1PYfgfHaR0t{sm#~Pd<{CLbGYTV6QUzgsRA;VtabN){9J58**o;a^8l zArLj@EmiRfEXTv*svh&0XmfURYtAx(i&*atdPAR`-as&NVG|Q@k;66-AnS-V#Kezz4aPB^crc~ z$1M1Sxu2UBgkg{Rmyq@ZmSaS2^A##+?!5I7byHb>Oc1;23IB4h$KRdYc9ZM&wUqkg zHu!r5FQ3Hd7B^9YzYtx#*Pr<`H|1IhJ)@;|Qf^r%qF#wmdFN{hY>n1;CBV&4F;NpiUlJLOSJsR}o z_EWT3Mx0t(bg}{h+dNV*q-Sh6@Yvwtvdd0Srya*&QJ}y;hvVvZ7wXN`xOdAVZ`n0E zFJArsihJ{DEZ4AYoMtJdBqE9?Wr~a$nllt7DJAm|G7p(4B~i+(Otq0jB$;Q5GDkw@ zIrEew&#wriI0*Nyj*E+<0#No9^?YL9oDm8Q#fGTUJ=P)9e4-iKR21&orr(`z zOl>dKvnoh0_KrBD&y%Uel+a@P)U08&FUWh#%+eEvJPPM0S7&q!t)1-IshVch)${YF zWpj3sv&h#AxL;PoP;Xppgf;~IGg{#zpmH1K2W9DxG!TL+Xv-1+NxsG9{HcRarStEm zM!J1kzqpSfG-M>hr|J2Kr_j&U*Hi2hj-!lx@P~2lMDy<>2yzlF@igeV*IN0UA7x~_ z6?^Rh)gQI(Qg5n3Em?sI2UC-1(?TTxUe;_>aNW`btk%=pdl=5=e*XTmpbD>O zT}4k5o!~rP>E73{eWSaBiQ=e~&t}2vM_H@7)ZYn(e$`u*A`(5(lAixwkY_DbY-^d` z`KT{de*~g0tw^eBwX5KHU@eE-uk)jWr3sOPqK`X<4dMzg|6Hi>MmMmEmR%;mLE+FN 
z!e7v^t^OCu%EaJ(CIpHJqD8|{%pV(VFecdW;I`whZ=<)rsoaVaW4!yLt@>o$QpW@7 zO0Hp{li`4vB3O==5CTB~X)^Gm2akNM$bDqPKmhqkm*q`HAM&)7=@4tkh&H5!NL7hj zp6|_r!=w-?fMwE9SXLpV9oYVfE z)jG32?W8fowi1)@tv?;LZrt@*pLK(d<+iO!L2fIHukX;SYfo1`sg6Y&i`T(G+9_hnDi!%8 z%|uav^MfZtb-0QCIlh4zyJ@QMP!D}yf3)n!&Ifj8Um5cjokb${y z?$%RpcyFgMEA!dW*e2dP)b5t#QPdY%F?7RYyEKQ3b*>a&Va4v!q_w*Jjx*N1bW$^V zhh*p9Q=y}~5OsQ|=}@g(pUsoVCw~Qg&tdga!)BQCKm2196zIshs}Uaa%2J?Icf>p6f=zO6v#3 z%h)q|J+r^+6#whdnPzgG9G&0+uU!1eftB=0uv+3$j~X=fQF|2|Iw#6xzHjW9Odh`$ zU)(>Jv&viJLGM_|>B$s24Pa)YHf*d~L5DU}6|&xuJ}64Dx*|Tt^P{LCf23$csNLM# zdqZcCcU9+)v}qcf)eBb$?%*jCXi@8W-gUbE3Z$AXn)HIVIZC3*fkJpJnuT#4RF%5{ zMf_rK9$C*8?I)NsJ~EDk$dPGI(W4}Hm!tHV%8~;0scW(GyC+ONP1g&W9=J9Z|B_$x zpw#uvQh#XS=I%nj>q-ijI4SdNMudRDdc#?T0du?!s)5`RSw zeT`ZRr7%n&OVP-5D+IQhLP+6NPUvoJ@uuNHy-pkVYLmph?F;^5l8c{(etMVeRF@yM zWl!ARJUG7NSJxnq)BVM_9s;_xmQ+ZZb3;qbuLrOB@J2(D-(>kbr|)2h&DfsE#dpN4 zEoc3<%`o!zg_a7$y?!<(bAlnXkC!*`<0R?T_4dv_focP_d3I~YlMO~?FWxOY{O(_d zcDJTr%T-37Pd^1#MAkp74<3{LeT}trfjjV7XK_}?X>Qr6@r_Gst_Ra!?HIRObMDjo z@xbD0v48lsyQEzWJ3Xl9kg!TaWc56s&XYiLd{{w10CK;@L(_i#R(34=(Di}lFY-t6m@})3;F$Xv zAO^%rhud;P4I=D4PFM^^T&oBp)PzwS4i%Zz;|O?yMiS2dDnVKN4D%|(`(%}Q%EY}-Ny&to zn;3kBAR)3}jS{``7e>~nVCOu6xmAmX8@K7VY*Bi-;#X8mObk7=;4nff-{P2?LH`Tf zz!XfwY+dThYBh!WWpPZfx>s?hXiQppQ>FA>sl~bC>%r7VhVLBK9Q^Y5MeNc;=f3k- z>Go+gx{v>6_O>Xr(R<^d+y0?>g!@NMh~W3`YeL!Md_!3)erJ-b-G^`76@9RKIU0hVq-F7 zReqZ0WBoTJI&$LqPFmJdXVn|^VmR9X^0<(Jy_1l@4P|@9DeRj z%;+K}w@s2l{ylq~%Fgv&)d`j~R%U*ri*eUn%`BHtx-}U~x|B7DWxO&o=c1K*aE`XV|~!Wj6tWRxAjQeK_lUNXMzj7WKj%FscF5F)O^DncAJ@6 zc>9K>6)vH4t}ZkW(oU+a6%^GN-!iO5b0yDI!I)32xJ_^7aH0MG44NHpU5Bsv{F6$Q zjm(ZGtu^=zj#cSp;vn84mA;EH?VGGgE(q5HDdR<-7+M&~( zes8&*bM{BJ)%((F7ZcJo-CkExIMT;mKsNehhtT@?`Z6e7$^aDv9^mUWRznx!BH0sN zxO1u^wX=$|?VTm-bYK0~Qq47Ol@vpl@W|B^rcFs{kQ*-(Stvx}zXuTh<;ml=Y!xrt z@7u^$b4hh-gK;@5vB{+BVS=UO#pUo6#)HoT&4nelF0MoEO+-%&fA^Zta`&G;f0{ND z(qpo}Li@g%$2Q47^%o=>&UdD+J{po`&4QK_wdV#KlJg*y{j_EP$^DzvhT#Mj^prWx zSRDh zNPM=D>=;pd$=rtzY?x|m-x2ren5`Xye>4Z0YYY#Fo>ZJkQ?+MvX}9pa{AI0bxYOp~ 
z*0Mtsr>DDirFXk8-0q|s&D9q;E??3)dL&GUWz)n!Yxqrd5!T>E^YnO3{ffEmu1?&W%KCpKhNsO0Z?lfGn{w-^p_au0ZuJ=LvZwvKvqN-9ykKgd`6?fY1 zH+AB9Ybv!XzfKGtRhE9K?;%>rKJ;9xkI|^AAW@gBO+bZvNXw_Fb2>>?Rh4v*OW*2{ z4xLxE1dq~g))xKIi%ZQ%GWu2u`p;#&dXU5(sOmhlbW6qL5YtXYh3+KD0ahVVpE~^* z`&RR7rS>oHpV=yY_MX+>m!*`6J?PVm(HlyDPdk(BHDRiZ`oOwbMd<&BE;7glB7- z8J{)m`0e)mafg4XS6kGsl)3n+bivsM6V?hTTb7hY&t1IS>posDy&P+J>uYK^!38)7 zgy-+)2XNE3E`C=hDGe_1YfJPioj+{p?5JYaRINTj?f z@Tx<;m)`f454Pq_JJ7Y~q4#xS{oO4R%Ta zK(DoL%IS#Ck8OrbX?O3L6`HAOs^4r}sS!Ib`{TW@{B3D(v*6V-e;UfCXY2;vyj&Qc zmwNu4W`Ak_xPb~KzwbxtF|s;NDYO}GtO4i099k)#tE&1)Z#MkN7A_V=wuJD9I$v0> zR%f#!txZaw(5M<%JUGTDi;VS}l4K86AzkRR%elLrU9S`G6dh4XR7`OBcs|@*KJafeN@5a*;cxca=|%4*NYx`K|THj80m5t!iBKmWN-*Ot|IS#`imwGiQCTsLh+* z5A24sX!PPT0?ol;K)R<8{tLvI&Wh~Tstk+n#&k16-1`)i+v&jqXIq%@ta?wQv<1xfL#?S_*}?K3 zYc1tlBa=8EWlxBn$b4fXDK)lPv9595q@=pQkJjpwf(=uP>|6`seud@EV}Nt$qq!l3 zgO{ZTY1s7!Yfv$XTmQ5u|NiE(cTLR&M8|F9W4hPf;pMF8aGFx&&%1A*r?shNCwoU{ zoU5fmpEhkIxl+|Ic1nr}Dq|t#=o;xRKF82*X986{vS` z%rf)y=g;eOw?@B_ZNKk0w1xGoo=)|MWJD87ZDx1Z8nELTRpMhB2X`w>oqb`Rn<+SM zNR|U$WqV$HmAQHIKvPqbU2a=o<&!H9Y=S7MT1+2HN{Ihdhq!Eu^j-M!qsQ_MYj2G55|w&&bi>^|Po*5YN=_qdO( zU%t)FHBY`VcPjatvDE_0aE_3#D$J|v>gC0y0!I4$1Dl`C4XF&C@t7Q)-n2#eU2Nv!W7^6S8yy)E_BRCB{0$m8 zD;QtQJ4rzS>KF?$Q7;0K-i z2{?u6B;RBoJXjiTvZk9>{ciZ7%3bsEFZ+HQ98>eYBk^wbH-|TOP+L9Kl}aQ2#kqg} z%o)9Y_Uy*-f3pYeSk`4VduU(lFrD$8A}-m`Xw?S_1r@LXWp+*jmb0E>mg@XB!Y$5M zPHl8NR{HX)0p0F~9Vbj%eNG)2c^Gs2ikhzlnih8@E4^1u(`s;iu1M^qUp^<4#KM)_ zmhi>-jlXkK?GQ5hi0NB-EEFO;23aaCs1`Y4)^G!}z&+>~t%)wQv>i9SapKGz^$A|r zjs3=zi~Y{d>iYe2z4uaGJ$E$h-=cf;4XrJ+#Y_rkBdeIkjMw+SzD>AGa@2De`R87oRmtb!G>Cdh7Bm#3*6E^9JHo6S6XG!_|aT50}9Ly%sd4%V!5ttY?n znPkm1z-k=43_mImvhFz`ejzx58DqP?*8u|US zQEwGvjrHH<<|`l?L;ou$M*{a+E=S#thmUVenT@lz!n|Me#t}vh7dn4NvroZhY)3zr zihl6l*Y>!FNulqodTGg>{P8EHuWCB)PQAOC^>-=AJX?K>-8wGJX}0O=_Gis8nYM{N zd2QY1RXNnclzXdx+SM;6L?rv)bCo#P@H(rs?O0v?awd4H>=boYK$$UZQR~EUwbjir zV(m8#ku$jG_-xX)S!317(_NnWczHE&1Qv3gw^%D9D_f|a5;%kVD#xRfJ|ub9>i|fE 
zLqg568i(C0K8>A>4E9nVTRdpS-E!Q)Cp+vKp*eAIC}oCOTHj(iv+Kw1ZKrV9K4ILS zZ1Q!3=^y8Fl^%~@gpAbQ)t4-&0bANeyeux}-;XTkTKyAc<_6`X(P1N%pS#^P{6cbc zS(@za7dpaDoE$QJB&DG#rk(vEtSxDa#1M7k!q&vS-QzZFbi!{B{XMb8F0ZeN=eV6F zt$N+aftPnOWU?u}V(C!mhA6tnQ5-Uof9R`d+V1mk?}*aO{(#FSO`Hu#8qUidxb18XWe{k2lTWigH z=QL;6-+R`JkJt;MlvB2?h*@QG1%lDHlxP7C>$FV0A44W|+jAy2UiddmD6L>-}<9=YB29 z!QDDj`f^6a#dEB0?BOFt31QA`PNu94(yWizow@(Lg(ohxRG+|hZo8V{j+MnIHa035 zB(mLf|J(W-qE!PIhHVY0L{G>`L%a|#UqJ=xIn@IajfFM5)9WIpkNHmds1|3wm0yYD zp!b`=@-NYgsjjc7J|S$zi-ooDrOnbKaeBX_6%X@+%Fyd==LxG0@3b{q=%>N%c)?Xs zS|3fQUoO#DMmzBBhc!?eAg|~Z7u^2hyBtc?2af&SE%b3z z2c6xP+9^xHR#jpTAZrXhj%wO1ReKTHzkk-l6exZy62dw#vNk}kBLOZ5U`E!0wWxII zQhB!F-+Rb$T6vJl8U4Q&5Q9mCfJ2&qR?#Y$QO-aNrR=}_-VZ8CY3WE}FPdVYND%{K zikckm=dxmH=l=c7tAV!czd+IHoOXEVHOBRC?pNw_?%f;nc{U(`0e&4`Z>ON$d>CpD%ijY!ul?`K zc_He(a4%xaHXBtv9n2oM=pJ-`)NuQ3NCqbY=l&anNhUik8?LmDt&_!+LdenEytv%e z1^rV*JOC~<*KX|$F27&iE8dp_NOeHL=Ag}Kh5i527gd~2#TD+*-K`)95voI8EVa!*N*FoP$k2YW(C}c!@5^bu< zTR%SQ1XCt&$haeJD7-f9oSTNuZRGfzjc=Xo|M0ozi}{h??}9yF{Pf4$LP_5^f5w&> zr7FR@=2F&KJ3G6;!KaqLF9261&sAVrarr_2^Tx}0O7~0^ZMdBbp2Lg(v{Ff=b(YKRHtfAE;S;+J+n&I^aZj-AfG=`^g1tr%Ws-Ht%Avf>W=uQWj6uEl5`gLjegs<(`doWVr<+i0EF~jqK5*xXUw-) zpk@R(p%i`2(o!H;!$l0M87=+^nA~(v*j4q&`^x=<ns0!1OB!5m3h zh@}vY0W!t?I6L5fH3b(R0mIro5iWBBlXkL74~b6>*wpVVd*xITt{#8FCYdq#CLSrb zG!+&(QxKm<6%dH?AmMq1U7-X_)+VS5+K#o2P_BNs=}UP&r$@!K zkPsSvk0Hw%tP>x7kopl!6$-Qd3ixK-r&O~KcJbjp&P$&gh~g3 zu1f+KKoa0)^iGCHvEX;xxCI=P8w+qTd0I^R8rGt$C6L$d;!A<{@=E?7ZdGk24=pEH z*m~3(31J?tn{Nt(56tMtoKaVAd*)27G*JKgVzC|kYH_-qz((QUA!2$91dr06KT_i- z&>>~=c-bDV2X6S46Tya=qPX`Y)rR-OU4~&ryj+hXZ@3nz%2o*-CsEPTv zX?nH{MPe*x3WB+ZYNh$WTn4XXQ4gb+ryU%!T%#}bDBI_%1LRatQDKB{o^pG@`c5oh z^8EN}Hyy{Eos-Gm9v|eH3*dVwqoATMfepTqI>pkauowv!5P+w@NBq*$ul zn=E`;Du!l0+N6=Dln1Y0zvgHdu|0~aZApM5whZUytww&{te+gj#wx_Qdcay~nT_#e zH1gAQdyz3mtTyysh%QP?iyHWHWX=O#vs}QBF180)3OUbN1GxAEGpHw-uW&Eb5eJFs z1xi*eU}Km_6od)qeN0TnX{_;FmX6>gD+Pu672UVbW$cYnqT|gv0qYWzfKbz{hZv~} zzMT^7)tu*ilsGE`o5HE_IwzE~s{x}{lm@8j?}Fmf%#58BJYes-yO$^H1wMF{vGxMW 
z=yy%GN_7vOla<|#UTfI3y_aujwI6z`QS$L4tN-Vk?g?UZG<}1{6ZR*Q7?K$W=j2k7 zx+G;53v_a7N>*O}kkZo6B~>*FPtHEegJBB$iPQc0P+<}Z$qpWjPkLh}mw63O6f7Y! zvXImi}JI~p7d#tI+^4kG?s`2w2j7>L8&t+oq|nFOFlzCUOx zQ{dhCK7D$j#{nN>F3Ng(t{-=$1oRUBpbm6<`fKs_~m zp6g1mnk5Z-L3(J6FM)*7EC97-N&{Mk^t$@`?_shq_$@A~J@k%Jo9f^}Vz0o$8(a<{ zUcU(xb~|t^J-}GiG{>{`r&sWxV!xW}keUp4sg?XCK0A#Q)N_9{rkR9lF?kSDQ52SW z365Pu0xXqWzH&nkR`X04JXvyFXs0YjTEJv%hkP)v5DdL^$($jRsJW(n?w+_kn1nQi zF>X$Sl41%O3(x>!Wl^ea?Uqz1Seh_T($()w+>g z2%^QQQxPHA42RIP38Wu^rSg7qJzvkV3qjz=Y|q}k%=wh1>f}LorfC1C76LNl@(-~X_~?_on?34#F^p$#V&1vaBNSOXKaEaHDA8muG;3gAF@MF|?_}1RQ(XLqd8S&)-e5KD1+)@Rk0!fPTy)@= zdgkiY5PCK?Q)Gf4abO*IaD_8a)?#rB|2~DVpM21$*Fig05?l(QM^B$_j?&0?DS;TX zcZ&gh@y?*L^ca&71W7Cbd29ocJP(b>%EIX_mfmPVo00E^6+u3>Yg5(6lL+fUQ7f&d z6}dLSMqLEYT8I1!W--sefA2At{A^OZMua#d3eU_wYF!3u0M+b%d;C9>V1|DUr9G9Y z)-PLykgSFZtjoWzZEj@8lb7%RF*W#Ka%jCBA$ z0+NQgdbhb~nR5R4V;@{AXjmW`EI8}q7M7~D1G9k}NMGSvcG4w9vJreOZx04L1&L!A z1VfM*5!TH{k(L6G5z3S)m~n}Mx)(ADdIpA57|{X0ct7^yK`Ry?1I!+7V3!KRU8mo= z)g7HzFsCrY>Xzp`FMv!l&8%Hrd$DM&+?O3e!_#veQDPuqCKz~6;zV!>Tjw$*eO)!c zpAsTBIQkSe3^`PLTQ`V31_W0CTyJFoG7oehNWO`w*9S;M+2l7m@=$*!Q~lty)=MXYF&IC40$Fy&)d%Z9`P>i z?!<3u!xHe*ch`ZK;1E#@4l3kjJ^%98P!rXli$_=-qgg{Ta&qEv4e2=U$nD1+v=C(_ z%xUgxA3PEDYHMpjM%g1M=#P}k9NEMc*ge6!=3es%??eRb9-mUq2f`)LFCwCf_&}<> z-SO<%GvW~%5D?&R1Q(nz)UgwrDAaX>G8%mocNcx)IyjkmF>jwC? z!|@W!$G-cq2!Smzz=C`{U{(x?Y&W^Tc+OOa2}m8Ix0;emvj2NYD)IHw4znnB-Xnk7 z`o^_q?pN)1x=0koSGj|9R(M7Y!9UfHm%hyg4QMo@n-00$IT@LC zT3T9g0=SFD(_Ngq+pfo&zjf`*YQuO+0MQ(H{rW(H$`H-|L|nffB=oHZzdZy*yBS;6 z$w>m2$t&D+y>rSeZ&sfQ0Z>Jk$EtHyuu?xI+`1KcNDACwsMs8blXVg?2~CI{R0vPj z6$fT;u0wG^Lv0c7W)8X!bzgpiuX7SNVfhpJOR6@Pc@xZ`JGF=t3K$qRAA2psYf`9? 
z-`}e@T8c||nw{L5sKLng>M%uo$>`kzim}@Fdh(RdNuRU}M{HBzR<3T> zxtffd;|P>@UP6$`=zDHF1CPxKJBg(iZxV2#$fEMFLootmf(B0+57sv?FSlo7t z(+%F=XlQ8>0-qx%DR4UZ%6z94Y&H=i_J8y6(!{j1o7i2aucxsDG&RRj~05g5_CAVM!EINezY^3sJZO zcz&^dO0s$9i3sQ4s-!?8!bvz6kt}jbN`{lJC%=E>BC4jnZ4@$jjdPzE457TIW0R0U zz1g5@kgTyXRAlOj>%vN8nJYI* zT+kIh)|u6d*4sAz;3Eu)I#QDhO=LdYjhQO=$QE`~%}Dd97owWZHX!QQKdcs={PA^K zJ_YR}L>a0AT=2y{hgfJVluNsFBA9giiG;7!-_+cbcw^vN$w~R;%WedhYN`1OiXa@3 z^r8q1AXoqL3fv)aMtb8@f2tI@%BL?X-TObdYI757mo^jCBNOcCRt<@2WRh3C!lqK%%Yo6 zMF0OKX{pASGrmGN^fAr?et2sLE-kEOcY4<-l!0`6(sdEpDr2Zj{JRRQX!Wi|7|>`8 zd!~YyhAz~qim$arkNPzek1NQ_cY^}VUF*ZyG-3;)*SZ26C>pnzL3e@=n38t0j{$-W z6{+rURwgtGKVlWz!hhEDiPOeAeYV-Mva(#$-PY$)d6hF5US zdz9*sO2T;La!VcumUA3u3Dz06o3wq?SDeD9l$DhqnjDQN5}w9Z{wUCzefy8%tkk5? zs%z-L9o`W2?MV?&MYJ7?woiG<7i+iB@CR!$?E90_Jc4o~1Y8T>;NY|P@L81Gu+}_N z(EI~JthY=KN1i2hffSy)n&R+2Siqy0?QJFX7e+VH&B0SS8rcarN?#Y zN9g*P^M+CaUsa5Z-cX#hcKH$CO#1GkkqRH%gN3ucCgN?zuP?`cKb-XPJ=gHcz0OIr zSV~#dVMdF!48M1~f$b4+)f5Nx>9(2!%6#NZvKuk6c!fN-IhPT@ge8XHV2&GvT}0TNFw0d6^xi{ zJKR`^Acr(=Ep(_xm?JGXA)s-I1UbFmSlvqSQv&(}0>KqI9><=awx*`04|_2p!tm~Q zctY)p_b6Z~bDf-LvuU>$1s<_kJjZpbj%;skf86C^mI9zz5xYK-W8-yC1QqpGOJ7Uo~(Tb`ol?!@Oh+yjoA0~#z^ys}0rK2}@CTq6A259D zqGyhzb%p+9TG58?vqvrC3!?l7w|z&oCkb7gnD3kD=%hO1x^e0zQI-&=CTQw6Mk6fE zxJo@q4|Y^i@^ckCi)dSBO`+w@aD!D&14U3NBR>b@$&t$#%x=JD?8H$MLN5rT?R$f0 zH*SmpzUlUCB1aUE+=8KD_d_`ag+3%M5`8(<{oNo)QdMx3ZC5kDL-|2E-1u{g>{gUF zIp0S5out|g9XYiN+^DIbopleQ>YX7&Y)?ufUne3%CNpJ0Ax5=NXI=c99P4yA)Peu4} z+5$jzDcSBA^KO+?N;DR>8-Xb$TUSqgH#MKT(OGS3SoC7x{5C$3^9wEzmWb~kCOXJ zv;wY|S=vQHb2QlnQbk0f$w`V`b4@Q!-~PZW>MvDO^qr+7iU<;3TyBX2+;E@?*NM+$yqdW2=hcUij-g| z=TBa{wu794YX@g%XO}E72f%$j?a^8`yBFs!)hL!gS?M_55S8OAbS!|rrN~(3kj+fe zb>yUL-LN14YFz1ILuw86I8!ZT;HTT%#LYXX5SPPTRZ2mRa=E7N4g!ZR+Gea;=ya{o zgkVeX)spTBCUVA8&q$LqNT*CsSCA4$La7073XYZFMhSXzi4b859Hdd*4jL2 zy~{Js+~vkxPU^Ch#IsAGY-3RwQU**9^3&bWhw*aa8@A1!z%vPIl!~l2AlM8P9#df6 zMZeSy7^WNdi>LecD?2FT2;*SB+>^?XmI6=$z%<18g299#RX{`=JZ#|kvq+Ypn<~w$xcas4eNWG^;s6kB!d@g?@h>u^6 
zCY=^BE#c5g>FMkvD1AtYiYYq(r%&BlO-nn?XU^Kt#u@~R#MXVmkzx->?IwyrJb2(T z=3n*(kAqtiBpygXb=(Drs_W$gHsYfUPy@Le<}soGVz*!q5RhCtw8NU@X$ZQS$Uaez z-T`KSnmJA*UrMCSjuS^%53oFGlj@|KxY|$n#6{IlXaEv-ZxG2P^Oxr0rSGU(AcV3z zjZ;I^x(8)k41Tw#uWt*6BJgdhAn<)AQp#B&&Y38nMM#E{<^~qtS0rv|=$wWg85(e? z9uspW<L`~( zmYp^i0~?5tl$S6>MZjl5?+nQ*#)A$e<@k|$5q0UQOD(5QDY$*&ms34L)JDk5;9grT p1D-Wraa8}e0k8iDZ|g=`Thw@Fyh856Ju19pPb-{CJfV5_{{tEiF%|#- literal 0 HcmV?d00001 diff --git a/3.test_cases/6.stable-diffusion/multi-node/p5-model-scaling-stable-diff-throughput.png b/3.test_cases/6.stable-diffusion/multi-node/p5-model-scaling-stable-diff-throughput.png new file mode 100644 index 0000000000000000000000000000000000000000..e9bba48c845f51499d5f6a3f81d0fc90d8404593 GIT binary patch literal 47851 zcmb6BcQ}`Q{|AnrMv+a*9wAxTvR8y;M3RshvbXG+olz3XmQu2knY}|s*<@yvy|Vc} zPuF#QKA+$3kMD7OJMR0q@2<-@&)0Z9AM5!JzpbW7h)0cwAPC`2C3#H*!CF8Nj4j-= z@K5sDaIV7th`QX+ak*n}?(*2!$qZ37c5$$=cd@ZFVRSQda<;U$6W|u+7UW`ljqOT8wqJtHeU~lwNYA zFGCCO`1rV`R8w>1=NAi!)|M!mTT2qNH!USrFVIPIhArISgV&2wx_Ps{(si+?%-*oP zvhwIhZ9jo!7@jE#Z zzIas^TU}k9OTV0?F@%s)`>5EWJ481xL^-~vN|I%byReEG9+op?HJBfDu(N8~m!m;zdtD0?PJ_n#N{>xeZy%p$ zFJ53Hl)OfWf$yGKUx6V5UN9y+ak=|C6A`V5e`6@|Ymt!%dw2+RNf8+mY!()l$@Q65 z^qt&R$D?i~a8HetSi8^0-{Z9#6Y<{p`8ZbDnW$!CeLa8?*CSOiw|~0r88hN9JmGyF z)~2gutipLdHZ~SPb`LkZr{?GHjosoRovjXaY=6P-zBilT{BvhM&Cg_ItOD_Gj-)W1 ztP7}knX8$t9NxSm;pcZ^^3y<*vc!6@tBf##A|oTiV`X!D`-Z$c_J?}~(LM)%XYY6Q z_m{9-XJ*R0j$ViCKUpzS{H*gB7#I~qam&NSSH^vI9!U5eQjV6|a?av%QapbASkUr2 zOY7hIARN=L&o2*^SW}>B=`rm(nJZgC*5$YJWbgwMX#3ukieYR~OL?>(oPnUA#=?ba7} zI={A!{rnj<+VYetZh3oSXD2i!hM0xy9Q@ssYkp-VHBaBXnunjpSGIUnQC@zskyu*3 z_&l|T{8oKA7|zblA|fJnqoXt( z2|V3p-)zUq`9_~SI2(j9;j=?GQf42+arZ-Adpn-$>ub%=FW-uN_3D*9fk}sL=_Z}H zTe|&3O^!jeSZ8PFozddBKrLkT z=%b!=3@$D%w5Fs=dYR03r65i<_wY4elpYb8%p0-joT-+PSpA_AZs%*(sR2fWn3lqGM#QQGI3mXbacc-F#LNy-Y;hA z=d=%Ed4m&zGzV&cqZ60ihuEH=Vl`NqG;;ltpyF)Y|RVq;}5>L&58_s`=*L8KBX4o 
z7~K5OZ)LnTS-&@+SMBLK)ewwdLC-)!V#M`$w*5ke*UpN^z~=W`Jm7!u^zHe}yQz_zfMmzZ(i{3cvnvRYVJ=v`2qV=J&Iv^-j-zVP$Q<5WH&t3IW$r%yXe z{@u&qz2u9<)ns?Imt?PGYMR>H6AKFqBXv+d*S}s#t6C$|z4eky_R%9Y-7-5uM$;Vc zM~@zrrX3yZScogU`%&M~fzRNxMe4RP7XIU%`V5q$-R(g`KJQ%zrEJY7!NJBo>9Tu& zr4Xt`=YxZTEZLy5I<>x%U8$0hK{&)F(@o*A%GpXvN;0*+M-iVT^gTVrJhv7CZwgHuj=vIj(lcPe+jF_W=gDAZ2oQShr09>RA6+69I<7gX;eO5HJHqa zglj$t=ay2)iu~`=a8PV4$!H`U@{bR9a|visstZ7IrM3)AYe=ZTI$&?P%!~RMRJnxP@h9tyj{#11BequyJr^;?$)A zl9H%Gh#8{W+S*K-*-EGNo!vvc4_qdnf|yWX@IWwyfKkB!B}Y72p%v znH}MgXr+B=0frTR`g9%kR3U~ws@iMk<CeL^YqFZ zph%-dc_2^kUe11#)obVZu4d_z!ytodPh+T3r?>&m85Au{B;TMq0LFc2&pqR#abdl?!3h55@LTvUz-F-Y%nr}-Uwl-iG459VL3+8)qrfZq`O8UBA&U3g3 zwz#x|#k^M=CKguIfz#rp?l=8b!LT*7qnnGe6V+a2MJoVW@u78#wxf8cg}QEnkx7+scz_y+ldmt}DnKU?*43=S>-P`+j^?H&O0UG+dj;H2LPO`-oR1GZ>f76u5k>&k7d-{l?KbwkNw8(z=Tja(J-C;r z8*f^x_U`;yBHF~|>R3Z!Psb7S0qX`o$(;%+VY}oa6+33lh|!?qL!nurz_750P{{>( ztqUMqmMhN8@ z3$dA~p<`!f50+AN6crJn^y67Rm~4KzUGf{YJ#`k~K)U98gPfJlK@IK%W=2K?FPKR& z32pd_G4u%k%0yQ|nM+&jC6vNA*+4;U1#|?Db49PmXY6u)R~M0?-;rzTP6!s2K)s%> z?p(#hz`#X7PJI4P4?fLIPe)w%5kg4g2KubVxHm^WO&4{Usiy#Uj2`G6Hp6V?! zfK1Gu1ThTKEpX2r_BPCB)aMw4D0mDycXW7o3MAqzkQN>W7qJHc=gG}nf#A$AlzQZ; zr?3lT^jVeQo;%0BB^trGm_U|v3)liYgTOKOT&zlpnniM5;k^Qb3RcP_R+U5}3D(-u zjs3-f`3fFuy((zp_zHU7@LGSm~OuSFbQs6%C82MIJRrSE$=BO}xu$?!8%#9YZ{4LZ+X zQJeHTIbfirrJels>k06Q#*U7T-8hfchXDC%w#i)>wg5~%Jd_j^{M@r{_Q0zW3Y4gM z%eVbF*Q4q0hge+Jn#Y;d?PjWgQKqJ*Fc9=&t^w9TVV;QN^b^PGznSG;JNZR8h0r1P zUVf`Q0qW@S(t4=SIQ0NvVN~tw>05DCWYP1@EiEQp$)YHvgx@~7vJ%az`igSG&)3&? 
zxc%Z?-|h=*OM4tE&UgC;SvFn&uC28dQ-~HvcmBPv9o9>PZ^e_;tqyHz^MQHq@bGZC zuA=X1N@lyRg|A^7CT~}$CP~qV`OPB+d3iG&5E}T|sR)Pqpq&5mI-{Zd)j<_e8JqW z?^<#f#S1yM@8fuQuq1R8Sr2m0VBfRoPDMF!Pm!tO(C}~uoamuXW*zwIpI>YX@#N$| zx%uTewz)w=&mAvyU|+?;8qA?IcOWPYaQx9w;U#DsA+Lq)FS;)(%dXZR{@T&U=uRz| zmb!PtN0r~6@GEUmTw22mc2$$lje#S#<_^-+cZk-Ojy9Rnii6{~FQx_>gf`TunUs8c zzAqgX)=q@pu(u#Rpgt4l{ z^=HHlJ|;_l6Ov+nl(htS0^R;a3xHym$<*z*^~;S8s{x5+yNy5po}<3%cNC%eLgn_q+#&ZxTcov26fO0M<~*h`CDm{{4GgBBHag zj%lV)pH(m38W^uUtSM*j#P;M*A_zK5$nV=rN<|g!cf5yVJ@8RhNeTD*_3ONj)ABRV zEzf-2e?`%!{b@Y)_P8R;ui2gqxfU;wHXgiQ9-sQiaZBCMoY=|ur3dGVro-&B>S{@N zBYf{wq}a2~As`)&r~T{bY6vsY5}56$4R<#2a|9LdVafj zyo?MFuZN0QOc!_aX?JMn;5nIq^NugwMz40A$R>5G%H}$dZw-Gn@XmbR4BBA(o4!7+ znNJ!V@f>=@0?5qwYtSk72IBS<4SXdrDY*67c$3=6si|Mt=Im?&fJGRyEUuMZ4&V5m zrATaQvI8gf<$L3rGP|Gr+}79#gYRy}xvT(3CpN|t>FDBd*7Zyf7b;zTV=`tDnsqJc zlL!b1%sAo@(bj=}w8?b<=#(O<8}8=l0}1ZQWI5+d@UKqCD49#A?wULNet zr*-sM$XI^W_vDq5q8E2tYAYUt!Wh-9KF5~?`}gQ)Wr<_McYuMS-A=u|y@25jQx9%6 z{9PX1JZ#{-eEAHVbgWoMeD#5T4y5kqPdXU|%+$e$E~o^FU^quuqHa-RdY=p(fE zg^@BFEQpPJZ+_E{i@K^;YP33DjrhYwoZX#aL;FaJP+m|_5Fc#4cUsoM!Xwkbd(Tm& z44#w5FmkxqBI|y!`DNenL#_9k%_1}|GBPsxjal%cqXGBK-){w!;ABgjBuE|QSRrU_ zy+~16ZStq@eTx`qpg4Kjud_Ml4l#xMz9r8NQsZ_1E(6rDKSLhR+(ph*gw7$Q2^Clhm^X^(B zaa6|~Fdfs`_V}=fh`_QkVYO6=`$mU=Pg(%HgUV*@y;hI2wy^~NVxV!2(prM8z~IA0sofGYH5m{vh0b)tAsPyF@}7PX>n>x*INnw|%F;K=PC+4#dBB!Q$@j z{>wA@^=rM-PjJP*Zqo86spyng#TOdYW6qS0JzN1vOt$n>_eqWx$0as4wm~r>1}Ph^ z$3SoNJ3!sUlM5kW@7w$VI_-7oo%^#Ces29$!2Zm05jd3c%{zCTlPCUH659o=%?th6dlA@m zr2}YUCX&R#J_d&J1J3i>^O32WP(evwD4V5r2gSt10FIwUf9=uzp2cUDU=dXKDv-}r z;zeA#V>~_|Nx^fGh3sa2cW8EYcA-T#eFiKdCV~R0z0}nk52e&A9{AZcRn9fkNF}}G zy3eGP{as@8X+=fFsjTSX4g1Z11Uf}vR+ee%Hds5lpd3}NJ8EfZJu8r`Elt|q`w3c1 z4{W=Hsnt+n23nXTx}AaR0{i3`G&SQ-D)ij`gK4+}M+^_7_=*A0uoJus;==G+F0DW% zf1!MtUefakKx!j+*@YkPh$SQ>&^-ln36Lff%V`v9m)Z;m!iLj$eMz&hwT%Ler0VGJ zC`c)tkE4bGe;R>4gI+&Cz(#gn#D z1s!tGo_3FRCLH0JQN5jzTF^f#iV*0fk-dG)HQNy*sNO?g9@_`(oU37jT84vz1BWPG zJxwZ6*q%O`PCSJ3UcM0!&CJx?a^PWToq`g57R0N7`1lJSKYm2(0oVOc*iewVO1pM9 
zQ3K=f_+T|r!h;_@*}x5CK{|S`l?jiAS}}!QJ9bdo7~oX9Pwm1A0p1L#J>JKM!zT+G zOU7-s+u6z&&>Ifjw=s%FSVfG+*K&P(yBX@{S__SRIxKo@$1Ko9d8lgW^}WQ)+cR{a zecyYlR}rn5X9=mn^|&%)@qMVs6s>?;%cB(G;o*o8EtOUhDkcH4sC#nm{B1C#z^+2| z`P9@@cc{RQNBgcbBwJ(TJDZp8Ku>Yl{&TnUl@LCvdRJ74f-@MA|KN)WJQ^-8E^m#R z%xXFx$||UNvGKc_RqU64UwXXI-kjt^&_1;T_FNG zF>)aD&3g9LcZ~@0^YcZV=Ln#WhJyb>y?XNslcN%L23gnj>)?cHDOi^10#SpiYYJ4i zVth`{tPxs&k?)Zw+zTx55!s$OW1l&b0TL8~T4VQyZe9*&!vbdqEYEqo-5siCa z+?G{n31@FW@$QyB!LAz&4{K^Xb!c6o^Z!-pv|bzUM0Ote%Ocl z#2hz3qJ&3pfQ5JZ$rGZl1Og;UsIdd^PB)B0W30lu|lQ)@v`q40l#NwKZmMC-*IVJ z7cO%#F=2vOBIru_uLrJ7FX8SF&Q8^uqe{=MMEGX^rxYM*$M5a|#rnHC5d*3ukf4O! zy)dUnH>I~N*ObgKMlU5%k21!eiO-{BWo1R_PK(4E_{}`VNsS|SIqtr{yzuhn%iHg@ z;-?8^%+CM!GX}h7F}lffA{Ct=(qSm?Ul>zzxahdPa7XV^PB8! z*@D4H5~{l_c)^)U9g?TEzs6V7Ksyt7SpZ<5No4lVPfrKp5Ys#SbZwXAh;IO-ix=F` zmvQ>Vz0Qg0uXW6wR=|@4v&ZC0KVVz1KMG4r&DGh>OnE8c%L0IA_kFkjeg65ippwzlG|R&A)H`BXt2uVpvB}(~-jeE?~0n9!FqQNfI%V*D@2Ldp!YiT*_ zx;W4bN7hzi4p620@9YF9`q6;uPNj6Uou$!FFJ6#8r4=Ou)iFmqk6Qbq&a;^+;7#tJ z=uZ9lg9rByEuXTove}MbPtThcfmIZdb*Q6I4+0zh@+CyMSoX%YJt=sDab4Ic z%Kb6Y$6??jOha`6Da6zZPXWyc{W<%+@vl0cM zF#hlCX#%7dKlWxeE-q1rt0Amj=bHM~p@dG~dgEKF4R= zfQ|kWu%Z?9^=Y{uR6YUJf?kMSkmpd$NkA=#i%-Gb6mfymh?g=Jq}c7n!I{l&X+>a9 zQ?s+ds2ZwA?B(t*0u~MmT2cGN^-o{@m(`j`Xnjloah`NzpPqo(79$c8r;~F7^fgh5 zZv;)d_cmdFS+Xr}iB>*u_Ud`BC1g9&xBEqf75EhvXns#eL7BrqKnN;#Tc!qXQJnYl zXNDXO3v?wYeeU~P?1I#UBc-<1WA@OBpyRhu%%c1AQ;yEfL4rr8^ zo;@6Wne>!~;H*YhLUJ+zD6U`~wLKiZ5(^jZ;w#EDOX56fu z1$O%$H~k1%MvayJ_6ePov|v9^A9RrFc4Jwgenf}0YE11Yku(Qb>)B3{MKlv8F#6Y zJm7Wj*Oq1F3i6+lDZRCpLc4+vfg80{Bky+mF#MJIBE`?Ua1_5)NeTogZ@jxP)5UT!%vUuxBlMlX z$4=+BIde`_tZwUdefh4>Uc3#C(uAeCJ@wof(n;~g(8d8y@xUWU3R({k04Agqwxb1l zW6iZWY>{?JRFn#|kx;1ia8glY#%3EE8{24ox;atE`XY)I;RC!{f)Hn(xVHy`laBDuQ+~?D;(0|GA}e zMXyx}`U3lZmtUKXV`P4>``@mUbY0&`?6`(D7!IL>*wQ)vV%2c$NAoNG$KO6=Jw8t{ zq{ps8^XS}Ho)b=jzyGGXG@f0~DH(cv8Se|vjp?Rkodv?~eSf`yPeES7o^Skhr8Q!Wd&T>`J^bI7 z)j0E5ob{aFQNoJ}pJbCUyZ5mf7R(k~_PTT_f9?fWBy~Q~Agd+I;IE&UV1PtUcUdQZ 
zFuq4UjJ@5}5PJbUJ4+K;TPqC}4W-Kv!1+~?Cjv>awJz~h@-OtKn}C!vBm@eD+Gp*2 z$XY1cV5=dh3jPvv$HiwHv@Eim0JYULLetX!6}1<)+13j~ z??j_9?u45?6{*bo!nx7(QOU-(U0Q`))47_mIYMG>Z!}rnT4sFur0{A0x#dh0%Z6ud zZLOoHN5e2t;f#auf&w>_z))e!$?fIkrSazyyijKQWma}V*f`^zY~1APoxamc=R=2s zE?Z6110(CNUFn~qsr1jp=J%WaeD>v0^)E!1M=EUK7SZ%**5haS{Ru;&en6K%nyGJS zz$79fLZK~M84h=6Vs@bs!D2!#gM^$M8`+%!YKq+tApE1tL$7cJ4QgRWQpU z!BA@eJ^}6y9vK;#(~GYUc_|BRhWX3Cd})Lq3|3JiSZUfuOEkiE@_D)?MwXW0kQM`| z%RPGhQEu$$=m_V0osmt~9X+B-QNx3<>66HL$!B_titR9#EzN33iSlW6?EA zY;QjvD1^Mi?3zngn7YJm7Wk*VbmVlo0s9brTaNlxVTv zd20kKUC~kmt9b~y0+g*lq#+cQl%j;k{j;)|K{|fE$Oq&WZ7t}kqp;%SaFh5Vmrm?_ zNzK8^mz)P*uAxyf74N*fJZBrurw}Wc$}1ahc&qxl2LvZ@iLk*i>L@;h1K_wgaP<(Z z52(55o})Pv{c6vC5ppR&R;Ydqq#XObRs^_#SYRyMrFK)HJs{pN+)|geoGj`A)ZRmF zE|;_QCGbiM}tlP0->#xJyJZ>aSE(=+9p;r#^75RlxhH!B-5dHq?vhd#j^ z)&LpSi7S~HMYsl!FzSsHhnlhIq(z@zVSk0W_+`I{hwH)_;c&p;AdwMJ@nhTB*&zrI z4-euW9DL^cj~`Rh)6bU^(d2<&0lYKH^lg4t3h4!=zI+*y=DU9mx*y_SP_vH*?i}>l z8`1mlT}3XxHBrG04XhyGr=!7*3Eufeu_fxqraq z8AC(Etv~%}D$uFseD)E5jopn79>@7EIzW7K?|wi=%#8C+uJ%<^Cn5xvf|rs24wOQ) z{4Qwk5R<(D=4`R;;3L77H)Y_jl>41XA?YA!?Seq%xI7|6KrbOO$_L-*!Mb4b2ZEww zV`Gz3Qvu`EUZ^QfDdEn`b=BE7(Euu^xw-kH(b7D}*8oMS1t0mt6{jk@50St)KnK7V zK*3i2d;Rpv%4b_{i#Toa0s51fEJ?IswpkRrp9LLlVeljM9}(U8JNWU6&D7J}2^~?@ z2_~SeggtVNzjx7zD&KcWZ(+k+lU?hgc?Iil4txQTVYVV?tAp!lUw9w#Q=t)E*iuvo zHuTxPAho~1oD8&La0AUhfv6fCj8D-pI7k8LWT?`W6F7`U&gLvD03-y&yi}W;k_fnx zSjgX%pVn2~KN}h{+vYjA-fQ8>l6^)1CioxqX(lmM-!E1+&AXvf%UYX-7V*b4KClw)++Tc{FKb`u*dm}fV->F*o1JtE>L25DVnI{Ee>l0q@`df;)eK@HqLA0GY0%f6DeX*M{s?6q?r{Jdc`O9<_A_wD0rdZ3 z8wGz9UX&Sx!S1Q0q{h|a4bc78Z#01Yk5@B!9aOGPH}y(`g4WrNgx(VCdo$O7jYH|i zLPIElXYh@nB*QZpgGeVj-S^u?#ApArW0)6^A`~7gL{W#6@t`Fn>E$@%S= zC|;7dfUn_K3DQgZ5&$0ndB7M*A)M*65F9YFwvGhGcnNMVFuWTO`9TF* zG!~;+4MKjFLR13;grb|8G;Ss@Fq_=+=N;qSy^o#3aNRohs30$F^JMW0l5X$bZ`^+)K5of^IT!3Hu zLF;`Wh+7RtyubF($V0zFd%j8{F9uYit58|iAeYt9-+uvBtuGs(fQG~8Y>@xvlzP#k z3HIM73YV^l{wy!}P$GI*rQcI~-}UpyyV*T)l6RUleqsAS7AUh|RG-XM`|ST8IrvT> 
zR(+f4t0)4BJEZx^!NNeh4|-Tkn(@GB#Q{RBaQik1gdd}R{d!0%Fu4(8L`=@cR;TL$`$(rNIta)!D)P)@$Phq6>LKtV#{51@nw7*&u$mMpmXMNocd8%I!`F~{aSHbzDa zSs|>Mn_-0f!|Sgc(^9f&eO{HPqbPZC3ZaVIP*!DH3+X3|PfWxGZw)g0zY@u>{vTq( z-ld5P#01iDkXxR*iuFS7=I4qrsOn2UzfhnddJ4RUWT=`|Nca4G5#g(sjJV~bDNo>X z!|!EjNP9!QGo1K3!@?9Gt~MRrqIR&pw&RIifz2{+?O_40NK-ZD^kZ(cFv!1koOk3$ zF@63m(sH^un=!jf2^nABokx2D_cmPyS4MBXR!#h9ffav(N5}pTkQHW*AtI-eB6a}) z9m+Vtp8^XlB`735)O;qv@E4h01Tc$Cci{AA3E(NFZbTS1z^K!72xftB~>W?Uge z7qP+qJKSAI8;7?Kel0om>C@lk(l}>1be=jKmVIqC$bH65;5~NApSw3|-}d(w7YS&! zM@WBs-NbAaSOY%;T`!PvICy2DMu-Dw5=X;_K#ldEby=93zg#4kICt(G8r(J+WX zv?V%p0LXl&mU0~s8)_*7Ou7Kd;uDajUkTesgT*!#L@ebGMITb~5V*vFQHdGPpCIFl zKytKZcN+g+Dt6(iZdQ;A$C3TCnMd^HNYl6<>pQb-z?}k$cTyQQyENZpujSA9Lo>*F zY)YY3q?x#Yms54#HOy=7`+Yz&Rsm)J-|%Hg)}R+t?0JJ@eI48YNHzrw8v3CLC&V9= z6*!f+)NV*UaK#Bvd>|Wi{)tTGJ(&@h_eVlbI2h z9^|%GGC%J1{EG2e*PA3SfwrzpafL}Ww{;@p8D-J2;0QNiQ*%Zms&yss7%FIKk;C#WEiHYbn>n2UVnbxW04x0^;p{h8bJz~9AeauoGnf?hL19!< z_bYvA3>D1(=`enYu5IZju@s^}Mv!3Zjd;nY?zgc!2a3p&$mhm!5LFx4+cO zJa|6O*iMWJ*Z(Nd!SDEx2|B{YKXrO7w&7NE+BoBZvLuth^i9i5Fc8%TcNZ3C18Y?E zZ-4tc{6WM!)%o7=h_PeAojzSkeKF$M`wkY+QxDl6_?`2?8#_bIp&BZ<_tz%}I#OS47cn4WM1CDEdf6H=fxXj*#{NA_2wQ6!AIdJW-?l z@6vrJ4Ve&*+g6$ENAWoS1JR1fg*5q}`3i~t8tSpX=W zsAULZAlN^cvFmx!%GuWe6g)KpwD)>h0^&Km1J7t+v<8sT{1AXoXWLk(3R>uOA!_e} z3<`Kgnk_Qm4Pf}f1%8)i6~;m{CCpt=V@9vVgmlEj{w znThsaD2e@vM(Qwm14&(>alaFvr*z^M;qW!UWknHirlhP#tP)G6kPeu|DQY;`n)lhq zk<+$yeP69%`z1z)+@BuhKfehp{gug;mSmnSDz(62L)pGw$yz*sOzOy(u zp-d^(fR^@F?s3HH6q^$ce7qlLK_x6)Mk_azHa{~Hgij%Igmv17J$6`b1T}l*o3!pJ zA_zr183VD+Q-dghx%5{+knemM_*P0(Sy8oR2QyZpnUOE1ZFaGl$GdEOiTiOB{{NEl~FN5>J4w^NmB{wa^BY z_EcvM*UR5gS2O?}%>Xh8F+{W~!+e^)zCNlS^s>eNPtPUJoHl6PDAFy9KE4~SSB5AGzRY%(mT@c9qdUGE9PL-!4OY>XTe_SP!j zDBkj5P8PE8_l?XPu?y$+A^pYsu#F^H+K*nQ3=VD+fr9@ zfVL*eRdGcom%_-_E@a1>W-`jnF7~GAIof!?Ox6Wx*=Tx5g_Tol4}z9#@ydL1|9}oW zwp}$YsSyt<<05v%KNSGydRW3Sz1n96B>Cy)`06@)cFg;O&~tXLMFDE zAmH?{a3FxF=4)emOlL`Lh&^z_u_=4I2yx8ajGfdjyv#Yb|APGvBQECQ(UBwMfb=_{ zIieEX!lVQtR=0Pva0Et?W%P28sVe> 
z<{(_f=dJQX>of>aZD5@9;6z$z#aq7rJg1dYI z0*zH3o4oep5=H9jr+YrzlO4LgyS8&%KS~7~arY82`c1-9e(#&#o_S`3{1)~Dre{Ww zwxg8xmAGa%76#EF*e}#N134EODHaU>;)9A-q^|nUk5f^uB;x!#(bO`@ONm|Ar5PQw z?>RlIEyVcD;kV!ieR=gB0f;;q!`$NKyOO7G74ICHuV^~XaBq#xfDoY^Yq;QiBryfY z2X)4XXCKA$iV6>PvtOlyTR9C>1B|yd zm~jT>?bhbj=_5V74$L{vg28BbVlei_TY%p6P;uPfdHLwDJ$y?P)AkEijhIgC2~>|P zu)1_AT`Ws|;FdH2z9yC>K@jk!(7ysb{+~TgALYA=7AVk@&me+%#*t=nRXydEd2rS^ z#N{948o~CW$sb5vr`R5Vi?Lph!%%0$E0b}%4n%TTKLPX3I?$Y&{yl!}u`Vg}hP373 zQn@ixOV#fRwc2=V4VjmczXyC3?RADn1p5^Ofq9;qDAUV)O3m`K_7kRKvD z%%guH5rYHe0adQz@3x4AplK%%@0JMCvE#TV1eOy)Xl*--9Ar$lET<>aBax-dG$c+#Rm-sSS4lIGDr#V zo-pm)sjZ#W%Lt+lWFG#jf!Rv$geOByANM-XYS7lKQ{p9&3wRB*rk08Ei(c`R&HS5w zu(yX!&!R@^OYR2|pikRCJOhCy5^H0|qhF4|FM^a=_`hW_{&`0X+1ga}t@1-QC_m4? zNTOYmPSF|;u9g(MWjDYy5Q~{1%(ymUU4YS6PCSdI@PMa%ia!Dg+Iji@>Bm!Ke!6-$ ztC0f7&g5tvE|*NUV56dMD&@cj);FLH=d{5YSzcNqp{0EaCiEm!Bvf~SIaAbuHsgHP z`v}6YXQ2r}Z0WxZjZbeBHsO6dZzd6O4+}Xfm>cbJL|xz8GS>8r^t50jlYjn1K#Kj+ z1V5z8V4Bw??bYd;E4R(Oap4;Ax#0KUDT0)b+&5aEWL+4k`wHKAm$q5CXTCcv25g@! 
zAjT%mrw*CCSr{YkklPBz%a!VA?2?O`=Xmq8#Lc%p65k45IP#H?*DV^?fEZ;9jHRQ$ z7TTN%NLdRbC7hj|y9$?4%)zm`BB9ojOqU@-_ZJ)KX}TLcEB7ihJQuxq9}*) z1(8Zuxbp2HvMsRoQTYn}golgV`eD#1nc=XYi0^bJRZJ`sw5Hv;*Y=RaWG*scMISJe z%xPu({H~7&0Vna;69s#Jbu7iDv|g$iagRC`?T=dlVR($~;+-%?3@Vx&00j_Y*TI0v zf|M5tHU=bf(b2ivpv=SnoP`PCdeB*$MXsKf%5WChyyoPT40n&8H^(AwD%`a`Q{!23 z22-I-R@Lj)olr$B_ORm%I*^g&HESnFb!aq(5BP!CW{4N8Cq|g}MKw3jKG38gnj=(< zriG|7i8ulJ@Jt-ovLuDVf@G4~^z|=)fRHd{Yc)ODd20KzkmXyeY3VAvJdFSW6vA{x zu8Z2}XG1`#YlP&s28@-%0Q6KOPmK(KK6Kn30z{K=RW4Hzoj$%9DQQ8=le8}5_PI^> z=~|&54I{6az&UhabROd9sKq;3POj;Fy<{Nug0|YPHFXN=?&95JBIC7CWaE}nL6FbyADp>`#Ct zG#hm%^c0(F8NV0u{D@Z-&JZ0eYfoa7CD^zml}b7Omi3M1xP_Y5#V}7|Pnb)Jrk8xR zSp5ri3L#f*3<5NK?t&2vc!!0Dlm6S8UPNXIGh_{ZVBY=P_F#b!^RVi}B?of~v=YWS zXY$MUVf%otiJpI$G>NtSqV`S>&^{{bvs}Im;Y|z}u9gMEC!_b#e{JMA?#&r-1VXx9x;b0(Ze{u`VM~?vxp}~Dfu%YRcLP#=# zgn?>XFqxc^clzJT7D#}hsj$n7$_2LudKw6#ZnX~2YJHUB4c7_Y5kgC{jY6)`F9h_F0b zFbPr*xpt%YmD^jQD7Yn7f%G&q_~48W4LwZ5*Z{K<@rP^tv)mSB0{}JUPO}mJJ)km# z-zFHoMD_=irT0>$zqr+#`|4sxYiV%y_p!byQjJUS*6$!Om~s2zs2dFTLR(3PToZ_h zuN=Qz&yb1dxEq_8$o4Oij8|(rdY4$tOfH-mhaf47;tjQiSWi+30`*z{Jjj#R3WQ3V zlAH`{69CD4h`u8Tr}-G6#O z!`{3Txo5sapz1sR=bOfPR+Df&DTl@`key3VW9EpFUzUXk6cain`%$L|6SbE$HOb(r zpu>}()j#C)f=E9idgb&}QkXKUZm{MQO?Gn*`3X@YlLq~Gx6TU79*EslrKKXhkdPy} z08kb8s3)&s}%8&>$f!YN)zD?T=KmziA8j42n8@P|Ze@SlXdypR#C^Q1t+Ol(9 z-x*CwOg?{?1llK79ZY8NLCON`H)??LXrRz(%p$+{(IgC70$XnA?p9wy8+#7X_c;5g&L2WD=Ezl4?!9x~)4xKs1qHb)~wgf1f^X7$4;4vKFQ(p@FRA4WsWAQM% z?f{ktY&b|g30f$n)53hH$$l#9QIODvgg=nz*^85l{rV=yDo^IF^`?dNwQhRxF`fp&ok?%CGQmhuSfhNmBV9jt|=6>a04X`Q+~B@!Yt*!hM+S0-sU_863F;VXAW+` zWdA+GW)SY-V|1Rvl3!d*hP;0(h;QqEN9|34Kp_5LE1>-W^~>M`4Iq781fP2VseYOm z!27L$F3`~s2(6%>m84T@L)8gv=qf}!6Gfd_A?_atF%k5Gi6|vKU5cimcXM=;^-;5Y zaIILmrM0-masvxLn95H6$|bIA*>(F|7e1Xl|9kTM@#{`(se_ph19&>GUpwxxeJF%v zQ5Swp;iH|q5~}^znJ`do>7&86an`RK^2~M_5Mck|+zoI#Z)YhGqIn2NUx0=EOpI)g+&8=Ap}%`z;F;`%Tp3U-;*GC zf@dtRpkM+?J&5=NHDE_zP?`V&1u#%UcFlhLDcFzbCK?se)8$9Rx#aTO6wIb_+-SdKHK^~I!iQ=4VR+&PnQfHY2-$r9qKy^SQ_TvsNp@a 
zz}RVmlne}X1cQP*uLN8WTuFF+Vb;zY&meJ%q99NKB;nISY{pV!WBozwHH9m@Fpx)s zFv3_r>Q;cj1~eiR1ZwmXe$a_7K$pE$9?oFV;lhv;22fxaiYSC41&!DgR5Szu#}&;{ zpqd<-g8QKL-PLpV#Pv*VvWNd_ygB9a0~$m6%9N}zxNtpG1P%pOggYMw_6FE-F*OA} zn^?_mPR0oh)6<+keB<)w(U#I66_CLw#q~d!rrK?99C%sRMZIR|iX}>mW1SnWatL)7 z_&e}iJ7a(1i806&2cfR!QOQ3j5pFZf%nwtMpY;dA4kKGcV2VRkZGichiBGDp1Orr9 z8-a{Pde?xR4TXjnOzXCn>7h@a$mI5CLaA-Bh4~gVwS<1m$Y<+8B8Ue(Nl3U?dTA93 zAv#2Yo^1$yp!jQ(>#t3;4`BJ*Y~(-@I!b@S=Ssm}TlnwC zZFU|d9c*yMff-t&ONfsjNjtBmuCD0mS-I{AHVXpFbUnAfX7X-Ld8hM>mWr^AhSOO9 zw$vZS>T3f96zeQE@Wt^(wQHt>)m(Eic_+Vhtt5_=VaV|#T^`2MDUoo|7Ll-wl z49`l@_&m=Un97wayYvjf-xmtJ&T8;_%Hq=L;KIcnu^XyAKYsiGpAjSJqr*hosCo@ z@lRJecU;L6j6qhei$^&q6ZPh`MWJxVq;T+c^uxKpLYK?xP2`@$)H&LjVZ|`ooapv* zIm>t-pT#mjJ`elm$x-T?c*dJ?vyZRFe!wCK#CBw-Zpwb-zYy4E6Y~FX_TJ%KzkUDs zJ1ZfZ>>^o7MiC(^GE#(uvJ#cZ43RAvX_1zhLMbXlWD}K9Y1v89Fe)L*_i?`Ux$f(} z@9*_Hj_=QLTz_1Bc#qfXb)M(*`B=|D(c=y)7rVc&4V|Y^j+3o&ed8ff8@mrh(u+f{ z8Rbk*#%}AVZZ!(e9d5cGGAIKsHa;{$(P-`7iWHl=yP?HzN(|GE{_d;NNtv5J6k4(- z`{c)|kGTeq+?LH0m74cYFWz0GFumv2tO`S4okhAD7Ju0A*s5c^=DP?t;Zb^|iqVYA z7nGGcP8Ap|RD*7(ll-XM&I9WXycmvd`=h^+O$mc=TMg;Iho_(5b>n;j; zfeor_SmhkZ1gaNEmxOI4?8%KG7v}eQJhsu#F&}z+l$8QmJrRo~CUJ3bnIRC7$U9_` zKSkf3+zdiVXv1L&ETK_$t#34&*ZsZjlIV;tZ@T5wHkZKR5_!?Gu8CHlM#t>_+Wysw zS9-%F^K9xRuDkxIpMNFj`CDVAs`nT7(zL5?4(wsvSLmeGRv7IbFAPY2_NA&fz{jH4 zTz$63COqFN&VA~|>hJfHwb-NyF&?&D&gH?J!<{6yy;2uCmV#$=Wlx(Fa{VbV|0?ok z$EMGgrmsHDcg_E9^XV#X3rbtxmi+iwTEg;5ZK=}-S2LBF&_rM0{;h?@C!ZZTO~{{r+U8GwrP$V&;>xM zkb&Y_)}Rt7od{8j75fBtNQWb5vgkJaI{Gq-vmv19e|`7YEo#|V!;s+1x9wLZ{O})iAD%z`c+t)rRlBd>mpidh_KyhsF8S=a&a-RD#Qqpr zt^th)_L|=%<#y3t(bjUh$a-SGT6f}_`->ajcFWGHeOG3x0LTiBu@6h;9^n9Hiu_H> zrFOxoxeIeg+TRF$(=h9K;67%AtNF_wAWo-x;f@V?T_oGkJ6e# zA!t9CLWH9+2LBql4r_HS5_1Uh?;N244{q?>5e-(En|B!`&}{Z>bWmp8QZ2rTqPd1 zl`K=yZhd|0+((mhJc|1-Tsdv;Dequ%^vK{ciCr$Y!ZIkX9+LS_@W7#iOM>2c@W$Ql9-zvnxV=#6==|tRl zxJof_WkT~64L8qg>qgkGkZ4*%3a_9^q!pUcZu&h=d)^xPlS~Z`?Bhe3w$^^~EBCt% zS-Tt8$%kw0Kt2e^TvwZ)?S_81&XT{nAqr_EV?wpB`cK3(n{5 
z5l|DAkly`5Pt|stAHV6Ttxu`aF+WO%_39PuW(}(jM6gmIcZyiZ1%+IfmBRlxH~3q# zBt`Mhj(3`Q?j?C z+UTqzT_{p{I*^+%Op#3 z_yW-&IMhU($xlOL73cV>fw5w~3tosIAb0&;90l$5oiATFnkjfOuIDhDv2vnH^^-N! zDi@{y_7Uz|&P+@3&ELsm&*~X2W5opM(rJMG0BRnpfx&DQ$EC|B_uIA(zUYypgf(mW zjeFc?2$Wm0d1%@3?OYtm+pzL>xoWLt$uVC7(rBGWHy?5A>eP6SfuJ<1tzvtpF!PaJ zPvUBgk!an=x{H}fJJ;_xj-*n(UFh5qq^Bz;{k9a((@#jk7&L#6>u$;UopL@lc8hFP z+R~A$%339`?7o_!)X5=kx|2@F7quo6jHU=WFTmM9Pi8iwTsK%Du{+>|{+Wr)Lp zG7her#JxGi;4@o{=t<;Gw3J>Se|+IEs>q`cmyUlGEI{*&2hI`gGx99Q2({A-$#O6< zs>)KljvQ$M5l$c`!e9XCA|bUO0KWo+Cl~+$myyOPyU&e$m&WFeUa2KR>R_e{i%$(c z-f5d*9AdC=vFX{17ta9^ib1!JwwXzGBa{J@YN+t{cPoReqEv&8s%MEuZ*C5-a+A=5 zc$Jsw`gU;)iTQbjM}rLTH0$XNMFBPdHxYrt6CNyP*@FmK#BH~`f4Js;0l+xu=(OG4 zZ>Bop>RATl2?&#Bo{A&0cnrklL{>Uv_P>2aUMU*l4s@TerU4dl+`s>V=Notpp^QOz zv3ipxGc&V79k7l3_R*#shj-W5?UIZlE?7jA0T%Ck=zn^r74c3yX9wMejXLAi=LzHm za$Ea@mu~+&nY;6LnwKroE&vMm4?oHbz&l2JO$V|33iT5L^Gvp6`(F3s9m3rgsMlW? zkMT~Va1AovjM5=FnF2pCy?iWY>X4b+>WW4QgG!uMC9903{(6#1+xf*&Ri(nVj$?R$X7 z$4r`Qy|xM%Kw>|JR_7g9BrSyBo)l7b651!pNJG(*NhZ=d>oV_E=;ot|1KJsvirDj% zU>MF~8q~Mol$aBWD16Ng$$+(D?vj||Tl+s1;yY}i!^NQxuLta+1Bel>Mf-IUVDliX z%M=Q%u_XKzK!i4$D{rC4^`PjbJV5~U&-KZPT4o==afSY6We3xq_3m4RdGTPJ0Qsn~ z1CsU$bXeysCdsoKW9nIbeb%G@LH3bF6yZ;5{Wo^n*7h0^ju@Z7WK{~Wd6`W#th(;} zHidaZpb9HRKF?UVqU(m{lf4yvc+1=@M`E(I(BB=*p47bQ_zE%6rB+P*n#*6ZgjD2T zP{Y?0lM%%W!QDFuM|;5Zc&69OpWQ%bkZ=6(YAx7Lne)dq1WyKr+Jyt&H?UkCyfKRh z2UF_8A8TW~1^i7-2Eo^N?UG}R@KT=oSW0z(x1755+?lYL-fBXW_PeK*tSX+d$m9^d z1)tmycPJ@7UKf8)C+!IGIy{%UN`w@R;&D71R<&i{a}O=tpPHGLV)*_FoYcw zK%FoJ_&p*o0-=ai#S9l*@rP{`gb;ujUp2Vp{hX>hv z;Fukgga=d2WUhFs;i}ZnA5WNlJ@_$odorFQ88&aA2|$^-MtGr zClJa(I=6xf6ua~e2r3NEcY0yh(Xl+}<1W(2o4xVAk}8ned_pSsba$VE6$NP=0;R=Z zb136r^ITUyz@p`2VL(_Um4-9B;3CgBtvJrx=%F58zfRcZ#htp*5#f%Pbu}+C$y33< z0!%HIJR(vU0k9EG6NQ3prkW_T0NA6a$`1VgfgEu}SdFU(5Nr#T8*RB-zm8g?uoOdWd=Ymq2&%b(YJN`G}gXa9T%o`rK~@mhO8A|T~33^|kM z8b@$zOiV$}-VIS1|JspYXuk1}+@RM1d3>-<;cCEt*LwBK|;UnKvL*R%$B&cv+0}YR>#a!kPMNXbPTINLqmf^o{+!>m`rw|fIZzD-IU|>u1)kl 
z?*+!c$92y`WpU14Pkai!21%Z~cW`!UBM%>39q(Nmywur^!7CZ&9;(G0JbeNPP<6`0 zA%$y^8_)XSzi}p_;J|{=a&TBds5*j5BSXl^3KAEKPb>_J4zl(&wnVCkGHS)!_)mYB zv^rcgv_*mZ<@w>y>4-?@fRwWlh0jTnIfera6-x8n3RFQTtQuG∾cke10#r_^TMJ zT_OXeqPK+i!;~i$(!wWx$EBf73bP{Yv6O1kEJ2rP_ZShIj1km{6D?P5J5aGp(;abM zSU1&JH(UR%8=h1=0PxzKfD)dDf(#oFAxnX=q+U4jM}vaX$!EcxS!@5$UFc5hdz_0cXCxhjy#lMB-C~B<_e_ zAm@7R^N)w1%-jrKrT%6L-as!})1>-px-OCJ7)P_DS9Y@;+d30UO5$WWvj_BOo?=je zf~q{R3-mR<#j(*vF}Ii-vjf(5->?}-$`huRwWkeH;)5FO!XpgAw;;6>&D9ChY$b=$ z<(UCvrN(IJhX+OyHeL7A3UTetB9;9$QEZ8b<0rwGBpC=S0jg-=mCOp;P%2rt!wryn zN0shl-5?M@rK2yJV2>9~HZl<=T2D(S)GAnIw5N*d-x@3ri9+X=sm8xa?wHTt6~0GTf6OT?Iq^O+#PI(Gx1iWOWee z;G8#sq8mOfw%^J}v0kt5Z6iEFcnb=5xWki#@h=Q92b4DbF0{|)^iIgB9fIvB8 zUD zw_jR$AKOjt6Y0|3i?Nv6eg|!= z-w^wA=dLw8sEU5qPRmbm2)HDW+EpqF^2zWBjmxIR}DqF=Qhebq0psMow z?1WwIO-Usq$~`oO+|n@+KKG-W*!(v!5J5~#(341;?{?Y+MiBges`D%)x@g>X9q?}JBJ=^G8Y(JI$Z?YW$TuK$Dz7yx@(asDx?tXQ1&G!!E2#bU;D1kprgB~lPMkx^BRfje4_ zrD|}LU_mOTyg|uS;)5$!ZEfK3*3II zI-$e5SA=!i$(rl3roba4WzhT+xRjhD%sV9ze>o!wSy|h=j9NhTc3j`KoCKqwLPp{* zGoo<{fzp%p4K9(exai$8&GY2Z_v2F%Wh5mG8Bk=SL~0nFCe9qRBV;;As+wQELQNV7 z(T9J29jw5EO5iHst5Ik+9MPTT_+LRuSBfL1V)8LPtgp2S>iu(e=NKhVB9!7 zOj(4z4>~q(H5) z^cu`PsC4M`3jtv;`e&Zq18S7nDOmW&n$Jq4VhLrqw|&X#`By?Eygc)1MA4K#9d|jS z6rD1%fT?!Unsa)%b{3jB2PdtMyqAM0;I9wXthZR9Jmk=CbMW`vbpqF(II#Pc z%R&Ybw7Yv+I&jW%B;RyeH4LpQx+gAaZhSj;6l3$62~C*?6&Nc-B2mi~F3?W@mK)S; zI6!v)chcSwBJAud_6h0p^75`ShiN!@DR6J02Qz%(ubdA9rvZf37-GI;ds7D!TYA32 zwaTL~$DWZ(!E_Cm0&d;<1XOV{h2!@v==LNvW`0)K;HLsRf?*kM3hBL){ z(BZX~^i}5bl&kpasBDwGmv@M*OxD%&ewiInZparO+&!o^tQH9wFJ8gYnsfwPxB(rX z><&)QeU-)G`(5GPj^f4;#XP#Toh1fGwn_RP9HwJ4_^i{pKkw1Br;Lxz2G)b+V2I? 
zw#H~^_fPT(m5W3czCPzZ`&o5Y_Jiw6N|x-9AbGl5keSh)V5)37?yvb<`c8tK z0c45jGjzx|x%rXtpwFM3Bs5T*;g2P1w=J(mygrR`36=9ER)1Cqn6|ibsLFLcPtb3b z8r!=9{-=yqNyMF_J^&;R&BCxjJV$i+movF*MYcs~{u5`p_nG_V4u*}UwGO{*U()$i z)`U-*+?^p(Yv-N;G6_ka*Kknd-1}wA>s`CX_Bx*&O*6$}*VhTVUfph3A)~OS>(90I?i~f0TtNY9 z*Zg-Tein6_gesX?NeEZvJ|9_;?44*Qmh}$u-Y88Yt^QE1irpu+JSpkx zY^CWD*!QHox3zAOQP~r{g}*vxl9F+w^8h(G8XFrepw>&al_pDsCtmkewihlNSWiJj z{cFWv^v4sPfUAIXfx67k)Z3>8jVsO|*bmosDJSYj!tsMq9(GAkW&_A3B^#pupy-R^ zqCx5t?7{h75v4nhyvyj=Bv|eNe@E-&jVxPTS9}#lL_Klo84M=?7uW5NKyOIKkF559R*Fa8=_0Rzr)DFzlnrPRxBXp8 zz*sYm>Pq@f`RWTBtX{ZJ`C4(w8EZlHVPu57W{Ob`Ok+gUSwy>1EMIoC;aY!xu^U+< zN{-tyl+y)F6nm7iAjf&66`>8R57;m!EFZNP}Z zX`yJ`%0t6p>20{*<{$MPG-r-Fu1V%6T7mcn#s~^L>pP;g^pY05mU1@{~0-UEeQ6BAPw77K7drY1T9n?EUnmdt zRL{9Z9D{s@j5~q*m_k8FFyVZlWt3}v&dITeqAbK#m!+{2$koSRC#}ERFw0#bn4zAF z)d}%ojC5)wb}?kS+f5_cX6R{gTSK@ul1l_(b4PhEC87=fS-sX+$<@fa`wIB`jg57d4Qni_b3aIlzk`?jWD z4t;%HG=&(zTrfrw_;=W#KJN^;fMs(vKItVc#bD;o9V;#%SXR0s;H1~*rh^D8S8pw7d7DT99M0>7K zRV~|Ycm$Ltt_rE6K_plhf*sVH`>3Yb&B26TF5^RI(f)2@D#H?++YRI`^lR~}4D3=c zE%I3VV=F}zZ~*>|`o7`o3Pd~)RfZL7AE8h@{cay$`{$L=hkc3*;4%=FELN{{ggL^> z9$(rU#0=j$TA`NWZitDZ$UnfERSUF<`eOG93OGF?q%*oH8o=z3?j_>g`{xoC;S#R3 zEnz`{qz@|wnfnPWs~S1A0o#7TCPtE05r9Uh7V@G9@9-&DIDPc>c(fq)Z=}0igHX@c zQcnq~-Tu(Sa)CL}6K4Xn92y!r_~k1?0^rMs7^lSwg(XD`*aD!Q6xZEYPEt2;GLL__ z<`cWEGfSU`*duD89Md=*px**z%o%ib4R@&CVLPeznfMgm$qbw!0<;kaSWY80Q_B=%U zV0yg{OBX!TT4J^UX+{a-SK8-*X8AQDNr;0Vc{?Qa41q%w!V8cvmxcy<=n1f{$lwWN znjSIXN-PD_1oZchigri16VQo=l)U2p!f zXSSBwz)#&qE!9zvW0iefG0`q$Y?@T6djr9N-QRzBPkS!s*wO+58Z4# zVg$$%N7OF2(&L3C{>wFD7#+OYZJIBjm48D2P9#XyNmq48?MHlqG>T z1hRlAmyU8-J&=T&zlo`s{q;*sL4jlEEjxvjW*dWj7oMoaZV5(HK@! 
zN3U5gCcq0fgL0(N5beEmZCP!bjtujHqApK1*ZphZ{a3rhmINVji7+>I<-SX{rKvZo zZ21eZeGW!X(f|jT7kGK6&XqAJQXIkdsFn~952Cemc$;98Gx}<$7Z}*#&zHs$d}Mcw zcmZ9B_aaYaljFZycYRsYYd~k|^jr4oov^Q`OcD<*$l-SE*b(nnk-MaP1tS4#2n#$o z7{PtyKxfiI!tGVn1lm9AN);uN;P26GgK^3g!Zul>Exd&Q6?FoPXM# zQk;N1*N*?F-ORUsWy1%s6hc~}R=}fc{5x2$OxkFpdcYcngC+Jl7hi_Ow7#77&@;BU zYngKRs)gAEgnKk4x{B*dv}KrOY&Sj;(%x->$Cbpl9s50D04=gr0U^ySFm$5pjEfW6 z7EGogQhyF)SROetq-n~u)T&K0iR*fa4%qA&h`i4D*E ze~1l@e*B7+p~cCP)w2`wUJ*)vaY^y`L|CKzS6bN@OCKdtA2=+?|hKgXBRk*P#i;3j_2FzSK8@pvogfl3t$y63X<01m?o9yb-TMA)P%R=wwLehKh&Z&%VZ5O6g_w(Me z)_lSIZ-;ydVj>~?Ux@)EC!62i@qk6g05tHeCf5|_2vZ9$poh~AN2Pw+#nDkJdMZVF zN6eCxib*BNF_eWbi{oU*K$&AmdKYs-H(vy>ty7Q1t(C7z@A0s#`+JZj?`rsjAqFj5 zagaY`Ocqtv5Ru8@e+j|ePrv+&RPhVC&aE%}0C2TNY1CZWbVWC~oNoP>bFVl2T=l}Q z<){b~^=r2fm`3kAvKGdBe}dgxdiXAHp`Vkj#SN!JfA3ZcJvt@}cT}ODbxVuhRc}RX zjlSvde&KNFD2cTRKe@lz*K1FWj|XIaLxvxmWNsLBO=1*~C95ql_{><({_9`9z3!Y5@G?dhg?BtEusBmI}j%Wgx zmH-Vw7ItznM*AP}AtMtL35_L#PhicebCB`rzg^S6GcQfS<@C5eT5% z_FjlWDg+|*LXAfam!Oc?C`8*MAp>(&RzWstVFFQ-iAlMs;JR$vwEwwDlMuE}>Td2*lSFBpS2I@C#qo<6RTW4Jq`JtL3SsH`N#>U2| z+eM|N4WAUo4%0(I2Z*3>qFzI&f_G_M=y%nZcS`m2Dt^s>?FqEAu$*5!-X- z&2fI6iiriyGH$cyijzgP_w*Et&3*D@Lz8oNT}9&rT;KU7eFsiR&4(2ap>wtsF6U9w1va8%3 zHa1_L;CKAiiU)=6tYrl)G}-0r4XiU%-riRoE-t-uuljw4RqC^<#NQ&miB^x_5kfOI zH-^O~qYA@ONLfp!W+P3{5yMS}F`8OkLqpV%lRQ?)!X!hpYH%)6i2WmF))SwK0xI6^ z5pI~A4kLIDF0;jF+2^J<)xEB?JH*c0&bjT#RK(e>4d0s&S-Q;ouDer8L!koYKx8SA zyu%0xYQQKIuQb+0@y z4k-nMvz}XO)>nLR8PN`t=p{raQo@kY(|_7PBs@Oe_|WB)*wGbH)tOgK^n^_smtTMW z-to)R-7PUio1XVDo0bHxp~hI9F3U}(+1<3zDHI1XiIp3lF6XvQvTwQ=*}UQG#g(3H z?mdiyw6;#wb)!O!L~oy0d+RwZ4&1M(aDQLDY7G-;&y}xE>I=Lhq zovRquw2$p6x)RH>dcCbm!W@(0cVi00%7|WH)4W7rwr;ENRv}U9uWCr)X2VyI=Nszp zuMCwpk>N0kl0;J&6a^LOvVsg4!=oOT7JYC~^uPRUdE=kpFcqB#M{62R*1e{dKcCd_ z642I79r_OKq7zgY!+lkouv|z>s~e+b;mMt6T=Z2>CW}>K;6-t0OwsJ@eX)w2#!>2D zS{=WTi>NmlYsh;74zD_FgCvB5$oWyKUt^4XB%9;1Y@liS@wBuied#D)qh{7YJXfq~ zf3K;<@(b=fQ;?7o;S#zq+4j5|E}eciBVN4S;MCc?_TQd-M|VgbRA1ho*r8!lo&3+o 
zUVoNn_>4FVy(?C&Y5@MvZ|Qh~ftQKiJO0Jt!jdTc8`0*XbNE|DQ@f`rT|eDN;wCC> zoVG7!b*9!i2c%4_G9<;tp=6;TEc+T$_#?Nt&YakU%Bb~=&z{{jWwQ?xjT`UvmkhsZefr3Z`UspZQ*aA^h%^qF(n(mF zAXq;kq7lTWNGEpp?2cT0)68oTAp-At^JW$>t5_Dq{sUn?rkKNt93v7K1nHQ#q2l|I z6Hi*35595hsb@W@+~OVYYaqBv=;oTO>_patPbrwqJFK^~IcDX~@zxJJtQ9_RMA6@? z|JHM2>fREzAK|#*Om=xxkT5q>>5a^&-ehvQMSOJEpT<(J^NT36a^3Y^y31p9&it_Q zVq^PJvwF^^uH{*6J;%cSqUsQCx>rl(d)!1zl7W|$G#5pF<<&KNmU=K_NB_IK_8x8D zMdawv(U}$`+~Cpahpo2(*w9i8Kqu%FLL5=I2Yk5TO7c<}MGp+V(39}_c0f7J?&0#| zXB082r9R)QM`7iAQ7LYhgKJ*rpW80YZc&YMl zp_`>vpFtmFXt)G6gaqI#I*j4iIGx~~bnC59MaE3$4a_rNhNVkFAPMS$;K7KTRXdBW zl^gEJZ&=*b_U;m`%b4o&_THuGCr6nKZatyyTzZ{*+i3_yh%wG|hy|R${M7yvl4u+-#0w#FBAnqghmf*gn2bva6yI7G_ zJjMvgtcLU$DxRrBf&v0e7_EnY!qb>#Q#v%gUY#e9{nZ_&jytEgJU1 zPpNxY?gy4%bUnaa0Ay`3G%WM%0&TL_#RV`-&nZ#%&bXTqoKXhCDe>QT{wi*K<)~r( z_bIAD$uWlGIEo#rk9|2~s>^g70FRv#UxFPmh9&$_f9ur`4zH`G2HTib9G&-v^El%O zcmC>FGxLFZ-S%o>ulCzK1r&$`3czFtq$IEkbJP$u)Q(vX?!6WKICcS_k(bLTxpMzx zXowg4PDSQ{p#zaychF1(ENfMvZb@QwO2a}w@OSS9lQuPy@tQcs9g+Bfgc4HVQ~L2P z5c+(3J_%15A5@~-ZpG-4_xr>xju>y=Zso*O(ju{BJp3aZzI6GwutQ_|L2>g9?n;2z9q2J2iM@>}j zzxCR(kQ&FIr2R32umg~k6yCczcSILAn+^Ry1`vtt z2|A>xq(n@M8`RW*jPjFFuY2~S?YV?_1Tu~ho{*UH=a<1#YTk8TU;o_h+{XAm;nVTo zmsEy~_b_YxvqN>DV4A}AYGuWX!q4SVUCQcBo0dcVM_pgD=iy1kMw>!f$2`$WW6hq=;ZOzR!8QPHqy#Yc=dKaSqBtvy=sxfs$c-UZ4o4sWVi#gLW?P6 zj*67Dbd5@1a3rEg2|P@ugd}>0SUm7gGp&o|S$x`8X?dFI%juyZt%TsKW&e5OdiJpZ4}| zh1Xu~Z>Q0K$&Ern|F>z*(2+n{s6%NHzy+lAT$jNaaflQ66j=bdFQRn!^h}mKc<|uy z?6YUjFg)X0sePUWqL?giCqlNT(bWER^=&PCq%^_27TWd`G(+vnmp>QX&ZPp4{ZQbTN2-*ZDRj6gb zp2|LSfJB83rtMa(<{5DuF=sC+xw69DxoSS`>{(jO6(P|fSR=*9j$tG`1cNL>8``Df zCp&r0d%PbUN}zoDIVM@YpYr;C2LUX7TJ!hq7X7_A8JRNMn$jY!b~--G2P^Df9F52hG9NhDA2?2O1Oz1YaG@p56WGg9oU9!aH}$ zT_0d%pc&V**B_N!6HeJb+y`MY^_2iQ|FNAZca;^M#GJ~HOa z@%5FBLAx?$r>Cniw+Eb}w>G{Vz}tX|-`^fn!otERI1)&96e3J0Vc4xORI#?N&)(xZ zph?{VtH<`fzP{@qt=l;|?ytDkrl2}b@5I?1bH)bQ^&QT<+Fyti4J476kEtg;q=x7$>3XtGo$~d0M!J;M+5*+3= z+%8H?{RHX;!#6N8vV;Q3F$>fl#QgzG*BLQb(_ikk{fH?%8`!e?WqNIG~Nb>vHV5Q|`UuWhI 
z+`da{yf^kF)bI=_NETs+_3}E5Hxnvjlm2X2EE?vq<}%03<>lQ%1!H{|s3*rG z7M$~KEiGhNB6ijhqjZnHy!@}RGH2{>)_h5n9Petjpj6-0uyyD?E9{VSPkup{si$Y@ z3c#Rps<#f^IWhPVf&lgkpkh(xyIilApS@sQW*KJKx4h=fJv(cgEvZv`4H71&l>Zhr zdY${)Ti{Q6ih=@|_gQ;aeGA(^6^ChegsR1S(OG|SgSF$(YmedHXPy6CLJf8ta#$#l zh~^3ixG&0Pd=(v>W$2#TcYSR-tHW;XarypKi9K= zYu3Bt9=$fU_K{zr8ZEHltrQ0kisip=D0vK>?@z4x+TWLdIZb{T5;i6wa?1yYLrV+c=BY3msBR;o23{&|fjdGJ!Rmn7q(egIO!!qwi(D%VhZCZo}Fb62gcV zou0{|NQoQ%Hx$ceJ@q@}haRxVK)Rs_lG9wmP1pNxQ03yvVqJNC3+*yno_jhD-rHB3 zjPxuj*L)yeMsyG3LL-DteMi%W-3jP*dZ|J;?6(wl@g7wN~3_R3AG-CTUxeg1;$ zEnD`J3ll_Qb2XKQF|;`UVBP7}soy^vo;yI#aPLT6Rg-UIV%)aog~EHlG2B#wi~lG` zeCQ@9n_F(H^3LB-I>#Gqw9uq124wLL*QLHLO+JymaMKhk1(q!YCwkZXQ0K+Lop;zDeqi@lmfMkk zLipj)Ov#H6UjNrC*3(=$(D3lP%J?(3B~nt-(Vjk%HRXXe-;^++QM8 z74s_e&)OF`G$C)5_22s(E4=HJziXoVk6=Us549*CW>~93Il8Jc~OWb8NzoD5uxH z)kP%rD+bM|yqZHR3}uP+6+iiL+uHNqG8wNc+%9#>_G(OI@ha0jk86R?Nr^iQ#J2Jc z_SL>1K@s@rR0XDE^S_`KtcxPYZ`sou*DuDGQSgghI92^PnSJx`)N(Erb6z`eHVMo9kxmRgi4&e^32Z*iG4Zhb>kaoKY&8 zuiXlCL-t500jYz(lG_bjfOb&K%hx`1&;4-2zE#!F-!N2F2vMTNM+)AFhzv)fw~R^7 ziO$od#h{asP4wWq|B=p@gL-r3$1Mau1@`9Y~}hlT)Y;RiG?mMk`A8z zI2GkItvo9SH&-@U<`6oWp-p$i1m4&#qF7v(CVEXEH_rG~yS}DXgFS1mFQROlS>N<7 z)Gy{)MDzyghcr8cB{?j<@H$wTJ?!CGrC8@(A&wzZFIPA=!4pLMK0I+>4QdMu5v9jjCQ2-oXhUweS)!&i!C` z((;VvY^05&(Y*4vqwVW$*8Dy3#JC*7d#m5%x0q*~uH>O;HSA(r_$!H;0}I`C>?4*! 
z;K$S-MOs<#f7G8R6y_f?He|kSiTe^nR87y$4gyZO#`^caf8yXBwgkf&L`=cBZA5W^ zoI!+Mg9FDBo?F6MLYE1dI&@daq;m(eLmja6bd3b4-}Wb9PGSO$M;bzpYi|N z+ceY6X?pZloOt%k95C|dL(oPx!80_rzsjvc>`D5j`LwU75AiV$6*L5vqF>0q5}zyf zpBfR@a1RHPqyzf;J^2v*nD`8$vZkY>=w;h&s~Z^Fln5mUJoI;7wi#H6{D;oMDg4-S zr~_kg>_fUVS%Mb6uC8t{?dSh$8&=A&{m-wpowRI^HoqH1iV-0&&>nnKhSv#z4gCJ8 z^2D->pZsmpbd^l#jH2P+BPmlz+o}hVaw#Nf z!s2EjdA8;ksgZkG1AD#6PmgA)Ia3Dn(Ky>{5Jd1dTClivBd2n%BLY8ejUG&di z{p-!^J`IR@{augy!nXa}5`aU3`lD1$L&b$-sjKC-D<%w*$quz|-wI%`*#_U?Q+c{s zK~5;n$uRN-Y7F_3H%sjq!yz`yLe))jQ=o>Dv%8=1NKQzQY zA(Ja-iv}8MsYOVzxR0cLL=Gdd>GGaSXh}>iDAL1M7ExKLL9Cy%vUq=Sl!2thg?E6wi#%jcQ$c?$Oo+(O>${$2KOHkSa z^RL_nqI_uXNzLLWno5yfRmU{Q1B47CGG-2V41Do+=pM zg8$5XBw|s>l`;8A9_yc*ho|`v(HElmTZthI{VNQCr*jE;96Hex@yyL@lgTR^|mV(^U0ma&2Jf{{7M-uq2#2cdj3E0*Dfa zgNG*?m-gbjGr6n>07N4elgQqLRK4jSv*XhY-hlanOT$#RH2Q(t2nM}Q!t8tfKZx?{ zBBkZ@#fvhbKgaskjybcw49f`0hF{PL&i`*?Xx;Jn?SR7;?aehg)Wle2R) zyNfDRBGSkH%(#|41(Hce;aIjzGK!rj6m&AT$0D$`>?w#7JQCSba&pR}^xG>3-n|op zJMyWJcy`Ovr#!IZ@j$^P205Zhy5?@ino#EE4R|`EkX%|en;N;kPI7os4>&yr`?7O# zig{w)FfLi5ZD)6R$;#}Q6&OdgfB*iV8DK|rP;08rzkBzNc-?ZxQBnbb!0{F5djU%k6Aq4mq075l$10$Mq^}QiEBpY z-G2N2or9gd8Xx!~ZZNrFmBF;ro}&D1KoW>$7x_Vsm<9-hdr`H}P3fFG+7)&81HVfc z85>WY5h56q7e+yGZQi_;>@$>emqN=!@SzX;_&o2rjj6()KFO4d<^wW~hu$&+ECvR^ zbA{az+L32**x&!Bt67O43w)3S-DtVs=Fdq3oB{{ER#w3Z5<^ z;i4G&ZUD-X4A#I_Ti@7-dicN!k}wJZ3VH4^)c6|fae-Vwl#ezY8F@yJH-R&_F=nZt z19W4xk3MO#dfk>3Q1M_vR_9SG1Cwprj^pP-_|N83ieYRiQDoD)Rkad#9>Y5$`*e@Grs}`Vc zn1LCP(|y~`2wBC6n9sVGaafMHx6bV7a?OM=hxg6#m*UJloOGjzTC;l>HQ&S z0CDOtGBV0L?tQ@yO(9?^-L>;*Me*RQ{b�ELO%TM68s)sXVFz;V1Oxi6Tm!8aj+b zItyKJEuOZ4!NKg(J{)0?Kn4W{ka&r!SBHlNn$Jp3dhl*W$Ye(c2Le##TcfvEVz!u> zo!t|BdJ>EUcQ&zoKTgNXIO9S3)`layq@qF{O*03Y$)T#~^)8<*KRr5(TPz7fsh)+! 
zIWqRP0Rspz?eA{yr&v8U85NbCLL2=}$p0wp2?e87xvQnS@a`#%fc)s(VV>2!T@863< ztGyb5S3}#hwZpqV)SWB zFbu>>?7mi5xV_?A%j(*OhRrY`lX2g17!g!Ch=Ijq$S5PoiH`eT&J&oCu>_<3-rvY9 z{}jD!4?D(u>EJ;Gv=@s+h(`@&?0`fQA29op!wFSK$G!bK@I32b=z$)D9steGO(`v2 zepx(<{aT>^!W0}mJ+;e`3thEYsrK*q2YsQ9_z6($*&Q8MI{W3`$7B8pt-WA-=_F0bq> zP^-jc;`|m6-;;2CfSNcdP@L9*^}|ZA{B-+4N5^gZs@amnxVSSum|i@SHix-!#P>{O zFwn4*g7eu=9u%@{`X_}>AdVm5$V|vYZt`i-aLZu;;=Q>fzHAP1RWU}Z-gKN z>mk0;kEq~NVJ9FJ1`aa}3jpXujQGS8j(R2r<_e3~_EVMJGqC9sJvq=Nc_a>Ffp$Ol z1ZUFu>!|IOP4@VXeAI7xy0%sa<#gW*$L(v>;N~X-(!`#?=9vyQfrCPDD)TSg#3Ij!GYlAnqTK9=Q5xL*i5`WZR+1yNa2CiWqD&B8~gi9M#O@ED*se+ax^X{nHqtbj0EMgFI%?L zV?Igq7b9kyETZ6XSpgIigfEFfK)!|n{tLBybHdq+rwoxcJFeOf2eS%p5n5z&Aao`|< zOmT^9h92Ui2fL+A=?DI=3xg15c+pXa5a8u)`y}LCjTAq44(7h-@PH1*rC|<1gzv|4 zK=hIvG{_A|8-i`cYW6L@2??vZdhHsaD2W@22tSZffa&JV7=`l%HB1CX>GY8s4p}vt zhvc@w{{Ao=6N3;;&{N12kdeuMJHg3u+OY^dY}Vpy+oj%LKTn3dAi%();LnlPZMWS= z591LKHGHN)U*9u_*qj7xsF-og3mQ}(?8XEl>Q-#pWIyVIfD-koZXeLO5y)qGTXHBK zWsB9XA7ICe>29Q8zMiT-n! z$MPP#(4LB35(ON|urvZ=yt$b+7caZDT1KW8Z5c#zjCeU{hls8n@}bp7id1@Lu%WO~ z?g!7@>9yO}%Msl`hXTLjtKmb{_k-+K?0cC#eV6G^Z(q>V+Ox zMq{3c4z8yKF`<%+(9b>{pP!FzRR9}aII3< zBI=OhqO#Zbdn$w%IL$2z_?~EkQwzHz`3NbLGNn&XiR!*jM~dz)k9j;})m>c=p8biA zj>h5)Eq-SUz=YC|4O_XzmjB*WTdmEcLB!@VGyN*pvY(SPfAzgp}4!(>K zFKAY(kz?aF{+*Mf(~=-CxsBLVSTfu#zo5-d6^}uNS~cn=2$A(saiZm(0O3TOQ5gFr z$t!CehVSkKK@XOSKFE3$WaH?NXr*j_yMrX*5t+x!SL6NJ5%GiW9ORZD=^Q`hL9huq zLKBLmv5_eZG9n9|U^8TdC|0Hz=ze;7dNKwphRu)C;*u{_|^VnM~15R$7>glOcBz++c!9^4Z zA#QkAT_nw;!C^Ek0>7u>Ti%QtMzVJhQw%IYZ1rkp+#s;DO(Ya{UKl+SbYOaWDcFjo zaE>m?aDRQ9y>?iE_)w8$C5TReLc!r)0~>+-z9)Q?YKSVxOx>HiA6OmxwjZi*Yz$fG z5GY~LQCkN5#-mR1T*-(5yz!!QJU1aaFaR8|s6Q=m8Z-009Uj4%@NWLk*nI!Mz@iNI z)Dl$HWY7zqzyV08cy0-$;UfLj5O@!`xwu#kaCJW*`DXC*gPYv~*7K%%dKr#u($1i? 
z7LAfR(pxMljA?_Qp_7l{n(jIH@l_>fAzmiABpa+) zB=JW$I68)-2zSPgpucGztMu`e$c^axXFj4xz0n?5uT;RiZ3fM) zE%ewEC|`qQi5FmthSo1U8(zCjC=^zdK3%xCw*1_dM1de8-b-n0jvnAWG#1HIM*d+) ziv9FTGdx(wPMw0#sL5~0+0BguPy=lOYp4#rTdb0*ULxQQCx$@Wu*iUk?TPtR(5%gc zu=>Z@FPk-O5fz*s%S)gI)QMmZi!;Zd#;-_#!Z&h2tDhLzIpFdH3;n*r#%~6sgKwW0 z>Nn;ZqKr#MOv`82XEJ|Zz-o>F=7Cs(_xTr7C8 zD;$)rcdJBD6{!0jSM53er@#%fePm6gsCto8)?nQuBp>CN5hSA@KA&%CKY}t2+G}#N zBKZrxho2U6xB5Qn`i;oV`UaXB%%FTZ{9q;W|Kf>WY~EET_SoF##s@zM$nMPX=$Ci# zKuxqn?#u;Q!yV46A4}PNsAl_qah(5Da}LUv{-k*mr`+ebOxL9(>oxbkIXfvm>3)n^ zx^CT-8wZI-@bsu&`xDfPpG5Ua!EVv2Y*POqK2zN0hPxTPsZsBM?Q@U<&@d+)l+WW_ z>T(N#K^}Fc<>kI``RzVjjjrTP+-gkWy|;BQ^P+qL!^6!P6fCWp;9ZWs+0wH$cLeQ! zEv>Bl@bG2ssB8H%sUTWnL|{U0aFJ!kCXal`I88MiT7My8oP{OT6dnNK5ftAcN=)c! zmW=D6Duu(GCS9bQm*=G)re6Ru-KZ=TAIElSQh$L?;J}v)kxq*`(l!I*LHY6+f(vHI zZTo0|6JHWdOt>VZ4?P@3>(=~PY+Rh}s~@Pu^~A3siJXN)#fB_}@afg41XK;7@&gq!%05_DttYY~ z-&@|)bX((N!berHE6W-;VY<>;b`RG?U73H;+SWE=;Kggo9Z<0_=OIGU9aN?_V*LZi zk8r&=Z|j0!e}Dh%1cI{BKAfb9ku1>LGo2z@0%SMdD91$HUz8_a3{T_VF+(CLnv~Vo zUz+UFItO38-O-OZ&xYy6K^X8;T`iqTFHR2%D%dQ!sZdD$IyAlVbs&LOn(Vq;qf2tD zE}*A}>+1=1CsxfbL8K%fTbD23pm+>f2`Nn5vKdiJTze}t5^qJ!pXLc#e6*62ak4GL z+)Z3sKM82@6et(bj;MLiqlj;K8s0pgHdQfOA-Mu(@s1;QZW4 zYDAeWQb050cG~b<+3X3F@Ottz0w3;~_t=Hcwz#_fEh_VK_|{>Zr-moO6kVC&Q3mq3 zj-Bjr)@NXF&_sA@3e?? 
z<2MCLPhYvR4oJ%E{7btL1h|b5-@*w4c}E?0|J&8yQm5Y2*{R5B%+7qnAh|cW)s*t9RJpLZsI6XMw9*juuw6^Ri`Zh6o0m z#_l~HWdNNs4;xT5C_WlR1;v3#S%rMVh78YwLFXojk~_T8ESMO?BVh)vkxC3Grv9NG zd#2<89H?r82PS~R=Q1& zr-$2yAk#NL9mz$Iu|ZK+sHh0Kq|$^#=r5|5$vWTtSfF`xdYI+MDq&@j=)wfM9}`G+8i_cI8$1{nR3X)h#^4r z4Jv)S%jaEM2tiZAREV~698!Kan#bF5aBn&u-3qlhnnhZ~=|Q01>j)!v9mjlqP0Q!* zxO0@qeR`}l@2mT7r+V4{HfaC2JN1XX-2b0H9XGw&H~DFK{0oL2+c}^3*0Dc3@*nBH BYr+5k literal 0 HcmV?d00001 From 8db5fd501f1f0b0badb534617d443e682386b3f2 Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Thu, 8 Feb 2024 16:14:30 -0800 Subject: [PATCH 326/648] updated Signed-off-by: Ankur Srivastava --- 3.test_cases/6.stable-diffusion/README.md | 13 ++-- .../3.stable-diffusion-eks.yaml-template | 76 +++++++++++++++++++ 2 files changed, 83 insertions(+), 6 deletions(-) create mode 100644 3.test_cases/6.stable-diffusion/multi-node/3.stable-diffusion-eks.yaml-template diff --git a/3.test_cases/6.stable-diffusion/README.md b/3.test_cases/6.stable-diffusion/README.md index 641e5dd8..12ac53ac 100644 --- a/3.test_cases/6.stable-diffusion/README.md +++ b/3.test_cases/6.stable-diffusion/README.md @@ -254,13 +254,14 @@ export TAG=$MOSAICML_VERSION export PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:23.08-py3 ## Job parameters +export MASTER_PORT=80 export NUM_NODES=64 export NUM_GPUS_PER_NODE=8 export WORLD_SIZE=$((NUM_NODES*NUM_GPUS_PER_NODE)) ``` -#### 2.2.1 +#### 2.2.1 Set up `do-eks` container First we need to run the `do-eks` container which has all the necessary kubectl tools installed. Just run: @@ -282,7 +283,7 @@ cd /eks/impl/aws # Next we will edit the nodegroup.conf config file ``` -#### 2.2.2 +#### 2.2.2 Add a Managed P5 Nodegroup To add a managed P5 nodegroup, we will follow the steps listed in the [aws-do-eks](https://github.com/aws-samples/aws-do-eks/tree/main/Container-Root/eks/impl/aws) project. 
@@ -318,7 +319,7 @@ aws ssm get-parameter --name /aws/service/eks/optimized-ami/1.27/amazon-linux-2- Next you can follow the steps given [here](https://github.com/aws-samples/aws-do-eks/tree/main/Container-Root/eks/impl/aws) to create a P5 nodegroup. -#### 2.2.3 +#### 2.2.3 View your nodes Once the nodes are created, you can use `nv` to list the available nodes. `nv` is an alias to `eks-node-viewer` . You can see other aliases by typing `alias`. Below is a sample output with a cluster with 2 `c5.4xlarge` nodes. The status of `Ready` means that the node has joined the cluster. If a node is in a `Not Ready` state, you might need to manually terminate the node from EC2 console and EKS will restart it and the node will join the cluster again. @@ -353,9 +354,7 @@ Allocatable: vpc.amazonaws.com/efa: 32 ``` -#### 2.2.4 - -Next we need to build the Docker Image and push it to [ECR](https://aws.amazon.com/ecr/): +#### 2.2.4 Next we need to build the Docker Image and push it to [ECR](https://aws.amazon.com/ecr/): ```bash docker build --build-arg MOSAICML_VERSION=${MOSAICML_VERSION} --build-arg PYTORCH_IMAGE=${PYTORCH_IMAGE} -t ${REGISTRY}${DOCKER_IMAGE_NAME}${TAG} -f 1.Dockerfile . 
@@ -381,5 +380,7 @@ echo "Pushing image ${REGISTRY}${DOCKER_IMAGE_NAME}:${TAG}" docker image push ${REGISTRY}${DOCKER_IMAGE_NAME}:${TAG} ``` +#### 2.2.5 Now we can start training +We provide a template YAML file for diff --git a/3.test_cases/6.stable-diffusion/multi-node/3.stable-diffusion-eks.yaml-template b/3.test_cases/6.stable-diffusion/multi-node/3.stable-diffusion-eks.yaml-template new file mode 100644 index 00000000..2e60a191 --- /dev/null +++ b/3.test_cases/6.stable-diffusion/multi-node/3.stable-diffusion-eks.yaml-template @@ -0,0 +1,76 @@ +apiVersion: "kubeflow.org/v1" +kind: PyTorchJob +metadata: + name: stable-diffusion +spec: + elasticPolicy: + rdzvBackend: etcd + rdzvHost: etcd + rdzvPort: 2379 + minReplicas: 1 + maxReplicas: 96 + maxRestarts: 100 + #metrics: + # - type: Resource + # resource: + # name: cpu + # target: + # type: Utilization + # averageUtilization: 80 + pytorchReplicaSpecs: + Worker: + replicas: ${NUM_NODES} + restartPolicy: OnFailure + template: + metadata: + labels: + app: stable-diffusion + spec: + volumes: + - name: shmem + #emptyDir: + # medium: Memory + hostPath: + path: /dev/shm + #nodeSelector: + # node.kubernetes.io/instance-type: "p5.48xlarge" + containers: + - name: pytorch + image: ${REGISTRY}${DOCKER_IMAGE_NAME}:{TAG} + imagePullPolicy: Always + resources: + requests: + nvidia.com/gpu: + vpc.amazonaws.com/efa: 32 + limits: + nvidia.com/gpu: + vpc.amazonaws.com/efa: 32 + env: + # for P5 FI_* should be commented out + - name: LOGLEVEL + value: "DEBUG" + #- name: FI_PROVIDER + # value: efa + #- name: FI_EFA_USE_DEVICE_RDMA + # value: "1" + #- name: FI_EFA_FORK_SAFE + # value: "1" + #- name: FI_LOG_LEVEL + # value: "1" + #- name: FI_EFA_ENABLE_SHM_TRANSFER + # value: "1" + - name: NCCL_DEBUG + value: "INFO" + - name: NCCL_ASYNC_ERROR_HANDLING + value: "1" + #- name: NCCL_IGNORE_DISABLED_P2P + # value: "1" + - name: WANDB_MODE + value: "offline" + command: + - bash + - -c + - "composer -n ${NUM_GPUS_PER_NODE} --world_size 
${WORLD_SIZE} --node_rank $(hostname | cut -d- -f4-) --master_addr stable-diffusion-worker-0 --master_port ${MASTER_PORT} benchmark.py --use_ema --use_synth_data --device_train_microbatch_size 4" + volumeMounts: + - name: shmem + mountPath: /dev/shm \ No newline at end of file From a859710a7c5e359e8f4315638aa8c24a653e6a0c Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Thu, 8 Feb 2024 16:34:29 -0800 Subject: [PATCH 327/648] Added steps to run distribted training of stable diffusion on EKS --- 3.test_cases/6.stable-diffusion/README.md | 39 +++++++++- .../multi-node/3.stable-diffusion-eks.yaml | 76 ------------------- .../6.stable-diffusion/multi-node/4.etcd.yaml | 54 +++++++++++++ 3 files changed, 92 insertions(+), 77 deletions(-) delete mode 100644 3.test_cases/6.stable-diffusion/multi-node/3.stable-diffusion-eks.yaml create mode 100644 3.test_cases/6.stable-diffusion/multi-node/4.etcd.yaml diff --git a/3.test_cases/6.stable-diffusion/README.md b/3.test_cases/6.stable-diffusion/README.md index 12ac53ac..25d69d16 100644 --- a/3.test_cases/6.stable-diffusion/README.md +++ b/3.test_cases/6.stable-diffusion/README.md @@ -382,5 +382,42 @@ docker image push ${REGISTRY}${DOCKER_IMAGE_NAME}:${TAG} ``` #### 2.2.5 Now we can start training -We provide a template YAML file for +We provide a template YAML file for submitting the stable diffusion distributed training job in [3.stable-diffusion-eks.yaml-template](https://github.com/aws-samples/awsome-distributed-training/blob/stable-diffusion-eks/3.test_cases/6.stable-diffusion/multi-node/3.stable-diffusion-eks.yaml-template). You can substitute the environment variables in the template manifest as: + +```bash +cat 3.mosaicml-sd-eks.yaml-template | envsubst > mosaicml-sd-eks.yaml + +``` + +To submit the training job, you need to deploy the `etcd` manifest first and then the training manifest. 
+ +```bash +kubectl apply -f ./etcd.yaml +kubectl apply -f ./mosaicml-sd-eks.yaml +``` + +Once the pods are created, you can use the following to monitor the job: + +```bash +# To view all the pods +kubectl get pods -A + +# To view logs from a pod +kubectl logs -f stable-diffusion-worker-0 + +``` + +To kill the training job: + +```bash +kubectl delete -f mosaicml-sd-eks.yaml +``` + +#### 2.2.6 P5 scaling + +We were able to do a scaling test till 64 P5 nodes. The following charts show performance improvemnt and raw throughput numbers as we scale to 64 nodes: + + + + diff --git a/3.test_cases/6.stable-diffusion/multi-node/3.stable-diffusion-eks.yaml b/3.test_cases/6.stable-diffusion/multi-node/3.stable-diffusion-eks.yaml deleted file mode 100644 index ce30b604..00000000 --- a/3.test_cases/6.stable-diffusion/multi-node/3.stable-diffusion-eks.yaml +++ /dev/null @@ -1,76 +0,0 @@ -apiVersion: "kubeflow.org/v1" -kind: PyTorchJob -metadata: - name: stable-diffusion -spec: - elasticPolicy: - rdzvBackend: etcd - rdzvHost: etcd - rdzvPort: 2379 - minReplicas: 1 - maxReplicas: 96 - maxRestarts: 100 - #metrics: - # - type: Resource - # resource: - # name: cpu - # target: - # type: Utilization - # averageUtilization: 80 - pytorchReplicaSpecs: - Worker: - replicas: 64 - restartPolicy: OnFailure - template: - metadata: - labels: - app: stable-diffusion - spec: - volumes: - - name: shmem - #emptyDir: - # medium: Memory - hostPath: - path: /dev/shm - #nodeSelector: - # node.kubernetes.io/instance-type: "p5.48xlarge" - containers: - - name: pytorch - image: 159553542841.dkr.ecr.us-west-2.amazonaws.com/mosaicml-stable-diffusion:0.15.0 - imagePullPolicy: Always - resources: - requests: - nvidia.com/gpu: - vpc.amazonaws.com/efa: 32 - limits: - nvidia.com/gpu: - vpc.amazonaws.com/efa: 32 - env: - # for P5 FI_* should be commented out - - name: LOGLEVEL - value: "DEBUG" - #- name: FI_PROVIDER - # value: efa - #- name: FI_EFA_USE_DEVICE_RDMA - # value: "1" - #- name: FI_EFA_FORK_SAFE - # 
value: "1" - #- name: FI_LOG_LEVEL - # value: "1" - #- name: FI_EFA_ENABLE_SHM_TRANSFER - # value: "1" - - name: NCCL_DEBUG - value: "INFO" - - name: NCCL_ASYNC_ERROR_HANDLING - value: "1" - #- name: NCCL_IGNORE_DISABLED_P2P - # value: "1" - - name: WANDB_MODE - value: "offline" - command: - - bash - - -c - - "composer -n 8 --world_size 512 --node_rank $(hostname | cut -d- -f4-) --master_addr stable-diffusion-worker-0 --master_port 80 benchmark.py --use_ema --use_synth_data --device_train_microbatch_size 4" - volumeMounts: - - name: shmem - mountPath: /dev/shm diff --git a/3.test_cases/6.stable-diffusion/multi-node/4.etcd.yaml b/3.test_cases/6.stable-diffusion/multi-node/4.etcd.yaml new file mode 100644 index 00000000..0b115fda --- /dev/null +++ b/3.test_cases/6.stable-diffusion/multi-node/4.etcd.yaml @@ -0,0 +1,54 @@ +apiVersion: v1 +kind: Service +metadata: + name: etcd + #namespace: elastic-job +spec: + ports: + - name: etcd-client-port + port: 2379 + protocol: TCP + targetPort: 2379 + selector: + app: etcd + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: etcd + name: etcd + #namespace: elastic-job +spec: + replicas: 1 + selector: + matchLabels: + app: etcd + template: + metadata: + labels: + app: etcd + spec: + containers: + - name: etcd + command: ["/usr/local/bin/etcd"] + args: + - "--data-dir" + - "/var/lib/etcd" + - "--enable-v2" + - "--listen-client-urls" + - "http://0.0.0.0:2379" + - "--advertise-client-urls" + - "http://0.0.0.0:2379" + - "--initial-cluster-state" + - "new" + image: quay.io/coreos/etcd:latest + ports: + - containerPort: 2379 + name: client + protocol: TCP + - containerPort: 2380 + name: server + protocol: TCP + restartPolicy: Always \ No newline at end of file From 6064cd4c4fdf1eac63269eff5bc3fc0ad879235b Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Thu, 8 Feb 2024 17:02:50 -0800 Subject: [PATCH 328/648] Updated scaling plot Signed-off-by: Ankur Srivastava --- 3.test_cases/6.stable-diffusion/README.md | 2 
+- .../multi-node/p5-model-scaling-eks.png | Bin 57588 -> 0 bytes .../multi-node/p5-model-scaling-stable-diff.png | Bin 0 -> 57243 bytes 3 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 3.test_cases/6.stable-diffusion/multi-node/p5-model-scaling-eks.png create mode 100644 3.test_cases/6.stable-diffusion/multi-node/p5-model-scaling-stable-diff.png diff --git a/3.test_cases/6.stable-diffusion/README.md b/3.test_cases/6.stable-diffusion/README.md index 25d69d16..0b65b52e 100644 --- a/3.test_cases/6.stable-diffusion/README.md +++ b/3.test_cases/6.stable-diffusion/README.md @@ -417,7 +417,7 @@ kubectl delete -f mosaicml-sd-eks.yaml We were able to do a scaling test till 64 P5 nodes. The following charts show performance improvemnt and raw throughput numbers as we scale to 64 nodes: - + diff --git a/3.test_cases/6.stable-diffusion/multi-node/p5-model-scaling-eks.png b/3.test_cases/6.stable-diffusion/multi-node/p5-model-scaling-eks.png deleted file mode 100644 index cb083f3d948f2002942ce901c7749f147e246428..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 57588 zcmbrmcR1DmA3sb>inNhcGE%aWt*MkKRLb7T9@(QwMHH3n3Mr9&>`^uuk?l~C?bv(W zk9VK%@4o-Mulu^YuFv)Dt80ad6VNGp4$v?_gtL?Ot#ufEfS& zTMiC3_ELg^mjCw)1gz~$1@}IXsKtlS+MK^(PesL`Py9#yQ9jO$it3lX;+d1`&fz0u zC+FQw9c2?^ALi<5xlgQX2va|KVIY6=$GayE9p;=*ySryk@>09>`^_KspBHS9=4|*p z-F5L0UyNny)M|>6V#S9O;#?1x?6tF7W$jJ+LTdDn*F}or8pBJouUp7)d z{@;D<71Y@%NRbxjK@4?=|N9 zf&&K*yiwvgb?OwwN{`d!e8XDGkLYv#4u=ALX2!ZXf-291h#1>R?cR}n5!cvx@LH0N z<Q%%UXl~J-{=_uO`-iREb$Nb5DT;S6&b;NzC;y=3trdQ9)vK#Z*2~MwLLo|7Sy|&t zV})_?_A85b%KX_bF3pbeU3tHOot-@^H&-poMt`U|vAXEq`h~%`D2Y2mKiPD1A6Jap zR1Q*|>8(9;_N=gJEvxGsCBiB{+rEG2jvcna?{~iFTSmQ+e_H`E&XmzR_BRo)HJn`P=kX?D%jgOre8h86xK) zB_}5*$=0r&;yQC%%5kJ(X1K-H)>ePAw^C-d)sSz=+271=&uXEQ zYonRnsv`&A%fF{EU>#*9D-QDCOy7LBuO@7&WS7e26oa#Py@cB=|Jw2tgQ&^(ti&GA zrO`~{c5$A4YBTYtbY!zOrO0h8|6H(uwnbf%c5d0?%<#fUT3wdI;7(sgL6cua_m&s> zqs-V1D+9TA3+l+!jB;Om?QlLpJICR7{#;jea%5CgHyfMR!sK5EN@vKMH%z$AA>x+% 
z0%Ru7Kb4|kV2`;JCVoaiVO@Q;y{YT;z)8H)*53XgKfhmuv{R}@o5;1qn|IFHK7QOu zi~T?Mwl+d~sz1t$_?Ox!d6g3+9=@uYBUkPVSF4 zGj2akRd}{A}Xf40A^8>`XVm$-mXzuiPT0{#FMczIN_L`mG)0 zoI3}_#rHZnIhlNabF$o@O|6MV(k|wxWR#HpeR78wKC<-jj)Pb{)f`=m$B!ifDJL1T zX)>(Hp?B|6?cB9%p(jATAz`kZ!wVbe#EBDBxZWJ!9Z%0IOSw)T;_#T>K~*QWWD(!H zeC^AZFCkKt`gfniUYI?i_JIyZD??T{)ktxhLQsBRU6e}87wsp{ zo|QH>24Oe1U#i`OUp1UmNcB-IPTAZ3&~5=OiBj`JoPiPlc2Rgl#9-5BMjQLhxw3_; zi$|#P-R8>L+OS;y0RjEQvAU6Fg7+{TKYo1Ox^=g@3uFt|R;Bm)au*wh%Xx&pJMDwx z;OFB*Q|ikYXWw7TuUjCaCd@_4z|ddJAm97jhl5nAnxG*pb!Vu=k6BcetTucLm)Ns1 z*By8A!KU82D30sbuWw-#Q2JZ-%BAQf>u^m^9u`Cvh9q$>6pKD<-4UhXb;x> ziFD_Ti=lmobEOcc9I<)x<{~mV;_ci0#ES+S<3wdza9dsezPkSL^1F4Bk&(NP-h7FT zDC{0qC7)s*qx~a1qIzbJU*$$ZV@b)H^?b>oIYmCmFrbV z)l>GY*h0Lgx3>f<9{IP2`uiK<2QE{6SCaKk$}RNn@?jONOccGUo9p-t=a%>`zeYzd z6}T_lYKWl?;#H~rIYCK3%5~hhYB#EKMU=ce>W6yb%~w~V6>LY_GdgleS3h6dv2Wi6 z8yg`V9UbD^Cf+=z`S<;5INYpgMH@OQ*K4u!u+?cA6h*McjPh+OG!;lb!Z;DV{5f9a53`b6?Vp z(o-u(J-fJ9qO{XY=&4YN6pvNv-i#Jboow?Q9-=*R{Y&qUa~#}4LfdgGYa9h#9vtAD zdx$G#zDv!ti$_U_WZ7ZjR$y8esXnu|G$!9MJKB-Sf`3Ss)y=s5ifqp+?fCtpM#}rd z$owcAL*`&XU1ibQ4D*(Ko}Qj&6nS(p!=E3okowJT6u4(&HNQF-nspcC?_g#&b6RVN zy~uJGixNs14tm%W!bq#gN7JuAIb%vL0%rCGW? 
z=CngoOs5yICO@hCP_#d^JC3xb>>eG-XwkVS>i_N0Ru;E8$|X^M?OaD@)Z3NZG~}Z8tYJmZ`p) z+@6-BE^eVh`lmu?_6q7)e4hI*S{LKum$1z7{c&{r#mTwMn7Y(YYH|An1Ozte&Cky> zp|f6^W5TkN*rIU*oLH+zHAogAS8yWe*4DZYFRVS&BrkToikz0)HP$;N!1ra^bEGu9 zscyD+ZLG_==b2K3TCYyDTm4ze6|BqttMTf;Q73w^%hTvhF9rTg)5*Af^>%YYr9Yc= zgRG9aIu2R-FZ5I@htc*|_tw*><~iHq-^z%77JAHl9;IjS=cir7g*6@Vn(S3X`#gW< z%m!s;Wto-f#_53{>%F|Z97o$r&vAH^;Dhjelyg!NbPFT#Lw)fuzMJWX>SX6wkKO+H z)_LMjxUAc2^ot36ZW^k~sne%JB<=ew3tX9pJ2C~aHA|bCUd2VaKE=CtY~Oyj)ZjNd zz%pJ=()p!{{TfjAjAhrJJsXP-2FRSkded&(#v?5J;sFgqDr$W05-%6mb9paMK%7%S zJeQn*-J=AV8B@&EZ85UBKI2&eR`}r>eQnxKMFjS zEVBE)XR#D^Ukny#TjjX^`P#Gjd1txh@zcOi=3$T8D=VMi4Wb4mn{)0Azy2+(>oIc` z4f!039J7#Kk!AVI8$W*ha2Wde1fM^JD%_B2e9_(Ay|utY+OW)zSU&eTvJ+lVX*Kro zVEZ?;!#99LRC^Aq|9W*@=bh`!&_f>|QIACj&&5GrcCY0LhooX5IsgNG76tMrHC}eN z5#jp}9@yI18DNRrXWLA}Wn2Q;WnGg|oank4(+uV)WK#LyrFF;kim0}+N}SNpU_!-s zdE~l3s=bI&#r6v?552IQFfQ$_45Fo{=N1;;AulhFL!tCw6MX|Y?!>)yF9ozickwDG z+xIKWt5!#)Q{(R^nl2^WNNde^JKhw3wWpL(H|BOz zd`U@((R`+Vh3tX2!UNl1e9z9HAq&y<02hc;9TXC(PuAck<)D0QrlS+1>RDSITSFBP z89B)zZ1B)>Da^Vcei& zb?qceb6`|5WteEH9m22iy>Gi+bFB|xl{CmK|8)(&Da-Efh4@K87v{UadwY$RC(1d@ zC>(qDnw&XzF7A*t?o?=t1ygh6_o1fGEP8A;dI8a{_0&C|;vz5nAO*@S^ahcn0DLcI ztUO}WNw!>@tQJ(WR5^cM2um0EN6V?f&1vG#+2ZQzYAU}!rM@h@Dv?T8P4N!z%*;$` zChiL_h5q#Pr2P#QE#6Vs?U$gFFLl;>>NJDwODpT9=Lb$V$aZymtyzM!6gM>;E4>vl zGH{ZH4s{?m`u;XHDI;fRXQsP9fP2(C*naHANj4t#NV@v@HCZQ5F^DJDd1YkgEpNVj zc23Sd-@Qju-D<9X(GreV|71W_hNBYRd@<x^Ix=3S(P^b@&3^c z7M3uI%UQ}V*sY6R0^*S zU-f(Va6_&z&Qpi?hpS(-BuhyEW6ZU+wVU)DMp}hyhFi024RdH0QHgFZJs8L84;JtM zEQnG?JB@XHA{ld^)GhSPKUx4J-ql=i@il+S=+d{Xf}OM3qK38Mk|g_wj~>OgQ$B9U zNAEIQok;Uq^VG~wDp)*RO6nc`WJS&~(<#S+u%vB?;2BWVRi!4rePaTk`B>Y~3YY^Q zN%Bu98((y{wSq}a=qt{Yb$%!T%zzJK@1vuieZHQwPwj(}qHxTy#Nj8tzM_Y(f3ct{ zLmdk>&~)S;(Dhom6FL*cX`5ZcDX^9<{KCRNm?UiU zLi+`7rhg>p1JQufmL6%FtlcGF>S2!ve?3beX{_X)b-eg0Mp@Cor-Day9aH0(sv)Z_I*|&`iYZO zhThk89X*UX&IhVf2)5c&9x#RDlww-PJ`2)qP-c0KZimXn0Crhm?abUn^a)@Cs#b3` z-nLX@-$dD&!A9Pi9>$|L#cCA8MUI&L`Rcwf87r;6RLL8qlo257@D4%Sl 
z>9@L}rPjOD>s6KN|Bj8R_-tddu>6Qx>=?Yo4ZK3aeO@TaeUcW$EgGBkXQE~>xK}v( zZx9x)t-aS*u;5niCC=lJ6sS;|TdjC2_<4DG%Lw=%d(o_ebW}iqZ)`Eo=Jz^uqi7tj zpP#SS%}VEMD@M_Fb#=+i{8V#t@EdvgsxG(V_dwmO4~M75u5XHwu^Y5|*|8?_R3^3I zib_g!Kx`le2TMtQfCuHXS?Fei&6f&f7d$J+Y!fsyt#ZoG%SDeTk}TTRX#}s!xuejk z=_R)^tw!au_LXV(?lWk?)boFQ1wVp>|yqpD!92;2H*ViY& zfcnSF)VjL5_wU~a&pnL(6O97zFi?NsEO|sV#h}z7vmYd^i1gYfuy>R*pNpnq(j!x` zAMNj2&c#Q2j^6w^Tdp-ZZ*?cG#a%jeX>R!U?-zd-q{s3e1P5P_cV&&@X+ukN^l*jsWW=^AdZ9?wE z(xnR;MWJsqpp?|)RGt&E=*nxGZ55*1#K5kkrNs&b<}o=VChfF|uGje$>5AK$S*|nj zqjR>I-NkqLc~6h!&#~jYnE+2=OZuQ?K?nN^2JJFcb6m}x*(NvAbJ0w5XHbklB^xKKq4r@{CE*Sx>tRlg}WrHPEN@uox-b}M*rblOj zCr>B6)1r%$H0w%R@E*0gLtPptP9&#i`;QuEsC#OpL3Pv zbLwGI^m2e`=)bm9xK?e)?-YOL$1O8QcP<0%~Hs|Z5Adt+c-Y{z}19K zr&{3d;u9_^Cl^`H?&b%O+A}m%WnU*tC|rjiQ=nO8(&x)TZGlF)v0^sk*VrEaTbXU6 zRx6%CbAf$$$JuX@I%EPMvxB!5lFlRmj4MS*9cwD690r~Iek~3q4g`IM)Z_TFbCP|N zkk{(65<_v#cgC%Q5FyWy@+oJZ?tZ8D^*-9CEqaWFWO>AGuzmZ!@9)rNN=i#7pt1e= z`}fu04kI&LCnr-NNT_S!K>O>OLyF6vu>@PD<@dkLop9+#QAxKd-Br02+D$L@%HoV6 zghPJq9I>-EOt?A3U4MUjLO4MFltHE6GO-^tde#QQ4>4GAMQvv?QlGdE~B3PQ!CcMe7-U4GleQv#3pu zlV6zao%>Z*TH3~5xI9_S9BB_FBSAZN?_H=Ge=F9=38Y8pBW>)a4t{=J#ep26$MXUt zPg$+*Oh5@zijs?i!%2$gfHqxt8!2 zeSiw8L9t`^VU01bjl-eiyvpH_SJ$9Kf}13!g)g(WbRJsZx{JboDbwm&689&1D{#Vx z4LTGW zeLd|tNkOQJfr)Ifnf|WIL#S#{j-c6vl$4xVoF42+zEc{cJ2mQg<;8SJvT^lZpRPuF zK5J7&PxBv%gs@4h)^KacLf(V#h3wf2Y1&+wSMCc0()^F*bdt%~g>ly11)0Y-B;ibx zx}pj2yl~0g$SB5g?=OJRxU9G@IC-Q4rl7hIK*Ek&D?0^#iILy%t++S@!o!E8-&Pk_ zH=du|K*)}y@=R&{E0ZytiCx=#OTXcGI%t;Xci_?u!}JERPAX|#IIkK|F<;lda`2jg z^7H1H`L)m)G+%2Lfyge|wzfg5RZo;CD>oU_+HkR9Zm*g^o~P?J)6!PjtV;qyZyVx` znl^_naKnF1S>pXsF4^vUH>VugN7!^1LpyLz&qBARW;$-A^?Hn4Lk~J{>Q)mfZW&tq zBW4u()!$%U&#y)zkPcj?f1JBPvbgcupP7xV233%H*kfq$lEE#N2Gdgx4r16O)vElN zRZ;SVqiNFlg0ml#BSK?4-Zd=dJqQa@<>9zvSeHME?ZyI|r}7{{D-8?{q00V+fRmHx zK_-(ei!J9y+KO|>3c}+3^_l2c#J|t}mVq6xyTft z8KDocu*(Cgb|kl@FGD}w7L|Sr!ZSAC!J1KT{Z^Aa@en0Gim+u{YOH17B(&}_N`9`Q zfNnv!!OrlQF6r}b*B0&q?>UDu6`H%!rBl5w_q9TZHx 
zep4&Y<9%}OS>z={CrYPT*t0woT}U;@F81fCIZe$;+x$AwAzK56`3DW-(-7Eg8Hvjf z=)wg8Zkl^nc<*D9kn^~nX27pT6?w}!-!M~?1q*#FKC?r( z!w+yvE@xV4LsvKhqks^xEyV3X?0A&k=ncOb#QWmgT)dV>kAs?@gS?jSI%9xpKz#bI zUwYsQlEA;{WX5HF%>Pg)jY^ofG-4s}1@}1#pgzf;U@%?wP&jQq@c^EYxuO~&0lAcIsmuKKkyH8f~ z(y>Xs1cN0^6&<(HWBbG{&YwE9?tg;Rfy0Nlz_Q7*?P<`ye?*#G)13sv5IpY~uAbOe z1V|?~EOe+{jEv$y-$dib%h zu#ha6B>Ly~@2~h)3d7Bb#76+AOpkU1<2R|ISpNC*r?t>a9u(K8E6;^__DuIsn`x9W z{to`Kedo?^kVRe4xriEqz3V>Hbe*trOifL5?LCcVroIqp=8e!jxO{` zJ`4@N!!G*YX^7N+Tp_EEL%m6ls_Y@{E-SY++Fd-Yv;S0kDbya(!`BjSeKsBIhiM~D z6dJ4Wf`S4w${KJW3yMRktV%URPrss~A_qgo#Sk$w)%X?cLQ5)zTek*A3)J~^K2LjM zSKIj&+ga;NTz>4kw3^UP$o;?1~VF>5C3Q>&cl3T*AMma;sv7s$f9TMmpd|J7r1FULSG`e323MS2T$tro=X=;n z-*CCORgMG{7J9|^aCv!M98Tk}BrflB7<6Mp*c6Z}6qJlpvZ0jxU!fDa@D4kuvq7Pg%Ra4^eGOS z5kHJSXE!%B(9cM()ddSPUZ+1+Yo1UMs!%1EM2t2AAQv|_?!s<~zn)ZkBq{IP*9SC~ znIC%h+_2mVi}riCWDpkl87`-1Y%Cx+m=UrZ(dhK>JvY$Mm@@f%{(J;bT2|VL(wp5M zN$~LbrKO&c5x>Gh`D5nIM)hw`5{{wA(yZhpcSSlDW(pfb)M4Nm+&gPB@fCOPUlc8c} z_oJdXuwGCW?zQN+Jp-PImELmTbCeVK1;NZ@kTO6CtaJ=hylFQ{v{ii(x})!paB@oa zKr1o=g@4I^BM>F3gRZ|uS+r&)K)p?AFMBgC3Fu#gT*61kj{|E}f|9EGCqA)*9B0xao= z3iPJNWXoRu^Dy?DA&w8WrSU2&=X6{^=hy&B_P-d2$_Y&F)CXrOKM0a@?7!9WOhFJT1fUn{XD}?J7HB@d1j(?JLX6 zltU-yCMUnu)r}S0ejd2=$?}3#EQv2Wq&=?p+U>AFP4bxKl(O}U=AYTbg5uBrkHpdc z2{PTJk!8VJ0rev8azI@CH5yX*HpP-M3-QmEU4}>r@Ej^{K{<6JK_L~shcN0;Zhj2;zO(ztIT-JH^SbV^@Z{NldIWnpY zgrC5_cdJPnGk6yF$-vJV{CiMNLz$TwxT(n!9Hp8p`yTiR#7Kb~j@sapuIKAc- z$&Zb;230=95yns6)bxdKgMo0Smm+*FZcT>a%6$rz;x0?GoxD3Ll@-T?LpxQ7Z_U!j zq%oI0qw7{T#YAuGw0|bj_q=0+n;)*O^ZNH6%|`B{yf!9B99H-C(LXqNB(Z~~aKjfJ zNtWmb#F6;73Z3OmTlX~yGnz@r@gxYEb~pe{2bs?z8^q}R5cY9@(5wTOMKt*)rOgba+o zmk{Z!(KNuln_#a{aG{Q3x%@fgw-q7Gw9vh_dQMMoKa6Dj6woqgg>l_WKDE9`)<6#c zC*;l0y)!eq^Ih}*Z7py8q9^p@ria{b&^s!Ty3FOQHL6^vya(AGUW_CoBt;;tTem)v zd(|2knVE@3A?ZY6#b;n!8;FG9hEe~TY0>S6`$dGsh!ho?ID~{}to}E#uxGfK?fFMS zs9C%oPUTv?cA95x_Ap`V(jmE2xz)FLd;q%VG6mc61E4@GC1v+ta96^mC14eSJexeB z%zQ<929|F}pd+>sK9p3R2Pk}$hKKkLW@#FFO-spl)}7HMz0P(%F;o7otMR#Dy=#(W 
zXI_6^|4Wuim##b-ZQ7g)*|%su*q(>3F2J;*-n?xaDYqMc#JZY_0u z|5hekUt{f9NKs{_6t&j0@i@c79FPMKJ)VPKV6C5M5Lmy zT?fl-(0Pf72t-<_nti}=CO}e|9!G&kqP^BuvF?8;6lLg|M74vz+-=FR^;hBAYBa2Q zBE17?dEarX@4)7b30}jp1B!EsKHlEzgiO9Ob(psldU=8BVAC*pe|@rxS2d4B1=Ftt z38R{&gvOly6%^IH*>-dFFIvp#tjjAae1OP=vIUKuZ|H-Q$}F1EKtt?S2n1!w?T8px zjYBZAgJYq7C#a6k2GIb7_b%b0C#I)kP&vLw$;;QY9F;}Z=RH&m0`VfSbGzkB4Ls6F ziX7B;KUT>=1b`5UBRW4e52u`5m(@zI{T8l_h4*KD|G=#P%Np@_B`R}e(D#&N!!3U1 zTWEq-$SjjmGP5kp)-+>JX_i#i9_`zURobWP}b-M%7A*z#)MS?ycK+i=1-|J;fL@C_KLx z6Fudk@S~Nqt5Ny}aQAS-eZ!p0mLG zpWqe%{=5g8pko%{QY**dJ0K?Z667$=bH(-c&yV_UHT#a;J_Bk296^WA_=bQMn6Hsk zK|0at3Ozj_+bf_xU@^aevO%a4xih2=Pal*^$Gdz1+i4Ix_Q{hcGGO=EYpIqU;$WOK zxQtRT*-sGZ;#W{@l<}R=Ok;t5hztNdi+Fv8`E}ybvAUujN5KH1!Dhncy&^$(Z;~u3 zfqn*Bk~oNbT&@ZNP?qiK`uGrR*(jI~+d(IJqujp%015{d(35(gY7y`j#Bv9S6VU68 zjN5zOy?X~pG)>|a+l_sYZF(mBK}6xV*R6PNos?>9c*q=^^xPK*<35p4fa-|kkUo=l3hci1LO?x@GR?S`eggvoyNz*03i#UeMQ^~W zo>~$)E2{NEpvtD`yd5ME1{N?;@8IBITL=NWm#B$wk=jeJrte6mh$E@tM`UwbNwHzNH2@kZn|cAW71tq_Kjf+P!m$N#!4}MAH=^oO+*Z&UWnfOY&4( z?gyxjhA7jU^u!ROqIk~M&Q%K*{?COUT|`+ZgLyUi_nu`OgTy=~!Ey5jv(atEsAucVk1=z2U%y2i@M$wPhj@tX|=H*Ft} zUFSG(xOwFH6w~1J29stWPpVru+wqn-xk|^{Znz>4aX-$q%1sGD!UcR?D!|?^mR-tqehIt zX2SmOLbi8zN2JH>jiHvh{OlYJ&M9>cF(Bkjbcc6eEN8<-35aqS%@^tnG zu&dZP7I(A=Dk7@0n_r{;nhuf(U9Q844t0YFMIP^Ve}JGzVgmeE$ThpM&7?^L9*a!2 z+$j*$er2+~@C`rh<@f_KQnZoX#`#{Ixq4;gSt^naTGMD_Pvhcv1LPKOeHf|>J?0Am z5SzWfLdY(H$)597w@zTJ;qGzAp0Vp;!oQ;o9Wzzt`4X5(-`MjHty+*})yT3rK%}bh!2p3BjI=0#sktiHIh9fJUh$JU z`L4p2?PAT)rGC4hl5aTydvL_cx-(0d01w!0e_otpuN-$)>>v3N?L*dIAapqB#mwxy zygvyqn}{Bx!w~!#x88b~NwH-O3GJBnJ_@B4@^IC4U5`=(uACvGs#!45(tfR}IeIq4 z*LUm5lP8HN8MHIgA>Sx;V3uUhuYVA~zwM^Fbe@8PMN~#GpVq!hS$CqQtyDhVNaI1+ zDmk~C;Ci1(fZ2%XpN;gHa{G0Uh3)1ZX#;)_-=MBzhgK_?wAP#VTe#0{_u&~p05J}Q zH$=G}Yf+2)I_Z9&viXrmXEy5|>tV^lt7nwfUH?+9)>Kr9#9?d7ZUQ4w5dYMUj-sA2y3?&#U%#41EK%RpRXMyQr1ZJH-;<9 zWycJw3`+%7)q#fJ)`ZkQN(q|gKH2c_9twmXlQ8Gid=&gZ+-&lXvHj5=fEty-f*c4K zq~(%#SY9EpV~b?|Msfc~YbFmqAJ0o??DQzg-A-FKreB`gw432$WaE`CHCDN_*3oSN 
zjwLojowbckA^BtJm6Ntw;o;*`4k=+VOD;K=OIjYZon|pvkcs^A$<5I^7)JyKy%9hS zph*;3OB!4?C>doq$Q8t`hEO3QQ=4wqI1lnnOGigU5eQ?PfG$wuz(wH5)HRD6rD#xD zq{YT@o84MOdH<)TjpAlyN>ALddgVaQT*Ui{Bq2O0V`S@zFeF(0gxA^}!gb{w4r5)i z7yB=nH#_ZbN?uII=tJ8I8*HeUH;*q9*qL(Tl*~wk;id0&7Wp@H(nZc_C-V$Ek2Ael zrz|NobG2{sUQOD5f=46Bry2?KdojPdaXCZQlU`MeeYIlq&c!orD-qOkzN- zSIYEkm?2V0pGc+U<Ul4Poh#kugikNGz(rgNE92Hf|0AF87SvhaKi4UD%`ep% z-S4wL8KC>?5xKtCc$_ah&9?94jUhW(&t(aurIfGi<=}{l=RZo+@|lkp`nYFaGEUpl z6dmo~bSlOS?hx?#LjYJpywuXtB0K>|j3rRdNhKPPe?KBeNrYa|y&xKIgjOQuL{UP| zC+K@Q82-ZQ(%6lB*JIf0{lG;;F{k5@3x`oFR>MU2JpTud>lVu4ZH6%L!RH8PrZ2{@ z@)hE?P`DouVl*sQT*$fD3$KXuGd_!GS;H+!Xg2-yT=eAv&*>+)GwL=GD8_iiE|3NY&9{EM-w0%lZreavAf~zy z$>Rf$yc8;SKt?7U%V7Wy8NnHJPGv{kR083Yy%zv)NH$)s(H{x(ysye|gG0XlaKAyW ziV@#Qba}-IcPPJz+X(>yi_IbO481D=1{Nxzfzv0x?793UN-oyZ=NZfKREQC13Vb(G zxrif#*zx(4-krwEHut5kd84OMo3=^8e*vrb%3c+gFl3@fnsq834}^W%od$lFHiu4k?~gW zXLY%5<%U4M2?{C^dE>v4`Vi@UV(bmj`2<+=D9-?HHH$n|Fs>I$5^40}tDDzq?w7C}Ikm9i1ey1p z;~FddsHdR_tHef9P|z-FYHAmg%C{&Lz!+fd_9dAyqWm-c>Hr013HYy@_7V>!)o}ovYvyXcJPSot~~lbotM93T$bNurEe;5 z=IyH9$Dhnv6!%kOOBt3X;yy~gr%m@DkM+j2gwR&coU!}-@uJP&iW>dlJ?&qg3SZ7X z^3!YIg`c%Q#wkwQF4aR)9<3-Q3KISt3@2}6DgPSD{7aiDvYWQ{%jNx>6<9YgtcCku zuIRf{Z)j+kjgE#{Izh7~NBLBps^?0|d*TN^G&lB@|)0Efsdl?2Aa`$I@ z7w?(|eL-@47591m0gjZZMz@hjZ1yJ7bQWHnmdEAQ#QOO|+^#$pj>bg4Gq_e8|K%cfNG zn|d9L0;%y1a8?1g*^#fXiTWem{E=9?j0qDht<*bQ`TyQqvBGD@wDiHYe(=a4E$Z;_ zccq7txlXG5bhS(`^_QM2;;GrkKQJ{U>*Ta&%>1V6GlWL`3NzCI%Cd z*q|ApTdR&7hvkd;iLk_!|5nxL^RRyM9fK;I`m&QgbqN8VXDU+adut`dPRc{Q2IZn- z5-NJ(lVOfNOAr#&vB2lgi-^Mq*3pK9D-H_DRYi7Y_gLPP|7bQsm!$H4LL_J4(b~~e zb$L*J}E7=t1?Qd!KZgkBbbGg?2C-YK?d^{@NhMmC<$B8gWdZ0V2lu! 
zT%-K`d-lX#O2$9?;e}ggyNcdlx8qTapJ^HBQzf5>1H)kA6=EC^JD!*W$?S4HQlm4CcqeoOi@JlHoK%=v8Wh-LDjp_Gx8Aq( zeyIVqm9l5Bxnd-F-m0l7nLE}VX%Q9w`l1T2rQ@~IE}uB2`VC?kLvrWDo!aRkW%l=$jFGU z1LRTy?lsKF#c6O30uuDZKN!fp8L0}Uz%b=2sW={M{4#Hnp@r<59RG)=`3$R#=(+7< zL&3#6F5E<-eUO}c9D30F0(o_;@*S;I5lZe?fK%ef z#Sb^EVKk=S`f(qJMGXmH^^Vb*w=|nJ?cBTfHN}?&#hEaPfM_k93;!8Pe};#B2g|&8 zUUZ(Yoa)+x7eM#}i^w~Yxsz;Zk`{t`{UiSB5e(znOR>J#{a+pVewv^5%{GBDvnh#i z+Bx;ZP3#(vYm{s?GR*t7vy<;Ovu~Z+}s{YALspI$r{(ihcbi2l-2ah>e{jkl}5WRv$~#oy1LKkr}CduwTKtRIQp55&|fY$dQS z?1X{lL>8Qx+-Sp4r4MvoFsZGay5W8_7I?UA*?s6PTql5%9`w%)O(*=#F<|uf?~}+* z61Fkq?!0Jo3A^6S2;pu9c}eAd1yENIZ5Ydq`WIB`P(DUfK+f#@m9))X#+Pbkx1Q+v zJ^szg*@5$=YlV2aHico^=t*L(_#i{@7pN5A&b#|+BYcyR1QFe)DeCUVID(2ltK>IW zFT{v0ZiW#`W*#!)IOq@2hPyK)xUJ3GQn?YHBdm2o14e})1`3EJf~ZJvPnX}17_Eli zK=w4VTEdjX=B-^2%r9AOocuF8QGz_xS+lkmXs^85^C(%Vp0yop_{n{7xc35Nl z$fhrm^~;xr`A4qiYDQIiRKKHjb$#+EdIjk(s!y&4ylCjab(kAB1h8=j;vr@kii%Fa z#i)fX2hr;Pxe!k z*Id5n>``zvY`GfkrwWwo*ut5ba%dz3>IQQpo+AK{qG~exgG!XLmR5M;b*XHKZXrMr z&=CzT)_!f5UL{CAF8CQndHEYN_YzPNWcu0{Etd|(S;BpyRBd?iZ=bb4%1<6V*D6WB z(;&e(a^qV^ua6#cQpf*xy<>jQMR00k`17chkDw&V$jTBb37~Dt%)ah;ueDW@TNp8( zi)}CtJ^uU)PGar>b4Tl&4|HG{@&lwC!j?jD&7C{ltw=XW*iBThCGtl-MhU^6dY~iI zI^GhZB_kEY_465t&;#(6WcLL-7{;O=1!z)Koy~SSXug=pSto>X_YRNI%0eb0V>Zy9 zaRhoQ;ROL8Vr!S1C*>=stE)fl;pX9)#AYOt^sshNzk31fc{w@1Q5J&gY|-Vu0ytWS zP2{P@ULX{yn@hx_IVk>n#=bGo>kXeVx6?`A2g_Bbee2P4_q-N&X;rWAX}Z>{M9J0Q zH`k&(7cyno9M4iM@IYS-V(z#m{TXlS`Vv# zJOLR{!fTbtMKGX}7-9qavCFno3ZRMfAp{x?wI|)O<2A@>FmY$GA{dXMBhq20n}Ga% zNaqod7g>~4mnjnvgTCiXBa+Cj-#tyqmvfoq!pB0ze}z*j(=>}Au65AI64g79;~_W} z=-+Q7-4Injrquhx zN;nK`NBa`4lbIqccTx{#fqZao5L`ct-wfG8AC4mqyBwKWr036{TU~jQXb$g?RKfTm z5BQf6Sc~DospAS#udS%Oq)(RmUtVKAM83SMK(UODMl;9Z z(+fdex#G&oO1OszSBao+s5X7p%=|)(Cc-;t`Efu(>Hpny@uxp5*DiD|&wQn&Yf5W_ z(1l=ui1*;khW{K3v9m538gEiCHc@Pl5_1TX<{ydYq!7PW0y@T1OVq+mdI*p)Z{5qE z{0h!K6GG`oprM2)AXZ95!a8O_SsSVRAeb9VdZ)9>xwj&BbexJvZ-un;%ED{CN6my= zqMUK`Q^bdsYq^f2x2ejIF|@F>M=^2qVuX||f~{_{n@_a_Ul%fmsMV3?e-re?VEb)N 
z95=cmitusEb|Y-r)<~5IsU|8v?BiHxG7A08sIAHxog!$`?&FBY;mZ9lekqNQ<7QAzxz z#DOl)h&xeV0Vr=t$x0vo`<#gFf)6TXx_`z%Q%S{(gDD#P zkVm^I1(@Lffc;PENB)JVEqCvTl@rwTedZk~$#0ZCsN3&0;`Z(ZGbY?_Y&{vo>maU6 z7kh{Vc8$;W7Jtjn8wrSk0@Uv}o-(MMp5!eyh4U!AYsG!1h4X$iqnced+@nD(U^z#V||yDU%t{o zdc$CT+gtYwaK8XILF%l$?>xAFALpwdN7$e4fVFh$PI33Gqw@Ex181rnSIcfjUbFLL z*v!BX2NfhiD`fUq>+s6PLzu;hPmNi6GirIU-4jD$KBdolaijYQJsN@zp%?%U#k3au zAOB$~AF%||Qo8l4pKhzja3cj_HFq-*lh8f={lW3IGsDe4c2v533~JRhq1QvlB*vei z;d?`!A;PDjw5~90M_(^3Ed`+x;-kuNk0Da%c?%O!vsSJ$Oc+;1D86B&k?EftI_Y)W&H=Dk=DT1bB&DX!DXZWG;b z@@#LPERAg3iJl#LTn53_HOF!Wr@0;G#nP^H?;&ImV)7EgFUpZOT23G?1DS&xISzbq z(-rN_@~2%-TF3c_9TBGTW0QW3GPn!F*u;1}z^`hPBsv}n*M+NH&~|cZU6Jk!HIL$* zn3^i??v5HCw?Q!9(y3-9Cbq8Og4QUr*sbjZYDC#wM#hJpuuIfi6?Y#=lo0kB+2#|6JL99}ed|cRvVf24DER%9Q2O z@9;9PTte)mlbs!>u<)}D6MHe!PgK#-(dsm-e)(iymS3+^zq8U0%Vo~K47Rk={-EC_ zG(2@D>SRx_h|@Mkr~P+SER;iK-NYgLeGfY>cs1;ND820Qav_8nytlDEMb?vmGbl6Z znt~WAB6F=>13>ZX|(5W9sQO5th7Ak31itP81hEASSpMJN4KGF+lKo z_vs~TUQajZV90W0j5a^SlL>?{Y5^EmiO&9gZKZpSc$f^jI$;JN%Hr(m`a!A|f>xu< zxtkq0Nm-7gp_a6=gjS_)P^`by%TnBG?Kevn-pKA+wwN_B9`1W-QpTt2E^eb4;rFi; zOI9Lj3b`qSV#%0r@zXK(ZI^UNvu~DylUF->jm;Ni^M1Bt>*2m(vbl!9C!Y-orxb!; zF;X4KIZgqbOY?t-`8^6ZLh+c3ww+>7Zw+Q#n@Hdy&>=43rdpn^5Ir*J`=qycfGrXD zGH-F{GZms&G*qBjh_EoUdX1tHg05X}kmF=I!M1!R-3aU52x2Sd%?oFbw5%}bPAQBF%g;rv!opt;2CJCq{zd(x~9f< zHOi3L+JbC6=FPP7U0-2q#~xaP{jPzhlYDumk1qtreSNXe8;k{!X@@E5nD!xTtC6~{ z$WA;$;|aVx!dz=kxIvmEV)Io;lBgirj(T)R<3V90EArE{w6x+c!;lq!;h2A_VieHY ztl?9{%M2<=1r&c_B32hqseq_t=iR?g*x<}lmrbAyWtgX3A0z7$i-X-6_uB8_*wDAW zs3Lc^E!Q9V>lTfB)c1K*=<>uJe7W@Sv`y4xY}E?`&7|WS9%*by=Bd0S{o1b(`%aASZ&4cxp%xw5%ztz$9U=RxuGz zS;2%dykJGCUcB#mmTwPvp!2?d(z-Td; z2J1+Q2tP1++g{%}E&b#-mf&uNlX(6fS_;=m6}dcFQG%i3r!JVborjW1lzV(DB8`ZL zk%`Ua;<+?WpLFB!6Jt5h)eTYJ5M3W38LCvHwiZLgYl9U6n`~V|exG3e^iKSz8h>LB zy45K}W|`zYWw0qfK)9Kvki&>1Hz;b13p++1P?WzDo)(xsSpC66haM9TyTQW%zUsF? 
zsN7W^;`@c9*v0ZA|+F5QpIc+Gvhvf6SA6lTN5jX#js z$B0ct<8O&@0+5zih!YRwfzofS*{s5qdHG)!M|2ne-r<-kmhzjX#ww~YA~N}lWgDvg z+=4XqorvK0%)aF{Yl`24NiIENoD?bp&S~JAH|OyjFv9Oak7(R*3D*T|;qm8vLRymb z;tm1cJFKB^-8F}WW`Au5w9w+!-046C^#=}t8N>?52qNtXJ)+>v=j8dTLj1odGz_zk zTvblp`xWl5#C56d_5V`doqN3X*~$v+4?8Su)9OsvRdkGL%ff}z&vbr8dQ_J^m3WGW zHdcyOz2rMBuJoCZce1Z)#T?`77Tfl1+s$hBTN|4=(&VgGbRYuQ(WK(p8)$97*Tzt7 zP?+|lwS}8#BYpeg;JMU;blAV>aJ~W@_iE&eXS)vwe7o7#x0PY`Uf>;jaq$=YMA^d_ zYoX@~>ejv!A6!$FLf;U&%9%4F=D(|x2YG)fe7Ta$`jdt3nA^QvYkzN^z^i}>kmL=q zZMU3&IEqq8k8oXS>DhyW|Gb0VZ6#;#Np5oAq8Y`=Ps4Qi%buTJo2tZpWJnGPorSES zk}9EMw75h-cf#>xb-DHLGP9$!Bu%g~qsKos?2+=aJet(TQboFZ>Mu6Zi;-XT8&>dC z5E#kCG}t-z*~oX5pU!Ye56UW>IWs9!d^h@YVo(8_#zRY{rI18D#SOfs!$OYq#=$aG zkb#i##3&QyNUC4>)XV^hP(xlu1MIUA;|#pceJLwsIud(1(6mI?N4rt4C&0#Rt>*t? z>%HT-e%tqPTe6a@tddA(_LdN3v}KEg?2$dAP>K+;NulgbHd&cv?@jjJ>vvvye}13G z_woJy;c<80H?P-oT-SA;=W!m#ajw~LS`t0AOwe1f(X}#Ww*>7qB7;l1^`j2~P!zZo ztm5KY5?X+#1FK71oDR0jKd=<;sQ!n|Im%LF<<=qB_~8y7Gs2CC=h}kGjm3kkbp75( z{id17J`zDdEO%lT8f0di0pc5n!xO&YJS0PKm9WoQ!77loEJ1-wKU=L6c;zRkH?)RW z0Yy4{`S#=s1y~~LPpPe3=@()M7bUYVCtJqNd$Ds#zovgAR6klw|6ZN*^ zfVfJvt8O>8^Scve3TLk6kBxkHv?86b!%%Q z&Fw>`0?3jQnlk4Dt`>vR+Yq*)N!0ck=q?{-0RMg1Zt;uV(3*M0jRgVIh}1&(H&&I% z`A7weiFO-Y1^*fVZ8m)KC$B=5WfBDs#Kxi71G7LGt47}~ zw&LC9(OTpkPor`=A|+-A8)L5{;40_XFKWOOMu<9OqzRD7L-V0DQRigs+eGacE4-_S zaNR$b%`#JUvAeIA(b!!3sB~ny|z&`ED56Rj!>J=rJ zBk~MOOrP3}^|?9}uTV=zd}mT;4Aw6^Uyw>8a7T14>*Q~O2dMF))ktaUtB)!yNS;dl zsQb}kfVF(1n~|*X9Q2}~=K@Cn7Az}y2GxI;Mrcd45Ni?h{;)bWYD!u;y{5OR5H6C* zW`OR;Da06t_7RY_u>-`@;EnnDO;RQc)s)VpH5U7`b~0eJ9T6t1$LqTcJ>?3h6|v<~dXOEPJcy-UzxaLit%iXMAP0 z3&8U>AR7VEK@de0w4eapA94fGcIxlH7T=^w#8bUCqWQzA83)a#tM;B}^V4g3T<$dW z?Z~cOfg6N$p`adi6#!OOYEWMQ66^pXnUh;xlHH-{_-vwLU_YN3(6*XC*hOh$U4t2H_J)fECz$L0)zCp zP2*@~{jS(KtnX9Cj!fHpZkJCwX~8`-c+vd^F&pM1;#q^Au;(g2M-e!eU%$Xpf8$(` znMylw)&MPL1>$xRtoS~stN+frM>pseBAx+XYOzRNXKxHLZLvu;W2Rt@0_ML!qIn9* zB&1m63K9?YA%8|jn-~yjp!kszt3GME6<5$}5!uFOOWLn`nn=VCKe?hRVKi};tR$Mw 
ztrDn?e#jRC+7?f!eHK}ZA)$n}VM*B(oedB|(j#SNk*X5XHNa)9pKnRAp0;$kCv<#rihR}fl%GIHu zizhE=kddivB5&46<4}ksJZ7w$*T@0RaMqdV_sX(MesDq1Il+eBOcv2RCA3&9EiHk+ zG2d|JWVNgb)_4}n==H=2Yc>mG`&eF6f|byJSXBak(pQ)q8WITh0MkND@Vg1kWo$(y zCFC?Tv;XtQv9BEGE4VD4@Jip{P-|AZ-EEYrNN=*`O8%HMGy45K*_`Ht?RaoWB&VlW zhGNvmF7fkcsF%(IG+||M>SQlF$xq^{9g?}KQ`jFn<(AHxu56f`l{p?aJK7svh_t=- z)~^Oi$xLtx1wuDS(b;Q$Ai)T*EpiQ&<$W%o53yMm%cU@sAl7~OvWxLXC+?2rR?Fh+ zCYe=yTP{hmMoL)sz|VQ7M~zw(v)$mU#-CZXbskC;ph}DJ;-JM9c(IsLg?X(F z<)-J=n(yW?ykI1iIBu9dCC3~E4P-_^Gh~_e1UeO5CeXQ|WcSN7|ppEDavPMIa7-nS00;n)7 zR9c|PI6zo48<-v&({nMS#)D^%ih!aYbM7?gDvL>DhDc`Yr59=GRlA%keU{UXyO&5M zrKH=XrNd3bc~Qh7bw06K>$j$DN$k%-gs5YhojK$_3sL zzI=V@!d$RjRFn|W&?#lzdp2A)aW=-J&cOiqBReL@o!g+>i|!6y0uBxiQpT5-=Ha|T z3vf_A!?&uN0Cv4>Y;KrH@r|jB?#mq@@)9{4>o-Z#7aY%u3@#wI?{hkR@~8*duVGG0 zk(MOO=tt)_mz?UPIc10EX?eZ1j+N=8MT5%gbWI^>hR{2yo&1T5{AU`a*3n6vR<1MF zb&=6jnYg6HFMnxQ)=DHNo(s}yfQ7aVk|MZbJQBJF08gNA{t)^5fEhJv+#y#O=n>I@ zsfk<+NJ9WdxQ#S8phyEREG?+6HQ^k9b+F&L$)mQ<=K2VmgtJ(Ec--9423Nf7^mnPG zNiEqwlkGS|AXWm5js=K&P-L@#8w_ILK+P2(eqz?D2Y4g$u>|T45d?0CEyeDKKhjT_ zFL0uu?h-`uF(|~Lebh3Rjhm)$4hxyYjyiZ>_+pMeC>63KsRVU-%ENDG5&s4XySrge(cu=f|4<)mK^FtmTp`DU9NelM01E!PBtjYK zH&_CzYYE(QA?sBGfhM$BCxJfw8NwEvYOp$-fUU%)Qz{G{O5}L}6{6U}AjBw)b64(S zctRHtNLtUqDI;A4%0&=loud^rv+8w1fpbza&zeJtG@oTgh3jrUK9L6AolKwe5(4kE zWtS8RTE=ytsiWgGMS*_M+6I=`FVx#doLnJG1}GOF`Ye!DOG#A>3`=SDNLPeH%OA;|fh>zSc>tI`+x-M&ny0V<03z!S zi^hgceTI>-4M64l8=rNHtX_Iz?9VHMM0_5>3I#Lp{h$^C(fRy?els|Ve?(w2S1-<5QXryQfPFre5q;~Ueme`s*hA4 zPqDnOE`Q)~8F?x`gLcUQ_!odzLo|q6f<>2LdpOD0^AmX^CJ~@rxgb;oJGJ{};F2x~ zrx`M$f|DB?Y3WpA#cu)mbb+B`Xo%inyK|cm*vrG#iNAi8Gxgp2U(N2iuq_dXGll&j z0l~Fy-b}B%E*MFo*w=Q$trl=pNWQ@P?mFxi%ymxhi;T|%4?UzT2R`aq3W{fnzo5uh z0ib@iCo^s_c6C4i1ma0b5!C=ULS)Pzdb|YsbI?kFrv+qWe&IUK$fgPTks$XgxH^9C z612Y0QtI#z24Rlb*JB*>Wgazgn)(Q%6 zXoz3`pN6>eJKsv@ZsxKU{BHF=MF|5pE+!i)h-{e;l1ZRRjGuO^7?ujCKT&*xCkz7i z#Vp%rh8AK#iZ0(@5<@?}F^^H|u%t(|Rh`7BHMaHg2H}YkUN_f(Num{>xGu7E6jqtJ^BE zIn&!rH(o*6?(LSa6!L+a$7q6_?3PRccW&6HPYrNE7+3DbV?e>v=XU$#dPdaFj*SLG 
zCfVbuOW^3vDqM2ow2ffl+CVo75SAKvixA_i4XDvd2&ST`Yq>QvSUR@C z3#t5<9J@b-CV^g2k6Y`&p&pkcj(G__5^=TwMTShG<)IxO@%f*?x1DWIz(z+tvOflQ zfd7h(wcuW>UP&6kW1O-i>9MuZNg{J8JO|w?9SXVG{tB_9?Jn)WZ}x697>D97FDjZ9 zaT5Ui0M}tS>8Adrk<6JzFU}mD$BcXAbvd&pG%?Grx@D1KK;|wm8Ub4n&ci&VkicprVU%3>_xF=JhS`;4Q&4RNra@Uy#02$87h_v-udHlNgbewM+`e~F zQJP6}kxDE8zeRX|}UfM^66Gm~=A^(8)PAsvz;U1ermgquzpN44DDw z6+r(Bn>S1O0#F+OxJ$^AvC^+;nR!(5vP6oX8t-_3q%7&Wm0z$eSur={s+K@k1~L>J z^w1mZNTULR7*L%3LG=yWH-P8_C$bUq$0gzP9x16Qw0lzJ-%ZnxdArkRY(Jr8|L%n= zMiVdtqzP6y@^6D1Uxx(j0T^wdTnu73bPhmqh(K&0?<+x93o-5xKL^^i|MU5&y{CG2 z5(U`e25Uek&C;M-C;GtUEN3VS(>9y$ur~u8T{AeSqh2B?uEv4HQJ~qzLj))ch+NKs z()oWrC+aVI*7Vc)`_ET{x_KQuQo5+-B)`SlkOk4AWD^0=*?Kj2u+Xs8LRKfG{-OV? zApBi}|4U?V<7Cwh6ggLCNWp=Zu+~1t=lDF+2O0~5)d?V8Murz4&w}fE+sA@BkgU3KlPRPnjLj6daRSlXd5qhg_Hxn4dT=fsU#m7lcKFl8gdi zZg{%O|IQo*3j=Da7om3Jh2Dyuw0zls$8E;lyAOwD6ucBF=utxgsict%#6(%PGL&sHu$DCjr?< z^OHCX?jlr)(Az=SkgzZ{>Ii{ZY9nARNYxFVH2}E`41p~y^3j*~IU&qB%^Y&zRua-3!!n z#6FQ_T@O&$ft>;ryBl?(D3~xi_*IaU9KE6sNoeCRJ-hS>L*xC+1L?>WW!XtCTVpR^ z&dmKIPAk|KIb#VP6Aie(?ltYsp?@%%U<4dR($?&Dh)G5eLr_M^9L`H5T;3`=JMi-L z6ScId-KIh6x?y(t8VExFCjD&tipV^fVHAqP7s}1hJ-W_rl_M3A3Dt~tpUs0HJ!<4G z15WoJfjeNi22;{g|)%>aV9 zLl5a%l9iq1Zu8I*HJ2ud^(kXNIK2g7okKc7kI=;;tX|jmCytdpCY8{9AiNB|nhF%atwL$-vrD+MWbVTVxO&QeP08)AHe;>#AnXAgjQg49Suh@jscJCchJ#j){p`U0JE@=;-)Ykxz1tCF`v__KK3}XQ(x8 zwZ5DSRA&@&yPV7fO9S5)XgA=0fXrCIY=|$-Wo1+v_&3NS;L-{FQ@jnxRWF8zPjz_L z*m7HXW3k7DJZJo0`OSb5$THKys`gmS@3FG8n*tKbW$0nGIHLhUptXTqc@5xekrTWj z$Yo^Xh$kUEKY38$74Q4>rmu&V_~HlRjuZ`J81O-2@Ifdm18XE!;8lEuaEYA5faM0x zoDcx$q}-#y8f*mw**VaLL^E+f^ZyT7YA9nJ=Xk{$6`uSM8&XY;!_kP%b`Q*la|YU6 z%5YqA_?9<`iH$^mVg|(pXf%0;_Mm9X*918suyU;ojM2(2_SdWYrkrKXu@D6V;i%re*b75>xP$Dc~vl=n1b`p+jF8w`k=8cR;URnmxKF$`!dS#c)Z z2XZgFM2doEu!F3&m$KAYijG(jG!~>S{OD`PjR~w*M$CkZsDB(|si>T}OFT#V^8RLmzPy!v%j^|+ zh~eO~iGJ})(2QOacX!-;c-k}4t>R(}r;9LbJVMc3N zs4S_biPEWKb}>K?OlCDw-9ZO)@TE0v zskiHc7qc8l_+3df0&*;%uIO|-?Rt8~*cHKJaDtYDg#-Y_Xg!firtGJ~Avwtqeui{^ 
zdhB4ClwTru%5b-~Oo-8iqG;dx4M@Zh(!oOnXJ2SETwz1mqVp#^maUeFi*w>C*FQJn zKImE!me?Trm=fAS_7)-u{5}#kAw@h?anK~7Kj}~fmCRIoWX-wFKaKm$+hyAkg z?_w&IjL02Wi#fhBmS>iMRR((mQ4E88DIkP3N`k^g0-LhbF_$BS;NddCP<;)dOBO$a zbabi*ihE4kxX_T4f=z{H?7&Wa027aZygAxXgH9J=mCnns!3*B`HrRIO;O}35(+?13 zA){rtK$I=BEG)|kSe6d<_J+_>2C1?aMBLwC*Z&VHviz}M2Da$`Qe0+pqr#dx>ntNY zVE;6g`d=TC9@V{S^c2jj_9JVkz}DDR1Sm3XZZf2IsBiyW2dEwo*mbC{_7Lr@+_S*<&Yh;EKgwh`u+-#8X9&{!Hzziu8 za=#BiU;~7z6^%v%e8N_g1HRAt%U^a#@P&?v$c)mSsc?1g|C*OdM46UN8*0>n5EHs+ zH=&guK+0+fcLT8l_1poOTdAY9YzML;Xp238D>CE;1+t&Nf35@sJ`6DRd|KWz^(c65 zWXYYWPx&8SQ2ZdNhTU5I8XiVX>X|T&gcG!fwrq-kKwZ-J|F`k_PYOd+V|AB931rnT zvGLb{%W%!O{enEbpbTg%x|ppC(Qq-y`;j~x=oHl!F+kh^D(9z%Q!sm>3R7|4e-X(W5|0g1_D{%b0rtkgkVPiG1P{-LtgNj63%u$6UzFt5 z;|+>v`-6av?$u9BtxujXjrA#ikoubut+V*g`7C{@rvOlBY4Cks5|(y=PzkpTCQ(+l zwi9nyW06l3Xm&)wz$>XZ5ZtbSqZdF_lL1#}3k1No6pAi3)Ud!xWY)E{wU>+TJ$t4b zw1AKoK-#`oxs#gFnZ~@AxY(^R{pi$#7|bGPD$kNZ)&I3yGmTO&Q-x&%9Ic-Qyg{98 z9^*d=AZ5`jw76d;rrZwA1!32nti@d*N)!LoG( zRXd@kz>M;Uc&Z{X@=EyAr8DGl4ON6ebi#~{jqJaP%vA#jj_GnW9jc}3sr%CgwF7we zPi)eHN?50_n(Y$o(g95x7mmIg(q=r1|?YG17qR z$k&`D3tF(=brzSeuI}@Y491?hWY*}I)~8?SE^R4vm2!Qms`B96s$G3sN3BKh(#)(^ zru=Xey`&<+9`t#0=_-db3?Tx^F7YZd(P&`hZp|?$x(nt7R;&etV*uOzA zkxdf`lA-$C>v4%-#Dyz*h&ATTU{=3{n*$Z=FRIP2H$1s1uwB1VkIKKs&^^6Byg(KR z5Bhpd{JHITR+@GmKOBEtq3E3XU>n)6vm0NYbuB$qMP`U(L0VVgVI9*8z6U~pgMA^( z^ziGn+&ogPkLdJnpwCewQqg{Ic)Y8y73V)$`TEpWT;QZ*uN}v?=@mgeUg>XGIuD6f z;QL438w`g3!7a14zbzHM@xpej??VnoWUT}rVDol8VUJF}Yiaz5HaGhYu5Dg^^CiYk z65Ne#FNo^Xzcw8;19x7o2Q5f3Ho`lWt!M2|j~i*~>L%AI_oLGDbs*bQFg+++cFp@( zlFA3J1AKYv#pmYZXKE+uaz4c~O1Te8qY9%zg8d>%lDJ{aONm=SSMPdc*-@87JwAze zH2wmtHaq$s7mrrlX3}o$);0*2Yz}EfJI4EwQee4$dvWPvrsGpb=ll2>z-t(Ltxcki z$2m)Vi@bU&UvIGyhPFTmD0a&p8=b~hX&tqhaF-4N%*GN*VWms$SH$2^!dxa{Th8ga zm5ozdBWiyZ)OzjCJ_x*ne<910mBNxp`OfX1P3Jc1YDJb=8bxmXi_pVgy@8VnY#V@bGgtm1^2Y#?&tEI zBKBvcQ5z;GecL-B!$RS9iQO`cQSAkvG`EI`kv=(Daa-+WW}C@zy8#39eakd9&@tzGaZ_N9N2mYc0B;+)w2;wkn1lT#+aiDPg4 z2JaBnh>A-*&nft#vzi#55%K|U_Ux|sw};J`>*2c+@Gymd7(?% 
zQ$km45PC8QHya2RIar^$D!tv(p5^b?3>SBMm~P|^KZVqqggoxan6X-Ci_R)-ZgL!P zp_KSgSD$QOwPb@As2|6LbjM`wlvG?I7Hosu@ljC!AJWyKf7#v#_rH&&W|fYw=d zGS-M1Z>IXGh?QipTVpqclnvWy+s}t%v$cm3DQXMTXANPI5O7U;NBR3C&pbOB+PfsL z=bL_OH=ywh<1SD^KA*iaj6eE_ITw+ep26?@5E5o}0tU_K;ytf>^-$Wyu}j<}lfiUSrh|rnHx+ zJ!`n%DrYmT@os33ONGUY&}D$dRB?`iJE-kJO-Jobw3-xjs66Pk$|?=89CYX1ra#5I z(wgcs3=&228{C&&;s)~;VKEUh8q>O-nou|pxC=XH0c6nf^Lu*yo>}}k-sUBMWA5bj zO4xhG`R2=)_P&=bO<{cI?>MnE9z7I(dArC=`Tt-=kaO0;nQ!x0TT(yfjobcmy8|Uo zcP`5;eFa9_!QCt~)V>eCG~WE?own^qIIS;%Tb9%vDm;E7eFX*e0G+q`_1~;Wq64!o zPmEnNEU90$w^hl0rG7utQIfl`#8oc%v4NtOB=~mu|7 z8p0U3A#wm)jt+t!A>di&KOrnGeGEbXYQW2V^gIM_ zc>c%703i?XG4PgS~RInT3F6nXfyTvBz-E%!s zQ)bd%XiH&$fTrZiG$M6xpG%%ie+u|D0Hb4__-cY+?oOcv6up?e7(?SeJj)E9 zqEjh@wHm4}7<5`#c5z;jWoaQ*a0(>rx4b!UyYlX}OKZ04GVZ`xBiz!VD}N_^;*$M` zn1|XraCJ#JEm=hBfw=)W3MRFnnVOFsA=H|Mg+h=Z+MR$XPLB`ldPTZK-yQyOUt!0j z5cSOU&rf#!%QDXlCH;27rybdDy3&T~p&9k!W2Sr7LgXtqB$x8iH9l@V^4Ft=bmZ<^ zm-Y}aB$D;2^`h12yJ6!4B`!QGkn|`*g2?-51O$W7CO@HQ1aE6exIY!#8o3}i2vkaJ z3Nf!;QQ?tKW{&_T8#MYnepW62n8|-D`f1mZ;?GayFpA|s+*nhc*;{bX!uJx#>%&CaVR)1dH%nOplNn0cwH z?;JA<^7GKl&b}4(^v@{vo1tuf zA1UtW{hBmp;ubQ!`mnuzFE&+gF&F_$%P?~iEmB7Jor_S%8%1t{o}&fSn4s4C46TY2 z%zkqC#U_r~5kbKro_E;4o=i*y7{BTixuPS%=J9K}+U$)ALXLjh0FMAImftpfY#tDY z7r$$`{9?!3F%)H^YFvc!judFpp#KQ~yz>vt(n7uW0YNZHf(Ar{l!qh|CxUq3Gdv3L zi4jGhR*;LYUgfGClzYHGnyBMx(e8+^)$Fc+mhTR4S&B>cXU=5BI>(?}fO37t2M*Tc z``4sQ@9PM7;=eWddb$x|>-(Uh1)~tuxPl%+E7X?e1He)=3nZ>VLq!3e#((^n^C_UE z+Yvj)y@$yH*D8v(4W0U?g?zFLf1#ewQVw5_zFO&wxW9ee)A`0!>c-%;xyy&oVKv!5 z@+W)PAU5N5u2>Pi3>%qqf=m#mRw#f>jTG8756m2FZP9=}*Oe-K==BMobTEoX#(SO& zFwZ}IY&rh}5frN_Yy`QWa+v%%H4hImR?ek0cfB+J+<-~wFZq)1wP@*9V(EyG$19Ln zJ9?2o%disI*vR$=5z1j3!eq=KB&P*(N_hBDBGejg)jXp+BTuerx-?*#_*Q2Nbbl$G zChu+>1;noCQut@JynQ;@Au|=ZWCb=M1ao0**Gs-*=B1y%e$Ate?%jQ6qi2dZ!-n92WOzmrhjlz#$MFi!sMi8FxqqL{A1hu27QXBne+`eYFwd9f{FaeU_MkD;1Zo$rvao7LIT%(z@9Hm@xj)6vk=ClBp+On2JN- zpnzo@2<{dB$nOPcuU|E{VFd652xif9f0(BB`vhekbOYe-H+z;PCiR={9M`XDHNA`r 
z*s;5|fI$SA0sbv^$2Zq_TZD2+r2ZTGwE}aCRuz6WT|aowzjN;2J1FCq&QoxPH;u+4 z0K60q6j&p*LSU_;Q5g}prvXE;xvcovF5L!$)I+$SJ2DzyAiK3rr+l$x5pYevTK%lW z$w!n+l5OJ>Gs3j)w))1Tk6Ca0qDs)t$B2B);g#tdY!lQBuC&7;Dj_*&R06WsFkUp>6?E<9@K3q>(G$zhx=c<+K^C2zi~2Dp=fJ?t2CYfpAP zvkQIhAQ|u}+T+q@p^g1lm z|4)VqhVn0}x|&Cqd}MPF;~RX{hlhtFq>B9s!SwH6tO$4(@LGZ|?al7)kWf<-e*wwU z%yzCBNNyYPtO?$vNzv88L8ks=8w>J?fdW^ZjJ)p;Kf=NNJ4;J?Q2XOYf8k(w<8V$` zFAa*o%nBtybvn{r01^OXo$dY?zmucdI!#1ov_ieRo-6k5;k6Vg>bI(meMPdebXU5g z8zQ~K(T;A~*15pTRaKB4Ur3vtiql@MfkBE(^|MPo^{$@*6@+jg)1qk{2#o=Uuw*Du zUl9N|8*nLl#JOyU0e)^inNsT+wH^)CbpqisnmI{={JoctUm~{RPwCGSUa%C zJP1M`lr`3|`lRJBC~2FPAMQG#CR|8L3YUvzy;N%iU#x$ z-By^g4gJkEtvyKlAFmrL+m;9j#ejA(L))L@)?BML zry~)%n_jh9=7d}e%KQJ0%&WDBuv{jYNEkC8gXj)=WHrbGg+~ty+*Iq#3R(~Tf)5r* z`>8>>2rQ=BKJ{ElD3!JKsXuJUr$h-Y=z?%SQ6NucPc=Icn#>Ini~0r}ibuIyA1%f0 zU;i|?MUMl7FeUn!T>mLrF0YsAjtjIx=86Y+$hic09XdtN3y zKW@!lR){u1N=;iCSU}Y0{0jK5L%3e$ENAzvk*(HZ5rnX&h&AjL87|Wi-jQWySJ8fB zxEc#l(l%g`H50S7QUJpQXFBoleP53v#m29yD{_-6#vFjAOwyzF2c4ODEp&MVSyTw_J3zxo+GD!BX?g3vPn1_4W}12m--wT)>t;ZSX3KB&HE7E7Q}K9p|FpC)uM@ zNy|%eHvTeHOzL?v;)D)yPXVdWEQZL0O94r?K0mkahjfq3FK#t|pz}5TB#&wQ6v^>9 zFs&1UG2zFHLr!Xsx{m5sw<98OT`VKv`mn5S z>el1N0coJp6ep!W)(bK+5fL#}uS?XQqay36v#n zpoKZ3OK>Q~n<=o*%0F+kq|0^rSK~I19(ZBIl>GMKlBl1MD$g~=e*eWk8pJDE_A?jD zHA{T^=IwLsJPM8@YYY<O;{-j)ekTc34sFm2fP zFL;u#cFL}wz`C!wO@%fgqbANfM(_rm;DX>hjXN%rkx*RQlg%xD!}Ot+G&`5xh_H!@ z={(I@dkb@C-7_+Q>!078>ZT1~dUWghaWJy_g9GVc+2a^opBP@2=KJ|Qe1u38B6GYEjhcen5H`KLBpE{TbEl+)*-PIY+KIG0lsb#POhb{_#R&k&S_2?bCqmb zjk?QTXI#i5o692`&xwkQvA?11D}^uimVt4X#k0wxOR~jA^(tGTcaKq){ye5(`asPB zTw+M>K+xe{VU}uJ*ytsmJtZGZ&dt>E^SbnvZR%aH=`^1`t z*6rZXnQKxrE)8tLt-rR%HzR1dW}I9noz_G?P!w%xaMF>i`4b!k($#Wv?T}jc2d3yc zx^4w*u_ui5C43nqDYRD^4ZGE(p1l}fVfLx1_417T?-C~>?Y8$!z2DT6na+-4lmWbs z^qi@i3kPj5*Wy=rM@)bnWJuP6gKOS2uO0__>m;^}mg+>US&XaKPHe7D?43*EP&OQ^ zb30mZDr6X)R+(ApA5^aIr1`^!8K<|h5!!OQtWb}7(#6eSCgSCO(96=@v5V}>DQ&y4 zOjG{d*P5n^$}o0_8XDF=1kJpBSLz8Cvi7Dyo|3)l&3})A5|6mVE|So4(V8jbo(YM* z>FAf*5QQrp&>_~J`N}=Hj)zEG5<4?9bdF0iFdQheBJUY?#&yLD0E 
zZ*M${?omN*L)#-g3;Z}etNL2vo&AuJqP*usZd`t2pKs=$`co0-M|U|MbC2S&Ruz&} z0tivOQcZ~MWD0Mr**?9lV3Lz-f8v4J&8n0opUi~auIV27rK=SMF|X|zf9dt-EHlPR_b`ZMph~(3z?SJpFOYt3k`F);#+Lyt)I9riVFCTQxY&j9-~}c$DaMd^b7$ z3FYB*Dj@pN zfE)oC;&6qte<*~pJ->d=`)uk~{VCMjzkmAJV6(QMs(;Kvc=@f3`EXy|44+!jbH)&l zp}N6MwsUpN`PI={Jq4231NHZ$cz@~+8|GUZ_23N-l-&z@-Z3S?WSXmS%C@gXpr{MW zVZ=3XIpzA1Uz>adfz@Ob_7NIjE#}90<<){~Tm^}z-gVXT@HAI$7wjH`&#Abp>owYn z%6}am_xK#Fx9Il9g?c<}*W$ms>F5^yQg|~>Ki_l6q}C$$Nv=kgCTDM2w^tkGv6F9? z#$>NY*=Q-8ZSDrAp7(v7v2SSJyvwKLLF=zDi*-JJW_p^}dR*3`boC|-M`;1iOj#J3 z@l7>-<5&+YaIpG7xG6&G+DTf##a~e%x*|~aR&KZDuj8Vd8vQQ~E!BO#^&NvLk|Zu= zmfgy+#fY=yOvMQ@;$@MwABFWF3$JNV^9z3~z2u@&LXkXlYxBcIIA_4X;)z|*oj5Mx z(|gI8Px2~wK5XCVlDfJhW%J!5prq*Av0PJ0nIW^b#Ludu=7am*<%jA{22)Ob_CLF| zSbv#dmMr>JZlqlA(qljWfq_0n)T~<{yQk*m(!K2h3OAo2RV6D|*$-w?EXG_KOT7hD zc?qNCTZsYYs)U06xTykJO=s66%J18XJtX|IEI=kw?uqvo#9l|49e@55Efx#}Xx8gz zkFK6OX1w#9l0&Uhk-O|C6jgl?Wg$rufh1{DSBS_P8eeOWI0lD>)yYw5|Aev4azMkp zv!l+$bQYt;!#n;|igkdK>#)?5WnJ99Uf)sTx09m|BcZ0`ZIlpxT<4{eIEL!d@?9FX z_b^f@4QA%LH;WQv=-z+E5Gelq5jihtJ(3Of+6vRCs^Wh- zW*F0?Dz%4tiu7Av#=;lbjlJ(Jnb{VT$f!2a zqFb_4s|kMGwiS)*{;b@xiaweM^ydk1}ZX-rp zE8uO!fV6BY1153%#KZ))Q%OkJ$B#eJRGPlrAL*2 zy9OqjY$~g}D(5DK6Tfe_Nc3e1*Z#^WqqHfV?tc|?E=K%P zf3yV7Y2Pew%RPFuZ#c%W5MtMlwQ+FZipm}!_cr|cbya|75HJz!y#<&`gK_`Jb;oRG3mANRJKU-t>sF#vYjR`6cKrlx?ue`lptY#Fwt-QXI`c^<`dhN+_9hJrXdj2~dWNG^=WSps!{ zr=%h*_Z8>S%;jdYg#k)>NyZK=(Lv%>s!-j5R$iUBsy5pDXa}wz4J(fA_uT8!A~I&I zuiV=FMoy+x{vPJO1lf?aV+>8Kw*Q-G=m(~oa&keDk#D<9F2iIqIMRZmOalsFfaN`4 zzJ|D~8XB@d@x2Ch@?|ue9Pr>bU|kMg4&vK`w)_@@y%jAQW@ctVWq;&kzYq~&OBNB? 
zXYdDd3K~0KW*)}fB(M5RHk_lN-1e+)ZBa=xtT{ub_-I}Ge2@NJeY)^y#`iTz62VZ} ze$RpJ53r;p5Psreqz3XrgvS2))UMkw02Um~c3dUV2u2j2L4bw^0{XtTcg+5rf9n9+UYGQ&9hMfO#=LUL%jjQ>YY& zgjssvV*8e=Uy9~1r#L$lBf%~E5v4C^r4E9;>WxUkACp>JX%FlXnK8=WNN=~XK$a#C z&%t4ltQ_S7N+`&{lK~j1*DBOVN8^u>*099E(xP~ZvQ_H%K);CrIuJv*wR_M|U+_0^ zf}cA(!%R$%g4+l^T6lZHEwxX@_{y{eP3Nm@ONWn*ws+3wC^WSiP5+`-(vEBnbMp;% z$$yhsMNB{bcc_#^x}QoN(8qwY?HF!DqP9)prWwUa4NjRonv=bTx!1tv4P5Q3Ks(V4 z@D=t+)Fu+r0kTOT~=U|2u$+s+M=2{>p50UEk+g6Q4fnOi-ac z)aZG3S6_Ul=l3=mBXOIu9Yf;MC2zQICKn&E7!_VVzCgACu*!h9PzQ(zb|0YAY^m6q zqv1WHg|nlL*V=I9Z$NMuNs&2%JB9lcd+)TWF9{3ytZH71;8=LKR-l9!khniTnhQ~%uFu8#@7Xb-v8^Wxma4X zu{*sRCi5@>l~&^Og=cy04x$gjEGGQBO5nRNMsqZG<^9$0?EEIG_qwvLyab+~9~f}s z{RPtaQ*aOe^7ZSzGH1I=I<}EqBT}&1;L3^VhsIY!Q&S=olrPNy&N|1+O00UvM>};a za=XQrfJ$b3$m)pV%*d_2-lhvZi&G`}B@sFN5upPl+?d99*+0&cPZT97OMLegZpwa* znumgf#-77KjY6|A!0QB=)nI_k2x->PY$hCh{0{K3h1oVj;K3hItDk@>IP}eQYq2nGN`=g;6m6oCDb+-3(=&2{C^ zhR4?LwKP9p$ZumWOo~w+)M2#kN#@z@F`zW>pOMlnbmqYx=$ZLZu=(b6Pj?L5<5^+Y z2;df@a{vBW`~9&2Q92UT$AjTI$bYr5r3EZg+KX+CkhH>SW|mZrEXZ?pcXTo1Lr%AN zfm*^XMuk$s*4Z?8g<0&JzOzk*d~WKz-)NjXcg^Ra;s}J9NrVm`NIvYuK~~lnNRTZ^M44WB>VPb>j<|?|H2ilG*v%E{z64lH}bp^ zViUT37xlMmyF`>`gH0d5Ha2U=dA0Gh^_;e~`EPq4HhlO49A|iQZ(IMCbK94I2hCh` zqx@UHhbodIWF5U8K25RSD%%k~KO{)PB?(o^$ehLZc_Hhqg{*g=ZVOxFXr2DNEboL6y(0rqvlW) ztwa~4QAk_ZL&uc~IbfvVCiO>Z`NR~HsX-*B`u0{u({pLlxv$9j5UlC7~fYHmNM!y;9>?vE$Rm0;e>z@Cmd--yaI2BN8YT6}=HyiS4?(7_tv3B>8|4># zXhnZ^2$thfmK7t`FU7;_*c;G=BZJ`?Pz}z)8oOZ0$#h?2*EMUGm5+8mNyN9UN3?I-(x_G#y&{sQI-Hss05Roa(#pzRa*@%|h?> z^d`+g-o;Qh#lmaH%Vp=1uDu~~xE8NXhuznY)4{i1)s zeZyRbQBZ141c)t-f?kao2zIEj7Acv0R%zMMQt5hJ=M_S!usdNDvC?bvyuYs!it3za zQs`XwmAb#O(>%6z{vt_+xn|t_K)tmJr#T-Yy@$%fivr@U`@yQbMa7{#SJ5a|M)CA{ zp!&+^n|uu#bvsWw8G9~UTj*6v!vDa zmi{lwk}Q(Ir!$4#xsLq;>8pc-gKyZ|3o^QGkipNrZ~q~m-?RkVUh5JT$fow4I=k!@ zhRy%J>>Si|q9X4bqte7nT`_kV7%}v?#16UAnl|59N9(edMr)!OnKsVwyuo2>WET9m z)zzo{d;!gbNpHQ2m>!JRWxwb*G`$*0mg$!KyOrXUQH#;b%h*4NL+tGN$TU$gIAUhZ zzYB^j{)o+z4ovpZ#OEoN5Px7*g;M}-hg5tKod>JS{dj9dAv@K279 
zukXHHy!j1Ij($TGSS{Z_!7BwqLY;m#4_Ny`OPRbu)8Fr-RE_7CUBf?x6^59&F0OKd zueKaKB~rAa9^adsbjearH<{IICr{E88jLZE1Anm0VTA;xxnb;PqNhjGym3W!^qWl zyN4xTeI;{=Xz{mN6^il-*RVvrz|dlQeX(%F259Iom{A6-%CMbW$bvdpr3l@Rd$n}k zC;7J)6~_j3rh9^jekZ^(sVqq@RN>Eb@Q&3v3B!?k7Q_@i0|&Z@2=c>%6!S zb&=`5J$@mD!P8^L$)t!Gx`cLzLQ z|G}l7(q@|%Odi{kF_>`+Dv**EJm74w7LDRY+oT)PC`psfD<0F@H9W|t5e(x*EhKRN zGTpauKc=RvFm%_RpyRt@DR9awHQXpwg|>#LH^hC?cxS9lti$GZno+KXUm#I}^diss zZj!Hh@E@HdU*W$o$;5aTcAJNL6x18~Acc6EZc=qX+;^Hp$}^OrjN)ex1NzW&4W z>tHb6o*Ke{@Zwkeo+=fw?H8t}tduCXU497soJye^0W=(56@pzM^Tcujo@J%NfDX_5 zLzxDVQ_^HsW5l=-eWo?Ue6Z+PYk7$7|5-nnuNI11F?ReNwBjTG&L@r|yrwQjo7T#5 z`=;EM^kXZe$FXsRH-(hmnEypYgyMel8|)iB#kN}{PJ?HrNu0?>B5;2onFS9?; zRA|ncV#V)@-c2&4>u}VqKf@e3w`qt2PubI5qnBuA{1E<(|KQ;(;g(~K)8y69HIUw3 zp%2BpF3V;PC+sXv@ap}?}%u@v4 zE?hE(^Rg(n<%w&RVwUY?tzA4##c74z)=B%Hp(52E57AVs*fA53jGY@ewW{FRJl~8`Eu$d2{&|kxsmi6~e z%hwRfSGFk4PFoueXS=+lI!&5Gr6fa|sQ2&+vB~S-S8gYcp%wf{8yZCA0AlU{FUDyy zNZv{`X3)BxzIQQsEYOaO&0~pfXYc6qOu5q;JSE(yOP*~aCZdN@brEfBM8Eb+i7HA> zETZhsd)dHtiVCVvN=*7eM{ESA8gtYYHofa3G5T50(0+?uS8OW$HS{*)|@UrpCi9p6C3wp~AdkXZ|V8=|2D{~u+0Y*W>&0S!oCjzI2Q9_xA zOnt(1PYfgfHaR0t{sm#~Pd<{CLbGYTV6QUzgsRA;VtabN){9J58**o;a^8l zArLj@EmiRfEXTv*svh&0XmfURYtAx(i&*atdPAR`-as&NVG|Q@k;66-AnS-V#Kezz4aPB^crc~ z$1M1Sxu2UBgkg{Rmyq@ZmSaS2^A##+?!5I7byHb>Oc1;23IB4h$KRdYc9ZM&wUqkg zHu!r5FQ3Hd7B^9YzYtx#*Pr<`H|1IhJ)@;|Qf^r%qF#wmdFN{hY>n1;CBV&4F;NpiUlJLOSJsR}o z_EWT3Mx0t(bg}{h+dNV*q-Sh6@Yvwtvdd0Srya*&QJ}y;hvVvZ7wXN`xOdAVZ`n0E zFJArsihJ{DEZ4AYoMtJdBqE9?Wr~a$nllt7DJAm|G7p(4B~i+(Otq0jB$;Q5GDkw@ zIrEew&#wriI0*Nyj*E+<0#No9^?YL9oDm8Q#fGTUJ=P)9e4-iKR21&orr(`z zOl>dKvnoh0_KrBD&y%Uel+a@P)U08&FUWh#%+eEvJPPM0S7&q!t)1-IshVch)${YF zWpj3sv&h#AxL;PoP;Xppgf;~IGg{#zpmH1K2W9DxG!TL+Xv-1+NxsG9{HcRarStEm zM!J1kzqpSfG-M>hr|J2Kr_j&U*Hi2hj-!lx@P~2lMDy<>2yzlF@igeV*IN0UA7x~_ z6?^Rh)gQI(Qg5n3Em?sI2UC-1(?TTxUe;_>aNW`btk%=pdl=5=e*XTmpbD>O zT}4k5o!~rP>E73{eWSaBiQ=e~&t}2vM_H@7)ZYn(e$`u*A`(5(lAixwkY_DbY-^d` z`KT{de*~g0tw^eBwX5KHU@eE-uk)jWr3sOPqK`X<4dMzg|6Hi>MmMmEmR%;mLE+FN 
z!e7v^t^OCu%EaJ(CIpHJqD8|{%pV(VFecdW;I`whZ=<)rsoaVaW4!yLt@>o$QpW@7 zO0Hp{li`4vB3O==5CTB~X)^Gm2akNM$bDqPKmhqkm*q`HAM&)7=@4tkh&H5!NL7hj zp6|_r!=w-?fMwE9SXLpV9oYVfE z)jG32?W8fowi1)@tv?;LZrt@*pLK(d<+iO!L2fIHukX;SYfo1`sg6Y&i`T(G+9_hnDi!%8 z%|uav^MfZtb-0QCIlh4zyJ@QMP!D}yf3)n!&Ifj8Um5cjokb${y z?$%RpcyFgMEA!dW*e2dP)b5t#QPdY%F?7RYyEKQ3b*>a&Va4v!q_w*Jjx*N1bW$^V zhh*p9Q=y}~5OsQ|=}@g(pUsoVCw~Qg&tdga!)BQCKm2196zIshs}Uaa%2J?Icf>p6f=zO6v#3 z%h)q|J+r^+6#whdnPzgG9G&0+uU!1eftB=0uv+3$j~X=fQF|2|Iw#6xzHjW9Odh`$ zU)(>Jv&viJLGM_|>B$s24Pa)YHf*d~L5DU}6|&xuJ}64Dx*|Tt^P{LCf23$csNLM# zdqZcCcU9+)v}qcf)eBb$?%*jCXi@8W-gUbE3Z$AXn)HIVIZC3*fkJpJnuT#4RF%5{ zMf_rK9$C*8?I)NsJ~EDk$dPGI(W4}Hm!tHV%8~;0scW(GyC+ONP1g&W9=J9Z|B_$x zpw#uvQh#XS=I%nj>q-ijI4SdNMudRDdc#?T0du?!s)5`RSw zeT`ZRr7%n&OVP-5D+IQhLP+6NPUvoJ@uuNHy-pkVYLmph?F;^5l8c{(etMVeRF@yM zWl!ARJUG7NSJxnq)BVM_9s;_xmQ+ZZb3;qbuLrOB@J2(D-(>kbr|)2h&DfsE#dpN4 zEoc3<%`o!zg_a7$y?!<(bAlnXkC!*`<0R?T_4dv_focP_d3I~YlMO~?FWxOY{O(_d zcDJTr%T-37Pd^1#MAkp74<3{LeT}trfjjV7XK_}?X>Qr6@r_Gst_Ra!?HIRObMDjo z@xbD0v48lsyQEzWJ3Xl9kg!TaWc56s&XYiLd{{w10CK;@L(_i#R(34=(Di}lFY-t6m@})3;F$Xv zAO^%rhud;P4I=D4PFM^^T&oBp)PzwS4i%Zz;|O?yMiS2dDnVKN4D%|(`(%}Q%EY}-Ny&to zn;3kBAR)3}jS{``7e>~nVCOu6xmAmX8@K7VY*Bi-;#X8mObk7=;4nff-{P2?LH`Tf zz!XfwY+dThYBh!WWpPZfx>s?hXiQppQ>FA>sl~bC>%r7VhVLBK9Q^Y5MeNc;=f3k- z>Go+gx{v>6_O>Xr(R<^d+y0?>g!@NMh~W3`YeL!Md_!3)erJ-b-G^`76@9RKIU0hVq-F7 zReqZ0WBoTJI&$LqPFmJdXVn|^VmR9X^0<(Jy_1l@4P|@9DeRj z%;+K}w@s2l{ylq~%Fgv&)d`j~R%U*ri*eUn%`BHtx-}U~x|B7DWxO&o=c1K*aE`XV|~!Wj6tWRxAjQeK_lUNXMzj7WKj%FscF5F)O^DncAJ@6 zc>9K>6)vH4t}ZkW(oU+a6%^GN-!iO5b0yDI!I)32xJ_^7aH0MG44NHpU5Bsv{F6$Q zjm(ZGtu^=zj#cSp;vn84mA;EH?VGGgE(q5HDdR<-7+M&~( zes8&*bM{BJ)%((F7ZcJo-CkExIMT;mKsNehhtT@?`Z6e7$^aDv9^mUWRznx!BH0sN zxO1u^wX=$|?VTm-bYK0~Qq47Ol@vpl@W|B^rcFs{kQ*-(Stvx}zXuTh<;ml=Y!xrt z@7u^$b4hh-gK;@5vB{+BVS=UO#pUo6#)HoT&4nelF0MoEO+-%&fA^Zta`&G;f0{ND z(qpo}Li@g%$2Q47^%o=>&UdD+J{po`&4QK_wdV#KlJg*y{j_EP$^DzvhT#Mj^prWx zSRDh zNPM=D>=;pd$=rtzY?x|m-x2ren5`Xye>4Z0YYY#Fo>ZJkQ?+MvX}9pa{AI0bxYOp~ 
z*0Mtsr>DDirFXk8-0q|s&D9q;E??3)dL&GUWz)n!Yxqrd5!T>E^YnO3{ffEmu1?&W%KCpKhNsO0Z?lfGn{w-^p_au0ZuJ=LvZwvKvqN-9ykKgd`6?fY1 zH+AB9Ybv!XzfKGtRhE9K?;%>rKJ;9xkI|^AAW@gBO+bZvNXw_Fb2>>?Rh4v*OW*2{ z4xLxE1dq~g))xKIi%ZQ%GWu2u`p;#&dXU5(sOmhlbW6qL5YtXYh3+KD0ahVVpE~^* z`&RR7rS>oHpV=yY_MX+>m!*`6J?PVm(HlyDPdk(BHDRiZ`oOwbMd<&BE;7glB7- z8J{)m`0e)mafg4XS6kGsl)3n+bivsM6V?hTTb7hY&t1IS>posDy&P+J>uYK^!38)7 zgy-+)2XNE3E`C=hDGe_1YfJPioj+{p?5JYaRINTj?f z@Tx<;m)`f454Pq_JJ7Y~q4#xS{oO4R%Ta zK(DoL%IS#Ck8OrbX?O3L6`HAOs^4r}sS!Ib`{TW@{B3D(v*6V-e;UfCXY2;vyj&Qc zmwNu4W`Ak_xPb~KzwbxtF|s;NDYO}GtO4i099k)#tE&1)Z#MkN7A_V=wuJD9I$v0> zR%f#!txZaw(5M<%JUGTDi;VS}l4K86AzkRR%elLrU9S`G6dh4XR7`OBcs|@*KJafeN@5a*;cxca=|%4*NYx`K|THj80m5t!iBKmWN-*Ot|IS#`imwGiQCTsLh+* z5A24sX!PPT0?ol;K)R<8{tLvI&Wh~Tstk+n#&k16-1`)i+v&jqXIq%@ta?wQv<1xfL#?S_*}?K3 zYc1tlBa=8EWlxBn$b4fXDK)lPv9595q@=pQkJjpwf(=uP>|6`seud@EV}Nt$qq!l3 zgO{ZTY1s7!Yfv$XTmQ5u|NiE(cTLR&M8|F9W4hPf;pMF8aGFx&&%1A*r?shNCwoU{ zoU5fmpEhkIxl+|Ic1nr}Dq|t#=o;xRKF82*X986{vS` z%rf)y=g;eOw?@B_ZNKk0w1xGoo=)|MWJD87ZDx1Z8nELTRpMhB2X`w>oqb`Rn<+SM zNR|U$WqV$HmAQHIKvPqbU2a=o<&!H9Y=S7MT1+2HN{Ihdhq!Eu^j-M!qsQ_MYj2G55|w&&bi>^|Po*5YN=_qdO( zU%t)FHBY`VcPjatvDE_0aE_3#D$J|v>gC0y0!I4$1Dl`C4XF&C@t7Q)-n2#eU2Nv!W7^6S8yy)E_BRCB{0$m8 zD;QtQJ4rzS>KF?$Q7;0K-i z2{?u6B;RBoJXjiTvZk9>{ciZ7%3bsEFZ+HQ98>eYBk^wbH-|TOP+L9Kl}aQ2#kqg} z%o)9Y_Uy*-f3pYeSk`4VduU(lFrD$8A}-m`Xw?S_1r@LXWp+*jmb0E>mg@XB!Y$5M zPHl8NR{HX)0p0F~9Vbj%eNG)2c^Gs2ikhzlnih8@E4^1u(`s;iu1M^qUp^<4#KM)_ zmhi>-jlXkK?GQ5hi0NB-EEFO;23aaCs1`Y4)^G!}z&+>~t%)wQv>i9SapKGz^$A|r zjs3=zi~Y{d>iYe2z4uaGJ$E$h-=cf;4XrJ+#Y_rkBdeIkjMw+SzD>AGa@2De`R87oRmtb!G>Cdh7Bm#3*6E^9JHo6S6XG!_|aT50}9Ly%sd4%V!5ttY?n znPkm1z-k=43_mImvhFz`ejzx58DqP?*8u|US zQEwGvjrHH<<|`l?L;ou$M*{a+E=S#thmUVenT@lz!n|Me#t}vh7dn4NvroZhY)3zr zihl6l*Y>!FNulqodTGg>{P8EHuWCB)PQAOC^>-=AJX?K>-8wGJX}0O=_Gis8nYM{N zd2QY1RXNnclzXdx+SM;6L?rv)bCo#P@H(rs?O0v?awd4H>=boYK$$UZQR~EUwbjir zV(m8#ku$jG_-xX)S!317(_NnWczHE&1Qv3gw^%D9D_f|a5;%kVD#xRfJ|ub9>i|fE 
zLqg568i(C0K8>A>4E9nVTRdpS-E!Q)Cp+vKp*eAIC}oCOTHj(iv+Kw1ZKrV9K4ILS zZ1Q!3=^y8Fl^%~@gpAbQ)t4-&0bANeyeux}-;XTkTKyAc<_6`X(P1N%pS#^P{6cbc zS(@za7dpaDoE$QJB&DG#rk(vEtSxDa#1M7k!q&vS-QzZFbi!{B{XMb8F0ZeN=eV6F zt$N+aftPnOWU?u}V(C!mhA6tnQ5-Uof9R`d+V1mk?}*aO{(#FSO`Hu#8qUidxb18XWe{k2lTWigH z=QL;6-+R`JkJt;MlvB2?h*@QG1%lDHlxP7C>$FV0A44W|+jAy2UiddmD6L>-}<9=YB29 z!QDDj`f^6a#dEB0?BOFt31QA`PNu94(yWizow@(Lg(ohxRG+|hZo8V{j+MnIHa035 zB(mLf|J(W-qE!PIhHVY0L{G>`L%a|#UqJ=xIn@IajfFM5)9WIpkNHmds1|3wm0yYD zp!b`=@-NYgsjjc7J|S$zi-ooDrOnbKaeBX_6%X@+%Fyd==LxG0@3b{q=%>N%c)?Xs zS|3fQUoO#DMmzBBhc!?eAg|~Z7u^2hyBtc?2af&SE%b3z z2c6xP+9^xHR#jpTAZrXhj%wO1ReKTHzkk-l6exZy62dw#vNk}kBLOZ5U`E!0wWxII zQhB!F-+Rb$T6vJl8U4Q&5Q9mCfJ2&qR?#Y$QO-aNrR=}_-VZ8CY3WE}FPdVYND%{K zikckm=dxmH=l=c7tAV!czd+IHoOXEVHOBRC?pNw_?%f;nc{U(`0e&4`Z>ON$d>CpD%ijY!ul?`K zc_He(a4%xaHXBtv9n2oM=pJ-`)NuQ3NCqbY=l&anNhUik8?LmDt&_!+LdenEytv%e z1^rV*JOC~<*KX|$F27&iE8dp_NOeHL=Ag}Kh5i527gd~2#TD+*-K`)95voI8EVa!*N*FoP$k2YW(C}c!@5^bu< zTR%SQ1XCt&$haeJD7-f9oSTNuZRGfzjc=Xo|M0ozi}{h??}9yF{Pf4$LP_5^f5w&> zr7FR@=2F&KJ3G6;!KaqLF9261&sAVrarr_2^Tx}0O7~0^ZMdBbp2Lg(v{Ff=b(YKRHtfAE;S;+J+n&I^aZj-AfG=`^g1tr%Ws-Ht%Avf>W=uQWj6uEl5`gLjegs<(`doWVr<+i0EF~jqK5*xXUw-) zpk@R(p%i`2(o!H;!$l0M87=+^nA~(v*j4q&`^x=<ns0!1OB!5m3h zh@}vY0W!t?I6L5fH3b(R0mIro5iWBBlXkL74~b6>*wpVVd*xITt{#8FCYdq#CLSrb zG!+&(QxKm<6%dH?AmMq1U7-X_)+VS5+K#o2P_BNs=}UP&r$@!K zkPsSvk0Hw%tP>x7kopl!6$-Qd3ixK-r&O~KcJbjp&P$&gh~g3 zu1f+KKoa0)^iGCHvEX;xxCI=P8w+qTd0I^R8rGt$C6L$d;!A<{@=E?7ZdGk24=pEH z*m~3(31J?tn{Nt(56tMtoKaVAd*)27G*JKgVzC|kYH_-qz((QUA!2$91dr06KT_i- z&>>~=c-bDV2X6S46Tya=qPX`Y)rR-OU4~&ryj+hXZ@3nz%2o*-CsEPTv zX?nH{MPe*x3WB+ZYNh$WTn4XXQ4gb+ryU%!T%#}bDBI_%1LRatQDKB{o^pG@`c5oh z^8EN}Hyy{Eos-Gm9v|eH3*dVwqoATMfepTqI>pkauowv!5P+w@NBq*$ul zn=E`;Du!l0+N6=Dln1Y0zvgHdu|0~aZApM5whZUytww&{te+gj#wx_Qdcay~nT_#e zH1gAQdyz3mtTyysh%QP?iyHWHWX=O#vs}QBF180)3OUbN1GxAEGpHw-uW&Eb5eJFs z1xi*eU}Km_6od)qeN0TnX{_;FmX6>gD+Pu672UVbW$cYnqT|gv0qYWzfKbz{hZv~} zzMT^7)tu*ilsGE`o5HE_IwzE~s{x}{lm@8j?}Fmf%#58BJYes-yO$^H1wMF{vGxMW 
z=yy%GN_7vOla<|#UTfI3y_aujwI6z`QS$L4tN-Vk?g?UZG<}1{6ZR*Q7?K$W=j2k7 zx+G;53v_a7N>*O}kkZo6B~>*FPtHEegJBB$iPQc0P+<}Z$qpWjPkLh}mw63O6f7Y! zvXImi}JI~p7d#tI+^4kG?s`2w2j7>L8&t+oq|nFOFlzCUOx zQ{dhCK7D$j#{nN>F3Ng(t{-=$1oRUBpbm6<`fKs_~m zp6g1mnk5Z-L3(J6FM)*7EC97-N&{Mk^t$@`?_shq_$@A~J@k%Jo9f^}Vz0o$8(a<{ zUcU(xb~|t^J-}GiG{>{`r&sWxV!xW}keUp4sg?XCK0A#Q)N_9{rkR9lF?kSDQ52SW z365Pu0xXqWzH&nkR`X04JXvyFXs0YjTEJv%hkP)v5DdL^$($jRsJW(n?w+_kn1nQi zF>X$Sl41%O3(x>!Wl^ea?Uqz1Seh_T($()w+>g z2%^QQQxPHA42RIP38Wu^rSg7qJzvkV3qjz=Y|q}k%=wh1>f}LorfC1C76LNl@(-~X_~?_on?34#F^p$#V&1vaBNSOXKaEaHDA8muG;3gAF@MF|?_}1RQ(XLqd8S&)-e5KD1+)@Rk0!fPTy)@= zdgkiY5PCK?Q)Gf4abO*IaD_8a)?#rB|2~DVpM21$*Fig05?l(QM^B$_j?&0?DS;TX zcZ&gh@y?*L^ca&71W7Cbd29ocJP(b>%EIX_mfmPVo00E^6+u3>Yg5(6lL+fUQ7f&d z6}dLSMqLEYT8I1!W--sefA2At{A^OZMua#d3eU_wYF!3u0M+b%d;C9>V1|DUr9G9Y z)-PLykgSFZtjoWzZEj@8lb7%RF*W#Ka%jCBA$ z0+NQgdbhb~nR5R4V;@{AXjmW`EI8}q7M7~D1G9k}NMGSvcG4w9vJreOZx04L1&L!A z1VfM*5!TH{k(L6G5z3S)m~n}Mx)(ADdIpA57|{X0ct7^yK`Ry?1I!+7V3!KRU8mo= z)g7HzFsCrY>Xzp`FMv!l&8%Hrd$DM&+?O3e!_#veQDPuqCKz~6;zV!>Tjw$*eO)!c zpAsTBIQkSe3^`PLTQ`V31_W0CTyJFoG7oehNWO`w*9S;M+2l7m@=$*!Q~lty)=MXYF&IC40$Fy&)d%Z9`P>i z?!<3u!xHe*ch`ZK;1E#@4l3kjJ^%98P!rXli$_=-qgg{Ta&qEv4e2=U$nD1+v=C(_ z%xUgxA3PEDYHMpjM%g1M=#P}k9NEMc*ge6!=3es%??eRb9-mUq2f`)LFCwCf_&}<> z-SO<%GvW~%5D?&R1Q(nz)UgwrDAaX>G8%mocNcx)IyjkmF>jwC? z!|@W!$G-cq2!Smzz=C`{U{(x?Y&W^Tc+OOa2}m8Ix0;emvj2NYD)IHw4znnB-Xnk7 z`o^_q?pN)1x=0koSGj|9R(M7Y!9UfHm%hyg4QMo@n-00$IT@LC zT3T9g0=SFD(_Ngq+pfo&zjf`*YQuO+0MQ(H{rW(H$`H-|L|nffB=oHZzdZy*yBS;6 z$w>m2$t&D+y>rSeZ&sfQ0Z>Jk$EtHyuu?xI+`1KcNDACwsMs8blXVg?2~CI{R0vPj z6$fT;u0wG^Lv0c7W)8X!bzgpiuX7SNVfhpJOR6@Pc@xZ`JGF=t3K$qRAA2psYf`9? 
z-`}e@T8c||nw{L5sKLng>M%uo$>`kzim}@Fdh(RdNuRU}M{HBzR<3T> zxtffd;|P>@UP6$`=zDHF1CPxKJBg(iZxV2#$fEMFLootmf(B0+57sv?FSlo7t z(+%F=XlQ8>0-qx%DR4UZ%6z94Y&H=i_J8y6(!{j1o7i2aucxsDG&RRj~05g5_CAVM!EINezY^3sJZO zcz&^dO0s$9i3sQ4s-!?8!bvz6kt}jbN`{lJC%=E>BC4jnZ4@$jjdPzE457TIW0R0U zz1g5@kgTyXRAlOj>%vN8nJYI* zT+kIh)|u6d*4sAz;3Eu)I#QDhO=LdYjhQO=$QE`~%}Dd97owWZHX!QQKdcs={PA^K zJ_YR}L>a0AT=2y{hgfJVluNsFBA9giiG;7!-_+cbcw^vN$w~R;%WedhYN`1OiXa@3 z^r8q1AXoqL3fv)aMtb8@f2tI@%BL?X-TObdYI757mo^jCBNOcCRt<@2WRh3C!lqK%%Yo6 zMF0OKX{pASGrmGN^fAr?et2sLE-kEOcY4<-l!0`6(sdEpDr2Zj{JRRQX!Wi|7|>`8 zd!~YyhAz~qim$arkNPzek1NQ_cY^}VUF*ZyG-3;)*SZ26C>pnzL3e@=n38t0j{$-W z6{+rURwgtGKVlWz!hhEDiPOeAeYV-Mva(#$-PY$)d6hF5US zdz9*sO2T;La!VcumUA3u3Dz06o3wq?SDeD9l$DhqnjDQN5}w9Z{wUCzefy8%tkk5? zs%z-L9o`W2?MV?&MYJ7?woiG<7i+iB@CR!$?E90_Jc4o~1Y8T>;NY|P@L81Gu+}_N z(EI~JthY=KN1i2hffSy)n&R+2Siqy0?QJFX7e+VH&B0SS8rcarN?#Y zN9g*P^M+CaUsa5Z-cX#hcKH$CO#1GkkqRH%gN3ucCgN?zuP?`cKb-XPJ=gHcz0OIr zSV~#dVMdF!48M1~f$b4+)f5Nx>9(2!%6#NZvKuk6c!fN-IhPT@ge8XHV2&GvT}0TNFw0d6^xi{ zJKR`^Acr(=Ep(_xm?JGXA)s-I1UbFmSlvqSQv&(}0>KqI9><=awx*`04|_2p!tm~Q zctY)p_b6Z~bDf-LvuU>$1s<_kJjZpbj%;skf86C^mI9zz5xYK-W8-yC1QqpGOJ7Uo~(Tb`ol?!@Oh+yjoA0~#z^ys}0rK2}@CTq6A259D zqGyhzb%p+9TG58?vqvrC3!?l7w|z&oCkb7gnD3kD=%hO1x^e0zQI-&=CTQw6Mk6fE zxJo@q4|Y^i@^ckCi)dSBO`+w@aD!D&14U3NBR>b@$&t$#%x=JD?8H$MLN5rT?R$f0 zH*SmpzUlUCB1aUE+=8KD_d_`ag+3%M5`8(<{oNo)QdMx3ZC5kDL-|2E-1u{g>{gUF zIp0S5out|g9XYiN+^DIbopleQ>YX7&Y)?ufUne3%CNpJ0Ax5=NXI=c99P4yA)Peu4} z+5$jzDcSBA^KO+?N;DR>8-Xb$TUSqgH#MKT(OGS3SoC7x{5C$3^9wEzmWb~kCOXJ zv;wY|S=vQHb2QlnQbk0f$w`V`b4@Q!-~PZW>MvDO^qr+7iU<;3TyBX2+;E@?*NM+$yqdW2=hcUij-g| z=TBa{wu794YX@g%XO}E72f%$j?a^8`yBFs!)hL!gS?M_55S8OAbS!|rrN~(3kj+fe zb>yUL-LN14YFz1ILuw86I8!ZT;HTT%#LYXX5SPPTRZ2mRa=E7N4g!ZR+Gea;=ya{o zgkVeX)spTBCUVA8&q$LqNT*CsSCA4$La7073XYZFMhSXzi4b859Hdd*4jL2 zy~{Js+~vkxPU^Ch#IsAGY-3RwQU**9^3&bWhw*aa8@A1!z%vPIl!~l2AlM8P9#df6 zMZeSy7^WNdi>LecD?2FT2;*SB+>^?XmI6=$z%<18g299#RX{`=JZ#|kvq+Ypn<~w$xcas4eNWG^;s6kB!d@g?@h>u^6 
zCY=^BE#c5g>FMkvD1AtYiYYq(r%&BlO-nn?XU^Kt#u@~R#MXVmkzx->?IwyrJb2(T z=3n*(kAqtiBpygXb=(Drs_W$gHsYfUPy@Le<}soGVz*!q5RhCtw8NU@X$ZQS$Uaez z-T`KSnmJA*UrMCSjuS^%53oFGlj@|KxY|$n#6{IlXaEv-ZxG2P^Oxr0rSGU(AcV3z zjZ;I^x(8)k41Tw#uWt*6BJgdhAn<)AQp#B&&Y38nMM#E{<^~qtS0rv|=$wWg85(e? z9uspW<L`~( zmYp^i0~?5tl$S6>MZjl5?+nQ*#)A$e<@k|$5q0UQOD(5QDY$*&ms34L)JDk5;9grT p1D-Wraa8}e0k8iDZ|g=`Thw@Fyh856Ju19pPb-{CJfV5_{{tEiF%|#- diff --git a/3.test_cases/6.stable-diffusion/multi-node/p5-model-scaling-stable-diff.png b/3.test_cases/6.stable-diffusion/multi-node/p5-model-scaling-stable-diff.png new file mode 100644 index 0000000000000000000000000000000000000000..d77d22088087671db390ef6ae7b10f87f6cacf70 GIT binary patch literal 57243 zcmbTecQn`iA3yw2QA)X@VT6W}vZAc4u0*nm2FZ#@*?ZF>qZF0wN@&>QW3PlnX4xZ~ z?7g`k@2>CtJNG&FU-#{t>vS%BKHl&5>-Bm*AM5$?xu_z)b34;^5{a}^;k>LGiL`l$ zM5378MuqPr9^J}|f5dFhY1pb;7}+}LTi+rn>)TqITG*PJ+&pN1%i6}o!d!??gzqHJ z!P~aBmNw%2{AT~>7x*l!jrsRI5Us|WP+OkYv>}mb^@;x|K1j!zkVswI6lBj_b_^fw zaB#d_%33xx!PzqVFLnMxXV`3~-_djb3~NwO7Q0+By|Mdrvy80CzAbw{*1O1F-*i~n zdE(T)pUT$bY=GB&ao$~E>lZDXY5rO)vAe_vLbfzbdHqyz%?*Wrmx#*Pi?DcT*`ShyM5a7SBz#|NZK7 zQyGe!|9}lcRcHbMT2dYAPhyFY^+)gy+y$irxw{XMJxNvnKM6=bfefjR-fApR_mu|1S*M`wVuqe9sUw}>TZ_%ns`b| ziv8yYkNa-Cefw4^Li}Z8f|mOG^Sj&I+mBv+L&~_@WYqWVdAw#CXNQy2)K_|HDK6Cr z@mm|dil2Ji_v9}a35QKBEW{}UaS~7KMwaEF4ySgP{`Zolkm-<&Zy#XlPx;hSQc|99U9`N@7_X3cBiqu-s<^E!EcI4peTsp6 zr(^F={=r8n1- zbQWH2d`>F3;OHn?P*9M!vA*&%L2ExPEiLK$E8ZP48X62q`SVPqy~nQ__I&ku!L3fU zfB*jQh=_>^YTxruGpu^$sAyTMh8yDOO$)92w_i)VpT8_Eo8haKyv+sC`<4IXH` zM#?5OHa4x$p`qJyPY+bg4mZ%VurV=xpXx0ab6*wxd^O=FF35H6*ENZSb{oz41FT!Z zgw5XFs(j@+m&TdFrYswXqm`O)ME+@Qft!nYSHVu1O!JPRmNc&Z%HZ-}zh2&LN=$K> z&}mH6t}J`P%yOeG+@djF^XKPlw4@S&ZM$|2{Q32H3l&u=Zee|W#bOtWxa>q{KBKtp zQ6ZzMq6+S*gAx09uj?_$rS@y^>=U-=q$v`rktms|k&ZIVDJqh=|KI_|;$m;WaC`R5 znpCOMTb=Y#{tRJmvGN?dk)hU%<2bf`-(UI-M!G!38$FZuaR2hU3;Kb67!gVX($ zCe<_55vOU`qz-tlF3s5Ve}7r^so?$D?k|rt^PMxsQc7=j|NN*5kbMgNJ$ss-inq( zGVxDdULM_h<`Xx!=z6T3B0bu^x2MH8D&|tOTwi5yz~rQ*N`!bR*{n??)leajh3oK5 
zw(YeXZA{YcdAy=gk}mdvO_TWPs}zHBVyn&R7^bSlT)2cQk!@ph89mYIwxIorSF7yV zp|ixE6*8`7SB;c#UF`NA<{NCyxGQ_^9L2=~*CoS-2x*T_d+8Y4!-q-OoiVCWQYd*B z(~Oj_CR{&vZxbcmdz1F86D3|d)7+?HN>P2PLxhZPcjTNp=eO_HczgDIVveC_i>Zoj z^`|SJ_hEN27?kePn@vok=@64z1G`tu_<0N z$Y7A4ddH4v#b7@2?>vL@r##v@CofsF~V~gD;1XG zJa??SyIZEr%ci)a;#!ZFqD5z(*e`0Gf6t!POxB~o1C7RQE-o$U3x`z%^RxX36B+Cc zkRHGe9RBoo$2KM~V_SUb=!mF~Q53rQ_3;+Tq6LGgp3>Przx;Yi{d&j8RZzt~Xl3%D zw){p7vgbaMfQnPTGEr#Za$v(fyw9lWY1p&G%OBOqW)HT~s!VP;uZ(4y)V-rblPG?$ zwXOc;u`9>UGef}(C=tp zj4a}|2XzZvu`SdJT%B~B`uDaSV44qIl14$tmZ(d;rATZsmmlbpUx{nDdGY4NV0d8F z`bOEy6BeBkq)6vK+_(*^uDPdApAHPiMRDRO;9<7dv8$>HT}jf;eT5pWmaKP%=yz8> z?WvL8kQl6vP&`$*VzR>u3tv9jUBZpcH9kJR2gk;|BPRv_*-B>q{;k*PG9k$%Vs83b z@$J8lh)rI&zI^x94fCF{F=bRLiV=?OBIfPiW(Ct)BThRIjTyCnYkV*@pH3cwgTn%Q zLpfV&WZi@|V+tAqp1&M6*W}z>DJt{3bAH%j`(sriLi^b5xw9A2j*Bfso(>9(oY8J~ z5guQ*@kBANF*p2ObngY9HWSv()y?hW)dZ%byy*+R^h}+(&i`6k3QXk9 z>_RCzjP4XVwtnW;W5cseNxCAW%>s9Ret7;?f@;@++m%PMwS4F}=ngdGIn7)S;J)(d zh1F2)Tf@pHiVKZe7K|2!Yxb#2I3J8y9Rb>DcA72YgTl0I*%lrTnp%#R<0aj z;BseNXidH4*F7>4b|<^?6|bVLDK3Ymh(Xj+FRGx zJ^|IVtkiFRP=d>ajjo@q3hl-U_;dcKROP^IhN*K5Qm&4^N!>g6^PY{SU=yFTpXiLZ z6QTdmeAFH+HIrf0eK^v7PlIw*Zm&S7^Or3aj3);z&Cr|zqb)NlT zHPv(Oh_JA*wv+p0@m4Cm7sszv1EmVk9O-r%?5hZ3Oq{3);xb#p(P?84F~5M)3 zkMa=vx@j$)vgG`l(BxQVm&zn#WVqc(BO?yDatdFBxSb%HdPH{;#7Uh9i5yQQGS)D zoxXZ>v4~l-SJeD@uftW*x#x`{dGCf|8&UDTe*OA>WD&@dab9?PweeN>AZrXt78mX0dUN&7_-Ck5hpXu|wdc@PteI1N3lNmla<0*R`%hgEKB{(0_q?FN8}jJ`R=K>=-HX;V4>A=>dsGrf|c>p22F|D#NT;B zG=NR55>C?$L>01iUFBzd!gNY7JUrb0-sY`Lg&XU+yZ7udy?RInWTizd8R=wEm zfoY~Kf?BMo=pqHne~uHBr3|a=kB7^o>%J6Im+#^bZQ1=rfmPb@+dpkns562D9x>=G zds2pe`~q45yko6Y_= zOMMyfkWblRCL`B1(r#SQNNC10CfMn|zpm?+5?4Y2a zsdk$hM&cLj$69Yyg^M-d4sYDIVP#+YH60+mE>LBZM!`V2eUAXAO89LaEppYi?b`)W zk&PtgeyWpuLAa)93hhm(YkBwijd>ehyLPSQW(s?V{71DIf2LD+7&<#UQP8|E0ZT?Z zOmvnJ^?vPvikjdNnL@jfS9NdBY;GfCmvK6~EBJWuaqAYWy8Yrh1%&nYHv2&?t6vM= z^4W8K97H(Cbl>W_0&byO!WDF5lIe602`=? 
zeZ=a|Pwwc8Z~vvzlUV8t;+9J4GdwBdu{PIaR_?aY;XpkfGvBUudZ|odtZ5?s#a>-& zigw^gvz5T@MZF*-M?^(M>*iltMeWB#|T!AO6xPqyg~F?jpOhM0|^|`3nL= zVkygQ$ZoRQrM0{kwR&eYP{jxmTQ6$)+w;Wj+Jjo5fgFmQEAH6M z6K2vk+S}S987Z6rO#^r8fviOtO5Yu;+Xw7<8v<3FW3>=X381~aFNJe^Ks~l>8ET4} z>-gMAlgPndX&$cqZ?t&!^$yiVml3DZMMWm?-X)Mmf1D#mG?iQTBO9PnFqZo6O=Hpe zsK(I?J%{mAoc{YUwYmqO=;_fi_ckdE4EFTgguGC**tN0lpewBe zfY2=0wTJim$&O$l!*ibQ(u29_?h}unJUIn*#DG-x`h5GxDeb~3xv_T=oPb9o*%Oy}snFf+DVwzUg7}Sm=a+cW zsU92dZGdL3q|uICky3-tS3b)DMFn;cZLvta?&2N#*QJ?GGe&L8U6L;IN+-DjhFMjTzGTr@`q<*>!U4cW3McLF3mQTpix|cWMkMMwKm&8Hc-2B z9Jo-?UU!x32$jaRaejOx88po{zbR3-uo{(_!m-DPt3JKj;sQ;{Z?w4s08zy&spp>` zmiaSLxuP!^;y7Kw71Y7Dz|@sxb)@dAoE5#v(#lE$Ad=w>gMeO<+piNxkAAy4|K`jC zOM812!HA?Je(XxB-@l6=D5z9#U04VgI-Is%2x*VJ0;vUkEC}L}dc4L76rqndvIWzO zYX(A$BPrtPrQDz+`Ala+667yj6(j|5DqpbTbaWYpT4Zi+t}y=j;@kUUGxMDb8R(Nx zi+p^1D9#I<7|j3El%xvI8rV??_Du>Eu`oaF&B-;MmjS76PeaTAbSrAkLBX&efW1$# zV|LTiKd3Myq$}H@+Cz0Irya%(D*`zn@`Qsvs-qkc`W+Ox?|?kFn-W<_-n&?CetAf} za7ER{H|!`)e3I^kAU}>6p#1|9j?ey9P%&;uYp97+17Q|wWYZQJ-stU<_(=<=uuD;>egwM5eKpJOK#8W9Y{{eM`eOdlW?u^H?ji>aqed6b_fkJfMioM zGifo@G&JwQHY?mKlmY;OroYmwJ`=IfA>>vdBQUMdvH2e&U8Kgj9CfuMo%}ruAXZ)W z9)OM}iMo1vZ^qnl2I+qM!?dx_W34fZ@m<=#t8dzynnWtU|DaJ0J^9vlxSj-1;EUEZ z15ODt*tim^xeT01*8IhM^7gZvt&0$yyP`~UeD@yb%;kq}L|$QBs$=J~%)@$J`FxcS zi3s8iDI~_R9s1R+u$S4m>R~>S>WRV)71YPcnHi#$0!be#UJ*3cEc@9Ryin5B6-7{_ zTsjkHBG0 z+l%)%)d;Ol#RZ3hhVc)v=E#0-YpCuPkv z0_Lhq7>6iv0y7vKoT=7?=xPgM1+_)UeRYw&;eGno*8u^v(i^L331e*rFDX31w{B|4 zlQ~@!$I4gncWWU47cC!A2W)9r6CbDu@^j`MZdsU}=Ck(q&wXAQg)AyLT6R zY^>-KMAKHf24}i?ysJj+kkGAJ*I?GE!pc*o%>tzhva4(-)RsNabUMuE=*!hS`Dkgt zCdBPWzX8Nn;BZ346+|sabrZ{WoH}fgH&y2La%#TKqT#&)J=ER7%fZ?{T2y>u06wZ# z9;=bTvpiGQDBvgKp46}^n#NpN<*%D6ERud)Wh4Y@8x3iYG}@Zs*QDd}1b6i8_%%Lp z+o2+?g=xN$$=(f*$m?3%8p(Qr^x22Kw%hFj3bHEwrRav>8aDMydG~(V>i5_W$eNS#fHO{i^-UPMzX5G7U3wsYN zFDaMM=-BSGOh?tnz6zq})uoBYK17iOIxTEekdrf~vNrDX5i^Jc8Z94$*MPhj< zn!H9-BYU6JzV->RL&pt?wF{Zj<{Mv0mRP)@rg}I~GCMo_gyWoVNFeDvV9@1A96|!b$M>)%f#s 
zil%CUpA(wfaP7mtKJT9y_(9MQ(~g{%kc_IMq-#)>2n6almeEAC@s97|r|#~<=G_vX zUjusc4o=Y?ZnZ5?O4^b;C^l0~;!zn_Fny=>-TGGh)~}6B7*AwHUbUQX_H_a{nzv`^ z@6eH6{Joh3hF#fR;w=V4<(&blgh_58h+!M8pjB~rbaOOsyDF>IU;?ZD^afq~7|N*v85gGr>@yU3Qq3m2zmh=q{Ww zH1S?E%=SE|Ae7m-R>hYuvfhqhUm1b!R}4IF>>LM+iF5LBcCPB775Nd&ultIs(`5I) zePvM|>(m-a=aMwjVoUzeZFq+^H#ZwLlom>z62l%w_2_66B19HQI;HRfsJCxFeDvsJ z<0!XRfY31?&8Id7KW4mKJllFe^6(*#^;M$Ug6i&OX0Al7ECu;moUY{0wj0?8Wq=PQ zE^RJ#ab>0L>5p$5N3A9*;YUpmMtSsfcRx4Gnf3qI>S24fjm>a9N1{%C=*^thuNkx~ zT;$r@5+SBXnYNDBCZ_(`(2BXR*L|sf|G~M)^~IIm8aFydM(QFw{J%QOEdq{`SDtRG z1t{xT_@-UpDptxSqPBdXGE9VjKi`dmkjc~^48*^P@F3aP+4X}p5v&a={PT`n2Y>*1 zST#0XO|GZK#7+Rdz);%$Y+S! z=-IRF9P*E)r+(&h{Ya0Lr9B`@lEHRXrPJ>@w$bb5hL5G{YaYoaeWq`0KyL5yN>J>={n@oWv;>zFm;#S2~8 zYV-~T5B($P@o%T9S^f9f$b|;$f(Pk^tSxJ~;i9iL**g7x^ypEG$_sQWAI_7$VF!+8 z@aNan)yaG>{4@@X^ow8I3ZO1n@54eqTm(uD7b7jmS+Nont?4EPJ4U5I0ZdQ5UjUxY zTmJKzTn_2QM$hcLfFVx|+f^{?IGSMi4)83+^vuqhL{zkvmX_MLr?%ha;|t@=jC|j?Jz6->QD**xfB!ZUveVY_aVEO?G<; z!G)!5%QfsC<@s_?sC;b&8dno42oZGUZ=}5J?!?q}%OTR`)N7 z!ce?mN0$U7#`*2IdF~Muk#y6RdSO$0H1~>EFkln+a3y2em`TOO#rE1eOD@6m3z7Da z(u+89(7f~hXc_!mFpJi!TFvPu^x%h%fzpBR<08tWqso-ugdaI3ln15bCI`9jUDzTBw^BcyHHH+UK7fX};^&sRsoB>B?qB3O*b;{gYMB#k-a=k%QrkE^e^I#V<96{Nxs9p^V zH;#1Ttl*C%N;QfU3H3x$TlXp4#?r^TkB}B&XEI7U+xjNeK}Rs7&@sA>vkF-*b&X%g zZSD#|t%v(#*&W8)$(qW>HIWg00xfVc9uT~;MPk#YO=<_e?BBchmKNWSwzkvS!PfwF z+h3K}zCD|w!Q)d8fyVFD-A2+RjMCFQ&#r?{`%3b|(aIz2yOII_0CLkpIao#qv*>UiO*xp`RN3`T23-%2Sv@=%<_G z)2z5o>4F*iGo1Kx)jO+(UjEysjij_RApl0o_(}_oxg1pUnJ+u|o269K?!20>x*0&e z2&mXL*?|MY`G19v}QxhDrTZC9BAHaI*DXZiiBCYXi9BO1r%<&sp z$x|TK%!I0g@_WY>ZBq_lpC+in@T6&T?JGWRtCjy$Z|tilsH4nS)-vGyCn$cMme4s! 
zCFSK@72+1{S#_^!z^drS0-#(ooxH85s2(l1vlo)gNoZ@d2CzEHU=jKA7c71u+Vf6G ziqUYS8JDH=<{Gugf+#w-Mo}Z1GcBT~7P>r`Awhblpg$X|E>6i|^Oy~jdcpNh+%zx< zsuIc=w4d%%Xj{icacN}|CBB|xj;U-3#nx;XV8?WIwn6o;HcSZY;LgIZ$o%m%TIz|v z=cBWki;Ih}7RtQY+;7KU&-hL#pnoqFeMt`7*Q~r?wqDWaJO8D34=EV1Ym`#m^PT5X zSs(+N_2N|mn>KHLf<Ql2v6JKufm^#IwV_8%OZZVcm@Z6tp>jtlIX8-44AUeBGw z4#0o%bt+dF^EhEr&)&8Uqn8;voe)Z(3E?Ir6|U#})v%#{Lc@-tV0b{a^Fm5q-@}Iw z8E|^YFMmbL(-Jzm+{KH}rchm(5QRw|y9)(>Hw#OINsZX4`Q?q+bB>N#ylh4AR{~OC z&L07{U&QvJcO^SO2W_cIe(^QmoY76P=l$SXXuYvNcsQ!{PGm&dR#qPE*AWmlZPp|9 zDu{|8fkb|X2e)Tip&EFFg@y4(fiE)_u1@>yMq?ap&u&1F!H3|>t<@vCu2bjabi~_6 zp4XA<^PFss06dl@>zDY$JpOx52sc4FOqdZsT+nwvzqw1~-eZ>qpkze8wWc|u@A5Z} zaUiT7Q1`CS^na#P#Vb2Nr09PiZ6~M0@i}$#=1tR;a(_mHm9F%=P1TMYYYS$Bxe)ga zkmo2vaKvDRjnko8(GPek8ibs>b0x}s`6EO*4~ATa3E`{p8v1zv?Q&3|=-iCr`tx^h zS%0RNpLMZ;|`gE;or+ndi9VrT#QiceU$ z(Q!1T+;r}!daVCcw>KLTdY$0>Xp4bqXI{%9&cPGleI^|p9XXI8`j>k*TtfQ}EL&V4 z?_p&%-@~IJaHcl6J^`RgPESuS(5VBllj8j^7BgK`e-p+&na0@IxVJ%7T6wNS!2RGq z(qs7|;FN?nqcHINdXXMOBfk#O4nuXKU>w9QWd!iYEOXg`1Er?GtJyPC^@k+M$yqjp z3Ylt`2%!NGw{*)!rhgt4_VEV*<5wS6e{PzcoSfXkHT2#b>`NHeZ=;*^KyuUUPo2#o zBAJFWaK44*{2BCDiVtPCKmoK;8K8^ zZjlE+<&apASC%5VZ+yHO%)KSY?l_SNLvJIzbbfw*#Jr~F=3?*^B+$F6!bAerRuneNo)h z-^frved9(fkUwv7)7D)F&I{~0o{yJkAU%hkM6mYldyoC@?S0_qw*w{dtxmo(p^m|F zP6mRe2@cwm)}n+>Ndyz1b@x3H={$JwpeIT!fg##5;#d0n`o5wpeFqu(crA$)n;4w; zQoget;rhXQd#<jdP4{kpDa^z;taNX*a>k+jOn9k?^d8YYnap!$$^M)(^D zhb@5tFX1W)MItgX65_{2pdUiyfg`UB%`Nr!-YgH~XJFr@8CKAt2FXWCI3YI~2Rkee zL<_vr8<8_QI=W4kx{8Ni@@NJ^D4hmIAgbN;zz;v9S)fouk-P`tA(B^%Eb2;^9>2b^ z@7U$Xun>eG`9s=NfiG`rmW+nK3cyN)ov1lqyVkY<72mGAPb>f~Q~jMRHL=3T>L83@ zgr`8}xS&ovV$0c~OR=g^DsU^!q8x}!$?>cHSkftE2Bu-CLDMC<$PnDH|fvNUQ`pDC6~d;iqwNTxbnDn1xN>cx<`(-UmLv=>7k`D|7qy(L;w0RaAd1 zG_GOAVpU_^P>xi56*v1netf?A#p=1;aQSYE5UDx{aD?iJNFxLuN?;p8uOcFq@Y^1G zdn3U6TEZzc`txzTV%Hm11dN~aSfa+s-W}+yL_h}_+cJH6L*fDqxQVE8#io6JkI-&X z?)*B2yDx>Mo^91jh5h;hyA2+a0B#?MrhDiUuIBNB|3|*rcBk`ppt5XSfs~ZgYb8$k zYx4g~IiDyI$Z}f|#xX6c;9m2K<(5$`#{Za()DdrjulJ=WeB98hast#nBZ*2$TsOeffAv)V@ 
z)&CKR?;xb!aZ}<-e&amY|%Io!vb#5s;LW6#W?sc=&MO<4v}wsi~>E z5hHbUap}bmk&twdp@#&7LpnJ>Usj=TjqP!7{|jLg1}Z$d|I|9#HwXWQ(^FX}hwQD# zT2JNW=Mzuc%Z16nz`)1X7h%A)d%L{Y2frzp)gw5E1t4N$fVKan*3Xk?^gMd2IT7xC z{`@&Meg9+qqQhb@D-6HX)C7F{cAjUSU-aiA#5J1eQ!6f6?RrP**ww>XQ@`c%;TGNY zmDw{2?YFK}(x?HFD;?V0>g^T8A^Tk+Dtp?MA- z7K(BajRg@m#;VAt3DFV%_tHEcEgz*;F4uT4)61-P;nv8!U_yM`57ws%x}5M?iAy+q zxTW$*Q1Xb@Quq1)w0AU(RUTjre~@@4_`q$XuCY6jTpzP#F%}FVVp6d`xf8It zAHvZ=Ri`GSNwnd2aEgM4UL*TL)M6-4h|}Y{;475GF$IY^3`SH^TFP-YNl^~vSgoXj z2==qPJ+<&;b150j`2GFN2N9a9d4U2y>UWFHGucuO^<1o=d><%`d`K(`aJ*LLQ@ptl zN+!ZV%EYAJFWIRkwB2|BXtDo`*us41CB8c+YpR>2r1g{diU|`_$T&ivMrs>?><&Y#cAymT@KEhU^NG z$Hb{2gaW=83Z4K;ClT>omociHOtJ`_VM8!@6%(oXJn~e41zdvA?|wE2P6I#4!_b(* ziNpcH-4H7bsugn3>_XEU3n-05tQ$fjaT)Lpow^_GNA3RiUHa7E9#NYC8bTxh*D8V5 zhnAlT1YoEu^0zQ?{+px~thAEH$>aE)DLgFv&CUI{7gWBNcjmshzDw&muhiTdY0odw z@2u>td+Z-lvN;IVrKC9K7$te!x_I#)m_aiK@Be_Tz`qz(H}Tah2p>{?JfZY+!(6_C!Xt52|QVo+BXyg z)I+bu5XC)DPZBYsfixSVgrj9>WFlw$OTg0FTN}w;Bd@q3(hf`ra+I#|f#RX_srb(KhbRV$9&<_pq zI^aYkiEBqej;t9hnh=XZ~$r9;buI`K7TZuJQQlbGW ziG?`c$IzKHoCBz6|o0ca%+cxf~?4TQ=-^vjL~n?1xL ztq4VI_gr#2IM7vnswVHE%fX*4H(AQ~92f)RQIR(bKwVT<+7AW>>$et>z`~?y@JjzM z5ep_HOe$i19{UP&CW)C|c5!)msi&~=$sLO%LE;;-vgf+4wag1+H*FRm@=CHRX#R$H z+QkA$LS$J*|0hxiCm|Hdtgo+6O@0LauAO6hf@FXPLp}ck@k|opr~-PPn4~dXA>v+G zKxVY~G6Jd`*vi0h;l=snF~~<=!NI}ZV`FBBObHhkK>uVyYUAkD_}4@N$#kvP4+#fC zWg(TpU<}dg0#G-bD_EFw97)KeDSAze6V%K>;<%8S_dZg+eP)wCL6Dqo+;DVj%RrFRm6n2N#YdtCB$OHaIe_s*ZV{Y#fFJx%m|+!}3o8hzhPiW3l9Hz6AkQ&z<;=+suMHPEe3Avs8my$W zJYGDTyiNM|OZ1ZI+ND@C@#=_eilLPX!$u5cZqkF8%h^YdMqg!WQ09nW`2GET)MQhe|+dZJk46fXL#(K;$(;lsG8hoT8_y1AB6a8 ziYV@f4 z2!BdQNDKiihSqO@QUJ4iL&I!NV`@>!;_&)zkywxwMcU99zb#Lr5L3n@2B%a5qrrYv;*ba;Xx=ArIBS?I4fZ)%c z{Q}w?BoT{JgMxj!ZXSClRj(L2^(Hl52NWfe1Wt#-xJ7F^H;gwiDC>PG<*dlWX9=S7 zh4dw_60ZPBzC2^~5ZQauWSu1P2=CN}zl`e%yRdzbM44QK4%W?*omy@`t4!33Q0M zi$v&ND*rh_VbDZ!Lr2?l?DEkc6w8Pmh`P+1KBcW4I~0}4+%zHE;#@tkmyY$vliH)x zPPrWpp60a3!Ec8Yx)qy%7>xq>Yk>WOORYj|2B8Q=btx}>0sB#nSf!9a)0%aVhM;zx 
zcd|b0nZZ1obO$S_E5_}hzmY(+0&nKrZHh14wT{9|Ojr2K2QZ5~MJS{7yHbLA%OzAc zI0Yx6T5&`0f`Ri02&Dmq6GMA}qe|0LrIH(ck;d)wsGwY}*V1p-v;DD>rh6}}|K{(# zKi4lBpNPM=)W7DCx!+>T&d7Vd+5$K4xeZ6XS<*LJ2q~>UEz!za@-gi+ZJH3)UFw%D z)baLR*-rwJRNo!?O5#&dS#-`~XMDfnob{}$JmOplb4yIYLS<7^Fv5$;7&Ac8A_BV2 z$@-h2jrL=*6RN5wT#=a28lY&99zh^81C$0R*w4^rVM-A&_s0)j^|p)fRXokevwdH8 zDkJSfM>tC99t4|Laebv-Dmmez94Qe(_^#+C$>ts6#FQKM*wSPP?GjRo5CzKJUWT>D zPsjvLP!q?*>YpI$dH!^&K|a=t`TKu*P;I}O8^+n#leH}Q=79K|!};^)-yQla_PoOI zLc_BuxZUrxBc^Qa6a*Ql1AL7vqt#r=Qd7^yi=SM$mD|#;%t?<%obsdEz5n}Opqf%u z>9wzmQ@u;52kJiz05V9#xSZ{HyI64nJ(tRR$5C`u^>!Tqh|87y*VB)d2e1tWsY)FJ zVg49Ppb}qchOaw|RT9iYc*ot~Z$z*!VAxBz#o+km4@Dm9Gi-#9L1Zyt2~-?U%KJhL zzuob~JL>OZlOo0fVP{=ya6T9HtI z!N}{gqVbBv+!9Q3b=|KG#U;NU?Z5t_JEpOJaxR&wr+}3&?`O^d{`8|k3HN8-_tivo zkqegA*BbQ&tnS^kwXHw9fD5N)m&_B(AbH>@W5nio-8`hN&FmVDykZblDaY3@S{4GL zZ7=%rXh+a!%+t?WMl%Zw<=CuHTsWP_c=zoN{e z#Ve+*0l^b7C&8(Q_Q$VkB%R^DT9wnOe}jJ>s#8!^9JVQm7%w_gV=E&=Vso3l`qqRfv4`1|)ElhKdT`*DD>k#!#D*?M)+ z3RU#AUKf$pKs37kMa`PEom~vZs$h&hfUM9;mY0{uY}{-14F{xri4cPic`GHQ6=noL z5-U)Pefxnh_Mj6F)I>$;4Xj}M5*clTKrlmVN2mM`*WVp6^hbZujYcVP%jfkSO#%19 z_)j{rin|XDwf5g$_|@->+5>|1LQm`R<;(BPDn25Immq(oo-Z*@A}N!Zl=zf++w*wr zvl&8q%C^3RAnij4hX8!EfLEm*9UbJa8UV@&kcz{8BIbdKiL233C1My9&Mm>lYzC_I zVN}Bk^8rw`SzmP`tei4`#uBLS;ifF`tjaO}LzIr&fT{$uKxP049!6H==zD{ym#~jf z_k6gbTmxApoe6=CxDd>l4c0^%W3UhnIu+*V$^JoTli^}E#K59O{@gwcO&!PH=*M~h zy-ve|6fkQ|PcN)p=tiuL7#t(g8W_L12xIjA{rlT?>@a`|=93uH1wR=XV@6C=yhZY3 zae3Lg`wJ!77~-S1kQe-@ld1+yvj9_B1VAN{4H#A;sv9onfRyWN3=E?%NgEC{>_aJf z{$ezO)-N)DMO63PhmDh$JvO!1XWkEZkC}uaVsv@Z5$}oiL9Dr9fR(lN7gSa7oO;AZ z*n>j@w|1SL$SVO*Z%HxO3VhFnF*W>15DBxgr!0zyN|>5JDF22c@h^5MBBfu;%GA!1N1=9uupnpi59Mu9`Z^E>fM7d7K*tQ@lsMgXbh3ExTfp&5@3PIwPD!j3zE4+ z_PKub1}io_F{8_=8d-&=1DBlFC@lm>ydmT6HCB&RE1=ErfmK4S#dgNng#?DUx{)!w zjHEUCVt0T4%UcxA=#m;aw%ky`_3)4gG7ziGEQM?lLFkXJ5lK@kD}5|I45T0c^6mS0 z&V%AP4#x!7OEAR}+99{P7bL+bSPMZArwJ1|(qrAJEPySkQitHmscJ)ZBeE7TVOng# zCT0<{38r^P-dn_kYH~dc;?Wlz6{REtZehWd8m6z%-iOk&y1I&C%s!OSVm%WSUYPil@_`k8$6S z%3)YJ6uEL^BGkv)oizD)n 
zeIB-R9O~=ou(jV-&0#PqKIVW)v(XT3U-Qm91)w66mQ)TT;n^XUACPo@(Vjga97_x| zLJ|%Kvrq#}B4*)RGc9T`A@s|Jlc*bqX}yPzZd+v7WPEn0@bkE6$lD7JBLeg9=1%%c zhJE{KmyKV^Lbh|C;`bdtabP7noIEgoMo7+t-AjgT@!h^lfChPGbsdj2J7Ncsh-swl zdyW?U$2V1TD*uwQ-A)I;YFC&F-b-U#*VNxx?Y3RBNia$BAJ^@o9RaGq)x-6A)D#Bv;sKA&1ZTPU3`5!du?l*gcP}`wk3|Y=9Lu zrMKnS`$k?)4lyYcmjh7hHIgFE56e^emEPB9z)|7MCfi*vRo?nfaPZ^k{;$G}%*=lJ z3>foJR$C(ZuJEXgdFwupe=~oyxV+a=}tJ#Cy4Y&+4|1CJ*q@SOz_z@=6rdfYSU>zr< zMa=@v{`)V;Cj;DBMNA8W-(yDAO}B~6>{X1_nVOm^et%bno%x%Onc(5Wo6xZ^z7U&z zlvM`j2vTM}N`|+Qutr6}PgaiHOJT0t7*k%Wn3r)+YAMM#>GgEg1-O^`B%FSD|I~6_ z#Xb50Nq;jz^F8)*EvKw?@*GdVngJ4fL4+P6HM!IvuF2k2z4qy{4+zyJqOOzpbPK}Y zY+Zxg1ARlNLZYL+9S)jeI%Y)Tn%{uoLoX5)6YFSFyKHrYx@VfsDm}cJGa@`3-~U*U z#NpDgp_)qn-x+5+947tn$wa5yjo7_EJ@%<*lSNO91!?mQxV&!dTa!}Fb?=q(SeL+H zAq}fUb7TwWN2pxr2cpGfJlJNVKz>XDqy89z!;sftDU3F!Jb=Yw9mz2Uqp96lkywXITL;%sgn%QN-Re1ckJ+qlXGrO6_2Nstc zP54D~WjX}~=C{4JYtd2BZzAO*PEWWkpcBN0cDNWtoJYa}W0boIDRg6{Yl9eSCg=!m zB`s#D#$){~f4zkB^ZV>K+t)nOPCK~hZx>cUy}jw-W3Au) z79Fv_CFE1?daieEtOo z(X^^#q^!gBci+CdmO3A(u$G^sb{Wx@H(pA8sJ(eMORv49B?MBEfN}K!7+DX%JyXje zGZQl(HFy3#!^egmhxQ-3{d7+Fw%B5D8r7c&sZBK!7t>k$p2e7Rn2n12tpLl3+WpxB z#fQP`o<75kLiY}`WOi5|5-C0i4aAPTh6sjf48+&ep>}hiMG}f8U=R<#7O@>z>AWc7 zGK={ibT@oTUUJK_p`Ks06j6S(>tcMt{Tc6P+Ag&KK4vwe#4pRrT6=vE7_sX=ZCn)N zM$o)%b5Zv$XtP`ssSqAzdV@G-ZDdQu=5gq)uYVZHUpq^@=IhkeCu!B+cm3|6_~mjY zKkn^CyYfcG)V;Y>Lx-oX#B0>mT>%Gy_u`zAVrd;@C+gDD*l3oq^^1@4MGJ5kC-j!SXfz;hb9(>}8543DzpM$^=6{&O;QDS27;sT++PEHmy zAf}3F7zH0*czFy1+J3-w2=3{j;K<6!bxt$@knG;AaP;sN>{j)Wl7)ccP?7jE;xcvO zlRdh%i{~f@1LKv3&k(9NScVbeNa0d$p^qPLdlwyTwgS>p0c7*2!m#J%HJ#$pb1X~4 zH>o$L8BYhK!9**YD&ESbhj1j?q`SL2p%@YdC^4%VAZFdS4S2B|Rh97B5W%!rnK#4W zkqYqM5`8v>Km0N6rH+X+KbbLz!k3xX@xnZe$D zvx~?552qW=5~VJt`q*WsXK~br|CBz)o#tYghxidvZp!D+Z!0S-+!rz);wAw7hes!m zFg!k8{`fBr*QoZB@JhN}6hUu?8Ah1bI?G$yacu+<##9M0cZ)i`!xP%dB(9MVsb$MC z3`NLV)sd2wNJo8xx?#DzP%;K!jJV8GJO)hOZvAvM;m+xC*s+9uf<=93-2M!PGHAPwC&nO~PM!joS zEQEknK~Z-}A&z=+3)ywcOw_~QGmV8> zQmN*uCnjDyI>~cJ@dXj%L!bkS{P~7EswqH#mU{g7>DFFvPKH2@JBJP}>Dd4BnrX3^ 
z3?kwf=Tv>{rm(ng z%ii1~PYwZB?^80@EcqDqnpkHc_CG|RwE=w?B#X8Psrw+MHhkI*37Yb)nq!6*LEGc< z3I|P(wtjc^DB1X3!%sKP-|#(j!1bR4T_p8mT^*g4Ti^%eNOEruKKn7YaivnOxYJ1? z&gHbIvMNV3=md$Fcz@>xLKh27VRf!amzsLL}-QHY|%Y^VSPAT9^T&USWoZrN&V9$-eEZhiD(;dDrMb>Uf~>-p5Y zI?7pHeKZV>`eRePEYxlTXBtxUzCL5$zmU6->%KigT1E8-3qE^fpC{q8zmd6T*nknX zwsC$;=mBICqiE3G{zZU`_{0@379@UQ#s}_fC(dHup}?P8sxEamzLB^s+@5XPG2XZ7 zW$xJr#&2&+dSAX`Ert4rHvaW1JN?1VBbWdKS%9ZCruk^}$FJl4Ya3MWNLg8%5~&xo zzQogiVd>IiY3v+R)DZu&Ec%=N_OkV#TlhJJ`EWwMHZ;Y@FiNa2O%CGv;`Rn8HN6=o zL5`AebqGMRnjABRAmlk0x`2bi)$D)?w%MzHJAn1OK2AOJ*~8&&COfP1cl_IN=%Vo( zJB{4)oT(%1Rxkw);=_g@+`kXC2X)fmd3ADuyrWTfKJ@85qa+QNsx@DY4Plr5F4~u) zg+H&i&-Do7Wiqdh3(qT%wr z_toXx1uJ1p4%JXhLZM5=$3eBt*VbOdyf>8I8G_0~Q*d9Mk|#b4;P8Sryj_040%ViRKE@qS$ z9??3N>V^IdWYqiT&vz&WgwYkGl!oyeXb>eJ5U2#O^>mVr_pVJ$6?MQiMd#3gZ=ora^on8$L|u`zz#* zrU`QoWFEN(;!_d83wI&CobfmOBFiebfBpW)x$c#5OGnBVCMNlfuBFGR8+cjt^Y>|g zr5F`gIG_>y<$IL0bcr|ZARZ$bxZfhPgI|lxgX$KC#yHQXlluIHkpi2rDFb%AV!9jb zek=@E1wqdUbcX$-s=h?Tkf2YPBx=FNJ(-q^)jqF6XTMouV~q6uO1pDJ_Gj9ku5{P) z&GQ>2VyIuUy?OpAP^ei_X6wj%9dBATBOC!@9BRjYzJKWH&k|`b$TCP=eQjwG6Y?db z7=(W`O=%8j-qPkATQzrFI+(Sjv3ZVbzf5=Q@AZibYRcv9Yj@|9Rf-NwR&a-_)-;1~2`O+tViYM|ZnEn{zMzQ~O(U zWcv0W%HXXRW48<;I;FS1y0nU{8q`~QztTX{>p>M?;q3}VGyT1sz@YYmE%(wAf^zm+ z=ylA0+hOU*0ma+**Vdql6uUjywDvn|qobqIz)2vKB(=!W(@zyF4ie8}x588Xq864$ z{n4$#GNvbQh&hVLBsDfT(^pWSR1>tmDoUEo6Y5+&dLIb9r=Oo({oicPd2x*|2~962 z!YP8+3v%eB{3V&;N_9_m1niec#7RDzrtSMaW7;(k>CosARSGKxt}gC@PXfBuzx9 zG__MH+QVv3P1<|c_qcL@f1l6e`}qBFKi==hee-%fU)S@x&g(pn^Ei&v&C2?tfdAne z`!{@`R+2C+FDxvL2mz_-K4IsCOPW@jr%0zX+=t8o>XX0WtfgPkg%lHGhaciU+y}Sr zNS6JEOF63|FL-V=>}X{1UH4IGi7n=eHg)e#Y@?2G(vBHS&CSbHXV!AGD;HZ?D)DifAvNQ z4XW|)`%~>ROY#OjEvZ8@%??31njA*x1H5}98gd2-kEZM`j3j-BP#l&0vuDq4@K`3P z)h8R%;Sc>7(Zm<1hTgEZcWaOIB0;?ScUx`i*vSZ=RTuM1*FL}R-CrCvq|$HK!~4hs z5ge!w@u&ovIWki9)ckO$@P;+$h_iBW`DbTmuUcEm_LC3$Vu$w2SM4>b^|K6NEfy_3 zU8}TZKei@F=j&f&bz(-^<35}_O`ZWsIPv*Nh5rtq+4kQC(+`|ic7LgWg|26mII|eX 
zkXEvVt<9~nfTNYWxHLopY42)p??r*T7Do+8X&Lgr!H|zw7=oIz<<@*d9ONc23BL!p~?^42Hz7JeHWJe*UQc$dAV(PG{@qHs5YreR}onB25jof=pj+vKXEI zqG(HYk~_*Esb1LZ-?Qy~x~Bik04c3nI!1=|iZtr@hsRlSCN|A%w6ofI9QTrPf($eg zq|iZg!4a?zVg7p5N`dg5Y!d z$zJ1Z9*|T}d!Cp6YIG3lgzNtm(h3@1zUF@uspTHGh}3lXs@)v*Ma|AM2Tx@MTwP|wi2|}2d0cOZnh6~ey=PjrG78Pc~y=X_v=1ZEXY(iY%gDQYl{5crd5~gD5>@2&7Me{?|cBx(M-WX>mRiondtNq1@S5 zZ~&ey#N48^XY=OGip2EM8f7yDn;$hU$RUY0m$f#lpL?pH@fP~-S74Uj1?7aJL)5x) zidw+kRXKhnl!9Ggi2b57^Lf)X8{X@mjY-?9KA^u4MzGt10~u8fs4YqU00SE0r3t=0 z>26t}{|nG?eY7?>^?<})!ea?Hn;km~_2LQEBM9BE%&aQFyPL;^1G+#Hd6 z7wYHPJ(e7qSz1O|Jd@25h2-bJ|&Ye+| zv+FrI?_! zI0ydw;~W#|D)N`0`Z6aF_)$=&;DDj+tx*^}Jw(rpryAbinEc!$24au+1$I>72-~u1#2LwaD){j{r42Huf9)1waehpJ4`# zfM2X&(}W@>_(d}EiIXrdAPy*m2n28xjGmUKZKyDy)O(2n5KtFMuaIRueUZEpypo$g z>sz&_yYoDYI(iaK!=P)FN+HSYkA20SVUZE85@CHq+Hz=iKQU+owF-pe)3|GeFI>9D zTArDmEo}Gq0&#hKv9NNXB>()9S5Ufn-GwBX4?n*&+UmGa{Nz}dd0HaXnfKA`z}yco zUtv_|HQnkW_~i!Z2!?1!1M+MV`Mq+gnZnuW26We?NaWX@)nAl!n7;UzMv%wTet5Uo zCMN|f0T29b(JQXR%j-qe){*@KHGBPh z4I5nT?;@;{x)e6cB-8WO-;ohgX+m1Mkd8oxmgq3L^4lMnuJ!Uz)+;|Lw5+&B#&R*c zd;83|ZQqQ{hIl4vlh1urHDe-(+6Hah6JiARL`#z>6M)$XXk{D$e-ZyO8gwN`=WA~t zkZYt%&qUd*dUkdz+Lh<`YNiKYrS?Br5Pn+7JpNehZr%oEr2W=FE3N>>H(^_;IUczp znl~G0XXhL-{tv8KLhceOn(@R=i|hD~rsRI+X{<{ioFiOJ_K@d(w0OLgv?PDhjA)DE zzn=6iymxv18%>GN&ik``71krcC+08cBOQdyAAZTHp%R4-!=B!zlXqgK)n}&be~W$h*t;*!z5M`?o!I`l{>rG z*Z)xcrblpf@xu3Gl9w zW_}mAE$~;uhyp3@4gl6sC~IsM=j>eA{{2Le&ldTSy7`+vcEs$`>+f}#{4 zX?%gWn%Br1!8V4k!>;_iHcZIOpjIMV6Adk5aiwWx!A^r_c@D&+#DWzDXXAiB*F#WU z{d;8{28O@Os7b8nyX-BJP*{1sJ%a8gUCY@{bDEePpEN?H&%5#St`BIblM6TxQVyZ( zjNoEJ4~!em6=$J`NmS#9V?ofa2uDwFU5Gx+^wP$a<9%>w!Q$C3mN%leTH54ph;4XF zqb=(b%p@0E$EAIYrBwgm{v!bizyxo?s3}a+;Q^}XZy?1=NlRy<@rpiA$-TW~Bl$)P ze6^YW)Ie3WuvSSxrQ4c4XI%uHhXhr@Md=P+1NV?D#<^Sw+TOxv9iAJ6F2M=r@IJp4 z3w*^2p?0k(yTLI(mySDwwH<$F_o>(I7i`*clTI@0Deo9_$s=MQ<_Yl$5ul}jcSRaz z^i&=sA36T-r>sww&%f2CS6pW`nvmU8ZSM8RKYrS5+Ckx$d{;-%JIXnt+XBX~jtdh- zS`u|PB5zOWu1I3_2L7^$aO?_y=}}F*FtZq)XV1|YhW^8*qHk+i|H?AlF%wU7;~lF> 
zcJXk7sS`03B#;q8Dy@e;I~;t+>J0-9fO-pj2NIuThi@wq)>yo>)BSOvW?^G-t+M=%waAHyGM4;3DHIcggYjN>j#NwGEAv~pUQyhjW51BDkwJb(ma)%OPEXd}{ zq`dt1edkw^vI{D*Dnf1png1;O$36;e+5{hh0@4S7kk%_E#?)kkAX#Jai}$IQ)EOmB zj@%HF%4LCspq1IV;`X}m{EKY|j&~IpO;aqQXu2`j_=I=~)Jg{!CaJ;XaR&@k2#Fii zkaKWfcDXD^PK3>K$;(@>Jg{3FYuXgwbf(Ixb9L8#Lx%6MlQ*txL|h2Qtw-Q^XpYcd z@BDdyispQb@Lncw~y}7KECnlofpB5i3F`lP8yM;jQD$W=R&) zWwdrQ$65;7HS2+nuq18IR$lKgHK0x|65QDD(6)Jb2q>U2+Nxmmf%IH80;1^ww*^O+ zODkP7n^mXTZ!SNx3pwQRax%^=__Esut!7S5QI^0ndUb#J!HKw!$c12{bWkjS$nC-G zK?jgX`=;VN$+?w!iw=q%>(+^^$)woekHI-#AJ>{y`ow$v&D|L0r|`fSaS%jUB1OPm zeEw_`uvKU%#q9^4N&h=9OzR6h&Qk)o@wx25Viq2lTY9B`DQ(%-9KGb^huTy(fPuuu znmzyi{0txTcA#1&o35PLE;(i=+NB64$-G;!%grr1?>BFF-;v>b*H;XGYXZa~!ck-7 zmQp~o?);f-5Ce{)$*-9f_um&cXt(pL&DXY=l~?MhqzL|W*)n$dhIxy;JZH&=x+r83 zTR>rfa~059g~LdmRueTUz#ssW^(pJf)_K%Y^z}&J$LxUIF-@;=DdVU@TA?218%y%2 zR96mY-dNebjLb|_QSK^a^{T>3kw74vf|LQRa49TnP6HW*CRNtT&dwNZ^#7LCXj6x1 zN3~$PU*kxVutR$AFva2~G|?Tq#I2@naZSo6vml!gguInpS0EJ;R?Lb^#|>|NGy=&* zw$dtIS*fCL0)sx`!tKM6+a#K^_AfjY(VH$|s1i_l^MqW}kGRl~=ZZoFKnPSw|EOy9 zq?RiG0pX-ki2XCDokL!?*wOpcjQpMr;d;)7_0n})!g>>cRd@w6X?WQJgL{fjZYK-^ zh@T0v{R&(~kfN`n08|u%LVsrVDl$JT=Y#cbtZXt2No+#m$6kNHCA$0D8i@Mpl;pc^ z>4Cvq@^Bjk9K<+?%o27JxIwR0^OEDWLsD$hmBF3EBa|7=o(^tEC^MYpTDIW?d=uHg z;?$1(Z5hBapD%ZkK`5l3Zl@WTh1SEQo6j`oW$bfKf;It0!b{GN z)<3fn5KfMW*NgmUG~~8#$*OE%ct7RAYqxSJIE(OyQ3}IaoN^2?bjH`1T-6Ch1Q} z772~4_iDWso^Wc)0vIN^6ADTyYy)yvpZO8(t^YgOXy92to6&kbD za>dhT5TOI9pOSsZ_5rju|#u?kr36v zQv46(7`lugJj=x>;9ZUUVuD~%+g7MvZq%UXROhkbzgwRbq%}8ewQf=tt(bPB&{WVdpgNQz@ zjSJtQuP%Y7?dCWG8Wd^QnHU*ONS+N^INBNKU_=BIq!Ki$QXItLY%P*``Xt{zmT%PG zHWRS@s9tCG$n+}EJ)#AqmKg5KP{ixdwjF&CpN@wp8ItZJH7HIptn=s+Y5J{|B4sKO z_-8=meT&kCNSIOFmsqKz%MpXvL5O+8r<)LS|HDGAYyJ-w(upxNrB)&2tuq?0O`+-3 zl<^#ws~Mz2I{CmdacFpg#e8=4U#9Z`nE*q-;&{MHpNxe-TZG+P4g`cNzus>|IYrqH zT`95VLS4O{om~Mf4sh(403QDlFvIzSB&uoZ*pZtcynTjvSS!LB>8rx1;auR$>=vzNsIEcHNpLX=Et`}z229+%z* zHx#NN)ZmBFW)YSXbOx1@9b!kocuEJ`qMd8}Htze1ZXgCJoW0B?{MjH0rU@F2Y1bj~ 
zubLQ>S0ykL7=lK-)N8qqRduEsZc%b^|GZ(d!Qh@@agHAeUc-!LwEa^uK0M=$)4feSD$Ljv002>t;RE^P2%y(Z|6 z-a%bQx(h^0j}A8xyTA{Ig2)4~cj4o%pFB$h%RN0$uVmvPztcSV)qOXb2(&q%*j752 zEA6f86|So}+`ES%pfW#o@H7s{n~gW-*FSyW(a9l1ebe*wk6_lSAps_?|GUoft`W#y zaRknS_M)WpPMHyY3-{sT0J`cQfntcOKwajm@4?^=bj(2SgAO0z0SDCyD`$+^0MLJ^ zuojW@U}~cq-O?P3((+p61q!~$b}!RU>Lk*18f$nxWy{)X{)kf5&2msE5#w~2{81AYZM!(p%y3Xs&Jz03?;5Zw6?SaiGz+KH2B zqbtDU5#FX2d@^OhW%2hP*%wBJW)E9Ca&P~sZx8_l=?8w=dPjEmQQt;3a_c+WdtJ`7ML9 z`;MV%u&N9S$a7%N_WtPSD>d?81%g-=@}H8J)Q&aq0+cV0b4C2r%5e-t2lff*)=mF1 zriMuU{QGQHvu)ybXR1`*`H2V#j#?&UB9FX*Pb-eRD7|CHZJ<@JpM3%~U(w>frc4Sh zQeNH%b^>c`f++;u%_wxCV@E^o{>w$=?q{J*G4uGaQ<_|mzwbV{y_i=(E_~5c zM+S|Bx~znTeKmg>tQ6-S9l`p~AWwj0@T-4Ch?5X?q)&bb^Px?jKd7gRXCM7Jn0>W* zk@Gx6v88EvE>X+Y=7cBG|_CO1v;jZ z?Yfq6|HoFrhMy9DH18<2$0catQj!SYPaf{z@2_j$N6fSI>TWvH=2Uy#xar8_Zz}Pd zHoW~^4_;#l)vh0_CJ4rk8r&9i27b2F_pZ{?vp-b-MJlo+(mMOZZGY2$wqB!*LW}yt z0@dt3&&RB(PEDi=2&8@5LN$SCF#=XDamqdP8jH9x6>f(MT^_aj;u<8(p}Ve!3eI2J zWl73i!QqUoyGY8wmvjZ;=@7byD4^&IFrEUxr-0xRr#_YOqR z-AdWoC3Rzt{jcoxmbyR3gj_sJyXqzG;o;FV0qlnfRU44I_?CEeq6OVJwn(>)uPm!_ zJYup;J2&~04xR%oO%EN2FGytoNDD2se>q&yrw8#~{*U7*ox^zT;tK0yrDy~!9>j*? 
zr3Vc--;| zb@S%Cp`o=*8}V}`*FL8UmGyWyUR%s(`nJA7;(o3Xt568Zr|};X`;V&=t3ot{0ooQs zG@uJ#Qn*{<@6aL$knPRFf`_=MUM+d=`sAZ}mZZ+)C~;4K6q^L6bRFkEJFzW^X_<5i zwnku>fW&%6e(!MMC>6WDgI}_8O=L$(s4j9-rDT(8jQ5c`LJrgBgR60uV>lG*jm}?} zbzg&oKQlA4!vz&Nv0#~s^Ct1=if8q)cK$Wbsr2jOnk+bd7>B+5LhbIRK6m9(KZY+m zV~Rul&y|9{kfI$TF?8Z2N>i=>thjGTMblF$S$>>)%A?`tFlZC`S;cbAPi`ZID&U6P z>v2CK@OUWJ-6EHqhL-qhTZuMTt(R*_*Vq2rnE@Opm1GwtGEq|DkwuutBHfEWYW+nP z)O(NpdHOJWHEQa0m+WpZ;L|uy$tq(9DD>R*zXR;Gus+}eCH-#IO=YR6L&JFrDT(-x z3#V7eCiS}VLJJ~H_HKsZ1N)U4(%Oeyrjt+^v9NHgjI=b+<>33Ac2#ffM+Blf8p?9H z`VZ6Z8}>u=@yNgBvWJXO$;0^%9wydUZKwP%^2g5X$Z%HY;Yg}&m(tw{gy~l#g{UP+ z(g4{ht#o1UpL}ieYvWoHt@7OB`eKr!b`aF#@B5>S&NJxa!xVky*VQ27 z`e6JEf7XWgH2X8@$8I;>((S&d|BEz(k!>%2_1Ahd$3#sM4ay%IUCKF>`~w#l@c5;N z`8$|}Ha~wrnNr+ReD*|P*4o7J{_LxCb)Iw8N1qh`)I?=u6M0B63=7))zw>Rv$jiF0 z<%pMQ+_A=384HFAzglZ^SFEcI^U9jo1ULL>v~`my*%GAu&b$T5&mm`)2XWrPOem9! zYtJJZ(89GFk96tWi^SngzDY-70^0o?b30o;rGC)W$qnxO3$fj!!vDsO{?-#!%Ktkm zl)0W$Yt=930s^?J{{!4{Ah)c1C)2({4_z8qk?1!CGqcc#@6sIRAp}C1xv0tEQ3mFb zyOUe&$j{yY7Y=L{p}T8nc5SknOs>ir8lq)aN%IFxrnU3`#m>=*TCx6dV#kKVT?MX7 z{}a)h1zEX&*smN~KlU^;w6>J$`KOzr?4oVsYUE12L*|M^oIGcTegbm7uWY`ThErE^g&BUWboIj?f-k{&5_338AEfEm1N_lsQ7R;^v7Yv*> z>&$UP%ut>ZNzrj3OWvULhNce!pPi7#gT@sdAr_9YKct#fMyP}2F!kE6d&IN@{#X=3 zn8QUh$dG1!Mpu+5X#f#swDhBW@>}KGxsmDW#3gr!bo8yd8|9`G;!+aRo%YSXz+vQl zbls?oc^bga7!bM?bc86Ny@3aB+9FaIGL(=4gC8hlW%F(X6Eo4(0mQY13`8O*56E@U z3h;pxqS_A}*w(dbb(J@mCeB|ZZs$%NDbZdPR#`K=>13S> zUc~p(A<8-YtZGP#572LKID``S>px53F`$SC;T|@>SD25_8{;wVfJ3?Eu(<+O0D+Z& zYTsd<7l;m63gA#9stH*CfK9m~&7oL<%%0A_H`FNlUqv62Yv>{RGp6Ys8^l+>5gH=+=CJFR z#ih`JY733e9SYf29Y@ikIgUaLKD}<aC`57^2gHB5-;T>aFJ)wFcBL=W^*c{ z7{x0a0||deNZbFTa(bQ%5Q?+dNdCvb$a2X>x3aXs=ML0LbLrg%9~#;DD(8VO{JsKl z1!4Apci)NWZbO}e&WD%boIyY>Z)MnH=fI3{S}jQ(Zq2kp}!HyjA94AF^DxCWD}Y^T1)E_8#= zGix6bH$rs-}s>h~(FZ~x@)m8sy)f?h0wl-SUN-RHOyeh#++z|nR2 z07+EO_QNL%QU@a2v~mKcA!xrQn+GUQiTGxj`2k@vjRu|>86Xt^2~2*ShcN-@BAz)c z3kDiGgX^!0hE}RKMkQzc*ROy4s>#TlKdhbtCx3fvN75gaR{?gqJ`M$gwqepXodfk6 
zr=-NQq|2xjSM0goooyW+PwnR)!t|+v;$m8N_vJ&c9c*neL$A!DyZGx@h~*T~nh*-D zTrt%i29ytNtvAkeW4siCQ}*38-h)5%uSw6mpygDjX1BW9)^aHGp4UkWpNqI4O@Hdl3Fy30GmI#hJwsOXsmF(_lJ)g;CJ#y zAUc(T{FWHJVb8za4D$94mrUo?_Gf%@#|ze9Yw78s6Pd?F)~;KdG>OhgAU^=ITy)!r zv?_0TDHN&j>u2^L>=I2Z$hh=R>_Fkbbgq2#%%PZgg&j-Gbx=O85(09FW*Ko9{2%s@ z@mqe957^oWudYgMt-j2HuGFu2kI;0(eo|DbS0VAjdnM!>u+6v$!8-X5;HqNK|DYk8 z==-+)m~my8xbT-n=-vtM7WbOTZg;YVbh(V#y+<$XPIW8Hia)_4mcsRy{`RA@1x7aJ zT)3l(^dYR7!+zPV8odUk4e6L+7*tzYwLubb{KM+t^X!Ph9Eb*{1QsOS1%)YFeI|Zq zZobQ&ldO8UW9$pFsw-)ot8S}Su4MT;wn^=M{I`Mo8NALw}4lucyIhwgE|T>GWHdda)Jp#6taJ; zMW9|Dk~U(oguMNfb*V7^744;&vopyuw{?<=D$QXI7;tdjb{xeQD5eJg&#pHStI zhPhX1*SB?iODiUl{+QU7V;VkXL8j-S{UdDy;B>GL{tbK)A&LkB!Kw0ziE(Bi7yAKv zGeM`otoAM=?REQgsjY0hvyMeg9U0k!E*x@IBeMd1HYFXV?w=o31gK96C0Z-uCmk$J zO<9&I9eFBl`TFsh#moYbOVFt`HX-V=2do=u=G{osz1wx93spq8U0bJc{yrYDepLbd zBC+)C{9##aCUiy0+H@aV@-~|!y^dAM_o@0t$Fe?RVzLLnr1?SO!@8#ZvF{^x+SEEy z;BUoQb?z9LV*~AtrJ(k@0Jqin&z3`Zx6dVItYfUa znt|A)W%YEanf_Ym&k4;B?gxM0T&1I66_=O3Z|Au4x$kX$s!KY{t<%v)I)XXPL*8#Y z&V8e|xUjcCq|cg0bno#S=KLR~w4YUUAcc2_eJ_MMx4{#7{X7jcc;bG2bzKkMdh(s~ znjIy3`xg(!qXvFrW|muv>STw{-TMzZ2K*D{DQDi9z0g|oKw-4LIl|;r*JKf=+v5|z z%Fn&=5PUu6eC~4_Yullzn=TnyBS-xXQat}6y&a81+9NARYpWrOI_Ki?I3BwJh8 z@}-e#$x_gywJbX^XKumEXVepK zTxnLlX3hG!I~DR8fNGfJR#0BPZ}3ibh83_AmrH2M?pQ>^7cs0?cV^Afw2$Fq)(d?w zj-gKRG3+N9@4aVgUii(dI-8b;7gIzF6N`56+;({uz9+CwSdJ+}WcT!r$aKx8t2+!J z$ZWwlAOsJ>+<+%)s6iua3}9Mz21V|mIyJWVM5Y_(8y^#p3c8zEmZxs}Gtl z7lyaaO{I_7;3M+LhqbFx2+ozS5M~F4hgDgVv%H0h@d`RSw=#mvcmC znxe(%;3%@SQ3wpl^b#`ZoQy}<9j?!Q=<4^|;CD2p01XMuURt6~bE65KZfUuc!?Hqo zrtZDJe;R>O{IuhRLsd#jYoCtX3l)*tf0UQs!R7OEuE0W3o}8mq#?FRLFbW2G$ii*?XoZQ<2*INN>@fc)VzaQQ{yPuyuiKr3!g z_XihvphReh@w{w$`IfM3$0L(ED#KTIUuYreLZJE1@9PUEDtA4D_Z6(fMklTGVf~X+ z^YVmzG>rW#6kIg>9$ z%CZwFI(9ZZUP~VD3h=Z6?3e)9F>|ebg~xjTs8PS#L7bqxmn!{xNMRf(4^P05PqV8d z<7KZkvB6%-v_HT=uO`cYvUPEh8SlYn(%$Hv)2vH&LoJP9$X530GnXxM6yBt>!2*2$ zYh1Qdn^;ysP>Ks@(mhX0TbzR7q{m~{uKl*6YAC!jeNrB?;Ks zQFLi=TXkpmHSIV2tr8ut7jkz8*0J(9Xdm7=^&-+1#r>_{T-Mw%7nkq2(rx|w!0r`f 
zH-Bj-z12ytI?zSUz@Wjlhs~Wc`zzvKPIH>nwhl$*pLqPU3XX?(rr#^+2#TJXbLr@P zu~qA!?i=bpWVw1c7J$?X?HQbHkbJFxvVojMH+qycc$_DPXqMN(nd{mFr6^DFMHpWu za}^4SId?Ogv#Y?(A%%7FboXn;hMYqJ{mQRBKZ%LAEb33+VKb&s4gaFOBNpHw%u&Z*M66MSyixf!Q{o1DL@ZYK?|8qIsu}S z<2f-NmbFe1e%$Kc|ZAoa{*y{ z{h1(!3QBtatA7eSXBFw6T8eO2nS1@q*1i=L=X}3zYAYDd9M=mD|Wnura zxgV)qlps=M9-wq@VNN+L^zq;5bVjr0q z(kC(Y{?6hR)v7HI>-GeW*>&-X-^Dv1VlT`fK%V&p=?M%hQV1fB-hlr{fxxtJ4zc8Q zWvRC#Qab7ih&GSXwMRWcF?c!Mp{?WG#k(BEdBUY<4*u~!v`XQzPV_p@tuOiOj|_a+ z&1OY?nOZwL%B&!{-i6Ygt;kz@!#U>YY2Mv>51-zC-ukAwjVBnCtrtcL-d&4Q%G@SZ z($iHTR%@JkgZA;N_CuGgn@yK9y+;&U$O+8%Oh8qq=)LUI&v)AsiE|s&j z^#Rfies1>R1+J@Rr`~W(u=R&0?5S!ps6sXr0L?AE%9xSav$PSqBdaRhOT$`QQzM7B zOo%UgwHn@Za6qVhT>BwseGgaa1F7vX^}UW~p7{$u=W{&fROq<4u!&NZb^<*)7aoK~ zTgY6vM>)ZMD({vsz#^};%3czx3wFpUyV*K+)7N6nepI;9ZRM8pCrj&0=RRytr`7!9e2U-n(CH|L6z5bWe_nbsiS% zUjz5ec5veJnYq)&U0pl+AIfle4%EcS%I|pIUuLOES?g8H(8xJ|vW%-+miz+G@bEf zweOeIt%$R%zCR>v0u6iWJX&ED;;w_+aFu^CgKis5tSn{S(&X(2YU7`}wLkc%Ey`}? zRn;7*66{F#&z^Q4)OWEDtRB7M;~#Ohv+`RTP2OK#UhU_-*ilCS(T@*fe{!xB zoG)w9_a5(>s&U*@Znrs8rg%{9l7#5J$=0Q~>x)<7 zw{IF1rUvl1N7sUeKb-#bt^z047r$mrobqU+s#|KJLY-1HSXjjOWJ=S-rt@U*%zn?C zKKFNO=I7P$^~V!>dMx@|lN|{& z3EOeEuC*-NM1E=nW|a<8=0-=~1~eBb+Un>9m$2=%>1zDz`gnOo;!|sKEN5SeZnd4f zHAWM2Y?`HJ%=5V!8eg30xzw*)?Ye9a56GBvIT>9Mk`Dd)BUXJ??(@3NAV(~~J4H8! 
zK7n9AIC2uIGqi;V4g>-nG5DyVc=oI@+D98Ug@&#}$-~UUGh7{v?bG>j=Ipc4hqGd0 z8ydCOwODutF^V*yrWEIUZSgGInu+fF^Z|!Xi`t0>8E*WG_( z6Q)dA5hxYtsg~xbZPJd#m{lY{xLi0k-I414`D)z}HLc1t06q`yH5n$_pJcQZ9Wtqk z%vKBIzHRnRHBw^#VS z&0j3%hfhXm_eu4=z0>uU*;zE9Hfx&pA6Oo^@JEtDfC(G%s5ry!MLvX_p7%*k3al%N+;=Y zi3*26z{yt@ydp-ra(l3G4v0E5W%$LrFsYy9USIL&Z0Dbb9c;a6B3cfk;`M`5Tdu|X zUbv&X98+0dPK*T5A+`)@vDhUmyPTK>z;-7D{BW9?Nm(~GTB^~V?kdX0*)e6-%U(Z!v#r3_yu1hKA@$gjKH$F?=T?g`LCW?yWZERgdU-||F z>=G7!jJ@~h*|Wa=kl^Dufiu@#&FAa;^ZN1mt%tsHGNhk8VtrUG&g$$ql<*=uU-9_6qb^G*{;MXE*ewJm7rv@@U_$IPTo( zwg9OEdsDqz@AzKWg;j-wJ{gaX61NzAZwfL)6=eN(_!<>;Sk-{T3rFoz2n>PCLrpiF zq0@hNS42CUJxx)yh;cGHPYslUYOM%@OOD+=KflfJ*MRFJ%g&uUiE;t+%kD%+@5sy1 zWeMEUz4wltZb;~->)MV{3jxE}s4BXlR;Rl0(<`)szof?=tPeb26noi$FTLcVMN2d9 zrulVYsY-W^1|MkM3+KKUMmrT^oy8DlvB~L6cjU0(CA!uc?)BzNe-dYDTx>5aA4wFH zblcj;Gn1dMpFXD*GvIaWq29hrX6k-jM#38KDDIuPcdOkj1^<5)3=8GXDM&_n1^jlttcCl zmVCdkgadih{8Hq%<>YQkAA!voSuV%i)Iv+?ghidJ1vx@Qgv1iG*9p5-kF9+qBRBrg z!zHSMKGM}`Gs2Q_vaPnOJ!{@$?r_-l*SPf9#*@A(Sm z(SS04^wkNU1lu?WoFUny#0?EIB$kaEyScijg2Tc%DSrR{O+mmVL{kV-UHUNs;SqWw z2i=zzY^a+#zki*D{pq4?8mq;vyjywh@4wmCZkQ~+_asAqP=>C2T1_pz+Vh84qW4Fgw*^;6 zX}CJ2qrJNg_EdH!7<0_@KQsg4Yh@i<+x&gn+-BYByNoH1Lq7VoxrJXGia+1~Qg7=P zF=6W63*-5e`ImuP| zz$L$i9e&iF{PWAJ`PmAk(v~x(N9BxLZTE_~wI_ABEFP;C|JqM`JGolvf$hfQdme6X z^Y3?#_$~b7MTMQT^0w{5ej873;&Nc!_iAM!RU`TN`QyINel*w>h#;C|Qn{t4DqQn* zf3wSQE(Sw?otF|74TikYYACZ8GPjGrOt=Go{4oOP9cEi4UT>bpy5!7z@}-P=B|TMn zk8YBH6J5DCPa=DdrSFLwR0H8Y!inYLdbU5UwwpbjGn!pjcA~7#75cPaeb6)RVD z*wJ#$fXtO0A4=70uCIQ2o|f5&fp;X`^}WpB$0oZYGu?Kz#h0}Q%i7N==_->@W>jSs zd@43bVQ{sp=S2s1{=Ifw){!1UE-g#bEW6F~OJvTn+g%clY?-Q*=1^)@wp+X(bmV;S zoYBR$m^P|8?A;8EeV&2tZ_nPn5(ciMCbb(#H#|V^rD>V_1l9rv;Fq^W)MCIk2ZY*I zhQBabSeFk1R{=sb@iae>7vbyM)@|<9x5&eP;*Cf4h5?qE(Y<}ntUo0`nl$aAj7e5; zJ9U%2_oK2tBCke_{GOmX14b92^b3i~`K8HzF50U0{n;~XY?=Go&$1o=R|CL$+WPBe z(?mGc(RcRQ_enK6`@jTg16W$yz%bZ}=4Y#2H~0_8NMNIv+2PH>E^IwZwN7d)@9W>mVv*~2o)$;odKYKbW@TAMp|?qu1}wYEvhF`GgSnU= zvWFxnB3!4`<1Xz3#`+rd7#Ut(wl85OcRw%h$qN~qp61c|wRsNgpQqbDGOZ;XMHQWz 
zkkq57w};$LhmScWoqwUl7!!yTp|LoDauJsrE+b+pWnHUoIy?T|NBnuGEHDHSYq;ofq=99ySGG|!)Exe=|RV;^aA2LNJVV|fB)Zbk;X zPoeeaLa_N24LkZ1f4h!b+er2uUf-y#+UsDr>BqO*V{(J4S02AtLFufz2{nnRMf0<~ zC5Z`(_q@Dz@;Yp&gsuHwEE=1rr-$ZWG@==&X{q2?J{5jR?Qiy*g?5$J-7@#bH;~@v zUw&kopj3w;XCl)}t7PWQ-E}nNcL;5!G1oRyGF)CfJtFo(=kyhacbmVSm;AhQ_~(s% zuf3n#85_Q2Ix;?2cx*Pxeot7;%CnFJA^^_1#1fcg`-u^>W4Y}6g;@eM3e{Je$q0Kn zeJ!KvW>6A6S;@epmKtiK{f7KK-r}+Z%9ai*^5?C8oE63BM@fOquJu{=gQqY&>vb=P z9~Gpk5>K*^^O&YN_DyEo$?9$9hn)ViJ-(Sg)$hC(a8u9ov%Ea4vYXKR2uQ)02;v_$ z#cqE7E({Rp#6PjApS~EJJF+P---JDJ+(F@*p-e*Edj3nmakMRk9E_O!VjB*h@_)x4 zBzj-i^6tN9J6V`O>568|1eo%ko~tMZ?8%6()wJ`AwiGOZtO+}qF1cZy+U7Skf7o2dGbtR~&R%Z8XHxM=_}spE;%IFZNu;O(%OpW9{XY z-`NT9NXD{N3idvL!(QXuFXg2AmfW50WlC(nIMW|YWjtE>qG?T^74_{CzqFd7Pi%OS zHQUWJ{`88E_ZW>eBVy9ZF5-2=C+VoEDPX$m=S<(Qu%~Ald8+o;7(``qxI9j|F+3*m zm80x3UbqC=s4VYffH}Y+z!0TI2Ji#VRDtvItJJ>kwcRrRjwf< zEX~2Rdg8ouh_31`&a=B^k(3SoJ-M=k3Li(zjx58=wG#Tha`5*Rm6Qk@Y$+$lwhpN?ouVz24*?pnPou7OU&Ht&S*oocC=Gu+h{#9H>lfU8yWq`F{yVi=D4A15a1iH8JUw0n}WrsvGOiGv5}nOE5L;4ds!4~ zoWyQ?VtlE3E$fRaL)p>U^mzweL4N5SLFuPEa;DaGW0R|nBT`{>!|-DFS0Q@PM+@P{ z6yO-KD$DmmNJHnzi8KsNxiT8{8b%)tJp&_kot;3-gHxZW` zWBxsGJ04heyh3==?}6{n`;TslbsiQDvz*%;asB#J(Lo12dFeO$KvR@nv1{}@m z=OX2i!K^Ir!t{^KnX66kEsTb5E~1rY5ahlQTTG{WurTxIsV`NFW@VSTxXb#B#l-S* z|Gh0KJo@tR@)DB=!sWxDpswoEe!iUTqxp~0EA>9jGo{2`mF=6goaxeN%hL7t-Y0WG zA>sI`mXX3nemAvrzmWc9M!Ifej@*?;FDd7s+Z*)*0;RNo0Ha5u(BgW2Ubo-Yh2A{+-#B`?9*{97zci+ouS)FyYz zM<3Q)IDAvGCx1NJzQ%QRYVPWRb&b2SU#f0Os9f^vHMJOd+Rc_puo{%?b?1LO+A3v< z1~FX=K3Yt3U|lEK>83Lx(jdnXZUZxnrSOx=jxkA-;RdJ^A=-Z1Y>SYEQ4MM2NbT~= z;jdrZb!h0ijI15S0`t24G;--IH6u4v#3cV@G;<%c^=rsV_q{&9t|2YK*Y(cpX34~j zk9%GRyHsV5i>CB_TDmDcN8eam_k1NZg<`MnQjWO)#Q+wu6jMVj<@l8<7A?unfLiVD z>*uHK?0I8p7ISMm&5#;h!8Yb_8pCCw@*R4>h(j{Jm^Ybu4_}|ZSD%$o!|_$v=AN|+ zeWdNbD(`kP@8!(8iv2d{RDQqI);J@QLq+ID`CSOZn4sYA?5i*v_9d}LSooP~oR|uC z6Z_b~ZE1nCS{*rGoup{goxaQ$I&5p2dP;32e1Et^;Io|qg=~;?$*Dy(+N<+<&v%}9 z$iDe#(Dsbxhuqz3_zwQGf7$v7(XvFbV0b%lVwtexLi37ha{?O%Man&oMz!p$8RS!t 
zjyGw#OpDLY9y^`0y|rm-W703lEjlGr1H55FWmFmaOO9TvSfb!&J>exrdo@Xap2~^+ zZs0bx_)k3bY)`V-TT_y%w@e%?bUFOBdsMIZZ{0NLki0erzcE_y{&x4a)*EzGSra$^ zmA>~ILsUP5Rin_FfCLCm47|^=<@@El$TpXrnQ}dDUM}$RXZO2_EpnIW-W_-m{pHz< zY`NxJBjIfQTlee@+S;E~`OK!?EAzGGd5=PS>pcALQS*%>8 zf>0&0PJZWAZLzn!{l)V0J*F2s>iBU0v_`^aGrFU;m^GG`2+>b%l*rYeZ~xP4{Ob2{ zC5P8_i5gqQ_XedNE1X`Eef;WJQn&|&+o8_`J?_$@EZZoa*OF6~9N9*d)il@xN~#ec zVzF=~is27KAlfd^=Pet8PC^FPU`k%}=ca_a3LfL=HP8bRJuA!*l-jINliAcO3zQ zV0uKeSRM~EzsmE<(#N6G4v?N=hn4A@RapvAHj;`h=?|lpU8vIpJTmx%oZpEbjrd73 zw7}|l>{`|nnR}eER^Vvat z>tzb$l|c1ziwf%Gu*d!?3x@%6Q5JDQR+buF4MGy>ZFH%LA@#7I8PDNYn-VDSYkOnn z_Qt7V3;iChs2#MNNhazZcgFt#@OBr{NA~jDaixtm7W5ZPZ{!yDD8AFg6yoN#^=4gw z9cLXa)ym0mn4L^7hBGW|?qH%Z!P5Wq7r4-ovY}gfPnN5%w&goIoTO*V>DW5nHqyPj zUW|%Pg_F~Na3XZgUwbO~*R?ld_g%Q~_xHyJ_tAw4hSp?Yx7F3n9=8fS4wvSPmGfC{ zZ9A{xTx9n3b_33mvAeeqd^r(we3xe?)5|dp2z=*b78U|fEIz5NEG^xJw|o2cYWKr( z@(;ZK|I(IQta=)ZAFArcJ5401=Cu?aR$fZEF5|g1_KD*7_j2jOPOT;uLqCFM(H9(+ zE0EfI=viug;xYT9qLFn0*36r?)DNrU)XHDm^bZ3c!~HZ08umf>zMtv`pE<=*iW}^t z-)Jh7d`r9&vc0z{7|v7;wI)tK+-mw6OUgYsO!dUV*)>1KHl%UOx!v_^JN$oI`x0ob z*Z146CJm%{G*l{y3>hM0!zt-VRFbI>%2;F`OHmPt$V@~+GS4Y8WX_mb=6RNxyI-B( zf4KK|*SdGz`(5jtv({Pl{eFh`ectEU&wlnE3e(f(P5rCWpx9Hf<3Msj?#k}O0|&m* zbWW->oE6&k1*Op0gZHqa=`hcZgUNFVMgb{G97CVy^zj<3`Z_v0iGJD`z#>dPr=W}_ zor4HxWKuDRSndirLE*o0OQ~IMPH*{{Q)1~&&E?gQ)A%BN#;VT5W?sad%aY4zSk&pr z`FpARMG80nt(4;LB=oLYT!;MhSr-?a$WT)(*6>o!gH%N^pr}$9-}KfevSAjNdSfFs zDGWjEf&}1h*vymn-^u#j@%5EkKYTyU(HVu#Psoh)-fOW~&cAqTL#gqnr$v~(%DnWt zi?$2>^uO6AA}5xYmz4PzfYhH_v=-jKME16inSBbGizjbfqIGO;<9R|n1{y-9Q@3s3 zK8pUHTC6-}e5bA*n;Em$U_t}CQ27s?)Z{N;#W4Z8dR=3To9cM|G! 
zd9XHs!8Qv84IGFDK7s%CE1x|n>8G6hs)Y4_a^=vkHmJyF@3)FhZO%ZMV|M8AIbWUf zUtc(eu3M#u`ME}qZk~9wWv5tF$?f{F!Q!SA--W z7rUA}PI~J(|LWpy(Vq2CC?E%*z>wV{==_`a>?us`4p1B#t*W}JpE+66uK(sKeMolZ zr?!>y^E(C_XeNcuxG$u~ojAGj94A%(sfRUPKgDE+jx3q;8ClYF>D8AcXf$|#=6BDQ zO`ejky`q+ya%GQw{X6>m>dA4IuKDNROx>wX0RIL-{06=ww+j8 zdpqV5q~|LHHVdxzjx3RCk+WX$@jyH~@0vZ6tCF`q(!CY?=BDGx2YW1O|JJz~=v{>0 zn^DU(i8G{&ViP+g>Wr1f-H|ZM`*eeIFUTJFu*kj+OYw?k<>Ucu^vc~mvMMNK5-)oU zt~c=ydS#CvKmLYaZx5y`e&1~X{oD*jLf=bEyPSDSTgfOKDE8GAqHbID%MMQ)^6Gk< z2C+v(3%Pf<=bLTYwn5aB`eW9j1$r@UIWA%l03!=AV{Ry&7Z?XMlhsRt*Pw^K zbGij~{G>|_Exo`?G?6_m zi@9h5EQbUc&048A(s~Y17EueiqI-s`-0%^tr#{^e64krb=qjoKF&U>$(OdmE_tc$j zG}e1%>(5-hrJvR_x{F<+C8xV6iN{*%@XR6VA(`d~Rhly=-`%%K-=>x`$oEr{ynnYZ z_Zq1d0+;4l*yN&G0avx*pD|A@4UNs)9!v-wR%G9y#8KGSZ8x?42-oWs$7YxtgzYfl^fGU|e@Qimf?EHieqPd}LKfNp4Dd zBZZYA*k`sNcH+Cp%pt4zlkS{yw?Ap-KQ!)i3;s%@?OWXH!umkp?3QxsY$`q7f}e-@ z+r=Xf&soc^kWwCSzh)h?BjowBlVW0lh=^A%U;c7K%l|JWC3@Ja)p09_py_|@F8u9t zwcOz-)wPUDLfBANRyJdjBYoibACTnO)JaIMbfY;X8Z{4JwW!~$^RlLsja&~cX0ldZ zzT>2BlwCK^(Bdeoc$GGJ;NClZ`Sh~vx5_(ev=m2fIT%Fd1{ph_s1N z1IFU2s{5}i8*djB6g>ME!`$%jwfbBJ(ust0hFM_4;P0cgKw=Um{?fA65lxJDm*zd`ZU5(tY zPYu3$R-0xhNHt+3yW-hw$f`Y)7uN9_$4YyaWQ(d?+t@?x%X?Q=+?ki)hycMc?9fZw zk(!!HYG6%hlEe1Fo_eY%pTM19af zJbnMFilF!sO?4fUb(Nk5-G~5n0m>cZWW_{BWua6(A_>=2(Y!`Y)llW^4 z4N<1)*iXqp9)jLGn$4~Evn6+$6)~TFw7ZJK8`1K|poc(I&6kWsP;gLxeg6h*fth(_ zdXr6dj^Gy@O9K5-@fl@BVPQ8d2NQ{M%+h?f7y=mdO4ovMKon>oSzjws{!UX@c~xM` zsyY5)&QEVbx|+_5yW8A~UK}0!cSMZE>SR`@im9>YM_Dr(3zw7v_U&`r7x{$8mxU|z z%AW27$!t}z*>pZ~QNX#7jfRJX#bM6NRweE;u+_TdUwk_4 zc++u^D9vHP7kP1t8EUxI7Ysg5tZemvI3m0A-q6iR25*|Bd78W^dn(uJI|9^e;}pDk zIWrs8hf7=jj_F$0c)v8l>O>lgX`(5JC(KXCNHJj4;uz*0_&d-Mf&$SxMgl+;tLpdk zCV}#hPtW^*noyP>+t@M4<$B}Mt^U(@Dr&7%erxd=TGH?jv)-~k$Jl?|@J68b#b-r1 z?Pu!U6g!Si`61UuQ&wLnT@XE1+RmCS%#OFh0ll9*JT&GjSpTdxxA>W6Q6NcOrX8xrPt~ci&tm(T+W!N zMws$iEk9PH8D<{WEneY`vy%usGj2|7>k8bx z-TAqY?x_R`mx*=l$6hf%GofFon%JbciFuA!wdzKR)sX$id&?15mIqbfUOw%PvZ7~I 
zey^QFB5NP3OoHUIx2j6=sUdCoM~n3r*FJUS3h&TS_;Uv(qdMX{BevTtt^Z6+z)<&dutq52)kEhMTCf%``b|H9M~;e0^^Q zj3@e4$Q|!EF=rP1yquWx^W}1`;Ct`mRTD%l5d`3PWIC-Q{`FIX4f!y$TK5Ik?$2CY zL^qdgictC?D!)j@ZnHsHA9;+t{k~wq#%Z3=fHiVPg{CqQPQ!A;!CG^Q(`Al2Z{k%? zuqA1ZSN)Yd((s|_?34OkqsTA+DUQ$R?GgR>F>$u|F<8gZcphu1xk55KtUg{89&Sl*cN !kM}i4)2{w z4f>G@`hlX>6#36qsVLTDMRV+!hU^;$Z>@VIw^JPW=6e?Tjn4NB>aG;jMw*P2e4we( z=vc97r#_2l_N9M8fHfXucps?8aTLQXHazDKqsL+WEe##0$BvHiNN-Swe@oDd5L9UH z7TeUa?a`y)T85W_tNk6%JXlzHZEE}2;0Kz|%Kl$J=m|ED-HLpoQnFPb%larsG9lHN z+jKePsA!v&<)sKSHoe8L1U?^d)>-2LLGA0+%a`}APND~1P*gOowKbH(Xh%-{U6NWB zvu|*6v=!DD9WrrG*4Dhl-SV-t)%Ti^{O3ariu#G-Wnt_5T11~v>AiYJn{4UYRFTX| z0c7ir;u|jwKTTjjba7ec2Z^Q+@Ncy2p!#%4MTJp6**7StaOFiC8&ODV!LFt(Q0isc z5v{%K=G2~_H#|u%iQ6XxSi1gZUvv^%%;VwO+aq{AJC-uN;;dyLk2yfF{{jWwt`8hMUgc2KE<+Sz@X10Kg_ymQiC8rG{7mZazOGq-E7*t`wbHi?PO-Ld&ifi%*Z9;LBVx8OEmhp>Tr9a~w7Ddl z*Nc4c+7R!%bh~ZJ(M(F`&yU$6pk$$(xj}3-02-k=fB(rOTKjn-Q;qhyYN|cu!Hc?& zs!R^eyi8x&=dUdq(^5y%9X`Q-!di#>soxfRrW(z$$q!G88xp9ePDNy>+wU?sBK_ii zRk`o-BHS$K2N(e@EMllR1JGm_Y;u<1sF;!OcZmgZ{9UN`^(BT1l6`DM0cl`bK$y zw|?|@1!_oc2)ea%b&`OMf>-W|$T;tX?lqFij0Uegzq^rVIbI)LA+SHg&uKY?Jzcc} z#40;XtB^`&>3CYKBiul`c)U2l*G z4hz|4-KG-3f5{%);q%4g_O zGg|cQ=hyxD1?0a-4AsZo-|^M&S-Buxyv7=23nr!&77m4oUlGhWA%T}n<)P^Rf>&+^ zU}_iqPpxMsMlafT9&G)OTxr+N&rdre6_)ddxd=lpqDKT~Pq!%}u6PV7q{)k%aGiPKsfBZVPgz|J<{QdiTc{~D}54sSBJpZzZ z^x06&tE@qsib*uc$>YmJ`?r=-L5RT&H!9feXM~f%{PiAImd!aqx7&5crUv%uw^qul z^bLqyx}f(Uqr;m0_hnDv^1zp^L3^~Lmfx#H zYedB-4YUQo&)Lq%DI|wt`L&muxM$XeuolPBEYFb=1`s8jPnCU{_bTS=mYS2{wcz6& zr1rHre0wo|RjTARqEdv<#$fYY*+W8x&+i?x+IsNM_iFyLulRcTTcaD0*0ZB);j`<*!P5y{q~(LA z&nB$c8#ivOT76AexlS$iszSb6Y^!1Fhk6=8x44?@wu4Nl(=#DvFYt2uw z<^Sw(F+Frt!T0uQy{?w=nZYY&8a0RI^o!Qq>P*rVRru_yILm+NTUakqn#0HOgCX2x za+n5Rinp75Wol{jSvvVu(RO9=# zw{}kF=nEz_e>=U)Z&q9K-c5 zm-sJ&HyJT;Qq?5hGWYzOWl{aeZSBxNSp`sp3p-8)Tn@_J@}$g{t8_55=q64gh7 zYWL$BqKsIwW2CpnS+ge7TJ0$c&ncdJy;6_5>2KGJV@fsAR7-cnP)mnSeC5*hlUZOWx_d%;I9^v}9wc&D7MEtGR>(1l)Q zen#3vqg&N`X$y@-eYoV0FWu&MOZg3*{P?OQmTzeDkF~QGq8dw@2fB`xt>-!MNn>_g 
zAZfAiuhmP3jtV>bT1@5m&l^MqZX8c_X`gG@W&M3lelW?$Z10~7dvvZ`T0z4%(_GT& zjj#bvfb4jxjDmPx!rVIRM7Comvd%M4wM@{dTqp}0{VAOLvrEk2qo6^(>)RiZ!FR=e zPVnp>4?I+$tW;@mad~BimR8_$ddf?h&(T`Byp&$L`;m=Wwv>J&+fB7om$iN;X1e6h z-juWo#gyrkJiGV>S0OWvQvP!{$3*c>xjiB~B1>dKhX468jw|9~>r(Ac-~XLr|8l`M z^U3F`X7bDspUmWJ^Xm6!m1i#8b!=iRQ8Q}od#gf1qY5)Y|dV_U80PZMvKtw-1RPSf%)~I$eJmuSbjK zQZ!u;G?I>o`x&ke4i0`W-0Tm1|LOaNWEY;RK;!|!t$aZyiSU2Jyb89si( zvhtrS+Rgn^G;PnjckhUk0-;X$L*h+TYBD7OV>sWqo*dL1x8^+0a!!X+K#aXH|6c#Kjm%HVBr&h-#{D?lLCzZDLgKLh-e{Q3V1mHk7`K{INt;&{RglpB@}zH%ekzql;i{hG}11Z?uA{Gx)yJ0W}Vuzu8YKdlK&Bkj@F)W4$35r+6a_HrI zG07kqeC;3dsurF&1+V#6y-o~7u{g-QCYc`ql@2;yDwR?Y+997&naGv1Q0Xbf*o#?D zuwy}SaVSLVF`c~d=|%hU&cT9Hz$%~sANqJta=AuF;Slsbaq85c{2DEALL*rItfG*y zK=N^;1TBlMVIggyY%BkBUY%Vb9^I0gc@%WR{8{Fe;qx)a( zX6M}P8z$^Uukt0SGPjtUn^SmUuI-@sXL%A1ONKD`t*$e|PWLl@eBOxqfyf3yK#P4x z(K_4Ae_jP7LZF_1^4Gt8U_0kRRd*qEbMx_eUTrbJR>=2AWnE;oq=JG%VwV2Zt5^Md zzZe=`{$E~M4U4N*M}{;1vwxoDItm#oT>emrQ;FE&ZE?W~5$TO~8tVZ&tR+SOzz6$W zmtR=isR zAjP7eIZcl#Fh@pZ6%?8$=s4=1Cpp7I2vqC63HQvji?da~l-k$!lz5`QC zK_F+4^7GA`0!S0jAgj0P+qQEjk9O4&^uiEBZlO}Tt)Vef1tXsg-TtPG(C#{T_^@HE zvgt1z^5fAG$3}oF5F?~v5lhR7p0B_hhDAPpKEkJ${dgVb1@*HQObond-(3v{90FMt ztItrl?-lvDD9^XPhe{0+yg&sUz>1B91SlNLz$spdR-~5H2`O!FEMD@zz?)SX8b$v8 zoJjkUa0I!EEsTt%__IeiIDXfp&h|y#NH;k~=C&Z=@eAdV26$H0lRH8^y`eM#ZEiJK zi(*Tz46po7w&|%;50gt7OdFRgDPlSc5%;=Sb?TirtzxHAKqko;FAYgZ@IZrO6jSua z&Btyd5fAn;yJ)!O;>C;VxH%Hs4qfytN8^z6svtVH;XS8hi302+AzghEx@%Lc*RwqO zYQm)UqRW?Y6SUDu#w)f_2hOY@rr^%t`xy4}N=dT;n-x=||C(+iMpKYg+^V;jE=*%%$Z)(;zZ>P}Xv= zF}0H747G6>BmF+*TcSzEu9{Ew*F^JB)De9hRCnc>wpeS)N6Rtu@Kh>xV{PyuRd#;$ z>Q$fhO4)vBkL)qXob;T1TlF40Ed=+E_{if7+U&06CKF%PU9-Dcs||3LoFgL9B4La2 z#(PfDQU15VK0>FWvhsUz@kFl^l3$!$O$|HYCm; zsrC_aA!1nDgOk>?khVH@7bEA=S-Q0!87^G7(0p>`8XClsTA?vk!>C6669md$y?Ql7 zyUtk4!1qKZL|`~$+-jDoM=P|-8I3K)A z;_ZX@R*_{w#zF!Ubq`cW7j>#Z7_cu! 
z_7Kj3F0!)#hO$k-oqPYXlDILN{>H<62gzk@@mXV6K+Vq%|=kS`XFMgKhJHwb+N zvXFy%p6-FFyPO$Cd@#H*in&c9hZxw7tI5-hpP48^CSZ?2>gkE1>d5PJR%X1=a>kap z19|+(jLi{=!i;nn5F@XTkoKW_CiK0 z89SUC_KD1g59dw8h4KaTDR=Kwe&F^4R~_>HeSzf*+xd2;yu0E4nrIuAzY#M1_ycl@ z?U=2Iiby`ll1dth=U@Xw=2V!OnRDi@$;w|yAr`eu`qK@ zSKk#KwiBj>c%+>i!>rQv>Qj!i7i{UIOBQmaR^?NzlN~iffI`t zy~R5}2Tej?VwVRy0`0U5vKtm~$7}BWtWB&{(b?r-9+?PGqFh2!@;J^^XH@nL#6HIA zBk{;4H_VGI<_wc~b!4jN;1ouiu{VOYu;0~=8#5>d{%_wNX!lgq=T=i!Ckd=NtX9sQ zJLgbl47dZ1(gNl81nAG5KK+Or*PPS)KyEzU&Akans{yZeRl%o*!E=~fUjEg+rL>4G zD{_LVlow<_R3d%S2cL15NqW=!n^~Xl0|eZ=_k{ig<8|ApJx;2-&XqVf{gWO^?2i6d z()8c6g@01}gOvaCOzOYo22d9`83~nB9I<(>OgAf~B4nOCc_KXBY7c?c^^Q3YLCb~P zfm-f!AAKLBZ~kvdawdvDJ4{OyIFU4kWTyeD{9WVErz1DXCp+JVhu@7=tASwyd@YUy zJo#+Rc!ug34WxM-YiU0le?B{DQkhK3Fw-U2)Yz2@C;CB~Bz?&GKb4i_Ubv4nQbAwj|2 z*!s6`AC&UpRUK*lDvXvTT*4Ex$BRC5Qrq5$mO~2ITNSaDV6HF=E>i8rkVOBR#U{q0 zmKfCdFG%RT`u_d``3e!NpEx>Nid}t%DSyoVAuf!gZBvQxay0om+6J!6_tpnq;@0D6^Wie*f_$R`mm!UEHZBcS-q{L6>r=xM_d$(I@|DE zG?5*21}x#1c(8XUP-Z!Q>C#3JEg&TtfR-(i_5yI`u&rnBXJ$c1=?6kNru@VbnBM%*Uum~WO5D2~n7t%=NqJ-#nDLg(~hZK;_bk`tM@E3R(F`2*jU}z2Ls1f#$ z`)gPt4np>*?;TmEo!aF=RLG$6I}PvSIaWU~>R83f&b|s5d54oLtv)jX>?^0Yo4%Yh7<|19Ff(144Ly-0`h1GaQoYi z%uW>6Yz#CkgsO&T&Wdlr-PlVljSniN;I;7*gKfk>cO{*$I8xOqKV$?WWwwj%m;~K~ zt%Dyn$9=FdShHVIP$+!7ZtEQ!^O(0s{$^Mz*gb-;Vj6A4-y%E-I1^mLfUvy*cG$O3E8jI^wNwfkEk=J4fK#r3Pn`PW|6< zzh{yAGf>D$Ri@AtH`cdf+qMUd-7t9b#mN6&JB+PPeL`C0FEpI7)ef4-jQs0GG1Wfo zF*{c3`r<_=*4moltQh;b<{!i*N*-xDh2+bKwI|yIM@L5q2@RDLeWebSlJ#V@d~{cg z1!%vbUE{6}SgRPP2twRDc<|t@_&O+Igd-(V!u($bAb6GzovyqB4q8wrlBrH)7 zJEUJEsw4G7qsc9~*}@6g6O;LGqR*_y${AfbE(L*G!>n0T{$*3cI4PG4Qxpg zPnC>H_+G1Ip%?&^L2CXu*bE@?L+&$=g-5f8_kG2u`Kl|@KftLQE{H+k?R`LYBq4Ct zCmWOj;bya5029gZ`ynqfdxz2~Be?I59zqJLaKn5CQ!Y@-)~{c`mX?;}yM#4POzcpM z{OE>0`dWly_%W;~rn_BO!wp~r9A)dj_13OJy+bM9+<~F%kj9hC+1?`j*NQ~4jNyfW zRJdZ?>G^YH2t(Y7!#K3o1R3OtRNxs-<*8alARv9yse^cSiI?9LK`T0tG_g8%W!feehE`KAwl_Qe@aI=zC!y$zKUu$ZC2dWVRV2bePsHgp9>g7nd&V5fZ6TT|tri8}8Q? 
z`8^uS9~Y7@0-%&VVZ)(bq#f`9c##;$<;U*(w2D1{)QX0MoPy zjMWxePc@x4G(3LmQYDPNd4P_sBybc2w6_n@2OzN}=IUIkk4^}{^609-Bg^;jn)E=y zwxVmzU8u(=nu-{qydtFz!b(1uGthy#N_dq?T)Omeq8|@bNHylB$;B@?fv+pX3`_?n zZxZ$y&&key`s^9&)O_JQmPB(tWTl1i(ArRuPA*!CcqY!D*Ma&x1p(F>%j|2qN{d?T zOCS|UE~W`fcQFkER^H@^(f=U*NQkS0%XtejMb(S5rj7T{r|R5;r9-Q=Rhh_?N4lHb z4Sy{EYi0|Gp&mkNdWj0=tq-XwMhOYJ`9@r|`-l!%{oU}Hg!WrdHBdrBl7e!KuCpdq zA-WOIXnWG21k9P&iz9IsKu{^t%K=}PV)S^rb*j|>#HXmJD9Nl#->&u_*XYm3?H!H2 zZZco4m~w4$z7X(2{#;7jj{#x?HTN|SzMMu^V-P3FF$P5PQ}S?LZtnfT#9BsUMOV2x z>g~uIY_gJ`N+>TMi07-~G%gz0S&w|BcGa%rv+V=GmD)+l;d~@a(FvtE*^Xw|KuwXY z@lZNU(NI^d{Dfq~U)1Up)7euuEMPqHC0ef*2=CDRl1%Gz*Vp zD;(fBcxE%BVMtG=wDAtP@3o4fYi#&;{CaeW?vXRl@G7!T*PLIi2={Hs@FQz#U7VfS zlQY-_n56s*MX$csl^p-NSC4=giYhzKLI}lG`&-I@iKgQRNdd7Gsq+@lyj|y*=~y2Y zMR#=~lj91WujrNPakeWpj${H&Q8g3VLxFU>E}9XDV=1OaS-``{#gb+@n8)(KU1SD# z*a}-r_T@sf2O1Sxh-6B})z=_w-y+0}C}|_OKL$d1zN~;a3uGd0W{G0QvfcVP^HKL~ z49C6nCJH%j&eQYp(}ZLzmR##hQfp)5kRoS$ubNs9LZpWNo)=F2eKKRz}WxenIh`Gj4T0aL+ z6Ou~o21OkKhnQ726$5~UX zOfH?HDhx(``W@#I{3<5NhuqQI{T5O~4zXl{tKMwrh4@M3c(@u@`9GC+L(7(aiFrPu_SC~6M6 zBGYsn72vVtQkWq1GQ5bo5U*AC@t)}{P7w4FDLobMoRTvJL>quJ~cTELK3E?2b2$ z7X8tf@pAL>%C}vbYgtc{^vZ<0SH3ikW(XsV__^+Gdu%=6C9nX7R6D^uz;p>cSNM@+ zo!U~%0ca^fXm$f{Imb!rkdcuQb?p`;NIeY3 zOoE1?rXgSZG%-X2vur97GJoQFpBNW?rbB#dZaC(N?He&+Tp0FEF?!bc%Gr)Py)0pf zrdu6zzG8DfWR|WzmY(|)iChlEpW2j%5k%zlGe*xL3R-+qRJi8Y({pEWGRAfkIh{p# zyFpXhYy82A^dHqYfBK9r0pr8kq`%dyz5qB?9WL|0WWLaJ0;y`pLu3L&>=!^K!Town z(Bj46LERcx#L*76_{7Hv%Om={N`yMBJ4FtIcvF*1FL&hpKggVjujmi!5 z+qQLB6LSxSA zfJ&qzD!#N0soZ28{V>jN(k>&JDWw(XbU3sOMQe9q=c1Mr0rZnuMtvC;Z=I;ECrkSX zI(ezUa?|FB*EHyw*gNk%QA%-!Oz1UvlsmftMe$OGYf(lU%EOC6I0U&|^GUhLYMhqb zlpM|Y-LT8!hV;j6N?zy%cdJr7sHp(2fB+SJA;hJf2L8|g_Ti1lo&qhJX(iWC=9CH< zvTruYMTE{sre-t?%I?p?mUMtVPYvz5b=)W%X?;t&j9vh{Z|Tm68#V%;?r6sEAN5Ik zzCVCo59`zonhZ~+4$L8Rm3dj}YqSK0T>EV;>@y|%+hY4dCgZ%RMQF^}te-0Vbn^;_ zT(!{|6&yf&v5_>N433)#=fR31a}rvN&R8>( z8u-@cx>YQvk+0o|yS9WNY>DP2A~gyTeJ-lTu_Q~RJL!UIrq_+uLo 
z3F54RwC*`9L(EQavAO1|{US9!7Fj2xe)jY8d!oR`d;cbNqUjs_TXyr_x%|2w3Rl3^ zD+$#NX) zG%`Y#<7d$qp=8=*jHlp;Gp|UiGXFT|=jj8);Q^Qb7R>+{oyy2;j);g*GW+$!3J-@g z7<^!PWJtsAAoKk#WKfqs5j$ojV8lc|{|^w?@>l);#?Ah(zjS0tw2!N3e~B^&1)mFl MT{`>m^sPt#1AeMz=Kufz literal 0 HcmV?d00001 From b4cc70f3a4f2b61ca8142472fd6fd487d4baf845 Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Thu, 8 Feb 2024 17:04:25 -0800 Subject: [PATCH 329/648] Updated scaling plot Signed-off-by: Ankur Srivastava --- 3.test_cases/6.stable-diffusion/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/6.stable-diffusion/README.md b/3.test_cases/6.stable-diffusion/README.md index 0b65b52e..76178750 100644 --- a/3.test_cases/6.stable-diffusion/README.md +++ b/3.test_cases/6.stable-diffusion/README.md @@ -417,7 +417,7 @@ kubectl delete -f mosaicml-sd-eks.yaml We were able to do a scaling test till 64 P5 nodes. The following charts show performance improvemnt and raw throughput numbers as we scale to 64 nodes: - + From e30c90bcf5453f74e0d7d9ad8e1034cee89799a6 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Fri, 9 Feb 2024 09:26:58 +0800 Subject: [PATCH 330/648] Fix paths to pwd --- 3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch | 5 ++--- 3.test_cases/1.megatron-lm/2.distributed-training.sbatch | 6 ++---- 3.test_cases/1.megatron-lm/3.data-preproc-llama2.sh | 5 ++--- 3.test_cases/1.megatron-lm/4.pretrain-llama2.sh | 6 ++---- 4 files changed, 8 insertions(+), 14 deletions(-) diff --git a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch index fb9d6632..31ef5543 100644 --- a/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch +++ b/3.test_cases/1.megatron-lm/1.data-preprocessing.sbatch @@ -10,11 +10,10 @@ ###### User Variables ##### ########################### -: "${IMAGE:=$(pwd)/megatron-training.sqsh}" -: "${FSX_MOUNT:=$(pwd)/gpt2:/fsx}" - # default variables for Enroot +: "${IMAGE:=$(pwd)/megatron-training.sqsh}" : 
"${DATA_PATH:=/fsx}" +: "${FSX_MOUNT:=$(pwd)/gpt2:$DATA_PATH}" declare -a ARGS=( --container-image $IMAGE diff --git a/3.test_cases/1.megatron-lm/2.distributed-training.sbatch b/3.test_cases/1.megatron-lm/2.distributed-training.sbatch index fa3f8382..b34ba37a 100644 --- a/3.test_cases/1.megatron-lm/2.distributed-training.sbatch +++ b/3.test_cases/1.megatron-lm/2.distributed-training.sbatch @@ -30,12 +30,10 @@ set -ex; : "${MICRO_BATCH_SIZE:=1}" : "${GLOBAL_BATCH_SIZE:=288}" -# default variables for Enroot -: "${DATA_PATH:=/fsx}" - # default variables for Enroot : "${IMAGE:=$(pwd)/megatron-training.sqsh}" -: "${FSX_MOUNT:=$DATA_PATH:$DATA_PATH}" +: "${DATA_PATH:=/fsx}" +: "${FSX_MOUNT:=$(pwd):$DATA_PATH}" ########################### ## Environment Variables ## diff --git a/3.test_cases/1.megatron-lm/3.data-preproc-llama2.sh b/3.test_cases/1.megatron-lm/3.data-preproc-llama2.sh index 91e60351..688f2cce 100644 --- a/3.test_cases/1.megatron-lm/3.data-preproc-llama2.sh +++ b/3.test_cases/1.megatron-lm/3.data-preproc-llama2.sh @@ -12,11 +12,10 @@ set -exuo pipefail ###### User Variables ##### ########################### -: "${IMAGE:=$(pwd)/megatron-training.sqsh}" -: "${FSX_MOUNT:=/fsx:/fsx}" - # default variables for Enroot +: "${IMAGE:=$(pwd)/megatron-training.sqsh}" : "${DATA_PATH:=/fsx}" +: "${FSX_MOUNT:=$(pwd):$DATA_PATH}" declare -a ARGS=( --container-image $IMAGE diff --git a/3.test_cases/1.megatron-lm/4.pretrain-llama2.sh b/3.test_cases/1.megatron-lm/4.pretrain-llama2.sh index dfa6eea2..06bb4c77 100644 --- a/3.test_cases/1.megatron-lm/4.pretrain-llama2.sh +++ b/3.test_cases/1.megatron-lm/4.pretrain-llama2.sh @@ -86,12 +86,10 @@ MEGATRON_ARGS+=( : "${MICRO_BATCH_SIZE:=1}" : "${GLOBAL_BATCH_SIZE:=2048}" -# default variables for Enroot -: "${DATA_PATH:=/fsx}" - # default variables for Enroot : "${IMAGE:=$(pwd)/megatron-training.sqsh}" -: "${FSX_MOUNT:=$DATA_PATH:$DATA_PATH}" +: "${DATA_PATH:=/fsx}" +: "${FSX_MOUNT:=$(pwd):$DATA_PATH}" ########################### 
From cb8ac9d5e1e31c8b0dc160dabada4738f336df23 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Fri, 9 Feb 2024 09:48:35 +0800 Subject: [PATCH 331/648] Undo accidental changes on nemo-launcher examples --- .../conf.template/cluster/bcm.yaml | 2 +- 3.test_cases/2.nemo-launcher/haha-envvar.sh | 8 -- 3.test_cases/2.nemo-launcher/llama2-70b2.sh | 89 ------------------- 3.test_cases/2.nemo-launcher/llama2-70b4.sh | 89 ------------------- 3.test_cases/2.nemo-launcher/llama2-70b8.sh | 89 ------------------- 5 files changed, 1 insertion(+), 276 deletions(-) delete mode 100644 3.test_cases/2.nemo-launcher/haha-envvar.sh delete mode 100755 3.test_cases/2.nemo-launcher/llama2-70b2.sh delete mode 100755 3.test_cases/2.nemo-launcher/llama2-70b4.sh delete mode 100755 3.test_cases/2.nemo-launcher/llama2-70b8.sh diff --git a/3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml b/3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml index 388d28ff..bb0f5703 100644 --- a/3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml +++ b/3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml @@ -1,4 +1,4 @@ -partition: us-east-1e +partition: null account: null exclusive: True gpus_per_task: null diff --git a/3.test_cases/2.nemo-launcher/haha-envvar.sh b/3.test_cases/2.nemo-launcher/haha-envvar.sh deleted file mode 100644 index 11ec68f4..00000000 --- a/3.test_cases/2.nemo-launcher/haha-envvar.sh +++ /dev/null @@ -1,8 +0,0 @@ -export NEMO_VERSION=23.11 -export REPO=aws-nemo-megatron -export TAG=$NEMO_VERSION -export TARGET_PATH=/fsx/nemo-launcher-$NEMO_VERSION # must be a shared filesystem -export TEST_CASE_PATH=/fsx/awsome-distributed-training/3.test_cases/2.nemo-launcher # where you copy the test case or set to your test case path -export ENROOT_IMAGE=/fsx/${REPO}_${TAG}.sqsh -export BMK_MODE=1 -env | egrep 'NEMO_VERSION|REPO|TAG|TARGET_PATH|TEST_CASE_PATH|ENROOT_IMAGE|BMK_MODE' diff --git a/3.test_cases/2.nemo-launcher/llama2-70b2.sh 
b/3.test_cases/2.nemo-launcher/llama2-70b2.sh deleted file mode 100755 index daf4eec0..00000000 --- a/3.test_cases/2.nemo-launcher/llama2-70b2.sh +++ /dev/null @@ -1,89 +0,0 @@ -#!/bin/bash - -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 - -set -exo pipefail -[[ -z "${TARGET_PATH}" ]] \ - && { echo Please set environment variable TARGET_PATH ; exit 1 ; } \ - || echo TARGET_PATH=$TARGET_PATH - -################################################################################ -# 000: Modify this section to define pre-training configuration: model size, -# number of nodes, max. pre-training steps, job's max. runtime. -################################################################################ -## Pre-train llama2-7b on 16 nodes for 5 steps -export MODEL=llama -export MODEL_SIZE=llama2_70b -export NUM_NODES=2 -export TIME_LIMIT="7-00:00:00" -export MAX_STEPS=8 - -declare -a MODEL_ARGS=( - training.model.tokenizer.model=${TARGET_PATH}/data/llama2/tokenizer.model - training.model.gc_interval=0 - - ## Uncomment below to enable fp8 training (Transformers Engine) on p5 instances (H100 GPUs) - #training.model.fp8=True - #training.model.fp8_hybrid=True -) - - -################################################################################ -# 010: Advance users can modify this stanza to customize benchmarking behavior. -################################################################################ -declare -a BMK_ARGS=( - # Disable validation, as we're only interested to measure the training time. 
- training.trainer.limit_val_batches=0.0 - - # Disable wandb_logger - training.exp_manager.create_wandb_logger=False - - # Ignore checkpoints - training.exp_manager.create_checkpoint_callback=False - training.exp_manager.resume_if_exists=False - - # https://github.com/NVIDIA/NeMo/pull/6181/files - training.model.data.data_impl=mock - training.model.data.data_prefix=[] -) - - -################################################################################ -# 020: Internal settings. -################################################################################ -WORKSPACE_CONT=$TARGET_PATH -CONT_RESULT_DIR=${WORKSPACE_CONT}/results-v2 -CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe - -# Dev/test feature (off by default) to force each pre-training run outputs to a separate directory. -: "${BMK_MODE:=0}" -if [[ ${BMK_MODE} -eq 1 ]]; then - # For debugging: each run has its own output dir. - TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) - CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} - - BMK_ARGS+=( - base_results_dir=${CONT_RESULT_DIR} - training.run.dependency=null - ) - - echo " - #################### - This run will write to directory ${CONT_RESULT_DIR} - #################### - " -fi - - -################################################################################ -# 030: Here we go... 
-################################################################################ -HYDRA_FULL_ERROR=1 python3 $TARGET_PATH/launcher_scripts/main.py \ - stages=[training] \ - training=${MODEL}/${MODEL_SIZE} \ - training.run.time_limit=$TIME_LIMIT \ - training.trainer.num_nodes=$NUM_NODES \ - training.trainer.max_steps=$MAX_STEPS \ - training.trainer.val_check_interval=$MAX_STEPS \ - "${BMK_ARGS[@]}" "${MODEL_ARGS[@]}" "$@" diff --git a/3.test_cases/2.nemo-launcher/llama2-70b4.sh b/3.test_cases/2.nemo-launcher/llama2-70b4.sh deleted file mode 100755 index 8b50dd55..00000000 --- a/3.test_cases/2.nemo-launcher/llama2-70b4.sh +++ /dev/null @@ -1,89 +0,0 @@ -#!/bin/bash - -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 - -set -exo pipefail -[[ -z "${TARGET_PATH}" ]] \ - && { echo Please set environment variable TARGET_PATH ; exit 1 ; } \ - || echo TARGET_PATH=$TARGET_PATH - -################################################################################ -# 000: Modify this section to define pre-training configuration: model size, -# number of nodes, max. pre-training steps, job's max. runtime. -################################################################################ -## Pre-train llama2-7b on 16 nodes for 5 steps -export MODEL=llama -export MODEL_SIZE=llama2_70b -export NUM_NODES=4 -export TIME_LIMIT="7-00:00:00" -export MAX_STEPS=3600 - -declare -a MODEL_ARGS=( - training.model.tokenizer.model=${TARGET_PATH}/data/llama2/tokenizer.model - training.model.gc_interval=0 - - ## Uncomment below to enable fp8 training (Transformers Engine) on p5 instances (H100 GPUs) - #training.model.fp8=True - #training.model.fp8_hybrid=True -) - - -################################################################################ -# 010: Advance users can modify this stanza to customize benchmarking behavior. 
-################################################################################ -declare -a BMK_ARGS=( - # Disable validation, as we're only interested to measure the training time. - training.trainer.limit_val_batches=0.0 - - # Disable wandb_logger - training.exp_manager.create_wandb_logger=False - - # Ignore checkpoints - training.exp_manager.create_checkpoint_callback=False - training.exp_manager.resume_if_exists=False - - # https://github.com/NVIDIA/NeMo/pull/6181/files - training.model.data.data_impl=mock - training.model.data.data_prefix=[] -) - - -################################################################################ -# 020: Internal settings. -################################################################################ -WORKSPACE_CONT=$TARGET_PATH -CONT_RESULT_DIR=${WORKSPACE_CONT}/results-v2 -CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe - -# Dev/test feature (off by default) to force each pre-training run outputs to a separate directory. -: "${BMK_MODE:=0}" -if [[ ${BMK_MODE} -eq 1 ]]; then - # For debugging: each run has its own output dir. - TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) - CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} - - BMK_ARGS+=( - base_results_dir=${CONT_RESULT_DIR} - training.run.dependency=null - ) - - echo " - #################### - This run will write to directory ${CONT_RESULT_DIR} - #################### - " -fi - - -################################################################################ -# 030: Here we go... 
-################################################################################ -HYDRA_FULL_ERROR=1 python3 $TARGET_PATH/launcher_scripts/main.py \ - stages=[training] \ - training=${MODEL}/${MODEL_SIZE} \ - training.run.time_limit=$TIME_LIMIT \ - training.trainer.num_nodes=$NUM_NODES \ - training.trainer.max_steps=$MAX_STEPS \ - training.trainer.val_check_interval=$MAX_STEPS \ - "${BMK_ARGS[@]}" "${MODEL_ARGS[@]}" "$@" diff --git a/3.test_cases/2.nemo-launcher/llama2-70b8.sh b/3.test_cases/2.nemo-launcher/llama2-70b8.sh deleted file mode 100755 index 65384c92..00000000 --- a/3.test_cases/2.nemo-launcher/llama2-70b8.sh +++ /dev/null @@ -1,89 +0,0 @@ -#!/bin/bash - -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 - -set -exo pipefail -[[ -z "${TARGET_PATH}" ]] \ - && { echo Please set environment variable TARGET_PATH ; exit 1 ; } \ - || echo TARGET_PATH=$TARGET_PATH - -################################################################################ -# 000: Modify this section to define pre-training configuration: model size, -# number of nodes, max. pre-training steps, job's max. runtime. -################################################################################ -## Pre-train llama2-7b on 16 nodes for 5 steps -export MODEL=llama -export MODEL_SIZE=llama2_70b -export NUM_NODES=8 -export TIME_LIMIT="7-00:00:00" -export MAX_STEPS=3600 - -declare -a MODEL_ARGS=( - training.model.tokenizer.model=${TARGET_PATH}/data/llama2/tokenizer.model - training.model.gc_interval=0 - - ## Uncomment below to enable fp8 training (Transformers Engine) on p5 instances (H100 GPUs) - #training.model.fp8=True - #training.model.fp8_hybrid=True -) - - -################################################################################ -# 010: Advance users can modify this stanza to customize benchmarking behavior. 
-################################################################################ -declare -a BMK_ARGS=( - # Disable validation, as we're only interested to measure the training time. - training.trainer.limit_val_batches=0.0 - - # Disable wandb_logger - training.exp_manager.create_wandb_logger=False - - # Ignore checkpoints - training.exp_manager.create_checkpoint_callback=False - training.exp_manager.resume_if_exists=False - - # https://github.com/NVIDIA/NeMo/pull/6181/files - training.model.data.data_impl=mock - training.model.data.data_prefix=[] -) - - -################################################################################ -# 020: Internal settings. -################################################################################ -WORKSPACE_CONT=$TARGET_PATH -CONT_RESULT_DIR=${WORKSPACE_CONT}/results-v2 -CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe - -# Dev/test feature (off by default) to force each pre-training run outputs to a separate directory. -: "${BMK_MODE:=0}" -if [[ ${BMK_MODE} -eq 1 ]]; then - # For debugging: each run has its own output dir. - TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) - CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} - - BMK_ARGS+=( - base_results_dir=${CONT_RESULT_DIR} - training.run.dependency=null - ) - - echo " - #################### - This run will write to directory ${CONT_RESULT_DIR} - #################### - " -fi - - -################################################################################ -# 030: Here we go... 
-################################################################################ -HYDRA_FULL_ERROR=1 python3 $TARGET_PATH/launcher_scripts/main.py \ - stages=[training] \ - training=${MODEL}/${MODEL_SIZE} \ - training.run.time_limit=$TIME_LIMIT \ - training.trainer.num_nodes=$NUM_NODES \ - training.trainer.max_steps=$MAX_STEPS \ - training.trainer.val_check_interval=$MAX_STEPS \ - "${BMK_ARGS[@]}" "${MODEL_ARGS[@]}" "$@" From e65b68920fc34dcb72d822fbfeb10bf22c1aaace Mon Sep 17 00:00:00 2001 From: Verdi March Date: Fri, 9 Feb 2024 09:51:05 +0800 Subject: [PATCH 332/648] Fix readme --- 3.test_cases/1.megatron-lm/README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/3.test_cases/1.megatron-lm/README.md b/3.test_cases/1.megatron-lm/README.md index 1137d357..0bd57380 100644 --- a/3.test_cases/1.megatron-lm/README.md +++ b/3.test_cases/1.megatron-lm/README.md @@ -170,9 +170,6 @@ sbatch 3.data-preproc-llama2.sbatch Edit `4.pre-train-llama2.sh` to choose the model size you want to train. Do this by commenting and uncommenting the related stanzas. Feel free to experiment with the hyperparameters such as parallelism, batches, etc. (for more details, please refer to the [Megatron-LM project](https://github.com/NVIDIA/Megatron-LM/) and the Megatron papers ([Shoeybi20](https://arxiv.org/abs/1909.08053), [Narayanan21](https://arxiv.org/abs/2104.04473)). - -, or any other flag. Please refer to Megatron-LM for more details. 
- ```bash sbatch 2.distributed-training.sbatch ``` From 45f81d81ff5087632292d9b712015d3e36143bac Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 13 Feb 2024 14:03:55 +0800 Subject: [PATCH 333/648] Restart slurm daemons after pyxis installed --- .../LifecycleScripts/base-config/utils/install_enroot_pyxis.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_enroot_pyxis.sh b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_enroot_pyxis.sh index 2a2696af..b862f7a9 100755 --- a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_enroot_pyxis.sh +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_enroot_pyxis.sh @@ -87,3 +87,6 @@ if [[ $(mount | grep /fsx) ]]; then mkdir -p /fsx/enroot chmod 1777 /fsx/enroot fi + +systemctl is-active --quiet slurmctld && systemctl restart slurmctld || echo "This instance does not run slurmctld" +systemctl is-active --quiet slurmd && systemctl restart slurmd || echo "This instance does not run slurmd" From 5001d71b055cfcfcc6f17ac73c8c653fd78823b0 Mon Sep 17 00:00:00 2001 From: Maxime Hugues Date: Tue, 13 Feb 2024 09:13:15 -0600 Subject: [PATCH 334/648] Rename docker folder to containers --- .../{2.docker => containers}/0.nvcr-pytorch-aws.dockerfile | 0 2.ami_and_containers/{2.docker => containers}/README.md | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename 2.ami_and_containers/{2.docker => containers}/0.nvcr-pytorch-aws.dockerfile (100%) rename 2.ami_and_containers/{2.docker => containers}/README.md (100%) diff --git a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile b/2.ami_and_containers/containers/0.nvcr-pytorch-aws.dockerfile similarity index 100% rename from 2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile rename to 2.ami_and_containers/containers/0.nvcr-pytorch-aws.dockerfile diff --git 
a/2.ami_and_containers/2.docker/README.md b/2.ami_and_containers/containers/README.md similarity index 100% rename from 2.ami_and_containers/2.docker/README.md rename to 2.ami_and_containers/containers/README.md From 0f304139f735942af71f096079a84611228e4d6c Mon Sep 17 00:00:00 2001 From: Verdi March Date: Wed, 14 Feb 2024 00:13:44 +0800 Subject: [PATCH 335/648] SMHP: validate FSx Lustre --- .../5.sagemaker-hyperpod/validate-config.py | 36 +++++++++++++++++-- 1 file changed, 33 insertions(+), 3 deletions(-) diff --git a/1.architectures/5.sagemaker-hyperpod/validate-config.py b/1.architectures/5.sagemaker-hyperpod/validate-config.py index 165d4075..a36eaa2a 100755 --- a/1.architectures/5.sagemaker-hyperpod/validate-config.py +++ b/1.architectures/5.sagemaker-hyperpod/validate-config.py @@ -35,12 +35,11 @@ def validate_subnet(ec2_client, cluster_config): def validate_sg(ec2_client, cluster_config): if cluster_config.get('VpcConfig'): security_group = cluster_config.get('VpcConfig').get('SecurityGroupIds')[0] - ec2_client = boto3.client('ec2') response = ec2_client.describe_security_groups(GroupIds=[security_group]) ingress = response.get('SecurityGroups')[0].get('IpPermissions') egress = response.get('SecurityGroups')[0].get('IpPermissionsEgress') - + for rule in ingress: if rule.get('IpProtocol') == '-1': user_id_group_pairs = rule.get('UserIdGroupPairs') @@ -68,7 +67,35 @@ def validate_sg(ec2_client, cluster_config): print(f"✔️ Validated security group {security_group} egress rules ...") else: print("⭕️ No security group found in cluster_config.json ... 
skipping check.") - + + return True + +def validate_fsx_lustre(fsx_client, cluster_config, provisioning_parameters): + fsx_dns_name = provisioning_parameters.get('fsx_dns_name') + if fsx_dns_name: + try: + response = fsx_client.describe_file_systems(FileSystemIds=[fsx_dns_name.split('.', 1)[0]]) + returned_fsx_mount = response['FileSystems'][0]['LustreConfiguration']['MountName'] + returned_fsx_dns_name = response['FileSystems'][0]['DNSName'] + + if returned_fsx_dns_name != fsx_dns_name: + print(f"❌ Incorrect FSx DNS name: {fsx_dns_name}. Did you mean {returned_fsx_dns_name}?") + return False + + fsx_mountname = provisioning_parameters.get('fsx_mountname') + if returned_fsx_mount != fsx_mountname: + print(f"❌ Mismatch FSx mount name: {fsx_mountname}. Did you mean {returned_fsx_mount}?") + return False + + print(f"✔️ Validated FSx Lustre DNS name {fsx_dns_name} ...") + print(f"✔️ Validated FSx Lustre mount name {fsx_mountname} ...") + + except fsx_client.exceptions.FileSystemNotFound: + print(f"❌ File system {fsx_dns_name} does not exist.") + return False + else: + print("⭕️ No FSx Lustre found in provisioning_parameters.json ... skipping check.") + return True @@ -95,6 +122,9 @@ def main(): # Validate Security Group valid = validate_sg(ec2_client, cluster_config) and valid + # Validate FSx Lustre + valid = validate_fsx_lustre(boto3.client('fsx'), cluster_config, provisioning_parameters) and valid + if valid: # All good! 
print(f"✅ Cluster Validation succeeded") From 8a5de02300bc554d34a7bdfa0fe8e61eab989f7d Mon Sep 17 00:00:00 2001 From: Maxime Hugues Date: Tue, 13 Feb 2024 10:17:56 -0600 Subject: [PATCH 336/648] Move pytorch container in its folder --- .../containers/{ => pytorch}/0.nvcr-pytorch-aws.dockerfile | 0 2.ami_and_containers/containers/{ => pytorch}/README.md | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename 2.ami_and_containers/containers/{ => pytorch}/0.nvcr-pytorch-aws.dockerfile (100%) rename 2.ami_and_containers/containers/{ => pytorch}/README.md (100%) diff --git a/2.ami_and_containers/containers/0.nvcr-pytorch-aws.dockerfile b/2.ami_and_containers/containers/pytorch/0.nvcr-pytorch-aws.dockerfile similarity index 100% rename from 2.ami_and_containers/containers/0.nvcr-pytorch-aws.dockerfile rename to 2.ami_and_containers/containers/pytorch/0.nvcr-pytorch-aws.dockerfile diff --git a/2.ami_and_containers/containers/README.md b/2.ami_and_containers/containers/pytorch/README.md similarity index 100% rename from 2.ami_and_containers/containers/README.md rename to 2.ami_and_containers/containers/pytorch/README.md From 90fb097d572c17bb12b95ddb067319c37ac0b1e7 Mon Sep 17 00:00:00 2001 From: Maxime Hugues Date: Tue, 13 Feb 2024 10:18:12 -0600 Subject: [PATCH 337/648] Add jax paxml container --- .../containers/jax/jax_paxml.Dockerfile | 99 +++++++++++++++++++ .../containers/jax/run_paxml.sh | 16 +++ 2 files changed, 115 insertions(+) create mode 100644 2.ami_and_containers/containers/jax/jax_paxml.Dockerfile create mode 100644 2.ami_and_containers/containers/jax/run_paxml.sh diff --git a/2.ami_and_containers/containers/jax/jax_paxml.Dockerfile b/2.ami_and_containers/containers/jax/jax_paxml.Dockerfile new file mode 100644 index 00000000..16e1bc84 --- /dev/null +++ b/2.ami_and_containers/containers/jax/jax_paxml.Dockerfile @@ -0,0 +1,99 @@ +FROM nvcr.io/nvidia/cuda:12.2.2-cudnn8-devel-ubuntu22.04 + +ARG EFA_INSTALLER_VERSION=1.29.1 +ARG NCCL_VERSION=v2.18.6-1 +ARG 
AWS_OFI_NCCL_VERSION=v1.7.4-aws +ARG JAX_VERSION=0.4.18 +ARG PRAXIS_VERSION=1.2.0 +ARG PAXML_VERSION=1.2.0 + +ENV DEBIAN_FRONTEND=noninteractive +ENV PYTHON_VERSION=3.10 +ENV LD_LIBRARY_PATH=/opt/amazon/openmpi/lib:/opt/nccl/build/lib:/opt/aws-ofi-nccl/install/lib:/usr/local/cuda-12/lib64:$LD_LIBRARY_PATH +ENV PATH=/opt/amazon/openmpi/bin/:/opt/amazon/efa/bin:/usr/local/cuda-12/bin:$PATH +ENV CUDA_HOME=/usr/local/cuda-12 + + +######################### +# Packages and Pre-reqs # +RUN apt-get update -y && \ + apt-get purge -y --allow-change-held-packages libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 libnccl-dev libnccl2 +RUN apt-get install -y --allow-unauthenticated \ + autoconf \ + automake \ + bash \ + build-essential \ + ca-certificates \ + curl \ + debianutils \ + dnsutils \ + g++ \ + git \ + libtool \ + libhwloc-dev \ + netcat \ + openssh-client \ + openssh-server \ + openssl \ + python3-distutils \ + python"${PYTHON_VERSION}"-dev \ + python-is-python3 \ + util-linux + +RUN update-ca-certificates + +########################### +# Python/Pip dependencies # +RUN curl https://bootstrap.pypa.io/get-pip.py -o /tmp/get-pip.py \ + && python"${PYTHON_VERSION}" /tmp/get-pip.py +RUN pip"${PYTHON_VERSION}" install numpy wheel build + +###################################### +# Install EFA Libfabric and Open MPI # +RUN cd /tmp \ + && curl -O https://efa-installer.amazonaws.com/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz \ + && tar -xf aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz \ + && cd aws-efa-installer \ + && ./efa_installer.sh -y -d --skip-kmod --skip-limit-conf --no-verify + +############################ +# Compile and Install NCCL # +RUN git clone -b "${NCCL_VERSION}" https://github.com/NVIDIA/nccl.git /opt/nccl \ + && cd /opt/nccl \ + && make -j src.build CUDA_HOME=${CUDA_HOME} \ + && cp -R /opt/nccl/build/* /usr/ + +############################### +# Compile AWS OFI NCCL Plugin # +RUN git clone -b "${AWS_OFI_NCCL_VERSION}" 
https://github.com/aws/aws-ofi-nccl.git /opt/aws-ofi-nccl \ + && cd /opt/aws-ofi-nccl \ + && ./autogen.sh \ + && ./configure --prefix=/opt/aws-ofi-nccl/install \ + --with-libfabric=/opt/amazon/efa/ \ + --with-cuda=${CUDA_HOME} \ + --with-mpi=/opt/amazon/openmpi/ \ + --with-nccl=/opt/nccl/build \ + --enable-platform-aws \ + && make -j && make install + +############### +# Install JAX # +RUN pip install --upgrade "jax[cuda12_pip]==${JAX_VERSION}" -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html +RUN pip install "orbax-checkpoint>=0.4.0,<0.5.0" + +################## +# Install Praxis # +RUN pip install praxis==${PRAXIS_VERSION} + +################# +# Install Paxml # +RUN pip install paxml==${PAXML_VERSION} + +##################################### +# Allow unauthenticated SSH for MPI # +RUN mkdir -p /var/run/sshd \ + && sed -i 's/[ #]\(.*StrictHostKeyChecking \).*/ \1no/g' /etc/ssh/ssh_config \ + && echo " UserKnownHostsFile /dev/null" >> /etc/ssh/ssh_config \ + && sed -i 's/#\(StrictModes \).*/\1no/g' /etc/ssh/sshd_config + +COPY run_paxml.sh /run_paxml.sh diff --git a/2.ami_and_containers/containers/jax/run_paxml.sh b/2.ami_and_containers/containers/jax/run_paxml.sh new file mode 100644 index 00000000..5db92ff7 --- /dev/null +++ b/2.ami_and_containers/containers/jax/run_paxml.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +set -ex + +# Create checkpoint directry +mkdir -p "${BASE_DIR}/checkpoints" + +# TRAINING_CONFIG example paxml.tasks.lm.params.lm_cloud.LmCloudSpmd2B +python3.10 -m paxml.main \ + --job_log_dir="${BASE_DIR}/LOG_DIR" \ + --fdl_config=${TRAINING_CONFIG} \ + $JAX_FLAGS \ + --multiprocess_gpu=true \ + --server_addr=${LEAD_NODE}:12345 \ + --num_hosts=${OMPI_COMM_WORLD_SIZE} \ + --host_idx=${OMPI_COMM_WORLD_RANK} \ + --alsologtostderr From 24ce96ae480676fa8a753a38b66381385ee37f4f Mon Sep 17 00:00:00 2001 From: Maxime Hugues Date: Tue, 13 Feb 2024 14:10:30 -0600 Subject: [PATCH 338/648] Add JAX container build instructions --- 
2.ami_and_containers/containers/jax/README.md | 54 +++++++++++++++++++ .../containers/jax/run_paxml.sh | 8 ++- 2 files changed, 57 insertions(+), 5 deletions(-) create mode 100644 2.ami_and_containers/containers/jax/README.md diff --git a/2.ami_and_containers/containers/jax/README.md b/2.ami_and_containers/containers/jax/README.md new file mode 100644 index 00000000..d512b5dc --- /dev/null +++ b/2.ami_and_containers/containers/jax/README.md @@ -0,0 +1,54 @@ +# JAX container for Amazon EC2 GPU accelerated Instances + +Ths directory contains a sample Dockerfile `jax_paxml.Dockerfile` to run [JAX](https://github.com/google/jax) and [Paxml](https://github.com/google/paxml) on AWS. + +## Container description + +In principle, the reference `Dockerfile` does the following: + +- Provide JAX built for NVIDIA CUDA devices, by using a recent NVIDIA CUDA image as the + parent image. +- Remove unneccessary networking packages that might conflict with AWS technologies. +- Install EFA user-space libraries. It's important to avoid building the kernel drivers during + `docker build`, and skip the self-tests, as both of these steps fail are expected to fail when run + during container build. +- Install NCCL recommended version. +- Install [aws-ofi-nccl](https://github.com/aws/aws-ofi-nccl) to get NCCL to utilize EFA. +- Install JAX. +- Install Paxml. +- Install Praxis. + +## Build the container + +Build the jax container as follow + +```bash +# Build a container image +DOCKER_BUILDKIT=1 docker build --progress=plain -f jax_paxml.Dockerfile -t paxml:jax-0.4.18-1.2.0 . + +# Verify the image has been built +docker images +``` + +Convert container to enroot format + +```bash +# Convert to enroot format. Attempt to remove an existing .sqsh, otherwise enroot refuses to +# run when the output .sqsh file already exists. 
+rm /fsx/paxml_jax-0.4.18-1.2.0.sqsh ; enroot import -o /fsx/paxml_jax-0.4.18-1.2.0.sqsh dockerd://paxml:jax-0.4.18-1.2.0 +``` + +Tips: when building on a compute node (or a build node), you save the built Docker image on a shared +filesystem such as `/fsx`, to allow other nodes (e.g., head node, or other compute nodes) to load +the image to their local Docker registry. + +```bash +# Build node: save image to file +docker save paxml:jax-0.4.18-1.2.0 > /fsx/paxml_jax-0.4.18-1.2.0.sqsh.tar + +# Load image to local docker registry -> on head node, or new compute/build node +docker load < /fsx/paxml_jax-0.4.18-1.2.0.tar + +# Verify the image has been loaded +docker images +``` diff --git a/2.ami_and_containers/containers/jax/run_paxml.sh b/2.ami_and_containers/containers/jax/run_paxml.sh index 5db92ff7..151bee21 100644 --- a/2.ami_and_containers/containers/jax/run_paxml.sh +++ b/2.ami_and_containers/containers/jax/run_paxml.sh @@ -1,16 +1,14 @@ #!/usr/bin/env bash set -ex -# Create checkpoint directry -mkdir -p "${BASE_DIR}/checkpoints" # TRAINING_CONFIG example paxml.tasks.lm.params.lm_cloud.LmCloudSpmd2B python3.10 -m paxml.main \ --job_log_dir="${BASE_DIR}/LOG_DIR" \ --fdl_config=${TRAINING_CONFIG} \ - $JAX_FLAGS \ + ${JAX_FLAGS} \ --multiprocess_gpu=true \ --server_addr=${LEAD_NODE}:12345 \ - --num_hosts=${OMPI_COMM_WORLD_SIZE} \ - --host_idx=${OMPI_COMM_WORLD_RANK} \ + --num_hosts=${SLURM_NPROCS} \ + --host_idx=${SLURM_PROCID} \ --alsologtostderr From d37f2a601102e232122156c362c7d0849c79428b Mon Sep 17 00:00:00 2001 From: Maxime Hugues Date: Wed, 14 Feb 2024 09:48:27 -0600 Subject: [PATCH 339/648] Add sbatch jax for slurm --- .../containers/jax/jax.sbatch | 45 +++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 2.ami_and_containers/containers/jax/jax.sbatch diff --git a/2.ami_and_containers/containers/jax/jax.sbatch b/2.ami_and_containers/containers/jax/jax.sbatch new file mode 100644 index 00000000..6d182a8f --- /dev/null +++ 
b/2.ami_and_containers/containers/jax/jax.sbatch @@ -0,0 +1,45 @@ +#!/bin/bash + +#SBATCH -o jax_%j.out +#SBATCH -e jax_%j.err +#SBATCH -n 384 +#SBATCH --gpus-per-node=8 +#SBATCH --exclusive + +GPU_PER_NODE=8 +TOTAL_NB_GPUS=$(($SLURM_JOB_NUM_NODES * $GPU_PER_NODE)) + +CHECKPOINT_DIR=/data/700/$SLURM_JOBID +if [ ! -d ${CHECKPOINT_DIR} ]; then + mkdir -p ${CHECKPOINT_DIR} +fi + +# EFA Flags +export FI_PROVIDER=efa +export FI_EFA_USE_DEVICE_RDMA=1 +export FI_EFA_FORK_SAFE=1 + +# NCCL Flags +export NCCL_DEBUG=INFO +export NCCL_NVLS_ENABLE=0 + +export CUDA_DEVICE_MAX_CONNECTIONS=1 + +# Library Path +export LD_LIBRARY_PATH=/opt/amazon/openmpi/lib:/opt/nccl/build/lib:/opt/aws-ofi-nccl/install/lib:/usr/local/cuda-12/lib64:/usr/local/nvidia/lib:/usr/local/nvidia/lib64 + +# XLA Configuration +export XLA_PYTHON_CLIENT_MEM_FRACTION=0.7 +export XLA_FLAGS="--xla_gpu_enable_latency_hiding_scheduler=true --xla_gpu_enable_triton_gemm=false --xla_gpu_simplify_all_fp_conversions --xla_gpu_enable_async_all_gather=true --xla_gpu_enable_async_reduce_scatter=true --xla_gpu_enable_highest_priority_async_stream=true --xla_gpu_enable_triton_softmax_fusion=false --xla_gpu_all_reduce_combine_threshold_bytes=33554432 --xla_gpu_graph_level=0 --xla_gpu_enable_async_all_reduce=true" +export TPU_TYPE=gpu +export TF_FORCE_GPU_ALLOW_GROWTH=true + +# Setup and checkpoint directory +export LEAD_NODE=${SLURMD_NODENAME} +export BASE_DIR=${CHECKPOINT_DIR} + +# JAX Configuration +export TRAINING_CONFIG=paxml.tasks.lm.params.lm_cloud.LmCloudSpmd2BLimitsteps +export JAX_FLAGS="--fdl.ICI_MESH_SHAPE=[1,${TOTAL_NB_GPUS},1] --fdl.PERCORE_BATCH_SIZE=32" + +srun --container-image /fsx/paxml_jax-0.4.18-1.2.0.sqsh --container-mounts /fsx/data:/data -n ${TOTAL_NB_GPUS} -N ${SLURM_JOB_NUM_NODES} /bin/bash run_paxml.sh \ No newline at end of file From ad56d82a92082331d9f77a650b367deb921b008a Mon Sep 17 00:00:00 2001 From: Maxime Hugues Date: Wed, 14 Feb 2024 17:48:09 -0600 Subject: [PATCH 340/648] Add jax instructions 
--- 2.ami_and_containers/containers/jax/README.md | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/2.ami_and_containers/containers/jax/README.md b/2.ami_and_containers/containers/jax/README.md index d512b5dc..0195ff75 100644 --- a/2.ami_and_containers/containers/jax/README.md +++ b/2.ami_and_containers/containers/jax/README.md @@ -38,17 +38,11 @@ Convert container to enroot format rm /fsx/paxml_jax-0.4.18-1.2.0.sqsh ; enroot import -o /fsx/paxml_jax-0.4.18-1.2.0.sqsh dockerd://paxml:jax-0.4.18-1.2.0 ``` -Tips: when building on a compute node (or a build node), you save the built Docker image on a shared -filesystem such as `/fsx`, to allow other nodes (e.g., head node, or other compute nodes) to load -the image to their local Docker registry. +## Run -```bash -# Build node: save image to file -docker save paxml:jax-0.4.18-1.2.0 > /fsx/paxml_jax-0.4.18-1.2.0.sqsh.tar - -# Load image to local docker registry -> on head node, or new compute/build node -docker load < /fsx/paxml_jax-0.4.18-1.2.0.tar +Once the container converted to the enroot format, you can run on a **Slurm** cluster the lm_clouds example of Pax. +The following command submit a job to the **Slurm** cluster to train a 2B parameters transformer based SPMD language model on synthetic data. 
-# Verify the image has been loaded -docker images -``` +```bash +sbatch jax.sbatch +``` \ No newline at end of file From af423fa4aba6cd39c39f666c6905fc589a61b888 Mon Sep 17 00:00:00 2001 From: Maxime Hugues Date: Thu, 15 Feb 2024 15:34:49 -0600 Subject: [PATCH 341/648] Move jax in test cases --- {2.ami_and_containers/containers => 3.test_cases}/jax/README.md | 0 {2.ami_and_containers/containers => 3.test_cases}/jax/jax.sbatch | 0 .../containers => 3.test_cases}/jax/jax_paxml.Dockerfile | 0 .../containers => 3.test_cases}/jax/run_paxml.sh | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename {2.ami_and_containers/containers => 3.test_cases}/jax/README.md (100%) rename {2.ami_and_containers/containers => 3.test_cases}/jax/jax.sbatch (100%) rename {2.ami_and_containers/containers => 3.test_cases}/jax/jax_paxml.Dockerfile (100%) rename {2.ami_and_containers/containers => 3.test_cases}/jax/run_paxml.sh (100%) diff --git a/2.ami_and_containers/containers/jax/README.md b/3.test_cases/jax/README.md similarity index 100% rename from 2.ami_and_containers/containers/jax/README.md rename to 3.test_cases/jax/README.md diff --git a/2.ami_and_containers/containers/jax/jax.sbatch b/3.test_cases/jax/jax.sbatch similarity index 100% rename from 2.ami_and_containers/containers/jax/jax.sbatch rename to 3.test_cases/jax/jax.sbatch diff --git a/2.ami_and_containers/containers/jax/jax_paxml.Dockerfile b/3.test_cases/jax/jax_paxml.Dockerfile similarity index 100% rename from 2.ami_and_containers/containers/jax/jax_paxml.Dockerfile rename to 3.test_cases/jax/jax_paxml.Dockerfile diff --git a/2.ami_and_containers/containers/jax/run_paxml.sh b/3.test_cases/jax/run_paxml.sh similarity index 100% rename from 2.ami_and_containers/containers/jax/run_paxml.sh rename to 3.test_cases/jax/run_paxml.sh From 62c827db4190d8f228e52f6683dfeec6d38bc22e Mon Sep 17 00:00:00 2001 From: nghtm Date: Mon, 5 Feb 2024 18:32:28 -0500 Subject: [PATCH 342/648] Updates to dcgm_exporter, parameterized 
prometheus version, build docker for efa exporter, update readme. ready for review Signed-off-by: nghtm --- .../base-config/lifecycle_script.py | 16 +- .../utils/install_dcgm_exporter.sh | 21 +++ .../utils/install_efa_node_exporter.sh | 18 ++ .../base-config/utils/install_prometheus.sh | 127 ++++++++++++++ .../utils/install_slurm_exporter.sh | 37 +++++ .../4.prometheus-grafana/README.md | 155 ++++++++++++++++++ .../assets/dcgm-dashboard.png | Bin 0 -> 471772 bytes .../assets/efa-node-dashboard.png | Bin 0 -> 518012 bytes .../assets/grafana-datasource-configure.png | Bin 0 -> 243853 bytes .../assets/grafana-datasource.png | Bin 0 -> 275791 bytes .../assets/grafana_users.png | Bin 0 -> 138268 bytes .../assets/grafana_users_admin.png | Bin 0 -> 59522 bytes .../assets/observability_architecture.png | Bin 0 -> 710253 bytes .../assets/prometheus_running.png | Bin 0 -> 121152 bytes .../assets/slurm-dashboard.png | Bin 0 -> 274173 bytes .../cluster-observability.yaml | 67 ++++++++ 16 files changed, 438 insertions(+), 3 deletions(-) create mode 100644 1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_dcgm_exporter.sh create mode 100644 1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_efa_node_exporter.sh create mode 100644 1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_prometheus.sh create mode 100644 1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_slurm_exporter.sh create mode 100644 4.validation_and_observability/4.prometheus-grafana/README.md create mode 100644 4.validation_and_observability/4.prometheus-grafana/assets/dcgm-dashboard.png create mode 100644 4.validation_and_observability/4.prometheus-grafana/assets/efa-node-dashboard.png create mode 100644 4.validation_and_observability/4.prometheus-grafana/assets/grafana-datasource-configure.png create mode 100644 4.validation_and_observability/4.prometheus-grafana/assets/grafana-datasource.png 
create mode 100644 4.validation_and_observability/4.prometheus-grafana/assets/grafana_users.png create mode 100644 4.validation_and_observability/4.prometheus-grafana/assets/grafana_users_admin.png create mode 100644 4.validation_and_observability/4.prometheus-grafana/assets/observability_architecture.png create mode 100644 4.validation_and_observability/4.prometheus-grafana/assets/prometheus_running.png create mode 100644 4.validation_and_observability/4.prometheus-grafana/assets/slurm-dashboard.png create mode 100644 4.validation_and_observability/4.prometheus-grafana/cluster-observability.yaml diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py index e26f6a5f..d02ff1c3 100644 --- a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py @@ -151,10 +151,20 @@ def main(args): ExecuteBashScript("./start_slurm.sh").run(node_type, ",".join(controllers)) - # Note: Uncomment the below lines to install docker and enroot - # ExecuteBashScript("./utils/install_docker.sh").run() - # ExecuteBashScript("./utils/install_enroot_pyxis.sh").run(node_type) + ## Note: Uncomment the below lines to install docker and enroot. + ExecuteBashScript("./utils/install_docker.sh").run() + ExecuteBashScript("./utils/install_enroot_pyxis.sh").run(node_type) + # # Note: Uncomment the below lines to install DCGM Exporter and EFA Node Exporter and Cluster Nodes. (Docker must also be installed above) + # if node_type == SlurmNodeType.COMPUTE_NODE: + # ExecuteBashScript("./utils/install_dcgm_exporter.sh").run() + # ExecuteBashScript("./utils/install_efa_node_exporter.sh").run() + + # # Note: Uncomment the below lines to install Slurm Exporter and Prometheus on the Controller Node. 
+ # if node_type == SlurmNodeType.HEAD_NODE: + # ExecuteBashScript("./utils/install_slurm_exporter.sh").run() + # ExecuteBashScript("./utils/install_prometheus.sh").run() + print("[INFO]: Success: All provisioning scripts completed") diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_dcgm_exporter.sh b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_dcgm_exporter.sh new file mode 100644 index 00000000..f3a842fa --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_dcgm_exporter.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Check if Nvidia GPU is present +if nvidia-smi; then + echo "NVIDIA GPU found. Proceeding with script..." + # Set DCGM Exporter version + DCGM_EXPORTER_VERSION=2.1.4-2.3.1 + + # Run the DCGM Exporter Docker container + sudo docker run -d --rm \ + --gpus all \ + --net host \ + --cap-add SYS_ADMIN \ + nvcr.io/nvidia/k8s/dcgm-exporter:${DCGM_EXPORTER_VERSION}-ubuntu20.04 \ + -f /etc/dcgm-exporter/dcp-metrics-included.csv || { echo "Failed to run DCGM Exporter Docker container"; exit 1; } + + echo "Running DCGM exporter in a Docker container on port 9400..." +else + echo "NVIDIA GPU not found. DCGM Exporter was not installed. If this is controller node, you can safelly ignore this warning. Exiting gracefully..." 
+ exit 0 +fi \ No newline at end of file diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_efa_node_exporter.sh b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_efa_node_exporter.sh new file mode 100644 index 00000000..48432a76 --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_efa_node_exporter.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# Clone the repository +git clone https://github.com/aws-samples/awsome-distributed-training.git || { echo "Failed to clone the repository"; exit 1; } +# Change directory to the desired location +cd awsome-distributed-training/4.validation_and_observability/3.efa-node-exporter || { echo "Failed to change directory"; exit 1; } + +# Build the Docker image explicitly +sudo docker build -t node_exporter_efa:latest . || { echo "Failed to build Docker image"; exit 1; } + +# Run the Docker container with appropriate configurations +sudo docker run -d \ + --net="host" \ + --pid="host" \ + -v "/:/host:ro,rslave" \ + node_exporter_efa:latest \ + --path.rootfs=/host && { echo "Successfully started EFA Node Exporter on node"; exit 0; } || { echo "Failed to run Docker container"; exit 1; } + diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_prometheus.sh b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_prometheus.sh new file mode 100644 index 00000000..6d5eb343 --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_prometheus.sh @@ -0,0 +1,127 @@ +#!/bin/bash + +# Retrieve IMDSv2 Token to fetch region of current EC2 Instance (Head Node) +echo "Retrieving IMDSv2 Token to fetch region of current EC2 Instance (Head Node)" +TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600" -s) +REGION=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s 
http://169.254.169.254/latest/meta-data/placement/region) + +# Retrieve AMPRemoteWriteURL from Output Tab of CloudFormation Stack +echo "Retrieving AMPRemoteWriteURL from Output Tab of CloudFormation Stack" +AMPREMOTEWRITEURL=$(aws cloudformation describe-stacks \ +--region $REGION \ +--query "Stacks[?Description != null && contains(Description, 'monitor sagemaker hyperpod')][].Outputs[?OutputKey=='AMPRemoteWriteURL'].OutputValue" \ +--output text | grep -v 'None') + +# Check if CFNREGION is empty +if [ -z "$AMPREMOTEWRITEURL" ]; then + echo "Cluster may be in a different Region than monitoring stack. Unable to determine AMPRemoteWriteURL for prometheus. You will need to manually edit /etc/prometheus/prometheus.yml file on the head node and restart prometheus to complete setup." +fi + +# Retrieve compute nodes from scontrol +echo "Retrieving compute nodes from scontrol" +export COMPUTENODES=$(scontrol show nodes | awk '/NodeAddr/ {print $1}' | cut -d '=' -f 2 | paste -sd "," -) + +# Function to generate target lines for a job +generate_targets() { + local port="$1" + local nodes="$2" + IFS=',' read -r -a nodes_array <<< "$nodes" + for node_ip in "${nodes_array[@]}"; do + echo " - '${node_ip}:${port}'" + done +} + +# Retrieve the latest Prometheus version from GitHub releases +echo "Retrieving the latest Prometheus version..." +LATEST_VERSION=$(curl -s https://api.github.com/repos/prometheus/prometheus/releases/latest | grep -oP '"tag_name": "\K(.*?)(?=")' | sed 's/^v//') + +# Check if the latest version retrieval was successful +if [ -z "$LATEST_VERSION" ]; then + echo "Error: Failed to retrieve the latest Prometheus version." 
+ exit 1 +fi + +echo "Latest Prometheus version: $LATEST_VERSION" + +# Construct the download URL with the correct version format +DOWNLOAD_URL="https://github.com/prometheus/prometheus/releases/download/v$LATEST_VERSION/prometheus-$LATEST_VERSION.linux-amd64.tar.gz" + +# Download the latest Prometheus release tarball +echo "Downloading Prometheus version $LATEST_VERSION from $DOWNLOAD_URL ..." +wget "$DOWNLOAD_URL" + +# Extract Prometheus +echo "Extracting Prometheus" +tar xvfz prometheus-*.tar.gz + +# Move to Prometheus directory +cd prometheus-*-amd64 + +# Move binaries to /usr/bin/ +echo "Moving Prometheus binaries to /usr/bin/" +sudo mv prometheus /usr/bin/ +sudo mv promtool /usr/bin/ + +# Create Prometheus config directory +echo "Creating Prometheus config directory" +sudo mkdir -p /etc/prometheus + +# Move prometheus.yml to config directory +echo "Moving prometheus.yml to /etc/prometheus/" +sudo mv prometheus.yml /etc/prometheus/prometheus.yml + +# Replace placeholders in the configuration template +echo "Replacing placeholders in the Prometheus configuration template" +sudo tee /etc/prometheus/prometheus.yml > /dev/null < /dev/null < /dev/null; then + echo "Go is not installed. Installing Go..." + sudo apt install -y golang + else + echo "Go is already installed." + fi + echo "This was identified as the controller node because Slurmctld is running. Begining SLURM Exporter Installation" + git clone -b 0.20 https://github.com/vpenso/prometheus-slurm-exporter.git + cd prometheus-slurm-exporter + sudo make && sudo cp bin/prometheus-slurm-exporter /usr/bin/ + sudo tee /etc/systemd/system/prometheus-slurm-exporter.service > /dev/null < + +This repository provides a comprehensive guide for deploying an observability stack tailored to enhance monitoring capabilities for your SageMaker HyperPod cluster. It demonstrates how to export both cluster metrics (SLURM-exporter) and node metrics (DCGM-exporter, EFA-node-exporter) to a Prometheus/Grafana monitoring stack. 
This setup enables your administrators, ML-ops teams, and model developers to access real-time metrics, offering valuable insights into your cluster's performance. + + +To get started, you will initiate the provisioning of an Amazon CloudFormation Stack within your AWS Account. You can find the complete stack template in [cluster-observability.yaml](./cluster-observability.yaml). This CloudFormation stack will orchestrate the deployment of the following resources dedicated to cluster monitoring in your AWS environment: + + * [Amazon Manged Prometheus WorkSpace](https://aws.amazon.com/prometheus/) + * [Amazon Managed Grafana Workspace](https://aws.amazon.com/grafana/) + * Associated IAM roles and permissions + + +![observability_architecture](./assets/observability_architecture.png) + + +The solution uses SageMaker HyperPod [Lifecycle Scripts](https://github.com/aws-samples/awsome-distributed-training/tree/main/1.architectures/5.sagemaker-hyperpod#31-lifecycle-scripts), to bootstrap your cluster with the following open-source exporter services: + +| Name | Script Deployment Target | Metrics Description | +| ------------------------------------------------------------------ | -------- | --------------------------------------------------- | +| [`0.Prometheus Slurm Exporter`](https://github.com/vpenso/prometheus-slurm-exporter) | controller-node | SLURM Accounting metrics (sinfo, sacct) | +| [`1.EFA-Node-Exporter`](https://github.com/aws-samples/awsome-distributed-training/tree/main/4.validation_and_observability/3.efa-node-exporter) | cluster-nodes | Fork of Node exporter to include metrics from emitted from EFA | +| [`2.NVIDIA-DCGM-Exporter`](https://github.com/NVIDIA/dcgm-exporter) | cluster-nodes | Nvidia DCGM Metrics about Nvidia Enabled GPUs | + +### Prerequisites + +> [!IMPORTANT] +> To enable these exporter services, uncomment lines 154-165 from the 
[lifecycle_script.py](https://github.com/aws-samples/awsome-distributed-training/blob/c52a69393f4ecdaba7de8af802174d075eca3a3b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py#L154) file used when deploying your cluster. Uncommenting these lines will install and configure the necessary exporter services to export cluster metrics to the Amazon Managed Prometheus workspace. Save this file, and [upload it to the s3 bucket path](https://catalog.workshops.aws/sagemaker-hyperpod/en-US/01-cluster/03-s3) referenced in your [`cluster-config.json`](https://catalog.workshops.aws/sagemaker-hyperpod/en-US/01-cluster/04-create-cluster#create-cluster) file. + + +> [!IMPORTANT] +>Before proceeding, you will need to add the following AWS Managed IAM Policies to your AmazonSagemakerClusterExecutionRole: +>* [AmazonPrometheusRemoteWriteAccess](https://us-east-1.console.aws.amazon.com/iam/home?/policies/details/arn%3Aaws%3Aiam%3A%3Aaws%3Apolicy%2FAmazonPrometheusRemoteWriteAccess?section=permissions#/policies/details/arn%3Aaws%3Aiam%3A%3Aaws%3Apolicy%2FAmazonPrometheusRemoteWriteAccess?section=permissions): *this will give the control node access to write cluster metrics to the Amazon Managed Prometheus Workspace you will create.* +>* [AWSCloudFormatinoReadOnlyAccess](https://us-east-1.console.aws.amazon.com/iam/home?policies/details/arn%3Aaws%3Aiam%3A%3Aaws%3Apolicy%2FAWSCloudFormationReadOnlyAccess?section=permissions#/policies/details/arn%3Aaws%3Aiam%3A%3Aaws%3Apolicy%2FAWSCloudFormationReadOnlyAccess?section=permissions) *this will give the install_prometheus.sh file permissions to read stack outputs (remotewriteurl, region) from your cloudformation stack* + +### Deploy the CloudFormation Stack + +[
 1-Click Deploy 🚀 
](https://console.aws.amazon.com/cloudformation/home?#/stacks/quickcreate?templateURL=https://awsome-distributed-training.s3.amazonaws.com/templates/cluster-observability.yaml&stackName=Cluster-Observability) + +>[!IMPORTANT] +> It is strongly recommended you deploy this stack into the same region and same account as your SageMaker HyperPod Cluster.This will ensure successful execution of the Lifecycle Scripts, specifically `install_prometheus.sh`, which relies on AWS CLI commands that assume same account and same region. + +### Connect to the cluster +Connect to the controller node of your cluster via ssm: +>[!NOTE] +>You can find the ClusterID, WorkerGroup, and Instance ID of your controller node in the SageMaker Console or via the AWS CLI + +```bash +aws ssm start-session —target sagemaker-cluster:_- +``` + +Verify the new prometheus config and service file created by `install_prometheus.sh` is running on the controller node: +```bash +sudo systemctl status prometheus +``` +The output should show active (running): +![prometheus_running](./assets/prometheus_running.png) + +You can validate the prometheus confiugration file with: +```bash +cat /etc/prometheus/prometheus.yml +``` + +Your file should look similar to the following: +```yaml +global: + scrape_interval: 15s + evaluation_interval: 15s + scrape_timeout: 15s + +scrape_configs: + - job_name: 'slurm_exporter' + static_configs: + - targets: + - 'localhost:8080' + - job_name: 'dcgm_exporter' + static_configs: + - targets: + - ':9400' + - ':9400' + - job_name: 'efa_node_exporter' + static_configs: + - targets: + - ':9100' + - ':9100' + +remote_write: + - url: + queue_config: + max_samples_per_send: 1000 + max_shards: 200 + capacity: 2500 + sigv4: + region: +``` + +You can curl for relevant Promtetheus metrics on the controller nodes using: +```bash +curl -s http://localhost:9090/metrics | grep -E 'slurm|dcgm|efa' +``` + +With node and cluster metrics now being exported to Amazon Managed Prometheus Workspace 
via prometheus remote write from the control node, next you will set up the Amazon Managed Grafana Workspace. + +### Setup the Grafana Workspace +>[!IMPORTANT] +>Before proceeding, ensure your AWS Account has been setup with [AWS Identity Center](https://docs.aws.amazon.com/singlesignon/latest/userguide/get-set-up-for-idc.html). It will be used to authenticate to the Amazon Managed Grafana Workspace in the final steps: + +Navigate to [Amazon Managed Grafana](https://console.aws.amazon.com/grafana/home?#/workspaces) in the AWS Management Console + +In the Authentication Tab, configure Authentication using AWS IAM Identity Center: + +>[!NOTE] +>Configure your AWS IAM Identity Center User as User type: Admin. + +![grafana users admin](./assets/grafana_users_admin.png) + +Within the DataSources Tab of your Grafana workspace, click the “Configure in Grafana” link to Configure Prometheus as a data source. + +![grafana datasources](./assets/grafana-datasource.png) + +You will prompted to authenticate to the Grafana workspace with the IAM Identity Center Username and Password. This is the you set up for the workspace. + +>[!NOTE] +>If you have forgotten username and password, you can find and reset them within [IAM Identity Center](https://us-east-1.console.aws.amazon.com/singlesignon/identity) + +![grafana datasources](./assets/grafana-datasource.png) + +Once you are in the Amazon Managed Grafana Workspace "datasources" page, select the AWS Region and Prometheus Workspace ID of your Amazon Managed Prometheus Workspace ID. + +![grafana datasource configure](./assets/grafana-datasource-configure.png) + +### Build Grafana Dashboards + +Finally, with authentication and data sources setup, within your grafana workspace, select dashboards > new > import. 
+ +To display metrics for the exporter services, you can start by configuring and customizing the following 3 open source Grafana Dashboards by copying and pasting the below links: + +#### Slurm Exporter Dashboard: + +https://grafana.com/grafana/dashboards/4323-slurm-dashboard/ + +![slurm dashboard](./assets/slurm-dashboard.png) + +#### Node Exporter Dashboard: + +https://grafana.com/grafana/dashboards/1860-node-exporter-full/ + +![EFA Node dashboard](./assets/efa-node-dashboard.png) + +#### DCGM Exporter Dashboard: + +https://grafana.com/grafana/dashboards/12239-nvidia-dcgm-exporter-dashboard/ + +![DCGM Dashboard](./assets/dcgm-dashboard.png) + +Congratulations, you can now view real time metrics about your Sagemaker HyperPod Cluster and compute nodes in Grafana! \ No newline at end of file diff --git a/4.validation_and_observability/4.prometheus-grafana/assets/dcgm-dashboard.png b/4.validation_and_observability/4.prometheus-grafana/assets/dcgm-dashboard.png new file mode 100644 index 0000000000000000000000000000000000000000..7280f80b838d2cd6d4a8eabbafee2725283c4f8b GIT binary patch literal 471772 zcmeFYWmFtpmp_UH4IU)86WoHkLvVKp5FofikmLo!V!gEx)}hQdLH{@{*DisxFR})^-*!FmjQ}I!L-2BltN6KjP#6z#+?_bfQqH!^xrK zg|1`Uh0ut7;6M!ePO7Kr#8Rr;6IT8GgDz|xwFmy-IsqE$Xq#>=lcTyA&~MM@0J6XS z`r^7TYBiZ{fCzJoYgosJG!H|elf)K@4z`L`QZ|imgN2KR!Q+D!W@S?u{~n8h^apV& zpv9B-CvRf~24__O_*G|i7wrlFLotn2a(NB-g+&enQ?ngO`v8Mw(eW_nTlcmfv5}Tt zJc3}B8 z_INlSo*D5Wb(n7Plp;i&e#bGxS~A1XVF*imT-t#Vwok|z!}IY~FZR{`7ynEzp?0kr z7cFI7J%4}O+Gq|5UB708kF<4y8kVj2llbpch-9=nXAva|YQS84>Tno0d>fYFj_Yk|3U;wM+RSI-LA_ zXVgqM-jcpH>9L0E4M~wmv3A<~Tm+6cW>IjaW;6pfSc3Z&!=Vg*L+>o>sge9>W_0yl z@UezbrFvgb{VFMpaNbqiuuV)5XR$RiX#9@U-1d7&zoh*B__<1`S}1^z;G|w`;1l5- z_HGP1c@~-p7FKDR-~tO4?Er(jNW}HCIi9)mcRqUkqqs~ImmrId_XDh6Z=6T#0A?bF z@u+@6Mp@*fHNSD$#<#;{n67$L1D$#hVZXr^))obhP@o~N_z?SSCFj#4I|U+qfq`{W zKaQvFp-Jt`6O7ZF*p+!O}*4EJ?CPGCl6v{^s5=a7INshxo 
zmsUVfp`d<}bz`*?N^``ptA7cY!mQtTkN>4FIoge#O49F7BB9({iZHC;h~`$fRVZ?*WwcLxOKjHwXR^>yQ)f^h;A5TdZ2!|n$;-mV7&$?)c;_R-k<+ug8fs}Z9 zRRNW8RKUhO?}l)1|JbRQyhmRE24o4>AmVg*(u}_DdJ(%4o4s~N7k43yGKYFFo{OAl1aM3XjtBmx zexqh?(KXz zx-d2Zc{i)>wVo)~!vm=QAcrCN1kndd5-R+rP^3-5Vnkj+MnWmsYM7G-$kO1rM=nTt z-ZRtTHbihmL`8xkZwGJ(Ft*LjL?fkU=$8^ZkEOaEnH^CWF<HRoG>{vieRpn6a%C zS@K0~Uu{A4_(KW9-3LcaEv*SnVNG+*&~jHTVCk8faLJ?UEwxsZjjV;nutw1w?cC`c z?%c{8??%wZ8mvMtZRG~!?Ug;o^ zabY>%{S%0Tv>H(aNBr*|jFJNM@OW8mY` zun2R^LsGyd*V05 zkueKI*__AR3*2C?Nls&H-|^Pr5QlAZs*ifsE~~rzlHt8(yX*_>`tSXU!-^@3)n_M) zPm6OWDkpHpzmFeJoMmc^r|%!_GffzbLzoqrshBPG=k!eNPNuq9#BFmhab!9$y?aUE2uVaHBT@v*cnT}r`C{RCHL~!<)zmtAt|X+ z_LQ}bK_%7tSnf4vYb9uv<63mxG~GBYUT_fYK=eF1xGud}-@BK>w8q`%9%vf{(&6>w zjWozMh-(k^DfdZ+C>8T?W&BF>nhf6DI_DVPi5Qku;hplG^2-VE;s;pxoZcLhnG4IX zTY}Pt*Dco#&nMQiMc(FW<|>I~cNKRTZrE(d`tSSucH8?;={8p~FR(5s1keXWzKXxz z*{Oh;!I3b`6nsBF{uEYaVyAR6+6<;>_!<3k`DfQ>;O6^YdI@AoQ_3+(2bX9*A!YOu zwmD%p?XR|k+{~QfqO|u{k(?d|`yR zq#Ez9KYD1Mj4w7g-?Yj*3+}8BLWbF5x-&l~QYd|u*Hj_N)lIw?VW21zDsw&gUDq&V ztjHsWPsdvnua2Y9TyztMM$%$q2m}BxX?GkIYjfD``yL_@ab4-ivI|8b1+4=ZDY+Tk zt&RyC-nC?`J;o3G?$i9DR#7!wycR~ zF{JlR#{1F40rKrRwb!e)S64(>o0e$69osr+c?e#@1__kif`om zXjZP`J5aoo9+od;yOeCGpQRsbdrE%RZ}!g(ltD_Ym2s79GaY#Srf;l5#Y4L%daN!i z8^=Fp^E>YM>s9JyT2VL5TDFXHoryA16MJ&lrK({DdCVCcS)NY~JlUP>M87$oo~y#-~MAf*(t zS(_X;yY+K!b57S2kNKsZe*iMGF$Z)|nMwD58XBERcGOYQckk*eGTX5kwK+PT^L$S($hhxwLV z^VJk0>sMlMFdYtyxk5H5+y+UzyB|#Kpf_pzo;#DHEyU%eJ$t(9S4dxaaCPlG)ovVN z;(NyOpm9A_p31|9`LuVZW3aKl}N>D*<*&)?)x{Ix$e zZamIPl-AE1>~%~qlge#k-^~+%2JvvD@cb+`p6s;O4M%AA)ms3jrUhP10r4BgomG
vt!G926LI}qLT7B9NK#NSO>QE0fsisrt)2|V%jn=<7mghmwKEp+8Am6Tu@ zp>1Rs1Xv=NH_#R=bQ6Ik{;#$S><1Y5e;tQ|feE*ULHN&Wl%e~-OFVS@OYYaN^YgQ@bFy)AvOuq3arJU=GX=0XxKjVeP5#e) zq%2&`U96qltQ{RF{@&Nr%+cLVn2PG}gZ^vzkA7MJtpEE-4zBbetR@$U^L^Uxf^|^Du{Z+mFlQq|~ zU#YIFyg6IL!o*w+?XOi!=?8urw)F?UZ*$G4U~wq^;TnjH-5S<+i=1H5L?Z_G57&$y z?l)*_(s7^;Lb-1U|8S*}`PO$Ek!LaZ4?6`Vr*KjNll(e#&IZDv#KZivRcruRwUvyc zu`=zSzMvco!Uw|x_kYZF8Z0%qch$1**qG@5p)kPW{B+6&{zC>q$;G5lpgdCRsLcAu zJo?t>ZVdm=B>kC{Q8d{{MyO z|5e-ke^Q9V-UX8IJIrf@W02A$vO}k?a5TFf-apiC+jP*0@@0}d6qK+vs{ap#6NeHy zTc}-UJ{ZQm)mixT&&*3a@?WKro+tzDUu(VgKby#bQgTr1@;mDzw154{O+Wsb&ZCj| ztAO_l)mq8kS63$gGo?p?{dXui>0_Y%bF!%Vhq^B^9;#;&_^qM+3uCgS{bvS3E(B_^ z;>P6*p#3w{WBOmB)}uue&+m$VyAkkOmPoH+IM^vil3A_S5L>SPt4(5fY%q#oE)X6v z{}kFV{~gbSJ{Eq{_}{Z$jFUE=PDwvm(39+0R3U{}D-6V0dUG^uO=LBhr4^90K3CtJHqR8%vXHa(a!5fe7-O!<^Z7h@uPFqy4!O)x%f_pI$iM+4X4FWEPs z_?Mkn+Hhs+McUHeSIZnmJsR3XcF>Q*h#gel*K~_#3wp|A3;SqiKVBy-*O^x=H{09l zwq(vR`rJ$-GAis>+VuAEAOBA1_YY6yv|zN(+|zDZOEFmU)$njPLbT2;YF9RLjZwFn z%u%y?`2k)!FZKgRwK;8dn0QUnF#k(Te4%3eZ^dMj5&MJMHreg*m?NmF`vE*=(;wEP zW37S%w$vz5E?D<&Xkqk~#hxNvYj=$EhGYZhYP!iT4W~l?y^MUuZ9Q!^WVBtdW3<-~ zaIVQ7wtXB==d!upk1?)jWrhiyE7K@**&|c+Bw#92FKp!zV|^WbeY&n$u63?@JZn!r zKJR9Gtl2eGg*#nwS5N1(xEjBPoOhS06@+_L??^a8S=#x_iQ%spNd15z5lLp$(jN*r z)5OvqPNFDYzv`!JCKeLg8%;Lgw)>-4v($Eq)a0~fD4fk|B%syi_Fn7hOr4U0N2|uD z<$D%-G{U_CbZRT3{y(XnYMj6+q&V%C)gO|Mey=Z`<+d%u)iXIXs}si54y4BZM@m|c zIgp^U-Qo%2Ii?yB;*Xd*|3lrk2o3B3svvd zeR4l#(q-$jO|mOzKT56v%z5V9v>-o+g?$GO1g>__dB3PTl{?D_t<@N`=s#aiiHeWW z4kvf`C+_GSa3sQ@7}~=Dz#{4m)e&VHL*6jaEaP%6Mv@5k-Z;VU$c3<8OZ^){*nD@+$qE8nB%JxZQdW|ZgW&$Q{9?VXxJ-! 
zm!yWNS-R^PLBM9LD(Jsl?QyzX2IWkMfZroq=S#pVBsVC&O)M~|{LiUpgsF^8x64=x zn>Gl@rJfh)a@qte7jQqW@b$U|Nxwdq%&CYv>b$-{YH;hER=`y@y+QomeiIq)bEKiD zpd<1Y2ek+X-P)ehVu2=uxA2xzIn`r=Xo(L9H5*_2VQH@V!xIdkGhjY}I()uZiFhsm zAjh5{Mn&Ot-f5xr0AYGOJ(?~EpAQU%%sY0IYDAE@Y!9F$^SfHwSB4()N94O|G-p1K zG81N+IRvpiQQGfv{&3!e)v8d*+;H9J4fKL#+OrCc_?#D@w*Hapdl3}kS zuWz?~LHO2!2Rgr8WgSH!%T~5O7?wi37XO-@4k^V}5vLw@uZX?aabpwHDwL%i<0-PK z()KgP>bQ%ggSLvD;efcyuN7Jqc|_cHdDjy;TuKwzOk~9avOOYkXPvJ)%ylZ+0%iV* zLw)-!#Dj4D{y`211o4d zDY3nxMF|4V+k{s9bXaD!;?KpXWU6Z@{{Bc{KSC_pKY-Xh;I}Q5#0;jt-B}Ba}^XYY@wB za(w2jg3sflyVNkCR|st(`wTBrN;#=ukm#&$5f0d-#&J622_4Y%m8zre67jM(+)>(= zcP-!*{`i-b@pOB5Y?D>asehA4^2)z)1Og*)p?=3%gku+fyUlb*j7Be;&s{|6dHkps z$iB^ylnH}mv1Cw9xwsTXFix+<;hE^NqibON3P|;>E@;~`)hVUN^)Z8cBPznK008dV zWxmV!7Czs8&dNG{H(Cz0-GX`xQFT3>L}-VXg#t|=27oGhO7T=$zW;+o%VZEoSz8>> z+Y}>W-0>VPVQq5_aVEt9Cfw^=gQnwtMo(4~smP7E*J8`x?(wS{qe*3&RkFtszi;XM zUOzMYj4y5-KRsOMd|htxF)T`Q2CB}y!28S-n2cl6+IH;8XYy1XFBhefR5&KG<7eo}X;#|CcD}4Aad>^6jkUU^@n;GpRERq}^IY6#RK5=LS&S-lwTU79R#JvyP z>VGt|;cvXa(Jj&V2~?y+tRZnjLWrNl=v8B)E#+ql8OI#a%Fcu8-g^e9y9?w43b@=$ zC3Ayf+I&k`?dS6(IihUHY&Y~O0mmRtlgOTySD0L22j4jYnlT=nis)^=?y!jiGr~$_ z_iNXOzI=;vlh-($UD>n#dD}A9Y@b8PdzWdr+oK+pAkx?afsQm2|4XILGl&)wl*gTC z>1_P0;|bIyk9L3L{0>D&kfvv+JQQLHh2q|uemMW2DOD33cs(F*KDX5$E_A(&2DK#B z)d~UG#&azjvZ(0tg*^5G<&}E%hewNmhgxCg&k$VZz()<0uAib`v59I`HrCr5u93FV zqrZ(aquoU92r`p)b6F`TV|?o|yHSo4&8y;3n~F$~98d=Xellk7ovw&hbFRfNrIRyA zfK=a>Wx>JHk)7X%1bN1(&OBTb%(dW<4g&mR%H9fveY9Vm;g1WvdVRjzu-R+5my1{m zmII%(l!l>ae8qepc--%Tym-KtR#gS+yjlX%E(A1~{W_BakAj-snlkvf1?czABK-h1 z?PyIJ<7e8AB|)}^Cj>N0(^V$BLs8fAYT$Ln&(|Od`C`~35SA|3kMR(8&uSs>hQyNS zRPpNPy7e~@5vdRDg%p#q+#xV;D3n>LPg!Y%JiA{-oPFlI&buJm-Vn4&qL?{t$vYFTNq8a@rf8m0;Kf>Lcr+KP>+i2z8zgg3* zEVyH9xwP)rU1~i`m#+7`QftbUtk5yDd<315{yV0k=TGv2-U^fJ>9~;Fm908fM)RTQ z$B+G=Y3le>ARDjMI$l@zXI?^F%?i8UnB?D}t>@VYJw+gMKZhRn%lcL;e{xgtg!QG%9D~N`3sLwYKejb!l)R`yz^6)b z?ZbYNvT!Zy85-OUbA~qzBH6tnUU4i>W;uO=taBlC`Qa6!`7Y8i^L?rqJ570qP7-S} 
zX5&_9z|-f~TOjoeeokIz#mbm@M%T+@bh$+6cO1&$5>JVjqOq5~$PZG;^MG!z`;r35;l0ojb&cCGL* zR_>(7^FoQVu@lBt@6s8F9e2w2EXI!Xpvn- zHk!?%On0tmEU)l_O&3nAmowpt6D07eCDR-bZa##^MJ6$5e4b(3^5U>KmU>|4J8W4U zAJA^aa5gzmUQ={_Mq{hE-k$&);HSPfkn@obfxOCpm8Nk+HukYceIU2UchKsx1y1$- z0gl+SfHTL_uX#?(M5IwSkO;JcHbT(|c2{4i@q4TXT_5+{b;%O}PhF5v`Ec(y_Fuj| z$^G^2!QOG#@r+tT-R_5WAv!MwvT^sE1_5ipJrk%N&;H$YpZqU3N){!QeJ^A;AlF@xLw^p~gzJ>rTGa0$(v+A!e5#;ay$pWf;AG5Ha|&;1hC{pv~7 ze=M;-nnk4L&2eDLisFe-iE_3}NZ#+@(w}1~EOia^D@Q_JS7qMr#QZxrw!kWNlYq{X z?*Ps6n|MYBZ1V(mhi_etyNnJuUIkUV7f=54EypX4-D+Of`#PTMdYXG<24X&X;keW- zd1B?7WpmWrpwiV}*JW!y9_1`J!lW2Zn}t;!eCQn34?u;vHg^smWA2B<8ArQn6TY5u zjgmmJ6=-xBcBAJV#k2!?R66OqMIx^(|MZTnjIAQz^;yMl=HQvn$c){$Os6HUO^eC~ zP$brO3&nV}lrrxhk2*(xrP4}m36MnJ9==3#?RS{&csIEnQQcM3M?5s!lk4blLbB$1 zVTq|GxoAbLrUvfM9%R*Q{rtwbml zp-OM50g5nv?s5oZcR5>b(uf*{PgQ4(cOnfs&JoUW7ZH=Z6vDvfuGCKW;}!ZF*#lLp z^#xQTyT{Ky!&+YA*mh+3CV{eVdgSZ~LHzcQX|NEBxjB=eXZTS+Vn=L$%iiL1Q)nZ# zj6du8#xu*AM$so0Q$~eS^`cJ@FJ(-88+1|{qqdu*KQ1C_qPljg2)(zcqFbWOuS`1C z+dAi2zqsb7M9(8Uu)4%!wI^Ld8YNpRs|*61Qnu0!F9S!*b?a~@Z(+qBYPWKwabLWQ zQn7xK^SdmTu`P!9`B7~oqXOZL40^$oziJeq!|Sx>r~?v!>Yw`E#2-#;v5Ud6KY30w zj;;28Z{cdTxgBx;=3V!eM~u*x#kYa0>4f^IH2HE|-LW-yKHX(VJ10iNO_!@&HHXro zBao{jtDM^Zr#TXsFMY19RWWz5ntMyE9ZusP8{_mmauk%>Cm%27 zRoplh%^Eh_L)2Jw`GBTww7A^1ax^=FzB$5ES^NdV8LW}i5xXg)$xLj(wdcoLl`NrZ znP@^Bd<3T(z)sAPn~^-_vW4MKsc!$4(OW_K&sv1k1NX%0ie|6(9sUf4I;cA(F+@Cf zZjF}8Un#cQ&V`?AM~d;;9%jx)E4BQG)O)tAHb@VvamUZ0 zXwtmD5w?%kpNHe^t(QqEX?L=$21-e!5rEZ|ZSI@UcT-G%oj9kz=pgrGlX@$em0M)j zXT?5u2n^-h^VPC;xAl9?3Mjgyp6mBgmc3W5{8#@yy{ksr2hn25IJ5v=J<*%lTOGUO zo)D&Dh`2Rcq}?+dR`5Hx-rO$KZ0M$6d^ZjPLDg%fU--99`5pYWew;B1xZ&Y2!9nfT zbuFpQ;%i*v{Tir?R_!u0&R{c@vvziD&P|@^v{rqRAbTuFSlBn2Z#_$IjfpoIR2oSa zlPReEe5gpu>z@>a*QL|P)u5Ca8yo6H!S)!}bYAJux`k*oI3lWCI$a=pZ-ncy#=DVa zB!tlDG5~_yMHST(k}r*b+HOXzQaNV7!n6xxskS;C4s1B8zpGxkk?7nw;k@Kmf&w(w z^t4;vxhjYiPYOw7AW^tI%Z z)_Glf3yb;Qtgx68O`^Dsy>Eu0;f`4F*Y*^i>zWp@y;6zYs1$7{&btlDa2F2t3M zr#Bo5aiE04YbX8Ip}3+58KL96xOtyA*#DTrBv1}DAmxv@L?D4pHx#lsmE4!>@5hSK 
zq2@l%h~7B>QlA@;r}aQqifU_h;|_*=x^StY> z&3iXN_SW5b#s1ekH_*61`SHtro zX*`Wz%kTC1(-bFI!Z33pvk52ePm$DphSpi+9BLoXsNH|zZV>iYYKMC2q%huOuYBE~ zfa|K%rj`>%Of8+3D5OKEyT=H4dD;q}@RzQusWNcK8rw5~!r9dt0gv4lPwfjph6y?hCZ_YH)^O;}m4IkoL*G4FC4+oBycHTcn?VY(IOi^9@ z`Q~;sBkHntgAQ0bRJ?z3|AS5b}WN+mz$F>_a zM&A<15p=f!&bD8~k?ib_>QJ1)bG2VC2in%^C3tbqAew<=^~)>Q%$<4oFeKopbUQ|BZenag*AQA8tla)rkVQoH_Y}E5Sd(um0 zd;NAI%jzEdc6T#FvQ?^)A8Uv^DFoD6_!h>Z4MQ#nR$F>YsBo|*itK>0igPvkpJR8wt@k!SG1QFr0>r+JCC@)xwXLNO#FBi00V%N^pUP(Vk3F@X((ROCaB z0Y;eHWKi~0MblX?QpI7QX~Z-(<(sK=M{VZo>m1`e30f9i-Y|D?*LH?k0Cx&E9hv^@~ z>q}_n+hA6v-|b&jkB%EpAMf-$Er9;+22(DOB9a7^*N_KE)A>eu{hT6<1gn zdeB;Kw7%t=i!*3B{CY}9J=BJ};aoz7i!dfS@W*Pcjhl+|wlhjm|jd z$sGc&vP_nGsoobN-dx2XVr=(6G+?KLJrEFmX&q#+QdxBWZ08R}=}+b--3r1czI0H> z|L^C$@j8^fAsbK7?7w0oXkG4@$t(?Ct9>fW(8nw9h{LX~2xI=rKe1>UByVjg-n_kk4w!citp$$O$3IdAyGMvLb?p z%0Q+=w1yK3+6qsS;;we_5tLj%Cc9qp9Da=02J{vh7bTsOWg#%t zD8w;l#_Acz6>q`bmbEeP=&Zx5E;uR0PHc3-oArYP&=_O+9iUvxVM+O27?ZD2fLS76 z2x8u4@v@;gCcV~r1`Myb-OPMKC%muIZ~U6K!Pw$NO1}3s$)`xGqFW1UuJs$Wnw?0! 
zDEJuF7lC@4lfI8P*1xI+knGu4eAo6kx=keBiAkH#wEK99J_1hsk zn+Lz53L&os+Mv_(WV)avG#@w4Hnwu!Rjyt0GN}iiiE=FEG;7^TRvjMpTi0O|d2KU} z@tIL*#V`+M&Gr`9HuN;YQf;;d(rjL~T@QP_*sRGmvpXlVvv>p7T*&W32G+Sig>2vqB zJHXo_pUG&dkiG#o9Z!=|Ibt7u|JrQwiYdMN5NG69S!~W!D1D#+HLy3}I!feDN>%Qo zKqSU8AE}7QkjY*YSg8TJ?gzg{Wxpd5hEgj^W5bSng@#W{_=ikVhV~no%5-s&^ljjo zY%<+YtnB8&dOxG>Hg@9^pj`MHgz7-`29ljFKWKC zXiVT7$*u1n915|B-vlv%%fA50&#c`B z3Y1{sDQX(MCp=NfnvUn~3+vI?JRh$r-Ymyowh{)3+?|X~a&JyTa}PGR>k!9*uLFCm z!(RW!Ohl-_ZDCbII;eS_J$yjTfZd zBya^&;R7Ur?bfr}oD12KJ19InmiN8)a0y)#faSD0>NyUDIh!lmy^Zm0#q;X+@BHS< zeEv@CDCYazHgoE~T=zJlp8U6>Ew}o-whQ;78#*kanw7~#hl2g%Ma(0j5;f{;HpF&{ z9moQ6=+At?9l|oa9;?59X27-^+7iLARU&T;!qzF==>4+F2)@U@Tkd|;#ceI9PJ%3d4(m1 zRQ0vD^_yom2Ge%qEsReI2H=FJxe8@x^qS)2Cz`M+=fKgDj2%w*!^O70VI(-)`$OV& zl;6>8PU7udrSNvlCQ7m`=#B4?hTz`Q(&T=%+bc+9SuAVyu@T?Ef058l=;l*Pa@JUX zTCrp}t69e(SIT=q2Pvs_$IC&!x{ZKH`BsOwgV0R!dy^90$rWgTGH zh$U1WKYet-lKCUsBGD3>J=%cB-l6FD4?q+G~ws(H>X^Smrf4ansc{nG5yKj6Flds-jj-eXYSwb$fyMA%`SzvV@2NR_C+J?#d}tg4?cKfGANpzL z{TL@ws*iUfCax0!$Dye?F4YG*CSEH??>Gd%DPtf5COVN~FLa}JX$=G9I)2?9dOYye zLO=BUX!lvy47_D~+3_+xe4a58a(ZX84z(-TeeM_5^Yxu14d)kdC`9(+4jcaD5AMVeAdP4JuG`X0SobgMvSRR5 z0`G^-d19{ra%)ycW@4*DUU9UDYOThfSvI)B2;;r*K;W4?(ExG8;r9rKEh@^`{pJ`*PvE40P`N`QH)tar))ukYtOG>D}bt;b~C`>#I@Mw8Z{70CIl*xcN183p zbzY&??gu!4TMUxlI0n(=(5JKXS9~iCe^K_6YgR1rx;!*mZY(7Ac5c*(?Afm8YLFXVBw-MJ^Kp>ymB#y zkk6*k08OSfG)EG^14!aw=7x}*+yPDEN_9*EL^7<1+;X}eWE_)CsANKaz%|85G7yt{ zt*1ulO6M=rpb5k!<=yiI>XmN1d2M>Q}Y&TtO@>pZMo!5B@tIKnU zdiXabq7+#OSk7;!fvYIyu}&E3mAcNPD9fE>*C5_X6!{Z7G$6K-QzfmwI}oBce^Nfu z$$KOsc%yK1Nq)p-$1f2A3ZH6kpnrI%w=h4T{0|ko8lD9wFK%I5vu0kO-<7!Fd z>;#EU|D7wLUR`D9OV;R+OH-+5bkF)>8cDpZ7074V`{q<1CCM8hB`U-f2A;j{1}c*f zDnm~}jD{BCS+~+Y_peUu${`_jU}$L%=y%)2B>E}bTi4CdRgbs$v(2Zs4qF04c6Ovm z*#xr?@OiS2K#Nb^MhEGefl6epRJ+6FXPE{lsRtKS;y{Go-l#Hc-%(Y2U)9}E7) zTiR+kP0Mq-D9yS3Td0M!UDlsB74+30ie1+DSC7aJUw%%-pKVx1Fq26bB&#LG%AHS! 
zm`S3PtamZECN-WDC6~Im&1*Mncgb#kPRMv4B)k@R(xREJps*xZ?|#GaGLV;HHQ>)D z3feDmAV99jlM_oMjTsf2y%t(`T17X?_Kwi71@XJ;;ZS{qF)4}Gm(_-v2)Q%ZP9T;@ z=gWSSKY1yZF1PLSGJiP~dfx)b4N4fa#(G73HBL~yght8(m5ln0t=^y*Op*lj6lj2( z>V5WLEKrHfEOUuvGdoIOIpB?)ZO=jEkVBap<1EzIk867#x1KVztn$0M-m+6IgpT|} zh@wQ$eVgB4I+Ezt2yTugo9Xl@=5%Ki7d&zIRZS}1xh{moqCqkj3@Won`_{c0< z%DAdPm$ze97_t9(KF$Xr+f@bClUiR^gl)_FwJWy*imx8o?6YN6g<-xbqQTd}9NZSH z!%Nwn<5qTF%p}4}svV(j-yi%d{+M=CT->6e)Fu(I`{_!a)*mI`8mnqgI+%ftlD`6V zT{H71*{bo7yD5W0v2cRE5c;%gw1$F8yTc84TIp+zT96J^LqVRaYP$24YO?2NDA@9` zST)0P9udNTeNvIt7Q>q8z1&u0>oK|TMr`kXF{?ZP7jKzQOUcdk0H{?dXYnQDdI#Tl zxS33_+s{aupa4lqie2I>Ky%2opGkQ7R$f*SZ3-r45az%J6<8P7d zJtzEJq_BL^t{FV`M}Vf+ojB3C+O6Q{<67xIx`D33M$zDt=UXNUt|3?PS`eIeAGB=K zMZoIMR-@Yz)!I_nBgNBj`};daoDRd4b80pjb~iCOiIuTaw(KUv@$iTS$apvp3_ zcc3O~`^**58FI)$opr_j!)~E6*>xRLfqoL6TCxJvrc-Xfwk~um(<#d+ z|C_Hu#NcSZITO}|_&0PtizL;JwrkBZazpWWlUbW1+wXcO(BuWLgFd}lEJ4}#P|ngg z;sf_%>C{-a20n(QhyD`QQrXAHA;Ih*dAq(d+=e%aTLR-5+%`d#2n()fP$9unQW`F+M6_btZ02iejV zC;7!WVdlhyV`&*k04Qv0e5%&+B$OCrb@Be>VLsdE{S*_8w!#u4zQ7 zH=x<772_@EnXMvn346lN$;^76@R;=briFfrzT!sqVC>)~LQCojuVi9g&6g~l$wEX- zzY=UlBRsyPChKxL`r*$@MLtx$`McQn;k*Byk`yf8JjV|1Z}OFQ9IaO z?5N+GVIg>#P+-4+Ig8#j`i;3^#LU?qHic)|Ff5nD?oPWm$dXr2uIZ7B?wq(;QlAJb@V>x#i zE}`c0Uj*E}5yU^-XZLUQ?{pl$Suk+5v9kIAf4Z*fOf&rMJ)CEh+Ja=hm*jHGYpRs3 zT*A9E(P$RxrAOpWiomDdb`1R-DB^gvLM=6(#?*$Kqr9n=I;C_AsN45;YZR8(?t z)SRMHZf`%nK=q=ryW7#K22p?9Atx3CueG+0PmUX7Lgd{BsNnm%E(NyZ=V`(13AgkH zqCPOjsb9XTUAGa$a(YzqklXGzAXL>z?itay5TEU(uZ%hi7mrCBpRJ|HJ^AaqogET# z^aRD`dw$J0&|>bd1_IDp5W!XP1i8BhHu)%W00QXV>)OgzJgR}yCSR{%p}Y*BA}wKV zv0YfFW}8$$b<}u-E8Ne0`|`89jpo2-dixDkx&CodQz!^V=S);MlYdVibAh-jALp4~Y%cr-pB@}ee=Pem6 z+wp^C^a`h^;4Q)yv|KtvV^yJ5#Al!CSv<(6&N%pxwCOWMw|&y4zqrZfyA)o;+;H!F zYn+{F{&TNs!0uAM9Xu!9mcZ3gOxcpFC0`hKF@{M&T}I9u4PSs4Ep_yZEW(AKAshhN zle}t&Yc$Xqb6_-&#vp6G=5=^kcOa<&7!&VgcGz?A$s>Yil?iJsLnaPi`-X zhnT2%eB&C>9Jn));?uzOG!5?A_}MoO(DoJDG;_$XT-_G=Y%?^v>2oJEPsdyrqkO&E 
zpC~yVX+T1mDwq->r|2`yRowtBM_Q9ZH54f&y)puU2Q=+gv-~(?&bg)@yG?U-c^x_TFW9nX+y9mh`cKB1pih!htpVqA1vt3ZM|xUrdUT2 zA3;K8nI*d;|2!s**2uS{X%_WU^3(}gy4At+>}LVml#ZuFCVKvtozs=ri%rYjbw^Jl zj)`z60w$|8oj<6#{PAeciNmgLqsh`_ykQ&qi9(tGv*^dDTSo{uu0oObe%a~pP%=C> z`%fTZaC^Zvi_coyyfPB%)#Ab1^$F+G*1@NAE-SUuWsf;6Z)q;5S^82hNq;P%7T~*e zih9W30({rl(mPM%iIlD901S*(3t9Szvc6>+5q6RI_1Zi1DWvwX4%}rmod7NOzP~I` za(?!2i4hAjq8IiN0Rg_5wD>of{9kOHbx@SyyZ#kKK^mkx1f)ahZly%JOQf5nLplW! z5TrvwYUy5(T&24fSh|*mC6@ZVo;h>QZ{~deVHjt05MJ*0ey;2KT#wPPwm!P?$Xh}XCJF?oT5 zH58HV2QW{wC;f3_tPBa$4Sq{QZNW2*NsP5t6LIl3*{oFe8)l-^R#vB*1N?seru+rp%|PqL+x=XX|SL5r;l2Mfr$ zTlj91L!)beJ2 z^Ud;f8d|~n`k=DI<3oPwRe*)RP2mfY?$yGK2U2E9<>z|&rXQ<<;2;KQk+%GW;l zJ9fYu$IW~RHC?D<5yx~93$ksZm0bOrP57nIFHvwRjwOb`shyl4{dFFkAOer!r#Wq& zQ%Kv5zM9D@szIU2C#L626o-y6b&cIPSL6=cDUeF}C$(d+Tf7F+PsV1RjaJxT<3HUhRDe#evB}0}O zxK^T!?1f4}qSUQ65A>P(v0xXYY^uI1|s)K#xn4JKX29alR{6xY-k$=yyrRGZ%prPCk zp3KArp?!q#(mwQF27bgQK1jrkEHfPVpDuu~{-_+n+Mv!oqZrPUFLI18R#AbMsgpA$ zY#YecPf>>dB$>`Xq$80Zy93hwV-lU*+#JIvofIm5D9C3jJF9HI4@c}UcR^c?BU|gx zvU9y`6z@=9baF~<&H?DD*Y=NDN`(-x%>##))uL8kf!vkDbf0({CQUL`e!5LlMxA6< z!Oli+UmwwQ?$JPS__h{rkGOo=Tr2WfNok;oY%?AQl6EO~e9b!ELwd?yO5pt%{Hysd z($fq)60iewUjH-J&BF7qeJ*^xUFed?qj)|FM=Lb(G_oMRQ9f?hI8QZ-CN}@YE6wNQ zI_*T_6L63XAH;rJ{ejoFU;pvq%G!gavZvyZ$0*N_ltB8^sqdjjEk~fJ{U$39M?){broxfxX`< zVaErd8_iLIid)>Gj5CnRB$jD{2jzd*#nmb0im@bsG>34qU0i+@I1ej5EZI0^W+C<>yFYnw%H`woQqzbaZMq5 z4}XHZnOlVx3U9O$_}&3xl~pbw-tC>YlqMh}R^T%juYsCuX-12;O?N29#s-Iv?1wan zdWH}MI08%kxpQgH5`0W`*z!KV`?eXz*o&ETrxAJughA-DG z@}rjyf}q%B+&h29TSJ-eq33t&yyzzlklMBzh$ddQy6~+`=vm#U2@nXeU4kuD3yhM6B-lyBjvn6xE)D|?Uik)P{wvts&A#KjOV-r6x zNt1-VgzfsS#3uUUw)@hzl9e4jJJxk_+RXQ5U^3hF8%TPdhI5y z8+pY-+jHT#D%#t6MW-bzM@`1~&1lF8+xS`VvKf~?FRMAoYnh?v>E8Rte*irZ9V>lY z-}fNZ8?=zfPkr%KaVJ~h z(5oV?$qCM$^D>Y}{6)y;GeAknnjqi{x*N8{P;n=|A2Qg@sE$CC>jsxE`(D1$!-I25 z9^1!(_$JcbdhR$4omNG=2kUL;f9w5O2b+w(BV-C5yHU%=CQUvTO?*;wzyU@{>Lbx+ z=`gWYcS-ExjC*)|jeCkQMFW1bx-8!kiN0#W)3A+tZ$ICfZ>G$}WDJF>9VnHj+XKee zThF<#ZhrH*jQTFi-OxTBt*LmYCu<l0tP`EL0e 
zIdtfUf?f$m(Ew#SQpqyKFCEme>@QYZJDaHrCYDF)*g<Q3zEY_0uev1~#(?K3<&#+cZNGwYrc(FvblS1E^0U6M-^?8R;TAD? zw4S7Rs;VyA`C2F}Z*2QqCUegQ!S>1~Zf;HE} zb_zc!$o(Q$())OZXfC2giY~-&AP3zZ8`aLac$w+nV;kIE{p5%zz;^4ax`LH-SbNyh zOET;;0ri?^w57}p7=yxR1X?acg)Ll0f>83lE=ilSBCa-*1y0kyf@^L{QNT(ZkiH5b zXKS#V)U*b&OkhDx4Bp8K0|!tXwZMdB4EyKfq>yjfWc`G^-!N0mg)^9x@O-Nlf97)8 zYLTNq!eQ@9|MT-bm>4Fe!YfBT%84j?c%M?GD0g5sUc$$?7HkH#GHD z5MtNwfkUUUj_;0zf2sfkNES9Y&Ra^*T2qsYa9E?sY6<_9mh$}DBObDaK-E53s*aPO{C5}7^_*(zxaY+DlNt80)jwnEnh z_j1g&_&;>Mi`@>eRgxL23fytQCYJ}ztgX8ZA=)tHq34?j#2neqk35~OW}^0KO2W!{ zjq^LX@+5xi=XJ_TEYM~U+g~o@OUj3{1`=+u-E+U<1-$sE_viLZq`)Pu>89Pg}y z7RV8_#C2(gY{d_S%6yqqD)8|S{RrC;Xa<);RJp)1cjC;|)liYTV;+lkxyk84=5UEd z6fG3O&7f*!T&LNym7x9{#V9`@NQi%o*NgOM5bueK8@EaX=?Z;37g;v0c1>7lJb=3D zRPr|JLKc`>I|_+J{D3XNqFk&-dqcoprQDk=iGuN0&nbkpu8f$qZ{07&nX$&z9a!Hw z9*7EDX^wPl>7Ofzbi8TLeLH{9f+9+7>v#|h{Kjk9Ow{$c$p*$YSzX_IT<&bHO`u#n9xqE&oc zokqA?%3O!Qd8YJ`_|1x}0xRw}E@SB6^~p$O;WVMqDhfyeH`q>TzXp!^2X+fXQKvUi z;liQP!V`ZN8jwf<#nkWMC}kq>NGuk$`|_4KMz--If37+-o zjOm({{+pgwioYt-v^om)bkSRcviNkVjj00sk&MPo5N5d*Op6AwRk^fRb)a^9<^{v$9K=IQ{~U>4(g) z%+uGNJ-I7OC9?h)(PnS69OTX7xhm>KN$9*BxNQ~jwMN;EOV*epIJiy`e@F_Sf~IWjQcz0nY;(? 
zp3r#ttcyOE<>kw=u0UXfzHXj=g=WLF6D0)b)gEN`I+q>C$*;aK2Osc#lMZy*ksHo< zlj&2UzW(>o$h)AM1HIAvg^hi;rNd-uQBKzvAE@lvwt+ChuIdl|@(ZFbRJ%XpU*gf- z|Hx6_3s!|oC7eMkji$IMl4Uv}*?Tb_MeS=JvFs9rx!jdzI_@rH%VwBBNYoO4Keo$v zFWDEcfa{g_BeQ04pb+^}ua=*@PG+5rbJLQ7(R1#b1H_fN)Ja-RF7V>xti)IOqgOQ- z*`AHbteYqr!g~a8Yy!n&OH;?$Fof53eFNM-w?3fSKs40G+aQ|7`)IxrQr||CO@A~^ zTGe(r>#$7A3iVq|HVfc`b2!vnAG)=k5GyKCxwr#3!cV+)q}TH_*So8j^)7rnri2UO460J)wpY#i#8Q>gS$CssyG<`a_3>s&!HF@1PrsT6tnaU`p0*Po4>GCTS}pPECx;?hi(xJs`6{tr`P**{5Hj5s)k za1-!o&TBsn8;bO&u@F<=4invJvL)23{ZPJFRBfjHo2@iae5pOfH}K8VoHsee9n{eC37YQ)G|jJ&C}&%9FFk8lvwU!-C2g%lin*ej+5D12(cHywy#3X4;jF`nnIV$pS2f_S+jAW{Kl^KCxS+=qdTr6p!2b zqvo9xbY6asFlz1q!SHKcAnzM&*#Biwy8Hu?qDJRV*hf-ys9zbZy7M6)W-1J(|EwmT z+qdtFi7!w5HLNtmoo?{pal+c?jL-P2zXuec5Y6n0+610`N8b66CnCDS5skmuSQ3vS zo|EX(1&x^tJH5B%lmp*RHM3a$>_xMykCVP?;DMX1OKKgM4CE zo$_}uucaj!=mxxM`xY)v%_sSMIq{+gSm^{b_uh&0!Ntblg;*TBKVCq$WV-4FXZyTthuXA=UdTz<0KQCGFhsu6sHxxK%x@kzfu-^K_95%1U3ClZblJ-4{uq$I3Gu?f5gejIN_dx9WCb=;>WV*P571Als;6?&GKN;RHXP(Iyee)xy3kjJ=aB zc3y5bJ*;scDG;qseH;%A8<8lqqc;Md)HKZkkqQ;vP*D=s(Ek9RR`G{i5mZ@al?F-V zx&|f(*zlO%7M1zf7{1AqbLP@j>Lf!sk3l2utKV>W{W9`j6;E^eB&XgJ4N`S$WM0)28BK+4$gFeckGn1N9U~?{k>6^|$U*Ag4-X z6$vL|<~N#`q?UQ$qF~>nWr*J`iMlDR&x#Dl#Lq1Fuzok$dWqJA8?>ig z)*vGMK9w=Et-fMY3o{s&qT1r=? zLftGeZ^1SEU?WHWw-sDKzf{4+0gP(0s?RPE7-E5v6cwj&5rW?WM|fTQEqleq*?_(x z!~8njY|521rHY=LOQf$SF0-+c!B1)t_dR?UAN=85aGpm;weu)Xn|5N3B*&*AV0Z@Y z&uLC~$YEoJZ_jZ8=JiPTwzk4lVe(Yt}@*OkILDssg{j+HZ7n5 zDV>GUE;Gbu@H$6o9d<{=b)SfF`M^4?$|o_Y#$2L-Ln?TLjBHhu@=b2Q>-T9=ENcZ9 ziElg_cXLVsh}a(1zm22^HklL|IjXf5et^vaQ1K$jW(MdgaI58z@%7Rtp65570YL0d zdXAK8;n^l>diOfNe0Z_eD;7RWph?EQJlIo}X_f zj2^IW;@aRjFU%??qFB4ztMMRMeP5p`@SJmi&AR9XYDhqtWowK-59YOelqe{1i1F!) 
zlOjYd^tE{R!pz%@p=aBpf|<`zMztFf>dJKu2;FN4l@p(MLE`@6wZ~ZN(13cPN~tnd zYGnN_qUbbU-${2@-IqLx9!N2Y8E+p!6U#LVBiW)~In0z+p0z?cSeaKv87s8|@@khg z&YsboVYQ z=X3%_HT+CUo3LK%hiWhIWA)4=9fs{NR`)ACb7aGUdhboqiIxxV*kjG2FS&S?YVC8T z%bF)NVicTYrC?$xH@X5&V3mNvoeZX`c;a|Xu80_dZm-D<55cP+ z3`%O8N@7yVa?R;qS2PCBGkRa0V2$kIk8x5mUOVT@eX|U-1oAd!50{+}Z~dE^CP|l1 zCUucRb&_)275LwO&&r=k8||ge=1e-Rn_Cktv^FXDBq~eSyyb1oYBF!k5c6&o_gB9r zsni44v?4|<+0B;G_TUfq;|xBlmBH8U54MkkB827sLy_N!Jbgz50@{0anar7mZ`F!oM?0pk3=(8AU<;8#Tn?xVR&)p5|cNE9Jh?#2FbW|eHcIX6)9 zH+U2#N#-1-;Wa;}p2i)RTlT)AK+CaNzE03eaim*c^?c_(W!;r!s>WSA_jLq51~K5eTeQTa7Z&)#i? zp50#8JzwwQ-B)!6%I%`G{}L4;EpDizE7hDatFC;AVRtt5=uvnkOWX4FMpZ1)&<|lkV!G(#NgZB#)0yDl zyqG+Ov7hsW33FK3PQ@YO?-!OSt%#~%I}DMLuC3pr1J%ml)sfL&JbUx5*s}~z6?;ER z?-r54$GROW9)R8(LUmMl&qYwP#6%k+RKGzHl&k+D+x`gh5IJJ$-w>78?vW-q;K^G}hqxewECMl5Yxr2-KW*c;8dC>_V9{b;NV|*_0FjsKI$d^weYJ;P!Q&g($wB zOc%(;2x;T>RY#&MVb%J|J|2s)8Noyb83+Kr1KR0-(*5HY2*J%18zM%sqfwTxsMP^w zR1XB?C9rpE&=1Fq_5b~|c<7CGXNsVmvBxjYR2AQUNd@@uq=bgboNF|$_)6EM3%~u% z5VY^<07g2KDaVS_-NHzzr26+FZ}ncfvFUo&tfa_ldosOjl5Xwz5Eoxe+4<<`0%Qlg zre_;JJp}xbp02ha=347(#lYT1`N}J^{Zk4&52uz+ZXbo^yteybmDXpZc+Q)y?;Kal zhhK<)HuAeY+flQ9=@mv?pr<}VK6(*2{1`jsIDfLjP6a~Qy)r6Ewek^H=gGl~uQBQ2 zh-Nakps$t-LfL=U6M+#2h^7wkz)lh#(|0;S)v?PC(FfxvSBX&}XjRzjuf$naTfaOS z)g-s>5`Mg!B_B>9TnbKpIpskL#CbZZvc3S3&BxgBnLuw8cy{52@Q?6-_Byu20dWhA zkYt~Y=hd;HoLf?I+oCV2s+ z$fH)28=ZMn;-=PWY;&a9ZBlf)F&<11gf>(Vbadq$fP7_+4hO-U|YTpH)yJEujT`CpsUFRIAc z-oIZ84t@z=j{Qu!P7;lGr0)E*jrqeK;M_0Gi*F~#x(!IRu`W0zMg( z5C3}~fQP@XEMu>qdAsEgidC)3PYGqvq`H z=2Db!hWL!nwEWzh6G5zFehkoTPR8 zAndU#u{#F+U>nBx_M z%zaU=cd2Rc4@E~U8b=caVA9gZxu|gHg`_3rmqXMVF2T%()NS=VMAY%DagOv$J%H0i z_MtfE@cI>-@$dpAX_;kZ%y#&B#;>ys%5^H7+$mpbNG|D!6F>ZHvcm3nVrcPaU1_QBh_?IMA5T?Pte(B|F(mCc z%`9N@$C1zq6V~8Wgn$dn*b2=X%K|IkoAbJ*`Y7cJjWliuX;{x1W_F~6A82CPLe1a| zn{ca{Pm3l8+<9F9prT()?}jFMVp0J~-@Z89hl0UU!n=FW6E$@`vi7}_3T42j22g?i z7<~|(5@#LOTz|`UAFobRae(Qhme7{5lLrFj}0!qgT;_ zJXb)=hOZ$xOuPp;Ku^1u>pIaS3FPQB>n)L10!Q%+C2<@U6cYbl#@_$x56S 
zok!u(wPQ7MdNyqU7W9*p%g|PDRp^WN#~ZK2as>Etsm(MS^8&OB>>ad6Zc-U+uZ~TD zaFtRJ>>XP-@{=W}YaBT)z04z=qcs{a@l~IY>WGf5M~<3;-!aa7oN~3uqbdxMmA@3{ z5gi6K;ww;B0Nr^}ah=rYd^fIMKgmM{cb#E6}`DPt4~K16qHPq=}I%&$Br*i;T=4Y;zk7en39vQi!aU~K7Zz$ z?R4KgAJosqm#Suq0NQlQ22!|7j=(CGjzF}$U4XCJnP@o>mzlFh;>ux$M(RE@JRLGV z-)O0$#ntV%*F~QgjlrIUv{5%2gR@2H z=Ur1lYT{K|%~c&Y&P7|cN_0RelMRLvf7dsfK7#eQPn^Oc*t6qwm^R|%ak8HMuAyN(-{m7ZZ=)rZ ziwDXcPS5k4%|^TW<-buX-T1iPdHJusrGRP_k@;ga9^AV9XavJm(zthP8qC+j-R7ST z7{|CS(bkDFWn_2p2d$|$3Q$(mMIeA4(TPjs7ola&j4&KSEjNJU?PPb1QrE8mOsg2m z>fXOVEWd`piucoE_um&$G4I8zV57{Amh-251%1(y>e_&)MME;Rub3C`33BI<=xcR- zg;iL%>%Vw2u0;K@1?sb3<7%u#7kt_Qp`baSSRiJ;p|-dO70;T~$?2P7m z{&$X|nS+v6Qlbo8t*)DECUq1EL4`6)txoH<3_k|q$f-;z)p{1VjC1*%LYSULH9G)R zaHTVEeM)hu_|?U6OQz`F00qO|lFp0($r<%23Th;}Ofr;!bL-j6bRO=exLzzN^TPnX zo!rG9oZ`JNr9eINAzo;a&VugZu;lrYsXUq6tR~QmS!Og_a^5=a(4pnhc~Q}n{W{GF_?Klm_B(zxiP>$9)A?QJp`}uE!B@Z7^{cZ|81iD%iJNqd*<*TYmtFhW1^>=Ef9fpD{4x%RK(!+P;aXP{34 z$Fd_{*Qw}TYWA4AK9&Xr=20}Bk=W`T78v4%`PPiB6yzmnZI%XP)Wp?#U^QaCzDF9E z-T#En_YDr$1y0gm#Xjzo?14M)KQzkxzCI&3zc>+~ukLHy(uau7IZP2?YZ3}Ety0Q8 z6TzugW!I|;Z3U}}NW>ig=@CvtChACy&7)xHZ5bLSrE99UfKVz3>jh(tMx#&{H)DK< zb!T%p^gcPfZuO)WFkgPaJI&5Cp>aW8Ak7700j#C;l1hA7yL``I%GbB~cSqqmlUDK2 z$m&8jvckaS1@AD2c@Sz}$JbemIqRdnw>w)C?< zbizhCQNeazAuGn2DYMaE`sOm|$T1_bX>^C+Zvi+`1 zyCX2V+?572pZ)X7Hp0fOt%tV%sZVjH3{MKc6d)6&C`+FYG=!Z4U9sh<9BGK%^m}vO z!ck}#sId|j^nSE+jH8(Gt);|MLP|kf_Jif_@r3Q(w|w{C{3m@@yQHziZ9XQnf&m_^ z@;j%&i%M#;{{%q<5GcELBe#S~zw7j_x1NKpjjL{g96IY&4#Zz9#W@D>wr0Spq2UB1 zqnNYkT=@0ikW*HhRZ)GN(&IegjTSwt8vu3rIqI+cKA*gw=AQuSq|+i( zX?h+m`dZ(1*8Ht%?^jzYaPfK5ls$jdjxMz^Rdb7Yx%ScaivZnh43N@uH4d*KTd%-b z_a9>;e-=P))h$r|F~&b@=@F|iMlJMpRw^yAGoP6yPz;Iw9%87rjmftq3R-v~v5Xn0 zUTKKbp5c^C+-FHnbA@QijXzrau3{r&)Hm2dSg$0^v(*xH#&1CVWPZ&k-k#oFAYlWK!bwG?&`xGY*QNE;}+FR-QPM?sE;MZspmL5_>15ofAO zv`tKJ!R|@Mz|{@RyZ!lxz!AR&n;_ycs56(zbGb7ZoFNwc{enNnRk!-0?-?gic+i2^ z>r3KoU^%COMK(bJB;EW2tG>U}T(#~Ur)fJNr-SXKKFku%nh_BS^mY-@Ldzwm)N zw~ogv(wjD7Vvy4PC9R?(cIcCfx=?bupgj0{85#fMMYj-nzKCvj`I>`EdugL%jPolO 
zf32t;mtmmbDqEox-C`t`gzHc*4JaO4!v@C*9|uA32{eI_k7?YGDu`f2Yv&@mp?*4h z>S$~5>|u7l4faix7@rB8x)mz(_ZqOr_)g+-ujAwQqr@1 zauw}FRQ`(Kn~b@hN>#a0Uz<2lRXNZqqWce*+dhFFsL?=M{g6#W{kCBCAn#XPdGcXP z>Ojm(PFPT;y^2Z$HAiRsP_A3~e!dP~c1Th~IFg$UqzPVAu42KBSuqQ*08iNcUa%C1 zJr6uge3<1@Co##fEy1Jaoz1Abie;(V0*w7|xm{zA;NJD$a)ecSRmN%~!B|r2rgRZO z6a}AvT;sZ*l;_n~EsUx@#K!HKZ#Ix60Xnxsv2f9`X5GG=@1ImmVy#)%95f}BQnXoZ z!H#sJk>~HgjyZ!sXm*t;s$mtEzd(*L5QW1I2`_$|D+w_(&<{zZl@Q;7PNL&$yLSs^ zbOXt!w5JO$dlT@q-T|??)>p9Mks3z)c5~Hq-u{n?7YCNjc#I2ge9yukhJ0CF!Lp3h^MqC7T9*Lk3gfZW0)RsLc6aozY`06qh z!UjLTioZa^(fB$h6B^)?P^9ekCESpcC12D70lHepi-@+)kIhS(WtPX}qmkJj&7JMX z@o5S|PLr|3-8-@CmWeq3%`d5OiNdFQ-*&nG4e!q+yZ7N@neuAE2xy1Y1{M#9Yn>A} z!@y5F=fXP{ba}Gg`PXez2jdh@d$0_oR32IS4?@pyk0x{b1UzRd=C$u{F&Zoi#J_vG zOLYRyS`8ra$_b}ohC~QHj^}qZDM~?5W6hTE4UJ(J+%Gb2Hvxj6x)Sx1D?||__J#!Q z?!(JLVLE@GAY2OmKciz^7Yhka)WXdgO0hCV92z+R3v+mv8E^Ff7MSYd^UX5EpCm`8 z#d(m6Mzw{SNuds6engzi%5bNi&2LtumaKU6Vb=NJPW11Xx+5bUeD>`PZFZ#>6`@nr zt`^yjyK`Y*xMx7pLK+}P5Iy!6=nVM`P@yW}4`w$`M6`PUg)j!HjY1ymBufB@F6V%zAEFUv zM=(wQ(lSGN2LfotJ33aB0N;Jq;)fRZ&1^R}TlD^3{xw&6Q-CaMa%8?)#!QiV#jU|U z3zm?i9PsTX*UWvW^JYEfPYtGq1`8QA?uU<=+D;c}zX-k|l8xh1c^ebW`FA2x{+IU0 zxUVUJJh8TLtyih?7lH7Bhii#-YMZT*p=_~BQ|~QQ)+~p|%(nQpgw2+^0Q^jh2Nlb^ zcl6wFbBf};Da54BdyY}^`i!KyGfAW~Q5fM9h7cXS{xW7z#b7b~cxRLyW5|Vg#D%KW z7@Mx+{Pewz&dm-)rMyo1NcHZh6S_|W$@cvl7|+~XOu2s)tg)(mJJFOn{|5;dfB@>H?H*as1zm+CEOgyuR*txi9H8G^!o;i` zI=aN#FHnOPJjJI~gFbs$SFzxw9l_F0Hy+d4Kf0sdHj(77w6^$veQ3$Zv~btG8Twd^ z7Ods|J)1i*L&#xiFX)U?q=)*{%|mEo%sUR>+Wlf)p~zrTH=nfa%w{Od{_wbVA^X$D z6!r_Aock_Npaul&Z(|Kf z{evxMs&@Xk+n_4ZFbURTD}%l&q3S=9r}w?{enGg?yt!`ZrZm-y4lzE?bDOo$g>>n> z8F+Fq*g|IJb9oexs4R%=SP3p?vH9`zuVT6t)~)$I=d6+h7$t>lZs~is#u3-hYM(kV z4Jl`(l=9rkyY6Od%ZArq(*)_g)$ns@#kI0$JGgi{P2_~gc8h4q%L|4-c5cu+;}r0@H}%-BC#6-L zxeJ8LEtU!eQQhT2EO;d7#?yS0p3XKb{d|i1dau%`oy_QoGckMfA%rF)h@Rr!{^NFN zQ%1vbq;u~dJv46$_XXhl)bh7 zffbb6A;d{l#%0qHnQSxUBV?Poe8#WuDsgP@fs3Kx<4`eKf3p+ts;Fsk0yg{726It~ z8D>A{`_0Wg(yy4ZQEOg!&S~?!r?ul@Cl8DTIn_&4c;>=p1S|qOaID`j614kqb^=3O 
z#;KJ`W*YnK-#i*w$o5f0iZ=`D?#_J%E8tw0`jUHc4}nrh^tPQW&fXZzgo|;EHwWbl zyR8X#mYYEof6*9gOoh-3eDi}~+-y%0S0aIt*s|7924T--um&Nd(aC;O_%RVS+$c-<{gTGU5x-)ehPRV7K5hns7w?5p^{;8}fwp=#6RC4aoxnzfP zE8xHNmnZ$md4Yahp$qkZ?x$IV=a)YKsXR&uF`dMCLLmL<@&9}Xnbf482}Qhd`NODI zPErjI+w&Gi1x_ScL5t2*GOZIIW@SqHCO7Z!2B=1G-$=c9SBliQj798$4qLY8#oQ6s z#XUgLhx79{KP&8=HeqG&THI$BF3zcgE+KzwXKuMYMDb=#yu-B1d@}ua)Zf^1ExJ#q z>Z!yKZD+uV?!Vy=(O{utBkb}^%hgVJxyYNLx6T*9Ekz6PgLVrar>nvux^zn!v(_8o zY(@U=>&(TK){f#x9GVr$VXlm(Mj2CY#oy79e~>r9+d?V7SNz=0sIqk5g1$d#J($*;2PMW`847uG=CJgd_bYm^7)=-hCza(3UQBo^8^0kaIwZpsen3^ zJMrV6Y_TTb@6K+8y*c}lBbR|O8`UX3u;{n3crM=f^=2k^9baNmaO3pcm_W_d#AUdk z$#2}^@b<@PYs)*XxxC+Xk|3T5uy_Vwey;wVf*jxW&lbW$FijnWd|<&tBXycPrigv6a#;3p@=JYIA1rosIaPK(7?XSF)W8MoB4x> zbwu)Hvlk#nnBvHIaAua|&+1RzOA<^>z-eX_?7$wXUT<1Wu^Ng%NmcmiJTQ0=PE%?< z_U((c-Wu%O2vnXZBS{@5=`4DeleQB_ zRIi46YBwgZNXCZB0j_uFk)~j0yK{|F8iqqEyvU9^g!%%0UE8c2%JUpfaL{S!xSB|L ziUpS=!^%w|yEw@#8r2U>2vxB}?{p)Q9Gi|m(_K^@csMZioZIg$gYQ(%LWC3Reg#f> z+?M19e^rRvx*5jL8sjnCo9&c)m%$Tx-x4_^w}i((LEyO}`KH3SPSHsyy>c+j zBsveA1aomAKNX)6^BT&!&Vg5+)>ldwju!?7=YkAg%Ne#WpJV^Y9P0b^gMT-5o%LCu z*}prs7R?W*3cQ)d7=Y*H>^v@EZ;Lbv1MQHuVDNmfmM-X4_j`I=9a@xfZk=)%!tb7x z+gRVZt--#)Y8;#6b$!kh{%b=E>CR{X0SUh_>1?0Grf;D1FO@Ol*#0gmHGbU&YuA9- zETf;R;~4r%6m@}?I(2=)0Y(k-X4HFRb9LLQX~r$?D$@_s)g~sb`*K2aG-XFM{15$9 zF5gWh0D9e)+>OsSgjFAQOF%U|LW7}Ii%Ra?Cy$mXf2A0Ed4PV9f5(dRs|lelCj9}&4!&A#d$0C!=X%!I19Gi?_EGp9_P-#dz_|DD zTL#;_8t0{_hu+t%z%YgjdCNYq|56j!cfX@CR?9-8^UE`Ae;X#TEW=@(=qkB_xHjSq_$MP4=l5tu(y-f%(hObPPS`iV2+qXGo(?_l#F zru&gwUFzYlSu3rLX04tEE!V~GE|7#*M#-Y>4_&v_7%*9+W^nMSZUeqoiJOb2kdw_0 zc9?3!@xyJ#d1u~sbIaB5PS>?U6TfsgD4^vBk7`nu^_CEc&GGWEm~lweNTR2AaOQK+ zr5!Fs8c%Q`gsN?_<36F7jLWVV=zXI-^dJW&KFqfC_OLB8vuqdRHYo1-1+3$qrgOJA zHt5FVqCE9P%CuwX_6ugv>X*Y3zXn71kiP>yqZ~`79MulOet>FRV89c3PBPZ11F{!V zPVtTXsfVH(FSy)(`(q+4hIHLClBW};`kfmY4F0bLTc>=cIzm5t{jTRD>U`u$o7aKo zCx!D;gAm{{G+5izGiT{Oq;!&&6b(lgQY(1Fu1zTvVTYhft^{sblRf{=Ls>CF058*# zy3vVBNL4p;?TQc_OCDal*iV!iszeSwj9~=1#;11l`$^SLH0v_W*q2cDPVjhk-{GsQ 
zoa|hXnFYi^$Q8nh3{%Cmi!OgVY55zD!VzE&o+vea(^0HD9uYZH;@2G;-zEf8z#%u& z<$5m>a<#g(ou>ot7s1LMH;0Ii80N{78YWHVU7_x~Mb>KX-?aHI-$(Dw@c3oo-{ppP zZ^ep3x*tk%br)P+-EX>4n7x^wpH{}DziWji(9}F=<>^rue;ngsIA@INW=SbuM{WM> zz~A#{l$$`ym!0>U#lKT6cx(c>D>faj?7CHchFeX>WjGbo_3yqmSfuWh#-Pkih4-iV z28XnkZy^6aCc6>A3qleu=jrFM$dsHY25+xevK_u%j55?xR!|it*%;o;8_0D2GO9v4 z1npw*;f=$+YLn6X-I^5sRM&Hlq@3WtcSJH^+nGyBi75OGqh05@>MqHrVu$8u=XC}^ zt-%4AxBV}(m7?i9XqnkEtzPFk zU*R<_@A4*|&0cak92>)h-Vn4qFSUMr;Lan<-MKKNFug(o(xIxDiBWtCf)EtX6c3TiDsazeqrF7pAJ z@UTU2&qQ}RimLSf%NSf{7HnoSQd6j|WkH8*GM50K`%VXp!;8P_xd%((w4sw0$9^`| z2Hhcr{%oe1?3%I>SFMIUyUYxY&HYiJm)lmZ^UG2)&KOiFrsiX!s@W#CuOP!6OEDhm zL>Jutdf#Ehz|6zM@3C@MXhgQbg+PNxi3fXC8|SW|tgD36&Ln5EX_vOu)Vq*ARRX5( zPw#p}E!Om}d$pCU|996`_78dAg{+4Q_)9+uiDw@x^f&W(_SuMtCgNeA!qTGAOWDuj{zGgm~reBQ``64P1i30Nm5s(DG;n zr)96q;it~vjl$8V0=Z6B|M@t*gC`IF|0DA6VR4Pcz^ux(JZAHn%?m1IM(@;ojcED{ zxGI*1^uj$Z%e%sE2MwFUOY|H!G_4*MpO@c)j+P}Z{P}P>?4=JX>PBkw6(C{1NGFc* zRMZq~T*!yp_C2X*;rx4<9@haLtS&ZJ*3%lN@J{HG7{c#}J9>#Y;OaRuR4HBI08XCz4cIJnWcvMQwz24Q%K@+Hh~} z>HLR-FOtTdABV}NEK=?P8?_8GR1fY0=qq&GLa)UqTqny!KFs76UZ@&;sD=US2V%x{nlFh|U0U`Hv*am2hVItxMN- zjB8&X2PQWP&LrE?Ld{>>{FAQ&8)S2j<^RVe!_|_+gbegzteZdl_e@`(ei@7Tj z;D)tzBq-j)%8r}Z1BhiQNJ$2);G#rx7m>r1?t**^b9b9RI^e@Oy}eRcGUq5 zNdcQkElEW^Fs}Ya7Is01LaasP7HAo^z!af#2CQPwRaG(1D-!?>9DYDt z;1=9V&NsrIbUToSH5T1`9lTZj5l4;W{)d41*U;H?7!bqb1^Px3okq)#n5QlK0s~e> zhiPM}P;@5Ox|0Q2u|%dQvq#_qLjBmO z{9~ATAVce(uMnAu5K%~K=BtOP0F|Sh$l8bqMtyubk0W}|rj^PMJ$xLN|4tT&9B%(; zx91NYMoTjTH^HhSs5{*8wNOh_{p80*PGgm~3+E!31%;{Wvv9>hU?yMCqpH|Pl6B-^ z0eAh^Z}oQi%d@OvN-XKA#oM0A0h@hOk?wCUAN%Js<>wN`JULQs+jAB0%}*C@=oy^6 zc>S?3=k=$8Py)nu2meERzs)L$&I_zr+n1yE(8u$cL7xUCuavX&N;_Kp-hqdm89%r#$Sr8IHl384IZ9B`AZE51ubcAJ8_8!a4hixRW z?=X<;Y$gLi%ZrTfvaOVG-&}aq=q*yWaIV0zA@TA^kkSv`OU{7^**SPxxW1qg;Mesh zfF%+v`58`rcek`Ny&bGfa;sm`$5wk3MLmZ7NB(O({M+CD^DkH}uNcF|<76Q&9*-s^ z&l@PlO-4&`b6f$|-&gX#zRf@X>woq1fBdb|<&$SDbW{1?0d`;0FYw=2``K}myG|L?~cPf82{x9|Ig2pQvlea_-wA*-wx7$&QJdq(*Mo#ydcUOL>tuT)!8Vph5?A4Z=qyLIT#8SE9d 
z;O}K#v9EycG>r=u(RR6Zel3!}X0ZRw&Tk8yxf#j%C~PJ4L|lxRamH%$=IR6PF&4qA zf6gwge+#~Te)$A2dWozyaN@J=i;5R2e`2EmH|baYdU&kD8g&goAYx~FGOgCR5GM*9rVTx;-=M!$Sz(f_s1Rxs|R&DpllB?!m@uwH{)P?Xjjq$<-d+qUS9{ zJhQ&acnx##aQ?~jFz%wyYSXdh`+;R%4286F1eaMaU03-W-py)knSLu7VZvr|L`+ga}6MeI1wDhA2P|`-k8^_k3scKI-R(g$R3mBj_Y- zy{f0CP5AYalf(bqtG!Tq7_7l5&1wO;yF`3@0x7+t*%iu6HXj_;;9 zUDXIHMpE@je@oyzul^@Z=Gw)D{#^ZDuk#1bEgM)cb$r+QPY2S!_tfB=!<^8~*e^wQ z4tT#LJV`eBlZJ8T{zpeN>eEGPw{BZfc)L#;Smg)#2CF?uTBb8Hu z)lQ>YExk9ZQx;+AvdvE~2pgD`r;wR0-?;(^DOL}SHM@Mh`$xpPa^#@d&~9>6_-&E}F`YH}LO!`rD;uHlD9{_xnef_+xY7$7sKjLtV)}p0++wXVf{o+J=Ps z&u*_xv$n)e-wRf{d-vr`M*_z3`$uNxW`>rx%wqGpe4~~2+@Hv<4{xgc9%zFN&i9^K z8MS4R$TZwbU|^TbF+=o;f45z<8&ut{XH7Tvb76i^irDmgjQ^FLvgQfa=d=(psQE(1 zARxNx?lUHO*JETUrq!=Y%)c4mmY+{M=iNXipmSZLo&Hv&Q58NTn5n#dlY{DUv4$^G z>X6#gAQ>Y|NSTNl)2{@+H%td!64ut_Y_=`W@pP=}-ahvG(V%2;>ihi{i4%UC`DZhw zvf&R(g`UlNC0Vxi1pdm)w%GyLf!LTBTYjrb%ypg2J3ue+&j|R{{@NMA17^V8sx{JNc%fZ`|o4jmFyE|EPCvE?9w=yl(X(YrxqlCkE{9NO2vomPQWyR zzODI+`5`vPM1e(E^x9o!`4uCL*OcRbU0nXl z|GT*VySV>rx&OZk3+?Cb-Mbev-5SmB$ZizH5NvXy_nbxaUcki0^G)x?Dx2bZVn1Pf z^p|pSR_!au5W)%DHmdPpSx0|mLr4a?I-xvTv&QA?HuBiCYH9upCd~YjcF@bJBz|4X zMGVFC2GAW3+C|Bpw;3+BZ!+?lH!Su6y67xG!nHY(KLHV-sKOJp-(ZB?+c}jiIx939 zH1PuA^7(944cx}DcD=(fM>RYyPRv$V`f)YQqnm4e$T+ioupoYRp)dQBq^^5lgC9eV z=Cr?R7wu~>oi?-QEfhb_=lVK;y59a-3MbkF;vB8FskmuzM}tr65_?SwzEVM?4o`py ziU1{`Sb123Kc$V-SenS3dGkU4aB11C^Tt-tcjM?S!K{6=51EM~M#WKwK}CUd z)1yp*>fXfhWNWLZ&adq(pB2oqc3n%^JM5URwJ5s2kRAP?TCArs0=RtKBn;CzoD3nj z+&N!pj=t0lV8E9(FbR8)PWb?gzcEe}GQU|GeaR&so1pK;lj0#WLL@hoQl@poktnkK zWbJ%L96*j29w|KABTHM(T7OK_aZjLP-1Eu?tC=`tuBS?Y zm#q(V>F5IQc)dG?_N@UWM|xKFv}J~j##K&gkpf8D-(GvnQm-DWuein*ou}Bu1~V@G zR2ek!q&-bK04k5n=ay_A;K zcXI_THcEQkw`trw2adc97U4U&941!6a%1tel3$oyybT31W>U10=c!cmVVE}_e4_+o2 zK|r>uMdD1XqT>t>4wrnjiY@Yi?8fKA~RWDY2+i*6!;I~qY zaern93h8LN($hRpv1dyg?MWXd#Or#-L63Wg*g3{auj#nfz&)E>&!x>5 z6=tf@eVY_A`o|6()Z0N~m)w1Z!=Ks)*=@FR6?Ll}EqS@cU4okYC>v3h{8v*8pR9*F zW6OD+xbvrK16_J=^&yJxgzV9a%u5~OSjt|V$`3Q2Tjs}c>b7{C1@mqGl6m=D{ApgF 
z;*1}(sP~xfN8?xc%=h;Auo)5NBtKJ>ka;&TXmL=(rkly5zeHrMW_t7ZIr}#|RmQDv zD!^U?XUaoEypFS2knrFa2A?O2R_Oq{Ni$!AGsdteuo`233r?A)YyZ{{xf*gbmQAWa z5EwLH-l;;5kK6OX6q~FWEskbqvL7C{0e)UN>A}IoYynUbi)N`!A_M_%pB3^GQRguO zTRrht)Q-+Q86PNZ%5nsp>KwpRn(hqDFvkD);EhexZ3pQXwk+BoiI91$<36Z!fSEPuFm z5A}Z3<(84vDIqgSx9A5&3yr(|_uLMmWFf@&e2`GsPUgaII%|=3R?o(|2iG?_-S6*A zJI{YQCaX!`;U|fHn%v0Nh^}Kq(`&0({j7)xwc#;SsQpjqhDn91d?pBd`q zhka_llJZU6y#fm%l<{b}%G>m)`yCEy)rOuE9Zls&3M_mHXzdi|b ztQv*nquf+d8vG&gJ&oJqXTKgFKvCz@!Si9KRAt%KFfiQL97LN9|7>2!OkDS0~9^8OYk%*pze?GXqkOOGQ1}uU9nEweKnjn{AglB4?T@HoYJ7 ztx?NNPHOWrN6`&U4@!0{KPo9B@UA@rm6W+!7U|99q0voQH?>v1ccerb(Z8*l+;r@e zb+3nRm|Vu=Q+^bpNP>V-S;RNP?4Y$4bgol!=d#~}#G7#k4rQJmPQ%xKdwsI=ut(Bw z44nn&y+btYL+8&Ee!#sAx3mQ)9#$dC;sl7T%R(DtvB1&mWFi2`Lc}oHfZtg0?g-cQU3dDxPmA29k2dEG8Wh3 zz57-W7I(F$@o6i0*Kx2$rb=Z?TVVxAK<u9DwWt&OS&;<>c)n&SR0_NaJO$A&MT zU50@x9bnEsAQ-JIPdbm+G#N!P0y9l4^u`RX2Q>@J_^f^$L^L>j85;NP+iG>5`2Obn z(;=mbqxn_WcA*sc57w^G@ez2g&zgsmAke~;l%PnzG*X$Kq2P@C3I2s<9x6(6T-_L& zeAe}Vbec+!#;Xd7A7l5FXLz*qacF{FLZ_~-8fa(i@XJ)dVv*gc2%ga7=d&(u{-km&*QH@~J`pD6{ylYWck4)u zZPy0@DXc*|!;P?$KjSr#VNAs2mIlzm_8D|v+()HCkzTLIc^e}7rse7w+Qh;4Xo@N( z^Q*UbO@@e7AS?BUYqA2g5VR~0zIz01+Z})}>3}_HF8$e*L|un zIv9FVYGuVdXWsM6Ry`>iNWWqgyYR0%;8aU}%_C^CL@DV(aP#?wOi>87rW-YUDs}T< z{=*P-DR2jwbKy$8TjDUUF@V4O`eb`f_`u56j?^ckm5z0warxYe2I-P01of1LEIdWB zANyLL#r&aU9neN@HUw!B7UOrx<-?6WXRa?emVuYRJF z`(p~9>BjVBO0ob{<&<=D08Gx^6<)mOS2)wAuX^9YLhWQijb1f)cQn8yGU>91R5qJ8F0WA`X=QH1>wefKfaMnVWaBwbN=u;0&nL1)k_Rm%_mJOE!d8gr_sCBL5az};H(iS<9ovpLLzmA3y21jG zFxej2@@E63q7ZBD^FE4GD4oSJI~A7XXN_gJcu7x!WWZBv`qFIqb_tFCZ5t&cB^96} zT)*RO+YcS=#lRngwlh7jN;(X2=}_r=l=mg*SLr4`ac3!S(|M(7kpo-q;m{j{qo6FUff z+Xp|d{O3hJ^zOj0h@7CtU~d@<*9PrWZ%I$2{lHNV)b_!TI*Mtn!06~Phg}(t<*cJ6 zUv_#`qDQ2Q6D%pD2NG03{%^`8uf&?7}fg%?6-M>1VMk`J4#0oF5;97d_sxz?HvBI(MsS z-?*FweV3bP`j4-rRk$ugs$i+9dB2a)P33M%cV5uGhPjm6># z8C=Jm#?m>(SJiG96U}5xNVWR7@;jg_a1=fa)#pjA{n#&iS%O6<@Ybi2L8ob1 zrY+*G29Diq6qKvBZ?#OAZ~Fu-f2uB0FgVkqnkfYVn+sN06@i6cZ0@HTk*<2=hg3i_ 
z<86^<_=ewx?047iGqp*~eS63exHF?obq}0EwA>-6?|k3{N*J^}s1FHSyCp2D(QO;p z5R+q$w$v*z6GAwAM&RzO{GLEKae<@D*6Sjqq;?|yQvvL+d^e@ylh)f4JF(3)UdQUR ztMnwD6#>VxS#)QWgkOh^6qdab z3IYe<`-$a1zA3 zTy1c?dk=SJ2Y8te(+&YzB!X_0?nVqVs>JgN!io&xf;l_0NeZFub$$~Ma>$W zYAn=m4xLFN346K;}bg}E3)@Pj%qI>hl{ByL@-)Uz@SDtYr>C*W7CLxZ*yR_ zKOfB0*j@jL3P70;((4Ro+G68b3W3j|nQqRuI{XpNNY7m61zvOmlOz@^@9)i?*8{+C z>9Ym{(L6O7Y=8?y!Vch%GC0bBPVL+Y!fC`wSCELzny)D#l4q>*o>gHia(X|zrC)K( z2{7U*EgM{otjv$yPFnHomFG2_pD+#GI&Ic+#3>o56JnK)4)jT z{W^-zcaWbz&l9XY&=tU=i>qJX!hma`$0esWrN-TkV1#Qop7JWCjsZQ!%b3rd=i2?A z!#?u(^*z6+?+gli*yL3*Q3^3%3(O-+=CRnKE?f4HD&`j1EGS6DAnK59f042AS|D<) zF{p76{1DqYFU9%u#7MT{e!}O2ecklr>@V>Ha!jr?A44173z_fCQxIU^+f`j>sZ|w_ z@&}dAuNqq5%8CBTQvEE}LtdTZBllsYv!_xyM-YZ{QFb?J5b%3305&e9M8#*#Tjm$F zlki&cvjb4(NUt&W`&JHHRddJoW6aK0fVw65LyWCZT-S>lp*=>Ka-?z@_XpX^W04$u z3LYhwlGA`-U<3kvY{AL!T%) zW=${UTKa5ZmxvvoV3mb6?dU#NxEfTF#{cx7W>O+p0BuAz>>9Ee2sKQC%I;MY@@6u9 zPQv*4Z^}=MUUdh$hJ?B?`h0B*ecW*^7aW_&x3l>*!_V=q+~zSnE552EmW=KNBq)Sd zxO~39qCOaH@nf{--2qsAhhdz3p6(eD6+OZZf`a@!JR^lG-KkP>Lgw%sGgtZi6S2DK zGOe#@*qg8X>n<~%1a}0be|rthWM}SV689E0S9uod0Idt!|;=m zyZ*zbzROwF6o~|2h{;iNAydGf03XVT<*bpq#JVJWDz-Ag;HRwamb>kFJV+zXPq!^a zqNppq;yZD;K=iUE*Fp4JhGV;YeuMKOh)>3Q*}>LVk~y<(BHdPd~Cg zK9P@I3A5#6TR(s3f` zjDTxvvf}y>S2~6CvqeG{NJEk_4S?6!zLro##gh_|0ve3Vm6`mdu<>^K*?WG|jLDfgdQC+IbJx z&ZX{BOH^z<=ED=_7%#~9C?4`$+4>T&PltO4PAE=(TbxWhQhnYj1r55zJohV?uNO)V zC(|xAk?oru+A^FSE#Moa#r(|d4Alc>Edl)lv*R{~=25XzEI8}@LxOK@Q7I}gi31rc z?1AN1eM)MGr-3`Km~AI7uW)U5B{af`Hh;{NOl?1dsCM%YtBostBVw>MIm1}KHBWIK z2yP>=SPf$7) z8-=Pi(t}_H!`i})xh3hlvlZd%)N%fFwMU9S^j)ES-=eh*`wmM7Y=0^Fz(M@jkl+}9 z((9-rf8t95rP@hQI~C`M@LlLtw)fExlJ?uQ`{F|D_O~B7Uc34&Vq6Jc;o6y~y>5pN zvjJ)YsA6kHK7Lhczd=>0TOn(H0FywFzB`ElP>UP4ZV)B5X6 zeP#>Nj-4*hnNy9WnS8xa-6M9II$%iOoPviJ|rM4~Dsh;t)Jm%B94>lrw0Y0$&55vwkGhk?Oe8-g8w) zu|5b;n-gi=<>8R6^093QfSn9<|U{w(cT$0gFC^T@hxE)0;0`>tmd zY0W(?{sBtrOqLE{xd7m7V47`imoJ@Baj#RL+C#)gt_GZ{#X3WaED`io1ZN00d68Ny zf)j>Fjr&*a#}`}(EJVhOe=&Kg{Ovz+fG}~42!WYua7_1$@}&qH+2#k_0~*VJ_Es*- 
z%n~*e`e#XH(*Ri~mMiJq`%FcSc_Sr?W1Cp4fh)G65dS%J@>D{-nE>v#N%{P`q1m4M z3&^j+`;>(zZZEgHclNFW zQ`Qqg^33MmACMZE#Ra*~$!f4cOr30<#hr!4o%224=}sVuTISw2TGCw3N4)d-}kXB~LNA$}$hjfEo+nn3SV4<|Ioeb%8`yW|=^ z%jW!Oy>C1s`kuyHWf>tZiQu?H{lAV+POj-zzNJVxOU@!{!+C-ZRF0_ zt)JO6UXH=bf$TnCCE@xo$h3Y0q8tgk1RM_%C^C=%C400QKasMKu+QHJIf}eK+W~Uq zKgQ_Szv&xWaeb-P1ZV;awaq!6FHnPP#Q)}jK4|ruyv1x#K7Js-n?*j_>$BO)b6aoK z=mIw#S=sdr+rM7=h&ST^1q$NPtLk~WR#$4>=gJ?Dznv}lSnn{@{^E<&CP{n^3$c0t zZA)-^Gy{m|)sW;9VCq|CC^1JOGi( z0zePxIYxC%S9;{ZusmqsEtXQ7iVFO)xq4J)&Gv-nX&8lwwF0zno(f$QvAllhnEb5! zB36BqRL$rI0FGk`E44(J>r)<$oDiSG?ID^lf45#i-)6lRnY<#zvs-y{9* z22bi-T%#Q;yFrij-OXSX0Go7=&Rsl@*Gw!Pb>6@|J3XEW0q#u{6q5v}iM-gLpJq4Y zTa`K7=>)>TEbPa%6K`diZ2`^yn^lDEZyKLupy+h%T0}l-_5D#`UI)V1ton|r+HOI0fYi$^Q+Yo}!Z zJZFFOE%_MZ<<->8@$h>whb5&VHfYNS+MUZzV(>=2+0a2Jp0Vsi?l?OE>Ir* z?||Fp3}N0NqZ!9o?TA)p!@4Dh+JB}aR6d+g!^v!J7HTk(U> zK7D(=BH(ubbpIGwZF2~(lf*m_aKHA8-CxN z+GDZFdS{Sy$7=ne3?7vsmR06tz8}J&$Pq%Qux${r%~+&4;kJ-@<=!i1iZHil_{6D} zp0bTl2KJjCbEr*o*!jPNcwUF=;jHz9oQeTQw%VmIKe+} zQkmjclpk1y=1>wSOzCYiOG~Ims1EB2vbJ;PaQucF;sWKS7Wv8a7o98h&-?X|tH)O_aB%hf9IQ@3IDChEBitrn=kQTZjM3nlHTSnd#-DNqt@!O_0V8dDU`r+}WkhkySE4OFVGmb2>T8Nl@`54I|Hd=DJ?q@})FRM&mL=F_` zU?8{?1weQ36&9<@)(77pPc{PSP{D*Bl>io3CO1{Lha6+-;CScI-j}UG_de_F3OZy* zr!ja6e$>!^gfHbjaGuS=5wfq6H%5M4eYeUeA7AZUq?IPF100n+&7lp#HutTb!%v>h zC~^h_VwTH+@uHo;n9f6HEI>vGKr?=OyHuMFOvUmZckP2*70S`__j2v>+S#f{``~T-?AF zgwB-v@{(Fyi$xT0fCs&jXG9_5{gYUh!VV-y5XKGc3PrW?l{;f8-pGw;DTDn1esD+9 zMyA(AHTs7PfE`oXe2?j)a&t;K7;o7)^q2bYh9eK_dBAvsTMB>4cw=$^<5jr;=sGcX zo|g+SyG;+3q0ht-0upSZKR6KLGqw!fhKF`T2q0|<&HXbj*kkz*g0qk(-*PpgRra=B z>hir}3x!(mt(^jwZ|DiuSXRxN3_L%And>hIr@fap7p~395~+my6Ea;giHr1i*4>Hv zary;V(&MZT_j<0x!V$5DTQs6mt^*9R@>E{K+F^Faz?x}#xx-Ko4$CXesTmaK^u0J{ z{EQ>&vb5F+6$C%GM+AU#wNAdQp#5g7hV%Et0MjnU zysN%#Y`rL)uKTF8cd+_kWrYXB0)Ly^;{MF^)goxp4bTq*og!STt+m)Pop9DyZ#fQQ;5rj#Xo3OuJC-%Hvu-OXQC*K8tB?wuDOSRcg&TlY6E%LNe*}m|s@QXcl zl{ZHnbcaOi-ps(M*>-$9>Aw7CAHUO~ZIu+4v_p(JBV0o)$s@`ElXo^UEUm^GlY%n^ 
z8xlrBjtqcFw~W`BYkMV`ooXwhXXNtG^V(2(0$!?tpyD11$pfT)YJA{pNZUU^5#p3< z=XgTaC7@H|2wUpEZ2JCt$o>60rH4Q(KxhlQ1v*9x7SO`0Kj$P$&rKl|S*6>m_7!=8K z9alD;tq0dRgjlrA6iVXX**xy_Lx%+@q14-a!^>Qj_121G|2DL8Z7~{_G#Hb1rYN)s zcUem9B(s21Di*+HE9)sbeXD+YAx2jl*FsZQ95&{97}*CYTZ=`d3P~9aoZ5v{)z0{n zFx^a~lKX5|f*s=C^V95-CUafs$#z5Mc<*<-LANH$-rBY-)ig}j8C3ef+MR+G{8=?b zUy@jo0J2H_-m0I3FIIr6fw*}~9RKCXGy8p|DOc~_(1Xtb@kWe6Zs`h2yTk+9wV;{h zB6B)$kJYr&4vAnQwSn^1JyRuDR&Fwo9NtJtgeHq!)MWIy9h%Ynjt#B(j_-WcqOBT088%{?1NbbPEzHaMt|l*jNd$PSfahsPo0nAQgnfm`S3L@{R4F_n2>ihdzX$FA zsLTLybJ#M*4p|*zSe9~FE|MV;yMPR`5SSj#^u`DT2?aa>Ak*cxMqaWo#^>n<@SB!D zhYzOi?-^Z;vgg~U`*-PBd1{}pE!fldw%9+xVu6m3!LtTLTo#K)J!~K!zTt->d%IJr zt3;tD61K6_NWXO#)iB28!~zw8_Qtu5LVyZoYUN@t-UgDwg%IAcV|gifd=qE1<(c!u zrwu2iaMzdxcW{c{A5IZBiQKrWKT}CjF#DP~r(h|M&M1HvLGWZSymI|Or8JC4?pON_fH_j zFjF@!#2?8{IDu9hx04=WkXK3&RV`N|lron58LZ1vrP7=FINRnVHEa z*FCgM>>J(-fDDCiw@w;F?%LhAr8>^a`m^Er+7)Ow)=QsY0Y<|@#OtPoF~mfn)ExT? zb)rH&H%|6KJq?EuefnJx*YYU36+jlN3fqr^Px}e4Vb7yv z1f#W}w$(H^_i8-@L4jqbewd5kZl=88mqOE)1&hAJl9mDQz z?F@y4;s8KM1R}X+=$^SlwBFd09e2sz&+!gQr#x^5NP{lxRF=%8&-jUww?RKicZP^^ z{5s$tw3R#Rd_ZN%{PP#mw{n03pYyh>Hfbo%oU{o#oy)LMFv5{wppy+ACid6F!NV?`lU(ruX(?uldYV+#P+Cw7LJWgM{^BI?U|`G-zjV7?;_}q!#YCO2LwytZB`@r~nKRzI*{dm|mOE^#y%^2b23)`p5f!stA`pe@Z(v!5 ziAa*M(ad&G9BKJMt_y83uoL^P`wfins!3n42DUQarLVOFF?(F7V!J^^fdhtEz@4@g z&`TSR21NRyrFV&n!$@4;B7Lu!%Zoc^}8!7&M zXLllLrbDoERDlziFfj%P3k8*cp5=2>foX*f{n`FpkfFY8t9W?6mS=u?Y!n9~rI1Wc z*Q~|A+WATCOdnpgCKxfb0XkDN)T=!mKtexmgFDOrrr=4x!w@vsm$ZKGhoC`?=?;b6 z&KOkoG{a5=)Ug z{iOx@nQT0>op|ecXKbAs(SEEK?3;5Q7vnSJfE1ttgsH_^nDg`Qlp%vU%Z1R@Igt;% zDq#xqa9Qw(PLDjp4qq$KVeujy02>|QdP}T}{u+9v)Etyr*sZv?1Y?k)&QV25Tt)b1 zP3x2;w~+yHpXR3e;gf~G0;>^MiE9UN)3=Dh3OK1 zMQU&@M4yehk{my>O!;~%$-1pK_@~wuBsr=pxnbm04>ZW1Ol0t5IYyeG%YMo{uq5?a z_!!fUaM~JhK>Os-$w(5Xw7Ut>*(=EjTQ}sqmlYwLm=yX>ymX+;8wZRrm;p*~kR(~| z8}ec4-cxIRG>D=+EfXlw$|v?qRmQAWaWVOs2pUJmce>P}g--!TpvU&YBs5NlW~z)i zeo+qHspL?d&SRTY$G0HqXe9i8L%7H#=?F$)L0z*;E4nB3$f=9(+vA^)WGv1n)R7B2 
z00!!}t<75H=9#4;sPKa%p~U=51*$ZbMk)V;I*&Q|U#atZJvh*d&PQ12Z+rZz_hPPG zz0(3Z)ncAgS!3>&G(i+@dg<>bLJZbBdErLO+Y9+Mhwpxjd3^R8+;)A%nl!^Vs^#L& zOOoqJN*D8ZKG!AdzZhT}=pXvqfrE#SpShy+;$MCvWTfQU;BK#16JVmfP(!;J@nfBk zIr#W}o97YzThs`q%xDdlaqdvYoxq%kly8ec#gUgsZPx8KydYMU%Dbf`c2jBd#^xdF z@r9xLBxC^i^!8G-$m6|2sV?N=|QWE^J>A zZ@XsEAFAI|qqOTXjk`_WY}T+tc^gsr-+kTd=bkrI4{Jo$eQvU43-(B^igs{SDEJoB zr1%`uur>JRktn;9NmlP=<*Xkm(s>_!yi!{(nqTwFbPKGyq`14PvP+L`{Ly>g)vc^0 zsY8QHhZz^NkG+4(A5rg;Qos=`lMs<@pJ!vwrtj~dA)}L>U0OjTg@H}BAzD|54H_hO zKXllOuk|Q`dn5WF`)UYUoR^OanFK@MuNq^|@q!^xZgfBsC0KEOTT(~1&)uDr;JHkS;>lhdP!K+;6)p~gbz?YC@Ag% zl`o4E84MkrEy)*jWEXR?r)eFKY?&dC23W&f;s>`M$>rMp3YI>Tqsy&_+MUYOf;j94 zDdM?a{AoVcAun93vrs_}r2`8u3DY~EQb}7hh!UwS#d_9=h@n*YOtrUWna_s(*0@Io z3vf>}aVg&!YMdQrpDyoI-)~M{3M5dSG@Hn7#gU96PFDAAy z#GdBO8+N$>j?Ud>AV%MjE;=W7GH)`My)WCNPF(&HlkQT5G$PUtJ6Tziw&Q?;{ZqA$WNDO zOkDKe+S-sZ4!q)Pfh_jU1JN{}4he}wvCf;p*6HkfjWqn${w>SE<(D1iQF)OzXla=u zTuKOmw(RkEw$+dQ`TXrXc%OlDqQ$zD+tQ6@jV4{N^SvjOCGk4forPgTIM_K;XqE)4 zsmlkgrOpD)MVU!Gj#UrfFz-0KTKU?nM41LgtIvVC6DTt zZw^M>U|(#g4m_!m7mESp7n{af-k~VI6SzCHVtV zwvi4Sb;=p_dpSWUZU|gPkeaB~J@Wgdcc$K)Rs322yR-Cp`dKbsVlO1iA$=y+qChPeDbzaDAYl#{_)@S$Vhjm*L0 zb`7_y=5gU-CXRNFY0f%k?WK!`LGsC-{m^8F^UNx+RQr>AdYG(GZjnBx&f{ zP}j@>8Ic$^d4~PqP*_mDWtar?MDiea*E?%w6WOPbG!jhMW!01&)i5{KDVWV_g?$XI za>X|U^{6xHWJmA2k?}?>(L?j=ou!vrxgZYJ-M|Pyy%Vpv#QL|eb=u5utRMbfXKUrh zOBSURzuZUj(X!RXH_N{3`PvC_2trHy@6FDq`S0B0#zq$iwUD2&uM5#YBAgIlDn_^v zd_;BW)=pWirAY;&Ap#p)*UL^miJcGOkf2OrkKtcFS9!YAg-$4Bz z_TDq9$*l<+wgEQ84pLPVM4I%TSOGym=^Yj6z1M(hVPRM=ly=Tu{Gjq-CM4_$ctVf<>61+=S1?gKd z9#7T~8BgZqF8OnP(yl7>Ch-=rWTmpO2?}=)BmfP|!!UqzbgCLNu{ydr@jB#1!}g+V zft<6Kcg4(tEY$qAcjOETnUCC_mBn{H$txRz2y1%3mL)M|DbAp@X8aXg2A$+5TTh9o z(nb<;Gms?`ili$0-Zu@#HmFR!gsEZI@e)yd@%VUAJA@NKpQXDhA=w$M-SK1*Mx)Go zON9#E#o&Be&ulHqvCwQR*a7{?OH*qgB3u~CJEiH0x9kWE(LKrw6(55Oz=0Vkvoc#g zLGW^j&@z6JgV>a>f9Z9yu%|!gksy*5;3c$h8C z*8bqQ=jNi!_?NiG%dCzrS(^r=?X3ng=(xkUU=wb0nfqw|CT6m-UBpjPrCb1BJ`|B& zfYhI{G3+yn3~7gKJqoMsJuss(RW+nZG>&>uuX%raG-bJFamr$G1A|9`$UUrN%E(Ak 
zhu|%tOq_~yKtx#}+|F-Z!IYr&^RPHqOOmkmdncvg@%)-umK)CwpD}a*EG#+)Oq;b!8p)nJHAlF5bUd#@FYUU;#6#Y!$>V29;7$0z&||NKd;l}DSzOzu#+up z`9@*@&uf=anD1wwmqDR$10Ol6h_S!+Pck4$>sEI%sPbZ7s_wVZc6{_=9S*{7G+76;KR4P9%DOj{6wi<8}UMcuLX zTmFiAhSMV~W9j;+#>lcSXVzl5@l8RMdcXo(f8j3v2kZ;pm5yO|Exw>b*`@lx{^M{F zZ3(vTI63BOQrbS#1{qsqgb4^QJS?;pXrS3ktowXz7>zK;FE5Db3}3Va8Z}QzG4Q~D zMAzSE<7zWmqei2Rk}bl|cHDGnjK8u24NTSca9_MC0jm1*W|M=A3ZSO`oP3&l64RfNqS!~%w>bt)hP`_RZWR| zH(dM=p2}}evQhvpOP*ebx;x~_&AgTOX2Ltej*{J{E%#X67P(2=wq@*AHPv63wmB$- z-jLw=M<8ag=LyzoKA^O7`Pc>XG+M=?oh5yD^E_E5pI|3ZwhJQ7148HsGPJ8`8@Bt$ zCD#F4%Em&YI6NWb>^)ATZH&Usj$%9R6LCBV1h9Fm^}e?p*jOl7}$=isb)iA{hnDiy2_aflzLP%P@72f!9V_ zo||~$@jakYHn5RB@jQq?oi@PZp#4&0xEGFVzW_`g%y9bio0L3@i$C^J630Gq$LXU2 zEmsUD^(Qgwb4Bej4hwQjPOIDZpHQJQV%%q^ zdkYLNI!a!kq^1w^3m-V1>2OBEXNC1yTfBq}=yGQG>E*e_R?@aeKhsJSLdG%s3u)A! z<>9->HhyTC9HSrab_KDALsDWgY%m#Ao$;014eM2)a;<}K=U>oFAyEUcxT8K!=J&Hs09TWVgUB18gn9l)bT_x*y zahCw{im*jLdL{gRTCnLL$RKes2yHd6cxMJZ6fZ{%-VvWk<*PfX@`!b<%myc+N*g2h zeIH)s@OkjTt8M6@fbGb;^_h%Bx%reV5xKQay-LTt>VotesU05`QfO^aV(y6%<)s(5 z2TN|UTb=(W#ha&)r1b=wM}6DlpVZ%9>`YQMP+o2lI{&0V{<0WXnSg3`I-7OAJ*wwp@RxE#h~b{h#IjPv5!g4`zo+ zHv88h{OL=7c^r_st8TzvFFBW9_`47P+rWLqf$GOzRrdRDU;4{pkiXn?Xg?ydtmvmM z_@~MIeFnY&w4zV->heDh>epAgs}Jmbbv*a~z0e&2vm^de_1ypU>?{GHo1C}2{lAay z$I?0N-;Y51q#pd+sr!8{{<_d%v`2M$ZA|3(EqmI(t=e50op%ZtMJxnmOJxssS<&<@yxpuj7I3P>VC4%}OzB2o66sIH@6xdBpT{c)NU_KUhSThz}9yU$3mL|odvcPt~(i#`Xb@NI+;2C_7mw)k;>mYV#a;_YQkA$6nw zWymj7?!UUt_5z{D9$1(n(Ua{IYo6V7X(4_>!~9;$?}iSDi@O2D^m7v3xA`(BOTOf$ zeKZKA0a(B3u-7X(QQLRV)uUMdZvB3~^3N@Ch0=#3J&OMsHBqc)mefsW9_&@J*)$Ey z{T;tw5%(!vn>-;|4$%u??sMCZ2wAY#mihSu|K%ZfPs;9&7KHWX8`yPghEHnL2gv_n z4ygAv4MiS8h}>8G2Q<2~vLMK6w3okb`Is(mz@;vluYx}mh6GK?@`p7Q{+Bf}K6#1ZC5NzbJ^yQ?*zp4rP#==YmH1@f;#8*?wo@@jhTqgs6ObN=7A0D)p> zZqYWmY`@+7&lgt(4u(;GIvEQN9XomNcc^z4b+wOZUyCQeyZ;x%WGd}bxI6mcYYKGW z`KI3$!7S}14+EZZeN;ir?>GNCke_#2FWnVBt72IQIQciDe-qZLTYHRf%@k;?i2Cc_ zZvN+sq%#yvl#c=wPX8+`e_Zwd=Y^&CpbrRf2jY7ME}f3@u8nd_?|Zn^$R6_MhX2QR 
zK6_Jen8Z4(vf-29kg_?`_Q_lU-T>>tbo1&q-Vb9{I8-1w+)kVsHaF@D!jklPB3mm0 zH}hY+%M@EA=0S;ZD&&NsS+TX#19PmHiJorXqgR$4LTG(R)0Rfr?Zi66ZbXjBb_R?0 zbE&nViiGUhzi#WF`e|IEeCQ<1(F5mnm8)i5xh@?undn8}tJxSvFLrYB)(^T2Gn4tm z=nuWo34gpPG0H^HbER1+Wuz96g}y`qgGLiAkvI1S23yQK_)d$nvI9jyqeU;Qu! z%=7GLhSnli?beSGWl&xQVSRczy;E~?2AD7vXU@T%aV1en2x0m|=fJ?rADiq-&3A1b zlF#|@mq29FnSJU8u)JOv8kP*Kbcz(AA<>f##Qsl#$`I=m{6YoRrURi@C5=8syRCTu zhC|{Ii*>Y#BAR%t0q>~>77z29xgY@#IN#Hul!g_%IwO&Zfv>JzLUvsSi00;$7!GC& zRlIbGXYbnw1>4WO8N1D!W3}#AOseNfj>Pg?)G^ppPd(5RFre{g|Fbb_<=D|B%j<^` zB5Evyry})Oq}*HXTDqr9enC0aN3%D)R@yd9#S`Z}Y{4{kFHd3-C~XKdJDPjDSi!QQ z&Us@t-nq{_ZtqmAb)}JI6+Us=2!ZbfDiP08SB?~(qZj&Fos$lWu7 z(;1c@m-@fC^W6c8b1m&&!C70(Jk1oe-V{qCa3}*I`BF_Ad(asJzza^B3^>VTXcoMkYD%4e||ug zJ5ak2IHvM7;{DQp796(R9XJ#Y!u2dHFvk**&HeVVbihVk5}xZ>KZB_UNtUpD?D9LL z-Qq>e)MUg_-C@`o(r{h~-9DfyvHW$J6mUXOGTZrHY$9|y=+MI(OG|NIy0|NXG`r$4hI{d80T7QO?KW=Q;iP zTtNq$g;D--L{A=jdBSX)R_yuk zf?cX@8G4=@MkrSt(q(Mcf+qxIFDEd%5)QK*CK}h5UKCD_JH7Gx5n1ELHo9Vb@_v* z7I?a%HgO%sx1`bThm3Atu7$I36$j3j4G2x;NKVfVA!Ro2o`GRsD?x%d5!DySO0{|+ z)3|eQVP0APIpYMgsVK|BMov~y>L4Y^mMC*hP!9 z?}<14*?lyuilW=amVI9xTD$HKdJn_cIiWgsl{36(qvaAS!YC0p_9=zN`mI9AOKC_f zj>IwnA-xwkg~`duNj7PRfh9ftsi|FZS@o+OCa++jlHsEqO0UFz%Q(9_dc?Z`yD%_? z@47?a`*dUgN59-6VBi>QpH0XANOomwv&(f7%3BALsRJXp?Am7s|GW_%gZLIejAOMi zfIR`GW|kU};gG{rJvuHo{F&KfAuQ2eem`Q^c8h7_K0U`%=-W;zp@CdfQ-9RZv+~7s z3(UNNz#-mh)45{9c2BKY2hSA#OScTWnm9?29s`@#o@zJ&kGhQT?Yw>te9nCNhXuX? 
zOH@E3C+vQfH?A9`_*UNNG0x=d!^XLP)sx=+wzVJbC10oBRm{QlJM(+uy zY=JsM!CNRJr(wu<81ECo@m;4H=_}>^5~6cD0C>1O3`|IEF~7$yy=H;tc43KR+rPO; zV=;+Uk{JI;NyA8<9;UBu#Jz6&w|!}b0+DNjLIXAy;4Ydo(XD<1Uh`fgzW4K+8=Mbj zEXE_Gy%=rR;%O#XhYg(TZWZ*U>!`hN8RQS?_s*uF@kY%8fij1rI;%lQK!4P=RawVV zyvaF4tLOMabmOByb{$t7I6sE{W9nxe}^3N^%XDi zTWK0k@K`uH?757xT-0@FWx~Wc-L?u3oic#Uhl_4L-&;6YVkL(*{QyVPWqE1af;g`< zVNp6}$P-&4y8aRd>_EFwq<*ASvHUVAYhQatRI*Mg%W2h&Yrd2XY_ah5DlHo%{2``k z@0S&q-rlgbnIHkB0B;g;qh4L-7cICy>?qlvW6tLA#g6BMSX(~9ViG;HhC)EVVZ<^i zU{*}T#a0Sm7VpxQ);;ye*T8eT&ZMAO(PN@HVCyxcj>AWe6`XJrD@H9E7)$W>fk~ES~MB(+H8~w?}&45nbI-*ZXDBS4Rp>3AyusT zvZHhIkzCfOttrYJ3t(kWp;8WpWx(}4wz@TKef*@)xGOp(xGoilZUw#Wv=lzYvI{;# zXO`lB`OVTYuc*cP5%X<@l)(IwdUqjt`b34=74EX3^4hf&;R{3S>Y?yE2~V&f(hbMv zBZWx8LD^o~+w+fY4E>o%VSaePZbPjsp~j~TpHY!42ELrE#XtD)48HOWAzWV5!r$Bw z>dyl0$40hsNO29Cp!CMUmQlKO;?I+D_Db-M6aGQi$QI^_?;wwSu81diRtFmZV{K*9 zg^qn2e^_Cp(ZXB18or&lJKQU2z`6p8^LE4Lp~sVMtTtj(qb(4X7ttHx? z>26*WVc2@3S;~#2Re}VQI3j9$^K1=ffmp9?fzZhtnl_=gLlREcslK^LiEc>a&F{^K zA`mN`<8$u?7GJ?vaM$$a%Tz$}E^U8Kat*SZDPx@X$@}4eYX^OI$xk~R5PJG&)Dwh! 
zr)`l(E9Jtlc-15@Px!fD({i~Yo)X>lsoQKHc20sTpB&1%^Zod`$x?lSlLR2gOts28cA1g&y_{00s;%VjfFYg zsXJ>pa%-$U7Lm8FnjPNFnKIH2A;xv9^-V97u=>-o>`DvZpo^EL@}B5G)rF}jo&m;7 zyG5+w>js4WkfKd_En8nsXihk#sy=)ZjT)j2*&4_3kv0$2S6bNgCNYs*H$!FfH4%y> z=N4xRS03Z)BK4o6^@&Q3@s@?<8&eF0Z;7|VkB!mh1f4X9?+jsJcF$Bg-9(xX;B0ut zV5#l4OU%m2DbAg+xcEgy=_bMMmb;TfjJeE&`$jk|XTB{nQ=tSUW;y5jvd2CtBGcIq=54r5>V!F>v>Dl2S4M;1Z6uiv1bV_-^&C>g+^~v;}_?H7UQc zu3dY~ZFTKd%)N|k)~r-pU{r1`4MgE;R3hl_mW z;?1~MZIHD>-K_{*A2&H$)>7gtFvmBk&?-3qa|6~Zj=$hd@b0Slo~C%m7E5mDDO z!f(p#%Tk=*#g_H8m#=9>Cmk03eN;X@=f0aWCtZV|!ufN&@Pl(z?bizWap+GyY5o#e zoAm~1EPVdG`xET^Kv_s2Z}ny47_%wQwT493o06cthE@gOF}e)HaGD;i_+^KR(fXWm zK-Eo*?7HK=%c1i?28O1J%s#d2Y;2ftBIEhmmIpF(EcqJJ>iLRD#aX66u0q!m%>(CY za)XR(9_T9yG%yWtMr`6cHwyb%SOr&iP4_olB9;xg8|p(_8J#8$6tr|UW115@whipw zzmPGSTsgZCJiz?id>3@<^Mee%(g49u<#|=&$QABk*RGX-$!l8S{V!d$zX1jGa4kVX zkUj#k^nBdVhvIab*U@i%CW-adQf1An_qHRBPd1&g~mpafw7MgN5u#>KE3*0Te5~dCpSXZdEM92+Cu6 zH>>TvJJ#yta*LFKBya-85=ox+#X8(2Upg17h7sCsuImpbJ?y$nvD&{x9kq92or}7$ z{D7OfgRXEzYd+yDo%zv;59vqF;%}*DOdUq>B3JC~fTdTDJzh5CY^duo7_Q}3(iB2k z8zd%q>e1((^~=gpA=quh(Ziu@b=|9WYFJ&z(Nla_t`AgD0Q77viyO7O)j|nr5qidc z)p3;Uq6_P+OZG9hw|J`N1Rp+KEzZ42fsRf7uIYBGc;BDjbxCB{(Z%;(uf?!q)E#9B zMAd4k=jV{(kk&G~j!11u>3(BNB6pYY4=z~}e*aE@)G0DqPO;0_s85$zs1{l%AD{A61F8&}zLpz9rB~F#uftU`!LN0tl8`-A z{NUgro9Y?u&?nVq=0H96hn7PL>mPa@^5YAaFKs)>IN;`AhL(c)ZI{Q*(FpSkVW)JwJN^ zm=YQoT}-Rj#}4()k|oeqIp1(B+GJa6u(+cgq>0pP7*LN=-qLb@39iI%6LIwr;(Sk% zoC7$>nTAIAt7~g$f2=f*Jk3%ztKFc-aQq^snaA?;kolWcpWzSQ-dEbB@?D&|HDL>t z%?TUBIMn4fCCEmIbQ!7DCITMrRVG)uKy3Q(0ZQ6u{zE9i zy|b+i&{Cr;+T515N>fMNRaPcX5l`Z94jQ`lGZY&e>CsZ|3z0~x!pd(WH`ZQ=40|HA zTOc%26VsY@e&K~1V`ned8HUZ2S=wHq`0kV@)qT$b*eph>@BKx{GAV!)^+hvz$@a1W zu#$~WO5xo#g}Yb))8lZS?C7Y}oh`9{A9nqo&5nrcxBS7BcZ=|5g$<@!-qLsPqp%x>xlB$93IKFFw$K;Mh!if z8N?v253v+8xw4B-L1f5L;p<8ywr~SH>!52!=ysQhR2T4Mm=CEvAvv`1X{54TYk`uk zcNkVOZ=}PezL*D(xRbUG>Otf5(6Suy*V2N})oK0ihdY%RGAyu{krpPxU+)*mLU+k_ z4cMU0P%&3)%+FS0GU7`;HqDmvIH5PEUs-i&x=NK;Oon?U=CMe++8V7C#wObgx6eYJ 
z1^g;N`^m|N_Wa)jNZ*o_*#w8%59)#g6KEp{7jw4c;Yc{HSf#y{*xxB;Re_fdFFSeU zlaftdNl#!x)%@#78tBqvSA4fd;Swd*=D5Usxkn8T9+?V>npGVbUhBt_UVJz*jH=A8 z#gq~< z1CJ}gKI*&FPpz2L_MVa@8=4rVX8_%VgL zH2<`7?}M^lQ(uLgx?X2u>lpB1#D7LjJl&82?T|IcI9E`IJz{}!nUOifU|n5r>LNf6 zTY#Z52J#f%bX<^}Wx1OgUkqY^eB+LjN~{eRJMXPA*cFWyNRr6wlLt8tzqw<7Bt#H~=)r()IB;^YF5gia?Fs#SYb176B^yd zRWZ?N_mz*8^0Fx?e@UNi<^;NVH|f#6!w8$=#u#$KSUa6AcEml}tC^BY|N0O5l9V{Jo`Utb+C4;qQ-uZ{0M-mG|LlV5VqsuoU80dqZsOD)NMi@9QR)A>2 zn!1JvK}^QznZ^6r&S5xdkxau;tFo2G51sj_MQuj+-shpd*|ESGJZ!g-DC3o> zp$DyO@4uJC=4iVG>S~^Yx{T6}DXYqv(*^TAtWVHYxPlv=AgOKF6HQwu>CiqfEu5mG zDREhC{zESx1zJOMw9Ib0wqYXQl82cU{$&V6;yvx;X%IFQlg6&gOR1#kH9;PYTnyU+zrh2BInn0u<$P~TwUa`vZ)D{8O zq1k}o~Bfpe<1 zXMPQ{C0DoInw62jhCmV`EMr&N^U4P4(E2V$`iOWf*U?+!VXtLY;6E&c5QqrhK^e^p z@hN=|gcf`LaFj{?=I8$G8`7m+<+I(Y2W0MZ{tE(4gzi%oxdn=IeGVvdK631^9AsLS z?5%9KX1t}&AQi=U3^Z_e_?~XJ6p?~nD?*HmV`*>H!r`)zekQ@f!h{ibcRjm<@fsN} z!yYC<=ZNNm{w!XZebTz91p?=IgpN(Fa9S)Kc0$ywgfU&GPDmJ!K6+>=B-wkY1S3Ni&$f=sp-yHhn8;DoClPUD%C~3nT zf!4Tr4^Ke<->(-hp%lY}Z7tIe^rbnoc;i_!50ZyWiznI0jI$#0OZ({ya6?o^PMy%1 zHyGM2NIOWqoVjxUs^r$n>lAIhwI$FfIcsTlmb=iusD$B+8B+a&9F&5OL3-%>mtTvhiB8?xUw5!Pn5_$h^22Ew~gMi+%(pVO}j5ggk9*fN@` z*#ZqW^9n&a2ZcblFXfkhQI>BgkO`qXwgnvRU^h%W)OpaYy}Ip)#1;o(4F}wV7NiR_ zF4Dkr#t)o}a%0(k{?4L5)ep7a3YY?2Hd-DEKUovpKd1uDs8a1;5-8G~wykPq9|2YJ zHCk-*)7~+V$ZKqdx2{08s-tXbD3;ZTb7eZI8hG`L^!*^-wm#(4;M=VyZJ2U#l9S|} zzJ7IXL~cK+RgTVEHyyY+W+aaLda%iF7WpW8e7sibRA_BCG z6NGd`EPLN-dVhc#FIVA8HFA+X&(UldR0%V+nBF_Udk@UlV(#XQ&POe*P752ASQ;mCFRKAOWOh`#9R$7HSE3+nv6f4o9pn(Y$qGb_HzT^g zc7LbNv@zoFlb4ijvMQ*iq&*Vzg-*f-#CE1KeAY^$W7BDv#7Jzfd#r-F24Q%q?xCh%QAxe$-?_gJuHet;sk-T0 z7rv}!wr_6U*;G_pMFjxk-H58G9&#PhvZ2&!2&y64B>oi{)eJfY_BvzZmu}5GP^WL? 
z@Iy2hWy3zLeov&p=^j)$qcCwWyA~mgWMA^R8w)-(ui<5x3;e>p zrf^S3!t8qvP9HqO?*=Q3VP_V zlg;3q?5_Hx7KDHrkTtx!r4GRBa znb&lEwf2Se6t;02g($q(6?zQ0VMzz?k}wI=)w3&X41TkMzEUt_Kg;SwQ4t{ zpu@JGN&O8Psn7Sdz?_Kmn=bc0sM4dKBSJ}T8~_KEuDe`Z*;;PDVF=E&gA1t}u&WaW zv#Do=gbS5@4?3MTXK{z~ly6rxEz&@~J@?EfVfT#@Y$0$zLwA&=^`-LRI*tU znw`vqV>^9OU5ZldQwbKXFQMF$3r|_c{Ad$ed(aHJ296zd;}a)jo{lNy2ZiBp8hy;2 z4P#!_vtLvT@94?M<_V|=P2qq$?>H~6rEW6v&Zl6!O~;sKWeI*+APeN+_2WB--)+z) zoFXUHQf)UCB^H=-5KI2EvvWn3wp*WX1S2Fb>2VBe*g?(e+9dEnc=Uj1zCbq1{)tm^ zEr)5^X##6~{lj|u*?!SiPTyV6(kx$~aTNw-&>+0>r4&d)#_=&wp)#eCNQKKi7LiP+s54A8G9qwyIsXiZ{1+`~N|B!E z^{jmQ3Fp$iBn!?Af06EvF!RWajeE$#k}OcSPKQY(VxnNHVqKQLS>o5746Wqz7X}|R za6kO?(vD`Lhe1~fj;$cp6!gSU%Q(o<6cE&;ynN%=&w1;f*Ut`A7CbuT>I;^#7s#cOO>gmpmz zQp~%-GX}`$g_?OgjY^%xb6x+AfYVx(QG`|@l~m#ROSD{HJF; z6@yd0SnrBO1V{}ZgGtrlySIW`F~BKoDB;bgyPODWuCp6@S9H|N7Jo$i{tJrDl)q2r z6gae1rY;>EGMmjNX&OCxQ`80#%HF-y4+oe%LE3KtMsVY6V07qaei*h4>>2ck_@jK! zeT(T4@R{BJs)RqIDOcC8Ob-3s<$-Hi=2B>j;pC!0+cH<7iTL( z-asEjmfS7;;bTcX zB11lk)xbarrCMT7uWA^Z=aLEd5tl0+=jjb#cq2e6uxU`!Fi=0E04pS83ymD<#d6?= z8(AW|kMt!SBqz#y%_8$SZF(5O(7XZyeKgQD2EYj0=>98IV&lo-ma)ohb?78$Uc@%^#ld>ML`) z-zH<{f=4vU5_h3W6!1Thg~nhOCmjP>)E3{g(>PUZ_07`RTY#qJdu95)R1bCLBx9mYhMG(#1nL2Dc)6AN;OeCRJ_RW!97K<+eB^w% zf3n^HR`ISEvV3+L!&tuK{GWr*{9zY*SRbI~6hV?iVB{<&vb z`XX&QssuE6$XL>gW~HQ|KWU?exIMogf6>K4;7|0VvsaUj?GRKD^(MB+RZZ@a5S|h~ zB+?oX*MfjE%t5I}5CpyHQ{+f!9mDQ?RS!g-lh;gIWME2&B%jSTUjq|8Z_WC3kdD%; zQdD;fGzgJ{b`hx~L}VUZ$4|mrE8zKF%>`z@mEUpAyQinVQwAU8zfSpCjP@JKmOB)* zu>0;zeE%;YD^O1#K&Jl%WQc;MGHxHe3>`A9t(@63>Q-Ct7hEXk%9WNJ1ITb;kW94CM*Ie&<=P^b(MQk&XnrU zvT^pa?oI}M^&p_J_v^d;Oi@A$1holv?2OE0bE4BzVA#<6M%iT0Nw@`@TDECbQ%7*? 
zy93)p;Kn*A2ks=Yi!Xi`K&u&@5#8TL{X0aScBI3p%m zc7Rh!+$^$uAx7sstVa>l>jwiFkcs5cO94l#mgJ0eHuF3uE>z{z^84`}nl5833Z>tf zcu?E?J-cKvybtqZJN!n?E!Nsi^SFO5&FSHv!RO-e%{!>6af`G?ZjFF0Zl&DDw0&Z(F3x9dB?jHyQCP*q~hn3u0iV}w}@n& z$`YyT?sXWrJpviIfVGPUhUuF#y{+X|L5z~ zelr?>y!f}c@fR25yFK~;iLgNK?L%YYUA8st>cW#j>oCAj4I!VKcN_(J;n6u&ImdGW z8r??j5GTov4`ex^^;Q9=DqV2Qz0?ut)9Qf3RprTTs$zNoHj*v>&0)@oFuCM_`xs0N}% zYkqv~Uzexw%9`6x0^8^(?PzsvM@4N(AChH4&MaA(o9Meuf|G#by7u*2{ISCQk5KZD zPTvVQxn}8@?B5k%TLJF3xl!2i)~eJ6<;Ri=!$;QHyw0r3gJZ7MYUT<#c>+8ZDpGvx z>5aB}4c+UW9OC+Nf1{TYtt7q}q-oi4u@JUGEHt>ow=vO?CXdebTmk(T<#~~x?~`)! zN~PzD&^~SholT%xR8`uPbWz-8Aq>p~njy^fI1rnraRyV(Eq~mo|6%GuX9RMZ92`FP z*9zwWkPqP%89Mb^LD=muy`bpokPBuSbb!6%K$tOk4x+dh*z)%^Qc)8)g!?Sxb=`Wj zOadL+Yc7a8Gcunf%@rvblH*`KA%K?9Ho%e_v@&$A4fUFv#*0e0*j1-Y&zad9~M~VSRl( zq;TVB=8g}f$IHotU_g-w^1ykC>WB!};`@w=YN=-lAY5(;DnveoAcx!;>UbpPj{zct znq}FCsf{I0o+=1PNT1^zc3aD{n+%Vf!kU}7uYP6k(KbX{cBDn>9bFrA(Ez7D{NmXE zQP_TbYP6yolyXFIKv4;`)<+Ai>fUq31c{ zM&^g-Cr*1!2;dJnu)o2IiR7kl92~h`=lEk{}4#o=E96Y-f^DYqkG_ z@%$x(@^jB$jok@vUf=&WZ3n6q0CVHx>d&fNb*scJ=RnZ(-z6+I41}@c$?P!~YUzbT9(fMkevmi1Ok6n5;`z->7@F<9qoG`e zD8_)E3qoy|v}a~(DgKAu@rTzobs&cuZtefO-UdoxwRlb*6rcMYbPE4*Vp6YbSBXrH zu5kGf*tkuyvlx9}btDLs_Sgo^ zpFtmVXcf}9!+)D-H>fZ(ZxUx$Ki&K0hG$Qi&0!|-;uSAT*~eFJhA?;4rGhp%yaasU zHQ)NpVFBZP`vEsE#w`6sBYP|t!5go(`RxNu3c{3n_nwrK(>-}QpzN?|-TB?)rh#WQ zOh!_ipebb0RWw2`g;?BG?=tM2h>W*r(aF5_`0BkR2TU6uEQEQnD=9JA0L2@a6Mo<* z-65YN;WgFKmtpQVF2meloG^EgY^_(gU#ex&>ii89hK39TvwiNds97_2PNkaCb**Mu z1gBQKa2eXbxk7{Y{+PTxgPo+))ZYbz5-&Z$g2R{Z$2m8Nb$}9ZuG|`(_W2ZV5`kGe z7&k)53U}r%9(f-s{z#0mgR^^1qIHxg>+n5&wucV9+_m#})nU)}LH<^q-Iov^Cr0^y z-ro7pG43%6J^6qaK|^=tsFLKWL9b;;<@>b~w81mA9DDIQ!o6bzvy6DZ*Gh5e;}vH0 z-Rt1=rv9M$;;y#()B~^Ia){7|NV*%u66x(y1|j=8Iyy9qjJY`x41Cvu8ddjE9%;Gh z=_x5})qmksa*o0&3EzvmQ;tm!l<_JnzY0xE3@I$MPw{JTm3?<|pS!%%^bk_zo^G{T zO@P8N0pAO|Q;y3jF8Blm9d>iScIM#0gF*Cc+4er-Y}nvji|j>(B0k>-;#l**JL*R0IWMO3>BTwb<6YE)nA z`f)D*?0Co1gtYPng{RFIuI~B$@3Y;WZT(39ymDri#K)Kf7f=#`8{ooBx$Y|U9dP0E`orftGT>rHx&AS=EV$VH`u)p0 
z^5pNRG`yq!xfQ-Imh9O(2a1oP4Pt$oS~)ooHtiyH_$yM2VqC}1?*94b^YUm?`GQZM zdC4eB*-Q$lRH;wwiPKsq#I{SVf}gRMgdg4Tm2k%$q z$HQJkibUAmU@0^b{7;+S}m(*KPQ~VoBUdNG&ApJ zYfr0bCYPC-DLmkXOs=X;u;LvbZiN3cbshDmv+spWvvWF{vj^$=*t3RhKN+TW^xoxt zHD^Cl);nnZ`sAEk%ctv>v%5ZV(VJ)At+6{0W3EGr&9jah)@HvSC>=Dx&z2QAA#~<+ z&ME5d$;>7L-)Gs%>>hpW1;hIr-D0d|)P@l)dRgsM?a!>FltV@D3vv~DzfReG>aus* zt%c3n)4rmOc6aPqIL>0O>TL*r2|s<7rvy4S!KkXDV8@evu;*;{?f7H0v0o}u$DS+P z_P^EEP6)`W%q~xIBd0HI>6-M%ASs^)q?;pU;lL?b~Qh@SLg4)K?p&Y`s}`D)us# z5zgIi7hiNZo~BzLIgzDk-+9*XTkf+3CJ%+i)2g%+?=Iu6g*)h2o;Pm}bn=ETG21gS zoMH-0y=EBt60*;CRqRwjBXTq|r|ucf;70hzuXnWq4&rN;?~got@Z^j)#Tne2d%A0E z?~TYpxF51xCs&Q6@F9x~_|TOCi=bbgIrc#||pm4ZdxBa6g^(s@o|p<%vANzf!sqTbNhDnxO-#Ig=K`|DhKW6CiFp`(gm{iNY3&mS_mSMo z{ZHd6NZ0x*{p?>qrZ77}^0}vyk0zetB~ggspwB5Qvfc>^y%3BLUfmT0F?*(;)|N8q zupk}(^~)|X8u#E{YAt_*^PSF6^f^MwNWRSk!3 zz(La2#;;6_EO$73>OI{$wM}3a-um|JbZxg-mS~zW4nFtVrUT=-z1jYDwTpcuGuxJh zfmcK&NNkH4mLk(vZ^XY)b(ejQm+tTzI)Qm>sVPDee;6k3dUgZRd_!XGjp<_9r?1!x zhsC~G4(c~n)^#ef3`S_*D>Oc*UJVQRy6f)qOUFnm4Re8S{NwWdJY071r&yoVT&^p+ z6t%`jZxL7E)_~~Gj*LA{24A@Ib!Q7q7{?}rc=#y!4hVnzD8njz zZED;|pSR&tp~8Ywk=R-X*2p8y%42-ClncXcOnAnLf%fFN%pnII`lRyv_swF%dU^P( zPJa!b9-7d17L9AB7NX9)oFna|6<^(xTv<#Reo-hB@zPBT@pZQPToL|px!;c5zq|Er z7`ycLBswF}`#`Yxk|u4euz^NiTP+Gvnb{4yei4zg>(#4QwzVtGZ!ydstlA5`t8uE6 zc#$E`Qx3!40}drKQ}fON*mQaNy~13;oIX>a-YakpT(I4tzah*>z9WBNFFzmojw167 zk;CLWw<-1>*bBhZhqEsjpS;}r>AybEOQUQZXZFUr|yy;E~gGxhHHCV zRqm+7JRjsEJdM5o`rv$?g4`4Ri^naIjQJ05GmGE5FglPX?oo(%oY$Y*TYfTP+Vs(! 
zu$ugGHuuHO{EhVv<0VH6m@Uj@=xME=$h${(aUwN!@ zl(D{PznUdpLoIKR_+p7V&;-m+SW)NXmVB)~EQN3Gn>^CRth9PzKqHih5?N8%uUk-+N;H;m=e|_n6}^8gEob|4 z_Xr2VWA{>ltlc2hNyPcx$DhmYxox&6D;wOc?0#J-TIt^M;&Y`JHacWAM_nO_6HC~$A=iBl$bw6|ORLo=>tRgn@wgsmo$CS#HNJT`PtNeh-%a#fM zG#73pl*y5QOD#*qJpa|?6_tVq+u6lFFGSOD)Ct!ABu2)ec`S96h+F=A+X^Hk<8vn4sJ#OwetA zQ>{LM2yXdp|4zL3ixo+F7&{{n_gCSu^s`61Bvb-B5VO-d8rweAQ0O&S-{~w@DN0~S z6q;vdM~oa6u5~2mAMNgao*5;2_@&j2)&IlUS4TzleQyh*h;$<>i7QUcN`!yquE z2+}nm4Ij+C2>L^9Y5+l)@fOXxRBldPeKl{pr29Z1KKRLJJWOTRZa(iJ5&IbdkY-sMl>Ur_3< zPy@Fc8l^wEwU?IHI)K;AydwD7OXqIlby6KD^WkwVoN5XbYtxsRsBdSYR)uA#y1dOb2T~ zDv9wEa-H~lNb=Ejci2I9T&DHc9s4`55+%_F#7p2PsO|d4H-#uDsy}gJIQkfzqfPv^ zY6;bL4;ostHZ$eO(KeO+#H02}tYe*l8|$Qyt;N?w2P3}GDdigNjgoyH2nSD*)GO&5 zy`hB~vDO25AKyB9h&%#J=3Szn==#U^xRKcYe93r37y#n+@Y5*2LvpPHo`%IjBn$}J z^{!YRIbJ{nW0E~1AO8epFpX0`{4pqle5PB)4v0*gL|Y zhCZ8W@yN6wju<=B%GX|T`i~9I`uir%+wMIY9mfqChp@4F#35lxb^(%&EVva(n5;0gE=I>C#7Z0bwCBgZh+B5OafHs7 zk&_|kb}|@jhMKgHd#dDep0Vdu!L8}DyJtBc>YeHlubWTXf)=8aX|-7oX3dUclvW08 z<@$U(5NHM|l#Mctn;+5vc{g!&#dx^^9eTI+pjB4RPNJo4etU@-NkxzEh1FV zq+`l0%nxO->7q)3t0lk78hg<%N-_S6no9|j4QYSA$72(Vjy&_uxM>q6y&{oU+W}{- zF7ev+i4OsK?=20*ZGBU3KP-~r8wooV&qp8{27G}7xgv}23lEQ{jpo|w`O@UF6*kfq z>tn}~n&6a5I+u$!`gApY#8i$WQ>6QWlL3e z@~0cWeh@(A+>|a^Uuo*r5x5l=w)Q7_Ae8$;mU`>ly9Qo0o-6b#Hw$Op~==LQNZPZJRSy zCz0!9$*yqc=y@&+8eR_)wcm=uQn>pQ>m0?KS4;oPptM8q*2Fg1_*6@qD$5WjAXs97 zVPbJo$$&uBgapwjX2<{|w_BucO#TFbeVb)enGV7F&LZWAQhhwFfSC$FKh(3@T7H~L zAo%iV2(G=36YZCd)!1>=DmNz*_<}fLGzZ~4Mc9CMM~p5jDd|f+xjq0ea$Vsz`$HjA z&Ks$v9qiJ$ea^e`GABByl1sLP`fS-~+oHlT(?}i(O|D|}(>YjFON+SrNW&Up)~Ie)x)+K7dey@`ERB zNrNG)5a*I|$0dh#Ws&3{4FB#g&$d4>KXuCG+BNiDI!HVVXK2gexwuCjCZRQ=O&?z?i6P&4hd* z4#ydYw4Q4z+fnOK8(&=LHg>HW^n%a#%#e)llETuZcHH^X0hacTX-<}FNlXKyBH{@kz zs@%vGX%-6WAdg$M_S_}xc)-YvYo^wW?I?csWJWg=kvLAi%7on22>;+(lh_KWIx(-X zPdZ9|oNTY@2AS0eOlp~%Y8NB873LC@a zhL;#*Vz7&nrh?wN83sOP%XFz9$9ZMCOVs+XfY*7&3QKU-O3GrsjzsuA*FhEe(YDYR zagdM6paHzr?#;EW@&l|b0ClJ4buufo2mvC|ROi5h^9v+;Bu8H3;XDH3OOyy9Z;TWa z{E6G*^valOfS9m6~IL7;BI%FNgj 
zC$i9))%@3saFJy}U9%sWpo=_Na%3@oxe3?Qp}9@r6DYtS$txrg{{(WI;c{?uNZ-!D zG9O9A>A}`JcF%@4Y$|Km9$nBadiaFyX=yv0yf4_yUTPy4miy7;Lu9Vkj1kXUK6{F7 zlw|xP3Kd6KRl2Zo&a|!*yXmv9sJ4ST1AV=G`}+l7VE(hV)A3mZ#}wn@g{{vF&>`cf z*u{F98x|mfm6F0+NxO)Rj!6g&H`#xYoLQjIukvoxYIsUuDJ3k@~ z5!YG23_HFV#;PPrXtu+JAel1!m}iQ+0`Lzva?}N%fQ%jti11O|hp%nAf4c;ko5^&6 z;=s0;q?;1le+#&QAR@tr+xox`vIwmb*)0Chr1=DhdtEt~{pw=?QrYh9AhInM zWS3RAvE?w|-350gs@Bk`Nfl9~{`6Q;?elYdZHO|T;YpA-+%U+hKs5E?>lEZ7vigK% zyjT%fwZ(_9V*TwO9}`hf!-q76}Kq=7GiQU|RXW3c*4w@Gz>zSGm zPCJNN0vTkPcHXh~Sft;AZSnaC*@VBjeuzgy8MTW}#Q z(!Ih~7GPBdVapfq81=adu?qp?GM!e;g|sgTHu{A|GtJq+x4IhJyKxoNEWj^5lIL;lsUiVzuYGe2Q zdr5gkbPlr&#{B)D;Bl)K7aN@DW$!W`qQ zNkbgK`5}XcYrNrlF~Z$uGJ}NK6%gccs5vOMc4eE*1{RDEIV5b(Kut4`<1wQ%aCtMNfe*KA)UvINLeo{ zNo@*bSGkbAJp42DOd=~Ualq9By6c~4v(YegKlkZlOGL?N-$pXULKKR=pv`S;6S&b@ zzJtv;xc#wljTv0BP{N1gq8>B|uH*o{52oK|02YSzwBRpns~2(OUI);k;G z3&`?SYlo!q5jNpx&F5KB&vw(7GD!nhQ%igH%xRKLPEr~)peeYlDN)3kurkiU{?Fx; zHGQY5JC}#yKfg?`l7V~->ixm+4~}0K-cqu@<76}Z*O{Z5am2TOXzJ^P=W5k8nf}n~ zpQuug>%a`-p@?8A7TYXB7$6LGdFmQvc2{_23KNQIcbFzlpr_oa7Jz~D!#ZasG7ovO z>)*qK$jd50pNJ{X5=dQATyXjvn*SZm@s7`OV|$mLsQ>C5Dfn=~@)X)>}WWG(G&J&>+@vV+R! 
zo5EC&F^Vvm)OX9Y>Gy((7($#FT!uvKL?o#eJ+xi!JjJ^u9tQSM(}A z>AYW%NRkuWNNQJF^lOm!ZtQ>i{wItb9CP-~WV_I0404YHa-Mpj#T#(gbyg9Jb`p;;O zs)CT8^tCIE4rm4-q9X6@02UaPOxm*3771E*Wln5a%y*1{Wk5-D73S@FkUgb^2hv;Y zh0c{~$~v=NJY{Fe+#LwYZnkk#1>z$_xVXa={{ze2xWg5L0twVYp9OJptiPt#ktib% z=~05Y7V4BB9(nl5Qje+NQi&WvH#S4VatmM6J)ae(zGV-6lz$~bVJ4fDzf?D#p9>xF1V&~ zo|{eD%jniXqb%2{moZ}%E*-9rNth~4Q+i;;9Exdl+`54UIh9kgaMd^<-gJJ$$aJcA zPD4MIZ*(mQBVvVY;+*`&BZDY=vT<=^2LP{U^kKz*Bdy9$^Sk+_+fo#ziW{)J-ph;-wF?qH8Ud(6^u7NP%7wcYE(d!Q)X6YAul>>u|aTNDGiE3 z=%%gI@~0NkQUIW1r*Y|rYXc5|S>S-y?LG*rZbGW>yaIM!UawPR)|dov?ke>*Vp_nf zsCX6UluUr+7blCKlxG3`zT)JHuHxNFtT&9v@g#e4;@84Y!)Ui-UrkXbC>z)pmB{s$ z_H;|U`jm*UR=Qn!7%`>68EX^IbMsDIRA(tO@2Dy{&)JPaiE~0~rLgZ-AE&&CG@k62 zW+R{I9j8fD3mPpe8w4qAuQ$+!5X&2_&(^tIkQBW>vm3FU*)q&~S?R*liViLiT1zJQ zXnR;j1YjvBgdrD!n7d_d(-<|ITF8YvZg#RICTD4sTNox*IHaDybx2w55pFGWza2hW zEpe4A>OHnV*-*cKq6eu|G;O|~kPJm+*RvoCCC+x?BwN_&Q$;2HzFxubba7_#Iqr|e zLVa(*6z{M9{JF)gcGj-D&e{xkwBt}HM13bNpU_?_vA%F0Zb7}rY^d77eqvmqt9 zp6Tq^=k~a5+o{}%?iEyG-?{o`ft#Q*CJWKP-~|^@$T4$bF>B2>}P1csFEUgA3W z`e642bSZWGtT;6JOg0X|-e^>ze<hVQJWO!nk-q!55=22!rIia=0%e zqD*t{QS?WYP`P@O9CT`b7x>ilhdmz;YhbeA#IVLkCG;dEDMRql-YIw;GTas+x#wEq~oYt!v2Yb2gnw7XI8Y8 zvI5Fz^Q_MwejPC5GuKjK@92PmNs<;pJ`ErsyH9^@iP`|-K$xuUboVBhpSz}Le9Hug zyFGMs;{q5lWQa9d6>WAg6trdsyxtyU{^_FxtI0Xu8 zVzMtXIY#1bq|FTpOyy!#x5UUyTQf9Y9QIC|YmhWrTg7^lzVI~IRpGfKFfz7w`9<$! 
zQUJN*QPs}M$jGex{<|DoRMIpuhUPqtI)Z?L<50lvOL827mVR&NJ!8uvj~W`uq$&c!@>479b+m z!Pf0Ac01&ukN%T~R@KWr?#;9Uax{Kpr-={RkM4xO7P+E563CzY7*rqoxL`Jo~!aQ1sT zYmj(H{Z0r|jjai5hi^4m zr&bKrs?yV$DjwFEu%Vb(8f7XOqr%O~w%y-NYwYZ@DcWrQVNn@j91@3(COnn<$g!%qf9~<`21vr=MBcJ(;Aa`)sYxis0VRG=c@`0kJp~#%F~8&?xAX z+C5=orm7riI5s-kE%;n#DnLN4OTTK~ZuCuq>WZ905+7@Qm%`;@?9xpneOYkQxb6{` zk)z^puyT0JiLf{WorUweRa&PCjU%oLGD^q=s*ZNfrt7}0#;RWvr_9%qcSQrgzP2hA zopBRdQ!bf&TWq~g#To<@-4JnrwKmTT2(@ocs0IBF(#5bs#bTte0R`W^#-;62#=9o$Fg5_xiCgM$ZT4qw3!CDqYbtf)O$# zzH`-JDQ8%43%J?~8hw>FIlT+L$f2*EKj)+5)I?D)cQ6$uiKee)N8)vHK7c50F;%d0mce><)D}?kR*yY$ zSi7N29_ELZ4_w@CptrReo=%zaIU_YjcX{?@*}Yr46|?MACQ2(c5M^QA628t8>7*-P zsI%Gphlqh}OUk0hnp1n)TR$K=PG!f$!2-CWBdM4dVl3A*3|5~53<$_LZQ@O0;?x2w z>J~H4jq^(#ShB0F*DN@%0creQ{xl>22ysJalsrJ!1DMK%&>!FpPPZK&zoUPM>c2({ zuhJDkOBYmM^32~dgp3LX%y$Ogod;AGKj;t3FZF_0qx`gc2HSLENu?&dD9kFXx0qWk znL<7;adr=$M4NSxOTIVCq=5`3~%_Nvif&Y z>tbNj4ZXruaK5>b%}9LPYSQ3{5Y*x{lV|y@?VfeD0`ceV$7$YijZY@&^9VF=>`oV1 zs2uk%SbimGkSM8MWpwMHSURuQY!X~oI%A_DX#+yTH71Z);6%C+KPKFz{hgUw4R*9A zu}lTl>vx%;OCm5DAo+30G6Y5qi7K`4u+9NjG-KrSI1JL1>wb_8QSmw3HnJUNxKZwR1RAN>H7 zIrz2}x0(KBloolJIc)?J1;}r? 
zgej~uWhM1OpPK~w@Fa|%TFJM>7QL5~d;UmlwXwYv_R^J4*P9`Ek zP4sqT1$BMvj}$jPYge@b1N*6_>8!E=B+<=XTgd>@}RX(SK~B%zGBs8(^}< zojzJB1N6yciq$fhMYRLYepr~Gy58j|$0u%SPg6fOYNHiyX_dd()H2~LOZ*e4)OaWR z2dLbAjMIJ;{Q2k1!)0rtQ+#E3*oYD}L7*(%Ed&XIYJOk!68fO9qaNWwW_wR=_#FvZ zG2dqj_nG=KmA=hOG$%=w!p{xKu-Jvs6q1qUIbYP0$0%#f#coHwyp=!9OlQtDcxT|K zKMDTuoGN%R`70le_st{O)9AD{v)$ZPGt%rCRnbA!)vVsYJ8X0nP5w^?`ZsPCT{@HM zoHr8JDc5B5cr*LodD=<^ASzLrKY>bW<`?eD-C(Va|NYV#?>Rw{VE&OnMK5bta3&g?LEZ=;9ThR+Of@leYlkr1NS1?Il&G ztb(J|R3LU6mJiw5Pms`k;p!f-OI(;dT)cjnQcQh9tFu~sItPEIV6KSo@AO}TfxAN=XV*7xP^TC?k7<2?L6DJ}NY)VOCp}iS9(VzW zk8p0<=|v`(ad9b{eD(``bt$rPO9o8OS-MvU{ZT+*34XGGHc2($&l336;pob$F+6-A z)3+hRb{N3(9kFcBT12CN2!Z#*vYg4Nt!Sc~U1Ey6Rya&tT%|CVW_=5eyG{SF;hB}&a-G>QTORveGhOP3@A9&)em=^1$<}5%qv;Pr zo6R3gvHmRNI@lnaOEij1Xtp5=@g`Nm@W@bw1H%^!xyeDGAj^MsHR*5@&nqjkOGV3(FRv0^wC)e7VwIhv5(e_16&za z+he(ZrK?29-{`>oufMv_fWg{M(7h8t+(0NhHt-=>wbZucjqQu)FvteQb68K~!vqR# zCf};qk+2x{a*w@t4+~~z96vvYsMP2d?QE>;6png@gl7aNwCtQ}jZV_Aj&D>kKXYj$ z_pb2BP7mI?cp5w;Q7L4)XuBhm8aV5wz2RnmzdKxFx5g8F!@9<_L!Bo2VCt!MlIu<< zUI1Q@3E)_6{jtIahd7+SZLorWMMWt<#E2Jt>-!w|9+*DSbqhBW(7Wq?N(k6s1KkqN z;|6?gV9>^uN~#W9j@EZZ9{IDFuQjbC;V%G7MkWyzJ%MqPX zQHEWbb9J%8sL0p+CRzgG$x&$63s?AojMYm*;z_xeP^PoeS%vkuS`_yynf`)g7CasE zx#uC`N+{a}hexI}t&i`WnhH22*}lRhtpRubuffEB|HXlz2i*CAljVsgea&&BDwcUdV3wB}KOV3Qf%N{$<67DOk~jBogaJ`s{UC+u z5842Sdy^zM9KaF)KIh*8L`#k1`~r#&&?{7JVNS2rlF;6E-(ty%^ITF>QMa)7eltQoQY^?*Sm>VJ-L`#m zfDTVy3UcHoOsl0hbAsqY)DW8wUMlJtGdynMJB?Ba;?YBt_2s0IW?Txj*5%I{YI5Ck zpJ^<=<*Q77JnFlYRVJ5DsQ+&MUqUzXL(3M$Aje7@+jsgXP5s zGdw|1sM)?8Glu}L6nX`_sPBP7Etk0clnHnh^Bx{40v7gT$MH`6f$@NX2b29)aIe2C zm5U@svUU)C-RfN!3^h2P_O1ZALpGXSsj``GFT3MZkk4Cw`ZQ)VOfbHB^>&+Sg4G5^ zGV&7=fpBK>Hzsz#*+Pz2tEoXE#GPFj^13BHS5P;EBG1yT<~`=55v9Y%RZ$13C;$^5i34bW+=RH&q-#2 zL}S$<`}8aMB%Q+}(eL{c3x(8Y$3eIXI4;DK3DUEwamZHQ88O-jT{KjJ!naD#j-J!T z`SA=HR|BhmFw-A;CRkLp-nSHV);oik%(o~(Q_LDi8$=A?J)tKQQ~{HKdJ1{oywe;5 zKG71S3VRMdv0kC7AqEGgI#h8Vz=88ImDpdC744^a3)I*y^}j&w5KHTn)nt`f-gJpR zK<3FL1j@|gAMLSYSvnvPx%}L`F0hG5V*U!+>s84 
zV-@i&VQLz;;4P-g8bN`=P6OUwRKk>zS`$f&JR>|Q+E#)l!@F7w)X6rhGo0B z>`}Jv%kQs-Zx{V~m921M`xrm7>a(LbP}Vf}2aoLtInl2Xf-bpUavG>p_yv}~Q<5W@ zF|L4#qoZh|m@w3V>59K{NW-cJ&R}_r|Jt)8(Ev2`;4uI_LFpGxvTZXl+$=w^tWgfFHZTfhu z$=_H&;9=T2Zgg`S7}HXiY)lzx?DNsLw<7jOGgw`#I??eDvT7c)nh-rzxf49mZn<32 zH{zWz0aI7v;6t=BX!(1uyIFf<4^#mvi^2X!kp1%B;8)B2M&1+*cFg_*AWv{dZ#oI< zJD3czm6%q-qysAknB!{+aa=&XjdWZzWEr9WW@S6PAV{(X7EY)B=s+oEhy%+p#oWFP z{QkfHh9p2QyqgHr1cL|6u!4XZlh7^in`Vn9%+X)jiFtb-38SiJ5eQy7bBy?VMrBpE zxqT-6awUsoUKyHmW0xU3xQ%N?F>gH*6wJ!Zl!ltw$GRKE$)$a1bawK zYESA5d$l!lfQ1PihvW*pzuLQf@A4%NEu6q8)0qy)SNh%hZK1@aaE$rsm_f(0OU1b( zhq(^{N#L<_;5`26- z0Z_acMHAiN6~;w0;}m;gqeD}qQ+R1rcYQosYKd55ms#E{0NIH|l%IVp8m7Se@Sc(6 z>@Hr~1fgK^G%J(+!_^3`Oz+iAb}rp4jc0~oH_}&(hoOZUM%!X`MrI45fEq{sRpYKZ z&V$!}t1FFY$ z+URKoFH078eRNM=$VY36=%wft#*XqR>DE~Gh~H;D%^%x`j+tC?9OmRee)NGhiMEBQof(-<9JwMs8DyE72A2?y+#zo~R1(;*FnMLsM z2f%^C`OU-1r~xJeo^27l_rV{0@*?EH$3S3E@gmfb`U5Zk3MR@vM1W7s!}tFC7=WgK zK-7DG5Kf>c{`uAjDJ{LnpJRowjGk(+?b5V)JGDs>rc4BjLfhYuqy7E%p<9t9>aitMH>BiyFZ6O<{Z4Zs} zxcwxb?P4+`tzKiwuen(dh(V!QpT3}1pX8gGO0p2~{!JY5TKFk;nt+KTMFiC+9MHQ+ z$5Lgz0tc!Hs(gRYsW>=MeOw2hz$7H-Qs$hf=RXCwQGui;y5 z<0RhfYDo;!h1J5JtMYvsFeQKSa5Wi%T$(?silT`n?}2>>cob%Uq4zWf{$#>?>eUpJ zfH2CtUW%Ny0G5iLD$*wKpMeGJDiAWKGV0&Opaevhm&A+J|9>C-wZC`Ee@pDMzjX1C z2&SXBX=SOzPD^I#ge-m1)e`{$rO}sRa%9?NMnb%tEm?d}vzb<}^rj*?E`+|Ec8#8G z>D%6XLh@V3GMG1+GrU~nWdgKK+}4z~np&0D$b-txCS!Pv6j3?1&yKCOGZQ9q^}LF& z<4O*Gext4fWO_bB^9=xOGeqzQ7S>!(OunuR8en?eEE}F!V4hJ+Th3elVE8j6J88%Q zre;H)uf7ievpJ>R?=QOv6j2@-lfFEQ0o_lIdCCuf*89VK+`9SO@aq1P z-Eb+DH~3<7A|#}QcX*%FLX@b40%i4*^ma%xo<)@u+O>S$9beon8PBN9$TFW_9Mb0z zl$Dnco40#amud~H-5WJ2oOOIXQ+ugw?H6bP{Fyq*PI}9(jIui(6;2Z}aNm$n^&`l$ z$-ATDNr0Lb&gY})@=7M^BC!;%{RR@EgK5zuAdCni6Bfg@1}sUND2mn~7~Izf4z84m zz~%*+>xtysfwjiTEfrIx;53WA_I!2$BmS9`Ypx8g;72eek-PuV8+N}bS30i00HPX4 z*R1%x%bJ6Vs124WX%;+TNkdz~6>64vdsA*xxooX)6x3AV4O$+|$k6f@w(|OZD{@8w z7xH<1tX6Um%ihY*V-cpsXDT0W;4bpqiJs6$lrlsIZlQvJlTRtLOJsdAXbI$mPQ=_S zEOZ&r0=AGOpd?o6?2m%ZO6czpOZ_guxVHG>88PTg-lkup^^pQ+%AqDmkqJt$<)X+| 
z1}MQ~MK+IG0acAEed}B+4F*E_uP0w)ft&8HxElSRbHP+=F_P{SBl;WLG~wvlP=reg z})4|;mGdKR?|C~y=KB@_WRU0w^yRexuh%{Qc*ZEOWh=!7dnHeMC zPpk@@DW~Ez0l7_P)^S>kpWuun%#Mjfz!|9_MZW%bHfA53QDcg8yFb9Si~m%aG+hE` zR9Cm!{V%kG{I`eQOus3rkiRUQGbV*KSvi{K4J?vABFAu#k$5bXJkg&`HnTj3TDb`p z&@ope4SxkYn17)N(XN@ysjt7aS%jR$B!ou$E#N!~ZJ8a)K$Hq86!CDU_f_-r$Kqd(V@&BnU*0m^K92oTf9Z!ifkS_In01josg78(tl70a8}*6jWQL)-n!789V#aQ5=U5(+w>lsv-o zX#Yc{i6(}R&!#;<^WuB4+r(h0sQq!8Z0e1Ztu??zlX)z{$7_K-q!AJd|KbG;RH{>0Pa|m~fWxTanaB5XK7D%i zAiunv!YgG2fmk&)A2b%)&f^wo=oe&U=ifpNegI6pALq7$g^iNrp@cCMxiM5 zcS^YqxnJWD%BqQ-Iwld38|o65DTYVw&j^3i%=sKoPiLM0cQcUWrMQU3#>JUbeb>;} z4_M|-1kT0p%9phpA1mSjXJQ=b*i@&V6E5dM)4G7B9b+4%Ps6DK`PC$$`x1EK9*0se z7>}U+K*y?)&zmTTk5e0twzk6Cl@)NcZytGlCvia~O{ZkL{oW15GWRWqjPspMbjKXp zyVxyFau5g-{c4-f))j__+~c;cRP7OT{hyb58h(^3Bdz#jJ3HA7ytg|zxHhKi+>a+I zjrWlajM?^*B7?;ooRdJ`1m~X*M7ZH}29>z6DH zO8TP9YMc`m@546~!P_&2_*v!Nwow%zp93us9^Nx7zXe?NaSLd0peH_SL*sG06?jKz z#=ps%-(@Yy$Q`5FcIvw+6?zU$b!vWST{i37(J1NL2HbIIMk@e+txa6$Siar6k`v}{()dreu zV~$J>P=y7>mP|zH`1o1l%d$?r7}Ow<%HtQp0A@_RmeMgCaJ$XjNR`ydXRxEhnIgNW z^kM!*nIp-4(m$RunX}J?s1Rfq@V<64QoRLmTL8-&leaej?_d)1lpre^(7xRjtkSdp zc@9$0b6};-ykiAF`k+IA_*X7b{|swD_<}ZOO#M~Gn`KdAh<)RUQGR~D<(R9A%DveS z2aXl+CW`n{k5Pdn)9vr0{epAw)$8x?`;csGZaPfw1qn^9prRAUu=yme-Wgt;7A5^$ z*&1(jd*ys$w{G2dxNNNzRor}h+E?bZ>|~UzTE;8pSrlC1W`!|B{*$J{sH5mPUjs!l zYNzrWZ!LU9ytvnR&g+bo-e;-lWmjd5L*JK%vQ6G+dRxbV!9SxU_cX$etU8*Jjr7=C zzNLe<$ONTN&8Od$^-_G1+fUuMRR{*e&`h+>ha;{sSL36UCZmWHSM)Qn{M#{v!BoX| zvU>srGy=%Forn2oXL)wkV#$NUJ|`Jvs|p$I+%IPgjkuDRzq~7stW3Iz#CDp|n%<;O z!Xl=ij~+c*OE#VLwpX)mc>nT3Q=TpiXly0bmw?O|OqRgEapMFjhu-wu_2N2{Ye@3$ zx0$G=2U-{-OH?Js3oi50-4w%YlbwWscFqH6&Np_>>Dxj(0Cfc-b*ui zw94Kgk1aphHqobkwo6Jsa3);+o}bW?L_Hc5ZEfscQz1`}iQI!R1S(3w$v7 zH6-iJcSfpQbQ#q0MiK=a?#8qcR>U^SqSZMWp<9gm%yhF0U zcYNH-rTN4UZXrv}$tzdeZbsGAN6LJX?2uk%{h6J*h0kXRm1)J$WxLt8443F)3J-#b z-Mcw?V%+}Yx2Jxn+nlix4+@UfRW;L=yvPDBcf(bBCvj2fT-l98IID{<+!~xQNg9j@ zqWAimD#Y4%^#E$edC)Z@UR#9O$Zb#y&`1bs!eUk%U@~cnK676ZnDbykjo-J`T 
zpCe3s*<`0-fcxNKvfG;=~xpE`3pW#ZM{4n932b&S1%TJEvr3jJW zv#;>H+IhbR<~0-kXbTett6$`#@2yX?LO1t-Xoi7W<>VX01l2U)4-hGB=TV8-PS&cp zhZJGtg6^?Vs%d1cAwZ>#6HdOhT2NbNNem4Q9gox@&7?l=7LXMDHZEy(W$g2=?t4iI zd>&n=yf{|;`u6U8IZQs^b^r_kr?A>1ScY(bpg{~HtRYrP48R{X*MpN4 zGp++8w`(MBjQ#`wz?_xVn2x~u4$#afeBo(X%v7*WCa$Gc45t!ID2Vyh)=lB4e5|Zc zNE!(JzOieK$f|NPUtL-vdwlxyN*W>tjU$QOZM6O*=Bn;^F;nOKb1`hNwx;f-n%K5Y zir3;R566>G*4pEGZiD0}Ovpkii{`y1idPAho(mlo8Tt9$bO9C}yVYC)4TSe6zI0H+ zfQwAUwd8Yi&MnfsNuUO<5r%fE25vY5N=m4mcieLEum$=AuFIqSu!4Ogjb|`-TT#>-rJ{d?~`#g<^$Zo{>U@7m75QBgVm+*RfzUR z(?Tb=ekCIGE6lAQug1n@qtXoDNVU1+?u$T#Y)Ne)r=%p|0p+EC8=<%!-xs`3hQS0PJp*sy5GnbvVxLXmNr7eDpNQ!dERuMoY%SNrhfI+&V%>940edLW^K7<}Se4ZdhQ!11d-+QF&<>argJ47pNMV zr=>X31ymPf3kOq-Rn`3ICaUW&=TME;>iVj&3c0J5G2ek@neG$8n;VXf89WZ-e)veY}X zNS8YM+8c?a$dtH`i;KIIu;W^sf~~`2VGL7zg|*ldnwomVo;gm7%ISZB4uWV*1_> z9lg9T4ZTlz+B-Yfd)<^GU4*G06-NedTFEEn=tCeOacUw@8G!S~Tus;&i>D#%DWTu; zQnSyz77K!Yo~)+VowE+D-1oMSE{4quJCE#+KYE>1R_fP8&-XECk^Y(yY9Ela-Uqi2|RGtFC^-Nvzd(rJ*4feiSSIBKnV+OY^9O1JfT$hHxz!TxCH~iB~Rc;p8 zbT0X4H^ba!Q-Qn2e8xN}+xR5WbI{>Db)zq72cj?gJ?^llINTn1?~06^0`9nQuXtCo zeTnyL!k0eEd7z1Cd3jkw=vwV3{y|~NE_Lx6vqDd&&vt`m)TMu|O!4jgWFAOPGU#$| zntga+=g!qahUz`Ld1LV6$%9=WSwctiJA{lR1KVm$oQ>dge>azkhQnjzYoL-y-ja^i zLFb?7q(V*TcB7rko{m<->4glyMQ+TwgAg2K?(cpag#HMk5na;I zH$Wd&xEy1R?5et`vc!1#rLmehlgf?(OwlaFw)*|AG1z~t!v1R>_Fp@(e+|a|*K!Oz zoLumg-0`zoN-h6R;|%9LpOewN*IRp6l|Az#&j#B$*;A3|z4Ixke8*83Z~ZhZi~gw%hiVw$Jj?a_oOOUZggQJI-{dM*PS(Be>n| zFhWXV=XIjKnjS1+*D)?()IE?@-SGakGqv#;$b~d{rFdRG zLN!&{_ZDl6PL7hT9U!VC^z}ixo}vFO*92~uW4hd~B33MHbb=(+RJhc1s!XBSc`Rkf zIbPKyN{HDdJ9p{f8QfvS9jX`^yRo5VGQ-wdU*k=^(=-1IhyvyQK-~ZQ*>%ab7|LfTGUk@(;?9BKn znkak%OEfDB%;s-`gNzUp-H(2G!9L%@3-^HjuqSe#vKQ+MnW{Eqdw%df7bRP)-|Xqx z94k(Eqq}-WyK3mU9R`4bf*SFIEmGiK**Xa{e_QWy`;pjgEdwe0*4yd+WQPSg3zJPEzhv$Uuu zd!HV9q4&L(iaCx6g8Ep9T>nZd>|${(v=2E9xu%w6)4+}^@q@Q%Lf?cy zQ~Y7SLpK6!MOt>yw)O?Iu66qzmJvV(wf@0DYyKB-Aac-|9|689+y89&1^l?~pldAx zG}n7ht--EDwI`pZ%BJGQk3!8NFM=)`rXq{!(hta9&ZUZB1J`tg4+6JSuRvbBIFxaB 
z7g0-fE%Hp)2;M~Yj^SQ?jQUVxu(~tl{K=^K^4IPaDhrCb7@&E5lCUZ+l>GnLdJCXB zwx(-15Ind`a0m{;-QC@t-~@-@9z4M%!QB$vAxLlu?w;UqaJK{h9CGh{-ummSqNpi2 zb7po=@71eU_v~%kE@*Q%NR!o{6BJVI+lvYQ2nzZdqW@{E*48`vv;B7&c~HoczzLNa zpiBB}?d;fiHRR=WwT)`R8kg@5-R?Myo3I~lnT+TB^gKIl7-OaWYV5z|#uVMi{lm07 zY~#<9K)HvyX5&~&)b76H_UhC>bjGvN!NY4PNzmzVhI??TZ{wm}4)@OWk+caMLp<41 zb_Sk&<+y(sR7SDpU;?+4E9eZamd*-V$(pg91{Tq{UJhOhcq*Ln^R-uLZhu;perwia z-H2XRwzBr-R=44O3oN+I44Ui{1~P{GV2kR{e0v=h|#{yoif8G-|sw z(w#*iT-&-+60l&>H`~tOF)g{|%Vei%zBF>pgqE_33ja9~(>4M^W}pTN6Bu}2B!@A- z)hEY!%-dVsV=@%Xmp{{YX@oV|2Mwro3PTlGBGy!IaZ%3CWuo&18&JGHu{$=hnQ z|8CU69sXh{d3Xzc)O2?uKm2i=-+j?p&r;j3#Mrac*te8FGnW6+Y0SZIl+^!bLIr0V z*fQ+<@yF}t#2{@*s)&+75@yK;WQvL@Z^P`YfdiCG5;!YN$X^m@!)%Zs2k~#W>`6$SYdQL;}taQ$26u z@0b02#(ZxcwY`Du$^*irNqxqBYHd=M5{rz1`1yFBzY~a5xbAv7TaM2tR$rD;@rF9k zzMjh*A;yf2t|~&rD8T4d&yX zE^t5{93D$X0@b@v4w79fLVf=H`HL6G-Cr=2Os|YU`O8~b;rR9(vJO~a;pt{MVM9@S zp3}{IRYSuWGt&GDh~7tn_7^f~pyeolHv}-gJd90LNkkVcAT&qPoy?46&IPGD0X3~^ z4eh55aA8S#$+Z{(b#lx*lUlu%ry{SzPDw}=2>~DKvU$^qr!IEywzh1ajE;$# z@&gf7c4VC0yonzdU%0yVMyKQP!)KtjPMO_3VsRIU@&zScgc}nmPYO`#^TtP0l7@FyE;E@&nP0egCO`T6RzcZbJLd$4}vhc!&RBhvr_ zdn{H~XnNk4$6lai#-Dj|Kv0nXHiJBf8KOx@1`1@r2mpnW>%gD7m?w#Vf+zg*^zHw7 zP3Ql}6p$9F=ht~2ATRNuS-;`k{G^YH*iv^Smb-bWeRQ*2k6$&nlkJDHw?{DFV9x-M zg&y2Pqkr@QrrB>eSu;t3fcR&|YULol%_Snzb|xV=jpjX&DFd7Wd*&S$dj+juHr;?P3JackScnxeAcr9y_wZ~ml9iC>^_m3nuy+$dSGNbc>u{Z@zD&A_YolIJ8agj2p&a1T;eyYI<;Zl?Mp zmUEqB{@VAac>#)?XnsB&PZys==kk#W^OMa>|F5<9noG8NWS?SU;=8i>mrHT^onRp? 
z5HRt9cehf8?>I|aJp_u9{AK6Qdz|rGTmx=tUH!9~9dIw_%ZF=+@H7u`9qGFpS$>H> zmJtize`t2*5`l(;)D6Anqd|B^o&+&G2l$zJPeRvJLJuc`9xX`*kK-ZA`&)q0DZOTM zu)53Y`s5dDTK6#CHZkmfTkYL;@0-s`vBVblDDlP+Jp+J^S~>!~ZA9)_Pzj(r^G!aV_*#@>^e!3LZ#JSwg_fjc&X5Zd&(%AXx2K>C^b z?BfPxiEG`b3fM7do#TKL)GVJzo4GerZ4ESvtw{EzLI1K(rasU9!)w#Yp3KYJ)3FWI z3FWU2)#`X!bsmrIxi;dj7mZLCU#SVk-*4erfr#jE88gE59olk7#Fd0>2$lFR8QWqL zvXuG3O}M=B^77rU#@ccv(^xw0uww;}zQ?Ruc4y9)Wbsve^x<^B7~IaZUd3*0ps8r& zjsR7r({Upoe)HhpOL})PB6LVR_4Tkg^8>DukaL>Qre$-r=|o;=ChrgZYsfmOsASIJW zM?T2p=tKH$NoLpY{)Cf$j%IE$pt0=kyKLH=bWNuoJjbxH%5-ImijGdR_h4G~ojRPi zAIdt@sj$s(npCZrf%0+}G;VtMdFv- zw7>zJqQZ+VBzR{qAlsYH8Ge}JU~XOa1%&kL zfD$}rs&JgI8ue4~l7xM{11UdToTJTGnYQ_~?R=6q&{q2iB(CSn(`KvcpZxr0&-?F&@k}uV!OJ4M6+Fe@nUtX_Au4R3-|a3`;rgd6k{We;+XfLpy7!A;Ol-R z!Rei*t3^9wgMdSYHv8@Jxp#lMg#C_oj)%UoAN6b+dd!92^F2M-&(ZeRr-sDz+y*Fj ze1rnCs{+Z4O|i25c^n|`BH11!0#c3iYfc{^HqJ%y}~EBI_ML}+MG`=4C52BN_u~pAZ*B8Yjk<9mX+W9Nrs)TX(%9IqxLHUDq(P< z(UKoFfnE;)nJ97!+%^5$ev&IUtCCuisIUDgowr(qxu`FOQ#8>&Yq@*R%u@DqM;RRa zkWo&80W3oCOBU!2IIF^$1CkuWry7N}h2f!Lrz_s#f&4MsbPEr_|9>7f@{xk(W?6I| z{l>)c`7M7q?9|!xbK2#9dsPtyB5r1H|K?Y^QCm|J?5%C#!+HgpQ$8xGGj%n}YELX} z12<;^nT|X3-ku*i9#MF3m)Nu%rxHY&dc;!c_@XSQAff8a${^{1zTxbn>wGeLDkE3* z;RNAG>ffGH5$F~198r>RfdC5>l5G7sL8vNi%(uKy)D_pMYkM=%11U5eQyYKo(K@2=+Ra*9T0vl0isng z{{4_0i^*t)?w!v6$^dRi_ln%BVJjX>@}#b-7dC)A;&^$JAH#<`HFh6xHTtr>wCr=Y z<8_Y86|KcbI(06D0ZeoW0yY?3XbLi)Xr!nOb@O9tA_8O!GUE~W{xk)5iwV#*tTfbDTanV2qGNw>V$snSX7{_J2I~XH(ZWT3_>mN1IN00<@*wgM}$N9QP=&O>bv{)PBvX zK3hq?Mk(9HN~5|(ed%ciFW84(r|PXBR)`#6M;lr8NlnXz1kIj>b@kRpWQNVPty`lX zzFQQ2SGTm?;$;Snng-myLWYf|I7|j#+IObva!!%YgdeMnkTiBiGmA_q>tvzS`C^yY zwUKjvk0IzcI20rcGs~BjtX%6e41D222KKnXc>gUv&@qsE#=G%#az#GI$aS1q^CbB9 z^*)K11X{=47u1T!)QU`*$H!m8!}@X{0Vl*)2!SsUc-Vt_Byi{76}Aq9I3uTa;haw} z*bl43jGNpe9taFc3m8A0-%G~(fw8H3DDGQKk*g}0p=aA!JY=20g{gAG@%;~$5f+z5 zeyj3aBV^%?PNpb|*?h)Q-hc9V>m0zgwW`^?@1SrPRU(c;*YZh@JM0=h)$<2DI#a^J zz)W_Bqm|e-dVDYMTR$hO(Eo%4E84k-C}QAohn^cmPw~Q8(C_C^dS;2bcz}0viSeD1 
zgbn%&sDFM2$e>NHH>~%rX!%@6s6NZV)x)R4LUCdQHdPc^=a$=&0jF=#iW(??{oG8c zE@WWg(5QHS<5BN!=Z~|%D!7Hc?|0v!+~}~PP(3P(Wu@57b8o%Li%zKSl0SBf3Lv@>wUt6w58EXvqSR5trOA!{SnNYK8cO|E?{C;< zy1yP(eiZ;)I+tTtvbqnwrq82$!#-5=-Z8C{u@{SdS459>cf6f_rgSQhf`Y>OV^it~ zlRVy&f`Rh<2+{OTx5IW6{^1n0%Kh~P-JSI`S^m40&z#@5u!Jp@!S`sN?>r+M8qEUk zF9<{&Ic%6gOSK*}e%z1WutBrdQImCadaOQ;o(eH|v5NI9rIDbF_%Fxr(9o3i+Pta3 zr#n~XD_cHhH{-Eej~JvV1?*OjiducM#4SM#24kCa8@pq~9fdj;bIgJziXhapo2Pbg z2I}kibTeHa(kiVuR(rf2cXf(+Eux^{t#g&`EKS zg(QnTj1^M~=ZA;o5kT!{f(6j2;y7e4pviu}g?{)k2<;OrGjCdl@U_jYtxKcUp7(6* zK%V8}?r665LA3y4fKtZ@@23OMRbVt9u~_Hg^%gTt#SR{2m6bv|O>yR*y@RmW17$oG zqqjsbk8hhSXK-(O&X$h#Z5A&gEr<7tUOil2$e!lWA+w5?ckozSB6#3GWfz7NSK&0<$yB`1$yv^770M+zF;%K|6?c&dr6hMSd0W)FS>d<{BNB zpZI|+j&3}iKIxZK=-JJ{VzzYO)a=VY9R6NI>&e7vLG+@X)Cg5gMg1hP32k?Ut?J7eijD-@JVmPdOB9% z);QpP5yoUYY<2~nfJhj@+U#BF?kL~E4|fs9QKP&`BhwR7r@3>(IcgzVyV}1Lqo^m;zx%fWh#_0n2yzcdS=>j zP?FRccH;quFycqc@4)!>AQCr_`XC&`)SP z0Fxb!f`|@|_Y2l$^RiWbN6Hu$D{Hmc2e`v`ocZhT%E{g>8byGLB&y1Bp`m`ZYc$$P z(rZy`2zW&NX!~$VXN-`#2b=kDjzp~I9%YvQDFA-+XAI-_+NKy-7?Bq+(+YuYiX;fb z0VsGKWX50p+T093XSjEfDAzwcMyCe&cYHhmg`HGa5I#dZopYw}_-J-((!;}p`ojwc zLe-T4HrGd?&Uw`X!3wI)e_)hDU8P!Z8tiYzDzMs-_I5aPuS6m82gGeq^Q^lsM^iXeMIkTI6 zDz)u_)LA3wdq%_ym?tv72osqR4>m z_3xeac60f*nszEBGE}{~ZFrSII+!^X@JQ&&sg5o|daKQC}}J^e)qD zmZ_GfxtB^^>YedCvHKixEK)3f&Dv^po+?TD()$%JCA))|TTA9(Z0~YzEr&VNd$%_1 z3ioTdV-)h7t_XHDSmJL8GSHYz)L|fohvm@=={$+f$pD|pgD6Qublp_ z)F*Do=z4(D(A71JKd+kd2K zPh!rYSn)UE`D+WUIjpsRoqzRvhkT{Y|Di{fkO_`--tNz)*UYz>q?z-!8fAfP|COs( zE3F=>JXrp(SUYpyFQi^}Ab!fe5ngP&SN7rT&+{(FF;`#+Z*q`>S+XTlDRVF_TWJ@# zwK$76Yc4LXp{JHldPsBKnH0Z!_Y=b#+cM)KI<~rMbPdl03c@(FDdOVdM5@=>`(9xM z30mcvl+Bf6ZZV}9zE=0ILLYnoQ@#f*vnWKgFlV@z&Zl&WVY0)KD^`)fVv66mo2}qp(#|| zekkld`1)##*7Z>=?~q?$%kaCZ{!CTwgouH*c*eMtJ37F1Vt8_s_W`c6a>m11mKEC4 zk(?d1IYy=2fKedSbK4tD>LWR@|pr$2>4IY$%MxSF(5UMz&hk=HEC{S6{vNW$0)#pzLu)1^!g>Ay8t^Up{F9@6w!Yx{$PgC)2& zEPAqks_CUL8OsP)-jNq}mn~$caEuV2UZdr}u2qhs>Ajt677xFk(l(A|^^mFd%gFWF 
zPiju$*FMq!nKxf;D2JzwAZPAJ61a^g;5&8ILV?5fw@jvr&K!H*bv%5DArZ_IHm8+3 z-^lfnKm3H)0)4o)CqcaCr`-7UfMv|~t>bX}qzZqdrP$QhuSsB_$s z{e{Q){r={Lhy6_*3IvtkQ*<6T>zM3_)<_+9^x)^JRpRDpFDB=AGWuV6k)$|Y8;wV< zF;50%KNWtQhe^K+P899Lfcfrce1y5)lJQ2~TyM@V=%+Cm__X+z4wLsKRqW^-(}Stg zR+PI76`6YtGYKdc(3$jNY5a-5gi14lnFlT$AtYzRYBTHqo;0 z(GKDdf3u8umUX|WVIZcc?}Q6#+_- z99fFCme^6T)#B&&^6?r<w{ErE@H@66cT!+D5==$1)Z(5Gb?hCEdN_9|cWpd+AZRvEqfuc1o9}wt*b^GPGXbm}Y*uGnPV^woMl-o? zQ5tuuT}Za!+Ql^b;mP*U`|i`Z?`UFi?B4I6V2*d&RI7FOU5C4q&Ec<`OuU~?kDKAq z4h#3#->92!y0$h-mcG&<3{hKe+>7s)9uA_+m`K4pI?rrvv{`*1a66b^lTrlMG+;51 zeQ0WRI{ks2@fj%srQ$p>wqMOD<@=pDQe17e~r8z`9YYg za5U0!#`wXtl>S=EySP=my7v=d-fqWC%087oKHew4+ZX7iV(=9;ziFhh4}*|SKAqA$ z-ln$`J=Irp6f&5EccAQZ=fx1OVXaD%Ml$8k9cgZME!`LiF_ zVh$M7%l$V4wcxt*KlJ z!?fa$2&_LIHWV{Brf=u5n`#M>EKw|$*+}$^YRwIi-W%KDHg92p_M{0QIQJhLb>Wln zBM;TS5&sROSfATr*gPULTOV^G6WamIz`5rji~yA(U=x-i0dA*>@rFZ<8t@{0+>)|F zcGctAT#beZu2-7o8Ul*O?wKD*eQb4MX}2bUkF>gZPVmA*_=(ByrVM#Z6K6pxs*hr> zKXQ~%z_e7U9S1v^@T_%C@9^#C!2{IypOu1up#mHNUUPbH6!HbG7c)y9qk)D6GOR-h z)Y0gMm9K10!v;il=BeOdU*Tff)5__F)K0DVc`z7SMZQN_s|G_PQv5n+O3p^=bXwlGL z*W1B9@bh{SuPPH9?yDQ$BI{HYD%e4zdGcCK}iLWHB(V@M@6oW>t=j zURtDhh&yKyxheZ1_Nx=S_)Pz>=0KmDc`aPG(hP}Q6`nbq`)`0aS#xt~m!7IOHzpkf zwa8L@MG_z){JvFw?x;sk3ren;1AsJUVk5WFpTP6v$LC?*Nq})%Z|U%0)TASkJ~SGp zxhKeulYv(j%EMrH&1Rq6g1f2RB*g zy7&9{3N>GyqB_(9ZQW_5MRi|pvib;E3H{A{6+D;)++I%Z<}D&XXa_a0&r(P-ll})@ z?=QtEt+Ykz8+4?v+fJ}_tgWHwHDtuO9m6JOzNID#?(w!L${8v{a2;EI`Xw-1oY4XU zU4VfXkJNZV2@{?uL6vDq@n{%(~%K=HWh>DUsI8*wD_BPU#_GM_Q!UNovGC6vPclFNyHQ1?6U0a4)oO~p{H7F zbfl(oSJxfwl^(c%W)4Pye47e6l>ei7Du>YOJk90RbSCHQGz{32(3~X}1ef1$Y zP!nR9&*kb<2Gi}%MnlOn%D6(#f9RN2T+&C4KFi&kCpoYwS;OM&SsZ{~%a-7heA8w`5&UC`73OPyoFxsr<)Iw>c0xy8FE(+bh6>Pg+E@>qZYq+?) 
zq+~h8dzSYCMTTGi9V4`pgu&9CGi#Pp-`di4`5WODebS7>*dJ4?ZkwfFA=sBxTw8GK z2nalJ5ELe_;D0OAgdGo9fcP2;fmRamY&9xXGz4@&Oan6yU>rzN^5Ye>B)jguVqWzH z!o8nJ{6wEF{snB+`UeOK8V*dD@Prb9BKQYaMI1vrPSB>`K0G!8`QV(Z-=TuwrBH2gbkCFa-lZb1p6^x&0P) zfnG;+oOGOG+j2REEWga`;aeL%dJ3rlP&n;@Thmd^%`0dEDTs^&@MK2wBQH}(?&>2& z($)YEJfN$bD87~C7d@BLlxqhw-!F`6A2i-72QqpdDpC1$kbjEF`m6w!c=)M0%mHF=Bzyr8NQTGU&5w`4IeywuGc z@(~Db(FY)Sy8Xy%p5xh{B0I!#fBk}(@Qr~2V3C;C7cfbHQf)|=Py!DV0RV(A-gE`J z1Zf>|VM1uIc`gkF|K2anIh!?M47%)(ln%0h$@IgBGaImgF@1oJwq1z|ENvRiWJz0# zh3n9d;2;l5ki5yq*K5DT1>Y>aY%~iQ4HoeRTu7OPH3>dw8gi7076S+!|2Rb;HOzM) zw*~zLi5M!CCNm@3=-u@BZ{M%G?x()l@`;KCa&tn`L^xG08?f3^B-oc6DGg(M zGN#4d<=+vDg90xhc?hcWiuX3p6cRKcN{>53)gNs1xAMR!bs6Uop|t=#-hm`trh#m; z0^L@ZCU&4DXQZiWW5AExW2Lal$9Y@1%czkW2nqJ$u`x?QfUoe2NG3Z-cV;w^WDG{9 z%?v3Kr*EeUU}E-aq!p7+o!F$(;yT(dACZWLGd(LvTC|AJDL^XhfJmM6oU1md+kAg} z8xN=fx-0IN8!XQT4onfryEzQna-TW?v2fE#aq78Vxt|I!8X(7q7-E<|=ylcpdTiTc zBsTKs^9)0gtO%47A+vx8XyHv+Z0fJ8UqV69gSaYBjvF#e#h0-TOQFCp&vk~ePm3Q9 zk<@wP*q+ z6eCTRR$(4`J=7eL%Vf!WU1>8cz_8Wm0U!e;-C4;nchYzp!_x0ZSKA-&Ag6cYAPgLU zoGI@4Xr!JT2NU?QIFHmnpJ9(MUq$RV6mGh!5)r-rUqrd;U|2^(mb}8nM+na zJ_l78L@v=zTVQIJONC>Jk`VyYU@&fgSWthv=tJdJKBK6R%l&b!vh_mBlG`7tx?8NO zxpKhgJwPY<=STp#s(7oSv6BL$hB@bIaar`?v&$3_r>i`(z+1<$YWR!O13QO7#7sCS zc_64Ze#9IK4BUc5mQf_?|7;*aM*SRCFaOdEAIt$mDk4rIgGA^M@W*eo8j_(d63#^{ zH}f!yPv^oE!*>>xqLbq0+SlijDegXfIPM|9+!L{d!R$?Rd#`YI0NNtN$xS;P8ax2W zq0)Pko8OcaSJ(N?k|XgGXX#BjA;Vyk|3%WM-_q16nB&&K4Gke}iI6b?W|*S*xIt#} zjP$yp+gF9;$6;#asa^$@H!8a=fpIOt zGSIuTL(@fq1h8B}F@rc-d|1yeCzPUnZ?|0u8!zaIg-N$8lg16vt3ClfH_VfZji3Wb z-wO*=Q2%8aFJRQ(m;lnU9_4efrU5VLtP-zK?{GM=7;k4*vros$G;{0v1Qcs?o&>%) zXg~GPU!~}7i5;k`C_uMi5h}rhe3L<@h_CPK-oCy_hd0f*8Gdbl;5BIrB$Onr{`|H( zvFnNdY_)gN!hS))x|dclN&t2h_{+CEe7`T>MM0pWu^npMOA>h2+SoMouC9P>64zdo z6PXt)DT+a|F9A>10|KUH&uArs2n8cxH5(DYyUF)jrJ2xw@C zKTi*R8)zecqiJB5iQclX8mW?!Iwca8-J980ZohZblfJ}JQF}-ILTUGZ+$`0Q^C-;C z5rQC(lR7jIequj=F6!y&p@KGhp;pK(ul9MiFw$yJ#nKu}o5cvP2P888)oD?{i?mmg zn#vH%jx2}luQjyaB9zeELy+{cjxHAlK?kDRS!Adn!7!2ocl$+6B?sPbx)7c7dy05= 
zsOb!8qfK|k{RJ&Fsr4rtQ+a9N(=jlQ!GVVm=NLuj1pIWpB|8>=#-H~U%3jGt$p6%E z*FG!=HwptjNi^BXhcFLGVGfa1U;Z0dUOXEkA>=v4%3&gmVo5lnmdh@jSz7w3uoA>u z$;Nn*$S+wE7ZDX7QFH3f#|W*q_hfIxTL>b)nuXXDsDGWfdiweb^!D}=axy4Se4j2b zRvgNp6tR@oHaoyDjy6UkS@V-3>OU?lzRn^t#3CZgf%(T0!9x30U!EbuOotW$+6H*o zS#V^p;ey;CV2=!{FViiNj(*VK2zcZ5`{$HSTH75TG0*L)j#_601Wpj2r(Rl@1;`x(oFnaCyp$^Q&wD6j_@f09gBACG3l4Y!dgO&p> zQ2`@44fSsl_xtEM$Tt6lj6wwLiZ7`~$Lg8B4{Z|3Lz`nWgU8$wSIdXBsA|)RRD1EY z7k9UJ7*_iI(9hrS`vHar1Y~iaRIl6tNyK)!m=MRGH+#L-t>^4lZQ}Lhp(|PjDXLxx z{olw@QTFy*M>S+pTuNgIIzbp=CN;1~5(C5t*piN?;Dw0M-0ANh9L}yL{hR9#O?b*5 zH%L}mVv!4E3#u$3Qygdt2mt~#fHh;NR>6TWB6{oI+ak)gYLxtK*IFe4q3p4*g-J!b ziwtwD;$_ttK>OXs=uTF^rB-kI}AJNm(TcsRzt5ht}j7oi* zp1M5xxI)K8py`f-c*(_RI&)gh(UJ5F5QcW;WG~I0rB0E~b{MsxcB2YajIxzK7H4N# z4sR1Y3F0d$`Eef=t*#9k9s5;U^)afh{CEL8Xi;QquZ2v7PqEN?zv+RYw8~LUpjkmm z`*cXja&T(dT)7(UI=Id3d~bp*D_WHJV}vIBJ5n^T0h%fS*Z>3dePEy>WG;L}qX`M5 zic4u44VA-1%295$qxHcx_8L9BpWp}($G2a-*Kyb&1t#!6IhcqpV2I?c$i@G#VL1^( zz3G^q>u>h2paM>b)oVpL>5QviuXESqZT`tsC~|L@&i*JpS;>y7c7qdgTrL65uvi(6 zu5Eeumzk(AK#_dG#=@dB;>Wx+98i7xiu3RZ$zp`4z)w-t#52KRI|i%6I8RVM7E~FE zgI*Q-Pxurg-YH6l%mu-faVKbm0ucbikW8wGo-s@iQLm%HtJEFri;bTHP5_OUO4mKi z@w?qxDyWfv4+FHX0IJ6oXdheffWH0bjUIP{+h%PB%a0UjZ z0E1daB+GdX+@xNL~UChl`!B! z7Cd>l=;M$k6Uv~D|IG0$9sn8-!(Xk;4q=nwYsl(rgY*YBFNRbc?TAps)5tozK~}!n z`!vi(}Za zZRcxV`?R{)tY!a{Ai=)E8Ku8z$P9^Ba=kG`z^RGLZB|kMTpkM^UQwMG7~^7G+r77v z_w}W35<`0Svh$TK4AZh(^kduW!j><2KVu-|`Sh2w&Ww+nVD+jZU!l zCFnOqx^ND9jvjl|Ii(q~X&2wf*|L3*;sx5Q(pkoIJ&uh7#Po|v3yQCiNz~V&{V9f9 zQd7C0LT+oTpE-qy$RP}7Pf}HNHLIG&+$A-S@f+{_Cx#?S4jtAYLrbq%r5xiohlNW! 
zB)iO^9b|FOy-5J?X8uL{p5d8ymm@n1A?yNNa4x>n7`-YJc!YW1zqVPMr$SHX$SDbq zZ|QegPql|%FtVw3pfQleK|%ms{7-*$K#Ph1?OBDfi*zOh7OuB6a;w&}eYMd@rMN3i zW-$m*de_mRfalQjMJw7K-ANt-n`{TWb;OW(Luml7jpDrWv(uUPue)nP;z`uo+;}96 zlF4=IvWb>Y=m@eHHkq(uXynD&!q`$BW4sEHh_bchK1jNmh3-OsEA}Ftmxw2JVhRe# z)NkJG@`LscW$XDG!=|?mO+tX@C&sMJed#RnnyhyC@6Wz)swJ?ENCI4_YQ-5 zf#*R7+?pYM#;yV!z@C++>$J4?XBmZYhVaPpof!nuwky!`DT`T|G~m{<%9;g$y`VlK zp=d$@gQrKpOxwMHVFYU1l(6S_YHJOEbS{ItP}z@BUH741Fs=0{X6hOX1qMnQ7~T=W zP*WH`Ff)$$Cc=PU)e_2oq4Pg>=JgyhLarH9OBX|Nu;6u7HlMD+o(W11-V-I~?P5AR zW82T7F_4W$zvy6wObB1!4Ap+lf20gb27-P3VlW<|tsJ{AF!)O+98X<8NmB7Q3Flnj+>>ETR19D zCb-UbohKn0KsZzXc5C#Ah2QS7Pqjkfu^F$8ld%!`j5N$>F=Zf!|s*kn?R{UhCx z#nIN}^!G`ro0;X2geeE&aT1>(;V>qK_p3$DA%n@kX}P%zwBLW4^{-E)$nMe6L{oBM zfy_sS)w08KAZQxa{}k`ei~;6;@he2a0yF3%<_a`hQnB*=E%JbP>fqG2e$vUILyx_E zdtY?bQ~Se{BH+e(TnxrTf%)X|=XahRDrKCJv!K2ui>)dRe7!wB#nP9!IGJ!|;at-x z=13bJ;@S5G2rwCt09%awim=cOxW49VfkfboxS@g-|DpgoI$+ifE+#tBn3rS8~7*!L`(Gk4pXxexSaBm}EROHM>=43$u_I&c;T2Jy+HJkgmqcx-1 z-PQldTOx8Wn3$NfQs&0ozF=A-$|S<8GPx`xlAfo9mMSVzFk=drZ)5n8v5%RZJI~rb z53EIP4pIfIz*fGXeg}+vOy{ww#SX*C${F0&=%Rifc1x>|Yrh>9z${c)sP8cKl|gn2 zqeZK3g2)FVSh3WU%t3(MqrW77Ob$z?5-2H;C9b6NaSe)BYK~B+V_~Js09-R?Y}-h^ z#j6AjMgyXfmvx2ql|eEO9GJnbH(~~j=-5#~&pKTb4uDM#I1Y`w?+5i@OMx|baIm2w zG1swdmcEC$Z5=PkB1>w)1_%e7U0f(EhI9VoFET+QW@Z$%ukr%b{1Mef`oj<6);hyQ87uT5};-hUvid=VB^{FO&dUaK!dgLYgfD5UY{V zpa_49!7OVUjAgDwhj}o{xYKWqOS_(KrvotfGU}7kU0}X&1P;0kAWmIs{QixOP>={5 zxj~CNqrVmJBy?Vpr_!s`+G5C0YnE_Ch0fGuLVOj=75aHpFN;0=GW#50xb(3lE6Zqer#~Urer{YFxTojy8_D9n+yt9RZo8+vH= z00;_g>EsoCq8=|%)}+6c6O}9w&gAjD2Re@|){UaPaEp&$2W(%Ro`%*{&;%grrVSvEm0WlJ<0)|e zX$6MPMOQ8)1YmPBZ?6Lft)77sg5l^@TK`wKzdU@n9%8?oNo^PSZabw0?XyA3jD zk8i#eT|-jm8JJfn?%n_pX9`JJ=R_einScF2%-8t?{>rN#Mi^SW-KEwqn>BMXz4945}nuQPM) ztY0GV0Q1W|oU$L*5OOew2<)dh7fJ2?H}ii^ONOc?6k}+B0=jJFb!CN4m>%7&m*;pc zXYpR;r4=&Yrl`Na|K`?~MHSk&e{u>FqM(l-VWJ`xjeGZB;Q&$mA=LXO5=v6IVGe2(^@gABn{9;HzgfsN0pAb23hnI zf$0~Mz=F4*sXyoW63MNm(V z1bL-&bZQNi7o_pd+tr#0X=tCN!ej|VB!6ngjKSA>O$cT^j(}#8!m$Oi8pA^jx5O(u 
zfcLCWP45=Hgiw(e<_2B%2u`*q|qbd5Q{GoI(yLj+XSWVD|eVLdQ_)sbzQW-MN>_ zkFC&;fr0lB_!Fv@QYMDB_P8KCsf;1{$BqocR;3ltfNco!1%(|_7l z!*B>ULAPTp1!s1hg|u0A$&#d~2ZrjQI<#S<&Elg^#7)J}xyFy3OL#6E1;7s(rH}o`XcI;;#wSEnp65XVKZZ83QwCh5Kr^ z*#+i3&a>El_4!;SQQKuus<)SBvK=dSH~*`4oor*09lb3yR52u<+!Ox@R|Zj&nIN zJ9R-kh<0>2^G0U;xCiMUL2^D`-d_b!JtOd5`ln-oJbT^sR`u+q3d{zy<7x-sE&E*T&b zYf424t1vF(OX*(==auOAeo^^z2;xu#NIXZbh`0eHq~jq_w8Z^^SVBi;_#c!bgU?Q@^#os@4< z)%2x&vPJ8+&;O;tzc)7Ig@lCe?7#n8=*vgw>F-xpc>l>?2X~BZ^1X^8y^c_9+?>7N zA=b-=9?#13oujF~Q!%4h#i5g^v-i@+CLy0TOHO<&ppK#y*kfdRj(Ca}8g_KHwA}mD+m@Lj8IGP$mwh9cS zgVOJHuV4vaCf$G30%Z17MJ-YsV&x^&m10NS@UKRwOJu(%A&~&+r)oxqsC6%Mmi7_= zE&zi&^S5wW0;vjXhL4pK9^-9_I|eWoT#o+|)5TlQG}@CFhC95KXk-z}QKSI9e{cTd z`$zUFpfZOMP|^1nR9`?7JSRj0pa}sfd1jIWhf+sN07EcenXjYG8`-?JZC{WOBTX~<`*-S3F1miJ>>eA z{q~DhAR)o1&RuPswJxOzH-HIXz;Sd*La-!WB#($dP6W|;WvoBMwj(1n%x!x2-sNbK z{qFs$MT-lA)^jY}hgiV-)DLb3za(2&+^gRj_RKY>*)ey{Ld~g^ZR2-C%h(#Ndxqh8XXFS(5FZQ7V8gb&Ggb; zHV5dAvx)fwijDnsv*{Pd$Ad47=CU*jWK@w((Bjstq@*M#!S~mDJDLpNquk5z>Nb8R zP|K^X355)-tz~BcedOKJs0ydHl?X~G`ZU z_-!gC%z08uPR_6C(hVPtW*1E)N|tap-Oqk!$A8xC@2?EDq>G{F85Aa86PXEW$W zUEk8n;!9fnRkEeR%cqZR^3skI4FvF^RmHW?p>;9I~rn0qNL&L;O6qrV7BLZEu{ChGc&s+fi{{i*Fv>fV-A@1_vAn_;i2 z*N)E%eAnTzml}CbUa7Az%84Xq;PIKK#g_$=0x^@CyN#Pd+#Q*dc@qw1S<)Z?oNTgx zW$8cp5oV}F$dgdMR@=DvB~5m-XwrnF58$j0j$I{dnGjbmw$87-qR9E!G-^Nf@n)hE z6)2oo$GzXrFLtiG8{@wKhqYeW^6kSy6+^nZ_QZnWN+BF58GbzHwt~6B!+K3u4pcfw zHdkSsUv2EGFqTae(Abl3P(v+DG|tvBq@EJ+u=lp*!(o(a5U?gz!ca03OX!MH<`Ok9~N(+AEqVic*Kz#|s8}hL5VQub!%e?`q@K>bO0Yb+13)?XpAO)~OwN zhj>>zCOuIokN1nD5k5AavQMQjDml^cc#L?-=IF=Q!K?F_c2d+lzRFo{ABD#AtIVZ~ z$ff{=W8oENtDE6GuIs`NK5I~DeqOJNMVZSnhfT)0!NVl}KNVn3`jna*@TI=ojy{N#OY>`pHvhj(_7#&F%#Qywsnyg;Qdkz3=)FzK?Kj*QAz?y+fRy~l35YBh99?`ZNr7jD82+M zRPDDyIPOF7UQS2MK}#IK0wDiFMW(|2PjJ#FHCGg;0Laq#C|Xp~U{0!aQgwS|1%I!p z=y=CY=Iv|S@nRw1rICeEheE?au|&S=73=yVU9^=k!w_{Vj1ILZO!n3pB?fBzm)PRN zZi8`j$o5E7X0%=;gs5~VtGzUjE?XAgPR0eE%`91o3lao$vlD|P3sAz<f)7hETEfI(ym2nZ<+kBkbW<*WYVdnPW?+slpZ#@5@ 
zdF)mGJDC?8d(8*#lg8OGbDBn54$CU(t*p?Y)D^G8%XqGbxh-I{#KwAmZ0MQ_pkW=a$Ee*!QmttD zCwHj~MT~>Mqd4ipfZy_Ws?f__t?g;kPC*0VXDww(v%>mT=!VvTqX- zENbIb>k2=hx7(EAl*(scW%5!2yvji(T5BpEVBdbsMlwO1 z*z@Yzrvqc&9xWIE z@-hhU1AVc4HOCb^H^E;oK4^1f|Gmch%( zLlOmuQs-LScOff1_n&{ue#K^$`RUE&$yRalknSY@>p$1=G%Y|);zY8L5s;vGb00A( zPwNJeOtlqz;d{@w21t8gc-@T^Z~xvrxoXynK;AhQ5ujAd0|N(f8JUk!ki^Nl!)p&- zpST~8Wv*M6n#k<2;6Vu4!XhExnXPI4D}C20Tre@v9J2n0Y;1#0sCTwPTO#{=7| z?w7}M4Bv0^HyWK;G{5Z`Oe6$v9QLwU-WMc>38AX9lJF*G-ECp@40Qo5nTsR5``bJW zNI6P6h#LhN=Y>>6Eyq)Q%0)w*H2hP?m%|sC238+YxT@9F?ni1HTU*@0lTtEQ&mO<{ zp;N*9n5rD!YBS(>g@l=&R@9TlNZk*eVi&Mey7ar~|9piQNv(VHW^``g^!A5g1BU-^ zL&I-}9X#3K1pdFP9W}xW=gH=Mgw}sIgXQ)HG8#G~1c$`e2D-s5 zN7A&NvW0y^#A&W$Z&RG$iMn=EowvU>E=Av`VC%ke6ZAK3QMoG;IzU}NP7Jba9231C zyn4dPmnrQ3IRS?5zoR)lu=5eFS(xehlSpSqfQR3@Gg+5;>rGqO$1rBZV?OVPLNy;3 z!)pUjr4}CN+Y>-(zJ$+dm=E~Eq?)(&{y!ynJD8YXPB3w8+qkeWuMx+*hD&U>#?hw5 zKpCHG@@;b=wC~q1%_sbdj=j(QKL7+CPxuE$|Eqk*FXrT~)%0^Lwd$bK(&(%#k(NY2feaZ2(1`_Xoshg~b zUIr3uTnb}GvZIaQ!3mGtc3Z$PH-3?^t%%M?Wp!b6!i^&W0n0dOK2*KT$Gd-JOsV(a ze{$GGudC&nrXO$RWdeMW_gSui7x2!JkGj4^Hb28)>OoR=El)QGsM@^~f)*tFpO8GR zes@o2t_baWbkdejy%G=80bT9$;FGFnDL~>ObE(D36l&N{_5_r&_`*liYxEQ+GyEHd zd=Iy4Obh{6zi_=zCGptBLW|J@u`C6qQV2r;rT`Inp2(m*U}iI;j`}=NNbepXj~lTs zF5bR|jk1}O$Oox5#&VnDsHE}(Q!vgJcWT|X4hIaHY*rOnC9=hQ)5Ix66v2xoqT)0s z5s?NFSs-e8lh<)+ZQp6zb&8yd*dpQ1d7T0l1CcNwrY}^w6t{Zr@rD0Ft zmmahCpLaA$we+w2qC5lpOJ~?ENgbznix&3h+mtE{+ZepO_RqfcSJOCZeZ>ko+~SPC zo{S$b_M-9cc>e@-cCF)|Ih0(%Oyf+G_R5=*O1#zd0F0>6nci7C-*|29N%QhYvPr>g z_Qi@~o6qACHR!FY{jZ|B(_i>X%8{>7o<3`%nG=sef9CFs?=3Ls-}(5D$Gkdu&28LL zrzWT=T}{;eO=!j8ZKjB4zV%pEIL5vCEnzNLt_+#oTx%U=!4%M|iYm!CG8V%7=@fh9 zzgfi;RDW#C8$mmsHSOcaf9rRU%7chCmyehIob#_yv>P5<4_nqMfGqqg z4J{k|f9ha;Kgwkupd-P)y;N=W_!zomFeTFCVvIxL9JazQOr4lKr5?-86t(-~3=xsP zaCEO1wGVXJfrv$K`^;aY@b36z<_V}ZKq#W<&JXBZ zivCFRW4mMl9c`%3Rp{GCSG;6)T1nPHUOm5eMH zu+FL{)N2_XPUJWAFE41B{8aCE|Jry9CtLp^dz5T3y9+>6q>dk4btED-tAh{}*DDdZ1Sq6X_LRgO zB_C&8?gGu}o)X~o5v_Fe4tN$YLxh}e9w4`8m69l!*342o7AlF@lPY3sFf&BJNxgo8 
zlYhU@A?NE58gRv#?W1^wT8n}6fx*A`qTx!qL#Q&?t!$YN>U()q6P39u)yIvl#O}Z?1M=ogfH(d}9WS@@!c_tHAhLs{79|vR!23^c zNDw>WB3^HV0=JIV`=-m(U>5$7g^*(tp zm?eg-lJ-*b$)mXjFuqE_Qp_TO(O2iW-<+iEJ z$N41+F%b4%L7oU_cvrzu!Z-ShE9UR-5;`!v*xWO3(F&9mi&$e8zfv&J`f_=7HP3jt z0}1S)&0)m2C3*Gk&F;-r1}!i95XUY(D>ir~ye9y&cJ6mPqbK$6FME4u>(OZyES+PI zMHv!uTd6a+!wjNJWm2oz{x-8_*^kmZ%3raf32AlmPszAd2I3wx^!(p((Rsww`=kWw zUjTW^^dj#G!T8gU9xiS6H9#}WJMls4J$4a4bcM+koHVnUIN0zWCNa7ylPBQ^4kpgp zYbOv;d=VIjE-zs$#`kbcO19JOr=uYIKY6s8wyGZ(m>FpIkp-V1t@t&-1KMQO#1wlG zYt6Y~;%7r@JEuGGtY;E+_md(JZNB(9S01ME+Ew36i2SJNAlx?rSeAptvS?S6kO4bo zf0P|MT_klsWXL>H_Q9U19J7M}x3KF*LF!aTvc{5ghjacR+3=)SpNS|41hSO31%tuf z6^)I0u_GqeRPd={wZb1mAQJA^QpvE6i(SpDb5q2Dg!l@1@^m*+-t1oAwHYloH;upO zSBO!QMLphY7Zd=-Ieq=h$!e&;x;Xdc-6dJ}cq(5p$Lltr9leJ(x$4-&Y>vsq!gfO$ z(Ml=dlF)wXS>8@PDmm(UitY~0`M$X1_?GivIL~sZetI=i_34n%dV9`#-`y<3V447h zN~UP&#gUu-B2j*K+lQUwkvryhgevd!MY+UPX=q6Qcz^6hB6Q(ZgNbDmVIQ*lkrG}G z#Z4zxgC2&rQoSCA+p9)}xEENLVakZdFAUq2^Xxv1%~gj@nP>G(>_WZ9^DA-uRhHL0 zMsy!x{se)@RRT@p3v)*2rcD_ielWh_zMcQHW$g|C$6nDBWKo-Jzzb2?D>O%UdXk;w zIDf5OZp(`dc`eV$j(vg5g2Oci1Ps?UF+7gUHeKz9H}X zhU_9`SQ~ z+a&Bdv8lUFkK7Qq0h8t-=Y?LYeLFu7lZQTI8kxpqWpWH|RtW?H`pF0xLlbZRL2=Crb+?uBx&M~)L0Qx)Wd(bt2(xC zf=|$(3#Iz`NgG2!BFxHHA-A{Jl|8>j#l&_Zt5!6s@|aGuKQQ!gJX(1ue7o4hcjfb_ z=aO}*ck1(fUu0joG2k$93TG7aV>pAI?ND!x2en_6(Q7Nnbpv$O5>?$ll_ z2RbRU?EN5)(SsJnjxnMjemW?Eyk+nqkddYbKzzLiGuiRQqCZJ1d@H#9%w;`lH2t>@ zR$?=;=GxpWW9HMGAYWJP`W8h8zPndy*wB8X4oqO^2gdA$mRwj zpT-ny5{_AqWs+(xQVFq^w5y=IG_LYONJZEI4{G!|d96}3rjfpp^E(y_4<}M-ELZIQ z@x_NRMNNob+xS)v z#N_oJkw6u{G3oY_ujlx%zNUM!>e+d5BE?2K70@9q>{i4i0!gAWZfRF@=J!?-^g3)e z=J)Sq%r6r8f_;}_Ty4Rc`c_O4r?zN_I9C64_ID2zY~GY4O@yZb=;X^K``2ztMyy)y z_UDcXs0{dB_`0>aWZ>B))9cr1&7Qt}%)~^!YQ>;jEK4dP7O+xikMr6r$K^?=#y85Q z?dcMtfd?%KT*lwTDNu>!o?UrOwf@(n_FTEUZj%urUf))n)d-H@*DLO;4m-aT9-%=c zQmLzL(h*#`5YSSqcMzb8)Aus)ea;GD&-s(CTRiR6CMN2~CLg+pkz4Kc-vebWXHC0m zQ9Wb@25Ah;Tm2Quu%WAf-r8Y{HKB}5ALB2s4i5m*Di-sRoJZ2khrriy?IcZK3`FJC}rVb zxs9yOZen(OeA!>bcF}7de_I~A`vG`P=ws*0Qo}8QON^P*SKxllYpt*76!ui38BpVk 
zPFoqczQV_hBl@WABBmLL9lfpb{kiT?$=&-F??YpQ8itGm@hG(?AYaMO2Y_&N3=Gwj z>inNHCOgh=8KW`cKW`tGSr+0O*ukG9)u0oTu19(VDwD@i96p$sPAw6j+;-)zqH~z@`PWY3 z^Sb!`Evk^YgvUJ|qw}7#v@OnDv62x?Mgjh-!F>1dpD%_2xd4uaOB#g;M2qKPj(&|M zDNGll$I`BQQXuB*H5ru}k95?UQp^ug*+EgaS;o_d`N*Okw5m~({gPvWSTn}d9!UH; zD|Hd${M__5UZ}(nmQ`)fQRt$x7ccz)pTyO>u2kSOhMcR@We>ILTU{=27e#r?s$Ht3 zo#i8IaYQY#YHwfq_9WvW3ER|iHCmj_8E}k zefA>%_ETvgwuVUz$?+A~eCqxexlN>+z6p@+o#k$ZZGgQ@im32n^i2sz-O)CYinIoF z+1|{jI@i^QZz(qch?xQ?>ZifU4s-?>+@vP6EX=Ct`(oifW_NRR^gm?==P5=grd4E z743I)X!t2!9W*rpA!3!}YLmQsE|!0RY`eb6Xw6-42${p(quyFTZdT=M$xAvMly}_vfNEB>8 zDcN*}56;+JjK^3WbBt!d!0);9r9jQ!Ufo9%oU!4|l@RCW)a#j>irML-B`&L${-YMM z05z+(TXW5!F_bp-Mg}4+Ue|#p&Wp;VctrY*p-&LWv#dHaTPuHxRZ`i9ar=CwA@(51Sd+(} z1{~Y@MTGl=Y&b8P0ygL&fXYYb^fg<@b+*u$4o&Mpvn7#%fMq8tvO4Z{qIvS#yI(;$ zynkqU9p4sy*5Z!zjA{mboh?PXFWO&mqqWcp0XAIXw4G_pJtm0t(4HJ8VzWRH3Xeyr zz44O*X9g($DmKph{i<00OYi5v86duD82yChOCY*0WW?+pTvZnU$FF%=LpC_P;w7>}@V<(^}b{nVEbkGo1&Tu1e} zLsurnguJS3lp1s2+KQ5bKkrwFNt@ZcO4I%$QTa!AU)gjgZ<*~6Zuu5IlI$xhy`S|6 zu1OGbn*ORt%q^G7Yxy)L)!Kb6JD;9)p5w}CQEM<+D0}gCMabNvSnsCcKhLB3W-5&3 z^ur1@F`0D-K5s~nghT8;=Rpn;sWRt5$QY0H=#5hL-4^jP&wQ74p&tqjCa$nv6~x!y z?Z5w4jvzb`f;srU_SjM{v^z$fE1O(wwf<=^bc8H6!69Kdj5j=%`MvuO#Mm`=bnzCr zDk4WEz|Y>6bbd{JIbGeSNwzXd z(odTHzhd0jl!tm)Qc^_P=uE(_bjSk{9)M_7DJl6?1kgl#ayn@l`Cu}IT8?$xG>Vv5 z2`Gm@1KLh+`YX~XFdwUs0ax$*4E{;`of-IKORu4V0njXB#{P}J4>FmjQWk_xJGQOl zH*j?P8%Abpa(V%Aeu|r$1l3D`G(Mr?jVKLY8U3^)@%MhS*P&CymfKqKd2>5=47VPC za=kO`b~AYm_l_!s|HA_~8Sal)kuET&d52#`ThFXIZY@qf0_6*>z{;1N;Np_Roe;;4 z|5O?+4^zoamu4zmwrP8(XuAwAI9M4j{KOd(N|5)7AspTV@m3Qyn@qhg2Tr+o$WO-4` z>pc4O;geCt44(EO-%T3(kmM`B;k%7Ta9yY|v;BtOujY&oVs(ujNFDaw(*xh?RCDpW zu+dCDFg~|wy+Lm#Uv&(Co0_)-lo=Gr;RSq8<>odLRmlEE9czcWxe*5x#W5-Fy@tE` zL>=fQk3NvR8TkG_Gze$E?-Z`q!qy6skga?S9L`X_Y ze;F>N=H@Mnrc}>{*!o0|8szbRlrM&)J2#Q|nZ2usEJi^_K@Cq{F7FK#HS~(@PPxm(pi)$@T9ACRJ4PM^=LRyMW+G>Y9j`q|*TL=6v zZIViHek&PS<9Jv5YHk&Imjonc#JzPeqTd%j3lz`Fyb)p2mXa}-qQwHP*Sr_BMEf3H zm8+7Mk_2b@Q>kG{0^0 
z>#qzbd4f|_YoauWBnUEm9x27ux<_`tI&HhIDH^>#-~zV<&XQkcvBl0*BU0pUbH%71 z(RxbrTdBMI2GtHgt`ZXvNH!cU7D<_Pex4lK2(Q_Aj{))8+$*W|JvTo!7z8a^!?=mmZAvAXJLMIL0J-tPmzEH zVOQ{KA~0~coXe`~EkT}-pWl~PAj745_&Ox?w95zKV{X##$?iM0(pIs{&a{>BB*O03 zTxufJ5Uli2mI!i3p@4>Drz~7A=YF?4zd+vvex2hrM6}tw&$%SvLy9yBc{4hznw5*u*<&05P+;;y zme=uJm_{l_+$pfokXFO*CalRSYM%d73*UR{>hnVHT>@~eoJ$_B5W47K#QC1PDIw^L z7h6fIJftu@avJAZzX!1Yt)PJfE>2M8!~x6)e+u zP9;}q8*S+)T8|mMK$w5NJmD5E!Xq2qLBJ}WS^24Px-~;uS62Z0(Ppq<&J}jjzcsy% z`X=8&s6*yYL4Vahq#uiQ$rt-r|8Zmo_})m0VD^O@6ZZ;ticANpU0|e01TW@)JJtK} z#?$?Cak=p8ard3+DvCA$V!_JJ*CJQ2E$it*3GVQXZ=|b`63e;K?5+yyE*77N$E=7AIqsHLf_*HUGsQZrV z>D)GS12SSLG&xy__XwZ8+9X%n^0|O(RC#3K;8^cB-gH#vkUzg!?J0$sxp|43ZUx?l zd7OVccAY_m2(qFFOKjm=D)=s71F%5*U3Q4Vko}H*;a=62TVQl7e0fKF)0w zzOfj;=o`MIa2lVe831p%Ed9u{{)RS(C%Rq4bkQFY^CR1ZBLab0HwQAG7u%;>JOWD9 z>GeE!^tD7Cm&jC!bQayU$XuuhK0X7U@kAyXuL*4zLx^lNUD})qC3lxO8kXQ5w^`P! zo4&P(AO7uItL2E3O^%t!F^;d!K&vv9XrtO-1a$g`N+P@VjP;hK_fdWfHU(}`eFVZU z`4l0NVp3tOw)~O&KB$1&m^WwU0!6hiP-iI544tEF;z5j8#KlNB?y)h zG1;#a5klpJF(XsmRb>o~wFGk`mo8mOcKUOc7!M{Wl|#CPXh0;tNm zJ`c0T{FJ|Ah_0aYH!KQIWg8xO)TsivZ8EeU?V53k%Lo;r=OslQLQ}?FV3x|Z6aE|F zirY1{2%o2A&+#KiDp#@?CNUxzifGAul%ifBCO?L915FbWF4;{{E@q=}YOu7G#vW3bJkcp|dF zJj*M^070DT5Fkw9*vMNd1l;7ycUa!sjA_RSQTP=wXCLb1dg~$+@mVdkNjku53wFHp z)BWg~8ttD4zyh55>Fq0tFaW?K%lv9R#UA9Ke2uO%N}-R!xlsj3Ld-b5AFz<-YTfor zl>nPWdd?8Qkb%}&M{eYNSAnGF$p<#9+mM#K7tr=AvmGU>1AD>?PbMEBLDH{(W?zfm zY~Meg){Ln?i`)N3<+Lf`Md6ojsKpMcV0oK!AWB?3YgsnC)>)>-}6CguUlRe zrIVd6_-goUeLb5`^WM)1dXlKAUDarX+fu~(mDf{Ma=gI_p$a-0NvaA(;-?d=bLc`PtMph`hob&N0HpDepxIG^)k z4lA-%AfD-Rd>b-fH;J1WitG6vI~)1p)8t!FlhN|uk=n%)?xz+(fyTHOGsdzqU4PTE zvlDUqJdGJ#W{k*Gc&eLC6Q(v)D1$WK1iXq4PHk`pNE#3`leg0?UOy*#Z zaGuezF=1n?F~~^b-pJ==zgu)b0e9NK2zC?VNF0l&N=Z*~4kMA1)!501nVo`L)%1 zuI83WQWr0S&E;ttm#@|E5Wt|TZ;Plc5k_k0NGA5|ea+ze$)=ulN8(dnJg< zcDdp_(?*-v47UUFOgsDf3Fh=i$y2JR5{KrUzer3^&e6NWm0PWe4(l41nQBw(V+gmw z#lAC_ZsJJjv#q6(qwVwNP1io-hJgguYyld`&boPBS`Fl;NV2^A-`=~KNbw5<zRY` 
zHDXgUIw`e?ANBOyO{YUxD;PP4dMm6oANZU^gA+ys?NMas@ZDLP$Gdnu#vUkDWaMhi z7^*CDZqV1ZtfD@aLTlz;o3`)XEWP*6xBbGZ+yLNxd0ZZW2mJ;UI?2(Mv`cIwr-*^`XYja-QqGLQS$RWOg7q_;3yL@-~LkVGE03;O)Q2963{YJ z4%0%rzmF1qY64)bPh-mFvxPOl>ZOD~CjWF?vggGOlJ1npHH%V6s2c9lC>X}f`8RaJ18dNke=38){|>HRRr_PvQzov>Eds!YEVkaYmsu~ zd*`*{Z~rDzCsLTgkzcRprB<`OTGxxh?I_^RqFH95o6su;^vrn%$wp9WM_bZ>qz^4;nrbN5D8}{eX{c)bo9cJJJFLl}u44vgofD;_Sl!4c46 zsF!v08A!&+IpzseJ4mvChojH5Et|h|7n0~yA}gQO=#$5hRd-e+XBt>6fjvh%PKe?E z`JDkuLd^sDP5r3(KF$RSLwg≦@y=`qV5Fx0V@7gSti)B3lQtiU{GIqHeC9TGaHB`7=*=IA zD6n3#(Q6%X)akmX75ibK(^B)${d2Jo?mp`yx*H)!y2(P4sTG8)~u5J_Y_S)9EmV#jz&$< zoDRfrEhq>+n^h_RWvh6NSh^lh*FpPRckH>Zqzt?^uYV5yKnClOdz>FQFf$E80m*-j z^8&1)`yQVyd^L-}jn6g8|%~_6qi1CeU=eh5IZK#7*jqtN)AW!=5id!3X_+&$e2UoqS z9|-Xsp*lZsj}2#!rDP1bU%?r2HO7nq`NtYgKVX zg8^{88w~AWu^*V89u1Vn1SyX}g_w$P@XPXUk^>t)t^SNcsyrls& z%qYR_;!XS?3*nsVEbeo&(i_h(X~*A!uFm|f@{)nRqOt|qqStegRs?~P_Lx4W+YqPw zi?l;YobfYd>lFeni&gM4US9B9`L(xBRlAKF!u;=nK40cTi7i@?B3W&&#KI=(w^x_)-(!?eZtj0Cs^ zI@VpH0K~x3gx5m;k+=o`KR}s`onyq-!Zs&z}aci$Gq^#sUx zJ*UOamY8hhiE8{u-6f8hq`jj|XE|d8C|1=37$$07^}u^07yMk;dpJ!% zGr;8_l~aClX4bh4&b7Xqi%s3COC7LrOA6IBw+3cAyY}7P%!Em0?-{utaRWV7Na(XU z?=9nvVGrj!y z=U#Y^S*gz-N^QB9RcBaPQujcXMC00yb&NmC=684N9e~rh1dN^`Yk7Q}TDf~JZJfN6 zp^{s;daMvHaW(_#&QwevFZ_Xv6T$e~HtAl_T6mPGuuJ>mte@QegpoQH)Ria%rFs%n4GW=kKCD{CM8;(M~H2LE;Er%x$! 
z%^v%)pmB@{$L53a&~9pZ^Lx_{_-{Z%3Sjz$v_hCXYovK!EWM@8RAt9^ckG(kf3m1` zCi+37PVXdpp*O}9J|TYI=e1FGzST&Sp`;TG>^1!Y5zM0@-T5Zpp?;3E7Y8tTAfm_H zC|>hk-RiBZ{Edw4Y$)XULvj|TK1#12R|-1miqaCO0q%{^P|6S8IZ)Y4zvE)wqr~Km z{RY$It{|Z#^KatsJeO^g^A-aJ3eDj)W8ysX$tN`3NLQN)QqUQCrH3~JnKfwn%5Exa zuLkhn*)ZNa;dmfG#djh0JL zV`ls%(q0-bH-amyiEuEr3XwRVBG%FaM8Xg}fg*|L=H+?CHxj9wX3Q_Sm0t;dmywZ| z3IA^_0KA#i{6dWqQVEIkSGI<80e7ptRP4hh+c^>)$R61kfURQW28JhC$vhM_V%?0y%A6xGaYg{UAWs2zZMv4De`g|(U4~)EjhZZC) z`}018|B_YU33`XPtz{id(K|HqI950m*daGpxq>qmV2AD~33E|s=TJgHcAFBEtFB(Z z;!|n#Q8}N#_eTB?yRP*H_gxAnen;hRVLUnwoZnx8yH%dP3w}9&m99Y&VukacrZWeA zL`2Y|{!y!8#{=Q3^L)m6f%4t*^x9cixx=wN9~^#{sby5Wa+qwxud}!n?u84`9~sYx z72k4j=OEqtI7wOp{?bZBc8LSQ8|HY3hkyR-Uf-FG3 zIc^G?VRy6IHOJ2AgQKSeh*yK?+om~S2^9FA zQy(Ao-3d79s`uoIh-tl|OrnzsdAri?Z+UfUOER#FP>pD8=X#ta(>Vvos?1D@yawQQ z4_|nYO$J(9q5H4~jq1ScR7C zldk)1rUPYn#ov-Znz-Qe8=%`v>t3wI|p6S=AGy z{_JVGkxp*KoJnA#GPwpT+a;FRH_A75PXwKx_OpEui&rfv?ZiPW`(an^8a&0p1eJn( z^-xiTAYV@51#bsVC`%l!N0pw`OMD^BXuOH$b4{|m)SGQ>nTY01W#*3g;{#UD%|C^| z0T(o3|KV&zqX~v+vvj8wupY-Gf}bw@z>*b;WBxcw1MkKhPWwm~5EWi>&-G<90V073 zV<;c80Te(Z`l*utYt3;Gq41nr=yLDXWNJq1`6cfqvcj~*u8rdXs1A@OCiXvPqhlJK zsO+u2{9{dRp?^#jvPZe|gU>6j;*8pCadoI*-Rjhs-#1k>x3tFUDY+i-M8-B(wH!QgZ1klLFs;HYV9erVAv(p7iixKYh7@{2B*4!SZgaQd9oc?w7!S zs_rGi(?l&+)Lyv{?@);Y%E8kpgPrPVftSH6<)S2VSqgq@`8JCQAnUp5n!nG{f)GP1 z~)2ClCfsts0=x{KZv$NL-fQ&e7}qBOodx02b0NSP~hl$9ACi&204V!82M)_>RpL zAQi{18C;dXN+!Top^4RK*7$^iwm2B+ZlJBlZPfXv@-HqK2*5*s6g<5|AyYb8uWvjz zF?8(?3+eTsl56_tI?8X8W zwwWfRTRgybYNc006|4X=q^((gi~-hs{w7bqhzU4!&aPobEwW2>1+`!`?(+vH!H5PL-*g!#N7wJNhu&JL<-0cTsCf(M z_<2iHx9`d6w{h4O!+iuvfx|0mZw+REB=<$m5>)a5rzv6``a1*7cDCebtWJP&&2_>C zO3;i8SZn}zJ34F^q?e)k-q^+eAY-n2Omzv~Y99J*XcCN>)$xk8*cbrf=eNvk*IEk& z5Zcju}Nlln=i!U-_4xC`s18T`4B;Y3^?V4zhz1villqtSk&m}AN!m{ z2(9>6MhV-0E$xPIYw)$eExV#Dj~h!{vxfc#EpznPA!Ur9q;HdBg8n@=HNZAH*J{>x zh(@#MvzNbozW^u1GC54oiM9y{ZHin3fH&1GPVUiHMz3~LsHVW{JZI7G_T$b1pP6hm zZ&qx_-L=SNhc&4ICHWLE^%A{JsSUGv@kx9zHf#?e z&MJ6fZh2X4$H~UlahF89URNE*Upl$`X6m_c<+qSr0l3>v!Tsu 
zdSxN}U|=lEUbP?;ybp^oYPFR+kA4P!WHdv=1_21@b z8O6x?xo(L2Z+yKk#Q(D&e$4A;Vx-z+R^5!%kQmEDhWVd#x97C$M$Z2D_-_EERTZEd z#Nj>p$RW!*uu;qGS%EA6M zPzmk#um7a)SUHR%sfs$oDVQPavqic!dy8zh9u6@xOrH;tkSc?b?J*luWbJm;Nqw); zQs}-ri)wh|L3cSJAEp7ok;|{izDWa!h(ML<^!EMFNx2#2C896>O6^al4y<-m z1Oqg~pw0$C#0u?E#4YqW(M`PRO!;AJY2efjE)uit0t$b;Rg)4?~tJgHw`XA5a)t!llK`=sLJt>~40$SET4Uu7rA6OehNWP*~xC>Z`IINEw z+Jw`MSZ#X-hgD>obxVaS!*ty z95FLSQx+W8$~y@ylZb`lo6u*YL^M>r3+UW7uHUKCmcWXQlhl(mbwL&3hybuj2!6!C z$dsAMvm6x0dukdUj2kNdA(eXcN9@l z*_2*0Hao(V_DAP~u6@&!K7#SuiL<6jOf|{SodZ6$ifh!ZYiNF(4334H@LMJ#MZa!6 zuQ!7@M4(LO958?|MTq;CuXSoTV^)2Il_55)tP)S+m(YAEl=3QFtY_;ywe_n?bb&ZN zX+VuKb&FZv#>|DoEP8244d&JW2&2O;4kZWy@T@65E;jA^G~mr_-QB-R#scR|h^0oW z&I8b41R?J|OiY(dAq>n?1>l&CW0v!IXwB6qIYgW$mQpxU2M51;kq*q94?~tWqA)SY zfCqS~X%NDe;+1p&6%4p9GYo)^7fW)Yu|3rn86s_QEU&F=eL6D;t%qbrN&qTKhFynW7s2J& z6~Kw(&;;wPUP%=p!>l{bL`q9&tD}}!B$CgwukxPdB~i-cs=H*f9MI-;IFDER7`#Q; zKRj(V|4`DNNFEtyzA&#aA72*Hgq>={!wa&6X7#nw8A} zzT5)g6PlF}yodqz(!15>@jnNSHlYm|-s^m4rNRRjCcN1{_kHA$unwWlmz8xaPp_QS z=H}!cxv}|nvz62kiGdB03YVqt@y}`ulH^K`FL+9EvK#vtMDUjx19f6-nzC`8em(q` zS-`sdb>gJg#2bpPs~PXEcgg$g{-J05-kv$5_|w6v;fbu?f8%hUI%|-LROw51?s*HZ zH8bs0jRA(i*#K6oWR3=~#Kfls{}%}Wvs7G`IE1gH-unapL-4KQU=9KXs0kO(yiATG zXg?e=QEB>QxuQ73+}7E3GU!k)U`nfV zf~>GoCoc(MO5xL<5(H2Bk6=+r2Ne8i$&*@Pk=6nOK#DENp_!KN8Wj*OYhKh>zF$hd z^ysr$B9h;fD`i!I2B$G5Q0?dH%Nan_=%H%FQ;WRyh0OYBWN$T_dM`ua?rIJt;8YDG z(g%$GN7e!7-GIp#|LLowFZ!4}ywsg2T7vm|A2#e8x3h8HooovopV|+JN{3lW=ZFHo z2S3Ua_(ztZqt`akf%;NK?i}4n4f?tziNA}+mQ#cjtc4uT25*#lr;h(Pv-Fo1;xg!E zkFmVttSD0u^dcD7p4BWJY!0wM8>p3B7#$Khm}fXMFj(-~-!1vXQ`yBINs#2~E@rWf zH`?CDrU_P?@y0R5W4Ox!dwl$w;0jszx69Rq z+gy)cJS6KC?Kb0&%4+v2yeA-qL!-fP^^(estImgF4`6`-4VoO0y_6r8TyP?JwrRLC z{P~}BHB`Ic9wn^r#*&$T9=WP_D{c68`kM37 zdYR$z;?nxmV!oKb1v`cr4xr)`W#Q0ZOkD_Yggj2?CH~AG;184LcKH?EnVkEcza1T5 z(qUnbwkcX13e`+FQiEh10_-8ti_@=k5O3#<%!!)iYhz+dnipk4{dz`Z0k%Co}j9l2KN1_-D#i z!;dF7Qx1|bmd+f2J+XW zTRy>L+Q;|xqvpYH=TYc>&db&d=2Mha<9;5L)$5jDLs=+z)!KWI2Qm!?@w{|D@o^X! 
zo>g_%{*6??uC>*nOeH2T>%|edj7B^4SCkwbe~lH3)0_3$yPr;xr1d9EB8M{z`S4-R zx8;pJ*6ZPk6r*{K%JPD;Fg<;ZtozGPo);vnZGsHVpvjV!U!?HGhxclr$gYx%j!{qU zGRn(lg)Ev3Nz0rbQcXa%Df+x2?MH0miJp|V^^2g~@8Yf)iMP9LhB83uBz7P7IcySW zZsdw2SuH5|YY3v-qqreY6m@2E1$YEb$&Sd>hl)r3LDe}d>|~OW+b@ib&O7fV$eQoY z$OpAA%ju?gcp?w~6noO8eIteRAA(A@ybE4h^jGj+3doU{JVH>Ja(T7W{sSrN867+1 z^94y!ok_u^{%`z^Ec`3dt{HLdrl%(JW!R5Iaj-6&50NYIzxy3P?PW_Gifim z(03XYnEy6AsZ(Es7j@0RInmw2$*{$F6L2N`&fv{aRb9I3Lv#K8NoP}!qI}9*JaYwfDm@? ze_X(eQA;bG^h`rGx{Wx9j@B-;nLCq*%Pwx=$4l}l7@DMJ=0||IELP9mZ3=l?oMW7U77*)pQ^yP) zgrOqkN>x>W@(rFPDE*h8NPiVnA%m93nQv4cItwffN!os^a)!$vZ_aUhu4J;tHFui* z*H*xk!$8fBmoO^cCk9zq+rfWY!0m0}&b_FFHdH*kGFTw4-Sj7$vXtI6mTQ-DpVkCV z3+GdEFzerCKy)bGe-+ZD<64-A@C1JhP4fHTJn0*`Z$-QTPh8jnUB!5oT-))c=r=ox z_1;8HQNxAHul;-?lKI*Hc=>X|?#M~v^&oXSGECT-64KIZcm^)Bl$G3wfH0-itiGEX zWZgJiL0)0Nx?4!Qoozt#wLZ&(YT_&eS>p#ff7^O6$gY4M(R)KH*&D)>)YUdPnm@P6 zm3_U^yqx6`TVMbL&&DP0JimCRUuHZgU{SuTGFJW|c0g@6ph4JZr6DV_lMsU&oewf` z6Peji|8tUm}sY)!S1=b3-RcS5EBN7cdrKo5SU|Oz}Cz?2yERqgglvZny3le zc|H81lB}>1RsErIWlcrR+E&k>qK=gwimT^eH(ghK^|T4g<31&JO^p|4Y?Q5#7Gf{q zD)>_Y4F(E|lZ6?F8Z6(3k^+t|ziRo(8TKVvuVTW+bQIm0JhFmFy=PiCuT*{VevvhE z?P|L&?O$j9I(;hx2*aj>bLoFv7<6m5)HB{Q;*3EfK(_0~0Whd}-61c35wAeuoRhC4 z3r#?PH8u$MUQx&^etS!g&zlYM`i6@R{MWBddK37D*b(Z737bckpM@_?J`BS2fDop3 zgY?1*Ny(mlP!g>G+4+U}ua?ZrA zAcVREh!>U~zs;eK2$Kb)fSLM4l}>JuAa5*6x1pZ_VD7$fgIBNVx8{aXpo&%HHS z&BC=tCK<1Jce?RAPVN_qQ{Yt)awQV=k+cq<8uo!aQQFl0m0-KjT?UL)u zulMks8DoBuCM##NMypdfUHpP2`lIGnf4xJ{n6Msva&f0^K)8Vs0B*Dhh1=!t7&uK^ zs{b_}z-c^0I^!cK0fhz%Bj?vww}g*Rbl@nim5ZeSDP3 zCB?Slb+w;4BRks%inJb*qtn^?F=%3;m5q z-aEHLL?gLlTU&z~lNs{k*uW-}RHIx*-|XGKg^Y}B9UNEqRvE=*0qguMGdd{GnUmlJ ze<8gT+`cUPbDy*~m1m!jZeN|w3nf{VMB~!rH%qjy_Ri^72Wp+vLAm{iQj!n4-NV~` zp)?7K)$VuJJM^(xth<@AL|9P~%9Wrfq%a<4ts!RH{bG!B27iqk8n~dN4=-u*G!cK)NlLbFZe|i?;=R zwB18NBJ*h+2Rks$Ntozp^y|W9?0o??&=7e<;BK}z6|ZK7j;<`avD;SE6N5P6&wYRE zm+f&Kjnkwln(iAsg7w{e4b=R|ALy9nh9cB0$1E3CAR}bmL$5$GPW~iNm=8i$EoU-! 
zRyQQ0*xb})kn<6rP;8)YM)M`zW&!7(#)jyFRHFxr{T)CZLI5~Cjz5TDE>nnv0l#~=_==q5QcFiSw{)8Bx2F)uYQ+=eg zP-KQX5I34^T87hmPTq2OX)9!-ZR|j}`?k2Y?d>YAAUbZ#%1{tku+~Te+qBw`gXNN$ zSV18SddBWwtp$|Wo|~u*QXmzdc~bc>kk_jaU!Z{!NafAgQP9T;ELk_wf=(D{+ug-Y zKWPZ3^EIkC#Ds9P6mhtFk(!FpGWOH&A@}?_K^?ecbY^1Ykf)i{0VI|NZwStroQtH8 zE|KxCK}c?&cKAgXN#-D;$#mr*Ki`SFB+r{By$^H8(-Q2tLsZNGk2LkZgvZcdmRy!e zV<2T4nh55sg5y6^f@-dKhP31@M;10L*5XppAQ*_(s%&;R)Nd}#KS#AFn)cvoeO??| z9L{5P0x=msO3AsBSc_PP9fKz=UWu2{&(_G2DSRik0?EgVOH%{~D;fk~wPzXBIKu#K zG!R}+kx7#QQj=qU5XZR+9?TRj)B`F7Z~05}N2g5)xe-`v`^)tj478U`c${4J{4qJ- zEOinXKnd*E`xdGIy!#8P_|FACC4*#c4un$g;n7#VQ5B)Fk|GXEeZ7Y}9D2X$h3j~y z{VG1i8|v9c$eU(yRwC#nB{^B&s%Nt%tLun?*Q@|}2g3sz=GbVU$}rF1vZ^6npofHU z-Pab}uikBah}HbiaYWvDV-alttfLe zUdw9Q;+sxj<(t^@E4E=FjfKJV94t%41I_q3Ms3rT8zxOzrJ(3UOM-_hyKGPY<4xk9 znk#r$*ES<>$d~LyxI+LktNZTjmtsfXh^LOEJeS8t=4v?z#3v(O*!+c&9lc~d06j)p zP%M#vpGT)8*ii;C0a)V`zCo3cQOct@OE6J`t2hZHK<$oN!D8-wUK~!6ku7?H7gxes zc^bzIp9Oy5ai;_=TZa?NL?3=hW5m`?4xD8Oj89QL@;j>)CSXv3R0HN|_nDHXiSos5 zl6?jONl|&Mjq7T+cwR%++(3{T>ndTR8YcMN^LWcP*9D=<#)9Y=61cnov5z$2*na*n zrpS=o(BeG0%jYXp9!-k-;V@%~F1AcauO3C&rz;e9{m*wNtpt;sttKy5({ey}oMG#~3NRfc|V zcRc|D74RgsGTq0F$dT9FKV?370eOc7Eai8ghO&dbe!@R7K{>c~OjG)03_O4$NmGm8 zgn@wJ|47s9TU@yF(o)j(-c5RP3U0Obgb^P`f;dMNCv;u(18XpP1brI`nZMZa9M%RV z*$O}v(b9z9bh_kRJeNF&aHpj*kyax`JEemkGvCo((CqU_MQ!Xr7--IJGpA*4$uXs4 z3pkZOJBpK)g95-9%Zyg4!@f&f5sRD90cop6X)`S>k$D1|PUZW)$a9HapWk{XCi*A8 zPTFlqEDK z;QrzbG8V*qH&b#~5JuE)Qa2@Wup!72We!q4%y7}}T;MYJXAzx>{5t6Bz|jjSv3jW> zTyG`tzu$o}6W?M%l^aeTBee2CRcb>difa8VpH}m%>u(!2iu=$g-HqUp8QyoOp6Vi% z5jjH@B2k#mWHD45h)tbfP=Hni!iU$S;%XLG;eqy_F?OWi-34_cTh*-mvF0aNXf;$r zYe;SsS{YQr;;%t)j27>?&iodGJ5KKTy>7K!z7Q1_vgNuD42cd^<|d-EQT2ljPNwnQ zS4y&C=~SlG7Lr?}qm?u?c=C%7)EFSO^Su@qzzlrLY9z$rTHY)X2?Zg^;PSgALx{Q( zYi)iH4o1Po?7vk}PzpvWvxnvi1U?020sfH}?p%e51LNd%+2+@AMamtk5kAb|!lQn& zUh5|D$G-s`G|Dkq2+JLldHZ3IZjb!{D>{41=k4?NcUIxC_15$d@d=YRKxNR(GNUIX zp2UN1#!m%kj1^(wX;9((;4TBr8^xQ~%ASfXa74qFF6d@-PbgK4*(fGtTunr|#M-j4 
z?-=bC6g+&tCirET5et)RY2U;3&VPfu&qoh=Bmo^%ZuBhYL+GbQf~qSC7+`&ZyRH#6 zkgfbjd(IRPcCFB-bZm??C$4IWc&np0ZJYDX+gk%qYtPBQ41nmv zar9RaQh;+SZA5!jQ-_gTDU16+s8 zLFl>B@#*5lCw3IEr%{QuWEgZmM-p%Tl>HfLOqM!j+JSdplwKGZuz9ff(zVCgj{tJ2K#p`ZVbpr^z7Q*@wt(;3~x@hsuwDPm?v z0DE&pR_q)&jA^~8@zEZ?GUj<#k8dm22W9RWo`Y3PABq)5xo%TrLtCfTsG ze+=?qS_AoQy1=L4y&r}nxQ*|8=U$ZTsMlY4o9n7>oKp6n4K?jS#LczM4V=6uD<$~f zu;eZVPY^?Gnz9Bo(JtsQ$gA`O8W}kBe!j_Sd+73(jd?vQl85!Rj z0u;iP1)W~lLsv)6V@0Cv`tKq|y*->52@q`amLtB5(>$t_5r1&7?IDkcXc1?+EntEY zY&elqeT*XNK%A~5Va2U;=b?XbP{zZD8U?ED(=Af$33BPr+;3jHPj2&-c5`mti&&9` zRQXW$VbMkJh39}#F3UiSr_3TV4;Q!K2Gg+196miX_IT||!ZZxU_9O$8*X#CLv)6z4 zHjrcpBZ$#mOEnt@$h+HcGCt@gC)VVD`lb-BwmNUYsL7+VqL)`9m2 zKtngSw0XEM2}SJ4Dk9TEf_Kj+U3ovN*8axD!`XwZZt+b|RknoK16~hxU18SMO3fs> zQ=?_WFXO!Y4W6$kK!o%3Onc%`^oh37eJBozXxh;_1&LVyu5@-8f(}wz@|(PYPeU;U z>sWfy;qq=e?qD5fqwv3WYWd(NTr778Xu138W6Cf{nZv{o-j|13ZDvPDL4Kn5U#yDs zmi@l%K1S-fo~QTvyaLlb{9ofv#iK6*j_V~7RPS3)3Q;iaajr%L&nM@;U-J^m^b%b_KO5T5o@i{o?Pv>w`qdP*JiKrA_mIYP|C z*K1i3+Bd0?K2-AB>etgNo3Q9UeAsO4@y4lScs+J|M_u~J()rtCg-WZ<=A@$5D?f|u zoeDg&4Jy~1M>F->7n1CTmhD&;x|5mxt5$Jka~#&R)|h9#s=rwgVI2SjTj^jQ(&-63 zT!a8q1wHg|=bwkXgXYiG)y&RkW%Rc_oTrS)7A7fCAGDNn{~hY<0cD*x;oF>|NA0=G zLdb5*=1UB{bXee+v9D#&S~wP*GAQgD+3MYaKQ@;0de_5`=WJH0Zt+0r98d6P?JjKX%F&AIq^8eQsJ@vOFK z+Vb5V)#w(tEO;*SgDJlP+)Fe+LW&==)ri$hMYTN_yv-wEw(VHuSiabwsWR7@nlfnZ zdDJT#5tI7vkBezt)_ey`QX20!dp@F7EkMWre4WQ?5{t&M{9Xx;9 zM3FBQTRfd?*{C-ORaoZux>J=G%|7>+)g3KevVNGrLx)}=12mtg_y2-~E(VS@hd@DM z^;rF-Juho0&-+^wPqUs(N}mx4ezll%0;=PaE=j zuQVVce;e|$cOZnRNWBbZ_eU*L*)P(2N7Cmat6 zdd{}JoG+|2rlj=;bZz?7eJ#m7JleF#&~}2Q>5<|5o?{@pM?>K zc86n;uhk#HOgWiBlfcQhgYn*|d6GRTzrE*#7x8j4F8@;K{QACHkpYYfMe)50JjO+F~zI zO>M$>QC1*$$Ik9gqdnzxgwZ_HU3_t1n9-BY1S+nf*1wGpro%o|!J-=cA< z*NV4-UMdYznp`4YKO^K4Q8&Jn{hz$%DJ#^or;@tCCY)SZy?bzH0_*x$JI(WNravA`Pfj&pS9z>7<*{+wbEWuV z_>R2Gh3$%OTNkyC9GsajMxhqiU7gu@$+ zgre_Qjb6 z1SN&6@HD&AjRZy-1+tvjDrCB=!{yc@jcY984x?WmUDdFdUm98L;34XC5)^p$5(cUa z4kXH7R$*ma;n{JVKy$RHelaJr(RJ*8)|v)}%}rFO+2<(R4t2W~Mr0+{i?3&icrX^K 
zj|Huu))teIm;8K;G!2VistKh!pG${|P!i%IUjl=g3jw7H?z20uo->=jk=+2u@F9c@ z0T5;QC>4ldK6o5aihz+?o7w&3RI{+^+S4@5d>kOw!y$0}NHnh}oUZ=Kd+uE?>xBBr z@d=P6xSF>51>ISYCu>no>qkcJ2oIlf8uu?hqO|lL3Odv%@5>TNWab_3&KI}P$DGun z9lH3+4?M=KrK6dNq{pl_hDHWJl4$?^BZ*qirfzE+M1 zP^X+Rp8V9L(=LS``n6r<j(>}~P!>CUgzUXSh4 z&>HHcvzw$~_@|A6CY_osRs*&FF0tBu+Y-nSZ3dpUOx zo}$69C+Wt|J86F4NupX(5+*@`Jn1WHes>t)>dF*Rff^_oc)N787l32{t)w_hL(e+V z_!rR|yH>oxM>wei7TmHh4$Jm$wZ{=7> z(3xw%(%Lbo=`mPeX6esp%#-mtI+nkE&m>`fIF^j3qLOUJrB{kdJ-_Eu**^E6!OiP2(amV^>V}pumlWss z+}cJvZPG}8wT9%nkMEF3I~%A1_w6!xlAY`p8Om29!xUMrqmVew!=+n5%oA&~R5xK4 z%2N?_?_dpVqr!~6kI46aWhn8y5J68A^IL4*X)gZy=6mDHrDYym)VJ>LaXTg~a+Ugu zEBjjl4?2^D8y9Q}N3JJOV(XIpnyf|0F_Y+5Srvb{A55f|zhFU!R}~J6SewjLLE!3_ zxF*!}M@d)I@H7^*T*scQ5xP$4v+S}s&!ziNfTI-lDHG>Oas1};sH|?`L!OSrM8g{8 zxC?<}q`j3tR>rd{qjsY5@IFV4sW<;3b`-_(WpBqi5`G(2lQDOAfgrD8>||a=_71^Kl-vsY*Jg0> zLN1h!yO}&@H3o3Ro_{5oSI*;ujV>$0{1;;szlAnsfxELT7vD|pyBpD7$ZJWIHqDjr zYx?#k(Q6itX?QKKrWakUpPS_}Ly$1vJz>mp?J(S@|q4#?d>3dGh} z{tFvaQRR@z2WBTpK6w|Zw4z<>3)&8yAPs#+TNEvvSC>fIQ9+sSKY$6>7N z-_awN4EwAKbWSW*!z4SU_7WB)k(p7!tFC+*Z zsf=2n?98Wqo3F((Z-o*`do@b@{#3uLzg1D%BW6M<_3LGG0JNK%w+?6;L6!XXi|nzs zI__9Dys^)8At$^t>hUpemo7%X)FOAB@2WkKPVi$YY-|t@z`Aqa{egF)E`vRUexsXn zNdb5D!eNbmzTrIw7YHa`BPqIYvG8Iv()5Z_`b#Y}^lMymx%ErR4zYfLdh#khxFDso zX&HU2+MlX+x*7eXIOj5rNN-hAH~6a^^Z|6BCmad8aW&?1&_xxY=*WcWtDKAECI$V` z|L8h}oCn3AYGT8W$y-__8i@QgHBk%Ra~B!u+kKom|N1 zbwjmUl)}i>%+z_-q^=HTL_Ld2gOPpR2d<9gOQp73teTv^hJ>$MyadU2#vEG;aHd;P zLLLMxI!@L{l9f}K0=S5lxgkI!`6Om>U}0#lr$m%%J}{I$zCkPeila6w4lv*p%8`qN z6PjxcD6~9jmU=QbrLn8upSW}TX;KLdsP0Ck#s=zZEkx4m=h|}f`SQwXJ?Dy@$8x1_ zy{Pp&6!F~X8p*%yn*S4o#@E3?#E$<^3pA?8dd0~757;a~z$S&^iwh0}Y(BhVpq2S_ z$;l|=7wJaFpM#y~$J9bOU#JNKW#fr&-eI8N!IStS*Dr@IH^N??Rzk1Yn5TtB1;=q~ z6xwO77buLpeI8_U;-tLUL``2U1&T}=4?!+@j= zxjdTQNVA?ld4;`M&!QU6W3T3C{zCO|{AKXxm>u#^p`7K+dHhG2)sGwbvBzwb3$%19 zO8S1C^4Md^ek%!HpSNd;4Jxq0F=}N@(G1QU@nNliMwRIB>v_$B?o;diChTo7dX~CZ zqX)!6(|@O~>Nta*mP=YHli|(DW`UO3+Qe4J4!XEy%DFEXL4}UwLe|N#sd*JWMI){V 
z)3Es0Y|xq3EwW26``7)9b8Ddkmv;90NLmzy5D0+VVLCh-mE#SYcOjs*BKxTt!YoPR zOqKtd2v9!t6lH$$ug~y5O`LqtXozNz!P9>E30&n7z};WYFXE#6eFL8TOwnft zEwtA61h7Tt$ylTRP}~)_5u!Se=?25H@qdNZF!jancKOV>c~M+Hy^Rf}cix$F9Geo_S0?V`Qg0$o zjy8^(y?>^7qvP09y$bTs(FbU6nVNh)Ux8)%CU8tj0Dpa=evf|7=g1(|o zmp&c^yWO*Fa^*G*AJ@gvnq<^nv_|nH)!=`r6|BQFh~7cY3~wOa+7A#Sc1Tc8A*V zt%tF&d>`i`%aHV=7}b_-yiH|GsR{sHFOuv#JBdRuSO>cd)u`xH};^hu|ewFJs?v6YBW zYC^=XJt86!K!k#ktf=z}G=4Gf{KoW~iNV|Xq6WG}?rfSaxh+sxc07K0+$tjRCzf(P zPTJ69?88^zZ(D>Uedn;>K$C-bZIYbx;Ms{YC_T4=W(pM=Bv@i0;PD1U-OZ^0lWco% zLMZ+`(17?&>*uyMP`iCgW@j?D_VNnZ(q>sBo~mpw&?y@&DV>#r@T9*V~sodYEvf0+7Wha;rd%tjpsv6_=yM@urD z*K-x9d-9J`#gBLOs3fdRO&=Jk9R(?&+X(oixCXM+V$r@1-l9v-={sgV%Z81V5BfE0 zsK>-|>BZ6F&U%fu{yK*>4)wO|p5k=|g0KK?Maxt`lZx@o=K(aTAS`@tV|E>fC^~AK zc;7C7uNb|Ri+jX{Jw!1g+fFbBN78y*S?LD4?R1n;#! zb#VA;>tqNuyYF_F6q->C) zN>SV~pkpf<_4U^+k<}&$Shk{Dj0!m46HjnoEyvsTT$^^V*UA5X37$^3rfQ1C*kD+omvCao+VBOnGzR18hY|Ba(I`r;B&-H_QW2LQn>!V7AOvg7{2)XeGx{iy`;SCqkMooRIr8Kl z7$&8)$DbL2wlZdkqDZf4E1*WDXX(Tx1oRdE?D=Fq7@O?jO0sFI?3hniG3Kt2t>JqA z@lc5uVZvOPRIjrYs5e2tBDJwN;JeHT_pYics#yufZ;*|pmlRs2hhIQ(QVMT zvjg@$@`5YF3%VO3ywRQaZ!|v+qCtJKzpe?i`Zi?Q9&#p+M+DRuiirX8(E5D@Gs+dA zS*;U+(j%8c6#qf#1$jNNu=Tt)C*kogEk-P!Hda#nwneomQMON1E5A7Y%@*bDFJ zcjjZu?_LQju^-wBuSD}nA8xH>rTtQAmGaug>=~$Sxj#L1ef6%YwkncScAhP?to1^P zk);J_1X@y^kOx22F6Od~lLSyrOZM9f-vfX&SE68eNJx8$+9kwEP>@q%!aILf7b;*l zdAwtxvo+E)(F67;fpXN$d1GL#zP(d^L(+Z!bW(SPKsW}hc)tGX;Cfq3x%e=eulFwU-!gs)|j0oqMa!IJ`cLeSb`(I9>|08(} zKntw-O9WmPYDBQBXGTS}T{YW)t_T(Xlz)XxGhUmtc1_($**Bc>Unc085rhb%YYCN# zkJC?YQ^29V@acU5pp_>xs*B3NT#%|Q*4+7jym7x0)^3V;fh(z=U_P!A{mE|> zr)0E%CHhs#K_3=IB`X>*J)@DKkB~n|om(}PmHVek_Aq{P+x>-P&pq6Fp1SA}unv$&7YJFQ6uOBk1H}CYWjDgbPoP_7_NIdXgij_=GcISi65x}Bw~nE z#POV*{WdqWYBu|4s7b%&ourHqy=Ya{^d7sX{!mPeclVLTVy#G3CWd=(k$qD2R|v`? zr|%;6HbBkxJ$1yf0uZbdaFDa(1D3Mgez8vkVh)vVC~R&+ti%Wj;$U$QxN4ar3P3{? z2pCPk1g)4scmxzc&^{v9c2reETS%aDuP1W{a#6?D8! 
zZGPeSESL;(J0ba_fnMj zJ!d=jxxanU$84Dz#IuO223Og?=UTn%gb3>3R;f!H36P(Y8Whp715HEgrn90SpIi?=Xp_avi!$JT45YTVl9LV-iEYLSX$dJK!9HCd7r*k!^z!rvW{HWyU~Y_~Z=y zI5@+QTNnQSa%=G8Z~c^4l4dnlO6ada3W;s4j;;fy3T5A2opN|%QC;3@Q?~?1QULNW zQI(Aq(GfR^AjBwh#^;pZi??1AoI99W|LWUQS>2HVy8UrU^IqMfcB#<*#d|7?$f%3F zUQ=Iw6{fah_DZ&@^H5=6IlHN6jyQ)C1e?5BvzrlQX<#XA`W$y7Cor7+XOVXp9zn8% z|NoMOV0Ql#&wgtpV#UZUvv*;6g~*WqPMlxu7Z@cT&J|B&4D&%b^IPRi;20zJ*z%YY ziCE)gGxqGm*;bQ1(^o22xD*WQ^hVv0_l|38W?*yDmG!IwY+dDkBotj>un9w%V!qv2hZCUkgXikYF!>4@s5^8h9}vhhwH1?a*?zlzRF8V)VaNX16%(l)aRL=tI0IpyNQ!G16^(#r>_LL85__6C}~GNMgR212Z)vBtQe|S`|7M$YItpjlU}&)(C~(G*?m^&j#2H6 zmwdkWpwzv22}h@V%kjfI5-S6&z-KqJKfsNUv40tz3{o71Fx`bk(K%JJ2bIhFZ)tH` z85_q6SBevRJ|Bx7!xN51pOqYlr=Q|LcC*UYx4`ihh_#W{4a^7i?p&5T#B|WhjBXk{ zTVvq@nyKY47_iAM*21HWkoy zmwMsltnGH`(*1>jz4apMXozqsa(U#CJk6WPb6nK9D=N)w7j3a?bogE5C;HYHZ z8k7W57-wCe1r~Pm&pDU_7;Po+%<9nc^uBT9T8v~v~Hg)+Fq5C~%EO`H~ zTpZ^Gz)}&V=K`D=2qJ6|ds`oxI6j^2T!fN`eK)U7{zc$TUYeHbXUJnh9zq`NpSx!F z>DX<##_2`sT-(4&`pTzm?rQ73tb7#XOl{=%1`n0gSs5ZVhl~i`7<4U~VxRg01(q+* zL&32}v+o5h3kP`n7Mw-5s%iM1XM9fun+*MhMYASuL-5Fmlq5C>iQQL5ak}Lq7+$0v z&xP)5cutd4o=nPkuFxTV%Nu>T|8q{j8$9swjITET9sdX%1}0P~A8bl|6Y5FQ2zxw&s{pvy0S}E8l01Ncz0`s6}4=vRPFd#SB0$|X#2$y9m9Nnivg)-e;#?YrITABQZ zL8lAjPk-Jb#`r`P0wzKe`RL}asS9IP=Fst)w&x^~pfb_27gfCO-K;7Xk3bx6nHWz)iLI^o z>0wC#FNEpq0tGquQ~6F#cipi?XHO*-Jxac70eGj#E6o>M3a$_nzk4+AKU^uzTT~&> zKxe`OD4O?A8$<6t+Kn#i*X%?&B+Y?>*VgV*3mrxeYu{s%2td|=EaKA}$Wi0TpE!f8 z2hX;6Q?)7wj75Evfg&3UMP9hCrU{|1{O|3l@6ZOZ5&s>UGyEiyfMNG{* z%9ryoud?;LH@o>dX2NC4p9M7Z^tBE6d|L+Xr6Uy;c@c$^IALIk9Rz43^UTi0m^wQ6 zWX(!d8XWza6z}3(r#NBcBbEmXtj~g@OIwP(?k@ONe}B^A| z!EYAsd6V*=5&gr1K)k8*|D6@WJ3YjA<_D1dK>Og$c9!BTY>!( zFO;FkB`g8({JS(Wh;X38$qsEnl6czCZZE0VX7Hzd@ZUx(zmS}TC+HvtbWTCSo@|8K zNXB`eo`|GzIvdnL_w0=V+svHTM@%GmmORpz-EV-)0XMW?haGW}wa#m2dh>kAeZ6za zSU(q7f}ay{U{!YK(A;kgAxT8b0(Jjm`4wGJBX`xS_h4;{?R8(nNvF$k{CoCJB0aPo zb7yCqhTP3g1eNrxSHdSNLM&bvc3GNsHjeu3!Xupk@nxO9PQDi*uPBsX-j5 zyvL8%^auJ%stm;=0T85wRPcUaD3j25O%Y0c{Y}Tx-aA+`atS+5hmCD)2EXbRqdD<( 
zZExIQs30TQo~nH!t)WrBv6;A9kdvL_l|8jL=8Z-DTKV49tD1RuD(H_%lV;I7o))83 z&%5j`_?0a9r|XJrEfzdeJX3neA}$_wMf(sS(YI<+FN;35yshez4yR2>*;>Nh+ihEN zVyYjn8U5{D$M0~T0xwvGG<;Zs`>iwUC8mIL*mw+@3ZE7?@_&0dB@5JbJr5{Gc{LRg zCDCrxruW;y#Nk{nDBh!wV{>{I9fKnlmhY4F9R`+b-pzOoXLD8xvs9%R_^-VQNXEt) zSsnbqQ$#>)xUpn}x`_2r$ zLZ|TX&N(a`V%Rw>2Z9GzT3PbubsUY%v$|_`@T`aBFOGCx%t@(tsw*(5X=C8zz6`cX zROe>DTL5szq@xonRH^KO#{lFozIua`lL8#%Yf86-cj`QC*$Nr+G-`C4D2+Z`$YacE z6ufI!#XCm;?U^ysO$t5~?4=2>QGgF?dl@_3 z#Nb1iXBy=m(37O}LgYOW1t6l!!m||UX@Z{;;C-P0*Njx*oyO;_45T!yhH&}O6X>4)SB7^U8oK@72`FwkE!x&O=}7Q$lP?P3s*#I zQdhjcz*JC0nJ&5=HvS3m77orPlNd`{Tjb4`1lFNrZ~p=&e*q6^8G~q+WCJ=dCF^z+ zU;@98xj=CF9(d$*$sRB4@_|+iM6El20;7{W#(K0{0j#bvIR=*mSP{^}ryy+I^Zc*0 z_ec#LrW#(obJ|INy0eB|KJaw*Ml?ODt07;8#B;qwnB{uz=$`mB?x-(czRbl|MODDG zKkLOHk`nXF9F)OK2|S1t;v2lW(}}YJ#n*m6e_bCGCdYJNN5@+5;h-SHqLjyn$4K{j zjjhZ^&rDd?fQzsn11io|xjd1%L*EENX6QcWYJt?k84<8l>A*8H2$=DzXsR}m0>8Fr zan#&)6sRSI$9nDO1t6Jmym@oHHDpy3S-yPuz^eLf1%_?$z;|Ep@0<#DF3^Yan zu5E~!eXQ&?rdqnO7FXhjFS8na9gwQ~=+5ic$*VXyc<-%#psQ0BoUd{UtPw94GEOAL zNQ3u*_mgn;2Yd7K5603u+S)nGw~3CzYs~nTY8`|JP&bYI7ZeEQ2ePvbR$ntOpNjBp z)4YfbXQb4!E19^p?A+%qRf2&A1P&h3+q#0vCJTJ9RE~42zX7xrTG}1I3M|aN^2VyC zJ}@VQWoogd3=o@5+nVgu$#Lq~9^Gb${uM*?8E*18@ZC=DJyLF-tEEmG@t0I3eTT$L z%->dYKE46b z-l$T=&!l0vpS8b`l(V$2p8E)Hr0zJ$ArPPG2OcA-VC5YK0$?+x0i8S&Q1B7Z>VgS? 
zUoII`CAbQCbV`yl?mH~-feMzX)C0Np%AmmS%Rn1F#N1t{@iHJF8wc`?$aTglshLl7 z{D%{gs^}=BJHgTXLF{_7QEH36va+&TC0S8tlKh1*+J~}FU+N}npSgGz&Mt1OYFqPo zc;0k}EcyJOpJWcyjH~Xpxo^58<#Xtnd`^C}@jI;ddl=Yzn_?ue6G*o3$46%4E5dPT zSildz6MaX|kPm#=(rP_N!vH>DMZ+l9L8$ebQe{2gmJakpGwXt?BLf!ZuGJpk%nhV$ zA^WuY*F_lkFfv?BCHlPe^Z2Hr*tTsU_TsAyo81Q&GpDLgRP=YZ_%j|oew;pLSD*7k z*-%Y|frd~;#4uKuT3a6DWent44R=||GdJ6{b#;~D@NZThPCl;juOCVdc}ElQ>zMYr zcgz)fHz*bfh!)sEa!9_6jtey)GwcZ#+mT}s0Gbf^shmFrMX4@N%fW5%yemhdhwFHN zJmVWZzg9_rEk!UFGNgf+uptWjWDGR3?bUxBiccWWcxXYf_Fl*8=dFcRmHEzEB0Xx4 z7S+`lWCy~bASS!Hq0sFFd5?pamn5|Zq4=#>4f~aN>DVB5*J+H?EDAVwj)>~N|0lt> zq;P6jL_ZY{$`oSY65z zqnozU#J~m;!=7`TYD{mL!4DLDwpiiG0UkGBcI+K<;09iCpR0|GA*V+znLFx3PS0v( z{}Ugww|EQOguCE_{rl>@7%P9be7md}qOuToI3tvlLA!zO9j(AX0)NEubI`f|AlH(kk7}mIe`!2I*EhHzD1Kbmyi) zX*MA({mljFx%YYQ_uSw2$M?tanP=^FJZr8w#+dJT$2;ayG+|SAZ92Bw>NlfMUl(I$ zRD;55&wTFu%oG~J#=V;ful4`>kniI_kQzsO$%aV>-n8>gut$T~RFhcjeZE;;L7gIi zg6g2Pjb)8XPo(uP3Nae&tUjHZoW<%a$+;6B8bmk~T}c1ueo>@+eWjp?_*yeb-e};E zBjcd(#fDRTWjg!aRHsy6wAyQBrSO&QDbq@$48a>y0h)t;}UsV z>yqb@wdZ6)uLJ``qxP;Y+}2F+sspzA%UhtxYWkhnToj6|A0tHh8GtvW;P(`m5dv>u zqz!b?08if{aOLK-J(x~uSU=@ocaVeJA+)`#VfU)cw1_?VkUEyq?}<)H421U4xUedZ zQ-pk8!DGsXH@#B$;q3S8&5<+(bbioF2*E?CRi&oN-4tbHWK65%c}IprGNkC8P+O&z z)WnjpLz8Z=nRK{f8k2l#HQsTKIL7sPx=wFv$l`~rU~-9rNAjfcgqLf90Wu>5g-Z|& zuF`lg&jo^sRmVtN`z!>Wmix!zumb>B{2S@vDD$s@Zv@f0k5gS3kDwMf?XdMBI3=@T z7ggIM-sTKXp})G-*3xCW(%Rv$9X3lK(nS4h|KL|kb)l@_#&?qbsyt){`d|B}LOBUh zFFZV%T5UAvUV$Uc)%Wk0yfOM3zyI5C^QbE?Z%5+oY&+8yMG3NUT+Xb;bhA6=Jj{ z5^QBM15Mh811&|)>tok8=8OH$Cz8MsM!XJU`OT{Xe5k4*q9%%%nn&z|XWMr5BYtxS zGA5K5_4i*G^S+1hDr_M(R38i+?c*zMT)M$r znfs8tRI(pv3$1;Tr157&{uj%RfBTo1_f5L<$-7%_Wv&GIin;dc`!OaDwW2t0-59)8 zz^?Hb9LodETvyB;W0-Z3EU@_@LAJx=-o^bk2OM(xhJN?u61DlrPkass>bLXYqc!|C z{H6JN=V5fO%Y#wvLHW9$QF0h&D%ha;scEq8hkOR6A0AEjQP2}SJn7ue2dEHSiu;xP z7({*) z9+Ko;kEgKhUN74Iowzb?)mgJcR8s9SO;luf$ZA7q^LlKhast0$*W)TIbZ?EPwON`} 
zcdVFxTuSlmlvgq%D4~oyYV}Qb@VC@lQ=_7e@dS(U(!84#&mTf9Hm0g0?g45+_w<7UtIa}LkzwEWAIXtHAV+zZavL=W+VoqEzA7?)D6Vf^Pdy5Pa+FU-U+^8E3_zCV^II;+e09=41YOvuySx8+j^eZ0 zJaf2&+>bK9${Rb;peUB9(~O3{`cx=IWKp(rH8LBvp~32uR()F5^9yaYoFt=QSNbUle$6aHRo-~JgwDDB zabOYTXd+xzE6r03r4`vt+_&CWWlY;;78_1KoM%_Rkrlq)$J9@y#!Rbql$WcmLTu5| zW|0f;*ybpA?R`$Aw>ppTbCoFGBgD3#&~V!c%@{IYT{H1F-B)DU+YO^Rj4-x52^!z{ ztO*;P%=~6OBw;jMoV6rU=Q_FNLpnEZQO7ZbIPFhq39{Z#z>$!bw_2m0#UY7^xIe2g zDPVcL);+GR~p5zt_5u+Pc|8Z7?pGL&E~$1;>&*; zY17+QeFaGMPEKDCKhE{p8Li8C6s0Oj(nxaRNuRHZK+vkM&kKNzQ^I!?(oOTVV-;VXM= z#80bPP|4iWF>`pUu0|Pq#4Y-X063tZ!+e60Nxk&*;Rj@(^so6SO)8&z3g0A;h7ej0 zwimk0a{a0gx11POwdio3IR_UuR$?axJN?n_*Ykn1+KuTR46M}!m;Ky6m~$vWI)%|l z2ij~e^h88yA0#<%HW{o4k+Ijt{#^aW98TqCQ@pw3^$a$|9@YBYYkSr#+Cj*2GT@FL zo@FTjhXy@3H8xRT0P7;Eu}g=pbl<44&Va6Tlh)WrLRY#lYOKnjD?Jo7utYFe?7GhX z;v6qsLD|0!ZS_HeW(~Xe=A_IZ`0WPQ^Qx zg-Rf9CAMF1%}TOp z%XFSP%;8T1?GS!A9`{~(oH(Gn0XI*b+M@jQK{b7Q@D~-^$W&BjSvwvc2LRrW-cfc_!djm5pUZvlMvqS&kiJ zI^#5G7#HCFQ9q?;QCCZ+%JHbrcl!AIa(6K$9^+by8f;|}IZ7+b%ip8ui;lwy?{*m% z&%(-t1!pG=DZj2M5e~q(G1Q)8p2)8yR)&`q)b>d-LsEGpvemwNO{%&OGInyrM%{+| zfpfpG!)DXsE#w&aNY^z)e)%y8w{CdF&v>pU}U-e?LKSVtt~QZ6Asy)A4;e2GGrW$GyCN zBZtp<y+^MVv-%i<{l6 z@_KsW**PY)^ZGyJ`R8MG#wB?PU*$Ut@jV)O-=nit<+Iq{GQh#1^|XZ=ip;F#iCjdl zJ|OKt9$EJWu3@)J?b}7e@A~`HO#+-)5SXtXyFF`a)R*kF+FRi-f12oaEb{DNj%xOV zxoVZht@gxx_M{{KG>PwIM;iv{$!NPAD_%#U2(C z_W{{*^0wS|b;#HxmCx#7Tab=|sytJ$_Kstm3x11$z!AFu zO|zz2{WcmvlV&}NF4Aa6_^i;ctttMre7&O|TYZssTKc=YMl;>LS(#P^Up9DnTRu)j zh2+F+l7z0_aMX3UQYrNU;1YuR-*vAUgBKcp@b z&{R-h2#2SM`yB)#=td_yRTmiSu5#HB zU7B61#W_$XEJH*1$6?GL`-u8a4XOLID!Mhz`N$HrwaB{aDD~OJm6$SToavZgF{dKA9tqxDlozNPX@!M<`#~Nk$y=Q#7t7W&dMyn^4PE7BVRx;C; zYxO)@o;f&6bdz>|med(1paQR@*_|9Vi5sxnAGdHDg(Y3pZS>yVYWM@w-TY;igc@0^P7rP_!5bz`E>#sC^DRQG~Y zYR;(!%cyfVrJKC1u_2IK6~N_I04VumWJ#TLIQ6$z#DrHpzU)v83wbB7%aN-a<T!)`2<0I{d#J|)@6kwnR? 
zV+mA%2;$BxnRq`Ow30Adq9O&a+!s5rUeHe=&p3St(ML6j#5D^m(`~;!u{q!YH7gHP z%Pn2`)A|K=kK)V~8}?2D^RIEOf1FO8euKq?t09Zx`v9|C7@$B&8b9+&K&@6Xru)k9 zK$Jiu?Wgq$2{6b>_wVLW0s|phZ}SQT=$?!~8D0oGdeZs;q3ijs;BIok04_z&9R5^U zXRO(qxLmHK4pOM3RPFtm#oCUB*k9;&s%4|m~^893CZpk*q`PTLE`eieX1mZgB zIS4}^gjtYf4kO$iCS9CG-~JOgg~v{i{n$RaXZ9`YQ|P^TR1=&;(~cHV06tRmg$avl%V(y8IF@;csKrWVz_d^P-UMI|NUTHTVI*z+cb@nfYN z-?ufXwBz{%bHJ0{Z^nAY~LPlX`IojkJW`TIKG_{%mZM4)alC4W4C+ z;I_WBh>sa)4}D0=$$gZi&3V{wU~zq^j?#ECJI!=dff>nZ`g?eWmrqQqb+2ZblIK!{pj9+otx2Dd9JRm?LxrLW?q@CXyYIqa(%(v14uv z`=Ya9*OoQ8%iKhU+&+yLP@nXMnXCG^g0o-BPNcd10(s1b=d{REDsxQu(J{JpgN~Tw z+g<|(Q@GIlbPcDco($%@O^t}zuZ?Te zu#w=G@&dCOR{hdRkD@&s{HfOiKb(x)u=Mx>7&oBG+VNp0F7%qc{b;;qB~ zIR#~bc?bH%F;Ka1B6`NrniPJS?bVk;8S!}O12`reMn0$&RC{`5$j;_DZAAEIB8xNO zSDkqdU$im$uaS)?dM9$XV!~* zlpM5lBjK`Yxn{I z3A?8>oYU3`9~;>zR@WqH1O_TAv;%wQ>aMiKvALv)?VB67?7vX0eqXra_QTeNfzrJ` zA2nfUU5aNAaO~;!pStS==FWH&rtXJ0c6Z4AXYgDw4F!5}|=CCp6k_3^%d8x`d zuZqkkc~qT4d@x~X%VOpK&`F}r|- zF=Yt3hu{~vN;@RYQ0S*ehYnWu=IxretvViBHI5`HaRP20jG_)(&9N3&4z;)M?Cz=f zbRkSL;rk@Cmqmpu9#ZG9ey`;gFC2&0hLiYdk%2Sk_Kzonuv~x$@FSJ`hX;z0ZQt{wz{xnL)#B%5{MPjHz@ecW@p<|botXIahqnICDU?YW8^wp)B90vE@zCAdGE0$&MDS>HdW5!J$>#s zXPyCUDjE(RkJO3_x`#TeU0$Kjt7q2z9+Fkja6E7*GF9Ee8J!J25#zC6O+8I9sfY&w zCXz?ka`HB51g>Q&*n7ofZX{S|fkBiE*`z&0k&8)VBv@5IS7a$CVIQF@QdbjN@d27~ zU=6+5_ypKAIUAuZ8uaAA+cX|Ql`k$`PBcW1>kOvRSkc2mWFT3~Tnmd^=w`4~9(~3h zhiPA>SG;|XxDJu6jYpQKdBiWhh%`I#shXXaWYYP*lmRf zw~g1&&q0lPhSJq%M;lQAlf#SMM=#+Od;Pgmdxb4zEQ^UYg;x1nTTIs74jF!@F@Jc@ zswL-^Qi9lI+>YDOD)uSdBp-e5e{#WdxibQKtLk0sQmQ00&`n-SPXRcF*U@n)z5}{q z`h5vb1zoXLTMCy1gDTr!OQkx{6?@~QyO#uBx%fGQ`bh={UbZ|8zT>B(^sY>iE2!-# zSSqwaoB2um;fX6TS8~}+ejNilQ|3nz-@BxUo@}Y3KhYsNXlKGx{nN!t^`rLP)ZzQ` z4%-w}xEzdV=+SiS_+_C4&*Rsx+f=-9q0Rh2LZz3cq=Inn=yVW+h1>{Uf5!tp60wmn z!KZj9T$x*;4Yj8G+yhS5}3S4~yLW>$ujav_2 z6h;o}MU$ZeFETVBwm%JDtc;u*4CDu%8)z_t!92eBJ~<>xuu^>CbAe;PhdIs(LLoax zOPAk$ylu~PuH8l1_)*Jgn+yBneEgX+HM^RIj_~H4wzn$lOqv*_oKXv4t2} zEBRCCt^=`Hs*l0eAgu+q2Gx!qBmG$fBw4u%&x^$aEwGyCKU@HXS#+!$R{@nrJ}t{> 
zSFrABUQbcvz`YY|C)++NH^1Tb)4<{MWQbm+P5;8Aj3Z+?E#88VNc3IcP1R{Ydzqx}7Q{?V?S!SDNij4a_rMQLq*PaP9y98-h|1nb#>*J!dS+YX{b zgU2sz`!*pz5)iTN6@jiiRka-|gs$+i+je?ESGbI9NBf{F{QS1qe|_!2xv%kMU?76e zW-XUTzD2CquT-TXDgP8Dz3O0uIg?PB8-=A?4|}HkSY&xj{vnw2vlr(kZdJbEE0!t4 zk0V4sa*h$u(l94L+OQA|=480(nO47bLvqB{mty_vq{nIG5}lmQ!xYfzmZI8SaCd z+{Qar{l!KDX3WA5-X6>i;LL}LQG{e?@01?Xme<~5!oVz7gJ4}oa5t|M7&Ov+Trd9z z>;3_~f6(tA7=&m;WO@MT1-e+ss~2qPueljV`2qKM0rTQd5;jQbyNWK$zxRn)I>VPO z&UBSF503jydYG+S=bd7WX@`#VvNfb`X*8_{YQBs*DSow(qd7w4`VHg@6C2j|pz4JT zOH~*>=h!)*5TqnWsX?)M`}Qr5v>fMi@O`S6Pu0xgYkqwUvOdJ)&D^IMwr4267xY8- zPulb+{JPbq3)cB#&xctYK(-q6JX26odic%Yj53bXqA0Hi)EiS$@1t1*FW_HP6vWs9 zr6$LF(lkPCF#uJ>_u0(yLDdeQu=bE5=pN+U#N%KPP!>1mSc~JK>g07^&WlI8;=zXx zaiY4G)|NGA9{jp<2TmjnyFXkx?T?S8XZ6-T2Z|O7KBmKnFZI89cWP2tIJ=NnccT)! z0O7q)_0#x3Z-F5uxjMIA&u$C(Nu~zzE;T>u0jK)pCEj1jo0h|M1z{VtX^tW(> zex47K(qbTRpsG=c*y0an&iA<-bKy)##<^w2Y2PSDX8H)l(`Op7#gBrLY>;u$ByZN% zwla2O7=-5M=hw8czqqu102|L#OZmvq z9_Vr4{D2~GTvNhN>M)z*^r(TgV#7@@=xPy^1+JH8b(;Esc??GlCf^wX zcpGe;e~KUl5^4KHBO^2;)`0-FFla_ulH*#S__JO@Y)~Bo0Y{^^V`7)=A-2rP#WqKt|e=%53H&4b~K)AHS&&+}knT%ZTDKcvVwE!OL)VaKKR1#%eai}_T z{bmN-4j$VWG@!=F$=C7P8~Wb$~Iw^eEwgR zhsm1y(R`PaJyDH4+rFis)alkBs)Ng~O2>`RoUn8Ev|-obu4DmXng$znG*&QW{;Qc! zmsP=35)&M+nWq5INBYF5eH*Mg{~6vDaK`HgBnM?b8mc;&Uu0XShuOjyAy&Lz8cK*1lbrGqHK2t)58|TsEiabO|(+r(zP|$4gRht%C~cbvTyay za#Cd}%>SN%nlpo?8I36%wsyQTJurWSLqyWJb9B=0v|(=jyU7X2Fn<&}Etk{$fb)wm zlzj{&AF=!7GJm|wFj33?d>B)FGC_fqzFqHfBYq+1X5+6V`@2X?aHAZKz(2H`G)kEN z7@DvX=9Nw|t#L0jG|I&!TPFR6`nhamr;{t!UA){7vvJI z{{R;AhN04PN?$P7hmYe$>mf{!|C&JL3m%ws;1w%k6eu)Fe~>4;^ij$9 z&Q8ecCJ}RJ5M%nfw4&kdt5>hG3sh4ZST!zmC6q~M$EPpo!Qy7zpSOMVQ(XAFGCQ*P zs=W!#e!!Z6eWTsbS#-Qx3l6J|D_w?8mXpmnp0^|9|D8Bfi9r){-|=6;|2go(J0TS! 
zQxKteM6d7uPbE+W=Co~ZO#B)fEac*q1oedy@B*q6wXXSG)(=!~L;L1xBEBcmiF{mp z@6}oK^*E$-BZ6i6QP1RWSLaVnP1M$7{1}oizr8sovnVC%J<3DAK3mb|X#bFRf6wn0 z``4neen-wDKfkRpA=eAF&XC-l^8hZn3F#--TkB;># zgkRwKW|Ax9l&*)UX=`?0CL8&v7pkWP^aK^9DLf6$g~(r*%KR)IHWw zoNsyQA9(-p#H*;q;;`*m%SJetZjj>b%t;^3IX6z%N61Kv@Rb#D33mFA=hh(j?D`VB zh`hY~vs?7QPllAQ^1igP}Iu(LKvvsz>7pZmOniPPQ^Be_lH*H1QehI4-6vc&k@Y zZQGHdprvKL+HC&ydiJH(#TbP=h?$8=xeg~*JFeVuhIqw!(!y$aAWuB8#_UUFnCf_R z{H71ZTt?H^o4frQ-_9+ui@U1!X@ea?_CBB!A?PX7K#JB6Ar+4Mwf@4{fjs@{Mqs2X8Di(@5JA_Kn zAD?-k7k7&BC_GPE-fMu~gqNV)+CsQW>MA1Q_2ZpabQ+-M4r>?jsL?e9+J;eC!w#%&cWYwA@fiTGWM|J-n4C^vuC zV&~h|au&<{?U}0K&7KXG@JVbW)luSL(v0J;j~UKnVZ93ld`?|Hx3Nwt8I9YA=r0xW ziPs5oENnJoUMJ0s{x&0*QC3b9et?(K#TH~p8-V8XBITekx z10>8_6hOT)av3ayE}^t67F39T$4y^8RS09`ULXFM-nj6DP{VB9aSBN~_teJvd=0y2l7IEW~_jjiJFcdaPjU=m#W01m0?EqG3;pvj~haX%&P zd5zz^Zybi2Av;aWJLDN>`iM`q!5q%pC8@{C$nE-F1_hQ7WpFrNb1dCl{C%n$y%8OvWq?A$NI}>KWJ>xb*rdyUph5dQCOs9z)Sw)~}`q2U@Tonxi9Eog`v!pzRAhSv!WL^cd# z4;g9jlTKvxogYtUD!jfM@*HFT^NH*v_bv@xMWFkbspcJNZDtC-r^?@dW}CsSF3F52 zbnulvG*sslfAXYD;f<&w5d}iNo7);=cj^a;+}c>}dp)oC-&LyiwzPR1pa>2m^iAuv z?Lha*X|QwGLl>+{SexZN6bQa|#mQgw5C)Iz`+$t<4anXq?RbqJ{A*T91!{YK7a=vl zFVG%UNmBhc7e&iG=e(!7=o@zT_|@Bd-N-^mYD9yT^IMb||ypTq>8Hlf(bi+WVsFE!c?aSY}s)Ht0eq?+n=tZ07^2U2jwp!3aLFWk|reFK375AQ)Iy{)mO?DYnf0t(td)Ok|)c2j=-ju{h;IXw}Y&kQ^K z?Iv+pOl(t7CElVzNxA5J_=0k=!+!C&ts$ZOMFRt|i*K{Cdrq;#(^e9E+2ga1R-N>= zX3m({##)Cis%HOxrINtz@XwyU7~40D7^%4yY}dK$Po&?zQ|89ne&Bq;lk?gDp8Ubi z;EZOq{SzAPV~fbnKW}OQ+n9eVaq|L<_h!0JYtxB{IDeB&k)tQ*#c5MkQ6Z(y7jVJg zb`=(@nRmouFS{e}-(Y0J(W(dQmadVxCUK$0uh-?OcnNw563Gv}-Jvd;g=)G$K_TE% zr2RihP_v+r*X65#nFI9yj{xu+=;>6`o(nqf>0|LDQEIONZuQL4)3ah zFyT4Q{nTeXuZ~^Gl_dCc3BsbQk28Yx4yx^%3wE$GF%qiccF6lh^b!>;X#0NTv^*&> z$+CAk^$ylsV>MIzb~Rgt|1Rf+1?*m5nkJW&lu$Jp^wHVU0N)$1EN{5J=%2uX1VpmA(Pm1IngMr>-~bmQJd-zAU3KeNd#u<_A8v5}ddn zE1Z+k?E6N(5F{I!#mi#>k5zvOXISQXB6HlP=K7V{cNI&=m^I}d@1at}}*r4yR zt6`kW7&LlL*=g4Cl-+2}fvp=?+MQ6&Mo)K#6e(96rQbdGs`QNUNkUM0aXMY%N3{dH 
zDq?7cA9Wd8q@Lh0Oh}#MjE!3$3%toCvUpAt{QFdblc+f$0PK#WjrzvHO4ab61^m^! zui`DcyB9E#)@NaJaHdyU|# z)I(8>x>^SvaC)0g(8nBOdjaVZ=J^Y__51UMDT-*3Sc^7iBWwsklT@jI^LtJsHMP3} zGHcROe}hHIFJe0I)j-f%bT#bf9L;q&uY>&(De3}PpSbPhr>M8Uv@FDZOfXYHrG=lJ zR^on$AFw^8a5_&AXMxG>{bWGZhwko6euNqbLW#x1r^{YB(2c2c4r14M?w7mPFITm~ zQyk;4;#vj;UE_q2v4A*BC9fBWDo;F3jM=b&-a} zV>tu?q~~k_uo|q?;^L=X)&xdUUtYL$NIp{AUgO5moL1i>>3yO6^5m_kyh*gd6n&T# z3zNy}E8?n))JaG2SEo0anvK>HA?hpeu;*Q#yWnB-r@g*mMnhdvqW(qyu{QtB(HP88 z(I%MyH4AW#vdVs-L;I4(wT4*YA9oUPH3atd8&hqj?HRZdMylpoc*;)?wx`=*8(nNz z2RCl$MR_`PgeYDE|9N-hj^Exyb>$1K>I215h1w=5jYmdt8W+`b$|Q6@Kgk~(+wHOio!jPk{9z6uEwIGyWk{T9=`4$j$$BG>d#M3 zen)JBlrtJAsj%u~SwNqvL^7(fI*B+V6t|h=u3nH-8<>BNpV2cjY<9b|J?#e{Z2v0f z)#r!^LUCBmugctC81|~%l1E5K5t?KN+83SppXY<7JxW0vVwWw01@a&m1V%rdcL47` z@eK3I8&41nNO6r&J@UXb4(KRDASK(*Eh7{UHE>JG?PKBkdtk7~)sJ<#`My_&z@I!5 zqG4Cd$u6=HKGLJaM}Nn^4d_!G7FJ-t8tKi(l)7D)aCH!SA5gd7X1o`==Zqf~tpq~# zM&WE)va`RTHqb7d+i!2V;LDfv(>wiKiYe6X9UaOsvAZo9!RzpQn7=vI48~EP9IOcm zv}I^pjP2P*2A$iPfzgL}YakYviMf90@qc*#NHCSWae4gzKp>PeIj{j?1e7VV5_eO* zff##sPUA}n)fEswK_}=+>DTE+p7PqB&8xbT zwl0w>T}kY9{6)a&n5*+nPXHbY+emou6^oqgvN7}uY8K{_g)`_>^3GK>8dcYO@ijy^ zg}o|Gz+Q)9G*9+MSRA}%u8lLYRX|%a6rbxnnsdj%Cnm#Z9M@MX*ih|(v6ZQZ?^Q!N zUdOkdku|8fB-7;{o7)BiK@%aON2w#H-zvj98^tmRroU$@~sQ8b9^22cQxjA`qWy=~5i{31yqi^~2 zA(@A?rOmYHCO7oV?9#{%x(DOm18kw~dXUzi`qC)Hh1qbZ(vxZ0Ck*N3mO|A9;q`6j z55$QXz-OF{D~V0LffpIvjb7k~Y7qDXos{38E*#0;37UNH%i``1YxyHE`5soTvJNL; znGOO?H*qgX+@kYx%SegFqDnWU%jhq9jowX)NlR_Z1MIU3$C>&@Ei)S&4fQ#(>(qKb zOg!r2XR`DMU-(;sk3<(_k3@h9*Pd58?VjG{{#EJTvls>>2Kj8M{I29`h@W;RJf`B3 z6>G;@b=MGxtdK)vLIv$bd{s@lqHY#+Lf@*1mWK)&OhN+3kJf5m8`ZB=9{<1(jv|Mx z_AN18hjU8I1%Vk)K9SwI?V$vwoRKU{{2xbQ2$e*Icr=9Sn|6JJZ-tG}WkRqEk;r)zLBM z796uRn=;hIGG+WCghV$M8!%`NijqchnmdJ3UH)Z;N+($o!lnkXTx<}>W>9xE#)zNLQm^N-PZT(_O`f*if z=I_S2f(lAdzxHX@uOvFUNcah-Jh|@GK1-l|lUMxVJ8Ml9+^}!8F*E`Z7C~iUd7OYL zWKt^*vFYYk1Z7i_RR)LlOMZyj1}01!_LC14 zYIKX^Ou2^Y3x2{E;S#N{1j~!G9~Ay?c`x7pW7>1_FiPVZ0>+;UbTM{_AI$8I)jKW9 
zTPsPlB=fd%HH|@QbL9fqW|kP;qNKZ~9?{1;dNIyBe-O8QKl10)*I$r_1q?vZFa8*} zSS`U(9UT-NL*QF=AA#BN?QvEbY@D=2g5pi+zKqR%xm-NOv3Mw9{zFnZX2g1&JsIPuBF{FH! z*CN>mFTyTZnyay^9NZg#Z*%o>YQNnDc~D|1`GB3CE6QvwWeK@t`NKgT(0Nxt``J+s ztobo0k}Q9dRUvlhyVo3qv}C(A^KY6bP(nqV2scrlyC{mRCl)CeBtmbO61V&mG&~c!)bSw-iO>Z^QqZb>Xa)! z=-O*?p#|1y(Y^HKf3#Sjoy#!~O#n|EYHmFAE_N4$Ftr`~`{45^3I2GEFV0D+=!5{v z7hHLIWtpSZl#ZT;;@M_NXX2l4Pta;QmJ?H8pBq|so)Eg{lM4ulXjPop(8uM#X!Azr z3Gn~9w4lXpt+7&JYk8*GEOz(T3h|~WGOFZ$E-GRL(CCzmI~g5F^`o!xc(^kG8#H^HiidU+ z@;+SL|0a_M)=TZW>w|xkNeFbHg8WAvw0?+0O{6Hfuq*%*ncj#Aik5|ewkBd21x?p@ zR4i6J(sJa6TQp@3ky-C#6yU9?m?wnRDyLM*W5y!}4C@=~mL`WAzZxI(L;whoJkfaj zCiw-L-a*?5LOL$mX$}9QNyz%sYbHtut^lxEqAA66d(mw4fq^UMkR$D*;bIfb#MM2B z?ydLf@1^<9VuSKo+)20&;qVjwt<{>pU)O?5SelnNj922&{J5(9O!eIY0U5F7Abej^6IY=|>+dtN&fZj0=u` z0cvWJCG|0-#3{1^f@t-J63UMlFkCmYc4c1Y8X+qAM%S)`cwsf_aM51jFL)I!6=^+C zKGQ$}1TBRNbr+ffhLaiQtcOOaA-Nz~WLXpX+GJPdQ#T$k2brRPc;TdQhOw4?<&p2({h>krn(?Lq?MI9iL16h8;{ae+e`+h-b_I$V#iBP7^;wWUur zYLCn-moi9lt9MlHlf9p@?31l@-RXNjTJ4~i(zQe@=Y5NunRk6eQmxdK+MkH!Gq*tZ zXJWQmvtn~daL7sX!wzi%Is}B@MnpCNI!oigMi}ScVDjHj5S3nGBh)hi>F-=@M1vC` z`QGTZ(8k4fFIZ2GYLBjKO{lR^Dv3etGI^SA|1vmhau}$kM71hX>}k7DjmFa=;&&E* zG8C@|IoK$$*a@zlU}Us?U)EF5B)3vPEL}D)!*BQUdPaqZS7!IfteQggjzReH;qJ=J zNvck$>)rZ^2OHTNbrEf(!^CE~NTRCY`L;u3`;4QJ5q`yPq(Dda)oUcPSUx#Wlj=j# z6f*mfQ0wI;74!Xv#t(0Y@}64fIG@-xe%=4!-NV2CfO~HVzm)srAXRp512$FIRWWw; zgk>{ACqL|}Pawg`Ly2EG)z1EWyPdOJQ%)AtS`r69kKL|BAYA`Hk>ptcJ0$0%wry{1)9NZ z_Tkn`5l*pK^;M?XZ2g)5P94O%X>&j4fRAUcf8E!e-QvbG^?ZtNgj;Pq&C@m}Ky*d7@oH=}s5g1+=rOoIe`1o16H|R1V zhkcHBSwI(6tr4Frcv}-58QEOdESOZaUt8Bso8(Mjk^hxbjGSGRnQKEKrPVtSnZ|QF zVmQy_nUqECZuar^rcYTG9Hw7)MiDHr*v)HdQ~SaN-F+%g^XzNN9=q%Js;zp@{lR;& zhc7KB35dM#ETNh$#IoA}Ioo+uviqNijUk}dB7L3wFQ-I=H9{EDrfhlQJ^%}y#9r1> zbKT1Mrn{2!Ezr@mg!tFG&u)Z^?8HlQd;&JlES(i(2S;0n+p)%ruhwkJ^cQgb3WyU+ z&NaKEi1KOil3|k`=yyF%7c!`*g$aZ`cyi!)a;@Un zXPE|>rZNGyb12d(XDC1IRAkAV>>6+FFQT?aQkVloS=)+PM0*1cTJH(B;u?cFXikgS zf{1E593~Nv3ww&)(~qxQiS^ezJ$A5TK^&b%L`^-hcd9$}E(G7Xc$6Xqf`-G-x_r_N 
zF$x#U>M1R!HO8_jXCEsvEag@QqImOUt0cpadsOf<;-oADpb}AEO^r1zEGN822DEZ;*%2TKOHdVM?l!cu1^n6 z>FVRW87kwVR;4=}KOlXATW@f7`dr_oh_d!umW`vB>_s%TOakO#CTpI^Iq91zXYnfM_t6r4SP>5c6bE?LvS$uY_W2o-WMk=AYX4L}Nd%pz zVdw~L=bzF#wA-?@t`A|;+fi#bQ9azr9G5HtwiPp6sTp5AnubgL@};G4do@zWSu!iT& zx{CnLU9|54EcCzk`of@l5YBz-m(a&D{T%tA0osivf^Fm31VJu#o{CuV){;YhGm_nC zN~+t{GCcITZ_=D}3aX#N_phJ$oQBX!mUSz5`VB4*qj49kL|Y}DI#Ulqae3hA1h~)&?BoPRXTlL#``Pc-VC(;@{^doRs)!+PUO8y!eBD7d#CR~WC_HJ4L`77*OI7O)S_ zg6KPA9je_aoC?+S8R@FFeeC+X4k%MP)leG4otjbI9lWd5JUUn zE^=rcYoX2vT?7pmx9|D5ephTe*bRpS;wYbA@py_RAt^byA?lzo19U>~JBvKy_ zo$A|#WO!ISdg4|KV7mD_{r^%#`$q;`5}+Y;5Og8}$F)$w!msv>H7cRer`z{v{O)3P z*so=|g+-2qtPmsO-tlYjeM-j3fE$)n`1uT8SZ5x18#vBCGJg$Hzs?-iFaxc z5_ANgLA~PcpEVbZ)`m5nXyH$7n%l0QRbGY1*_>@b5)su$}Qey7@hU7RVM&>I5E1INXBB)66c9cav!1P64p1E7L~+UcU&|Q zq#sslwaYIiVPz!EC17-)nX*9n&1AbY>(2wDBG)E;DFs}*n}2i%iJe|%B%0YAx{G6J zrd?LME@v;7Umsg^i*?s?bgLtH;od}kSs_^>IEqxr**pOTOncj0f>9q+#9gj$PwnqRWB74l>sh+fsH+K*JMw3XlJS}ng9X5f0DdQY>u z*O6B0BJC_q#DrT+RjbPI>I66%za1w}&#ze!22sI($o{MHL^jW42x0)V9 zrwOY44)A$}7*lLhZduF8JEF4eMZe=Bigbt7uKO4#E^H3YNMn~O)t^=b>$u>EvJoF*)1ljgzR9wXxDrIlZ*8_&0fNV+3MXJSsuM z*df+o5c!~Xv`U9x1S3-G7kFc@aYw$|(54Lc7wO$GHmGYqWvk;f=P~N7ceH>but`g! 
z4{!{3IdtL!n%=R()~n~x%P*RR0GnY-co8T-SMeHc&0ep`_d$f5am+*fGd4|qnb&VH z0g*bo_y?s3`Ljl<_94?G5UYAPZSI?I!+BY|0nBi0!OLUe}1`@u^CR+ zdV=qt+cY0wP}GV&`!h4S*8ZBmuiD{;!@2+q>5xFuj_>hFH*YRSgaSXP@;;Hz3=%wO zmWbIreMdh%bDHm$&z2idJ-PO2y}w8YfAc`$Owv2R_M_6$0ci;V=|+%{PC>d$N&9m&Bro0vIC&=iuHDp=gz1BSeS~3WD_ql6An;-MZ z;}ByLlf)Bht7+SzC^^($yOVN=?FMcb4|ml>97pr{IG~O2^-r;SH`U3Hqsgd;KfrEC zj`u2~a8S)`3q4`#i@vP}un_h97KQ*}HM%sC5vte-k%Lyno89icJ)2e!n#7y0fUm@L zh_&7n{=V8{Y$l-t%u&I*a(PSr9HnSEp?FFaTH0PFtMW!2!7uw}u*NI6&xLG}uEL#X<*ug& z(+t4GTpyY+KoEr1lR5WwgKut2KB{CM=l2KCJfV9mmy5SW4)<(+x4GWeo7?k=(%W5S zwRhSV#7!|{T|Sq~4CKD&qT^dV`VM7Z_^#~NZ-9h=mPvD3KsoJ~SVu4`f6?N180|gM zS0&-aPF8+rjn4)Czzw((lzRr8`XhKR;4Q3gi>f^PvT2)IyvK;I&%1Xk9c+T%@bCNU zV+dPd?f!IqeQ0WP^oNdbAVuLlkw(SW622aD!@h7q9z@x4SEF|?~V8+1&0ir&RIiQ68 zvYFT5wp(H@0hgYP|0c4EM!MTOLT$J_qQAW}-M->^U#8Wh(Bke?P^!N+@*q!Fmzj%& zTRSWZ@FWl3vS*B;KrnF%8!h|0%PH*sq=1xlzO!$Pzj(aH-5ciz#qi&%hubU;l>$9> z3O5jYi#bW;l`~U~H^R>kwCZ`8NA=%`!Y*AZu~G=PG5>r;eO#bt1Fvxi3w<_Mg?57y!UviOMZFWnrD@4+cSZ-ZQve)HeWj5 zv%2KfBb~M66ZNtqM)1(S8j{h>zvKc;Wg+uDmZlOYccZIc?J4DM zx+P4qsR8=Hu$|$z=W0UCFKXY+{t9IEnVX+887B46;8h3cYL`QJ3~9A8IaB-l8CMJz z6ZI)y-g;F0vIcYdN*}4TIv+AtL#(%4ZE5Sswzn`4E#O7PwAd%lp99J;%Ky}5zBY)Y zDGZy82Hr|Hva&?U;HNQ}Tx?`*LQsVZmcjyMfDYZ?pMK+mfv4SIcpQ&H;WT_E==aNi zX?Q+7Tw6Z{N2Wg63~-QQJeyqK;-PZr7G;=X_{5 zm4k75V?f*O_6ACi-1&D*Efu0De$Djq_{dN8o?|YMAXyYU}I%hgM zR56npLS?AOzRvb1B}@gX)Ijl#25aH+8~X(yLk<`}c)zEWPo;16x{Ny;ZPaqztF3ad zS-PutgmNWQzmb951rzGh=Eew2n^WbqxL$^8zu8<@6jEz!^ebqIsBKJl@37!-A8lS= zy?{?IYI*Pt+4TaB1IusCqKWtJGiG>ItGA|13LtYGHRf|{E^1erJm&YJ^QFJwv-aQr zrRY|D>oIKCR5HljM`$;sX26y-TlfnQen*gcQM7=B+jcQJ6=f1ga>^4|=>Dh9JA{t+ zLwNKYm}Rr?s+8q!0(*T}$mRzb5PpiI8>2{N_xwG)^L{Rf)4z=y@buc%mH%nhte}Py z%=vbxhF)s_4?m$#R*x8}vmq-tbN^8OFirDDagyrwo6n3LpJ==A$`5CG<{a-QO-@&H z4S+epo7zvGME_F;EnPlH(8XR4c?Jq`0S2VpepG6HQ@h1)b)$Cu!@Fus zvcT^robhJ?9uE#|*&87Fv=OG<-^0eF$(kI=CMdX^B2}lfN$~I z{nNvEEa98)?T-DdKq5%nc`fl71e+siJm9j>x7HoUQfq=vUV(vUWJ zwh!pa__*JYF^?5Oql3=OzDn1PFWH&BmjEDuzJncc8X%84wYjv2)qq{BXs*Klr(wVf 
z@u0Cf9_CwB#c88$#93jXSMPnft6P0`V`PP~RYCB-3|{uRMGccIp(%I>e;4Iv`LMol zLBk85?!igeqgM8?JcnDGREkC7-LQ>X9BW&W_}B=pwldQ!eY+be<}$cz|9nd!0XAIx2G>~0Y7*cIS z41zQoX~B0-KlrfX6~|`zrc%`|`c?t`sC%kJJ_qoL79-k(GKC5mwe>NWrK6c|WDjAz z1}5DRoAbJFoB)aZOH%FNjW@F6u|;6M{5HWJ+lA$QuU(HB`j_8g=IDy#U|&90vdY|_ zRE8E+smspdI*cYAX9j~IQ#zKBpZ4j1GT_*F#Z%xzc-fB=RUQBz!iFwi6bk^1%j1o0 z_nrLVfN{U0!^(*z!7rIW9adYwEZF<5g9qnF@rc9G&?0aIikD4>>Nk8qaENnVh(El2 zr8RZ;yC-$&ePiTJKVa|YePRA+KM9{ie4+4?oGSzxru$O<;*Qr*RUoPBtk3`p^f(R^ zG@8+*Cgkp3xq5JUYf|{wv+wxKm1|02gmzGwC%V5-arCQp7HjODr5j8yIrqixvwyyU zG}3us7Y$9Tsa*ozy7Obi#Kc6)43#SF!bn2}c}lG{ZUck(lG$ulSTFP{ z;d?*|gbAV}QL8*7c3NrE18)8%H-YY{_X8ZkTgm2RIaf;20u0I+O5@=vtMFWm#?dV= zX}|43x;dCte{#62dJbBZ$LAq_2B5UXt0w&HKJ+IKUQrbupsPp zN|^$kqC`zJyGOMNfYje3**|#0JGvFC`n4Q^2?TrHbE)XJ=qh6r}u3etFK=sqydeWBR#HbPoalo|2XZ$|kD;|$R4q3iBA>%vm>wV#6 zqHb7laOHL!f4{!*O-jV;e8+Ncwp^Cw2B*q94yzQ2TmgZUqqE_wKSaxs;`=?ns)cj{ z>B3t-TkRj*U{ExG_r0!1EQQXymX7x9*O&3#r+uU7a?@2Bf7I1RFUGpeXRpXdfVtA4 z+Cxh(B4i(PtbG;CH)$+55f<>fp1wR?2S?J7uAy$ceCiVK;|JgQ%4d06)G92vz-32P z$u>g+wX-<7qf4#m|M$sV)woh~r7l1AIesOdzxX}2qfd>Kc>N}6pey-4tNF}7Cg;QL zan|_~3M%Sz#O&JNUPtms??luju(9#W4<<@xDwsTx@kzpR!p4#E0?(KDv%$Y{;$xQ* zj?%SVXQ_2eUCrFgT$k%xtxQMKPcyfz`}Pmt9FcBG6RTCl)qzs;b41id{JUK6nQ?-P z7!j#KfhJK#DUu!J8Z*w1dOXTP%eX_6VB- zQ9=X($=e6!oSBW-p{$33jEqcggi3zJ%~8y!U#h2hN;*xw*!8svHGW=uTm%GhG&WAC zv;FdAU_e?|b2fm1Vqt4>U8&IHS(6Xf>_t4z#LwMwSl|18WfA9SjO@1pf(K0pTVCP_DqI1wQK`-#P7pFx0ZK5R*pfxRcP-AnUb?ogRBhV&Vd2Nih$7|L@HPkz@MHN&k?Rs zV(VDRW%v3y5DnEIk2OgNN_2;yzTDWvqcA)&**7TZ-#5`Nzp#0?*5KnYVLl=cH@dz- zY6c9ouAxp}t1S;Yj&mDWh#e01kuM_oGPcLZ;{xxJ{ej>6-4@XimE(c{{Iq2fp*mGc zdPwX;gu1~|PcRG|{O;`Zw8uYrF55Ggks2iHPIP8>tP1+{aaW2zR%)#Q$x$uwADe~w zb0X?52_4(JB~OQ0LQnt+fl63d@vSZC^Dym`GMDYHgRg6j3Yd~F=a@hZiPakotULry z!XZ6Pp_iGKVn>)ba)lA?YPru0{3-cF3&?ErM%0y__ zxZL{q7LR6J{4v`H#KTM>W(c1&-Ul+^BNZk3>jr#j?uM=&K}DwKvY$0xsqHfR5#Qgt zzp+hMz)p%ZV{d*Kjz%db0=y%S(4K*ntSQ(JxPCdIotmp~;9qA8^QT9=qvglKic+L2 zMo?!I7ctO{xj1cK<35=6H~~j916>_xk+1EKE;wLwdw=%qN!)+;EcHUsoT2%AD>h;P 
zmo|NbEx|9Me`f(i$W&NfC{XLQbjGjG&^6YHDWMo%A>V#%!#%(rs2tO5xV@_uEKyt; z@7JMcq^|+)Ne9hiaA9m9BE8pj`~0qeVJ;o83O6ugsqQ(jwe`$@;tNbMP6dt?_=yK; zpyK5W8S$`^&$}Y@y6Gy}PhN^qIl#!!=co{;aVQ)XKLku>qI{)U8CdgFRz{GCL?1-V zrUJGXkU^rVIbMFk2~N(C%3lR=D>m(~E!hn)MLyySOYdQu@m#iAIc$3-`H%I2v8aUh zL)z@8WAH+y58o+i``tb9&xMQ_k9Hv8;cb*Kd!i-(>xG^u?(UxLgA=NP2u#f5IId$k zK74ZD8=p>B9}Q7{bsg)y-TCEIrDsYDcB%jQQCgQrQqp9T8dPH4j)%kQ*Ew&j#w0O` z^0~1w?fF!{Ub4OYI?;w{{Zd-Pm95L~Qdv^(J1%l-)5ZR7K-P*&Ni_!JQvr7w)>*u! zqXQr>f$g>YC@WyST69z5u6Mw;J)}f0JGQua;-0Nv@`7_$OE&D+SD!wY%}Ew(qaz=* zvjewIQ2saIfv=?1REYSfv32@!sEN-JtWfGI;XAjoESd3vuOk(jUjujyeU=Cf57tGH zq21z?OqcC&Ak@wp7_?(Ssg*V6GP z#V5%HufvJPJRrxTHLiCKh`l`eBFfl{a=T4T@^7dR|2U--?#k@6$F$2FGhJ*#0t@FB z`13r!Yp@OtDmZ38HI#1cpx%nfadraBrn%om&z4yM2OJfrtj!w^Vbpnz9ITTjNGsH$ zqj!aDd0zoQ)W`19jQ5RkKfcr3kVk|Tl01nri_tG3=pdu{aZSInQ*-vV>H+QaEW(=H ze+h*e1Zn@SNBNNkaJMp~_ru|js6^KInxBRFdjnQYj-i2mdWmpl|qE`!WqmPzZ@KK?}9VFI^2hpaqqH7-Ij0r<>g4GZiQP?(|rf%6%FfH|mY z0fn_$J9BDrfZv$l#LCA0EUY`+g+!wYVC0(*li0dVvy-ni?%4uwB zdkdcU#esz5=dB(o9R+cC*kzCZ?-r)l#j{Wze!=o!;FeUWQGR*vS z<>locI@CX<*Voq%>43elYb(_G)h<^BXmrIZ(X)Cn586Fg+veR9APoyz->5+(RJoOo zIAHafk7($%(!F!$zqz;cYnt=qO0ycVcKbnR!JYnebFUa#Ne>TGrR>xBy)mR}GsSId z4vv%<9~>zhngy`=Xc3ce0c>(~os35@2X-5XMpQW59}3^HnXjM}SO^z-^p=w`BE7e% z9gE^;WIGp7=Y8!&f5>*iCQ+AUb1ZPSw_>tP77lPU$r-dZ;y~P)f6nS65C37$;jt&~ zdl50V(rbizBqFZ}NSsUO>$`(PIo^2Mm-+g#y;*kmOr=i<+jG{6S3bM(F}b^ZCH}x* zs;C>fkWQL`QZV_Rf%RXvDyD5~(LFuOFVxkp3aR&y(``)?-P^v>Fvok|*MRGX(ckNP zu_H=4AXiKKOxb1=2&vpJ{u@#|IxtW!L@%*fo|BS&kF*}l>8!Iq=ZiqgcmQpf)0Vn< zP>_ilrV4`u=2r@?pT;`VWHi@rUYgIl0hrd(WWl*j3t*a4Iot4v5ZpjmX2jiG@SQ~r zDNWK2Q9~8+u6?UkPVy>9H&!#~&C_xt@acL)CSo`wLZ1vx!36G&vjNI=5pZZ+OAbS% zMwDnnhQ9zYQcl&`^AGR916x%`bhJG@5sktP3d;P4+dw*ZQi(x0JD0mGJ*HlY)F(ayaBpn*EoyBt=X(gvPBKT29=Z>tp_{ zD_Z7cu77P`XdGRpT6Kjqdm=BV+nMOMjbNo=KlR_jMIUS7yP0Upm^{ zhxt>X!bzKzY;11Q)6ho*{u>%6aE=Yv8c4z+1m$+VzGM-RT+%_yE;SW02oqtWg3E;uaUI zF~-@M8#vaW_*g9(6DMb6q_6WNgV5f2(4%`Kpv$G36$80I40xu*evJOpJK!~XV}s)D z0G6!$eL;Z*AJ^aA7UkKM(>s&Y(DlfOAS@Ch1Cr=}u9c 
zx-qNJ>Q3ONr}%Y2!SXSj`bWwBm-_#GV%;M)^Y0`Ed+;g+AW`0Gn{Lf={WgcrV+g$I3s$khqe?LBiH7G^EY4-KXJ zfMMa!Ee_#5n42>-%CGa`!j)ZGev;Y#mHXe_Cec;{2ZLxkQBefhyprpC8!PF-7ZBd(61&7L0MhrJ+5OiZW)RU)gJ*Kv_ZL{&yeU%7& z$;un<1Ds@z5)cp^GnByXx-4O7!7c8mPg>iWH*qqjZM|DN3eNkO5Qsad`GvFo}smb9amwFET9PCL8 zIWrpI>sNtF8@4yeH=;rgjFeklzPyeDj>_l?_~PmikV!~8{8*y}a=EpnAVd{*z$WTV z?syH9@YvXAlKmZkBYSqh!y0D-CY&?=&`rT^=miyVE_CI_S}^auRQ^nRWf}Pg4@oU4 zMMXAe&pZa=%hG<&xbYa(mS+-*GVY!$C!b1FT9Uh5#wLp>|FzB_>ai7pqHE_%nlK zN*D!T9hCQK!6@J0iUpB~b1KSv_}?LEsZH4;xpPKwng_MHEDIK)U=S&;H0Jayb6QpTX92nJRv=E{2Tz2a{JL^@j%&?FOf1>IkG(j$wM zsuFU*2|iP?zJ|SyE^DP&*_@Sa{1PtJ37VQ^2cFh<+P={14I#(ep?=L^^MmzYbN%b@ ziRF4~YI%*bAS?5?LROPO>T!+-nh|;y7Vg&@ai@BNzRd8M20awhciNJ1>h}dR0RlO{=U;nXHG~YtZpG3)qI;?jij5-hc2q~8W0?Do> zf_&ybv2OoeTW2S(zJBVbx(niol%6p{u+Ebw#*$S8)}AULn2dwK8{G96vI<@TID{k1 z{1?KH?R74S0_(Q8RyK3{YusQ)Xf>>SK|_y6l_r$mt2XrVYRj24Rz|LXK=OWb0nHkkH(Z3RW1uIJ#FF_Y8^($ zTBpT5mz5zE7RCXaXnooXKv@7FiLV(i?0;Cd2+}XWK;ZcASu)g0^_CPL&WmIEzA3$G zS}66V-fszdOfo{x2mtv1)j<1V{=)T1e!U`T9Pm`;i_3TVFn_MUmcnxX`Pi~HY+P_y zcj)fgAXR_!?^#Q3{fynXY+RejkZg?aDYq(a|Aq@ML zNlA<_5oBPcDI$g}(Pjj!biPCr&wcj)SJKLQ(l7?fknc(A4#kHG#df(Yp8m$fu1E!t zNP{X5GfadCcqX~sZ~~YZ-;*n!7G0#AOM6>6`PA(&Z*{G^N~^*7)y3hWq2qokm(_ZH z^3tP+-Su_!`(Q&;(hxN>N>pdq$AGpwfUb*)QEy1dV3M-e( zW|ZtF5&b`*il;3t5BF|rAX3&(Pd9z;mLCL547!Vb@`olCsEajY+Bf>isi-BK&Ni0) z9>Rmsw~1}It!IEs!l%>9e3f0fi*q-<>e0Hcz3p-G>wbHD(uwdt`Ig3Ti%A}WlwSgGIilQ1fft*svgi>~ugd^N${NKv|~aXXOK<@3c2M(*zT znfoBV){v{|-{VFLd}#g&%W+*&I8sZ+6*^?Z!TCU4@d#I8*th4u4wg~c6Jf#OCV5*c zJJVI~XQ=c$D!H(6z&7&VZro^q#~a5jCv*=)fyd`nU7egd!3Lv}8GO(vOm3@b9<5*I zneAtfpJH~Svo+Zsld?7W@P~)*Qn|!fPnW+SBBBY~(oN$2g-<}R0tM5pZuIAdC}`5( z?v-Tyt}s;9^YHW}mW(DX;;%84l0M_)iaA-QTmAf$Y}!X*6k)ol22}chN&0^O#_D;t0_IzlTpT57pe;eAi zJHlvF%g1)RtIcHcXPG?eFK}@qv4qn@!ouF`aV;(TBfNC!oqK8JNB9f&;)Wr(7dTon zjC9W((!iiv<94LbbT-0dGGOfj#Qw5Al~##1-@ou7eC+Bb0WM?4R$(CUsYB)&%|ki3 zk{7>()lyNei%IJyi^L0`f10=JwKqCLb`>{@3rhM&6aTJ@n7F|B>U$+yK=BpT+i`Mz z=3P13+fpHz1_4LBlTz%u#KRK14O6Kn1*N6BIXj=E)&h)a@_lVr9=Ea%z@ZVL(^x*^W%(VZt!X3 
zymRb~GDAEXn*Bpd#{bRRSlt@DzE}o_5x)YG9b8JG%1wc2TmDc1SJCg&We&X`%$J&3 z?-03SSo#|hBBRc*&eJX@nth2XUiLGAVQ^ROz>{6 zk?aINDYME^zSS}e0PftZi?>f`HskYO6Ej(8mjm>KmJ4%@|F=K{50>-gETc;LB^oam zMHm=rhz9vfD(X9zE6vuGgo? za^H@_E|jkN-&G=`Pw(`s*Z_^6nDC6wR&`S_2VOewj;r<^mKiW7 z*Magp_C^)`vdUM1qZq*z74|SA!{u*v_ls_=*{%l-4L9kUD&s7)9-7F>{nhg|HY8fh zb{1Ij_ZvK~koGFwF`a@);f_cb*YkqmwGzWjmD^Pqa${(Pq$ z4MxoL?d^%lGa`wr+zX>$U?~xf){ zDbz$OB=sw=j|`7B%{2!&4sGsEo{#Le0NiE>oeW$<+#rC1<%Si-L|Jg#O>Wbk0ZB0` zPA#4Al`!T!EU52!t*wkqS9P?pTHgOqOrhc-zJIDEuvcCF%6YGc+S-nmGI`lCCCJ*G zzlttyG1hVBH$x%oS^0h-0SuDu+xy~b37YanzOL?Db5`@WkU*xfcSQ!vXb@k z@p$=B)J8@p zE2-wlTsXh9He-tD?CaSC9d>hXI@KycLK;iE*H zhG$H~o|8!^=74uAlp|46kOR)WX)G-`8!h6jYvSBgo)SA86cg{tgnRoSNF$Ix;F=q8 zl+xvLQd&fd{Tj~KoPu@)uw&slyG2vp^aT{!8BaqJWU3ntJD+f?Fo~3Ww5m9Lw-!n6 zJX^=6`!>=n>!qq`n1P=j^ZTtPpoBnT+>1$==a$J}y->G(yb_q}0$ujlR^MN26n9&w zJ^Gjw=T&Jx)T-;fmD2ky-B8toypV2mS7et-GI89W}nPAC|`uz|35Pt+Wd^umgiP zCMV}#2;K-S@~ZN~ZZLXe*(~%8n)Hp<@YTx6s5n>GzmC?~9vlVy1g^eWfemG~MwFQp z{oagywW!oq*~04y2##Y+BI~#akiQ9l-^N+XzQr!N#vc+wC?dF(4 z51VvTu`(kl8T&z}-t@*-SHKa2rhCA>p&{rPUwewK58a3Mm*{YTI!*88X!5+nW*`RfK5Vb+gi13iMm*N3+`YMM&#n=bF&yp={Tj-JzbD&O)3F zv4)iza#;wch#b|h7xoXsGr5h>>GbxWery!NjY^hIjZ>GX#}b`~)xRrbONaehujR3y zYG$;bL=h@|DeIL;LePgn{50?1DFsju0!G>8Q7+(FEK@(aof zcxH=0ua@@~@v~uz`S?$6OP(kKBPK@Xo_cZS!QuGah_J*imv_vMkyoh!M(!3LoM%BF z0{ow9^dde#KI%nhyVHEJG5KTN{iOTE_FZsjom%`A=_oA57 z_!SxTPRYA@n~{X0hHG8D&V|Bv$A0FEAI^5Rc%6K^fRiD`{Jm~B=R`r8`W~|Mjh;}B zo)~iVt)_c8P0W!cjM5seYCM~{q7D5Atn2TD2rmNz0@}BQJx(Lt59>9@vc)yCQ(rm2 zRzrK&PI~E3wnwvsJNJb3TV$DaYP6HFv&@=XeFcAtWt<|9q!` zqKR)3BKnAyc;H?Il1F|Q9udFxLHSqE&8fbIvi`dMZfG32o0Lf1r0>RNdCk(*@&JC; zQO}erPfDGBPR%hspM%!$FT->aA`lmgecB{L*POKx7 zjqB8(Z+5lA*IbPF;HZ~Og8rG~KcyK)v|8=$xuSW#CqgZmVw2LEM;~#$#a6~@kly9(RgsdA>J5@ISXxaNLQYk*(kt9aI z=?Vrc=Y~0(>>=lLw~)N2RQoL0nD-A7V@0(uT1=Q40=J{daU0O5`&}-M%0)o# z3Xp6hZ_m&9SG1=F?PQZ}CsEgNX$2_XpKLlgb_*(**XcIk)_-$DCZEblH~?&bKwyNAsOEM!U1qyDmS7L{k=VrtFD!mnYl4w# 
zyJh4UAwEpIRv5?lxJodG>RJO=D|(ZY%@+aRd*aCvaYn4AWbTpuedY`P=}J{ycn&c4n}jfCVDUIf@x)OMw(sYp7vH00BF zFRQw)On1+B3>%K{h~EzN2@gGmW!i-9c&=(-ykGNt=MAC-)%sLm&v8i4rVitGL1QgH zzqt88h!ixy_Hd}}iR1Mk#BgHDXr(b^uj9tX^Q)m8;R6P!{5G@QUjFuL=7KM(&K9Yp zL(5J4ues3JD`iG?qqTrtc$x7lI8{YUGqQvI${oSTY-LysK`e^JAmkA9E-~tJp%(i(w$E7 z(N0U7lFSt#x|`6@W17K1^-U?|aQhe>GSAU|UyZ=~cFaD=Z)eKVd+lj>$21ga=$x6T z>nkAOxEl1_Lkmd(4G{dY1qsg|7tWi*2@(o&P~hpCE7AGpNqV}qz_Ey(o(M4TVf(tq zdKM7~#F`CGH~b$^lamFZlk`c6u*!Rj+}oRutFhdtDR+12?ZCO|G7&8;lhuWV`YJ8g zL*bvF`k$<7VLIqBoN>POzzyE&a>11kR@L3e1PH0&a_Z3XRu>eXspcmMl|Ya~2-XpL z=8xvPi<91o-<2l(+3uHzHtt8wU}yF7?#r$&rhP7M6*tgb^hLEf{krFM!)A;;jJ2zK zs`CrcB}aF;?ln*2u94+6Yml*%K(R)YDbX4sx{KXHT}bQ0J+7eVx%j6~c=7SJC`R`; z&=O~(rgl+lB|wJ7_^y&H(Khlo*-}gwe0E5t`(gWmu=eB@;_L0n5OIj+EVM6K)>{Dm zoXm#1((NKz{Q`j%H0o!Ju40>){dF-+GK~{4-2l*4cLezK34VU+PSCf6BnLN4ZZKi!>mV}?r4sauAwv?TMjv(5Bsg<&#`jqR8M3JQ;t#mgh?eb9U$@Jyrr%3f>q_QFsAcdrCT{Bg78D0M zY{0FrEV)5wifqH7mB|VVZ6lj>u^Au_=M>_C2}66_NgMUYOG2r=)omt!XeHubhumnjD!Ip`McvLquBE_p7P$ptd?B#u)%8G z%FX#NL3 zB9AlED(E)`qsa*Mq3}5YeqZPwyI3c=UwV%%-#3E-Tt!7iX5BiC2rV^fr!1Rb8Na4)FR{bo#f9rj!s;~R%3~7w( z0iW>zUgvmYjX3L)lZW4E=D{3g+5`3fx1f^+gCbOHg~7U0;sta zo*!MmijKq#271@TPbO9r;5iMQj#6rLCm>KC*3qn-GAEl~CuL>Dtajdhuc%NW{C#&_ zf=x@E%IPjv7nD8BpVP z{HQRQ$+^q*jF?l-eE3bC!^z$GE^Y_AAB$YZjUX|nc_~DYiwikd3Tyf$xKx`2m-g-{ zpmtyFSGi%BT7xxe)O2|+8{2My+uQKAd6d%y)7oq|u2lFx1!rx!9}8V!5Z#bGz?_^) zkbopXkKGK~L&CNK1l$G=okL)c$u>;#U{_%ME$iU=84`gP>+Pxa`4|~k+uanI$3&;X zA05!g_bNk!ecfJYSED zoL}QY_NNCv=G(jS&9Z-*I+yuKHcs@@BDV>ff(sIUygOp6IQwte0O` za0=3p=n(vr`~2hb{yDt`z)6`bxabM0;l=p_ruQdIp8*(?(L#=B*M>_)%uKt)XU8XJ z5`q_##RVDbL8xa~6QJ~lVEHqwoZ~~|(wCw@pb$4^}MXG;0E_yy2wE(W<(QzrfHJrIi3OJ4oTU^iw=_wkDLH#zgPHAs)cP+FAZq?LaUvv6ydAwd z$&JnQm7;gq5DaexX<{8Jzx z4Dqgg@nU47mAng-NK71M9Z!55FcXIpvO@;?tfCxwm+gvSOlGX1lZ-bn^k!JjO@86sW^M)}c zsfN=dZgxYV2MrW$o#JXb@-$I(OJ0{6pQ=dKZR-E@2}=H-o<=fX#F%0%1M8&O9mRZypPRZBaN z`u!>#xJwuI!yg<1VWU1nJ-$gSRD2+ULU?e1lcW*HSr1=uD~llD)!V-sIV+yiPtU94 zmKCC+Yg$QNFxrsVLw-X2q7rO==E;%*>?9 
z%2UsYwDEyEjzwy1eUOrxobHj2gHUrFHV0?|jZ+vo=`7D`*x7$F%U-8*5OqUu7y3Ce zUG`_>>DP#Q4E$gou!)?@YRJg;n~x$=RR>-7tltjamj}Ec^PFPZchnHRf2$bFb}jG( z>u{iQm{7fl&J7_P;)t7nALbAHSA^Nx&OSbNPJU+9Gt=s#$a4uj>ouOZX`2W&(M?`R zVB(?gCNH-Y`=FA_B>tNJQihq=FwGES?BYJe6!Jm!Gr+^nbwzc_kBy#oReu4!^+1sYbLXZswi&fL-0OIMWWts4hqnn_nR?isN`cQ&bE1w^z86? z#eG*#?iOFkd@#Os05rSs8WFbz!;wrZQeL&12yhvjkR(1=QFzH@C8xxD;C{bzOY*>2 zxjdit%$(ip3$)vqr@~}alvbD5)DhtIA^cH$>G1qu#3f&w7cTQ$m^>VJ8lPOP<2nbj z>b@lueh?6MD{bw3lWX-Rvs|x{)W8S&^WI|demtLKb$y+QR5A4&X(UrHpTDs6tebE- zS2|%!4Ev$O;dCSUnd$YG(M_Bb*!6i z?F)4`+uf(X?NojJmYOzKc!-aO7ltV|=RbG?6@dJWqLQ|bQKeS4^$V2fS3W8164J~_4v7wH^G`ERHa+eLmtBw>3fJt&{=HJBcO99P;${y zdcgkSsPysMYH0ky1pk|MeU&L|p{PdZxB6|?b809E0Tpp^~t|%sp1j^+}F?8I& zZ3pL^C4vYaE=A|cA_FgMr7T~CLlC9B^slT4qkWeE)$K-*;FlRcBF238Ym00G?>kOH zH4Et!EnzqE@q{B-pk?D23&aEHz<-WU2x>IvuRh{X*l!AJBAvfqx6%HUssV@};xThL zpDO`(VH*sL4)do0R<%LI)vpjk0NAqqhMvfnEJAa^s}1$UW`=m;>TOkQSh-c0O-V1* zeVp%&B+mWg^=b-8dGLVD_r_8bNe&)P@m_k23SO;rz7W1g)++>JTHC|@W!z!i-3X*y zQ8b3kCwMI=$k=^1zi?1f6rkz>cSDNb1~7+_50Y5sQ4c(tM}@qwfJ~y~H#ytN?1~4i z^+68=VwZT;owU?5diHQux_@I}w5QHq4=M6UeJ0Z-ZPo1})CCk=UqHe;%y)O)UOR3) z!lp_tqYN#AU2nh9BDhKt>wRP98wHemnfb%|sD7?BOfDnGb)rvgtehOww&o~g+=%%B zV1*%@G?a`J@XiHj@kSxxqC||K{4VJL+a*6D408HG(Mnz$WHL7O>B$P>Ozr~&Sy7l3 zl-w*CJ6Fy$wXLa%zPuEWH(0A@F_*Kox3wCA3G^?$d+^NJ8&sBNb!hIbOF}8fW|n$i zd5NvfFQ<|JT+nzkwBpxnbtJlerIp+srH*4?O~R4&*@^7!|+J0}rl_hKA99yswXs zZYuTOFnd!Q*{pbhc^rV{#bGV-jV)(SLsFz@tH3ERLrdvf=GUnD^73~U4zrgB1kRi1 z@>fzM_xCleL<8EQr=vcOPDdtJ2IIw#i*JtU7~j|5Gj+pmU0;PSzPB6vlRL1h^tc#* zQqxdaYZxGq%U`{D;+fssmxO|f^_=Xeukfcu`zxS`{P^Ae7^lp9j%8>rB=7_U?F?6p z$tb2}_6qE~pdhym=89MZ^3eTprH_p8osztY@b`u5YC050K7 zAsB9{J`8J_t4{E=FTs8P=gE`N6vmBluH^E3+vwE7$+v@G@OFb?rzMaQ_@*!}`&^O~ zt4CrFm=50%4<s+|WhaySehW!t zO8&Y#Ha{O4K1(u#YqdBt5iSFiW_$*`iS!C8Qy#OgHPE!8n})DB&5_2$2;D>odVCN5 zy)|!FXau`SG?_blJQ8TuwLQNxcKM2H>OuvM7q&N8*h>HOtkUX}K6E~J*QQ8+co^xNWMvK8pA!q58$b(V z)OyjGx+Svl#@D6zkXA4KN%FTx5omu4UMf{jNs;?=US2)YlV28?2trBg; z*foTm-Il*%%WYFrU*x@at7XKUA#sg*x5z!^vN^p^s_lk3zeV=503B>TsPE+qujMt& 
z0Hn}w&RGnf%@;-|uS?$11^t8r<{JDsitZ;%xRf#p( z4Ryw;VH1of<*y$=`}%IeLrY+7O}o^MeAty{l@`%?b#j7tb=xR5yB&RYhfdYHJbo!q_tdWCI_Gd^26NQ9#d(+3-Rta` zM%(q7+3|u?45XOHVw_Y!m76g78W?-Q252T(&fC|~zT8f}%(Pydg#GnOHrDF_#G~h+ zMIe0B?6+LZIT4_a0uIXUE9;kv3x?V;u82KconTQ>t!K^{)H~wWkoi$iP$;3SF4RxB z-(UZX?_{U5Y(4IPym+hRBrdM6&r0T5M2>bOlr%pvfxGM{tXZ3RdwJBMW7n)p8}j}8 zbAkJ-iGZf&kL^bdH71OUg0Ja-vT9>nV@za8C^i*YCrEGpix7)H_)V|)L(iAVNj%qt z9wp$&-bq2n#kj0pjzQ~fkk365(47%W>#=wsd5Q%z%~It)kq~h}Qey!533pMh-W8f9 zWm8C<+0nJKzJR@BOOl_Cq={8ssHEqnVD)3Uwxr9*fMSR7jo&<+Qr@6^wfz=tlQQVys1C)Y_4Y};xS=D8qKF_ua(^!X~{DgDhBgu&! z&7fYbYQVvLG-UOW5x%tXIoK7(sp0hi8w`uQWh2W&cvcAYTfi@mgL&$L;EBu|%sQI) zRTn^Mna(ZvNh>G1?oQkN&7&uDXM7Q8%+9xY13+BFwq8$v?52M__2z!U{)~-W3hH!N zMr}~j)(*T{@frW|t)h~WjO@$JxfXi(-k#Mfe&<}+w|E$R66y*)h(3YepA%S42Pz*<%t8XF@p zIXRy(%iOFYXNNE{7|hLX3jhvf(y29m=R23zz%BVmCPW0d;Ltm7UEAKKU+T+im)Q)) zUg?S>&$tYqJIvVZ-fi_f^5B5wQP<#Y>e}spR?1s~JAKU%;g1d~Jx=OlCdA#o&cAmy_ zTRboqP%9jk8F#rHjyr#7I2CNT_nx;E5+l49Z9m<*=nhj%W|p&*`RpIyrjY(l4okHB zBFH96%PX(pve{OHodL1p?CSJk9a^yE(!@a7 zo`BPxbJ*LF^G>197In47SvsJAldPkNGPUM|ka4nyn>p_kJN@2ic@2#$d(IMy53#n^ zKhv^gKn^@Vgg;<7n4}OA7jh-JjLmUr8c&UqE0=rK)y#}9 zjQWOonEH|H1Yqd}h>Sg&L_K9#RdASY-$y>{eO!zc!j$kPvRT=UWFxNLv)tqxAti-q zjId}p2Lpn=a(OM7S4#CU98S@-ui?v`x(=rM?M5xImFC7=Wd#m^Ut+#T;G3{ zpWEx2jN3Gr`}?w4pP?!8{Bf|)n-7kd{mg9klCJzcAho^Sct;5(jPV-gU} z!zk_eQ&NB_W>Tg64`JDx9tYru03DE!*nZIesP|^2sc0Ao$Y1bCevH_8J}l7q)6sKA zCsQcxGh`3=?Ek4f@&GdMBGC=BsxzEI=r)p~FQI9IR4McN-DtLOoqU+R!NgAwm!PY$ z^rZXhFFZ+3&^8U6&FEn53Gf z`@gR+c}umba92Dx{nShGR4g7mU%>Pl?KhH?RtA820i%IXZN5^^VeaXr=UOii1s?b@juFtWorr_WP za^qd=$@wI@a-}ZjN!rtSj+1$|d@f~zHU%gw;Cvi=zD0Z=$=$!e#(LlG{_glqiq+%G z2Yb5?rHz(x+?i+MT|57Kxt?B9U58#vtDnf8p8p^mQs`s^i`+L$f~xx7rB5@&J)3Om z*eUmG^4$7<(x?Dp*#(=IHg#C;$s8DZqZQI$WebXLwCvR5=`a#tx6`sM-fo-n)%fmX z75mg6tHb(D4KGr^%?1qKn|>Adu*8G-2rZ*w3JiYtC0}Yu3kyBA=hhiq8s3kkeS2}q zzEidyUYwfGH0f+9Yx}~>G{`o?14(KwlgnLq&N(~sks9_br0cCn^ljG7!8>;sokR({ ztae;LvR~Y)T${Zk_#$kqgdwzzPiE%-R-*~8aibmITH4qKWvL;-#}j)9Q2 
z$0u*2UXsjtukND-7)GA$R0D(OfLo<@PI7iMfJ{@<(l08z^(O;n9!>Olf$H#zl8}QU z6#NQUL@XA}OgN12yI*|Pd~~I5jxKb}#aV#SU4w|6owvPln(>dyHp=d9Jx$mh4{~WW zC5&4Q2Y%*G3+BXg1%Wbi6kp|ta}tZ3SPQ$R#%XAHnCM+a^l$=riG(C>EPZ#F-l`*P zcOnZf#Hkm_R&S)Rr$|8$C74XeTlu%qy#=y|hsTfNRa}i#&14$df_XIrRb}PeMEdR5 z;Z>+Pn>D84Wbe@Sh8F9GrV;;7{>tV(YTH;^!tQ}&I>-Sm(c>Ypr`^^v6^?W1%R)K_ZzO4AiU;o794Oofv+0$DF zK&#X|5ehXl0EK4O2^tIeragn5SPHFEmU(S7=M&2qcR$?qe#%_%3+=mXh0;iXRv#fM zWV&txe{Fz!gIw$Ow-^C(-_t5skZE8gw@eXl9Wx^d(zgCOixWQCz%7J zbM3QXBDyRkE}cnD#^&Eo^S=>JE}T=jk>Y)JxJH##&Gb06Rh7|7LBr!&kx)YEy6MRjQL!uCD{W1$F`aa*QR zTAF8gkUfcyNELAH@g+v-OPXA`5-tVlL7fHRQG49t;Kk1TfJzU+^OKD9t^d^8E_21qs+=hB^-Yt=Osgf3auFJt~=VOcGm3Y?G z8KDA85b>*!ay_l-v?hOo`OG=JTz0mtIsuqd3@!7YalCB6?(!v(yr;K6p{ zbqzx)3H?{Z0e2Lk!k;KK2fg%v>YN>to{s2Ukq7Fk^D}D#;*_Skrj;uIJAr`1WDK+v8P)+kdwPZGk(oG%NwRHm2E;Sw zZO?!9uCP@Nv*ow=@oD8a%&?=Uer8zv?AP2`KNkNHZv+2#SWFVP`@dMCj2$Qskgd;E zIy*UWo}Ke3Euk9mAygbsg}N~*fF|F(`IIBn@U;9DAUD0@{O@ZvMpV8C^F`=~HvTUx zHWYTftFOT6KZzle^aVfCR#$CTE1g}b0Ab*3>UD&kE`Y2*&;9hw9`K8)ty~nLkh26z zhJV2N5+HwFM#-0iph&}`q;8^~^FNaij)yt z*ihZ_!A&Ue4P};R_1zs#OI=o;`S{O30(|hqHY2M)j=J?~p zcxDsE%|gl(DTCz54jrt3;(VR0=wY zjb;{QT{y5;osa ztv+3gi;MKT?1MipxxiYmi=H!Ha(aESvsbEUXm34NvG{v97sBDK^9z;wrj;KuT;l4H z;*u60V8^RAj&XL2?jHj?0U2F4TE)rV8nGt7yM~&X7o)wsy}t^iD3{v2@C7{V@h|7| zbn7>!THN&1tB8Wz?m?OSerVzGWa6?>bv2mCK=n-hM%@A%QNt!QZ#hsBpmc*uh<1pL z`^U1H{MeQ$dZ7a+r&m3>PUJYcmn-3jt(uu#05e!+h(hE?5xh2mvw2+M)u>-6lQ0jE zN%b+y#W~C1GGuEV=BOF6pnu$&9K=E-t!pdHs}tS`NQc2`mwpx2TE zyVH&7b2!S7C{U*8yj5lVXLpvv@6!o|t>uvjKK&d26AvG%TK5vVJ-}NbT6if&&r%Kf z$iQIbuRtlL$tEpf4VE?P7vsKc3Doy(JFHP?Ky3jn;s6^8`jV4cP<%A72$GQ7AKADa z!uHOWRj^u^+wG{q@&NN|HT`bp0{d1yt*adCd`1YPU1Fx(-mLa@$<>1YDZTn|JL?W>#KS<`B;HD(eop>b>fnn&9v;Vv24*; z!K*wa4ZtF(uYA>C4i}5Ec)2n0*25P88;gWf+5cn%3P2h<~&d<}ODa!33LVr%q zhBM~t)~KS;?e46&0U5hy#6Av!6Oc8C2P3U<5o(ZZnZf7B^lcA<>xajf9I(3Ko;I2- z^WC3Pk-Nd8A5v@!-f>4tJ))uU&SwT3Cw?`sJptYH^xPkuNw~_xHO7wcj8Fb0>jgbx z?Z5U)!dvx<5mqMi$eb_24Q<$-oL_>;X3d-~%}Drs6h23Ouq)8PANM#K_@mk^9He@n 
z=S}odyJ&-E2lLkp`7|DaLi$4Or(Oe)pl2`OME3@)1$-HniwNg#@A?YaHM(<8Uk&TIk8?WA z1RIE?T-5%;myd@*Y;{I9vW`u9x82 z)^B}=zZbs5()$6VLsTAXF(eG&WLbVo{DvM;6-Wi_p+vLvh(+-JwaMvwXMpeCmbQl=@5<J8+ETIS?WwbEle=Z zLf=~*PmA53wKiIP6!Lru9&U-V`5n<6urd9OC#znUOx8>xMoHz12Q z_o()GwSf-Eny zv)Uz>ufbCnG(QSXmb`t^CkH?k!;EV{yFZ%W0 zL7cU{4$2}Or-)cp{Ly1uwSp`F29@PLJ7w{icE=L=^uJo zK1C#-TKT~qEp)e`4=gKJyy!1&or0;NktbUWImz`cqwr-+vGeUb?9! zF*a^nE&P=Om3m*fmahzg48pX92ch5CME^cW+x-5FgNlvKmv=YAI&%x$eI<+@y;k3p z+-P~N6>FmT1~^WVTiy|*eg`rUxM((CI~gtxDy+ikXAFJT8c)fyFNLB5Sw8L{miv#H zg6Z)~yQ^2qc#y&hy#LBuKrTZaVgo}y6NFBcZTws`FS1zQSTE1-WpL8xep3cFy^DhX z7y149%8`M%|5^bFaFud&7-s&E-zzLEoNr!xk5ws1o3OSZwkv*D9)5gM1h<V%xsTbE~4VDgI=9z_w(pK(J4p#Z�YSh6gEDxu>fgim#RJX86|ENh2{!R3)!CkHM{-^;8|0K0@2HOhJ9s7Oe6xpxsn ztk0DL?SH$uT+y7m;zdPYw@)-I05_qzcK~4pe87PLykPYuHsEmZu+O#n>aJ`F=x?@b zRi@D3-KZ#WSdS_pus}5GK?D^&zySq5LC!y+!$&E_3%s^>Dly;_{hTpqPa)iV^$x3o z>z+Pz3ju>0NGZ<<-q8HFK9ZWMxQSv&*tBxCLNYxO?F>CJDLeX08=|MTPY=gXI6dwu zyBg!^Z?_)?fN<2}09b&gW+jr?y(9zk>&K9CVR<8Oyp#1Bafjp1*Tfz1pE3dt3aN_l8 z8QhU0!W1}7HU4UE#5Bax6R*UP>`I#|5u`#Jj<@Dt-r9OW%0%|M?#l}fCMFz^1;zAK z1XG)V)_%PqGhh$h1K30J<|M;=A4HAhCIf#wKqtg}?Dr*w?GF_#*K_=AlHj?@#eCiR z@9!OruobhW+Yp#mi<>tcik?E|=l=jQsqWGlR1_>M$@;(hqCYu0Z(n8bIS{}Yg`VEs z!bnOMo+dp(Ev~=JAc76vm#=B)=}BxL;CDKZDVhIRd->phUM?24=7|kUZQPkCiyo{p zg^b`2b(psDMUubZ`?Rdd6C!|d<6D(2Gw|q!Iy5}icy(b;3cynffOC}iGdl20bl`uv zD1U@|46vcz?y(|Wt5w1EvvyBmJqnM$29U|sfw}(h#Pk%KBr9Ok;#d4GjfU%GC5W?& z?6?q}qo7V~Ha#MJO2jsQDqTTxP2C--O^k@@CZ+ES_9dv=bko2F{))ni#~rx>y!8N# z#tiSMaS-a^$4(`SloNiC&%ehGAEh#JmLyNoH|8wES#tdVtVMqaot!J+-}h74ZOX}& z7Vq(>olN|2x#N2f{jmeBlRg2PRWb1KUMwb1;eRWyiZ$1rbaVd*o4{((P_B*#5>6g#wCM5U zi#5tEYrgDIzo($~>%W^G^%D91x3{-7^81}M+!rh1-EHf&0Ys*AldI6%>y2{<v*-mVe}Yj}GkdEOUA*ia0nGjdpKlegZB_vD8!fj?Ji? zE|mRkwU3N~oaWiO4#WW@{|QhIQ zvN8j}4O|&@O?KYqmDM42(VLllEweu#=ozem9vxNVv|tsyJ3>sfW0}g4Z~BiDm>6)P z9N`>rGvOxu;~EW^%Y-eP{(=st#PkYL_LC9#n|sJAoowcKa3A1BA1QGs{f=*T%6t&q zP0P^;Z02~1ZWCOjQ*m?;E85uw93AL;U^mYp)!w)EYA70R6U&R_N{XZ?4gnm$oO0%! 
z5iYSrj>=dt;Ky)sP^b-Opv@q2G75C2aiGzQSP zYB%~5f5iF^32nbL_3=Mx+no;pV+?4_xi43^0X^bKQJ3A>6pCcxu zCx;#!kcnxOz-G5@W;bHPY|DDTDo8`)RW-`Vw*D=aEvyA4! zwa;h}0n&DYY}|taESYmpFZVCyT5f=MXKo{rRjbUgtD-~8u8pYZyA=PUC7W>Uqg59r zR&41&B2-#QZg?6sq5r>((ibe&7=v!xSpK6d`BvzLC|o0 zaDh73>dF2SY@fro-vQ#l%^`x^U3lSYywRF1a~x0OM=n+>7P3;Vx8`Z%ITKRJV>u+` zaOp^o1c)VjoX(|w6BReQ8Zdmu`1N|~=lZY3=pz7HD;sQF77>)D9e=F0jv)igI(*`J5Qj@uH5!Q^YN1VFLskZzduNt@SyybFUqJ*A{oqL^JSv14 z{9NGiVtRlX1H4IT6^hCs)v8s)BhY_2T~Bj*U2S1d1tHV&(df2$MP$?<-AjNt46EJ)ruq3u$JZ0*+Wc0nsxf>X>>y#v z?APbjs!^ki_PxXU#9Xm?P1(3A{R8yBEIvM0!MVr@xU)Ri)*Zd+O|9|WsFIFws*<)C z?YJr!5StJga)gfPkCC)Y*Z&|fr$OY$!NoPs_ig8x)mB+z<>5Kp^)-E?A+|`9I3m*# zaDb8``BX?&{MzWZU-OEapDqZJQD-}u#K>Wx_(YVKNB!PyAIZSaF0kNeWVa*j9Xfqf zYTw~v{coEZ&t+4FBA;QaHz`j3G>hEOrN2Ahouu76E*71hJS`6Z^5`7pUscln z5#q708LUFzAq5Vmqe?zb(C;^f2%6lgD3r#=5iPDVZPPJ?`ImE0fZVe+*2FGa)`wfs3Q0D6{W=2%QlP zmxdfd*I6X$-BzzXl;Z-){$9%HZ82{N2LF7?VLTkJmuVvDv@BfY+wW3v9`K!3>(TL! z9Q1klar4uGwyU-;pX!~z*|Dnrs&%9ZA^INy^G3G)YYkRk%iv`f+Y55UGk?al@DHm_@(IenL7Y9-5eyuhwJ*7aur%TX0bc0`9^ zey5_QrqFK{DH|LrggH_AMgbVaXUe(>r8)`OD1QlpRCXhLai`@MDA+=>RIFNWqf%Ky zB9@GYjPIX+A=yb`GWg_r`ded(Xu5^t-t^mscNo3JbJQkzU@wmTeI<=Zgh&8kV`>sZ zvUhnh?eCp5jFz@1N{a&rToQ)5|SF%^Pp#^ z(P4u5-;JGRYc+j)G5EQp@J=icZ_ajRrdyrP!}P48p12b`&8SCT4G)(#d7XY=&hgp7 z>4c2vaf=`ZKjAJrfZF1==mx8-Q-ihi?!(<<#j^Hw!RGBrOat0HPYG^GN5cJX`T!9V z2>v)c_*{+#aY(g+{{|mfcBh-`xRHqa_m2Y7)T51zfTuIQeljMUbnHpr`-F}6I@Kr) zq^d3Zq!tI=>uklLsYy3})x$-*zg!1$-mP)(h-KGX4<4aEDuowA91<5MK3KAhZt`BxaXP>?tijegcZbB|Oiad*U zrR)+rKeHDX_#Uz$DJm^Zei-5F8xpNp0x{y?2{ff&CI(Vu6#l{K=sc2w&(*p2&PaHF7JO>9fH&Z zJQ;+WUK}hCUqL!3cR4^^#rPtw^Nb61_&%aBv!|V5g`h?(#$?K0s>|( z!=^p0(gPmhWoYlg*r$(=%Ic39G96j69feV(s0o2kq%VliRcg&-=ptW!nlK!?pPc|3 zMkdm&Q6i0`Fc&CXT%>c^D>az}*P>vd*2cJ;pV^6%#}m;^h+v0r}K z04ejxo;30s(WpJ~AhetsX4Q?A$YI%22Bp(YbV9qUEKYKQo*U?T*)+Ji{a(qxxgjIRYiI?ffdixybM8K91Jo z*zD#a~d@Gv>2&xC8{dndu?!52}`(^IFvFtb67-DRsG`mz8& z0gpu9K0qvSdXFyTRt_Eo5l442hY7yYf8G}H#Ru6>J=WzNhCq5o< z>h(yNju=P*a|vNX#ec^M#GJuEu;_>f{-JTzx44v4ADxz85+$mgyg`(Vf_g=7NuJ6k 
zUX5{Z3X{|{nusH0(O&#YDrgdK^}6K&{&Tyu{d`Qk?cB{`cOFZ{?tyEyv|;72GKERECYMVM zatmtpM0b&@P`z!9+i02btdBGH5>057Ry=(I4Uwk9o|I4hb~Ifv>_IKD85?<#fThTW z#fAQlD84%LS=dHuR^KGqqgSr*PpGY)=d;!nvhfPi$&Ad8363DKFZuxjfy2ee?zd;g zt@GMCPC|Bizp6>jn9t?=lUf@#K_J`Pw>q%dk~`;iBtK11D*mH!_q&jg5R=u8Wlxvi zoyW$?)kf6vS?{C-k@3C&`gr5{6Rl%@hraGRQ;|>O-`(o+F2K||-?A?D)W5Om89=Wl z<`b_Uu2#QsC^_kqkUW>}vO!e_UHvIEHesLxViOWQ@U1=WS-)GA9v*|436PBy+C1f# zZBT#@eJ?bk5r%IRY*w1$H4UT~0?2k>)HjxhFa25Nbqsf_F&(kdgP8%OsQqp-vG8qN zg$rzP8`!aMy#KMN7wkCFoD=QARPona=$tP354H@||8M5paY_QP7 zYoZ6CcTY$~`{73MG{LfMbo1?7S_DHP$aX-0pYUPe60%vL-fkm*ujXgOnseNh#hM?Ao*RD}%Ipwt2F)uj;$Ho)w?#2$(WrIS6jL2Do!G1{qQ7j3SON zfoKzG>~%LDi6GBIx!MY??d7I>`RQd%{x zKLQoGzYqd^R)YbW{F)(0CzcMOzI~43J&FyaavBzUFV-=#m%)$D?PU;0>UV?J;IJB& z+{qfsdbj#R#zpXQ>*r7gW0m`Y>EKh}6vWV-8955dZfuH!>PZjm7=vb3`H>&+Ea|$Q z)}RHSLXFXGGkHZ|=7)(B_I!}xAF9ES?~#H>B{6i1yDcp6*G5aG_M?ecpG^!G*-FIn!3>GAn77UlbOnp-w!={Wi{&BwnTVe#0(x17GCy9 zcMd*P-3h>FVm)=zgQ?%sW_K#H`!Cn2$|-Nv$6~dm_A@2htj5*7$31dpaBpfyojLBlY0N=X@NE)9*!;zPV(ziDnuQx>=F;q=F>Ler{LQ@Tn zqPMo{imk{m>pg(|EA+;e7QqFcswUh_=T_jylLr|6=R0CxPIq)UETGHqRCRUxR=@=Z z_)wgiNy-j9XOThsSjWYvSO5m|ozbcFQT{d>t>q(1Wtr*+T;|fjm8f1rMvllgS^F9( zOu!f1(gOMbL-<6eY9_gyY9UIZzFS}edyqch(6DR~?ylhfm>fFGmcpHYn-m8Ry1U@1T;;I9rE3&)?YSM zaf=s979J@5R>^||qKELTGEuJqM69uhnl&h7g5ccqwr^$D}NEJgM zBR)25Bo>(^~((SNTWHNa!B^w=-4G?6*K(m%3kmPhB}kBe6^(O^Nky z;Yhb8288f#%EbANzg!p7Cx%wQ;69kNz*mSmlVwDzy!fc^SEXhBWs0!e&uku*0u2(U z3L4Iv!$FCRri*|r*epvqj>~Rb!=y+I^;-Fccd|v{l*LCedg*{DYiPOek3Zg#)}g=5 z#nB6Ip1AGt*~wZe2$ELPH@rWfp8H~AV0CqoSZ~~V6wIxSkN|5+#~ktw6FnNo0eMR( z%H|@l?}s=Y1KsEK@pIxHsafkcSjkZZ8wSObD_K!aU^jRZe7s*o1i54=giw&^5cEeS zcXbEfC8U)Y1c^IvarcM9&J&&D-)hKT$a=7 zJ%BZSlm+dn7=yRr&2!NU-h99sUuhQ}n;?UK$TTahkRM1Oe1ji2<<4=2teuuW;{i#6 z_B)@0@Wt^HhkB_hW@2K0y@^ci*WKB2Ex32hfq_51oFDWg_P58bRW9FBh%Ps(|2CC2^9K;L=|J6s&M2c9H) zVw&Usebjs*$vh-z#jRK4RJx*j)m; z5f&HU`t2Ma8;hHhUJf_NCgPa5-2h?XU-TpqK-wKu((q83@Zczly;J**i<>fZ9W5$( zTJ042CHUHOPbC&$V!z$@D0}m0C_|=(rgrmJ+U?=)NiQDAthD-OT9KB9cSydfAmS&h 
zW>q^s7?j2F&N;?UnZY_))cF|o5I2!;Y_B5z!Jnd2e&xtDU@?vWVi%dVseRe`)KBR! z(EMQKfWtY6kzV*Ah-_oE5O?1=rWz+9+wp4xWE&Q8O)}HTqFIya;%-@fO)_2GdzW>1 zq$vW=gkMisob<83aMY=Ru0l1o$fSW+H2U5A@ua)n0|$4k>2TtGDlqV*?=!LF2_LZ> z!F`B_kLbBfyoUn8pZw53wjctp0NUa9Ui>S*F1BHOd%BYsbCnsdxGdgdk^iw=t~lfX zHK8>-Z%<6wFTOP&3Bf?D^?iT=o}=}E#NaLVOp!tZX6vrD?3qUSTp&R$z?O2GY#zgE z@dzNA6nToWZzSOo9{hrQ`=!G^H#L(~y5_D)(2 zC~P{qqC`MELHIY(LVPOFUVD`10jBE<-mBm7=LId}8=QlJf=W&uB?PB_w!o^tVzXNE zDpC{5iQHrEVo60d;x(V@=G%_VW2CAQYNf`ke);Lc;dS}DjGKk6tddW8eZU7qD|3ZU*foB z=Fw!_4s8N0)w6Zu^&PpT=y!4P@!_l%RE@-P$r6UA=Va>+!BlP`nzVbo+cyZ z+gwI~XAm%1G1Gt9>Tzr|n!=5$@18)|IQp$L5D^(o_eY&f(tz>JS$~SBP0$CH{>u+; zuZ@U%y$|MV6r@jPL+6)e+7Ja5)PZsYRJ<$2zp+7nIe1zz@D zjwF{GPyZkxq5ztXJ?bmpWCu6@vtjqEx;^qb>sg}S@c3Vfqc6#U`ifpy1T&G#lD*Py zYl!-f<>&|ho)8X|@tx%Lca}PKLV|)D**8OmzN7o6us2tF@p~M11ds>wLVhJf2BU9O zNy{7Fvpa>qZbNTPtTL;lQK(aNl?TOZ1e9;Nrm#{%@z(HKAfWBkb*$dp_;Q3$bNL$a{&nyYyZ8tG7BSp2`+zZbFQ9?n3 zy0m$Z#}!=Z{<@jsx96fTK{^uvTBG2Mf_j_D&nY@Bo1qW5%5)(BDI06;gaAlTxlLe{ zm}QwREMUkzw>lCD1h3a695HASSb)7Sv%lT0&{#5QK{L0rI z-ujVWuiBOr81ZRQlMnm`{kDvcGEUs z`c+s?*lYSUe4T<-81zv6lK|4l@jga~C7xQww{yz^;Md_p${U!X=N@J$F!;})Iu$oP zzSxcy*GMQFXX)5~azCC5k|^%+fIcuk`oB(sT_qV%TK-`M3y2Nh-Ckr=z!C;mGzM#K zGTK-8a4pPU1?8?8g&>8$;>{MpA|NGY@dy0xEz$tMEzjx^5ivNb3Cq{{Pt`-YbjQ?`>WK-L9X{VJP=lo;iFyG}h&5y;3Gk7=dA&WN z@%#4T=mCjc|5k31ua}bigKJNDRoq8aRXmIF@A*Q$)z0R+5_&>T?S_Dz?$=wFKNl%% zceqK60S7Sk&U@!<&B9epcEB`_8I6cFgjNb`qaJdr2ma~0C#mo`71QHrNwUfyt6mnG zf)#Mg?XV&nae1useY`<9r4JK`v;A9TEGa|^s(-imBc2yGv$}-P*VXtVgZTamh^DqQ z@f#H!CBKtg=c;{}z>~0Zbe1yX zuW8CE4S53UFLmu$awR4|OepF>`7GzrmZIWRqG}-N@aAD3KBPxiJQGQ==tR-! 
z+pxU%c<@;mk(xSh10i6y{3OF6Oee=lKIUFPL)iZ17XHwb?2aI+i0)3YhLf!>RIjMEf1=*{;q!RVgvV_LYX}DV&;;J8?zykpw zgVd+K7KsoGhlvgNEHbbRh}<2Vzv8iG0vjsb71{&KhoHLO{8B*#P1kZ_X3M7 zN-?X&M&ocbAb<`i5nn<_j$C#pa0q9MZ6IDJ-E*(Cyb`{Cr72qyi7Y8WZ{Z(H?<@gU{(tkIee>gAO=xU+K}79+!p z0$Z#&VY>s3QbxJg(fz}Rjg(j>yBoBMF!!Zc;K^&@74L?-v~V091I=J}`_}7S!u$8` z1uj_&qu-FWCYceRKUYX^<)dSx)c)qMD7Tuz zAoS019{PXxIDbJQJl`(7-&h*<4Ea|N>@4zBX8xj={k{fPeOT%f7fr-QUFO2^`;TUY zc1$Gx;IfOsb{C0LyFt4o-~_8wZ!B`NrC*`*6SO#q<>a^2G+d%q+)*N(9T7*a_g9fk zAGY$-l2J0|)sIUSEV6l1e%M^rMe|<`P|ka&7iH?DMTH!Y0^?o= zCc}<*qN1YWJP*qzews#;*{jJ#TD4{*LuF8e>5l{%1k*ED<~-NJe%W0Bv{>(Q+$nh zY7#qWz;{0NmxEH=QhU}CnU9W3EzZP-*muaK%vKsfSy;dKq@DC%#-#Fz+?xn3U47Ik z{@F(1_R=N%X#)-$Q3jnjdO7rD;A?3qKC)6hP|zzoE-n2CUE8~VE^)T|>g!VhfE($W z?6f<09J?{4o`p4Ydjlg;#%#l_B-RitVB&MLQg%T<-S_4u>BQZ~E7*G2@q z_8`;79In`j2&h=tJ>{PUKY3EHTxvMmY-6n$=m{`6_2?Ko{0W;p-+hzDmLEEW5 zR_E^XzxK_tK489eKM3(LC}*d9b$!)oII!OPl2xc!;f|`{Tj=LcdX|26hP#Z-?dp@KS`OJCeby#B ze3}(!zCw1W>Mg{#3SeYL-ggBs>*oWb8PK|_h?m*mL)Vl@)Zw&0tEZifSrjqk?eT$4 z^pPl&dwyQ&Oa)t8W_?c&nrS`MdD_z-AD1hft5V&oW&j*&f!F3A7z+4pQUhj(07BYe zB%g{z2H-_U&#;sd18IN?Evjoi3+Vt3A-zP)knVyX-TFv_Z}F<(m=M|v51B3nU}za> z$Pup^ewx3C2nv77{xz8J3oNnR7sD6e{C=)02q0Ulc~r*NOWTugmGq>2E1GT`>${w* z6fl~vR?)ozs`OkN2=88=C;@gayc^pDERMphW*GD)78Y+VW~@qd>%%nL%UyOgVJ&8# zGkBmc3D|VWe*IEO*l1p1U@0F(g91Idgxd)?#Xb~WY9o@-+Xy-rTU64(jz1|6dfF?l zPte$!ZoG*4jEj5q_~mgZG1&94{&8qrU)K=$6yhY&$q{KHi5kQ^8~{Ai4i z+9VAWV9{%@BoQ@%noO^IjUBZK4mf5!BT+=;1%?)%ah^ENU#x_+#r+|1@|&-21k^=7 zYZE^kynLrrKOpFJmgjl;RhQ7k zOL63=KoYYb=_)MigxXuK3*`dxJdPQsIkMKFT83=mH74{SYyL7QM_F8%d}?AA z8{VrO9P1r-)}pdE0C>~Iw>mae#Apud4wlL_9}S>j|6ja)bySt>_O2*MH;4$*A)p{2 zol19uw1S9qgRm&+PU#Y*kwtfjfPkcQcS^^i*W$j*z0Vo<{>J(Jx9=Ha55^wf{`OG5 zcfK>8`8;#p=d*prYyHw}^8{=h2?c+erD=oy;lfad;8OvI+)Few7g)rR^NAM=$r;#bjV+ z+;TB(yp3W^Fkw}7Z)`kEqqxRfR#V@2O z0D6*Nqbm=8Y^%`_j~uNj!h|8tz6?0$1W9wfkZ+VYqP(t zp#IoncohsD38_v>-eyOFYIOWTvA(3>j%z=yQ@Cx5e8eOk6z_`*9`TCf2=AO6Nfa2A zM$FLfy>z%mLAK}2Kj1Xj9AciiRSY8sHL)7`)fwcnkrleY9WwTTWMu)euLM}s{#tAm 
zxyzTMg-&XDitZgheu!%)fzNv5jdrx&AIxp8Z1T-F(_>EYS7u6`Qa`@nty`ROV5h2P z<`jb{;@qb|_t?8+X1#yAs=D{F^xAb_3U*cSgExH@W@08stI=BDr*wcj8i+>0OwIxw z>j>r!7`n%}^llAl0to|){=R`D>I@mx@I4M18Un4f#OK9i+aabrDLvQ4`|~&qz4U1V zrpM_bF^>27&M=cfhx7Z%alBzc4I;Im#=C!ZY@D8LUFNJd9YU;e$!rEnZ2tOflgsSa z#w<{ZATE9MIG=pH)DfkvR5xgs$@t=$Jm-!z-tD8vr)_O*>$2ktl@Za=5{{13{?9o+ zvW9!-_J%2H?ZMQ`uhmKBMZ6hi-o8U#zwiI>^*ev6(q@2xp?MKF-;4=x*`>Y6NeIO* zrO1)249>&LJlD5N-|Le_;gt}No}V*oy*dr~yYVV{??{(y*pl>h43dW3E}5i?Kna7P z>lI(n(V|gYm+8$8FLvpKb%t-4{``28msN4)aYZ%r{UI4p;I&d%*Oj zVKcRMmTm@9b?*=FmQ+jwr1#)*`;PNf%2GfhP`dkL>y#SZBR!?&bDkz<@(i^cv$Qm& z5^_cLznd!}`Fc~z)AMQAwrI<@^h{%9ihLMQ=Y8`f;nzt|>T=7si`+QUiJR+PN^KQ6 z$V>KM40he>`&87wBrPa6N;ext7<<{wZB2eb^l8_g+U^lQZLBlejy-yJXbt4$1HgS3 zm6Y69gISw@WJ#w7P<owpe(gr%!%hg|3PZ-(r$A^TZpC5Md=w-DBpP6>YkUfC|uuCI9%5?FY zQKGB4Yfe>-NoT3v1US$z;Y%T$tEJ$v?i6maz1jLm+c_$SNj~`M-(h+80PF+lgl$g@ ztLU~!|6L+?g_Cg*+=ip}3UKe$u>t3zD=pl2kiiQ}PHud^96BCk^8TpD{8#A$+jQ~~ zZ2;7envirV&bgG+bq95{WJXq~)pZvuf?7z*Yn7`S6b_T8 zb11nEQ@WAjaAmEbIxF1VPrKoGOJ~&hY@Kh+H`39*t5&)MLfBJMI4^GH8DI&O~G%3tTc;8)pRS zSAb&I`n3UcG#)(SL*YtCwMITdFT&>jFD01QVsMEFkIXt)z1#bo8?N*A~G zd|1{GL|Ay&m)O)e?Xl4XQh0 zZ2XO+z9Ih2&hGoZ_fj_tmU|KXlh1Ak{#=!cb{nG_Z~nUks>WEGQA-s`I89_Tk53mk zH}5crcB^R|g!RVK-&$H)66Gu1NssZYW9Q}l;n>3ynMR+l-Db4*+E?>rrV=}N|Lp<= zE+~X+6=&ZDBax{}Ds=MBU(oJ*1heX;E~Lm~{f>itOUQvqpPl55o;48K}3w zb{&!!Uk6|X6kT!jvA#&S(@hjzDYfd`<}YeX;<#3Wk1q~dyvWfKPj{)i7TfGzUiSQ+ z8E*}7)-kwQ^p~rW&Wgwwr$9#w0XG$2J2>WXwh0${mZPR=u` zdx@vm7hn#r$n{aL7@gv;&y1+s^%%IgW9wpGmv7X7kUlU_P3ViXm+#wtJ-wirve(t` zzrA`)M1&|j(199AsTsntyTfiA=(_YoSRLy-Y`OaYUbFi?6LYYK|el?+N+8iVdA{-fZrpmt`@h zy*ezrY0r&7t>kmH8~v-`msqQKV!}kN-2xpI&Z8hfm){mdt?525UI=G;)o|O+HN-}> zmzAnxog=Q+rXS?vvx=D}amcKIskPPw=Po(oZa%-#V#n+b;ZnO*f#0^FgZ~j@~k}$`?t_<2k}-#>~O8 zfK%3ZHj{;h-Uy$py?NR=Meww^&M04kQF!P}V!t&dHP!5UqHXgX*V!HhbFEH;cttje zRFW`mnx42I*HYdWotGAN3{I!F&GDs7d+PcJTnB5sKV*Nn&tFnG3`5L%;}wf8WRt8| zW%zfr@6fijweXf$XQ-&F$DKN%y8p6V-VhkE8q@IP=OlHDi~IE!a*A*>17V)Z`kw_w 
zlCM>@<-GQrZ?N2(W7|_?v(MY{yI!>iX^~&nvq5`Na+?qr8RXJg>`27qn|CoCeUZOmD~IR1M6BkhEK=!+sjP5w0p~I_ueo; zdw(AjGb_t%4X-)p6Hrq}>lkf4DH_T8wY#zxdVPWYZrkXSIBl>*PcT}ZB6&2ZzMLlq zvy{~h&hp3B4ZISNEj))mYrvGqWmUx#>tuYbxPZl!G5`+$Tb0wm*-k-@#JkzdfR zE~t-aeK$4u%2)l332)rYG{j_<$4~knLD7RyKi%9==|Bl~*~Z4#{4@;QwoM0LQQXe8 zl3G4|eIq<;<)n9J-)TN$Z_*_-cbv(umhUAceBf-8t5BL!LoCB`gCpnYBzws zXJ>>z_YVRi`vmD8J?gG-bllM@_Wl8u3|eS4KVe49r-;HKA0O*J0KDeSUn^=!Iqox@ zm6!b74kGYN&{>pT`O!~E7|Y!(*T-R0>~YhE{zY=__&D+O>vU|w#MP-6CTM1~Fyp(6 z)bmzwA?h5yIP6X*e3ftexB`OJS_P*CQC;xgC%N8i`9*shi@e2rfx8C>y>PPe5|uZc zb;%JM@cDOTShKgd#{!0P=y=TTPjXPi#j8KH8e;93Z{AKi=5ri=3q%$BFUk8ezZM28 zE&kB>Jbg(m&pAH}`V4%fQ5TN!_^2Xc_4IT{K`e|}K#Wm(mqWtDI!@g{+r<@@omgt5 zAJjN8N=-0WwUJhduN+^xqMTg%}G`;$;#~Z z9}MH(KU+i{mbb^m5YR^HyZ``${noHlx~P{raZROD>QPcB$ec#sIOvvaUtd&$SQ*{> zy}@Shi0qeHul7(;lLF@z*z@BW=(1h+J&@jEP2M>V{?)XviX&yBKn)#C%ahjpxKV_* zK9E61%Bm?nkS@UU8h#10diy=Oq+Z=%PQ7h{I`Qr7q(YzaTte1mJ!e>8AZ*1o8kmjwI|zaNHHn;A7TR8HJ@+~`j#Ap<@SoI=nG z>95Tjbf1mzAKtrv@#9&2B-ELD+M<(Clj^Q)r;fSGio#)K=*}!MDE5N0{U;mx!VD;Y zW`>Uh9#J-%7%=ZEB69={kb^#u8nQUQW4YY$bYkBivTY}u?eR0GjY0o;;+dCG^((?C z7c}2~l%6hkua6}D&Kr=a%;!M?Q}JO`rG89vqaCAnwpC2pbs9_ZZ7#jSw)2c%l%|Q> zzbCl7+2=&Pd-u^s&A}WjL~wly-R_vQH!y#}?o0YjqTQLvVA#q}r_`>ytIa_fvn z>d;Wk7M^LrwbaAk>K*e3C4+$Mnj3vfNJwrmdt+Y)hTbbQ zb(LjXVR_+ZSMt0McJA|_(m@3=H?;kwraM8*V5_L7hvobzmr*uKMLX=cdf<6m8wMuc zVYP+Xw&$vs+*g2=&X4n0e6T+h_x;WLYJqyv?JCbQSs(s~!tM9pv7m;I!{G>a46MY8 z`s45Jtm=pGg6cX)CM8)cTwICZY+SYLmKXA4Gq^^08hFfOKDu*f8__kiqM`#IoemOn z`Wc`T`$JI4X{N*0ONybTm=L-WO@B58Z zI)n3$N0xV8h!=vWDaLWkEbnPi*`NJ1ofNdoa!fBp>%NNpChpQRJ;7hcodv-8UbyV9 zA=PWm*1}7^pAA=3akl`U868xXXSbY<;!tsr1d|mx zuxtWSqE?_u_56ps!^0N_#upc^lndU^=^s!Mg}oIi`r@%&0g(Um2*xn1dZ)Xk($;;3 zWS`07l)pU&f|s#zba=#MT4v$xi~)YO7FW+^7Wr8n2DTtq3&ki|inynrvPKpJ^3zJ37`)!Ws#H6M!Q^ybZ z>845<*d26uy&J7hCHes0Rt~Y;hI}-M*rZHkyU#<#9{rgV`?KuSH3lC3kN$$0I_KYl z%5X6D(@lolfxPqqy?NWy6ZrlYg4U0)*rKO^wXnZ9;Zq=ea&Z zuvV(7_iz9;XbrQC*sezMgGqhHR%Z5Ktnr{pKUu5!yR+ILN)!|feC)GY3kuPNfPnW5 
zjEoj5rz~oB@BB<8zOux>vs3SW=MMgx8;Tdm@9HoAUot|qTFZi)ms*`zxrO`nNa%EUiH|6RKG3ui8&AxF@<0BRN-dik$1p8m5Db!U2{Ns^?t9* z)(7jON^|ma==hBU5hI6A@Nv6z?g3hw7ap3WMp?k2ycp%N3JCc4G_yJq2bvH`u97X z(sFV_*4}JXo***c5=>Bpp-Vjw_0A&d^RkLE9Z7GN=iGC(ozz;L>Zun76ATag4-!Ce zJ}}qoAtekn6)*2f#J@XLd@{QLjng;%Id~Bz6o{fg-c_7ux>;YjQ%>7skwqEYTi8v%@lL9J zr>seeA3kPjNSG}!SBm5P3NB}TZdp6ybd-SdJ@8eAf2o&)m4Jr2dWVQ$?gwX!kj?jDA+dy?eg`{!a8FH5+dYb$P%mY%a196<> zY28sDB`u-@^*|eVd*e2t%XNdambRb`Umc44*yGR7qS_QpelOjNm=4*joQO&NZNy`& zAz@GNZbvpjA*TBfCZw&Ex-`$jEXjxvk>|8^xrb-lDi=*hXP12Rlw$q*!yPMSmM(XF z0HN9G6Pg}j_L4>zQP*-y-d7ovmEX}Boa->|f-X-7>W4HwjIL`8CGZKQf2@P?-7%O> z2l14oRPVmjPf~Gv*;gI~GsgP{(VY|R+H?Z33mgoJEMFBIGSVQZadB~=04T+EG#;2u z*3EAj_C1P!Mu3obSy42{CE^oeq)WbD(VliOnXlNdjzbHZf@pcLE4)iC_ZATx>HaJhf+dSU{ zeEUWd5EOLTZ7s|f+0g;${!H&Mg^hCf=LSN`xmFv z)RD$ayON%s>C}A_=PVwTU~Hxg?&!9vl*KSahq`OM^o+H$Zl>PynCl(fPcLxp#EOZD z!PN3bYAZMoF7Gf`rLdv79N4=dQLBcZqHQ#h1B)$oU=#dNcyXzaA2al{JL{p7Q?$6J zr*K?+eAV-y#NvRxPK6M!YDhawhkfNHiprUTknGDJ^%ZcP|745yKVQiFPS|Be@n^Lp zOZo5aMUM!*VY5n}e2$TUmgQKnn}Q7~{MxbUkC(ig<5_)~7;_f4C$#C;p(I3RxUROW zs7+c1`2CDUB`Vs*HJL!ITWVw&R5xRhV5h}jH&W|fI$bcA_qkW1@BRt5iQSJz%6(ig zFQEA~X$5aE!1UiGO!!OmF$#7qD1AHoGNGvo()U4TA}K(+Z$MXVQO~r*In&Aaepd?bE56gJjKo9;M4e1o~^slN6n>GK02Fp?Z42ElijGs-;hs(7Uq7Ao$Q! 
zvkMD1{0qD_weDGZA4Gm3%ODJvM*{>sRS$I26jWc*=(WD2Cf!czo5rpexIE0*D7U?2 zXXs$47l^Y9LPtZRXJF{yV@`^!lvh@k2G`oPGcJ4SrD|(AwvLKaZARAxrJXx<=@zB5)T!}eeCz8} zAAr;Dm2k4bmNrowyXx?@Jaa5(>3LUe6AuZkBm84bSpN-Unx}!*QqSHe{hgr7425Z(ryZEsWFm| z$B&JVhzZQiTZV!nDUVtZ?$UGqgun4DZUo+3O^?j4Qoi;pKq1GO)y;XAms9>pWtKnj zI_8#V__*{h4h}F1TAnafa>5Ti|9I+=qDy=>V{*gr!g=C6lZ6uf`(8xR;-4dNIKoaXm?tfVZ%JT8mH{a6M&2aKZOCIVxEy` zZTz4y9a*Y~A-0(nsT1U}&RzcIGY;#UEdoFuTavF^=mbR*?Xzd2`|BB`#8IJf+D(eA>&A7yH<_Y&_stF5kmU^5=YN(l7)`m}pN0>lXxLgsni6l`H~?Wd z4Lkciz!_;P(o*2`LGU$j!vPIYFTw_0*I|3LXJxW_o6mZ_iE09BSib8AW&!l2RLWXq z@!AlLms~MQP;Up(N+9rYDR?C`N6~P|k3Pd#@=uF(N+a9+Z~Q+?g@;=Ozbmbb3Tp@n zB^K*w^Is=>`=YaW0Bq_K(Ja!`T5&kpd>%p0ZIGlCjF<5%PYE69aqkH@tq+f10{|GT z)9X(ba?7(#|3cjC0h0;8SE5#DPbeuP6Ut*4y5Ewv@(9)J=f^Hz6fl(K$SWkIyHKk+ zoV4}2#dVi8k=-z)Gn|Y+;;akE_TUa<0_Mi_NDu~h*`pANcfNPZWsLLIC^}#Hs}d;@ zto!PDxq9^^77XkTENC21iu5I!P`O5sjr-J70PT@woA>-o8n`XOw3|0mrt)lb#U+E+ z6N4eG+ZX8fSf4!#I`vN#Olakh+ja!RqYORQtbNTgXQNSD9W_X}EXauy2Xqj%KCj>` z*X27Hdq!&uSFB|cFJ2ImkmL}3Z*c`-N#MfHtsZRy<698ZlPV$pYWdT``(CRA+J=!e zPJufXe>4F4|7^8({_Qnras=)OYZKSQ@|?EEm6Uu%ogbN!b_HVV&?XU+R_7_E#5@;I z{0u{KMtczl?ZS#_yt*7{C5C?#d_$<)0~uqi6|Vb;$C*9|(z!$PzH>_Kdz;vo4Ihjg zm)p?wO*e;P&om?Bm(QIBUCPsz^z9-gM)L5dMVKj~9}3a^fT(rD2e4I5yOhP@mr7g_ z^$sf!JP+CmaCR$RcZ9eNRhUsUZVXe{%{N8G(g#}#3Uozmp*k6I2ap4#Faa>|{*?Y0b; zd^_9SA~Ni|m61)C%ue2lW`1V$ljzZz$jiIRQ~!9IO#PuJH$1X^u*v%QLUHQe*2%rF1`*Xee&& z-I@WA&$J!qGCzO zdF-Dqo0nI-28su>?PSP6tCacAy3GG9@q0|?Y+~Y95H?3l#*qm+<6Fy@bB%6gW(uhR z>_XjPGiEkb00sFSkE;8-kCKqk*7&O4{pD@{6?aZCGZdd%^Z{U{SDqv#^m7;q2h&M~ zh2F^MO-EA-_)Zu>+8lS@=EOXmuKfJIg&~i+Q1k|fvqWXHltvAU} zmlQOMwMQD{id)#&YNon|TXTz!E$k+V zgJzzc2<2i?NA;cR-9iysc1#DiWkQG$N_p+F{gowHQXlanusunt@fiPJ8iqZw|wO1S${#oz|8>h5KG9xkKBj2(>B0*z3(3uVzm`JS6!GSowLxmgoem;Y7=BH~qQ z{IAh!t|R)hT3-~ew}KVGhX>#E6Az$U4eYq!I_ojyk# zo1k#>(t}cnjGAykg}CDCsEG=h_s0O`-EE0ssyt<7SqBHyAOQs$etyYo3NSZ~X5dFz zme*|u2M1X>x#CMPs64Ll%Yrg^6rLWRVSU66rO4tZnenVYo?qji_58`__M-*(Ltk|j zZ0>?Zn(gO@rY`2kfc&7A{laYb`e3Q_)k4d&Hbq_}nL(7J=_!U`qbR*w;Q73frgLld 
zu6b}Bm^pw0boK>?b`#JAKt7kK^4b>5LyGKAW3hAr6^QkI zH14;2@I%AaWzCu)p1>xHBJn$?vr(3lliQpucMrPox`AV@_NS>>C7A*S46Ow9Ob|zF zdcM|HZ9?zHfML;^^J@2VwdUqlM(eHN$kZWriZ^@B9xg$iWo@oj^GyjaE6*PHztyK! zSiZXt<`vw36ig4Ao=E8Soq#3fgL>$`qe^>OXN?oCO}SMdSi`d<)rio+qI z$N22J%8o#HqHfUYynNRi@hv=6{b1|-{pzBTw`X5bCzhRd_obRc$ZeS z>sGdWb6eZ`~`+j=kVSWq0*L1>ciCIr=CYWjh}~gvGl4fw^a@B zF)`bgJ1L%#tF{a`ns(hI&*QS2AAF?x>C-rS@n)~(Wr|IOeB;+~b{}^*;)=f9{a8@= zZ*ufKLAN~|+CG(uQ)K|HE-RV~_2oX?}R5_g@k~vL9Ppk5X! z+db3e4hOs|A;GPPI-^n}`lhq_{OPCaWk!{6>ua(eE??F;#H-~idu_^1Z#n7LJEqk% znhz>R+~<^?;rL6+)C(-v(;&f`?rlvz^2$RW<2ORT_Vb`h0Yt@vR-o3rPucU~1=Woes8dYo~}SVzpXZ z37ebQdhXA3KKM4q`}iqV9-TK}JeAPLS=f(*Y5-ET)O=2l7W+eNyXPRnAdJDGyvvZN z9ya`4+Ra0uGi}7jL!v+Zu`mZ6UckaJ1NgLr@b-PNS@oV_I47h;5MHY zSm>Eme48aetX-jbp;`X=kv=qiO5S>hg`#HNi#QOAG%=R`$4@gCc0O$I7ndy_?{DJR z$#tGh4t4A#)kzTu#-sk&)lhX@@zF(XYuN0~<*0yq)jPGb7oe2kItZPwfSfJtaSg@_ z7D4@~sOf-8`{1!_G)S@Km{d||WTL4YlGfhlIOcvQq9RO8)3VXccgzhVA}41ABmH7? zR^RB?^hgtQm)<7*nUn;^J#>HYM`vVbXK&>ivPHEfe5BD(MZ%&k8AT~Tmp_M_49iN7PZd-V36Dk<#gecg;W&?VVTw^*`@F5fCVSuby-Cz;4wOGQ^|>X6z+@A zm%HiDxzQf>;pU|A72iXnqo>PpT>BaK@#7=N>0U29=p``H!wU1}=tV37x zTch5@$hFz}%D2KydA?tb;PvFj)s~@sDcp10m(cGl8tbyV(@DivIZnf$rRa6a2M3dk zjc9l*4Zb2oLleNf6g~Dv4;UE{n6D`0dd<=dapUR@3=Hf^&EL zL6NQ?2?=9KU*mf0d+T=F6VV5$w{)l84=!{c_x!lGCoPgt$z;iXDk`p1vdsfrYY z;2M!iy% z#jaX>)?7At$)>?pCasdE`KS{Vyw;E5mpEK}Z3dA&a`y1cZM%=LD$x!lto4{s4#Vpx z%+}nToNn;RlorKhyx9=vgkMMnti`N)aYI!GOakts% zEHk=5b?&u}T9g2bfk9w^|G{6D*iRn(w;Q~!Xa(D9vz12t^w@iMynGHPDmTRI_gm2W z5W@benWP8T16`E{(c1*IR^6<1-dDT*tD(%&$(@7g0#D8s7CNenF7bM9+F%pL#*Bc} zrn%tej_=UOTKT4@QN2i$TmXa`;iGOi)PzO9rU}eLk#2#XDqO%Os#G%|ZwaE_FxNqg z4eK4X+%MJwnZ9O~lV6?>r15EJe|=BC^7V1P%VL=~Jjn54P3NbIn9WCE8-jC#;q%{y zbN3a1G@-91PYGf)4mHeJbJeA3zSRahp;Dw>ltgK~&DBO?V0XSb=6GQ<=H$%pY!9Ez ze5otAV|wPn*qapNN=#9Vjps&4KSdzBBG2>TX5~tyce(AR|A*dVzgxF4z;#9t5Bbze zOG?TIU{w&C49*>jx$Q+<>WkUT)$16sxo8I86~KgUV>fF7yX|O2^ckXA8}vfA9gA57 z=;}nz3uA{rA$$}moZnrK7RDD%bj2|}&CH;hfXP)mAI^M+Si9j0;6ed(C1aHSROtk9 
zb+S!&b6q(zE1b%Gg-iS*OVH~xNN$3m{l!#-qmdENyJCw(Dkgylrcn(7^4IFb9DVGD#YPnYZXslE$hqk_mI1)o2(HZXUOa?8s7n zCiZ|64NHDotyVTPOvfhf;&i)0CsVH z4@1L!M)skJj@|C)lzz}xG-v3`R!kLqwD0t*D-;i8-$d=In*j;IYCAo8#e8;FQW=;; zUz8e!0p&<;RfMgp>YeV*(9>s;KaLh1o`uG&nebq2 zrpT3CrBGVh=P9SR-H`p}6#=1{71W7JrA|K#ROA);Bqpgj^!zsJ#6#z@0?Q#R;18xH zB*182ar?Os=#i}G8OWD}7&N=vkS}>26nLnDm(CVmr|^HOc~+!@XmTrCoHOKd*;){- z9Vs{Eb3b3I+V>5@*~<6$fuNl`s>g-q&xCXZAk>|9CYOpPjyQzn8EM*JQ`1 z+Txs%iOJ`!5dDdRb)wui!)7jH$qoEH>z_)^PV^B}st+l4LRGN8nLBBUu4%1N z5*lO)u91@B+{ib3*o9gI7o(ruiothU)s>qYRMGpL9vHNqPJ3`! zuJRwZj~&JaOD>d!g@wfhmX^iBe1_K(OM&Rc21ZX z{Uu5LjeuVmqNZcrqyc^bPw=JY7$JBY4oPNB4AnM{dwbK{5>XwhARF7>HR8e6&p+tY zU@-!Dv-oW(@7>zZ&ZB87ZejDZ%sPO%CqS*2J9O8D3L-?3Ag6Uui3KPvE$wr}`HBvy z_KiHOqovKcVKsH*G9r|PrdY?vmI+iM?3HhuS5wzuPM0Sa>c!eNBb7Sg_ad9v?l?sW{JK*CCY-2#FTX*3 z6`!I_@VGUSrOJmMNsaMHoF2mVH~X^?jK&<-Jzq(n9(!M1B(&kgdYBB_Q=U*v1JZz^#f>FT zu2Sw#OJQ^}pWn1yBXTUmZ{~E!Yts4RT@91;$L0y!U=ay}a`s+`*<6s0>Iy(kBdTmB z)PD$Hy@2{{Us?odvCZqehP`uI*$0E_V#3*r3XQb1MrTJMbzW@5!0vWu%s)M|8Sc|; zw7Y>;SWOH>D9Q49&AGrd?QoNRf+;0ytv7~gwnnbZAbjYAtr1{&gD=tM6H$=ugbI?2 zjhAX8i=-gAXzB_|6;J^Q#Bmfk51uIUB{#)_SSMr#ayF@Uyhs9GO7CLKuVz)xDcJm0 zPvu&A~yGPM9&v*`ab(y zAEp(_kYyE@=)uZK*bO>7RmK`zG_rad-k$CVsO~lHqi|2v?9Mq}pL0$;J~O{KBJ1`; zv&i5r(Y7PY*fPj!M&Y@&wO*j}qYJHHzstQnq9PUphQ+m`hGB-OU~b3>=1T8z4*{V|KTH(M+bq5kC+ zHgKa0t9By+u$94k^;7>}VP!SL_tK1#oY2B==koRbwTI2J;H^v14_5D2esAXnHJE=K zS-eb=iRz9iX>H(*&(A^{1P@(~|F+GHb4>7zE0+y5Z0G4zokMd31vjTon@e-7UNw6x zS!UMf%=@8^kl*3U8$Y6AsJ+kU4S5d(w;X#IlTvli<|nuyd>C_V8;${s9_&jmaX-)? 
z4=w;bkX1k%oUSSULsEP?aJtW(pB99zAWs)TFzibM90V0A`Ulp42oNe_1U^1vkOk&} z-t(a>`vDm95EDiU*nxEM%+U$aq8@{e#XyA;>$*$DeVoK${OEYS-;o0%iu^nBShd7# zZYDp5sBvh24R~yf_%O9k1$(=|Zb;=g(KaCqqQ2JE+cc>b#3eVm5N}Do`=HC^8TDrm z)7C)}T*^tq8tbVYff=1v39P2mRd0Gcb?Xks6n7i9@@~4h2Nj>LU6u@; z%|op6%m(ts&#G<)gtF>ecg|o3lDe=U3%KeBJoL@i2aBRb=r@rYl~*w@lNLQ>DWxC8 z4x;T~b))LrH;+dq{>mUAP7w&hUncLpSTsuBN7b%fQ$HyoWRf{c zm*3X)S}_u6xcYdUCWZaiy~n^rAvMuyF58};JT?sxVPx!_ZT%Ho5%BY#kG;g55pvgo z@7IM~4qd#uF0brVuO%lzAu*O5gNu}1%Nu2#JPsEew(u3J@eTAu1hW81V~3DUBW0DYOgxUsEg5Ye#pLf;n%3xO6W11qtoY>*f8|_ zyiTWEh92Y#zuNK&Y1xqZ!sNg6h1K)ALgQaYOA-5;s*o&QK2}yeTh9ZoLP(bsEw#af z4Vhd**}+qRrtN?o)#f5Q<60SIO8yK}8SOu=Ui{XFTUZ5|ll0%|6=OsU(58;tg-k$2 zEhV-H4Q9&i?X9fl&@!E}t8d}>BC&^Wb8X~W%vXli{tB5P=)d5tLkzF#s#i)sIll9& zW#!KD@30$B|84D<%r$IdV|GFQ)|FBluEoR5%*tzCW&1h1D(m|ne36I7FCc{T8eCgi zPkRf!Z^)||k@|4$4tR30NHZeY3f?&r;uPwZ0nz-=?~OtT4xeD%clXU~U7=$?53CR2 zT&fEi;VDfw2qDLlO~U$%b^RO=Vv)62%Ee`GCvb?}%voQRbXt#bq1#3a#_nslBijx0 z2YYiuO8fiZ!0G|QG8&M!-_jUJ!eKPLqcc%r+LaiWp8gR^)xtDgwe=`kIPO$ryikK# zsdT$R2X+awi*NDD`uIICJbBF6$14%5gF~^lurquiVK}!pnX1$Iov{$wag3I|VQ`zv z(>b2ar{{bNp6p=kL_(1khk6%W`*<$P3W3cLkvJ%NHN;~vV;)z9f_qj?kY zP}EcQboWZ;XHu&;d)=ovt3>zBSKTo!CpCytj5e3E=)3e7{MyKvUxqdTL$SD7eyHB$ z8A~adtZA(e$D6#~GCqQR&+6mE)uAm5hZZro;mShl`peO$KQ9*w79dK8+A_K{U-^_; zYrKX{w%{=yW*X=Kq(7U~W`n*|5hYOduGsp}_-&W= zc^I-5QiOrA6i4<~iF;mGPC3$%4}PyILmqw-J8Q4``;SK^AAg(W|MhQ4-|Sj*&7vFZNkZK zm}H`HPN_ltOCyL)&_ZgiP}gu*vt}We=vg-Py<{G%xTdQ;ebJ;|6>b1>KFACD@=y!Ixx9d=z?hddD;occu%XXl8=?khH-p`nRBqinbk z*j_qx_X7r0Ow2pXKzqaS_{1t)+YL1*3Da+T3y+)luW0|LlKh`P2yT5)#ep_X?$xbO zSxi8~KeFk}Ypo(?YweynLKensEFetD)HbgrBPG+wI@~8RcMl=_Xrw44_uaRSrYuBT)nX2Tv@6A&!! 
ziThUwVb+<6{n%FCodoVKCj@~J&F?CuH?tXH_Hz_dG|7zVZ$#jKo+A<^ zpdomCJM_@pFPGK3SjO483M*-T&7E3gD!90CPY=D1CF9|2;*8BScVJC#d4vg%n zweq>BZ0YJ>+l6ZC3hhEjcjnE*=;*(!UfxUk$vnH*k=9c zZsQ^y*hAK@-g|HSh*QNM{_R)3soZYJI_eJbftcuIkyB5fjo8}Ed>x=>X3Q0NHCf~- z^{cLKUfUBXO4<-$OXSyJ!Fym&yMXcXXmFx zBL?c*{`5;4Utc#60Ot)KNqv*M?TYrskFyRMEpbJ*AG5rMz46E%+HRoVa;_knRog=; z|8Tv?(E`mvbd`zcVN;vSTxCN}(Yxj%lM1Zj{V7LT*RKm~LcDt}XH<3%I>i2Ut9^sT z-%HkERmcL)9qF-dLR~_-+dS^%m9iigH))?EHQ`zAGTpHXf8PgRe^h7lAs|jCA|z*7y0oaHp-@*>XJp^| z+Ne!EXCF?Y<W-oq9G^@6Chf?q3yzbjo+L zE)yc{9vjg9`o;Nn#=~cO+>4nSab_q`oEcNMm8ZX&jXtArXwun#Z8LN8@q@p|lo38} z$R8N@fBA8)0Lq)zH&g7lF;X_Z_v+jW`JXv+h*Dzr*am~N1@&f^( zeefi`v&zoW^It_dISg{LA$%_NO|qORrh;Pw7cTP}`rfgn(bSk>y!EWF!56TCkOOGn zoTS{L0d&H-wmQi_OYF1+fl<~9(MHil6Z8(h_Ig9z|7COk;rAmjKtej(?^P*Kk19cd zcI|m$0V%*hLZRb}9*VK=wgiuje!aGPcrvgT!&-RMKGWLk0n4KL-cFyR-OTC;8MGR6 zt!N|_O48tw*Mb)ndogz~_pp=&dL(ev>SEp_X0WT=XI> zbHg3uTVrKC+U-_LVUi|o+rjPz(@;<>(2pbW z{5Oxav&qrmfrM>i1NKl-umF^P+u1$HpNoo|m%2J{R<7GMg%Z&>VZ96qcuusgjOjSR zTdOtsiNxZ!bNfbHebMSsf{pQBzL$>MN(wR8wO6c`cN|zmXTXar01+_($TzJmiS)5B zj$)0UJIc*U2|y}_t_qJbaX8VR!^_8~4m9?LHScg=Nl2JP&cd`gHht!dF3ZMJS#^=O zO(~K6?Y@~xk+N*j4^Tn2#(xPoseH&}azpKPDn^=Dz&iKbEf%EO;N9VAAL7$f)|Pn^GnxM!kt>|K>ieg9&~1DuS`zAnPx$59i9>Wz~7J7^ne zOs{VDOvL6F#C>4?9#FK17&plDFpgiQ3VS0}I!{wg?E+(7lm5%g7wxkarm4};u$f9t zS>M4k@+or6Zm)KTm5`yzs6WR3#R&u3B}XohjNSn#fQno_*;CldOzFGhOPfDHHHcq2 zt#6fIx!e)q<<1Yy#{&lu>A~llQ2LBkDhhGqR-b0MhRKJ_X(z!E7g+zxY)aGN6PaNt z5h(Gu$z6Uug6pN6Jh{n-f^GlY6X#b7u~*_5y&pXa3hF<67gVBhEB1FB)yA3dOGFA~ z)fItc6rZH2_VP5oraSer#146I_5oh+eB{48H#Dn&@0pDViUW{E0fYpKPuBW0tExK* z5H%Vyf%w~`-7tDWM{u3yrx%6~EX$kLVSF71&>mRh`e99{6lZFR)M%wupE&Ix?+EU36s zf>MU8f6p{6h5P}s*IOBaMKv|4E@d7YJp(3Q`uX9u8(KrxTP#5#+XP!wYT~M;aWN^0 zv3klFGff-;*=~Od1SxjbK>XmT)U<~{KUy~NZ=DEZ~-&!e7vXCZry!g zxK(<~%4#4z23V9ya60z$kTB|tetlS)=pQfn$?K0-`roeK^Gd86JiX#d-Ieh(-_tB6 zP=26nz0#-bO64Z{n%O=|8;QW?W*~#ob!?I|^r6U)3!9l%>!>?~lU17Wj`Iw^z^eb@ z8!@*I4ck(x#`%byB!B^7UsFZGoaxHX zn-n_|^};ad>raqj!+hq(HS(dLjS;%txP{Jy8QtBSl+SF3>0t}>pU=vTnS~wN-G0G< 
zvmJY@=nHIs1!dC#@k48+{>JKKF4X#UL+$*N=~sH}u|;V&-0S>M)zOUG7$;MoZ+bDRt)!EY97eiO7aFH5??O zU(Dcj4d9!cPh~eyW(u2bZ+nD(+Dy3UzGY~FFWqxam|mU;M~)8QtWdEXvZX;;p*w*7 zMdZCwrjYF&rz8HqU2S|pS4UanzLG|8RuSHlGK+2eUaqTrzEZRfEWdaQZfQz&1cDFD z(aA-$>Kv8rA13JZ`rsnnw#VwHr%$v+7n1Fj;(ueWY(T=1U$cHUYx^s+pU==+YpJ8*V(`{rF#eHKYyjsTQa834et>WYo@>V9GPZN7glH9zew8n&|X} z+Q+MEQ+Gj!_=Qa1Z@-z!&&6ohl1t!UCG^@tknG+o|p>yE?3^hFG=(og><}5Q|cX2+CpFFu{ zJ5c`A!~}oq00TqBj9pcV&!Rch?2}DJ@8MHwYN`EBF<^A;m#L0StEM^B7r+3r@=3N|hvZ$>tn?d8XN_M|vH z4mr2UYNLrrP!hKcQuRlbSHfoDX}zXI z)!40rljjnj$({m~j7h1}CMGrVCe?-@*j0WR8I#jBmz3ETIa5_ zr|t<50*1lqWp5O`Zq#Iv+9Peoya-M4A#L)86)SFT-;uK6|#+s5cMV(3~=kHI%+`)e4-(M~{9nT}BIJNCy4Kb=S$txI?umt`H^aGeE@5N7{fM z)Xva|)RN!)dJ$Ym(_IoqD#{7;CiN6r06Kx^b1<`nRpoOy33@VQnC0%L6mT$8-n@BZ zS@#Z|2F6Z4zP4@Fh2jF!i^t?92y0!b=#$IN_%^N+rC>6DST#ot5KT0`+1GUH{Q1V_ z)8(@oPtW@uxU}h0x!m4l2~;+-ODg6kanIvqxL$267l)AvzpWFNRWz5ji0#**@@Z;g z+2hx6s8F~3(yO$rhd5k1ABuRpILoI%g2m_-87ReRscPpkIb}&|K!{T{r!+YZxbL=! zN&tEcsl2<}w4070M^HqRS`fy%o%W|`!_Si!PEVQl3ZgfVMD|^q?v}!^tZCpO= zVe9M=E=!i5Up1Lc_KG-Ut06aAX<00z)mwWM3k4}4s9a!jEh$x< z9H-4I9bO0-7QLU=M~SRY4YxHy(cNMl#S_i*y-KjUMiH_`l6&ebw%Fov)8m8qmJ=84 zyKl)lXi7{NJs^Evd9Or_54viCj}g>2GxH`KT)OI-NJ>ZNsqitq>skVphIBW4d%ut! 
zVXW9*+ULaWApedwzBbo!CPMfZ9YVnq`^iNEt&04$O>38L>`Rtw&rTr>!9U9Z|Li2> zXj@pycVDIOWG!b*!eOf|Mw-AS+hKfDx@E&Hf%lQTwaJ{kK*5mkx*b^%<~4hjKf^39 zw=GMYp)9=Erfv1%3&(L+ip&%h(r_54!rMLnE{fW)9Hhx?W0M{5T~EqT+OxT5F(XC% z%NLu+u`XMe-(x91IGYK_qMA4Llrgogo}Ou5258*iWv4mA)&7?=Cx_lGsrs?K8x1XM z4gdwO{AJuyY?V?hyHmO0jie1#7Ka@z`DH7NG*>Ce$%+FRo?I#)AdIyS!o53La~_U; zQ>jeguF`de&n8NwnN-la488ZA>O(4JM2q^lhT^)?!yRt&Jo%tRvWcI|QwVC-&-27K z7vc8x^}{Mvpd6m)6fStRUPfDNqEjKLEeqh1fTSepT&%o?W)2b%IX=9_szPO?&sTbt`U{)ho=idLmczaXMq zjo|KnT}qa8`}7;MnJMpBlR0<4heJd?^-A*vkW5Z9I+MI2zbOQXCUdUj@((Vc#j|cA zS%3)V+Vypf_fr!QHbd_7Unu}BpFfq0s6r&mh}})(ZLErR6Dhhkv@ew7J%NfQ$Cc3I}FdR&& zl3HQYUlkD8x?2A1EqetK9BpvN!bm}5GdEEUjJLZBbCrCKPL_a}O*3#j)>;vuHL!f* z+yj3MrKWsJh8DbQLvCX+r_HLaX^JbiUGJny$P0Xa)y=yW7GuLJ!ZYI*-ZV$a$2>Xc zCMwi&>U&x9-j_l29N`{V?|P2*dxLMDPP_>@ImAk~Iwl>_Z%`e`HRf-QN(ZLJH9d@XBJuG7g~# zP@tiz^-7lxjg{KG+WXEn)pqg0T9fcY{Df=U3C85_Yl7z0Kpm1Oga4G+u=ipqP+H>0=9DI zPg{ZZWc|to{T0Fw?AVGB*;aUTl7UlLE%@d#a*%KpMbbwpngv+^4aZBMZ!bOYEZ?Bw z3X6yuT{&(IL0I;z8~ip`y~H_%arIBOf<@JAEDDakRL8x2|A~HWwhGcQQ!6yO5Qc{m z2VtK1`g5n*&TU8*yXDIzjivaxd*?Qsq1x4GjQjG)3+&&(FDpnV_>v zh{<-e@p1jwIA-61uZ|Hiu#3{ zAlpuc!mU1_i^O@+YxxnZ2d~GR-Gve*>@`Q5X#;(wuspK^Zy7ntFYiT>=1RtJ0alyc zNza%C8!nSBT zxXVK>Z?LrIhwcx)ji3*D>VIJLvsYB%{ThH1!B&s>#wwG;hOeAemoGX&F<0tsgB{$j z743&Ubt;*OK%Fe_@uT{0Pto;3%TniZ$G0$oV)Nxa6nx}r08jbsqg0KbWjw&@JOEX8 zpkH4(WK#C>z-t$CiBYH+z8h&&Cr5l^TS$+@aYp%W!-GxWgp22$-{0UjdiVCgktwr{ z=bWG9N3Xd7 z(OTmt){F$ZkXsYA%1|@Q<5KRELJaSGdycf3pH&%z*~hV=h=bAXdFl``-^ETdIG=Pr(-3SKF2(MxLU}?>X7j?mS_tO{cJJrz303>_~iPIhh6<;m=Rw|Gm zIrn*`ZCe|ae1}6tu0E~tcs&=Ct4-v|)bZNuL-iLDm0vI06BKmwWa%$+s&B&Ig zgdg@QuyKF72c$PUcHl&OamRa??_L80Fy2K_?8gzG#Xqjji9?$iYdxmtj1sv$8_x(6c#zknp)M1~Iqkw$}DEG+8$)ox& zj!U~=uXLY#WoCP9PQ*3zony!4zS6155}QXB_0Jp0AP*1$rol^372Z=7Nu`)jyip3G ze{0sk^b_rv}4QFJ+a5t6Pp4Pb!C03M0N z?JaI?)K$0o@UddnrR@bZ=RHA=etGL>CP*YY8~XHm@@f(P65%l=knocRaLbcQ$q81+ zDNwYRrhFFnfqp^$IM9HxB&{QFKmdB0aUNlF{8n=hXYFS+~+#YM&rn9}iWU zf`iTn4!Tr|4A+fipBo0-j_G^LVKtn~O0^?=A$kxb>2#S8IONZ%65Ti}j_Dyxyx8QO 
zig1R=>YOi|S#3O<9{tN~5g4lwZpM27)tBAN z%j&Vu!e<+^3GFBT^83IZCbLOuBD5JEOb!oy*etq5c)PXKCf`UVP-gmN<7bCM%k=ZZ zy_T$P3^5-1es`k{SL|tbKac0pNcCL6^}b#l31iP?$vjTczc0~~vqT%k z&@a=|Y7lb#qJg+$-@Up6i2kjS5Bpuu45ck)kt?mPbuu1w1Zbi!xJa!aJb6V#)3P83?_4XG>)uw4KWk+CIFxoC1Jyh$`iu0{>>`O6M|fJ*E;>BS3nw?V8N@vsEBE_WLAS4k%g{Pg2RQ=(cg!{kWZ7@yOO z^Fic2#rST1-Sqd>I{IvN_0rn(lGf-%3wsM8$#+khT@+o53_Q$o=#`muq8}s8I|Z{u zeYXBd!SdvOZx%Ms?k|%;bx)D$92Oelb1^kzDA{IWLztnh0C6j zoWv;yf^EG(_3`6LUd6y}(ESLC=`_MW(3TQ@;z9RsBpwhWf{<*)^eZF_{@LS^hXMJX zjAS|LKwwoo7YE15V(V71{O0a}CoFlh;b6!KdfLQK+^JJ%|FRaH00Gz0%TaW%KjWdy zKxstp?8|gI%c+Fm_L)?>E;_5mQNzQ`%)+22_hmh}4aP~12{5!E;*R!})!gjMB#vZH z5ZF7Fj;3qe2Nz;E^ptCw#VGA00|prkcl#olvxkxzgih%bJgH}=-MbK9O!!l1lZBg;k5|S48lc5jCaGm8 zGdUWlZsN~x>OBO}em6?rZ;ia2xN59$`P3}N#zgLD zCe$BntYMe&rcI6hwS#qbw}4l6exwllD62J}_xir>lP5YogzOS2D5w-~f#T{PKsCRy z@apDn$fY>E(5nPhHhDZ+#{4RHucvI&G zqt4!mH!-SueqfJWPez5|X|fC}&mh4HG9ws`%)uGlAoJ*du#w8qGi1LHVx63o0}Ai) zLZq$7-mT6mP8n!Kzy=_YrO6@3BwL*mr9JfA=LVymXTS*YxQps+Q1b ziV-p9RgX@S?VM9R1lWYu5@HKhC&hQA@Oqro2UqbC8mcSYqOCb02VF5SJr%A&%j{oe zF%IbJ#KGcgWrt2P(u6A%q-Z7oLGHg@o=aHK3y;XuycUDt;1!uooH(&7LgC>D6-pRPCi((+)X1*>4lxI|4Ym0m$L`+>ettN5w?nwY|G4>EFx#oCVO~ z;QsiIl1hVyg!tKsEP4c&j3+n7Za6#UMVUVQ49OLgR2qhOHZe{XhGddaEE|%adzQ(AFq`-N@Z|`KUET zGtc`WeCa?t+eN$+jjD(c%6S?8f6Qc=`nq{?N*tJL0aOC2RqkDFslXw%eCXvNN zK85U!YcEi4v5>uS*A_k>byN8dzgC$`(3NSnsrEdjQYn;kUK`&}{$t4UhwDw+&{n|g z66=dVG6`I-JL50cOU{J?^~^8imC){=VkBW&jAE^%19{IC;DvPxZu1zHx|OWmQYD_~uY$z5LEDjeT5&Q>RL!iS zUZ#U4(JKm1?0Gn3Ko^Z?2D-yLIdf342%|$YZ^pd8vou7{prT$>9U| z^T^0GT+++1I->xxTynzW01_S&xt>I0Kr(*dl|KafD9}Q*Vd#)a&$xv=JOXe`TPUhK6EHEiId0^oq$sXt=qFR2k?GinQ)T`2} z>vE*ky;-{WyHmZXDLSaP8HOuWbEp^hM^q}%IX6=?Xjf|PW-uN8ZbfQ}u)zo2etb26^7KCT{yf(A?A6LX2yynD+}H&<@}?S|2{vM5bSros#7cLF=dDFM8>-rvNKwWjYpF9O9d^5fN&Cv*k^p`N_v=kV|IZ0hq ztO?RnZl)}sAo=!%lf`xJ+r@y7XhjV!=p$!v@NW#PSZsFa zg(|$+NE#Q5Q$ND0(q0R-r|2eCB=~cwQ`!^PO6*wUEHZ2HOXF?Pn5yk)av_IYMONXO zvVJf_@_o9+E;6FDe%uY@PT~3!Q8xWPppV0)0%Uofdl}?(m4B&TXpZgVQOB12c_le 
z*KeoK;_XVeZs`%_#*Zz>6oWd>ZOU>)k*&6MmC`6`tJLr@J^N(5RZS0vg!@a-9zg41 zSob2&xafn%YP{5g}s%cM~n!OHGId%9ae{=Le`Zgn=2P?VThId=21% zJfzQK;95v?=v}n0>5`MmMwSPw8!VC;rJ-p7`Rg}GC*dDt67 zFafPR<_=YW0_|v**H~{I=-mR$)kjx(z==WvZUc%{)9W*4SY3}rJz69@9U{_bcW(Tb zV(mFuj|-$z_k)sYe|?jN>V-d9HLY=?-02jP;+P>O9Ier1em2w-7NT?GwJ|0&*#0Xr z8K~y9K`}~4GvzsyIiU^Qcc&ia1lPEB7q|R0gLT4mN`)9CwYzMIBr#xZiY-ds^-df?H54j3fJ`-qacBj$yt3JUv*T;-;|lFAF2#y)1V z#wE8x_d$Dx3t)M3v4tnwW%qo~uC@uoZxb=-^IL1*$SrIFy4j84naXNi*7DkH9O*5C*I(e|yGGZ>(faGq~O&fd{ z@fJ3*p#p~H3=#S|klZK9fZ@~9835vxpC1)05v{m@LDFqH&jXZU`7zjH6m$+37(Q#^ z35N$%%jBDsGz*j?Vx&P(MQqML^+)Xk8Em{9L6>kk9M^l-pyuvVA((|(X|xs3PZab+ zT@PACr`~Go|NQ2jfZI$v2-lR1a2M}a%-+9_5?F7OFKYztUvRZa1(>DfBifRbm5M~) zmX%XD^`SgtzqTX(#ELiZUf-@>yplpgZV2Y(<+Uuiv~-9@fMRpWxZ3l;+S&w{24kE4 z*ohM-(hUmllDmP;gH^$$cWHWg>di_VpjOthxUn=~0Op0H0lfK!i?X+7Q%m*w_2tBt zOO%&|8676HZ&=)vq!OaK6ee{0j55%_esGt&s-n^~7K6U%SL@|XrzH~qMSy12l)I<( zF>!p9=L(7o?mi?+;g_f{nT02ZrUqXF7H}X&32R55h!-*Ze17hpADj$B z=}C3?PcbnCRU+?BMK*z4k^MJ{pr0Bd{}`b{)-^G{*$K1@WEA!K z_^+b$_kQ{UW$$5-A7c%WGmXVokLeI#Y$G?;3{+3xC+HoSX+(E%fyNi7U z`nQ1-k@*`6vE3EYJOMgJHlUJ~G@0Io65LnzktH;I!^Q5aJJ za*3CpzX6#C#wk|R6Q1n6yeWgY-8ZVht+HbtGeF|>AM9x<7*$ag_#$+7Ke+)-@Br^S zW(A6zyN~m)zS?5B=j@7ft6Cl3?#@go1%W0E|KF^Q^WV!}cOR+XDp2kThdAsS$R5Bg z1IfFfDElu^y1RLQ`PFUgsGMMaPQ?@cuRB zb07v4(KZ5|JG|m6kxE%v;(yown9)r7UsqErIxg$7T zr-2(Hs+wHI6n#%g*yTRlU84V!B;fdyOF>>hZ=#iV`qZg7d0sGLO(#p3qtdu5otgjc4i)w0>KW58digNk*i^%c(?H+fNa1^L*OeiHpv(yB}u!f+f2>;zB0jf3$cYQ3gFn zNwFNeoByADwPpsm~ zrF-|#|0j$0Wd(go9(UdUc|-rvIR3kzjzWOZtgWwh!2U;ze?JHIZBw(E{=Yod-+6%n z8^WL+$&bs=DD3a8_MhI%sws${*r&a_dj9|9*My^h(M-&ZXyN~7=M_b_da9h0Nn7DA z?G<%P%g8HNuB;*t{p|zrUXrq*p@BRQb>aSj!(3bwG0!5*7w81pcjFih&yHJh={;t` z5JnRNgzsu#tipuc(Z78}-{&YODpr50t5Z{Rd4^t-6y`;}{Ks&s@M#wB0?8=dj}DGG z^I8%A4fnK%S9{HJiTY}G+2*asfAfuoL-u{-RfLAWl~xG}3DJ>^paPNRk?^rmeTO@i zCwC9^n&Nvu^w{941090(%ngNSEP|&&hyP(#kJkR0GrJD;-<_Ex1(3#G_mo}fYV;i^*??#ow6w z#0f=OW&t5^CxHuYRMo@FN%>3usB$RuH1DD$oZSS16Z^lzx6tLERdg#GrI+z|@j 
z-h2iZjT%1Zl#I7E)1(D|qi<^du%zo(%p%8w+=;eg&T-sp4k^QhV&h6Yev<^f9kCBu#y`2I#+ zeN&r?>!9me^8QBb#yhWwTr;E^m^+OWk@jl8PM*ga2L^F-eQB3q^)hWOb_NYGw`vrx zYQMWPLOgHZeW9&au3nZHWBZ}=h$MXc9a+^UxBZ=I4VTk)URd$+%D!U~b^;9Y8&3>* zE}k;1bi;>_HA)OP7rWv6>X8E@d%ekwFQy~ppI{F8eD)y1&D z4S8=O1=rP*mHQh9s1y&%`gh()JK$buKK0R%L)b9zKK(>0XwWkNS*H{QXeR2thd^Sc!sJzqb2 zXWMo8!NZ3Tm>o6!ELhzBhGf|)AE$|KLS!NOMk8)hH8Lh zHrvVZ0Izxe-9M)b;g2W}ve%r>@3y_S?=w?-)Pab))!by)DhUao$+w#S>bnROhOpz^ zoV*P0z*y0HI-ORw=$d`Q(6F28J&LymMz0&R_599e{qQZfg8hvt;y%$JGF2m#nPZO% zF$)YN3=Mw`=x>FTEwPz-;9;h>Ogir8%pG`qeT;n&Q6>?S@PQznhmDNM){&rpP4`MC zXQ7%nY+?u+%4kpiNyH98=*kJmfvmk$=p@T~{|sokpHhKAbjJG+9-Y0v49byf3z=O- z(0*&x1aX5|F!eFnv-)M47S~Alxh_|q<=W&S0{x*TIP{^j@h=f-g2o;x3I4t=2Jimvt=k=IyyP0jVfd#(Zt z!-mAAG+c$wpuPLb7eoncm=VlIS@et_H5GcBg$JUzlTUFQ(vV-;l;r`0bi>zw2C)i< z4_r?;Z;qL6wB^|Jv-gXUgI5L9NA&Y?+~&G;J2q0#JEqHLYs1U->)XdAxi-@h5z87y zr}MiA1`v@yI2H>dMRZ$DBAEqan#vcrjGI*1OgysZ5u%AcQcm%fE%9AAn=+ylkP@@! zSy?*?haZuUgkS_qf1Qb+csQVwcSuj6Ze|DuhS6H~MS8u-K_ql^PBB@1d*e0!h7f;J zMRUz^nKz8`kUe^nABml_c1*Pys5FZ?f_59gRZ-K>l?mTKnOWH0KxGkX>NN0Q;>0rZ z#y-XJlqVm}o(5wqz@G|u&*Y5vuIPvZ1I=H)=J=%UUq zVUu@%*3~v>Ol5>!4o1XBS-QxJ}&DSdzdu#e)=T3$kQ&G>>AMj-JqWtVG!u9y+ETRgm9naWraj z@}4A=l(sPu!(}=Av0}kD{LWm;)_vvL{TiTVPWw_qDBrR<_NIu*lSn1ITA~8B(xZ;yIilcSYO{_ zaI39JR6oWh+4qATu0P+q`ZiHE`>t2VY2jQ)WuN7Mdud16U%(!dpDEVS^f?0f`v1vJ^g-4R&QpYq}FJ5#fcA!5p6UQw@HSV7Rjt# zIn|Jo?|;*W7Zjy66E7j2We91HtVdPE)!VO2NH;CD>7MJmd-rada$X}R-tf<(81RXi zd^EI0{~IqsGl|mxcXghBp37C%_g2-h{%cXMEc{`*ANIren~miq27%mxHlv!P1l3ge zkGxJ06ZEFLGcT&>slp4Wq1@Vw`@HkEm? 
zdxdPLA@)*wd>q!m#Z{~un|fDxEvZAS#4O{eXAJ8;mW#Lmylmim#7tgR%vlO zRFL`{P}hRwCZU}YXPmHLr;NbU}>=-+LW*by@Ybdu%pdd zEKV;I=BUs)dU-r~M(~2b< z$ht(+#AGf0**i2OB(iiS-YX<;@XHoa$yt_@{ooHqg}2h*7~7O2rRI!LeeC`#>-f{{ zzEpV6897+$SPals5EbEsK4RbvByDQyV;~)8)P1K3>volsb%s%{)eu@L7iajl2Y%|L zWuCe z0!K}!U{Jm#4Pm2;i!|r-5gSGC?E4!`gpuoRph0o_LNuWqF69J!KpMx7$IF%@AVR%d zhItI(ksqMzVcfY9#B-^&c*3#H@Em=M+>)^-tnkDAf0RW-#EbQGE9N%0syGuS1?J|6 z$40-6BVR$3&5$tN%~+lugE7H!grJ3P*2XiU@rW><{q4bHU5l53JlVhfMi%|amHqHy z*3PEXF)E=OojBqjx{etA`xN#M@nR%?7fI0G9}k5(u9q;BA<+9^&YRZgV;7&xzH+#; z7AxsJ8}ja6Z_k+ruk>7bwbVVx*zrLzNMK%>2f;?PM*hh6?6iQ{+A3pq&7Jepwt-&mBWayGl!ZTwZ|6 zFS~(x$+o?ux|VnaFdDgDv@N#u{r5E-p7}f};!s@?B16{eyAa{YuuwNi*4w>70(Y*K z9OGDZ-){I4V*7`N+`3`j`-J8Mu0Wu+g3=y?o>T~=Vok-viY1$dTtn^Vt86`fZ-J~Y zzc82UplND%a9uUxlbr4M>kTaNEqyCQdEclmEZm9;o07ep_)T8SAP>?$`OM6wGu6AL zfzw2y-+iu^AHWi=)+AZs%44fsex7>Q&X*OVD&Y~@Rc#4N6;K+}P>1Mjnr$#wGE0Rq zNG)gu(uBmRQjDkNd~S=At~KbCe;<3%qHtmIw(I70_m&9OXFaO&L%E?@mXWpq>P8ps zI(`q3+lW4?$;#l{+^Q&!F^cMh_JgS{qxC5@F@)^OE~YU3mqCJZPDt~7bcBa9Qkl@V z2rCx9FW7~eXibR#9rKDc=1X~zBBpxTH9AVV4FcAA=1aA9Y~mZ^cf9N^tbc;BU$}@n z8NhfpiHCUI?CKMa#@I7LH&J(>y1G*5&9IQN4|#Z~lX-{g7q9XzVkPt#&~-{Y)0*Ta zif(V-tl6?oCpQK!mlSTS%@tIFYaH+BdLgqW?JvXH(SGM`o>z@$<_DB`7gvH8o8yDJEBZ(<>qG0e<|Q!`j`rOSZX%lXXyOZ)c259Y}Y_+O{`O1Nd|Z zo(K8-j2-=;#qlr+Ce4f{l;nQk3VHX;Y!ZKuU3}|QPG<=sJUnFa4Q%nXVYL#Mxp9eZ z;AN1Zovlozdw4JZG;S)8VPHGF>oa|l5&IGlfOB%W9(#od8ia4c-a2&zD%X(g0`J2$ z%@cB7XOAUladhG3kGF%7MQbXu_SdP<8VCOL@7Ip)SU4g1zp?P4LGQU3U-*7wm6^ff zV27Uhn34?-o5L`%ETwgk#ew4cukGNuz{-TVLaV=}DExuI<5WlLuD_6;J#%|uc?OH2 zwMV@cNWwc;O6HjoGqYPUEhZPt=D@8DQ|ntndi`rl6S{=yV|7I}swKAvUFbiweA z@|$%ao0zit_JFxt%2k^85RB;NuW`Wa|TpRJT_Gk);#t9_iWimVK6 zzB-j&=azKLSXMWC`JJ0rPF0DfzN20HyXVXfXhB+5krS0Cs*#FF;F4?Sf>{yovJ3I- z?JdUQ@3NKSq|5XVb~uv8GiwU#AN`klLzyT!n`jw=)zd?oiraL=HI;+lIbEJxD_YI< z(jaY^Sh>i-Wg3beRmKu7vcXMTgnC}pPD-7DgQ3k~BOn~a*F#bFQ=S)n9F+B5j(El( zYv(X$8dar2tjTu%V8OZGPB+sh5d$t+W1AVgO6PWFYkpX%PD%pN??sJ9=bH=4!k`yd z=IZ3jm`qS{sl73eOjH~@NE%*QYS0m3iFNOUOx!3jOZS@Q$)T44mCt%#W 
zIw8+$v!{P+FU+uMB|-J(%@|q5uHGW6m>^CcgV~A+3jUkbX#`H$R7lz+yTEuP2zaKp z-j9T@m>1l>ql?LWbKh0KrZv%;HB{|t2~MX_u~E!MNJrvY5j&nxf!_lKqnJyOk|S_2 zb?P?g9#gFdzLw|$nLEIH9`DT7V6xa^gLV`}RC;Ccnd)8($|YFlXCrbBX~g-Qye79+ zyil~f6j*EPzM(uq@RK`wL2*_<|AWDzuVo?F_?&qBvuQsY7hq}}y61A9 z=H2an*~a?CCI#b}>#n%|R-+`3c!VVy{Wc9!DrzL71(Ke7LVZZLhntVnDv-X8?W@o% z?W%w-O9q5~_Hqa<3T2ar-*A`iEwo4|acK3_U!TKdCghs3v%Zoj8)p9H`F>U+51;;F zNTl!&sc9kEdU<@NhpOE%0&-f`)3I$Knayh^S;tsK4Y`B62z0J4cvwE$vC5}{E(%1SU%nH>&ch;1?0eu)=swA!l**hWvE86M;pa>2F_@_AN(g} z(d(=6)fdxhri+m)Wfe6G*T-YeoE_freWuh)1nr~wW}$_obuF92$4t;#ZxY4qv!Las zRUX$e%leF(R@m0P%qtEHU$po39b`IYB0-E3F_nTQiFZ$Ui6){x!cM&L;9-1#&*0Ag z6QYIgKr|Q_%kmcxZ9{1O?pXGsa{u%0n@JywQk8uLhKE1r%~acHrpTSrFw3#O*bg~v z7+Mw^C$}V~Tj5<=uLl?a;^dRs2Z-FXoK8q@QA6Ly!HI+H-C2bDmtDzdW>6QM`IhmW zmsp`=KmD!rMZvo0%9l4lXgeClJBubNRp{^ABBbR|%C3FV##yWHx2Ui$qc4Kutruzn z(jfj*>~~;A#F?JL)WtdG6!w|HZJpfJ@@bx<3NL7q+fACXN#r_RLo1JUGH{qVwzse+DNjJVy7k8EOKX zeRq9TOa9y-!tczhGIU3}I%~QUvb<32n8P->d98>Vjm33pB>Und?eYBDvTdJ@UR%eD z+uCAel4Nb-K^jt96YurUfS2*wt3Si(3SNHY?!Ljycb>zvsxcx+kK&SzCO^NIm${Y; zqcK0cQ_e$)xcjMdNm!VEa`tjA^oY3@UQE9)YyzHV)9^-DuWJRO@5g^qo>O4uRcTGM zgva0?wm0Mv--(xXzaeMXTS81l6So7Q!gFN!;=Ab`8{{@tnIv1jrA4c0khB=TP&3aAIsZ1Ej4`_V1LsdN09w5~_A(SJZMzXa1} z3I z&UU$;Ded03yMry=i}D8%Z6Nw!zQ)7Bk#k$9zCs-U+hIWaV=z|oP}kwTMl=R<5_u%Z&g z_ItC%ehBKzK(G2K*Kr}`D!KfO&;<3hMdHyWbKeq0IW{pxxnt*Avn~A=M-O<-5MXtr z>v89sswHb4;<^KlTW6zl9d!A@&d2x_>YTnFLI;tq?kenE&(P_zlW;mNg=|(U<`Wn4VRb&#1OtRMl)b8wuGMX!%v0A--%Mi0Mo!j{d~ zAUL@_^*k_BCmRL?NXE4G6(<Jok=u43nXYzY`R}ZV^#9m< z�)jrdwDMl%QlpKyuCs3WDS)pdg_EiAq$W#<2mQK_nh~R@5jCWo-rJb9_*&+y>_iyRkLc&8mLld>8l7|BgIFIWcT1*U20`~ z7gqYEK9GPmdenLzioJ@O7K{OLWOo%si55&R3Xhz@^d#IGs=zSq^e2>C1%9ViM&dsF z-7vwQo5|a`nCHeN5^>tiC>*#qo5|Saehkoqr zQ|mz6t{Rr_o0izy-XAw}bUK@zeO~yDMz1Pjq=&|VLq{-!Z$9`fWg(x?=jg2%zo1Lv{|yhh$0pD$aQL%H~d*qh~b=F54BapzFT0JFg7&=qQoVu5 zbdL4u9-W4f_o9zBN#`2J#0Ik!j4@++ti4ZHJt+7b-_)#ZySGteEezQW6$$L-#*g)v zY&{3u?UOKB>*~`PrI9D4)yeT@bmCsegac$ts3+uJyE)*T7E}Lla8Z 
zrv#}#0dFSv%?0^H%&g&Pi4(yIYdm+iEKuEOb@jey|EcJF(~?%caloXpIbYY-*;r$y z$bB!0HD@?Ahgl`9F0*e&%&T!pmqKjrO4jQ62`!+xT^rM)iW?HN(qjckZ9KQGfPzem zYD9mHXtkJir)LKM&bQBj>Xg!3x%(pfraZ0}Pg`j8wx%pUiX2MFI)FZ(b!^LXGk%YQ zO`ey*tMJVh$!L_gVdW#1Cp6~VCrR6|(*0O80f_{^u;kV7sU%l4f8l|ah9Ak7V z)Nqp>uWYazvRlZ1v9nQCSd=EZ-J-tJJ0V(R>LpzW4$s!Iv)1|84hI>n7DNnw7CEox z;D?A1I9CO-!Kyp?Vuk8~*~rf)$%0VDDZ7#J9u$sSh1PY$^KWVpobwp}I__zKX+ba+ zctf1;`SUH;i7~31y@oGDm$bAbN>mHWyej7F@DBy82lW6FF*MG?$EUU?SY*THviAK6 zsDa8Y7SYthJC9M8#n!0z2NCzz0cggUiMr7z_r#(Lw0(P#Z3$dlIpL+VKz-?rIG~qKfA4S+%pVL!d!c-)x{_-j^CB7iQ^ER2301-IfY{?U%-5 zP)AoXT4L6%RdkrYw?5tXLELFA+y9+eIjpPakKA{ASgt;b3bBE~Tx|fvYX(%1_wthU zrH4n>wrWnq9MlV*oyekp$a-?0U%$ZMVz$X+hexsWH%xJ0wx+Yo;8ZM>JfIAaP=Y^^ z(1KrKwzA*9q6196ppEMX-|>9#Xq3}W=V8{bi^;Y&{`tjm7E5F1oIoN$kYj0O19?B+ zX7a<`DhFUj(+Tx_Q3hh}dqFGEhXVrx?gPd^wIgNwfeHVIVy%WV<&z1h+=krisag)# z#>=9M2QwGQ0mSE5Z%yNI7tMQbyIgrpH3~b4ZgV?=lA3I0dw-=Qrf{3qwz@DgcRR1) zSmHoJu9Y}lnh*;+eVGFC@sUiON*e}Nm9$T`M%@UHiYSqf(8#UH3jOob8Cse%7XWn@ zs`qF{JQkoGD7+y?b6=xD{|r)hzVc4;$~(ZjKhQ?*(|%{&n11iURc_)Ii&Yodpek;3sD1XK*<z$yNx&Z`59`Om-E-nOTexpH^ibcU z6ToX_nyVH=Pm)I};!*EGb#}5LcfLcV0rjUAz<@#^mf*1gr~pFjJVKl>4H33`id#q# zOK)xNukSoSO-a2C2F3n@H`>shSb;o$s^a5`&>lwA#q*pGX5m)X=zndkm?IN?JrCFa zh>1YCjpIFCK}Lc<7z5mKB4%r01COZ10w(f!7TihBv)yBwiSJ;Rm@FzOu9#IiiE7P{ ztip22HNC`okKmeF!7N;eNKcU2^^fLIx;wvc1V@2?jjTZNTo@`}C00oD!y+9>RI{QL zW2qlj2E(+A3o%za*CWLDv%UC_&LS=p8_z{W$0w^3vt2%jUr@_^M>IH4e@-ia5(AAv zamp8N)Ec8B9frk2{qu5>B~;Y=XNO5HjMeAuj1fu)E+^Wx2*H~V8>f2RTZ-{%4%PIrM0PSdpDnEF+lpu5(?0@f-^bdUZk6p9n`;*7 zs~ z#>9;uKKdQ?P7}HI>)QUP=`v^pNMg#NMyq7NrA;r_2`P;nJC1tbQ;V?`La=@2b5T3r zvs9qTiG8}_8g~Ri#^5yZnSGbZyjO3@BJp>6s59C|ar)yCyQ&Bb+F&q{XXt`1wiH?a z#(e(MBK`5u)uM;hu)gPm{uI%Iw%$k+$BNahh6_!z?i8!jAW@I~+iOYM%x=&@2eq=6 z)KO=5v%L>Lc1E-~MaS0>$|>H}TifrFxvJfsEVBfbYSxS#bi|k`Xs6YUdN%@?_C1w& zC-kR&!|4>?$C;-{$}FDGA46ZFE12#PX#-@VUiuMK=L6Sv>M0o=6x%CA9drTAg{AiT ztXO$!VKvW2itmd(2Yohv3R2_-nV(6q$UKYb2>vIqT{FsG^!)( zPSp>&R6ex9U8XnkIXrC3#wP)!n=Gd;p3F#3Pf>K1%#otJ=P=oVglC 
z?7}c%E_-K5wx)+c1fH9_=>}DJKsQ&c8MteOA75ledm-eD*lLK=vI5+d{f-2c|=LE8Ug4`e9qQ)HV@p7QQ*&2Ualdvs;#R2|7x*{~#@lxwvJOr#h% z^}B$S_d=7*qRI~mx1E|nGDe7;R;vq!?E^vVN4xQ1o|J1h>=!$W{iYm5_Xm(?Hf1(b z=`_|~-@T7k%jwjyzD7VDS!sK(9CRbNJ8p3jXnA(m3Qg~HLJae%92rH&r+{HlBjqs& zDDOy>nf0vM-OdX@ArXpw(_29e6f2@$ht2X(KM%L9CD$&WeB4Fw6!s3A1J3Nax2Hc+ z?)7{Er+V?7dY9Mwjd>O$lyX>XhT?nuXQ{&0e)sZcx^Z(ot|3rjXgHnL=bqSf&%OFy zu~mzsW1=5{!`l4+LB#u`DA_ba0;2brTVPYj3BMGzZ#i@oZ+8*ar4FIl9tadGNv`&) z@9nV^R$ex)tQ~VX+iO*26t7>bNI1eXD_q-8Pt*@OSo5M@%jdw&NVj^Kz867&u1DGu zZ!r+jaE^QtudyM2*^B}$F%-+TK3_f_9IY*;);hQArcNB|n11%UXOz-Lg57firA|UX zD`vvg@GIHraoHwu@bKamP|xjdWoUL%5BSZ6$B_QzA>9a%enOtp`Q#7zWud^N_~TiIt|~^otXHId2h-y(dA9{52GM zh&BWyI?ZdzEn-axt{?cJ%65iBI*=eXC@2fwVVCqfs~tt+?%ub6gjZRkEGlx2kk&=} z0ijFnpI2jwO%3_swtXUJeq@=IwppBUJ`b|ft)}Y+rmeas3dy%dWF*t^elefvCb1|j zg`2_t`WSm?0#_tB<~zO|adJ5rf+^CBXVt6W)No3&(PC=D^x)tc2Jwq!`FWPl2Qm0{ zRi=NF8vi__Ygw@beJo~@J2yQc!L zvF?RRbE(}r9#_f>*AL6vbE8!<)Y8j`xp6;jMkPN%cVSF>W|db2moQv&U#t^K*G1Gi zzVJ_~DAeAvOFF7WY_x_SJx*1BVO-_0za9Vup^r!5mp{e{Qmoe5M&UbM}#{FVV~n zH-(%-hLLshdVKP>obD7vbI+YlQeyHZTsJsC`uM zv*U~>g#V)Uq;cwijP0Yi!#5_M^M}LBJc{@Ly5^RfHt zbiIekaxZn&+1N)DQwUwNQC8QeIQ|CpBPaO>h{gm8 zCYLFEun;kF!C8Mzvcjf|tp3%cyvE$@#7Z&#kDec}S)x-V3e-$ZzDEvdHu8dIxyx_e z(fMt0rFmLG5$I%6^?N(?%o<=D_C?oCty_M>4gb+A!IXsl(|*Wc0h#q%e=R#5*)P~V zIOJbbOLTulhuUlw(R{Ydb~KGwL~w8vXj)Zzcb%Tb*QtI0xVk6Dd-bYbQ}bb5|7an9 zV~BpFfNbCO+K%wAa)vJ{2inwGfu6;`bQGgc5hf*HGj8V*Ks0lUzjZ8i=tVc zP=aA8(fsDhUA}KmfVNa3s2nYsc!fLu?=(%{SO(y?faaXyk<4qFzn59z4}7PAMsDoAO8}~{P%19ceVZ}8~9%=^}n|D-<|lMKIgxF>%Z>wzYYQ&5B~S4{s$)qeWLu= zLHyT2{MSMJKXMS+fLEC65+C12dy$Z>l8#1kuZ>mP)dT4bVTxxPW97?{dFk$&Avd{s zqN1Y8+`6v*;)NXhcdvL&x@=u(%`I#^8Q{OT-M!>u$46ANoBd2#Z{VX(x9U<@!0KYrZF zaQ_o7UKeK!OZUAm8}MVdLV+iXd#mcL0+7PdNKNi>irwG9nMWPs)eAti1lckV;5IV= z=pnpnY(b%%8B{Jt#5{WMCGxwVNuN@Xm)@iAW#uOP0?JK~-}i}(mj9E$@~@V~M2=>@ zVj^O?f`9xtx$rjh*-z3kwGno)ZGC%RP(zO4dDu8q~Xyz8! 
zA$oEt-hYEXn3;cA88-wILF;pBI9Pf%VZSkc|JCH@a2a`d57%tKf)+E}i5n2u98xVa z`o?E@29{X^wOeO@_>=zCRAs@4O)u%MCU9b{jmeYxC4uRZ-|dq3lTP-xc+g_46~UX9 zRj~T^i=aD22|!7UfKOTQtTv<~EbRyoJhS!2b^x3xTcEo|7UA?yFy|jl^1q%~hr(}z zySKLYcl$G4{~xttg93)?O~)Ox6R!hNIRHkx>Hop#%|LLW@?So{$Zp#YfV6e5mq z_BMr@w3nxNcRwHzC2^paL}$zVj{?qL*8jin^*=8>viSpaVXcbFYy8{%e3^a$v7j{- zX2PP9R1Ad%7AUT(sulJs#mChXdtCU|FMxprrK4l!-OzR2D(h(2mgDCV`xM%)}Cn)g8cH;&Vb9Nv9%AyU4p*6pX_*?3a5PK`Y|jC2@*} zQVpl}0VwZ(QfU75%lj^%+sXmVsfdmSd0`F9vUQD2y^ZUb2|1qK64+G^q{Ub z|3^dDp9}uKMu^dcf54F)C=!Xrzr8dD@L-^1yBrPulQ!gX$Le?M2j076?KlomM*V5` zK5b9XV`~q3Z5`s;EPoqC$DbU}Q6X*sIR7HD714-%Zam!^qAbum0@?$WF$0=a)0}8S zciX7YR4bIz2U<|7mk&By;b58=dSBRZvTY3${3{0g&m+V)`{o}9M}d6w{NL&bOy?wNa?hjQyRLKbVCvw_p4Wx+R~X`C%47;I?j`Jf=cWH7 z1m;(o{w~boc(PS13kHJ~7F_`5G??^yqIlsgy^vPBrS7|@4z>5tpXWZ>ouMabRS{gO zJy>S|Bbxr)Q)S1sC5xct!E0l@wAgifE?U5vvJDkn&){=@8qM=S8=qFl$bXyU#C4t4 zHIx8 z=~+1S!!_xzGg1Q^Y0Iuq1uNfTs{!?_^%QY&cZs=E0oh9!)Y>uJ;X*B8E5s=TE+ zCg&D9)S(~xIy79DQvMu}$&~Yl-6#l!+@|`=wCHMnNjr4VL#`r}5_mY@ir->Co^y0` zj78I3Qyvgq;y&KxS>21 zg&4$I=kK$4=Xcq;s0wh0_4rKM*$jQk)o$`6ai)hSn-w7kkk-!{y;CdnrOdEltTMsY zgxHUP%$=_g&mPAo^vwAE!~WVPpm#{~eO`JC1Fin|Jc6Je@2@wx0To>pnt2)CD@s6G zwz%yr_%G}`m@NFu=qCS&Po~aFUqt-vs|tW52-qw}1w7U0wxJ`FVN4v*hO2S9IbK}9 zK3VfMPX+F{Y69q6au8=~O4|GG)qNY?&yR9@s1~LUrgzZyyPWQ$I$nh|DzX@W0uuF| zp$}UFg@w4;B^A?rAa3Bye{%xF$RYV&6~o0tO)DNi9;Dbhy6ds4tLgx()@3VW&fT8k z-Z8};i~w9R4V72zY}W{AG~%*&u^uJdcKlaC;Siv|ZCd0zLyFHU=O|#{$vf&K#wL zg(ly(zWvO{paMhG-;6T)_5~bcoM2|aw8@aIa0rM&S7zOViNA&MSj}vnoJ*+fM;QOL zEvtwh#n6YEuxls}zB7rnysO2uGkdd!(rr!C7Vi-CtA=px^y!e~-=&I6H-AMM?77fc z_wd_7rgM2i%uGyg(c*TXzV3?I;zj=?UD$B%q&Hoh7tnKqsuTin589i~zrNou7-H?r zl--@Po*#}B^9sUky_nFhw;OV=Db&MC_jTl-g`L#d&d`9ljUU#hYURgWtX4lG-&hk6 zuvXK2dMn+AVE}BTiGYaux&&I`_5qtC`xx3r1f*rWZo?unP7g~O)*R96b1!Nf`SQDeY19l|2KC+3MEBfXJ_y)tyu;C$%(=w- zft%mV+l+@L8S(SAAF{*j)5^;x96~*UYWfW64he9pe7Bh3w>|;g$1Bb+SG4&Efo$mY ztHC^1Q-FHu8!BSq*!Mudmo=+7v+yz_kav9k&M*9ToZK`+8bq#_KBmrdds_MR8Jx8l z)p*oA_n$cp8(2af&+CJ^H{x-igYMdCcZ_X?_s%xfc)d1{z|fer(qZyqCkj53SfTRx 
zpoYt&K-SN=H7wS);Tba)E@?FW#!P&fX(z?R`S!ByV3nZ@(hDcmtx#Ri?YLmMsAsu5 znZDdyr5p%(u*WPv+MIOjmVE{Gn)|}3s-;j(w|^jso(hk#toRibaHq3jIHI_oXwM-o zw|D0?`k`)q?=Noxikmct#xewdy-86FEJsYA_P|gR5xDliSW*Q@wJXzEFRe%CJf~0~ zNil#KG|)u_Rw_PC`b-?nn4W#3gKJ<}S#d9sk(i3ax{jVrKPbB|UTZKAI;XuB+>@X} z8@EzUM4Y|krQEzaSJ@XWJ!p14?%t~*#qOM4`|TB*;2PIaT=>--rMzQR*Mr*=?yUUImy5|#*pvND@M50d zKYul3t|9IA1PnLkN>EuRw}w?IByd-m)MS0eEI64$7{S^Dq;;8=PT5ajGB@%~;90RI z?KCdD&wLCR2y97ICy*Tv;wewAL#15CuRjYbjSwTx5L3}&a`5F_`%bYi9>*r+^E^gR zJv~(7gV9^R;pK-BKVsy@^nyi9HOLy>*$3y#bNjfNw17xRRrPT)PV`DUKe5~EP~n5> zMlvs+S0gp%?An@VUO6#^stLpHUTIOp2QGQzLy0~vy0H%K9Upe_@3ROk3)!J^Gz=caOE|y9oj%S|Rock8X3M1mk%>@UeqQo! zbLybWJiIF>h`ROGYU9>(R28k0!P!RPyuSL0;7sAgc{yo=5?=sW+!qyrNJ5m)HJF@y z$N7E!#mqH_?tpUQg4V&)MsnXfX;nklex*pZ@~PG!it_bgA-o-}=|zpBcA=XrH<->QCQGIUQ$LD9zafoixz0C0u=QW>_9(GMHm$3SqoCw(31+1V%d<``YK{b|?+C#Y8 zpo@hTV*5Ii@WKJDo~oT+46VraKT6bQ(ERG@&k7C&`h^WKzVcuKm~M^|LQr&^1L7_R zuS2DcNX91vB)P7<)4?^#*td8p5vo*_Ogky#5e zE2O_)XKx;66P?xK)oPZ!ydM*j+>k&=Hp`bc+drvR=B{P=zRE{TFZ`@jDxeMqrRDNh zXjZ(D(oDl;alv(@vVq&n78Ug`P{Vb9keG<0-iyhls82y+;ABeD<(aAJPPoy?cW%xa^Ugi(6b(Q_y~X zzkKlx^kcDNhSIsbVVqH4)tyT5!!Dz-0~FI;PEVv=ug;zgJK|#_G?td5)wOq%nOp-~ zSId%jdq+3AcHLM|G~R#$8_>>8MLjw~TFtp07~gCYqn-F9=Uj4|`>wePV9MFwIH8yT zJ&YA9xtN#;FY0cs*Ha!3Ijg;Yyk96h42jL($vt49>GEhyrVsQ^F@B%!@_Ncxx&({5 zXeJZmz})5BM&m7O^(NMWanvlFfHVmlvLm=Ay>XI!=IR%V?QDtV=8=zE>vVe4RQaHU zaGSo&B~YE{IG*~nd%N$iWHu3Rewb+_z*2Dd# zTI=%==(x8k)_2~N8m0t|4`+T!jjoz!c?OIsg_XOic1RUN<@KTG7a;JLkO0cv1fPC% zL^0-(a%xJU87U84Y!W!N=4eW9KA*b&eG?r|5PWaWFuu$SEMhh=2>T=EEoO;dGw%zH7GUCe>1fo&TeiEKLF{m@QFjRr`|iDx5CC;Q zrs;?*e*&*onwu`~v7QYt^YdWFb60C5JXn zDq`gB^z9rDSKGARh7>KbFVfb{d+m%5HB9%;jUaLa%B)AUnv#d)^SI^Is$}&CMU;6v zo2tb7O2e~3+^;VbgDiW>mif{ zC7DTpW&DAh{Yl#&P_V$x9{FX=^I(a6jAzXEv{*XQctcIP3eWMnxSrXY@ky=0`HsUb zxlgLT=YpqIm<_WCPp0CsFe1IrM8ZDR;=`%B_D#GeuP%oh#+J7+E~XrBt(S({^GNli zSF@Rir{vjta2yuB4#CcL{)Kr6<%WbHm04A4FR;FR1#@8yQyFtf!I{l&$|OYK15E26 zPoPcl6)-mS@gRG#b1)ZWQ^h(wP7Ro5UToBYNVF6<&eifAx)*@Fzi3Sa-I+YyB97}d 
zgp220MF#H+DH$x4q5RGv*|bL0%V&qVw5AAhM&_kSg}D2;(N@efcchl!QmAk9*Gz_r z42YdPOE3%RtiOB3G0)V;g%R$KVqIBO0|MkV%#g9;-Vn7)Z$pJ-F`01dQ%PrTDtax* z$V=lvNVD}wmQw&yUH5|V2k&P-sWNhv+p)#F#~lG*CrYk)sZD=i>ZHg~;LP~8mEd^* ziTZ>aXOe(RPkXi{6}%#T5nPTTMHy03sWDfu&BFz=2Wmfk2w{| zqx*r>qoBT2r9Nf2+3DuzVK>jkBO`{s z>YG67f&9VY*L5nNYm{eqQFym)m>sxWhI2uOi@Ac*9j@Clx%@}9ZE z`%GNyL9K0!6$FAPNf9$zXVnisRf8+5bZBO$tk4QuC5`dRo<4!2A#j?5?Ri5?!bcPs z%@lsj^5IAtB7@oHuAunPvFkz9|`>APYqG;f(Z1PK(mUds#-(ldq3C*Hgr_F0<=b zRr%lKVovCjXJK0IHnS3 zyHi+dC{}7%1`U}srP@!}=5+w^f@Z6~$- zIl~Xe-Xd!o)0hI|8qZaOg;u@eo{AH<>mR>h{N7;d%p1`tS+9Sr7cfj9>rSO2^q|}& z%S!$7-b>VhMM<@{J(9P~_|+r{$)1Pf^d0pE;?@Ud-HC>K>LdbvYsJuwt)aqzS$Om8 zA!w46r_=-o_y(kgF zmmAsFhE(X^2o{oy%HyHDyg%>J*B*R!+~{Dh39t1nfohxFQ`!o=xJjuf-Hq{te?Eiu&sCj8UhbFGf$wP6R z5Yn-GY~pVzPWz*@xF#IokAqoo$8j{%MQPv2LgdRAW|u^H&j!1$FOv3iCAKmerd!=u ziUn`n#XBNJOHv+w`j%f;{I+jj;^drmxx;rvUE`yuV%QX;z1CN6IW|%7E2fB@DEo~; zH{=7mjFmYKQc(#{#RPOj%;#-G|DfW=>bn$^%a=1bswhHY?xAqoe`!-23nOmp?o}yN9;8`dllt zd+_iSs%=5fm#S`97c~W;n#$OG*BEm+u#Ofx(~<&d;AhrunfDIrgLiMbmkDAL9xh`D z-snG{nsrb59KF#SxQNadEhYt#IVFG}C{_#|1?qd@#F`n=sQu#5!1mw@L+Bk8q%bVe z9}hEdDW0M4Ud$;$onx0^Vl#@4Oiv|?ql{c#+@RfM22bvj$L?f<{g0oTBcsi?(gB!Z zvRfx+Z#H2oCp4<(BFb%wMDc~&X+zp(YS+|joX5%s%^z(ta8ysL@CA}7?99SpI^O1C zE@8MAT0AN0%_swV?=3?_mO-zE0<|8Z+4U^T&mIc*dQ^F5IedInCRLP$3bMF|7g(tP zOZMMH>DIkP?&sYF2q{&+ijPj<%^yJ z^QkRx8mkgKohBMuck&P5Szd9_TOq?Jct&^E4@t30sv4Jc@@UEP$IXqzdMb#7TP<|5 z&T!DYgI)G~q$$6B11-@ATV^|}jvXLF3hF+D7jjD{)xl_5rrZQ8a3u(n(UMmG)-)*NJ=dC^3pS!K#JDlZl` z(u4gteH5~ra6O(Aid&2CFu@6<)xD@+KQ_wNtACvZ&BIqR?bMR$yifB|gMq+51o_Qy zmFvag1+%b!<16L#323!JrGf2y`MQ3^BzF8VFTE+%x70jH@$|P4pxFv)5K% zDQoP1#NKb6=GV&N?HUl*Y&S(|VEG}Zg&BTf;%!wqSfH<*HBwCd(*k!c#u~E&1-+AK zI=!VkW44Xvd~OK*qLnc+!bD?KOLlC2t?XJy&aNtP=}GMEdKxovI5JzWriDQ6ire*7 zDhzhWuR~c(QpkR3l?{=i1Hb@+J9!ED z=v@|%xH^_m zdaV78W6JuW7rRrOVo_{lu_GC`yc`}_M&Uvg_bw?U2x)+s#7_5@qep5Ldx;OeJ^Y$# zVJPpT&q7Q>ZA*Ql7X$9+)nQ?)v{@FRus(8YtkSVMMem}x#lYn{ZU>WWx-I*K`Ig9~ zto%`dQ!~N(NJ6Pc*#2DHdzoy`_0BbZxJFtn@4nZb?|YZjht`K;k#Ra}EI{u`x)U(@ 
z`d^~2Pq;`aA2Ssk!@)^WD0{dynY`Yem}uL3kdkN`wd|S9u;5(d8jLi0D1|+Er4KF- zdBi&rRSeBm8qnpT@^QVPdvQ=oNpip8)z@!_;vZY9M8vNZ%3$pdrl2-)m%?NZ_irMo zmK)BK$s-?LI7F#bbgG<}$;vRo+n(QzVz})z635nI5S&zIM88VQ1nrtBWYJ#WPJesD zP;51RFQ;Wj;W1_;m*2M|vmhkBox&sWGv^kecflRk9+s+o;_1*?HRC?vahve_`jj;r zMTf%dT)u<7T~y7p?aB?uq&Vg@(5{ihV0zBrOQ%Z+-&^<3ZrILDf)kDeWYZ+v4x@XK zyA>aus#6v`^?c`;1zV%)Z{UIInr-1#|LHiwzyezDt>{8js#! zI+FCGY9{jyNfBY~pQcDL@UMMyQg7nRXU2Z2SJiTdU!ez+4Q6?@^L0Ffgx!?d*B+Dm zBp<6`6l`Cg`Ca=3X#(5!1wHB>}F`^ndMIc+|v;^O77}Gr*s2_2baMatpgfZE5x9q`2gIquxdWS*;5!Ep(bySynrbCz4^W zlVD4FEPlkm9wz*u@j4D>8_UOwR~LbY-X~dQcL+GA$@z}r?}ub)L}Y&udp<`Uq`aAB zK%M-HcwD!GQ`GCctC&urs!4X%;i}braTwfmml;|1`W&Vss{j;J?=#Qi+5(9ZGFdifBJp@Tw>A+>umha2}|7pxa=*V*gyq|L!eVVNHIYSGrNdV*|3hhI-r8JJM9UwdW7?~;cw`eWDsq%!e5AYSVX z9TD)^NPJUtYm;!tXD8pM@NN0UA*P#;4=XN0OR7wnY|bGiAAeJaG8BpWq8UQ#TAk11 zUFEU9Q0ms@4dgA;<%%OKr$TQ{z!<~jj}LVxpV;TPb_qMF$h5F}R8&<}^E@OOXT~~f z!y6N`4q8iIx7?@Y_pfUpQtj{>SkC6uz;df`80xcq(Ur0a)$s~yH=S*HIKb}oNw}o4 z?6&TbSNIO~vq!f)nc&$Gnj1R_6m#&R6q*=;>sR{fbf2t8BBJ7EcH>WjI`wq}K2OVw z2*&&{N;{fxG&wsC&u=iOQJHi9^g(8DYgl*kXp!ffsCKkhI_E>m)98BK{K7c>?ONFc zI%&J9t#@hEEWY?p=rlE+ZVBZBWMBQNqpZy14_yA-H@B5OtVywN_Me$AV}we0eFb;5 zSyz^+Xi_pBUPLzWNj7L*)~&b0!8h)HIsIkgmzEfCu>>kq@*JFqqB2>k=?8e~xY}1M z{(QCtiC|avNKo{u0SBOKQJq+1hCzzBf5r5{Uk1WL1b&BD``wL~;K~(u*Tma6hs>9! z=G0%44|z12fXB7#Ok+Ik6X3N;>0t47x;nXJbu z_d{8uSfQ@c?}L{aWRGY9)87s=_HA43*Gv<&IDIpJQv^Nt`4lL7xSnUl)id#agjST! 
ze;=htVs(1s@;lZLYT$%-+$Ebc9#ML69KWy0tm(?KlCNAjIb)lGYo=AG%)#r-T<#{e z*iC*jKup4CIjxR2O;xw+TMDNP1srksND*hIEn_OgD<3d3g##H)pEJ1M*vd6Q<$r&j zf!kG}D7lFUA9L9648cqx8~etJ_bgifw2`euFKB%tvcfajwMW|RMI2e_9dlwjUoWPQ zNQW9#LAL+fd-0C6UIN{h+FYEdeZ`go@utww*j^V?E?Li+GmTm$y%L_WV1IgfJi9{y zW=$nY*O9=349^L<)uu~fP*-wOSks!yd1z&6JfAVIiMaj7)&~Q|Bo))uBv0yI0<5;fz`BLWKb`ggL zVnl;8qpFf=rblu!;_(`z(Szp%VG^%4!xbThar!fbTVthcQo(K+@aRh^L8 zWFg3(CZD61JO%O5NDH33IG`wZZ`$ z=NreAOvjX|d=igNy$HEB|wXvjl2C6b;}J`KeetVR85m8$u!(cf!>U zD(cKFIlE+2$nr@JJRSN{Idg9p<}Y|-Z>3~2)rF*v8R@mi;1j%>b}B+b6!-2HNE0T0 z$J>v59gCN{I3XBn&P;-uJ96gkEhRmWtP-=}DpbTB-o=Y3b!?5^xCQa<$5IhZTlY7K*tWbnPC7H^7u4GpeTt&(gIwW=*Wmjo4_FlyN`$JoeVK_K;|zIm6k!YT^EZlRjcs&$hbZ z^fvQ1H>aTE#r6E?RjTmP!O-LHb24uRPx(x^I=;0A=*r0Mi*UYp4g=;}{L7gQnEu}l zEH2?hUc$k~P^%7q>`14o;j+ayOIUQa*T{C$uU6O3RzuWhImu1?;3zAfeO$qtsUL>< z%STT*-Ey-dUipOiB~yC#R^s)xbM}?{*~#kfZq~;c*?v^&f#Oy@QuDqUIdeQPd-vpa z;aUh~EEy#8^T*qZ%#eIGsKm1u%uvppH=D7&mW69i87m6|B6%h(3cpplI-LhqcIrHO zp?|>8c880PG=r!a8;3y7RU=57d?KoB>WGArqG8lD_lO~?%OE{m&F6*avg&ix|O$A+_ zctSt>`l+5E7S~&yKSS@~$6%YAeY|eY4o+b1`;(6~`9VaMmG+=g0TOQ1mP!HnJh#pIpgBTA<_s%`fS_NP3BlD{lLh4?H0U5QPfZVsUm?O zFCicAt1pskCaR?{HY6p`aY((WTm*v2H(Y_PL7sJwGA-L5wYiD|y?$U5Tx+~vQ&DfP zc`Ko;B62X!e@3H@hZjb=Q$yGv5NTkb$2=Mk%Yz7rgpG%^RKDcxA9x=j=m9x7p?SA861!5>XmiRms|5~1^$Aj)q}jC@vt0dKxa+D(yux^97kUYNVxkJbKMm>hIsGyIzW z*#iul65oGWk~0ef+gUZ)Sb7jFf0&CQKlHyZ6>3bIVt>Ur0#KNdsq!x~hwN zEwNmuGB)qqkDUaoNaS3Fl!jd2jE=v)kZe2opq;mK6yYFeiV-|=Xu?YMEGn++Tgb7G z^tI&GH@B%OstKHM4*w^-xg?Or;@#E9UegWzrb6V5D_KuhrH#FI z+;Cr3aamGQfI0mk0_*Elm&tl=IfIVXHT>6#z>PIymtAY-@wRH7^E~44u@b1TF28Rz z@lA~Csg`=gc#`n4ic{G4s}_Zps*DpHmR{4kPEI%ITD}Vl1!vq`szu7DF}3N(rzJ?$ zS=fHxr}z{X_9In_h?8kwId~w5M9Ra;C|f&15@BG6jZ;x8RAb`l6F0^zc)!7D(#49W z1@87t`$X$zNYynP*>lZlT#*z^jI=kfzD2o@|1Z{^feyu^`w~;?Au|J4CFlAN)%;@M zzNW8gkZ+RGBq?38bYu`z_(+YtLTthYtNJ>-9nI8_kNL$|YGCUl%^WLPhu8cRLxnEf zI#8_QJX*O~CyiD1#ZjJpePns4OKic~s^a55BC_F}A|8Xn&i{|Mw~UIi?ZSoyQ9z^} zkZzC`mF|-6?h;VCJ4ESDQ7J)kXpm+==?-BSy1RxRn(qSd=lSuy&->5&U2A@@n6+5U 
zHT&HA*vCHhaR$ynv?5}AgLv3(7#=vi(`H6DkY2GWT;R;LMxi!s#U%%Q80x|a3`C7! zT3Y06i7rDsn=E6U6mPFC5+NUEEIK3a#;E=D$!9yZlbckF6~&FQ5~hjq#o%%@^q7yX z0rtJV;MpJwji#Y%pBlr3 zdcoW1f@X0;%mfCiOG8zbsc$1(dbVHCIlfE!>T~?&cl_y&OXM3`{;kHBA7dM+D6DV# z+Ri;Eo>}+GUhgCdv&$iC(%%6u@xKeaM38}^c-wHx-=)u)7<**5G?Wt&N^P_eUny6q z8*wjcpnKTPs&MyBOZ>1!61Xjh^0sqE5nbCwr0!=!|4K`v>azK=*;I$YA_IkI%A?23 z(cCa!tt6K-cSFV;@21xsAvCgRtGJFPp^+GbPF_#5mg{3jY0ShW^#bFeGyCrDgFB9L zArn<754tfkUp7olGjC3oES(%fJwX!>EPX6%^hb6j^dL|6+9TFO!8j#swHGXFir;>e zAt#C1ls+pKO-h_jnm1%z-eOXbY$T;STeOs_i(fWpfhncwmllerofZ&seC6v#ho>d`CYFJO-{$AwMY~GJmbU}kpLSUzY zkIHA&Ji!ihLJ*qUuV5JZFMtOCKb&$P8GLx$?ke1bKJJRQC;F3q`RE6(jI5>-Njgim zQ3KwWoU+{_^4+CutRY%+Yj#{6>1Oi>ATfEu59bf_vYQmt%RWI{^y+JuUFdSBY|9?- z(z(`=0p!~{TihZi9!J)BP0rpTI19e8;uizJg9dksj-Sa5V~6}jgPN6lhYjf+fvfpZje$f>T22q5wG7Wgy8EEqN z{O_9lIG{_A2;RxXao~AYl7jq6l=VMt!hac^CjdD3L~KW2_@A!-zrBVGPz+O0KAGJA zdawQXJ-H#h)IZ@&JKy;yggfbsdAV zjut*Qjq4JCi<-4yEQny*%!)s+jx+#+-Q`equ4kYYm4zNoGj=r?RD#H9c;AP4c^?g{ z$}2@anNnDBp%d=K;#7Z){gwifBZ6>9HKiBr1~HoR`-kn!Ffe5rgzt!1B!BP@i=9d{ z^?o4ho>bVgU9!2Yy8B6EQm1XqZ9_woj$Z) zGdEc&C299H8qzeFcLlJKJmpnJnxX7m|sq*Dyd=X0OYiM+A?z`ksJo_U$ z(tQXCh&wfs%?-QQokuz`A$ZTzIsf zlAEuKIs=_|L&9kyP9qc~W^DA58{Y{Jl@NeRH-AvY*xzN70l0eiy8Qh9t_!D_L)or3 zM?WHs56$d65`|wiU_2Dt1vP85Z(&Kasi`MxR99-PQ7NgbxNEB0o4CF+`7&h=as>kK zcyC?VWrL`&?dbFgRRIRCpimCJ7|q9|%dPs{!c%wDmR}{WsK0f{4p7G)RDV^NZ;Uv6 z@hGvYB+!itV6*hsOZwOfE9Ek0cn6}8xh5ZDVZTeUgUD^$ZtJ~Lm3jCovmUt=Uy+O^B zfUp!uCR@@BL`&=1 z5Ief#wohzB81Kd4vO^YP=erpD%+L%tMm%B;b~x!;O`NnE$jng%m1L}iAlRXkv6Ag0$6!Y-fLPCcwPf2F1~EsvH%0}H)+cM z`Mgj9vi*tGx4p>Q1>3RF>D6ubv55isb-xl*Pt#t?AnJE)8b79A zZ?pa6_Agn@m{4@7nkGe2h&6m*eUh1g%ES%vB^^{S6Qz&tfqcNGI)dL81&{Bww%&?k z->sRhI_olM#_(LsYplhqs;UC=e`60iW6Us$*$dX7BGUJ&MoiN+;p97 z`QTY9e`RSwNo#P>g!aDN!|IaRVS?l}PT*kZ%G~kzy{h`$)U9r6Xgs7(U)k zJg%YS-qhF%jnri#Nd1TGn;3=Y4`veXROJlc8}d8YZuCMA`S5I8w9+kg)7YzodGcV7d*IR&fBbIDM-Ntv^R5WIy$J?Z{AInPg{A80> zgnuaOGUoJ4-Y{t;?4b@2&QtMlh-U4 z@5CeMqh3X~+XCM%DAw%^|N%?#>IQap4gx+<=|J4Ro$FL(HsEkOlM`-JGRB2#}R 
zqHF&TYSc>%5^t^+Lv5}<R7LIz5up7KsUM2@YRK>oCoH*iI>c`P>d1Tr$>^}j6{O};)S&pPJSOB2akL@Hl{DvS7P>LzRUWY zFknXGrrm(aZsbF%>YLJ7ybCvucV>iO<}~QIbz^TJlC@G!{I(9OKzIzQio;WV1j zY;2-3!CfCqMW+K99JiGxOBryO33OV1RFhaM0TG9n0%YBj_`5znIq%q;01xEpp zc+?V_a86TgoyameZ3l&FEW8^In8sAqbl22mpV2V+>FloIn$jQ5R**L2EUiUV$N&a! zR!mmveN76e1E2wJ>|mzu2MI~Ha_Yizdjp7?^frGi9k@Js`Sfl({xCA?xd^Et)l(fX@deg$Nxzh9RVA^vZLvM>v<0@B0(1hd_*%VnuT%n=;12!bzWDbm4n!P(J@F|WxDbJ_F*bt30xpKhh5f)} zjxX%#{Sez|5tg6ZWTp5}^YSYdlXUXMDt)7FLn72xRlA1DwwadaH60^5H*McT9Wh;v zJk6wnDZ`fInRnIkB6>Iueu5ton_gZIw0E58X=u1~h6fZ1rC z`No($6l#3LODQS6EjVmu3=2j)%q*)}_^tuln^o4+`Dk9ry$IK_RyNdz{;0IM9kz4c z9S=77fB$&6KSPdlG~wC%A^Oxqf?cD$v8ib%Tb;3h8AM?6+B}X zf;Sdals;bb(hJTGE`(=1{_?TtG5g1=Or@#Jj<__cfKNEecnzn-$Y)UxgQjH5RiwAK z)@(t8M(9@7U==kr=9W;)?oK=?FX_h^C*!?g2rh_(EgH{paxW|aPROMfySWr@^rCFN z5c-Tc|7ci^D-IZGB;LgY?EAl;H~CQl_C6~|5_98-$82Egl?_3rl(pT%%f&>BOWM!R zbhP)k=$DHoHWDH(8`2CyH{HCSFM79qL*&0X;(boUfgVR4scvab_il4jXrVj~&8`wP44r0%<7ixHo#R`f&*LC8xDTE$S!qK* zK(xDcQevVooZ6JwdgOYNei~|Nt-W-zae5U@1|z(WEhqWbLpnv% zB-s}fw4}n!R9^ZyE#NGpR6JeP3lGtM0E4b$&7X^zv_m5A7y0ZT;Ej6&tqLNYY>*aUtQt|Jza0boR82b3 zASDtZ@EbNfAG&}~fDUceNmC&HAKm~c*OT$L)WNl^UNmtj2n}W1#4S>woZ@x*D1}L! 
z*mI5Fd1pyIE3R)8j?T~dR1ADE2>crerd~esp~ievL8aZbNtH<@JylBbkaQM*W~8BO zM8khYCK2kJWk!SkmWq->OGD7$=|UB!YnN^l84-{$JiEEnHPth7b0@D@ybPafH#TQe zd}V6-m5Gh#)lnaV9&b_C9U*62T9V&BH&^m&QD;k8ZNp3~vir)|18-02jwD!7{P)vE z4I`3XnS#2+?#C-7MJYf^geN=onjZ8n9K5b!UZu_DE%@oG7AdEJ>FY9E)rqn9^JV`F zpz=foNabeD{bI~2&BTPJtT3+jmMZXdJj5&GL4sxDZ=`D_L5*jOCbZA2=>hzNQ2^0Z zH$||9FdW0FQBG#py^XQQcdEI32ID~EAC)z=10EO^wE(F?S*tK-tg;=Bl1tHbhcrzk z{vSL7bfgqPw2#rUt@)1kac6k5`j=HyNDP(<0}blvw9i}liY|R$TKTFAfBNvw!2wN7 z()HNzf{yA6O~*hm9R!_P-kq0}{^p88+{7cAsvc8Z=w`aY{-sO*Yse;(P7vgE6?gPg zT#|56DW2!cMiATtoCyoKC>Xhc745;I;CiXjr$T!#WqY|O{;u1WZ2er{E-?PzXFIsl zL?*jvN!>D)NWA%)6JlbYEz#E@Z&ROb&(X6hk1xuC`qK|73*wK}EChdGgRm z{G|D<)D~|NwC;yx=G^rGar~G*)rQTt>_LK zukOyfGeTV@qgj*PeQv5kCQe7Cy$zwgWdT&SX6$yZRPzA9X2s%TC_QTDCR&1f@bU|;=Dk9q=y=DT0utUhOo%;oFT}Gr@M|4Q*Fd=ZQ*fUnvcEF>4 zuU#v{N3qxOD;?=%)*Ruyi*_g!G_=5xcol&CAO&j59KD)r>pkdszUPS)ayzZr5&)*K= zq&lC_9?2?*+t{RfxE01h%wMkdU)-ngIkl_gSjjL-HgKa0#v}{Nwv#EF`WTndjK9TQ z)LvK$32kh&?N5i-9oAVIit6iI0yoc)-kiaG(;KF@UfeJ)3QDjY5?+R_rThPe3_6?l zeRkxz=sQe$>#_n({#IMt6(*G9__9oz_eV)Jx%G8&So>UYd&%}&Nzt zcT|~~(j*g@8zG2W&Smp3P_4+`>yaK%Ao#eDV3m4m1|gA>yxPas^wqZ0L0>nmju8Sz z5ICm*`bCU_6yh3vfO?%e^bEP-5(|ye*^_Nm(J1(GI~u_|M#x+p2f*h$F?H!*qz*_N zx?S;|z)yUZmKvU}YSMsVkh7D*(}Ui@ucU|b_hU>>%i4Gb9FR|6?NY2l%v(gKLS-t{roi8Vi7&y}0qT3o>W@}#cW@%t0l$ZxMgndeJN zh$d`1)ov;HpUFan9I}TbDT1No>Lz8t5-?nUqg10>smC0U- zgjyyWN+f05YnXss$ja&0H_8-n$t)uW@qH|Kz;(!xi>6j7VMujA*<`l34M3?5DT*k5 zCU91JgYLRLt{#}wY}e88fH$9|R9#~n3KM!&Z{_4g7)i{3PxVdmMf(k=urN2DYbkdc zbTJ^Q*?c}G=F|6zY_C$YeF;3YCwKXB|1S>sX5Y(}*w6Wit@>ddKn&SHpcU;h2l8s0 znJh4CFSvJqzkzNoFWnEN(*JBYB0x?*`3C1^WN@wyAt@>Oy(a%>TI>*%>?TIo9|3Nh z)&!=L10f@pj~p4*kL}xg%s(Gqu!j=(3i!6@$l@gN1J;D}hMva{+_A1JsF`i%TsuF4 zO=~PO06i%_eQH?ZuAl(+p>EB@2gknQ&Vp*&DJO?+^EK}s#5B+9`jW;2?+JPa{J!fI z2CC3f^#qJ)!Br1#c*u>neFQipFZ)OxI%?j?-gNn-G)#6QDmkQ0BFuywpKduYd{F2j zf8WI8yx`sXNP|)3Z#b8&>m$#$?1fFv4szDnIxbF^6jh;v=HHC`uF#c)j_2q(j6AMg zv$@ol273n7JCmLeF1^2xg*WH&T$hY}sYiz9VCvDy4-ZB1+TGyZa+B6``aB<=E9Wl6 
zBbd{Iq-9w6A-{ggt`_evNukaZE?b)eR^;7}la7+y{9k%z+Y$$d0{8osxQME5-XfNh z8sHERaC?l^Cw)qxTl23IEmBStw6?fc0 zE;U$Sb-#sF*{MZF+mC^#Lb)}uY7JrC5x;GcI=u9xj}_|M0D)Bi7p=02W>l4PWm21A zqE;5Y@E8=zwIS*=sQ8rAj`BQm6D!3)(SZQ3F$|9D>Uz8h$**W6_V=wI?e!lJfxKN3 zL)QEwYZd;B^ay7DvZ3b&87+gm7B7*8DLJ>D02}1=k!xgP5WSI4b+Yy*9;~piaJ~>+ zaT(I3W#re%n%8sazCB*DSd~=kcdWsU<_Oc-_Z)LKGM)TkIkWVXj4aB1?pmVzShXh! zlwofE&R@6b^V!FS_{O5&GcwPUL90M)diQL7IAm{W8yQpy5xnaAe(HIG9h@g{z3wcg zN|EoO06CyuUa@tiU}ip3@?OBCE4`rsPs~lUSe<0}TynOww8VX>BGNK+E$2_SUipsQ zB=QEZPs1;S75(h5(chYNEA~0*W^vs)CK+2eC&{h<%HLNgH(pX$*lJs26r)k7S$jdB z@dak+skOq`xwq}Cgo}Kg&iq2?@JP<8BGiX~z1g$^<`IbI36Fy1GJ8U@vnARDpuU22 zrZuQ$t9f%^*#xJ9UkWwbC06W+7-3s#dLtgmlEtvRYZ|c2P0?7&1syy9`149+2P;|` z3s9Kh5uX5CNu|e$g+~?SaF*mVhgrRro|P4QDt;CjlvS30Z!OK=D`M}#?^3!}7(|EI(ajtwrHYRF}FFydy zWki#%*|VZvgc4&>9@b8<2?I~;wD7EfjgH0}toeOoD?mn*j*4k{g6p$dX}EH#)qyQ zd`Ck3{ENgCw{blGV(53r!>F1h+QQ@ZfM_Q&-0;+^@$Ln>pcvOiTZEGgaaAN zZ%h*r3>1T^$VElJ{crBOzvA6qK&9v$%A`@&)CAL`yyE>S)#^{~hbR6JTTKI3Da9cR zhHFqyJ73H#q;9Kv`UzfD@w=hu-fmrX4)5Od%TjP|G*9~QFvscq)fce;YmMuW$3($5 z0?R%Fx&C`xLZ^EMgHL=*M2m^H_g+D#LQoVZG2Js-L zwI=$zYpc??goYkO^r@>w)PDOpYhLJQhN#se!9?4Ap^W<;P01JM4>4&115WfeZ&hw2 zGaNS9F37{^xICAs3t3{0;mZ8|#YVeDWxIG|=0V2_3QR@4F{KC1dV#?qS@88Ij|B&) z-kg69X6ndtdx24edd}K?Dg86{{gWXTxDQ2_VL4eu^Xz+Dk^^YAGVi;({R{Co`cGe{ zj$?~Q@6ybweds3~&!J@JD?b@z=}WuD?rE!$aiBtC{-aJM`ipauhw@IPO_yx(@$pHN zKD%f5iTXB)sh6EcL=?U`OeTZ`pl~J#{&YySgnlyW;N=lN9f;Mi0>^#MIfbto06Bqy zB_4;z*rUf5z89ce$Wf#&;*#z#0aX9-qxfTH4{!3h;jz;CgNKa=n}=Bko=KaNv*BCm zANJC-HM_UzIrc@H{I2@fwCkKa@mbtw&hB9e%+G70ADMMTwvI9mSj}lRkEf`5$VjrdNpt31zNZ1PDh$gIDbk_y-JNwr4iaUfBo$u5hS{rX)4pVH^J zy@>1O_!r#DDtt8oPtV!PfW1F~<2_5KUxSR~{h920N3Pkj%mx}(Rc1-G;pLfeG^iw3 zHQXpCD68c>Syg*b8FsizO3OrpN+ZBT3ShplUs8EgxpIL#68T`vEHGk!ni>hO>;)2U zx8x=CLEYA2`SMg$mthkb=b~Oz_D{AsZh_>XF5}1iP3*xGFv0~66RR2lSY$h|rvMsf z5~udw3e#|ntI#aGul9vmyvW7feDWFo-z=k_m-V3)IeX0Qa}WbvrjiCQ=U~bWD!I?P zooih+4oryvl$#eSJK|KgTF=8mhc_(rO^iBBXW!mRg zXYzvv#fI={UUGINZ;W_*i!lp*&m{L_XCh@q#W>JZWyMQJC^Y%muNl`&Ikxn0wGLi} 
zB7H}wWLHm*%Z}B_@wk~%Tb0&ac68lJwRO1_^bume#9mCh+6H|!oN2!V9V2R~frsU6 z&3@Vy!Mc0M|2v#$+okj9(jtr@{XFJ#>Yj51?!*7owMD&9)N_6$D(hT|`0$T|Pd}^= z;4TiL;s?2|M2~$3Y8ysSS<``*iirW*!ED5nF>xFaNf=dUuE~5{ftVq-${Z;z!>A)L zEuaioMXNKx{vH9cca<|<*;?H%)0Tm*g!96TAvBc$gH)Tbi%4cS z6GKLe<|Z^fO&CHgImgc9->X{7tqyq+<#ar$h|HwY4P&!YgyKq1}Yc_{A1 zQosed3e3ZC^5yACL+PlOUg%Z=TkX?4B_2Hk`<6C;48~swSgA1J`f;=~qU2-ilLUSZ zfr;?_3yTD(NOD0bju(7wD$^ z?It6+QMKojz0>9TS%)8j4jq7^Ry)Pt6P|dMGs>RK-hBSJwCTh&p(Kjfm2$Uk%LLlQ zoxmt2F3w^6(-<(BHCYb5a%fxV47Ha>)7Ve<-ZxhkTEH9Gvx`TTsAu}eea0`q_<`9u8^pQfu_>qefPcdqJ)T}H8Nf4$;f zL?D3rWx@_C7Dm2MEEU%ns$85O%XKEiG=Ce*@Z5C-EVJFMAm6WEd`uho>}1*#8Pg;o zE&c440J@YJuxEaY*(u`1=6btUsOOBjukBAa#K?iysX!LBEVNILk)kN3XyTisWMXiw zL)}cqP_6ekIAfgT3QGoe%59k45@hag-shJ9%vZ3HlAjrt70aV-lawu#vML5Za&;kd zo+nH!d{0nQ`R<9w?B4e->pwz>^P*ZVEf#-1+{H`We)cDDzMs{`K= zv?X$)$7hyC8ZFrZOVfu8-}3X7)20a)&#Pe5fo3thD%}s36a@z+-cdB$Lsr;G-PYs0 z1qM0`$Vv*T7QI6AbDGQQ&GvK8P)Th0 z7lhB{rMflp)mRRyH=Z_Dg3r~Igg;C)U!uW2L(%G%0ivK_>5ZhD&KCo=7{UGLt9@UJ z$M`2pH{kse^74^A`_T<{Ex;|MGY0a~>r76*1{zYyQ6Mq$43AZ9#Pz2l4<`pQb_QF4 z{+P7$Bgpw6D>L{5t`NyFxokeSDB^`?ei z-Jqx3ZjGi#ZEDyNR$ImXY{(mRqqovPdqih6+4DG&SXQ))s|H~#0ar>JDzV>1_ieHi z5R35xa&Fy^wwi)mGjnJpS}^RA%ifwxe|PM zYej~>G-cUcuu1&gU?ZIN{jl-7J|Zg-HdlA!4a@zc#YiyceK1!m{2woXn#!-0anl!2 zLSZ69>utu)dPH5D>qME$hN-)^wl{5gL$y4^j~ooqYMnA)Al22m zk}#XCkBUNP>hg6*Os*64^?Y|s>g6G~s@ljcFTve_ep>?Y9j7d9IC&lK~V8+C?VSRf<<-?2A^kuwSoPs+mk-MRwPEXqbeYOuD% z|D-GhkIzpl#BtW+C(%02x&=qaGNQbO_ItiW?}XF{=5gN#q5- zeM~We3P7dI>&UFK-KeOY9Ws`)hBR$`aq}lSyJnL3Q=#NzgF>X_u8k#~NH@a1ckij;w zehi~8b!LQhC!{8|lNeP1RPR*v3R9dfP&2cT&&;E+-wnIDxIBIxO-jc!P7(C+H3fJe zF6%OgFfFHJ3M3J}3$VdOU{Xc~(njtp1(l==OS*ark9pBrpB~Q8ohOA@8T;KXjfcA= zC2ELMK9_X|fy;UP&Dwu>r8>H59?hONnao*OUe%!esM(5cAeRxVllTO5g zms(+ALbKG635ll}D1Cp#>^FarCD5W0j!)XV_N86;gs?f6TM@r65qjV5XT1edhGVb1kF6Hs;c%`N@^T z;{t}fO#2MKSXKCjW$O0f=Z>gb>J)?Bq30K3+MVS3+S_DFe6*U`O;VDVKAm@LjpO?F zII-i^L2qeCvl5?V&dK@`_5xzgf1>ae4bAtxNoF_|aNkqf4M6EH_Oc646>1-36B>Dt z33?wnVXU(apH0D=IwD5W-8y|2&R}tqm4wxHGane$80sX~Iy&o=){U7f!jC^EbeR5p 
z|0|SWPElE3m1TGMfyZ@FUSQG9qh;EJWBfJY%e(p5lB!xNC(WVeCnq5xPaB-J{xjAr z@B*JH(6ziZ_1eQAo#b}4YhF@>aecA4?2RQafp0kh$?94bxB`!}3Tk>11xieS$R4-Zh{7b#q-&&B2}^TgF_0?2tchYZN9lzLwhmCU%EJ=BjJ>Xc*WmxTNFFD1kP z1o>%ZSAYMn9JK-W5a*d2y=sf4g95_(DM2lLGwLa}hD+q-Wt-#aneF<-Jzy+EV}HSQ zJ16_fLRF}l&b2^WdU#8~@sRqe-vAe4M=r&Xj4U)i_;m}R6w|ygJ5qTRIcAf?7)wN* zX6~y}AFurxJI=UT?m;uq17D*o4LJv!4&h?O1uL^E>_L+}>@dXq+;IIox9kCB(FDqu z8S==zb+pHVT$ti9TTy_(`_Y!dv+@8yo-T;?EWkjY$KRTRz^9%-r}tn~Lh+1MG9CT} z*a84bW6=FhYzdb&w0&}mff@;E4nwwHzsXQxOBQig&*I5d_J2hs!c-@M11oLXd`K76 z6S#V9j|Fskeysrs)CEsuS1LHn1WO>hPKfdg>XKec)qx~vDuQ;X*MBJWI=%9P`^hI3 zd5CF61GcyBm$MD`SpRe)4N`JGm)eGkL+&i%BRO`#n!H#tFMqlAV`hotodgMxU7Y~H zVgiV`Mstns!PZ)aFQ}m4-R3lf|2e^+5JIAG>6MVECh=|I1$(x3xB|nPj6vBsV4$7! ziXGc#x3*({%2`iH$$i!@W3vUwO98yj7Tg;HpE;JEyY_H`uNLJ7K}l;hoV*>C3uUd* zZFOt<`CrL>w|~)tMhrUZ>HTY&MjIxDFW!8LUxwLhhiut@-n2ULBJ-b=kZ*Dbw`#Z= zASt(*5H`F>nc_iAW897{8POf5s`Y7sbL7b$2mH%pXFO}$GFH?r(PW;RdgPH!psc#B zsuiJ^;~Sxjb6UdZ^UnLTgr=vnFkQufgFp0A%?Z}xK3SB=W|kHNjnY>f=HjoHnGVu7?e5Dx!yO0r`h4 z^!=;hk7c%3Y?sHwOg3}+`PY7`%qVVy?c^4n`Q;l=JyrQWh}+~g=G8TYWdS>#!jQ&$ zI=zPi4L^Y$NRqn=BHiDVCNkAw>jjb+yFP@ImlXP(0E zQfSuyrk@@rqp%Z{myw#Atyy>295Wkoa|L%tI9@Q72>-+2f$Y15{g+A_>>WGZiUUKW z=5P5SQ*T-i zp{s5lujSA!dRb?WQHcketn{qN(%bc|{KJN;A2y!(&;JV9we#sRv>!ZOXYML@XML@4 zW&kZP=;UNDYKVR}sKt08$D#A6ePzX*=WtssacFTf;j;4rm?=s5Hi-P8o)Fykx_VAb zOcute=^bi6+0De?R?~w1967=zph0xoacagQ;X=k1+^d~TKWl&7xF78NBHYG9R$$5Z z;z$kJ&!8ZLAt@QOd#KMmV(5b}sOq(=@!+Vf&CcTriZI9=-8#aa?Lehy+JcVd$RW>q zEs2*_V+;E!HF85%pkr}dCrJhwLcUbyerF1JCFVwEJv>?lp7#}b{30LK)!?h?U+w!~ z2L6-84hQDzg>geeG27!3UT4Ac$rlGHR!ppRn$+@9tiW4k9%H&J=U$jrOb}+|-a(_x zg@s#1X(Uepch+KS&}d0ByDq-ja{##G;h^aM_UM?a)8=u;d{mQ^1qN4i&n0EyQ#+h` z(hOPN?CuVpFM6~7c#)_U@TQH^-(nGgxE7UkOd30V{2JNf&%Q;*UldIg#&7Xgpkfj6 zkGpsp$Zq-jR3UE>TBaZa^Y#&k2|BHMvr?T(yo`a%rxh*x-$Yw)dnx1QxbM@y&)J)= z0~s>TOD{a?DoOzxug|Q^ZgO$DY^tMIXKQeE&?(Rwk~($aW-*;M<4!Ru?j&oBu-7k> zj*fY0RjUCzVL50MCv;RZJ(7Mo4T-Sk8^2Foj%7W4b4{veNaYI3Y&Z@>HEUTd^!;z9M=LO~t`j;Z8B!d0XdUu`*+ 
zJ|>vs;klh+4~ZPK71rR?P?mW!<)%yk}GD-u|zPm+i}FXA1cI9PbXkROs~Lp$+ET zt%82i3m2XI{PQQxh{L!}E3k7+eJxL@F=qRfPWD|(grsbuzuym1K)%@cBGDf!C@_%P z_RWje8AA8UXR?&puk-X)kLsUs1M(l~pD4(x#>w`c)~gmK`>?I{3jG1>O$%0n7PuJ=GvrcFpd5J@u>UINmbhz5Q)Iy@+Nb zj~dvYNzIUtjDBmvFML{qlh}^!4Cq?*E(wg!F90>`xzNw0f=_gS=SYS4U>hgkx>tfk zEw23uSkmjo62zZFy%oA5kth$+SWjDKls$TKBU1orJ;f zMbdM_$q@539f*E(H^AX7D!SU6_1uH$`_e@7pU{E|>%tg!BdSd4aMI-fD}bvc&1oKi zA2aBIk|Fbfg;-*n-y{;+Wa6@L7!E5V_71Hv;%dq_GFnx&W0b<$sn#Bg^}I)EZ_}u& zok1$Ue+&N02vJh;?cZ;Ly;yD>X+B-Y2K4-v?P?}r&TOR0?YC2P*7am z{iJFg2WCD!khbJFCwLQ3Wj4jf=9>>Qv*_nh^bgxk}`zob6aG9DdHKvO* z#@;|YkO0s~#g5)-iSx0;ESeV=X0ViMTxPsrRC^P^?@R7^x^RQXJDYg|3|Y~DA#35_ z5})qUY?(9qs;W)>Er|3vQ!V{*LZAEiIWWPJtUAWofsUDPi+KF$MpwvwwNs#VSv520 zJ5o}cwW=`8+S?HKUiFAIz*8g6_dR#_F zya~0to2s9+IBY_nboc(9p_nJPs|zxhcPtKVi0LW1FNB_EW3#66w*$_L zMhhUmXI=F4DnX_{7a-3!e_R7LZJari4KT@D|6UQ~9cd8_9YX&+wgKi=2KPY~|Bxgo z%-Y%-;*}Ij5Mjnf9#jHZ9GxpCR+>KH^I+#n9SPZlzR41{3~iQ!c_=-bjEEHoLXoGs zv#D77U-Z=Mfe97{W=kdaNqgvZIjgYDNSsn*azN=#4ZiuTKLYk7%|urY$PCtD`yP1X zz|!}*9f3Ygz!)B|IH7AQkdAT>Sgyd3HiiT4fpgw&0weXe-=_Qa7v4Lc0l;cN1Tf>h zlSY9gASVGn~c&cf> zBFcgv+HPuZDLN5K{MZDqvCH%z;aQ^%MYZrPCnqr8ltJ<0j>EUlc~iVd$e*Vm_P@}h{n_&v;-^6=16vQI+~K8(ww{3y z;e!U@nx!iCYHDke!h1V{lD?Lz`moOUxFRXQO6|BKm1v6waJi+BdtHGAq|-4XYYT7& z5YkujL>zm!UW%6-7Qwf9|L%1y^T*OhxO46igxo3ezLdhGQZ^Iw_GSVlU4s{{>J$8q zc#1KT**s=tgIxp3aXakSHn6vm={z{TSnF&`Dp|R^RxM1={ejJ>SP@iOWet^&MD7Ek z=3xkK0+~2{zg|U!sG2o=V%$i84(JHQzsPDDI3q87$qKtTNkx1z-oPet3ASLt8@Ie` z<6Hy(*W`|sVpUE_p>0EofrgDx!TNC5wzerE(F9xQ_MZj`W)cX*BtG=JoBX$arC7?I&hH#-m>d9vHblN zs3C}Jm2!eo@7!bHK?oDwM&ac4;OgmS_ z*ukP)BrP;F9E;hrPu+a%Q9>qyUcmW!x0LPf0_7flerKbSrdhjE3Z`>WVIe-UH|Pz> zZIF(Q?x9q=FtQ*j-KIZwazeC?2;kFjd2}F_2-5U2{0byAySwTt3w*u*r-cM7$*M8` zUYo10!_RL#SLsE(RwfQw;VBi=3oSpbC z))i=0?{$Dw9q^poQwBLOe!1Cep?fD;Gq-d+Qr>eVPrqylMQrt@oP(7wc%t2LHe{wExRf zB8-x2BkH96?wp6?+LGGNzkkBb+conpUBa)eOOa=-?ah&Vq%urDO(jxlZYmN)!mK;? 
zj@+CW2$5UL1jm*`w+|cwoH0BeEGToB;l0k&xB|qx4&^v{(|@4 zY%GFvXZ(U4I`ykG0q{xq`PmxlwW+Qp71tO#?r<($h8LQg>SZ%H>!C2D;vP|iQ{&R3 z1rC;1j|*B89NHd;;|w#(H)wD8irW2oM)Z_(+%+YWVekx{S}n3-;JiU$g>Bpxqh4vk zdtI+c>A|HTlH$o{$y|FZI@x8n7=JL=c^sO^cQL4a8=t@j)lcvhs`HM<`PqxusNl>x_tMbF(TW62 zElK^`-yS_3(%8CyRL+U!6C@-y`Cco%VeLJvXXBRs#(IaeMzxv=V7EfbwvriTb^XvF zj5Y5S9*22jS_aqKeG6@f1syP8G5tY;O78JI>w6X^qL)lAnY9DB0Q{@qYrmw{IH_xl zYD#_rcAY?jh67*n=9_C&>S{F>aGRDwU)n{=pqW_v50oK--qu) zUIWI$Ui`*uboT>q4;=P#SrcyUPRpw!nJ`t<^- z(YK+_)t7*_YJV8}W^mT?C$~zI<0UL^*Wm5Ow^u*5AqeF}NJvk7jtz>a3NDVMiUbns zhXd3%G+j@gAWe8b>dJkh`z=Sg9&z3bCyCFs{hezYZz0ebvbqaF%kW{GrYss zYa1)sI2j`j-p1Su+nO|!q5DFCga_-l?q!C%HKhai|SrvcZ=6XnEy;*RA9$Q=D zo2#C+flui;bJY3SOByBg9xf%a#Cx=-3`n-|s|ZS=Vyz|6ALJ{k zXYj6`zLqcrEk{>>X>xUeuMNe;tl%0Va6eowB`TMC8 zK5EQYg98J&2VU3fTbw#a$C~%IevQQ`Xq$T0(tx9aj4nJ?2v4S4HdtX26O+%9dnl2= z;tY-X*45WXs_MHtIJ9l+7=6?1pUg5MlHc2%YPL=nDlw|TWE-gHs_}HoA3rrixLsEG zgoO)MWbf!9KKLW4k5~A#D~(rK>zU}xFE0m1ZT)|AeRWt>UH7dZ2#9n_N_VHUbcb{! zjdXW|bazX4cXxwycXxN+eSF{V``zcpf9E-mF7{b-&N0S}wYFm*m*?3N3NNjy$`{^t ztCRU#wtJ{Ox6cfRaB=)@b7kBN79*U5W7h~|TYX`sx+~+JqPf5xJYvc{eM$hfR48J! z!eZy*v6$!Ft`;dtM#Cdq+fiiwqB=ASfz}}W56uewx&gkzc?}v#T036InFnnDNkt12 zt%NEZt`=|?NErqSrU4naIE1K%vd_0-!QY9%CpuF4A!!Iqz)%OwU%*Mo_%>Yz7bc>1 zB|vtogJ8wI+jzfrO+a0t@PIh*dnM8om#B=I(!vorAAykCb$;;^BhLABHe&F@9UDd{{R>oGK6D zT3FZ4{f;~b=Mh=p{Q=mSo34Pqp^x{wXtlFSpx=tS zo6~#n2~B^pL(e72)!*F8b+kVGxr%^LBa4P6_X%W-_vi*B;1AG|4e1Eb=E&Zlk$bc8RN5jI=87~H0BWJJWnBQ3gK)MIat8=o(e zN{^PtF_cx)&S{Qwy4pPnXHa+9n=|K=KX`zMwG8gBGyAdM@DW&U30k$N`3;W#TU zY}K6;4RIr1>IM2_$zC?r?&(%a($zCdSH10WsV|KhD~X8zbfq)av41cI?p2YJ#Cl~N zDhSKC0_56m#y`bQ==E|?J72>I^>l4Wcc1AX*!bl?^2&y|<|7maD&4I7c;QdgzI=Lr z%#IUwpRVuV5(v;fk8pO_rNF9cY}V_PEBeXGkF%qTK#Z! 
z$P0ern)V6qv_q#XOjh^bsc+LmXB1gnC=zAsG09nXhq2P zUn9mQdeuREMxK-4g0RzJfSEo)^;5E(_RmE_nO)#Dr+#MQg$=>!;g#yt=1uRK8gI#X zy7-c|^PXEJ_NIwkdBWe%-kdL=5eO`L3h-wREbJ(+6I+=o7$NZ=6ZYd0cy{dEX(D!G zrDQVK_H`nh?YR1y8!^e!<_`!7x68BNEDJ;lBPICM?{*qVb~TwOt=(^m*48&RfXp7$S`WqLiQ zNG)}&NA}O{$4-I1dz&|E<*Zah)|x5Ih19knf@%z7zfr~pJZJ8M5T!3PfO-F#YLV3AEvKtZD!ZyCn9h(tiZvMdb^8x0;K_OveC1bTqn(xHr z4aYHudEv_b5w+o@?{L)pQ$CZZ(Kl`r0`R6Uze}HN?19_$tzVjZhpnD_=byaG`?`_ ztwaujP_KU;q*Xv~us`A`QY@yp4a;Ch-q>i2DBWK&@`}8D__IN@#%FiUB=q@L8zIP0 z`+9;WOd25g;$dBDArcB{!lPbnJVvlcbLRevNU`qGaIOva*D)NI8_%#v3Bh zovMAhv3!%$g)QT1FARWNT4t`#s230!hV%o|LTN}qWEdI4wXJ)7L~dCb#=uQUm@`$M znyjw4yJi|uM=P6isa5?9Xo?&$gCuN_rZDd*Y+w8Wn2eSD`(N<-*Hq&?|Dd9&r&&Rd z2Dk-U>bSiPtkZp^@gB7*_M#I!RZv1qUwWC7&cjzftTZR9P$_!+Twztj|qBC1h-_PlmXhe2VZ(PfCJ0t)!&e$+g&LtRzVtUaHpjD4qLb z3HQ20_G-fD;O_dVG6E&8ILDswHyZe26P@xLW;XKNm17q1W|K|LO{A+M{mzQ@L&#|L z%RwsLM8!9h925b5I2gNRbSR<>e-5V}vmKBx9Da!jNMZxJQ@=zDk$jyq| z6YahlN29=#$}JI{gXlPZ-L*PBSf$cO)hNqX<+V)1Ff~ze@g8hn7plpyA3`83#Ow=N zGzQVUGA}I)_{sT&XdyqQjPz(%>f1_hZ6xXq)PYV!SPwYmd}hbL^df zTq|3SA6j&S7QdApCXx>D@AId;piwgbtM?iSi~LV;3G(6pBP_}yCK$&!z_Ath@~ua8 z%#CtM9wJRbB^|cGXz?||dR{Ze2v~UTuDD^*bLVFJ&}+3}1h;r*e$Z4ZY_p9a{nkPO zIl(l_`lO44A$*;?%O@C_nsh#Gzn9v))pIcz@04xVLq52ryyP{{Wa5^tAruoAitMo=NjRz@cx z=^6A|a4n2xxoybJk7=`q(CxYt%1rLtZx1l+Pnmi(Bpt8R$87SmnC@yYn@@sMPz*Yq zgr(mbEMVQ=-xE?lgON(&n+PR# zkpGL6g1_zlF{W0N^>5(3jxl!w-k~G>Zr<17$Rqf|Kc%Xabit;gBC}duGcR|SIQr%^ z-Z240ixDMIZ^=9JJSQ_L`GkvKKropI4dxNo(sYLR$Ff;7kNZ1@d#i9WpB?6m=Y#;SS)lzN_QflW5iUvR`_15V_vu7aho9>05&eY6De-JI5A$usMA;abtDNbE_Vh3r2f?J{3VF)aI-`+F2;y-}~59tG;o#89b~^ z`(wA{j=^C>(C%naUG&QR(H`BorKP1(_ray|^(2eV`u?0(cSXhHui1uGCa)m zekbSmnvx@UQ!0rllby1F`==3GVp5}GUQrQOtU0@`bBngfM~*fPOT{CX3>zgDItxTHU~-bgo$;}3do%I zSMG5f)KvSDaMYf61fITTxzP^c^WcXq4C8;Hk|0w&tdT{Yvky-A$Yu?!m~#J4{?3jc zgu*(x{6YU_TtGO={sdn{)(2$MjjSw~IK;7r*|@p5#65&-@VmGnFuJ%ZG*iXhw!JSK zs*JCX8_emy`gZZgBpvV!T6N9oYH;N+;A!h}O7IjVNcRXBBZP8?oqh^zK=SSTow=Q`_r_Ty&!wodQ&u1J{Vy$EYG7$#G^ 
zsiD|xWk1%j1sIIi>8+WTE%}u7_4j#`t>oOP%-R*iH(h?X8RH|7gX~jupa56Vdn^|x zN75muFDfzCb)0dw+3sp^HIGk5irC7VgN!w|&X!aq((GRKO#Av3J3x{MHBgW&^d2hc zNSMoM`kE1xX(b_mv2+9i(#4|3S2;MZ-h4fDtp7tbU0jJE#T-9Mg}yC2p5Wo>ASYbW zSDnQ#6w8|WF-MlmG98;?8^T{rqI+(>r@Gk{EPyo6u?!fvEI44`gm+lD{Qs4hh+y1^ zAzkY>odyXz&UiEYR*V?Wx!R5w6{0;7yBP5B@T_d?daC!0iOJ-rUF9+54}D*u z8=9J`^1&Lv(DL9}TU$RCC;lV^C*|wt>;&84;*Ls;C4Svy@}`x;2>!Bu8tW@9MSR?* zz?#HJ#00fxnX9`+u;M|#zZ36UCS^WRQIux_6m|LG&&%~ zahStEE@Jaol4LtkBL+*FoS(VoWiizJQS)rIm94LZTW)(JYKk}M2J&#$yua1cIX)6S7Z4=HcrkV!b&XEQq`1>AUS-Rw; z%Ag}ICnOqx5C2h2UZ>E+WMl_rR=Lr&plVDRM4Xa@AaFAjR2ms;0Lc@4@*CAjm%shC z$`=aK&z>HhSaNr|HX-^*Hs8; z@O|-daU?kMtA-Do`#%CKV${1N4$EpydFgnVXiz*~oUncS5{=*u(QLo_p^)3_YaOIp z@*%B>8o;mv5*@w0pV254bY!Rk`(wziy=ApH^6CwG@t&tLgJ5@C&`O-T7TW8#Dy#(9 zv$kyTUkZxT=RQ{T=gfi2P7G@)AYGEf4A=4K-=+82pKYz{Ab5Iu=H%pj;aaZ1+bJxr z%(e;gs)M$&ZnZI>aGFjZAAee;j2|KZCzbLFIqK~EmK9NNxyEqNmMpOcsIG*lJdc0G z7N07RHMN!u$hR7N`)yr*XuMh;j}FOih{#yTcq~qQOT!&fle3)9tX749k*0w=;ozhR zF?&ZFa{4{(uP}*hvV>!q-rfk&qB@aEQ-pjaP~{UM&BBWnbJkId4}u zIhLTSaPLR4J{|vNWqoHZq*=oc!At^pK0yvoA`f<{daZ%>P)@exTiH)r4|;df;W@42 z4CV*(<#zlc*78euS{2Xh4ICA9^&dCMZv@1|XL>G9TFyN;%(vu6ujTg2JS_t^EbSJ* znwz1@)*48&imu*Y!X6CyJbBKQ{dOpA61uG)2p*>Q1yS(l&HSRURDgjYp{zQn(w(Y{ z=|aqCL(B#tGY!=73An_o#RMTGrnQE)=U&F!qR7mMUy zeE`d&<2nCayjJkz8#|VNZ%XKqH7_8FckD9%??TUTjurCUFa)Qf_&;21C}q?Si9!Ot3a)J_s-h_y*t4cQT(1$VGkzA>5JviTb>q)MwSno|pR4e?L)2@z84eP*=xF{rF%{n${j;IJfDginGw{}*N4oEj zJHqePhfo+m9i%UqgPIxm+?7b+P2(e>LQ z=O!mn;rQhOmGG>OoN~utX>l>r21OgmWM%P}d!7hwrVm2tU#w;(>FB-jeEZm$s|o3g zWU3~tx;s2Hggh}9=LysB$ey(~J!^I0uw6MPvBLEy@NL!E?dh|Frz`jY^*u6;ARh`y z{_~5WSifW=@O~RcHy5Nh%qn#}UB=z&mqrJsEZ-=k(>TfsZmMP4`F$V5dRY_Z>Mh>j z66oGSc1P89Ht$WWY;2I9&^_;1UcWZTSf`Rwg<-Sxl2z|gR288dt);NOKUr)m8L0Q@ zE}y?%;^?hoDE*toOgYJ^!-W7*=G;#-KKCIbtSt0roc`QK=!n^fss~2G8F9$MzO(I- zC)L0j`qq=O;0?x!gf5#WK3!{wyz&hZ{Xu*GFIA$TqhDWXrzy8UiKulbO6?>)Na-eS zWrZj$EUNsrg=HJT#3K&g!+)3FiWg!z>re1i(Q}Pe5w4;;vF$b3b*zGHQ01my@7bA9 z31ekeEC~oT2r9Inq=41fV!ZP#e#QaMsMc$O=pWT!f!NI#_SbrdU?>1DH^Cs#+QWsP 
zI_y7qU^_iF2a-1|M$Uen4UF8AMCJ;w=~)hJQc_Z?sI3;M2Ah#RGQeBFRSPPT=>FS< zqGtnteoR762&SoyAfReJ_EnFM<@KZ#ad9bvZlICE)eJmlfHI$lo;3F? z9uNh!{n6r#yr8skEK&KgvNLg&XMa3aNLN=v((Qu-zN?QERQds$Pjd*1OYKF6m$I8s ztK0XzwyCl6g<$&Dx@_jSs0J-87rT;1P>!-Z_U_PA43x|w|NQ0t32+c@o@oxCY6B`^ zdjNk0`wjCuf&z-wm920F!eZCNsI>hr-Mk^8>lCunt&AOvsBS;O2g0cNj=@nd+&&gT zf67zBD$c3doo1j(^iR+cM|R`McV{adS>p(geb%`A6#vGAq@z~6>2ycTlV;#8-C$Na%;`6-yi(MvJug5`3y`n zn{RvOCt`Z^klLO$uyNQc*kpLHvC(JlrXC8^aWOj9Nev|*aI-#R!d5#b1L_bM&y;LCxv*nxj- zO{2z(iAkw8n>&QyTrbt~fWCD^g>$~-g3EZJgV?hqPB8BtDR2OOvT;RWu^K4IXQRG* z|0VY;OfzLNs%z&#l&!}h(rhC`gRSW}OllSUUnSHuZZ0a4q2IODKb@V1^s#94Mm?|E zzO44&n|s}~W6=yVXuGktL|ez{K-)d6j$aFGH^Ptt)W6ik1ln%rYboIB z95u-Goq8;wGerZv%@u8~CXNoU2ToLd98ZK^d}dlHu20HS=4gt&j>)JV#L-)|z!Z+q zAsVpGuBcbYoLkkzKZ1V-%qJ{Kf;Ht`Tzq`ra=w`m8`pb{aXmHDTRTKoKt&uoDjB-4 z-o9F4A<&9Y9%iBu{+0w>%>b*$;u<0cXp4qV8Hg80rzpj5&s>me54?%piz<$`Oj$aA7_(7&-t&PF$IrW%$QilhhqW~|KCP{|$p|GuSmy5q-?5qt4l63~fl>}c zuxjE+Urr;ut@)y2(xoS}+ZSn$kd{9r4iCD+>Ca1PgGW4_c*h5LjLzC^<@)7cOWf8o zcJ{1~1XTuue`iZ31EEgJ-LgCPwJ;Xw*JIlsR@MO?lrC|3Rq1U!7YVbBED?3T^xA*F zwa?T;yt`VnvrD^Fo2l74UTfgoz%DK*;D8^}Rm4kper|G+zBo}MVLxrE3>i-A;BquR zT8&`!IA$6cCO)a2wpbSU|HGGZu2)F;rC)y%IXN)vSe3H8f6*sZePe-=#3uG{4mn=E z+I@kbUPb*SQE$^u&%~8ng%0p_0g=E7;|CDtcazIu!-WCA96I`FMmXa;?cObTDyv4iKCcbrZ#8NeBoFKYiJEYx7y%=ivbspWs(3&Rp zD>rsKEQqQ0FDLwl1?O7ih(K*KxRuN4%RR{Y=t~iE$3jH(J}IfuA}TkIL=B-Zq^TUj zL`cg>F%bGRzwS8z)^nLLu}xm!lP~DG7~rQr(jb8VWDZ%JvhNcBI<(k!Yc{iqYy{9O z;Aukqq$y(e2bY}pcMesFaK^^@!qtn?pzH+P9Y=8Llv}(@x%h1B5_fcmfGP;BsdWY? 
z?&$uo)-{Mh?A~?stKXVS(f_;01(K?)CRm25JIf{X1=$!M94}DdB*zB&2+&uapVsi= zOnhHj&FBka*iyC+C>D`WGQ4R52rG5_V;Id7NaJD}%3w%_1RxYZOu&L6P6F-^N*n1p z`Whp8?6+{(ezN?7E0`dU4>Kw$$o&aj!H1eg$}C_d|60RuF1pQV{_=d9!rr=*Ozrd( zQeJ@nbr%a8=dYy^nGTgQR0V=wk!T{Eh6cA-`zmm?mg8VoJTj4Nr74TzP zftA)U?A5~qgbbUifi14!ZG@C=YhJ>$u;kSbd4a(ixTWW-k?)vGPAP3JQ`%(W*Lr!4 zwjz@_58B4Rr!F5)T#S-J7XWikJmzO3LqlEZJNedULwe>IrS+FUmlykI(EtQUllu6I zYfk>dZ%reVMT!gXQvnM6_JwAQJKgyd^EGtf0e|`vR8yRK>yTkvmV4k(pcIf#*6-5E z%6oKX$L=C?9$E$&58ekI8Th18vLJ!<(g5Fgwrg!}xKWh;5k*rNkNU+MjZ`Axv zCFxDUpP#~W5}D0jwlS+1nPbf*{V*LJCnuf!oOw*d>ztq(Gy7{;ff62)+|ZfhxZ0#n_Pl>ne z=9AN)@EMin^;6LvZ2cu8SO`Y5Q7^@E>-B)Kr6dqRn}gqkl5hcc8?_iH>{bJLIQkDP z9XX|A*6W$PvP5o5Xr*Hb;0{<0KOzp=>JfDfssX$hEm%GwI@gJK#M>TV-log? zT30uzzO>@Q-{#1o2*gKFO?|`7m<~x+2-smog;35gz{~M?e{%opx|r zqiTOA!Gg?tCHwR7r412A%fujt$Bu#AX8Erky^Tkx2D~97LG;#p2J?X(V1Iz04l9;h-l=Y-Nz;9sN8+rpHaGHPVI@6Jm!4)1l&gvZ_>OS=jy7G?FmI^ zXFk{#swcr{x^}uHckkf)gI6e^$Nm9&k_w3&w9g@9q2XUZk+Av|F!4bc#ZBNp+xH3N ze!p1orm;q$Rm7+#Y+X$6mGnA06RrQ`_D@oC5t{4TeXR4eN4<~}3(hK0$$@nhfvUk* zMDT`dNzxiTq|E(NPM)gtPYs(-LphsYx6-Kdb9G*)C&I8+N(5B}y$CT=S13W& z9BM-Cyetd;p+`4=S#HC2!i$paiRxWjDUpy;Fhzc+?i$2?e}7BW;IfGqf)gqC-1TW4 z8t_LQ!~z&$(7atXF2~OXFklXB<=uyG0aC`YqhL%TVjJ38=o4Jm){IJL&Y`w*a70;+ zafDVf=s>zB69T-e-FgK_WX11^8Z=qb)h(ep#Q?Iv z=kAw@O4YCPORBJoOYlOvPaB}x9cPMcf4A)BW zo|}ovv?G*d205#t;MYJe4!~WDiS(WiZXM9~XNTUOAw&iLIp0S&D(~M+vHu||&60|6 zR`g;1horWrLislF;%B@#AE3PWW2|zy9|L>Q7~m3PBK{KwQeYwMDPy3F;t$$lM&w0P zpz#NqiA4ed2;jQ^A{6flQhrQexd6MQ<(~*2?~7*?p!s( zN^c_}HiA-4XO39LKc$(IISxqPlSqge$$PIl{yV~cy9nRbqf0a-fB7H348_>kUcA&K zvU%eybG;&x&1o`OetK}pNJny z@7}Y{&U7dIl}_0!A)u6KE%oAI+a3%e^vuwHovd~aMffoP!E z#I4D%BU>9sL<4PZ2ZX)bHNNL+{nEMI2ylQRpbUz-6+pyXL?rPt2?f~a=3ArL zVdUeYju}30jpZ=~79yZ@R{GO*ro4>|roOP$HZ`kcD`O*ka_Uvy9vB$uYh#jOa1?3~ zmU)(wFOiM`7NyXlTU;n}q3<`=DB491 zv2n)_F|#je+CBR;4|fC4PsWC&TepSuIQI`+HdX#VsCcxifmWPMt<64r8 zRgBd-gXx$*&GjII($HLM<4c#+F1mv&2T;{jiJYhA>zt0_pIfyhP(@K8~O3?3C$HR98$t<=cCF`vj+PhU3SbZv1?%-`HF=vUn_((Hm5 
z@e0c_=ByMbq&E#BFxtx+L5yf>oXw9w>ODbrl($eLI}LOTQmCecSoln`ZPUr|x6qP=)Kow>MBFtAaJOcHw++XadM0bjT1{`U^lwvqHB} z7z-jO?gsdbD?jJamR*r&l((}&*#DgKjVPzXImV4lO7d=KV1ITzp;Ri-_+^J~A~nP4 zHg8;GHc`|;OZ!fcCrGDXyEBQ23<=$#oe%Gob2lGfIq({&ylviJ5J6s&lZE0k!N6fN z_r(;5jwHW$U($xna&vJpcpU316n=-(_^_qjMMm7SbywQH(ZTD=LfMVTWPh;yzSZT{ zH#ZlO_Qe%RBEdbsD(QKVS@(XaK(U&(rKR}-*0~fq29DI-KvP+BJrg}%nkr%tRd97AMNa<9rqIsiFRLP-*adF2lMB8Qj9JL-> zKXJa=R7B$NrE$Q~n{>tUUy+}swy)dz`7O&yQ&zO;ygFLh+jozuvjJD^D?{F1`JvOO z>Gf39_qM@ejBV^`dnhE8>-CrIuie=b1hulcM?0+*jTrL<%@oaaKsZP*Ei6=;nozI4 z=~NBPy7&E=`grU{W7c?o21%`c>;6L(^{K{L>zx1Qp$(|eGlbUYcKn?_u{3lAhR9tq zN8>d#;y4@k99BD$O6P1{M}uo>D+Ofbd0U_^u0N*CmQBd(!%HfS)I1g_FyGh3MZ;# zVg>!S%)!d4(%{I!><#uoQ|mZ{%?=&`izch7nssO3&_#}+6_eh(THSK9{pAj>!QWu1 z257U~vS|f7(G9DA&o(D%VXk7Jrk1#yMsjwR;m^?PR~Cti;=gQ44f@&w@FIM+nXr2- zkb1IZZ{(HzO!8_BIl_m(6392yI+pmnfd82@Tv+$0)5r-gq0nHmiQ-T63ZgXrcNm(Z zrb;>}Y9SI{GY>CZk#quvuV?rs59;H2mYUNRil6Il8NVGKYtLD4X<$yuGhQ0e+&J|9 z)H0qd{J`a|A*hOAA-FkzOXYM+Up8%!AOr?zR!IAzDFslhQ4)#0ki!D827I+Lcd%>b z-$obLoydbGWB?44vD=e;fSnDfMA+TQ@H%S4O`iR7Q`>uf+Gecb%ZvOuFj%bnS36O~ zg=KdzJlz?P@7o{)lXt-~Kmlo!DVrz|V9Y#6_{6G)(;Y>yjL`?=(^I^kP~p6UZZE%P z?yt%3kI+TOGOVJ+3N;)Qwam=SDjjYNqptf;aaHO);x13Yk21zyuoQiY zE9pk%FMwJ(>@giR0eeFcV$#Et{7e_*OJl~ELhZ25)bjH3#f6A1A|hgevED#ybK=DX zb8nqn@1H+-8pOAkm+@pf&Z7gO{i$O-eJjoNS2-B945qt#1)9a-LuMLQi8q5#SP5VC z&!MmPSJh*0Iz{BI-aL%GZ5lhce)eDrJ?>X_`=OSII#pgSdUfv4-LX`%*xuR^Ozw^8 z1upe|mM>(6)*e+?(Pw*Ieu7def7Q->P^-7-@3|@T-<4Z^=#KUml4jo6!{%jUnkP;q z5O|`(FH_0*y2V5~I)}0}NHK`XVDnoy<8mmWBqL7M^P$%GeB5KG`IAMOwjOC{h7T~0 zPq7%Eo*tgr{eiqLPI zy2JjHoNI#%=SS6eib4_#3XGqPdalMhkjw0ke1$l$^Hnf(H_aci#m2idar413^)Z~29zq)(hY#G~1 z*Kqm=U9RHToe*W*;7CJG8GX`u_B+k7d5IGp)v8$)Di4LivB80cTf*C(39F=)<9UT| zYc(9s^6~n9UylQ6Ehl|pT02Kmm3@2};<7W5vo%g!iEkEHx}?Mc()!-_u4;v6o+Hr( zmM+ns@q@lZgG zE0|A&_5=xDpyo3z9Dp;uL^^*qo0_Xh&?Vu8)4n80j#|pL-?Km%m6I@v6*%DxGv0Z@ z?~GT6VQ^`f3o^ZaeM03u3+(U>?6N%8`n(K1lcMBPD@b~RzvS`Rk&YXcDXw+}|4bau zc%l@2NZAfWNMtplZ(}q8(V16xN%0_;3(!73Nt6t7K(wXocYLqE2955u>dd;5D84si 
zc(@5R){u9|xQ-24S4ZIP#P3f(YNbh5*gw_@lBVIJ&uVTTNbkinaJ$xJHhgu<7=%cU z{k_Ho(Glps)A}+niS=|o+g6?snWK2y`QKMYrtIEK#jR_+Q3mOJpnuuVJ-uYOo zlfH$cqa)-Yi9~z%L^c%KlA91fv11Ad_zxnL5lpZp#Qyn4U;Sg9caICXq7rUM%C20L z%2W`7YN@UIM^omkH>VlcIXE(vBibtJ>oHDVP0uWzn_L>R5Pr2x)=J2H!ef@~R9aQC z-+ZiJdxrJ|uIpS7iNM?3zvvbfmlW*&C8dJ;B)n-w3wZ?trmC8}GLB#rRn=^FN*lhj z_h&*6$~r-4!(yd~=CBHS)oo+)azM^^M)(*bN$dIi=B7W(G9j>(+bp^V3!Q@dEm}KP zyZr~8!%HQLC#&1d4x#9KDhx^~4EBOhlwi)hlf~Aq^e^;cHIMf&=xLEsJhHVo+zyy? z<=Tq15>b5%cgGVVm8Cy(zrFNX*~YxIKK3TqTI~#xC@!Fy48=h4q%xpoW)cLuU&h5z zv35jhQf&`6zz``4A86Mb!V)86Uu=J{j~d=Fh}|*0Ib>mYX^rTu6>7vi?6Jqgb?@d_ zcyNula6bQ{@Ic?|B08XX%zK`o+gyQWo{rIi=iWvqra@<>Z=$V=VnuuQg|L*farpGJ z_!OSUSZpu2(y^)?@`kc)hC^4b3eQBz%=lu#YExFEt!=hoLY^>3JhXAZJ5GE5SX!;d zd`bwj3LU4Uz#vgUpncgRO#Fp|>sv{Fwei8iF_Ru`)8$oqNIByLFzDWx)#RY2jGo&P zx$qelR)w<1;nmp-^2u^=bqX1secZbd8(vcXu3#UW4PV6R{z}if`@SLh~PWNTjLZkNL&Q{H(DAAbEve7Y;)1__g1M9JdQNCLtx<^`|!CbVt zHXb^Prnn*|t{E!GNrQi63I7!517_Ma<&|P1fQ%Lh!$&G9P5Lrzp?lXs9y4h7)KDR8 zyf52Phy822Mn>dIH%06^3NqRd51c`yLW21}PT%;gHVkbtrH*fvY$4|Ej6C@UPl*vq zI0ME_qx|vYC(|snq>ZQYX(=IGf(dogiFMu{ZJpfMnsS8V53B*QqwpwN9sp$!+wVn3 z>EuEDN8ypC@P~KYbZPvvtq4@L2-q+wiWaO?NA&216Jntn&$yUCu&S}|PxwSu(T|Q8 zN$>2Az7e)9p(<^^7%>!2hy$IZTA!$o44}nQ4m#xPB>M9z*kC91Jn~%IKM?2 zDQBp>scvL~J>O$IsbCWcvU?S}ybsF049vB$*3?u0dM3o>BDDFJTiMwLG6xe04LpX| z5G~K&MJ58=C0{ivb(69(G_>9f7Jd?V*;>2y6nw)Hn+*mZ(#aJLTTEnnf|X;!@Txmo z_OZ}(VOSW22aasf_LpftHAkA9F1Pw8Rp;%Q*dvfqBYbAo$a$&IM+(VtXCL!W#CAJY zVmL@#BeSA#ylmFHO2y#Xv|D1O3lc!UTMWqXjDk5R8u~7+esyG7QCsWV1U*%(WT7u3 zWiN|k^YljwB%y(mTvg#W2{H=njOQYf$*kA84SGXQL=k?YYYCR0qJ@g)6nP)cYG;Z) ze8a-vHc>XuZ}-_kN`gi1-Zq-G1$h+Z)-*L&dR%21e}}b~c8BAt$HuIm;d@zPJb#(2 zvselpRiE8kuoT6{p0k89>P?V%)AFm1Ve;`U4!jF23Ow@m{yy?%Q#tTHl9#2g3915YHI3@ z{Y|fM6zuN$q?ZCvETkXsOzzXq35Vy-d#AbedLJB1cnM%+ zUA$OE1S?0T+djE2=*W^;)|ir~#A2pIc4qa6Y$lw@TrWpw6%+ki{wnzI#O5@p55kSD zx1txk=~5hTe(@*AL{~JOD+qP7Qj(T}UHm;%ThJ=^QxNVO&T@cu{GRxLP|(ry7lW_1 zqfK%1)kgW`655+h8cmX-q8;kl0f9|%o&!QM>2)SoE@o?Os?PPDrE{fA@9;gDq1={v 
zGVWlqvIzYZN~AR`C$d=bTCUpm`|Cs6bZw3c!QG=_>)*860)clR)O5T)!?-`T{9@C> zrjGV}SVMlk0`^{&7&!=W$vO78rbYp2kXz;h3%0G{m_Qg-?Yw*{BZE`HacYdw&G~uH zBnee7X+q|Gt&-vb7??M3qLMRe46`qWtpRV8Wt?mgUbfoI;m`g8&E^Ev>F2?$A+ebLz&jh0!@d5)&6e@G zCtyr{GMopu92T5b3x1#Xh~z}{=(u?CTH$tKVK3lL6q-!b{Oh8Jvz zeu^@>l70@*Y9<2j3fryXsLqai+4nP|aI&HMAS1Aah=%ODU*U9H<3$ZHS1eYpq)JcVFE(;yNp zym{!jp9+Y%=e!no)VyJ?%6JJ`(^Z^6pBlLKjRl(*D4&FRekz1A0OpT@ugZ&)O`#Sx zd^5cZ)CR>AF7{0*Szlf-q3E_ff^v(SB;?CVd%)?~th;-_;shBq>^SUM(5pRRg!qx4 z;zzyEfm)0)J_ieAY2<*Apqd#Tw6(Qgfl8H4z{Ep&7D zLKWrL&Zl~d(X%Y~LqaF2J-Q=%TB~}~If4kCvFsnkD06XS@Xrp!im91ss)_a_PPuDq?q%5%l!->R!+Pz?x&R?K3Kn&rcr;g1)p3 zl;Hu2H-eHX>l05`BBXCFuXhEgvM__kSS-5Rd|^klA)(o8>0XNMuRful7Va1TOd0zA zdi364&(>tAG~|k3`%D5j1>Z=r3rx(6yl#xp z^ffiQYSP+~fsui2eHuD0^16)n`rYm`?;Ezij9JvYI;?jqSIh!8Rbv;`xRjtF1_r5v z`NkXuR>CH{ID8o{e1DSET!`zJ9nWuG+9LO@%~dTH!#KI1QzR0HeKv8Yx}DOdtR|!% z)Z5v8O6-VG>-QxmKuJ8-k1WYPevoEcQRxd_G;x9$QPi+p8~BX!K|#=~-WM~rf{fzc zaK+t^OOmPO+vIN{nnR8s#u&T`MLKjgYwn4G55@TFLo29pBCd}Oj8QFM&prl7<9b=h zR%T0RT+Zhj1|H_b4w%!JlWJ%>7n=;qo~2x2oC$bUybcd1`PMW5lgv5Enf@JR{M&EZ z0%qfytizP$N^7gYwagEL7qWeHdhw8k5p8XO*Ga^{VMp|=Sc$HGwY6DsS6;fNx*8*f*lDrty=P_6hhfxa-ZwL_B85Uqh+VO> z(7xcEqn968q?9(-b z-UZuy*>Wv`hRPGyOh@ZbDQVz@cWUG)DpahM&_fUeFSTk9DG$zULk@6AwX=aCM(}mP zmZzswy%7azTZX^E;e}E<<7;7JMdTY!tc**e%MGJzSBF{H5vyM)5DaJScMMo;KZgYG z?9N4RakUPi>F6v4D+@03WimQ0Py(WolhsPf_W#2sx^OH8vkYMBWH2E$uZ+v?II2>) zv1f_P38*QQgr#I$qiN*EqofcIiWEQxO9H1vg(kOHq+hhaiHI~ZEv*~Ec-4E^mikl- zIg4K}2@@-%z_pTJ4P#Bif8(IRE>UPEZm=|O5B-LzvP|$(4 z5+IZxC^{MoF$8A*y|kersVyidl3>1dj~x%^Z_sim7&>}3jyZ=1&N2aM-W!1DuyRc}11kc&L+^`}dXuw3-+dn$cOBfU?865K% zE9%ajQKJew!x>~*=S#2TTuk6%qB9*XO=>?5f4`e9KcsXps^q2S1;%or#;!yh(YU(E z22emMi!EhzEP?DUV{7~L?fh6yqjN+}++-ymzG(U`pf)V@p;HRO>O~Thaq5%y$WR18 zAJ2luDbU8!0QQfn%1IMOf==o_1YzFQc{;-gU!Q-wW7Wzc3Zh7wcYOS;bR5ha9Pa#| zMlgk`C!^`x-!`y-MUQ~MJ~YtuqW?JkK@T`5&{P?YQ`<}pYxngIwuxpVUb(gLALW$=Rz>gQ z-M(59P)hhlMkRRzWw4OvzUyrALFe(`KlW#`%lE%aetP-oRfddr}?wxw%0xCM6)8YH;8Td*JrwjpS6cXxLP65QPh?ryoO|E8 
zU)A#uR#B6+=Iq%rdUSWl8Md_f+>x`TZ)m@EK(DbO*>twd_0Cv~u1>A}22u3rv-_S! zb4v?usyaos6-De)*JBEooxO{c=Aw7|OOLVCZ-?o-^+J@?mn+iWWR{QLqg=YLPVd*m=UL)s$>E zD~;E4wg8U&Db!e96_?@eC?)R?M#WjN$p`K$~B?6Oo3LN~X zALl*;1}`WYja~hp{CfyL_Z)NId;OSy9Vta5=}dimdyYw&y|MWlyX`KwYq;RG*dMKU zN?zmmEIj8ymC;GVb!R6-nlG+6S%Wx--+#uSYhPDHpZnCIf9p|7bub60eC!?*%+ zL9kQxo=7URqQYUSHH$G_kPz_zA9I6cIjXPy^UWFS$7?sI|6A@)Sz-bM10}I-Ry);_ z6?0n~B;R#4L#lFNq+Kq=yCaLqEK1X$2ZE%E^tZC;NLo^fGwVf*u&7}XGzNe!G+|U4 z759V>*H2nGILsm?I>zi#R< zjgehSF64p;bjqoyQ^>jn+N&6@=2E@r|E@ck-&TVFvC_>{nF%q48i2jZ9G+iX4E5S3 zhdGn|xZj-)jEg12r-4ULQdBBYEhG|tQQ*ehcp&c_2v3wrisUZ4>K**vC2~hFjL%#u zBPkprNht<}<1!+DkD)0dy5IsqQF zU$G1yke|b_UK4=tWW(w39H9++OMKQE#=z{m@9%)~5>F+auQp_}?#<`4_y%u$+U|K! z-Ki!nDX-tTn;K{_s`Xd$+4^2WxxNyNJPdZ~gtUtn16Oi#T=}T`}v-quS@sCnG@5yReLEb%e%H1 zoQX@`Tu^l9myKn+OemM`#A0DQuc`%obsxGr=EF7CR|)Ow(I2T|Dq&j%CVh6bdYr`pSYD}NO1ALDA&}PL zYDRPO7VMuD)|FEaeT~|Q;FA|(f1yYvicsAHGd9xK*knReRhahS)RakVk{6!28G zK>cRCZ6T@kA>!>NG3Kv$a7iyh!O=#9&;5G3#Z{BO-i ziQb6ZW#FntHROq)EoPA5mnZ-Wuu6!bvWdT1$+aAn#IaY3l8G>^wymfS(6FXL6yTYk zE7`ewFD^fvD^|8IRIehEd$a0%FH|~CLK!M!@bcDYk*WiMc934^!;>06%+yt9lar7d zC%~>&lgRzBJ&*up^RDUsG+*hIrSSf>8}4>js1f;V=#O=nyg>qot!)ak(AjnzCM9Kl z>vgt(FZ^oF?nK%L2m7btX^C6&QV*cn)b$Ry-)IPZV~8e_lb&dFXJ?t^+eX(5v(ahJ z+V1cCSb}T{+iUy;+d%l}v$adDykCSIs}i!qq)|j%c%)zVD9=&9#b4CdA;UXPmtexj zmDaFr1vn(qj<4-#U`|g@1`Ss)8@I5I&sKr%@V|Q9g`e9*FWd?Jtu8m7UNAB;V*9qt z8XYs1ZT{FyZw3Z>*^i+T&1gi82i&bvynBZsc>l6^M%7P&J*X`9ip|v~DNAZz9w`H` zxP*Ncd|cOa-EwlQ1W?i4?3V3Ys@(wp>MGKb96~xIE_JhteY<#Ab6!Mie*jerDz@m! 
zC#nx+V;AD&uQAI!8q?7fHWc&Ib?O2#lym#&oG8*6-~l*taIz{?2;LYB1eS^t9jNpBxno-q=C2R8DTEp7X)r{5~$Q^PF@rA_i< zxa7Bha>*5(a}Rz;pT(0;ox@wsC+}iaGfllH3CP?26q6gSDx2bLjalSo55Dp+jmU&7 zJfZ$ELU1{spfRW7{P-dc5?4>9V!8F~zQvZUjX~6uYt6|c70A*bpaMc}-!i0krNAcY zp^E}UJwhQNDin@n2*3n`Gr9CQ3YfZ0vaq}U?(OgNbmN zSRs`peIZ5f`Q2qpDXd*@Cd_ZCiG&PP|H4k~Vo;t20MhNmfZ?C;%H7&Le0wBYT8%mg z9F)NIPm<35epCIM+V_i2e}Ck1HB4PM{}1V9?APd^h-v=W$T`A#WT!a#!mJXII)Y6r zW0QMT3!62n#ZIr^OA5mrxPB3N5SlzUWyz?QW+zd8bw;-viO-B{LMHf=T;%24$p2iP zvI?J0lHOqTS)|r*Yw{!gJtlb7%MquQ%{-lDbQ*HO8D#Cn>dj5WZT4LzZ9ID@t_dYE z!yV_mC3^z@>-9y+M0!(&Ddioe9buzNJ}m*C*IN(>Bzfa<_()33Q^{0UegL+?Rq1dq zxaQ0DDPb4s!gH|CWbu4n3I;|SmHbK(iI+zrUa-dQZ4*>|0AF;;6#9L1yXQp4UUk{S}6@*WgT`*LGb{TP!wmcv=`UrYD_noF`KFAc#}Ie#SjA>Tbvh_~Lq#apoB6Y?p9O0~7@ zKa9J>F_`C2_!=)6f=03^X$5HCOO4~1pN>Giz*o2!mb~n*{9XuPr4ULQiko=nmX?Nk zJasSv>W>d6U-W1fR$B;S5ZkxVyDDfM_D7m$CD`iso{hYUrgbAeutG3-+JK zEd|yh&wA|YQWOi?{~r-(!xKxfekmqZxPbj|5h32Ig$OIwb|GYK8>Vj;1>md-|0s1! zd@cKvLuYYTk^xdiV9Z7K4-0cZp-L|AheKh2eBuDrDu`86SI=eMtNxwkgvPjv2mb2I zrbbLN*)yv`d{$p)4H^p2jt75oLojkPGfVIELV7Fg!1rh+`l|+u21&KW)|fkriWU(y znb>RT-DeZ}xP@}luWyNX6$&OFEYO(0cB;QaSB^``9PKnF=N7N8`;4b99Ud$b5<4Y@1wZqK{FeAd=PZ$Owdq5)NF+ZQCZYc;^CxWL?t&hTH4m3>1L!ct zwy|M8U~^ZZvY~l-H?iJqKm`%Cu!tBm0Sf9LTOATpGcd5R`||S8!MB>93AZ+eq@0CE z{zkT-NH+3idjnlip4CVwCx?bkHcO3=*Mxk92cY0PRkiIqfL?vKmx(M=EWUc3un6eK zjLxz0j(WTI-g4_6rJ*Uvba4JDt2RMt)-WdP-{?ozLLJP#|FlyLD~vrUCCeZ9qk2sl zSix|!jX4cWspjSP&13xSWpcQ6PYg6QI1}$gHC36<77CN#ZtEH|#KtyJnV0*b5g3<{ zgs4d;W!U8SGx)2bGA#GyH|0P&bflN8fJ~b@@*>B|b=}c#JuZ#od?B#cKQ*&@!)HEFlt?`Z7pceTGT$5eKJTE%zG0$x3^W&?Qa5gUB9tn+R$e& za8~{wp$047=eio0;K>{%^=3+U&b^CaYr4V_)$P7Df}eajtpAKupD9N0Xq6r1zz_&oELpJ8z9%rRz+}q&A32wj6de3sx}kpkU*Bi(259Z* zc22EoBJ7?A(jWH~9=^E%X9?XKoaXRI%?#B;h5$-Uk}G~Ivn_hc(T*3l1jUI@KkTum z!e%l{AUvu>HJP`R`n3C3?~orH+hMvK-#SC%;snmEr0=vBQ!Qq4|BLnlqn3*Q_5^-X zM8kHMjwTu_xM4PY0vi1cj3ml%34i%K2zSvXAu9SVOUN@KGG!unqg?m1@XFtVYP@_^ zno{1ED&4M#l&>D2O@u!?6Bufn4>)g)2({fD_Afaz%CC&iWZ<{@ye0+r*9xIO%H~|f 
zwU(@wB0HpBlna_yFAmhkUStIi82rHxjnB#;rit!K2~4|RD{R_N{HbSEvhDe%3t|0z z0bG5U5oH2~!#3M-mQU2?qB=$CMalk;!!8AO?ypL77c*Ej`q;RY`sOC6#5za}>KCH= z-nMx0D=26m*ah%A@1n92goE>bR?cppY21XaFE8jUeMlH(2zryP&?jOU#{$wj5pJMi zSokoR@bUQxzl&SD21&oTNw4pYlxJ&Twmkk-;A9bE(kjzlNiD4w?B44#pg~s ztLGm_0GWJOt5W0H*AmLTShlNAuCGd)RL46RRGC0p)UJYwTD}tj`IpoZb&?h4=;%@- zS&TZRNlXI+4~y)9n)Z`r)1c=$Pne>Bdii*euk@fH&Tv`Cl^$@{XYk})q8Q>9fOLs`cW3*TW__~E0Q$`;Ygb1(JIfAA&G zT#8$>LG`5Z+zDd&b-=zFj{N(DIPJ;c;-oc+rneYUBkJi(dEIYmtB_;3STG_1kWz3ce z+-vU~dmM1nse4a=43a6b`WMMw%W72dF5&9P_uQwA=k{a1Olwk?CpYl$m*jM`l#((c z*vjsoZq&ml%Q(YUg2N9oSn#iyK76P(0-x|YZ0h@SH(C6~J5flG`jaiRBeC4-oG(hY z1*`kYJJIa|qGJs*MQt#?|6oUXzI&H;Va(5ot%=1Uq+9P}iUOJ~c!Z7kg~0TO{HNj8 z4+x2I^2hsoC`j{aA+WbQr67!NT|{wM*&&`gltC5Dl;!osgh*OKi8AN-{Pu*6-^;kA zfSP+_b91I+sLJ27u3lv=RL#NU4D;1*!Yd0?m-`;9t-x&0Hgcxmt<`U*?+pmL@o|gX z&P(>xeB4)Nmav2%mT)g=dWE0BcOc~74Gu4pJ9Y<>$7-u{oPi0buf6o(1$pOwu4BK# ztSrJ@`~ir=h^dLqHhlq+zB~=cMCy*1zK{0Mk=#M+i(O~3W3164my4!ZxB6x@oYhA~ zsU2Nzv)+&PoJ8UDE#8O1t#ASRcl5j@nyzTMVC2{$wWXUVOFo&OdphXCs>&+rhqJJU zUtP&!j|rqXC)O`x0e0h5Uc2PG*TUY5^BgA1Pb7f%gCCk~u-#z%RrsgvX27I00*8N) zbcCF+mbI# z=LVKm^z#Vvl&n2$D1Kdj#E zf^~YW``A77)$Ieq-*Ogw4h5$amO@;x>BQ|@=lwbC0o@J{!tBX(CKl~8 ziyPnT37DGb^TQ2aomtNldVw)HL@nwdYGd(50V}$EoR9u9GvZ@pa7*YP<#m>zO{zr~ zJ!vtpq{XjY*89&Oe-?f|37w22a2)n0JMReW@!2`Go&h8R{>)>alZ&)*x6AnVao=Sn zB4;m`4p=r;TvI&2x*;fkUNZFe#b6@|4j(k!LHCQit zcC$(N*<$k0Rft#mr47PJ<4v5Cz~YG7`$?ZCsjVBy?>APiyke{M(Rg}7S1fsm7fosf zcTdGn0uEj;eIFm6&Sb8?GLcUDdS8B>*#S~@WN>gBasd!pq;-TtSz90b@#-4=&Nj8N z)t58G&KIApp7gi(-m0qX&sVX<=!>>{>D6oFP01uRAB2UWUfkDz!z6cHO&qvwL~EdH z7A5h7VuPhfpKs>I!XhHXB_xEZy4Ln#geCYRyzXyTpQ%P3nDM^)9=v?rMu%JA zv7x5@#X*s~PW-8q3d8AX5%C=C@d+nr3Uu(WqG7xiTvNoM5zXgYOmvTpA5zmkaXBB! 
z>xo?PaKt%&f4FI?hQsX=k}}OkC({bAvu^05?R!AwfC75!w?OfFzF^g8YJzT9RFs%f z5n&UXeGZ!uAGZ@oHj9Gg6+wRvyCo)%#SFrka?&0Hu(9m#D{!iLFX5z;eD^yXzN@!- z-V-bEXEp)ad#9$To-!j2Hw3yV$HZGFAPm(&q+SlTb&cHZwh zM$!Iw`dH@c;!^d~#&ivB**jLJkxaJLVrh^n@tTG&+v}XLdHuY+aGjq7t^IbUUbD@H zYiVhzKnW&A*CPpsP}k~Hvn9fDYgt?vbbLN>Pqmc>cuO)TAq!|2NcDx6-Em*>D#u0V9iS^HI|M~v8H zK#z+R#0)3(zr6rX4~rYYeGR2_2O{LY$Ri{4s3Z*jGqrh3;Yv1ZA(^a8_3eERHHkjVJk%>DTl)|nlPgZ9XpPP?c(^+jNpv~QN zb(w@89uHzQYg6wo({MFDhY)?fbKm&B&y9mc*A!s#`YcLyX~5I(3Hsc zd0&+-;IGfQuqkXC=a9_32`et!e{+O8)?jRy>MdiB>%!O+7au;By@j$372)S*aOKX~ z9r}2vr305TPVY?ZQcKN*7}DbV`5kBQw*$Md_9~Pft)@w_`(d}T*-xGC+SY!$8ti$GO?`4P+p!vl zM#&4PZzhwBCVl(40l16kAuikeqZ02@`>(!crG(V)P*c&)kRqLzb2Yea&047Qz%ERQ zQm$_$6`Q^jp%Uje8?~&sovrt*wJClO(=fIoP-=f9FpjlfUAD2UT}$U#$Zwa0q<8a6 zAmV{Iarxkm^}er*U*#A3c4sLrMH=@5L*I6&l(={iYnsVOj(#BRQnM9=9Z;dXRwK$CDI zO|trnVe8v_PYt^v(7qcjO&ohTi1u?2PNdl>qsV$#_(+gq4f z@FhVQ93Cvb-nmv}soyXgt+ zjYRqJ;RM1N1bU3^5(yt3)*D%Dt26yJLR|ef(-p$&cIF4kc3Y=!z8|&h_@n1NUO1;z z3Aq7g&O73-r+^{5?vYXE^`larztmker`i%fp)%w-C!ufj*S=RWw^y z^*kglbTW8@^s~HZ#-xO}gubO~p515qMqk~Cg(b97wIa7~koo!2r8>u+RM8UV@BCcE zxm4jo*6JkE6Td_!`Z)=rD1>$dt};#wCW$irM7%C#<_>K(2Z_HyBa9t*)RXN%*bu_` zY9-Xbdl?qiwX?2g>_6wVLR*qEp7fDwnuRqD+dF{T@DCrV%3J-L?aF8|KCHLKgRX{b z$o%Pz40l6<9(wQ2z&C~itzQ<+ZYxF8dKGe;uj)RKa3nKddmKkBzx=ZCdX~WM4>LqE zXG`^+rZ$eUlM%M9u<2DGjf9WkO-thWk0H_6J;vaZ5S;O zJ`<;fI+Tyg@A=_L^0f}>tDhiCOGe?xTcP%cW7Ww#7`-Qosz_KPqYb+U^5b}8&}4^# zvNYw#`)#K5Q{PxYQTEuA8#{bvUZfN~2e{teWpG*6WfMTT9eOz{DKart<~#_bD)QW? z(^JkJJ}}L6XU@wvLNha^K(JkIRx+&VB4NBf3;Shw=Z(G`e)Oj)Yyd(}ON-alx#x-? 
zDUHuq`Grx>XUpB|ZoU{|jUZse9U7n0Cb?8u*gyz=Tt631WF>g(wijFov@}ErDzDzt z2|IQ)#bqIpO^-l$0tSqWuyfQG(52?_=T#CB6QBOZC!gK99+$AVZR&c~!78-oCa!9^ zK8N8CXg{CPsgu;f4^HJUrKswfSn1JUY2~^dqw?i1urlniA0mzYy6P7B(`L2^)$Mf6 z^L|+g1x09j1nwYP*Ilej>Un&;$#ABh9zu6#C}ID>7E;$!JXy#xB-IrxU>2P5>VlyM zjN6(}V5F0U54f`Uv4LdrGEM>tdBHAmbno-pk6A_m&HpHl@(skAT?XfdwlUskv`&!M(Aa*U`ao# z)2y!{(J0@7NPBCot1efvNJEdvh&_V@tzAvKO`d4}&WaFuEq(l@&cw~;fUAR}q5!0M zhmn{6=S$SME7sDV!~y4{2EvqER@e`CzPjps0nWbW#uA zzXzP9tpY~hC9M%@kh3(h(vLl|tD^D!yqI8Tx9ul}I}VL^q2Jwv@K|IAM}}zRTYevA z-fli-)p)v6 z1QC#1&4yv-)2X_q_Akiw&aKg&vHsq35*==OQe5{CDCY+|9E?!K5$5Uli-goHB&sod3N3ZF3M z2Hxg7Z-szeMY)&6PGMLIce)^cfwr2FsEnd%X9|?&~>T;w_s&SY)51P`BZ!`SdnjZ z`X|zne*I&K`kb>s`q`=D!zWcCFR%xomG4T{>Pj<#H7#r@n6rsq*BtypsQn{A3Pess z14&0>g`(|-P4zngbnZ502BbnZPyh9?+At+O721E<-K+1Mmd^S$6|dlcg7F0MV9 zp5}kol}Z==h8>i*9WQr^=YSoTr+72mTIu}dP*W?S*H*r?K9IC(LdmI>M0E(hWvpk9 z18M+iDqLAoA0OJ&%7QvtI6m^xe2+ENnYvbIZ?HCXi#IX6*1THgm-#sMx4fL+Je)V@S z6q9;K0HkJgv0t8e?ow4TBBwsNf+u{LRzhtTxbyptA28}Igg;>!IZ>mUZ0xGAsj^8; z&Uj`)qD@2^^!qohNIXY1`wJ!I4kEadjgIQXiKL5fq$5Gj>2_YigXAfpVFdNx2bNe| z4N13FAO)3V)gP+HoP+FIdqjaNs&8gel@9`TG9|L&aZfr;!tVYIsrfDsM9*Ol<9yXf zD|qjK#Ba4et)Z`wUpL--ouLpl68@b*UC)w=uZ!fwdzRM<*Vx{BamsNJ>UF_4W|CC_ z>%f8pTV=DGnUc9doCKrO%$wqCCr( z{e5G8(lXB8iMfcGB)TLB(fqEFoDSe72wV$lwzoi^MOI4|Y}A8DRm>ZFCmj#mSCGAWKgCSV z)c^5*EOKZ}?7ede+yq{4)*Ebn=uS+1`xPFWc5plriQ_eaf{14Z&ejrGc2_*)VE=IN z;iU;{>yKr}kA1mczX&p;5`6_w`MpaFr-M4+{{Ap{|88Voe4x<(JtGRStmbOu%s5v< z$*}|9UNcIDK20B5I!X>7G{t2P#tm*XEtNxYf#LdydzgIe{vBv)M>-};`1>}`q_y}o zI&Rtqt`!_!rUwaHThmf0)wB(wwLc0>eLK#hZ?Q_1k?UG==((5o%(k z{|jqilAa90V!U0yp9F^VxnHPZ;#O%v#lwa)`(iZKpDbKo{sW1qOQqGw^nD@<|BpLi6 zvySHHz%ZK~0tSc5U+5Ph5*jYm732DPw-EffzJ`R6$nrlgg$y|R#&0?W!~<8RXK^32 zlZyp}RVA?VO(Ky8`!YfX8Jsx)%HkAE* zfPhoEhfCp1ZB(QQ05~Vbefln|2Ws*iEhOjIb`QHY^c}{IOx)ge2=h|WIJ%jX6|Vd% zp&5?@e@RmPq!EL<^;t!2r>IN~k&zDXWPgR{)+K&ZAkW9>B0O9vVYD82K=&!eWCkg` zkN&vk#NYnTAx3F{o@Y|DL@7$2J?O-G>1w0PH?-rRtlj(~>KBV4 z=~u~On`w_?!W4@(Ath(O%h zJnW}vlL7Z%_-oeLi+_S-{$HkQ 
zYTzn7zt{d?i*I-`EN-cxvM<>4MI-3Qvr=q&qN|GHB%KT8P^;Gz%WHeO69=Zs?x$O(3#AK#K-f1YEaq*dpMC`;i z!V@PipTQj-oghpY6=6n|cQE|M%`t~u2i;krt5n{PTjt6d-Q3WY+0eVO`y0X_~1{u&iz@v_>f*q|? z0UDy47eiPn>|7pB>yMp!S{Nb#yQ9lC7hwVf>V_qJRR7u|MAcVeFz*}tWG#`3wv+gi zEM=c8+=EW*P+K=-^7$&CCp<3hQ$MX#{7)Z}yu{v)F?U)!BqtQ34%4NUI-CRH#poDX zV#%PtRaHup3_d2%3f(f$Wd1_*Ja{tM{Lp;Z33N2mRAzS2ZDaGnJjb{2jo;d0Y1URk z68(6=$nM@R7yFlvLx%f5;AW=(TK?`QY)>F5a3#}*nx+qU@BI48x<@^Qgp^Q`!2?$m zl$$GosiN;94>+}n$vWuCXNLwviyb4yl{<9;s=x5$t}ysI*>jP6flFq6T(5W=H5&6h zVs`1c7k!1kWL8T4L>CRe)61FZqw4~lD#$Si>0%-~=$FcRC;|Z1{M|6^k{NL^Ils}M z(E{Dz%PGAl0DMZasLBcFYkXiApQKb2Y zp^7mijl=NO;IR6?1I(L89Cv$mIVl!V0LKd}@eiZrN%GJHuK}^V^sqN1xQqo?Mwltg zSxpWlSGlL3A3ALeR+uJ_wRD0TPL3rV6%nLBuAkKP@fPMGJ|N*E%&GV}BUXa=RP0Ej z2|^kv{S;24`yE*-ov>$`1CixmRB~MN>UEGI3zR*i;kPw)>J2~0u_`s_?1D)+HfvqZ z3p+Zi&ai7*O>pea+3nTCZhz!+6~X=|yTPHV#2khQTh}aXAW86SC&hfuL0N!zRO7s! zRqP5FW1Xp=HG1gFE?Q_Yq~wvGTFt47xMB!!TnD@1NSD$pZ@=w%e8-<>>6r?l*gHx=XiJ{rAAW)R04gUUD%2TTOQvV!Db%wdZkZ z+5yhrYTy)HGsV?xY&3C0F8EYMD}lI8jvYyix8vscXzz%v8+jsTWO*_!bMleU6!69| znctJ10#*Y3mUbI?6mYDa33JdK6M#&IBaGek>p4KtV*KY1z7zyp_zJ@r?QDw6MoCPM z9*m{cUOrc^)wL8jpqR5$GGG-yA8S%?4brLNPZ+x)hM6wpB?hF9Hi6fMcwBQjf5Iun z)Cdcr_O`&2LeWFFO^40JpQZXhQnMVHGv^-o$|EN$lJY9L?01lzROXzsekzJ4fUT*k zU2ht<2%9D(7jAK~x19IV{2mKfp4;FBG?L7SKlvqP0l|Rflzw##VC;c?fdTohA_3>{ zT(6dB*As3o8^-<7&u4Ka%oI{BDpfh_s!z0hk%ygVw^U)=FHKl5bK?A?c7QOJJ9SEP zCBSy|chtT&YQ(%11TInkM;w%HtnAC)MY&3IBdl)bdT1y&Euf92l|UbzUlmm5QE%Z4 zJRrWnqbyaT5}39BLVFUisKMiIhx4j)JZ4?iUDS zAHE0!;}HPOhsofB<>&$J!zh(@fkgePhEy$E4}dTF+dvNv`!}$elLF4ixgiu!p2I*z zIHt>ZJuZSd>r>;rGl0T&Qew`mDMgZOpVruAT>jaUCC5EUUJo@KKranyug`!DTBciG zyidUMdb#ITKe6ze-2-g9zc0FmQdI^w_Gz#mMBh)$R`hsNU4hRL$54%Gs<=}r`?ra* zwJ2M3RJWQw#%^BNfeWU)EYI zfrIj*lg+nk=c_&>4y8oPNg;1>apT>0u%-`?K+*yA-1@eEt%E=S3J`6NS1AGiVZ zqG$(V80wA9jRcx^+@9}+?>g~(eR371LyYZpRhBcf{9e^mH_`XY3Ay!?r-lyZv4HdZrsqx11KM=Qd+L^HU_6hbT1ou-4RL~>=@%1D@$8>$y& z@e;w!IvB**$@IQF_f;-l9@%K>cv2G3eZgAgFtlHBb!mSOz<`;tP_%yqfNk_PY@dAa<>m;=u6UBIVp3m*IvK{OaJYKJZc0KtMs#bpogl4Tq 
zCNOMCivlaoQa+Bt!YczvKo7@;P%7|I{;qEU1LbBt%Jr`Sf%=cZ%q^UeqzzeH4^;~4 zskqS9E-tB}tT}jyt&se^KxBIrO`rY8xqx8(-{niP&26rpE@S ziXj-NN*|M&ER#}(&jNzhPYC{G{wpLl;yue&3cP&e+jfEKjW=ZXN<=hbq;OlRw~3YGpq+x9!mhp4~j2gj35_u4jyB!4`2b=-aVhM=mH((k8IA(wYP>_J3 zr}$3<`JcG;Nqyy$H{wK>D->3pkyljL{&iyT^Yu|VaqZdc3M66g3O*_=KKc%5#T5W#>~TQTmrd>GFV?+uhRNsl2YRdDzR-r%gUx z_~jxiO6Oi}--ML`Em>6BoWlUR-VI}g#8S*)i)y4)Ar?!0OjnsKNNo32bDR8uyjaWx;5EOlL}c9zJwd468t#hn`C@7G)B4PV610Izs-mY>3aL^r;+`Q9*&g-q0N| zS6WzGFPLc+w{L0bkIwX}J9$Qy35WquNAR^HDl=ty(Oa5V&v=~bjU?@*7OMb|yi~Ln zVE!5{6PZyn=UD)f3OwQ~(C3Ja=??@)i>9`i{!CBc7JSkpY&_Z{g`CD^LObBVU|b5K zy04I$hNoFBr@Pv${iq_x6t>`>9ZHSksv!aNDp7nmZ45P8S-SOEL;D3%4ucr>PwF-? zKaV*;xJQfg^U-DAFNd6BUKvH4u*c#c)#i9zht`;_=%ztwJNsgiu5uJ3y zdLY8)HT$fEBQ;-cI#Ca#6=+wjOh*+%=w-ANZEf521>g91i@|Rz$@xwHJ@#*%Ck#&! zsp!6+PqNd@jG&O4RPMi28>D1gVVX`kD(i`8;?66Q6u=Jw1k$D1P8v!UIP5o8>QxDb zulw-!DvmuOc46%iJHc+t{D&$(<>>8AI?>rMBaKra)2&T`_Kafl`cEeXCcCc>^IQU_ z_xMR}XZcGM_GG&gz@_|-^KXp3exIx0hJ?+F1?+jOq^#$G76`1DZXf<1>_g`j`}pFC z>S~^!NM$!IfnvZK@dl}L#S}Zf>5n<}zJ~1!bW9AH`;X0`uoA=!#TgF;ZCLq(zLhDT z;jhEthWp3Iz1u7i0PMF??JpKav1R%^L*|$i zlk+COuH6fCf1wPjm|pRLA8jkzEsQRLW()SL9f=U|;t0+YYTVGTadchJRuD{<=(~u6 z*k$7$d+hZba0}egjtIk0@#=hj166bAy|$Cp2Otk7rJbk6UscW@eY-QmUZJIfkfO%N zjkGtN>O80-I}x5Ts-m_Kv?afg>!956P1yGT)+zY_xq1BdXg~`RZ(~7xelkkX_yr_p7nVxO8VcqXz%pBmKCaUXza96%YpUQQR1*j9^QZU zH;`&jhr?eF+Ml*6c9?TL%krF+K|r;vV0J#ZEt{dYNcX5C32mljnUl(=ffvPyNd*fE zUl+V`17hp^vrLOxT#S>v8)WxOc*m?@hvuVJ<6P6mMaq}_iW;R-%C~#~L=hSlhmC0X zs%Y&u9Opw700F0<`Yr@zZ|em4Zno@uglPMIw9-@Ote?1WQ3JO<|3!fe+h2!eTuMDi z%py>?@CYmMq{)p8V-(B@q)CR7)zG|NSMwouM12CB8s`@d(qkMg5cx6Q-^YIi`rXuP z{JqO`*mcj;Zo@E0k|I&|L+9f)jY;i^r!PRetK@>LMALG_=z{!^BF{y8TId7HU2!MP zqWmysB}$yvB^J{l2Ibf5lQlO zzq7mEL8WA;#T4)+&Vh)L)6Z6$Mpd-2+wTN?GH@&cwMYG#XXN(1uyLRgf)Lh z;lbrkr}w^1bzL(yj&_h?s?o{3-Su6~H}-5OE-CXhvDdv)?1xil1Md0fWnK{HJm5(J zkAmi4Ntp%saCeww6Oj*cRILAtKKQPgd@%SQ^ntnZNU8lokt<76o5rZ>xIhb(Lds@} z(fu0s{vF&Z8<7S!{P&bY@4w;hKbZ*vEGS1aZ^b|KE7uADV| 
z^&S!Npq;XKh!ikxINgy2q4Tg4`V4Nqe0WK9%5d6vyC-;Z&>+Sm<>>p+Q>`LJ<#bv= z0_Cfla-*>x*L;i9<@IzC47BuezkB}2UA)=pxlqt&g@slGy^K|VNabTggYWR9%SjPt z?i*U=$}7zDDTTko!6XeSXZ|&i!DfJnk{bubS-CV5WGY-CZJ(sZ#!oTF`5xr()?u*d zqk9D8gj~1*S&Enum$B~?;J5>s0QVRDSGpr>lQ0D+SE3$={`eojfp~3bT(Nm)Psq&5`D==Y#;rS$Z4iOk+6;&Q46xYzGDu4V z&@L;=@AwG+n-2nx;P!`K7cAkm#^W+8j4U&^5ElOP9xN87cVyXq_*Cb`1D4lWHW!4% znO}%~hFLlkj(zUV`ldgK8skKlvE^GbSQft ze9}a5iMw_(v&v(;FcnUZl_ISalIZZr^U}1=CTGt?(rU;v;h}ZM9y}3G!~D=!@Yd6O zos5l&4DC-{r85)Ve1h-(>JB$#0(!+W8xkGe#$P~oD>j`IT51gX zh%>=!O06`EE|oDMM6GR94ujqWipX}+Mz1e^(HIx#C z^Wm;b=%W^XV5q!Z^(?%7lyNWg!2@@MQvLska-!%`5^RP+Zu;;zc*lm6|0C}D&IVvcXE-E*7rH`d96GqiWg_oI>lPxNYDP6<6qlz%0j7hro(^^KWO=+^>XIf|wd zCmkYeLbK1rqZ`QH0$z1pQT31Y(n`QfyH-eo#j`(yU%}>}(RL{uFFiQiDKj&q-|>e3 zaY)DH*2@`%|8P`pgF}fpR7I^$pOd(t@&0}sb@6&dkJoA`t`z{n(xP7spoL$>@x2w$ z2qx||iF6D@Au#Wko&6(5YKT@2&bq$!^bY+WriwE@D0il?|De`Qj9p21La3l|Wjb8B zt9n3UGG#;?8V_tTFcyx$`|1t0`?q-10YIHDUPT7ozoF1S#DWlo?ymuFBM&5OBE)Fy zW};`=)>9Q!6WhEK#)83z1*WlpEb=YwFN?MJUrY}#(#dJZ%3(s`dBx7~myDhc`cS>5 zX{1zbENcw!Ubyo-{y)yXGAPcjX){QIJHg!vL4&)ylLU7XEWzC&xNCsm?iSqL-Q9JN z0fGkIdy?mUzPEO__QzJukGl@3J2QPwcVFFobsuLKBJ5ACVL7Gu5v;U5u+Ra*W`&Wk zPFPa@~rFb&una$MT)G+zqpXh!XEI$Jkq znoV@SXDcDYpH`&ns7hwq0r452#x!MZUvONxfG)J+=iksqC}{$Dg8n_Sw_U4)8$nTZ zM%$sQm8!5Vj6m#SnU&BGZb3~J)WDQYMB9rWo%}`IEfynLhQ^J9oVV*m8gzHZVwx^p zMU|nc44)IYI;&*slhu>b?Adq>~N8%#jV8#~*YFM>^vqu{H* z;QdbuG*cD&3DX2n#B$&M(Rzcm*Kv6&wVvk_{tGnM_ZwBGmDXZP1_*A}oB^A;DFx*A zI@wgM0l*2;$+;t^Ey9A~I37#LyEPniTHxg#Y9g()!;~l6K)^$m#6t#WhC=Btk`-ni z6q-~o7UqNZ&o#H>Z1>8lgJvd2Cl*68(w|8f+KW)3{kpX!{`dk259)u{3_9}i0$TiN zyqq9CTK%Xie+>bNh7oZqt%mwlu$LX~+{u?jD>E0|SLgCiJ;?HUEQmR=Vyuem{z=O9 z6Z)#gz50pqh8Fr?<)u(_H>3gmDKUM ziw6d695;qQc)<0#UQ2WS?JW&alC_ggt1Yj8b&nhny(<^s!^*Lz$eVd{_{9YjhuG3u z+KEZ;g}@_JMi;*edxjn!O$f ze!Y8}DU+AmKXoL;)Kfgq>5lZ3E!p-`=68}47_Ytt9ENeK5MdfmMdm1;DJ(IE!`GHi zceJ~F1ng@pN{1_}?+Y^YVx&B>B#OJHhw%&O7m0@@8x)f(hNo9y04WevO_6w8tr4ra zENNPk!SyV0rTX>jPsD0nn@`d6e^~BIgX!OKn zn6lKha6db>2ToUFq@VN%Vi>wGjMtY<8ihYwh(KF(b<`r^En;Y8Ywjr 
zi-I>o5N_jq+Iy}5++?RZIzZ*i!Y2VA@OdXUfSjXqC&C9jZbwRA#^|nf-ZP~d|K6O^ z?QbMxJAaTw;EBkGTTe{nPQQO;bEZiKOCS*G?lPMj9UZhp)?M zy&8{$ob0o8_c_Hs3p%%&j)Qr=y`Y)MXeiGaQ#haGy?a8BnO4B&NXvC_dmZb>`6wY} z(Tv*yc+gN@J}2z@tZ0gATyV9KdNN7`BmbM=@2EGoc*KZy-7IVjM8=O%6oK%I$jnU2v)2L04R+2= zCHpVC{UG|+ZhM(WcGG0%#MCl-*pM5k<#dc6V721S!e0Zp#D<7m77&YzFD zs8H8~A#@tVzj42j%C}GU)ubzP$;m?q;k5UBF0 zZ_CZw1lr+uf+-c{4-*><(3H3k)k+09Rmq{j#58sL2(5edB0TX`@Kiw4N-Qq|h?R;9 zRM`KCOo20Y(K)84?W+=D-3fJ8ntws81uZXM0)_%i2=#nkVuwM`)sBlmh`;1ceoAx# z5sgIiC=HLU^r7V4af!LsFNv3e8Y)5goZyp9^F)~oNuy(Aq^=x!yn+&`@-~>j^ywWw z28;8%Uv4w(P5ddtcR%=;UaBe43G2wl$16?dBbK^(BN4p$@o98kKcu<=%p985oeJWBUB`N}1hzvAkaK z>dVq5rx?pJHp~^a*-euoTzT=#)15qH+;1?^bcOtQMVrTsx15&&QjLm~$x&y@tH=AI zml(I*yEvyj-bZU6`LBlrztA5>gl{TIzbz+C&xdEidR?A#YC@b%A$p*sG!MhBM0`UD z5{zmnpu88q2|w^+X=*xPr5aVv#HPlq>?yz&3|MkH8&c6NTn6eJT+(Gt; zKCK-6hBN;A!d!L6f+SdGXWzzYS{gIWtlBASbna!dq?F!SeFIY@*QAOHk@cj*Dn2p( z8)C~XJnT-&1L^2yS2{S`mPuPGVtw5yciR=|{$LH7NRCt6Z9%Ud)_M1bknNdAJ!F~X z)m8h_$=wt)_o}jem$+uOY+!J}p(Jhb;q9G@)f$XK2Ec8I-8%VikN6f6AOW%Kx>Yq( znokIAWUjvH7VgT%sa){7Y#k@x_m$t&OYV|xs0auW@}k&djn1512Rub1ccBl_espk? z>1o(_2lv#=l^4sH3&f~*tD{i9eh26q#cWWT@WBU1G&!1187kkq?6cv}D^+H$m~6!o z^MkqHF}a<=CQ(F4wfMmwIT<>sqo_{YN2?I5eq+KU@hFEst(bTkn1}OGoh1C@E2$yA z5-W4K-OVT)JAz-hq~Op(Q)UZYwP#M?WT7nzur|(_;joeE_CJe<#{$&ge82Tx_29)$ ztA!6JQpQC4JA`3Nq)`;by{F^NGc&ZXa@-ha#xb^63Az;4MEdkGJA7IRKWwt!q}zm( zOm{vIS{Er>bQRZz@49v~!}^1fdx&mGHW?!`R5m}rd{y<@ZMHlg)h1M_RwBgp1`=d; zNj5ZIROphFV^sa1m~;bi0!Lw4tcnntT-t!i78%T-%S#yDlDOc7<(a+gF)pW>89wD% z;R>qiIj&|ifDs&RlIlRkP1}PPok${SWDgr5h{?Hgv~~9x&MCnmv$96vu#k?I;o}U? 
z?VV!26T2#l_av(|w#=ci9fkD!C}A&2JfrMzw%vijyEq@b(agFKit70VWs#VrVQI69 zOu7>aJ9i#WE3M^S0l45MDrKrCBt_}IamCT-Vwy3T^{-!gaw@+6+OLc1t5c?3U{I9n zo`Jq`%;es0efsoT@3t~Osw}9D4&a@V!-P%l`a3WobYf7woc6$Ol~r{czc-7W42Wjp zEtUxV3Sy2mF-+f~osiiN;3WU)+(4DpnwNt`{1I2?1pJB7f!Ns5VVVO8Fu z>%KPb#ky4vZ7rgbXUNf(OIY3*$|Qy;uQSp?y0P{1wgM5L23R?J!B;A(9Y{?!TQWF6 zv@NAsHv9M}wH{)$6Kh9sQc(Is59B_XKceKRd6-`-4x|vZ!fajtW?Ud71cQIVBG;&- z84O$Q8?bA(RI?IWR&;9bn6=O)${Xi7cO*g_oX9xqmK7{ZS( z4}6U#+8VPM7BIqOLR)~XAgfDFtYeOR$uB_h7CYuMF@hm)=mf4Pn#ih3V-L&(%N>@= zbwcpOYgt^DtoTG10|xeXW#Un}VY76vqtKxDmvgOpZxiPn<%aXFY80jJgOSK@uECx? zE$%`$f<}7a!8Ms2>WKsRDhx7}RcX7XXXYU-P*SN?Wv4}=K9Pl))7CJ@m8ydrpG4z! zQDVojJ&i$6kNiPVYj@H729=mVtV1iV){T&UxHV@TJ;gn3u9V?kGFEgfG4t!n@)iVr zM$Mq1?6nl>R%-S2)YL7v&#_S3lK$I;Ez;q6LULN%U7&~j9+QF?0WFzGb93jZj4%_S zyPR=rxn>QTwdtd$Q*-UA(&SuWFiV~|(p5$fRet~Bs%R7XG<~Qm;P=dDpk#sp?}r4s z+9H%oiHjCa2}ok{zkC&yq!{xZL__uA<+SDpzGO=lVMqiq#rYG&r)5l((V2gYVcHB~8SZ~}k zBp*=Zj7hcXOInc*A3Fhk*~n?2j~wNll*#TTm(WOP{^~H(*<;2s!BZcX{o6w5XE9zG zLrC^QzXZz3g)KK&6uOMeS1CBdPT{i}{1nz`@CoQxS!}-5nz5uWsHA3+T#1S4F^}*y zNvOm-OdzXnHL3q)UYuCzCVm(zP4B_#U5digexKE8>-9;6hn5DD7cBwEIptP>h#}Vi z)87@*1DfJZLxQ-oE#rhm-l=Jm;Aytmx}D-yx&$}MD3&Blq(6RBp7E%B4$-e%M&RGN z8;8Z^I$=$-j&rZ1(wu3X#PpjvbTR)6UZ3)X8ybCCvW}aN(x?3QC+qm~HZfg9|>qx$d za4vZnSf$#d07@YF6{1ckQGJL$3>sHlc9?KWS@?d9QNdXyfWR}w=EiN7mz9^i>;fLjSo76)fa10 z8-9h@q4jO96M2K&&t|InX3E~CoL)D7=Sfh2iF0z!W>_6!&e6F1;Q(u>3zCcg!y>YN z0H2I%!N6Mc!4Eg}qSwvCrF7+$vT4p90!*)(_20k8j*W|(SXv7GLGabX6g4;UGr=q^ zb*E>fT%Ns_MGJR5evIsXQ?7<9??=>Joi?j#M{B}#mmJvpVxl%0jI($eiD7q)#z24G z{}+h?vF)GWRSRLpel>RL#X}t(KcJ#8a?aE0pH8s?Csffy}Um z>}9sfNw^E=yiBL#$M#0zc{_MOCAAam)pr?*vB*r_bfH@#sj_`EvgphtOoIdHt5$P`7do%Co z>K$NU+bYBc)%0-qPL*zoW5dR=`T4*%=o=GF!0k!)Cp#tJzANlOtsqb84^)HPt9DIP z>1w2SzpE3Q-PSJEwyr8oqI(o$_%7JGfY?tb^pKWf;5XZ%^a7V^yqNyY4YksmR5L8E zwCwAcuO$badC^lP(#jQ?^A>LU)6%#vRt7a^BP=uNUS2S#`W>^t-2L4_01vR+}=rk;{`N@of8Ve_6Zd+11tUjj=2R|Z(VEIg7(~7$J`QJjOo|W zNePAP2ET&Crex-wzB`cDOzS&ws~)5tgOu5mYFxCRG0|h)#Hyob_$o_EVI+2m>6q$s 
zvOXk_%Xrr!yti#_zYEW{YN4^1pf)?0-7ecyjFKbs5KUt z^7PTqIqoB1)5c9!8XbAc#v}@4U<{I<`D#RF!ySyG+!wlZT>YyK&f^tR!WplY^Uc5M zSRD-@9V-v~?x;B#G}FKmw?>}DiOEW0YHI2kcbbM{%}%|6V^I@Qkx9M_84Fyr(w=VO z^rQ|01ooO^QpQ!r0wm4WmN|L>X{?w)*C`s`-~*NsO%HZ9Nh+DwX4X|%7AH)poNGRl zni$0;@h&WSG}*0_=zFgzmQt{PNASu88>@(9WeKrJVb1qjCm&-|_C=t_%uuq9VTP2( zvt;BvT&tt%jlsSW6@JJB`h=$7@rKK2^IDu>b_8Ol+FqBO~@92Jq=o69$ANP_~Y;wv_8q!^#cey2$SX)PWM zs*bQv7L7RQ6!z{>W^0PXB>oNzyaLkw1jK4a$<37FQSpkyJx!cVd&iYU&EI~0k1YB& z{H6`9*Nl4b{8iDnng*^UQQ{VHQ}anpf!Og{ILhf2_ZwGjk2U`VCSI@0)P8flaJDx$ z@0I5JPTyKb^ARd${AGmxaRWaw&JxMGx0;?)2Bh27{WWkf2>0T2n7qtGX%|j~nlPzT zid$bIM+J8jwF}`4y0X@!XV|)T7TWYp2Lh?6jicwU;!>7y5gw&&_ZM%(`y~MSobW4X zqZ-t2F}(bP(VVpO%uuXNxwCK5#~V+@OtGmptCl4x>zAF+TXKg&16?#a>w- z{s~Lo|94mt0gsed*haGxcMz{6>hAR!mQ5xJdorX(uvR;5N^Anv4gSXTtMzh?gtX#oLPc)qQxNS!~ zON0XHLkzlsh@OK~cyYCH1^s-~*ihM$GB8~`Rr_o3k?@BcB-Db3eSa1;xic;< zdQpl9qs)%;cN0rMm$0&G>vF)fDZmX4oK1f3`oP#Wq_x@1) zRwuatHI_qSKhp{49h|t9ICEsgjLhVvBiqwQjwzJAo*r0&j|;wccXv<;3MYgi>)}j@ z1Y0rRmJe>K)FjrgEcOd#e}znIx5ZG?#HT%m{en zMX5=q1Vwvtt=;~_^!Cg%f;T@azsEY8cl`Zi4i*~Ux_e^xGf3}syyE&96Sz34@J{8u z)4Pyk@W3%n=uT0**@1&&r)>9T=z#W?Z$WU)C+flSv}~&oX&cc7EN>${c#siaNm{mx z3O2cUT7sJdPxFCP=$)oznLh|*j=jr>{QC9lnTzmV$FkaW;o%g~uDfuxeoFdxl1q7Y zrTCWZD^PJcFc-A_WL&dvQR)RIEeTk>j!-UKhq){|4L&) z?m9%CR5QhgllaI*F4%W~gc)quCBgn%7kjt9vIeI4KuJ77Jf>OpQ zA{72=2uvYs5|3`+F786U7`(ZbkmL`8EBhS0(o@2&WbL+o^R|F`>Mq*KKlo%I*S@{r z%EQlr)wAI;tLD^Mbg1lf-jZR&u6orYZ_#t$eLPs7+)o-_T&` za!HrWFK<36l}mk45dh!=TG1KPpW=Kn{KS0hq!~@UuT<~%kjgKEz)&M2t zVl3;4KXgxlgxqVUb=_|f>)2UU`Cm0&^hgA3Eod&%;iUv8v5-;pywr8)%SK)errK1& z^_gt$6?4F#_EMJ#qAEJgS0^uGV9mX$WM>0-rFXz^6E`6P7gj917eLJ$?nMZfAv7FN zOn)RJd-#rk><1KQzxllGNC^ydGR78IU>#ZkDL3qFiA3YYOi1KN2Omg_im&U7o+eDK zF4MEJEgGbJba1KDcspe!A>od?uqu$PDQz8O&H4l15gd+;o0DmG77!t_B zb+8*U2Og&skv@EljpEKFp}a0LIU^Cj{*wSMf@-sk<=Rv)6$fjA0NAeDIU^lC4q&%* z70Nn4P5#qw_g^h|s(ilPAxC2L?((_O3o0+09mo@5ojy?8te$8L^eP%wBUDWgGwSER<0jh47CiI97^nd zJTCc})qU8V#z~Dd29$>l0p{Eti|_sy7>HItO@`|r1VCuWEH-0InE>mAM0s>DK_%En 
z2jJKBo^lc5O8op{u{(DA2)*_CU(xy;{&T2>6a)e#Kk2!gupZD9jc~al)2(X;Ig>o} zX_Eo{Cg%GQos(g|T8CXEbz%K$6rZ;XoSluP3)~Irmq7y0W6J6bw&nnaJcts8e~F5| z&XL8RD&-ygzP_cpINMn-97_GAheg@6{=`E3e+_?SND1+Fbi|E-(C>x6u$Y zQ$i4S^caIMu?xr(LOb_^{4SBnktB9&s=*^1f2rhLL~G63#GNZljb-BQS7i3@DQJj< zplxMTRPkk<-WvmuY*Kml&_3CSLNrEta;aNB>*td}a72WNiwpOg5<=zoOY+@N=L)Yg z2qT=4c_I6z8)qNz2+)chg}Qcf{Q@EX;q;7-3O*n|CYbk%Y#R`+f$`UB>baJIu75;yG|JJe&ux5;cxeK}gK&*Yu^M;Qds$P>+Nk8RC<3rqZC_Z@ST% zSX#P$5a2h*cMrOL7R=?CsMS)Hxo2;na$~IhF#;?ju>PlC*aJigIu4w>b!L4A*EVfB zcI{-ZLL#O%M}wkbCD|`){c0neQtg*`wRrgwP)8tM zVttIS!t<2pl>hwgy&=Y8iipCOr7D3I6Y7d2*kL7ELn4eeA#q#(w|(yqLc z6p$GhE;tqTziQkjWPiwDS6N(Rn)RenBYspFsh`M&!sv=f`I}T@GbGIYb4f_>L+ruH z0S7xQs+(}k-Ds^JQ@b|dgPkm`e{N3qdy9ISW2KI5B9%Hd*!|-DqrJlL@Vbj;EL{dR z&0H{xLG}20&>u@?`MUbgD6W^GTl6rK#Y=$>|qT&JWuvAuqov5Y&SyHpK(fqHv=Y-tfW7I0| znasc~2!~Xj913PyH(}i@v(~P{9nR3)A6+wH+inQ!H(qJ^qSXf!{=-8*TZE;Y{}%EV zl@lZ6g|PSWA?*D>%}}jtFLEXi+@V_vj9_{B_{?+BQn=drDu((%)kRooQS&IGZtcfw zj0+!(Zt@4;!O(709Q}H-L$E6R$n{rFg9NQxgJkHjgCbu){V_wX}8Peng5CX2aDz2`BYOx z%3My{)0yjO27K7j{q%+_O(r9tZk8pj9*idliHZ`CuGB*xY2)5yLY1cKzWJSppZel8 z`2Fd;s6~zEs%|HYoF3H<0z=Md5BL4dFVD#_fL<@3zGfC*$S9DDEMRyL(7oDpdh9a3 z0cr0&s}q>_IDimzc*PXSZz1N-sXuhTzGH6Ex4e84 zsc>(7$=2eJ9p!@LIpt~M=DQ7<4yiXBcv~sGk|M}m9<$D+6PXr1~Pq$xLNmJ zzNj0YmnQgiP^-R{Fz0#wyZ?3`D~kAMTJ-X|=lwhXz(Cof@|L?U!GK#eMIM;;mTMaG z6z%6TOW*?9tLU0LZ9qWYCN4Z!%0htt&aO*}F=lnY=jh9sO?iA$(F zD#wvbJ!R{>8|OyErxaI`6plYCnV-vckh+SV`d|dK@F!%BxWc{x-U3tmsZmcBLPtax zN|nk4oW8(>R{T*874Wx&T%h?3;{9Rrs2fTC^)k6EqO>3*hKS&d-`=*`42}KLOS@B<4 z;itPQJ$g-VHX!%&{weBTXJXCokLmiPST($SC3cFA{upvZ@$gxi!rDKGJ+9~ z)CCrcM&6HZdT~GfSd%|f9RRcbAzVh##Sp{!CZ)F`AT$`YJGEEJ{iQ9&;sd~8%q(8; zH14lRMq#U3+?N-gD>OOn-N3DQ@ZVxg_$Dv$dsI_LJ)LJCj~h7UXsLBOKjTrWpkvUor=K=3-X+6jULR_nH7-~G!_^Humq1BhuMLp;1sJ!C##e>hsc?X~i_ zpOUnOk_R~d?MBd&qa#$GR_EWh4xWc+z)N3aJzkuIrKmI%V{@iNsDhV6eyM#=MTQu8 zg9U`(%&sl>L9F*K3rIaL58{MPO=%tTG1uk5pu3ByvuZUq5Aglo;c*uLTSYV@y5p$Y zQGNcB)7}*JTrFF*5>|gEVXi_KTLx~amZNa>-nvU-H9jBEe2c=c!M%^#vJbj08A&XbRg37~pm{>=8o8CfRaJ^S@F 
zqx+MykkAYv@<3X2WSmvHY>ZlSE7VU92IA9QW@rjCQ(TTo2x4&Yq&`NCg;~_R!PQd? z0t|mGR;J+l&uVzXM{lfY3#Jh^FGEB8cDJvB`Y8CQSN2t{7pFr@NEbjCPn`{a439DH z`>i{q@z%)OTiAH@_)W08Rd<5A4nxQ>aP(<29C*v0cPpr!Ow;T!0sr!7OEmGzIFbj) zdHpx7`La9s9%ioW9^~_w?(KwtiHX@dOoYlert49B)0%{Vu`IFU#M*kdgyiydP;Tl$ z_}TEE8BLl0Os{;<&Bn%@Gfm|skx6D%ovQt$pmD#eIHO*7%U2>&z~$Nrj4c}QA1qD0 zemvj)x^O-%$XzOSZ$;!F@~ROAi2PvtQr`JK0+MYbMNS7gP5@oHBlA7qllKAE#)6=* z&i>Ps{4EjKk+1-qUkIwuI*~K{r(THnpZnYl@#*n0aY$xalbY?P!=KvVOjl{$g03{@ndfOJ(pj#N}?hOlWwzU4ITs zDEVjrno_4GNr#6BzrA7aRjSvLrcGli_x(!Ty)pO3?v~gEcsWBx!09{VGqm^py?)C* zL16(1uutFJlE5pCC^=G533IyM-5=-}8PBbp#}>n=H}&2&Y!xG^=dL!LegN2Bmfx}3x-PZhyu$^AiSRqBI`kcP(ynoklX6CKG1YX|b zJo9=$+4^Zf?JQW$P4)yBaH5*alOYVe$BnG+&Hp)Nz)m+FXz*y7>fvb@hGYoHYAEJ? zA>iasBMiLwU*~yH=HLpv5z#p!Qs#Jie9WKuZT6H&2ExP? z#GSXW-)s@G648xof^oRh5fM5)fQUWxHE{4d(IctK*PTzhllVb+H8ph`OGixoGey(U zF)`n2ExmgpiI-6ot%OgXFHZNPlVbvYED`8DUWB#YkEHRD)<5hyfgT+JYT+~gd8xzs z({;1l`HH=gTmAiT25J9-pRF0de-FUT*wq!BfXH1owEiA46`v)P?PU9zs(Dd?Ss0TH z0ZJ4J-7SuW4(Bc1_iFt~zEj24al7<%QvvtZ=?id6wzH`^c2ySqn-4|MS>m!}Gs2?M zl#2?BJMN|*zkf&I8>VtK-4nVYK5pZ?7`u2#-@JZ|UFR2r{)Lg*z@)Db9$C1*QI6*( zX-(x6{-J|)h!J6H!A<`_WY%l9L_M^4phyy9w>su|ORVN4fUU44 zWJo?%!i)r7#q`b3rqTm6lX5d8vSA<$#+ zITx8IKf%^PKQf)}f_k}Dq!g*UaM;)CZK3 zI<|{Cchz-*z1x7%RnB0)v}UJ`{FDl`5svM&s7)N_{PH_19ukiM<|a5ntVg@sy${WmwwmO{y=d-B?ni6w?_Px;`QGlBam1Hd zUT-v4aOPy^J~nsO_XKXa$vNEOBh<+IY`G14+|nb|hyzc5*4UMRrwbi!S?Eq+2S|{q z!D7(;n3)Y;Z_(N)|JeXw8#LkZs0WmF(Au)i7iAhQp9VWgZD!I#k%@6g0vE@Rg|}ic9i{9dCHe$lOl7fL5SeSO2Dzp&ME}B~}dcsUtVl4_4X-_N|yO zsHxmRf1idF>QmWf*;O_OuHVT-5&6l^RA{zUU}+bH#?En-Vve3munohKv)&tK(!j`S z+%sG&(Q;SVw)Lg$W5m2Eg&>n)hhofZ!ssTqUE*kHg4}t3&?(BX{ZCwc{N&r^u!_QF zf$N@cx7W+Phez}J?xYCk2pTE%1UGBWv|g^;qukbq5{G{0TXJJ6H!?CZy~EFLVxEu3 z86^D=zx9hNt{zxDl$FG}DK3eO4%&+7>HYL!L$6QJr0)aMed1z#0uUSyqLJ%|17czF zwyJbnV3tDj#t2rRK#zQ(q+7S75;gXo^w(dRq*~|a!62G~ezJGmNUS2%$wE^o$#_&j zsNwT1ETp5Nr4FV2G@IFyoxQqk_=3vd6?9BY%)w06w_7L^Mp-$uALJ%|($G`r@MvgK z`!(;+?Zrv2adG9aaAC-yk`d_I7Y?U*6i{r(*C>+3W?yTIiBT5wXE_e?KZy_B*qWPz 
z&bvL7ps9ra`IO0a(1`#?$>tMc6nuX|RC-7*lWixl&5B;=i#kecQbE`2lnz>1%07E5 zV&8I}v;|WE;XDC-_N?4W%nX5c=-J(%jf>jn)3oKdSK|thmukUcyXu14%GCqsenf5e zVJH)bxwlGrWCU3s2UbfuVxxs3oR}IcU%%fzyuS!&w$KUxm^xVK*oZ?Idc+ufB=mcb zd7{8s{{xBl?}YZ8%8BPBpEgf>o9EW3Vu~o}>)Cnz&TxEd3synHhgN4H-{IZ9Eo!8* z9W^vcUfx%yEmv7h>lfaxuP*jHBTGxcx5F3Ds{LNhH_zlk-Z14YTUt7fyHT@zl1z{^?%%!_?F2hW1eS|Cdfi?X9WkSVtoE1N!s#qm|;L z;)mROweg1I@?j0n{F|{aQs0z`VtoJlW9)d~5BtG{xu9)bdKY`B7 z3WEl8cIkq?av%?Lskq~(TJ%f0GPvQ$`xwAB#tDo2i*ctNw=fxBCA_K#N>S|Z_uu|j zV2^PxrZ%Cx^?0Ej;BpiOlSO2aVhqtnDB#*xzlQRc3iwgDtCRY_Z?w0(ODU>6-J!hf zk0KaPyhcow_@8Jm$MJ)|!J%#S3$+rd73c-)uig?mZhC=s_h0hTx8H!jI6m>{kMxCk zd!mjD+;d$Hfg`qJ9~+(mOQMqBEpQEjp-*`LHFjR5Wex^aRl9pG8K;giFb;=QGrG{ zcQdIf8|DYqPrV;BE=5SLafen>-cjepx#eiu+J-D~X!)_dgT-V)@db*7z^!CirKR$v z+Qaf#Trkv7`0#W}49aca;%RZ{)xpe>Zn2 z_3!4!B{I=0Mp&lvN0}Vw z`EZbsGPN-%JrG53Ihu>GTj-W^2=!i}_d32j_{V?2PqyGB>Nut2Y-9C}3q4b;++L*2 z+UD8c!Q2RhuB?!YN;Bh#-hJ?W%<$N9-MKu9^1Kf{yCAISHH>)-Ypgy`#LYlDF4%tw z47#joU`7~_@Y!n8xiX6|%h!2Fv}!j6jkFQ)Zr*5~hKJ;Wgcvtuavx26ztn`|>QI&$ zXG9}HKzp|hCT7dcQl}_&nhAgt%Xw2H)f@ndutq6Poj!yBjMlD^3JwUsP-seBE{3es z{u~MWM_;5nj$y+wP3X!q9%Q+1=2#C{Ymh`|e>qvb?UPb^iJ)Af72xqs2^L+VO?FY+hMQi26H!60CGbIxJnqu%@%VF{MRoy08uN>MEg_TR(YC z^X`wc^K4B1Ux%M{LCIQwF(>#PPZ zXiU^-A9LV`b14*wBF=z9J9ZcX$*Zl4-8?+g(85g1de{7>qW9k2k>w#hG=#m%wsfL( zw8$6_ahEVj_^(eGo*ltU@-ugi#O31%c|KH|e@(wMKj9CUALLoM zMm^oigk~&H zC|A5n1QKG-q{Q&*CjMZ9gj2b^k|&Ovitg{awM|j=-B1uzrlF!Uk3OHKNbBW&^Ua2v z;;dc$Il|AcjFyFe#kRF>E=-PD=-Gcsim<(g+Q~3+ZYJ=lASP4VLVxZd8Hn1x_^&;-nCTzj4+os40&kx}gD(o9`YVOzTM5 z`$+n(Mk!De)6>Vgl5tS*`a@xgOP;qv&PqqU*D|@z^BIjJ)06F6sGPao{xP>5;$BA- zTV0XMr*I_xWM%z};XbKsGLyT-a9khiZ*1nT)wD9v>raT6OH~ z?_s&(N+U(VQ0EUdL%&6n9cCJN1_+|_*JkpF+RZ+a)OpntqFl8p=Uc`Z12F``=k%Uz zh(gy?%9kiWlpbq@Kcp9UsMiolR!jkK>so$;D_a-ZySCW&Cx{Ik}(wD^RC|KS7{M~h^9V82ND{A%)`uBfO`PZ45>p;vNq-Hv*k$gz|k@D+R*`sneqkbh^p z%q>&p$c|AEfDGvK&$km;cx)~WLy9r;>(3j+rMk_pfDpTLw7m_cs;cVH)Z6Qc9=Lr1 zS1UY)m{ib%ilpJRs>4*f&Z_Uk`GNQB=`3u0%@&PDG0EeWan0qT-Q_-Q9_I9_EwiCK z5$}c;$`uepVgXJWhDA_WcQ&)(K-}UL6(G 
ztJ~cS>z*41r|i1EXT0=inyv}ABt1u1OGLwqJIr5m$yoTQECHtu_ze3{RnA#G3kBGTz8L1hTtt#Mkz-L6pyw*<@^oSzrd3KFte6E=KY3I{ggk- zxr#B6!Nd{X4}wxHHx(Z)13!G!j0!s)!b)3tO-K3-*Y%E@@ zN=#Cki=7?rt;}@gN7Q|fGQuQ)1a^xgEX89dnO{;CT#DK4yExtnXopofi9ij7qSf|+ zM1NF2o3BoqYP`xe(mg!4+_Ubyv#twkzP1$mQNf6s-LQre?}}OioKmaVA-0M-G9ZXy zq`M&cNGWW)YVrD$;8z(S7}K8GYb=EriVtrF`TsQwrILRw?S*>Z0t1}UKx1o95O0~I zt$()N$ry8IP!uz=JlL+06};q|z3mqGrWQ_cBaD-hj7T6JoLjT@xxt5e{kjbzZ4F8$9Z?PxU3$8nSC5WKFCuZjs!h!N`7?XbRnXm50e@JZxdTP|?zDUU9 zkWAl;-#ZkbB7W>MA?GBQO{4YJ%!)p$&n`Y5FglA@oIr$A)3JV8m^#G?9Kz-N8Tp#m z5MuyY`2{(aZ9VdpuOSFPq9JwZCuF6rMn3%CgSzt%0&pSkp=z{U%@~t#(P#jV9fa1b z9a#1dI**Ecs(N-##ON{v#jYaYK6!=P^mK_%_9!C9QZ~WUO}X_xS$JIba2?l_hY>md zv9%NM+(e4A)F>xrB%j!Qfg8#?)Ff47DbJp;zM+FIxnkzrnKCr?b*}Fn7t~ZMZj^_? z)-z2yNYQ1zj*N6#+eJm@2P=&XoL>gSJX?8~RYQ-oW_P&}27uta>4Aec0p8vpUGQ(* zuX>yRfiI@L27NZ+m&3d9A(?I;TY=~aa!Ccpqa}e`@UTK5U6EkIjP{jA=uuf{Vw^VM z-5tYllR&j^0RR1G1(Hq(!})GrivX;A^-HF$!UG;~80eQhN z5c#psG~jJg#oeX5$NgAfM~{liWWzz5tu&aRK-(Wpz6+NeOF7rE!sX3%sZ_g&tL-P@ z6(mJx1NhU#leSD8lUH(e#?{gD0<(@&RBL&r zQpmglgoE@z;jFn1+xYkXZ|;VA)gO3KX#QJw=i;~Nuzs|{wgN66SK?qJfu~0o3Lv

iNI~DOa(yZoP=JY{}cJ1$o$d0yQ&ZJ@?pInlv48$_YAZ7n$i_8 zaxLGqLU`93CD!lJ9C;~OH^NEq+#S8)$HTM0(3&`c_%Bw<_A6e2tmo&Hr)$3GtyfhTHqBO?qY2nq**Yg9O znVg6N?~{aBmsnDFTU7}?OYRl?+kJZ*=Z3H-ky z;lJ>&hW76v>HlbZ_|dKWdhDCB^;M&v^{tP;2m^G8(cU|T*h`3dMB(1h_~rn1Rp`en{WxJ8;ON2ihq%W8 zGyLhwxugP&sV3L}JHmLeS@3Vxh-yObueBvD+So-?kSqYpG|e*R*%$p<=kj`f`6je@ z79nBmr|y03mS3|}Nk#xD{YqNLBQMIQF!0WON8(vK=oI&qS8`&S3+oT8g~DWs6voqG z!rgV;_6g)y`>M<6fr4^)uBl6LzDEit{QI|l(pd9#{ONLqy#|_=@t+H0qO1(kZuH| zJCyG35|HjjQt6J1=AuKI>st%dy}x(+ynmke*Y@zfk8AHW*F47@bIfth)w5|MthJBR z8aBZuFHzdi@7rEBs$(vYD<{hJZ~@<5nxW?BMn=c-!71ZTaO8OGvuG{17cT;!B17{g zSl>l}U*Of4jziWH}@Dn@IbRcbHfn_PtZXJtxn8@+MUdWcoqc2A$@D?`?n?R({g zIuir)_HO0)1)jY80-SvvG%(m&W^mdTwKFe2qp{f&QiW<%PxLPqN|WHGn>E(gpfGTi zl{pYkIxV}qnKiG@Da0gn=X!$`i0&RxYV@9RN?OjOdF!iylw_txKg2(@rLE^~s!4?= zQgP9t&U!Yqr1;C5nv6~qOP+eE04KvVtZBuaf|d$f=O0Vq400^Mnn0^jZ|PW10H>{o z{iVABCR3AOt1V{*$oUNRF8GPlKnQx&NnZh9*t+@u0!q^_Ah{MfY%S@LrJx(s{jYT5 z!g_5vRRblUxlcB}|BATM~oxjX4a}SR?4HIEb z9xicTEBj&mfd~;rvkdkRT8>;7D%k1xhQ(7FQb~?%uWZHDY}gHW!UM-?d}rScUk0K( zS^n5zZCWiepRxrH%^k1?bfhf;TvtUYOCJTGe1)nj8DH6h5~Bk9mA@H(UZ#{+VxlH( zuU)EjHecen3}Z=QPUuHs8Kop$(eAOYZHGunS|L?``)^dG= zLLgt9pe4(_;Qusp#$w>p0ggMkW&3X`<+axO3ga3?&<=@{{h8f($uml6AZG!}r%G1> zSLmI-WK;}yqxo1df9#pfkbihBu=^A(|F?XQ#A!K; zIk=l5>*Nuq_ifv%Ag&TQQ#f*vl4t2qV~vBIxQKLHOb+4vVwvl$s_ZlM0x@7qYUWcE z&*vb)pnCG4HR3an7*BdyQYe52SKo(Y;{MfNqe{vEQKqCvZ6*IhJ$S&XQX2?ez+3>@ zh%(IZ2gDwFvk2z4JXh9G(-WaFTm8Tw8|~fd34BLU%_+_F_sIO!?(=VR@c@IuecB${ zA8QAJ4(|!z2ZV3<2er6w8ABG3L6B z3Y!47-pY~F_wlvH2`EJLTh;mGwvOn7?z2o}*ohXK#;H*waw#jX5O^>?0Wu4@?hi|V zB+@?}D!l{zhmX=*|Do^r`BchPsYs52m8lzA!8W4Dgo8L7>9vnpnZ$zM5ADA!Q2v4w ziO4h9o`(Rim+n$VnsBgCe}2jI<_RdXls-ijyV27zTEpXjVC43+&Qkz~Mt!h_@#&vn zOkD>9SiTYz&=i`Sc+b9oyl*}HtB|L;97#?G)V^OpHodXdD5EEMsHjT-_DwlrbZS%6 z2s=Wh*HIhCu4u})re>0ri&;wOIxQ?V5F)4}*&n9OuvOZT&0`Nb6^4yP(sMs_*zv!; zHu;TM<|)bEVJwQ7W?&xSInPlWIC^mS+xh4x2=^AiE)DIP&$lCYdqw|q_}q##Lq zH)zY>D}>2dprUXpQ2wAn#L==m;!au>gT69;gecInu!*MPXGH*+A+C&jmy!dB>t{T_ z1yPWL4liq4d@$HSE+&ChG@y(K^t5o{c^2mY*Bn 
zDUqXjxCQ()7gAa1?sH%{lh8!iTGk5SEA&nVm6uh%Tqo=azV3vYJ|L(;I*R@LkPhyc zeeyFgF(BK~|)%k>@@MvqYk~md2*Fl~G!!hJYupQl>^J^5e}T>tg|jK|+Sc=Lse7>@rc_0-~TI zwsc3wK?bZGwvPA-8DMx+@O^F$Gg#}K;a(v>x5F8GoUm{*q1i8@7xCC@drg97M%}-1 zI~K1B4QJhK@R`peXQAjOe9f=C?(g#hc1QR|FCG=1bIny=DLdZ1Nh)1rujZ`~EH>A5 z=QIBjrg8fUKn1#7i*LL-=O;bv*>{$7%5|u_&#q~s&bL%DW?u^GIkSHw5!}YcXlFrP zUOk1}rRXQy&^{WaG9p`zWE)ZYO~O!Z!=J*P49Lc&l(lrbeVvXpv&szrZ%{G-D%cMV z9&&xbzcpUCs>3`y+JQ7PD!i6;?)#1NeZ$Kza}MLG2MYfD5HCqD3!uGE)caUCqHB}l79@6qbKF#r_fnWvFD3C}?k)Ro5%-vl|N23ZF+ zGXNIP6@5#-i2*?_h}2g5({I>OYehWLWwYdY%8fe4>w*U1dCg} zP9`rE5fND&^ra@}rKHU$%CKt;Ia(^7Wq-uE^vqEgp0)p((ECEgfraVl8=Ry7_#{;9 z*w*a5`20x`6n$>|zG3h-jmP&>2SQ7)LEFn|)v>gqSrYZJU_dmJ2bv39g1``2wMrxH z*eH5XoHmHng`_j?l?tq#c_L}3ueT3Mjp=G?$9_c)ZKT0lcacfC_^de5kW@lAHM%X3 zF4fRZ&~E$g+2gwga3DOsrSPGl2cCE^%Ew(4OslqbZKFXD9$ozLQ@lWURC?fZQ`D{* zqzAyLgJ#-}8Wx!?R|b4hJe(*CsD9YW*{C)eGISry<&LGaF7U&?@i1LY-v0QZ@vLmA zF`U@sSa(}fu~`*LW-YD|BMS2Kk1rpgN=rk079gFH0FG0dS$VSe zEsy)R1GnKhTM5?R>ZPIPO}m@ob&C7=+9ospORT(3+S%N?!CUL6zj-dz9LP_ zG7XwXvqn^1Mp6%c?*ec!mO&h+*d9D^Fot8AJs3iL6CXxC3oRCOlqc@5rANQt&}0+k zq770?V3PWOiWPm&v+rrU^g6J8vVNSOD2C?^zNP3{a&q`JlaSz|K?iW<=}h|9oUbe#==}ir2*xYs@)h zGr-!Mps-f9a;==frjW7DWsdvd_=0)PvIRR6XeQvOFIIon%sSvasymlrB#!)n_mt49 zoC7r}3_KX%47(^eDCny|mQ|*bre-LB9V|I7fcoi4jhM@}w%*{+|9W?zPT7y(Kx|p; z3r-pnm>YzDi^0EXwZ*?^HB|z09?@CQn$H@0G@S$A;)ea9VT{En8dZ25FMHF?U+@FhXdaW3dvH&N zeQbo*h$KjVKn~n$PBw&04{`b{p4AqS=H#^Asa#F{0YMi7g8*l~f)0pH{3ZK9M$jon zS1Tt1Z_-48-TAd5YV0(TKe6rz0t(v$GV?uFIb!bp7{ZC?10>Tpmz_2uraBRXlfcyG z-h?X(t@v_aETqO%5q*&f8vqD{JY3yKFexTzTO4}%futbFYs1$EiS%eUBSS2BR;oFJ zZulP{xHa{I+RP?-kv&m_56U(^-GirY$A?4{p``8Em3#irXbG~>|r5vB_X;6&`z2VMua z1$83kIjh49V zcfJJ;{9?6zOcVV@ocn)|lj?0pJ*06#04wlF(AX;#qgNhZP&$GKE46r zz(1hYd`s>jxLc$Hz zJ=27^>!QhT2Z!d14cO62ec$DtY;0`6%{B7W2tf)(tlL$@xT{|c=!juRC)rugiK{Z& z$9_nPkiNqV!UOu>A`u!F=gIr{`GOVK2!f(pIf3ff<}3BWI{Ptd`;gAvva=Lfyi8Ac z3oR**s1IM!+PNP#L!w~`UW=!^eiqKnM?K}UN2p0$YZVkw`b&`aY8 zeBITtTlndkNMC#A&PhyGO$7_bbPqQZLT|`}7myKb&*J>E*tZkjQ 
zM@jIxXhiEqYp>q5)OgTN?{*njdmxb;gI!j3+Fyv&3i6Qk4{Hm~hmx?DHtU>s6Okv*K<$URqZ19?pER3?GhS@YlB2}SZ80c_DdM3a#tG#~P9Khb(ryA2T74Q2p6HO_pJM`WNJWb)7#RY< z;q{>a;Sf2X4I>kT!^uHLMt|@uRI#J}1Bas^92PJXB|Ag$5Ai33-OS&TiSJA)_#Bg- zaatrkDV?T74-wIcR$?N}tiFX0uSvs(`hxX^b_YC)rG+^MAdheakxr}C^A@xy=1F~8mzFewJ*>5|ea zl4LATlVlFntq>C2EX6Ny20YbBwQMKxM#4fuDXY7v3ARIwFZ^M^dAdrT)-DpCo&I>F zX(s66lDJ2`5Oh;=v#5a}q<(m2Z9)WBUS19ueul&ELy3YLXCl+j+XCp8O-zAXJjLzJ zV$higaH@my7d)&7zN8?e4axNU&Qcp5r)HapC(frH>f)_GNX_-L#l34So*cKoMF(%{ z!d_8#s)lFR-|Y7D_W&o%yQ|AY1Qy49{KZIrAOzQp5*)BA)3&qg1K^uh*h6k1D*=Q*&B(?U7u1h2p3Pblqb9ys z3Nn!Xdwa6t3r7LlMOK%zBj4X}{F0=B3Ez%(C6~$v(gPF@n}BB0iScoohG-sl_$ynf zXG;cwMbMf(wgtZA&1UhP9h6@ENIWnX9R8LI)-oy&J0yFAL0eq){%6{PnD9`DIt~zq z4`QT;U0ZO58G^w_@u6e72YHD(#gR~kmq6>FDTey?H|B=}V?Nt;@wQu(;KR41F;WbH z?>|n4=`F~oqI2+3y|2;hdL|b_4zeYAOP*;=AE;&>#wTgavvyW89hbdhobii3$w&vF z?3K3doMp}OVx?)*v>&Ax38rh%CMpqa5ba%g23p)4jWeD+X-6xPn+(%Mq9GoOjPI^0 zzkj=UjOfHHiTwqx)Xbnrf>LF5NcVx17&H6 zA9n~A0ZsC|G-N=QVBva`#rgp88HxyS7Xu5aP`eh*AL>Y9@c=B7`2o;NDg*8PBSCMO zOvOComlDGCR|jroKZe)LPYeAlWq0tsi(I(;?yV*6I~&(V_!{D$=uk*JMZbYT-d z=y*}y76o;_AW(J#pSk!yfl?!u`7BdwDZ=&j_QPVTN<$N-(cWkLj^D`E!}|y(?$?Xx zkyFjORj$^w-%+a6Bg*T!4CGV>duwq@d|$AUV$&VLFMA4vqelk0Lz@byiG6bP9kk%- zDV6uyi*X0y!Yt>{^kL$DSLbSrkfPY zsU7+bZ92pP+DG@+ZQ*z^rt^vA-O*A0h>t|SbWH?-N2q4@4D%3wWL|+de3wM58qFWHBw%|j) zL_h13=Lzw}`)tmGXe$6?n!${IQ246m-H2qtsT0Vc1AuYO5+2sSgy-KQir?CFHw6%q z_>8xxva~=*GHDS#|JWoRx!(fwfi3|0>t4jmqQOQ`aAc7N63ij76m|%u=TT9jHtw4+Sn@K zBJsvA!bi(cW`6H($c|%#O(Qzp>PjGKL)Qp>5~SwnYKAer0-%faTS^CSMu9)fII-xz zDgyOp1y%;rx@X*Fd zpF1%1!l+l6BSTIZT4$HK^6*KMXSr%6gq`hN=%&(wu>s#TIusXcl?# z7gaj)T9w9B?@3=&Gq!?8n|2zI6+7oaGo)DQ>5v7fjs&b!5(uG5?-9Rj4&A4jQGFu6 zdGm6a2*83#hu<0*0OWgaceBA$+>?S?zMfz*HTOrpx#F7#KKM&{_w! 
zeI?bghg)b`&LE-tZ|l{z z*|$osAT{3& zn0*61RV(^UhD}BSZIxHp*vuB8C$VIao%->^>WR&D)f(NF?|PqV8f>Gj$|mEj19mT_ zoax>Bz_t?gzip+Hei9=O9uIgGAn0wlDyQ>fJ5!pMf`aLX-#Vy-wE^4dYiQQf(T{nh zgijsJ=>vpTZDU#g*D6aC)|fZ*(c)?rd66f1(Vb^+p9J$oQWcgZsd>#11&oMyFIecT zoz^qyIFT~R?pjYAISt2Gk~b8_*Ak1*&G^;SsMPyfyZwqRyZgtUWn8xS)5J7v-ldP) z2pdA3M|XcXs!#YF1$WGCcO$>s-p}KFQ~#d$1MdN!Wjh#(*MAL#RiA}Tk0=WF;Eg8z z4d(fHqAb{k%VN?S{zti?2+OdF6nuk|Gmf2k6rC#o{+gmeQU(C_=@+5thJ#wg1YO&S z-f!wu2C@fGjEe=?LoQ*^zu5yg%r5y{mBf<4+~0WlBGW&6yU^4eXSV7*u*q^e$4G7O zWZh5H3elC1;3Vvsr2j{&^pDx4xH}ixxAXGR{GGhtevu9Mh+s!;m2LLkR> zba<5|C72q;gSIn`^+&Jy4%Z0jXV>a}NPU3K9UK>)SP8c;A@XX7|QV`;z1vNTgJa@{@*`p;!SZQd0<6_;8rUfD)6Y1F;132Ps!*PNZ z#6Y*a+_DxU6{u8GcLiHb%1AScbix z!RqgwqS)t*$$})x*Kocd-gn~XnQ>uTh-$sa86lC&M(negFfV7fNx`~N>LNN5k7X#`Zv1LRh2Y(eq(LAR6y*LE zgNNZ5-u}^OFUK||jdeyMHJ)DD@chJyilzF&ga<4gbA1OX5xOST$75PklRUd4Y+~W? zfaW6B%>U*!@bt)m9buWk(*w?KMhtAg?_!pw!z8y3=0On!Y8L_sAStg=KmCEpsB4%c zFY3&0o1Zl7$3=6Bfe0hkKOwPIm*CN8IMsb|22`uZkJW+88PZ+&^ROAc@8Oo6;aG1e z%G3G~na-=1cZY}9@zuuI7WWk-=SbEF_@0I12l+_Xz7PP#spi`Qgrhe~t|ViG($2 z1BVAae147sgBvI=&1I0Je<O^JgC-u#`a5mz7w| zvC+2kUr3}?FFL1;eMhZIzYL|2fy`AEV9W~I4lezE|3LPewWKlWy#wEW)&tZwA@Kd@ zD51)8g2V~z%Ws3Vl?F?Y;*Z;d@|r;TK^H<8)nE{BzaTq)DK~lH8kw3h*lNW@4#QF2 z*r(QoHo*3*)fBOP|6h5t|DPyP*$YsmmUt+;wdIrXVB2hgRviT$ih0h@RZ zd^#g&b6P1NN{<39Q5+jhG`%2NRByB!ErVTmkrN}KHHe!boJ?YWYUlZN*X>c2#LR>e z?#e3ndhJZ;NqgvIqNWErsV08aM;(Zc#Syb%%X-Ln9!)K*2B0XAr^`n>?#{ZPSgyS- zH~Ma@!?oqL$$mWtqIg7nyM)TmN@TCL4hLn3rlF@kDXxj&%1-eaD)ez3Ln4$Z)8zcU z+oYHIe(&PPJD;x>yrrT0i>o{rNsu#bEY?#%jsfqh(1Uia^6ogE3_71T+re!iCB{15 zL;+!y4n%oqB+9qGC0T~LCo*Z7qPI~B=ck%onc*q8)!&IkY(qDom;j(q2FXWT49Ecq zW!Ag&Ac^?=z(YF*`EkpFPmm3Hx(8SSi5n6Sz6`8Ig*tP^8pM zN^Je!S-W^o$@0|922O^J!IqnoR8XVY$NR-O3X;mxvb(1!bS1hHm>bY#M{-*_!@l)j zMB>xO{RfdqVdq+yTeO|xmd4Y}e2JTi1`;)@ufbp z#OHPisj|P!}yUtE?a%v~%L$?|`%__z7K3Q5iR~tr z&{90Z%{G%V@i(GBz(DN(4KlCoAE-Nb$I$JU3$KisD~cw9Yg7&{v>tQN{CFLiSTfb2?NdmHf}JPxFB!_USw)Lq5|MyMyOpUXAmf-*F!F3 
z08ZHiKH-!9aRJeTE}$57wZ@;Z)+sjXWAsHR$r~eFydwIR0+c}L%2C$;!;EQFo4L=djX$)GewWDW?!iN_n2 z^$OsIM@#K7LsPTMvXSwNfgL-~H=ll;kC=}i$qVWBIV(rWQa;Q%bF;XA)2wbpKeG=- zsY5^WRvMVuU2`g++r8`J_mm5?;&=Q0SA$WA{Vi7WS*QRqyxn_RR{77LwviI-v;ima zW=@Ct>iec>^WqjW|9t@d^TY%G1%KeHms{5P@@G;3N)T-N6ZkeVW@Vs}1V7 z&lS~B`)OwMzFv3kP3wAy{WhrpV!?h$+V$9j{H>}gN^-HljQwOA#l;mh&r1i7;HruD z8kk8gy5?_JBi>PiS{%n;{D4pXEmG&6Ig#SJeNnKXea52xc~N-vBxv@DJ84p%i!SB8 zF+9<5#Q!yk?4>R8_4ZxJ9*`22tE1bkt<+*b$fPq8Ohkt@tYe?F&2_I}sVli1uC4p+ zWZ3B5Z?LX)VuTq*eYCRXukZA=-;NFK_KZxUtNgg9rz8u29#4PG4vI%(x_{8oXS#jfrLTc6-p4@#3PA;%; zJX2%9$;D7)&~0&Wa)BVMg_H$ME@r-F>dtQg)Z2-xWfi=N_)C9CxpRFeaK@OeWi1!X zJMX3GM3c6cX(K;KW9 z*0t}4AMQDZ1?S@h8%}+s_RmZX+$5IiadU1ptJLc;tW5M?-0>C%vU9q^uC_>>6?0@D zd3ydA-?Z$%ebX|3eABdVqL;ofJEx17DGJqGgp#UoI>WOTPWy9)!Ynzq5;5A3eiq#t zFbA8>J~f_>*;$}SRNCKhaAgL`Wk(*9fig%gGqHkNe;aBef5)In5^OGjypTE_K?1H< zki9sVZ@0}{uiPDGT1cOOz3x)s`XehuE3J8Hp zix!1xue$|xe7DbTiJrD8)e87{2Jnb!sHiO&;3IyuKve}twAb^j|5={k3=m zpLNs1-a&4H9;K9&NY9+F;$_bJyo&{P zn;)7A^@Ivb^-*>Lj=3 zk83c4zDz;GhB*SdWU7qcNOzWvI?rNsYZy;jnS+SCgtyf_HyYxt))x0_H+Ap#`9Dji z&YpZ(O1=5PLXoBg3-agBOOGVA7Gt4MDEu5uor{C%AXk9wyHU%kIQqQ1+a}V&_|KaT z^YFiv!L~brnF_^|N)I%MOBfGzCw6Hn_-%Bn3{l3;);_ipW)Mw;43FEpHj>hSz&bvg2)2%mt&mv|w zGP!6Op+C%1Y}E8Ck1z!G8LxlRU4eQYSADE?!gj`@KR8%I=e|V-<~kJpNU#|7$FwY> z$?`hA?NKoM!3qn++?u-@RN@o_{-RMw{M$L5YnwK87PVv7EURuBo)z7_@*SPL?|b;o7E=NBfx_xc7hC*@}RB01p1h|8nh)&1?Q z{>Rk7bzLN|#}Fmg{r$&iXSk01R|0;1T`J@*I5iD)Dg?Ojck`FHR9AXoB+=b=W3jVd ztMy+A0q-QBmDbM+R`dO$pukNuJ9TrOVU1$E;9%Y(gfYo2ulQ$u!<0qmk4;+YpS^FM z){iX2%W-E0?_=Nd|1VLJj1`gS6G&c|nGdka5&efV!^k_|#8PFp>9kE;0-iYO`>N|- z8%|oeH}T+VQHk1Z3G$lodqHSart7TI2cgl2mho;G2#pm1Od7u>$aZjy6`v^+^ba(e zT|*;Pk>=0u6d5*BqnU+tGoz6(=hG;u`NBiCPOIwa7^m~sHd|xTYMf^j4z=ng%`2)U zjdz*1Rd0o^eHBYn9~p04-4t&ybJ=8Dc_?=p>ev%(BP zs$bx`GJM}ac-a41h%>R!m83S2|I;u}QXKyztS+1Fu6`%9M(S;BEA^9oC-8}&U;@~jm|L5!30{5D6;eE_zV*y|>i-FYBoEiYiDP(Rb@n3Zecx%*TAPiDj zx{Tj(VLThWa-SngLXe&Zh)f);ab$cqve5*X4z$f%J8K{6U&5X5w&!9Py_CuHWFp!$z4!6+>+@MGVu6F`vyHGHnn5?i4K 
zSMMy?TSgw@_V)?`*O`qlWPsHKz+0__<8B0*WTH?!|)tiv+J?!v`UZ zb}9^H3Y+sC0yk%{@+BQQnw5dzFxt&zMgW2XD7Kh_;83U&bmM6U+J+{EYd9n?I*$K- zKinLjM8@~98m8nfA#Rr!?K4E+FCBf@nvy#~)~6p0r5optB!Z7>gz=2JDqyS?5N?(L ziyUrR54}ez1^0n#0;s7y~KpDk~lE2ZO?gi&3C!;ks)4c9H*~iJLD$ zn)noQR^3PtoYY#o@w9j_kr@?MWr@4NbCpe6t7fKlG)&+pX(K4IXqUKJ-m@){Q@FR# zH|OFVmnZm^Gu`U9fARqH8V;23cAq?bHQGNm?x#{LlK7mJ7FF}=eazK*`y?b?xZUB9 zulA61%17kd>jqeN|4$3YtJr^2<*$FJ^0fV6xlrRj8#Zk#|7_U22rQ2ZdA~IP^hvDR z{pR`r@)fG7uZ&ItZ~Xdigu%a2l3y8C0RIN(T~enQ@CVi0xZXc=x+bw-$pC{S56#<% zAl>J2$bIu8r?PO;b0A6cOmMw5>_;(k)}?C@t+MP}u5vd_U}Me*DR2)Novmc*EH`!O z$FIWp=F#)PJHD+y`lXeihu!zzlL)K97CI`j&DP!h1;d3tHy1BIAlHvPVqb-girw!7 zsnO^EQbd;SgnwzxFaK!GqcN<_+m~FrO;sd*QXvY~<24584d=$pv=m``Pm1Dymg>y^ zUkKq4fEH$FSkK+<0NzkfoN^HH3(o*l!$7z$jebx9cv;4+eoK!4-XEfS84VZ5_dYZl zWsAvS4!%;KB=sEF`c{-Dpr%W|mG&CiQ1hzJc5Ez%TZuB^Ye$sbs^PC6e8l4Z<%q=jc02^~Kv}!ffeDhNgWbv#}RD6KAJII)nN& zcPRppnUY^w%97Ct8QO=IGid}ymT{sHx zCHVo{4cIwe!~K%oIp2l-vtM-P8ynD;U+8=16czBSmy~XY&E{iUQU4T@$!2G1OPs^g zZO1!hSCX69CIgnS9Ah!w!Um%YfiNA~0S21-8S$oD|CbTrA5d1k24&AG-i4ZGe1})) z>ZR3#%d&fESC`OGY3HtIZ-i^6V_fGPY`XmLtS2_Cn7EaN)ijUW?ViSe3Y}DO6n@@{ zz3O!n_9)7xB4|hS=<30pQDo#2%$S1~ts{!FB&Q)Sm}?E5n_F?$$*ucw*D@}97RFe5 zx~>JxB{a>g{}u~)ec?@`jd2-=?dCYL+trZxYd;BWKb;ZiO^EvAsVx`LMHai%w<*>CCohNsD5hoO zru6=KZ&D5HU5Hwus5u z013}m?Zl#w0JX{K)*a11OFqIr%3XuQqaxUncdq*@#b#UqBPh+}hg-~~%JY0eejDF% zMXNS)zokv2T1-AELJZvZx_sa&c05$+dd$)-PAfTCweN-eYa+!DhlK@LoIXx%MKL}< z@X45dy#~ziYfiAn za%!Kfj!YKStYFckIf}5f1-EQdK8i5iJA$(GOqXIT>x*aM>AcE{2>bqqRT72mSwxVTDRk6 zg9imOfby@&=qp|=^|Dhbh&vlxGC6XBwq4u;QFa{Hboer}m{~QuO{sN@3b^k*)7GxL zwk4TT4lLaNMi1XDS{ChZT+ouUp}q|=;CH)t^4TP7 zOOxMb9(KtzC;yOi#x0r%1v-6#wKU`G=~%k!4)3EaGEKj+gTvGX?~j@Xp?w7v6}Y^~ zjSFM8vRsg?o*IAR!{YX=*&4!39IG=OfZT^t+Ak#zL(bG2Ye^S7B;TgLH#HT~*S`bc zQ3_=d-`Va^S06FCnSoIY^8inXH%g;SXt1Bfd~7z%V>79K_*mun3FcD~jm!P$%K19$ z>~~2AT}jRLXLD||0#;Utn{%38M6Q-w$cXh9J8Yi@pHMPht*F9URm~f59g6Gx@Mf0g z9JX9+O*@>;AtPfN94iM=v%vl*e>A?wx2pkq6kW^P{EF@OHqZy}W!isDDkzdte&BCD z7~I19*S5)uKiekJzd8fATc_#~P@CGoGg{?r$ 
z9*WN6PUh*Go>uyBOwD%CPN5;^A-16BwyyVO)7Y7pIFR=N`fsj)-$qHQ4 z=J%Dbja`O~IXsEl2V=0!=nW|FHiN_FkBJW)#mxa$BIKL=>Eq3ckYug&c&jZWz2%HM zcAC3agn+Fd%fY+@FQf}8D4rEVz3h_j!2!z&<4&Lac&r-)TX2$-57=TQk+52Z1yDYP zJSyvVCUAvMihTW@=gv@}K6`k+yYOMM2qn6UyiZS!i1X;`ve~iGsok-G#NhjcEb{u1 z5{Q13?U%3Tk$|5NQQF1%J!-p!+$98%#8-BfmxGAZzy$g;f3b*~`AcEZ`x>>22mIf3 zAdua~#y0{nMrqw4(A!UIG=cR-M*`1gPcJ_>aI@5euHOE`32v861H*XI#SAgxVq_0l zLX$B+mez&H>E7(SDJIm>eiys18AV@cpZeUJ-tQgNvNJolUUq$lp}Y#*WY+?$FJ=<- zTj{o|f_Jut)L2oko4}kEafXu555mFy*3?TEXC^yjWW1R9V+At8C)jpI2J_}_E(8SY zGheL7(57t;j`}uJ&0nu072EhIR``-MAkrx^pub@>n--e3U3J+wmxyOglS)o7;Hu}_ zJ4yb|XP8o>)o>Aj0^f;v-~n-BjHp6K?z&m-@&h~apKA$6U;bQ6u$tw`rmjSeMt%8e z=dl#(>3(74c!Sv-6PwX`>_CpolOiJF&j;h50O;aUNaP?APGM<3${BwR@M3sD$zwk0 z-c@Wq{M^Hon$ppnUHAi`fO7FSIxp1Bd^r^WAq-FibsJCOiIDNZB?&)-IFb`VSd=Y~ z&6fsY5%}$Ym97%7bm`%ash3Dj?^dEbQhS#Or;M%OR&hssRwer-I(jI7GE*%3Vn=ER zZgW-{z4J3t6W*yjPEJ**VQOA}%=*v|RW1cUf2U}eUS@io~2$0>Pri}r|q zi0v+(FSvkduMOjI@@(e?0fc(qcxhoXg?k~)*MLA&L2Ac5$?8owYmYr=mp4KlFRm9 zlZac@Dp?Q-i-Jo~Q@9kL$N7$n(B&pu<*eV<6bjsVpTnwd)r@pkk7_XAw7sDgcMKko z)v0YRulXf`{9Y~yOv=&0N@Ls`6maif0?b|?&c8Wr&Pit(5v#LjvDrYCvzZA_Di5B9V4)-TrP?x_)%z<9O}*NA83LfH6a;McAb<}C6$riS9INYF07`gI&0`!cZXUm@qS4! 
zE=tkK(nDaGx=y8S`J(|&4R&388`K4VXSVeGbd**>LBslP-(SaD3l(m$ZT~6owJ`i| zC!)A^wJu9Ol^zL<7kdqAIFo0w6h3=fjZuriGl_OA>q`j zd;U_cMoYWQ#0#Mhd4kM89MCyzPXq@T!CC?bAgR8vf`@oSM83ezR=(~J6l+c?TG zamhg#`3GoyJjKNACH3WNJLTsuGy%k zA@8Xr(=ANyNXosNyEDT%>yhA?+oc?q)jKlA{1HMd&wip%Ep=*Tr3p{&-p(i0fQ#3n8OBSVcx%X&U_fS1ooBZmc*^n8Xk zrjRNC`dDt5FU+*Zo@IVZ1nr3g)ktU)^RFQ(KPke?moK%SbHqz;Jl27QSiM#gI-GEd zeB|lzk?o3|2y=&1cKa3Y)2MkzM`yZim~&xmw!;>CA+WgW&O;2yS)C(_NKJJ9x{MiX zym0hS#Q|;VJ9dHXANoH_xH;~kJ1j@y^>b|NNbx$ICsu8Zq6=1>Yq}k<;6JqS<5Fqa zZG>@lI_#$MK{BJ0T9aXiNLduwDMbx;?cJ`zyTkUm9UG?28je&@QFBz$08hS02rTR) z15l&J^e6g}RRRo9iWB`$mT2_d#MKdhHYx|mN?h5mPII`|?&+qG3JG!4lD|-> z(3y+ZU*lZ9&=QhQiDn>MaqYVR^B-|^|8A}tXNAl~C@WZ`&oD%tH zu^8eMc2c)Bgg*OGYcEN=t3M$Kx~-mLfJAc63nzn1M18g6 zoxCL^_%0u^UAKe5OZm$_*owh;2k~(m#Ks9VJR%gk#J^+AP{E`c#+<8E&!`}ju|jB$ z|2}(5>0BRnuBMp}wNj*fURTAy6%YG4=7y<0 z@KFhpqx<(APIlZB`tM^^zE-n5m4}o&M}(~HXAHP4gy$3%a;$z(l%kRl!-69D{oV!8 z)avG}=6Jf=SVuZl*%-`XQq;Q2cdV~J_nuS@F}{juF@$Tor4MTE*}8z?aZb2gu6YT$ zt!KD8Kb*_@;R2h~g2lV%^1+yT+5!s2{4sC{&RzT&2(-i`>&RW(DTHV{S>?XYe~%Kg z`D&-Ruja<5HJPBd^i)i+4Ix`dv9y-Z+gvdve`aOq`;(&c2Yl3H;6fqJXOCKspWF`h zheE7F-bG6^mmZx$>T?bRLl@hrzd)CD9E|TwMXW>4mNy=H`EV#c%IBT*ve)1a+797O ze!jBQ0Z!T+SneMNrBMrO^-<%7)QRlh4Q-gcz3H@$)Qvvstjh^Z1!}tc9(}k^ubNPMRT7K*#_nEy+BGsRopS2* zXW%}#+**SUH8A;!c&^U>`aQ;baHZ9ylkuysm?WAKbSkH`E756Ai!|78^VQK=%Y87v zQo-C%O;|iZ;Res^vaXROupLo}`y|@!C4)61MMFERo5w!Aw6p~WW#rs&paJz5How~K zIKX{Qado;TXckIVo8O~}z%@7FlYF&_SOxQKY}&l2e)*b@((7<4!jei#vTg3_EN1^C ze(s7Ejg+Bn2<9xjq}#kwIoo9Jwwf@tCED(O=ahi)&biq)U(vli2o)T*!$7HMp* zP)c`xYDP;P)R%%UzdMym6jaP>x)s3Hvu>~OJz>ULw}xP{HajBf_UuUxlfLsT9NON- zaCKHQopg8;%OP!pWh3@pA~IXrC550K($BAHCXG$?h?^TVdy=b7Lc1Yy&E=cBb$F8z zl2AzK#|ZPl*Vyu!pzQ&To-Br^=%S383X}R*!en!`yYnGfg5Fg#_?{mx!vn5Vbn28U3hU?h9 zM7T0PJQ0`Me*fw?*EKoorR7Za2}gB5SR)*76EsL6JBR^x3&zJRd}#hdX^|JEGj=aX zc)YXjZ7rP~d!oc^Nt;OqO4v43Zf~uL*lN?ob@Q;Tp6kvZGy&Zy597u|8s#`a4@TiS zKCzzY9uFt;|BtS(j;eCo`j(Oq>F#bx=@gU{kVd*eK)SoT8!0L2O-Z+;q=1yL>6Gq< 
z_t_Zte)pVjj5E%EXOF{r)|zwvV$Mm3QwB(D@D#J?I)j&(lV8Zd7TXSK_b|~nMg>85@sz5Od7EZ5*UbIK66%t7Brrt-O z#q4MAdir0COwEg)Z!!06K$d7g{%Jpe?FxLEVpcmS%MZ<^8C@S`6Lmg)UO>susU={0 zuYtNO-YDz-<6x*w*#bA=F(u@qF<^;>Z6ATqW{L#3xa7WG??>NYj|kp>W_)b;<9d^>b57gAOEx?eck7&#y@jr5nC}SQD(VcmY7nO-eW2iLh0m?6Rh_H+`ORECN(>t6 z1sR)dm3mP}SWl1mDDMx~I#m%WtB+J0TCst1ueLLio)U6 zs0~-~{o#ls*hvkwfDz;EzPdoxVBmX0m^Xm~Y#?+kv5QIA!XlrQbI36RmeDW@P9csH zSaqDZ()V*l4CaGhR0*2wR+l@uY@s|liDV^erbT`($=Vf}W#oeB-zxO>@= zBlg3}&N`E~bJA|fE18zFDP?li@FSg_!Rx`yVWH-$KDp1_=dd&$CbH007zjuN!N9uv zwaxy12JVrg`%a=9OjC*E>L&&F5(5Z-B^Sv>4`>ZU_B&v7EFAn22P*jE1%Y{^O+RR{ zDT_DEbA(jMC-pVW;M=swz`FHx)EIYVbh;T0T0RhNq=U4IBRlG^UI#L$42&4Yy1s2h zs~?a6V7%ej;JGvjJLe@EXW0NUKNu$ASH0EEF18o&tWcd=11&#KuV-dCxW5Pvj){x= zw0hH4klEfw;@zckhYQY1OpJ`4ze7Cq=M1>^K6#J)G~?cPKQ(GQKVQOj#8bC<)5WyS ztTF(gt8`SIuu18^4C1@uVvUU;>>ce5fB3K>DU%lV^XJONmt-N&GaD;m0K{-H0MoC% zAoF1t5??xsJegdlXd=TVpVV8M^G_RiV_Qf9(8>GtLgZXT$DrYED)R4N%UWul%yMI8 zZ`E5TR{;yp68!ID+6#g3K#I+k`4SolNs-;#bq*FJI%X;O;h+rPuGgk8(L%O{C&Rd?xm6Vk#|=DWh#2U|!x}i3AgEj&O6I;ub z{T9y;Z?|_B7{y&P?1{W3)gX0T1D>4L{nu!(z_-;VjF+ufKV&YBb-gwi&h5|S)A1Pj zq-rycyRQ@I=`W5+GsdnLNbWFoJZw98H?XhynPJxE zic3lYV!WnO3;MnD7w=B)bug@T=CAch!XxBuZ;6^cM4=lYy&Gz4jd~RW*8O5VJ4Uc+ z8P3dmZfgkUQO`pOlZV^be)_>&9LiY)#889M0>5qzw^`Kgc{QK5!I;qrZ;coJvoZs| z>RbEx%AHRhqj2PsU(2uh5O=)R3VX|`KfMmejP2p zEMCQ}T)_C&(<{^;?iowd9R+2bty@`oSaJtFuF-XxBf^Nl4QSXAiAQun9SL^}gPDf=6Q4 zT7XPQ8WnxNWDXlzPB;;Py(p})k-soLAAIk1`>gVcRCq-I6~+124Z%xq=TYzcqO28* zN7Rcq9zRq8fY^RIlhIy>0o@IX3-G=jnA8og?cVxn=Jcj!@6R0<01peh4_1>BN^7<4 z&s!VLj145-NJkNO?|a=JSKAZ2&dtt-M@5|{t>6CeT<9L79_T(2hN)i}p8W>C)j7|l zC0TexPuxdZQCDjTaLjk`ySoJyKZ`l*S5}-ujnn-Sri3oHP*+x^9EExA_Xl)4`x#Xu zyH2xc?*nNwT^v>~Q{nF~uD#I+w|72JoqPTS&5q8^IjkG2+yk9KNVq`UX@j`4%lp}i z^;E$+KHuj8V$))4g)~DN$C4Bx*Q{yb!q=WA&!+_EF>Nd`HXskD-OmUAz*tz9umvx8 zDdI(ELK3}F4PHqb`awXw6*^<>D)Z~k=80iypWU+;$|R+ocd~0F^1?c$h3wZyOkLEx zYO3!*?|WOPqO&U%!R~s|@coDm5Y`Jc(7QN7p;wy{lbIZX&CO3}(MOm_$f!{%TPnF0 zIs{<1gsu1r7ci5tOM{(}=i4Vb-j?OGpH@<+^zZ->_*6-n=I?lWJ=Wjxc;O&&O>#I? 
zi8@ruw&lzQQ|D#^C4a1HVd;AVef_!&Tnqt!_vz?8>PH(xYA0b8KVN75yeKcK%s=k3 zD@w97dU*IJ10T|8U{@n;szM{@?v84h1~W+LNm`}~HC&f+?vV{8^!Zzlnu^0o)`oB) z90+!`e%EP2g7VFlZ(CFUar&*|o3^#u8p`WT&nxVdA)K{K%KHmOE`jrtJ`4K|oJIbH zM%^3K34yBp9e72v`{DCFF@@3WYUid?9u{jZFOU0j@u=G_T49$Lm$x#y&G2g44dT3V zcQFfR9gdV6g`U9sea1#24}Ayex48dq@H{p}%&+1|3{LWEW?nk8F-L>4hoSMTSu?y@$|AZ9#+W&aeXHAPG5SvAd(! zud}j>sV2u~g5$swbYQiO!7Bz{T@m;cxmoi|!=;1NsUz3Kjf?|N}OKvqQ9QDy##3|G`-<@BPW>5R{|xu4vJ zHn+8TuYOn_c8otrJ^f0-!>Q)!Q#Rt8m?cd0!R%T>M?*WgslSJH*-b-(kL2Q340I%5 z+SY7%w60lBJaax|?Liuj!;C#pGJ81FBy6k0eVms^e{tvR?RRk@B^~VOnu9e-DF1ws z#ryWQFz=Kj-r8lIb8Rm%GtPRs>% z&N9AI1FJ?CSU=@9{9#;_(mA<~9xJ;uKEELxjq;`>@9FE1g&GMm#;s^rN0_Gf8TDa#Y22MTHMS2b0p%b zIcRi#I?~j;g1Dun#US7El=1-ix9I+jA8Lk3fQjk#4e=_e=ri*z8y<00+tft#u;4a9 zG31h@T~?vnm0Y^wig)N^DZSr*HqNP#HDXL@Q)hSI6&9K~I66koT`rmrz8n9<$dZ^y zZ0{JNyI)YhHwBC=rm|50{&1GC-hJH&IlO3U?p1v|jR*`2cMIGzE2b9~8OQ!KZSw7r zk2bV210!a>B2@AvW5!)3?i9W7E(b?e3)&-eJC4|_75W0x`Q7xsr@nOy@}ccIU43V( z?gD=OviB9;i>yMSK!5ARoR*?l4ykmGpFBqE@;!bvM`Ad1GT^UiFQV;|IwjRnUkahaZTA7&d6oH5}4`={C^VFyjZ&0fFbQj``Ncm)bD zuRdishKGYnyMInN`KEwBpQaF=`};e~Iwz}=_31d0B3aYz*D@Om>M&>JY&D1L;|XzF zu-YnbIXdvyaf@Q7`j;cPh-!_b#|Nx&Z*&^jfh-l3#sUP(6ZJ%x2sa{S!;X(t<#D|? 
z#1gm6Wzw+etB&)I#qg}V+Ib@&>8Ns>P;#bQHuOuRGmDa95~*q(x3l4jVN-XckRtEF zAP#|f&&>n}o3VH)dxuCHjKl&jvsKch>B`fQH_L@oHq1Z}+&Eg~_-hHlIKX|#Ni?AH zChJg_k))5sO?6nMC1>2r6ASx_{Tx|*vo`k_3SH@g9(C^809}ji`F3t_Komm%ZjF`>rtj|Z0 zla5=P?_O-Xxr^kGc?4$X!kTKiRcgPCfLPaKi@$0$%Zgaof{8E!(&65z+eB8cTrMu6 zF`#hjEo{$YVyt|6-M%wWKrX~9-9c~tW%gVgk)U7RjgV!7gAO>T$ImQ%@{Rvw{t;Dn zXP%&MZ$>HjK%{4Mh%zDgu1^Zc-EDK&qi^^}-%K(DTndt8B>TtvWI{*+z%t4IN7?j= zeaIB?^+L>ML_){ko&roN{n+QGQ-H*u>_u5RNK@&jr%_^EFCXcib9`%#Mdw`bBxU6T z!;ix;7N_j;=%>8UpEh6r(o))~ecxE%`PD~+%Ltk4;NfiscP?4-oOP&R9^MNj&@?+@ zFE^%?7EJjv-tWhtElv#Xq#8bCe_-33nh|m%&=2?m*vJ758Byk${MJb-quP!j#~?$d zj+8~nw=;*(HCdf&o=#I^k7`gW&KI$ANm^PZnDCX_rQxR8*5TVQtSQf+OiV%_;J=5# zilx7Z@R?k`x-errN;0N!R`?L7WUVF~wOjNe^upWveJo}to4qaHn5mrVrya{}1y!|J zTN_F!$gJKeKZ@Q1PTy}fPmYpIDZE{SFr>4FqK&qfM&iQit()34eNbmhK6jm8&TMHaqJ;11|C4_d&0s2+?6@BM))=+CZ+9+%8cZY z)Ek3$)?4C-Q7eUllX-l#qnT&~CgM&n!s?~o7qh?g&Vr$CiZa~qG0Y{7+lX!F1rQaFyd9GeOqEfXYo4pEBDf%!A{^}gzeFNcpT90$D z!CWbO8w@;3;7>7x?Hm8E_$>AipQW_2+`-S8THhxz7${cMhMqe{dn~QR9)I~lc~B`_ z>i3n2uP%CL4dxsX+0YEZWjS+*2Rup7ml{Ut|m4T`V`Xk2Nl+bvn2J$dmceZtkhqfzzF>z@= z+ik6qT+6Fm4A7m0f^%V0q|?EzLbHY9i&DR-e7%>@+1aXxVBHvin01rmvW;Y8+E2io zmEV`^?Dq>TZUVK{m%$0UY}Mt;s4iwE?3+XLkZE^r{T|0j z`t&2 zhx`>zZf;GlX(?}|E#z?Jp26_O|Aya4&@nJY16{bC2-eaL|lzyb;huPI4ESEd-(o2S|3;sX%WU3J=R^0qxVZ7cxxZtF4&pT z?_k;1@VUb*3L>`}EnaG#>4*W)hY;HHekUebz^F($-XFCo4 znzhni4U`v822D1kGcz;3UgV-8BeySAFot>=$uhONXckM`dg4vcT3)nsM_&;oK^QWO zY`_>F@#0)biyIhRt||g(v7mi+mNt4+{OWjgOD81LCLLbC50tXYe%(9+-`;jUQ+K(2 zP4y|9|8e&uZR6KAfZ`}*u{eEu2*_5*g0x(ZAnA&ZLg@e>>RGp=>uyXn88#qcX0LMa z@B|d+h-Za_*nY)~X``@MrQvL7>~H0c&TjqO{om)yiRo&u;HE>NIwzp{f^1=%GoeoJ z+CEq?>SfqAebLttxv=(={Lp@nqy1Ng-BQhG@}|y0(eNFJk|L&%%L_Ea{Pnw&={-pO z;q@fKoCI~A-fMI}xg!ep7W~$al=>UU$+Oju^DiYc@k@&Bujy#GR_Tw{k8BOFzj}K+ zV)D&4vhtXv_ZS2*;l=;jQ**!oHS=EENqd6}GU*lu2IjV3#PzMJz{!x-;q+}9?#N4< zZt(S}#j&MWI){D3RXOB{dw?4GpGREY*z%~z0I`h&j0K?+`23}GXvTS%TIfl?rrJtZ 
zt_BNP-?@{T%XrXM18vvm_f~i<)<7~pf95?5vWC_O+g?&o9n|2%u1Pgo+81v_udZ$J(T8G43l(#y3?>J+s+1LNE}?y*=l#x^(uk_BcB`YIc@d~kmsvyg?#<0S9rx`~#|^TYaPW(cTWKV2*RlB=PTIGG@7y*X?!Y^2B3 zW}qFO3DPC#!pIR`=Vpfa?g6};J~ z7SPkrbxX5Gab(QBPR`3ua`Z&6q2=`Z3uSDs5Y$NzN@)N9DA4WEKyd?a-Wdi1l^hBe z_A}NLEsgkc6=QWG;o_QjECPP@s`pJ=d`7WXgN(+`7NLwbR)#WL!vpc(cFFvD`@g9ATU#E8>uC zNm_$FC^C9iTvMz-^_?9N1}ie~3&64z%)b)|S@EFCVDY`try%x~RVH~AT96^$v)jG?cX;viBn-2nG%NewjwENdwOPNWJT&@UsjyN&RnI_I z&scVSZ_ZRI2OL8gAqMUz?lUQwd^&5?bF3s@tthC18%;FX>`p*XUQ@^YK?l+e3G+Qy z(BihSP5rP0sWGiXJ89J`mywbPtH7`j`m_8mCP+fs{?lD_)SF3-C`4{e$YqWN zf@xiF;>kmgRu?YeEKDQj@>IPFp=L9AsTwc&nuN6%pYKsX)j_`26HL@2Lukz9FO>ky z4#6r>>XQXYSH2e`=hF~az+Q_0x~U(9x++Mf;*@uNTj2L`))DeMf;sKCzY~L|Iv@f5 zSN1CxDgPF77zeZuxP$_ zBOvku&O`$ zzD3MIyIk2e>MYnCB5$$(O9l7L-r z2sM2BU30WYOfyRUtvU-H2rq5H(na<@sm8Ytq(*Ry>km{A%Dy?tk4_mw~~^M^D$FD$jUAH-#C}fn6lX# z7y`-IDuVB^IgUv<;?`xTy6w0noUd4^=;)hyzughyGo(0<<*RQ8XhCH#>Rw8j?E8z0 zta8d3T_w>wN76*;0k9_!^qE=Qt^T)ChW;htgltz9VIKHimn&wM@-sfO4)VHbowB_G zi=!vSyzyA1U>o@W}8ULsaG6sC)r&+Hyx2xZA(@FeU*yfLpfIdY;6MtbBH=Bac zXSH3m-8dJw^2}x3botnAV#=oT;#(a`Zt*C0GZq?mKD9k20gHy`5I^k z-sjke=z*AwcxsGj=4n4B0u2xNeyQOc0d$$!RHH1atn^^D@J{~X=B)2c98uAg6wXV^ zMo2G|tiZ%vSXO22qt3-^N>)^?=8#kt8AW>q^%wj#9G^P^;E(EA7^LgaevaFkSCGkIgQP0^(R8hGKN7&zhmm;z(-c3kJ}nM(^nf?9O}sLxASax`N@5R62?BZ>sOD!T{VDgA zbAQ_8(ss6OPLpSM^nFNUrx>4wC*rAXdJNQzZllK+6o^{Ddq0&yU*5iZO-8}+x-^RI ziBi={BC9*!wT+DuzIpR!g6@#|y^7qjV=B;<4HSjp7YPH`TU~dXs=Lp^ltz|Ri;lo2 zYKcn6{4cg!^8xI=(9(5tCpl6B5V@9Y_>jIbF4t1N?!K~1Bq46V_qU<-dzqKZ4Igah zzP!Ki&*Pz~>uo)uapQAmeg53+B0ZJtHzg90+0GLX7#MlykjX3BPNV9IPeP!WdA~lO zR7oYQA)xB~?0iR4_Ttj;l+w`c+V??L29)0at>_lmH<+a{GtSv18e<0Jim01Py5XsA zuVnjQt;UZVvT7nYvBmc=N<3HnY%oMnY8QlbU`Dk>z!?JW1h^A+j+G_ z#eRb!5`mHp>w0Nw5M13_p>oE&JQr)0jCD)p2^H6!6{(w>dmk&SH*yN+ujUdYX!2~; z6u(P_v6hM!fkv*1uBfIB5cY( zh9wLHriTK94CKAOM2K$5RfIF8d(qAH9lMS$kUQ;}`sEgHpGFFix-bHPF@J>qj@M7T zFfLVnz4p4SBJ%`{;6GJiG)Bx6iAnWLu08CRMMaI|qsF=Q5rkft^Vhi1-yOB;LWA^^ zz0W?f2eI6ZLfCy^^09U-+a@sJ^f9pC5eWhK`lp&EGqKNqSr({6|5z3t??@ByvLPS8 
za&D8_cM96I-0qk(ePWwaJVXnZSHFk3lm49ya)a|?UU$;Meny*GL;5n_b=rB9c9cMr zhw%38_Ygq?>Ud|h`co+@yRp#F*goCZz_0Jvq>%x$u;KkD#qUJ#&fdOGSN|=p`F`-7 zF%Wh00wCaX%@>O{$T_X0aQvApX9RK8-KUtcv@t38w0B;sjK`=bD7jlMj5?RsR^{ED zp^_q~kZ_kR9r1VgC>h!V*ZKOm(BM=isWuld!)n49%PA!ff1J0)`mPP z6>Bbm)J2+gY>y+5b+xDZd7>uTK~V6XZ12wjWAKQPNtV_U5f}&_C)^WL`(|g8pPbGl zRQCqMEe#0|5qC>=s(b6A6KL+{@;L7lm_dp-jDIWQsRm)b3^5P}I;pn58RH71z|o2`EoSj@CANAcfrYsj*Wg^~b<^ ztH=typ17hEd)FikOMq-MhBsF@amqLKcQ!mZ990FxW(gpVu5Ip(a}mu$iWC*A^dT|H z#V^1)>83n6zY+#AU$`B|NGi&P=d$3NnRyMOv*rCR;GD05#3P~+I~}uhkm=X^%Su{s zy)ht^XH9)38w^yM_}&nD>EEW8p8gB`8qy16MgTs`)t~>B?lj20Tce`2=wtUv(FoecJrs0j6Dba zV#GDa(R;hQ*!&q&q%5~4ILkR=!@`>w?tAWwc=JNvJVSE@j;kPnIS1f*{u!7bpmqZy zC9YEyjH`B$eW>uQiQ`gVFC5gss3F-4_QSZD9FYHzWvtT!>1T$|MFcZEdFqRhIeVMWNj7hn8=Eh; z`cZN9zI548FU*6yn(r=1K2JKCWeUSnLA5|QuPLCd|8IGb`@k*N)VD4sX<=yzd#*OU zaQ?}~)b*+OHqzzvyeXlxHa~vWJ*fOS=rwbIelb9eicwZ{GatF zg3xx|xba_C=Q{-YseM~pg^-`}s*PTP)z~Y{R1pp!%$FB682@=(q%6O>|{y=T$Gbs34VcI0SBFk}8QLC3NMj0_& zpL)Zeyw$G^e?M@KlJnEXU%v5LLCZN;PXJh%1L*^RMs|+(A%w&b+Wc3B^Ain_-h{zv zr{(8Jb*hUmC+N8;4sOWbabQ#&{J^L5-e(Las9d?EI61#EY2X_%9mE%aXj27<_8#)+ zG28^&_8Sy{*F9N&S70I>NP?r}jjOH}?rrrX22XpMCmNO?n{QmdxomQsvXFR5d%?QLT@v$)2ck%~&f(8n_&T|dE*B)W}MKmS-~ zV;TVbstKr3@)muVtbiXNgmMzH#p+Hg)~iFsj#LcrS;ULQUF^7U3Hzg zBmP5c95fOz7t*Mw!u4sL`imu^YSjFt0bBps)R+1F=J;=ssLmQCNA;5P=KZJ}x!^t{w^GUlRK*j=A}Ga9B{PSRb} zBj=3}&9(`dUr7wiuVjQ>{`7LGrzx2}I^XEq{P(~q67aJmvU8G;=i%D`YMFs60z!Q@ zEV=bZX6!LOwVneVS)TZNpzzel4z;P+UBh_QRgU_ zb94L9iyJ0v+X^cpIK~IEN?hA#$qK{9zdfbf@h~;;MzhhY9wrF5jX2LT0O@FFWa9LLjNNvbbkN3TgQ_3Z#A;Y>4pGxNMqP z_2a?%nU^gXaQ}Qwt)#Slit-f5hlDz=7sS><-3^lM<&e9)eZ-l;?7Cr9zH!kr<j30_<{AHj+Sn=+J}uAVMcHJx=X z`MVzgx|DPGhkrZc-(ofev#Vd6h>UjAW|BsjQqGesg}S-Rq7*I`xTV-^>9F8BwdI2- zh7$)KmcHgxARAhbt!s%x8Wqg2jfLi^SXFHWsRkkbH;6JNJq>e=#s*VpRr5@t(!p7e_H-*k#!WnE8z^qQ<<4-%E*`b@& zENQ+!G@0ASom!hiA63=zo{Ebl{Mh)z{qVarm*KaiZq_nsz|Gv^@GGtU#STylcvoByQpS!-Fn%Zdgm79mDI9?hM7b=dbJw4mZVmlxVp_+@Do;jgL(`ZK*gPR@jwN1vStE8 
zT1qHy$QU$TV0HR0i~{@{Is<~YS-MYeI8)WHmONCd8g>e2G{$}JJ)NtMD9QNK<%ZJ( zP8Z(hn5T=}U%Il~>+gWy1l_VEeOan#t4oXi`%7~aXqG^DuB~@uLba0>0o_oPAFrNW zpxVbIG6Bcjw$;dEfhT+=fvq$50~E0|!s;Ih0_SszQB7= z*mKt|XlAi>57+BIaT(M|VvDmsjiuRf&xPF`nom9j$k39X>Ak<-``}ki*d7K@Z~jaD z`C=j};~oa`x{S@2qqRst-jJkmO;E}X*F5vBkdu5z&s~TY&l7`6N7^H6SCyqd2e0%o$eh&z95mtjf zFr-tRmr)gQ5YOsF%6iY1e;)=czI$N~rS+8Zbu}l(i({znTggptbjKn(UIepBOgSVc@{vCyum|b(5=*1h?B$y92!bwyh0_xIN5C@qSh8-zaQ0vX}B;8aE>m1xXOVX-QX zt(f}c!l<9A`wPgDv*j3Xx{iP*_auej3`W|}ZY52toel_og+YdY* zOSM>R{vRy5^miDyx;DI*n#I5KG>4-UK-eUye_@lV2Z6H`-OF(Zh(uMu;Aw6_T7&z}`R_(7j8M4FAxWz-@+99?8pB65chBV-NL3rR;R232^LApR`R!H7?_<^|o8q zE;hR3lG|Rr1Q>qPzM>-T#H=p^ZF7;WLMtGjA&uW|J)Z)^zNqc0W4^jkOkkf89-a*s zmo_UUQCpj7W4}d{{^yKZ!`cOj0tf@c9j)EaZcLmZODBqxlR=V@4R zY{3*Dd^_Av`T(;^ZP|oI&WJjxe^0Z&AmvSOwS*}e+e9YoaFUTj^aC)hQt}tSsjoRw zEI4w+;DM}qI*zdk&cZ}^T1N*SG<`Poh63VFtu4R_c2;#2JOa*`9pQ3*=c*|wgatRf z>ZNlqz=)nbn})yH7N;wRSqT7^a{#+AqM`oWv2Zcw@YiCM^`craeN^E&Gyb8XdZCm?6-@#Y_9= z%fSHk{a9)@1M8f)yq+Wz%=nb4(&Lawq#l{JAvJms|Avm!lo#@JOH)_`1R8DvFyo)` zB6%Sp7-C1{N=txsYO#aml=!e{6T)^v5a-gcfd17D)=n)FD57U~puSS6-Ce|nxF4OIY&!Z7ln21!uATs+g& zDnpv6u!(uxHT3Plvv^!O$?9>`VZq9zLsT&7LyAw$N-Al4dkTNHa?v4v=vgYrO3rHW zJJ^5o5wWphWlBPODvmTqzFMPIUgo^Y)g|lwAu@#3bVcdB2ziBafj1)T3hG>bbWsw* z+XVuBYX8gIWf|)@x3pya0sK*0e+Qxi&gs8`R{nX%rNV{!Y`xN!ixmZdSL zck}NO^sCM{qkP*TAUqb0{hBSAV)6*8M~PBoAt^N6%|#t{bi;v0ae^q(OJB z^rZ$csOuEXR>`*G=ays|+uPJ;-4HZ)ZW6Jtd*&l>ov~6Pw7@ZM|M%!+vc(7GTUty0 zYg)ke{@a}5_)l16gi#q?_QEq)RxQ2yif%+!w;feJC2cf_;g3)6ua43-_cl8gKZ?bZ z^CJ9yA+QusuE*)ksZq1j2-B8awj4Rt4O@l_Sy4ORZP-3uGyas^_-WzI)Hw^^*n<<& zUkx!HaRUB||1COh6K4R6VInDmi1Zm1wac{+%xo5;sG)J%U7P~`Va~QG87D0iO?<_4 zckNoLSRsRD`$uq7#yUNygoy~4jZzIfTj3{6+~?R!{H-8`#~c=6hD&PhbB~`25|;h4#^~!LL?>ejmw~TutHfkv)KF zqH>f-N>s<~xKtzI9FF?2CAvMa-U^4&&}ayo(NJqi3%^r{HKgj+arzP(0Ek|RJh^Sa z7vHnS)))9)AOMrv76A={Y=zO*bFvtj>3-7^n*k#M^i!gis5i-*NSO>44vxH<46Z~- z*g204$6IKQlFS-<^ZkBr{KB-~{Jf5B9O`^t6D~vapYWlL7PHK!=8dmebai#j`kbc_ z`6zxQf2scxv)k~z+wn5L6Gnbw`B1H8cJ(!+-Z=S}JXk)sgjuR?DV?qXSnulb058 
zzRN&XSnjL;rgf&hD zH3eMdCX-IJzm>0Kd-Csy!O|j#TCK)!P2!ZCt6btNy}aRvcFO2Pz{l`!-O1xsVWqoCl2NY~Sa& zl!|M?XM)EfvV>JPR3aeI8|hzQ;axw|D-bo{&miX%M{I4Wk#Qh<_QK-!-7R%4`*O3f zQsS^zwm#0<;H`Dia-n)uYq`6~nxr9r`tsHE#o6Y>({pitBw|ZshF>NWUyn zuW$;&@WDc6eDK}&_Ta?8kDx>&cbvezOJQyl`C3+2LfXw=@}@rji+{D9+3&AJz+C`P zq?-nrp{X+L2~^jXP6@kZ-AlwOLQ+2x>zVz@o7wUI%mN4vsaFiI16OS(*xB+}Z` zpqOm=t|muI#AIX?#bZfr{&r6Va-rA$#g^2@PMg=OxAY@5L?-hi?1JJH3cXDhhD;x! zXPOWT+<2f1E(v~do<{QuPgt!=&c^4Vk|HIoa|SCp6IP)7eX9EA(~1goF7^~L(!b~V@yQZua{-p^{^AIMJWd#`l?Yd`>KnU8;n za8FL=t;?dB6}p zREsQ-+7ei53jd#~1F!-B#hn^Z+*dG7gP3qHkD#eW zLNJM^M%Fq_acKb$-?L}{utxoe9sYzX>|g#@c+`sU`HB@06B83o{AG&yMLQc0kDVAe zeTVhtbh{y$XXPfDQ2tE+wxxTcZLve};abQ3lkK?cPmjZlb}g|-i$^Ph)zzK%`%=d^ zI{Krm%}7~Xz;V)O)YFEcMI4>FY5Y7U8^MizZd(et4s3LBl)3N?NlEp+o3nj zxAkb`X~4w}9+4Z6Mblok2LvU04i(b(U!pcpbJar9=Oo*;fsX~I?q0d^{ohpBDVOb1 zz5zbxH&xeqt%ZvCRQz`;GgP(8X5B=m*sZR{IEe?H9A>kYP`>x`S1V@pq9TNl8CY# zd#(0D>6-2N^Qzeu)CVL^_ist@zmfO_1c~RbG|BO@NS-9eUZc~R_J&4PrcyRc5xwu3 z=a&Aqw`O$=98fuaG?ni;Kvmv(QFzEf?_uJV)n_JPhX+m#iVPZU$XX9^?()>bMS_suDG?)?lwjbT zw_LE1aNo;o$B_&dbISi2jnR;7&m7|?dfOTJ#|pL;D6?5B{dhVoShOdMz&E=Ewm{!n^|LWHeOK-e0RWZn~q*{6iJL=iZd*^odcrRQ*l*n)5pY1bGuNH{{LPmB~-Wx<{ zw!RH$q}hm!Djp_Y>ww(u55TyZ|K&@8+ucMb8(TNxu*6^c0Ks3Kk9s|XY$?&z7=teS z!@aIUt{t~a1UasBRo<~_l!AI8E-wN83P^n1x#GC4M28%7Y*vUd_fXI!7?ly|KB<&< z=&o@@jd(_ennlb(56zKmV$lDY$N1A(5;M^cMdq%)$EYKNOOe02E#B>Dt?q9YT}lD2 znAW($PN9&iBUR&kL|tPqHei!$1L782{c>%CvFJx>YFbQr_&Y$LFYV>zr^j-j$R^V|nBiJhCBt~sxbq!x1 z)^|nee^zx~zRcQrshf3}7NQOPSocrw`Vc}uKtLF_v2xhthasy7UrVkuGmjO^2Oy7ydZZ`h_bstzozpw?rR~CqY9R?`VTLG%M7tmIic#-ua zmMC9NF5js;fRT+!=y6oBISocEsI5k}OJF*CG!v%n*9#cPryH+rjW%qFJWA%&$Zd6q zA(~WY4xCxv17S)55T-_SuvZV6QAKs?2so{K+NzdlgGt3$1W7 zo*9IQunfRK`)atZ|BW7t`wztjIcTJrotRNbNG;#*c|^bGQzVmL_v@kN0+%Lf8-5-OeZWO-v!0GE0DM4iV0!@S-dDlY{RfL<-2BGk zzFsQ>Q0Sr&>NdN`RC)d3&qTt|t%W@iA`@v7H|DQ^*Y5g>+S!uVdLPAi!0*#!b8`nI!>;otQ2xI|Oo{rPC*=FAjkGk4{4<&eD4w{Ns z3)2{YbRsfsFkk}OA)wLF|CHABNE*xUa1 z7vCo0yA6TnpZa0fqex6q9cv;_G=^WK2VkQ82kD`<4s@VVib-m9bPQA5|Hk26-68gu 
zxVOfuUR_CVFmcKU#1zF#?Z!m1VA2Bc6!wi%{g)fBIagw-4!nSmwIClbBGv8*or^w2 zD;L!@$5)VJIBIxXq4uh_G~G(h!-&h=mJf8v+>lOQzEVMxVy7|$U+D? zTxmN5B57eWwu?mq-Wg73Wz>LutD|ZhM)eLx z+x@8`5!g}7b=X@H@EHIBtgD#A^lyPDi1sUVcnu}vk{g0F5D}-3HleVr?OKI70AI&V z2YyO}c`-wx6l%?>Lqw;6pPE+-VNJIz(2X0WpP3oI@|#%_AwxqzVkZZ-oT4e7sz*6B z5j$P`u-^?a8rm%EZhg^IBL{^?ZCjIC_Ijc!yE65hqm}|9?RBru#^fc>&wmd|fN#}V0^9+Ty*kI%APb@y9s>iX|DG;JJ=p`N6tYcPe=ptj zNxTSaIQZ~}r8XBd-4K@Cjo~+;n-cuJ`Mg!PiHvSfAj`*PX`=+vYF&UhLC^v_LpSa;ExWJyx767f?nG2D=k)(1W(O0jKC>8^l3a4lD=t zLXQrU=@S&Ds83n0>vdnJFY8*n{uX~PVOif$jwSCRmE^2s@^d`m(~*e^dm z|AwEH1rg6_1NHE5seuUr2`V+f52!`iPs1zdPKFV)5VdZ)W_leeF1rD`=;~8(MqwT` zL}MEp1X@D5mB2R3a|bh`EZCbQ;0?4rdkXVszC8wl&F@gLe{ri35I)hM%sGPPciOh^ zPE0HdmR=JR6}3G+Pp0If7cG3vVD#zHeM4+cW10OU`MD;1uC#sCmrpL((H=b-?hF(SKOGPkOc67Wr0rW!J0&0ze)#zhol;;uaN-*l@D*K+e*%0daB~T^)uaU9j zKahJRp&hUh9t_KLn}OaW~@1DVk6 zqxAn|KWnBvAp51l&-aLw--?Raks{({0!Q7MoPzilU2G;iNLXE`Bf@U+qS_hXIU|I@ zl~A3_$07}ZK^ljn3+3V=Djs^0w%Yp3^TML*d|m`BcMh~ySC)GjUvmG#_&FLh3XsC< zoEJ!Sqv$aUw`Zrk)}Zi;sLoHq12i0a3wd;E-o#jjOqbt%N8wn*sv%V~C+ELiW?mY7 zfuy;V;DGOV)CXsi%sbdeZvp)6NzEQ)X8P=F`{#i;bb`GdB6k~m-Y8(fb*m_mMWPsSdRSdXj6h;d{%Q#K~Cq%E7o?E?{ zf5?IvGL6>^;?7-;uKuH8nx=*ZUtp;R0S}F(*U=+6<;)Y6>GWiv&k9fu% z#?KD`h3Q|9`j5%HN(srV$RZevK#tRKi!qk%C4Y+^(kB*4F&Cpk_OP8sjn8EmRn=K_ zz?%AbiXqns$O07flK%AOSHO)X5u}H zU2h#1_4Y*#3)0;oARW>ojdV&1NOyzOAl)IIB9amcf=CSAJ*0FxGz=i!-Mrtq-g|$~ z=Xvk*7yk`2=j^lg+H0?UPRm)k^vXr8bKpn=Jvbs1#iI7oASwOe)?i}ErWfJpX^mH} zi|53K3xg>OaJI0uRGEMoioa+xgDDLhIu=Qwc}cyio#MuEF06*GKMFi~Jzz^Ff&Sk@ zDt;0kb>tZDpZ=0RKkmp!>uV*CFfMJ+wowEeGltusR@Hw#>dVKV8HeDR-P?P$-#=d7 z8UYvR9f?AkhFY_)y0I8~qYud67>e{`cFnaJ#2yPD@M2EE+bg)h3H!hGQU7EiM)^pC z+*Mm;sc!^c{@$1vOJEs!`?^*Fa_MSK8%Fb<6*rll&}VsXC`)>S>-s^BH8xmzMIy;7 zjl#w+GyZL>DkE;A6%zkt)xFQ4R91}AW_s(m+5VM-*!G8`jzC31LOkiw2+$lSB+L@n z=Yv*TEK&51_5t-EI99CSdp$To82Q~N4Y7afLGC`BXWv!b%zy#<5x-x%FJA6P?pV9x z@z^k1>;qngOphBHR@|uM=h+;`dh?lSNPh5HLar^>|Cz!FKq_#t0|@AqQ{9q>bquZj zvGNZIPyR^JMUTiyi9*+Qk>(HM!d4nsV}A0D268hDLWR|J 
zJ(D5d_3aynP=d>&6}hpWU`02|GvhiJ6GEkIk{DnDEi#^>EL6j{gxpsk#j9rLWw*P@ z0}&`d6v*~K$^6{Igzz;R`1_D@`$uU`U_U~VFiEm^`=l4DQjRa)6Wlh&Vx_w(` z07iubXE>bZizQ}Sg-udJPj=u#;Z#J=+NF67l>6{jE-KHwG0-FM+I9x?D^|lg#h6#) z>p2-0E+<(GuR0JJ7XdG4HJ`^ygSg{~_bnDMnhC#WfvXVc;s1NxT|FHi#+1_l!!AHa@tF(YixF%*5j@94&!PCA!@rdYk2RoosNtp3{FfJ6==mnM z@gl$|K=%RVg}W_C-Y``%Tn(-J%oDDSe(Rd|pAFx*uP3f`;>_DWq2qZ}2IqI~Ye0|o zf0>L9F8MqDk^I|4O?Zdi&}ZVVj_wdo-`Be`vIFLGYeUYSt7)tb#UVkbuY9+XY613` zvrlHbZHk0%UQZn+n}z&Aqi`A;P8CQAEL@>m=hZ1Ln#%%=2-`1x-~|JCV>WU9r5yad<47$l)kV55a)xyn`0oIa8Ei1mATS0DyT5Pj)$41<4PbBi z6NuV*jtQR!s(^V4#`(4-L~9)dLuvVi$78G1QFH5^Ci|Z-mjR|;z%)bsb7JMwHy`1f zPZX{|7wv@hs9T|t4Qv3+=K}o*CMMyfyqU(EimRWGZA%2bD_~bo(8=J2UFnqMSN~2* zkp;miM_u5ilKV zFYg_9a=OlRdwhga_N@#~@J#=c;K47=GjLPGu0UZ6rk*zpkCr|o+r*``XM4#*1kmW< z&}y=jKe8@G}>zcR7-tB z0j~{d)Y6}-KjV9Ei@($JzLX(vJlB1(h|dOg%O)~;;zA!B1F+}-d_ljTTxc@g5vv&@ zq_pNCu3DuNj-u1ogI^y;7QNeb)U5(<<@=S&LfG;SB! z&*g#xfw#HA;9`5{lIah*U&a#`5rgqG(1P8@eajwvHxlt^xYA!)8?O(EoXjL&>}UEO z2rZp_9Q=H>;Hs`S{?NA6g#mSRD8=h0Q4Ev2L09&kx+E0Q{NP0+O&t3Zoz7Glk-<=a z$uT1a4G)1%0x9wlJ`LgudLQG5=d~dJ^_D#R=l+F6TlnwC!>OoyXst|A9`()YM?CYC zG4CpQ#2=4@pXAk?QN6}<)}O2Ickx}ca24V%{MV<*1OXo@i#9zw+W~<{B~}cXjaq9v zwj^p9ZS;od>NI;fN3N9z3K@skEVa@!m^cgfxf?mzIuU>==<>!q2*CEd6+|g|cr>s^ zyL|82ICV5@^GC%>2IbWETUb0JYgJRO>s6vzHF1mu?D3%{k*#!QnielS0=y}EN)!|z z?i1Gyw9ZqqL7_X_{ixm+`bgi(64-sb3?o739^u*L2KABeY;L zQ8uEdsYO>L*2(lv@;vV*3Ny+wGiwA%(4jSHT6ygX(==;Uq=M)OP|}_sTvDsVOy6zdB1$*KuG|Oxt~5h*z|tJ zczHEsQOFEyv~{vn__I?o608VWaU7Q|w!%L0U-I*jv9ZsjHqhy*4u2hlD>bj%cW#}s z&R>w;If=HMNcnL!fCsRLfW?uv1LWOupTUxku~fa}SSD6l!$d@A1nNz)K&s_~2=5B~$5;RWu6C1#6U$C#E$ zhk!r}S+D1}+@eX|;>QhMf;8{?U&DkWA8!yx5vJjZ|1NBO_i5~-1YtTQ0s;85VgF#i zSu2YG!&`3i)Hs3_C+m=d%&IWKL+M;r!AN4T>(~7+k8}oG-7}Xx?d{nVk69~@viyr8 z-)GVw_Ap35+%3in8vYk-bTrw2ud4I)iLw{2yG3wx*{9*tUzh7)R4l_Kc#n=)3r=z} zA1%QiGe)O%olNCgQ4|~IR0^QpJfx1&qD+`)5)lFi0KQAX_e|}iGkSV@XLS@zB7rtA zpFUHH5^o_5b9To*F1xOe-&K`%p(3t-Eh(^;|Gy>8;x(AlHT~tdx=g*!C^*Cxcd(Qe z)O<=XfiA_Q#>`0;+>XDvyQ0-*G`e_(t{|TNnDdmoM-uo=uM32#6@j?bA7bye!a~dq 
z+u0TRRbC;@?XE{Or?2+#nr(GZmh$S^PfJg`3fECuhfHxfWgLRn0+W37TYbn~jw;Vl zd@?#i(LaC|LX&Jzz;nXec%q_2i51dewiMeLAKoz#Mn%L{G1{m_c4I)V1aXG>b*tZE zuygI}`Ubt;4s2T1D=H z=TUH^0TvpBsQpWLv!oPVjtjlIGA8AG!OSp=z2dCYmjC1ZRe3$F-b1*yav-;HrZ;uz zS|g{x8VWiTJ8{fb1Rctkp^GuVjt2ZrxAEO&^N1}0*$ytt56f;UR-!nLsxp$+>03y& zN#8BNnBAl5@69ZVP27Kmc|CJUx3#3>x1a<9AH^y|x)@2J9y~RuVc0`Mqh0T0;T(tm zT#`&gL>W|E@s)gIr3%08CiD6H8{2q<64nab%axjQ`aKz(b8v!uvCFU08a1Vd3^I5; z>p}(xjhF>Qx;Wz?%Vw!c!kAI?fcHFiJuGN zb>AR-%H;m*5XSIoe)tC8G0J*u=BFD%e}@+U$U@8W#1Z%-N2p|$jtu`rsu>F{S=?4zEMBPf zn5wD7TCnL9OQPSKb=NPl6t4HT`hH43cbLZmxgd$Jcz%ar6)W17ybWM5Jn+S_Lposu zV&`N?RCdu5(L8Kt_$iJQ@)GxX-AC%?ZO3C6R;HDdv!+;mx$EsbXj){;+1M(FgNWov zi^!;z+@k*|@t_}${yc_uZ@kg!eS}T#`^f|=W&3XnS$=dZx-+ZmX|RnT`BTb9gN%Af zh!e8;VmScLWBpsiqU%7hW9x3a@ad8~(PS2|2*4K^EA0F5er6C~S8NDh!=&C2^&t4I)vtq@1h|{= zEdL|LZGD>&>7z;_iS7E*!5K0UHe)5&N>7}BrBy%}s+=7jM;;4HLKH!I`bs%Awfusz zsDIls7bMXEN68(4mc_|C#*!Ini2n{@YeoHKVphdB|Bnw_1{?wCSS>Dle3Jv!f#2 zj-t4QH=HfHVWq_h)k`M(1$8c}ukgVJ1_q|K-?(~)FiA7*o!)j_g}&Uq+W+Q~nXeZa zqzIfxBgQBif&T-TLBt3RNIX46;3hAjmEpi)t3(AtaO(OSSh2VRDH@ikiP%E3p^Hdy zJ*QP>@nQPb9KYYiI0BI|iyWKdm2E)|0>1@-nm02y7Ihz!J zocs1KY<@3yX==IqTCiyjlRjG{J%+GpI2WS8u29dZNoPk^1Md+`iL0K`hFYk=FZ_bZG9!g6cS=b68*LCNGOi$m1 zqLICSgbt09Oke*W9!CbbA-+SmjD2Q!f#Ke(GBRRX>5dL@uVram_9G`WJRqTQp-oeZ zWKIzmmlfvS4(3zaGrxGlCLC%S8v5^ncpdbpsm{UqJM^A~yHO8IPiG`_r_={_osk+r zKRqJ3;w0jDd6qLm!&hbOV*Ji1mEzr}(4Fu;+v=>)ohK^nVOAsnh&l<|;gIh!JGAU? 
z!&n6UX`kapp$_L&P6}%EOv$_jZPLVMJjJS$R1GoEdxk7n%b{2J+7?UvRlO5dKW~IK zkgtr2)AQ?NB_&EgqM!DZdEC8;*f}_m>UfflT`ZLdGM;=|T2y(3Q+xi4jQ z@h3ZB{p@WHO% zsWKhQZKsHI^^Y7Cg14iE{T}uMCbd}ts|?aU+)E?9kpMP{RROU<;$IlW3+6DtvPk8>=6ND*r-NF~K7#JD^1>UFzMh@;SWeZplLOn5{Zc0?by z*I4Pi@T!`3rx-0QAeJb29$0Tq0re+poQSGU9woCrM3=dS0h4cj0Sp@Bokq-BM()U} z0x@__XfrfS^F2xvhAUJ{U^%tss86UkJM9`P421?!6o7dj+@MX~e;^eg8P|UZG6uA> z;L_r}$c2Cn{CFc_V`ZZb2lE>TM=72dAKrvc{6}6@@%&e^6SZGk8_`W(HV~ej^<&n* zArfqm`g`xbn4nGbz#5U!p}s?{U%x)(;J8M#E_?7`G0$6IOgI}1)Tr2yNnQ)k5c#YA zF#gZQ_C6haEz^b6vG`LCbbWe%q9BqO*O(#q7hD*j3Sor29HI4*l+iOIK6g}*kd*tO z{K8Ef3mw|DA#D>S;R!6#A{CXeJW?Ywja>cyu2Vwc7A=_=tpr&tCavbQRNbfg-F;Mn z#H#lZ*pT`L7^vP&^iz^{Xy<1_b!}ZoI{Xixfk|C}8WMiL>LLK0xwYRWKL@#uaVdj= zRG59Bnp^Zn6t^oM1M1GN9cjK~5K-|q;MTU#zUSw{4~1z@V|J2CdPF8QAgq~kq%i)8 z!$(%dUw*8xBIyrzUpea~NAz{Hu z0|NJ?GLj4)9=+iO*=~r7Yq&)px0+d3We6)ndAF004W{himw+Tm_SVg<&gHE#U?%C4 zLHz2RW@1)R+~s5r!o>>+Fr!G12I`L9qDS4*AVw1cUt}fH4)Nbr3PL-L{;NX4faYyP(4E&Rkd%3i zsZ-2?-p&c_KF~=nTUuFRiw$>u4hI$m!WZ(hk?wAf6J>rjsBV_CMBq4RiiNdIsRKGxjtHgIgoA8p~OLo z(&y8k*Z%=4IU$@gE5okq^E;orke7Yp6mv1Ge|jN}5$~7E;8s~# z+jGeXBdZb&{kg=5#SoyVqW>;Fof)%YZ_mQk!C}-gmWb$|Rp!GV-Ev4HQ8ek&R0&9r z=E@4&!Cl;p^Cj8#lucfFh*+{&&5J|UXguxLhDUQ!(tk#?t9GokM5w2{Y-r1lTDrUY z*pC5u^eM{*!4PVZwrIkls~U32UNu0jxN4wPo011k==K1*x1}&r7aCMr?k1Sa!+uGX z>qWz(-dctV4#%^(&UDpkBj0|{Y;=l^guZFhjxRsHlK+%{;j+ej*I5ttRWBBT$DNRk zNlV3?(Dw8`{#P<`kvE{quVD0_@;olRYC65I&cdB4O!ci&qjRZvpu z4kQ4iTrh45Ly*82@4!0$hL=Ym|G8fZfL$|%;}ZXD6c*t!l>L`O2NhKd{pjRmXhDJ8 z*OM$v6jPe!`U@#s5HNyKghIJ&09K6sQl}Tph52 z9_!t|i)=Ir1TN;`1c_yZgLsV78?ppf=6CJI#H{`R$*_a)wx+<@#=uPe^e3iKVCQCg z2w;SW+%6qx9m&9(DVYH6t9N{+*koOObmdd?1&H8SL$aO(HVQT|Q!k-@?^&Oa0mCZ0KXs7+#9aiHs+fTqSuIef zBe-!JWmnIWVCGrR3{~-g`UINH7iV~i5D1w3o96xns*3o32lJ#z9`{=u!b~MRDwmG4 z?2*;Pdd)WO3>~g4zZ-?vG-4JrqWnrz;njEX=mx{9hqK>s@YA-u)UpZS)jIG|+@gS9 zb6_wGV1)?D97h%`eq$fwqZ{_mu$|B6#4xTL%d;VT{FdJUSnv}6M{&(NzPtIB`Hk8c zW&^u}V6_lFa=KLkeh{K@d^&mbSM~QDY-Pu*EX7Qp-?3w2*Qj6&di?&tVA%E%wS^v0 
z16~baa8xoP>JV(csvj;Rl~cj&iAB$_+~Ec*_)%5cMKB&8>B12F0R14@7=0s zz=JpV#q|Lw`J~0lBHYKu_`vtqL1M$6l}7|;MicK z;DdLj>52-;zmoA=M=mQ%8@*7ye@BAuJMr5+UXC7-6<&-cGlkF7!Bq!5_|gpik&7^( zHj5%C_!AAwhJH1JwYB^Xz6WIEI98ZK8<4Xt&jd*MH0{Tp=NGuj6pulcjy$T4Lh=xI z*9Y#`w|_p#ln7B<``74}n_Rk5Op+T3cqDx@w(X^$vVPj`%0?<7qu06PZ_lt~fXuxP zvgRWA^3!ZnBq$veIsp=dA&Hae0&XaVmdi%7D&iA zvfi@fs+o~Z*VTuMLqk+~dA%5Anj-}?0y+<}zu&GN9^xM!ofH4|-Dm~2S^2#&A;JV5 z@M9JKGLF>rG;O(SHNKqqmC_ReTw@6; z!mVfjeL(!bl)dN~#*vn&uUl5sQ|BYNFpkvb zj~@Jnn0GCil#T+|5ahB4e=4=QhBDvW)p%)@;PI0ZABG7S=ujj0{wT@=&W61`CX(E> zIiwjxOVUcOEvF&o5$G?aRgw5YM&cQK>!aD@l>QGY-ulnM$c*T*(2Z>asCaSm&sLrL z%XLZ1E)$}lEw|dVvvyk~wZmjy)g@c|ubvbESG)*6ZmNKc5$2|W^=IF{`22M)qV@Il zM@L_CVe!~~qrbniBx|RZi+p+M^rCn8&GHy@IBIvy_XS+$^EOR^xcmB|HXq)yk>Nkr zSRS98jQlOYvH3BvriSnSynhD`Gp2ssPp${~<;_j6x`Bb~Py_gh@%70PxAm!4b#1Nr z&&md)^yLcqzzCtL4a|*?A4isFHUj4I7j*3TA!B3sLdw(kqn#ISb`x6k0Da7ra+GQ3 zg$rD~$7@t;@vkuD%tuX0J7ADq9?GS}=yAW3Qg&g7Q`691B#$@Gl*K*m`PV1VGA?Q!UX#~ zCN<{9C}7u*JYHmSacB5$Akk+2`o;f=_>I|}f$t>A(<^BtBDjnwTR#5fpR)(R(&mf5 z$>&JmOB8AO8}6Y*uQ7bon)@=}52+gL+*K77-I?+|v^4!Op*sX(vVI8dwRNi?S~1+A zq5gwY_6|Ote+2a}uMh#$F0U9PEuFLs!a$wU0}Kp8uZ3Gh9ZT)g{{WmnuVa{FDQEgi` zPEJV6#S(4g7`CL7a}J*?PPW&r9&C>~C-4b6`qAEZ;rqMGPOjyxA7zcbIy&>G#bHz% zvnGeQ-Ki!eq>BL$ox*;nM@@U*eQr}jcSPyv9@Bl%hljP}TQF!)e4cQ1BfN9LfJQYYY$V~TFu?*qHyw4d`R zx{V9ThX9N!e`UUXY^%G&9%fgmfr(egf$TmN=bx_a?ho{TxZxtP3O0!!AlJzDk111X zeBIP^KO7vT1rDg$xYpCktUR2t7vW}WpJ+NJ!W)?wsDE~)OK%(aO;Esuar+m4q6o~3 zlM7R@(e?lZvxASbc+tQau}P@hH7>l0_|vW-`G-5vnI0pp5%iI1*0)~H=wd~ghPb}U z$m91rSy3gj+4OX%t*Ge?VeaYV^84^}W3(Qj+U!I;frhyOAl*I4YE}4uC9a@!0-!Ej zB3RO^7kgL8Ar=sTMDZFT%TQdOov9wzWTLqDnV!yP0L~3qs|dj2H-ngwSv)GH9kQFr z$;2Rccfwk%?V^=yNSIQ^_Psokud&fxMK86f0e=lTo0a$3>Dbt6sO!$J!1p6!52JXi z@gBx4{tL4?q4y#lP91GaKBcRafo5iA!Erlv#uWq?t*F{xzAX3et&nmV#{*9%>u%m0 z(z7LIWx00?F0|0Xd_3JYDe+$m0EU8y{ZS-sy)84P=xQU;tZY1l-w5)SYvVx zGSKpt8I%g3N3^xG>jo4zm~-Cj%4V_nt(noN0=04)HG+~7rdhok$<94MQFNj6P}q;Q z9(ek-K%bHj1j_SBBa*O=uRdcEfIJ@Ws3y5TLpLQ0HGnl{y~oi^k?R;((u({6Zba~f 
z_R_QI`9Ckh5f-vMhJTUcXWt(EDfHl}Gwv^C8erMS8#e6%ZePB|RzDvbOAVZkucW+- zA8H1zo1293{zVV==1FPSJ`X z=3=@SMnG?YmpHundgxH`hf|9BCR`aOV;p0zBfWXo3s%WKr$V6W6fnL8DndsS22kz%Q91)U5MwZa zLbuijEVYC``<`91uATA4DG2qjU0NFoManH+RUkW9Y=n0F95Rk%xiG}XdC0WKgAw?N`;c3R-12HPn%)q!6=P3f1z}~7B7GqQQbtN~T z*P8Q@!L=Vd3XX?$7*4WPN$J>JIgAT6F~`_1EL+~-IV*KsRxyrr?`2d*Axjly*p^kU z!;~fO=u#L5mWxi3QV5r0siVR*W8Tv3yE9iQ-9bDgId9AEK9zer-o649NO9-HqJvk24TWq1!%p2#5jYPqNFIFYtOA+zpBTvY<`Q=z zJZ3kw0R-^H^(4|ca2tamE_q33#sdXo>9VlFZCbWL%-n+v;%$gp%KM(f4A;C z9caq+3w)=sIb$5W@Bwre5jPjMyKeXk}z8|5%rk=(cLQ|~2c zdfOZs&l+GQt2Nz5i*QBy=>ZW=TJI*+=ik8v5=tUFELXe?h)%nC8Pg^-1Hj+)fP&bQJlgp`eRCiTn<#PE~rxX{j>2ZK>gX*3!frr3J zaQH_b2|-^9PJFnZZM-i0%6T`t2mniRGBV=KQeKpU4G?nzb#y@Ea`*mPrmaUhi4eg8 z{ymCiE;n5`a%%$YNV1MK9Dl}8=}Q$zbW|t8+1L!?{z{&LhwIm>PM{$D z4;|;&zbddgJuSnwCsH{Tj3Jpq{C&IliLeHE|$;D3!!6PUwo7M!%3{L;6(k8AINaJSuIr2X0-F}ei0CdxWn!xOqd66&ScCCIY+s;ih4YqZ^wVXoK!%n zmVq}~`kA`_(Sd`|Z2w5gB*x7*LsuYBT@r)f#!{QMcnz|aY>m`)J?QRN;GiX)hQWDu z=#IJg1Qz`-u{PB1hJ5AvfCk%d)A#ER8-k-a&_oL& z+GIYByd?s=_Bwlelb}>UD9z*LYB5()as9Y@sz>+-?gg;}%-+i+BqbS!#s$u?&9-ud z7mMj**k|Pz(qeFaGTVzOxM+I)1|&*`U2aMYopHy6ODX_in%0bP546D)VsNaR2+xH0 z%wMvjLq&K@2QeSwU)@~hiomX8$)PY$wOixD*jBGC%J(m7bgWG7x|ykx=dqud2v?Xb zjFeYa_GGs-9nS6q66;@)Ce|!D06w$pnH#~>>~J0902Qx?%OqSnQKL4W&GpaD7Fvr3 z#Iw!~j_7ag&)7g=DEJSwe~Yqj;D!cPl|d~hm8Rzy9m;7|R{3pTL~kd^Ewg`;vO`zU8L1;s5zUBtEMBO2J3}37t11ex zvk7KN6{KBCcQR2|GPrYM{%xV7)c6<{16BX)q|a2Xr`q+ysO2^W5=)>5%flt;?J{O8 ze@oyat$d%O&{2g4TEr*tCLlIbvef^?1YSuva$U4FkqP^<6$lXk+E_9~+;@d}^3x=y z_o`2#$Wq#43e@`r&xG; zwE8z~1G!|XCNGr=8DQ2X#Msd3mJqs?GQEMZTv_Wp3*&X=_MCaSoyOts{7o z9H&f4X8z(w?p}?hpy?w*@LVFm1cdh34KL2iAw7X|uk>&GHd=HI6Y|yA9x}DyT4xEs zsy^94-|%geKdpZd(5(jPrEUm!u9>^uGnzjHi(d8?b9stc9iCA<_>j_ed}Vp|tY?AE znRwTFr1fc=|LHT>*&bU;UQrPN#V>-De_#N#VI)A2 z**S_1*ZrjTWF+q30dP<0g1b>d77>JY5dT>duvkfX6dfZv)=a<{N#mU!lGT>G4&lCq(oosEZq5PI>WlEx*p4O&!0CM$OXdZAeW`BcH|F-$Hvq>UKi&Eqi0sJ(c6Ahyj&m!tN_||aROYXE0;{o zryFf%gPTW%g(dCLBF;pE4<`q>cw~Y^LJc!sn=s$d+Jn{wED8z(rcV!H=)WJvy>->g 
zUe$AA5R&5~Xu)X|>%_Y-&0qW^Y3_4^ml59Rdn$25aKhxhp#&~}(JMd*#+9cbEY&mE zVH=~q6hp9Mem6Q|&daqyF<)!_G3vCSw)=U&G4Lo}=?T#7DNTQ_)@CS5Rk6HC zCMbBqF&iBH@yYHLXSNn?nI_|hRy3%4$+oto16sJ+A*s2&slnr%#iZ~{lhY>gA|BIs z(P(|}t~HOuH`@OdYII*7{VlPG-_E=%U&$jV1}OZhyg(jU%49kivwi=bix6zWdi&*( zejLtBI9K+3cWd1XSYry?`%^w&Wn2Ldbux3OCo|AQ(~l5$k_Y;n9d})SlrQa%6w#r* zoGHv3{^$*YO^Egn&6pf21vTX|m=JgHd!sX$hVIVkb!_%=n#(pFe_KE~bCps? zltR9UvGTdB)kZuPM-V>{i|hXCB3#g9Obqk|RqSTX{_EZp)VQP7zv*ZyBM#)|*3}_> zb6D5b#*o=`cise=Nl4w#RPp{%hK|_sdNgcZ-VCDW&=thSS`AosWk& zI9RMNu)a}}q&!~6Uk)98wJ23bqK#a(8kU2c);Y$vC*#Zg-?Bjb3$cd{ojv;X)40fn9>;%&Tz`=ooIio7wCfZYgQ@6ANFu2-@Hi#El7M^h!8Zp&r5=>m$nJmw=26< zJ;Q)f%{#h`d=`j|ADa$WM1$s);Y#lAU1yoZQH@NUtX{(C3|+EXa$BiyJNL0sf^N=@_@rw%gvecp(cNE9-D{55X#Cn9g`#S@IG{jxU*%yoI09l92pE0C=Mc9&ws8ZHxwpF{_ zy)wkPUc`?R{hVTY>0x)GyH8j}iiJ@;qJWrnD`bbK+dws6OsQF zENH~xSSLpe4upj6XcL6(*mgz~2S`|C-3yVI)pMdjXCRhQvI4)=LSDs9hpSO94r4@? zBa$nl$@N+xKAtv@juMbForl0uDXhG#q9N{Ocmngc5O(rh9c#3PupRm`ifD4r3XE1( z;rJ5q_ZAeIZ`RZ=z_zmWVt)EG)=L7lH>M(4cPkB!EF(Mm)H6RzbhtR&NxU}mtXj4Q zMD9F)qf}SkR4YIN%ehfLAp5F30-`YTSGUN?7UOTcU^YQV4 zt>Qw__HqKw#eO;K`o_h54JS1L3dFdqpyOknUk4N=%^Mw@$Xh0X4T)OO?p?!1bNG08 zksp>{TW(mwU`r*uyl!=S`@<>Ab6?J(_+-l?t<`$8Y;2e+D*e&KH*9hlsc0;+?NBJH zAyRT`YOl75Ie(vQC9v#=rTr&gSZ}L~0VZAmCO_O~-`X zF*c=|CVXa#qPi~>(XU}u{KeM?hk#X%l!OFbD=&^pU`QTGIX$SUiDG_!{Un;QPyX8l0)XK>Rf)F@1ATgC1_FiZOfxd8NhdpNMP?Xo2L}P-cb*MS zXXH=G5XPlfY+DK_A8~9>N2?xG4fOQQ50^V^ot&gpow65^P|1v%U z9p66!`TcUQl9?5UBlL!!$uRICn0nMO5a2`yK~48BUxoE9Xu$*kqe&}93^|0Dcyi7QTtb8|@y?Ivy>j~VI9!`f8f41_b z?)G3wV>OEiNeHMyKhwIV(zJAGM?$TzfpWN=#!J}1gsEYNY^&3O`F7r)r|IHY~3@JMhmTpfbcHNhhCSnsj8gS8XFjJe}4V@-? 
zoA%s-B;cZpI&{KrW0;QDQa3f^)Far}Or3V#(8$cOv@`qwx{{=}C;$&mfIgGpW-%ZZ z!5vXr-{nEW|Flr%^t^JCv?Dcr$JLFyw68v2;gi9_5;a3$D_`4!L<5BrqOLVx8yD`z zF#t`EE7SzY0U=NgUZ7LE^SdG%p+m3CuK?MD+wqDe7^m&I3+)kI{HAk6P$va|N1U!;IN>98eQHWl;VpJKtN& zOn*^wc@xlcT;reJ3Jl_;oBH>Us~_TkNW)9Y?79}Qzkl#%YV*gqPDQhghdVA(H83Lo z^<<8s{mtIWu1(QP^k67IdI2#>M#&;(G!Ecyw-*k&a|+b*+s*ow&Q5Q;x8>^+L3bJ& zSDf#F5>jSz94{uTYnl&SZmZsqRd9b-czV6kB`v2N;Mb+J7ohPwgGS-Qb0 z>$5g+#0g`ZoErLopgaC&6Y=RJJ>dNpBmAlFODabO!z=Bn-+lQ!)v`!LMUfE_7e&+2 zHwKQ2E7sFXa~*0N0713Ys_rDDNm|odaiY{@)|6?zeZN(RHn;9CKRQsmA;5%D&+nUZ zs@|FeAU5^p_~C7BZ3qhri!?1KCA5ab#o-Vq|JHTcP@&rBYA&y>B?&$9QyM?FSO|AM znz5m-ZX>?W;&&v;zRxmVUJO|2h&YgRa`Lz_c=Z5uM+rE2m=KHL1YRpYPsv6~SLjoBmi%-PT2Law*u^EQPdJ;tOxBo$j_ zNynU@mZwt~h^&)~yJe5_a^M<#4xz+^Cmvw&>k9F;5ccRTWC3?%1NUibc-0l+p!yu; zmH}SPI7T^DY`M$JM`j-P0<113!m7K%bC}MBnE2e-KzS(;_cAf{-|8~x>YsM`;{&eX zVbzS~Owt-Jcsi=xoRT+uRJ$@HG&*yx3VIumL*(BD7&kJ#bIOC$9{L!jn>Iafn*HP! zbpVjb3AnJ88GbTLAE|LZe|@8Kac0Y0C#hoB!Y&F@JsOWxkP$jFSNC`;2xK4}L8`3T0ZKvkM zt$rQ`&W#>(5eGrX%kl#I|Ou*zS7b^C$JU zvo4PS^GwvI=UlfUDs$gcN5oDL&71JS3FI$uxLPhf&2U<~Z(_JaBdV+4$jHgTW^A$} zVycQk`{D=9@s4u=DZ2x`)TO~2G-dGt#ZMbCy}cW#4z4hjl-L|}FJ8kXsF*>h~@y+H}WfCI&O3$qjln-j_la zySuQ%LwEcY&i)_m9V+P2F;hT0O`G}!hPE}ZQ>o-E*f!cojRfE(oOcqZv1VvgFLX`S zwX|c_I>{A8ynlO2l!mN`jLWEE_AHy?(!vdlT?b+@rsxb z8fp_nK~gsEY%t(VchX%87%a5-J^`pxTEf0ey&skqc8wZ8lqm#BS-n2s!8hh>Rh>Bjon3C8=l)_)tv0}Bhlh>10Da?=U_3gr z0TWc*8-!V0Z#*D3QT54AH;LRj==Y<+dXXpe1sCjtDfBFwMBtdx0-(Zp8gL7x72+oM z$D%-GEAjJH?~VU%P6^Z)`;NX3EIeu?kt;u{jBDlu^Lh(l{ssb(e1RyjFq&Az;r9b-F-d3N+-`IoQ{qO?Oc^#>SXI~ z>yMeZVA$IFYWC;CQ^F1T_JJjp$hO@2C%#Z-nj9?ytz&?~_13q{Wec7)ofgxU7T7~+!IYav6+Ddj;J$TB$q!i;P_oklBFU*j;R-uhzbhiIxN z{|i(cqC7J=Mhu1zlO+J=OEO|%AG8AYC?Y1uw{Imq6lsCMDc)?tmb4!L(gc)E4E3U) z9AvNz4j%-jkZP?q2?T6~2~38erTy*g=~MdG-r5daBo27wtzq3jf3~giXp`G+OySa)U}vg|c)qDkG&r+L zHhNh3KaL)P9R=A0%8781DnY~R*zy7>&i(&#~t=&10 z{!NEwXfE7EK*YqOkp?hjY`Ll>O?oC&j2BZ-e!44LfGS(DzI0s*KPU!ejSQ2Ta_N#! 
z(0(lZQPP`0O(i3%uCkPwwjW7lmtU=Ll$&(+o-xZJ;^7<;!x zPDct}I=XiMh?Mj#(S`1L987M&5bsU+b@h zVYNEeG1=k~{_Fhrj%DnwYv>d-y4In%(UrvZZVQhs@Sjq! z{F?u=kSIX&|#?DGDgKPJqVFhc#NoQ~yuw=pb!5r322(6h1{ z@++!BhiHuC#amfTLa=zFNvvIt!(Y>kp?a85Y{}=7VwREOetx&Fb<+|C2eCY0_C9R< zKpnu+G^vU-33u*qkBn7QuoH?cUlPt~W&3fXljD>Aig>oLgp_VyLMjLeZdlXTfF`5vJRfETBXpah_qruEd;= zgWJNiKvhLJBPGsee^;Y{(xfVU*l9P(oRzkZAW%r+`pE$!TfoGlsDd)-Xe zTCNrR@V(Iy3f-wKDKZ&c>0=|IIG9TditUPdtAKqafn*7%Ml*@yLI3XhBm56xf9pCr ztu1)x!_8>9_pLdCc#fBcUsY`DtLZb}-=^Z=bajiY8jGtH}iCr(I!$1)kJhW1Tpj5B43uC49VU zuHqi|MUEVFD9&yj4FJw?yU;>a?2>k>hr93V()N(E%^hhx-=_Ma6_q#+uZPw}(#bv6 zGPVcT_uq-{g_LD4TUa!72~R%F3FZ z=OGW&PhYHWRW+F$9GZP1u961vOMQ0)4L4^pG;uo(T6qq)EA^@Whn4wy;-yWuAQHDE z$&z&z^`FK@CB)%OvtbuDnV-ikI9MfJUgzuhayX}7);{JPW(Q!(OBZjUL)3uxHdvom zG9#v<8*&(K7~ICE5k*$GhGPPbuOUIwoH)X!5u<%elrq1N`RCRiWQL-YVYXb>pH?>E z4Q|in`%tY1&`@AH^23K}F_PKxA#s}$oa8(7Q zZ<;1l4UOp5w#2yyNMH`0AF5i;V}uD1_~7{|qe{Bj>3MBaFzKy9zx=3i?%BYRTblK&rDe-#i{qcnk{f#B}$ z8r)q%2(H21-Q6w0-7N$N?ry;~NN{&|_rc{ZzAt2=81V=dUaJ-ch!Q_)EcRc zO~u&Y#rAo*o+ZV|VCvY+%{AF*4rpOG^|%VA+vK?3J=$~2;;!u;x0q>wj=t&Zrfxj1 z-pgGV#+_|%EH(QahSgwwG3sN-gtl^&(K44?uEG=b)m$6_$5OWObn_&XPcOraWYOwJ z$K5k$;-0FGR}MCVqAyf0!s|uu4+XaayF#4DQ){RPzO9Ci;e*lbaHrCr54cG|yVVhw znBVk303TEXi@R$`dOj_!ob!u@I%JF6*a1Gh9NYe={gpe99#r=3F)-{i z8W-UArm11C;CYktc#OXrDu;K@bi0@#LyO2F_ntd^4x}liJB;<_es3nf{b9$Swx4i+ zCvd=`>%67zy)(; zQZ`fwdX`rYYHp)cXlZD4`ZZD;Vu`{{Tx9e(D#BMiM{p;JN}r;DdA#8RQ8YW;e>tQO z#5UEx{#sDWn&vxbY(9&m$?t*(wbZ`CF4ex-?7;7Sb4afmr)s(5+Qg>sx#e@!ulFTh zYc%kPpLHT$&j;Qo7#gx}ryt$C5^X;b|NYEJ`p z-vfIYZ9!q>v7awKFn~uatR~5_ZvTJ=CiEI>#s)N{^+n?g`C!Ax$T}+RJ%lqqH`Kcd zBE#r5D>*U)t7e=jBAZ}&_}|BmeK+RRv--f{ZmcJXbeYF5S4G;1ITeoetu^aYSAHiR zo=$%i?O*xfM{Of+d%$bhd9}*WfpI|0>a&~=p{<~=h*Y8O!rGm(%-fKpyYCS4 z$z=wS+vyw}uw@r7+BuGL0}r?@!6u(cJUR1CO4jaCdrQEM49j>JmQjP&I1JVEzTwdl){LhYw1pBY<+QCDN zbSl%xn5{`w$<_)C`5(qAvBnoz!BTY_M`?da(=^jyyQ33^sA<2Rj(I60Y5>#odmehD zpVbU@#KMxQ`C#xuL*1b9)UMRqagl&(H^GaW$~hNbE7r>YXz@?a*B8VJN}5cAswE1; 
z4CE{~!ORLPzV0?3YtZ0C7H&cm{NU#Z1&;NPvgVXS!#Wyj!0JsN2>B5f#(eDwg5{Bz zogcz)A_F7lpBB657Cj$#ANr}tjF;C}T{{Z&7UQg~eZ(_`$s>M71--)f7z|obHJ5A* zXI{-eJ3OhB@7}w94Wa}#R zBQAxX18DM}zAQD)&L(%0HL%PfqHAw)nT__D7!fb!2LrafZkGpyYc4Q8as0^d90;f^ z-~8!7^-ffUJuBvzjIVU&nJv&;N@T|;P@=blTIH56M_)> z73^)0co&@ZKRcW8vA?>e?;Jk-I(IDmTraV93P=*G?BFbKsc8Q~`%-jlbr{KB&-g z;p$MH=($>)j{}eXX~h^q?EZRRQ^^Q^wZqOR#lP+DKFucZDbPd!7W$NB^^;jQM~2@G4H(y`YzG>*-qKUp zJET^k@h4hh%|Awl!h%=Ijm0DyX>fz2@o1&3Ft32V5T2d?ZRE{FH$w6UBggQq5JIAH zBa4S}d2O3bs4&+DW+~WbeysH*cEZ@nXrqA9Q5o)un>vCadHba?7*DBTf1#&guR46A z4j`Jz)&Sr;L)yy0^!pDBt8A>*HcN6`2TfBxT_Qis4qn z-mws_KvA$*+x@Ob51EvyJT&kqqZ6~5ufmK{hlj6P;llK@W1m1S zi{tJ%Q3gG?UUPl_~9wL-F($}ca!)2SYSeeiDj$;(v1qfZ~D(E7lix&n2iZ{i- z)J6PY=L7;)?6-cJI`d1Ty|0ZEFnc(RSyE7$Zrc@>lJQ>U@S6{=NpH8%(e@#z-Xy;I z9r!JGjoPR?-d>^MU#ws0Uz!qn|D55lq_=x*JnQhX^2n8wpPj$e#?$e3pkA{!Iq4q^ z<>N5+#S6NO=*?SKr>dgTNKIN{7ah9K@YV;4|G)bH0H|*Re9N!eb#pmQQn?dJagR6; z6~z-qX|N65&`3f!Y-I7SW8qEF-!}3Jb&J3aSIZDg208f3+mYO@_dY%9{+kx>>0yEk z>{kq*_@5Df?G~u!Wej)0Hgt{ANyk5WBjT;>={#vkP9v08dxW^k_Xq6R0ZQ$;1$Bep+d?;PbP@w#e&;R^ogTH% zKW3PSMbf6t-yns3njQ6jV4vbYVq-?74)We7Bpi}#bt-J_%EykCfsblb=hc9zqw$tcuOkRxbtc%jaGg&xWCrk_}-*xlbz<*%MP#mT}yaQ%bD~|i}4VTLPKz**a&=I z9bXP&#;N&UM~k33NPx)QFRbX!Ps0a1jE~Xz`fMa(|DH6ojX_Fz?w|Wo{{#AYR7_>! zNPs^>yO&wP>xn2=`t33=ccK0ii!mzSd^NpVKrf|r@auMXQJnbvrZ36;*ZI71jdt{h z5zpJ7U*BZrD*1z>h+3>tuy;dRT~nB9_@6GNQ5L{-OD%if4~KgaGGVRdF(;jKd{sua z@;zV=vz&_24`VQTzo%qUe2f6fjj8Pm18^4}Z7nG*S>cN>LTL(wxu7Xq$T+>(Z=C+H z_ZCBZ8|?-?>{>>qVfmkjed|`{upXM2&iyVCJ)~%8ITLemx)U0w^qHALS#RW5Srs1j z0uue+rjzm+A5YPn>zHmRMeHoI^PTHZFP%OKmO$1TIZwmQEFbHNP0`q{6b>k|P(4H@{G@2XH9{C~=3TJrCd@axqEY1um? 
z)n2D{Kh*DR`TSSgfHVyjTDDO)$iy$QH%sVwbo6jI`k{3b{+;j39E&mKF58A@!UF-j zzxYWvC}K}aJqgrPL^Gl0ki=RK(8hlxB%kZCf)*pRUp$Z@9RgDsuo*|+I)Qfph`L+r zBdb14S?MhX`8z*%6Dkl!S{qw$G}orQxAihXDsy?ruRnxXUSn)<5)Z<0>N4$) zLd_{nGs_(9FYqHm5pY!t{+BRRR>wRAkD$d~?&= zHpG|Vh0e=iz z!d}qyHeECsr)?_xzjc_>in!-z{Aki1K)D-)D~atancL07qGhQ+?5}gH`E(a3gz$5n z>29#Y>AcsM+BLs;)y?sX#+!DP%oMdM0A__R9T-jIPa{nR*+bQk`jL;4IE0hhWle$7 zYzM8vDGIWZzeC?75(O7qQ5i$c=%m9}K{9U}3NrQ})~aC| zkYU5uxU}g?rTNQYJnR$jCfm~_J$sh=ysfrW?113r&14|{w-2K=?eX!}2E$}2AN5g& z|N0NB1LpL?m>${VeMCAN#?%?X%8q>oev{qV%CAq8qi7kASEBOBe$Cp6cCBl3V`oAR z1$U6(8GDnPR&;c9=SNs?@or7|kWq*aqNZCJE9a`lV=ZXfJWBYRgF_el-xJh0>CWZe zT1y3R{qi?dp)CvqOBC8^Po+wH(9@9Lz|pM!Y{}aHt8jC81KzxdV~ojN?dnU97qG}6 zg%9?@5n#gra<2%N2Cq)0r=rbrqp`j^3po*)$H310qez>V0$~&FphEkL);cC-!uR3s zz!?H+>;Mo-Ljtsy_xeTOVM0lO_FFwTw4$aI`fwLd`Xr6Q{>@d=ozYUSaGsh@v`Z;7@ROEvNwy7xpILO{M z?T2K4=g{6UnQyNWc7I5?Ivh}NFR+7rXJ?OF=8e!%mjHC1PzAZ%?6lUXLoj)j{q5#S!R)fe1AK00$gF8n%ByFoG9 z*LRW4%&PJ;!jj{v+rChCadrc0OS|B=^tv2Z^o3W-jpkzs3n8|*xq?lKGl-TCKp7Wm zVk;RPq7?^n=6~V~sB*JMy|F`~svui{tID$?=Y;RWC3tpHA}Sgg$4yyavLV2OE=#SZ zc|9E_K#=wi8TEwOaH!^d)yeRZ>o93qG)h)#w6(H}O2rrJMWSz_M??Q z!5R2SM)Ec(Q*MwM{dXnt6$U5juh>}W=S=3`|0r4U^l-Hl66edVrf*d~WiacLi^^+J zHKA^0K-R7_-&GkO+jzyF2B7&-qNPgXDf2kiZL34T2Sz>qUx+&%b8yZK>ZDj}%*c0u z5ouxt>4vevUjOx1RfC9&1q1E8@GBhE?}`s)pG+JO2N#Z#`&1R2Ec-$a7|!nc`5Qg> z!r^FqrSXuwzmp6$bVNpyYVSEMKU`Wjy7NlFUG2RR2tQP>t`)Xy{K0_`eGE@?T1!*n zt_P)mSSpR`V}R|+!*3$_^_Uy}OyKi!H%2J-rcs_d%}(gA_jc8XaaYsKbS|9!$i)sM zyO~v3!PnvSVq^?CId%|9EN{XA!VGJ}k0xDVn6WtxH?0l%e%I;MF0UU%eGCeYIGFtt ze8Q=u{xxMXp0A$fO7X3VR{z^g+WgS1EI8f?9+R5uLzI>Yc0)HwL;KQkc1v$kN1w zCer!e;E|YJQ?^ILMMQCUczCL1{5O*#7zEeKO|L>Ix;jz;LHz#tQ?XrdcgRM=W0#(d-TX>ny;zOlw=4!} zF2rv|JbQ9~=7Cw9LI|R`Bf#Yvsh_lWA8q)iaEfha zUvamOw7$B6u5oj&3f?k0fMl4<)c-aaX5{}QV|bk5h{zzvE7K*wSGf6Nge3ZC2pg8G z_kxP)#0-AR_}#RdJ%9D^#`BD>iTa*?M~c|*Cvj4wyXeGJ)0p;E70|ePmK~;kn(7kT zrkt&{*?kUKPgk=<{;?TzZUJ@<-}0p0G8C=DUI2JWE=Fj8NXz5RvYWKKvrg1wx2*ZC zDQb0>lX%bS-DktXI{Otxvu{82Qelz()FdxXp16_zc(GD~nijdeJ>ONdxGm3&=(LaV 
zXT-?9@{jK{W#ZX+8bGyvzJ^3>sg<2~K=M-idC2_TVaz--ou_WAEgQS3Sakk;Q1^3T zi;dpXvhtm{yQ|fduj4<0nb7O6=C+aL*PWK%>RLzeo@sm$tG@x`?h769M}8LP)NO-V zo#9@qy)mg%>iU2;LbD4rjlXe&7eHx^sOm#`k%Yp|JomJ9(jnbasM^`se75*Rc}KiB zJPoc~I~edZR5}^bR*P7`hz|+mwy8d^2&YEdDO8)CNE%lU#*WBRbgJ+|uSCheMd4k) zHu2E)k$z>6vUt7UjS#F+f`43NN%CMeai9oBxII=UseN2>Pyd(^F>7k#+aG7IV?zAS zu|E%>PAhqnqN62|LM`@fBQ?0I2BSDE8h%p^zzHo)KWm(Ko*fsK3RZ=&+j1)bDlQ+! z=U@+5pP>ce6(zvJWUbw>U0oF5`o_Tcn%L_lpsJRL|B9;qM^7)(|B9-Xtl0y=;?qzR z+ub1GPXt-eotCC%oKFMFk{U4^F46V}RFuy^nHheFL;N3-94Y^oBv-qMkXq_i<%J*F znD_XqI^0cT`A}-qiBBSgrk7 zF2LKNv%>rD&Z&nGqnGY~|9f0E)QtD~frsU1Sk zBu417>AMN?A7Atq=8J=L0XA2bRfzd*pYm;TNeX!H;3q_aZ}By7V)5L=m1=dUnIv2O zOy(c!9N&M#OQI6g=^A)PV^lDZBJ~YjZmP!mMw>qmTMh&y7hqxRsF0)xAZ#N;G!^y2 zXo9Sk9WLxU6Di3zcn6Am%yf0E_U8H>SlVD1;KHOU9Rl$xfbj`r6Bc$eMiN6DI_s*z^HL-L>=P z#3yY1+DNE8GNS;+;oKoW` z7u5D`usfuuuRZC5P9$qw^Bg=I|rmm|Rrb$7iosW>}3 z8?i=L$YbF+Zakz08KhRFtF0uG>Bb8{QgM`iZ9e-r3ETkuKM zNYCu|GBeuf`x8FHx4_qmN_l2K>G8#!k(n$tr!Pxy#G8RU<=Mfxz6aRsu4Ct{ z(Uuj|WQGx-Da2~HE`;jBLI4}QJ|Oxx5K^FZ*m_I5IM zdt5|~YK~pO=e3}}-}uS?)&Vu^+$tcUBQHCZL7ii-XVUQ@8=5!2Hl#^@S$q!(&-=~co94ZMWoCRpX61?dKiY-P!o7zLU1J7-8ll_`o zo6#BQ*S~UkV34@0277k!QA7y-jkpKV|0hyK2qN?=)<5y#KvJTo?}vhA-f#i6l=wK# z^u^3*pI1Ig=bE~@?jj!2RPQAqR!-u(l2=2kqphwKtgq)Y4D8%vtS(SaZ-G4I1<+w+ zJ)$$o$<|tekTI`jXZ-KBiYh>_#kZ?1)(bV1_)Cr0){!<>HgmoDceCPSQhkwjy7}sr43)%rm}rjmc6o4Splg%GlSt09j{wT^N(>J;PaSC>I01r1QNliE@&+C>uH07hSHQGwz zX6)iaYR81a2d7lJ*n1bP9801G_`rO@R+vek3C~GM0pW@oa+<()t&w@EO)S7QQ;Uz+W4Sl8O~742sLK{xAU&HYMw;1Sf5~1 zu_wQFPMKZ*HJDx*0}o#czle^tpg?~*(HH{aQ7Rh(pnHQG2J~2Fe#IJ#GB(LMOH}<% zmH5T==0;BW78>+0jI2)-U{_}k*W9DT6ECKXrRJk2&OVSh+HTK+#>nbms$VhnA%PE} zpKwKx1M}$GW2ryF@7?6{km8UD!*x>zl&5@;-Q3Zk(_tBD!S{!q*3cBwS6S<#fqqV*7FS{~z zW^~c}MT~lx%!W#sUX5`DJ#0hhmxywg{5synN$!4?6YzN7rs=;~sVi*HBO6_nO#4B7 zx`etxovr2g?(#J)Do^H0U~C8pp{n))dFe~K3QH%dk=gyd4xGQ0HKp}cdwo4-Muw^V z^LK^ssl>(CUM2YQre+38D&K7ozkHE;D=j6~e@p8KQQNKXLdjeig$(^OY7&b7K;W0a zUWl%QcaB^pN@*M?4yfg=``(mrV~M%^m1)Ttu5#*a9Wau)IV=`kMY^5J1lU5S$+yTk 
z&4D@AJY(w3ep4<@W9!XVu2#oY_A(s~^f_oF0cVbB%MSKe7SUo{LMi!CG5x8yJt!Ev zOoReH%*5jDyb%wyHY6NCRqaEJ8bTV=0#zlGJs*jQBLoIk(cdRZ>g$X%X_lHYh=Ho* z7>#K}ENi<3AwnHuFxeW{HnBf+W#ypnge80GLii=|Q9%bN1~oPL3fXpWgNbjA2kqZ6 zOqRuImBoiX`#o0~fr@@VnVJl)%lfQ~0%d1g*<`OU)<5LBq2UaPQ&{5~6sHiuC zL#Sy=`uF>U`*#2-i5% z?5+lWa9J%zgZ!2Pt{%hL1ws~+N)aoDqJ&m~EJ_R^!egJ|N` z21n+ERihJ|4EKXoj+!M>Dq;TX!SL0~0Grn52*e^fo91o`dQz0Nm!59AOXxxOxW(B8 zs)R0b8e+1Jow5p&%!4-PF-^F`L49q%6ry0#KdAL{KH`~HJvYnqtE8L?OfB8bBW0iTP9V88rs+b~NYjzW(LP&y-#P|2 zp!T)PCnOh|im;FnN(|;^&wXmbJHgsCOw#Ys;~Z*$Urq_^fag&39r93jF*!GTpC6jyZA?@yV`_6OqAV;n~0OOU?I?d zLPGFGnBP~|q)Kdlih}7o4~jT2tj*FMM}cDoXoamQwZ?zzq!SYJ3w&%fbz>$!Bh(Pl z4^hjZ5cuA+v3RE$_2=t3MW!aD-|gGg4(gZl%Az2NAjGGb&_>1=zvt{IbK7<_U-l~Y zaP2ud;Ba?nXv4>WLs`^+zN_(0hO+BO>%^Xlt}tsTSrzDrKkIH5Y(DVlnB=35x~a?B z`)p1sq~YrDOpQr?-T)B0ktQ4$B=cz1X^2}^aweSO}5z^W)g@3)wAecw*9 z9MBXfO`OxtSl4+GqlL-e=SdvKUF=F6qCy0Qhsuv)NVdH$z7+Ks`l~&?IvY<-l<)lj;KOa+ckzV@lUssMT3I-g_Vr*`eeKi4E#ji3% zVgtxB8j?=XMlO0BD=8dH&j?-3v?{06gk^6YgB%16v$V7w8&WEvC#k6nb``HCYfnxx2F4hBGhGS^mRU^-)$p~kBXEMpeVTrp zwn1ETjh`(cXTHW0rP5g8Yt@2}mb?QVfJL@t#{YDgub9lu;Fe&lHxP41;=x?OA!`PHXoEAf+@ZGOL@0I{`w66ow(yLFMncMhWX-b~aEZ4~GH)xK$ zfTdL)ej@tzv%IJ1Ho?mAlbwA8vV&dB!we zik2nNDv}c?EZS@Rk4P0Dis6L>1S82xph>AH!87aF!^(@>%0Gjb@GaIqlVP769qx;W zek#Y%Xv=<>?CqUx;InE9n~Ul^hm|}(U?b;XD#d6KL8ZV0MA}@X(QblA{u|uErM*7k`R!bb{j8(5la~ zEoz#x`-{^2pUXWhBXq9equ_tLWopek9e&cgl26)7F#CmUIpuWjHO3^&4Eeh%S zn~{LF+B}()gD8UdyTOI~7}y1_&mK0umn*c7&P6GY#BeCybiv-bR!q6LPt%RQ{n)+s z4iH~!PpxE1I=J*O2>wi{;h>K_s9C=^0aV;A@P6f%iO39L@$-?URV0HR2@U2oGJkFFd`M5jy19iBhavO{?)#PMZEa_O2;+ zwS;W)d0G@#;!otalwW3cY@@_gImH$ETrpgA3IOidIIHB&hFyt~89gxHLElTv z5Ux280cFrRJzXINYAzvX9!8$RG9%-p)V?guF2L1#b;6*;@X(3)z21yf;+P@!_9eO3 z?2pAk;NaS0>6`?)rc|B%W1o3NHx}T480rY>1`sO0hcbJ?xE+F?-<9llf3}@@S-R@! 
zi8DUAJ|*DKw?Iv7?gSzo^1I8dU>7|jLz|ql-|fGu`hMUo963WT_nrlYq|#Gat@tpJ z@H^`GuMcj1p5S}Y7#2HFb3<_RDnN;y&~p8Z{FitCOO+P>l5gy<#~FUQWua~;>z>q* z8(s&Q^=*rRVj-J+erfXJ&9L86kh^YG_2;8(!D(-^_MLfn`S=h@yT1_30Y%0|3(ocG zSs`1K{!u+}itkb(RVBK?bm)wR%MEUnl>;onR!Ur1DphvRl4^8!q@QyhQkkgg8?FD5 z*)3MADdva43K5%XD`-I++Qv*z7y9HUhG>jT=)1T6B0q4ji0ODoiT&sj9_%lF03 zKq0zXG*&fX#o3szp6(fCPWU4OJw51xf~Jgv=Iy0Bc1})8b}Cw?!{3Nyp}r(HJM?4{ znpgFtiUSm!@)dxdyhypLNB<)-N5$>|bBcU%azO^g@H~ojV>3Cz6T+f$^Z4~^%kW7p zo!PGsB~)oVL#X%16Lw^@n+TeMd?wDWP4TS5K^XXtWJcmM03}Ekq(K~H{sB&YXj8kRQtKB+~9vmc$PR= z{X1MQ|D)BH%*NOJ9Zs-4rqx<8@cE$E_V$;?y`K?zohJwtx2@!59pOvmdM{VZqbGDC z;$A;M;MNX7;GPh33H|;03u6^c-D=Xf85ugE{n`>5Z&2BV4cT2PuS@P8U$!1&ZJ5$S-0A!k))_UiqsVieyIE`1o76)Iyi!1=u z;k#@)FiUgQTiGq72(aGQ%D%43dnO_33tZDf&4pBu z&gp9dnKE3Ld|EHKAy8Q6Ltw(KvdkqkepJVxZi$=w&AVZ zf0$QVRj6$6KT_!V>@b8rr_-mXag|+PjCdy44lAp|Fe{_-G4l=jEV_fz>$aYaoHGRY ziVpmt3sGQx@)t#MH$Hx(wm(mpQavds8}6)2`mTCXnhHVYt=hIEzyN z`s2Sqda$rFCkW?`T6ZdX&P%<7v7cjx!hqcc;wIKB%cMWMn!B|I5uT_wx{0sRk2K7@ z@iCHr_?W>rom* z{iL)kd2XGQMCYgX#}snJ!{ROQqp8lEQP7si0Y~qPHwirE{LEMtS&h6>-Yqvge1J3O z;gx$&$826~6P73Eq9hRm!II1Bz%AH45w3j?goP{eW#j}|9G1Ih^=Mx%o6h7ngq}l0ZMAnR~ zH!FB9dOTg{?3hjgWS**=o;u!LBK4<^)GenRk36|ag9yC6PCD<0W#S+PZtHHj%&CIWU91~6h7fC z@D2R^fE;>FFlAA8i|$R-#$Bylp?3lG*=N;%m4gS<1v4!p`?mhD!6(hK$00^`)uukP zX)JpIej1ox&$geK-cM{j>Q`?1Za-rDX|1SpU}hVM29@EE?LpiZ4qp7JU4rIl+dGq9 z$2QhoaAfYeeek|TG{ydo7P<7Z-Ip2tAT?C|brJa^{L>U8scVV)PMmbLAehKYAfj?0pn1J&YO#CV=3eW-TNOX zq{e-NLZmC7_iY#BXIR~HW7XIwEV&iHi(piGO@-P@>b{YcqUWuS%C~7B*z6G>e^&U_ zN6OEcN_MN1$|{v(_m`36WOV#iIg^8$Q%O-yg6pM)xN7Hs{gZ2$_prFcTV*uDg0ZJu ztk^+B_q|RZBRD3aFid&(jGX0w@=++0DnS=Vdv1#_)Tbr|sH7!n3&9v7ObMx&zH9tS zVe`n2ZCz0XhG<)X5Y}+9o`ZOBa2#oT;+WzHM$utzuRNF~3q>tzS=(J>`nR^H>c>LU$QiUKoOA9uMmfq&w{IQL1SOh?jL7|u1lp}Yo;^KEaTO$&o)_Px%6v^r zRY$7TD;tqk9&dqn4;V!x4e*nYMfjaBF+f;w{h{CBX3JlHXrC#xe`K5MSyB^>p}s3Y z&U~nGyNu6mZF~NHA1Z@X@98bG3KxOuCspGVyO`V^&lWMOsCIdto1JtKz`db<}; zSrM4>3-qq7Qb39>by{8BC4kDD|9VkMBLQyE$?Ci-LCd&)EL6-eJ|iRq^unCO>F9mB 
zzrU|TxVu;-!T-Yosf4_+Y@q=E(bDpKzc}|Y0-;OsP0OSL^p`i8eC_$jBQ%H7{&$o| z+;6mjoqLWCbY-HVz8s6}(TN$UDDrG}tzXpDLy^)%yZ_WVHLwcf(vA%UqE7-8Q2m6d;maG%ob8#FODa zJa|c~LUaA>D{gBtx3I>~OSB0AXq_yVKq8v7qvymn3~}=(5?e<1$#@lkEnNy$b>JvKontHNR;k4m7}z96dz$XzlHE{G4wDm!f@t8` z*2Uv&L}H6IgIeZco14CknqJi|x znnj9H*|LUA-jCR0t??gaeUt$mIObWu9{mANr^C2TV?i$vTs z6#Lgq7ixza`|Kf$--xD=4|JXDo_?b4KmV8B?|l(s5b9f9f7lc+G`h=kj}TfDI)o!R zEA=DnE-r7HG+<(~8*XyqXWgZkX#c*4Q$(<*Q}NZEQAy9B+b?q-E+Oo!L$@NjRRC&k z=OFL*U=N^yK5vmhOg>XLS$sKHwWZ{oZ<=6Jag%qd8f@?Fy+TBt?)>Qv^NOMuNDW19 zqX;gXGXjS6usFEFDUP$YeSO}H1_&A}gb+rL<8ZZ7lOY{T{xwd~$gb?5WEmNqWYW@u zvyaudNggim7o)#(A4FwI=i`Wof8HpgW(ej@rYEf_Tc9l9PBnybSTWnsw? zx-J3_%YVtYMZeSNneDbhFznf?_R;L>MHJH8=lP^tdtc@JCfZf=er8y7g#ZYNnViII zUW`KA$-g_%cx)Aa14+mDE*I&+eRjW7S#na6A*t{?H4SBIh5&1MHK6MiF5d8{Q6S2h zF*d<{GT1eb-z+l>UY>m=NX|--i_WXm6bByNw8Ros3@rS>H}GWS&@TB^&(Im~`K>Bdh`z~=N|u#XKE1fqIw$NdWL(`!w`3~GcoI$UqXbX5ATT0Yi5!uR5egrqle5s|&*Mq(#$oo5S-{>c@&sEA++P1~82o+k2Vukl%uwW>t5`;+r zXGy$V%;fsJFeH%;-yhpVWKdPMgHi!=~mr=v6o2WD+?z00w35hENmel zId>kXs&^qBf)SqJqSJB+6?96y+5zt_pHxjz5rrayy{xbq53{xgC zuEj3Vhl>95^2ubk7@v+!5ceH7@mYL)Ozqn$@mfcKJwRtlj^&#J3cZqTEZ#0(GQ;ek z<#fNn>5`5m*2G@qT}2uPK{0zpgBlugjSONr`$ViNQEtaz4xaK9Xkj|*<&f#-^3Uv5 zKe-GyDsDYIfG~*+X_ZYLXjx>hOa~h73~mUYe4L{5?v$&bk+c0Vv5Mf?&G8_%3r@hwRZ#z9WQ;I$lnMy z_hMmN?Cr2h$8*q82&IAmKR>gnEuN{L z{PY&^y4WS6*#cM8Hs2XPJ5d)0L`dX~eNrhEQ+u6a!^;ypKT|=ngwH0J10PkQor=zoT^+Y-5( zK#IJBw<`X+noF$Z9wT^O(#osrv2Rqz`Nu57`+b}J*~erJt27LK(9YvWqH)`u$~_VT z<>|NH8jcC3+w2yVGo$p7JonguzoLA5)%F|ma7B;d$;!qie@|=o_SLU2s&8_hJE-uW zPtS35Ga#NUXvdmO4XDzCFa3+VKPBcJ$wUb7TIJSn%EPR z0_N+48P>rwO8tEckgC6?>unfLJ{0T>F@1sP_6`?Yn6=J~Gx?KwRj)+CLyo6pJ0Rq4 zUF96#U5m-x3`)l^&}gY`b{#SNe(&Hk>6u*=zigQ<7z@o(tA2ukhetN@K+jYPybq94 z{tib-A9aPFl?9`Rbt_h8?lQ~1x^*gPs0;B+Vy*R;tl-W8_(B-N!u@K)rSH^kERD9G zp`B1wzhc0I!R5hhlE(@v&>z49E}O0COzm_&!=FE3CM9+Di9c#mlKVKa_Mx?&4p8V1 z7c^tB4d|)|3~qX+g{mWi$s93+B}r}ui+>RTefX_l46+aBJ)tcwEyW)JF`#ua3fOyU#DcGw zynHwi`eMVXPW)fT)PMJnNQ_t!lbI0-c`I8$6px$^KhJwB0W0X``)j&( 
zb7A;AEKL2H+ysQPZeuVbn!WR|cS|0l+I|eyU4~^CpbOC8vm{|q)P$2F5QHjObBGx? zuy)@orqM8Ji~;D!c7h!84%eVLzbD+Zk_@Mv&t6}THoyP&dToQPr;I{68>RgCBW(I& z7EE(u7hskTa8-Q31mpJf0#)j(j5WRStY_C?Ik&bRy=N=ruKnfpnC@n+U`(FSCz?_E zy8db0ip~Ubl+UjU=IFI~uV%B=NfL~uYqY=rRuRgMMiqF&deGe;x#H!YZ2d;2>W$sJ z%@-m!lGYU^U(`oAo!%ss(b0cbR>0;;j&D|S8PCB5yiX^A3T#C{wj>{BP_LXN65%A_KkX<2sHde_V%xe-b&p(BzSorT{@lzCZ)g$MIRyNq^P1U;c%hKMK=1_2?k z5ZNb1?4B%@Y6>w~39DO zJ~Vy$p?@^W*)K>wn)<{4-Yr5>X?E&S-dfmXb>|k6dYKz-Zdq{Da^so!XZLDMP1z!d zE7jpqU8dPV`K|naJwGx>`#9VS#1(C1H=b}^RaOyxFuJ0Hj%{1vI73F>j#>1`)YUz| zxm0b@{5)gE`h9%G=B2fR#}{t$;CpJ#bW}(*VP9M@uSu_Uo$6Z)zW)Df!2{*hY#~P1 z)5e~i&am^Pw8i~u#02qVLzBj4fCY>fBMv$wDoUJ{0ecI7eT#LdI*gc&k__#!7;RfY zL@#*0-mk@ERHFBehupJk?fChAU@=7+MYi+?Ah2wK{ZIy%Aly(X_Kzg4z%8W+-JfFN zjV8xL-J;~2vT%GYPua~ytPHeh1BeppDUtL}l)a!C$k5r@#c!@@*5jn>PSvu0Hsugm zN||-?b1kmbOwskQT3|)DUk`Qmvuwmcg)u*=4He1u`6$wKdk(yQKOi?Q4vKA?B`HBH zkojvLZ6Khu;H@{qtP%^I)ra1pfKwhY3F5@TW~Y>oZw`#5yPr)@QSbqMnHsDQSoO5V zhmrG%LhoUfR)@m0e=;!LZwT!(1yuN`hZ0jl_rfT6(m1dI3U~G})tA^>f_%>MST!qX zQ5@T%>?1K!*{?Juq8XAazCjlGLX+k7?NYTGijF{ z?*05OG}(LiDVqkkvI*h&B_s6ID=B8!+xU!MYp&=%GfwOJ#R6yxU}*9h=`>L`L0+49 z)sI)`++J1VpH?p|G8DjAB&9lc+aWT%?opgi&mS1r43{_svEnE2oYb7;X!Cyn(g`=O@RxEMI5X`=f7Av-SPPSO4mB?T?dV8_o1GS8 z?f!~?(agS04MPKeh5Pc}<5(v{b}al3x19dZ0}e~$syG57r`_|2ODHZQQ_1PaQ{9v4 zQVwJ)d38e+TR+cl^b&g0~7czAndN<&6y&hwy44r))XGd!gEvJtm_v@z} zjBzfb8N>wr8!l#ywSV`5&GdFz8OXjZkUM)xl{t8PtOURK9ae-?bdyuuS}}gUIP95t zBqT?5M{4!yCYxF8&4>$%O1HhJZq#pAPI}wf5xaX1{!GuQ^Ch3>;)*cqHKjVJrI{mH z);egLuwC}FXM)qJLu8+s!nTYW@;W?v9ef+2o!bE{8ReUoTyA-2MwR#xD-svS=wkr8 zzHy`y@~VyLWI9{QslvEu*UJy0B3|P(tZYx+DceX{4mPd(++BDX27vG)Q-D>Fx&U?gnAg z-JE-?kI(y^@BBQEKOAe^dk)6lYt4J+HLvwTvWuLIXLjdiT7gJoeJYwY?I&=_{4Nen zu0<>{l{ca-0~-)48<R|}TgSh(`mC|eTKT++QraPx{ZI8|fuv5*#XUU-3 zCFLhuEB!Qq`*6t{B!eo73|uozcFyIe+1UrfNgYi4)rdj($;kOlwyX`PDjyqkkv8P4 zs|Ex6wYBj}o$tgQTxCR-w|aX(%^0mf{y-j0fgr1}F_wL)p3N$CbSf@h z6RX>k675k2@4?51P~08Nv$duQK@QAgibNtqj7aAT<|W3(V#z|%BHByFoYxXSuRj%S 
zQDX?B-$f|*7+$X!?&0r<2u#b{kq=BiNN&OkDyAi;`WPg3+{qaOUo@2#t^#k@;bNy_ z+gMRlR6Ub|ZKZmuxD(IO+w8mC8kfN$+GmsAtJ`_18Ay5#_j%f*b3IHOp-msIbp2i( zekPIn`Q(UD6`1dx5Ekh-2e;M5&zjT2m>|4nS`k_4PM}_tT^?cqQ0%x(pUlvr6x$72 zvM0*Dz}|@<)fgb7dFM;jaY@EOj?VjrDuF9kG~6nthV2FfKeQn=?qq2Z=5%g<{p>^%10QjBA^uZC}EW2;jt4AcC@>KkFN5r&qR*AJcbQ=`G9n_IwKE$rp}lC>}Q zf(_n--Pb~X_Q5@-*4Q{S#Y2jzTPvn1MTT9V?)iQ~(uvX2l}R3$g7AZk6g8D!*4!ft8-V+B_3VuH!nM)&gQ;Bo$xBUQ zMt1LT1BA&Z3l4-GnJ@%2_@h=~D+2BD#n7^|#SzKZ!6LNr#O^m^Z%&AbHysFPJ@L09 za7LstZk}SZ*h`+63mZ=8oz9@Vl>5=HSl?P4hbQcClK#_L0Q)S=7o}EQ=m-}h9_44< zORBtad3a;d)^_{3c>`)*6uW$kQC5yfswZmx^+KE&dS%fWLt7z19P8DM{Pihp1RKhp zLjV*~d}?29hk}#8+IPD1BKJi@VPr<%oFupv=&&-O!HXiu2`!-ubtzCfx#KK}x`)Tp z@KVRXm#dDLWR-oqrRh%qw2qELQWY_?S@; zhhC&DJjG`6yj}xt>ZVDn-Dh7ScucviaVb0uX$2)b5pyw)^tv{c{$r-g>bp7 zvi2&w&g;8LY}>laV&YdYPDl6{q*mNo{V zJ~=>J2g${qDnF1f!`|lh)PY4Mk;3Hq`g$d)IW;6rj4w_dW1jcmDa3@fSuRBO{(FeZ zNx3Yx0))Yy2nDF*?iIeM4(rTH66xjY-FcV^Ufq=dG9A+_O8aWlhYwCO(V{i^G5#K1G?Ci8 zmySD6=?dXBa(%xjb?+`{8l^hqeFmHnLnVL?fEFZETb+`fi?HfmlGDjH2a(f7g!)mp zTJdb(mg)y{xM51e%W#vsDX!0b#E^f^#zriTt+y~YbzMdPhyRL9t00*e)#pP#3r z5j#*r_{Ee+;a%z3gJ{;XYbQoKa4^D@d`%a^Yq6hB8`@cgP`J`NNi{g~phwXby=2A^ zy(HIlva^DiuJz(hvM8R^RHJ=x5!XA_!j|^Jo2fT%*WEHryU+qq^T8e3?+f9SEO=Yf zo`7u3&G_R?o{%H0hXzv1C6hW~B$pkmSG3M9JgAwN7O6<& zx=t#n1CoU&A1~uGTd}_3(=98(&S9BaT3{A}l@x`}C9a<;M={}Glj(R?)p31$@R(3Z z)oT0}h)CBZ%X#wX^NAomaoCQLS`Y$I7mpRbix9PBf>_XdWu0SS$B@~ zg@;NlF^$6HeLPWO?j<%u1-=|on2`jNQbt2s7j33QKkU>E*W#m6>Ihu)`m+OtBW|FD zCVoxixMIjGEmUM3m8w2C^_BI)=98IKy{hSK1M7Y3Xi<+S`y2a$X~K>pmgMojD#BLm{&YD zv_tsJamZ=T#3VVxrxxS?c$qVx&XA(z?BL!k68lj2Ha0G)QiTU~1?vpPPHZ;WUXCZdWp=7{(h0y<<~^xX`cZH3b>e*Qeh7*1cD=$7kM$jP z^__9k#GAGj^~{HU^9G(!Q@PaxJl%-5+vkLSk*19`bU*TZNgJ328d5hR&O72u)3RSw z6U|z%hyB7vzW$Ak1mJ31dur59{Ym7BAx~O?iY-$JVoDMUHzp}oLmV}nD zcYf0ZrmuHto~y!*`7`-#V>35W+tXS97i?|^I07REs^~B(&tUy$ zxPvT`d?x}hiWpUNzhE_L!ccW?4SaWhCeTif|BAYsep$FMD)47%&^B{fMt3kS%5#GFb*K~6vfc;;H+(eTTRcbp%Ur(BsLXl>;5d$;`6ry zT@Q~Xi77vzI2}F0k<9|eXLa0jI?uue9Y7)i%LrqzC7S*9BfsFnF2O*eF0W&^tmd%M 
zmrnEv=Z(#CClEBYE2U>`tTU{xd9jx+7~pHFgZawdvR02?M6Y$H!g;2!@DaTjQ+U}e zOdz{AD2%&MXgiU`TC*=Y)#SF67%CTR%=mTK^5F9;7A39guA>Q%p-IZR+x^AU1@vpDOjE|Q=izb=c`8xVa9}0o~I(yXi0h7 ztslT0X_9i%iraIs4>|EY+<6qoR4~z(5AjUhZ*6-0TYmwtslQr_7|Yk}#Lc*N+ZZ27 zdz(`kNwdx#stGus1i!p$XlSszL!&adq;wn3#ZQFfhFg0cUaBa`4S%*4MmlCW*|h)Ace5ARNJB!-mw%bZj)Y3bj-wlc~o+MI}HTP5g4<{1Ov^{RD!o#tA>F_R-z2>m; z;9huZ#qRUChE;}{xNV1s%R>taa#K%(%w3%a`kEfhwOJ#Or z1I;-tt~ii-qB8omEFryGwKN=xIcDowUwZhFmT`4+->EfzAfLe;7T+siuyEvxmqe{u zyeKBk_}ZN-Odgqbhv;Vx|3i-r0Fn!leqD7SxFCw<& zuy3O3?Pr*-3aC`ZPpbsFm5oKu! zRZ#{u;(1bk5i4pT-IGl7cq06zddR8WU!@J5%LAE13vLMMJ>K`)=^RhaHBOVUy36-C zs5YDw($lt8RdycZ^8CT~1eTv?vc+E?VJGv@A@bf&HfK(leW=Afaw`F69e5^E0&gy+ z0hX$!v=Ly}br?DiQhu(#24F{Gg^8E5wM^GlpQ^!1X-}N-uo~fkYX~r$@zxMvTaswN zpR~ER>%|J!0Z@nCRA}_RVxU7H(g&bRJF=B4QK)FH4W3-Ci>G8W(#tum9i4v>CZikP zB#QO4P;*#mP6|wws5=uXg5T^s(oQRLjDxNb9CJfSnhbO53^$xiw(ReT_9GVPlPQPp z{X1m>K%F9>)yi;~DLmeHI25@rzseoAI;HP~UxcspxSteorle@sY|g3b{b9_d<0~FRqR^}+!zTCpMlsF!gv2v+VP+*W-M?UsPFrm zm4VP(8G)9MqI%UiOBn)iV|j5%mHUmyYP}DLvY0q8Yfq=y?KySR4|xS4*14qAZy_}p z7%C9@9|bM5e3t%Cc&5C6>P+fg)E!eb+)Y0&JkxkvbMmQkR3db8_uhNT95^$qzSS{` z;)Kj@lETlA*$xj6vs&~BUz^g{e5hj6&?;*uoIjX55_dIJ!&j#E`A*%oXZ?z)(`Lx{ zS@w(IKIqHhsPKQhEc+_Dd0=>OK=)%#tqx0AU@VE}!12WneIk;I%VI!`1OVljsz^)d#t)2q6LElnlZx&9^iM+d$jVg?-!DkyQGl|1E-+Wvo!qHAR>( z0q6$52)AZYRg8J%#kJ~Zd&UiJq@am=x^MC z#*-FzMzg0(B~z0}r>jyC%Y>SwZ{4ZH7p?^o)0`Xl@X-A|rT2CQlt4#kiyiKNk(7h^ zeR(hubRkEomaS|{0mjJ#b~;ITbhb^KAo;Ljz-`(yoEquG)QQX1s2 zuAJjL8vz{Z8l?`mGB;Aewvn%5acuHWjoZW7sJZFl(ioU)>a9}kxBU%@TPs$fKAT&x_#$=y!D;?x zHF1CVxqSL+pSjR^@~N>;T$2g7+S|we?7>p_Xj|f)5XSlj{LuXQAO_)_>*DP8GcQmP z;@U>%km~1SDpRYM&S5Hwc-wJi+iDmDvYLHe?%>UhWt~rM3Pk6ZPy}0c)t`Avp!z=+ z?gU%?J`;}FVdnWqg;N2HnBmC$I;EB!pbM*oiV;*Jxz@94e#M0i#90JFjsSHJX978! 
z=*v&7A-brl;+XL=Xi1%qwz}}&#xrKzAd597XurUBiAkx@;kH5m`lxb3I;cTU`5iF@ zcJo&h44pkh<@e@@CFd&P=dn}r`B%0T##LC3wpzc+F(w|YY=1rsLH8K#F@;wUvh|>{ zyDO$G7Iad7R6uLr3^RF#oZmk&5f%FbB?||g@=LO>GqV6wAbEMczLIS%Z+$^>&4)>W zku_oXr7D%+)wz;8Egcv917{YOwYFZ&^%PorAbuDC@{o)GBgpwrq4)jBzArZ-)||X?tCpx@7?Et z)v~*{C0&T@+BVkjIJkhEt%r{ND-<&}9J=2Y@9Xz1#$p)(*Y z3EO$$F6b2HNRlQXrIw2`gzvtOF-*UNH>}|0H31+xo35i8{|7;LM*~2A*AQ**c9KS! zPm)w=gQFR}*YLj4byE-Rec_V7y_ZOe{`iS6F$3LbbjYJcMfRr*(s6jK!PpWie~aF? zb^0`RTb{EWwXD#Hn3pZ-LxX7_;0VoQI&O~d@#0!VyXa;mG9v;y9_zMcM3yBkF3tvN z9T3GCaVurYmw&ard!%TjJT^J{A@1NoEw-2^EZPskv{;RsoOQ%6E@hf1jg=IgWsLm& zXmW@>4PYkciW$%DU}IrKUvjffFCs5YnGKbHtN2Ed(1yZ5nNqfj#9#OFXB1d2Nkgm8NL}Q zCvs@%eIR%X8ztCY6k9u=M$pWY9QG_GJI=(SCU>%CUIFVDGE?T{S9Pr}P%|>9HNJM8 z4w%xOYwV2abb@yX{@K2P?I!8@!W8XQ{$7>_fT?l5LbxQgc7^{snu#tgpH1{^E18m{Rqn}NU-E;QXa*sd>(-DsUnB$6kB(>8X7P!HU zCw5qJ7J{FUZ0;<*Y+&FEqS5T z;@}bmo(72{lDcX^dLUXXFm&~DpZv-g<$ly!bfWRWGniwBid^D?)yC4?`Y@A zw&=OsTahh*aYT1*ZJfWH*Dj`8H(X^7VAB*wo;vO=U7wF@OV-S$c%OcwcLP0*<#CoC z+D`)mBNJ+8ADx#^Gf1R?EBb=`tJ1fYYae}@K72So-_`JP$~0j|YrcP0=*;sN@=*0; z>jLL&*Zkc5B+3DUo&3GZ-Aw)Q_p9tjWtMwpS@58^HUGQ=4<8L70e(;H+S0p)Opdyh zZy9d{S+x(B4MTIpb|%02wo^pCFG?WODWW^xbuM(af2MM4j*9s8=XinE zZ(;_4LT3>+TL%Yf+l8jWR*1AsD}AEWX5PKNWwpg5C<)YHwuGuwT|K8X8T%fpRmn;J zqgAi!jSvwk9#1_ZBj+~bK-h&XA&^mWo_-CWxUJ($j73@H03CcLv_a>-z!Ak1yfhR+Svyy`qBiyx(f<;gc8npumM4%T`jm*y7YIXET5D5Zkvu97l5d z+-hK_S;rHO$8L!)w%@vY%7(Zrh{?fl4yB!2M=dnX)<@lFkmOa{b@P6tkwu}rp#-9} zkA^q4Yca**+)e;618*v7DDm1X)6zbQrh126Cxbj!#d7v#wscVUD#@VS9!RIz36Yvj zN3OWZ4rT21)*O~n;lWAszQ|y!{>09hfiM96I6RVP+kPLx^|UjWF;Qx2RR3#%*!fv| zf%urI=FECW)jlJr-U1?*m|jx^?RDVyl%))NlRbJrFKF8yVQm>Vs}tSsExg z9jgCw3j>1%MZnI|q4Z7sDe{tLA8MS+e10kMEqkN4IIe56Ql zUY3MzmNj%{Vx5>V#(l7V?g{(5cQ5i)yX6=pb|Qlbu1R1^4%A~l8R&cy_3%hauasMD z05*B$<`KB$i%L>VX8U9{+S( zjbm?UYzSR0Fy?i39Hv{dDpt6AE7*Rt#k_`-3b8bxXmW9EUbPpbO@lgnfMeC=w8|pnRoTXaUu0r;9R& ze`~YLZP0m84rG8%B}83}m_U#x4W8qm>)a(`<>zMyg&{Y0cfD(dgm_zn#5Q+O&6=|1 z^>#AC^!cV;g?6ZAf&N5YabM(eovbf40B6JylW#AS+>wvbi6{v#A+xv}SQ-#?G>_kP 
z=e;F@UDz6bRM$O75?<@l_awUf_rO!1wN1pz`bOsKyUkLbdKDdu-#}e{ms~Z7z(Ta0 z74qiNTXp$tn|n(he7Z!On9_h!05WHKBh8omK@V$PmJQwL`1cTUh;)6{Z=x15L0(mZ zFeICI*H>R4+d8ca4=z}@f}S@f^o!u0!d8H=g@)0j0#5(sb2K!x%-Z6D=DoeWMyws{ zFzFWAu~D4Kr7$qS2wONtjT1|dSPRwmVnCIl%o_ihw$s>pox5}!%=@v_)b>7^s;{rN zyX_~m^ne78JM8Zh_cS;^-&|x3Ki`;IYg;JMAFG#-RE+?dT?9FsR5*|c7iFN>0HB!l zk0{142w&KKs&VU? z*+(PmdBft_3KHSs`!h@OD~~g(ewq%Ll+QnnofV)=5vuBPC4+KMi=MY{Y@)0JydE(K8$16BQ9w0c zOszy^3Tk&C`$k*;W6?kTvgp_~-VG!a@cN|KS3tal-!gs+0P`;xWg< z{Cs3IMhH-jOm+vx2eNIpWP&H}HqqF{fs`G)rMC|5{P9$7(ep5~rRRinc%)ZbJ`11x z);1=Ip0~VjX}!4jqHSo{5JkcrNe6)9CtrNsG&90ge`6+zTTtB8>AhK>1ig?y4rS6% z#+x$ObC?6dxq`fToG!^aEvS_CN#(=#&Koe*@z#%{cnsVe(;^y~hxr-HA_k@x`jXK&zp%eW{|h{uVGyJN z%neyxLb8%NVE>hIqYK|g1LlUHpngU@25Q=k1Zuhd3XO{aLadmvg%g#45TwpyhV$PF zv#lN!*-D5|LLDB~b*ux*L=?d^b@Fn8GO2w3)Ddi4{Ckgt#t)w~AAxaCtc zCjjuWx0!*)EEY(Xn8e?RFY3uRXJ_Y?uvE{-ip5ruQyVbHcOIZ`C(CtX9G7NnTysCZ z==mglI7guIVUX77{r1)}t=)gq>o>Uib>4vPllCY-+wKFK=RZ1DO9dfAx(~CjDw2Lg z`ZD07+L9UzJ;%M*o%NMJdGfmX75#$}C$F3Hjgi}6MJ8(vqbJc@o987r_d>$f)ohh3 z4A&*5R_3ZoGNsJYVy-_cV-62(RY4Ut0LG&6F@T;B+go4R3lGrN;>d^M{-!1G<|BM_xbIoengkY~PS7eT+Um_M@qQ>O#v5L=CO9lQ zjApd4X0p|4X>qwbqjJ4*@0%?8>4-3S>z!nI)AYGYNb7)h2}rk-fy=qi!y5!(&xb#f zN@WGEWg2Hs!(VL@Gql$69qe!x4}e2RlEW*+k=SDJzTaOx66Rd*)!0@SId`Esy6a9<#Bjbw^>3Jmf}a$(sX_W1-!<&B_zC!CA@=g9e|@t8 z)PXy(a~J+~;D}HM4i&>aRw>2%eAJh^QM-ye)87e9obmJ^7NR5OCN2HS z!j*V6DR0OgOZw0;%>t!#odz2QE=`8^aWAVI8(9rD$6i0mvGTd1v zDoIH@o}`xwAciCJ%hinL)yByzs2Q})HBXzC+r*i}0n~>_jeHFINk%8mZ?9%6Yt-~YtJIr(@(d$}%<@P2`h@x5+*Dml@439oqM zaXC(my!kgyb{v3yJgY(te21ENX}5k--gsjU+_1uh{MM%OuDtOo=26dLO0ILcqiX8& z2v^sqcMf1~J#3EgnVA`G;uqT_zSSiu@3UjV_PFZnUwcfk;WH#sbI@_zeUbV#9S#4# z9a-mobg1Ki@Wb3svkPu(@PO$U~$=$t0^tTcPkPIC^N&8Da02OFDC-csg7GvY~uA4nQ zC(q7S5J6N_O12||L`qSv?LMcrn^7dbn#m)7THfOWBtuUxNZ|7PVLq)Utnk5vKDeA7 zCoW2plyn}?rO`GDbV9#-f7c(D#~^=jx9|X8wKJ(WR2bnI9P6$flPQWO2?hqIjUrna zCJg6q&(F^naddHoJD2kvk9cT&LJ*&?J}%kdp8|>+ES?HnXU;$J{eSPvzLuW3%y$|Aa((5X47qGLV$Ci<&Q9wGU 
znP$(KIqeR+p4y&5*ilzHVo@FuM#gOb)C@@^sUh(zE62#wt>$YX9(K;vZW+OeKw%+D ztHejQAW^wMMRgl$EN+T_?mOC>C%>zru3>~9tYJeqIY=lHgH*+$Hk?hnORqqHl+ALi z08T%~PnGyuDUz@1Ikcp53BY3*HFRb^R(sb8*l}j!*gCibq=L&PU_x^u34P{fHAY58 z$P#$Je*J7iP1R59oG^fgW8}K?BftN6w=@%K={5hi=J#Ku0hpjvXGtn5CZb|Q4!q|k z_eTnz*WJkq2etJcx|bdBuWm;Sg<%>{TpIhN54u#rM1tq%AV=p!J?0qMw8EWTgp#5A zLD$e`y^s2PlMhyCg9r2qpb{HcG!is`sSNI8=i~DM=brPF2$|KE?wQ17kJDZ3S2eOP z;EUA_7*#+39Rr$AJwmco33Frz0LS`bw$iT1Y_IV(qlBhEpGvD46E=ZW+@Xn`cnz24 z;fL{FPiGtx$#|Bu7k%T(6U8sQTAc8nzlcH)GQA2{_h7Nnqkc9t#>hT>o<=8VlAxh6 zyf8s)b{GeQH^jIibH~)|!DyEuwVbYnG)wzjzk0j2|J$zGjU|Jou;>dvWd2QIC7^lk z#KLaT-#quPka-qh5%7`vCw8FhPEHGryM+JtWNNQ8pS@@Q1hOfx zY9)y9UesiyB!^$}(t{O#dSL9@sEXr8K<;<9O@uIg-Gxz6UEND>s7nvL&J-q;`YVEC z*z|XcYe{l3I^Y`616U(+-iCFf`f4WuR;Qdra-cy|goZ}1e|};>Om)fsHyF9FHA%- z*XLwb<;JsV3qfeye*VAxm;WMer>r54!VT6;O$0aK(OhoQqj~!QgLnBwU@(cb&psFY z$E!CrC29lKq*WaWy z07ql}f;%r^_KC3Ju59X35WpWJorM^F-tFWf`(DAgVs^i$U}$_eSeofT^nT=x`uU(C zv=S;j`=_x z>eQ6}uxkEixxW}t>3X=s!=-pVA5j(EBEB$vzt2p#U@NmDAj`xgrJnL3s9%KRscQ`W zYTE-qRIB%GK@lKtalrP#a~2-UnZp9%kpyX`)}b=Dw-;Ym>@#skden>;EXX|-84~a8 zNE*c;#$$;#FQK8&2qRhg>W`&4m|5F(FK>Ut-ZRkt3=21`L-{0tg5yovgP!s^!aFZ( z9avN^@5cZVwqSfaPl6Ds1SJ*kH+vF|eKR@uaUuNc5_{4kB=+zQ*6N6;g;*XI@ioAv zm+b|ZbDV8TNo5~M)SBJYi+VJhY{W8W0K^g0o}QkeKkEv3tE;Os8yDWCuP50dUG^S2 zcAk+GgFF4Z(Ew{B0t?;n*|z_g@B1&Jh!KDv;a9PbV+ZNwGQb<@!3Yp!%k2obxS(Z8 zOJwCiqHrcx+iYNPk{HRSQ?Kh_>K8e~0R~nai%A{jbR>bxK@#gP8}abnyv!Y@eW#`I zW%KsyXM=-`MF4jcYry0U7a=f6Ia03h#m#ExOUwRz=1Y$EQL@M~v7CD!qgoL8LgN#C zBgdiw2<@7RvmXJpXrvyZ#1ex_`KZztdlVkl-1tC=#lE{bwO-)JLq4RJIQ_`c%T;CZ zOyG3HBf>^R**kfmj^11i(fVc&Zf45^vPB@LGf6qOnD=>+w+a2?8|O3W50jHBj?T_v zF}GY+EU_J3qg7u^_1IP7u-76uO+HVe6G#?95gP9Qwaa@vMGYpUaWSHrpI>hmU%-A@ zON!w{_KxwgVW}q{T40;OmC-?c~=9Qc9(LdVv#`1p>f0 z$)Rvt9u4iayKWXAexY<;jU2^?;bg1R92Ys{FchKNJTZ8Y%du1vzqaj|d${*rBO|jP zN=mF*CgSB(M?ShAsWNIO$%|uvHSx9R>zaXjC1!UADF+?Q2R-jYYLZTKjQiH6LzMH# zTPFvXs9UQd))9-4aoI4|fjq`M(d{eqT!d4i)AN?_KG8W4pd&fhq znOibXXKTcJ5HSlqXb6Vy4r<5L6jfeL=pFd{GR*A%wPE&$^secD-z`-i$2wl2gk?h_ 
zJ%=LK-%K_&dDq<4LeTm7^KTPDm(SAN$LfzJGj%fZoATLL^LKKWk^jP$h-jBL_fh%q zeQfa);owx+S#^TU<1mDMk{Bh9SDC^PbwUtNtWmJSOSE8!$H_ik8mXt0v=hu8f-D>Aaw zFxD*(HqQs9AsS&wBA7N_ZQOA*zSY?ghK-O`kA&KJ^qnk1XZw|EKRRi=5t*TV#*V6~ zi1h5vEQw?g(m&y3nh8Qj7N=cv0<=P8V{?$q;^HOr)SI2lpHAsbEJVail3s;)o~PpT z)AR~x`C9tFE?+=uw-aXVnE3{bT9|TKRt<8M zdcJ?T=2QWOciv&JzI>LxojbHIaFl@T=vYCZjfVT@M*&#CEZ^VW?-kLeCjv4{QsThd zpLstl)~LuRXacLE!#1>g2U;zJ-H-qH}`O|B#35cEy(nmhyver!RTRNQFr&y?(U}^ zrJ|I=zm)QhnZEI_x(&k8T}*&~2ZpsrA6i|_ZiNbz)GM4ug5Bz?TgN9jPaoesobNw+ z7yKq-?UFBf5kZN+02{|FgU~hukvta>E*KHl9ZW7Aok5;PNEAsmn&i5p*Y50|OB$Gw zhKNW`+W7Ko%6x;(yi@s%cv&xHLa+CQThCy_*y6>>{yQnQW;f~(kMBEm^Sg)eQv-&_ zW}K~Op0lMa^rY~JB~OIdfvf5f5KA{MmmYHd2UujR#`)+t;z0r>|dJ z1be(;VUfb;lheeX*IbCd;5*H7!93*qYP1(GyU~^(@-;gC%~!9MC>UQDI7Cuk-vHn1 zk|O>Oe#9@OjFm|l8D%grSI;&VO7bZm6)Lgj+|_=TjMV{yA{pyl3@ediElnnzYSoPp zOOkrm$L#$eR}}_kD*TA(Ahba1oJd^Y%T_;+UOO)&Yx zO*uFyC^CyrRKllpGtAy2q43sHsI0-^IP0CyPinsEa%ZQ)lKug6VcP{Ms?=EEc8gph z$#S^&(1US&8&>hp!K6XYPRV6V?eE$BBl3d`)(^r7-*0_V3H)f!g+>-s=s5w0Mfgfr znZJv%_3ZJ*L^M^h&7nny6SE&=B+{l(0cD5FlZjtvQKT3zh*2ul(_dNq%JxcVV= zT_@Ma<$#)rs;G{Uy^y=r>1~3ihKwyws`IM>WJKUA!j25EjOqgmT>r9qUqHQx z4}1uIS#s&`P1Q)hL`SbTiSL`QUGIg7RKM^zyU1xb+`b=}vVVPzhGHIlrlsbjWv%i^ z^T|{aiy0L{SUu;Plk4NchnzaiRtJK!=l+`m8m_ok-|kP#BSQ7AqdXbuYXL3lZ7g<3FxjUXD4RvI-17FLsFO5O6%ClgvqlYgK5mC1q1 z%rYzWPeK<9JWBs)1Dsk*fp4gmA7lkVFncKDCX86wE&NA7?D#B6({bhPZFA=J7&<<# zb?Cgd=uTDsX1Dff&h4cle^}jp>%7)+|NfdtAP+3wpToFRBxPYyYHO!B{LW9x%d5S* zwsxAL5ywa$b^qhyx*VcL(LBE@`C#9Bzspw+(ExVQ`Q03F0S8|9!|X9INH!K7;cA~o z$nl*|x|S|a>*jI7{i;x9l z4G2;pmvodh`;wpDz<(p-w_SK~C&aM#5?E;830QrDF4XUYt^R%TSB4h&DCO>b@h$_f zSiBS5`(p=5ZU2~y68GasGZ)F;LKFhU1hkUEw3`&G<{lsI2Nj>^<7XBz?l@Tk-!*S% zLl)yne2^*o7iA;N75yNdG{67Qr6fPX)P%HstH3>|@V1gi(Fduct4l&(zweoSJDqA= zKuNLgtJ7{)b!V*xO8@iydy8B*79*$;JhLJIp}mp$I6&}VrDCH+jEPCN1EnKAVC z_L@D`as+n>;lr}JH`ZbB8+~E?7Wj1{b4j)w72%gfMkFQW3Mf3TArtwMjP5bXi!n0d z^L)yjc8;;JHhr8{VLbkbsitYQT4y6BGoV}r7Nxoj@SjBzRnJP2l&8x*N7``g2Uw>h zK%k;`(;MCw3&<&uylwBh$NhYv7-Pz&cOPV|8u5|_;}|q$VjJotbXp2VhHg$hq!>XQ 
z;Hl<=Ce+c~DSjTW%R{mkZ;HU%-tX{=)S9#J&y`ItZgq zx$~zYt?E%8Fjer2^s5Pac1T%R(qs>WJ}z4$W$|h2*{P%2XWvG^jnCEle9e$(@;+m;Lw2Uzxv)e?G$b?W_<o2{zZ;?URhg#Z$iE7h`hl?NVIw{5jQY`zLw zK+S{-Pxs>~$boqO2&=tjtBu?O|oSp(iX9`CaG z*-M+cnk8TA(a-{3mvjAmP;IdGV(wDr@y!8sgG;2bcgG>Ky=X}Nsn+)k7qMD`$;z*F z21fmxE{a}iXvAEk?V_hH#TiR;OH1PJ=Rb3EJUc3m z1|hvDia7}>hI;3G1Aj;eUUBsC26!^F*v0!N@0#@(C9ubSSCZNKpCIdb%%S%!0zA@F zC)(c~d(90_PweeiJM6aS9+(e06-xDAs=sD3syO)+ve`3PY?H0tU7u#XaD99m{i4ow zG}^ArVGH$wZXj`iF;i4}_~ho|AS#g+a!L5|zBB(JnqX(1a%4m`G;(vUwtDCXeCyc{ zno?A_b8H|I81hb6IfsS>2lwgD{~wEgWCqbmYxYCEGh)wyK*Ab)pT_p*J@@sMgu1z8 zKS+U9gp>`#%RZblZrl5XfF~O#F-vAC%ggBb8qI$5JXw)TH+EN>)h;e-3u4DQTz3J? z1=q|xW!L$Fc*mNGW@x`vxe6T-S%K^v9XJ^yN@p-}P*_+@2j8PDL3-6V#wnH8lZRW@ z?S`DhL=?$QSA7;kZf`B1wt0`}1;}Rld#RM?Cc;9_?{`{)6X2Fw z{PerqQZaSzeL=hkY(@uYO2V_D?EO8}+rO zzNvOc1^;q{tiWdL)Van)GiWX%x^};y2wzItH-!|eXtFh1z<0KvUeg8cUjcD=Y$Zgs zm%ygz;?<Bv;Y^AQE(0|pMh|8W+Aq$=e>Zi0S?W3@nRNkNO-)MJ*6Oh?79W|A7YuYrEg~YuYhQ!c1#Jtj}Q;{9ge66_X z<5TEy^Fs>AokB4oq`I;|+ShL)UO!~r@%4F_$j+{==|lC~fDjMVyb(J$PZ4P|T_d7WBH>eU z24p;L24nU4vtYP`l@+~w-sdQI1Chm=OZH&ZOcCjXt5HuLK3zGqtG(*XeWMhtvD^V5 z;@#=FI@ww;r6xWG8FDQ&YNEM)cTl}s*&RM2z6_pqGQ{kG@a59)wrhES; zUoHLXt@Ezxi`U~vr(U(BUaCLc6Up9diI5VE|!Rk=@m)gpl5$kHFIAJk`VjeY0^;u zu`lo9V616!?8T?`P@a*BsM1d37kKf;HEVHm`e?1pa2pOtHpX3?=SNl<4Q8U;HDI3+ z&MSr!-zjTWnvN)lppq2kk{pZ(4<>UL<~I6-lCXUuuscC^flYh>{%881?w z1E)2ZyhJSVFn=B9TMmst3Y+tad4R=SCQ06>w?J^hUA~l8`S;0RnMSBgAzxnDKXJ>; zKk);6${*XJEbE+PSB=f2+5tX{#m_(KX7Y}Sz&jWv`K(I;uTtraQ0rkz(R{uZEvrz5 zT<|bcrWi0pKv75%_eeyI=lBN>!0q8q{wKeW;};X_69bgo*+-Q0Vm8Lg#6Vx4l9pC} z%0_A;(f(mVm0;{%=sap;ie5FVf1jwzEK_-E!}yuKlI`83<^z>%L{iaQnRUCldZT-- zdi@(MZLqdr!W*+hZJ!eY$VMej$eE-6<1oX~a;BYaq@nOwn`Y@N;TDf>MV)wB_C+~W zMp5lp(K?>x#%V|Gu#yrH^@pXn)c$ci>fY$yvFycxXQBMpZU*b;PtKfGb-cnFuKOVF z?(<_58#f2dQGdLH=WTDm@sK+085>Vv1j0mt@>}9w^j!la;eY(!KY9x-MEvv2_qqSo zH}?Bvf4D!D0rVba*h08B&QD90M#`J!Z9Q<>-=2_BvF=>P^fQ-v9yTjpw@z2sMv@=@ z{5C1FPjPb7J0Wv=qFWFqAoB4gC#|{+u>?{f$?bK#P;%J0gKW`n#fE`9$9*lLkM@kY 
zL@$)o!`8M5skD&bS!U*hQ;0Vx)Y5EPs1Gmj_2ylzie&6SbLy8k@2eR$EmNpP@h1fC z=4^5-j;3``&JopQx2SNrDu_wSaxKNQKOBaA!JE;n4XU-rN#N#w+0kTqMPIftAibEh z4G`tTN@NF5^#Yl6wh6d|cj`#_az6Tny~Dz_{Ysmg+v(90qlHBFY}*y`<82d?-u}0_ zb?h@W_g3UxR0qoZ^ z($|1iKQV3|Gymf1JZl3(2F!99i>TVQUVeP+!YYmckeU4v%mA_jyuNjS0v?qv(_xneC9J(khazI zS~3M>QYq2FVqvY^V&Uy{m+tP2GZ_8ez5vEktL=&lPoF-OPZpV`oH3jfwbw~wzWpQI zf}(gCe{13WAjkced@uKD3bAH9#7jYYF7srle!2PRV?99G6wKMM`*p-i8rcri$XsD$ z%&;pbfyUhdKLqJDsqbqQLImqh*e>dptqhv>-t^@*Zz9>O3@dp5C4V`x@uULsCMZp! zuNJ_2`zBdJd2#2eM_-CN;*HEx;%xS1YEY(bgikkiL9Q||$cVb#pEL;}{7n!1X{r9z z8aA}|KFj?_|ITwN_|P4kH*?B1}@eCJx8S6g=c?yu5K^6SyYZ=%klN@)DBtN7(2SVaR4 zQ|=7q7YE>&4&9|vEbkSI$kNiO^7OuO9`<~kwOfJ{R5Z!YQ6+XCQJC$`)@Y4CoVoom zrGID_S!C-5dTib>f8#I;1(-O_t;eo_JGS#B!kbIoQ&e31Jwa2Zc-LG=t@l(D6%u_W zy&883c@jNe5R&0Co#Qgqa)&~^#ItJKis8tEkA3-HJes)h+nIAEe+W)Gu$sUN&snZ= zcckNvEq9K>tF<0gvh{OtFE;IV7M$>L@IT$(f{eOf(r=y9Hcvh5X-Kjt%oXB66ZbVM z#>v0|@pST#5yhMek`02axCAw=&>)-!+9ZtG>I z?WYoAYdM6H;aK$B{fa#`Dx)n!Zlm){i-niE$uA*0kLplb0U3nFBKk$qy-uQ(Qa0VJ z!ril3;3s1A_Uj49gk)d$Rv)kJMNarlbS-es)rE(b*4Nwg+ATgb2`o(qpAL(tHYy)j zn-|Ozl=m`SGI`5Dh zvElJ5%{gP!cRs;9Lr_-p64d3nozK3K3Pz57GbR+D9kU!}`)#79+PUkU%Dp;sIJHZx zwPtLZ=tK1dv^Yc_Jz8sk(kp5Asix;`d zrA#zz1qvb_`QF|pRMNL`PZP0a`t|FpoJiGdI`xRNDv7_h_W8R6xkkC2V+TbEfiaHf z6A3xkN80_oR+J@Rcui6;mmP_n_n8)%Ixt^YrT~F)fy*xSFf7JdC z2P3%>J;f?H6E${?pBXgTAe_TL27mluUp0&KfZwe79mW1<>pX!Qb_tlLIbs4T7wI%t zOK({y-#wxvPm_7taD3}ac#&_PWUZ}}(-YnLj5j5eU#}DQWQB&sCQP?B1w=PhAm!bd zxddgxE;FEHP3fW;$6v+BG1hbuA11mMrNGy(%_J_nE7FF1HHUqMzBy-NP%uW$t`e^N z0+wB0^eIQYHm>ao3+3L@uT`n2322ErlxpfbFSeDDbANlH%C2N(UEat5W;a~+YJHyX zR!Eywx35xo=+dCIfHL}{l}SXAWqajvg=Qd%te!6JQ9;KBg`ubYc!`bc+_+;rD6>ZN z(#JO!206LaQ?5it#_7D+<}0>b^F}kFC&RDJ^_JFltJzU8R%`e>mziw5a_-1v30%%8>JiCj5|%sF!bcb3 zvP_>#)|7g;;3L%?)HtdIS?}km9Cbdbs7k2p2qhFSD_XGAbi1<0UkVkLYAu%B=xyuqOQLa&O+e z@!|A{gZTS$N?_Kyr7GKb`Lv2GShEdFtBfmGL#V}n6@!kG)jhs1GKVg8RF(hh1gxmA z&x!=k!SfU$cM_h9MNhlclW;zvW> zK5U>i=7wa)i|-h-*Ne%VO5H79HQMqU=7tVOeAcX->$2@F8HUAz-#h$pIgp)PnKbH0 
z%dIU-gtLg*!-4Q+F~(M&=Vgkfa|Q-?+|!66XWvYJhMT*51}SkW z&{h5krl2(bcRLOTZ#6mqoP{VTe}`0$@tqaV$K;q8CLW%i#H$?@Ys=Bhou8h3*rs{n zzc%HZ;PJ!CXcbEne-ZuSPsfs! zrteDSSn*!VXx2wG*SeueMM~DU+j~s5{>T6KxM4n~*2pTU@b2*y2q8g$x+lMH75QhlEr6ruIK>Ngl~U@p5#WDsxo`pHR})Tnf4Y27Hu*6Osi-l0sY6G_!8?O9sm;bDe6<07V?oB&+9NuS(jEew*6+GyRUh5tsITmDbpxZ1YT|T8>~1 z^QdNwCtcQAsH$!r2hFcAZ>>PwM^oSCJQaZ>xDA?`=VUT{>Be}7SX!W7HBXQ9v@b2j zA4i1iWCXp8g}EoHJ)b}%a9Dtadw(ExwCB2cLw0m6;nlB&1qzDdy^s!g^8Ea5Y*=;v z4=jWQ4`C-zz{Ke-#M?Bp?&zjYt~xHhMJD3)GiWLL=TCyIZF3=&{2B65U$+^=bNO4C z3hEI*Rj%YvmpH!Tm6@oQE<9&O`iBw)OoD+&3vgyAbq6kORg=of)IBkN_$>3@PFjuk zjPAv<&R1&t4RbJ&$w$%l+nfJwPxpRCXyk2iat-h6l}yjiZ&y z9EUcoQt*^Asw}6!gt<>mkTB-eO~%V5Z`ob=Y&<7Sa>Q=oJqf6C(9vkrDhPa$qg4Lv zh4ww(EgjfbT8o7#@WV$D?N3({M%XqDim~mm8LXV-n!>-my8~-$A;(rF>I881&-lr8 z5I9_`2{;|)Ys^BAN|D1(neiOi@qN%w7@65)nzSpO?l87EJnZ4yxVCFLms{?#a_Q4$~9i=dHuuUEu5q5LqCg0 zTZ1;V(W=Vp=Qg>a>i_{z(n6d@p*x;d@b(O~B$J)S&b_L~G#3vl#s9#-GVu^9`k--H z^H~&@Pn46}prcv1a!-|ZYsph&9p$_={gUiF6?w`%U!r55T~&X5OEI3G8)CQwcv>}; z7hlMgDihae?k?BNvx{6Ld*w;q^)V8=Kw=lLx{DJW^(#$P+8a48E+VT@3nP_`kU(Bb zc+@>L$G9B5-1cy(r5Ag<%Tg|b4<5?sn;=Y0RoIM;ct+L2cRl1tXvNK{)KN%*^j7=M z42#yNqWPCJt0=_qRZu~@`Cg>-{8?&&z;=T3#c{lG!82Q;2 zctCXb%Nvh~REoh_UQ~`1w@Ii+xKn&LoMZRsI^c}M_;0}r=$2{i5KB0f8;Q?~JjXXz zQ}%ooYU91|ILMsPUTJ=gRhVT@W#`E-%&Y3M_KihTWk6cC`+PlzJ4YzIwWEcwFCRq< zWgC?TCS8{=__e$z88_LyDDmtj)VGm$mS_s)l|GdyPz`zi;r-RL<-wvgnqXJ&CyB#a z4K}lCA0C&tQ&=k9^X8KqrYlMERX$YoY6YBf5Hl^P zy^!;sRo0DiO{QraI+;c`g;>^TO9ip*=Tdak+2{yPQbGA6Z)%|l3~FTP&9ktntW~A3 z(Jl(TKt#wxl9a?tS&MH*N}YuJ)Ss{kob4<~;DoPTPzobf%;MZa{gqnf zqLPy8v_Ja4WCjpbbnVWvYa_eGWW^0cU8XKkh}cGq2d+FKj9j(-uWWNSMSia<@;h zPFn8m(n%;M$Td*nu%rkzHa4#IhI(nIp;0#1!zggFQa_+A=fu^E`H0S5Kj*Z%HJPil zxV+3H;l1AXyq|z~aj1N?l+u#GLZxADR_z7bfJIZuU{k3{AU_F}P<|xBfJ;S39S2u` zf4A!=RTIP^4sau>KbqLy z?>l(Zng6VmB|6s<6wuf8a;)}ri|?j~;up!PDWSay3~mh%0qK$vBG8hxVx!s zphRr``p26)sYS>iS_%IA-X}+9PDf&Kg8Nl~6UgYNISordB`&WV&odM?Z+X$!AT5tO zsa}=n!nG^|7z`Gv636w7sJJ?4@=|Dz1r#+mWAMQDcm1yf%F~Z> 
z#xghQC&O(QN7rV%Q{o~%pJg@4tKiacr{+&5y+cxdmQ0^Oc&mnxm^j+YHMT_nVVuj_ zzZZwxtNs2-rLx=Jofbv)QiqN*1)=N#KGH@Z06MURR$5x<(Fh z=uvr?pHaHgIjuX2C~eC$c~Yg#IS1%XKl+G2aKw@nFz7#G%j}0L>xDEsd5Gawvtp0$ zUw_Ann~j&`F?Qxa2IF!Pj`rK$*>cq`G?QVyzmt+?g4}zV*>ooQB>qOGEeqdq5n6|> zGlf|zOui4j(N33>5h)Y$PV3mdm~r9I7Ni@vaAD70MD#hobKz$m;?m>I55ZahKIm&R zlbx7OOM%KK3Sg=oveDX@)oB$`IlGI-%j5n}c4gy+%utC4i&3;@GtL}2B4*8oDA**-B zz9<3c?l%QdMO|Hsd!Fr30t@Aa@inWh-qNM(TSF*z%g#h{v@mjISR*`z14b$MW$pRJ zZWYgmZp&@?5X@T=uI$3`soAD>&|O+*8^}dmm!C<>?amjTl&a|>9nEwkake)tyDx(6 zFJVIFFHcH+ki^jT;TV_h)NP9@rkI>W<=pe{@K_A-#M?Osg88JE!+k!7(+Tz?7rsML z$U)O3M}$^4+_+R#@V@b+;YTstSOQ_p&dQ^o-}`SbvFv6|A6NuU;EvN+&F;`?*4srf z=N>3WIBSH*(D{$#D)nDm=vGT$_!O$ZlIxnOg4l4GZlfFgcGn0`n-F}GmXNP1U=keq zHpl&QQzqVak)x{S;ezPXe(on%mK;6X_`1V-rgx+pY_Ued9$VlkC7=b{p{AzJajX*9 zhDha(^W3s;-o-Tl$pVI+>`c^i{6Qk-{JrAL<^Dd2>+SjFyJ}6age?49gZ2I?Jgex4 zhQ(WkhiwyrUkq3kj#8t$)Pq~(N2YNi(D)oy$eUM$MP9hCM^MeVlPqBr{#i7f8scLH z_=BB6^D37Lq5zrF-Wpx6vRpUXM__+zGnOY&m0{58A**~iMdH260$g(5@tT!!Xyjnl z=ejZ-Mu0o2p?kmOUAnn1xFGT-MrG=8wHxTWgEd~hIny2V5fB{zt`uJ!u7Q74^kj zYb3ong**AMy|;0|r{xm024=fVIFMT3kpsUKI_vyWXIsJo`nstEzAvf?4e-fph;fHY zte8?ldWHR8q9n!%@lJ^ ztgkikJrIK~Tpui74$)D@O9tewDn0SKt@*Ey}=vN@hb_Qo*>p%kvC{ zX5r;PRbt`A8XJFqmbt;SlwHW%zTiE@wF|wevdJ;khGjf$<{9R*{uV=lQ2HXs z7P3wf5mGlxXpT0Sz8Y5_68CZ-7-;8IPsDU5qn`P!jPG5(gNV_(17RGBKZP)$N^8zK zhv~ProJ{uPg?;{hlRd*{D|fowT*0!Tfp%#c$+IDm{DXa^$y^JN;CXx37YrX@P^DAT z6l?1XKO-@Gm?sV>>t&SPUPif5jay(1O24f|5RS>&^ta!V#vk6RMX7mNYPIo*$MM0W zJ@9r!{C9UZ#Vq?|oz%dS1PCpXSua;VZn~g0 zkxd$YV;)J`j(kc;BU+{;@TO`gl2S%3AbY%^ z#xxio7V#y22$@NdHJq^SIkU4;1wT@U=TfjL2DO z`uPK^0NO}k6|q(qjOnBtqDDOIbhN@R>W9BExN{q!a;g;pAA1x0{Q&*E#WNXjL3r2> zdAqF(+AqBb#DrwugvQKKIo2rIe#tY(<0fg1Gu;&$;8|Cul``?S502ru-8M_G45Jc? 
zB%I9rQHp;C$$*8n<|@qR3Qg#1*Jj@-cq?ayvEE@MZ*i4)Ggf@D)OH|}qHpHM8^R{C zvvmSNGsuBAH&rM^eDad@(~@(0x*-J#+}shA0|jxIZGV1RA@Q$zNl4$Scilrxb+sum zg*G4bF%NzU*cr48xwGroPbOkpu{6+4Nk>S#mjZAGbumX?+k*L4CN^>52i7T}`c@*i zgXaMnTuXQ6LoCXF)yMc}IsCBw`kJQ^b>Elrt8g*)J^ipM3QAUWVD84f8a((*((&b%IND#0bD=7U$}CmSA{`}gA201RTobsE|14yijy6gP50sP zT6&VMaK}`?tA#c0`>n@_q$=)cH~BjTeG^V3bH7GN6q z(Z{NC6AQZctcOjya};Rn>s+#(M_gTIU*)g23;?Koo}BzDsZ>-^n4Q?zxfa5}^{$je z0lRjUzFFi4b$-dc>d>`@ZItyUZEvNH0i?#~(_ODg7z&C$%+Tu@?Q!Wwn_)|82Jo-f zt}4gGXh>}`Ka8s`3ikLup!O_A;-5lG-Kq53kpt~~Ll391;68g+1DKiRmktBBVTh1? zX?;D}pLe$>`{4nB^W*}d*yV%f>}WTDIVIc<>@n>#7@aJ`&qy%1MxC!qTE=07D* zuMym0HQQWct}VOBu5YZ*VOmfyOR_H(bxPBPns6L({>E6AivV6N=XGQYU`ECt2TN~< zsbYsZPPF%J%LY78yYr!m*JJ&oE{-drsHH`b0;ml{e11jbD%QsVwEg%DCF{3y-&S{s z$)>-5B3_y%OYxNnFPdZ1OsimF`l#~Y!NXj|t`sTm+HDUZ+6>BhE>|`2k&J5bk!dYL zA|e;^%JJ?LDVFFID((;x8n-x}KxuVC3%A{hAcahg6n=R%f8+CHWFvY?Te4q7X>v3l z(K&b<%yjtP<~B%K1mLmF{RLb5_Y7jo?&Q&>MXR@>hPPMH+MboOa z5?MZHI zZd}!%C$dDTsV}Flg^hTaA%Yc$<&_lt^a~)dep`3F@AB>1q}&z^yo(0qjTrIb8?{uW zkLcUq$8y$xv5#EFg~ov8&@wGRE$+otublDy^QcMkcWU&_e$5O*1 zqUvx;5p@uI)58nOZD#DDMT@vCU}!4 zUZFu5r#eOGo;_-Z_UEE!SxfG7ozoH5jZ$2Wf^1+twl#6qc&|N%_2`c{csu0#BXAY_ z$hlBFO$smTGcuN58CKm1M>eZ!UvA znjz09T8;I-Pm6TcX$nxs&#xm@p&K_Lg5L&vRu{`z8)YC9E3z>4tS(2ra;%W--9sh3 zymL;yUX!_>kdNU&3H4(yvwzjH-87QZ(P6ce&6402*F?!$rfLFjlNzm^Tg!&gWK%w2 zxA?RmL&eftvnwH|lHA4~fVrGyek{xz_Y+H0&QglL$Lr7`2NCq#ZC`D~Uw&)Ybp}_T zNO*2XYf)dQm6U2Vo%snMAHt;una39gGYW?LVRSFpOfuuLm-*#Gsb6Pqf#^d#^~q~$ z^zC}k{B2BF&Mcdep?i1W@vtvda*Ge>Z^L2lm{-O0?J z9Rg?YT+71%Y*8G7ZJ&?@OwVB$CKB!7FK!YWSiiukZ?bx1QReCpRX}1DIW_WP2pq`s ztoIs5qriZdVbb(wtY=y59a)_qwUeh&P9XxcOTCaP$({JI<-}5Ui^ZxJ*Ib9qL%ez|@RTRy#o8W3fE=jLd&$+(k9^LU&?}Bdeqt2GJA(sBfL-Y-4A9Wk(1ElYr!XhFu z+G9jSBPPidE;VG7o5elizlV{dOug6X5Z9B&{A2a0y!G9;E#MY5{!xuR)aCua`_wuxOTHHKo7n1Z2NUEEa=ye;{bdP*dQTks%iUEBE z=Iz;^` z1@Ci^rFhBk$tjpdrYVG{U%@u+>&~f==TI>X*o_7Gw?#6Bpg`eUsNA453$ra|f3W>q znTWQyRb!nI!|h+1BJl|<_{>m=i$uic_i>t3`(@odW086A zpu#x}W3;Ef-qTFimN(cZehJiYVFdf`cNVcp5JE~91?bJUh6{!>%bsVlJ>H+ey+{n8 
z?8^Nmh-YbJYOqf7hV$I&&AEZ&K$Xyt=y^`P%y66x(Wx_RJ=4oQ|3$B)clzl4=oehWHZJdx{ zM4h|Hku>e+5-`iaqL#$=2Xg!hlGJE4G}4-HjZ(p zEbT?q>4ulCjGV42;Z~P?zdDgrYFtR+xOl5v6`{cC!W~G!jRXb zZNnI~CVibBb>ovssQV{ezt`A`+Wh2V$pH z5X#tuze3iWZo1d(E9eMDj;?~yXvw|rIth}KA}RBlPXfrX27}z6cFMr($?S~3f}IuB zz(sU2jDXW*Lf%s!xF7}Gsj6TCish#zNdP##3czX8K$nq2r5bZ+${WWgRW8${a|4;) zZ%=19*>{T_?O%iG0YKfeY`bvu#}fdEVLMnJ#@G%P&aLDzI$R_eNW2mITyC=Nf-Tl-jJ~p;ivreA8X#Nmy zneTlNQowG*ZR^J*sd06T%t?io(R`g`{lecGZSRD{Ti5-wop+5hyWcs4dwg#S*A?x! zbrobp`O@JvLZ;@hJ?4|AH$Klk)ni`35A49ExNMW!$Ko5a(cV6#1^S&sXhJ&M1ZYqo zG#`+Ue!E*N5}l-YdFQdwc4z^OVG|L#&8$X|GV@eFSqS;|QdenPI0ml_m;fCL^8Te` zO@V*?5UP_TLDIslyR=mk^23i8vWf`6sL&iQXbE$mzP(Cil>d&?5N=)C^~vE@46fsvD5($Mq3`5 zENUO+vo(Eb$#S!#QseZx2KZf!fb@w;k z)wTmUR5IfR>poyqvg4JMPMPDLHvVL;LA}Xos#Ih0)A|JkWqSt6{@-q67t%$UK5cCl z%Tm!5^{>>ARYJZv-nwf0uz03(8SLj{WF#-y`R~(>T#EpRjX~7h{c4NMDou8myqBL) zz8hUDebA%KYPxF+DM8hiPYoDcQfv%5g%TNDs>65N0`+RB)`%F?;qA#FI|E0a4JtOk zee;fgUEbF?Ho>mQhBi|ovVDV1{Ab}byf!XFuN87$@dQg>JO#-92a>1cMx6vacATC* zf3DJp{`m1DzlOA8@%d8(7CM0C)qr-||7QQ{2-6|k`g8ehyjZFUSl6Qz!8~u3D_4kq zQ27|-8QY3yAjjW05H0npI<@_ikwzOLdyn`T?A%WV@FuIJg->8;oP6{= zxru3+Pb9SjI`;yrw(gRxSGoAg13H@f2IG!dfU-jci4QcMn#^8geJ zvDPlD{)Sknm``4=7gfMyN}Y+fAF|kG;j2~(9*f3qvhC-KDePKLjx-$qfO%RE36O_T zw*Pt;?H2ImQ`&Azt~{S>Z9koN3MHQ^z6-?C^&Y27NXwy0(lC8bbLagUcl#gi4lZ$C z8Vlp~8p%GkPdwj=57maaE%giNReJjNTURj3lf;1HAp46s7G&{?`s<#6ydq|^o|@0N zF%Q({QNG)Lilx6+nxRyU=^<_j1)2ERJ_WD;(s>MikuKG)NZO4#(7%Zd#wSg`J@T7{ zsvgVX#BwcUTL= z9;wA}xB=<^cmmqsA9LbZ=hE_cAL{Du_kRMWDKHGuky%n^Y7fvNtd0+z1{8VWag?Z? 
zmdFpFjq?t~;W@T1%l5-%(SE+C=LsX%`Qw;x#ofF!;B9OQ!R9zoS z!7Z`OwU%r>R8qMgMPULf$g> zE2{UILe)vAzoz7jxXQnh#ccnN+kAuj#bzGuzCfm1QyOOW7W5C+y0@o6$8HW@fIF9(${?R5!8VHOH?Ov% z^w%A$-)rW_`Q;|We^EbMTTM!IAv0wI%$FO>=N3~3ij!-mAV4>?#dF*@Ef_kd$IQf} z`wVc^ARB8q3b5WPjOYi`2;(1}e@^V4=@#mK^@8KJa*lpJ7xHa*Fus56uU`+kKLH~o z#;JZKR3@Ezp1?voV2veOGw(4tab>jYCFtu^0+9&XliO6IZzW4KnqaCD{GHC7%sJn% z6vpyB>HExk@jW+9kQJvsdc_Qy=c2HR2&v{f5Nw@3zt+@NR>*yra_BX&jZSb1OTWh1 z#bZrF;f({WAy%sJztkFnF}@c|-|3IncuN=iS$wqrf#@b^zOolWjdxUI(x>dhpC(zr z>^weZ5{{lf(qtTarSyI;ZohqFQ1v|c7z$76?6X7ng@s1_zTLY~r+`<@aS0_p~EP{hAnoa4U8;iJkc2Z0jqcIjAvdA22U zzeTe0U*kHAP6x~RU*r0(aUDA^c!X6E{QnoZ-f>izRH@nk9bsfkOZ&a%H~&(ZENWooY-?e_nbrgh7;AUfkqed;Feuje>eu>@0_{x_V_E|YBV6{T}*?(B_Q}uo?N3pcj`S%?CuAqj|9bQ z$6cI1C+mN)lnqxS;^VJ@vyRSFky(Y7(N40Jt-soNmXWk@Og_`o5luC@SF#v|;UOeQ z8;)tPfB!B1WF+9=B7lSzb~90bx0rwMLDFH^Bln!|lKkl-|H0c1UxHT#j)7}f@;3}W z{L4SgvIjRiRRP{k%`$W1KfCw;e)a$E`v2B){$CrsH1KxDD@2NNflk;1m?9z~s2%a} z55ft(6PFKe+jr$KH>I@dsF!aZM3o86K;s=L-Z+vM zDk09+{yO%5$qCy8coZ7Me|!lp1bSP_ajWLmX_dGI9f{*}QT`TGq$MvDf|UJQ8i&;| zFCNS%s;H{ENv6L6zsiaYWVaY1I;}m&92`!a*#2vzZqUa)MSKzPUg0%9w?QVg*Arqp zI`ii57xFugy-?l*=ciAeM6es|UY9r2=+KZaIkg}41Md@9FxVtFGE;DL$wLN@g|@#} zr-6)skT4R=8q9x&PFc!BNNY4sa2}Ve*n3-zme%yMJ!AiyW)1YYW_H`D)-9nIOxIzD zQP&vA)DH3vJtL0P7C z-;WxwMobzNQzebcF;rN<{F$PAkEAi4sCFBB%H7_T;IZCyCp)1{5Ms{*lG| zH%rF8_BuL2|M$Txhu=b2E8u!HnKw-IfFLtt23P)Fo(G<|&|)z%0!Fh2Pq?ME{x!e| z8v&VI@pkyo5w&*k3cSPcHsY}1Q|Lov-MHauM(iS^GBLz$I zzt}(MID6xGI$_r5Y6{K>{MBCR%TKMUk%*7;GnGr>P?H{*oiQ0WG{ml-ngILXhGg8i zkDnzjdZ`W-Q}h%4Wk+!EiZ~{~v@xi(EoBAe4lLpDHBM#HWsKv4V>0T$28*;sdNel< zPH74-2e&WwHaF|q{u;Xi{;hv@wN&k@+00iS$mb2RB*gOJw9Q+;bV^Mq~5^S#N$Z`y#LJS8ooRIsc???k&}Z^c7Z*|J_Eqxg(R#&!jA!q;K(AJ@Vf zVG)TDQ}upE(>$@6SpAlyXWoyI{q#&?9GY)|>GNx*`!s6dB?FyiSbiOhxqeOyGjlR=* zz{To9k!X29ffSPKy?j-01{fG1N(5BVsdaw_u0XE8AHNR1F)R`&7VzQ0+ba`km%|@_ zu@;6-mWXPAg_Rq}Kgone-A?B$^y&281jocan1@DtJ0bzu!OygTpq<(MnSF1mo3W5? 
zBnm`8FOiI-q#GO1zY-|LaMBX{!R{v_tS@ZlkE{Hk-COz5g`g9fdC%7j~|HLn4%+I zqD#snD3ep(Njovy2*iOgY|0*dhA_((32@)HMu$4FP>4f@zv9cFkq2V=f2yj1J@EHj zUOw7FN?;#s7ew4Vo=-XYHZ1BUQKPg)HJ41q_~q`KDZq!b0}7ID-Si? zz@Xk5W?L9yJ3RcLfZNvY;z(15(-`%D&uq0i886CZ+J%N0!KJ7#e{_cMBA4_1RtJxFr1tU9OWp2Zp7-HBgc_n@zI)d{i$NK0;i-q~6MIsLJMtAci3r;`6 zsR%MHeBV#{9~d?WIzKUf5beQ-Ej{N!9`p@aVynS8Xq^~)rtd3sgyAKH_3`5{7GHWq z9C^$q!Ie;!7i3ge$kN79@2PNlL?k}F`u@3Jx;N6{JxLisp6CTdP~_Kc_e}WOFaZgV z$f<6cSK@iOTvi{o(c00z3iLvl!iBX#AuMM&)RLpcKKrwCk8A(2+I-fRcS_3yprWr)*X_GH5;PJ70BQlJ~DuOvTZ_g>!L#(7t)w(Y}r7SnBm?!7*-1W~jz;-}Tu{-#$lxgR*h0~K@qA83)@u6KTz17A z)y-U`9Oi86tiF+f5xx9+<;%c>93!c=cWnr5xab8$E~dXc8<#!2J>VKu8$B9#JyOmH zZa&;h-}IdmogWe{A-W*4BC4Tt;U{u!krWHgS@VG1wvQlfhxqA%E`g}b{aRRkAA z1Vqn>K8H;bCb8{sPI6wb53=c7diB?IeY9OMq2SQ6beLP?6%T4LUSpYL(I$Xo2WG#{ zRvjD2-p@`MC>(g%AKbq)aFC$Z|9xY7gJD3Y|CUjKk%G}odt7_2luCzhCB!5ZdMgW$q!bAxN^1JFlCm=ls~O}y^scIqagupb-iOBL z^P}kFrH%3hXvChsHhSGBZtD=r zBcOzy0~i-{(y*|`=VWBI(fgHQS8pfl=7>P}g0PfgML*LtleUZ%MZ!K^Lt;LnDm?im zgeMRmn?#M^l*0wM*Z-^B{;WpUo_}?rIgb$JDD}g&)Z*pw{cph}e#ANRu*!KJ#1p z(7)!Sb2i_>vwc+^Kbz7iTOhZ)TIycl4!H8Zi@qcMP@SrK#5m92DA35?#&04irJM~m zlQb)dWYVSaipKd;$+`=10bX3}eMrnqywQ!G8=n)WU{a~dbuK$zv;4~6ezEe>e)H83 zT|xGlY%A65Nn``EyW;iCk#v)y73IC;&5xfssbx!Tnto^8nF+ z=7FE)M`jiM97(+3jW(@9tpsz*@-egOe%2HK zya7^7Ccx5Qr^=~~ZG~;5jHnM1dq_LQ2w?PSpfHwflk4mqjRtEfYCAWzW*M(q^;&K3 z400}8$4_b(>Q!Cz)%811G2Aok7N8a2sy7-88Tgp`F9z0yNfGsNdCySIIaxa%Z3J2s z>;yBlGdC+^N{AWOWESs2GIg<9dH@Sg+&RMtv!-7%Rxk z?Hanxym{I^#HZlaWpH2973ILIiT{#xS$`W6 zc8i0FMh1K|x|N3H=xmMz!D7ymb_mwp#$8@@d~KjHs0&URP1X=#ch?x(pMzx5)Uux&ryn*qoYTd^h*CfLf;XwCw!}JX& z6&y#rv^s%g^?$M>`$4y3t!+zq#hv}j@geeZ$ediw8CE#u@!5wxFUXJ)FFw?x2sV8u zYo@3OLl12~he3cPgn0&S!9qVmuta~hrC@1c;Q!SR2Llsi34`#TXOy7Vr@u((=V{Ep zukc?2VUVETUP3=^sc?ThjT)E=|3@1UIu8ttsG7K}EcB{o;$UiO>u3RXlEZFchjt*_ z$!Iykz~E9pePCr(sg9uY|FQ(Wb9$$!z-IymF&n)H8=Eq_f$W~0j#{dyZ{z902><<^a&^49CR@kDn= z&$*#P1)h5O-dehu+Gt5wf}lKuzC(zWlbuK4UjzQft-oIR+t7D^4Q1oyWchvQZ@2z? 
zsJf%6gE$xjeW{bsUjqBj;NNflXP^M!Y3;x9;@^z^S1**)LMQ@&KcyywVr(io3Kb-= zrGzpNdi@tXU|`^%Kt=oSE3}PZ<6yMdgAM~D0wXIS3Uq_rn}6o@YI73tNIaBE1P%ce zSD;46E|!8xDQYF_ZK3z9_5S@qBM6KG`hXj|V%sr4KktVqABnZjzd7Iko@*LC;r&;e zFXlIt{6dF!G?7jl-%rhu8|=?v{qHA%eIdd^yomJw&WS%6I9dzix4eA^mwx?! zm;?*0>+=637Um~bH2Ycz{U2C?&~_~NzXb-A9pp)<4x!9Q5ho`ONlD4kkm0w%pPnnHWf)ff6HCdRpJAaX>w$^U z{|UfU+CK*d!WtahXgv6B{=S+9eeyL+mVh8i;s+19LfHRFq;Cdv1qWN2&-eZRr!dIJ zP~AM&sWde;J&sqN1$;zMd;k7@HaB$V{<%mPgfEm!D=T99`U`8@+dVjKl(al+vJNgm zpx1w=;3#P($4JzT+=HO z62%BoVTJLkcLcxR|ATWp=}>rvyQvSq`JLH+Y<@$m?$xiFiq8-iIQlzie_siCr0$2- zfRJ200XO__b9zRe|1>;u1MYcX&Na0N{BJz?vnkRFy-Ty_pY;bVPj8jUhteY(-RAOp z=YJUR|3|9`%bf2AmEQ^Zr2^Ap$8T(``46*YK5a}(Z{%T^-$(qh^2i_1yU4eObiXh9 z3q5ojsjmMZ`MvW$>j|fgWcFVc40azb|D;*5F)1(2A(`;!T7N@apA%MmOex(rk9_ z9%~52RtE*;!=F_J{Pwy(n-~CJ!X&h2jo$o&IIv_6pRjky@aaPQuak`D30>hHro+ttCG#ri z&bF<`nN(?wX*BE}miSN-*{IFSAl!4q*JFA<o($aEJn$w;NPZDZ)>bz*_L|dN+ubML%H+|H6 z^hr(qa83}Jid#h$j?h19lhr5Pbv5;z>EmDI%A`E;XlOT3<2N2X?En@iENsVrx=CT@ z0`n*!cgq$hXJ-}Lia3DZqfQgr%FeRQ) zB=mULF0Sp$&~HQfPnhuarFl~?vl5f%Y8y#ocu zGA13a99JYvg|q8yZI%Y#AnY*4T}j1SxSGmkqj#?)?cWNdWVWeU@SGe`hw&WI23&;C zi~R-2KhzBj?Clc*ebn||{0UX}nLbaHK617b#Gzi@#-;%tk2M|a?O6@5 z%y%XVG`~@YxW706q6SW#*r%0No_1KO?kFFiqhv*32rAvfH*b7W&j-G_{>txaTj#im zK_cw^WiW~VGQMc9zDD8uD6ra=1O)>l`}Wd*>*jpd>Gp!;mK4GBH$nXYa`F+Vz-Z#p zi2hmxRB^n45(hGFmHQjRU=WD4e~$PsYn9ijl^Sg%`AE#Ma5IJM43rZb#cNTq< zpz$2A52T5U4Kf3?n$M>e^uCMZYnljPHfTy`QcXc8;!6EK=0gc}n0B}gSwM3&vIB?n z^>|?foPH=c^cGc33JS?5M3D0mpRR-mg8$;plYBFiMXZq8%1!&ENJ&Y(FkAdvTlGiF z&4*|?I5^Ig@TL9!+O@tS^w1rVEH})9_IDtEc=gI|+^{AgfnlO#F)m*tPjqlli3?dm zQgSRR1IcnIS?|@07q(+u@90*acc0dlG~Myz05)=h8jF&WfG82c!K%Bw{QPRiD;;z` z4|jQZXn?5s?;9Nb;35d}VDl~87hD5RxsZ~eH}u2vt=tUg%=?m(<+btqK7C1eXVoUk z{To>w?fXBiZ;TG-o0X?Z^m5>yp}ZYQmpr>(e7s`6@Ai2g$99cpwOCw4SfX3`M$qj@ zB&;tsk+*TXKX3KstoiX4T`~TwK!}M_`0)4e&*>e2T@{NaP zv2NB7TLe+6~?ERI_S_ z!Tjoo}b(}}k=S%kXj#b1CO4aIEmGMhn+e5D=T6);N< zhm+}4waB=JsCn;4k@H>fII>!wK+)OEh(|T$ohDoQE$oE&IYez7S#YqV^@3DabXSK`x%`1GjkUH00IpN2%#&~Xz ze(4H}Fs# 
zD$uNG%EP*&Gckw~IhWgN{L^HiRzZHG_31J4-_3)69a=jmneSQVCMKf!T(%@09@cGy z@`$*LM}ni}x3|Y~xWN86uMNQ;P}UhnAqU!ObyMws8M%L)^M5y_B45JsI;^E)kSL@L zs~6ajEohiXL)BQmdMqKgmDS|k(Gd%u>jR@+ol_K4qU}TPlJnLb1{^o~C9~vX)5`7& zJ0~UvZ0d_ z96DUq#OEPdpw_Aet<$h3lnCj2jMcKLQG;%+!;TtiNRBp(E+Gf!&mj7>ek&Utzg03a z4-rqlJ;&FM^`>b<^d4$$ciHs69F~Om4@aWSRgkM*7vZ|>oV`&b7Sgzti27%11w~MzaBJV3vdPdb3xDvB%X>+I&es#=hN>uPa?p zY_-*7I7!0x)(BLNY=|)kWw)&W$CoeuxYJS4MT&#rTyn>daT@Q|gv5Do@~ zz=z=g^b`H(bkswd9yWY9yj!U_n}Q4MCvx?Z$UG2LqOB^=a7)&aO4Vy$d8N1_4kv1j zP26=V>I`>;WZNQmTzV%o-CkI8HkMZbjvF7VQibJhZyWMLm~k8%^da^I_T$f&wUG7H4Z@0j&Z|)Qv_CW-RiHTS4dZhGPWmJ;V(q9#<#q$x{PTvoRG3eZ!pmV9= z&dqEF>I`iaWW3fU+P~TE+!QKO(T5t@>XjyE3*>x9do$z<-y5b?3gc4f*S+s= zoIQ?~Ua$AX#)gxy3XT(Vds9G8_g!~>P)au`r}$iaCdABL{8VhvEb8F6chXUArT@JQ z$8up#>#kEq@#bo*dAPydC-=Hpfp7@Zmfw!E$P?ITM#8DwWHa|8h8{>;k{v-NEV}4@ zDY(Dr6DcOn-f4h)FTo1;KeVz;7u*F#AV2gJZ}#@DL8&QGt@T=`DAKDBu4nSP(Wai} zO0|XLFQ&eL+R$nUnn%g(=6pdP4jNo^bTo52E}3_ZA()^@5%qWP@Gm2A#kA(jR9y}< zSmk4W5$VSA#xZkiupWsNq1Tg2#Xs*T!qUKQ!4EHsw{q7fA9!$dVpuXaLG z7L<|ppFAT&JlO5{T2R8pHzrSv`n{nUrDR9fuHC7UG*rAH9UbcVAu@q2!QKRbaL`i9 zaHec_vfaMD>L~h29BWFxUXgxdqEfPeZN$9x9FpHMt`-aS@Afg43?J(%+9<+*1~2X>SW z4zh*y_4R>uPFnP8*^ISE?DVTxJwv$|BtqRd)0Zl4ZgKW1bwuFlvS;WdY{MHa()O%z z&_jimW?CkA$KnY(>#06P8ev0ho^%fGCQ@`LIKiUI`5sO0(64?F=|s}SYF1Ac0}tE;u_X76L(^SzOQJ(dq2s<#H`>wyJk z=$t-#HOOcCj|!Ne-e?B-1ny$ZQ{kI6_Ts<_UqX&tWz@$w`7Y(e%qp-`DnRHlRH{KZ z6qi}Qm3u%)bvK#UIgtV0ee8^&e5=IgHpsid?-|5$+0Py5$MB6P2)m)Aa(8kp^m{oS z6_u&jP3Ni+@!y#HZ$p3g2~pM7Q&IWA;%p3BZmiA01k3v+jTm5_YS>KUhzb+rZ0ogR zGTR6<4y&NRoMm_9<@wTOTaajl-Lh9Xkq0zjFZ%mue^?9yW0Trf`Dp zU$}qK(w1r>ncA=Uw$$($AE~apTn{KvPSQfGkCT z3glvMPrZ%9o|Y`KuBA}xfHU3%Ri;X6J_!<0x~%-==*HB5TY2>hxNFq7q=!?yA8P)5Fb!LPaZ$b*_i+7pae#h}EU7P~>?v9hXX-F?voe6k`*i^h?e zbvtqqIYEat%F_P&kBT}h3E0j|%g~^SyK0L~@y$h9b4bgw)5)^@j5|%}#eBIgdP7Au z`+!xH0m)++r|XtIJ~%Ge{|^3#^Jxk&Q^x2%S2}`3GrDelIlI}1;ACY-lMUa0nGLcw z>`nicT$o9(%1;GFwrr@%+;bVJedqJP_If5tg~Dz$VbmRLTJ6nvSM z#6M6U8(9@f%$j&}u`OBaxT&#gX#jO+#FFYSmYP{aXBfTy!B36sg}Xot#G_r3hev8C 
z7!=93lTW=R&@5Nz9nO#j&}*ev%s5S#NjhE3eb3zMv`MIQ-c?EBJD+HXTr%|Tfdo%G ztV(PS&R;Fav2yP2O!QSatgEiBD6l7M`7eLpfTKB;c!8$;hMWtkIoJ|?1q{J+zs9pp zq1N11>vj1nChF=v9+{~|sIi&rp&(hZKg(_YtGM|ETJYmYj+&p3d% zo)-uGKOTiG4?~z& zyNz}!Su!UE@GKF#M&6>`-=c5W`4Ly>qCg=DVXa_i2m!pYPLBpn?ZxY$JF26+^ux_Au+ zI@0Qn&Iu6Lk;ze33s1&s5fP~H5}jXMYJ`N%_Jzh3);4X2<-9m+ri|D=y1z1VoK_}# zOgvi@$Fx|V7-ynRQ&bsX$y2ToW1#7LDhEcSLOK{Ma7yjTFu^^g5JcQ$|G)xHGQF;Uk)pY zc^jbF0pudB^3j@4bp3Y+>W#{o5($FW9~QQrj_MR1dya)6LY?sKPGPd$@jUtKlMPMD7~8o9B|PRtcF*L@ z{jLZfWjgh`3i>n0m3G#1L!gJ-@dhc^;BlYl30#U3E2IBjwbwNYFxZD1CE|Hb2p*u} z!nM`dyJO-IPyg=p_WIPEs;kCjpFT^SMHgC-v}yjHO<-ve5>8Tn=lMl@nbQCTk2!tP z?Wf>`;U7_||HRrNC!c~<`zV>tEV}stp6>2>odAQ@8d+PZ+op>I}8=U48bbL^OqYd?JfA5m`jb?tn>U1Q$~xBV@+^pGK;;^ax6H8 z$w2ln0zd1JChf(wwAEP3?p2td*d6WwC|^BCU#!4{;O}v#f9YL1TEij3D`_(C&dV2b z>NV$^rHeH66Gb}BqMXM0MoP(S9}b-b6efjqDm}z8@8q5hITPO~4w5ggHbD?0%cJR0 zqlg1%8rGFD6p||4@N4sarFxqb%=SqIU9CSr13;a#-dn>_^c;XLo?avBJbYtfjS_vS zSb)K(dC$NSF4L^`-i5Npm)--PJ64@%1Lt9G?f7U0joPfNCi0V}YxQe>P9+>VPP5@(R=)2cGlRYT7(;L= zXtgH&Er7w|qodtJtU>2?$QtQtt_oSfwx{diaNm#EwJxKY8vlAN1Qn8FRy~tGAvpfg%{N%vyEBqZ9?FR7c!4?^-WmrwX-550@nz zic2Os4Pl>{i-aBN2^fLJk%GX63 z2EfgaR$R&n_#76w(;-qbUe|hx`eJ>36SaQ+DMt@oRw0buxNCT+buQa^-a<-V19hr& z)(Q$gyw7*molf5G9Rf~<9lMv7vloZ1cvsUqPFL?0%74KyutU#cIRELt1vo|GpJzh% zYp{g?#7ZZS&GUj7_YgJ*`CY&07#lvbVCBX3yg}s}G-NcVwu=z*>fnHsW96RprtI|Y zq?%~4k6QyXMaYDHQQ14S#DthY$cAGv;sRK1=IDDziqykCV<^y7vw3mVL&;}nm@+L0 zrRYqr5U8ME8&!3-X-Ml^Ov#7gk zfAuC-FQ#W*YFf9JsxIJOndjt_Sqk#RB@HFnWWN%QSJ`MPdXJ}tWtAGy8w`?P^3PCj z90CFz#~UOQ+;q+k=0l(nfo7|RcG>1ppNHG9X}(xMGIcE-7x)2g=iCL8&u1Bq6ErDj zAYDK&bY5*~RSRSyym*k&E;WvB8r+rgzBh$)-WyI{L;qT~&d_Y@%=iy^DDnV=Jsh;y zrNJMU3OGoa42{>sY)S`ji?q?LX0sb-2I`y(fG~W#x8Dq{9wQTz@Bd`KUi&=OivN{+;=|O z$7Vijt89Hf(@I;6I6w!`Zg3-=Z?vwnx88oYtQ##5kG^JB$|WH!IeIwu#_`8v09PL) zK677EqM{zUl-&|9yupXcgwGp)|$D!sW%ul#8Jb*%|St@2u)SGL8Amt${0M}Wp%r6zV5HK(z9 z{u~`5YFli(<0(3cZi=al*~AJyibGBimCG%=x$<@zNDzNeutG+esS2YbBsMi!@McR`x%G;g8rj8s73pxsZncv$p5HZ_ 
zOBNcJ^e*o0lRKfqJZr2eI-&T6cH-D4?p?B!qBhDNP>@itkPytc*zAJ|nl4k3I!`l? zSO7Li&_Ql2Wmrtt7n#;B89CbpR?DgJ^~>JV?JP7_&nT2nxe}YVL#Nii@pSFBJDB#b zr`<3CXqEXg>k5#W@3t#a;3x9{3jdxj&MXwsMrg}29~{lPQ^yR1T4HiX2#HmF(iqAf*CO0Fv>Qq|EN7q1kdh4=#*|xa% zE5MVODMQ1c*$6!;__R=TRv8)=JR>!_Jbr9DEGtD)0^Ob+O(z&h$IbI4bfx?p6%eQ3}!2j$m6)SmVLG$1W&}J}rR@H0)xH z`z7YPs+QM6!*}{j#<7>FP~66%l=q!My#mXY)><~J(@6FlpgviyW=D);)Xtg;G2hqM z2dsW6cK99}!18;dw&AiRf}n{^`GHK}ckTlQYX9X?C1fvvPSq#dg@)H(^KU*88=zM8 z30#v#2a&%FblqS{ZeUSKVHmLXM`)vF$wBYD=o8@WlsafUEX>z^3>Np8ruD7wD}4L9 z>YaasThy;+__D*-#K~Xr2&kNdZ09c77#0j8c8S0_aZgo6KSiH0K5LT>yL*eNCx7JL z+Mbdp8KN;aEA8uz%YMBXkBw9aO}b_djron6&7KZbBV&83?ov_XGYhLN67h*)y2@v< z!qlT^$SW;LJcdS$LV~gBQnRN&J`?zLp}-Ooh`7@jB_I;OtM}_J;-^ zi#r`R2aEE-@^QLVL|wPzfluD~ST@r{=3QX|r*++qrMOu){H5y+#(3Lq-U~FmgxCIy ztBap`z6}ab#W3s0Y!1;JT#E0{g{C|YB;sVj_?yF>S^1;`7D-N;NOKiw?HC)4xopRQ zF9dy#b?4Ql4qoI`>S!aj)jRJ>dQ0BFYH7GGzv68zLg01S=IF^$3Hfr>X zk0DixAQdbGu!na)Hs|B@xNn^AP608sJJ$&;bq_52Ad$MYUb$4lv-BMRW(%B_%2NS1 z3*r4=rRwvyG45a7_s2X_o05V0DeSIq7OS~&k|(Pi+lQXgo$9n;j|HmzueaB1GQVbd zip@H`=yYpXddNUBP%RK6)bve{-s3d2b!N_vG_7OPZ`;Q+#S+AE+rKPTQI!Lz<#ilS zm=gA&`4(90`MyeN?z5Dn-u5Hm=4(vem>wCD!;<5{q6XgB|*Hs`mv*~o-UEc#h$3J_hS*4 zY@2jw_IjV@*4XvQdX|Ya#?BbO%RzcF*wGoe?dTX<{1QYKs)3X3vcJ$qeW6T8uUvdI zp5wYVi%qhyrx1im#%lBVZ?#bW3A?MR{nT$cI$39MaP`mwj%k)?DbXmkH{~XepGMhY z2tRB&R@MT&^!otuG_H}psu2f}6?Mg!qe6u665U1#>2TteovxuG(tIoR+OAPurdayH zvFv-fC|#yG9+|A&5Acf<45qd-HQ||Zxm@FgS|Q+e_1CVLuJJwUVp**cb4KmAWVu!& zsvV9inJELfME=WD#d^!hb1`vDQ*Y|*a1L7MCeqMeN+obXf~#a68TAW!oW05m;$wOz zuxY|3VhO}MRU4glz+YFA3g)mNG;e;Ab#5N-t{vZ8RE0Y2kA6itRx~G|8fH@OTg6a~ zs_=Pi@i5dDJIYZ?8P{a5I(;V0adm&$lkZXABt{_>dMmeWSc9+x(5o|?tb@i1RH=)# zDg1$!kLy#%$Nlxro(Z049P{7wn!Tg7>;`^$Pj{IW^MLbE6kNSimvnwa7N*nJ4G&;Q)HHpz#GZVrzlw~3i}71lO{)(FUaSP!-qv<$Zfi+;7js&Uh8isOre$Xt z#|vawjsHM?oMTQ$IhJ|GlGh8Gd-&M`@c`4vA_qeAuM$4BGTxnfHFgW33#8&LmIR zsg#o&tR8BPh1+lSa+Q)`tZ>H?KypZJC3^L8;)^83L9OdqMUI<8J0Qiu=YGpQBW+cG zGvIxT;8f8h4@SDst2^hLWR}x)6I+!Bn%qzM%uy7dF{~8lC`6zEYJ0_Cxh90NF#&e_98)}C z(G?prI}_iLKi 
zx&|Mlsdx-(9c=6gR?8Th0`82B$2zAk$S#k&#ZtJBvZcIP_44G>_0CL$niubVag&kD z-IloJ>PKRUPh!pwg(Fghj%j~)A5 zEc|&gN6mUI=RxE>z7Px*`x9@NM!x4(I` z3}5k~fOOU*a@Zwd5|sD8hs`5;r{)%Nv?Nqy&^%+@vAxiEYadCMGV`8wF|`R=FLAJ8 zRi|CKGoBkoCqi2NvbqC(rT?28J!x|G&c;_Bd}f@C$`*XI$JK~wk4gtn?FAjTLzY|) znp(EPFayN8uTr_z`=yfSHCsQUgbf;j!CL-~D#83H%h~)3Vu#DOA(;*pF6<+tb@rQ< zpl_hen@Uz2GhIzFXXo8X&GLiA=Dy1ev)z!eeF5*i=b%~JT7E@{Kf=>q?lHV7*rF}( z&C~uW7kfdIw?g6XMK-vePiY?%h@SBFe5Z1J&!3oWuAI9HRm0?#OwdE5+*~=LzBWE4 z2)1p3NLK6W(Er!tN#DEjL)H+Yj_;Q8EOVq&+=E7}PX!P^jh?M7E|r<~6&o$|0 zlmzj;4>&G`N`GaWSi*wcQy))<7N;x13Nt}?Ets3lkITDmvz8#<_K6BETU~HfgL)qq zFvE=GoN<2_{E!9s8vB@e`3*{fqdi66{^K3s@$Jwd)Ha}gOK#)=qE$(M<-9d4s~ApV z?QN@S#2jj1H-Y2BBBRmp{4>06FGB=xoy2ToNnkS%lw9!jY{P3oal z*PNAvIlj#))6b7T_i+(OT{kxYqiN0kWGOeL)4TzndQnv;RnT5%#Hx&YZ0NOVTXOFl z9x6ivCQC@sLSERr#7RPscrsvTldM|0gncoi{e&?*Djst?7ztV}t9hcriGEvDT@9b0Aep zu?{WEfgfi+b|I=Btu8!zI2u`7v8sDfZxwxh+8@JIufKh=FwcM1<;Qz`Q8Ezuqizly zm2fFkdzvdZSnGIe5n{PB{yE>A3lxZp7Mo!Ivb~Dq{ZO($BD#k%(-*77<~zQ@Q7hy@ z_P|hRfF0d6Pb6 z3arac`bGts+s%TO{luUgaD(+TvgGkPtbITS8l0*6;+zIzZuQWJ*c5A(!2@$A`qnnX zNq6c8aXL>70H^gxdhHy#fuf0UlcQ)On?gSN~JgV#xyT1pI>%sjVyY`Xkwv z`)JY6gwGziCC}QjG{Z}ElNb+DpS4{cO!olHL99A+U;LLjSD}{p^P}Y;AC+Cuax&8e z*)Q{|FfLXzj_O!4U2T*=pPL>7^&$G1I%G)a`~p52w=E4IS+<1of*4wSo%816@5!Qf z*pUkij+?=aDK?nY+=!Z$>TUOrML2D{$Ed@q_u52j-q$-53@;1sn7blPdLmW) zmtXD_>*NRtdR}~63kXU@y4-@=fmXWZUxwZ&#;XkwR-S7?EtMrPG3&GK>TBvCG1f+V zIpxyb!dK%X+%l_C1Ys-bkg5DG^Cu6<&*8k($JKG8U{)+&|l)l zeICJmUmd2qye(!UN!=JAz;<0U2W(6Q;*eY2%rbdcc{1zno9Rb10B5=e_O2*3=QA3) zDZ+wU{^;JM`#gB^)@%aeDG%8%Jkqp8Lb(T zWXMg_9R9arj`H`cIIkm@&QI1+Ck90Kc`~jxa;tO`F6)$$RTY!0Y`@SblRA&pKLzVp z$AEMlGv0+rZ5h1ywzUpHiOM-j^ApVeeg`8B?&XF13p-vW-8{B3Gc_tH3->)V%KDrP ztSv|FRrcfEnCbz$7R*8{-mMWH*fyY?CQ&#__4?cys$}&HCPciR3fdzv5Ei`3Lw4$P;`7zf0G*G;nuTva!;k4q z_)FvAXBw?RHHy+5H;K4KnG#S-{87SxD{S>!3y3 zkx@p^*jrv5{^pE1)NSbhSGn@`w&|=<$uI-fYmk6&w5z;c=rwuCD)vDRq5l4MpIQH1 znCBzfXbuTzZ8N25mE5z%Z9t9|znG~&H7l6yt0hQg#L!Kaj3$1%5vI02o^;d_sq*Z) 
z86;Y9^l`oYa-6v~Py4lU9mz{267#w03U|NL8+F&5C}?3xQTf#x_Kn#td%1huf2E23 zt-8aXav5&3T>E+2m4rJS=0o2Hxnr=K@)9G13JMs|N%;I4c%I`B-uPv{3@k0E`&gxT zzy_^j*qPi1eX;6dLpT{YhW=_`tDabloqI&BJ zEB9=+!uOvTX*cwxIBi8pZSt`fo3@zz`5?G!ZN}v#bz>wb=&AjdJGrz`MTuq$)vbNb zd+!=RAZToJvJ-noQ>-ua*72L6o0$4SR^$HFZN@8~lx}kMYiO}hdMD`qq|UIWvACpY zzs2LpBpB8HsmkKjerrOD^k`e^^JqI$W{Qo&r2Wm~lGIR5&3xz~$p;fT6PHO_rAQn0 z)s2`p&5aT$;hSMigJE=-PjeJrr}yo^1yaKkDi4KyuKR??-S=keL#j@O$mQF*$RLA& zp;|4pz&=zLZOxf7(D4ejEa^-yONaq2$HM}x;CGn`JJ2U0G=o}WR_DE6lJ^Ve!S#U; z&L?gjtu0=jXwL&@#b4b1T_StbhJ{p(=7&l%<>s-CE}&CyDq`j|ZFfX+aR~_oCS8t* zgE?7Ua3B}iFa|@mDa+k{GDMcYi8V2C*z)>n&1kjy+VoU2ALX_!2=k@39@AIPTCmGr zVJAFzK2!MN#{&G0wd+PI0|$5^o?bIQpMIhXv@({XY$hf`QP2&*U|ztf>-rzGt$t2li7J)|u_+Q|;5_5nW$hy-g85R zS23Akzj-K32`<_n=aw;~N`p{W^w-GmuYag0#wV?guPF^j`OdjF zIrrT6#&{WnA3d7gwRcsmRkdoax#niv<3f;brpLA_-AR_iD?e zASW1j9<=1G?(MZOplfGw+8dh%Pw+pmjACT(3UVn%c}<7kc_!vWMN>%1pYMUkNc-o9 z-IyTQ$P49{veVwW3}YazgrvjA`Z(l1{8l#+H)ZS+(v%xW|8(RA!{U=*({Cu1>FNRb zDpg0fqx;GlYq;rarQLddy3d|FD%&*`Y85ItcFJm8iUxN(-#?N=5Gw_STBa)f6#lQwp(oj}8v9or94lbo3*!X4cXg zkd1mS6gyFJiEn{J$tA@xg7LL&cIfLYQMp?;s8F-$#!;ev28d+S#LP_czH^-7d6*$r zyWi4Wp;1mV3%jGVxuki-9Ab{|F_F6FX}^Tk31s|ov>ZjzdZ>6&6?3A3twIxuGHIr> zvCmD{2F5YP%x%!bvLW{K0@z(uy>_rV*_+DtCg5yqLpAnpUq|OCTvD+rsCGxY)+IEF z#S+b#E%!f79aT*4fp?Mv0=u8C(9?v0RERig6WA=T6vHXk!(iUeZzIhWtL#ui4dm7W!)H<6m;%M+yEIf zfMw8%OvER`N<+;Q-8Rp!W~nc@w_T>euDD2JH8?l!q&`=tC-~+XC^Mr2sHm0d?js$J z5A_Ib@wxK+dm=~0Kma5Ru-J4|13f3*v%11*Ji8bTI+QBR3e48cn;QS{hU+9_-PaF? 
z{$RAobP|Beqq^&=#lcw@KJLG?!GYb>S8XB$oSO4;Uqkdi;d6@Nu@$kJ2i>4dgn1{# z=v|-nyRFp%4C8Zm6Vw;9O795F_hz>q=y?Fnur;wnAu;}D9%WhLM0Q>ab7dr1;RdeM9M-UF5z=Rck zPph|}gs@r7))I0v`+gPlpU=|dvFG;YxYlu^X;TKyCoDmIB~GzDI#=gAy&ID+fYMn6JLikD^UFad_rJ z#!-$b7@V=x-MK#BRdbOK;3CM5Q>{xipB${k=|>P$z*-w8`ECzz zWM{jpiyyEFlr|S?M~utoRPBl+@hkL{y`rd=FbYOV%@>wB_XQ{liwDW(fL)7?WpIulPf(%f17QFA3btp3;E1S?G$XT)J zuNkXtA$tT42ZLTKC8xFsA;3XbM>>7Y{G3up$g->|b@~dy^JQge605oV?UnTCdIEjg zhQt}o%I@OB02OuMze$Y{F0Gxs_$ z*}kLzLbLXp5B89ko@4fWAl)X%nU`EnKd|pnKBwZcyqAirH8-&oE7q;Ww*~;rSHL0I zaT7dSd{u3!QE5!Rh0$_A{#B3c7efH@_4riSX>amAlQu~A0?4$zXyZK;+?F(i4U^&0 z-<=4YwCIXks8+g~M#`YM*k=swACT4HF#aJmpb(l~fsQNd=~RV@>bbC=-(GU6I!e>C z!Tr>ho4$4>;kEDG1~)f)sp_5m;pt<@_mPE0+#pk`ljB9fNZkG59Jm)71uIP0g`tHw zC?`3Dsz~=fg+Y|Ykvj~$J&EO$!hEy6RzN8i6%AIB_*dNvAGbZ0a&c|Cy5UdG!8AcN z$u1Ma+f;m|#{IY_Gow0GJhUktvMIvH8w_8>N~%}$h3rd+`P|o8yae1nCDpL-S~!kY zXKMX|ZUININ4_6l>F(>+Vvm6o6O!1?i|t|O>&0nQX>%xBWKUdeMalC=Yi%(K6dTf} zGT(-+kF-l-^{rDlw$hg2VT#y2X5D)#Fy-7Gog+5xXy+s!T<>*DO}DW}Ta_W;3q6_z z2bZWHM8M59GL$7++5H(p%VP5lwP$IqI5%9-*kJRfCr+}`Y>cVGa!tf~f$#AvN@tV& zr{Yh8B#j2S9HnyY9$N#zhr`AIfo09MVyh~v*{I4&1R-Dclzr}2F9xFc4$YyMo!=4^ z)G5fR&oSZ4bqeRmin3m*Rv`-PsChOD)dor2bhz>=5cPlUe~9`MW^aCsbSm|0Pk#+) z4*ob7{G`lz7u$Nu8}N^urxA6Tx3p(WDCn>7Idt=>XNiwXHJii=YxT|a;p)`Vm=i#N zz}Q&t=Fytup_5+34$VDZ<+6KlQxmMXRSyx*`@16nyM#`2mOrXA#4`V+;L?QL%+%k1 z(;nNkLU%T?Wd;obr#3BW!sF=aPrDlL&(l&dC#qOR!o1mCtvBKA)^FeY!?aW!>=R9w znNP4y*V#9)b@N|eDMpcc-DD2g<8q(51I3h?$_xv=yOSNeuTkhO7xt4_ck(j@b-a%m zR!Hx_rSz=kgxB0z)G!p!X7qzLG?iZA@mE$+VDIhh6uOl}NdxFAo+TD0KQ9l@`v*vr z$)MdPF^bn6#cPpU{T`=Iw*|l6Zmqoic9IFB#YQ8x4CNPeJW?I{=)U+A8n>WrcPT;d z{Kr>W`S?*YZ*_t^ITFPP)eiR-&{Vugcv68olwBXCY?_VZHWj8j$=I|~fs(1)PJfA+ z-D%PbhrL-GOZi6GItcOk2XLblgZMrWf;#Pf08^q``#W$5y#cb{U=Y?#6rnh~^;DPd zrTx{^h5hSvlv#s(wvXut7=eL_)be3DpL@!}!e)NV)M$hQtnyU#@3Rw;^KU1LHFDOu zr?F&ZWe1+$zbOfF7aZ=GB%RI0>}7tINa_Evw}Sez>6JnUWswr$n}XH&M5^c)CE;7Ily~1vAj!4+V)qCuwNfou~zDBxfnJ3!eawq9#8Kn%zLfHEb*4Q*2BsZFQzz|M-m=BH 
ziyZ&q!%*L^fd`49e0E+vuG9RLVrQ=DMc28kmHpzYfTIlv#2e9O&A;)AKC@0?y-#iA z75VoYK3Rf2zA`I;$DZIXU);%|jsUbWzhE4oD-*Mqq!70O$%7;&M|7}VLGv`xPpug*(8N`5t!nbOs*$Fp^^1!FtM^j0kwC}DY57U_ku%#8kaqb`UXtz|6^ktkX+O(0CI6T zo|a;Jr*UvZV+CWQPI=nMF5i8ffv@Z=`7a*C0RUx>l$#3O%?7d)Z-bbI<_SydD|Qb* zw^ujyj^VvXn6&wB?+=d#OjC-zFpGf7qOi-)=LOLzx-J#DbonUF9285()=jtk6)WAs z6}kN&mEuRt%$%ihH_MvRUbe+HQJFVPlVn$?K!UYedi!I5-%>f`p>_w7<~g>PLx;{X zQIJP@_8DO7svz(DDg{7A1(XvGJB)B))3>a*_y--{nvB@{GvmL}zD`25TkBC)E<2xm zeeAwv3ro(Z6`zjLb&#S1pWXwt+|?CQTbMIr+I=iRlIC$K1&+d%{f>PUe@VdYl*^QJ zJ4P)TPp@=7YV;;ezS2D_E6ICn_^o%(aFmM` zrix6}$JLCJ>?XhZ+H~tm5^Ew-bQ6=oU=5vgK4TyZO z6LLB(KEKY|9Espp3XU!bnkdo|^|qRo(x|bjnDn%esHAc~vmjS)0eDkU)0I{r6ttrg zs&M>MD4-5KwCU=Ih{f|4 z7W;i$z4nn;sVfGIlke*QdRe$);7hfM1aR0@dlP=ypG-!tKACRKl709;^jLln6VI{M zn2w~C8-*2T8wSsH%(NqwSXEignbq4IdNdkU4@7P+DVS;mnNO5##g$pi&1>(7PkcvL zIX~;^X819;M4w1BX+B}vMD(8u6F___2uQy?79kaRFl%W#6@c)tH5;=pnT+eT2~cengte9<*U29#ni#QY#?0ZgvSzZ_x1YA5w96EyompZjML9VVN-R5E)kZ+xOqkF*(nAK8X zHKU%`Tv-nM`MvOJW4O2va}T^0GSN@Np%|3A>e7wsRRy!k`ebI)*{{b6Obf8H2|QX_ z+fVlIZxRUkJhF{oOG{mt2ueApMg<7>Fh^y2XYQqs~_e>?U2%%f5h)D8`H6 zm02JxiRH1hlxy}J(7;%8$F<%jE}Wp+EnAYQ!%16tYC5^A*~uSM@WdT^XIXYW3IjOYANu^%c#>iiCjhZ6(l*0`+bq-MrSk z>roG$Vmqm8i%n*c+MKdjv)oeVA#}f8Q^WZW!C5`a%4%O;zojGaa7LSkQf-+| zBb$hAV02a|$_P+$T_u#{IXx{?5nd!USFW=q40MaXtua436ZTWHnltcoS4Xy*!SmeYn+nur<5I6P9} z3yrR+0I-$UvBvrBx=pn3Fi-QjD-+gHlwcYwnBU_guJ_pws#~rp7M*^ZGKM<4wao73 zkL(SyIYy~fxe~85o7g21=%ve>kfXQaE{8En*s@*PZFU8wO)gtyMr$opwl!(6)=uqm zJ|~ck>WAX@I=C3JfZ2E12%2znEaS5wpT+<-SpAF9nlT_bJDAIT@8D0rAbb7I9w2Ng z2{amoO?qQjR`+ax6!R`y#;Cl$Ii&yvDB09<;xDLoAz@g#hxnZ3if2DKFN}?yye4D9 z8$1St?(q2%9=U-OC_qKc%>^n(vo#<0ry3&eF4vkjPqvt^GTlz0-ygPKvUq%duioSl z$Vc>d6{&|g0Q5xw#!t8jX&iCb^TU;E1lMkw;F@|U$BjL@dYe1^xq8pu+5$f9Vlzh| zfb-`Ar_rFz9It1?6@fklr6quyj?a^o7XE8e3s{gF}!w&55QP1{mE6s zLj6p1f<{!sB&VLcKWE3|Ytp}}RpgQI`) zyoJO>Re83(8z9LOvfqi0UFRva$DbP2tr}CxT&c--fj5S-_!N%YT3bzTU=$fjaxaMr z2OR*keY%-!20-ls>z#KkzJ|)*G>Q#0i-hA+-_va5&aET>np8l!>Fy#r>b-#j+DHYv 
zX-xd^r%MksOlcy>QqR&xG$u-^@^Gk^H{|Lryc3US?Il6&^I}%Ie*Quwkd*i3iGlJE zcMcxe!jD3g(u^%;V^GhkZ3O$-2GJP5l=t;D{|bF8%g>(srxSz7Ba>PVveTlK@U2Jc zQ=2Y7CMqhIDX06Q!B5KTGOEH173j#O^98$g?*B?ag-!py!>2ju)r%7AC-Yx4YM_${r8cPn2x&>UM$^1HuE?o zm;^t1g;~)1p-gJjy~q|D)6;x*9#fkz4`a$N((ly{fdG2sJ|0d{EWdj7G=0~+c9_5Z zD%xG;xI{Hvqv!5iYjya*n=iL5fq?S4m4(4lYv4t46%MQUbilU#npNHA87?3fBT=z2 zkOhl`IfStnJw82{A>DMtgJ{Xg1K6ym1OUubqR)8CYL3fsTfXf{cyaahLDb_)#~tIsUgl{(r`_pc zA1}{-8Ob+`fNGjjm6`HbTM^RvDsGKLt*t*xA$AGta-MHp{@b{eTo`VM5^S4+M2h*+ zLBi4yIn<%ib^6E_-nh{0E(hv-gDri*YC`|^-OHi|(d3BcY`kaM@-ErqA5IRZ^5up0 zUS^^-ODp&*-oJgNGvCO@dgCRBn#$wabFwjN3OwEOxkjA#Bh~K^{xF>1EavVCTO+y7iIFR+v~aeXC!d7>wSqCvUv$;Kz!C4_lEnAF(PIL^cT}` zN|*+Ik>o<{+Jydujf`cFu&UhI%yuX zs`j;SUjfX$!-3_yTXz=cdD;&jBuw-cTuk(@kxfiYt~M5$7pXVaPQt(E11Ff(rKOCf zJV3%kY+Vg+mhh%t~CdM>@25|wo@1UV zq$g;L0JZiEs))OPXv+Ea0iuB}#PoBZ{~pMyFqOw0>d z7R(bk=^E(vFs!WmyK5S|6>X(Lm12p#Jx!094w8QVC8IFI3<_@sD+BwO>gnp0mXns) zU`=Um(-{MrU&iy#Ck6<{fS2d+efnuRjf?`cB_Um^SR22HOTrss0x}uQy>SDY8$f0h z>FP(!GY}weRIpfGY?8j-au5`kYI^ipU)U|w#;Ts0z$zV5m%KmXz0OxP_IT%?Mhhw! 
z7_HeQdIVgUf5msik%EBs0)!X~J?~fXR710eyPvK>1}PVcHB_(Go9{&dl{2<S1{1rI0yBvC7*jmd)eZ<8W^ z{IGIB(UZzR{rgK`*1ArB88f-E_CMylK7JH}rBS|;+BPox5|z}8-LM$1nVyd?}wm4jWzpw!srqK(i3| z1+48?^M8N$FVj>2RB;cKC_AD49<)3>oxt*g(n|`~_|K!aAS zhpq+}2C)4T{ssD3Px-Bgh_M+`3eV5o{z37e-3*I(p#bPm^Sa#(YB#id{nhp_gOKzf z&uDzY!T9sf|GKhF`r8Z*WnF#!Fd%R6ikuvK$!2D<4D;tpix9*(JazJcif@NoCCow-z#`<#?nCgB~>n_3m zBk7-l|J1*KEwg{w_g`lFZ&U&3XWxS?voZLH!}_0|_2*0f zWm$i}8FAag3Zq9)A@dJ~lmGQsf9}M8-l3}qurHHwnuz0{E%HCRuy{Yk{fv?7107-l z8d@Cj6X9rKs28ZQU*JhU5EFBn$cZBwdm4K_Nh3uUE{~=#i6ob|LxoWiVA6jCBP<}l zz{{!uW(ChNS2R>yJ}2Uy(;6Ea^WM!s z4Vhdh`aJs0{ta_MA-0WNp&4F_|Bf911H}V0#C$^~@aX?;mVZp0X(}*vrnm6rx4+pC zP8!e<&fRn3NB@&g{SWT;=f8X_Wqeu>D!pK)yneGGWLcmgWN$>&-?O4f0d-0NJr{L; zKMVffa5>;11cDwK3gU-_|GmigOAie#$wEQ@@22wKmu}#soNC63Sz^Ev;>}P&+Ng+pt1=d|rDAL0Jb&~#<3w&eA0nd{d-l+b~bytiS5wL~` zXkit>-?MW8Xex1Fd5X=3SpMeb6e;i=UJjcOHl)b!7gHMM_QmHcQ2~&XQZBEXcWw(~ z!ut;je#)n2tk3zX?{1D$0R5llBwpuwAPHicxnm6b_xXy9M=UttIsdQnz;ku*_c18S)o)S6D;de zUm~;FSqe^)rQ8z}CKf2|1e~h_`;A{Ff>;{0b~Rd3ITW2@%yB}yw4^6V8sO$i8-b~l zx$M^~uT~=XM?+&MM{5?W6lZFzB{8mJ6v90&mV&mo@j1s*>X=6(_z6+WI@+;xNY9dP zsIBL3wy$>VK(wkM{V(k?9a~@}QtwtOxo#UKExcISl6kjh>na3y>|Z*f0k-N$@T@xM zm%&pY@MSXU0jX-cN!ILy*JvvMI^B|vmQqij)}IVZZTGXgftJ}54lD-dJ^Ax%?yd{2 zw(^L0kLx0TX+@0K6#wnnQx8I}Yj&d>7| zYUWDl>o?!fbb24{H{Z+Su@@`qcvii+J)KCugH6N;76?9z00AO>oob(8oA0i|u1+d4 z(p)t^e}zA&?|avo>@N9>J(@;6N=b;La}Pdgj#omZ50*rwv7V|=tcH0e2Vk$N;kaxQ zoHNzd%7DDZ=m@|$QgNP@*N&rCW+Uy$-FDnjjj8aBLiDCWmJNgplqUUo0redf)*bu? 
z8GXvWIxaok>|MG&4K^?{(=K@53(KT*m2=;ivG+=q#L%718!QjL>8T*;WsLb{R9evl z^4=B3?~YL@1?~2R0|lIK4*Dd=&8O@k)7DlbQpTOJ-|(q-^*mAf@g?&9*WTnGB2r9sWP1- zS@n8`$0sHV1L%S^wCWy=EQ36O5|wRWgi-1UnMa-h#<;cd&QA81u#Omv{>`~psA@a( z-Q}{&{<3v50U#zXt0?+uP%pt(HD3sBQ4YwXTTCx`b_uu3Un?{Oi%pixmNRr$qoCdd}=<6~cR5%P8>0 z>$YP%7aA5L++d~S)a)<;30+Ipt>lYl-O+YhkFvkhA%q_SvTmNq05TUKuj}2YRpQm=j{=(|(t>wG$wrH9If6T$b%q(!Wxi-d$gxNajdEkfGXgIfL6O=g`^^%b z=eH=Q5dM3PsG{oq$=rwR!Lj*RL7^4Bil(f0g=N^A(L&O|u%zu#E3|naBc@imPrE8y zPyTI(NsCZr@A*$71jO|;(9ZK}@^hZ5abG;S>{iRTY;2mZ))LYkS&9H@M-V=z-6qs< z2z+pt&4iY_=O7m#XQr6UZjG!SFK?o24bq?_M!whn0mpyqA&d<-Vt=HY z@F_;Sfo0}}swUNaDXOR)dkAlFWKTMOi8P&Vg<7)wP;Sp{F5|4@yC)}j)a;=*B{?7i zHdc3&ABW$Wx8pS=%;g|_=e1mJClh6Zgf3rl&1Um+=-E9x$XSI%G8l7sq0ej`YVL>* z>Ft)k+n$C6q$<%N4wl3=S>NV>tj5o`Cshm8QV9Q)9*y{T?1=K4y!$PKs`riO7TLstS)$<~B zZI8JvSpy7W1smeG?d1GqPx^v6H+MJO4TsvTkjjU+ldLh|%Z_RnmF~V&xF~RZp;Pq`^MMR#G zVx87c5sR;b0I(Yd5Pjb%h2*GLzu!?RGueN(-AVKe#KXR>?0U31()$d5GLIWK_04Mx zC!ClXuQv5V{HCtHqX|;E;_1ieLA4(%VA7faZ4?TE^5pN#r>-V-6EKawfU=`{CX=#K zf{s_6qM?7tVRJUN&W!N(IH+`Smqsax$H)&Tl&hgvDAOfT-d_^Q_+b9^?gS@Bv2Oso z+_vu{qVI{FR%?rDcXSQ4gM*Hv5nKQI`GPE{jh|3yrrI;>tOv{+9nkXX;FEK4h$uby zM8VVY_HrYAd-JQ#m>;Zt)gpPcn?!8w+ZXXWuT@L%R?{29_P^hp&kIRzI?+wM7ZlyRO z>D?Qrs+}{@e(A89M>JNmuqTJwFHF`_qII3&h8QKOI}_B^F&E)-+~2+uVoy4C2E6US z|HtUkc*~=x;T)@Hs(sz}9Q zohH*=X@*xa)W9Q|XVh%s=S2$~*{w5ng zJGG%nF$^o1FeR)y4ty))Dcii6iL^GS0s^)*YK~Ju{xeyA0^4*pgfv;P>&(->r(>IE zyF|)ZI?oe>*KUCqc(@)LbB|F>RCG-dQ;L1_7tm$g0n4D2=)p%cM{Ri4!kB2+S{wGn zc2gm0Ef1tFa3AA1LbJh)q=!3uHu5J2T&`s*PZK+p*dU-S5zlsl$7MncMUum%$&K=; zkev_D(VJ>NWKUX{VJ?2eW|IM;fN0@um`R^NJViE_lf&W$T`w33 z*uAo`(@lf>yY%F*k^GY&n1K~UQo~GXnWkT&gloliS$ITA>Lk(&y?n~ znl}xP++>YL3T8crc_|j2JT7lCaDx^AO9tu;M?mJ{W`GWHJAJ$%Cr2{y%RxmT;2|(a zjTL$e51ztr6zi6Qm5|-RV;$C^X_k7=~TlG)KSl(IznmH7_880P`3ZP=-?riIH?rzCVx*+5Cy2q;;=t&V=bHp{QT!#OUp z?6F@!AW5%M{QD=)AslQ5$zWnF(C`SM2Q7ly-c#kaztJ*WHn(Bi`VP~9(F&& z3&F2%yWUOC@QPgtos!As+vX?{`e7TP9<^zl9bv{?49*b+2Lq2fg23N+8 zvjqlxj&1u14$Mk|9Hf!K7zEUx9@1nch!*nhg_Hm3=T7@lpUmMqU(C1y5x(ubmiN^q 
zC9px+oM$+%5_gtw25KsyqM2#MMUdZ8g z1H{M!<*RO(oOr7+2(|FKWl}L6bgN*h1FL%3CQCq-Kj&Z$&$RXY(4Uo1;qvIkV41OZPLV-ir9<&Bfwmz2Y2BvJ6<=@WM4vPsF_9<(~7VZ{zO6NhqvjGxC@s0D&W90y8LUbgE)wwDa+Gz|$EC zJr`O<;HTOjLH80}vy+Bu>IRt0?alW-&kVv07pdTjNGe4DI=){345CYvM8BAY9tSc9 zHW-(_#lWSKD_khFS!gZL+`z1_22NTgiAUy^0&-<6t-h7J7cFy}ChH^<-Y*ly)}KN+ zGd0<0`+ z)90G5CUMdTRfgSsyh!FODBQfAW|-83k4*O4jK!^6IKkme{b?b_?ozec*!AeHQIBa+ z8SEmS<3bY{2i^Xgy}Gi=f@-lgh&OxQL6Kxvux?R0uqxqT%6=K~kfClB{Zy{F9#Jpd z`MylCvDXS6(&Jv4SK2@^948LY*cEmrg3r;7dyd&pF+w1Y2ypn0?Zy`olkZFhQb^fQ ztf53CG!xF)+T`!|uB|=E+NRHsZBA41lOP8ly;$lmS3w{>@C49QWYL}8; z8y18fB!pqp&pJ`BdYH0M<6WA>VVh#2ogZjZG@h@g*f|xX#YPdhbU>aAMEeb#Bzd<1 zOWiG~PyM?Nqhz$!&W9s+h1Xrb>aEl^>{fL7yMQc{QP4?<$NgW$U+3iXHYx3l7oYW> zX%V_CSnsY1g*YvK^D^P%q|9_JMWjSzzYcVt?^9E3#~G?IjylHcKjVn&kb3Y*TnHKq zckvu|%nq(!=QbQ+WDDPG?bclb0txI-8!ggDGWg|VNvw3ynYKOTfB0jEX{5Q} z-Yk28vGik4*AiNY8e$voR~tPqbWzf4|XuiuvMR>O0Ee6QwMG4+|cbG>2`tN&_7G=xd#mlCCgsKwI;(J27 z5E2t`IWS(uQ3{4*t@fnKYLg`@-~o7+;tZO2)7fO}0Rl#6&#NDcLy7RLsu^!6#e{2rvD9@bDr>f>k+1)E(r&WHZLO$Qv0UU3&M2mZ@ax3OWH5wnW&EODdWQpua z;K>uT2FyGG;$oyWw7sW<0@2KY=-$CSk^^ia1{zM!kv(pzfEP4*zly+SQ)w)R;p+g( zv-pSQ8(-okOIA1%&l+U~_51TVE$p5K?ZwtOZ_{t-dIN#+DXD`L->noock;+D{uT1A zcp3KSZkv_&+@-C96)iMuW##n^sHaXTOQU3nRBIOsyM(&EXWnaArs_72Ic--Af@Fyn zoX&XT<6_8b!&u@%86iB|6GNHa;qSNv1~wH56#5TsOH=#kbhAwJJWF(T1G9tkP6R%B zFU`hc^WB#{F}6;&M$EbfBdRWF`pmZIXU+m#e_Y73dR#2#|Hz`-)1Z?1032s=;@n0g zDNrd|X{-hcOVRevN4`T1$eJ2YENdsstk$P2&D@n6?W0EAk2!eN3PN|gYQ51#vTk|V zLbD6RmEZVjbN?tMgb4)@U43<MCsaY?#Q!9|Mf*06C(r4}g@}Fa{aunwfY$2u+ZxO9qN*Ocjc{>b36c-P^v)ny z|MnGb&$}AVRhUbV{KrM^C^>S(I2aMA-Z}BX;`mVb63x{@c9}!O^ZN9Y z{k(7?Z!W=^wcQ+d)+xSt*!jR}ZBVQ0%^5}zP%|feWx8i0gD^WWVf`)(B9C18=3Xmt z*qLJ=fg?CL;zBr60^fN&)Q~h|S7>g?WnE;r!>4~qFmJQTbRUo|$k4N#H+-W`MHA6 z0++3NDy?M?qH*Z54UtW*JeO?1zq9?aI!&^{zz+6p{dnY!Y z!^nd!F>hqyZ>D~NEu&X^ZX4b){LKBCQXnGcxU%PK?@umM`^9-f@()L2-qa{?5LqJ` z$E;?N3;t{;IS(E33mtE;AktfjxO%NB^W%^=dBh6oP2f@By<;myoxcFBIQ{~5NNwpQ z(UPq66!VNZD>LHyY3AH_?p3$&flH{(B!cko#7RGhBz?50wp()_BUsyWP?@PU^;d7? 
zSZUf{(EuIIDYHCtN%Ieu=Ou0-dCn8XYR;y7cIM+_a2r%b0_$w^23r38Cw$c%iB<#^ z|C(5@bz;q((XU}-W{Es&_J^o;c_|dF5&4@IFgoZPIRjp{WxD9|-VCO9Yv&98Z)vQerf*=Ez(D}9 zQUQPzs;3+Vtt$hUYG&<{-2`B&UJ7DI>?|}{3)N26E5mK1*=skx7J6l#5IX+#^=`g4Hpw%dCQUKd8oe$QgDACp){101jIQDCf&D15*L+YE!tT}?O+~fXV zx_Lu3K$JQ*+nl$ZrmSLDpm1lYB7dcv9`KPB`nA>hmWj}ddDG>X*>loTLFz*g!)v1~ z$I@~*j4!;tVxruBNtg#cNf5Paw|gF0lEv43Rh>>6$_(evu$gJG3RTxdtdD{VYOJz41Lfz=S?lO$p^>fv$@d^FyA zC)YRg^7P}^VJ$^2sQ>)hJ;O9ev$y-^?fdN}!?U29(`Q zzZu7TCFrX=Ait&!T~NR7kmF|| zh5{Ef*NxqIuFBDl@SV}k*JMh78jYc$`wL|`VHxZxzd_lH_pgcUIqlb##`fLiBFFu^x^T2yh zJtvrV+T7tp0x=`YJ~LJ>HgdXb`mmW1MC&*M&J5=@tjc${k5S71;zg@9>F)F`_$;8T zE6=|3tfb!T6!G+oyA%{m==Lrh%k_9&6|3w0sDb`x^;cOClfKW(R(=~EKn8^ZU(ek! zDF7;3DdRxT*2=Mt46Fn-$cNvol0Vj@sy zvm6j~4T-AC{}|0%Fz<3q5r{TBwqHm>NN+XX@zN!9uAX($|Izext|O&m+f()ensP(= z=CBRISR6KwFDQ^R061_cY>F9_lI5S7tC(Vp9|dqeYLye?ILYlxQ9hMVVc)E}%iGPi zM-c=&L?DR6i*K$2Z)fZHQ{`N0MfeCWqV{_mEO`(pkUP6h-LqquCZ5)ft|eA%n!_

K~FDb!%08ANvoW17x1fl1U^qX#u)lnvFPGbh}ror#nQb=gadC?_85ck@b+t* zL>-y6-gKQwom@6cZ3ev@EO$%#0j61V;`gFQ+p(b)e&&Lw+s_Ji)-=E0dGYBL(XcZj zMNB@;j@k4>Vo>6C_QG0)rHoZQkaJDLlb}FCV`2rg?Uw1*L>xTP-iy)qJr+_ukvtkM8@$^iMh(swVOFtqy*G zEiMxT=bLRA0*D(0?vj^B!_1#UrsYgW)}L<2siq?-2e|$mpl+r7v3r4>{LPdQ3ELP3 zrw@<<7%ZSHJ3lbqbPlpXPF!A5t29bBpBpS#L@ZsPl+rEqMCB%;JK?j@lYeM40P&u^ zbN-5?oq;8l{#vaG)Ev=&^4X7|%;nGr%Z9-=*ZsvcM=I*|E$X2_9NLKgawrm7QRKlc zkQE%|z+RlfIMZ4IW1tW?NqBdDBA8i;EvCC==%{$z*x_~9p2A_owCyL!02XGonpK#I zM)tQs;gzZzJ;Gbq8Q$mHs^RZ;)T%Z=51G`0ek`P$&L)FAyIbBK<_5@!3JSh;<2fu^ z(c_B!GL8v;qdR`x=In2=RbA$^a0fO6;CsQD7UTd+)X2n2wMKddO%6z3$0hh;SyHK{ zgcrYMA1GO?40++HDK>-|G!q3acmJgYRW$v(p&X)%RpXf2def6kM~jrF!W?=5OQ*;y3x2n615r*dNI!iSK0JQx7*GoutOn zXlQHMuEhxw_AMpe81_|Fd0a+w_^X%Eoj|p<@mw7-={VKR-w+L_BkkX!cDz*D#0qzR zF_7nE$&EvPrAc^y`QBt+>WeI{Kjva84ua>g6{YRa`-rKuOX)p#lgczQ)7|+*{0Y=g zpt}9#v@k*4&5`Q23qZ7n%(grFwStr4gdM)=ZKDE!1B^-!!x^Jj3e<}123T)+Tpqcb zMWGM#jS>`_&~G#Cw%xWArY|@lKGEv>=Ii-YT}hD~l9_K68Rv@r?Ta`v(`zbX6e2pv z$4ZbajdH=q`cn0$U~ZQuZ)h-zN0sNl_8&Wr{{S7Op_Xk37v5DWJ*$i{{svZUIJ~=9 zO9=aJaIIGQBeM60fy?r$HO{y(2D$>1@j50LeK@+-i z93wu_Gt_8(;cA_7-gnbRWqXxWP%DVYC(U8+`%wrCdfq%AKa5hLP7YadYhKX#NSE-L zb_2`Ejc=@7weqthjMD8j3fg1YZ$67f*v!7kM)yEDyCivlrUJ!mEy;Q-oR!^&ugf&Z zop;_`9*wPVg?|o2t2)?C@k&4!x$ec{N(kR zPwsdF^0WuotMuT{%^!Lf9=Tgap&dgyW(KWeL z&G^0*ezMqI6oTay>o3@Bx$~)M<7JO-C9Oz*!^q^2V$QAVcJM7w;->U)#{JNEdPZZ` zpH!M+bnklTh>-lQJF$k+=BWp<$Ou-tw=@HA1`+C8So=z-&saOCJ(tMm^dsMFrU|rv z#|4YQ5v4E@MTe|ICuI%Ji47@!ERrr1{0wBfBI33uPu^|N@i7bGwH1X5wpB#WIg?UGYB9OTh9mkX zgl;+!tI0A@PkO1(fd58t@D7`uaD`jODP_tb;RZiXnGnZr0)SiNZx9BRzdP?vgKUl2 z(9%b{u5`mA4f!4l?mIi&t<0q(yIOzy=BqZ`ZApoZ%T?*!S7V8qW{o+tv&?`sS-54I zJoJN;`Q#Dmb)#^I%#0Xb9<;fje&Etv{aGv&N`h$y50^t@^zKlbXWyF#gwEBHk|G{Z zdy+-&JKv7XuCwmSg3~qXaR6B>q7t5gwXI>+YGUjw@7lus;-{FgSC3XAg2@dx1bIL2 z7na7qoOVyt@~+G~)P|PDa{4Uij%PmZML_~LQ2@Nlu27Rnw?DP%9i!+oZ7yp6<}~-S z9O4KjC$#`NkBzwr42SfJoh6o^lC1*MFHtY7LZ0qe?DqqhT+Xpk2DrKN6I&JZ?yn9iVsIGMCd!PsEVbT~;Br{Ij32tWq?0dE3^&M;?zzS%2p^gFBzTpXkq^!TW1**W!Uz81*97#q&t+B 
z?nY@5kd_ANURXk;8>B%%LK^92=~ASdrIxN`$)(}Fyx;ekxu2Q$8y^_hncY3E^Zdu} zplo|vY0&8Iplq*T)U4y6F&IC{?NDF(O5%RK<4HKNqEWoRAwwMA+9pFK(`{?a2lD5p zX2KuI!IVWxu{1=rtEg7Q-^z5r(#DMVISF)qCJmo|+6csZ>~oBYqL}Q*(cJ6AUU@`l zJgfh1=|IymL^+ODNCe@x6SF@NPlDQEAsY5}1na)P< znJ#x=)9*NGd3UOq+cXcnouM=Y*4TUk7=6S?{?eMz5!|C!#-dVA))!rDxCwmhF9!#v zU*xVbtPkyuiv6*N(S0AMQ{cdj_Ye~-(7dNLxd0$!D%?P)xG~95>XMiNchH^Zd(&}| znxS2>!2GVdE3tsTZ?*7b97;4F)=5O{1%n!lT(|6l{w4Ri+o^A0b$@a**Nxw~)WzlM zj)ai&CHU^2f%EQYN1tn2wh8kk@?@qQf)G0Lb>M~^iSr{asMxiOD&*NqoTB=!6;^Q= zI$8^u`q+1C=w4oyfsab}+r2-LXcwz}0DP3w`OI&alp{Y8^tq$GCpXn&;6<|(cB)up zdh1_jw-vF1YpS7M$s8l}2jUQ)lsVX)ru6MM>1VM{j&WQ>8nuxGVpyC4x zMYWalD_|AFqR2RKlqy+s1SjvZ9ZQ zT2aOLbpap3ZyU_Gj8X9OY5rxV>7FKJeu=6Dnybx8-`5bw-dlG0=--JBu+xx8i~cn- zy-$ZuO7=v0FB4A$v2IW9a3sD)onO2B*vNxgu94sSKhUN~fW3B_yTG7w!B|zI9q(xy zl7{is*Im5fng{#kRjwhKJ66elgGvVI5GH7kOFL7B3`*erMKS(kIOPM7vT`WXIa5Hh z7Dr@;Hu6wGtA~4^Br3GqLU&$-xK~02vH5zkS`F%Li!Ql>`j9A8R-D6=9y zzx#bKM5-!BjxUKhV(TPxMDV_D0~P|i;&E6i3DqWOxBe~xR-{b=;%Ql*>&bjUDBZkn ztk87xQL<3T3dABUvdT}ILNw-1V`MF zkyF1_Qxr5SlEn$&gv!VmzxKPEud&|OH@RzfkxD>!L}N?{TiX%CAq{)`-vz|7Ms*yhN^#U@BW9zn}l)S@n&4w)^*T-QU8n)|F} zSUQ2eLvoMOz36Jwu|nIfUV|!scPxu0vE(;HT}as{5BTUW-$p>+MTbirpu^5=GFhhDJ=Bz)7wc`}nLWo}2758S zvvlqCeq)Z#z7A1k(j||JsPJCV>Rzaag1rE|MPJKsfx6@k8hXSTABa<^G(vcwj4|$~ z>b}&Ec+%;e)QFB<90e(2hLE0t_>PgjLS=HPiuJ8;(#n_NGYYD*MZQN!atKj1q$jN!Jg0qYznNEK!GtXX{x1{mvp9Mm^dt0qek!t>V)=5@Tt(l_d)G&$Ry%%Wfag+rXVU zwnWOgh$h0b2mu%4&Zb*t<^$6ASLD+@3?D?7L@{6(^s5s^Fonqoulm(7@0yh3C|$8G z)v@(_&NsmQy518S1#B9(zzDo`d?Nc%}t?Oh3v+!K_cq$}W4;Vreu-uUgz!OP&^6 z$HT@^B(FX>>KHkg0L_xg1A6ul*qFp^ZEN|Ygl{~(H5eXC797(vVp;EgX6;DDfI#{h zR~oe}Wi*AtK4MNR3|C3AdZHRhwsNER<#3<*HNF7`9Np0 zzx!cEI(7@Fk9OzAc!+1xikuegUpyGb4SxA9QbjZ|4dxh&_3@-86l!vq8879|BuU}u z_bm)kCNT5Fk1mE4s$&s_?L3diXAFy-Q@g$1VJwp?PF3fpjGk!K8AgtZ_iY1@YZqLC zW4D_;Tyh>%R2syIZ2}PrcLSJ%pUCZ|3mZrI10Bqo_0gYR1DS!nrx?Oj<04n{jPYXmuHTa$ zeXc3AjUIKAj@cyo2+b#tiE_~(Fo{SJeeR5X^;+{zcBO3HEIv8VHk@$~THmc$F5ZF=t ziPdo0E`g?zh4Su`ih$MNxG(-IWOW<8vPI9ki8x+RU 
z!-JfF-#NGS;0bNwHNt5WG8fx?Gb>ZuiB_g5fFRLHF7xU2 z7mP;$5wHCG+edCplR>mgVn59;R7{4nH4*=U_DdtMT31_#J_h2W#yRNk-=eZr44RUk zt)6?O(2(Pv+&^D>)v{&}t|xyE|AEve{h_DkVCin_dF~?=G(;IZmj1sIxm{{c1j(vG z@I^qvF^xQaQ`;BBiOwBm?+>wO+J39a#C5BQaV12~b3c3>gdN?{YJArwxQM(6`+Pbz zRdw%ppR&a(ZgBgt+?x8?Q|?KV>~(w{k9E(mOl(^WSh52*7P2zEal}{) zIbz)Jx70lmxR zhswAvH*eN9Bs;R?K9GP{iOjq<2EIU+y_xkCo00SVaIm)9l33EM$hu_l@|ZxDf_V`M z7!flou-QZa3z=(n;^pZpq|QgAOMqK}I~*Q}iEICLu#0aL?h;>V-g)d!nxq?`h)G)Q z%kv*xQ~vUw@Y~Ku(D+}m_%w{sxRC7A#M|6t7u zuF5OVKX$K+6R1l~$we{Zr#UO&qAPX=7`$zia6D`tqsr=mH>GK(aOiq@m?s_**u{2|@}KmM!m)ysJ(A^zgcqkOLPr%#{oj88i5!IOtiV$NMmgW^!x8?bxx*!_0% z!gaJLnxm38dyb>r810x%d>t*~SKP24gQRW#1&-uRQhPX0R@ymn!^bb6SWa#7`T0>zbo*sKkauOdm=pp!M zQEFLpQ#l&h?=07sy(K>|N3G_QUHCqBu8*9nuu^izW{Y}EoXrQ^;*V@p5YQiwcUwGH ze9GWIjqq>K?3rLvD0}}i^Wl@iGRUVl{HZt*vsG<>f1)q064+YZv?S}IE#{5$EQ8lM zOUx92h?=1>wf;RU+NXaMqLn#fPBRapsD>OZ4th*(x*8`%9^kR&W1z zV-|)b7_#n3bo+wPFPJm-@@>#wLE*2g#Ebe>CKeac7n%FmEu%F46|tMyd^2L5Ni2US za$@Km?B|7DIMY=P$vc4T@kJ}{=<&P8u5p!5ah`O|0S^m$*)xz3*i`Z8+*dE7=nKk$ zmnwhXeS+5b+cdbIiBjLN(3`*4)+<>}ZjJ+<2euZ2_}Tr2zJkj}fNOPFqs?XpnquLs z7be-dZ}+C-lO4p5-#dG@#S>vRoLa?OngoBkHfI``c;H&{8%P)wlmY*1xlLjV^cP92 zQ@|I|i>P||@k{8t*6)R80W+Ll*vL@CXeEP8lKajG_RC!hKG#6rJTv-`T=pJ&@H9WV zBCb0%M#TZC#4W`HZ^`d=Kk7%d{gmL|E#Sb9;MKWimVhv@`wIA5#k#m zWO}CP;i@vAUhq9P9q393f1*bIf(NbS5Eh`JiH6uKw*}6$Gc& zY4zxe#zEI=fh_l;4!MuDS2X#5R7}{&_#^*c7Y}KaJj{7&eJ>G4TmnDT!3O^yhk!&i zoo32at9SN&H20BZ5VcglIAEb-|EH+WxwquXSwC;HZg8)9h|gt}-b04eqwb4UT;aFJ zOD>`x(+RsR{Jh%C``dK-vAy?n_%eCgAs{vZRC}rY+m4NMddWKmcAN>FOQ)cx9ZLQ5 z0}({QjQX4-X+AflWZ(U-tf8dO5TR{eaF0~89#R4yp9*e+`g93rCaG#WDOMUYY*yWP80Gn@>X`l+8 z>nZUrv_smz4t^j^6g)xud3C3^0m@xg35JC5g(Q&AJwZcM)Auf<2Bf9nHQ$Oi29S6M z9(pxPcoC#|qY00lsvOymafO2^+9L;)cEzuq?iQf0D69Eh-KegwdYw z)-T#ix%_RK-qL2iJu0EK_K^)_Y|&d-KL8GqYGUZ8&>XQvS+<}eJ?g~z!HONpfZwhN z)zKYggJ$p6_#hF%E49A@mUF>-E zQ2x#tmn2ikA~P6*Nt33+WFzlMBl0<2lpcRp_NNOxdYhjNZVv?YX;i~*Nscm(kPE1v zWg94cimiC|*630FF@Cko#h>eMnsAryIC9OW&OruA)mhVWI7%;}#+_pCF;?JfC7hKJ 
ztB&B8RIkY6cZ}P_>39633$|8vG|zynPinU+?H)rABIS!B&R}Pi?IoW_n)7K^{P8Y_qfI}wea{!cjm26Fa(x~(_SK0 z%K;>3ItSDmrx`~IXfGft;?O=9f(Y{U(LKqk*>6A%c&Na0V?`c=81T`NZ4a-d< z(!6+yrdTk4F)y17;p=I>C5fM3@NG42Q_{H)r`+2+Ih_zxM7v?RxV5d8WyO8ZMgSyk zmLOF{zijUp^L53UU?NngINvy!Kafj$o{GbHp&DpZWsv#7Vqf%_HNsm6L(ZDoXYOo* zi+%LCG$>^Y`^Kd`Fv*#0r+!7+Oz4^VK#A7co>>CAlF2?yi;8igeNxF^l4$Hjs7ETC zh)Nbp@Gxh}Js1tDBfX$PjcB7umh&k*AyFK@zAjZ&$Winc)Cw6~xWKk8=TrwSUK$cp zoI|f_7Eqj~r;7aY@c4Nq>l3;uiq*54@`?!p+_8}(0$J_IW&J&5`<><3 zPD2{XbkM{j^4e^6(D`h;#{6?r&HYQP>}4PmY`nx7(sZ zZ<5%4csMzuxzd|pps2V%bDX%n@V6^%^A4_D9KSBk%so}BA0L^WM8gkqbA9cY*_K=lIA`&wVebyQ>6p38D)k$ z0tpg&O$-=(4!4(Y##O+^d z3}ouFcPweucF*C&z1*+|zwib}^mi;YQVJ7aTWWUU!&eZ>jaw$HkiU%krlINX#J8Kw zlPW5=pB;YL)~)(Xwd#Oth>Uky3=>ya9TAI9^ZGGzE|_OzL9d+wrI7LbYpl=s+JnKN zrSnvyRqvToF3-BPg_1n=V4&D9$mhk2Yqf&Mns(tfUMznJRH;21>)-&Q`#OIyVHd2| zl5`VJQn@^{>?DyqRCA4RiD`X(mhpItNt%ppAwnaA`lTQ?+HMz7c z3m;v@cf9ki#UP?5m^*A*-^FfRvS%V@YCp08a=Eqw=RyVC5+B~ZmRnL<>Jey?qvp=b zT`rQg%=xJHC6^9bp{z(ek*)bLtTqGYhf;e+(| zB!9^$zfX!K=t{m-wGe`~TjJ#l=ospzBT?nx-Svw9yCY9VM}pbt0SL)`b&K2bnv>$V zKZzC%&@@?LPci~pJZ=cpGW(&YH2g&BpjdMPi4S_{me1-j(+3-Bfou%*&zF~^AgYeD zxTsbHl1P=6s~gOPa0azy?9ONYOWa4!Dr>;6)uO#JHrUIYY;{UPdk#O)DSI;9nkR3;MMQVo%K-F2ycp!;+=ij&+M z+vZDTm-p($>8NYpWj5nW@-?&<6H*C4r`q8=rqIN^qT5X3EaFwE4tX5nc@rDeU~%UjG{TEqTBpFr669#=KjgXFCJ?@IzEqnr#PC)_Gvp@%R{aA*0Wsfaghq*Q4E3H zoI+cFRW&)h=Z4(HXjf&}GQqeFn;fcamT5{@)zY^jCRcrLUzna8&4{>(3Q2IwR4nM$ z2L#Jko_-Va+z8)T?mCD-cfOGAvo6?hpBU4owBDScvt{8w9kjo!|O*VH!e`E3>xeT14w#{-YSygqnc)imuL0M zbdq04cpTe84hu1R4C`@SusELcTM*P9x8ffLk$!weYlb_2h&bm))IhQ*itIh;UyRgY*6P*om*;5|Y zMG2j#Q;QwEcdD93-lR7w^g5X}-{(aigI?)!c~%|$?LFvzq5o_AESTA2>}_Npp4gqDgT5dFkTIiiD z9rd(GBQLvAc#`ofey@#pOpp6bLLN+xPJscpsj127em?;o1%tCafev^ysCxn)7y>OkrQtes^!Zj^%)EJ=nVvH`Uj{78B z^M}lRk?3&6)Y|B)nPI#>i{{t;db}OsCpp};KO%M>g)f`Rjwij5EK|$>Qn}vBjxzFH zZFASvl)-7^7)Mp5!wSULxX;XXLZq>NH`?qy+q=?WGIxZU%m+#vDG@aCh$m|T@8eIs zX(uyK;f~ekEh0bT4g_eq4hoF@rZfxR?Fu_={5AsvkOO4 
z98XX0Rve-f@Rgq*BA*B#U2u$=eRKYb{Ix9uOgcQ+yAOK{RSR-_AK{(T%aE#3#mS4N`Q7)t6k*hVY5s<~KSiX!<-)D{SW}C1@Rj?uRJwmB z20H%ok~ycW^Ba#vDenSJuVdA6-r{17oQ+PCI;wFWCoV0YZL%ymSs~Z*A~;NUGUF5@ zY&25<1toirpG7P$8YoxM?8kdKS?Ib3oVb$HnhDD~NGID(o|m3Akm>W&{^ky8B3xtf ztW~H4wE*W3DMzMwq!$STZg`DZ&()WC+OQBW>da>XID@|cdt@um$S@a!Q1pfwIHhj^ zxr`T{X`l-iR?yaR-42F;|Mv3+hXr%*#(xtBc@t?z#8oBpH{6%a?CNqguzbx9%B_=- zP|Gd(m^s>biHaV#fB8%lV>y^;$&)_hLzOpbo(2*TPpLx(fOQQl1E7^Znd|>-9nA!9 z--p0>j%O;p12y&efpzox7dPcks|eeemXya>#j+}dI~#3RRLJoevGN6t{7#r%XZt-- zofmSW^*k7(;qDZf2GBf7fuC!*G0uZ&P;hJ#RCeIO@bmG z>YNT60ctDp^%-zXK8O4j++JU@W24tj?1_D7!fSLgyFw+H{?L@%t~N)KD^+JDJ&sRt zMf7jN!o;)W-?QduEz~{>$!B|P1BGAp4Hn>>I!>JVofN^XlodpnaE1BuY=J`G7U+9* zlgRqi{L?2v4PPG@^A=E0zI@RdAq@*Z8+ZB~0~~0YMk>gDbH`ZmWYRChw8#8NoVNKh zzhl&VHdSE(G=OMSP_-CW9uOGV^j{N+IsVPg6II`%&>ZailomMI1M!EB zQ;ZLlbdkuqJ*T16|8ZXOzx(W)tZS^e@$rK$Q$g!9z94A@rC--htJSWt5x>bw(YLBJ zs{}_G5f_TjY1$g+moD7u`kWsQb015@OaUH=^NJsb&8U5Un;T2;>s>O`lfdgOR*g2# z{wVKgrBmr*cf~W{5>LQDeV$907XUoRK43&rMLwq`=#atS#T%VBcf{QAg5Q`tAaEB+ zpg4y>Bx^%H=ry26kW_j$&AS(?XFabEfb}SN*xDKTZSPo#+V*?ALA^CVt%-hlOMO&I z$lN!mn~~`v?`E7|AZFTuBC}3+*b5{YEjydQQNNdOF)4wim+^c3gYnTea2QZsWBT2m zR&943_48liOS}oA%Cf&NDDJHZDw`@zfSRScD(!Q#7ijr;-z4UrZLOu7-|ZAo@6a82 z0%Q(W2H@LHzPX?-t-1iL2ex=(Kuz?RS#PUfEo> zt(lNJ(S;hDzM)tg>kP)Y92O85SottsPo43C0#))sS%Kq6$9mMO53rv8X^9ks=f{G$ z%2fv~Q$JqHB%)$K^g23_N@kn#QVv#J>p;0zmeuw{=w#C#{HpB}_QLjlkcn8#_n|Dd zQ~9Y803@tO+vnu{pc~d{7=*X}sINjD9oztY9?z38jE<9k5 zfUEq#u+1sOcAz81VA<|x{%oaKi9NF@E|9qI^{D?=_!(FO%xg=Ccop$3X;#|+QdKBu zCY&>6lGBqn_)^TvauNrC*F~nkhsQVa+|?10e2Ak8N^3xm{Txd5(;TeY_-y+(J#Mau zzgc_YBYplT%gFu!7;^P1!RYI32xhNqqSPWgUHZy>ulh`ZO+`9+K?KG*B?BWlD#ORi zfC5xF0Yyq+3J4*;$imo{6POp?mR_b^+!2H3@T+zJ^u&|wN=p$0czTy~1b9HE)*0HN zvp72iv9sv_oqXGec;D>$7=CcXfnwn8Rar+W;ddtU+o%he&x5jK!vS@}P(*=#2)@`- zjQo4_Iwqq_>2PnVc#BaoI%zduCzy_Azyic(*36lTvs2Ulv1WZg5b+Fc1jwMi!OIr? 
zkH_2c5o=6_%yzgq7cXwg47So~JHq;(*$2qEn5-B?SN-O9a(uMb0X@eI3Qn`ZuQa`K z+YdFqlT&&T4=?RS4(1YRsE}b$2px(pt$bTPfTD9X@hri0dLX}6@6uFA%kU@}F1^es z*$U`!y~8PQVd2J$!4Ok0Fy{_MraZ$VP4%;WGhC-H)Z0P0ZqmtTV-gogZ@@DaVDl!W zTGkYvC8sPs@LlRN#mtA&I!DFlVYfso8C%B@_eeupHe5{JJ;)=ZR9BhRfC(!&`Qdi^ zaZ}Q*l&eEqw1^0fk~26dTbpY&7)Lur^S$H2HyBB*{SIvdz9^pG`##e@$R?D1ilyWk z&h|4OXTIyZ|Ao#Ba_Ko%77uH3zATwnLS26)G|?@G9>H_>dC=m@+9%Ms`4~s%r=|5w zlZA=nK@c2cz{|sLIR0Tf-ZJ2^%0{PB=(1HP|x&NlVigc<;48Oa>-LhqR%%oNwcY0K}TL~6ThS40_2&bzg9zq zH)RGF-xK*BEw|}G4d~t4cXGfwq*wO!f2!Oo&|I#7 zJ2|vP*XW*X^+v`x%5XTC0K2dUKIiOhXGNE163otw89(ph2c15D9Et>OdjuT14yv5g zY$!=U_JG5FxF#&mfB@WxGxjJAMq%(I;T&r`U|enFKX_pj=s;Nm?;6li9b?&?XR@Si zZ=}XEYc3^2BE8_E01C?dS=ff9%aishmNyG?O!5Y_$h{Yh9G5E zUE%uxG)nR(giI+oPePjUyD;s2uL3ri=gXvoZnf$(?}Q+(?GHy@lc>dNEJv`LzTN3pu_=T?ghuw0EddfV8ntX>Eb0Li7 z_@0Z5{w9fuc5jBSE~U^vKNe(uFA3EU-cwl&1Z;1%RSnuoDoqXpRpPk>RB>wtVEA#x z->L(h`4Jlb=CyRVYuU;p)NVwOD9 zkhuPOw&Es@zT!voc`f#UnxiF0;beif?e)s5A7ria`D*kqFxK2F57|9|mICWFL!Ze) zSH7V$zs!ZLE+GZf<&~~a)e~Cw;5F()MdQ_Z*W91>RVM-CpI)(1o#ni7yy-*hRJoeT zT}dnMzkPSK6yz?+yUINw`U})~N)igJi8nxp(E#W^gX#if4WOlldPCqY+XA_{Ir|KF zkhY`c%iJBunP(FrSwfOSiQnY@NuWj)Z^=Q4pp&1)?U`gY6pG9UH=mOTZMH-l2i^l8 zUJli4Q5iV2Nhq5a(LzaN%dpF46I)ple&4-oS_Ji2dbM$rTzezqS#d-+Px6pHifX3SQ9zfxtKC0fApuM#mVE55{X+>eMdffpq?dw#KI# zlEC%54&P*)-j6fThFK^7igP`0?`6`@z+9gr*9&+PG&ofO%py`%02DS4vpA;*#AB2} zwwZBMhRQJ$-j#jB!ta|CoIqES7n^6TuVoF8*KD6c!cmk@Zi#qjLN=ZCu%^>Vo_kpJ#Jlqn5es`?vH@#40+?kV z{?VdroD<-F6y08T`|nhSVYY?e7BiI7$*9(&=DY?3hV=i45pAFp7PN}!u^EjzXjpY3x$!KA$ zbdp)BF8UGHh*F~&{lML7jrvc$`h+lfbBcjM!v?q}R*o60orNCDzZogrEfb8P{5&*? 
zNDrvbwdkB#S7x7@_%kr8tZ+=!*^5{aQuyzTBC>tJ`QPOK{L<|BR<}Df78W&hL1rU4 z85y!vt|@fG$QaxB>C)6}IfyuRZY)w&b}?!4a-QnX}QidrkeXf5Uass83nj&jT#xNaw`KTL|P&1 zOj%{B|9i2`wZQ2ka4h+iNn=`s-e7lWDcuj2oX< z4HDoRA6%h}g-#wq*>s1_T_M{K+TxQK?F)*42&rjs3__$moUI=iT*ch|KSa{30vzxg z%i0dU1{GMxiu&^QgKQgBfR#2t{BGv!*blB5+f*1HCr_Ocs1 z`^6aywO*1dN(nCLj3L}8{cpqPmJ0`XjUxnuvS@Xj@ zP4#k*i?lby_db{NbEHeUbb09f@t0n4b zZ6=M0Zji!8B)om3`)jpYu~NV7-AvWo$F0@}bnnK@&QHJOUtAec0~Krr)AqtYD>O@` zHoEimJ`m7TUk&;~K9c|LliV-^_u}yIviO@nKl=W|hU&+TaUiA?{569zrb*L_fp^3m zyz4GX(_8a^q&A%SlQO)HYaqW8n7OSvQf5@}NDB`0z@f?@Gm=dGS#HpDq%VDJ3o z{UBi{Kro@}+_EL8^UJ{pg5#7$>CgyNOLVW#UK}s=|HC{1+riQB_MMdcG)E~05gW-y>(c#&@fb+F{V~2_m|t;_Fcwd~-RoR2=hVeU$8z@d zYf3`vrgmN1|{WVtrbs2p{ZS`(iI7ao9o^{-OQ$^9Y8J2(It2^m!aS|I$7-599; z_Vc6Wu9KVI#%&S;&Wr6@oCfC#vRTDIO{_{6{&bH&{=$}u@Z?JDLv*1T9;Ki*VIG5H z+U}tJtlw74%RgIbwj?b64yJvo@G zhOXtP??SLI=84|&5P+|Nv@jh|?!Y9C_A}ySFhqYdU-5;SE1sL}&-Yh!;o02tO z(f&@ZivO8ejR}D3RxbV)a{fe!C+>E;3|)L%Pjrq&pPi_Dfhu{lv0~H?gbxT_DBflq z%qbgHc`xe-Bye7~j#(0lte7qTo<*T^PLo&6*e0Q1X3tSLLNd+x{m^(gL&=~09~J=Z z#GgXWAP+xKvjS|~UNxO(_L71uw^cje0cf80W$3N>_@8cKRdS0^5`=#}5(MmeeX(bK z5K_AgTkN@axz38JO{*sAI+esO$i~1&6S#sDqnq-fWbw~i?DJ2`>~P-pB0um8Zkk)C zSV5UUfrqn+{rQ@L^qI&W7deKMdtio{091he(j4x6NQiB-)j^m#n>=_fIyyY&as{is z1ZIVvKMk-;K`n3b&8~*IA?yX$uXb7fv9}a$FB`LVRCfR@BV{NlXAU>|OjY*QL{4>o z#npHwo0-gAJ5^B9OXA2SHdUOuB9gD0FRE^%x3#FpDQwq6{0|7l?zh_E>=H^)_Gl%&iriZENsuGT1! 
z%wlrHJFGG!$DateKJ_k*dU5m~lN~uCWVocm77@&WMUUN?nS4X1LS__sJNw+6Q&GwnTfnpt*6V_$f9!6)Tl3KUo-`M~P& z%3`?GmF?l=HP+zJ25A`MDu_79>^e#2-cKUPVjRO{zw_F<+?fIlMb_W*N*IY!$G_dN zkR=wMS>=RF%Z5wHGD@B*>kabX4yaM~DT_;~&ou5{{{sT=7B*o((qA|b5I&XZx@A$J zH!`Wr^t=!cdBmjLW&SuYoJDljD~8+No<(rehuwQG`uZPpGM@wO=y#s1z!c8EJ!8|3(oby%Hbu+@kHtI-T5@sXA$q^jUH(A+)NlfnB4Wx6;{DaI&Us^ zC5)bp5eawJf0$y}okM|IcHfm_^)tQ%K=O{^f(d9=sj_=4+p&`n^% zP#vJJM}V|H()ug2z(B2He~41*tZ<0ZjS1PdSgGVX_5Rgn+kLqW+NB*A>sLM?ZnHkR zZg`yBINvn+2@$hwJlry)^m6|E{{CRLN>?Qi&7OaNpPQiEM{9snc=qPAe6MPCw6+y8uJgKBCPk0|?*HQM8q^EzS)@^^rUk zMon|sq?}|(E@_W^6B$C)MB78Eb$gLVU&i;nF8y`_5Scw_KNLF3wDc{NS4*bT^?Zr^ z^n|pTV9ofix)b*qxvhwg+fu+rUp=e9(tqlNDge}|J?!uddnysL3LfBip1V%v8U1#d zW;LzqCsC4buS+Me;vNZ=9b!=}n5t-@2jc zk<~)RwKx~l$^jL8rSR;Z+$H?j>#qQ&7$=Gp^uC>*ZC-&S@e2r#HMuh~>hCOz+Ag zZ=d9zO96S9QvAcqdg4%+jPUz0RX|U;YEHkpb{HI5WBw5|?*g>)`VE~(@lXc-keT>R z27ROIaRKoL0RH@y-B5LWu&D7kIr(xJE)8?f@PP;P6XE{Z1Xx|HKtQX8LB7HJVE@=v z(slBRi7eE@0yv_c$OMNw0$@G?f{JiKpxDedit(_km87Ffq}KanqR5!!Lf_EeoGs17v_yfo71OY3a(>zqB&?NY!`k>5v;Ln;Z?M>*q z_W0oKN~r>7zJ?Ia`UJ}ein-?0=>IU2>R6fI=)a<%f*q}0nO4O!Ip}?ok8Q*E`{;qV zNj3=_S*)5ieDv@CP#Ngze~u99J86s$1qA+HHqXEdX&aWs`4;;&;eu_cE7QPG0?s5L z*(=DP9yWxJMf-8Ya5lcKvvFQxS{k=5Hsr2o+VgoSfOOduY|Oa#!0CloyIoz~J8X^% z6X+PC;aMC-XkcK(4%A9vY7GKE_Mbf8L}so7#5aH8j=8>wF^%DCNsnDthE{w*(goV} zc~bF`{+&?qmpv}x8k+IEwdM&u0BNG(%RFWcMA(ehae{w4o?40nl@Hxw@@)jq#9MTs z^c#q1h9l$Jia&U(m(yzvi=(Fj+AzE^8b00~e7hBD7X!Rd%w9*ixLg}*0A~9o4ajow zh^wf5+ldid*x~EJBcQmc(eATOp6qeK1S1l7(v^AqHhj!N?&kG8u+!5llYg`I922b2 z{)l?c6=kdolw~*pI1sS}^$~&SqvYMEA6$)U;(x^-Wyf5~3#0u!_ymZnMEavhy?h9cbCYl9U$0O`yMlRL7G+`Y zv+1*86l2JR<9#UZ;~IHR_vN94CDxEFA~dmsNMro0kI}>6?FobnAYLCG5EIR%Mi+c5 zR2Ie;zI3eFe(K6_)&i1`(fFFS7>1WAT>d>RfC}_g3Rrhwf^O&)z3upumYpu5U$!df z6=47a6QqSyiz_*abQ6l@HH5<*vmBOfqalT~6|%{l$di)!{vt0L7q0GrGWE;YS5(k` zy2)}+wjsb@S{hwRqtdX!o56u$by$$9(jxeVB#VJ4%pQEXar~KTn6?oKbOJ^F$q4&^ zvw;-b4ayA{4;_~OcPuqv2)8MOB;t!oe}LsHIHj^Yl&`^-;D_6=R=f{+vI^ggzl^`! 
zNg)FAX29!(e-H3|?g4%m&mxCA8hrWgJe!J0W350b-eu96BT>VV_PQmFMtC2=i`K@qkc>z zF6lE3Dis#|onW024EyNIzAkC*Y48VemN;6eMG;b$H?y@0hOE9kf38H_WOMZ19YX$0 z>lY|;>P$v8M@s%U$&LSZ8LHhHVHktmv9=wMtLy-N-}`@=%>c`F)PCr@_x8W*0C@b# z&F8^*+Jf81H9vSG)TOFrjKVUgn|wFkiy)>OEDi8kFX-NY_^Abao85D=kj;UNcBb_? zEpsN|>;UeOWcDP|S$cye|7_Spa5pDNPr9cLXAyfNo#0Z{FL2qU$mqwHyZ&1#s>Bxk zAJVe93hG6m39|IHKflr9b$wGWMa>(>5@n zC`-QjHVag!ryuTbWh+u1>IjW1u11=6sh67&7?qqc2$Qz>_|=<&!5C$J)UtadeI@r; z_N^iWhn*dl+YqU)^USL=9nBwqh5iI@UHy4jTV&z4e==;1A))c5?atseTzttN~ z#bY`&2Jw$wwlU{W=RuutbltH(U4x{UoKFFSL&c& zR}L%zNp0+J_jy;qBYHnGXPT_F=nCnaU+IDg_*H_~6fyAjrEUL0oj9pKqygIEI!;w7 z_PWnYEQYNSQ)kV8-IQ2+Z=F`9@cXa47$NiaMonJ;m zWnv1#;TtxrKu&_&>Uh&>lqAI2sKGassx!7sbq~7rNd0~ePyK~)YlC-8{b-N8df}$0 zWwagR-gxaZsC5&dw7ed?$C7np@*8(tXw}b^yIKoAVKMEv6D0_2?;%*1T&%Pw7|g=m z7X~&9EVAc>*4lI$pmRTWJL)ZB;5HK;O;i%~bZ>9&vTb!iYU(8kD} zQy+e{6ry`T38U>dBsijlO;+enSr&pET68&~BH3a`gzdmp^8K@|oVZOZw0eax>oy+J zwTv%AcsxDZsrIB{YE0Es$Go^h-6%dyGPsPJfUq5P zPaS*_Zq-at$9$k&`+wMb&!{N3Wo=ZlNKym=iAs_zNpcdAEEy!LB*_Rgseu+zl$>*v zoROSCL?m~Uo75l@8X9OaP3FF=bJp5xkF)Ri&i4Me;~QiD>Cr#B-%zuvW>w9q=SfT^ zA%_*zJUqMHKoGX5Ez^G8F|cB{EU5X+ohK80c)$1V=f^!M+D2upHCcF8e5d4AgU|A` zYhIhzOvk>y2d+D5he<%+tjr3!*{nc8Na%4o?4yL+AqET`NZ!jp`k8qy$J0duh}sG> z)*|x;BcTDhMn;Le?YKd(l*Z@FL-+#jjTL@F;gu%MPH@VXjR(#Vz)08|BuG_y(aoBN zzrT@|GdWuMBGYm#aX=HQQe%?=)+U&!Am(h`4;jiaW)y^$4C@aKw8&kyd1cq;Q-i?!3^jpTD-JoqaGJQ_26cqpojmXkt(RAkv^(@ zeKkvTjYCbKT_tE?K!BwmIgOhR6fU)#8gq#H`IEmnvpdc%r=S2Zs-E9V`%#q7A0@J@Wi##gxA6F7+;Iv$%&O zzJ#AL+z^K^d9RAoA8pdHFy{#ky&-j#R%u*1oht6H&;QsrwCU?)J;-I1Xd_H6igMs) z8fDFL1$cerK+6-ZMdu)E>qKuWh?SSsNp2SMD27sxJXMUq`@&5nL1*LXy@{HG6Ys4S zcwlU{;ppS@aJXl@B}sx!5OhnaG8Xy9wao}wT+1kPc9agFMzkGFT+mjWR~k_NQt*KQ z<%TTmBi!ulCCNUY^Ku4yc0n3J0>{6ntN|&SmYhY*ngJ zCq|}J58wu9&kVM_wDj9)Sn2cpEU3CgCbJ5g{`C`<(tPiugK--lnSH2Yp%57O(i zzcqTi*|ijYALs__y%iNi3}3@1+x0zEN0bCh9R6I|P&jd5jauDw1uwg{8h!?QTt4eI z%X_4hQ<5U=RAZ4;dMq=-uY*R`PYAW2#d}o)@K-G9tDy8l6Euf}N5jlndHlRewR;8rXOzx})Wfq7u5zt1clZl^CexMn1!qCbvL{oFQpJwX+os?dZ 
zvkPxptXfypFdUoZXH$V$rm)h7?xf+^ci0tWw$S#|!%ZCo*n{MTPnhS6BPw(MVw@?i znQ+>)nR+wTL`eW4w`vamJf3MN(cN&C(ixh()PlNP%4s5s5QYw~)LJr>8|HX~>_Bgg z_{Ge!^bS?&-KO7en#qv#o0hzt#EW$(BGNX@uyybl`|&A;o?4gYXNHCV%~+{cf0zeB zP`^@d&n(bpgL%a<$2Xq*{AsxVy{;p#u%1GgR!zWyw!Y1Wpcc#x;=~;^dcZObe6+Er z0TLN^Zc2mo;;%pbyaj?tTXFqPi1G+o;y zB4qF7xqKrX;TayY5ud1gl1WM-rU(Z+Ydf!H6wfN#vG8|ay5A>N9{FMn?<|7{(=IX0 z(>U<1gRF_`>N@8Kc%q0iReRGm2Y%76-P*a46>IZ}-rWy0*TPL z2B5nzlVMc&d24S)<733(i@tY$mKV4~6BFPh?&%eRGga`WTr*?xr(Ra6p~%XeX%C^r z`y|APApKIW%C%eY zgjEavk+Gaq48iqc#NL{)JXP94WdMYLW~ipGdx|`aLIOlZa~3kE9?*4P9xNdpsj4Cg zSid1wabXOo6qJ-t5DC4>KsZ()gWOMi>blWAfa5`c6ZHCLAQo?9V zL{Hc|g|3vT3-H-P9&gScz~N(r;l+m4-WQai0D!}LUu*jH+8#HHuqRCT0e#}ttz&qi z1i@4@z+uLT-sMc)rh!{}!X&5nglYEoWVQ}#-t;7aFa=6kFd0gUS$p+RmWOBAp`1ln8YGWGeeNS_3}z$!RRPtD8Bx?QY@kw*OrH7s#y*5DaT& zQ1QJDirb!>)3$eM-`fvQlp4B?|2llcfn8^HDsSfX6oPMzDur1MG>wu|^KwOnETa(& zs+ASwFGMx!(TTJ*a3rtb%&xeTfSeQWQgg@pUK&+$&; z^7stMo)y52QP<@-m+d)tGn5Il5T`vz(QQ7AV;pgfH5!UPHszF;84oAdU%O_+r3z~3 zW3a8{L>%;-%m!}sBh7uY=fGR^nA}fu-Q?8LcKZ)c6%~OPq>hX5u~ex${#lRES_BHGkOuDCp+WmZ+Hq|`VUyjcCxT?av@vkW4-Bb)tI%S2L;pUvTEQ$2`frA@!J5i8xC)YO!z zS7pTUAC^U)c7;&k34e|5Gb{2+m{ZDiRlvvKP$3(m0PO+zqvBDB>P*wQfWQ;3UVW|e zDz3o>&CBz`YX7j*hd<8pV>6^IS1&D{_hiM#1bW5&cfTjE1U>CTQd0Ah@!GTqz$hRE z(5lh=O~+mpxyHYo>O2vP(Tj#HvQI=`X>^wctm!#l%mSMBXzc6&g(wA+a67MnQjebk+D^3>wm-rntP>P zH-P8y27^YV-v6ri?wDwi45S-M%dj`Ok@>$-@ax*lu;SCtLXpAH7 z9?@~E2{d@oP%4jl;@q(CbTqz6fyek`D0fk1=#o6CGCqUDi9<6aDB){T8}Fq?WM6*o`JA!m9Uf;BXu}xk z8TC}R+8RtCr1CiJM|dgj9{sdF(UxR_SpF!e-=nJ>VHxVsZuj4ZzNx0=V#cR zQOM9~7m(V&0C;HROhL=FEA?%s^-!SEaXi(T2LS1viCu7gn>C2zKr=aa8LQhMR_4nIiIWn)BAwKvA5=xPv^ z@3eG1;*eLM3cq@w*o|BP1>xX&u^asmz|siNy32Mb4Zg zHQ{rZwY(QMCn%*F=INVQ>x0+Yw~vdMDeBA!jdosFdh50Gt7}k^&ud2zg!fXPwWGHPfm>iA{~~NQN);oZ{f0er4M2dL#G3w`puE5H`CUd{$NRKy z@ZnimU`M@zA0kHIO#yVrTFjaNMAchGdYqaOcqSUR6+$oJfeYeWo6Fa|pd1ZY0($r+ z;uj^xC(7F2mzIL~QeSW3AT9a#UdS`#p1g@dUmDdlj8d3F`T3*wzTRD`7WbS}+dI62 z2$hZ4m?~3Vsi+o-B#brF-;03J3hBhOubm!U0~L|_M!g)W)>!S^!n{-2*dRnM3NscK 
zWcW+I7I*&3`{2eXzWuuWT{=M_ghH>(7>_(aDbi%#@3JfgoKo%X(r;*!?&xumX#S|g zyv~Wx_-4g%9vc0Ph;#MY`_X|a@;I;n*ps-qljfmTlT*UR*aQ=1mfVD$(EL74u9LyU z%L4|Ee&5gQj|^7TCjA+gGFpcR)z#?jM2VEus^kck`Te7 zEvT4c%tl&qh2Huo9YtC+Ya+hPuE|Rbx}ABNgacfpowvW2>2OltpSj|8bNF3(@+;xE z&AzP%74JpM)syN0dO?0eISm=93iLTBQ%u7moHlBnW|v#>u8QMSm6&OHKvny$6f>ds z(y>V)FbWYu$(4VDB5KaRDrr7?mz(dF1<6v9vd4s=yEX^m^~9|k#~1sHKQZe)y+_1d zK4!}U$^DXyGKIHkp58XTH^T+YVTcus?(I>n3mddCE_sIfuG~k+D9^I>(+!chEO|zl zmv2^PNN^`U$KGwuH>yRUvP0tC{Zv;Ouu5B{y#FTT;oa(m=Ajk0Va z+t`?+z@)v{?Uqx%2=zI4(+-B~DTz0HyrWF&p9NARTUvUree>Cl3P9_zo}c#fHk(LI zoGR-;)0-;4zYpdss%Se=Gx(XA&D)q8VmFf?TRz11A|C_JuDAV?u>XMK=AY%9?V@V| z3&D_|v?6vXcCwB}symC3^Ccg|t8?Z*0^mSum=JyDPaQW}62lb07&Jd7m3)?o49rUV z1(!y7%QNQTcd=66t<5ZgW~i;dFOQs3_3q1!*(gBipm zRVyuIa{}c<5BXi7iMMFb_z?=XeEsp^oY_@@{1~F=`1D<8e6iX24Mg=@BATcEj2J#P$aY3EQ3qZFwpvGpas17{jv{4SC6LAcVv)QE*+@HSFt5Z?9o1 z-hj3L5id=T))~*;#a}xLl9#&!toN;QEM?5cy=#{n@WbPn~ zbZ>|Gte3QFNbi(3J;4m!aynXF97nBT`E?F{6Z-G68L-4^yhHiwV7&%SruD+=2SC6+ zj?mN?HY`$2o*#kQPOJlW(VzF-i?_>ae%bo--R{Ke(+W!gIuZXG(u~0ZBgF88pw39~ zIM=u$V_99|f`SUx8f%eRYY@t$+fTEv@fBmWmd$sw7B4Kr$%xy1@|`KdcI#?c-WHGJ zH&VFf%$yjuf7HBVFeNnmdDSy1(RQNLsb@K726G3WQ;K zYs+YW51QqHWT}y>`jT6+Nq6kXm4yBf6YwF6!bT4)Fjg2k3Ro7jS?)sb1rOQ*WV5uZ z1U|!ViTm3NolR7+^LkDQ9F?}wy=L|j2Lf7lk5LzvCn(vNtdzQcYmM!SFhU&1L|yQ* z<+yt6Jt3b$jYHL796-T&Fcc^SGO%QCQt?dK3FVW~18OBBWGLqKZ{v;J6}SG|+OCwJ z=>j`XCB5th6>oNx4U8c%o>B5B5}LdJak|RN&151`jVn6rCncvzZnc}KUDLzJTnAV4 zlPP3o;TFha{nFjTKBxz8y1reNvIi1`#0DT~v=`aKcG7+*K>$_WhwtRq{3)p`nPtUE zh@KsH;Jg^d*0yo$qBH>GL%>)-JKq!dRH+uw7gkET_S1z1HQ`v;x1P~!nIsHu0!T|^ zy$2yQ%z#S%XsFivZ%tCVkA>YCyZYj!=0QQ4YcYDa=$+H%>Bs4|^ZF!;+>$M9Jm^ER zjmvY)e9}ZTH<78QsXAzx5?<&^rLXC#1DKOc(KusU!JMC`LKQ`{>)+S(8a6-C(m=AR z=cS3!6%zv`=W|U0B$aS-@p6Xl#d+Pi0YHb$2HO4^kW{~b!GlRu)D#ym_B7E-(TTe& z@knL#_HR9nlX?CYX7z<&>mS=6AZ>3vyRpJN+HwQEkTS7gRAlB7UW zW4UBs9Q7Vp+yA$qquyh~&CPX83vpzvlB7=cMk$@i@5J7}_cx{ehx-1{Z%49B017fd zYTnXx?=n;T=GhlAl{85OmWPtw5v;8LD1iS>hyLNK{`I%a8#k%Zqc|Fse^~AF&>)lS`rAhGzrK0E(k-U{56g`!@If}= 
z7c1<7PiKQeBKUu**gxCa-~Gcsbo3_&@B-{a%)g^D*8&EvoW+Wm{szZij{4tujQ`yH zKVtv4tnUAEg^OafpV+v>%cr|s6r=q;lf%L#*JDzRplehENM;s-eLhX{{f^I+1&DwW zOd5BeQr>15A6Td#)cviiTniArZrPj8Zf;pQ7d=S%(exGNzjibKa5(=j--dzf#EL*% zdOWwAUr%(k{N`n7n4(yn=djr*(!92bhiqbh>oT%*xK&G1=V8|^zA>lqkVQ+#vP|=}M8`dYfz5T7Ya5+oPb==+-BnhUWwY z@BCXv_t%e^As@bjJgy{Bi=0|H1L)>rty_3liScvTdlIX#e~&0mlrX!P{mq=?^= z0aKJ2_AR`i0{9O@zTZ;Ix3bfC?k~Kwb^{6!(Zc_h1poRm4I8i!LBrKAe@_f%5h0J%IjxEkc6m-LdQ zmv$GuA4MnTTG-;fS6pgXjoQPEeM)%A_RqQ!P(yfjy2TFy&bfhxKC=Uu%w?{l)f=Of zlkgs*K*k>LdSTt^!>#*pb&g>mnO`qZE9Gr*EOOIj*9!unSN zYr;ul7%xd?c$hTy#jO7s`CAk$}%^?>>q!(@v{X}B=ri@LV-0LjW zviD8uW2O?T{z}PvS!Dfu1n36`bP|+M<^oMZzK;Y>t{-`xz5!4_&9mP2*7)wP-U6)- z-D52iqQ2Sl0ITleB35@<@tzJsiY3=KO@WyVY|^e*j3>E^hxkS;@439t_EJ zr^MWk8o&slR-x=_UFg|1U}WVbp6( zOmf$|LMY)Fpx?00ac1NtQFKKNu*PMU&(N(od#{{uZ*C`EgNt0!IhTLD5Sxgj2(Y}P zl)J7kAffZV9inOv|-MPd<&^(#C+ZUrVv6s*b2rR8N|o1 zr^(K>6r}o{EMm54kWk4zZozrrbcDr(X7b*j7yBc3{z`@&>}vk>{OP_8EoN>wwGtQm z`jO(>^;AfNFkJqU&R!)%R)C1555$!m&O!U9qm5F-9xGrU9Uh>w3am!Ifpy?pB9FY7_(K0(d%VyrPdr?E*X9I?eA(Vi+zKo z&yOY^G@MB{3s!XgL4KX-CnHw5Gay|PvU9gm4i4;I?x+XwGRsH#4u85{Sph=L%nUU} zLAN;8k-r%dLONjKLh7)M(>s?6hqK8SWTg50?~+uK1)iHPr;7I^HW0kEI`|wL3-^rq z>%kzPZtI@`X|gIXCF7AVH^mS9h$8_^=#L0Ui46-i8oTz|I-=@{(PhkV-a>Wsu;SPD z8TN;>?cH+*tdaY#WEB!Co)O&${Zmh`b_>D>Q4cUyx)TZzSC=jNRvJ~Nw{Ic14QPeW%)8eIQGvdP4-EmvFmwykRnXrK8ZVT&cTGb#0 zbXCNH5YW}gKXldpZ@L->=ql~M&{eTMa@%v&r-?zZr%xHNE}b&Jx?D1V&Eq9ULa``K z+r)VuaQ~@SS5Al}OE!BpQOl6(ytkv(bKja97>>KR^@s5e{>^w@0pqp!pT;YwXn4O2 z(z`mcJ=L?y{BwTuHZ^+QVyh%Z(T6`PyWgMy?}NghdIc;&04{}6+^GQPdbW&M114fF zj1_RUGJiO?(!V)dt#adD2etp{Y*!wr<{t3TnBJA9rSZc(YxcwLJO^+1>Fp}<;p@5| zqiV9g{Ha%0s~7e54`Z`3-DfBGkFf!g`H!*vUutZ!_a_PfzWK+L+|M6y@6EM>$5sYY zzd<{{f2a6#wIN_K{vBgyePv~e%}}0Y`fE-C;B~uFjHqkydZvh6|IUMlMPz&*; zdD2JQQnjy#eGh=CXdh^Q&m*-Jpv|E?0*hc6x!-dDv}Mr2AU`Dh%*(2 zkj6cDtB%_ zuT%GHKBN`49{3WwOB2%VSrez|&%VDYAxp`jJx1ZYGGOp6qtmomy7catvx8PrJNbZ% zTMq4}Yb1lP+%hnEpZdu0Q$J?9s30RB?J)XGub2g%qrD=?F1JeSE_+gt>7Q#onDQB7 
zK}R@UVCi*x>$K{*I(b|YU6=(J(rO620Fie6mjHqAC6qkZJyfS%V)&>0IjKh-U0o)+~S-8)bh8Q1ZwY0;s4 z;1X0$)N75p>W?eRZxv96f8;(iVH=6A+=v7tTs2O5I$L#FP0g7*r=8oiaR{UqGI&<~ zrmCn4gGijTD7o~tT^S49)pr2Ns>MHJj<+ye z@=9q_G!_vRFS$F5lhfoAcGV_<*Rx-3PD;q6dm6rO+nvz39J^AEJJ63(3|4>5dVI94 zeihQF|B+e99&MU~#%oa{oa1`Y-#&~#GbMNg>7YArx$NF1vWh86mVS6PlIl}f-9P`j z!2s%`bbj1#ko-OaA(@N~#FD>EhsR3{%=H#ejBZity|Y6@vZ^m!k2$iBla*~}Xyb6r zE-p?ZqU2IYP$JrZg1u@05Sl5}Q@S?cx_g3i z>zfvWQs1q3?++`E-Az{?x9`c2`j{fQrk8H|B?@jSh`t)ja6&W}12Fs_(%54NL#fJ! z0zOR*2FBr&FrStOTej|`6j*eFvB{X2pD&P}i?uCAu-zI+zNk*~UP-na>j_>tWgVMX z`aHhG@!6QVdtLH*6+p_&^k~dqv>W5y^6_LfXB?_^t34rz7+@5Aw5O3QZl<0t&i~8G zBDqq1{qV%AzLtuF>b)H-?g!PbxBtu4*TCrAhba4)Am;R~Hl6chWad3&Zy_PQ_-vU| zbFEd+AR|yVkOFubqTroTL{ItU?ka`RNGU55ooWW+63FZiN!j+D#nV8$`}x|nMI^y} zLZUaUcF=v4sI`LA{G{0yZ!Uz_gOeYUJm!I+huoui0^^w*QycR-%V({~XIsm{DQFlkI2Qd&jU zcWdrkTP-%|)#h~lm+tn;Gr!wQHzT)%2{Wh7BX}Lb!_1HL4uiYBz!*zb2!Y&vQ ziSaWm#b+<6La%_SN=B=eH9uZX<=lz5;pBUBy)Ci1C7cuD{d4+a;hVqR42-=%TOtSD zY5{WP!=S>B*;E_AXE-Njew&{2O^SQcVY1otCGV#|Y>l|N9pAgC^aL#C%?~nqvjOJZ z`dc?x)(^^daQZ=kN26oIotK5j%%~6J4W<6(AnPgn0m%|`h??X4_RGm*!S(N9Z44?n zJ#si7afPqNqBPmf<((yiala;Cf>s`h2w$70M^Qm*Vgvc+d?-A{ag+Kj?n6 zeQ!KZm7vyk?}ui!S43nUde}|D$C4YwLnbxZDI`l)`)o}tPrh41jd@jbFZU6^e4tu# ztFs#)0d7n@-l*2lo%e73HV6Oi%FwmmlJK6{;;9Cyu|b3H@vDQgp8@D>D-;F@75c_L zZAku^JpE)2*e}&cJPiP-T^06cO|VNu9CzD9>`TER4)8j+kerx_uiKK|yM=Y09@M{0T$7ZOA#1$S+{bLJQ#@ zBTw_l!Ndk#_HB?4=HrI!b3}iXaJQp$ZU0u*DOEO3zd06Oi%J^ii=@q)gIv;MxYu^2 zfh^j6%8P>QV9!P#h3n)vp`BhpJEcli7BjH-HQ=0$WAUAW+xYIXYIaoERw?=E_J&}m zu5CIoOl%-U%2YjByhDt0v<E3$6><32D7kngf+Y&WX+@q_?}&tb zO?566(CNgBBd2D-e;CZ3?pKGt^ZGmP6}_{LiuX~ijtw#q!m5%+L&42ZJks{+L+C&! 
z+xlr|FD2(g6NX1>8A9?ljGRs|-pq9%h%>dB<)Gf*mKEp|vNMrTm%?N5`J!;?k26kb zfXg5r(phQI?LdVgI-XN~Uy9uA)UBdb(dVXFnmqz@ws3R+wJb^VFw0ptx$ZuK&-RnB$A*yJzL&L?;WLJt{m7h8 zPM;PBN}IH71R_{&n-)5B_O4KB{0|Q$!+rD=q(nkq&QJuB-CN@%QF({ zNdud;%dUOm`VqaJimHbyqOEsq$=&p*sr5)|=0dQt1ui;ZZOZ4<(i7pq?=xC&!_6<9=1REck` zkBm?Ymz_ov_p|qTnzvLqdrH&vWHGmg7VPs-9cqrR9TjsLp&-^7@zo4kwC!fp9vAy* zNg1$f0=qtwN-|4rB|226p1D>J;B~;H4B!@sPr|vU(&`;g$J=b`CaV3?9{XEFSMGOjbg4b<*$S#|?@?^YW>k5D;J9pnwHZY@kU3jk~ zwn?SPKB}tEFBTNZE_DaAIw_D3mb9JZ3YK80{2=^kJ_FGVf~jj~MvvbuYa4CdM?fHb z*GQ7@P3%2jK-SKJ)l!q(8-_iS1x`bOs-TsJ z;vwUonbCHrY|A?Bcl8)7l_B@88&ry2q7L8pR5Ff52|P8AY%(jf^f(O!t8-Z&w%CoM zifu5`au_M-X5TuX|0Tl~W|~}cq+>D?1@oR;^5TU}ecOBn-V87%%gs#U-^ghMw9*E)+7;|d1G?PT~G znCN|Zdqbi2m#%dLkO{|+=bl=&`#;L<0PqV3H&5{RUI#CrFi)K6*&FBVU7I4-OK*P~ zFL84VajbEipZzYIn-oQ;2=tb{8G^Pi-2s7O1FZXgI42aw3o`B}g=!@$GmV7GwQw~7 z1E_-5YFfA1+UUNsQPzBqoQeL|c>-}q$DCc{Qv@9G9a)0sfu_sv1R~)Iv?3nG z;-?#KgGMZ`wcbfC-wP5J4dh<#unG(k!NvOuYDD^`86ip7e{;I zV9h+0_(AI>#Zy6;bdi3k=+5HBH=_QM&+^Z3y+7RGt~(U0Z4YK}*cnMuMz!pzq`vCG z$yI7zQ7@Za6Z0GSZZIfMKlKBcW6zteYeq;?SJzF|u6Gv8o&Q)VOOvQ|7T>uxn8I#` z><~MlV<>@H(EvT9$_PIV3H#e`F2MpgZxka_ZpPKB-G~LIxYSc;^U+P~(u*qxQxxk9 zo#u6nD?CF89DuM(2o0?))fB+jNEhfgbDyeG0I)ga4#D0^Jek7Y)Et49F0orJUSyd0 zdG+xMss{d;k830Sy&5vNQYf>Nd~;4VORwA+zvS%Ck86-QXJ)B_;9%32yQ3O1 zNeoPB@Fa;DcU>U4tR$1`Otr`Ji=4cz?fqsk9OPa>8F1$!29dSH1JX9U@8!ypX}?|{ zXM#kHweou5MR=0uo@GN#97&iJTeAY$ZH%1{ghd+UCXAhVM&Yts12cz@`CF9MJ!{;f z=Spm3PLFzdH|ARVNq4VPQKJX$)A72wv;Y%h{GmcZQF8A{mjr9yEC&C`(JR)~1Mr;F zOcRh95Tn+er@KP0BQSo+0bvX6`-P=@(IWc9XnCwlJMvx_;)3m&$U zR+SYhX>V5G7}|fL*T=8FG+<%vn3yRCm8}>~SJ4+IHr%6zW_CqFEf~IjAR?|X+G`khw5X-H55cihu(ukrFnlMM7g>E_$ndc%AHb<_z9lW~w{z{2jf zZ^xO)=6wIiUgj9mFi?$A>{XgLW_E9|&>VuWyW3y{3>!vD&Z>@=)AzjR!+eu>caDSp ze0P;7*%)Qoi8P$A0X;DS6?OqhXe!Yo{Rr(Wxj^( ze@iLyWWNIzIcC_PF^ha^C-!PADogGANT@^iA!af(^6q)|bhT$a=_=_g5t&b2yHTt7 ziJ6GAt zH_Z}hzzN}&lNGd6b933zQiDEnFB-G~MC!^?0aOK}3@uKEkD00aFst0sE=^R>b)W`y z`(Qx5Ok~ck#D*D`SY+dSRAFz!cn=TOdKwcV7v2fwz`4*NHKrF>8h# 
z8D({Zs%eu>up;a;?fi0eamDQ6eETVL5^Z%(bt9ZcC|K94=7ef;%?^?eJk>r9yu z9{5@+a7oV^J93JB$I5}!<}u6TI81Con9gXvdKFvITE&3g2O=u|tvss~U$*7$hJ!DK zo?9Q?6J_i-j+Z}JegYR%#o_ryBJU_@hnQEOhOcBkZH=GG9jp~_KniB3OB(^0%1}c|o`8;EUm&G=i;LgG zgmj6`KNzsbKz5}dT3xb-@2JdupYa$;&P%BlZ`r_%UK4OP<5cRHK8M{5&N(nv5lF9n z?XB7~F5ZrL9Iq(Q0QhUHYSLcb$*hIoF#LN4bY1Vgd=-IVMGaF3g2cMSbLy~(x(>6`#ur+9wx(r>tmVTE0vDy7y@CGxTtDxvLqa!sxwQ3!+>w8H zOE2b=I3n~H@h4^rujz)^gOjH(y!!U;10JPpKD$cMl?S|%%s!s|!8zG%_}h5|Xkxyn zHS}!7lgtwVs<4}88KPCr#cWriBi&{UOhMtOQ?H*QO=IH#;g`1GPL~a8IWa$J>M^2S zqWXjCoeljmXHfZs|M^MFGTV{ZSZmE*+r|f~jN#Vc+yPhf2A7wniK4Jl!CAlTR;}~< z2~CQ~T`0VAtX4^?K>8>gE}RS4ttQ26|mAk~`aAe}WRUA%sxb-*zcUVPhHDw*x! zS=?wK^nwARc)<|U z!m4FS(@jL9f}=^Z?&jQ%+fa+x-W_eEo?R}qxgmq|c}q(&8~YN{nMAk(a^a95?)&g4 z;H+rMwOE^KOxK2jufP?_m4CQ(Z0b0F6g?LiFSqW|9_>ge)tthqT^!Pv!dySShh(cg z{wOk@F%NIHA@MO^Emc94R>4ao@t|X;Rf?Ol7lG5;2kWTG=uWvkL+3s{=d$nU$!N$J z8KXw^(-}x;hK?SkxZus%I&`QvYUrYUpr!>6rj^%+_l~X3_cCF+9h}MuS|CAf?ADIS zoO%{TQpvomkT?4zzortd7x2nzyvPSE@d#4xZ+sbG$ou*!oVGd*I%Jmhn#Qkkp!{O? 
zF#W`_)2{Y3lB8U0^zcy*=OQzJi7*1%JoyBDJkvwrzL4#0H~FQ*t%#d!|OIQdWo@mu*!;0 zQ)ErHNF}~F=`Bs~^nJ=KzW- zYgdB;@Zf73{IpC6sU=T5qqgZ*d%xL0^6*oE)KBXqB4ZQFjnmNQT>a%2KU+UlajS@= zq`MEz;8DdH9Pc4Tu)2-wowT-q62H+GrXg+ET*KlAOQY7TjSu2I?(NW{$#MSW@2W+w z{ZERJ`;`)UWk#*q^|-{6KSlW!f5xYg>6Mr{3GgEaO9l_odhM%z)PmMYF#{gJY^xmP z)G>gm6R+vuvjJ1JCA9fqLiTV9m{Z*$E{s$AEEQ3oZ&H7uNPl$Z5Q2#MNJ1W_m+v5^ z_eADlG+KEXU^k3cmJ6umxD)33B(po?OLdK;j8~R-!$NIHNqN%*`q^QsbGxR76y?z{ z-(Fv5GBA>($rnjq1RQ@^-GLnE^}fXEOsBzTZ^gok>$#l$~J-O?!AnANHMtPz*A6E*eDQrL56%>YC zr0L?+S(y6w{;D7BR9sW3cWS+xVHM_{>sx^MEV!*gRe!vf`SP6^n)O3JIR{Tjo+>-dtepIgBpEQ()PLa%O%s;t!uaNTO!x|yx626+lfB7nUfP&C z5XQjf<8jkeo$o~BMj5RKA5@$bW(a~S5!Y3!=A|(kUkb51b6TyRpNc!8xDn&e-|Rgy z$)C?tovRHj>BRFFm$^M`LR5Mlyi$QX(l2jDa|gAx_OkP%7q9foGk4I0VctNEX26WV z&O4&MqDPL?B>>Do%KBt9v*P$XWNVC)DRcl6VA;NVH-Qffa~dM&tgg|0tCT}aqfI0P zEk))CO|FjdNBQ|*yqmgLF%Bh|VT808Nlg@dq_XdM190_eNS$diT{Gp3d~8J&*`>w# zIhR}wzhYinz^qB@5}0^f%3B+M`i?ah>zyUsX}UCkX6qnR=-U@*`$+d@k)^Ge=hFQ^ zCA1E^bAH0~K>4Yg*t~>C3%eyyK;#%{@Z*)jN`Mz3#Y?(6~QRd?Iqe zgE||r(&I&Z+PFG>GWS0&x+~%zX0ym_7%fzMSJ?I4BN>G<<4YY)Yk9DIH!}7G0gblA zBkO#%ee-Nwrti&Fykqx1u>eGiNNB}Ydi8Qo$>CY8;rZ_3j2*JBHlw6$E~~xiixHET z{a}oo-R8&JqKd%v=!j^=jDXZ z^G^q$8f37gN101YzfohvK*nAQGM{w_y+5iUGRme#XCadB247e5%hB~uCHo{9oJ=b*(O-LL~Xu`y}Hs%Ea6TLJ)Tox>~b0RL!q>Y@BfxlQ?aK zf>IS;UPxK<-R&2=241*sZaH?K=^xs6-nkfw`O>ah+LR3BIM6KXV?XW#kzLcD3`?i- zsLxf_gVt=ducs#4Y+PXWt&vucQme9d{TD|;BpZDJu4wy!Z5*pxoP~att@?ofZ*aW&NolsHG=1 z+Q+e_2aN60OV7{`1k>5T%) z&Fq<8`lhZeBHoR_Dd4GJU$Kq{`s0`c8F;};;;ru$`dOuJjMVYrjPaVyV-?2U=Fvu6?iI~O0F2tEXt9?jHXk5^JD9q^T)qfatSJO6e9{b#I3vT`e0B6 zycPcO-dxMEws4eaPM>(by3d9W5>Y?K-o(z=SRaPGIw+T{r|EAxJDf_AcFYj->{9tP zH4S?GnWZQR+_08^QrwRqo6Om2ZqGp!c6v)+94&h0w*k}R{o^FU_Nn+-BdG|$OI0Pt zQ^8FQEQgXfyUZH>vTEo1cb;|DLv*M+-(95FnSA0I1Ss{t2<_o72aMDFv~2R(fN*$J)(}mIdg2 zFG?c#e7nDcr)V)=T3YN`CEAU}=-N}s^TuNX$8%5#Z$BOV;$A`b>}N%*kgwZZv}Bq{ z^N~q(${Z)OiEpJO&70^WF|mxEfwq3dyheP{>~|xU)h+0zOmP>VNNr$oXkwf8xRcwj zP_Eb3=hnwd9;kXy$>#7fuX7G3+~+wY*s8wUkdJgNYqM+72}8VJuHF2xX|>p6qErq7 
z8DEFEsSqGOes7msT)yXW4t3KF_!dx3l72fc+haV9!ER9`G%aV)ejTfNK(N;Q1I6gN zd(vl|F|O1dRrVUd0haSul*~@5o4VJ|kxBWU{CEM{sZH0b+h`m-W&TK3Y>?6Ny(QtY zPuDh|*ZJs4$RhJ--9?K^S<(bY;jZbA{+mvXB=g)lj4gAj^isY|58duw0?l$aw?fKa zV|P|G{NfyMpJ&q1?$w-KD}UrsL_k89E$F2yxV7A020#U|iFT)S&?h~RpBc(XVfXtFROVv1xXJWt7s809Iu!kg6iACa?KqOC!uyT<&BNo##k}XwOu0t z-x?6dhI#jsJ`%E9WJJr3fIrqJC=KXns2YImUwHqKORowZT4uiZ49d!(KOE} zlxf-Lt3_ba+c~Y-IrKSjpKK~_9`Ex?B0(>ZAPdII<9!ns+zh(0z3DSA--Ok{V=d)s zb^!S`oTYv^6i(vQaPQ_Mwa~&*s+YC4VU5cN!fQaVvo2D&R@mQ?bp`R$!4#_ z^RNXty+lPwOs7oA9Pcv&0c!2*kap5u@-eY;hcxC)Cmx%=)(sj7aAJuC!W|e4ad#9S=dY{;R zWC*1!jZ=E1iD2o0STjB8_zRXqkC@U+fgUlviZ%>J(JnLC1&(bG$v$2uo+hJu!A(=T zyWH=rkN9lSPhr@wBpDU9ie%Vgo3UqP8B%0$^|4}02)aT3ge`d@mp=&3G?RE9b#Dx3 z)KZMGwQq|K`TsEW7FcY}nWQqtWZ-OP}~NJ|cl zFu;h!07KUZL%ipA@B6>^u5~`aEI9k@c=ql(_&oZ4-(>J|DL^77kvf@q`wdq@Wk|ZU zwAjkr67egq(0bLG9;+mQYp?Wy8!_;H(9La%Pd{L3G~IVwAURzK#neKL*B`eOq6Ax^6-UOtR&nWZv7e_Mbm)hysRNn2jh zq63(T&TI1BHcXQ93rdrJi~j1%PVabIO-S~PP4X%I6p5m>`#jnky{fxFPpA>#iF!M; ze5V-l4#Pka6|BLG>gy+%kTmRQSC2R7R6|#MbSDnoE@+AJcFYo zCIR!xY;waO#D-O^L}9l(LMT^p^}o-6{P49)?}rtE^k&WIqP!TrT?7?$R%d@MzGPXp zyBAfNv!tpmdiu;;eY&7ec6Wcr4{?4qCG}9DvG>tO0RJ1b%eftMT_3R`wdcPZng82w z1?%i1kS?}&2Uxec8;l=>QuTO&<^vZZKvg)IP`JNI`rurVEG5>b_-T8zu`EzRI{mKM zbMsxu74wa|8$#%?Mt%BdWOqqq9wf2Z>9#nX_9L|KQqpn1egSE)%K)7xcQTEg&7xWW znoUfStY{n?>TvlAI@OHHK6h0_(gF%?Et*N|!d=L-Uqn}rT`B2>4#dZ)bK(lsD5g0y zX2@To*g?#_KH*d;pTw|boymL5DrhyR69il4sbLDFL7D(9!ux$jm@Bfi5Fjs1))Nu( ze{ZuCOqo{CcanMQP{$ilk@@@eDcX3JBwEOnpL8e7Fe_{jP@ki+Hvgri#w;QaH9`Dx zH2Iupu923kmBP{g9eZ+(VF@IQbC?_fadU;giBH2~i27--tH}8j+?4q^bMMkG(dWoF zc@?Kwb_KjCe<`RL9Hny+1;Tcf#O`p%T`pQ7u_>l`pdj)*d9F%N`2NO(&1Sb}lUlZj zU!7ks5vnVx`#+Oq`n+r*IP}Wd#LtxI{3sGiR7U4D+`sR9y)VQd?VGK3qCFM|x?Dx< zUiO58)^kxMIMJvn$*BxZj~~!B{x2qcseVulDfuTCxcl|zy}r_6o}7CwPD{Clv9g2O zN?Uh7TJ!xcrgsy3cPZ&RSy9lS@75hqh7b|C$nUuIFo)eQ#0(OJY`Y(LG0XIHSavk9 zFdDgg_0#lq{(S}6WRN7bs(C^J0Tn$B9cO2(VlUWT7ae!9?!}Kb^H#OfEt`)7VO2|h&v!8|8NRG0%f-&&yp1_~EK)YsnBfvbVLB(T&yMyyqwc`P|E1hV4 
z!61eBLKW*IPIi(us)WaMv~UXfw|xJ<6-6RyH|`W%%7>U)a{U^C@a(ildc+M(k#c(QEDYP8G8_$ZqA-oGiq>|GkeG4;b+E_ruN3;_n5 zrgb9tc1++JV+OJjCK=0Q(!r7C7we_H3y-zZnZyE)9B|8qy=l_9jsIjxcIZ|^s(h+1 z!m|?Yco5$SS>~li>>V`y)HBFW71L2coj6TkidioI8X2RFUkz?ZhPfguriYL&5^gvt zcGQRg7?kLQTc0$IygF6pJ$jlLQ?7Mi!unT5A(JUVAuz67?U7_-^Znimn<~1@-s4%~ z&!en*E`cH5hO(cK(&cGp^`^TKrlA^L^bb;fiCD1puQZsa8t{R>^o)uDeLoqzg7tZN z?1v1qg9?V_E&6w2+N58-TUgw08RNwM>m)AD;B@rts$JVDXCxVEbBIs8IQ%b>5UVhm zVU3rr;>(rYDr9(X^YlgdB-s~k)Af)1rIgeM66Fiq^9hMk&&7DJ=WWM*qNm^Sw(0Ac zq5_=uOK>`rIW##$jHD2J3K}jIKaVs}wZZku(Sc6i;j*LR**Lhpqb)6*JR~uL@kA0# z%Pj$s&!vLD92gdMi=lG8#-&CRr5$`Q|Ec|*YnS>D+(1(+$0M?AP7PA9zT zy+BITdeCt3wWzf1xXJh8a5yMWb+SWfB-ql?=0*wlWn+1|KM;k6Mmdk*m?Ec3^LKoD zY2F1O_KC*5>+f>w=Fb%BXy&^x%PYEe+Ff&9X-@h>YPH(I3xJN+&~1wo>i0_&(P-u5 zU#I+e@}ddxQx_1UPf6UB0&{iGrHrX?xjF zMg?C>QGM(0KFC00CZLbltTZeyt3~SCJuT2g_`s8R*i%YubmM&K#@P82ac1(*N`;uK zbJzJE>Tngu6GLd(@Yfu5LwO^5)_X-3k?;Bp0oh^t`Bi?<(jpN~ZoJNh*Bc_ymru5V zk<01{3AD41LO}+gyKBvWvPx-*uPGs!4mX@JrkBTG*Sbrf=_FLtprT;8xpMf7?OPs> zEMCiv?}dZu^O)>Y;m$F3SW2_4_CU1-NB~_`V!Vl( zS(wuY?r(e>KXxoI(|{JZmS?=a{1?o^K0h4k(7;-(w2pfFnfoiQSQ*snS-t0S9K35h zrzL+y^^c8YY?qMG4>{AR5AhCG&ii_vnAi<7MG;JEx;~rX)5}Xa=vLLl`dZa9*5Kl`~RQ1W?2$PC6~;xJ?QEE7iGiP7!M53QXeU}v`p+|L^UzYqXsa!a(E`)vx?WFr2`8N8_UA=9XyMAFCQ)lyOsHy zAps=H=P=WpYeRdc>9Q*j2-4l!6=a{j=UPE;ss*Jxi=I{@f(AP9-Zp zjhJ<5RYj0YQcv|-Qr4e;JmAw;AQYJYw{g>_Q9+4W-q2+M-Ktqp+a~>ohZ>`zLtZPM zjz|BU`u*>(0j6ex{?6_fT;YgH+|RKtm0EVyNqs}KwgjNGQvD=iz&X=^69-O>ar?By z=iuzlyyxOMC)eXllgL1}x)n%P^WOttTqT!Nw_EMIJBAkGK zUJ$jxjUGk$1Mz>~nn+F#A9KCE$gT{FpM{}Hf2pi9%d8G_p+nh8LwAUhg7MZamhUQB z4|)-+0U2JN-+V?~V-%vJfb;RdiLxp+>^qIDXSSypj#>S~1Rd<%>W_)~Sq0NTZA>ALjGejv zlvmZRjk?6_t@RGm`6-o`3f`_4$GpD2aLC|Az~+wfTvFJ@(qWzq@_@-DYeyhY)c8$~ zx;R&k;m57Zz1kO!$O$u*0nhO*7N3QQ@EYku_)g_~m?#|NH!5Yel!lnjJH;@nzg=u$ z)mE(4Ti-iT394!m3$%wB)_oj(TQ&PR{2pWF?5+Q?w&VK36Wwm+GTqH=lI?@Shl??r z7Kh7JPC1WVog(KS+GbHY4Z0g^Qv!PAFZPFi?e#sEp z7Q1rw@lAWdwzPt&ry>QvX+%3vsKzjx2f@Cm$9d_q?H@z%Ao+8yAXsK?y+lKWX4gu@#$NC5d=O^}`a$F0u?_zcRXlU(DBPn@Rg0x`v4oGw=a? 
z3SwYO^t|St`D+xtSjC)^dxm^>YNrp{(eB$=I#&)sS1VuNFR8Z9K+a-GcGbW+;_TGI zzey$0ODkncxEL>Zx4oH&)O3~~R)T?!Fk}9wJL(|`fkFn8ChFMFmhF6G5=aBbdr5iw zYuy18L{Pt4#UCf2H^+IA;{G5bS#Ho7L=^Ow!*oIn-%jqPQ0B%*d6>M%S&%;}FH}@cP z-+OljTl8$PG!NzDNYj)@BEBpWM)pWj*=`ve;ms<3cPeJwlD47~@gYbWh=si~n_ZQw z|9Mm_7n{ZDIUo44!Tjh?xew&7)>c%s92C69ie1aVw*Bx4uoDUll9vQ2T)yP5O;d!6 z#?rOEPBCldHyxFIHC%TEAWi1t%YT1A7nSVcy(!{*Cccn0ztD5POgfQ9D#N& zIhH(sptc^(O7} zcYM>seh!?Gxt;Zlhi}(vThn(yZ}2jGIu_3VL`t3=p1tc!vTdDTNTch?@?1GB-wf$G z9JO4@@YkRJyhMWTHvIue1nNAV-Hx+1NjJv6GfToMdB0ZkjAsL=gf(>0QdRXX;dsSp zm{G-Q!Oa&aft`6(m0oM>i2e~B|D9%@%k^A%#z5InU0es| z;6lMoJQA`TNgT-bUFM^du_t42{ei>KNCQ^^jf4GWH_3MpZs3Lga%PP|qe=a`;_1>Q za=7h>SgRd09p0U`X*j6Z4hlXt1cH|D`D3qLLtFg9fLYAK1BH?8iCvn+s|T2KsrjI^ z6c5#gz1*;wzg`>e)f##qCWldPGfEN8cvzv2*Bk0gdPDaG_f9 zS36QO&++^PxY4J-$$uaev*Zt8L1?52;Ow-6zj)#=cOD+EpprR70!BnaNUo|J+8pYC z9rrH&?gWBLvp|KTuZrX`Uv0N*iH46EJB0d{Vxe_X>KM3fdDuslKp-JP06urhmT+U2 zHWWpZY7cRpDp{r)2i53_dQtGk9^5*m7&TjeT7tNyy{RNmG103}95F5|&4RdLhy{(= zts0|GGFs;r!0#lN!=Ygn13ZG-Siid;YE#HO@Ih|rH#7=|uLc`jXCzpk0nR)qI{WaK zs^0G2Mcmu2Ulr)0g*q|xW>@G?w}5T3*kRYS?Gy;gNQkhXTKwX3b5-C&*$e^Y5Q=ky zZwNY%j=xR>PPd%pt~)bvL#@D$^!6GI!&1K_0CQNk73mvu1La)L^I`Xcx{2s`4NL=fY;0$%>s7(zmm(+vOg36ig#O3>J*v0H1KbB; zz^|PXv+_67J`3;Y#zc}Zk5aD!X^N_l&IzH#?TqM3;JWVjVII#*w!gm{t;CN1GEPiY zc6V>4BXo>cf(i+nYWN@eS3T*pb;tWD2CPL0Rhnhip;>IczkJn-wv##Np`q?JW+$TO zZ72vzFT^}7V6c?-YSU$pO0F5?i$ah%;U-xVW(2UNG+EhRq`jLxFvPz8+$S` zG5&$Zq%@^ri?_tpHP~vhS@Wx0 zwpc*=4d)+pJxqfqp1vrh%th$JFB zm*}eUNi}YV`fu>MonTo1*BBP9Y0r#S&5WIxta)JlLz*jy!Dk+Kp*p#&UN7wS3VgM; zPA>D8?`|B!4dEz4W_i>vU9WuQrq`-|mi}$`FUrheKE!CAt^>XK=h?n4`b>|a&fdz` zX@#o3TGuNmb0PVBV_trhzn)biND1~>#;}F;wh2ZW5~ zPH3tFnbhQ25k-gPwMsy>RFGH2)fSX|vA@O!=aCcuY=w3!+$Ip;Lb(*8>I87q@?TtAd+q=ZJ*CbB16^WD={PneW zk29_c4*1S=PxTlcj0F`z1Yd8#{=0di|MV1?wFrK#B;?59KbM!ZcpojT z&+{C}G}S6h77?CI_R#F{xvlbv)28EWjc3z~tWNmM0!N4)jwkeFFN(rvu%ePT7Md1LMm1eC7u=f55bINcQ8 zigm{M;De>lH2mnje@}A0!E&Q2#Gjf)bTS5r9GQ>j&5dQXv&}wa85VkZ`d@~_E;^si 
zyO9VCFB&w{+xKLpxwHO$I{5elksu7m05Y?3_F)=q=SX6=5ezIkp3tPM4Nw)Fjwyu> zv!?mTMp#^Mw?}p$!26}yXhIYqZ;ZN#c^%%~HT$u->l58E-Wk1NeM4qrR-*N2u}n3G zvbMz`WxDt?Wu?7l>h~zUeeB1k>tsn%#W%{#T*aKjdTVzD@Z~KDFSyqM6t#+)g|1+; zK$4hT{?V*}0-K4k;4Sg=BkG|S&p+K;ee`+7fobeWz95y)hP&@a(`2`7wvf})3%b8; zW)z%U7$?kRbuA}LPXE@K6tsp?~1W)N7W#H_ek3n1%E2jeuZE~0_@fW; zSOH_Uxn;l(!8_5~Cwpgq7Y9QGFWQGE_>@2%Nq#CO5udqR`wB(#6$J_@D>qxA>0cr2 zp{$-HzbOS)szWlbOZ#r@M=bCDGDv=`UA9;q9wd2yTT@KxFziV_0rcKi*Lm;$Hf8iw zr{i0T*w{uHJ1)((U#<3uKTmngGF~Wjd77Pj4_26O%y| z=;563A}o**DTKLbLuU1h0huwHQUvoZ^iZVtWZ}p1YeaYO zj6eHzP8>wIzr%jLcg=LJQJWTgGMlkDVr~pRYpU3nrw)C!JL^oNkH2iW`W7y+b_wP4 z8hmeeqGk+EPM6FQ`Enakxbz0UQ|-u-dNyRbWryDAXS~<-*$lT01tj4aPiT^apks{` zSN{@`&fVuuB0CJ4>Cd`jfFaA?0x8Kgji_fFsGXn_pgw8hMnb+_>S8-Dv+DG;`6jro zd=rc*O;&DR9|XNl$JH<0k*8+BG0V|x>E_W^lZMqGyI$j(!e}hoFpJEL;LmHmVZ6I7 zi-{)XdcZ>FhY7!6(#i#w_GZuRYt6aMRz}$TZJM!)#?9Hn?!KU#1WR?=8rgcnM4mdg ze#mcW7c+K*FqFzxgLR(zlOjtJ(t0)~>4Zl4_hS~8@?0$ydAIp4nb||7mg@NXUq3|6 ziYV4xe|QrUBJs$=ALxzWY$Pqo3+?I@hs;NQy9nrl0J$&r^VECy4z*`865apdWl7o@ zEnAvn=^C#X_8Ua^of>COmLSN2ZT^01J&ZrjlKBmmDEV}Phx?GD+PIH_=2?2GBq`xd zAW#cmLA` zAV9m>r_Ye)a~S6p!M608kmDt;Iybkz4tn2!`SKP)vYB?QD(k@LQM!CQiHDMq4#j+J z5|DZys!_b7P&fISN@>57%HgoUFZe~Qd<;&u??1~CN2Ro_qWcy5Ijb4^?9m@~9$DdK zdNC6sTP|z@oO~5B?Fw42@|w$j?bw_Zl~u(_S2QgV7oR^E>7`kQVKHL#hjWODWM(^) z;zV^uPa6Yvm{i4WCc49HAm>8E!AoS#43;J{`5?-^*S+gQU0ZQ(Du-XZ4z#^P`~K|( z!qp}m^3lL>zvr#Nd~}%xkFe|BvmsB{(s0ZENSn#VH=Mbh+B1||@?;?h%BrogyrP=A zgn5U>u)T!6^xEg*df(uZ7zx(G{L;t{e9`@XzlGA@Pc|N~{zvnPv2(cB|7U@C2yB$XMz?CWdBZt zJ>6Xev~TA9*UC^u#Gtk&$VqT0sEVwomrw9bs$2KsO9L|ezkS)QI^G}D5($Ie8`g1F zueaf+?W5xt)y~oSaG`exxntmP)WsM-+>@RNxdL=pIIngsiW-<1O1m!9r8FlJeSukc zoU8h%KB0W|YOZ=AIhx4LFG}~R%UV`n5MOrlp#x9s+7~Bv=6mIuB_HiQL?COtYegkw z*YqK0Q-O?O4STJIRkYZYf&wnN*l4_sXyPCRcyYf+YjZ6{huq4e&a<-3i6C5Z4yH#( zo)oTCrFt|XhNf6#Zxn%OJhGw3#pn<5{WYZH$L(HRuZ=W)GcD$G##z4R1^d{NP2w?o z+3qg9kimq!npJ(&wzIr(1Vv;3tFlSW!T24{yrakX8dDdC$-kFS`W+HHz8e+7ce13B zN^;+Fi&MVY;);C1-ed29e#&ajJZ@V%J?Hb;^}jru5xkP>Zg(W=QL%2`kT_^`Z7^ta 
zt<~0%$nck1E8e8KjirKa8Ho7CMvwY04Xj{>*Xr;@gR|4_}Z4kiQ{jeomU z8~@g=XE^Ni14gPsZE#WWmgTJWtCft@=oa0Do5Z2LI0FR0?ykE}ppl3H@dNGMkVi^C ziCp)W)Ju&qlN)LwKi5g3HU_@sJ>QD-yNl)F{;mj5BtdUKdh%>&!gQDRF^A$$3xa<# zkytdA$}-WZt{uc^R$SvKar=)Y5Nm!*&|#Wx4~jgfiA^P$agBj-5w;?hmGhlminj(Q zbA2^Fyeyy#`@3`XZ<&;0!w(!mmCHqVXK^~z`QOV8r-GBR<%}a`@w_#J~Q3zow8Jw+U57vs`Xkg4FW6XCcEek zGCn^^7fWg18*aXUMhOLicQ>KQkHokB50uw8j9vb=xJjqinmjjt+Iz$&?31d@5q0Io zJg4Zp0y!y*vFZD!*x|nEc##o1vQpRj;*A-0-m3|8xyTV^TVB|$>G0OX>Mh5*dATF_ zL=!v2`{=Xo!!^ru{_S@RVJe8{hax;rwiq;<=tqFD_ZvBYv3p(NS@_i5efmBX>&)&& z-w<#A8^n8ZJl0(D7Zft8)~AkS=|#2F4@i|S_zrwan|t%d@L_(E-s#ylw3Mwno~$?x zM@vZERa;zSBPlg~V;;i%+rAydYDHdVW#Pv-ox@+qFiB{mPDpve%2|;=u-QrE+`Q^Z zz9vQR{oemPKiA&>hsyq+*Z#4&Avz)>M=3wAn-3>bjZVHO8s5NM?9t5&c`#otQ%{C{ zNrmny(Lcv~`nKD-vwH4vUxVJ><|IR%F1s}a^)2L{5Pj4d(q)k|GkD)OPuwSCB<;OX zsZBkscn}Q59+m@sBZze_c72{4AHs_@_z8?I~QJz$=zr4 zyR9IciN5-`hhk=UvZz3aS6?XDw|k$0BPx#6syA>Y(9lREDwd5f29_a}_0xZi^irnNoP+`2`}QjfHn-nd~jc`s__ZcmnFwt=RiMb(Y)S6rx_i+hjOpA>MF)j4soS8u*boxZ&cjx| zM@FB_pY$c^>l_WzRQK94!k$(@+DbHq?w3o7P`W6>sVHGvgxBr0AOxxhLJ!f~ADMVp z!c`i&56;+3lJDaRVXAX6rDXOY{N27nt}emC%KfH{q}2h@PkOjysn=D?X2fr`?BKl< zT#&8!Ec@qFi{Xv4a!5{sVJohaIDMzYl$f=6%VAA_JCTo$@tC#oM#N@)xMQ^DOTSSr zpUrQh_0y}a7B*_1*C2u(x%cRI>S4E4MhTV)<)7YaF|g1VNV#;A;ZsemwhC8ri2EJP zHs^zaEwbtiCSSn*-uTg>#|^}urK|smD}09Wi_fNLGE96+fBxvn@q_gCe15-dvWfb~ zL2Q5AI$ceJl)#Q_hsp>>MA#eUrM89mX8aNl!n})yTsORFRS^S`!1sm*!s#J7y{-i!! 
z<&{p4V=zj29mlm@-)H{eH&LIK`&5gUd8}`#i5NG0io6JV{5M1;_v_GNZ<@mKd{tJ} zQ=K8t3vnV!FJp)f`TT55WyX`gQb$S)A%v92FP=$U{I&v9;mwQrbk7jj8h^TVpTka< ztCI}Dp%zHw0Y;x0UaW`&Sy(Q$C@OVj3X+moSQ7L_R`DNQuSweFbLG;)iP+wlNAq2~ z;B&yM^}{yk9v5@BU7v7ltUb&lQtM@)1(AymCg!}c%k@Yne@>La7NgfxiKE*e~HQX_6i8ku6SaHR1U4Zxgw25pp?% zQ1zqenfH`G8iyObtPoTza>cD)lKDD(@{itJOz_y-q3yKCF6$%;4>{18ey&k3nHeBhTV|TF zcN@7EV4zU@>pBbRP`W5N8xzWl(xx)1rLuNdf0_q1Aoqv{2zDcV_OSWdx;-|0@%EK^C zoAnI8$}sBZRp8vv@kooe%gV13b9I0d@DA_CI~2@choD|`mlep{j9;(mo)@SD3Jpq$ z%h9^yEM}y47hWm8k0vDA2_kIw z=JW!U7ZlD=0LF%^EG0#)ZKEqpg_Ys9y(A(2r9#19Z%~8r>A)(Wc~{HSFEO88+)pqZvImG_`8ay@&X~^!Ny0d2Qz2 zBV~?N7Mqb6v4Rx`*{Jw%N|9DxlhnB*j_MVjfqLh?sK#{`vqP@HcS50bWJ@4Zr^072 z=9hXm)|d8|8$O2yr+Bbjlc+azxxnztwBaX)!4Pd7X^~^z`OLQCt zI${5d%Hg!~N*J-dZ3}8OV!jt)(%OtaWUgxkbjBSp1tJYCh?ht&hzpBq23j*W5| z&DHa@bnTGa7Nv+@k0Lpve)VN+3dk7g(+Q)^irRV1z<4l|(Vg3iSx zA()j`$1Ou`gg63%?4cQZn!{@m4Qa{eBK5gxTo-w81tmarolqBj@k~w}? z%Sks)bt!jR&zxX;+;W0Mk_V0ocL{?gpTS2ZmCPeCKRl?Z z8Tf6J`70kj42^fIu4d0*QhDkVZf4&}44tgz(l!9C|> zF>O`epm!;^2z%IFr3dl%{`VAtSQMf}F%vkdygLQO>L!+uCI%A24i0Hj^jeS{+%T;x zj_~TNqraf=mqyD5p%;Ed1VI&?pL0%eSGf6i2C@GOOiptEFp1|~b`_U*@-i#BqEZ5p zXWL^IJIQV4MqjKYa1kZ~Uv&L5-08+kpRX7$uA6kXB-3>>K8!fktOG!T%_7`Ra|;$; z!w!qUdXFMC%d;guT!9C={@bj3AXMJ~DI3WRHkRK7yzd0rRTTq2<4yB<0+7mBW8ndL zN`5C@LwEd~$KIg`IVH>&rdArb8r7=58R-4bJm7q;Y-yA`Qu02szc~!}T;gt>B@78e zkmKL1D*GCRMy>g-?zJ^S_ED3z&uR}j^sh>qJyv5u6T3%fhZ3G$|!@`;NJF&X_CR5%y^x|2^hl zyVuJ@gV4BL#Kwqzh+V^1_|2izrmyL3PXGIpz5m9qw}Cu*h@bQedpSa~HHVJmt7u_O z3y~!!f3vK)P2O-SJXUjmW=(wbx`EuuMoR4NON}}}c-4wH-S=SMDL;Jsr8`}LMpl|f z?@B;92)n}{o0=X%x04VMSZr0aa?vOBPaSB1JclD|p?lo{eg@5qXsO4$-4K198k0>i zuRyq2=K+zW78jaJeM~~(|HXse|M1|5W8;G5F}@IN=Tc_5uxEd|%YlRb+4{)&bL(@> zKTKD{^<5{=*(=u*M|oUor-2*)G*YHH9`I@qR3qqVcdWnvtZ-qaLw&^|yvMlqs4i@_syUV+Fh`Po!$Tym#MVzBjDuluY|Nwta?dmM)z! 
z`Uc~Ur@l@RO(g}5o9*%vN}eO8AI*^pk@0mkWswepfhI{^#44K!`bhf2-EZQaa40mM zx_;N5;AY2tToCY+-TQPbsH^F^2+tPYS<`Kz@7HjFwOy$`*&0(*6KVEv4lkZA9HI@x zU{<7mEw2ev*Jo1pp;u48JrgPFWg%h~KDY^|OR4`v z%x|?l*Mv#rBu$Z`UP}bz#bKAU&E+tbC_?(%H9+B%1{2z=Q>776TVt=?xz6gQ)$5{*QE8uYvVm;p|^^LxVa9# zV1F`Zr8Wa*4ub$%ao=ymw8MPWfG{%z{}V@$82*E;wRKXti1O9F>U{Y(;X4eh-;LQD zAo=2c+*xl!iimHwj7O~}3vbz~Sf)=|j*PCRX|GCFxF8LO#u(^!?yC{NWs_GF%gJ!r zT;a+WfcrD+!9AEcvCNT=j)>pM%ohC}`+z1gj! zA(Ukb!u^7OB~{9nqBH5?!VwPox)`bYzaeLLWwDu3H1g{O&P(j|N8V?O)C?c;!ey3T zr^ue;OZ|Jhjb{BFF5)$k+CEWAY`oztP40VZ<${1SvON)SnxFQvEX~*ESJRk#FqC`? zM5s7Oh^co4nk)_blmKdTbaHC^W2{Vxj(3E>NWRwfSpHd~%uQgt&I;|-M4}!$?!c7w zSeIQ%i1K(&XMel!p4-cm=o{=Ff67L#*ThVW^OW&ZcvA>Nb6$#Hi(=m`9-$WX0cfs2 z7k(>9UU+R#(7c}IV&ELrfZIH<#t;^q$r6I2tr%0eA~P)H8k=dv(~v z28Ax-dAj7uT>bMa;L%Ulm!+Rsd@$(2MB{Rp#ZM{9p*oPH>jBHTdDKVpF>jY09y-u9 z6}7gNSCDf1fSpM2k!Gvw{GZSIG|XUKV!gaSY4}(J4a`?Xa+kSq5*yW0Bos?kiTp#k zp0~enus|cWCu0cV>=d*nAGWowpSA^9%&wNg5M<2NIOsfxC#&nvLL%?-L=L_1JmiLM zDULXrmoC8KhO&bhATctJ`rNdN5BQaJrfGtMKs=XMDY!*rMqxZ1Rqd(O9x$mUk8|b# zyW5a-H~Z5za=6aUa!pO58JFAyXLVFXKrU~NFlWld$7vTowsGG%7;9B>RYau-%_KVORRUdK}e#^ zIoLlB=DybJNYPKDhIru8zco%9rCNOY4TYiR^eq}UzTg92)frt9FWlMLyh81_W>nuU zmX~RkuqnNqQe&rbVCBP_{BKs$ti?95(xhv(*x^&17vJF0*&R3MOB_pW6LYTjOyrFD zJ{i;hJPM)@WH7@2NIg5{)~tr4xnikeqGny+g-&}7eQhDRk7`5x5L=~BCDowTira9U zuVL9=@||6E4fhkc&$$#n{50B1ZbQgV=m`h$Qg^p(KWk7`|Zr_Dc7Q2bwW_<9AV zr#p^2*mnxeR_v37JuXTrNF&Cp>sTMp!w^&F!zc&SEsbRu zWV6^_xrW8!i$F%#zYZyRPEyea1J84(2rjR!kQoPlkw{eAyYL7;zlqqZI9H2*xKz(F zrqtsXLZxq7lntdOq`U--^t_%+8!fp?8@pqHm##ugr)>QFsAj{T$#knOG1Du)#{6~~ zk5Tl(u5n}8^iAIxKS&j?;2)Q|#q#dSyupTa0&IxS+hE(chhsk}F+N?ZYy==dyvrQH zHC7OR3X9|X3r}_`0@Z`&K2DH;zAZefC=y|#&@$10+v?iWBCf8Qv-$!!>zRsi(e%^s z3C9LEUc8u%enmBLliRtz^uQlu{kW^Tk?fNPP^&#llmS@I8nx6XpD8~h=22Dzeaa>} z?+w2O4PHja>DsMzU4SJ_4(0pchlk6eJR;=n2QXMn@X;EUv?N=M`hM}(pvWyCl`#T& zPV3}3Rw$FG)Xq^qzu5vS-wS5H;_O_*Jx_qZ?ngcYdPNyxcpo6w-KW?dwvFzuG77k7 zrgBg@E_7ckjXLTkPqvndn=oS=Lhr(M3xYgtnH>gb*@3)87fN_7a<``Q 
zu@_y~*>48OZ0!o6u(yI!LG7zZ4Cat8#NBWRe$Z+lb;`hc1={^*@zCsJt06h0oT%I6 z;tnbw)A)HUsU{pbc|;(J`un}jZ?2kUqq&vbseYH7;a^G|q}7F#4Cz!5*0?&M)xQtK z_`+7F8!k8#IuX7iwz(_y{6Q1mToL@C2WH~++mQy1e@*H8rB=(;g)iN8f3-!5 zOI_ffF&5V{Ss-q^F+n^p@;xAv`{ALIn>QHQ%eS}xz^CckUiSQA2EFbZY3Df#8t1hB zy|^z3Td^}R&}l@Mq;N}%`!-<_j3Qg2gs^tG)UWFbtI5M)A@|85z1or3U2lE4yymky zqjwg0MDNkl$3ac3dQa#?w(rLz1(m25-1pSJRJuCq{p^LCqW{;Y5eFJNt=wrose}88<=ebWN=TO{x6=wAnlr+>oJ(s@w<_8SG_>UjOR4&=fz_qF;C*#hSib# z5+7Or2Pgo<5GnnSvwUIQ7;L@T)_kFg^Z2>yGvYggofj$+t|z67S{0K7Vj~4%w=ZfD zLP^Llxa%@*$X9qm0EjpJNzB)IXdtqr^_V^T9_4qt`dhYs(O4BIGu7+Ze?aj@f>&xaAzZO()KWrnpiSMZAyFV>CceX$^ zBinl(y*-_qy%)hO&OMt||z=3p-)Rq%NE%E0JRA5X1+V(M!oh|2!%7RqOANF*GYD`HQQY>A|dMh3#~ zHvW;T4_O3kUh19P4A#iR1`kms8FzAPzaM$Z*~)vJ%Lm?wzDnF}>x6``@knW_yX8n< z+(V-n$G&6CsLi`@=qG%{U>gQ&K+b~7WL1H?_P5;0D~@eMcc5{)Ds<*+3{lYV7>1JQ zJCFY751NoR?E0PLi-PXU7IL2di8cY643ET1v*7QnM$Nb<2%&EFdMV`e4ioHPUVfuX z*-6}CfP;`H#0tN?M#~-#ej5@toVzAG(hc(!-$|>UQs?fd`nuX$+xGj|xBC9ET#4ha zu=&03*X+43e8hRuZmOC?7a-sh*svk94EwQ&@U{_4tIe{*>iE_{m)WsLcBx!BtklQ@#2=^JL(y zFTa}YpoLP8W%atN7upmJ%NFZrTJF3L=asAUYAZ=DNf)!)8kB;|jU1cR@~*bNLyn+s zNGUrcJX-R1Ys_7hudGYU~U_K6Cv>F;#wqRH<6;}dG-K^c0Xwdc>CiK2f0 z$JcLVjlz_XzxuxU5M{!LxTk%6H6l-0GRrZ*PWxPpkKaDkhdhe~!}_kK561f#iAFL1 zfld99?=Olld6GN7LfJgEs6L3fm99Vi{oeiw%FcW-^lmYN2MZ;0V44Xn|3DtS&Jd%! zZPNZ8IdwolpEQz8jA0p>*BG~z-f!Xeemf*_S_;Zz*2>xs?1SX*6C*1u$bp&p*{z;? z!8dqCZuk8}_VCB2;H}U4iTv4np~qW%G!kvF)r%`Db)ClAF5F|z|6GUvgfz&$+fSy? 
zYYNiYg+-PFBh%G(Up65w#&e#h@v7-Kl$Y@~xWS3?;Q9 z7Y$QGEvZk|4rE^id8xPm>(I!t$Ns=<&VA*|-AmUZujQ$y+jWk}bh%(z7?#G~dSCCi zxoWnrxP?FBFbgygs>$5w=kgr1(So{E!0+Q{NGvG$x?c7JAubkx%UH-VF65zposMOt z*jz{T;~W4=m&|3o4S3_dHoflyC<>LqW<%BZel#3 zs^#u>2Vs>qM~W0FGOY=fPu!nAf3JW#X!!rwd&{V})+TK@5Ht`-Ai*ucf?IG8nxG-U z-GjSJ2Zx}+9fE}5?$S8H-KBA-aT;rQch1c76tZSpkXCYL}k3L1TOhDr=ut$b`WJ*6;Ml3ElNmiU~{ zZbV1B%DIO+8a$G3KQZcN)D|<@PNSFR-WIM@ul_@4=4RT4-B5bUe^$-#!B8*mjI^W| z0UaQ2hg{~&^76U# zK6x4*m|7s=vQhGSaCVE+de2!&m!gXRN1YjC5%9@rj0vx=iR^0|JE`nC)$FSA*Ym8t z_VDv>y(Ind&AYtiX!>7+@O9O`-vq&nGJ|>kINam) zM(IGfOQ3<24IFrbSxupixx8pyU3VMim@W4Y#me%v?@>MI9(U4LrW}<59UXs# zCyJf29$eQ;_fhOr&f#K*&ChKZm}V3?NA*e_Fb}r#CMxw+BqTK(ygQO3S{UXW^l{1$ zy^07RS;k^(xuCm&bxzJZ3IXAhh3OR2=bATRB94QlVEa;z)HR=DS@UF0#a?1EJ%Su*DP_8uz6W1O z2EB2wkL~KwC*fF!rM0*XUFtEe60N~I`}g%l2|`E<^I&fUc}A6YCk!n$p{fK}q08qD zaD@~4Z~P=B!`+jo8+ivAJJkHc^hwGun259i1;yiftOR}=g#77qP_$IY+=^cFekitS z?@)1uw0>Nv483E#Rv(eT!gmGTk1{$zo%+`zzMx;h(8ZC|94Lya;al+zPZiI&%~DTI*(5h+ln(_lAUY>#BFnXm{dm`f-Nms(Sr)R`eXU zpJ-jps8&%MA(|B1XB(52#mPG?<7O|z8Y(M`1&HTqH2~d}x%F$s#wPqG6xP4vnvpM> zSF#8libuhkxw6CPSh3N(3N_gw@)PdCe?D$0G%7(A`iAh#q#W|kC%yi#WLowCC~ojrnTmd z#A-D_u&vGNip+@|NyPTj)e`b8mo{ZAm*wlzjtu)RIR308zV|78s?K9!Rr*3_E0H$@ z&~Wbfoj`;dzaJdQoECJ0DFHNA`!nU*#nDffC|cdZDkX%T7__&?*ojUc(=dU-7ha#c zy(i7N`EctO%(U`N%&^G>w8*>pn>Lx$A#swd(DM@3otfcz)4i#8(nQ=E3)7X#EZF-Prr}Z{EE8$ zcpps{>y3ARo}X8?tkim+M|gtG0#Y3{2+QXO$(vq(oGMM+HsYRU>&dStVXLf39P}II0r0fS;aU;Y0C-Z?Z{3UnJvWGMTv>dkVR6>en8*Hw#WhW zt9yEsYgSVipP+aS`Y{8MmDnPs{f{mmMPLc&8w3Noh?zj8f zN`)5f#*faPH$Kn!VdlKq;?MJzuA!&X8|P-~2XPuMh00GA-s6tP-R!}%`+oFQo}5OU z+n*$;e%&MysanS|lcx~R373OdMmrY> z3zBLTRP~SfQ1|}{uAe0G3m9lo+N|TgIIWxQ;X31YvgBc7+M)J_kn8pk42ttfdpvu) zfO?y=6@hC3ni-*uWv0*JWzz?#s&sG{3cV>x^;!)HNb?rIWKnx9MI(!*zQMI(h^}YF z(56{q9nS*3)zVSv9tnH@LX#xr1AzI*HRWHmS+^NZ7p$89B8_t&-e;51E$FfQF4o2p9wDb;&8JY}lP|lC-CpnI z2n9LtcApCA==0DtKwR_|(62NzRho6bA+54TsFzw(O3epV`8^4wB)z#3gN!k9@;>ZF zEdk3DwkS^Z4<;hDe-B_KLH2oGSWukDJ7r`y;7riUsL1oWvKucDpKs$$eJ){bs)nN9N+=Wx>-s=K{N;z_-; 
zESh47eaNk;K#XBfRq4{d9boBuZ7P1n z%5dDxvh)WgpHw?YgTUJ5qwnaCJhGUs(5;{qV2U1w(bSHW@BrCeSNp|#F#r}8{0mOna>)q#fI+sOJ z%QFd0l|;2QHLky8Fn&7c4CvR#)5UE4h)mMDE^Dlum46topi$j0NQl(k@6j1N8evO< zUI%~&q%JClBckh1O=j~{PYzo*0(| zWHKPm3+$b8I*Tb!MnUL7QSBgkEU8S~YBm9u%DA2&6YUTg$s0Z~WlH_ois|;~{mD~= ze6?eftweGLsd5z4sG0HXo}eP5LaW+yh08TUFVA%XDaHF`F|YMBmnGkXHDG4t8}*oe zoS?Qoa&gj{%779cs8bpgpi`B3qLZXQ4Hj68Pu;&+68sc5etEG>h!U?+V@_g1NP6Xa z?xyxFsKd)crud!yu-)-0e(#6uZgXG&4-R8Og*f(QV&VHrgEz1iUZA2*kC4-sa3v}7 zuo6HHvv&B0oD+9?eRd#t`m$!Ros2|m_Qf`rTfjzv6d5TJ3jjY9LwilkRq(u!bMD>OK|0QQ%Y6ZwNEegVGoFyu}=rh1$}PQ0j=a^Z?d{BLM;@gdyBg5B>7% zhz~N%ZEYtWTg)!KOU(PD+hI*J4+(c9@XLVJ)x`8AfHXamc-MQUt> zHw(}5KK#_qqmY%)FwS|}$xpO1GgN}%hZndefGsQ_qN95!d=oqN&C4`jliMQtUFGIu zqn?PvG*)NQFMey2_U;^}at^nHl3PQ9XOBotAc9H5B*G*hKkEeQK8hbh4V05cN4r)g zMkx!{TNeHJHYS?Nn0GyRuL@MCKUi%&jLAFTG4U8AHZ(e~=ou`z1Grzmuo92bNX$V2 z$V&WW;zBN+a{7g!`i{=Z41nFXYdw+h-S9d)Qb;jINGyKpplSZV0fuDeUzyQt+^_Tg zAo$3e&>uZZUV`tK4{WgXsr`uJ8pGGT5a~T_XFuzY8>8#0s|e*umnF9kWS-mJPKO&R zxad>r*zA&N5!mOS{ zLn=%ZKT%xE6?Ng_G}<%5t(FHJob5($e@QYE;!K`&B)TvyZL#W4=srR;|4VY6p_&V$ z5p|!PVxSr3q~di$Zg^Vel0U}Gzb{(x;Gfhus;C#g3cP4@?JgrbD-~G^mdkz}IgUFr zhB_Nb<^d-_4aWonUxpy!?OZ}aGdm+0QdtKX`VOM+v%roD7jTPT8k^xw4nYt7$8GV+ z?tW5b0xUcCuQ6X{02v4XLt0;%H1;kaQkfrNb>V3K9>=u82X(m9^kK|P$p}}CMGji= zjgXelci0zGM7;cZMO*Fuq<24G`IP+Z|Hd+~O}_d(Fq_=#J3G|ekqCNrQ00dc2=Vs7 zs6Y7pRTp+uW|P5T<-hW3CaDi+inAJ(7w@~BB~X<;=^kLVU!tyF-w!QZi)uRYWVrHP zYjh_DQDBvh*+(0W&bhWCUfKv-#RD`86v+h{dP z1b@<$_Ln$Te)r(0G1<$MCE*}XK)N3fxxVZ3Q7v-Olb}+dhF+v~l}VOt){0SlCEn zrR@R_?~A_Ni1_)0+~>(T3P6i=mOi{WtnJUsD2W&7A)z#Wr~6$<=nVsDtIbY-=tg-D zS)0knEmPagg2IwGAMD2tV?IA^u1=;W?7OA<-L`Zxg{6>f1s*kGoKs{V#!Wa}05@tV?RLQV&=Pmv}=W zv5W9^FEc1d+OCcVJny>P>Ac${G$t&OzVqDw$dmjL!iZ5uxVi5_B+z;QMDdYI%eEWI z^A=We2kRAawKqb}dTDO$mu5qi{sN^s&GhE7O%GX@%^EYI4?Ubvb0Qs9gf68fV^m)i zo|vY))o^UrSxn1w+InbmwXurFW~UDnc(9_Q8f(CNrRJjVa}#u+Y9KCSX-<9DCr*LQ zRf2u!?>S7~gMi7$_U7f|>%Ob_e0UYhk*4@6nMb#?%y~{eY4H#%7(f2BZAYNf^?iBI z=7+;L9Q{MQh1!FOcSsQHyKde9CX?=)Rj+f#mO_77+E*6vjglr6W@S9spo71(wLZ_1 
zW=!0x1=-b6{fRFqPXwgw^|(KcJk)290J!GC*M`?s$nt5CCx#|>TN$O}m76d0S{s3x z)KQ7#cRH)AB&ZH*7~J|&0vBaXMJjJYHUQSSDnZ0X6DwQm0&QL75vJ&a2zFp7cs*8( zw92OaH6!YIg!5@N7y%mqdG3c9~NfJ zzC3IO!N1Rg51Dp}*-}37C)gah)^KrOO9*+~@aAQ-%eUeMf(1(xNL<=?l&-rkhH4+S z-(8I3T|v&APv#?a=ifV2r4`k1(-0fP(VP~C8}q+N4v>g`Nt?z9@GJ3(+KsfpcY<+F zK}C0zBrr$g1&og8O(p-Z-vjHKD0SDRWaXy4A>CTNap>o>ctd_=V z9Zu8eg~2|=w~^3q=((Q`xy`j`EuCIB>h`RY7G^Zx-+45oIj@C3&oO$dHq@Kj{8bWu z&Dq|PyX?~v@RhQXroy|?E#^8skrrHVsES4nK$O2$MibDia$Bhk_foFjG$%Zn;(_^V zpW)sX_X$g>GpbjbRHJ{Vg>S9{bb1`&hKIPS~b;+lk-{cf{q#Ok$y zGp&m&_@=i2yZK~GN(94)1!+XG6=TUJ-&^VoqU~u%$SGQBGz0S%Jom1kWo)D+2YlK4 z^bCO|27VkS;e$D+*z0p5{^PkStCuO1gqRJvAz9NV0SQObEnEiM1)U4!-WtS?TZWD9vb*;`hw45^lHkSU}uq zZAG3V8C@7q|ImRn>|LT-TOydNQLUM3RPMg|j#I~O6`eM}B@CnEnx~nDgy*R?@hFrM z_jMku`fZ+Ul1yB%ckFDO@MJ+~zYa*^ZWgTD_oKzC(w-X8wHRwhTmX|>DOcN>Aj>fR z?LRBp6qM*li;yQ(QT*Fy`=MI-O3KvRmh=Ff)qs3zUikhtW+0JMA{?P5aS$8T09}ub z%LSBy`iolZ+`Us{J)fnDA$40=+9H2_M)X;(8gT-q;acqS@>U&yjOK+y6bY?w_DY32 z-l!BQN@&!YmliPxooT6YVs`hVyvh!Wp&5vv!^~hkCZy9id!1i!Ei+j6QvLzkW#$X` zS)KU|xQ%Go(U@~GM zjqtrd;`nZdi`!WA2#MoTZZB$>T46bh#!&%uTKrxEDA4ayopWIZ#D7#dTak=(6WCw0 zwOB+MD>Ad#B9tJ=;jp=rrBj7!{HpWYN%J{c=2m}jr|rw2e3WKu@0y>5bg}mQY|wHj zM3xM+s{X50Qeiq>dva4cAEr70S?Du&)~^;n74`s4`)-5oLl_P0Xi;RSkri4*F5t~l zcbCS6^O|~BdsN1gB&!1%v0c$#po1a2?$g$%QoBC0o;}as<13<9*A~a!f0S}!47w#z zRkM3#TYw{XPY1p|%H`X#DbF$kbq?kgg~N{vcb75rLO#LXo~{El`q0$8@8WcLn!<6Q zivU$J=#hyq(C>}=1lQ_fkGw7EJOJ$vRm7XNub)lZ7(a28N?2^;TYG}u(N^G5tD{tH zKKNsX)pe%E>Vo<)94+609#g8DRm^g=lQ81bYbBHNF!4~l@b)6x#Ini4$&XVi?n&BP zwUUF6hFbG7PAIaN9-ZI>uK6KPiOLI}zupqiA^suD=VF$JBtl&2Hos--k>#7jx)8g!sKLGmgpg8I2N@9xjZM&{qH_$;815mnxpo+oJ)ob?fvUX?=*RLlP$0f^f9U zj2+mILvJSk1FS4EqRf8+3k4l&4q>;9`ib?hzh$jQZW7P?VC{md6M7Yw`y%!^IcM!e zfu+Zu6=s%vlE8c=f&pp-m4CmPr&<)Lpt~=nY}v4xjYqMy-p%}>(oJ1+cx1PH+Toe8 z^3MoU8^c)*HCQB|S}DW}@W!vUC&~*a3Y1yQ<0Q%r9nqPhmYdeDwN<%rK zFViC_9yMAI^NG4(4vnWMh3f0VBNUp2oq3B5PObU}6oG*oy{?Pe2ZujZ$iH{M(ez)a zfXZlVUCTumxh!YIW{V_J%Kia5YJ7q{VH3rvX5B7+{rF3^E^sXf44)35`_ImWI{C{N 
zwOzz5gE!aJu}(;>9IC;?FDHZ0Y*g%8xizU|LeWgc3FhO1QPNDllOhV&ifJ!>xB9Tv zIC&u@g~WKN|5AhzHa3PIE6r+Qpiy)b0bdw*$R&>)fUX)b-qm=E0T>LT5fB*rV*SdPi$tR)yW%9ccnx^dA8C$(yh6P z3YVddW!Q#J(-7Fg33xQ-!*VJDl$bq_Fffi5?yHBz1Jx`DDM*m)mVTti*7TUJ}*<&vfL6 z6!hkTT#@CrXWX{ZN*!NG_0>YGztkzIzCrg8_r*gtSo#GdTp_-B(p}9`zs_#Oh}+GC zD@Qw*bbA37mswN1Jf3}0Y~}l{+!t9vvCz&d?_Ns7iutPrBB>PJB(|f{%1d{4VN7|H zjWP|U>(fZ3Kx_R6T(3f5qbI0>?ZAk!C_o$L(~3YhCXQWDIIFpni6t(XiFzY3+CMZVuoY(8x9)>q+_<$HoRsLLwV9ZqbB<#uNvQq@?|XUqb5U=dQ5 zjJB00fChjmf8nMd`V4)(^P3#V0V=6_Btd@C+iQ`k`NlanuFm^GZvskWHu@4DI~DQf zFio{;oWSO53pQS7jQ1?I+mduXc%#dC85&9JVM2PVCx;o&?{Tr4Y;}Zi=S>=)){T=P zO>XRqoZ@v;p=nTo8o;Ip2L{WE3rcJaEpC`l|9suwn9|CFT6A^3yQr*uJ4*{mv<3V{{{Ajv_&Dq`eda5`(<~R6ao!X>c`8RG7msD zJTi8&UDVGYbW~ro1_T~~Z?-IzZO&&)(Ix>y!thetl;jThW=)e{_>~PnfZ;36uZ#Fe zgQe}jEIIeC+v>g8s#&Uve^2a`7(6o!pgI>n2Z9 z5Z@$!#=iq8qkJtr-n3hGEC}eKcBb+7c;Gd@?bw?JN+Pzux^af6hf~q!$|#gSxG|5k ztW=$@kDiK66v0>qRxANz0L$dzCs$1J|R;PaN znHo0v$?alGRFyroh+t3NGiBy(BiyJVwxnEu#ke^lsBdWZpJo{l^s&j8Yr zVchd3j=`NXmL-Ty%XtGP>9u0#TLYRmzuLy=8MQwmqJ8z99<;!np$l}ClQ%>%6?rKb zfa=Dpj7;N)Wl(1X^(~-Z#xQFEC;DiBv&{Hg(eNwwKgO zd8>;qgPhZp4K}c@E#g$?VqLaUz)`VBeFFLPdMaAQx$gP52VexLlX6~#T-+31cl#kL z%~RWF%oNgzUQU;2UxZxzprsWSo_EuQOjm|#6mL;_nb&UDi-#!LONG|xA4niAl+fTk zCcellf|M-{+1DKO#T6EyYq}KPuvu_2;-a-ndaik>&;Ek;R}HXSKhk4*jvGI~{dwBl z3@rmqx6^^6qWEivFe%Aitz+GnyS}LX>yaVq>nlBUfhiZpRXtx&X*#?^C6U8SzrA1a z++19({p9-oqOr10U^N&#Rq_Rhc^_c!L_|=bcAf z0gWqHsFko66Tmot>#(e2<7V$ol#rVsJ%mxsFlq5q@10K?GZ@XQO z)ZfUoV1se;JD#bHqF-pfNj0MH7KXBr0-eT3Z%Cz-3h3lLSD{L|wHoGa21v(l_6}YX ztU_;?yE0AwFpB_F@HDK_C_{tY9)4YImw{`{T!O-{H~Bm(-IYWeNUn0^{NEp^UNXByobl#8wtAk*!oE+HWMOu-|W>& zni4BsgAKYG5ksNJd}xI-ag0m>v9B(-GMxK*3WYDKGRYaVgSl-XZzgJVL*T03WiLzG zzNB_b-Pw{dE8`O1UBPtU8mYWOA$l>?!*2z~@dstaL%SY~tt{KSlV^sx2A27A6g|Hy zGYs;~_ixp~<<2_hktt_*pOvF?;p~?zYtL%3Daj9d;w?Hs_!HHiAJC-8-wi)-H;`*` z!^mO|hNaR7v{(Q+J)Q&rU4l%Q{K92}zEH+D{!yDr6r#y~Wq{{1!lbY$Wuw4VnUsxFX1^x!pZ_u{HXZ? 
z;adSZ#EPr%5xpo#g~3a+@Z=?xci3HC1}8hz?%kZPG%xHY zVgf+8g8InauWmiTtMxsdhpB3w?H(w@@1o+(M_nQKu^+q2IcXv(pnPL{K`o{CpnohkUmTT(xd|Ef3dJ;T^ z%d)UCrZ23#TkNRU%l=z*Mug@GT7z)28c_0@eJEM%{c)N1Tql?4 zc)r@^+k9h4P8UzUSS!%ot#BAHrJPlza2Qt5ciq&v38Sxv^nE7PCUnv)EBn1A|7c*- zNmD4#8H>71{Xm1T~~Po z^*oC`A{*Djx|Z{$|G3uEp}%>psM(dw$abj7)HTh`cJ03ODyd(34leZ@0VOcr^jBu& zcq)=Cjb|ryeOEo?;C@r{+(0A#M;$R<$VzLsMG&PmK9Vn5WkLeaV%@NCD17|nD1bHh z)0Y3P@F(Ky6HFh!(G2gC-aP39dBKT-xdf!>-f9csue6eng&9>Rv5-WPsaJvO!ED!N z<=1_NsQRruRsn}nFUhzC8{^gpKuDTNP3=z$0RYr}o>4{7A5{2_sweo?JsiZDU-DTs zGB6YW+pxr@jvmOpPbax*FcH888a-V8dyB*FJkgmy| z&|kNo5I8JZQ7W>Tp3#0Z(0oCtY!8k@;BR2xt51??1(uvod$1#$(fdX>TAN6S zxVHr{$;10m2M27<^kXkv1!aG`@5j7vz@KRnz&%NeA_CntLsRWrZq)guV-7=1z(@yP zv|cj-U$@sWP(Q^;hn~aX^D)16EQzgO^7sDO5eF7`$B`2|vf}ap^w!zz(vs@tkrDKY z4s&?HBI7+ZOp~xIn<3j8126s`=+=KL3Q{1W=e%nSOLIDgkXU;342Q7+_*eEI;E@7f zSA@+0tZ)uh)DV(V?$SHcZbstY#+8GMaFLfcJ}Ds3_s7P|UHR7o{nrEicW3dh9s2+0 z9g3|*;BBzdZomFjs_zhd`1Sqy*pRob1<3x4Z5dDLn{*&Oe-1PaWB#D* z=@5bC26PL^=oHIDHgSx5Kg72}bzS;_fv(w`EQf4v$Q(}FzwiGay_#sL2Ze(0bC_;# z|7Sjq8*ZGA;Tji%1%gl@4sra7Lt%SiN)>;KegQ!AyQ=uVMnAgHjQ_NfJc^q`F`6fv zK%d|>Ia6Y;s1#!F%OeQr-mDM;C=lgU@YD{gaz*fN67WLn)~YscC?f^>R7p_&Oace? 
z_^)kF=yQK}qgr}aWjaiDb#n-Ih*uLSp9QaAQ_U~k@vuDuYJes9lkGO)ZP~ zSfsHe5&9|3oq-f%%6}3CeH>r5-kmu%BfAPOH0BI});7rw#X8NB}kiTtCKCQsLkrBM35&z#BK<^6?eV_t$TJ%4& zpWmN)LIB*`L$O}}|HerE(tlC#0vlHW%OySTKPJI{*N{FVAyNYOruQo8$^X^>{^gYZ zf|>oxDg6aA6Zw}@`cw4z?LGeEoBwi3e<9@k%PIZKDgDZnPDW?|X0K2^yk^rL=(+p^ zFzo9woQ2p{D6TcJw5&(p^p$SgariRkYpvou8fp}-1WXN3SQLkkGTSIqvm!}&KdKbD zPR}ZAc7K&3qnj+4j7_mFVC+u-)5s*WyaLcXy6lym4l#ehBL8y={>MkeDS#l1#L9#D z-e{5UJlFkPL#b>T9g*$gy%Z@mFmN~lIM2ud9n6?8Qf^5u>s$JF^r$@+yRr55KpmMU z1TuX}rrzuxS4qAu34=E0e(Vi(ihkt*BO^5iSX`p%FU>XTu$(5Getatt`}T(R-BOd= zxzik_jTbXgb+aw!T0finu$8VV^G-~N}&eC zNyCV|Ct`*HtV+)gD?p41a@?QbGIyPKINLssR)|Bj9!`hkI3zS$9+FsrPfj$i$s}Lu zdYuP2)-_fio_)b%q7g&E#>TjxHMKD6H0)@PxLPdl58?SGR}a5|T@VWvcR1}VhyglV zYq8~W^E4H1rzWPtQ<6mMqopMicYcrFQ@tcc;9yBSE}DmZ824p``UU!)_!Y<^o97;3Mqd}LJRg;u*^Wx zbN;(xUu2BF)LE8539Vn^UZdF`X38_2Y2=dW^6>Zv-0zt}J#y5P`f;{UDzt|AU%i5yG;KSetPsl0S#sRf3B*q@E6UlnQ`NM6Ap zpk`kaev{DJ74`Z)r`dKY`%FNH$#}aXlJ`A;oTP}E1c6Ijk3-k<-y3%L-b3!;857Ob z1tPa(k%b!7fsTiG%!oZme!HC3Uucu69`j#a%_6=KS^x+{GJ~4*UCL9ghh|zm;k)XD z!Yk@m+u7Jz=@x+e$b^5QVHzvw?kZ@a#G$g1d_s~P!X_H$v-5v7-v9TTA{FS01keV= zGI{t>)KmLeW7Rs(tiwB?p26<=^8o;t;m$(E{5j1uLn5pnHjv88lsVB6^EU9)2VGQZ z%a;AI{tgaYL+q^SyNB+*U!EVj2ZpJNTj2Oa)oa2UH%^DAQ^baB=pS`>oO_DUs|?^A zEdi<|`wN7NC-omFs#mBngYMfU9E{Jldk!+$Y8PSHFM)GGd*E}l_?ELk61&c)*R9v* z8E+FjRx61zwgtm+nYCUkXb*q=HsE?oD&!d(OXrop_OS34=4f@ek6OLbB89cF9+TPE z>iCy(_5YfaQ_KR{yNkBZdj%l|KvR0m>Ejer_h!jQkT^A!4wuv0+Q`%u&Ma_ z3R8b1?Dm(AK^JMcdTqEy8>ol9M=N5EWWc;w^D3iYE-AsQ)CMf-iVcQ6zWq2vcilw*p(*s=)oDOujxaZ%W&Dc4Q7fp&?jbPFHm}u$c&BZ|_HiN#+ zt~zE0FnT=I_Kkkpuy~OGpyk$+GL_#YANMxKB;dFmD`4dX75Bo?{r)0y-E-yX)?-wpy-Ri-^%_zLu%4*pT2j5k4Dvas@Oy7c1aIKg0RDOTF?){xQJ& z!9PcN|97w0B|f*>4k!K4E=*r=)>IgNW{1egspv2_9wR4W>s+=0%(Y?GZaj;B^nB>C zn)h@GIM>^cNdAFaYnXsQG&FNidQ1QAmY4lJpB53ZQYL|!vRQ6*iGcJJH7&t^$@1SM@J zq4>qNuwy@LNQ^hUp>gdPQ~VL;v=b^e8AWNSJ7>B9AQ}3?eiBrIt{&fd#2D=XC8WQv zKLM90z)>cR=WB|gt^yB_J-=52vY5OI$=o*7_&T=bDc=_2?=L&;*iDCI`I9fa8}<^b z$SH`$|GUNc_fhD-e}&>PB2wUes1`u|OX7CPf5xmeV?1p+mc~~bPY%*Ll*b^eJD=hj 
zE7~bWh=F*)tagMmU0SPTiAGyZzakuPELj~0(1SOF6I_dF}&XH@IFY-o| z7#5rQvJ1-aBZBD-AS%%V4NkN~==9G8UK!cVn~v1g8tX;+$3!X$b=%@X;~K74_8W4W zhjyEnp$+q6vbWsar7S>Ixsjh~(r2|RwC5mxh>vD+&A{t#nw|3J)+5lc?CB;`uEgmS z)a@E5O3Ny`G5`(9YG{@8Ll_nrv#OR|9KWU?W1%vbi%PQ^#fQxPXWNiH_`XFHAg=C~ z;0mz9cdlsks&Y8gIpU@ZbvgCEKBXl4$}!B-pY&b5yt+s(g$sqzQ0jGEwfw=g64)D; z_v9y~?2(5`W(i7*#mLQ8p>SLXE_pb?D>9oAlhYKg}kSd;hqz{Uo7#pDlZr+tuJ^dcS2A`b=)?T*)P*qYNSVpyTaot zIRy5?WHB(wg)C#W97l7ZLcpW|x)sceQN)hwYYr?O#$Tz-y?N|q>EOvO)LAdUYhqUg{^OXk&K$V?Av&J%-EmDBWrujUO zgK6^qUA(}6Kv*<%q{|RpHU*26Tz}re6RR9 zPCBy5qo`%lG1RQCPD|_CqCH^HuLYAOj}s3`)hjNI>6CJUIq0-lPh#mfHga%f8ViA( zjotD|Bg*FpVt(~^@dJ<8T~yJC062U~Ox#U^t~XXPC62<#ca&r2-3dXx#Yo{RMRyn! zhm~mxumWi+wF|(d;6BEx)vkL07x(+b`Qbdet90gNnsS>*Lk3(VtcHuN9*`M7> zpC2HiQT*i}B0^6oSQk=%0;A^p9EMv6Y~RgSS<&oG6=NjgSyA!GoT}=2=_3};qZCC$ zTaVTvh1-mNrjtD>BP$aVr6eNp=phx{$c_}oOvGjQR5^AYm#YhIrMRx9(R9hnv|L08 z2qZ*zA)BkQUCr!gfZaQVQ)~&w4wltUA*h*-o2Vfnx{bplT zCAY^thfr4<!PglO2lK>eZQe3hm^uLwA#r8{EV97LJ#F zPB!}b>+E6{djU4>^TAFl^7P&^*#=pGye>^D>GQ3|;plO+6CXL@k z`hYJEm#=tQ>%)6!!>N!@6yn%`D>~7#8tq`Qi#+i%M0; z&PhaX8-R7?DDRmd?1^K{0pQib^>on=#J&-VRW7R(T9WnM9yhd^V^&IFu~AS7EWUCt zxH^gSCOsoqy4{Q)1I__1_t0&~hb520`-NjyRNvt)qEk0ItshaT&Kl~q!9kK(aa$R> zs}UZkf!H85=7u92m$5U#54m4^1uiSUBa92*9yK>zP0<5FXep;|#fmFh+@~$p zdI!@MvNhK#&>BaI1-J1Xe&^dASIgJ4-3)h!?tq`qcC?qy6J0G-DUR*H7JkEAzixke zBwha5__d5%LdgN*cWS;0*U?9#%a#Y;@N;~#r7Zm{37&o+SYB9*S7z@$q^cplYt>UP zUq$ApFGuW9;NAF`bb0yO#f@|Xe6K2Ja->|S6Zb)0NG^HN)1r0$@fkvFa<%odgL&r5 zCJ)HxWm8haS}F3sl!g5x(eSx_6-A5K!Kvv0Oj2F1g?NYVk$jq34=3jCyMy1XX|yQi z66fkde4rJ3)AxG5*(nV`FYXoG$l ziCjr*Z3VHzbkOr13RV+D`yM!G$7EBl-Yq6NDkD~+3zj!8CaXt$2A8`DQ*VQ_NKhH1fCg8*TQ>azRT>4(eh^# z?L$B!rJqsp#*}+z;NK$uo*(pn_7m4<#RcL8)&gAk`p(XW_58dFKfI2#sKCc5(t9eT z>uU-^u|TF9q)BC@5xQBNQ-5XJI4pD{?tZp!B((+wU$)b`%-V|9?_gq*-)|eyY1T9~ z3V%i9a`KTHtk@9f7djmnKk3ZrS#at=w!(YecAYIH%nZnwI46u-uGcP%N#9-XvAAfw zd^nhrVmUonS*cCyL6&93u|%OUxb6w@!v2Z8rTa{AA(SCK0hO$dMNb2GrYzbgUx+=^ z1|DKvw6-G$x<8-6^HLnWFH;V_TmUfL$5Iq*+1k 
zIugKnbJ>yI-MF_SlAU7Sw>g;d@Vi;rO?1~R!fx_GgL!o;99!*0w{&AoTF$rLOzTGL z#rgi-$&C3?|LbG#?;X$QY7f)w#d7JPMH?)PaZf1&>(+o>3ydXs;bcDTk}$mwEuLTa zWrVncDHtUU*F;yB+1Rqn@VTfU=l|)Gy==7|#JfAIs;4o$ERVP!P#-rsSx46&mRRc^ z1hQEv({vO%h^Iqy9lL6+)%aT?&OWTMDxV^KZX9C+V2wTeMw}A(oz3ZVQ$7#aYWGG5 z^(L97Zwy_ph7KD8XH%%F?Z|f%JLvotGxq~^{M*yv1N+ckvSzD!6_;jl)LTC6(HdeB z@j}AZ=^}WawuE0gXK=OaF%I-Ke`Z^&T*@Q{I?+&U>(R0v6sRWk8sc`(Eh_vV*NP`H z({r_@IQqBLk2EpK8=6XOKcgR1Ki_yzGj%cv;6+dZJ%lThm4Su-QUPeI){-2GlpA!( zMqOh-a{y)aB*yzeAUd`l*(NPHtpHUazNA|KXCl`GM9uwAA`s%4k{cJpjeX2j1dMLB zRX9l?Ms#F0n#)rgbv`x~JWm%1j*&uBK(I0QJD@hJs`4}Ho8Q8*n$HUNlA^@*S^c=S z4r*0ch(cHl5a0#%+^fDiPO^<&__+Oz90b^?>rOsl*qExzfR~Vy6yQ;L7tgrkI?Ud- z665RJ!TG@zu%@&t5WM=sb+YA(u6O+$VdS~)V6s!L1wfTIfOy|jR~_0G@{*w#ki^47*d*FZnv@ergy2)}Ec79h+FHSvYuJ-JPK}!uTx@b~zUwi8!%^ zTUx1T&qL@wB_JR0cBYGin)Mb)t2Zv!6l^cp7W~llU4+^@mxn6{2}(kR*-d^jD}K^hFp)DXlcR|Ow*H~1EF{X-kV9LDfZ+p{j!b}Jc|50thbGaEpg5Vn*K~& zi#{s=qCW|fm~U(`A@93fwAuLgy0s82B+z$UAltLNeZ~tvvxm}qh<_(3e}H%;O)-oVSa?Oa zF&y7vwrL!cZ~gSWvR*c%{kxpywCg08C?ZQ4H_n4 z-tHvSO3HaUnueor&Szu{sT5ZZp*y{;ZC$Liq>DpE?wQo5QLMyr81D?r^2Lo|t|%Sv zg_JuE4iK{qicl)5ClC}ugKhG$poRH))^`Fg^vGt|rSo`_RAfhcBDT*uj5n#2*B%y` z22vH|j22Wuri9r8PPCbYQv^Vcb|!&K{`{qjsHL1FfP61b|GM2m%XFPLNoFh=b`;89 zOm8HdM%`pxXm*t7v5LB!rr#{gCw0PXEmX*5cNs2+gmvVIEpT-3?==y~WgSMT2MFm?#q=|0HAG zL7sTiJqfN3uo&y6LuvYbTB%5T$yLI%3K*V@ts)YK<8)yY7CoIL@>hG5oQ(4(oWmeVWwf*q4&Soip( zjlCy{a?m*90+%Y{KPc-VUDP0r|&!*Jdu?6Xi);ZdZ=ulK|4V4`wT z3>W;?ez4fH)sq|5=mPKCY~>QEupf%q7O8%6KmGAop~$-b`YIZs881Z)WE1Vn1(p*Z z#r|gTq{!Ij9z3&B+%yAf@Hxsk`xZ0`R@(tebD*1WW+^b@ae;RoygtsTI6!i=&aX4 zeNBg+Mf8%8sEMKf=4*Y-fo$Y`gW2hfZgAVd6W`sH8w^SAE}#yiaI`(#>lF)ZN(r)i z_hCZD;`<>saJVM_l^g@6D&I*|Mc# zL`iKuzxxhdT37FZi)!JRbjwqrwL_JV&^G;(nwC_<1{F^*YLidgzJkV$p~5jukibkn zS2|pR$$-%=QqH0mgSB}D<|iR=EwI0)ZG^-@q<`$U6Iud(hWVm3w;ow@*oA9Xz@_N( zHt$ZRoAFuZ4VZ`Fyt@TfFG;>G|5FPB_H!Q&BmMtC5!Q$M>YEF_Bt9R52)*#VnI1X>b~Skb@<8Y_E`%kV>0%LU zpClY^)5kD;wAG31n~l3iQuZ8p%@f!eHjg)m&Bav`Yj3rcAvl40cW=@AnyyXlIv&yGnQbtx5_6FD-5)W$3EU)rYeWVjg;@ 
zuT9kS$M=hQw4JV*j}|2D=(Cs%sDc7CtnyaBWIv`Wk(P`UL1rMzV_I8Dn_MS<>z3Q< zkPv?il&rU4q_V%U*GvHd6{lI>sleS!hP+coj1>r2q7 z1>nZolPVl07Oqe@t&rxXxP4=MfTcT&0@Caf&W}2+jHl+}qS_iao@LI1+LG?cT`~>A z?&@M16}Icd>`1BRXctZvuF}mWoyQnmq0r0y6zg?`?hUQY6dYPS;i?u9scbByT6R@Q z*7<(p@ygDV;NP`c)tKL^46*y$aW!QqUx(N;2!a=J$w7|%)Mf)c3Qz5|yw^7Si?%L? z*TBNNs;DmO-p5*utl#e1O$OG9>i}zW&m9dm;J5T))NR=Uc6Hl%wQs1WXQXjsrukkH za@QIYFa%)fY3x*LRHO2h^ks*lyd}hBBO&O$^9pD&$H^Vw=|}B>M*#TL)i;+O4VYp} zKsZ2vaNsKU8x9T3^S9h>rU92d^H0`7znAe%dVYXEqx>hD@jrt;+SEvWWJM={V)p*`=@Jzqu{xiMK>QF9whJ6h|M54xeH zF*3((JQ4KQoDkFx=;G^cMRAWs-px9c;$YoXw zXdZnVC?N?xsqF(8OP4C?sL?DL&HI~=fQ>Ex{+thgTupUqBGHxpC*#GU^kKCYdCo_( z0RGRmDZIb<`Qs7EHn35LmTe<(mV^TA=qMhCwcK4}-I{%mzFQU)^^mdhhtBpUa?`h>S*ee;9GG!9C4cM%{S0LlnZvg{^Iv%) z+!O;*XK-G|pj3_w{b>IR7Rdv^YEX-7zNSHseX+u#6t?!}=H_3s#J7iY$Z5e0biNtp z^8uYI>b)*xlB71jcG`4NJVv%N}-NvgiwGWvN7vT~uv-)X#=uNoJ(ZxsEZ$v~-FiwHSa%>I*#< z`kq$>{Xb%}`{$M=M1AH}=WdtUx0YdO81Kj*1`~b=vw^f?o8Ad7Tz)ZL! zN0(J!;{zK-__%+`m+g!YOCJW&1IapsykD_0EJISO!L*CLzeygt`UQA;gK!CzRutwR zB`YE(Wd;~z?St6sNjFczBRw;ec$)wl2ka%`|Vi`YldmJ@YC3@407&Q;*){ z|C>>;mE!an26s|P_s!}O-8P_h$716=uaPVbi3YRm9rZB66=9FvQ6{T_w#`T1Et=kT zoEvlWB%QQfK&c|&Gsis?%0$LY*NJVL&lp?Q^oU~v>mO0qt^>opx zsv=I(VR%G?8pohyf^4>mc?N%MvTDb@@{*!#Ve?(YJrl)SZ-N9y->B#lq zgN@?w91RBG;=`V7QjS-g#y#HeCk|Rm^7u83!vT3`)ni~??4~GUzZ-ND!-g>6it*o? zK;(@BDnAN(=^pY6FQo9Xf`)CspM_)!6KgBOBGB;Om&3AFT#^%i#O>=mc+;z4iRW)6 ziIH67r_V&VF$)?Xbe>8;c(OI)so?{tI=_hmlvgl7{EQD6mYmd@cSY)U+?EGQk}p9g zs7Y8oiIoo{|A#05Q^nM{Gs9Je*fvZ+Pavq2GkxPz3b-E)Qy!lBv;!36o~?CcOxIHc zQ{_D4-4=Q_bi6_<);_YIL;~DAhC#_+#HwEBEvU5O^>PLH6*(WOOOg{Dx?X^S<0f|8y9P4`&Clft8M&{carq47^7u7c zXXZm}J>lM1MZ>Qi`XxWI5&YVyrC72#yXcgh8z_zC(v9dIh&;#$g2ss#p#6Kl?1Pq5 z$>nX2+dAZ-N^qB&3xWm{qiK`|hDC1LBTi~bdrO%y*(Re~8pT|B6JQgo7s`tcR*M_7 z4Gun*uI>LSb(ui~eVCjX>b2t-o8rfzv(P&W)8EZNztmrv`RBjiDe! zCfU;o=Kb7PIS9YEnQ49eQpWYujV~(WeD^=Obc~nVS(>c!!he5$!+D*}C7)yg zevIPdf*l5^=PX!t>a_Z@z4Gt4mfNz>%!$&7P!SD*n?! 
zneb4JlP|f-5fIZ39@g)>%~@MsNIA7R$-j33 z9v}`BAflEh_joQ*#+>`dLj2EGtt|#nBnU2J*Ghq$#uI+96xDHLgBZTjiJ$aia5~)ZjfCt6pMUhc1buULGjp8%PHaN5ASt-1 zApXQGHMScDICX9(k#KiwrID+D4_7!nZw^A`HnwJ=?eC>me{J6MDbS8RJSEe6iT`i@ z?w`o>pCA9vvi~hFp!}Z$_a9UHo1W%BPUb&O=Kn7z(+2R%5C+9N^om(;F9`cbUI6m< zr!Fsuf{ZD6=QM!MbOH5_2c&yKaId3O9`o zt_IUlz?}GG{!BRsw5@)gKHY zQ7Dopc;*d9Ks7au$aAq#4Aar0DKz=@#o=V7MA*wwIr6h@NK%Tf@ryLF3^PO~?m!jX zdNoc6kVkOUA|Eci1W)_(Y+DUjkc5A(v)dzX^osk+W{N!I(|T@@^i`A2!60zaE^UVj z6~_$<8s<+g_vPJp1Kb-=)}}-Nk|wtPnUI>{?a6)BglX%xnnD^&&w0XI@XdFa!D~4- z6WP|>BB$p~6hJ@euVtS&zj1<1X;__*jdx`Ev+TvE_c$*Gv|WDF_L)yJiT$$QJZKSg z2-2?!5SKoe0?=^nool&x!32)DCwA%@2QAL~zJI|7Vs6X#p!~&6y$F`wY|bhv$LH9Ys3UrPlXyshYK9^Iy9lrrH=WHF;=i+frjC@+Zv{Gj7B; zZbZM~;HTp9{|*4xW-A(GEp_xL`5FiD;qeqSpiB4bJySf8=4!7jG}e>lUfKM#_kC$1O>iZ;V6Q=ux)AF^Im2d1z&s2!E z_i3v=@HUf;9hm!<$HsYorPc$_6^!7WwgoHxkAvQIc4LmX;$64m*C@bK1u5Im5?6en zS1mD_lY+>kNQBM*^}tASdWG_=m{`X-E_OZH(gI&ZWL|4k_G5a%KgB;U|FofkbJhL- zT4<(?e8pTCUmZB^d?M3yXIOgIMxK|xW`T}T#fV5^i)sQ-0`^ynIZim04IX$KEN&qD zw5Z0?2*wfp^==ukfLmtZoWe*}HYb#*)<}p;_CClQ7!=h%R$PsHu)!S@Xx&p|rTf>4 z8xmLC@_wvacwEqrE`{YaRzPFWB{?F=p2V_CWqAY*%v}SM0>rN>|MOKOEAGJDiR)bq zKFi>|eUa?Onr7T`qVj@QO~kF^ue!e2qF4-(JUMiS$H_6)74&d?XI!%VdJ5+q zns(8P7X0(Kdcc+FTxC`T=R)jpW3lf9C+E7L!AnKbM4reMq9;jo7anCKl6ix>n7Dy5 z7n05|lz2#aBUc!%!fyts8&Mw+m*1D-@LTJ;nO?f_e}6{iwf6g=MxJhwjt0*dSufoR zo?*L~#&J4LGC?5?A?1)mbYvVIj%46T+GN0jrIck`5v_}ZG+=Pf%iPYqth80&hy1e)acr~kVL>ru(GTTn#V{j24M8(&7JniW}K47LESUegTdPzo@*NaW(4|QTcs_C&C393Td=Uj>NFH@i54DLQU)a zZSYubD#U|}k^OfZT=LL2V|Fbrq|K_PTOAixHW6+)Bf7*>hX7XO&60D47?1D%F&vS9 z*YBUZ$Y~!PpYtFBon7{;p7@KO!Lj&nZw`Emg3Z7Bmg6FEv`MJ|I6$XJ69oZMvY5uc zdxq#O)VT+_9(Y+EMwSA@0vogQXBX>o6s!LGbGlwcE*?=*F;4`PD|kfZ2Y5xaKXW2A zZNp&1Jmxlt=&ITY#BHAEBbtU^*T2Iw&VsjAv+(|^VU~1TAuYa#neZO`;wzd55pR-2 zDTp3J^N&F!{5!k^@N@&=jT;YQ5E8F__pfBkb{ z=aDj`dB>^S&{P?8v;r~4o^3M$(2Y2L;=3);Io<|;98}6a_vX_-GAhGACsy*mq9piC zjFNwa9I#v)K;Hd{*T_e7X*?FQzYtL;2$a7}z z#chI=fHMM_ob6`Ltw-8iwo)?tsNe~8Nc7-tsXMyI#Ln-ss61XNK5vZ#=Y8F`AWYfj zw2MODwO;ygIjBA 
z;gule=fHysEdJ^{{ZojT5XunN+~>>tJMJ&XbP%aTq(hSpCtOA+a|~MmUlB77s>#I44}Ynka$mU>h*Ik9AcG;GHq zz&sk@sG_Qeo`GA;D2U;u^N)F@(-VPkO|;JBr1r-^#}8H%536G?wsDix&$;bd5zVG9 zi?{;)mz7~@^wuYpDlkmCb#?UNH;}C~FN+B=Qn>Mmnyi-4iXiR?*DpPix!O}1uaL98 zFo{3bRXkVJ z2yz%t!F7ja>qVe{m9;4HM2eI8tIdCK(1~9mCMhw*bF6fZWF_KG`FV3GkQW{osdnqi zmy}D`k36n_y8ZfwaphIDG0odmz7|@a6>o|5h^xI*1_X1!R06GrNapVGTPjuHza)aI zsAQzwD~S|4sl)5r%w2JSdc}k@`RKp&P-g9WbF;`cGGO1mHN6yE?B-*=K;yGs5#KCg zo8#MQRPsErD@9Uhw$!U7l5h_+4^yW<)Z>rG!Gz{TIYFC~5z7ch)2S?__uto)TBfD< zcgi9bPL8qK#L2yLah7g6)Jrnoj53QZ1$ghG9r_nP5?QR17H4W{~@uZqod55&{B&>iPxVsU`uV`fdky3*a)ch`1lG z8KRxWt2!7~>*JByi-)%9sg^Hao|MC*Z4ZXcp_vj+zWHShpmY;(8k0nd6VkfEeH%LC zg#8Q_yZ!hRpQm_IPKj#?Bc{*WdXMiq^{p-Xl32%z1m^6l%86P$Esq*?Pm5dVjK636 z%147{gCm~TgfxF2924zF>BjH&2m@#t!*oC7Z7?XP@#}r;ghov6hj?j}_aq0Np2!LF zA=8z?X1#45@0Pp^6bN{FZ12SZ%jyX{4^GrkQEQ#k#mh7~LRRRW)KZ^Lej{8>ZC4(w~C7{ z&QL!>{QQjhqL-G!oo`UL65h(Do^#xAxOD;;@>Ml=@k^r*E1fzIy0JQ4WMuKCUfJc4 z;a{Wu3VvRiZ^Eni5yQ*$XGHW@o&W~+J>_u}bAMWCz!Afj_3OLxpKkb6U-Frc>M@?6 zSDHPIudZ}N-RMj+=LNC{aJg;;{Sz{WdV_rxkV@tjY^ z)&+s&e=z8=3=8oan94Y>@%9!Rj@iKp97le&tCS1_h5gF*18v6&N{)L9rC#fXO<{?Z zhw}|0wG%^n!h2Om+*ualDpRL3Je$C2IYTOp68(>wmlDa zMmXMK+~Qn|KDXYgE9B_BTh1c{C&M+72<^A_j5AMUy=n&saB$T{=If82tPDC_MW?yf zrI5BzX*wV~ejbNE!#;2gM37ijUlz-ZfA81ryVdz<6j_*K35KS2=< zCA=3J-es8IV{(X`IvC)R3B0FwB!?$p$NYq%Spuo-6(r`CDIR4glc4A8K+?u_$u+Pi zy^YL*c7TeiQDgD!xwckPpX;Is%hsYt6>i%2&KrF``wQ{u)<4@Wp9_pstlIdMU_DZL za^R5bu{@*`Kv*}FYhU;4^jTfk7qeR8hMHvs*OakzHc@fSb`2Sh>|&BVw}tw)P|GZ) z9`7dH@uJwQug&aQF0S!KPTk#;91r6ybCgJl9p_j`fiMNFjj7|2m6`tGmY_)J(VBOa z_u-~M8*bPzZcZyC+6&ax@t}zvHJGooE!-kXdh?I#m7c~kTob})gwRJ5FdUa{)AgI2 z`5nz6vy(S7Y}5R{XZuwtx*d7OPFj95l9L^aH^Q!nG!;&^vu% zxX#H#7T!68t!$^{BXKuA37g)dOq>$W4!_3a^2nz9&*&_5f{j{Q;(oyksV(o?IzPPn z5^o4QY@EtSN)_?ZEM{}HCS z5LLoc7xRN&H)*f@)Ad-K-xHSK)AI*(`K_sg_dtycwYr6{F=PwqCCD;`i5kBaMzq4I-po!Y}S{Is0yWCNTP{w=vHg)oZCG zH%vP7?;P1rrn#DiO@(jWDEAIV{Oi)1` zJebRR*FSm1wkw6r@XJl;>ocss?-+n<7(VyxT!fnS+O`S{hx_5q@i&&-W 
zH;fxP){H6ro-;%+?^`Dv@F>`dfw0r9-&_diC2J3PMZNu=L5XL5>vEO|4keP?}@?q@HU;0A0G z|A7TDI~%cJ-o)~gcbG79X&M2Y1b()R&y$dbqw`YzzOD3=Sx*O(T(MVzXI%Au)p1X` zy6R;kGKk32IZ$Yy3rs(duW8)yUUBdKW}NA}{+(gz9_^VxuO`|>Hn~3b7*eEA@Jujg zljQ)q>57ar#t?>qDqx(N-=ddrkQJ{O4g)Uxh(U$TJqgBFQJ$u@@kjj#YW0xQmYz+3 z4R!eHo#kB5EPkoFUFY5ltk>x?c>6nCJ(|HY*_!D$D!M~5rLG==ncokrZyEh}h7+bY zwvi3L6AvTo_@;VmY9j`7^nAmcju3q{PMwZiX(gfe?5ELZM*}oZ` zhpXmV7er<8^{&{_x#SsS8=SH&uk~p{pu3G{XyiH_q>keXlgSGl)DDm9zb}NYIuQCc zsHoKZ#kH$FJ(BPQF%GBWs9hfE9Ks+fCLD$ylj7*`fwCt)jqqEI_x50{;f%;dq&#Wy z6Drbxk9od`8n8KE(F<_y(A8uY%#!qPA5A-Xxz8H*zKPQtTmes5kh48vO?W%%Gsvz+ zmwKMrV|ClLm&64B)RD7!uiHEGlG^=z-J_`b?ZlO-nyJ8HltYvjf#V_)7gvANxh1SnFasR$Y z2|)`9aIjD{(a{TP{hntr55Qoe&E#Vh>JTQb`})qRh#vp#!eQ&B?|u^t7dH&20qZe( zQM$@QZ1+X`%F$wSe)L$IzM&}(m=lgIE2<}jjP>iKAblUbF6ko?AOeE4i+F~-a}4s& zIoD)5nMHtv5;Gm@Xg-EPt64*(1~Ycw?bwIq9csEfRmyj2Aj@mSP-Vi~LL;d8(sI$(GXCG}yt@@)4AEVP~8cz4H*IILm%9mES+ zBXdAsTLp3-w^qJpF0{9oy%dm}JBTZP61*@&+AMu|oLnxoE>j}geR}WbobH<&_bj|m zYy714;S)7!P;;(bwy9dbJ0tO;-imCF7QE@~k;lVbS>4^$XkWxoIw)LssNW zU9PFVE6b1i%A>}0LYn<{R<2I@Gw!qX2|Ex_*iMh~{qc31+e*dcq3%w})ah|us5gJy zd|3MMT#b*^EcN5}wkHTFFsKb{gpI4OikKdxXq1*uB^Ht-xu+S;*OwRxtQg0#h@^?6 zCt!;ib#L%6oj_B!Ii7%GwyKRQ@TmxxWbtUVq$8io1>g$09D{8EWK|h0;Vk>(2I4Zm2RS4476; zaZFm{R*+z9y~CB|7}}T|dlwSEqTS~W_h!FDuXK1iQOqus@EAeaTUA~05Rv?>hMa;e znLS1z^%L&ffTTl1G&(Ig-GVweY&g%WA$R<# zm6MC7&UXi@tCiKb(XT5a$PnS*Wm*JlHJ%53N4Hkix8?xn2oO|?+sRkNmvIiyz0ajn zg>Zl1lGc!TFyU2Vb`JGfl_k%tqS918;{@Xe>74e%o=ISqKQAn562Cl=?b)>kJ32sJ zGc?z~S*;GRkXO93uMp=inl+%b-G00ikZIlQ9(J%^E*%IEG?0dr+*&U$)$v&!E{=Ik zi@qmu--Z%^-(P>3=?Q?}9x6J{Oc9RJ7qR~6) z2iJxaTPzt1&%gw=(#=NK&zJ|`YIc^#g{+uh-lkkP1}(V8m!Zi4t-tzz`K1pT$0<(a zFm#POw{+!+xerOmCD|T;9lP--Yk?F@y~Dh;M)Tx~cb|g->26_*8%N&)vVK{aD zd`r91#GAo{{%?W0TOWqR>=F|6o<8>D`t*D; z2YC!5%o9API(RJAC`F?4=j;c{HHPvxk0n%uWB3*LLBo|AuA4+x19BIgQ(36Uk?PHe z@11rwKZgqC)Hk31O5IPAJOnghI`|F7a~pqZ^Jwq_CM$O&OY+7WH%ASI~3ne9UrGAj!`fGmmHZk4}&G`mxRz6#*h&Kxv#bn3F zcLI5x^32Oe%ZoD%$*KBexXR?NGVA;4Yd3O%3Ou_3gHfy8BO5G 
zDT>dF6^)ia0Llf#>TkuIJGMa6$w5x0jKyFF7u+YOhW6Jp1eYNUg$|7C9tz{ubjLgg z@Xlf&Y9AgSYI9OCZZ~F@qbR8%kqoQGT#acuZejl>+q(55tkVk2Kh7|_D@qGJEsuVF zQduW&SsDr%*)Sflj!6Vu2r>J_G9CV*xq$NGuixvLZi{~#(-~*OAo3$PJP-bi?6TeB zwMgQy8?7yA@8cZFkwGEo)_AuKMAyrYGR#8ky*7jD)2CQ~u9Ih`yRC?%)0EK^!)l<3 z0MG=ZABlg#vl(FS+xfPt`y^UZI41wt{iA!s9GGnIM188V+rGC=*)eQ|nJ~Ip?Ae)I zl~1y=r){p zjhk(P?~H+-NXooP;PSX^g;}q|9*{-)XjHruz^B*J2**HCXL2Byw!D%?RN+(I>_MSt zIOfl^Zkr>dqB`uyb7BU*86i_A%b>QsUTZF{v-hD ze=wR0#3|HL-)xI1h^AoQy4XrWbhbg)Kn)8e&}<4CNS*BDDfI;k|AmdnCOSmDXduJM zRpg>kEvL43iIS7I@Kdnk`$JD2L!9=`>#ZHmIw`uo~s&2_0Z8-rTnV-!?~ z@r5GzF6DA>T{?&7T1ZGDP9rZz+u0Qv1>a~Ot6hJX`6fAZIex+jOyEJybe#kw>@k;bVm=Cp>l_)t}#|1fpK|J+vIA9iG)!PnJcSh~7G{>J^q zMO?EmTT1rzJ^gSN6PJ<+NsI67k(Y`hp9G7{3~d5k6`Stbz9Mb3OR`IjU8?L}z1#wf zk(aTkUwP{oI>E!#qbM-pSoOgVvo6BrzhO3FT~vP%$Pe#0g_rvQk33dqmk#(;4)IH7 z4=U>;Ur~|}D>+lv^l_%Q6zW6Ph89W?(pB8AhQ?p)Q6WP5XQglVf^w_Udu4km_`^{9 z_+Q!obPRwz;s|e0LI*9p>*Wtc(;7EGArpP?gp%|gt=LwYYN#s3SEQ=8o9}ybTn*sE zaJpZSzJ(Z`Iyhwwu+mU#Iov?MbyqXvgwI(0#wtWfb9^Xu!mgPMVF<>W2SKzYDnbP} zd}JH`u(#n!xBWfWEIxEqjL*_r%!wa0_i*YBr~l8o-QAx4XOc-Wt9fs!FGzen{7lvQ zK8~@7006zaWE;39qjfN5dHA$n(KEcgEobsQQ>Qln>{`{n45Pxy(J-$@_4LcIlxtv} ziV&ME4rw*PAw-~V*ff67Y`0y2E72=ytununkC6+sXdO*5I2G=sMju~p4T@%Mg3mMCeRase7J+aD%zyD@3cOcL z$Ta;j9QDrMT$Ww*4p)6%;8DX*cLmOTv)|GV<ZYaE-q_SoLE|czUd#rBiKfc`iqy;yq`x+EpePoGw3yBa*qV8RePb+w}PF< znU*2`ru-m*Ims?y_Eqi#77BNQoOD;2LB}?sSk!vz8a>sNv8!zYNgd}!%kxm5vQpdU zEubz(*a|@yK>N#>LTTLat$E3}KSlZRpQ1cHaM*XJ-6ZS|fx|oP-Q_;sgi3Y`&bFb8 z6tE|9CivO+)?$lMZ%A$UeCwEmC>>{C*?oD(Qcl>&#B2E*s?`JLQAPUWHjz+Y0-^7^yVfwOU1K6uXp}=eq?<^Cwn37 z!hIdgbA(^P#xAddL2a`0{B~K+c-VU&&@k!mBTao(a_nxfsVEo{bx{p76{#+kW+v>c zu28-tZ|D>1^H~pU<*wO!pFcTU>je=3Sm#H%G-|T-@ySJ#3wCiXbq~UJt&&j*1s*VK z+bVODO@u#CKR(BxBA)KK-q$(dEz@RoG6v?V8PcE0JSXY8#r+4t>UGP0gj0dj3*8C1_)?BetlvU>{UzjZnT!QLC3nR;_WRrnn{Bi39`=qBDRnVo z%YHeKkntkfbusT55|ug+26*U=ye398oIzVEF^&4^J$xF6>dL@0mKAv&lTnB+PwNv- zY~1kmWS31;bNtMSb!#j^_}1=q-bj992b zpHV(yo|Y4~{_%>I2-~ygD6)K3tV$344J1O{v7A5IFQSTpvJg(6DODfW^c_qXvmDXO 
zOj_~E4Oo2{49Z|oI-Z`YdiJWJWO))3EUN`eI~K}bJSaUqtmM%umtm| z$C(WK=r>z@kM5xhQnUqJ-VfJyEcpXQNwn9`^*T9052U9I>>{Nvxe*tICvExU|s z*Cg%v0TeK9GkYm=AA2=s-}lE}?!<>GFW>6p7+vj8KHF*3i3_kn$F{COfi)l*Ti2dB zMVFXx(?tACtf+KW`8s$pwJ()*DSjA=FNxdq7asx3gWa3s7>iv$vr?YLp<29Av7ZX78}abteoNQWb1NcL3YeHdC}7>#-j41p9$a3{&97^Pi^xcJRXBm3 zak1Mrc&rticl@|-(){hg=V{f2GK{By9qcRq!chP_#0AEdlF%|qf+Xr3JjNO?Nfw%cySJgJ$jYSlGNMH&v3#2R@^%ACrhhK8luiYCX~fp9L1 zU6e_(4lL4P#}Q1cxhWG}!gykru9QO*r(afA-@DqMglx{k+RAB>S?|tGjCpw&8P;tk z)Kc=+663dnZ^)u5oR=A`UFUD*S|sFWtBUqlTOob*tqKD zwLZp2$1WjNe8qV!V1{Z+B0&k7m%MrX(Qe6TS(>|uRQcx0;v(Hzk@cOE=@5n};VPWV z?t-R!rr+kQttDpo;bKb>s=7CsrKEPOVwI>%oce2r^nvN}*|+>e6wtVGN+~Op4t9$- z1P6@nib%uO%NIG|`d`kM;+C&FMPWO=YMq}n%GRE>ozxNHCj6#MG1c|X%YB-{(AIc` zh9|`K?NG6~$jLr3Fk7uk(YY>5w^{1N1^BZ`<~VzcJ)oE+)ci4qF0I#hwjZe|r5L8o zt^P3YX-yIlulI7-NgeN%|2hvF^l@pqL6%pP54e{m{FxY9k?Nn$EeM-h`w^|k zCiG)u7>ubLzSTf>;PpBfil+^tffOPc(?FGQzW2?JAw zvMOF)EqAu<-68x*>cQ{4_NN2HhPSoZq@kz`n@;=5N~1=c&vhq$-^`_V?{wi#!nGfv zQ+}=cblFWp!v+YIZ%I}(QmVz23ih{8L8R2Xd*-?)iqJDazdVWw=v@G1#R)62q6^U) z@{DB742pXecD9@y!_MntBHe;c@wb|LkL7?AV@f4Uv0BzbzJC6Kyi3-$fY!y;!=d-( zj#v7r&*H42AU;t@4cJ@`Z<_ra|Wop40@vING2#au`liKm!8JmvByD#_SZ3 z77l!+pac8k@!jr8pAyR1qxzjV#=`D~Cnd96U0rKlmfEvou*JuK1!0(F)gH|+qYml6 z=T>5Dve8`VxhfH9AS744U6nWt6_H-QJI0y0t|6H}4749*x>HY997H!7wQd$NaFm&wk-GqGD6l_X)(f2PsnNn=Cy)&eOzHrj8pAeDugXk@nkD-mEUV?f zuy@f_7Jwu%U#{1=awC_PK2B%HSMceX1ZmL;XI-wx{y_RZH7nnD3RNjSw(t83)Y7_53QQ&y?%?|E40tj0^#uz> zWh2n*ZEEt>%h5SAssmJZvr5~ae7EjoW_-LCEH5uh(7y&{xO62xf!k|+=N6k}5mRJI z2GD@Dnt$u+WjJ4RciVcFF`{-ZQPicpJK0{SY>OrJFn%o4wAOgrqvvkPIBw#C(b)Z=2HK6`W+GQd=NT zzbp;z3$i8c=|)3LH9lt==v&8(cMFCiQjeA4Xw2tQOLTfp$)NzNi!E3GlS}5iy;h_e zi19Sh0Q*LK_a3#S)p8#i5vDG%O-V<0{N2=ryPPuT@wdnN;Zh74OALil>q&1U>ir-; zDW@(D8q(lbV>r}{rP-3JUQCle9&xfi)sygsi{S;i4!`JORBr@aS||i^ecZ{;oXd;N zs$OJv9TkA4s|jqI+!XF{dq^bNy^IMdBX;eDAEoQ1ui9Q_TyZBPmLGY8BDm>`*Q7cO zG-!JB_3qguye&sACjg9#e&M&%o_;#PG@`XX)OAq*B(}Z|>&Cu;Te?zLCwNP6Dy2qz zKF=k*iXmT0~s(~T-^Qw|GM1~y-qo) 
z@Dp*DKVMtrNc#iY@?t=_hJcnyco=AA+V1Ya0)Ng2FhYf!auFkQy{334`@t z6j5E4NlYf@4uwR3Mx&VXDD_z(<4zN2UD|Ii+Hp@z-&1f!NUHe3$ezHY6qA zk88M6=4~?aeb0ceeA)ru5Uv^pAE5GHJ5nypmkb8jX(7X{u->ucqpQu%U4&3JSaWXu zvUp;TLnb~1$ENf&7t(CfaQr6d?#?q-*hc#pd6xuFvXs;LIvcaSwM)IyOnm(mQ@J=E zOYiG=p`8S-ONuQDpG4|Sgkat5DRrsG3i6;AE&W1qZ)Q9IMpcWy;#mhYNs+u3HIv4# z2J$AApH_7QsB75T{BdX`xi^pzfd6NF`k%R6F6hh3{pl+yofPRe`U#z1?G`q;0lXSe zDV-S;#9k78BHXa1RH+_wan^~|ADas3C_nv(Rp5pNx$q#foW9`is&Y zDaAH!oag76?OLXtTo2q{f4Ypy%#_1$BuD{d1Ot5~uEBMjH_YvC;9h4`P#jEy;q-MW z_#_f&GlB~d54%Hi%6_!8ZXmz^w~#E5i-Bw)ch|?e9l}>@7ksE*nz!dFU8&eS7LQ>q zs%}4aOWgl#0(!eBeXDmh!3PQLJ8JPspo^Bb&j+t0(j^oDFZ*qSE=*A(;6|Ah0_WrM z8Q`FPTr~Y+K>bqRpl-jmCKzreeCy;2dY$oCSiDQv@N~TKQ`G!89&YTy$(f;uOpQVR zlR)tx5r~TZqPT8fRJgiX1y6TXXeSLy1_q9&-hCOOeWmW27R&As z9~&5eut7*0yyRLLaI7q|6>=j}G)T0VnPE6XUmeVdxfceV3V9Qgcamv+Mz^B-r{CmB z^MPmIe&tXTeC-1RhuEW@JeM?}xuM5JKPbFM=WQNXX5gDJoj9#CC7)|#6?Q>pBr)4b z7TmoN0IpP0FMNtiJ@VW9WxbVcxB7l)8G>9bg7=o!tO2f(?`50uPJjvX44a#eUUO@D zDYG>f5lnKgx01<;GkB)gBRUyvl`D%s42mT(jJ9`wERggcUTh!mHHqj3>6{RH=HcAJ zPJd~DE_P%y7nFT!0-dH%lwiE6Z?1C4 zI~Y^L*?_WwU{I0-5-n_VB1$}u;kr)Jt!CJ?9(Klad8o(eXN1^V&p>4D;zQBW-hB!Pl5H_HHvGf{K?R4oH7bU)5MX zbzNrqv0QQ40t}QI`Q__aO~Ky}Jh=zZR@oD+68k`@hxN+N=&^s#kIiiHQ+a3x7+oa1 zYI%PbRu{q0aSwRwbvpg5P;Jws{jXZU8YDy92)JLkIXH$y;yVJ*5`Ac$)>~TM`b^H7 z*voFY%!_ysX1lKkG{9lh=OS|ts?P4VFbqkD1Lb(U4|4O-ll|^sqYqEadZS(C+-^=) zb?WWCMpy4!BG>lw?)n)Fw{KYVC_}(NR{_T6)+Y9;l*4X~Jovh8-z4Cv`b=`>1Mf_H6h-EeeLuZ$L2G^1)CV)wxmf1*zQo99 z`e6o>70dMg>v#QUDH(qM4iMk-30a9%^`RUkybi3)kYu#RQsS#!j2rW1O$ztD$T4j> zaL;S^yKKt52NQo0aB!+U0`OVn4RgFq$1yB_1MZ&fyQq&ff~pj#;^QQ|ScPq=4nn_q8LHA`d<~E*w0? 
zO#IF|d~kUu{g>t<^7O!JH?DNNgzS-?JF<+_brjFQcTIH`2dC||#0BNwnKe_R2Q5^B zkO&2fMYm}_;y+e{o}*+mJN_;E)ijyzQG7Bbr3pT9GJljd?@XJ0tyj5&XiTQsFeTJ; z*WY{KQ_k{2{o36f(&jOIZai~*a<+gw7N8#Bxf@LT-7U|9!ox7E!_aZDLxT)Axgd5W zQ)YxibA%--SZx;D{v`D*qcf{Pb#JbEtNx&IIy??1ZMFlYVwK@#`bINQ)K)EEUq{=T zi5Ow-=?!x|;5W1B^B(R+Rg3F{yw%B8p@c@CJ%PAk(Nzm_>%Hf08UKxipWj6wHZTYd z=%tewl&Lt%GSb8C`fe!Y%eLe~3_|fiQ1aC(sr56aG$~brIiDniVFgHNq0KnGqLAVH zg%_JVx@OUJg9052EIW4<-+O1bWqRHP4-r)vw__&w(6IgU_{D7&^G$(HtC9+#s5Yc49zgDpmOvjadTgaSzSkNR<120385t zZRrF|@~4c6z*;$Lpou?`p?Z?Ie6vf0EFu;7^()5|G+gylBK`7k$JW!RBR@aXMG>bT z<%>^Y=i6@IggC_+40w0qbFTuGl zKVNGxvbV75#*NvEkfyno?0B}oJ8|{~5iqzHz?-yl{9YJ8Rhtru&$Ck2n}|j4AsuE)L$}{*JeZLvwZdkGwaBSY$bnTa|()?wVb>yqWFhAl);CgQb{d(rh)dj6K z+1ee^eW~BHtRLeL7FxDmo{qcR8;@7_aCt;UY+hPB ziVM&a=>AY-A!qq6VF4?%ds@pn0 zDHD6KAe$F%^0dR)%6|SdJ6fi8-J5Btqkf6XnqOSuTTCKOC7uc=bZL+GIjy6jO}k#K zt)mw8G?;TCWPbupP#05!e@$?hGyU8#UO#VV6;27IbV~7bv@A6l5*}j)Rr_;4qk&;CsUgWdK4E+O% z10`v@=V$HoXRY?SGs$8N_PuS>zRKn-)w{)?A<8?!vwn!S?E+FYkD7~}B8KfGZ8Kss z^R6)3*?@!&dnhzvw+p|*Se7{qDRFf|zZYG2;Fo-oWmNzA*OVxv=rbh``=V}kWQ15x zd7b5))v`fJRn>3ol5hr}>hzu_(G1@o3iHgv?Sb`MgdP@AdCyvnI#8}e0eFn#0Gx38NNutepCL$})hw$>u8nu$9P zRTM4Smvf}{MwW&~*G`LfV%5Jh1YAi`lzrE%TPsnkaLO836}&H2em&w!etV_Z zMX)Tq{d>w> z7&C2~+Gc|4lCOBy#ocAfn@qcgMmG}8p9KXIrQ;M?Adv!{zCSCK2lXN&UR(Oxo153V z%I^z(I7WhXNy5PX^Oa5)i3!823K}K%Bbe$RnOyGAc34lbC8h6*t(gsbkp*UZ{kf3T&Z;9+Cr8T+I7EL_jgA@dS(#xLd(`QXly`0H76LRnFt1M3i z?$e{eVQAi6Ug=mhMQO3;F?ZMT_Q_z4`befnPnb~rDH2qBlrMx4uNKbW6&QJPK$ZZE zO`@gt|HK#43@zGu^0QQl!a(R7i>bm7#r@$Kz49Weif&xJHwV0Xddw`NR|H}w3c~9- zANOrwg=J|bTr+3vUs(-9yubg058`-{Z-q}sQ-w^l$o#1PH1B~-Xy?A?L!G%W=H=_HLEyboYL_`tFEWn1{ zWv`(!DBZoX2UlQHjgCIq(9RwQ8c9+c6GHg`SV+szEku#^gOOv}m=Um6WHJq=j9t#m z>r5G6@q#2!!vz z6yGuE?H$?Kel|x!*nhY$*c#~~MWJ=L{QThUz~BtR)%d!PKM3#Gujg)9Jb62It@Yhd zAT6ru%|?>LweVi4a*B~RzS|CoPtguxX%f`)#%Io)SpXS}h^1Yj2m_WD<&S(;q*<}g(_+LJU&(r2n}N$8hO~LZ$k%cN862? 
zR&J4U7Q0LB93#yi2kfEO*6Hu|HWUBgaa5%)8BT-B`nGxBo4M~iUP_|P9eY4?87l;Rs=rkD<3;MThz6{QPw zzI^`S(#q0_ULB7Ra-;EfZG`vxyHl!TZ)EdiHj}X1`U|C=8Yky^gqI?jI3 zTSMyNwO8FZgiX>N${|(b5YZrdU*l$x+VttjGA~(9v7_$sVlI)dA8l=S>gOHRBBIvH zYy~;SU1r=DKJ{cR_+aZXcVu;s+zTj*V{S5(MU)n8|BP)ku;c zB9YdYeZctKs3_K@NzScc?5;M=hJrwIXkm14*JP6g!FIF>Xe#-uL;drS0wU zBk`}-$J1z8VKk(V4~1f=i$`0pI-M#$h%8Pw!9~(L-F1tmfbV3g!@eTES=E=w_<0?o zY@jBD>VARpKU3Fv%nIEwonme28W`ngtJid~W?YzNjweRR=f<+;$-|6%hCWbNFBuK0 zFaPnAt;ju#@5~Bcj<4TuvKPr3reK2nCE-+)c=YIFiCi^tZg~XG_vLQ~uJDpx7T=dp zznIg9T^5hZG?{rlJ~#TFaL_kj8DHXl?y`Uz!IqA}?PJ6s5yw^Z9kscOqGw(JEJ5@zb*T z*oexYVpP_eEY{WT#%lVQjXxxot> zDyufBx|SGHpDbB_Y?t@3w+-?W<&lsk9&6+~YF&@3_l-Q!N-z?G$EJE`GyTu#oGt!~ z^xeHZ6(TmB%DC|4loAa^gnCk;S;B7WRQb)Gk>ENbzhHAXdrNo|jUS(X9HIr*rkY>T zI@6$im1o4k4=vZ*p0zU51g_&*A(vL~o<$;zmm5{!zY{V44wzp8gC&RL%iy9<_ub$z zLb-b{Ph)+l`Ar%pNDJb&6E78(OI=i?aMb`B^`YjzcW^@-T(LT91Kgf#vIA3}!F+iPvq(wzN^akoyTJ{bJrASg=6&j}F0pvn z(42rMWFv^XCQMXNcWZtHwdL(N?pfpvPQ!u*d=XjTYV;}e!Ql|O5V_e2TX*I=9W>{{ z>#$ug7yS5H!Z}NDZV$--Ot3HlUux8H^m>=dW=FoK#1;@rsGv47TC`|}fy>^*qIlbug%99P-w6=r@8ASp z$nF4ov)oOOes{jTDLqGQ`F&?5IwfT5+1!^zr0Ko?0SHTA5J1(MVt(Lf4X>Mfh!NBH zLMFMm6TXBs6EUU3WmJ$F<-de zM_x}VFbExdun-;-l8NDw)!h+I4jDcAsX!>?=LSW(@lT-l_kPr z47o`dK@U2A?)nQA-i+)gIzth6y5_e&JC$4W$qZL% z%L-RJRWr=Z)u!Y@dP;bvaQpz&`hgro!oO_U49@H=mbPxClmQP3R9UiN5IVYw-_+FU zl|16(zEEiuPgeBzYEf$ zB6G}-&hQ@rrcA3YgI@l9JFffGKOPmN@}fE&+dxYde)oo_MTj^K=nT8(4Um;@4!)gw zV|Ufak(b=dv+i|SF*=eUjLIpktsSfljoII}Pfbu04Q3f=SAm0K1Yeg(+8Cz z+O|k!MR*1QB$L&UW~Qt-*`T7>Ofnn59QM zH;@d($+D@nrx&^ecKBu(6T+(|HLQs41ii&%;>BBb>S@04>_$Iu%^GTTUfYbvYvPwkL$n&~xccldp85U8p=&n}ET zAsFxy%C2w`Sn9eQ?Zsv~Z&T}sL<+|O2M~GR*FpH#+yw0BY-*{i!f(vmv?<$1<^NYu9Uj=06hBucWEP_f!kM~J;!WmYL`XcYVH9Yx|3W6@Yn&`NRVvALgDR3~c ze>q-br7nC~Oq%L)m;1xBY&O+)b_-P&rlZQPm=_rg>$x)_K0g%06rqbhQ)UrMk!#T| zoRjSz2r-ndp4@)*?|F*8PFw#P8X?4D$^7mbRM z{@F2|dl~Ue|HWC95{oxDPM%>f|Nh(Vq^8SV+YxTFKP_i(?1o2s=}ph#a@k;w2ZkOu zZ#%7F;5!|jJq)bR)2!v~P78cCg3AmYbjpasj1FPSLc&w^4)ib9OBR-|$2U?8q-h*$ 
zOIj`)*TP2^nvRSplrz_*IhtTQ>up9m&2YGyrGU<>YAHkPzRw%x2@$%<+%||saTayN8Jo8|xm@Gk95i%36tn4%mq>_O-{^5DQp@ZkK5Eiw;@g1|L)D10g2V_r zKz6e(>F)X*=Bv0zF@ITmS!jm-ATi!vVI!d(K66_vu$pqw&llk>>g;$D{^uo6eL>xz zHw>_Kie4`WubxSxD6GF&$dti?B=;Ddyro#R{^cIaM99r1Co$CTxVUCaa>wYNJWj(GcPy`D&PZq!0zUO^&ebeN2A}1Vg72uV87K_nveI_y* zQN!aD8~NjzZz$P&>*D5!I5U|TGL^Lg6kZ6p#*GBocHt|;S$$xkESVI+ws(N z=n2$vGhx8hk=wn|Ev!K+nQLsLzmnby>}>@9%tgL} z3BEUQ<8Qv6!hxcA+pcJrvmKmL+~n+*-(6{Le7b+U{j);6XxcEKr|tqEN^8ZNRF3S@ zm75RPVH|Uy+&w=-RW`g7KV5HnhQ7R95gpM2ttX(7c4@+AuyWUqX~S$OVEk>8PHkNH z<{sIT`&C0}m7^~bz8iKlh>j~6Ny*Ijv)#0S&BY_ldv}Ck3V78#nE_XU_Xu*C!+I`p zOxIZZQrlzkFPA!Rh~Gb{s>dD-Keo=307<6twqGRzBgR<;8;8qiOKndlr$@%~ZGrbd zR)Q5T{%ZRkE_p1J3^?83R z{u2>7!z7N@^A)ldTN47hyUl}yU-&J3R5oeMNI*!7jSfc!F4CyHhUk4zW;mmv-`EBE zs`G*81f&FInL@=PWcYGO0D3eTTgH%*XKN?gmm3U!ZTp9dcjTH-atI#u>#-5%MtAU`ED~GIeT)ZKyn$B%R9x*q=St@J!308s zWH#U`L#gbiRxj#U@KPvJ?>#70n0KfVqx1Y#*;ve-0Br#}Dtx-R|IqNie~&sJN4TyO zyF^o(u0;#x;tqFoN6-Dl@(Ojg$^wZW)S5l*+ymNVi&|h!(jNRRb`^uoN-GAM8 zvcqB`s=fEU*v`G+YP6`EN8);r^pBynyrf-YYoo;aU*=Uqxh0V!HSep)-dbJI&Pi`p zG)rQ+fkGT@7ah{t61Ir=T%QSZL1!9~hcoKVq$BJP- z(~4nEmx%jTTCzPPHA=J&VT9i_<#@T=l}mvAg8!nC#1 z4v|06-?y>?<`&dhqb8fSzI;J4DxE2Bolp5*l>`Wtm=9aNl1s&=$uzg#`^@udIhu&0 z>kSZxokG7BR~uCm)YL*^M1+jAflML*$Ry1e7)x8AM9us0KNZE7;y6y>K`Gjl#S0AH9Y zmCrY}t$c69z>91MD&o~X?DufZqA!*wz^#dv`D2?0XQB&J)fN!@Ind*;k-Rcj6kD03 zG@64nIvVgpq*OZ+r6=su-CxbQu^v#x1aaZB^52GLt5Nby=YDyZ^T}RJL9XxB!Iw3+ z(SqCDj)zL$<&^Rg{PlDV?t9k6u9~gS^fo0q`P--+!ORWO@$V83`2Y!)Vo3MR^HP~u zI{(fU5TqpCzW)2gGr8Hm@UvdJfJQxTznDqjz1Yo9xDaQFuSs#A%UdkJR0TB0o{YYU z-pog`DAzf%&kNyREPw2F>IY##X-Xjsq~H_c*ry0`1UhrhU@qy&PxYrv{zx-mG?78!5(50ARkH?6*bjQ{%x_-s0g>YJdiR(b;incTU zJWyFtE94r{SF@;aLSntuZ0oqPtS_bGgF38He(mV!akoXpuaFRr(KK<# zx;h|;7d<-_G#Y8z>cOHUQ(a#2IoLw=z45!+hmOec0a+s^eGiZ|eqjGf(dh&-T=ME& zZ=^A0V~NN2;_SWe_RJHU+&7Gbx&(S8iSW|hflZG$O9^ilWj@dFzu2%cNYdP;2(xk} zg0T>M&5wD-!gE<_l$fGuXlUYjgSxk!bwt=r$Hx04z%3zu4 zvM*PZ`QkOj;>o&JY^2NECK@II=3zvBjA%@=2h8E)+0=Z+I8bGr{Gsd6b}C{0lRJ~+ 
zgp7>r(5(f<%&<#W*<7f?yGm@F9QmGvl;`;!DEF-4!xQPefCQ z>=FqTM5O=OA0z`Msk=`@|J6iz_7~(|S-UleUAU}5)sukkeBQt2%e%YZT}K}KfbHZG zj3~4ErBJyW4jrfTx^9z`YXoEj_n)3Yf9DH%7MXXcdWEOO`}MFvE-&QcIf`wax(Tnp z86C&@OpIK+Ps%1_1uXT5Qg=(NPWAhfn7lJ=|GC}WB5fP8!gLtN!6V#u^ZRBS&Ly{F zs=pf$^n`|si)%<67=TWw&Z{|~Cg$g<{}vQUI|`&w&`NzhkVe#niG_hxr(*UECvyfJLzt0&jT#&V#(0CJ!J@d?Ppcei#0D>GUbf$PPdV7vk~? zYg6>=WywUvjZU_X<4-TX4#pxpDp#WJU-0n<0$k;ZLq&vmcY6`MvD#efO308Q+#@_z z+&`PA=_R?FUj6K0+Bq|oQae*fMi++TQNl$i6^hVI<`)PirbV*LZF=+K%#Z6YV49tI_2@u&pd`ByB^@u*r#F{oj+t4r4q50xl zqg%5RcPvmume)U4Yj9z>d<9kZDbio&b>_rNIj-C}M#vwK>2aNAU+?kO4h;P4i5_6H z0A|EjFBGXW_zOB4Ewm6Hsb2w=c}rK!i#yc?1#jK?wW^J=O)h+D37XV^kj}RAB0B&m zZ6Un)lb-2s`((#j%sNsOsQV!dOestuWu|(Y$JhyC=Q{l@hA}CzJvT^wI~g7wD`vue zE5srqFF{QR@Y*b=L^P*s>JnpKTfI|zXK!r^?cUJwLf`-qM7yx*cw6Fes!-cBnLLRA}z+>O~;&ZFzEPdb&4>^rWo^$kxn0`FVj85i~Vk zZ}W!@n!g2pT(U<=8of#DCoN)Ii0z&>1l!N%BOiiJCqs)Lc7b9?$Om8($#mKefoLz@ zQGT?-wZOx!lEJ$tB;i5ZunWjwejkgCG>mJO6}XvFTfm~$z&L+(SJ?Fi;JxN_DS03? zs|_F03qhtT@NRM_!$6ibOvv`<{@T4xZ>OJak%5h=)>7{Qe7>V6d3#SZsMp5cjjZ>5KUK zw%n{5`b37L@Ik~!h0?D#G6R-4$nQA7jO>#`SJsj2-F~M~vM{h37QIt(WO>TVdyoOI zW7J=YmLm4D;C(hi>~RL9V@8CyfjPDMR9spTFE;BO$iyV5WIR@Mp}z(qo^%A}jC@-; z@8IJ!QB^m1gpVK1b_oXNx-q-N6f2zIU>->BP|ZOV`uTY@F&bvBV7fAMCqSF+X2c^a zRw0EptcxDjYHpoGJJ@q+q08k-?WIJvz9*q9pORGAZ*Uzuh*^JA~F~_XS zwSm&N5+DFeqR80F4|3%0F&8 z+Wff?VT$t4TDZq*>e#LZ;89)1|KL#^b}&#f9=*@Tzy)?=hP;jyJz+q+(#T>*ECw_r zGPpET9jaV`PJI`kP>|t@@(Ytj8Mk(L6wLxqg6iH8>i`J-?}0#U7Yw;(g9ij|lS9+v z#Jhm1y$htLWi|}b&0xQn-r*rW8~(i7uPD>~JD(a&+}qT=Ua)*ox?_Ek&ZEc5Z$nq! zND(h6ce`JBzAwyvMg9E?o2FL6EY>Mg*m9`6pI-F5PN5tMUwwl(i2P})RkA$_%cN2D zo0{Rxc`w10UUcl?#hHlXH%%J*6mo!9$GsW&@w43W>W*R965&Scp2gCQ9qZouy^lv; zK1H4_jyL{swm?es`9^2N#~*c(?2DS#_v4FII?}!|!2vTIiUf1X#X8IPjnWllt}jM> zaZYd`xry>MKAm^0S#!r7UnbRdBA__5OP6juVB7$jowd>CF2g3L5q*Y9@Z&CNkscSOt(NBb@A+J3|6HtzVQt ze{|T{7Od7AoQ|VC{`iHgL;gk!8u2r~-@620;4Hq^kptt~t! 
zA}~XE?Y1S3@141|+gv|Mc+z2p3h|4eai|0-RgM)sQk_&U<%6sLC-TwX}a*64Du`l&V_paN2fWr|0j=a^YWE0jfsEcuD3b@*!o#S%0Us+^P$G>&J{p?+$iGmKM7P-dvSq@q%?9SqWU27 zrcE~67yh_|HZt~34m&fU9F2W#WF!d>fp;29PN0lq-?Yi@>B84AbunPxqY+SND>WwP-q>IAz=8+x}iDpJ!ETcDBg~D=_J{qr$jIX&ZE< zK{B7M|L~dM~45z86%{&D)~Q zj_|kI#yqdT=sA8~QLo%CD+8&% z%c6xM-Clfe4~0R$bclmVHS+4S`VI#DL#?7{!KDx$dj7dq8Dci8EWbAk5%f#jyk%nA z++5jO3u~P=VnExVEv^rN5524pPY1*f=)UY|%@GCiLF?~H@0B7@tg9)#g%aJw(hPA1 zq-;$Usu_o0_aYK0R^g`lMYmM*Ji^$;F^;0Hmw;jOEPH)p_*{fNy*+-xvAo6PW!uH3 zt+%ydA%I#h)Bl57Yq-R5k=pCeHgfw}^3D3?LxlTlRyufvlqtpwx4uwP34XbJH?2Rw zY~N-R6PI!ypcx#`0w5`N0k(1=Nx&k`(v;AU5x4ixXtUnW={UDucpW! zk8`gr!I#7NCYW^RblS>On|*NpFim0oFw^}?`%PRJHgiQ26kdzE=SA%HXJ?B|hLR$C zsBSuTQ*-&Uktu%4ADdI)4ZD^5D}?>3JYw`fYW#FcFWY_T!pK436bPkY*B)L4&9Pek z`cK;_cFYtLole^#jyHNzvhsm@lw(8SO}vm50UKejgXX=P(aV5YMjo@Hd%%6-8ql8I zQ<=$_y{6f-3Q>Vm8^Pw%bGa~RLppiV*8yCcNV^{iq7sS$OClFNoApqNCBn!~qA>8Q z*^>(fw}~TB>Y*&0RYgwuD}HB~_t`9^o(?S2yq2$Oi{$ZwgknqE=^4!*Lf(4O*+^ieey5Uyy`f)=yzWE<}gT-SdP7bd@(ai2rS7R?O0|Vk?IT_XvcCp@<9VkEZSvh+6 z0`T&onUL`M50JkeRh-stTz?{dy6Z*1kNHukU2{MApJ^WOlHyedF8NdAbdldmH~z6= zXDc4`Sgd~XPd1cfMpDkxYQ7^f1(x*sB5Z)B&$$CZFNof!9|||J@}J-i<6b+!V-%(bxAJ}-s%z1EE=*DuP#++VY)B5rNBP^%#{zbe^~`?DzLc00DeXXo;cPvDkVy<( z2Jy69btVSM@@I1tRAOmb-=DaPDanW()JhWOqyD!2kD<#@-G_*_=AG`(5%2Rl>P}fk z!>4qto|Uz}APoL3?K+e$F^xBU+C{kB;5Ipy%)9!9f9gYmY?4I%?t`bj6-}FSqF22G zF9T4b=;|q7OfolF%T=|l^;)z5zgr?jR5$_6K0Ig`pQdY@kWWlqy!$M%bF zs8nPN@je9^8T{TmLS4`=r9V6G_4U`Yq9XdUfyTjc0;tkppkx^4C-gA zGp`Q7`u{0;`I&je@8;U0T%+uJb3~x5a7rgaoBrE}kns{jyutxxOw!;7_?z3*rqf$v zTUVW8^P+Dyxc9!_6WIvbfr3BT#HtT%=il*)qQM1`XYrzm1;+Aswa3-$xg0fCd&j0@|iJLi6r-x)m58CzcO z(GeJ!?U_AM&d%k=NNDuoH?R4|n)VG#j8c>OVAl2LfCL?KP^sw8zRwe9&wgiZwUgP; z9zWangxaio;FyYZYcmm>0xcQo*<~I5m(pW7;-tA8^$Nbms~+(bLhvszGV=x*-7Fg{ zLi*RTviGm-Zb~Mt^c=;Uxq&1{-|v*)MvxqbA|XECMyW<~E9q%MFWKPtkbt|gdnAk%TuQ3Z%H>ns;O z`w)Gk&x^9~vg^a!exCvM?EOZ3I0>hoZ$AlVOLOzUxw6)&qgE1-8y!`Z1AO7J6oT-n zlhV|U+B^U9sfr;;(9g1Za^&Qm7t0pH>5u<;3qXlW>#;=maN4on 
zdvRQ`02+YWnU%E5pHTE&fumSSZSA2%K*@Sc_d$=!-p}4i)QXLE2-Pc2i+Jb00HV;14nH5Xj92L4vlLYaZYmFKKHJB|;JNL# z;rJ4hm^?Y>iahWrAhwEjB_w>YpP!Cn`I_Ac-P?}s;cpSYB1lAdtK?k0u)RHtg`^UAVvm^cd5|O# zhLnhguKQb&H#ByY#GfOk1&J>Zbl!|c+G^c`kplrUD@Y5`r{A;K$LiS`5ubcuX{-K} zm_Y&a4T^>Qzr=X7-pn3Y0gG5txut|*Q;zd1YwMd66do=H!PnC}6aC>XDmd}gR-Dut z*Q601Uo1vqV>9{g{TlY_utt5es@7546&(DOG|6MCl)=qSO$)orB}#BBtt)1W_W8p* zK^a89YV6f2H!TV)*Su!!ieF-{C*FY)zv{6Sa{&$|*8}uo%~4}iGL+>`!JkW292Xl+ z6NNQg(e(9-{ZFBIdj^vc4>ohKY-ts6GRi*Ej->sn(cYRP0?^W=Q(MOmBduQ=|B8C6 zp+V$9t*Q#%0br>}Uq&E`3mca%3{A&iak58j*kK->GI9aP7xWSjqjJkcW7vSIa?5=dxRn90 zGwwFPzzWG6pujz#zW)JW7d@XZ0T(ONMLg8D+bOWS>MY=Vp>z<4yL6e9B+lR2_lm9y zkK9|=>cxg>F?E5KZW-l}ysAZ@Hc%53AgjoT|N71uGj?0I8PKDv*-`!&Pu=eLtoFfK zj#wSr@O{iV!MoU9^BzdKvwbMFVA^|K8)N%{=<(el>qByzYH ztanABOM`OBQ_bz@et@C6lmzXFocXVtSa)sa59;TGxc0Dxmi&>bx;Fd+&QBMg2q&_{t$zwh-$F(U0$lJ6V1z)F(pJ?uP`VY{e(;4Yf2i|cm#Q!*HpYmq9I570jWk3`G8 zg@-<)4>3j`6P*ahUb=}n{e9wknk3l#3nIStNVn3sz2yrPT1|BJ77XZNq9I2G=)_2b zfkw76(+QC`y6Oe!KN(rVo~d3NxPPXv$awp|IpNGyZvhYc)YB;0ilIfCm7>h*W67TS z6|<#fn_G_lp|1*e+(qMXrhvxEKuK|8fP_EISTWKhQIKL3#{6Ws)mWZCo?MKBef#&W zIFgj~i}g}NfcTP(T({UjDf~tWgA@JtAON%Bj1nQ6J+RBAi?<+!~Wto_3ciw11F&|i|>jH{Y9B@$&o$UX*bt768^il z`bY2pO61QA6jP!9J9AldqK7Ljuk>H)uIbwTKewbSCVbw3qwEy0admSnK!yZg-FqTg z>SdwLKouZRlIiZukqq01d+2j=A%j-HB@AsW_k*^Hu)Q?c3)s2HeY6bm>bAzc z$0c#5J647BuRk-L+kJ>0tV4_z_lCc?3BGzF( z#}X~hS2FbebK}u*{?`eNu`NS;t2ei&B_)vChd_khp(VF=iWH!OR>_}ivile*+qX^A2>{}_B|U9!p3yf?*O*w4#@DLzlgtb?P5Z=m5}`(-Af-YUu6(c9Ydd@c$YQdfn(U>t$5Yn|}v0PZ>~RyB`1J1N7?Z z5M$2;jdhO1z4guMf8+lzJpTWR@dpBphfRIX)8)YJCKC~%Hz6zc@85@}d2THx zrA9sl5U5W8|HN(dARU0R=uR_9i4k?7ym0YN=(U|Oh7b8EBvT&z*x7P(6(|P>%V;0 zKX+C;p6NS3;o6hw>FFthyo?0`UF)2=Ig24H0DZ*;SON5^r{2Xw8^LqHojxZ|y@g|6 z7BqS^{VV9fXh^9>``q^xP>j_CdcyRwZRi=A>~#3~1q7T6oJe0RWJJmlLe|E2HQt5O z!8C@V69E5f9S&gYyh;9QB4E72kI>kf`}^O(Hbyo)BZn6z211|nWN8gmSXnLJYJ5-# zrH^ynQ~r1z_l7;q`CT*{kFl2)>E!fuHAibu&ry}aO4SRq*PtSSbBP~-ocVJviYacr zsvHmx@Ttw<-a?KN+94hX{~HcbQiYPotNU}8YI&>DRMZE3He%KAS4l~3ra|73oYg=U 
z^3gU$hjYnCeDljUbuPAtDW(}oOWP~&AIh`6Vg;P0R%;(ET1ICzK;W$i&080>o`{aA zl0yCsfdM0i%HXE73VjmT`z0$%NBW5uqSFm6sN2$5+_11z z`+i77r}%vSl{Wip&!GM#&SstGOFu}QH1vo7JHp-NAVo{R4;WQLFUhN0m!?Mq*n1uH zU-19Nn60aaAJk%m^pjG1Me?6(TNsKZE3uHI~dmq7P?0k$`0Wo zZ_{TApMGOzCJm>%SN;y<{mahDvk$O0=?5{ZfOL|-Ufi7lfhFVU(6$%ypX}&gfUtcc zYh{q9q9RUGVq!D3nuyOZIG>ic0^@7|9r~89e|UVoU91~fu{1D#LOy$T61%Qc6UFz zk&Y24-EnUX77$gV0o>vu+MESdBT3P2F$XP8&%fXn!O|G_WdM{>$ak@wo8!|AYAoQd z14kq`2(Z(pz%ZNL_AJ#*P^}k1YTq3@+BeS!1_mTA{u=)$aNfuM>m@2;I0J|agpqQ$ znEvUXx7rPgpiiG(KjGr4VS6}T6H6LCJ>m2S&bAm*dyiN{q}|y6nTcn1`xCs1ZOId- zADXO0Cu?cNSrohXG5WEK>9Qpp4XcN!D!Pt6{}fj;qwV1XkWlL|bTftx7UM{no}f$e@z(v%luHfSa(ET{cP5;oMp^$-*M9kd9p@>R zTB+5I0~~@?{E*#S^TtIVRPG1=LaJAfITral^Bb9lqT=(Lq-L_AI?qwPA^>bYS{HkO z|EM6@M(S)mTt4A$Vv&X+cJ{Ele*PeO-s*Q_n&;%67QnhT&^`2}CGmvC+1@IFJSvIN zIOtARFSn!&Iobh@grNl&yZpznJ8=vaL>?<0Do88t75-2hH6r89c|s ziF-8%nUd4xYTbvSxSw#kd=Q!@LQG)|h-H-t2=B%A!{^VvRG72$Ip~vzqJ`xrcmvQ@ z7clpb|CWm}N6S4#^Up6hxp>fe-XR9LpU>WE#3P;Tq0vrdmF~mJ`%!PyW$+O7ipcC( zoGuDX;n^j>%ha`>KN#@{F-fr4Su6G1v--`Q>z1HjeBKT&{ehGCC@0!2&xkubNWB{r zLGtU^Q-Am4rmKeg6Epq_HvN;X~Y#XN4c| zD)KmR&~Q8VDu_*>5uE<5I5#)zhI(-J&!Bw}si1zeGaoK6sq`W?FEfKat9M>edi5pn z&xN0&UG3ffCNuvb4y5xj5%0s;Sd#KNY2r^QNy(bPFQKH@oM@}MgE7&EaWpqa&HRXh zHbzp^>%=Ld>nJ!<7+6U8wonZTv>#DWM>~!;fB$)Mu!;Sv{-fd1k14jZOplDzEG#m= zaOh8cex@lPGA6S~cuS`Q26AJ3v}Nu1^GA+6oY$=7>HF#D!%v7j?CNYQXT`f-%zwPM zZk7!o#Q~pe3|c25HuOsF{Wrb(52dJJ?jUm6m8Z|18WWppYCgy90!M$K{noWmkK=sn z56m$?3Vrm{GP|c|e(?fp_0arFCC?@1p(y>ez-#>5IFt@88u(v0lN<tsMG!fg*9% z>8D|APa7jTEt*;vBFVE_<`{uHacFntib+jP4G5@u9D(28y;4=Y5>Ln!UX&kcZ!dcX zvSu$!zk!ad0_o6(GxaaHh-48AP^S|qmHwxZKY1g6Fjr+VMcjh=o_VMSW!x?g>)k3a z6WrnB$2<%ENxR}qgAG0Uy7B78(kZf5+>jN{I8$jFG(6FC%8#PA+BgWE^}V|-a{RNr z?M7JO!(W*TxE~}rT-02hbVd|bM9pL(44Pft+7m0}2XI_W&? 
z1eK8JImpvNrwCV_DDGP_y#|(**arLUJv1QbDx%G36&)=cFmrQr=SK@%(v-EG37_m$ z)a*bcD9|rWNG2tb9jsxB^yvWA43L4xSB8gbe4IhRyuax8A97P#4uA)spEG#1aOtK} ziF%3wF_)uBMfVUHS-@DSjs&a2FMv-6=p%qHtyT?=y7l<(w|wc#|0BNCS^5_jo<~UV zP@WSxIr*bv$qcXnlQa`heH$m}yQS@`9~%RGWP#eW?F%|i;)lc_&zhCw#f+rcM?rp4 zj77j{|E~lct#A|?(&praFQfKsy6^>JMA9M?V1+y543T=_q1L4Tq1xS%dG1Y*H4K*` zkJ`%nZLtIjuTEm zX{@|V!`T_blU{xJzU}QfHM&hF+ktfQ|KjN_!=ie>?_ovw(A`o>cPiZ>ASEFo4bt5V z-Km5~NJ@ircXtjT-Q6{G&iv2#eSVJ@7cb6*FJR8S@4eSrd#%VP71&LK(m}OCxja8E zz;eV%`^o%cFSgT22x}FMmh3!!^C$}?Bdz443P_P;1|YHk??Nho3&&(hw$Of>&JXz5 zLZ~VqE2$h^T(I=wTK!%WBP$An9YSsM7U{lgG*njUSNF>AV8dww5q0?IK2;B}4Xa64s_wdQ zqNfh}kCfHL3lMQocwlTP1AZNVF0z{mVw_VDKlYTLqh>zp5+%B+`T6nivVy;z2vQT_ zFZ0BDY-pSnE&Y;s<2P=2G)ik^QTd-t>yp%>^6JdPWL^l><7Z|velvdsH=6fPNq5dY zx>TcZ+74hCGCPY5GKyrmmYmuDL?*5)DIF8<)wV}I@3fZ3+j&ut-KF-^f0>$>Lq8%b z{&B0bJRuitrti``ts1-6-p%+D(Fb_3PG1G}wbI8yF8MpoTr&smo0c!z|N39--s@%# zlx&l>#vk63wwC`;|CKDZO7D+3^Q6V3NrjIR^6&aeg%8&kLTs$8c}&=fDuPoaAshE| z_5YZb`?=jBry~^Clf^{#*KTx}I%gbAJcF7fqJRlfv3lV+)1^Y)+ogyN6;Vh|+RUIT z%|37n-IBQg?g$49V5WLDzNxX|GqUb&glaM8m$M|RfhzE5pzM3e3Do~_oaro*H-(_P zNv52CYYm*{)JVT@>?1E3&O4roS=!e4ysCGo>YUy>=vvQz>C>t+27~;!LeIe?!)nkH zV(41!xjY&jkS?<|(=j{1-ARtJ9RTMaG*RL7&%0#!L@ZAX+GBxVEMO`)2{1tcx7RjZbRoH4*9zU652b`#N7Bz7Q!erc<5h{VFQc+C28!@Pzf znb3SYe3v-mpD9qIxp43Pla+__urdgdU2J5P26Ln9n;~pCN3tGFNbC`!vNF+aj0)yQXNN zKXKvstJS#HVdGa#OtyOv>E|assm~owuBCl9pmv@Z~E_*AY^l8$mBnKl-V&0wC&YkVaXOcykNfHDD3bpK)}7Q>&rVE{+U` z?H>iAyL1p&!MK6Z{y7V06Y05I+U|yOQ6Ak#z2~egC281Pq$#7Gmpm!UHZG+7ubsE&Hgg=L;}w_YCxh4wHZZQl9>H>I}8sa?(!Mw zaV#d68x1@HoRTp6mJUviwP z%C*6!jtw`kPg*b-T~_EfQ{$4+Hfts#0?QqG+}u+#Hkp46cWjFEZqFUVIFFh*BmZNc z>`uNcvQr|LsFx4IxTu!K@PKJ^-Sy=CX}qWe&N}dgBN}M)=)Qrn@iiwT`Qx->jI=p* z#HG^jlw|bz#_0W)qJW)^F}?tLC$EvQ&;~w;AZdj~5Re3aTSpb+RvVGe8uiCcoV)IeO#q$=YsO({-i zPW^c!{wB3!k}hG@qY3g~XMfVsK>2;q7PQ=uzSojVds=!(&UwA+zbW_9#myS1mJo}2R$wTRPamw{?B zQA=MEaMrRM9ouqd$5{KCFm^B%UEyp4+sleRIH$pOOy>C&$SI8faUL1b8P3rzU&x&P z!ZC_owK>w>jLmJ0u;5XJ2nz(g{vSl**8) 
zSi<>c1$qe6l4Hj%zjNCCzk&l-?+yRyk4ypmQRN;fOuN!l5co?0#JS+`e9}+x8j7XylKg1RXtdHEgL^_M<`~(+JBEx=$PZ+P@D zT@K}4b~Jqew3Bj(dkc!skJ!K8&*`NB3fKL|vyNQA{mTmbY_YCnf!!|**z1(Pg^w0f zQ+PFH<>fInH3ONt3JMCKH2l%}^72(_yJU*Ts{@5J+iKFWUzb9@4+o=p_a<~G(t&`) zL)9`AOFLZoRg_hz+i)>)N9l2$GO?66;sW(HJ3%a$N>d~AOeb(4wQ*nj`9mw6h9SL0_p8TU_5 za%0)8okv-sODv8PS&G3AIa=^B=;!*;3);*~<5#}x-Cq7`C{Wmxr7_hSn>99R&STBH0eAuEF zI@C6v@}pnu`xJ4M;ZY^F2FRMT;AHbZ{@vN~ety$YgddG5M67nA5+jzMThB25KagGA z=s=CKO(OlYUxJN~;FpIreSFWzee4*on{l=$NCNlzE0re|QQYk|aq+XZ_-adIidVAZ zuzx=Fn>{I+7wowG=mIp6lI#g6y#kuik(}&RU1n*=G?T+JtgD?nBR7J|C1hj=xqnj4 zvpq4{-n;QE02NN|O~4)mkybli3PIJEQpIC9)a*j|^odYmc!cgr^%Dqe!ko}SNvF{+ z$>nzVS&>fvu7;|9#F-4|$Zpk5(0rMoDm^z$h^HD^rM2d`;uh;K$GQ?;r46nuo*}Cw z#KES>obp2`XX`oe8331#ITk<%4R*MO5_(Mm=6I@HVur&Sy1KG}uKnXtxe#La`IF6f z<7;I2{5r7FD~U`KXJdS4o@OTxIB1`!*(udJ3d{rkJdVK~lYnknVx`5+<1z_lzoG+z zw&Jy=imwF@$vv|E&(uc;3iqPTmOU-}t4!LJo94=MM8x(*T9(rHcZ-<0 z_#12Z2>B(tcIzR+B@bq*g(z@N^;!@5n46h2=J2~9)$#=a#B(q4qi4#l+F`Q^liR7*M-j~<0+-Abb+f$) zzZ8e@*dz8mOJVv=J*)n=hI2K@G6vi18CtTJ=9-mqt*M7Z-Bx(tXJeGv043s;;y-cE zGjP=#4?K6EvM&8AF=hYg5HX%k`fOj-l=|KxX;uMiZHHF=Yn`G_;Gmu#D=Dyf!etAim+BffI( zFt{t;l+;m_q%SOUHPaqS&1JecUIRVItv-BIP%mKShFCD7-K2`#bGrCO( z-eLQfCeR)e?qHuJLpyjU(b>VWevLkUNv0-3Z_NV=>B38WgsNHV{Q-h;>OL6U!%0=G z|Na$b1&4Mk^0KzsHw~Mn{RXleV9!9_c_UMl9(`sY=JQ{Rm~|5KW$7*aRSoej_|(z< zyga<>c{y_h6yBDRU*n!K3&8)%bYFxRGGW85HPoYeo^OM`{!;YzVFotKM2!;~u(b4D znA$GK(@-D3GGk=%onar&l<@hNZc7s_@_Hv;@6e( z9*6B{9F7Bqh~R$t(Py76paDhpOuM{p+&WIAI81&IQ5ZH8-X`fNO8?x$!F zT_|FOzYLoD6U!#bmvJ8S`sr4Ju z#*dPyE*S;wZe{04UW3u4qdOV~nl~2NccqJ74~BheL{W1Se8ROn^f;ZD{C=;R-ozP# zw@flJ!fh)kERO65l?ns{WCjEnTRY;m4Njt%@# zqkyK}>JUCd{E*o0n`f<`<8+{Kp;qDxfF7alm^x`#i~o@ReyZ#$R?JrgaMaJy3FBPu z`_aJj4`l!?T@^JVAv6($IaKwnSxpa)tbAcNXpM;B|Ko;R)s|yBR@vSjvkT4w<2JOk zs5ZDQ!I1`FyLwV4!2mFST(temw&J#_4&90s&@eDi7V*Auo*Nm}_gMP?0Eg1cf({fx zXgzJDBOncqk^&iY;wfOkaeXBOgcCskfD|+{(6P;;)8X z{Nb|MmOC*mdUg^7VsbN3Ryy|FWqV3&Y2E(m+Z!FfqA}=oPib>m36>JPoB-<=)skV4 
z^y@Sl&ZF~2zFx&($^2DgH|e2Zbt?P0c0bxWI_Lg){FQfu$Un^L70|Rw4=5WC@HqI- zLq>Is!B_t-dAPvY^_bvB`0@tWBSPm}{bd>> z9CETXMXQN%8}Br&Ef3DFH5%jqBd(tYM^7w6J~Y+JPh6X_lY)nS)Z`AS8(zjfa3jCRDj#jI_vp6rob(wQ_dC(-X>Mj^^7c-*YkYR>sVw_#jk z)a+-aN=HbE>Zlo?WBc_ny~FxrnDlGnz($llvc}Ob(2|Xl%fq9C;?Tsa?jP1)hDTBa z>ETq>s5^K@NA?Dk{@v&;j4+ZYt)qx(y@Q%GDiDU|=^L15!qa3pp#i>0>nO1LL*o~| z?^8RZxmbxq7$c> zq4Zo9vZ>F!`zHk^)G0kfK?H0dviLXN9{w$?h=x3nRpiVWW^J%r@N5y9jW|Jz*25{> z-%_!h0`fdEk|J51_@qLt?=0#IL@L;m0;D7(T*f?HmSEw=BUNZOnfTza zoH5*7vy>!sNj;G?bZ4~Fv8&u}%VT1Z8Si>PRad)S!u;h{0#$RKodHQ2#n&B5Vd)y!ZG6kp;ZSlW(&YS|LDFmlO-}HxjR$H z4$FHL;`gX3VERu7&&>APPvlGO*4Sqg4-^&ZZWr?Z>J$#GpnIR+!D{j~?nO#Ot5X2u z{qN~~b&mbcKf0PIZ`w_N=Y#?+yB8fe5uqOq0M}aMZ*8O*W zKI&F`l%`LA#~8A2mRpp;;1>45OHZc9cG$3LFW-sUxFkfmR!s*r5z5BWYcq4$E%5KdA^o2U>v#TcKiCt^_EbJF zCf$FiQGR&M%DN{8GD*H|im?ML*6Pm}2Y5Z_<6l495FZNVm1vAjE1eM*EWLppJSwlU zQ#&mAh~)@dy*ta(`84u=7cxWB2u@fF z;x@;^~=$Qgrq94zK9;RSUT-~j-QzV3jSVJx1|s!gzR?WVbiB~naz z@>B=4iR0-Nhs8!(-U!j$-yjZc4Cs*Ba2|{bSdcg__5bmCv*Fx6dQ8X?rW+opQ!?KI+d0{5{;>DPSzlSOsN6B^TE^VX|FSi#^u|YRhA&=I zZLz@?k7WO1i}~!W(H&geES)I=$?%N}H-NRlr2f`lLs@yAuHjZ1fZ96t=7pL&U0vo; zi)LuG*zT^CWky+}s&UvTTOZC^w%E?5XKCn)#1eZj<*>65sTUF|6kP^T zA5IsB2wt-n1$oO|uCO>-f)jBvj)_9TyK z)%(ZLjgOei=Ck1rCIT4&Q%QdeJbDtZ`pGJ4n`aGFZ~QaUKin#kB*)Fs5gW>+RjX(g z&;f*GWzVh0E$AYWSl=+j@ciVom&aqxO$?J{9c2)(AC&{9u(*~5-SFSt=ZlMii?Gg> zl~#aKxfchJG#|@+l-a5xg_mWmQXa1xchEHS#<$`jBSL>8E%pKd-&Oyy3=f?y=u7KT zySz|CtxNQg{Mx&t*VbQluixD%|9E~W6WaITi7meKQVH=?C@n8~b2?7UDZS`Yn!g~; z3xhrXWtv-|AK4(z6c=tgIsM?6f0)qTShI_c7G#7w_4y&IcK>gxtB$Fz&>OhzAPW3o zod|_eV6V1UZgr7ThkL?{(k>dX^RwuI=1tk^YYYzNk;Z}mjS8^(<5_;<##Bt3}@{Vy~c zn6(VO1;*0vvs;d{?)S!9CY=TZdlE81H}dZ+#?PSomFaH*_n54HYJY~tuBN>#iH;}k z5pvK1oz~yxBdqMGVY+uucQo9iY}fWJr_IO|zT1v9J=r}srm!!&CU2MAg!nJxK2HWImHn}RQeLU)4 zvd<9r1z8&W%A?2^;i~gTh~d;5SAPQAw)L$Y3)txty06qMjsm{Uw^Ruym!1O29ZEo> zWD#3lD_V#r->Mii>{xc#Xi|q~msIF{lSS;(H`WZS#kQ3PW*TTqaA=$t<;$ObhI$zb z_`{`Z{!8U-)|9)Ko*)Mz{~DC#r)!P^_E#U3q#aEyzGq*z-l~(>PU$7g2yd|;iRu*C 
z@|q@CbY)!I&t25n`mO2Kq{wXgPyd~20w{>Kz@+Cc)q!OGGeI8En$BP>r^wuq^s_eY zYB)`#LX$}o`h9Lzp2R?J`DMrblf-c;G0|Ucc6{|mdVo}7p#!vM(tdf^%P^Ab{q`W; z1;ExPUd;A+RX>RorDw}=dq?sg^J~B>#B2woZG>zrIi3ezg1;jvP=%v^g8m1MD zkEuuOxqu@h&D#zhFn<;>QD&Jx|A?Yv#aIK6dYr8W_jVNhrP%79^3U@ANW7(1(b!sG zPD3O##2;#B#dq$3WSB1+?~;*@@H|vRiK!GLbfP1%l3l5Dxyh$m-+W z$9kU~@^fP8yW_9|#L_d=9*OPlx8U_J$HI=HIHYxIgguom8H`n228JJvmx;d3$vRV5 z&%6gF)RArnhSu$w9$|6HuBnzwth(ANHET}^yKNmmQ?2v6IP3V}Xx#8Ccq1gLJ`Y*0 zXF2w?zpuYt1`OeM5aFl70?j*f$vspA%(ixu*#cikTW$AevL4WaKB8%8X!f@M)PNE) zq}HR*?)rAeUqH3w6UCV;_Prjxqyl>R^_inLzRt5UZq? zwc56T#m`)76)PsGUZ^~&k2eMQru+=Q`QBEVu$N>kOCG05at4@SL4mSVNj4;s9k5GS zQqlfFFHaPuM?K!!|LDC*WBRXq{R@@9iDA*Otho~X+pIQd%=1x={`eKB)!*su$#NU| z$#M+~CL zEY=HMCfCncB=(j>@52gmqS~38kg657->E~)PqrityVAGRvHd1`X(wtLqX{5k+$E0w zHIN1u-^&I+VVX-fRt&=@AOUCm&ldEf$a~iBGK4C(?KPF90bUNDlvx~V5Zv^umQka2 zVyW3>$R-_inncH_?+f1!@NiARFeST`Yg;^v~rQP6+rSZ+Jg#EI_`=6Y{yd_6- z17|Rl)90eMc)CQ3;})^J4T=VLK40FM9sNo@tW1YN_0E!W#Z}67x*2yb^COVX={9AJ zA$Iz8(FW{0#!K4fxbN4}{Ai{Y{(k^p%)hK7=V^gc2ysm7xB6uhn;Hz`#%LrSkTh^- z2wRudm;MoyQw%v*zUZDR9=q7xC#&v8NoZ1G?`hzjeK$o#rBy(&sE`vtOz5QHNEAp6 z5iamVxGsQZ+<5>VB9@$?&Ir5^9)_Z zdwBl-@lVA%-$=^hKV}Xo5LgAY1i}wI?@myKC`B)QI3@w$2@B9z z%QoqM!@!z9>d;Zvc#HzoDvFHu1igLo&Imdnr0BZZ*lX37x%R)9Fp@7Qy4Nz1JkvIl z={8O$*!;vjjwV04*jSsFN9+EnQvg_$s#YM@A?-d7=;Yxi?>;YQ%C)bkn|;&#G-ach zcMXVcX|9w*uV4$0STC5Oj-MM6EO=TJw3Aqa&`7|po228%D+kXr>KG9M$i1u|MpJO` zshNKV9VlAx`(;|(C^D+Pq#GCmF&rO5*Ge)E59C{Fctm(aYASvfpB@7LDw;dW{H|w8 zgzP)0+*i!BAg6M;r_nB1(T4(&!;@$|R)y}#zuj+aUeJ(MkHWt0}ijc78n z4xViWGNln7%voHd(Sa`^$O?&3$Q#+a=pU7A8D^n(r&wfOxZqF-BLTP}wJXfuMULRK z|DoA&R1&TCz`>Tx3#X$0<(3u;+4@OXQZMtM1sT@M(^#@{d5q85UvCG>?#&rrn}wPX zgb<&6MUJKz5VOKa9F%pvAmo?UBJr2DWejPq_~y(3_0BnlKIVAi`uc1y@0J-9!B&vI zcVufpl=#OF=wdflzIkBPcXo;qbp`mMJ*6<2f+Y-k^DW6MR9gxNd42Sdm26s4_?Mnf z9I;+e|0a$d(KUUHlJ35Pi2N-;cUX^{zGBy-IPmU}6OMPjTi&LQ+xH$nb8}t_SK#AC z@Ivpw7P)dv;TEXG(+6T%1PM?Sry_~JYfCRohHPxh8LvhMCcd#4^+6E@@^>OW|MD8q zuY~#e6ATP;4GzKXqEk7N1V=O9Q~ig*NW53csTUf_U)hlioPrpO|rZv4U@-E9vPbIqzj+NiR*Vr 
zU74Q5HG4BJR?_a%LCr|Hi$RiljyF|F+xOVbAG{jK>t}$RCTbrLGdNytQbNARje!v9 zCq6{Bfz1>t?cO`O)o&@g>ttFRJZX~+{81jiiYCi_l^yl5|K|Qxl<$KONgI(DMBuV< zQn%US8t}6!W;8Z^`xvFhzM#eH|C!^M1;;qwm1apRgoG9`(-pmE=(4m{N<_ql9tgJ| zc>5kPLWV@S5q5u`Ydoj#fv=#eHK);qE#`+T_H=Hi4f@IXb&!*PTO~Yr=_CxW6DK9d zVjy>&=&A&-K1$lzQTRNr@KyTUgQEiIO;Gh21j(`Z4+po5_b0MHX-p#nJ09yJ6{5j( z3VvvUPRosLMqrVfAa{Z-R&7p>{gmbG<_zcRYVt!^`!m{XsSX`>q-VoQe^>kCDfX)o zmVje*GTzegTQN`MFJDrFFDA!RxVDVW+HOMs9Dcmp&-E`JFvSg{avt2M9NE{;cF^&+ zw6Y5$WJ!(=q_Xarz&Ye<{r&p|Kpo-`{(9l#Lr}MVu z;K*2TnQlYpHdAt#f#yo`Tr#SXl9D4d)M9w0GR3yxa*uf~9=YXD%P7s(ghfYQ9s!oX zHaN#Fxk8)S(N^-zpJ0E5mSZ1qhm7--)hvS|t@^_#Y6HU>nph1V*#VhgR{%2P=a={@aNM*f$_f1~6~Yq(7KN!yfP5csf+1Vjgqx2P`&Q=w zz8SbydD4P*p~FNaeily5@WM6#Thxa*H7%`MIk!2>`QpH+VX04O8{*kpJ>-3Ik6cvx zCj>~1!Faen!j`3SYvsSq^@pMV;S^Ehhx8_N9gOzftg#-FGN}k#eZUX1IeVJ~ZN%pi z(xHblSN$8vFsmOMsU;sZfERG4n}hTf(Pg37tdheP>PqyQc7rW>5oW z?2B8#oJrcM*ogaa>xLWsiqtfCGvnkFXK#5_XTFCclWmfzo7KxVL&P$NnBNqphB5hv zJ}+}X?}RfZDrecC@1J)AXDRkoTcaV$cxSh3VpPmG9Eh@1WJ%G_NLM86MC5o>L@Bh0 zo3y3izz4UoO6b-XOT9h>2rZw>aMWD%fLZ85J9Rv#Uuig}&!R}fe`Ql&pIS~c-YZ^- z?AFXyvip4ElK6QIne#GIc3RMy&s;==_&=;t z9QnGsI!0?)xR=Q-25h*?IG{%wtXQ&#-F-=oE>tmIz< z^Prc=1mgEpQJqP|+Ntbv6S1Fv`>G^h&q7;XGk2Kvcs!MveB;4?4*22GMWu!pIU4h8 z;QNvvP(SR_T;~)yD3foyeAwd$k5lfK8&3v&?3e51J;ts!U3MX^4SAw-a~F+$R;tp} z+O-7oj6sfOS?!Z7$(x5ZhI}8cy^>LH2sZ~N-l#EB*hyv>o2c}uQ)eWz#%^RidEk=! 
zoErl{lwy&e*xR0jEiH?%{}cdHKhUt5q-F6Kr|rCBiu+!$QB5{K9qshY%u6<_*UUel zM}{ygi%e1Z2y80Qu|(tU;Sp|-k%IDPxA%G?_tdHtaV9OxB9F(H8$~oRgyY1LJZipj zw2CI?X!!JsA^0%&AM<`j4v70;BJ0MD=ZzC#7!m-D#K@zZesa@#f zty}1xGYD!}^P}0>neEvTMUXiE(XIJl!>>oJkOc9DYt6a{880`NHK-hQz+)c zq1b--z$skRcGh^b%bD%1<(3_-$8-PTir3X>;@UEN7|1czT3@+c#SohP97JI5L03-6 zWkS2`0q)63A>JDc#8%ZNe;p%oRM~h!2O}`m6QQP3fu1#e z=xO^s*1VBfb%}%{#WQn!DYpgmXY?}}OiyZH$1cDqL99oQT?~1~Rt{#<7tSntb612O|2zqe>b8Rz%Juuvz{+YnJ-gjbnA zV-KAGOPMYJOhCd@Ik}G(h2rdvf*~@xs?mv5OjfHMzrIPX$Wt}hAL^a%K5sikT z7Ak3g0{OR>FTpqo9}C1drP6QulC!$;RYd==N!limJXgtP5-mlmo6G_?Apzd7_(|B+B?A)QbaY-` zit&#j2;~1QzD&#CObFw(a|9XteHQVV95v7|=~^2EHHz79&&vC*{FtHQ+d3SkYgzhj z8>`1iVw-DUPOVvhxlNkK2%*`WUUOfpV1$NEtf6h|cLyUFORNc(B~o&|tT&HDZ{`26 zKp1eBa<4Z@FEUESN)^C_`7`mFaA64l z?iaqw%j6OzZ^7g05qr+(z=4@}&MA-=i=kgVpJ#4E@DUcV34PTU+aD=kG$>)tt4m%k zJ6~ovc0!R=HH*s2gIvbd)2$;Ap~i^ZFW_y9I8A@#^C$p1+(t zE%!TfN^nt_De!IgqnFwBrjw59u1lnJSpVoA>5L=%o1T0{se#Y9aOPFP#|NZ`lPq*0 zio$uV5Ke>&#$%Q%DQeVhuLsfF`Uj&C*r6)UD#i)s2BkC4Si5982~&h?>wzmu&)VbM zQlJX8-mKHj&f%b>N8PC(u?OH}+ zwK$(YiJi7BiH|quoJxc)VIsAaTYW!rJ?X-xudDXVxoP4R=)3lfhS!I+DpMi7$m?uz z%230U%P10l!zyResM5kH*Awt4DHx5T&!9pUqjrKTHRAl3$K|bf_PLUqbeYB+-pcHu8L?lwB%f`#d%1U5$bvez$JHw7(zgU0W9rxrWgcgGCP%YjGd-%#A7+*fzyKNV{wrA=CoJ8i2bq$i z{q7?_Mq8zN<3SQrrTdA4iEeC`Hek-0Az4ApCh{(8^)ZBQgJewEz?iEiq*@aA*@y)3 zc>IzfO|AePPHI%jD}ud%kfJ zKbY5(x%!q{G+j{`@Q&I~npN#C90%uaJr>Ut0YTa{Aw+#FO0(#x3yIB!kYjVwbJ@T= zED)KnGr%{hbMu`smEKR*_QE0~W?i4qb!-rh;q!<0jqbJT^jM@30S3bC4jCsbc#+4E zQlUN^>mCzguzTy9Wh0Kp1nD&wzY6azsqO|6!a(3R;~4YD*AimIGmFD?*~|+dwzKO3 z9!Zr(G5$UEKUaqP18&<(I^}eqqZVc$6{_&-mi7Gk%(4M+jhb31BYlKdyQ6+Ob-HRF zszZNZYKM_pf+@Is@|KZK*3g})oloBL71!$z_jWrHGOrHeA!?Ik(qR@4*zqH48%>Kn zNKP6dzvdxe343P};j4IfzTq7GnevKL|5Y7-f{^ew3XL#k?_gljxo~<8AHvr=qE*$2 zyOqfebe7c+Qxd{(g06K)pC#v8>`luz5cKZQuHFugwuCb84kBT}Td+=C9IkWL=hM(hLCn2Vui`BclF<KOQnwCa+?)t}h1>;%pwc7m(eg52uh5#u(8(FQ(^l08XD!tlHl`#2 z8d}2JklMiXk;Re*%MlVn(0dL9=r*`lghJ6Lz3;NH#Xj2p1&33$#RlD2^ogQYQDs6y 
z4sj2sQ5lNvEw>IBvWSE1yetOMVL+Ng8D-Wt%KLoc!cXz71YzpCRFkIA;C+ECgvbu7 zsc?dlgo&pM7LHF^1b2V4u);^!n?7zo@A?~doAMMy+dT+eP0+0Wh%U!@d>6klyX5c* zOw)SW_GtW;wXk_LU=_?~tIcqsCMJQ_!?fXhg&~G5(i<9k)&>yuF8h<(5hkX1PA+QA?G=;>j`aAv{P74k!p|4&}$nS?O1;5j*7 zMB0R9%onNi%`C>`idTMiE=06Wa)pZnMXPml%ZYS@!ad;Up?ro@2qAl!m4`r zNG4hmtON^R`(kfTj+8%oKNCH$Ls8=^Lme*yY+&+qu;KfH@7u^b(F(?evflO2pX42_pBK zpE&4z;hO#%vzB=&DD<*U-tn}FxNA*?pN{dnt(U;vGY5scC3ZEZUi5zs7OxOkF~DJ( ziszJyH{>MFuDBQvrd%nkmdkxzFRcbqd{R7uS|-C49A`RXD!z1=%l&7_{W~H2+?C9~ zhk0%UZ+eHvX@c&ZHku;mb_8`1KMpd_B)8>S@t>Mm4yMQEa~-8`30kURlk2` zfc{NOqu!!9kVb+KbSn3|NTrLhl|?WF~YNq4(pI$wY0!NUvV@HsK_JYw6?&qJSR z5x5Nv?Jh=vAm~d)*WBpLtTKQWl2<49a^fCd5om<}C#ZZs`1c-(SXl#G!I&R}mc@Ud z@jipMyz%L^Ta#DWx`BD>^Re)is{!KCY(*%U>N;DG@2w~`y>Z<6b5sdCfx+VrS5zH% z*x!+RsD{5d1mPM#kMpTvZMKuG;lKZkW&VR)foenOhjJT{8-{}KU^DN#N{9cmjJ&vI%jfGOjz$F_-T_2)P4lT5F?**k zzriGAKTmc-y0hKgf(n02Z=)Y>@vzZCNrob8LP|Obd^KmvoFuMQ>?SIQF*A8N?sC3+ zP)2TLAQMD)C<&xOlp(j#!#(cKGu|5w9woost!xwS@j7VOC9n~%fYzV5zCw<+n!()I zbGN}P=S-ku?iUQ>oT4{Y+@t$^((*58zw4I-apP8bT90PIUoXPK(c4vjWS`5iWQjex z>fCV7bXyD+v1(8Mb{?%NZv;J+P5d~Of2Go zjwKRf0I;<|@2SbB+iH#d;e8Eq78ajy0erL2nPtOPr zD#2>&@z+3~5n~QEzwn9q9x0wf_35JgPU<6dJSo?ur+KZqaNSqRSme4}V#2V$baf_5Up{_DOU*^?soS*2^G&!DdUssE#NYTl+agn~sa+;=yIVZ> zC$nwmetY9FF?p4srJ|$i#j2-EwlIO#a)qJ)hA{x4#$dA~*otwCc}scr==^K`PXw=D zgV$duWQo#sFF`_yv&sr&m)^t*hHND=7SVzCw#|O^wsA}2>&q%6?}hej@M7K>`fTiU zyAv|!%%Jrc`k7I-K4u2x*OOnMC>azmjds4w0Zjh&Y-LME(7_x5dKT=Kp{<*UW8jfS zat`Cl{jJzf8fA*FC&*(OnL-zEv90j{?>wW6p9)T&{-`GWu@${NR$ydiN3mgeXrT%1 z&qgrOiD+#XW=EQ~v$K;}{hjW4#xV+#SywXHJZn2seoM`OJ--ckY$hW(Tc}>Pth!lX zh!=`nsS6aIPC&k5+uu1zwc(hf49aj{ByF=}!RM^Sr;m|6Tfm=;cHc+DI2lS9Z^vm{ zrlLxcMeCV+sz)nW^=i8zAQ6Lpy27hKX{Q10`XP!eYW>+Pxb~!OedhlBX2~HWv*2_@o*_HPz0AY);(_u2q>3JCN~0UHaWUI+ z7<()6c4NpFZA1I&je#XsuAH!JMAypTHI{48mbfm6EJ~~)d+uDeUbAPDH#bBWeyaTO}neb94lKjewJLWkG;P%D-CW9 zM5dwpxTfj)-#xIz>anlZa>Yr{dLu<7Ojooief_7)?ZajqW&;YtT1}le1rfX;jjbyG zJF*3dou|JFe0V1GMMRFA?$(3PBXbvA1{ORRBHQ<#3@p}ox;zMk5VE_^{nKywW^jX8 
z%^rez9H(F;RV_|`|7$$HFtR{rPo#PO|4**L674(G5q#T26gl ze8^#o>fX$14_`WKlyB>0<6A2(S5C(GHjNJXk0y>5KCRFieK#mA>qVqdVGXU;zf8oo zE3IrEO}ewBKl;zB@}Yo|mwi|nTl0ehP1~Vtn8RyMNWJ$&9GX8;yyTWP_wClWE%y2Y2>^;YYrj=;JAPWR3gi zmRF9|aWzo;K*^(fc&MfWqkH}KR3bJ5KIIIl{pgHdQC-y z@Mc;o{-31xJ66fkRyOHIhirb=w^7ojn0v;*vp;av> zShRliOOpxtDJN8n8{hrg(SSo5mFl>K+0bL)eadz$ZJ{iTdpm4s_l(0-xFV2d6pw%F$F| z!N~Gjx~?=IW4=ytmqKfTJsu34kFxfes?i!Z&W;FYs37FYNwB|fK|_wSZ+Q-r)9Ied z;}kj$pbr!dSc}@Jt9@sqmZ(&wo!Yxn!J0z+GyJr$vhvgPJrEO*Cip?<;z5nXuQw|; z5C}|_-u6C*9Q=_jio^#8Z^b^R$uZ#a!;#kgp%CFPS+5%l0JD#Zjy67NUfZC*axTZD znJ|6y-EYbW(j;NeGW>W}ZpR3gNmfL7t&#UeW?76Kiba|37b}+_wlzQ%)D6VWClu-x zlvBTnRdA};Bsq2DpE$ZNp;oAsM|J(=RXT^(Qk?ZB^hrg8=T+OdKQ#v-kqzh|^YJtw zUm=kYVSiBr`HLv%V??EBX&gUZwGs*Y+xt>4pk*M~ri zL<6ww;Md5be3FO*47#p;{)TJU|(4}FN|=^OOpt2u5)%2R@^w5Us-{Be_f zTWcxX5jUzW>!Ik^YA!rJiH*1b0&0dd56~p*DY%?i2>%W`Ss_~!eZS1_(o&8^bP5x- zb|)KB?=j2#u6P<*36Ovn_CO59!q3@_j?9p*;PBu2wLCVQ2SaTxtE-W~CG4%+lJ=4r ziz&d^pZ#UR7$W^sW6}rAot~6qvW!pnE;OZ&MeSM`+BXHVow5i{vAP&9tj}7ELNMJ0 z+X>MA6Y3S`B{h&_K$~!p%-rI8Dlzb8b|6Cugtzcz=RgbJ{hRBuQG$&A{fr5v3g-Ah za})rg9DxD4gVPdVN+-hPmFiN!>cK9})BVWOpex7xG72R#hHFH)kWFPW2IK1nAk7+faZu1ipT3K;f zm;#58%Br$9Yu|vO?=up+{vS_vg(S`=euC4??+B|C;v{^0eCQQZuB;j)?L1!nO=Ke5 zpHeQnS+#5TU$yQjs9yQjRwDn{+t}TucS`q@OMgC>;>iXOGNlAE1HJ5+pcxaTc~kz# z%(K=lQPm_?!+!Q;1;RD}m^d4@sV10YH>|=aSP}V5Kp{_}9ISAwn8wbrVz-R1T25g8 z7x5LI$%uI7X&#PuPnetI^LMQgDGA8++zP_uahu_%FSQh96S}+cbIIL~AN{RXk4#L4 z3&zMc7LUqtbh2a}Hb70FPoMQMlMSzD5C98y5K8DQuZX+@+%x=~{22I)|`K@n+bX@(8~0YMrD z1eEUf{D=E~PQ2@^^}Zj}55rn6=DK$LV()8Fdn~zaczc?G64awc)WN422(v~BpLOuR zvX6VZ)5LZ_(Y)b?_RigLW4=1ee4(iE5byq-9mEt&Ga#7=JNgzv(240zhdJ<8y5yA7 zD!=)s{$9w;--^)4nf*_jCn2W~-h)nSk zleAC1iISRt1f4nZsf|~tFnr_(HlKbQ(5-JyF7!L17R`|b&w^^|l0OJD-V9 zpkI@;_C3!K!rn%s9`gs%#Kg=wvuOPKm64daU^G_o)z2pd0Ut>EFxtmEn9)}YsYc%P zk%cI85EJpm+?`|N#>AcB7#@r#Lo5iX1@S;_e+8Tx=>Tb6ZhRis{QJ+X*b#uOeqjQo zza^Hk@taJLH*I7=C(-V_yhU-}j%cY{D^9$B)R@YI$F8-;>QE}1ao@JVe!0?Q*3H++ z9YrtM7<>AHvW;b3+XDRPTjt%D!)uMaf+3dE-Ipg3r8}z3TSleJ_*MgPq}C$B(=glo 
zzm$Q%Td4-EUieAoH^0lP!cZq+-bEnT2dYe7#r18z&TEu3ZWFw0w=yF`^n=V&(_ra< zCk0#Qr{ZEtxBjHp|qdj!5Txs#i8w_U-QCWv8QQ6?$^Yt_deGP zj6X#tie+4egm4Mh?k>g@9hZY(XYKb|Ji*f7cY+Qb_>@=KacF@@9DMMpkA zmo=#ZpFIYTIL$Qg? z9w#4}H&+#O1)EeGBxPUqV{p|aQB12};5j*PS$-P}$<^ctLH48MtlOESqL2tFXs@K# zZ-0(15?eg}V>WGQp$zyx2{~S>KZFGr5ElA=R3Ah~;mY>d0lyJOnRx`#iX#`wr!AL? z)203Oy0|`E`fMV0Jagd8p0HhgCuU&BkbXPf1rV1k?l3(k*1nszU(CHIK{EEcsOOm? zzsn(mmS|CFdS>xkFy~x0rP`5ZWo13nS}?S@EQ|snryG~}=ye>f*RkM)5MIOcSBJSi zMti858yOj~AZ|bKMiZ&|dgfNlF+x$kJ%lw`8r?s8hKE9OtQ*Y=5QptW44%EUMN%#; zXA;d9E`6;~)^QkL!JHJO(!pfghn1j}Vt)U*=IB+R0i9X%pM)C#0KP#Kgv>m@^G8Q?+(wOP`yCgJ6YJ~C z={>uu!2QMCRyA#Eha=3k5xrosTD$yAu%=c+8bpheSHA%TE*>+tTl+exoYb;_Iyw?}U# zbh4U-ed$MqQ-mijYWbX^42v5pNn#kf`L;kG|7nsma^f0z67&7~C81T$#6sH=K9}ht z#=Zj$c*~p&!D>Kd5Gt|TC>9lF$b&EwKb2-osvP_f0dVH79yfSnytD}H!*nFb2n|NS zgU)Y}e3`2r_U_a@{#;#v<*U6qZH1u$dgVfBsbXq6+qG>^8{HSm5CXd7GhfK+33`(Z z^{jIKH05pt#o0?HvUfvh%^_QRmo3CU+H`Q~UBnBr+rid7xsR@q9~~sil(3Ruyv-cI z|4!6jiE+~GzVNYBWD4MRJrM;0_+^q;6KW4*sCeCzWwE)i?)vDx&93yYa%>WpGCpWEUQ`NL#D6?nRb_*A|;dGk| zS3Q#dmZ3BUnf6$ii|2~BEQ!Q(eT54tgMO;DL`WmRCu<3X;oub!QHULvOlGDxaKoz^Ed28X{P z2DO%%z7^&8k4F<&@k&ny{q80TVM@eW<5}4Gf!DQ$ivVGV!ZML9)~-+(>MBC&62}n4 zVeFpyT@%;Lk9yhnkH}d~TZcZCCT6J&KkA^Lv{b8p=fHh+=M{-f0n$qhAR1z0V%C1z z{)%x>BHcHO9UmUdRW7u>VKDzur!{CKn|P1gz4N9gF}5=dvvj5S<5d6UQ52)gY_#xR zE1!@QLbt(EI$7{`3M;SbFkpi@Hvu)!iyKrN6X5(yZE_(yOo`Z2tA? 
zNBnLp%>HhNL6sM7q&y92sLnGW1L+{idgq;%WM$JRtAGbV@kb~kSauRK2c}jqGM}OT zI(q6*Ip~S#rgi|d{`M3=>pQMtPd_7{K-n7fH@^Nx5Om2$8Y&LlkdC#^hMFVT>hn3@ z@&F~AE`^0BAGvyH#k(QxAXr%f;4?V9w5ki$AHgM$T8>%v9VnVL#Q}7?$k`PGEXN-e zC?q`jf%7kWrUdEzw%UVwxT-mNp3iGV^0xtRB=NuQ=jH^de!5>hZM7Ik*pn^#iEWS$ zn}D_8C5Nv8Faodh&Ca;nctF$0)bwSZJz@7?vSe%kNloC@<`zfAd3T0DE9{-43z?;l zL`F1!&bNGkov*blT-XC16ze1nyz_kItJv6M5k-%^rTJBRu9(B@1#pQpmm9YrGPr@a zN0N+MYqog;*vfltxb&g<>D-YwHBQt;*f5m@I`k=kb)mP;=;idQ+Zqi8-dB|OS@|ef z-RZ@-v9%Rz>C%3uY|t(tY5|}R6~HGb)!tOZzWaJy(gdrUo*;@yBvr%ZKS;y~EB5iY z`pGiUxQC}Fxv^K@l>-QRr)_Z=cymYm@u@!K<;ZBd859}VQZa0@7LxayvU1-+c}&@R zyyLW_og(5wY^fsp%jOP?a$NY$z7Av{3a84tRmap}3NT4`14hP)1m_1%AmCo%PZl1K z!%p(-r~2yT$M+pwxq}mB?y(S7RzF~ylWWjbEBKd3OEAB)g9BNU>K&T+PgZ@4ON3i0 z3gpoojs@6H*8m$7z*;SD1E}S#RmnYAA~yp21G<~TilrN~3|L2d{)wrr0C*BP5w|JG z*%MYj_PmBZVx8`LETRy_1_@T>*#$wWdoW;{+~Kz#N>rnvhp*~&Ev-{XU5_`GzQRzs z@+;Io)u=*TzHD{Xz+){YLi&oF1oSl_39}`RQyDF8k!Qa6Zw{G>u-B>wh7kQ1?tkBZ zrD~7iD9@XZzC`Uxv(*n@WyL8?H0n%0L|7w#&u|*kj8MLz=fde3nj*w!b=%IxN>X7> zq^tzdP+~_EJJpQHT!*2i&ym*cY@URxSL%pR;o{&}XiLRY^>w=x%eADqepc_Gn|gy*U9p&u)G zFE`(Fy6s`*aW7@>ap@v~=0gRSfOP(Ot9Ia)oGq80XfhH9Hl%sJ4bzf?Udf&{ZU@$% z*Bx~HWq{lc2K;uP-`1p4hf;JLH*#!MHmw9lRAcy!e1s?W0IZdKXW-q80{wPNTKFhg z{Q_VF9{}BmWe6WZ&TbbD+SHZd=&DInQZ9&@f!GU-4UeLU!v8x1IA`%F&nBY!bYh$ zYtnWmEgz2(kaIhpII=SiZmn&kYKG})<}rR5%erIrKOSPLXE?wCi8L7$0xcRqw!M?N4Gfn-38%>Lnv zvE3q;s_R;3=LV@ys6dy@CI|H2L30l;8=rA$oE$*oRI{#CoFj9gXF8;k8)D^eEc=e9!PjHp!87) zJ_#+g%3QL*j(+MiI>Y@f>)%fm%f{u zvjv#0M%rl%ltGw#d(v|ZiK!|Zb>rgCr9c{Q=y`HAbE4(X;MxUzN?Y%gL(PtR{7;hy zSipx=WlCnCWM?!xP{`KhnN`Il1a2+`GCy};^@@e}OLG8_f(gEy*kHd0Zyv~dw?;8s?jGZZR zfH{rKX4Idm0Uw+FS*v$Xd6xmd zBfsx7uiQ9puB_?IL695G^p78dzhj%ZESkMg6idAScHc0B6)Hp(H~Zl}&GWF=ZmEBL zxu(+n(l)QycOsn{n6n`ok6KMPOB-P&){`kL1uEZB8T*#3y;>#*yhxo*E+D{i`I(ZO zPM`r52#HNMXz^`oL=|WPv!8NPPCB7G!@U!`Z_O*Gz)DH1cdYr&!&<8C8Eu=cW5KiK zC!&Zb676}+Dbs!LlP|T)f4YrQL(WN1Kd26l2V@lx%0_uyKTUh3w8619{u|#j$5K^@ zX8EH;NFf=NL9;h_w z;)kjcbdGsD*YI*rZe(*<0l@B-qEa}$ldMIdI?Z+X%K6_3=>jUiSBxrDs@nyJ%8AM% 
zFWM>7q?@Ehjj{}pTv_oSVyW*z-@FE5=`6$ch8&2c@(4sM%_3rH8fc!%1|SR7gZ_Ch zc`6=ase0+{fVX%t!@IcA=N+DRHe^a1NE3#ek=A@pnBE_3$VD5gKjE{XuR4SVDSRw_ zLIv^-Xg|;WW;_Son%vI)O889QC2A$eEX>lP+O+TD6P@9i>&4PVWKWO0L0Hiaj}eOL zTfb-8c$qgB3`kb}y9t)lz6LEGWkg{EZib0gy8t>A8@HklJ&zpGF@F=9E62J zSWFBCC`zo{dGug6B0O4A`DYe=F(0-w+d9~1*4oq*yVl%#G$rH1zklzjabBP};{Em8+6P0I1K({{eL}iA`g#;A zVer@GunQd@l-$Hn$({!?c_(y%gJvh&x95cpWfd&GI5hdzD|f5|XV~U9HIp$QRN8>JJixU9E)QT2 zF6(O*Gl{=~yf!pKsAO7N*(w|E$IMZU6te9Wn=v2x-H{evgf2Y6=?c>uaw>Q#@q$?# zSqch*DO8W&Jm85}oG2SDLBkugN@r#{H+24&lYVOLp>n-e=FQbRQv}o&-=fUxFYEh>lDiXs&Do#P zZTmR>x*3~J5p5X1Ernj#m@WY)FkQl)8N^uhdG+1 zVpmumY(@_Gued@2SExnQdk)kMJL}Z(JgQx#g|TDkvm>aQHICv=iQ0LdMGA3?Ddh5Q0C43_QUk}22WAZDVE8X^? zrRAi~J>%cml^T$zdO37Tj@3EUpX%yvC=WjJ3nR^%xNoMfZ7vEDxNQM5;BX74a${x% z(69Ci(1ey_H!;i`>`VqKhcRFllj!ls+~ z=@~7-r=eL~@SnNbbzks*@ZjD5c=BT}>KJTNdaHHCf6N!q=Tquck=tQ$s3i~sgSH91 z+#@bC22Ln#?0;}V&+5Jecv#Hp-Ne2e?db}UIVILusPg6Gb#B9-Z$xX;*(i|v=~7qx z!d*}sI3nLjS-)072uLiCETh)!YdWb{HovTo8UJKmspiw|-nZCDok808Mlu@l0;RUz zsR1pBv7Tsz(#0$Sm`WVnewA}z@h8&Wr~y*lrU}9Yt4k`BUy5Pd5fh4Z(o&m`)_$1- zg*5EZj4j}6;_DM{gD>n z>(hJ2AFY5Gmh8Ctr7ptrl9n6q0Q6J!?uZ2^#|>CdMOIAd&c)ed* zxqJWFH{%7&>v(@s1A=)mKsvxr{|Gn@zWEsp$-7R#lR73pt)AQ@&CbzGv3R+Uj5|jgV&4 z`Tqwrf{UZ*;RFg5)IT%hZdr|?l*(fpm8R*>Y*Y7ei)2B&xcM!A<|r@(7xCiA1$PTC zPL>Q!VnP;v)Et~^1veK z$0AsSA%I0lO%yO=qC@_5{~SpVF))Frw_4Kq7u8>y zVR~90_WE^Rh*{uE5P;DibK@oAPXosyM2DFIPgYKKJ;g@;O>E7dRVp3pjz1%3u?^k)>VRL=()^S1# zML05A$gaS%6;oe2^ID)6K&1zcp-bbmSl@I{rTGRVYqRP#;JLIi};mijmt)^LGV z?yXoV0^2c=Ucgd95GMTXTC(7%Ur9UMc!Wxk7t{}FnF2`5iS1I%X9M8yC^VzTRl=iv;+tY|O3Y_1O2VG=y6GOnR_ z2z0=en04_Erk2umGepnH?;*O`CH~*Ql zx9W($G25!y-qMEn-n|~acT?bdhd8xtkO+e^gDDhZf4tFpf5Z`ohYWFeTQBwt{3jn zmWsA}nxm6sb<)t;&@)WwC9C3lx?9_mQ+oxd0N6k?^53aoq^3f+T^VY31x~a6g-wt6 zMlGqi>V15&j$qJpeY*t`w}Bih!H^{zOwxWjs*qPq6Cul*#x9f&FjVF2d(2-wpE@rn z$IITP8Nw7|YD-8Bxuj~|oQob#qo=KD>M!{kwEz?2YHbfqc{otTu?sws-!gYW@;E}T zbE|=LL?ZxHh!i5Be0Jn#yn;GFIwDol?mR(g#O`iRp3Q6tdq2C7D%NX_6jF zTJ+sIYq#QczP@{if;r2_NQ7g6D0UK3&s2~UC3x+OLxEfEMHxeTAcO!RNe%!A0Wqut 
z=Mls=9Ck9IUVw$ZwlUh=$4B5@Y=4`sPr2Aui$gQn@lDPQFtb(4U{dk1Tb3bPd@bMLcm`GwO5*b&d_C&-+k6l%UP|KL-d!jOl* z9Rcl&1?t0Nb6Mes`XEqkNr%lD=S_v2OS~DZw}Z#RB4 z{@GjX+MHVRmE96W#$dtg_rBKdh!dGWoJgVTC2=bKHV+J6{+i(H6&ow+j9y1QoKop}QAxmwm%l3xmgrG&gmeKsn zhOCMoJ9j_@P1{-jT9+z>w_lQ-fXMW>{bDW~>)_G1Mh89#7X@1bk>(09%q7H^ZyoXf zGYXtLqIiL7FUwA*jW9+MP@H}xRL4l1mZH3X?wC68jy7*qW@UpnqcceiEbG>|p zD~NB_0<7cFl=fc5*Al?(Bxmv88&(v@wiR9zl0`^f8=#t|--V?mE3Q2hTvM;v)v>IG z85XOzvwuR)&x7YAv%onv)2*}%7DuE;T}HNau1J}SV8mhyzb#-fMYuFzxRM5YJT8+9 zhC)QZDy;~|)rc*S<+p)G^iLQ|dI}AYW&q1oeffjCd6FoBZ^`uI+>2eX`_B!^cZ2We z47rwS-nF&acWC_VAYu6ShQeLDgmEe?Vr?DIFxez$K(tbI(4FQ1L6j+V{W*)lezviV zYGUljo5zw96#ZCeddJ7wNNew;Bq`yTfI{Z<9h&ZPL1i-kuP%F~rgPI;ij&HN7zpv%xM zw7vQ>lAlgZleM6)B1y!8lNZ1Z2prP8&-mh^faoB0Dk05ytR?d|UvO&34Z~|oP~*Kf z?(*UuIX?$kaPQcqwumQCKA<+w2h`>Rlv{A5vAi#lIY^6e|G}1V(xS+GlyL=d$7=~c zW$g0L11iG~uarRR-l-$|v^!}B|0zwNfNMNRbh^0=v!x8>x7b};p3d?^R478v+ z@PArR-9*_p7U1Dtjgrw_vYsPV5i1KHe$K+XBPM;JMVWdAJjLW@a`H6|JNBkxFr7{W z&$hV#@il!-vDE3oE4R$HSt&%_1rX*oGX^3cU_T(87-b0jjt zaeSJC64%T|L%!SFUj0K+!9@0X8pIc%V*m27CXtweo?h|O^$b9c{~m|bzEw{Wpy8=u zEV6bX!8>=!`j1W@Vj2lF68Q70!hd|mN4VZSD(fwvrZE~SCXemGpSLuw)J7Y0Y+iut zJ8PeJu+9BRk^2nJfkcj2|M(4niSI8LkmGi!7*BuTwN|GWhNE*_3;!D!I|@KxoX!2| zgMayjfYH5{zfO7&;dN7qNGH(W3$lnzKU=yNJ#f)nLWjdiC*RA~p=4hqh@^VI2!rDu` zXvkQ-XSy>FpCS-Nt9|K1AclPBX?$?pMMf4iYe^GziD>_C=N`dFfvk_tGNE^w8PN56 zH=xLk>6Ai-W>SpUWi?evm^5HkyTHsHywME)bXCZ#;zQCa(X!w!|Wwwid~4iIa*vKQh;%* z6t8e3hlpO^8oJGMvOJbJS)>`RZ1c^N=gO@Sai0@SBS|Os=Ie4B0z z1=5(~7(%#W1e#SOi1dsq7Jq(eqX=U3%ROM;c~_}<`r;e%O^n;nThw#OK!@Pxe3*WZ zEN^0@GrZaH2&2`>tU^Y$1pM%3OYxh@9&Q{yVSkBo*hu%D_Y_kTI3el4@iqi3;Pmp2B(AJE@x!k!Q(U7dsPJ6 zZdQvw9$P;7;|8xyTg4FB{1yC&53}Nmt!7n*%OzW2EE4W1y4XaS+5MAn$-=5YLe18? 
zD3F;r=vBM0XOTiJ#s_00t=E{`e)Tl$+x@;a*C!Fj&D89SiR}e+F_9AQW#W6NDWt2x z)uqU}7CZyb%LI4szbCr6;REgQOHK_WK2Bbg-|%MRw(y4D2rFeq6D)A9W{p9`g!T#& z=#4Oy*6M`>X!?@w-;i8Jgk=1W^~+kR>r>tBPFo9EaBN+ZJrsW3P+4R7Eunx=4OGAT z71>_97S^dK&{to5SDM2U&p8}ilA^4hIjPS`;g`V?zd*LNk8V5?sz;E-5Zc+vPh9RG3ic&O zl!X_<=8AK>#EITTcYS7&^@T~vN&5XKgPVzT6_lZ4jN^vBGI61xlu=z>M zXn@*$_$0JJsl@4OZ$t6N-~uZZ+25-cAMjn~3?dee`5ZO`#YF^x#G4qWKCG7vVUG9qnqfF-O(a#$F^Ge^#85i! zkV|fQY8|7aN0GE#m+5mgpF<_I4R#}cjd9#7Gc}iXpw81ucI$1a5BZc<_TnW7l7gS` znDdFqxtMY7XLp;LcV7>8qW5YTK;u5eTYljDS!LAW6z&`=sFg!I3}+=+F{#k}Kpr(R z(+TSoPrWs;etrZbX9}_%lMF~#D$xwP{?=+|onf3$HnA!j$}3F4wWd(xGCpG0wJ&+M zrs@dE@PP$2M`LCImgfvMm+*KFx-0RYPLT9%Gucq85J$i-8kc?{8QMgIg(8jpI4{I% zL4|A5gvZU06p{wBrM`S)qf|<<5YfB-8|jng?BKLz?^9nfUxJIM=ykWwt;2@c&lV>| z%9-!sAjX1+^eu%XC9nVM2&WYM`gz*&4#2o!yeWcIH4G$~DDGNu zld({tKZD<^JNB>Be=}y(Vaj~7O-Ar`LgJpu?BKW0WNH-fgA(itDU+2X+VH`3#jP3^7YW0^!GCsL2XEB_v4( ziM@HaGk*G;kKaO&OZaoX=eFz4AKSkH2Y#;n6zsK9s>L9XR;{t+48*vq6iUfvS!LOC zc1WKQYG5^7DPi2|pR?ZPLD)uRd6vA**mpgKk@#P_P2jaFB!w`3DLtff1Jku%!y@oz zUX^?#JWNRF;)bG%lzze)^DQAL%T$C@r7pEKMV@;xeCtYJB;Zpto-km>2pC$!lfLO} zYo!0&F%EG>-f=N{2=%v}k1_|W1`?nUuZp-7U?hD;Bl!np)>$bJHO8BO5BfWPzzp=Y=o8^iWQ!-h;r~W6G6#hmn+I zMbU^L*#00o?z|TGU*mWQW{6jPQ>%$C)qax==264+UyCBMr&XJ`b`3U?P^8R>`%Ck3 zY7q4g28_81lts4sV#)&ZHv9pZ4kx>i*2W(gn!hSDh=2d>49#9Fw62_vAQBU3< zO%jw)l0CImV9ezYh}GUb0LA(_&ToXwo6Q&4w^JG$y%YHMl=k2afpD?*B90Qvt~PkO zs)ChiKYgC?fY#MuS*YOyQJa>u?k90H?G$KVxH zThM{M70}WoS2;FGcJ0@M^jTVC`X?i;3oA3KVNI;GoO|&FI{I`~x^sLQ*t~52fvUtC z_Ry**GtwfYemTPiw|aap8C%ATO4g(ge71O(eT%v~k@=t%40;!Pw4L^_H2UZ$!gTMV zM|pM>7QZ6sjza9^i>ruYE_$p*?9IE2cecy(U<$o!8v^D(V;2entz}j7pnJL3G=Ho) zERdi0I}bpXrCdhgOIEK==>Xj4%!Ft+Qm%}dC+jk<+Wguw##;vlx5YEYQy1hPh(G^5 zvQ_uKKlL1sN=X;-_i8SWDpxBnu5baoC%DIDm|XBhBa|v09RAeo`!xBSZ0lR}Tqf}3 zez8wg{K4bZx_^_@x>9gkL@G;~>|x?_J0BxrFemnJG)72BqZO_bJKd4jPetfGdJSID|Cwe?j+QVqqM(9)lnv9>N4S{kxw8g)yf z?SrpVPH~p^6$HS(gM%Msm5FH%>kYWuhi@eDA$|~-hR3<&6qm5)VH3p?1oNqsSiKQx zdBvM2fxZs7^)Bb1q0nMSv{S)%HAo*Eb6DuDL5blpDg6)-efCqS9!OqW8sKHM#zzpi 
zwSZJOS!*esn4s|yWFf6(IZ|9v+-HPKyvBVxY@gCk zY>)LCH(p0%4^75<$i^VeOj$0POSYg)%Rvb;B%1li443$eBz;*;Zx$$?N}6dVQtiH6b;=th?EC$g6!BPy&UqQW`>6&9>Hx;46W zT(%kr0NKWeg)WKsu9;kM14l^MM0QG?jnyTAUjk}W`3y4hh#bS_#TY99fXE~p-{r~t?Js8!NpOYf#&igAOO4uvEkEJdf5 zHC@eE75bx!j_r=bL$v(poMK&2hpR%aK7KYZ>cpFc?uGX6DaMq!UIq*g9DV@;;e z%fs+7G`}39m}8kc&vElyB0{Kwfnf!2|H-c-gDhh{P~xUxPSig+^B*TUP%#Jf5yPzg zX(C&{x=df-q1k5Qvp+4K?Bc^=FYe~6c8>adfC^%)(5E%1{2}~9^eX*h=J6>qecLnZ z<7&qeuB4wPIK3Ji6yr9@%>!_N-|6a&DjcBfo={K^M~+`c7LY@MN1i#?oDS_NJFvli zDDf#^<7!*F0(&vcD>4R-w8Z6mAev#~qwsE56(>KSsJGDo97E9!z%fv<{CfTC^?o{L zC#lrZ6k@uXnS-B6=kohjlRT_CKC>>8M{jO!HIqKh<(OHrRk1YbHaweke6HZB_P(%2VRorz(e%3AGl>%Z{OTNet4q)r@0P_Li!7Rfy-Fp!uvo28-4rl}> z0O5JWyQVhWod=+ULGCEJ<_7#zSrCT+g#pQ%xCY>PCd~B{x$(y1oQaj;A5RFm^J>W@ z7Ber}RNAjt=>a}8^_diJ+J8MxEW~kE*~o`=&*9!E41I!Fm^~s~V>9H$PfwdaRpP1e zW3PFa)@O-)Op#X^634IYCPVg zPZI}^E$@OH%;8b^QV}(?o3kU@Jt*&}80dewlXC!nVhZqox&JyUz+{f32Wp~e45B7# zjQUrh^!iuH6W4e9fAa^O%J$Y2BV?HYBvEVZHFp8Iu}-qP;Rwls>jC`+M!=_|%^IN4 zaCF({BoF(@iG6geiyN|6>NJApJi`$$c&QLglU&7=pxbhf(&zaqFz!7(xJV+$UyJlm zWZ;W^YH#kTR^(y`#b9~O<5n7~CC|NyOCv9+{kw|_TkhP~&Bj&XF`flmemK(u>k}d< zt>P8h77fAZ>=!;qI*mn1!?BIs8hKs@5tatjcE^ct1+w+uiB-LLUnuCiBh4a+SELYG?dkrCf+K(h5u;#Rt(MHB9mTdv1&92u~5 zmRd-2BHCK$_2IzOMws(TqwLrP2bSyl(Kl7+pR`b5bqC;P{3N(J{>i4T&H+k#d(3>| zQ?jL){gE=!abyG02>7HLNF)DB6K7TD6OFZDgcz0Ikbi>vFW z*xZN_I}JuqpbM^W)q3|xRo=fa7iY6e0C5+(rY6oPPQV23_!GVFk}eZ}1JZ;g0_9c# z(!{Tms{EubU~Av66U5J@Az;q3$hTuVMUaXREK-8w34gj?Xxm>6XJ)Wb!?9wOpyUj~*xcvX-|ePQ@@F zFCxcy*M_xIq1Tfb_2#)b86QM8?iJ>RgdzPoll;uz##57J1t|dMXu8wj!djJGYi97Q za=mKS>*f>u&AXeZ^KCvIU9SdJhLAI;zIjR@{Dp@o$g_ybShA(q_97bHE-WaW+4t^| z7LI$i#{VK7uft@@Cs%gq| zo>3(ou&ja~rZ15gHsN~pbFnVmnRpV41QtdhonRHP3Y4+m9kmIPMVLK9NwBnTODT=@RlaUuyNNfH4QSMFGRUw27%g6)BU@tml=7lX4neS>EO53zTC^ z>oeIx+kNnMYK0E(|8h5g(Fl7Vj2vK&DxV7@WGIqCicFP@!cds`*(8asQ332AE;4P% z6Z{T2T=<#LW}j2fcvgE22jeDk=s;0W-;ZSfSc9(1Il+ z?#H&kiTDZhw3OwV7;!(gwW2%}5fJHsz&_XxvRu=RuAVoK@rj!ZKu1efW!?mpvM-5o zI71H^HHwN@rj6(G3y2vp2)+wIk*r9#bS+qw-$g0fJV^~9P{i4ApL_S-G%BE@E5={k 
zPXF-|3nh08le}Sl=QOt?sw?Tj3uY<$cgp8tIL_EOu-wo5IrQ{cvMjgn3MS49ZjEaS zw`RQRQ{n3&>t`)bal6DluY8x($Xw#J<=NOhi|XPC+RXkkl!v=MLit;EyN#)T z{4q_dVfF*_v)|KcrP@^`rqrZ-Q77$LUO^3~IBHu40UZ)MOJipf4g$=pNNHw4viXOijv(u_Kg@7vkH6Qf7 z|0`GcB35N-xpQSZiPv0gge1j+l2I8#nx_raF}VZ%g__IB75D?lQaN(gov>bMef)WB z%10Q^I-gg6T03J&4)XZ2dBvCr<|T_7(GEAQ^pf!|SXX0U0QutEB^6o(&~ig9TSxod zu~2-epBMy_pg0Zko`6q$23aQICWwq-M;QMfrAIL#b3AjLMTWYzR4`vCU&YVxVLz4~ z7T1mrwCyLQa9PG4mkB(hqIgYSuCvr_6V*)MZQ07rGu#&V%Km3d`D9h<@LJid*>?eF zkS~hKJaQUX3k9}ekfH4G33l~eS07l17g^NJ!H<-5RD>U$RAO`5a_zOZ{7fQ`tU#0- z+OAits*|%lzRXHEs>`Q^`(0ud?ywl*5~ddc1%e#$ zmWy3h!y;2Gcmsyf6SUXxJ03P4Umx+)I-~CRgGj>mkp-gLEH8$qrcLysMa@YHV1RZb z9AKq>EVYxkkbIz`#Ti<(=Ea!osUk@na}>5J5ph@mTR>jubPTD?Uq@@TA&WTivF*X1 zqxH$jvAA3N-EHokF)2qOeR01vw6q$*z_5U_2YD&9{d$8hH)1>T9on+ zKKm0D3T<=rzqH#0H0^+09{`81Q(A+x!fSd?*`dsU-dI zd)I+SbxpD`2ZW4m>X7hx-L3w0@~$2+Tk0S`FyH?vo{a7ZQ}Jin=^xKINE)!4d6bl% z$*N9p7Ag>+?^)h)+u;7-lBbrV7P2Zujc1px?;Bd(whigj%yp0@Nun&tP|;rn}bKWbXj|MN3i2u zHyCg|>N5(7+arvK{T<30DtVjV+!0{$cQi`*U5H)`=uO=NcOVhmfHmA-B*aY?F*Ro= zXs12i%9fzl77l*}GGiC6@`hb|XBSkapsm%*<0g9lj`j9R^gah{z3#Sz&+>x=#8FmI zJt7-U)$-mj?7Rf5J}mFY?Ej?;|MQQms~k`y9y{63;0i)_Qzk`ae}oE>DW93Qttml73Bjmf9Rn5PMEHT6~*SC{l{OtF}KeJ#JTbWE-TrdbIG z)4}ue7f|BM=SDvCs`Wo417>1;!0QGRSbNtCc&fDMU`~(a0O8XIF=*Wb9J||7@FRDG zw`73d=*h#Nb+HUFXJRaN?`5Sb3h6Ud=gWEw(OWM5WFRk03Owyg4nEX50;`8AIIt@j zM2jR9DNvWcFM8TMMeWs5AJbj|e<$HwQdkbRwijBr>R zJd6?G|yVK;3d;qJPQqt`9`<%Qq; zRt_~-`W3>Lg_^*^t)Eq&eDzH)n$W&SIhYbApay=#?AbS%-dhH?SDB1(#3|AUA|_M6 z*SFKXTKFS_ooN3}{nBQgi!u8KK1;|EQml^Cl@O4JaY~WrDk?{Nd|N()e<<&1biQs=@_){7Ljwt%rNVcl0qlXc~ zlTXq6EUx4gR+Z^byv{c54?lV=8$!k$o?i&LhwaCvX|rgp(dmXE{amWlZw-1mGi@wH z{i4t|sx5M=1O*2W?RKGkvV#l-SSv&3su<#0gWp$&KB=>%l|69d$wv85h#+syXF|YE zalM+c^8sjANH7tg-EGt#6o+}R6AO4xJA7;$>>qfw4R%J7#8i$-i!j_YNCSSCj2N)i zHu*^1`m;V#85YK!JR`IEb=N?_&(jz2--ld2>flA}UJH5A+iM<3SPkm5QDEx|29nFP z1!ehEJBI;2C^?s)FXAKxF#!=dCN5V+#E=GL;ui4SNYE}plx)F=vd2uU$2d5>S1vDP 
zw{x^ZUV3;i*b?N_`$>a&INHhwysMVE^|76Xj=Fa%kweO{*&QV=0w|A71Uc=v4C8@xT~miv-I5zTr0SNsq!g|F?Pi630TqriacjoVk2gC{d+9Lhno|7z((SgervrFu5#-FdcwxqzU`O zE>gk(joZU*?49m|FgC~|eO`1wVf)IZCg9PlV}782vr12x4z{bItzMIR&yr12M!?c{ zA9t)h0lnT!GKoN2S0VRR*Bu%__)q*F#F?7 zD4{b3riXu_3AMmp6G?yeq)*y+k?wqr+9A$)HzhJn=qK$bxZ)}$A1~NxZ{V#zD-S0Q zk+ok5$Z=Ixy=^8v?rdsV`{KF4R6}6vTPxNx(;}0%s2>gdy%jEy{!SL8`IM#Blij03 zC#*m~X0pNQqQ@1NwxDu&1fFZ}RO7Va{@R6L*AHf1Q%owJRiGBY^e$3CNcwdw!$PPK z$#<;gZr)1DMB3fPW{LlL&`$sc-3MotSB90r>t5YV0GX=upjS|b-mdxP>}(I1wZ=Ao zzv>1S(w|UA4KS^qHRyN2cH)cUq2R*lt&9{N9{5ftov-CXkPaOo?DyUd541do@QA3Y zS4_6n|GMLxyI$#S6B}}TrBLw@?`vA9Cq7`doD^fUjDghHY8`dp`Hamwgq0BG=&$X8 z1X2mdmvi79q}P@f>8xu1i?6o~i@NQ;Kt)s#kS>X#yFp5Zk_G|k4gu)~VQ7@jp+mY+ zx;v!1Q*szWN*F>K&JW-Bf3EjBAI>MA4_-X@yZ62KT6?X%Z*}aE#WjCHnGwN{O20F9 zmBzidpP4*y(Zt4f=|k=bWsE!R+HMu%5#qV&*jxRRx|k8QA~Y?0D!;g=t#Q}pxu)LX zdmerT4t(y=`vPhEbYb1L_CI*?ZyejLs3$*|Z@HbC?7jb5)>`B@0xA^k`ik0qc0-wW z(Xep1F{5?kW>TpDbMOu^vZN=S~qoCZjTVAv%ns>F(T^gq46_-EbMv46xVu?QXV zvAg8TWh>jEl^F=dlU%5!=o>zvbB)PVJo(6DS$oW&JC74^{P`MX%S?C z5z(hUd*zf=_E0#O$dvGnDS&150;*UgD^@;B9jJ}UL0xFC>6F~{CIcO7NS)1ql6sfH<4 z`Bp#>`I0VR*nP3BxANG2c zv8Lme%3OdKeQ<+QzwptJ(aJf}zTNdPe4}zArA`)$XYC2CZnm3 zZeUsTgxUN(lBi-=wiuCTHpt&jzPp>Tl&dvxMgx3Hwsw6#8f+%CH@1#4r18`xy>LVN z?RJ?K?zX8M^JAk?@36_^RL_4&v!+%j_QL~%0CJBs?Dtt3=y>Y0uQ?1)v;omUC_C^?kjK9oKm^#a zE;lGjegVo*LEFd<8ZLk-P|;VzIe?)JM6al3-TtHIM->)y+88RpxcWRuaHGM3KgV9N z1P`>N_0IpbElU?y`Y(DRoM391`fG8?Ynu9NtT304_aSAZ>YjuiMcy{5(H*50^eKC@hpytSB#^IzwGt6r8$? 
ziYjw6RxC)p*`}#?f&0U(WD6K!ukhKxqaVeQrTV-dpd`1gOg(VSmgzt@XW8h$;Hr3* zeCFoJcgd<3DsEjb)&5GUK{Pj(=WY^Ay2dAGC9C8GRdeW_F_M?k7~zPQukDw?j1;dm zS@tY9pb_*MZ&YTEQwi!)Gz;xiBO();9!?;w{-q&5;=uG@O^`|gTI66ao_X?D!0B5S)6Q`LbxJ(wN?-cxmHsYHI_YyHV2_t< z?$NKD$7C?f9o`4;VzS#;uG7A93u2aiR40g?X{KUhmIG-!dSYA}1t>Qre%uFdcsEk0 z^t2055oDX=%*&$&u9b4oe(je!bF5j^dWz&Hi9cc&Ju3c}GkLR?j!H7-S{RAds|^1> zxV>B1$m&ed%F3$R6rxeU*MFR@zsS{^L)Apyjn2c``0+vCthqE!t#K*yX_jc{C;Wo@ z#h|($*yKt?tS0qe!{XNKx6N_Dy}*^$v{&a;chg9;@6Iagf6kzghI6x#?Jc%#3&Uqe zvu_KnAL$D~V-4d5b>uN(p_mtS%ZZ2V4KRVDPIQMDD%gIm8D^a{%B7w{^o^=6%)lb+ z{^}UQv}&(X3LF$_lj+I2o)^6gIsbh6L#2KKC~mNT4>@T^s`ZtX?K4PRBn*b=3mL6z5s0_%x0+Zz&xjoF9T<8 z=z(G;kDfCAi8f_C5%jYu2beD6PY^h>k9>p;>tM^28RnxJ{dRq$O6 z*IT2^_u}Tx3xkDPbL;&+Mq1f((?jsh6q1fv=O*r=WaDWSB!Yhn!RuptpnsV$d$maX`! z2kzt}wYGwQY#Z~tHNL^pL~(i{XxKp7vAa|XN(Pbvise@LEQNZ`i943x(E(TZi)Jb$ z$Z}HV*A<6CN*Aj)Acm==4)^+OS2^-~--2Z^|6r2jqY?}B;xVeJkAS^%&0UKhlDv+x- zeK5s0BHfb|de?>t?+7WZ@{k6whvMqLMDZNZCT64fF~Xk!YhRz|Rp|hd^c2RU5M99G zXbJ95 z!oif1p5o0B9TMOAF_GH-3wI1gZIYv-itdpPX#>?Y37W1DJK;aCr(D4W z2vH|tS5lLS`D<$}dWTw_Uar>acWI^&dyo*Yg?A!a&TE@;RBp1X4S#aMN?cf~(C?!Y zf$uL`jynd(vRpI%lw(~4i5m?uMvPh`u>Hh6az?#C@v!NG+ljqho%3s6Vb}COR&?4| zDf+?QjIIHx?N+XIuWO+F&{6sBS*a#j8jA*NwD$>09>R)GJewQIDCUC?b`4-LmOCEc z@+`@v#X%urXbaCWv6mG4@|9bdbWdZKtlTpa+`*%lCcSzZ#RlTBU4p2<9LUUmeM(0E zsk=fiX3;rHkGC3(Z1pYZ0lsCT&i1OTaM{NHy=NXENZeoQEG};1|H%&RJ&~V9vg#_W zYGiJ010sVK^pt5c%OsZGV!@Vg*XKu}?oz(%+jlt=202bMuYCdH01c@2sjTYJEFC-N-w5T6rIZxndU8%SNx~Dpr5c zD00|^{FZ3*Y_&TjIMv%n@U_@g%=xU@yMMDjY13}jxmBC0FWRB9fmo*y;2o>;!=V;~ z?Q!`oM-tI60<`ySLoTN$`8RQbCYKU2TRDsoWR8s40a4o@LcIs)q`@Z!QQ_aaKbFtF zLPqinj8gT%4zLZ>uBj70#2n}+uyCxBnSS?6ATT1YOwILIi~}_g#zpvp2XMqr@T<=H z?@AM`pi8T#F#&-svpfA)B@LZR(!M8n4EIJL=spA7((S|&ju4kAFkNqdt;$)N{X6o7 z{M25}J5dtk%bQu|nTm`0@=N1vX=k-JRK{)~3Kuz~6X_zH7ei_9(Z4aLStXV#`nRGs z(R+_TY5-6UZW-QsCv8q21^Y(xG?r;c5D=!mEKskzwG`g1@KG9Lt^QKn3_Cv({ZN2UV51Y93HQLdqP%U 
z2r{_fWGQXdnx@|oHer2BD>4_BoPUfZ-RHpDP}zc-GN?^`oeSZ@o-C7;XRG}`3|X`3c|)&T4HkeL)-x-BjEk}gxx-d`Zyskj&@xKG^w?8d%VZs&X3u_`!NVO z@-k~bJO+XLBR`%EwUU>FQvlCLD~B23zFgAsoZ^Z4Y$vqi==>E`kZEX4GPd1zla}g^ zV%HMiL-96KMKTlbZW2Szn1kAk`7eQ9;d3EH=0M?D`C$RP5&W_bDTZ_2s=?L0gH}dz z%F4--Ix6L(Two3(fJIa!YsI{gc$lb(>i@Ie_g%N-^^TvH`R+D5F&fj;KYlkL+Nlh(qS{$>SQj ze1H56-zT+?x4rpEQk(jh;8DZ-)$hs|occ`W+fI5(&%NG*o_T#!)NPaTxG)eY!KV{_ zLyWnv{YDn}roX{CIda{?Rs{Q>l_xfA zUVmwq5yNWJ)E&Goqx>mIlT8j_%K7Mzk}tiM`B2NrLR{YyllM4Vk+xTKH819T`k-H~ zu4}HzS1G-0+euzA9`S_jdk>QnPURAL<vwlygJD-*!@)K;q}d0N^Ai=lga*rDEaa%sOq<-$hSPLk7U4%fNS` zvYAKQ(sO8pyT;;20olKaQMN|ANddwSas5{OrR7SGntDdOqb9O$W7 zX3@4pX`0uhbqne*XnD7PP%lTiDKJfwFk>)(O@@ZALeV)6wNn~m4BFj&jf+9Bwe{^< z-q5$*)z8Q0M9|#4N%eTs2{~q~K6jERaOvZhexgnpzj;`lkwo-~Qjy2OrpQNzB1@RG zT%3!$J4xQPTM6l&fnCPW-Y`o{*an1JC@5kFZ*T;T5DGtWG`M9SLm5YlHn_F8P8hP!t zVIA%yQUX3Rm^`PD$)B%@J=8lm&2%l_1yGf?AWffG@loCSe&$Tr8@m!gniBls$4K|asVzSKY8 zZ!hjiI(nznI^;s{33A5Zm_94^xTc&I1+u14pW;B|Rdj`8b)2Xn8)_F$bom zmrLW&>wqmm%I3g7Wsd4Z^~_8QNw64AX11WhUVN59KG3wjBG6Oc#Fpvkl3esv+u88F zu>2gNVDI&!efFO|%Wi94nGdL6n&wHXEAS6egJb*u+SFAHjKR9vPflq_agO^?!2#;= znVC#=`K|8(Xy+^1%LQ)wN`x_9v(estLnAaBPNIyRs)TMWj0CJjdzw}D;jJ2)KKtpy z;GHkeGi58fLH0A*PtsNrD-oVr+=782$i{PsOAn9N0!5T4N-R>6=O{5s9&CbPM)E4% z5mut9xX}JOIlTpqW*(5TqGIn~txd|d2`a@o9{E~FfDA;M-qRKx_u+B~1=;N57Pc&v zf>-^r-T)>rzu#UA&$~}%ZxDH?VnwZJX-Xt*HkH7goyjcyOR#kXRP4x5e@$VFHojc- zCdI+%$NPC~`d?5M{RqnXKQMnhTfAZ<&JrKS4$2K`@X>J*u$0qDLnJ4h-`v%vmzw0y zzocaE!$(c&)uv7Y|MmRD5<2_*z;_s1VA_gM*k?F~tjEf9)FN@=ER~z78~@nGo))XN zR(t?l{z|lciTJT7OJKIy0ga~enXFQdv5_5lto7v)pv#x0<4N{fqU4MklD>)vJheWQ z#`5)Zj?jIxq@~V?>zrr9fT)MA2wM!)yPt2hS3abE>=grAf7M8TJWs_MT@I}$&|&L! 
zury}dTh=DBG#-Gp{87>TG0N3y^yPp!(^g?_%?HBn#_pUa#XU87o0&jZ%6z(U-Ly3& z@Ap(chhn=j9tF1(c(nD@U61{}rp0l+K#CLi4`oIgS1z_hx*H_4gRPdsS8ZpWZ+&c( zz>b0|=iEGN1je)m_l7Z~s?c$p#YH{MuS@iuz{|IfvXDqBywJ$?wDfBy|y zSr7YsB$%DH`lc)(=9^4BTWEhGrR#pj57}mo$KmvXWCoTjmho{x8e_?4wR?j5bvOk> zcJgk(f7gAb*l+myY$wv+?b!mCzEH3%g?0a!7UtJpY3Vrs+gy6X12lQ?tIZaI#2Luy;wkG^Z`?>%Mk8e`-sOZ+HU6kc=z(hJ*a`HVtYfTe`|3! z_1_)2p*~e8bPzx1_HYK|y^#{qHY<;2ziakN4G!^85t!cM-TU2&=g1}6e@4r^=SEmF0P$x*pMC^1{rxxe83ej*&b zacsUXv9_IFzL2=%y5^67eEB2|5*45!;Kv$~XdeAaA}V#@5Jlp9_^V8N%PyKIfvZ9b zhNxDx1CPNG!1?@fd$JU=d~-Mn04&6xM{LA zDhLk#kN(m;(qFEgzdKubYthtkkq2jN7+8vlBDa_8^e0M*ERI0KXx4f*r@o!#o#ad0 zvHBMm21i0q8qElhP|mf|US5~r@{-|)`a%6LgM~tJbz_lK&pvVqnw~RkE!=*}xYiGA z|A}0-s*v*EQvwWn%PG#gKQw?0sjAG~F2Iw;GPMI%dF+G~(f*tLjuUNLnzgg;s(=F5 zy2htB-E)$tj4xoiaTk*QtZw*~lq`qq&|iXIL;?=iIV3g~S{FgYuXGB#;9H-5Q3vl_ zs8I(qKus?@=r@hCa(40A^a<_Ibt@qfH1~HN1e_O)D*f)@fb}dOCUV8Bbf*Uu)g~+cx-^k`g;Wq&8;Z}X zAFZuXsYOyR)Fh^>tN1%RlyzR8qBHk~kNUS_wC^ZzBkZA3KaioMm@zu_`6mr&xG=QT5v>Diz z7?#P?zOEa-?54LmeehIOKePVhXBrB=tkVA26K@R__*$6UNu)b5PyyVq z^;+k@{h`MTLc&GnYl8XWPdwXbcyhSBmAL75EK%WF9o;|LWG+q1mBb4_HAmjqpTyvK zIbV)hCqPb?*(0skwXBOBNRKC_61;e=1?n6c{4gY~zXV)Yx0)G$kbLz}OxC+b+m9ju zvOF)3=Z0Qj^0yCPJ{BD#X*)fj*U7R)zZAi9+C=R!h6i++T~6iHA9)BurX@S`6nArZ!_8vHgQ2OlvEqY69Vo+ zida^p_k=nK`C&;=mYJui{ThsrZb{UX*#09>0`!hY%}RU;A4gQ``s~!#gSRkYE(P^! 
zl|Rv=)qO##rbLnyiY3a1cO_J)N_QeTwmMt5W?`JU0y# z+R+@-2dDm>P7Dp`gNtKiBQDRhJOIUJs#|o>+-R2y>@7%xP4u6{1gGXJPr&(&k5uMa zhClYdyvskQH{+|%(J%Tprpe~{5wBbP+S&;SI(X&i^vd{v(d<`sXLn^FT|yq>_tCHW ze4yqgvoY`RNc5GGb27TSZyf$Wi2Bksiudv>J#VbWRJGn`a|^@YghC3m#Kq`MfAq4vOe~@g(8Tm&#K#B1hA1CqY~} z(ZDh2r*}Iu>0WXn$0&jgB__LSm~h4H*L7tqjwC1scY0mmRhRcILZI>2so~`8WAW6k5gG~bjC_ss z@2{iu+O+?A^M2dA^s;FEWHNI-`uOXSFMOKHoP7raey4pz9(PmJ*X}A-Y>A{%$vOoKe9c*(c^f*p{ER`M%QG?RCAkk;9D4 zM4172Voxvu7>MhSJmRDP+*;eQsgE5TC+@b#%AmRfjs@JA{7~2Pjnn*Xr*NA^_0`B; zEL+v*<9`VV&{jE_o|?lE=p5w!aB|E>Ci$F*^scml%d2gQS%{TynH=53ojDKGoq}a+ zC=F|*>n9C2^;ZQ}Hmj~Em!U)jjE*1lOM@CJ2x*kZ4_<^x>Q6o(zN~O-$3W`GBN*7B zuNpPag6H>HaT|g8)`=lxX;I@XF%$|GB5hilzM#IQXQp@V;|Uwg-yn@y!|TZ!*Ua`U z3F&a}Q=_CWUa|%I!agB8zFR_E##xW7BOp*OJ?=#4NM&%|xqX3`(Iuar zy%m2f0#Y^qTLg?7cgO=^L-h(uFedy|ZAo;a6B91$`@ix9x~0^=sy)c8dBuJ<&^kd)dghd0UI6+!q4=w{6G-B{#)9mEprhw64f$}9_hui*rvpiIe z!-)qys*NYTIGV?@`iVcI`s_DG%X88nX-=>fy@~X7^v9h9E6Lpz#0iOCEtDAFDgAgg z3lfz=Z&0aq6#bKL3F{ynMv?=hbR4)H4~;QBNR+laDh*|;TS^J`>P`JnHh}}b$2X_3 zA11V(#gyBKOdFj*gMt7A#9W2Mt@W*uZk2{NR+I%UCX8vONUxZjWwrd>ECmSCmI>ht z+vj`v)O^DU1V#Q-P|$o9z}x_MhJU zI%_?0e}MID9i8X;&`C(~*Fe>z?W?KLIl6)W2+n3jUw_lP28;{cgW@=?zawh7M7((qBxTGCa zthxfHjbhc_p_74sn02foqwTRHe18iSw_J z61G@T4}8m_^1z<>F^{JDKONO$LzBkg%`dVP>)9<^1w(-@mnW%oj921rU_~m4#-)s3 z!&5hZ>j|w(Ln~RGl_JI5aj<=KSy$McX$p^vVF8%KJ(b-j#yRz9$OWOl*FMPO@q;iF zmpM0hEbZ{HYCVG~Pa)&S8zrLv4{t<#daR+!-&h}sTX>J4GU2~=s#V)1SjR(Ol0u)i z2t%*EA~}%l8oZs(LN*wFro=JxQPPSV$DaP;xhVXGba`6kjQe zpOi^^9!M!a)z$_8Ktq3$-|_1`z!03=FqA!mK0U!D1Y)|*tSXJ(7G)hpLH3b)Jw^K; z>eWg_Y#!}qToi%wV>p2cCv0b}ity*0zaVBvd~T3hj|nn84ilL8{V}i6t57fMH-px7 zz`HLbCn{Ze!Asn%3pdd@Q*ps@rjY%(uO06fBd_a3B93av+b8Ln8>XEJ4*3tI(heTd{}(Wz_l#;+77Vea{eo z9Y9$^?{9OKrb3%0FaG-?=CC?<3f}*!rolu}YU3ASYJbaG1vGmQdHx_CD|zK9R1_6u zaeYqH(`oRAiUH78|G)>ezM&x1979N6m9K(?Q5S2g?aD;`-={r4&o8;KXYRf@4{Nyw z%z+o5nYzlS1|i<-RJ+99j)nLFvI5clEMZ4g)Yvf{OrZs8%pV#GVu8%9Kn&Q$SRaYl zM2f0Hvcij-a6<1(LupFTe=vK$gta~c&4l1n-E&>0jQ=RhVnR<3h*!}u`t?=)TPLkt 
zi*N;-p7%yrSMIIIH@$g#;8sAwU0uC)6f(YZ*sFHR7U>`9UnRQzCKl%dt1fz^h!Dkd zIFFeyK8!||Bf^hX*r!NCd8=QpxC6);?E=7OfD#y`LLQ-^R^cN^2^>8B2U5~b%_ISk z(x^HQfRwNO9P~FsZq+_gfP`w*Tl8v8-Q-S&x7vTP-DEj>jBPp+_~5s3!aR<9jkcbK zsIecTp0Y?;?N!2d1p<$1D4>&=GuMx*aSRR_AEGp=h~w^bg8KM|pp$y}DAGZi3K zpu|_aA%AKPL$IJ#J36G!*jS-J<_RZgAz5cWGj z^xhr0b<3F+aG4O-KwV1CBswv`A_7Z51!R3!zZA|~U9$OM<~Z)!JpD$K>zgjk&3;HI zm%It+D3h%I(4tpD1}lboaQ=hj6)Tz*)Ahij!&JCW8Twx?s5_QI`%o)t%9s9|>HMf& z8Wd`UC=z=qUMZ{&%USb;4`;`c?}1-8-n!ACpaubX5T54T2s~(%L2sDg2gUA1_*~jp z(}5L%ukIDMUGnzv_X0wDjCP^TCCuJv7lh`W2tv(|@srfmeLFQhO_!C{*Z1Phn>WQ@ z_RorGbcO(8WZI^blsR=VKImCS(Fa7m)ysc5MiXg(l&$*{eh^&pTl^?%@xm{7Ur+mgFR4kD-_4cYiVGa_$etB;0ul`tLpyWTBVZ}?eoQpG5PhYR%VR(p z!Axgh1Uy!K_@##4nciSb^$M$=IE(rpUq6Aga@M1sNp-S1?*`wYZB59q6G=iAXPIM4>4yTbL+FMmbe-oo;%M`#pND6;n$ zM#BY}z}9B06o=rJtl2HsB9&-W^i+TH%l*#8wHDuc5Oy#bQvZzF{Hx&F0sEUx5C}LOpF1GbDga zODixt?mdrXr*H5q3FNUc~u8%Up?UeU8+Oh{<3=^tX*o8a3 z;ao~m#tmxQ*P88K&_o`Xp-^?@P7^Q&dO0*7RjMlc4Hw)3rHw3{v1Sdr2+6&x;HNd`o&e3T8Lqd}fz_#;_|FZGn+A;i~>WUZ0m9`gcxEV7bpPuXcLD zU%vA=?xxQnNcKMyJvrSiMy)iI36g3Ec)TR2|1OEJxc|A(TV3Et$dij@T>4u$w@cs2 zzCf)S;qvLUwZ|}q`OVQRG=mF=x9E|>==SaqZx`NyszA_?q#C)APe||}o9g+rcX!K* zcPo}m-nfB5+m;9C6*-kHy=Iu}=%^5F*d_p_N%EV4=;1EQvyo1?Cimxdh`b-hxJynSsaWAOpc*`J_pT=B zaFA=AeF;>fKZi07M|}idTa&a%5hQZHQmI7A^sR0N?}3 zbMNoxD4ZULnzvht0j0EYDJlIeN#<`JkXcBsgsw$rZ2l~$;zNMc1f;nygFSKSqd7YCT(Xi|(EU$p)Qr)J~BC!8? 
zO+gx{Ol&iH9f{6TzUyC5Ovi=)_J+dpVzRQZaAvaW%U_VrRdi?7?sbPyv`1WDUmxpn z+Ak|L{{5CVYszEzE%@rRL*rt}9(;Nhfz8JiE3eS7k)-Ef=@r)OxAC%QJ;hW>^@0L& z%wDSBq`$Hg^@pnSdWA~~Kp9+ohhzUm3@eyv(}3vei>Z)heT>7M?N95iU&)vtXo?((WHzF# z6}|BVwhTwb{PZ^vu!}5npFt|e^FdrCI6_{&vRtDEuoE_Zd4zD-FAXbRKN6F+qd?Eo zs{@^^l6z`{I(f;MOz%@(;CbH9TFECK0hFBPBhdo4m3alT+>-Hp4y%C`?e}DKbb3N| zA3uJ?p?@zxhCJ|HKVOV8R9W-})$gvZcKRmnJb&)Oo@e*`dr@YX%XmBIyO}H>5iDil zJIfxf)m@=y+7&mLOUH3QT2@V3y=EJ#TRLpq`4dvr{ns?XI(Hio6z!W1UzFMNBael_0HneRvp$a!D2%)F+~G;#@^;+)HFH&oMGbOv|Kl~(z>k9 z;6@OKRv{Ptp#efdP!k9VL6JQjtEOwAkgmO4Y{9bFklt#m2#Q5i<^7WsH|MxI(h)g6 zSp{zNji(zHwJPG9QO_hPpW8RIEOB&HJ6)fCR5wEv(|mdMiMOEszXUYrmi7v^*qkCzix#!xDXXA?&r2$bKo?A3 zGhv<|p572KnPQA!Oz9UoI=}i(*b1?xa$e@!L-+m8q0aleqT};yvx#0a^wZhMk^(pJs!YUH7nv()S_XG6#58dH+u0ID<>v z`1gdJHvxOXT;5gX&@XDYh=Hot2B#&c3Lyo#Y9;_qhfopIgo8 z+k9b7T9X}qAD>}RQ&%|>Zp~Z(1>!=uPyW>-caU+o8yi0g>8J1_)JfHmC&<|D62A#6 z0{u)j^3658SQT=Cxek5#Dm1+|%+c{ehTJLLGixE`@9lJokh^9N*C&hZo2=rW+QY-c zjz6U=7>fjsUOX*bGYII3h8dXB8pQISO%dq;}QF9|d8&(?i9` zY#P{3tH1%GEPRdKLK49;;&UG zvW`1$BgP1hQVv%dosaYvtX|uS3cd}kl~ zQdqFq6RZ60^ZO@%R@8%!_Jv>4;R8RWm+64nAR3r(=Ja(2r7!XkQ|Vr2aj0jTKNgpN z5kLcEt;YyZ8UP}oi@bZjOfLW`no+`6nr-|mPUF~{?z`waqTEM*Z;-SFg|hx zAIWKl&FyVidJRWR?ofEh)!Nr1M90JE(X7j1%hAKfqqe_^e+cg1RnM0HO zk<=Oh#gf7eTfyWxzQkE-a=P=qs?b>pmb36Pklb==LP}-0j~**0MO^{a_i$Xo7VLhU2e9HWx?W+4asrED8WlQ;>WOg zJz75H2F|RpWA4*WfZ|_|5|uFY(}XqmS`_`zP}D}KzVbVY48e{oRpL1ofcAUMGo1qt z4xwNUZ2^jl6+erXU_r}*tMB8js4&Lz{Fb9OWYPOZvBj-rW7KrtUyHkvK)P7S~c!GOq4uJid-{<*qTJYb1sKVUdE6Ud6_orQW zPf(;7>uKry_4>k!D$w{*g=SvcRcRl?M#d9z2UA->$@*?e>P3I_9Gc?k&ku?Wg+Bj^ zPvLi{ul1Wz>4%*Rm#d6RMLf-X$H7q}a+#o+QCU167FAd%{T_VR7;0)#Wao-Vd+=aO zV!NqX`Rq;k7RW7=PF7wC{_c)f(Q6i&srD!rXH$(E$!jPb^2>3p5|*2;jhZ2R{gtE-AoK17{scN1fyv3v{#2|8ddPL2`9hPj+6P3 z`^5m>`OIQ)`%#<3N0(SqTsApnBthDnq1>DKZY{bHETDbLb7<8xvrtz>%17)hXh>Rg3}VXjO_!() z=$sRaio{<&{+tjY+Z66r>|H(dG-6*PN0QK#F91p5-IQWF?tv*+kpKuP8dXNqhn(~ zOE@}8_^yX5M0(-M;QT)!4v{3hBoKXPl8u7 zc?EX&d7_V@XZlMc?6Hy=Dc^wvo$*0lWRtRbu^O@k-qEOp zFXG5sFXcu@Mbce|REDX=|ez$x=M8{fQ 
z@q_4)%EJvPr@i9mWAyp${h0+^*~00-7@;(<8~y$S7$;6-_!oUVtH3g$N06KbY~en= zRFDH4T@Vmay9_>3rLzZel1TsG#LEkP`!6ckoNu9`-nJrcEb=+S zM5}o1Hvk1SZrjv&a`nZOAAchN;WJDnRLrK0x9k(MTl*blc;M@gQt_I(An4yW-+EgK z-HvzsX+&u`s@J;A2|Wb5LHg1c8d@QJ%Q!?6(V$PYY&@xdv4qc1BPkkc&IYo)uW^#t z4W?jl3fBp$h0n<=1*g;-rwwbqE3i6v-(g@krm*VUhkXZ8_%Mfrij7b!lbn*J&kpThLw+d;;~9@gcl(meMOg0liZJAGc4I-1_%?S*p;e1-V9P>{HiBc-uWmewySp0!+q;_ zE?sAh&p#>bR~-~^)9~u3=|u`mOr43nT?L+2p-ItK?*f1m$ZrxD1R9F}eFXrg*Z&8c zM*V@Y!P|m-064vcMtp(=Y<{ZV2vAM9tq{j0ThiKC^J{vJkm>vl7ID%iVPEA@zO)vP z)|vn6o}>;BlB&*lbB}86fOHD!Ctm$*$^Q5Cx8Ns;hHmQ|MwmEQqyr_+!Q^lRQxp06B!hdlX*s2ctus_Eow^8CbV(B+X|rSVVif@b@jd4n6Q_X3w6 z&aZjKMZy}}G*|qruJs0nhm-#%e~?6gbw9oO9997TS*DQ*q(7riY z0ckyuT6n_p46)}E;ISAJe}7J_Le&{Z#8=Y<{A^1G6Z$0CM+1S?lo*NGumS~|!CzpO z*>UIXGaqTIs(Lr#&)Efs-_M%TJoG8Zni6{clbC>A??7U@0m>qOCXkrGlRJRD>@kk$ z3V=95hXPY=$le4uzd>*N&A1&ZM6ZPI^VU^Tv{67|J5@EL8Hkj6xp9ej={Uh_};%Z48=&( z2q(!@iU)_cjixj@yL6$3KbBJInn#-smLNT{)Qy>ZQ0+4*XtW(J<}MQxnJMvP7ZTP1YhDkEt+zuFd-pwR<2`~FM#X=; z+C*5jFNH-OHcTTIKgy7~uIDhavZ{_dEULbb?H-SC)Sb)2sQXI0;`r8}@%qPpLD556 zy6eu^U8MqTAUw;Fz4XVAgOguysOalFX?qrfiE)<=*}{rDoV~%;bo)zG!k_H<*t)+fEZv=1-v4RN%vf^!v_z=li|0 zGWeV`7~^#~9#mB0EyvfJOz*wPy|!fKWP$dk6&G({iW~3oVDE{o|*GM>@JSR*cFr| z;`Bs%Tad2*m^E3$8MpnqyG+}NN%@4chsf$+Wjm6) zH=%Pdz%d_VOyn`f{=@9VqhV|V*-w~x?VY@PJeu|%ak3d`~xNMn*ch~`1SWyfR70TAGI9aH+r0qTdNySZrGH! 
zVA#x;Cy_W*Y15)`la>vz#a$NqTqP2joNmWpg$sz-?F}z?$xh?=wMHbJ)suxV5 zZBG4m?j<3sK1Z)wPxf4`9%3Y-JL|NRHOJco?sP`3kA**H@z>IHdX|xQuOEzh+RAafDE$y1mo@LF7IyAp(-ocSKK=ht_LgB)1zfi%5(3g7-HkNTjdTbI zBAwFR-AH$Lhe&rxhis5W8U!|*?(R5i`@ZMi@BFyWbI(uzEOqU<<{Wd(F~@{P7WHpn zPJ-<4WF`*4sjg|j87!rA@?X}{hr+4&9j~!)AZz)QeE(PdP#%75Bg|@Ed`O`I0&1XT z&FN@R9H@MLQ^$mSwxu?(F;rgDYcMPgd`}?8j)vl1O|k2G?or58VEbNx4T%6F9F<39 z4vAdn*axA`eAtWU)U*cRT3IYHxuOy@sCG6@*B^sO3eVn7l3+|9;0d*qVB2fV(g*hd zff-d-rANC2EO$!+eP1Fy!tk5vfj~|A0zs-Dai!C7v~j6L5gIbFO-QX>>d=P)A}TG7 z+2tY_a_o1U&}-A!zmJpkB5*m*r1CsZ_dqAzTPF^`Wv z9k-9f<=^gpx>`G2HKoa??W#z7=nenu9OLPC{5@PM7bkiG;lDkoDajG1B!R^cyp$yp zML69ny`Z9Mm1%cmmP!}iaNw6e6g=rF5I~S_=gEk*B&{J-sp;RE=Mg_6G|vgYzZcj` zY)%s3!ME=#9+|-C&S~MM0>TTcM}M^%2rnxjyi}@dwf_42v0d=4d0KjsX1a>hmAvu4 zP4KR>S831r`EG=+~Rn8HoNtSzC?DB4Foi1by{Rty>|nzMZ3+_*wMlsdUel+8jA7nuBzyu!<{;&4=*S%2_Y44A zq;UXCMfSrv$4nZyrsg*dtli?|fWDa+Xx%$;l&Ahy*sb@a_47@&=Ana?-aaEmTyT6z zcuF{KEbPS(j#AftY-+_y+M$!)d0X^T&W*(6 zKe46{A3gf7NRsM@yJX=iHo^wvbosJW+YTrak6?=%0s;GLVXhk6R!HP_7Zr7d_cNmy zplx!kpB$jhEDeY;m^Hk>cLj!qbFl08RG94V0rV~;uBLaqYinS6K(P7+j>k?r&blT` za;-vw$$7=$Nz`#NFziQ5u3Uon0mD~|#>=@}mPj?>L#rjiwGi?L|L zJS6maGrpix;6nJxIU5?|*yiJPwBPhj!mQ>u9#v*)A(2IvE^zjD>qu zA*u4^`<<=|&F(d{dT_F~KMHL%CX3$o;4BzDaO~HO^NDqa^Zty1IdE+$n7$GRxGXMd z`zQ6^xSouP>)3!}DxV?j{T9BG%Gbp7 z_Xl~=XA{=%AANOyZs)d>_uV=QDl8N8RD)79^nyv<2%->A(k7yCZiF4^@tD)u^!ojQ zJ9Jm=*3o@6fGv;%*OJYD1T2)8q2Ux&!HlVFDAS2vqL0?3LG=c$l0>$4Q3rNOr4iZl z-yCh>G;&vviYKQvj^_xKrNzW01N@TkPvxd%qXfc=M@3UpYK|8H3G6*??=V_8j~0EpV-^0)x$qU467wbh&J zHfM%K4Q3MqOUtI3#`Un};V$tLQ!3=5$^>j+DoPWxX%ki8nN7E(pwzdhq~-}bWa)}6 z@=!JQBWcGgUE5+5a`a0>((SyA9+zs_mX;7PVY_ynHxt$2DSOfcX8Y{A^(}=Oha_Zi~%+I2~qQ2l8+X0_>2u8U2h}l2L18$0G%s}t{awsfIKbKXQ~(6{{eBhGo5E>2PQe(UC-Ojt_C*Iu z+}ub>TUCh@e%W~Ib2V%3adD-!Uw2@a9z0v2A@=~T@V9WE{I)dlK@yq4d;bL4@2RJ-u00PCsf` zr$-bg_WXYP$kO_9fbG*g_sB%}KCWdxMnan3vET1$y;h4xNBd2}-Gl#};ga}bE}*6+ z;}q#bLW1VK7Q8L+X|~I)Y+laW!;%P+{*y&0C$CdPm7y67L~;l{z{&%@U(&^2M`=B| zB9iEwr%${plQf&Oe8Q@<7lU!xzFgdlYoY`kL`4Q9KpO-VDZHzE0L3|gPJjLs0Gjy& 
zt4^8tqWPY}5Btw~{w%*l?c0?8YVeTxn!J&YSy5lYeI_lc^jljL0FT{x2Z_omTrvvpS zGw#}c|8EF-Z|~uAn4EqxZPD>`b;TX$Kbf)98^L#lroMTCt6#WK1whj3eO2qX!S)0dWSvHW=O=dTg&;HIwB%H7$8_;-DQ>20%1#-^pLbd%{?Xo>2 z@mPsd(A-h92!@;YN=n`v7-+|&?Z_D9DzFT|6Pp3c3Ai1j5Z@5yw5(ldIhE-H%IWmI zM+YC1>p>21Kr@CREsYuKd?hmtjD`EoZi9usP;5wSFDZ}IMWlw|14{~wTphhgk`#s} zWTF&pm*54)vh%`TG+ zJgp^{;{bLE%f^E;1g`>tPi z&PRzOv1{Z1InLgm^S&i}-QO`xc`#CLUf85VMIrJXbcaT)5SErZnSQDdp}NRhD!U+=0#lW@oj z%&r{v%axMQ+8(G#H{K_nEdX>qwJ-~*eht2|=!IrjJ?6sY0Eh15cYWt86DQQh4}PBTvY3#HKwTI^V=yRH-6TK+md{9Q1<=`4YKt)Ix`u9*2EHUM9g^@63% zA*TEPfPg3Syor8FoY+qX>$9*lnqH4#SM3k+b;cg_nlYV~qE0d;^5aFdpYUM(yj-X= zo9|27Nzmz~q?%n-#SfJt@I&a7i&o|qzKn2lz>-oK7H=8D1;TU=4JRl`U$$(V8NW#| zYJu*E|H%!yvpzUGnP3$3!E4B7e-Nu+b1)-2K~ZZPHGv*sy-tL(`Kh1-AV(QYWZ&s3 zbG22PK=0dLV*^S?oD|UKf%DV1fu}q4yakR)LT!NhyzZn_x5yKdoiPq$yxH)#oYatu zw#orA#h6iVVGLTUseXuO&ryB}kXQmeuZv-$k9Wj{7pLp9@zfGVC%oGUW)3Ur+vKmmv*hYGmrBCGuUahR`r8QHSofX^S^gqG6c;Bz zkLW4bx+iFSNZKEAl5C;j>3h?p{5A5d0%*+N|l@ff?F-v$La`$={zqzoJCg*#MZpVis_ns*kQZ`cU z$3Ij)4@Vl{(j+gPe=v;&-s_kje`DtJxSA;wthe1t>j!|0XJFWk)d2v}C1L8T0jXjR1--H*pk?Ze8+s-~v2c zMTf8O(Ln*}$hJ+|Xj3GX83=6Qv(l?KJvo}ShFw2J9o7wn1WiMKcAF}2 zu;#c_|E#gecG)OvU4I-&*7JGztKo5*@KEe4{QDEi_n7Q#U9~S#7{zpSvPg&;b@J|w zGmD*5)x1k}+1Vt><|udYDQRg+!nB3|+Y10(l4h`58vz^DFd;zROHn2NJUC)ub@d_K ztsenjH?(6#cW-HrGgrdZ^-JSO;(7Xi5*yO_7NI>guZBLi$|V*SSQIlJE>0Tf()(oe zbB?5NE?u@)=AjUgU)8A`j>qiWTrjzMOGAURz@d&?d(Ln7!mcj;P7y60%^G7iOQ4@< z)Kq@z0@O6@ek}wdKZrBD?boq4I|DqYAHPcTpeq6$O-4LvX-ftX+2fMoCOP zAjQIP*Mv7>kXE#cM5|^N;qpoHBL=XLO!P@r)>$b!-~-T;o?C$h&ex~z7K}eJ5@qSc zQ%dolL(XrS^IKUhFfYC4QK``zy>4jkSAGdqW5nvppF`Py??vD)o&3m^kE8nUXc^#; z4aQ8{xTH8V{Y?Y@9ylSO&nUJFSNHLNlG`WwdGI=A;pSuYR3#EY2Iz!e2;%`=p$K?9LLon{nQF`O?5LQ7UDSJPZs1Lh9kG|xw_-NFnqV)E13gOLJ2v% zKoY6F(fD&$`R?SClWo2%yFS>y^pdrou*|;nj<%egGqIKLxGkFUuAImD_is5BHCDb& zGjQ|^8;9&GzW&R1IY~Y2=|ErRD$1%K4vf&XKP%7!A}+y`#wfKx`|g(@wys;X&z=!( z|0%jZYip4vgdZXq%Tp@U*F%YGKHU`AuDBiOEm%+Z&VF`(d?3F)YRQ{@>PHvEPGapO z0v4?Rws4SpK7=k3snnjMw;0feNYZQ~lK(`EDm!7^t88HH8TdRNPx8_K{!Zp`=NOaz 
z-8_xgxEHAovNfaU+$0=oJi+5gv{OGrmnA zM({2%x*SF*pGI-gypSX0@qaf_44kLmSA|J%AM{fm<-*~VF8TQf^$!il zH?2_DTMSlIv}+i6p8)Ai!I#pw@Bi&L3! zAB1?iN%E@Vw?>7?BJfW*V2?1_+2af{^Fsch)JOc<3OCu%`E#Mebmo@QM*Ze}oXmA! zvm35s^N~jf`P26}0a_VtVSd@9fUg<5r6KzWLbI?t1(@e?_MSHSN2{sL$-)*NDIspn``x%bHJI25DX&Z;O*v2KFmQN5df-LGM22 z^?>q5RcdgFsZ?9$-XA~S_yPk^;Tb1<*85jCt2g$R27EEffWFU5sWR2`_ki&X($MGO zcdnRHq8igWCmpf#26+XCnT+my*?cb@Q3-MjQJ4KMT}2EhqL=%G*MDzbYH#%G8=wbP zH$SqGxbufV*Q%GG1pP>6R!xZgvH^Jd*5hDMQ2ZS=9>^#p4?wOkgCt*io9RC8KNs+Cu; zB={^WZoHor?(-B)RfoA0u2g8ZlNkrkZ2X59?2Fd#c743`$gt8|XHS3hZs*rw7g*k% zCdumzNj)Cyzh2Q}uF1AmKRfjhQgxa-WV8iAB;v*-r&bBSh6Y>;bJYiT{Z%M37zV+>o7E8$|hxD zIb!-euVf|5a>9O~$dH zeCJ6OuNfr^i(aSCpu(Ti-ck3Gr;G7Uq|$7kP_OFpa#bf2T*Lt@eDV%nWj#kR5*+lM zzB8dk>;uSwL}mr#~%=6tCa6@8PsjqKVHlo%;E|a>Is<)eGuTJIAAz*V?k;b8V(g7rX=qIg`>J6NwtBYm^jRoDbNdm0W1 z0FD_MWFoNT(k*KMVw2*8Z?|klV>ZDqgu09*1dtsOMU*yTuVWlSwr6vixAKJeDq zzSMtFt4nx*k>`q8?+CDD^v6t!wCZ)dlF}#Q4w398D}}I$uc0`A4Z5n8HGLcp+1l|m zi3yskoD!HSUWqyBm-L$99;wltHSQ11C16?67+GX~))pRh1lnV#q5`@&mw66dE+sx+ zlpQNtWO^{~n%5UVmuTDa7oBSQbp3+_b!=1)pf%~Z>0S0nBk31Z(Ed#T)f9ESIED{p z^<{kFYi7~8F4g|)|1R_F`t7V{F#^gCr0d}%8JNsJT?@kS>ib7E5@n|iVruBR2JYqL zgMgtcHM*#_uXG*bT~LVV`pud@9ioV*tJ9UqL1;g?M@jIAQ3_cjNFj3VGZy%%raQH{ zGyCS>T6WQ1Q#d>MF%6MOZy{5la@UzU$*DhLMcQ8gYQL@W>uU6$jws#eaAr&J`D1lp zod5qC_iuJQC=1dl2T? 
zy}iCt`pwv-e2kO14|)VXEP!4)u-H?p!%M`1zqM*2tXDNeeo8lzX`&CXal&M?v=|(i zj|7}I1_$a7)Cqm_XRmN}-4QNY{WP8AhJht<4`%NGU^v{^R->Pta&DXeWKdYsYHEHc z-(8XoWYB?4#~uy*o*;jF3*3mhdmtRYmc7%}n7{=7Qif~XGt`ntn_tSQm4jC7EM(?MEl5WW(Nc19j09H z(Ire^p=T)1?YO9NP&!P$$)GQF*T%$Klt}VxEE9N4qP4vSU{X9G+t1pv-7nt_p^{Z+ zPP$d6?&^Kb5HP`ZlTuVyEs{FxtQReYfyvGag#y+bnZLRcGcrPKTS#Mqf?jRjYQ+J( zuMtH0m*4c2b0kw^G6$eRvRSAVu?n66hImee4brancnJ|Li)O@?S`FGUPAi>rL^tK& zewmF06DMkTPcZf+3HLBsW%(rUNM4jq5Wyp0W z&kgf!kxNSLRiyt`8gAn{R>`8NBR2w{&Xr-AKRb`f8F$0or|b8g5@>S-Eun2rlrb=O zo0-)B#=HiUz>*U+kjCngegc|J2O(CUkE+_A&fMXieY(`Jk$@nXXKSOo0Q`!`x_!l2 z(TOt9+4vLN-hK7xWD7Vjx9=@?YObMyjXM#TjBQ6}%AdWiJXL|gH3C0ac(UZC8&JQ) zy=c8_3(2E!gO+Q1meF4Y{Ck+sJxWdvR;P##JE5w~R+oe5Bsta_XC6qCNbT~7kh4*v zG6HW)R%RN1tTw?^IiE1xad65Aaoxt26`*obIBlyYgZ_(vR%gEOW3Z#=yz;QdZj6|| zz6iR=Ejuc0>2hwd>%ly@qDDl@)YMejPT{9@6Vz@gsd{Ahyr**hXU~EWB~&Mw4&4^M z$fW?V4=D@uV;#xwuV+7;)U}jD!xGU<|G@K7Lu75HE~l}4gq}%i5$9xnh!Ig@J&*_w z_*@rJ8LIj6yU)91cPgOwroz?>_bDrU6S}wM7tyg?jldI{d}9g7zX0R;ukejPpIMFD zd%OnS(cq@*`)4Dg_|MvqJN#LW)+33wmz~zBKpsjUQ7M2ekYN{a`O#9Jr`WS$N z2^p--xR!E4`!)Z@!94!o$%?-hBv$OQ=kg5=+P@ z_yjK*fejzc;N`%-)9Rk(jLzA>*#N!NlDl`jms|Un^N(;>=<@z2J-w$dvsv>sU0x?E zL-!twgj(zn90ZaNf>Q$yC|)S$Kl>xPFC~`!!z{Zv7hD?6#o(hEL2~4AD_W|(UD$Ul z?_@eK)48pAvO5g1W(qW1vJ^9@H?O?-?Z>^ZkHshUWFTKsk{LBBx}+fjB|jzl3_=G1 zo%Fmwb%^%VEna-S3nS9xZEB(PfWteom8oonr~(^D^kYtEUWb8G2{w1Jok6VQ1@KVM zBzx~C_LX6pC&xCu-Px56%BMtlVqu;@B!XGz$-O@$4i91SJ%Ov0KBEvUC5IfR`^j3OAi6D@8(j^5yx!`bXsq0a%)e(pbGp_kG%l z&x)}SdVKtpqn@av$)*tcw~4;AbE{H!FKJlpB0UkeRF23D#*iU6zYvKNRr|`Kb(B3 z`_bdZ7l;5bj< zQ_{tfnfBgIAvFYuEwCu3o-`f=HX zu9~fe7zWtcE#Sf7U<*|5mum>S$e{W7s7q)iycz@DF_i4}5ISxZ4I>jnvXJUwNRm7h zCg8}%8d$tOPpio18hQOBi#h>99)-)XYu!jeKh7k4i7&*p;ZB5nV77?EoP>Lo2=U4% zXhZGXY&|VjyLA9QfJVBK-~bV%c%gYjkZj2=5c*$YYue8|V8rWd;PdlI#VHt}-+JD& zF2cu&mi_<`Mw^8J|I$%lP4^7htRNu=4iKmn9oH$03@%HCFt+Ax=qQ(Mj}EyAISG0U)o3F=`Z3v zK7jwyyoGL7%~;uBSzN%JBMXHjL`N)0RUELJ0AjhvVGQn1UHW`9$yG?sLPu zFt2H{iQ`WgaP8X9wMJ;rTIQ1Tzuz%j>Hpt%oMx1sYaf!*GPAz#fr8Hyqf^!8jf5`J 
zA12zQ$*ddxanV+e|I$Nf{pKO@+{TN65)%bB`t5i$t@MJBF`u2!V{6;Ninyk*a285f zmhj%yZauzEzB^al>rb5ylu}VLiJPtZ!jZoz;e&ugf$Mk6)DjgEOs(B;I>ZjwH39$Qk$6hi3huX>hmqUIb24-efN#n#hNEv z*Q^yn7`23(h7-DsjLcbY1osThj^bXKnG6NXSWxaYC)yd2@vh|>@1Et$(yZrcgs0~^ zsP|Z`e`hG2yxO|~E!O3X>=&8}n1Jy^BpwFd@EL&snkg-Op=7|ON3xjZ==U@d{Bomf z4D-YtaRpU*BtShr?EF-Vs*wmJXxuH2{`StvQ#xe^ng{t-eaU&GSEkSe@^W%XayuFX zyV~_t;^B=~TGw-x8{dkhQWzc$=gY1nH0Ry_E@$%iT76PLdG*XO}~ryLbBOo@=nD+_A)OPPSp*5~x!3Nr`a zv>4_O$;W9XLQ;!?Qi1;}ap!)cOf3_5(^s3fd?daN#M*15+lJH+k*X&J&cR|P0N4Yu z>2C&)D6{naz6ch_@XGAm$to_#H2j1wBz zE*5WE;AU&xJYyF5-K zCkO7&_;7!ogG(xRF5g$42TQqrIYNyV{OiZYux&x9r!=>ixBx6@H3UDz{9nB5NbW`U zgGX$vKv&pnkw33H+daRv{5^)jqhnmmN?*x5{5g4n9B6?#G&Cfs{FrJ{KgbrgSG)#8ACk#x$dPAMa#q7UB3=3hA^YO3nczjfLJ?mYpNpbH7(BBiFacVYBTyfRnu6owb~TG64EZC5e!AS!e38VZ6Axq+qhs z19nRQz}vQQFL$+~hGLD~NhrwvFV-+6P9ADp%1MWwQkQ$%>F=u~>z9jdm@A6GoFoVE#=S8kn@SCaS%L2VNYw_=R2@~SYUwq6 zp_UA3&$S5*lrS+++2?hM#SF)LMg9AXHf7IY!t?#Z?{N_iD<*PEiY0eWd67hjk3ywh z5(;@>iA1h|OBcvh3WbB13c-tR!3dql5 zrRKY#{|(`+`NQj!Q+pW=?XY+{nFe4S0E>j8%3-8UE4oas*-F(u}um}OM z{s(Y+qCsG7kKlN_9)}JJMm#b_Q5>i8Ious}>BU2oAfre|PfjkS^G0;Nd zpLKId42iAxSmQ^wG)38egCi>~dKsp_yT~bik%RmphpZ4E`fU`)h1u&h0L$gwqzA=MM{L493IZFWBGY>1k)rCOAKnveUEjHjl7>d&U1s#F&JwLTcHAcxt+tj zZInP72!{TCJma3iqNPOV2?VPr7sghBB5bjD5@UvgS`5H9l9s&iMeXuTiWm1A_55ob zKKx9JueW@x;tLlIb_7;#KwN)R4Sr8g418!%+;Q{f|Ke>k*;TzZcN8G{Flo*g`rdfp z)RmVkZ#3n#5`Cx|XV11%+oWv2@^_+q0W}2R5;UxO78;40%z#hC5O7k)kVN8Z{m~Yi z#s|w(wEL+875z}$ez4d20b3J&A<<9KZ*N)1R1+aT-B}ppG1oCE_{m;k^8vVFW%WPc zMic-ye9zPv71=y>y&SLcDLIj zq;;LyADog%c8E7J6n-0N2a>)PTKG<2cH*%{;IoC$YbNdH^vy-1JQVpK9Cx-;3?Rt0 zZt9^(bnA9#MR-krv}+cWxyiStjoluZD3Y6lFw&)gn(Bg5T0vhQ!@~3YU>A24sz77HD034jNp#|=K5 zfVCgmqogiS;TWv2NLp` zJp&XufByeS$H|Eho&b)<8k^f!z)QXV_fkJVU+Nvk7cp+>sI1e{Km<}d4EKq=rvUw6;RV*_GvozMuGbo(Ae{bTCA;`>`B`?_e+^2eKKexi z348E(V*QlmGz`%Yo8S^`j>rY!Z@JhD35l;7Dw7~*#(Kl4C8}&3MQn~0!B@-2`$1C) zMxSF%%P(J8p-(A}tGJN^Vs>UNx*8L9Tk1bpxx-eOTUqZPRwn$PSQ$w|$Bn4+-j7rU z)!gV$JR&+Zsd4SqD-p@>2pJoMgoK5Hi4B5*XDhnZQ=3BBx$;OKu>!fWWxrCI#ngHV 
z9gA7?0i`aO+jVhW{KS=?0Iy|Y>l>TMJZ!(Q^bboLR|+WKt?_b+k@v<+?Z{3DvH#TXCc4F8 z#ksPwpP^7f>G3%eTQg@T7>g!YaB3KZI`n0&PTk@%f#2WfaW*SDjRs^KpI0d1twwqQ9P!fhK+J zS`^frmw<#G2tysp`Ax3ArH%vR)~5q>nkABsu`@o7fI%eEaEQ0-e#gOVhw?pwGk8A) z@tron(_HNj?Kl??W&uNi{QJy*Mpw>RwPVCce?pzq?P}jj9_C`zOI!-Sy06F?`^TrU|I_b_x zyr}U#i0H2|Kq@f+#|cVN@s>FGVL1e7L$FN3{GT1DU1Ah%edut^6Cp5K2za*(OGFO& z;Z4-^T%=ib4YO!9B8$D;Xp6&1ixPW_WdYCvnUf-Q8W?1Ay8ie%boPyeg=qJ+2O`N; zI@>keup@$CTabu;BL(rcUm1nvz-fUQ+u@5R#+du}pid9vK8XMF^Q>TD#u3xLy_5z( zBmox*phVK~b?H(m-9Oy%=6~XjU?Tyd4voJ8Xd2~3=EST5tMD#Iq@i%#2OCaWRXh%S zB+y_#km=6`(F{0be-F77LMaxkV&_?h>kcH%DJ?)D5oATLE!JtpobLiVj*ld)R@sd8 z4W}>)n&6c{8rVs!yYY4P{2ZFjBuzW>%!|*XjTs0fzVg{mc7ZE>`0SNfJrKUOE+ZOv z7od4bzmxn5;!Gx~3o80Xjge%ct?j<-y&JN`HVP(soJ=I9#dAdT_0=>J z354;Rwl>k1=MIX7Zv0)?M@*>!z6i)6?TFxY&c7>6oDcGQ^)~6gj+D~Y#>Fa!aihOZ zi%Y>UtRAsa&ROY(Pq;M=%t^#8#J5SWMRYeu!Xby`%jBTROA+1E5vO77a?g z|Bj0;O1-}&`hzj30mX?=yda7C?LBz9e{Av_nfVwXB2UZGi*v{_o`$UmeSA(@V~X@ zs#aWm_2d5Czb`r9|NN3I*u*7<$7E4ul))>2X!m`S9Z^6L+m%zB4G;0c(QAs(ho|+k zRw-`YIlFNe!I|tF3@uaH%n{`@HM_6bSSTPqWuusvLqxW$Muq9rT>HQcDXmXkTwxVV z*6w%P&JS(F2Xkl=>sC`d?G~=Eqg_AX<(0-0L_-DLZsK|q=6~k<``bj1AV=>A>|D@> zb67j zc51^N;}74+-1(?BIyfQ@pD}nxc{OuAW>h>IlBwmSBJOE@Xrt=tKOqf!HZJ<~g6HM3 zC#%o5C^?_Ik^1Hf**(#rUxym+?@ZRqa61w3r#m?tRp2s@GsZ#LWn@>F9At+X#n}j%BjP&ABM!6 z*23VO79(bV!U<_FS3ZG<{3G&&$XYODSef5!-stA+I%n7oCx^t2^qcQ)!Q_u?-0#%e z6OLkAcMxF+g;rr_ilYK^G5~PJyr}$5uGZOyoDZ1K_64_Ez z`k#ct&+or=%Oq(@3wE+E{TQoe_S1g$L0rgIePE25Txjr4j`~44Zk_DwU4YSsnH2BB zXh11Pn@+wRyUEOadnmG~ptd^4Y;w67y3t>*$6}pFTOa!OA+LneBXmVM$QZ8uzarK( z6kV`CHui>O|G=TMXZ+ya8Ur0(Y1mnDE^8#Og^vSto6LV8#G`M(D#aC`9IHl*GF&;8#TVjv|SuQ*P|$rz(lKByXox&MoYWV+Mgr#Ih==Jb)s`fnywClR!PBY zhr52ar=Ntc<0MZ)itbl*)1Sd#BX%kHPJm;)xg}vZyUY0R+cVs;&6LU>hB8 zGcxS{iymG%RPxhXaAcZJ0a_<0!Ca#I$8ypMQKvH5-x{ z=>GQTl8h|S*xv11yRe8&R9^_Xrh{e5Zp8gbb$gizGe@>mil&DY|E4nNlZ_Eiv!U|? 
zM3H@oMEF}6i>kKgawBH_-M9fy|76{+EtgSw$pI}uC+Nfz`00P(kzp_)>qqqUYdw@j zX*9BG29VW~hCH*wevE5U0{MbGaV~-1u2vv{lADufclCpb2&?wHNXU4zLlZ?Q`Mq{1orpt@`yq zz2@xn6rsrw+zhVgrS9GTAz-xm$%O6A43qY#N?z`NFeRSx#HDjFzC zBi+CEQKioSuEi?(urqB2m#{hll%g-5fiSzUF1 zp%#PZr520MBWxkjdRWAP?|n~X(zoH&0m_3Ky&S~6>@<{J8?a-`5z7%&C7cbKLwTEUs6S z6K|S}ui;~!=|G%TZ-GmEVjF-ACtyOw&uYo|7vnM!BK~XiKb^oYf-272mV=WS46F3UY%;5eA@)nw?R*b^Q{rl`V8G$^fB3s#&t979nI2Pg4nu6UO@wdbj$nXyTU9O{o8x`PcjOB!>`e4lICn!x<5~s|FB#24NzDv2v8N*5X3^XDbz=}s z*3AWLEZc`oW12)+Zu@r$nWz-2uGUeq9eOvDhAK<)?(J4K_fA*bwYim?FSOe}SsD+t z1!rftY5GXH=7+09r#2peSU8eq;1G5@jAfy%krflPHM&=I3B+WFTHFJiE8Pi<+EcQm z*g-3vSiu8?#zPEL9Nu=g3f5riDaq)klOpI)*Te zMZKdB`9ege8W}ehaN^?(6^UIl!9r>9{H>AF89JbS_qW?NF!0+{Q(&Tu{dEVJhxc{` z68|~I#FB<4ig(-7us;V4Oph|zzY$4nO+jt2MBnmKi7ZW2$T=CFml5tOh}~}s;I=5? zQ2&Q$AwGXUvfC+wl)xkQ;8o0OzbZ5$ui>KE$w@=1pCQ!~6#OCxd@7IGk1_8V(g{}& z^otYu`D3GeR&pFk1m(&rGUscmJ9nqd7UGf+l#+iHU`egEcaqWd@#7ENX!#j)n-ppg zoHgT6%b@MomuCHTtJ&^|$#^>18bN5H&BBgKkEerebzZLzp(G7B)F(xGp>`9; z5{x^|yi}oRuSSUc)Et64m1M;3U-aHLn&2VkSP6G}e4Pv-XBpkX3(7Bt*S{oqw$Q=o z;5HmZK$JI@laZ{B+hPk)SFPYMpnto#O^Yep6h?G~{>@?Ft44BT2P z0}GjnYDW7Jc1(;)2$ZiVo?E~!UH7z(A=qURH z0eDkP^qJmq)c<7J`v6*xzy@{Dl*lKEqhUdErKXr@blRavDj+N_t0Tlb^~)9@Ct~`WBgUSA7wp7X@unah#*o zD(b+l_GAhH_Gw=d$|TMkAex0Z9+&2q^SeNNufJOm$G>HPNrX&Yry7dTPI&ZIJd#}) zj)`-BHM{Sa2L3by|{g9y?G3tP&+;T-x0hJybm7$We^WF{o^Mf=8| z^hG0arcDijF(n~BV$$V-Z}6EA+e(aG;_SR8VcWw8tih?xKjYx-dcc*}~ba!_* zdn4Ul($d|Xn@*9G?(UBB#ruBW-}9U?&Kc+Y4}(EmtaZ)#nRCIlHeZBRXMEA;=-v+V z*c1BBb+|BLd@Y3>=0Wk%B>@UkOB+LfEPCe%gpDf3E7?y#|rlTUF_YG%QLb zvV2A0cRrJ_+5N8fHqEQoZ{7VZEJ&5W0(8f?zV;IufD59%jAF1yMNEeJysKi1op5=G zg0V-f|M*8EEFLNnp%qFYn83TS{VDkT#<;-^?hLzZ$*$M=tdzIN`)v~?GqdRh?b2k_(>wmAw{h$uxSfZLhZ{IvI z!Oop^*uE;c>*tSFBMiLMO{fS0Rww$R=>E#!@pKXDJ8>@Fug}ix7erSbfi`dO zK`NA7bFj?(DyU5I6zKDf7AQ!(GoSJI66jir@Xz+fn{p~Dl^-EXd;Wq5B!@!XJS1Kw z=nEfBnxKq5b0wy+f7M}72$SD~g(?@RjK6X{M&QH@r9q0g@yNER$432zFFr7C^y3Im}m+$uK;6XLF2|V8gqoD{1}w)gY0YLOW|IydKW*5p~i6k 
zprFh-Dx^v1qvfOTP_>6!icc>VnRrf@AU39ZL*OIhRvA%^=@e0%2cHmj6d!eo8?#;+ zUir;oAKwWd6- z)Ue0)0z1R>6C58s;vvo>WU=CThF6jzyW&apUjAbKT5#82(5@XH?@DmJxmCg#(r=Kn z9u=|F3(sgSGm;D<4m4hE`-WKqe;p{DEPmdY80Rb6l+U~PIb`N6pHsP=?Drp~F*mY= z<$lbt&*C2Gt_IrBGs{aK`QA-$Z**Na_=})BS2{6YH7DMBQw;F9RjiybQSwOQ@)oWx z@7O)k-??k5y4NK*yv$Y{MhB?3(U)96x94Qy)b8eYB^$?{yo>vfcLAGduTPf}e>+ zrT3jwmCDP=PCQ+3`X8v8ufsb`A=rk>vsp)J*-@08w9C=#Tt6OpjZj&e2}csZ{0khwn!aVai@_nNlOP;w%kE{hpR+M~Abj@VS1;{SV+PM)+9eFRRCV}@! z_^(UvaKkNPU0xM=OMQAVvw!@jnJtQKJd@WQW25XKhNAb*>;8AA?)NVmT&UUoE1v~a z+ESvS;#CuvCoiHC-9K9S%iu|{Tgc+lHddiBg%V7{$BMfLK@FuC2jnfo;o|im8$p9w z*vK}M#YMGmQecafdWBTOt-d&Mj6MWy;?e<$S)bZmY9qME`LchILp0ctB!X9qtMvMAB|m2W zNcR6ga39%-Ac&x<-tg%t_Am`GOC$NcA8D+ps`5{FToaMlfU&^6oV7Ew*7hCT&ATf= z^3Twzp;0NY)q|#Os8O(&+5HoD!0!bS(>2Z?vSWBiQK5rZXYeh8gfmVES;Bn_T98N~Zuf(e#i0I_mc#1XRz5&^(RnW$&AQnN zyhgKnP?mqqxVFAO7}_r^9iJg-q|f0o#Vc1U^!(aMu10|+SKR(n){QyOfzsMpL6waJ zX9UxO5GlA?*~4-*0~zavXMP^a2I(YPuHYvoedJCP-ycN&VL<7aM7Dgz6V7Cl`DJUhD2!8w5EmhI zXX`(Xs^gNFp>Jxoo^W34b8VAuW{?kf4CdTc^p>R_*}RV$B1?|-)ct4mC}#jpum6u) zZW1$8@ahgI`8u$x1uQ23x$1lsJVOa>!&s=S^cZ-=q_9v#6bL>wir?Xnh;@^rl-q)h zH+7Azg&EL3)WfpT>8lm{!lq*?p^!` zH^%JRMniYy2gOEr-(i&@tcVIu_-rC%%#AJEW--G<3wk}e4oizC47aIg6Z zuum=hPZvNy`=MVYpfV(4Z~!!qa-p2RrK4^5tMMnbKz)(GrV#2H}}#ze`k{D zReNLo3ue+pN~={&Nor6F|BV8-qgC{W=_rZre)2V|hChhh-jmdPy@$-rzs%U>;BPbL z9(18xzn};X11m^KjOUFe)v*~H0j#k#DSDY`?X4Jz3x^UW86A28^GQLY3{wlzJS|ZfAx>C*oKQ`?(Ki=JMz9Ecs_}j6LGsoogGQI zdp|&O8iJ*%9Wr51MOI#Be2>9I^;sR?wCA=Dg0UrPN4&l3vZiUyMO~DAxH9x}$Xext zv+LN|ez8!R1t^q>mRjA(<%*1>zTpd3eZw&)soSvvxm&eA_2@8IEt4`nZtfv^tf)((N9dKv$%*DqbD`BC2!tH_ZaWBfQ?POk0=!V?)gSP?ZjUfT`n%RKRl&ua}{3aqz!*QBvmfRC$2DJ&7I zrYh#EW7o}+MXl+6!%7M+&%|ZqxhCbNZPEljG|5I!8x|rgc@+bLw*tUOo_K=voOHh? 
zhyN0bDq(5lieh4A=`7+*`%-6`^O(R0x$b!70t-GF=jaD{He$wyJZ&UvV;w4fk{}Xr zH5x2gHW2t=(d^mw7nZvukN@7a=~|E6l+q9ZzWG$hV4 z{P_?BW-r)ER#AvTf|35MBH(OfEFrlHU|hpeJ+`%=yMkpJvL(t7-O2S`y)#N+S9Sh7 z6|9;^$W7=G4mogtTNa?N&ZO@KU4P^E!}j*_;ywT>Cangv*%4<2G}H&aL$2>XaYkiLlDGB6lBNW2KG8>fgjXxwbrCRe2EYmM+23L_7C ztm!Ba;rJ5a2}^d+`Y#{p&jB4kg}~F2`=@p);Xj~Z1-I9>g@9R zd4;0&wKOG>b#4Q6Sq!&IJ-e^*0yVM>L17TpJcHPT*%O1s0FG}oM*m20q#f`8}5o;bt+ zZPQNF$1}j%SNI{%19=_pgd&G|k!gKk*Jax8Gs!QsYrpM8r4x`2{!ESmWkJkMASnE+@u`id>$E ztE$d^-CZeZV}2mUHsLXEH3=(wMVLxt>RYMpC2!hzc4U&295HkOKnWJWfH zB>C^s1pj}QrWAaeKVdy5SfV{A)0_;C>$jRacY&z#v+6t#v;;h>evj=!`mTT>E@gb9+g_X z#Yfc`^f`a|wBnaR!{nQpPGhVC&l*;9?&w@5Ne+f={B}6W)l5-^tjsyps1w$Z4C8Xn z-$+3`cb-_}sKzntEnY^u_5Ot2$vc*sGeK-4d}UNGZXt;QF}S)p-V#15Pil-b1e;G! zFmqz(mdKGQlCGWc*Lms1NUk`Hwg`k;Lztjg!a*`}SPX$Z7hhcArWgCMGsvEYAl2s= zKavZJb65VvhL~z}oui}pUu%%~jzi<;EiBNX;Np??=u2U`{^@Hr5y^n5c6KfMN-Jp}-OlojH-xnJ zWh^lh9`!RYn1Yqxd1-7);TI=eEZ<%|#YEOoMQ(MAMl~RlNCe&)!!BkX5D34Jc*@Nn zK$k0`7sY!2I`&`E;|xh)T2Ausl%fCElnH0hehuxh-GT*aCHap2k=SzwQ}_Uo$3=gv zXXL6kDcB>%kn+Qd0*TXsR~^RPm57{r`5gmp#wa3U@?s^Fq>E2n%u%2T1<6S2wl7F0 z)_u?T@;iFdNJ1)fv2`M@jAko9Nj(`4SWwDH5k8BvvpgFxM9B@9=4{on*~WG{Gcl!O zgBAYt$Nk(330`GSi$^9(kA){>@if-sXn!3^RQBShASVS_()We^)#lb-9*q#u$Fvv6 zyAJA!*kpaX(N-qY>wDN1$LIyqF5>0YgR)mN{Nu z%bE#*_uKNu!JZ`ZH2_&IE`7oE{m1k+hHr(>#6&5l@(s%#NT(br~ubL^CW}+jhA}>+1 z3Ap{pN|DnKcl0Eu>2}u_Q4E&{clZ1PZzL@rxU#3n8+Wh9iFbs@EwhtN?cHd3wq8>H(r6{UMD8GWmEg>mdCA1D1g~sR*-+cjj`E< zjoQUrs)BuXySgu~{sTj?+KLo1(WYSBy`#pO28CfvX_9|-Wd+qB!9@ufkfl4Es{}OX zKOUKa8h1-OR=cMk!6quRA8qoX((;h{silKxzr3Sw2up&rq&DAsj(Zrsoo7c9$DmRQ zK@ceEIVtQn0#@BDLcF8So!o%Fm{S0zBckeurblwE>oCC%UaerP&CT1b8nG zVvY$Y4E+CMY3<14c)5gsnW*>w#YAO&hP)|m;}53*J2-d2?#y%JCz&!KFyd2u<2l+G z*_nf%pxGO7`m}MQWz6DsT({2XhAL0;-BtXt;xqyrWkR|yatJxooreh#B5dC_W9d3j zWba}K=)<`N%MM$6w0sc5^U`!P+eNEzv|{Cu&J9KCfhHsyd-n(t*Be7SBz9st3;se1 zc33iw(UgpYKzZ{=K5K1bcJ1xXF@#_qrvZ#_*c2kH30J#ttx0onw6@8gnxy4y_4As> zE+e})nc*2;0hvX(ly3G^@!XZn@L1MfbLaJ`BtRt!9sZ9Oc~bbn(C`Kqx*RtPSZ@Lj 
z1^e6C-9Vix=#jLXSYWScDBqtheWiyy2*Ut9M$qI1PPQ7Xb^HzPj z@marLLekPOT!Mgv5L0l0InhvjV_~1ZB7|zkCKie(Ahg;G zld79^-KO`IG`?8W-gC0;jJGB>2Mq-4nBVch<4AEjT6@#0=yNpF{FXp6CIRCCUpUDA zAuR2SZO)?e+M7&4&DrzxbpqR{PyD_(!K+fv7>#_|4D9e*UhgjL924TdMy|%fvj(N~ z=`N$SN=`u%H6!7%h63QR?js{t(iS$3H8BNli}3pk2L(8T;f>Q&Y_C1+UKe0-)gcQW zYbS~z#}Dy34{yPYdFtkGDA2+Sz+-XgcIGJrDI-3Mnb7(qr!XfRG&z2=R}RnR3y`6k zeGF9*W#ZA$ZQn>S@2Bml$jY`l4?tYmRnWdE$h~H!pfPJe=`e>(H)dPy%bjxnn1=ah z@q{7xE<=_L{y7_X2>f(o`O20ajDdFu+A*|<-wucS{yw$_k|WkWMPBBA#kQaSDYhA5 zXWXthUf_cWyKxxt!_l-nCZHx1TVOuX;CACj{D>BCw&+5l7-!f9{#v_=E?6jrZ^Ilx z93HoY-X-O2M2zCd=w~C889XI(X14R6r_fuit(un`v zdM>SN7LOLZ;@$7^Eeo%81xQe0Lj7X8qK?Y$H4wAsv0s)y(;PVpVN=S#ft4cs2HM}Z zE`xbb{|Qu5RQO>}{k~FD6JH5mLA*{N=tIa^feCbr8EkSxuR{I0KGjCUH@nq2GxixN zZ}C~DYByk_#OyUqS8sA()HUaSg8;ei=5KKdwYhgONPfj{xK+qVWWZE@hh%7-mQ^{)X)mTNR{rDyx4QhPo#!r&N4LH|1~5yr_x8td_z5D8UlbBy(@ z5&%`#oi-~txS_yNC>fCk?F;)sa-SF%!+<5ol9gq;>~`YfBO`v$@(Z%#nHz7R@G@VP ztoj_)>qxB46VAQ}+@R0uYW>#J0oWM{NDTMcpC~m@FJg+nmVPft`K=dM!_FwBj=p6E zdJ+Z$ZjC_%8z8lfIC7u0V+Zqr&ux-?kDvV}b~%8diS zd)(-Y(?xZPG*2VD$?vPQxSdBq?lEl3RGFyV_Um)w=LI7 zZ*egXmQ_L)5+J6dN)|m>cH%h^6Io1*rTCMK z>M1rD=6X(2B7ewl2xqpVCG!)lmPWvaP2p>@Mc#k>PPZXQSMd-rGNR*}%mGNnbE*G+Sdl|R&<02p z*yTWMmvTC7?ABb#-7Sa&fQK)^>=)ski5WJ`LaYX5P*mzXhNKV@7XsGC=JP~}6b{T* z2*s)-Fi~^0h%N*$%eLsufkJ|Cg~WU2!pD@$V#rJw6ZA-E;$9q^gJ)5&D*^O5s$C-k zB*at#1gGc&r{7V`Mqg4M)cXIXJc4VD_dfmwf8~W9$~D8YmLy+ zpcxqb+|+*j_C}nv0MQ2^;-7{~q>_^Lf&@vy+n=&3Ce}e?L|LHg65-^DDca)eqUL-* zUI-joKdg+f{*7rOR^3As#eHmg8Zqggt`FVpf0nVUbTvI)YTz$f(Yob{w43VSs-*Iw zX?mT4joOWbiGH;YiW|Lm0c4i6&nITuGsvNI4b z2x*CN)pto-kdxi!dQcK;rIwzxH>gb?;V@d9J-ny_0TWh); zm9-5g*TzwdDF}bw^*fM)K_4BBzzwm5|Ar6XjhB5~l4|(350UbFo0Qcvi|oSoKG%G8 zvur$!8ukGPzMR?W&Q^%C)s?n$TSVvGvlT=>_g1Jd6hdrczF2jSH8@V&{ma(o@<}+S z_dWa`KMvcw8T68UF|bD{syS{_S*CAcvm%0}{(&!#UmMp0#}3ZRu@m}F!pm(T!#tzu zZ^Db|zY|_VHbUK|$pIALob8hdS+?L`JjpbOyhgYIvwQZ9y8bTBWF7%GM57HAuZmQp zcy+4_2=5eO9IeCt_~xi+R|yeRk9UopIrgV+cAOaHq00TyckBj9qiq}956@Puao2{t 
zb#T*WRIBiG!W`yn{}Pku!H?_h&F<74nOpIe5ZkEhduvq^_S2qf(T`cP06rFpO)lDs z@@~Xs_gZGZ{+(+|ARqQPF<=oBLIg3;#qi zZGqjQ9|x>6;`)h#m^{@>g6?j%VU&k?G59bD1V?*Om))I=3*HI=^}5>6xfFK< zhgNPbDDgOrwfsb(mK#aL%5y#FHPzMd_V0-&c2tXONmR5cgFSU^xK38tS=d);S?;F* zexJ`g_0S5*%*)$h`^&8ac>?2_*u%?U_9tMYgS4luv_5;E`BJgv;SWclfIBcc)jC zd{5h)(_a~=AZi;hi6n)n_8-7Ct@1}OiB?Gv_5Mk_4jWh+8}=T8A3 z9_+BKsj3fHc#l+P`XpHohWUWrM&$KRKx==q%j1R6K?PPC&hUMuJ;V#TLjRZ8&5FQ( z#BLJx8s4`uZ;$nFATC*EqqM4&0`(mq6Xk`38~<2i6UjvsfH@N}g?GCaUy*l3q?`&j zlqD3oeT?i&DpfJ2-IkVf5W0nX@|t8W4M9sdJG;se-_1CryWOpT;N+=rY@Weg;9IK> zL&9jpm-hm%oOl|w&HR%}XHE2e^%;27tevBJP3l()iHTh-Ij^hn>*K%$M&-$+xmNh4R&?QtV} z(1W^RbyrZe{LYa{%1LPATj~0r5jO>1)|fD*ax$M@2|z!Sb$82sD!Oa}BJ{Nz9A}N= z>m`eARTmy)y1nn3V6U-94|s17y-$*XSSouO%e@rftCqC6~nrJLWY0=7o&3O$wP2#^D| zi4*3P<1SBo>(PbX6IeO#IQ9@cyniDG$dm={uUocR_aaUy-G53g=RTE1z94e)bLmHR zV`l#0Dp;3IICy5=0FyJ`f6(gYKu~eQwCn#P$Hqx;!3V6ANz?u@@;%FBFw=F`36rWe z_#x>--H_av-!2hd!ffg0ZJ=2%Xl2{Uzd!Vw6-evV-8ijOk}n&7drX8&!7 zv0-cm8e$eEfQFc~f0^X!KZU;wfjtMvt_lCw5OF*+k=AEr#d`HSK--XlZ5o3fG1$lP-fNqv|>Cj47~WX*c{w zDH2LbI{@DdjxEHhVFXcpc?X*l=Vm40jk_g#rE-?#5ZbBOoLwsy_FrD8h^i#-9^Kc)!HDTOc zo1J%<^fWPbxyYtNF~M2s3V(`i9)q}b;B&0`)tpLlM&!olSs;}LYQtWec8f&# zWJdt}CXw0yZK|@vl!{9~ym&jMwWux`7iTBm3Ucw=_ut--!=eqh*$+C2F4i-W5Xg!g zd(!dP%4^ns?2MlCifEaH*U}J=G1k#j*0Sf_3){>J{W+K2{TZ1Ur#Q1-9OO}&Hs}29 zA>4eL)*VGc2_D%&F#P?jhb#6KFQ;f1gL@`DFDtzH*l=QCy2-R|;0d6QjXxZ}#@=7I z8%hqI)gloSyWMlk6)im#`l|7~LQ`!D(_Pk~@g3@#op zyjq-~Lq6~-Lge`~*v>he(Yi5cuKZ>-|6Wd_rK5_eH+8<+0|^o89D5XCMBIP3=Bs$7 z8v5LirRe4gre|A{zid6ORD-7l#>zq&hy*`x9-<6t1#%$pIz02#W-!(GJ4QOUnG=vu ztF-D8k^S{Wp^*ZQAIB@EU6-e}OhE%ZymEE#E+p;R>D6Ok&-rzT0ZVx1nPBspKkrOK z5}EhPnWCnR8{fWt9<~v3l7$JZZH9a6OdedZ+pOl1@E%M2Y)A`4R&QJi#GD(1STai^ zS^SGdx!QU3I^EnK=oNatSHwq5iT;xiXRn*vbi1j~DGv1I{5QpH(yUmji1#O zXt(VZndsL}x5&ORnXA@dG!TIC!=Yp_Gck4g$Knkv_4W0Qf6cajA8zuPu=Eu}=$rA_ zg6nedPjcE#CmQY5K)1}#iJ4%qi^q@C!yVxw5IV|cuxaor#nLXguN_%n{;k|yXw?ms zQ86C{R(_@;ZDYNa1wX!5rKozJkb z*O|DGWpR$K86}rsnBO$0({^@%pX8<;yy3WgB*<~$;oweYNDqwMWSSl4OA|vp^S@0D 
z@ruj?63o-e5o`kvoS*IpVzmizhT>w^cRtwe%cXQqkILOJ-w(FXz!BH~?gW+>NfF6z z8~!@hM~hb}Pkc!%_i)9+1BJ3l9`ZkB0=3Q~Zgk=Gw=x}7Iz|WB#mv805b9dm;*%R%+)Teu z;`W&ui649bv_FB+ktlu{=@t1OHj4J~s4Ti>1dlK_{4!l8!=C1XKvTkx64&@xWy? z$*RFqvtF6-J=ExzqR%4XZ&@qV*?u_I2jXC^zK1sbvTP+pSEPd+~*XJ#8IUkCq! zfIumbr$rco{W&3-_)dZX?O6E;!tQe9OYBk1$f`!!!Ua^OdiOQD{qF71{zI~C(l1utTor_$|R23Rl6J&6Z!eATRZBr zLaSjQ^=D@IJy!{Z5aRV6$HL&Tin1n%pPSvVkG^{bWyV%-NEcgUjCEU62MhWH>>2s+$8iq3FriPVcpK_c&B-sKD>m` z4F8iuWkT2=9TP7Xe&@<2fv|kv%(jzy;ksvQ=sR7+H%CrGXB{-G!Euh85hgPd)%-N- zeiGTEO;Rg4ZYm_w_f#{^olOR==1zW{Z2@HPtU`Q18f(Fv%<$(>XEMlT^P-UazHrQBMb!~>>pZ^UJS7tR5jone+;`BQJp-3c*@Q-9F0o|{F!^kF*A z_PC-0CL_^sepTGbD9Vpbk-lnhb~1FE>wcA8tL7x5Hdzgpa*7|Y3)9lmD8S5|ZR=PK z6>FwnBc96_2Gs81G-_Uk;xm2o)(e!&Z{El7{y?HhCLViKsuD}>qYX6bqp`8j69R?d z#yT>Jb##lDLo)eunzxPC`>ZIf5D`bER5=t61e48*<+@FFrGRP?T+vu91C9GDK*4CS zCw=pBKcj{mXZCG0yz8?qseZ@z+|iHBZ^^uEldpXh|NPGDA-8-(xD(&-qd|wS2x>P# zVxx`MnPKB<6MbpmvmTNTP_CER28He%|K&Sd`_a;ikKn?ePM(nW$O6ssOnS0Ixk5g_ zq~o&ZU$gVO-9%P$6Y9ZVss8XBp-mW6WuvHgJT=DNhYiP$Rx=79Q&kS?e|%ccsn`y7 zmTb-DjiQgRck*D6QeegcP3^t?=In*YVSl?!k;hceA2C6KAb`aFQst*vICHW+OgA?D z|EbOap?eQ`yW=7f-9h{7EZJ7?!AbGX=^NFO>JiFfUr6-gUIuBsJ%;p3CP+0=So^bg z)==HIu>LiTV!k*iX1-6CMT2ET|A5oD?lPX4+V_eR3N;bF0Z*S0j zCgT3#f!A&u1#}(wkRuN!7&+)Nbm?WYMPNrPFXyE5&7Myk0h1JX{aG8lG(``KC`qqr zLIrQeKf3|8TYl^AH0*m2f^fQ7aUT#)H+$)-G0WqwjUYa{a25-Ffmzn!{HyxSw}cpo zz46eTr%$ZjA^C)mu1M>{@WGcGZ7bN^950Zlj^P4*)R&u2F>oUdKnT9TA3u{32R3v- z#+ch+`L?zB_Smy3R@$Q;cTQts$v+xNWf9vP3RB9`y&X%og>cVK2cA)rQbR&bZC6d= z%@HCQ6r9^eVG2R3!Tm>kZ|(G-*q<5Gc;$lMQf6auGc4`i>eC> zx?^Z8NXJZp7+OU85(OvL$m>w1nu8Q|NtJv&n|n#Lo9-K@)t0bjU+um_Q_>Gsct@g* zTqvoUM(x}*HnFIC?EJH@GU)i}64HIiW>jdziZOXH+d<#Zi^xwLA#DaK`8xWNu9H* z)9S~dvb*anA*ZTSO8au2lxUYt^cVkLm-hoI4E@&J6arv8WWP_43DO|5ci6GkXc6_|u{*Q9%|>-SC$SE!3IJM|_DySH)X&o_F$F!$#;njyv-!yW&u z^x@cVt#JqhPFb-yBTA|&n`T5~0rSZbp>)Ws9ao@ur&?>d2ihF6LFIDC`xP_~2| z8yDgIcDHQG>hfRSv{*&I8SaJj0aJ$yGYr31;`hSQzW>D2h7VNH=Q=~DaA!OYMmRU8 zEceEQsPhRvjeZENS;5j^e?-<&*Jg$u$m2q`6#Me2MnkxD-rA!^e$6d?n$?H!n3uhp 
zfdHWRPY`Z8K=8-Sb&)+H3BT?Meho4gX+H%&&^EMHB_SG(YH(D?@s7{F^TC7bW>N5v zgi4!{rk8`|d`gz&=Cr_Kks$KSL*rsUVTwK2m>exKURNOnua`>6lk}1udA_lthUBqf z@CL80nTXa#zJ*L3BgB^;ydbN#D}{8^`uYICBQ&UL(s^($>LsVd)a`(Vin=KxR*lk) zNCP=fv(-`_*NZ_WK;!*A59LGnk#n?3M{0IpE|hX-oppOt=u)lmz+5PJ^}y{5(X~gP z-U6@I3a;dXXMyKy?o=I*nRe z=U@0Q$fwOyzQAV~8AitvCez|gex_<5yg|Tu=*WIF{1QRDhAZ$b=Ls)EOX-%v95 zDs^bHi)&9@D&i{MSWELg#!#NvE9df9c$MGP_W(`4#D4{HfBv5TrS*)#rmUKd z(ti@vqW4AHnlz`L$EU#{Z~9qtXZ$|3k-_^w8z@sqFrZ{DCeTU+%_UJ(9`alH!P+E! zfo(C&wPSv%noEzeiu_Cq!Z717i&$cKB^)1-GZmn7an-?$7#*$$8%Vz=4#)<=qJTx3 zsAgwT`$^v5&u0mQ%r{tSR{l0ak$9bdvOb6TCGYK(h>p>y&Pd@300AEmiq>hmdWBJ7 z;`;DS>t+_Y78a2tHRd{yj>CSPs_zVF1t+fmmP`Q(Tx=MNB4FkeUvu)7M**AA2oQ{; z6;hmP;^R1lZa+!h#o9D6T<_X`Q}Ls|!;ar1Fc=|BmXo6yIfno0u?f#TGK{ph?UOgg z7MabH>Yx%9a>zOLA@gwnaOH=RZYR5>?mu%r!VMH2EA+5ernV{ChWl%vD~HtE+%meu zA18Ie1nHaYQDA9~+KM|G3TxYbgl_iM zltv04ougqO-T3fU3;yDGdo%K(-X`}C;)2g4?vb5ctMNA=MeRE4%-*a`c(u9niC9Lkz=3m7S9o=C1l2sc}662yB84*NFt0qbvx)DapjF==~uQ zvso~(Wlxg+7F~oP$O>l}kPLoh{_NQUf9 zSr;NEM=hSAkt)}ngGt2{Omb~K>h5y$8vOq95Hdtm+&q7KB?t!TYH{z3O3v zZSHS_ckUT@wo+PXBd|u-UNuDhv~@CoKOl<3Jw{b#FCBX+E{=M+_T?I07`p@;$!;k4 zbE^CyLwrr8IcLibj>DfM4;)rd72DvvOfVy6Qa^|v_uuQT+W#di3f}-rg*AG03;$;=9!57@p5QjY7vF08N0X-TRC}s`qzUxwAlVp5SzlgKl4=!f}AqZO^uPbdvnD z82!eGeFv3Jd<@$+J~vXP(^mJR^QA5eyAS53xvhY#T=875fSQ>xhB-*BeH7@1l?V?+ z0rsv|vWJW%3`&d^8$_=gzD=b>6nB$Yrh9-_P)LIC41x2>Th58GWMB*$8MUjrLxbJS}C$2Z#xb4c1G%2<$sHH)jwb-#Fs$mnnlo5FEi52Of#cqy`F7q9KYZvDzE!|;D#+j z45x;Jtl#jN9Z_XT9dgNi>H&8TGahSs{d?tpP%;SkbU=$fR1BCuP+U8!Ilxnk$OJjt zV-22uiCYh#gp`YMdV7W)Y?QQ)>@jKrC3O>!bMtiDLFL|`ks2{oUt@RI#kCJWWdtO! 
zGmw40WMo#0+;i{yOmVr{1rm=g74?U1%l_c{led<_SPdr7^_@u&_&;8~Mx}GQ|>rj5m#A$AF^o%+UX?RQoRM|8L5|=PMSD z^$ac+*BmsxK7wA(?1>1***gL0k0!?XXt!$*h6*c&sLnlE^=P-Qc~J%jw^kZOgMs|xu8m;1V%P1r$+=7#|Fb-|7>&K>2))H8 zoD0oXfVzmWH4`#+X~#mmFeA>jch)-kAl*PQQ3%xhrheuU@@U!Lvd<~-VzI401kEXb zU6%-Yec>BpQZ0};gPh4Vi+)VqQwnrDxthKhulnch5rjhMEj%(t0AlaHzz-yiFo?L6 zkvG=5h81R#u`q--3nxu+fwT|69&~o>(>AmyO+b;76je}?jmC_BCv6Ry=zlVF6uasO zSgR1W=Rx{Gja~>q2_bC%d-JRAm$-s9E~Eigs;B(xmz9bn#oV3;@sQ>b(R%jMRAXf` zT|z+JS%XTP3;&wTnGv6agK0{3zmJ&XbpDlV>!-vLKU{JXh`goW9`c6W#QZnj>AEG7 zZrh9L4Br_6vNhP|>yC4Yj0Hl{vBLO*OP+H&o@aQBkY15}9Qo5NO1b=P<~JiXD5$H( zHS})ehtj~aS|P{3DCfWaBAmL=w&|E^RnHJfXRp%M4!?@;lmzO{ZSd(h#@}z{qP^Mq zyJYEj9Xm=G03p(076i}Dz0oR((8f8ZuMI3x743Z(F`?jtda_E??aEGZ6?kM_v(B_(C8*dtrX zg`dAEg)Dj=lxvOOm9OBFdVnX{)~DWmt(Dp%X#Go5%ooJ*ZY=uS{E+c<9@n__OFUUH z482$|>@5{Eq1Jpm>fzr6O=CNn*BjG6`j_yIMIY|~zL$&Bs3R;6J?B=QlTSeBYA(t@ zEeTY=aCVdxp#N6d`2UCMUFJ(n@``(CyqVEneC{p&xq9tU-@0EK4$!gUpHUjqdJD)B zf-x4^_5Uz@GW`ZGA;`ndkA8fl0bQ^f+PEc6R_^^6#@KGhDm`i2$6!slzTh>C9jCo& zg>uCbLiI4lgQpfZZ1AClLZo)L6*k=AYJ%^nt)Sv zdE*V`@kN0>Ck)5f(6P}qK#?wDbz82;|G|+4VYMd@EwUadyvqrDZ?*&|2<=RGd4)sB ztb9*+2XsP`rTxC3s)GUmPK70<5O)re%d|C>oP4S$*A2K#z)jR{GSMDnHC(S{+YQ9} zn~xuqnGpy!B_-6REGsq!Q&2t)-ElJn?wZ5$FhP=)DczAhg?mJRMtsWyg1|qNlowm*Dji z#TqJ-3JUv#XNb8l5lhnXo{Jva>|FoGjsA;kUhZj=9EQLMj>06gM_&Sw0ZSFqk~7L~ zvSKUYsD>+P*NRdmRa+#l&natsl*Z@qGnc=uS8`D&_LEqD>$z@JtTWv<3GhyZJBPoB zjcoE)0D~n8$2xv6ypnBvp4LJ(gfe8MeqhZRub&2=K!uqa!`2;&u4_)2!hwefeWL#vNgd+bOQ*uwv9}Ja$lt3EC0TU^# zWXWM1Q9R>NE`+aaqd|(Wl=wUOD~*mu1j*GiQ#bwVe1?^T1kr_hlHa~Z3wBpo8K zHFCV+Dykq(o!89eV9L88uRE#S3UJ$T>_O^2v^IUZ{unltA5lCu0NUL~54C|h{*GM7 zzl31GyN6bp9FNVrv1hK?Pm_)o`MdKA>}c^Q#z=7$sPx{OfD49#9DEOX*ul}_DyO1I zY>y=Dg~ZmB&3u39E0mm3;gNXPs(ISGLrSa*ywmizv2gK5=`v3AubA(sB9)*zfGpQ& z@kY`^e(rFxQ?PsrHNaKzI~)3D3y5%refH@Xxx3!Wwa%$pISjp zdegh5IVJK)%kRTOg1U3o63pXgwQuYfAzf4nyaH~ljkMn~rj=Rx7Z+4G|Dc{}{)BC+ z)uAdUwt*~VE^5!9lPz7P=qIk@s{9b|v0gA4lr31mdX4c|guXg9hq1blS=iEOvw!|0 zdr1A}L7nO`FI@A-&utQj6AkIs_e6a5S{X31$ws4@%=Y*p(`5T^>vEd2#r9ft{?P^= 
zranmuNIYWo(^C5L5A~HrL*3#_njG0(V&3|(Gi(C#Nfgm~At`kVv!8RHmX~Vfi-u@O zEU*)|CczUP%S=5JXwIMN)Q=lO&oKEq+rItz0fdVY#<|HvmUhCi{OlH1<-W844C zy#**#yek1Q|CZR*FEdxz@sK=Dug%c(!E-+AKefr!B*7SJ^~&$wA9(a0s>BBG>!%^W$efBw$3Lwk zcO@Zq!w~&e1|KX>q2tw8up)L}a(Ip5fdenh+YW>`Pp@MCFRI=>p6U1hAFq@SNTnQd zswA;c&S#b6l=CU(P%k9s^I?v!5<mthC-D*=5;5lD+t2tK1ua<5 z+(d_OxQAq1B5Yn32}rY34%+07CU1t^VpkP_!*RD*sKISgn}p57nGB6`j#WP|I%6Ud zEY>?aTiq5C>arBAIHTp-^3S(}L?`o$CnWNn#)f~e zMCVbvTUb>CXSTb3q-JN;q(L=8r;q4R^9IdgT<~F#JiD}2wNWFDUGl1SoYa7&^gMFg zHEZbbTRuOU7BX(^8KMLS&sh@pfsc6V5U8ha7r+e zecXUXFCeGaq}O6hGe#eO5wtVAKrCpBlxI-lseih^IE8(F6ff#ITIjpKA3dHw-QQ=J;WKgZK{EASi!FeW zCM$Yv%))d>i&?Wtf^MkVn&@T~crC%9)gwz-Ie&Q8!fqKOX0`WZO6k7$cGF?}$$bI- z%9zqP^{Nj)?Jc|J)^^@cG^y|`?0t?xKviZPSPT0*si&Zwl)E!T65h*g@46{Kf*O1(x2yy=w7tr4xr+5#F#{LKrLj~oj-7RGOH zTmIi&09->kF>@_pFoIyH!G>o5Jx7gts(2N@TuZ13oLp-c+amIViof*HXy5Y;{g+-T zMl`1qaO@xH+slf0oSE1tOMbhcNDf#J8p}sB-kzum9%N2NpFI+_WELzBGTTf_3=e9I zxL4w$ay06fTjJ<4^#R=>-Zw1J6u_@cyP%4`*3-7Ml!nl#{bTDUGlv@q863b)sy?PH z+@~NjOj^G^D1Vd}s5Y9-+1?`Y`lA2FONgt_#~HUKQKRI15Q|$$h+n(^a{lGr@W}8+ zqeLdLK3LM1BeiqW{UmhOh2lLe(*E1;n$RNy=a3IQ>HG$}iZ(9mUP%to^Zq0KzImI$ zdru8wd?0)BA7-&LfxIG?wmwk{i*!!6y(~Lx3$aGL+~n{_|DQT#-M`M4?K>wc92FQI z8483gt-s$+`^MmSAJFy`m2@P3|Hbzk%^oYZBdv{I#x~~WC3y;SjhilEHckJxhkb#we-7fWq)mM9mZlg+s}OCZKJ8*u#6GBVl$}S zP{CULY1G0^$#58+fgzA%zOVL|hOSq(x#T<2O3k;xC5QJ_o9>mxMGtyfOMuq`=SKXh z#rz7}`xLLbt0Jdvuv9U9 zZuv5QdHl_rO!lk5=D%g;9_tsXmwY*Ja7@YTE=999K3=Mu{*5{*pLvK7(rJFW#QC-HfC{6uaMgwgtOJsWc$RODeiO8!+J{42kK z$?+;duCQ@cG;qWtd+wqU94+R@pv-F&2wP%(L)8`Ke8uZ9AwpbIqWheCwH& zfrFi&xWJqRvEQ3kb(Wb2ey9@K?oP+Gp7^1H?g@@yyfv}f!o5oCrt%S z@9ZFw>#^zi-=?Aemd79^`o#QaD=J~Q1!qzCqi3>)COitej5Kfd+J?>5m z(VewzPiXE7S8^ZF9(2jl3%8e1qI)E*ec>xq2(27V0%snFr>)-a$<7C}cG!Z@p+RX5 z>3p+=)?nV)2@rgaRx@+^*ishd+-XQmckPy6}4V zsT*YVI_2xbbu&dGyQyjp`D|lMy@9%LdI1#V6)~)hQeVKl?+Ev+i)QI{-0aQGi>K!K zzeb$LqzC=DnYo(y|`AtXHz$=aBGEW{)92=e$xR(&g&=d$?J?X}^ zb!k$OU+B$qv>TPYbp6k2oy13>W8Z3Lk^9HWxzenW2QYSV%KQ;6IvzcB-{#n$VKzd` 
zs_qq{+LpK3&U5JV58vTvOEE7}vlN+_y+zC_h^~|(5w!nuh04dra<_33tD|LbmCfIN zg=kbizhoP=x-WbCso!K1{^{xatU)IVvJ2)nWr`c`PgRNtXGm2)hnXGEiNb%>2pGL` zJ{#H_c()R3=wWw_*|NB+Ia9R4R=RvjzD6*|KK+NeNu8P$VSCJm_>CCioGf0YE}nXg zA`d^e3IE4>!k~77O~b=35aD}V<$?><*+^hl)rI&^qSuFskrU!+2H-Uc-s9le0USN3g*;s0(Pjh~WNr|7SX>l>q*nl1YOP93`(g-htF)@h? zK3Hf<)-3-tI)61yK96=WO^a7=6~cIdhOOw0J(*`8?FbzmjXXQDZBqP*w^fEvUjZY# z;wROg_3Ll8p->`{;%KJ!D>*+ zLS#JqpM>ZidI|2;UT8kIr9!1hk6r5`e(C-_C{n~nDPH81m#j$@EZAJO++rqne@Y0| z)wPaycR6VV!tAZnL-=H-Ur?{GRE`|5HST%_irkCoRba$@02k~gx+ zoad8egMg*Sr}hbbjoU(K&#zxVY3ID=qAnVP%>`e~zJ$Hji9V#de|-c``#~H*d87v% zvn{rf`lCE33&Z9!rT*p~!A$`+M;}Mvt2I=RxjsE(K1SSkF;%wqQS|@#gy=%Bdm4ck-1J)n}-(b&f_vQfR zhaNJ}ohT6Y(D`eu53Ga7`ymu~c_%P*81(3tN<+ENOe#(8X?>N`D@jD|psX4rP(!~c zEP#Z$p=J3dIS}U~ecy17hWUP&F~q$1W}lEBh~RTp5ol-zKtf>m48hwtl&PID9{Tpf zPd7I=6+beaCK#-uzAXwN!M&`?l2u=|<@wMd7D_qV_XIlpe0>*Is_yi@zg%=%vazaP z;tSGF{DuwknLzN8gjJ&i)zejZt-kJAE8%1nh7!4930a&(!>9?;vme)I)*7ylYhJ6U zs>)*Hy|9qgnXmw-Acdju!14a_4B~~Rp-Bj<_|5Tu4BPcQe!t@%R>q0dqEg;!Cdh=XVfbL8?|8M$KB>>3@WdJKY4+8Z?%iEr~C zRx@zvPya&Goo9xIS(dN*&(|hk8v?Odb7-gK8@YekRb=$l_7`T2&4ahtOr7AWwDS-{ z2ZyMu*E}-qvkt|M=`<~=$fLH8kmgG{Ct~Jq{$35D+%Gw8{MbHRKFDP1ovfKWhGc4C z-8@6xw6wK_9VrM$)C%XNDotMqWb^5svvt}KT;&L2UN4*&<8)@pyEY6qZNOZTU6$XY zsjBL-z_^Fw|1I(I3Zy=(N<>G>2k@HKi;wA23SPm|M(lrv-d|Bs(C>mOKQxifYqyJ( z4^K=#wr~cuHvjs>lHvUC31af-xMwM!2>aT$potxJVlt!7ASQNJz40j~BC#f7vH4p} zh^CK)MPPO3FJgr1zoU`GeVgkIQN@-amFZ&97M4!j$d(2>1Do1$8_%qpOyZ$+H_>Y< zr%bKSt9Wj`&@#Mh-4GFeELJgdwOVC2z3A86MQ5>Of>4xd5}CX$XuRbDvsKF`xhbkq zjVQaFPxq_Yo<`)iix&UT1!J*elcN4PW(=Y@0=BJEkhh6~A0N4%P3Q=-Goc~`?Vj@% z{|?%+Ze3tJwmNp%TT^5dTpLvIRe{RnN`mZsr&UC))bdhDrJDI24XG*Je)gd^2E*^& zihCfD{4Q1A&;;Y-6lMgcg6yZ4M&Yj|4O;PIdM45wUfeZ0WY`>{Y>To!b@y6s<&_$H zo(P+uWt21RvtZ1)Rap3}ScRm^)7he~#a6oq4?XtRbMHQg)~tPDO2Q^g)P)r5=o?4L zq(60j0!4*7Pp+Sy<*Z+vRLTp2y^sCtMAuSOjc^5`X+Nv>B(I zH=`BuVdaXy_+^2P9gX~j2qPyxgQg$%r~7>mnMy<`DVbuzm>X?XQmo@FVM%*V2&SH0 zzYB_gCh88q>_dPq^Nv29#|?Qe1uMvYUcP`BVF2>fjaQ 
zuw>QUR5HB1$0DeV7ooPbEqMf@O^3#gI!#6%GZ8C6o?mOiRU>Pr0}rl(7`0L#$vIqY z4@WS6xQ1;!TOH!>0dwcGR)ud)|HBU-hBL)6mmz3}XV*ig8=gO=or#PZ@Q#^l52dB> z3?l!zcDNqux6sTp3iEFh!g22`=B}rIQ>NhDhlxZYG-9_`GJ$V2xFUn48JN?hvM2rw)>f`I&jkhck866iD zM?#u1c@5Qe-X9VKBe$tc;=H9ZJ}Bxk;Xv8W*!U*L9e$4)4xNR}+_Ika*2IL{RU(6A zFXSzFo0K$=P*SgazpwI3@+C=+uG9F%YK3WM8Q={a@j%%Bz-K>!d#0tLg2rm{> zbr0~#Uotm{Q~L=i9tp5wG~-NGJN}uprho%#C_E8nhRXJ$4nB_y_T6K`->O#)clXS311KnubpRFd5b|{P zkNanrl`-p=)*&&D6V@Po+~Xu=o~PXL)+gJad31m`W~Fi`I8@I!uEn(rm%+Ci9`t(@@aQIG z!D=!og7#b{M~#}cYzg1L((G&j`TvPW}hlMy- z@Z!uWrljE2HSJ+`*@mz<-5Y2QP253VaF35E*>T_dDKwXpjlUuK!xQKMjtR2va9QVr z59D@w8zj{v$xX=J$z6uFZf;_^AR9bR&sdXOy4f5hrTR2hckt6thXZ#F8tKKQH#d=S zUNDPvaq#0e5=8a)X=Jb;bEv2*|L$(tZpwN`R3g>iYFp5PMULe$nQ3H8spn!DEu-xX z^2|(0Ri)!_LiC6c{==gdF?+JXE)w#dAn}+t@?Ms`oolDp>LsW{q?rS0{*nX9bIPg5 zz?s_kGB9;6KV2FhPWh_LQ!Wn6f&?|>r{sar#Bl7}1Ckc2QOvsvlL{(mQWqr!uhy-x zzDA$w9zP+W|I5t=D&u=-0uveBUFpf-o2^{fvmCXsR({c}X;>3!;89{fJeUMbXG(yA zcy}7-!bm7Kddui#%>A}Blm7M4GTYMZgQ6K1DevJAq9VpIb96}5#R1c&x%-R5m!knC z6a?d&2=Nt0`{gb)ji(JtPWZN6JC4yBbN%M@IB9Bsk}5XWMPR}P+axATxvC(kuzx1^ z^ykrpKx<^W0Eooeoz99NJK1LE*o#$W<@94xbeHB3l*M!ZxY(NBR#w+nqFKDP)-M{j zl+iVR88zl^!v21XCuCg~rWZd}84U&JF+8KIQ;_l+Sm8RiO6w=ZoM+25>jIn-p_)aB z#PdfAriihpt(O_B?w{OzOo}?;$2v?f}J<)FQGFAiS#z96lQrmDrI6< zc_M+Lx{?h%?uP6(muW*F*#QPx^$(T_$!egev+Rh0K?q47G8gS!?SDJPHgHnkGdCnT z15sc7#6XQ04B~fj94Q_p7IPR}#MA^N)9tNuO!!Xo9UoBDePKoW+ifzCYxcuOXxIgBPTnu^75xa1x;Ia9MufU(MgqC@FEb~~t=RFL zD8!=+q>vfsCG~WJ$V-o}ay5>MVlGMsImS?bOPIDK_N6AyTK2>T^%fg=5Jow*{GLu& z$C)*M8L#8_*R9kxh@5pSG7XRXRMDV$Fu8AAFaK+HR(X0^Zm_wTR(}3GEd9KKiJwvr zUjAYLHU7#+b82x_xso&KY$)zRU)3lJ)JFUo=$1TYaH*ofzTLANR^HX}G`s4K=}*Y3 z9#aqGwjz@U&D^6p!%M&hG(yE#TLEJB2W5XhvDYaf1lFJ+C7oe;X6L~|Op#sqXY2%| zy@N8TXzD45(~`mprZHn1l`A6Q*D^76xw-Ph}bD`{e(HnP8NtG*0(irOk{!kCP^bwE|U&h1{1qo@&Ge2Sz^!Eaz6 zA0{=P8oRIArI(beb-OoKdTYA=_1^sysVk5q74~B{*2oMWNU++GB`}>-rT8TITMxUW z7-2@Reb(6;RolOb0ngOnnyh&ED!sOz2)wefZ2J5-6NQzdXXIxLm!Nzajt|@f^o}j= z@}jCTyY4u6v@2UvX8atN3PNJ2cc-T1$kv%|h6F{%TduH7m~n!fpm~B^uHqKW2>%mJ 
z3+R)j;ICo8!P{NLBzY}wsUhh?WQq3|ob?fL2;A#T(@UFybGw6P2%lfzT&J3AbV+iH zA5J#-IaTFw*5ZKIcIL)?_U&|0zEdCZsU`mE0B_0&lFR6Eh!&2E8ogi`570uZg}rW$ z5N(udD@5BZDi(%$;Xb#m#_tJU?#a(Vitm6nfh|dqquxbK;naRyG|9t*3s{`EYXZ7L z4S5!W!7{Zi+uvm-*GyVjWuxzm+_#a`#jRDoxv9FAqqHCU7Z~gKP_{ijFcJ8K#(m*l zXOQC2*i+!ibT;7xKH5Z8Q;5nD?0WK`OYUY9s_(|dlOm@)GYKEi`PO|A5>^Y%4A^{D zj?})zq}yP4^(lME&KU?JF$-27>G=KMM_XfQ(lFELXfmiojP+s)37YM+Ekz&d zpmn2;x4&r&43&lK_GOU@Lb0!8c6;Xw7UDz%Gp~Unwx`QcWC=4nL)!j~pnJ^J;9!_8 zexD5f!V=Up4^a8U_OfaJMDeVe!A>q)m+dfmf8(~}3M|cIUwiM|FZzr=YWExKHEAGY z7++gM4K#52>`^XV8n}P>+o7wMr>wyFT4aAfmhN5l8H-ZRr>BTU`L*O>{2NjqWTtLN zgR6B=?Vx9$Waqkl+Rmz?{i-0r>ci#bA||#Dbip21?~h^$4c?-oBicc4CzqWloq|#{ zP*~QksM*D_#1e7i#_V zYbS?h@7vIbX?Jj{Np4zKMWFYMlazOFlPSU3*7b7B3l6#BxvXlZxML2U|NbYO)}Lam zMcY+wG#`(!s#FQ(gbb9FmR6{rO5tHYxHeK)ZU>tQ@Ne@iUfmNlp$$dw88Uyy^;=`Y{t<>>h!(8Ty;F)ulcVKrv=tS#1vv#Wd%1*rmnBgy||2`HFO` zd_1EWd^5dPb!-i)GXcUDyZxIr^IsG^zmYj*J=Z?S-|@Dgrs}z+5E&(&b(LuJFw|sX zs)a7t4_V(CoVvbghPWcHzjIBVJ9c7x+?uiQYrWam6D0su7ia;=R>tkNPPnHhXINJ4 zh5@9j5egj5?XPyzq4n-&EL=F8L2$&Sd2t2 zAp&Y4k6vfpiL0)vG5j-^Ldc@ATQ6K40DXz%34JDTwO+PN?uqCpQocJ`y#_oK(Eh^Z zXvXa@LFYA}BGhR5ilm~aYyLgKfo{Li!nuwo80`hcEtNy~gG2K!;@tk8y}pHFjR-Kb z(9}&onxJPOLrIv8qE-lUxIVV!3BMX6{w_mTBX`;IC(jh(U9{c5Y7{(n%=N$ON<32gvnGN{981Rz32t_HA<(`ZS`a72>-ySdfSTbg+(E7zt6&rFRB0?A+Ggmm)7HT68)L!T@ zI@-1f-}ELRD@dMGZ~JPY%Ncz+Pd}P`MyJ(AKh*0xQ5+~1>_gJ1g zrtz#|O=h^ZMjG_cp7>?Q&)~yM;6*?@raWwq9}RAsrT9AWB5r6(o^Y+&4A%E)?<;7i z>8CCPx)$GfAu=Se(3DMaRPppu&ezTKfo$Y zPcWeG(V|J+%g<-$Q&ay@B-U^Cq%nB2Bx)vXV#{44ySSy@@VztWN)WSwI=_v^0#b@oJn_C-Op z$2CaT_`ixS_iWg)8s7kbj)4k$pF65knb^uB}DVs*Jeo5fp0hOH`VO-&+6kuc_@9Gk1s3ol->naZqqy2jWW?+M*@n z8%LhJJ^u>_*N>xmY24bJ)0f9f5O#l0g=|!36=cafqg+dvguGBYUx>q~FJJ7E6xWBo z-@7^b_kZvjsxxd9*FSMQGZAN2v=$Q+tCmUG0GxC7A0k2T@R*=~t=WKNw1n*&0$qHD zohMl*zu1rTJMoK#ik;L!iI~QQ|FW8rUJJF?hzWbg!JA=TCbJ>HZtW_C2!348gPVv(Za3_Jnlh>hJ{d;fNwrRdiRi?)ldBo)xV1t6@{tNmFhFJ(tebqtC-Jf7n(UNx2p~L>>QUl5LX=%%evMdW99} z!ifxHT4VpS>m}tq+QbJcV-JoWA+QYk221`G3(HvDUG;}N_xMM814|U)?~bzbS)U)m 
zl{-?t3?5^E%fUgRRx?nDq{P@H#r&aDWAA~*w z$q}7pYQ2F5c}Lg6Y=7`QU8h@LwB031Dl|{=6@+(OS{86po6l`cYVS8NtP*) zc+RSpE-L?YHz^GwOz(`Ge91P(;lW))7U-YV2hfA86w02}vw1VzF)Qz0g)t%49gFTw zui1L*bf&getuOg!TYQ2Ba0K}Luf9{mN;^WTxDa( zcB@C^cx!OPJ#4JpgZSn^pmECuPIN8^)qS4)W5PDHZnbrg0t=NuZ5`ey8l}xL)X6Qo z=V}uB`!83#+?#Z^y`jYq$s;wqPF(D-Q_|%o2Pa0%Y}xA;$ILa(2EsqgKrG3$u*37Q zy~A3`S^N#N#&jC*U2|-PK-6~QVjJgJL1BgMXVwnmA3;{`OTHB9Byc3_{xW(hveW3j zuot#0EhkxNA~Qu4^hkeecnjSBtz)u1s!eYBtda+VYp7hr5u+HD>`#qxY(dJq# zcB|Kow-A4EQYX&de)5K28LT$K&|r{7W#WIWnOU^SpMbP+8&tS2!VKoyQCw1yabCIN zhiRqMB|}pN2utwjwsC>jX}Cikywz z|Pi1(wi}LoYHA8!k>H`x(HTr)_86>cQlAw zXLWl2&Gzffr?%0lagCaC+Rw~v8RUGd7R0O#BVUf4xV5RfWFM16`>m)K_-RaiquxQu z#Bd==zHvooA=qs$5y<6s^fLde_6>LceA4qD?WnacR^=l2Dkm?R=z!3OZ*fvbM~7Y4 zYD)&HMu+Gb{F2L}`3GXexAgm?UARq^dIK%q&%rY@E73V(HAs$`QhOh3BLTLvFqY>E z4zP4_7yR7`9mBcfYDtRCW!(8UCYEcevcDC;v#3q9?>@04@kp~%R?YRFuj>_nDTSMp zl=8w`OV*>YQMY9~kOb={-5N&alT6gw%%l4@$Y1tuQ$CQI5inBD^M;S^+xF%!7Du-vW>Yr zAlOUp8Y@->Bgm%eZ>082oH1YvJj zd%B0OV~4cB0EpmL{T~S~%T?(z?5AQYCVNeVN%HYvsDaYSG}-z8CAUt zcWnJ2gkJ0CEVeu^D}bsm^IuMzvy$PVs1w(mR6IqrE}ZJW{5n)|Ah@krZp+RP;3SS- zyx|g&9rwa%eO%DY#@Xz(=SC3`$iBSImNi^}yp2QxC*!p;DVBbKYvSc z$G_mL5&5xn%EQ4gDcq$&5mFW@suZ$;t{N{-(_t64}wByjj+38>pk3`2;uN2jSH|I5d0j&~k`*|+7 zNNJhk1H5=>PsCJ7ux4PD)LllFYxUBG>5GN^z7sRZBbf9)Z;ThN-grR4?QMV;08Zg_ zY6b}knQVazvshKkGcvCEj{GX(uQOCwzM8EtCqu@4Lkqf2o%!DK@K*|8k|aHE6A4l=^y06&nxIRl z7P*9Yx%YP2t)#HW&p9L{#y1XrM;sjNZ_-7A6s14dTY9#vCZ%UmSO1_Xr&`9(bfuY| z4bG6NdF_I5r+@qDz>0rfBGE+c?lVkz8M~8ir_9XD*tfJYw5XwbrBojUx%~EsU{HoP z&9HpRP$!x}P2x{36{OrMXU`j=zd+s{#%^~8UYR6IzG(>%iSrFKSk zd(?+vRQNv`W;Gx1-l0Q3AX3zatt6jgXV(VxJ~L=Lr@Z>*)ku4AC&xEN4*q&665s1U zT4ilEgngcX{v=-ot-U)=OTEw>;Nb|Jnu<5QJ?QlL+OrA-F?fSx&G~x9gJ2B}jaJ-| zl=JHrR&^=+EoLP{?i~5CsZq(}*J;%gts+n6o@jfiuci zmfJN-)4OAKk0Pg#p%xI~58s!%P<;H!zpAkFisTaSnxIE|e;A&6LTCa2FaRZa+C%-X zI&>ZK8~2|dl6*L^)Z5bp)RR)5OZECNX6TF$r+S4D<8an}Jt)-@{M(J}5w1GkA*E(~ z#-)RE&B}sThEp8oi-^z+^`G7xi|AhwJGK8B4p(pg`e;S(7O$z-TD^JK%>C|RCU;_t 
z_5SV~#GB-=BXeDKeiW;l0l7j|QRXk+T^5lfA)I!kD4tH=Iio{_tGp^#Blce;U#w}? z3jM%u|9feeO)jeu1{VpjIXX)e4cq!-H)6%}exgz>w=%uVKHVIXj*YieR-c!ZFubFP z9NiBuIXJiT2u^(sJWZQ9sy<*{#u*nX-r;~vlc%M*u+uZffiR32*J_8i_vH(e-XL)f z+;2NbvRd)d+tXaRMs_Q%{AwVcXIvFImLh*wZ>05|{5knSFHpzE*!0!OZTgQS{E+^P z|B@ZxAzD{9C)dm9Y_B%C+&FhR7A_}U_)?p9b$@K3hb8bdO966=fgwB*)CQa}FzNqE|n(zN|d;QwZmw$_x zpypjiXaU8x7#4J5=xG%eyXLTt?ELdj+XDbA1PpmgJLy`mQ*PDe6ow!Y z{SvPZ!0X#;I+uOoM41%@q)C6%r4&c{-*m|$QO(_hCr(8RsN}w~gekgq;Z% zsO;pYUoRPi+7=%}XNvVGeP8d!XPN8%$P%uFywo3SVQFuQ>}zItbj@LNU7db#oEdq} zLw9&Z(pzKM)jz|+mDw|&1s?q5PIEh}9f-_F%O}^QR>}ee7~hzZ-cNOB-^t#&M@Kx4 z$g$$aK7QU&sNZoqu~(8ZWt+4*i5;CZ$)$n}Io&fGvGWJJ5r5+(GvCASDm(x$P&3#1 zVOdh^-!L)!SdY3e7a_7{j2f|ynafGTUwb_rpz%}U^?7IRnw@3B9d?T?B+cOQVHQLJ;SQo>8P`mJsryW_OF)yt&1VPRidf7Nmcs; zi(Q0$<-nBTGO8IMZG(&&9zNmLDcuZy2Y)7aPJu%kxZFZzy>E8TU8=_~b|PUvJ@DP$ zcy6TW)@!cB+FMHgb%FeFaYS7<^7E7E8u27VJJ58__VlZ9Z|=VZT&yO(b?F zW~Z!uD0@-`(8!qHQ7Ay9X)V(4#we>fI3YRb=b?M>i(V z-#~*$egcQ};^hB3taZ=5dBX7!+V~FW`9nr9J|)LP$|V_GV?B*DIO38sDKN#>YpF!V z3Dn_@c&?l(l)l-M8B&7UFS#X31uf5*+y;xc4H_lgU#1VFUq)7M8xhp)RmCGx+9*OLR|WF&24y#L;UjHKiOa2J~gf# z(E%$c+gaEH3DJ8$R+5ETGY*8aX5C6I)MOC?Xi9lESxDCAkY$rcQs&>i@+!fy%Sbp|( z@_cS&Mf%MH8Y^Zb6X@wl8ML!~fUc|5?&{rP=sJ4wh1Wz~R5FujUaI`eTU9AmQd48l z61c`Wa%Zs+m_ReHvS6MeN6tr+?6aV%;mU=xwC^(ev&V%r&YIpZazFbM-Ve(TNAlYX z<5PD+=Z}sPuOncG>p>`*x>JIrHC5x|W(DfObbV_XCd)qJ)y(!*wIar^)|#9D#~0P; z*`VWm0fzqRNoDuU=cv}@3Gw4)fWIRt$jFatjZ?#Q8#gsYUm`^QV5=-A1;RRZ8oMdG z_`yJlE7>nR6_$UcIgR`p)i+7vjciI)L|EXS0FSiXwQX7WHkCXo_<0WL&Z-13{z0!DNMSob%T{^-Wa&I`*`a$Eby+s<~jEtm|ig)3oPKx^wwO zSMhpPt_xHra_ZZAOyZQi_Op?|Oe6wQqGYFHFgp9XO_<@m_m9ZLE@TB^-{#^aNYBk=l$mrXGZ9g6KG zXl+u@#v*IS4r7ur;)s@}YpyBU4BFp|C^Dk2rHOPLEl4j7JFOJK*gBLe`fu;%Nd2n& zjs^=I9Is8?pFXyo2cZl2@%AmHCAx7V9*dRU=KB1;(}Ioh(gH!_5NQGSy)Pf%J^XbV z_#98N7OUFw0%XBUz;*z2NaR+ettR}$7n7cHiFr*r1mUrmps@#MCt0OnEc%x{Y(i#a z<*mvR;^PQmw1EC!sQ=g3>bh@}RfN9Lh*z^h zG6Ex-{%PiXc%ozD#EmySmqn-_BSQUme*3u@-o<@7IN&*BQu(22C#AHZtJ`RbTwKmC z)#9FZXQr_~b-hShSX@~XVz7DrE%`uYJw9W^#tUs}kG-p@4jTNUZlV26LWIumr8>B% 
ze$HRzl(kFh{JutUV_p2OwBYdY_OXcxh!KDKlvNGyn4b9J((S}b$&~|=`@IyA3++=` zjX_Y>`aJM`6UFtgMoKl>FXOtqOr^)O>)4;Cl5J?VcbHF}d-CLjqQFH{UhIOc#W>?$ zHj$VE$p13=It{7K2)8HrzUEjghr1gQJ-;qKW)@g5=n1W2d&*T zt?M_Vm@hWim9@3m45)+__CLqAXg~SqKdyABb=9k5#b888Lt%LF@ux@Oe-j+@yPuT}fd9Y6gWX%ZME?4z${?r@97V34H#!xo&2Uaq({NqEFH1_avCRrM zPQ1zF(AL*CjhU~2Mt=G5%gxnw>@6#tRLGlNd+>2xu`uvj7Aw}$?K&^9G$UH75hFk3 z2Jyfh88BN9cwCC(laI>%enllMV(S*?XxsQ_3L7&D39qyIy z2!}j+ILA)h+GgQZm4B@Wd;>5GH28{UnC2-|d%MmpM`ql^82Gy;reim*ok5r&kapKG zDjc^wVQM>DLYEMHfgDV;9-Ta74k zeeak?GHwrJk*2iMkW&Bb-CZ7s<=z7$&5*|tZ>nuuai%z`h%@H<$tV>ck3*Hs1&7CK zL>fx{VNJ7ZZGftU*IdizvNF_N#sT|AIuGW)?L_42K&#atHl^btBO&7`1h}{ZL?0u%x!q?-_OA+|-a$@HGp#}PWSc(&XNH4W*{O1#7qGsI!?6Vx zEKap5WjSdWjF<}w05as(4&@DOc3quvuY_p`2mJIQ;wzGr7g@n$7Jh@ zof`Ukur03g2C!Negfa+=4rd&^os;tZa?FQGAa>_wh(I4N3k%_`@_!4}XWz=`+-d9l z6I&_~e_=S-Ripy$o-XNW?ZS{iL^$=C@26rlAosEKYspDx81ZM`pF4f!;b%p~8U}F1 zqO1|!v2>CO5)roonPZ~-rT8|lXNi>Y%|105$$;84bFUXui2{ldy%#RS4Vjand<M8seRgTvNN#vk|7gLW$gv8Btu{G}@uFotAsYF7gZ4@8->%E0^Ld1VF;nsP#PftGr zeh{F?^gvxXh=A&wji9A7&?A9%gmKw!_p-k~;7h-1SD0$4_n*AdJda_i_J6TZ!i7&? 
zp9#nRq%>0k{|;|5ze*4bpFzlDkO!+?Gk)WWX~kc?qcQ^H{nw{8@xg1hpqOjJ?2jMj-a{}JMo4~w*)x^hF)(i}3Z&CRcwNGzS1 z$Mt?ylyiSB0<67woftO2jxsOU|1Py>)3)c_OvmZ{#1~281Ienjz0WE0>}6+eZ31I# zgV_q3GTGjR6x+uD;4VS`JEiJySxRm%^&AQ)ioFHCHWH7ysyZBMJ~NWdS5wB+QxFgPAZWP2W=AM zy&UJvRbY{4t2-S|Cb?{+c*@t;CwWyh`Nj2=9Z>|Ah-E%d5~{7gx&Z#eVzR+u|inswC?N1Fo5>No|bP!$`n|GY0g z{OMA@u89N#EwwJw>^%bD1sxL1t|u$1Gw;xgiq>web~!*9q<5)Lf+KnoDs6Np?8hxK zunToI_ElSVk)>Kb*G3sWcRTGjR(FCG7S?fhLV0`NvKVgH>6>#M*f&l3cJ1nnOPAsO z7k%~At#N@msa4U%v{(Py1%N_HFsSbO95Ld}#k7|3o86@BJPXg4IQN!O=PvCGr-qYl zF>i^#;PH14NZYozx6OeAD+(VP8R?+0NMl`Gl%hv8z0NzDD_3u39p`KoZS|rS@;Yvl z#;#D*{)mGYX?eE?cOgX7bqWu<#gL4(`{|12^>)?OYK`0LwqCfyyrPQek6nC|Gt`h| zpYPAIvSBbyMbYcT_`aW4Kl^^(jxkU1@wHU<)y2H%%N*y^+MD`!STm2vP6I zniV9;q1V$b&qu_&>#5-~WfZT)_>93Wzdg^g&PVadiPkA!FCU`R0e49+w5y7!k$Ng6dh2LoLKW_v$I3w@4R&M|mCH}Eg$?Q_{)QguFhbL-IJ z1v|bgbZRrOC=zs}f4MTqcx_GAfDuGdvDp2S_*4=9IPAh3Y>98m{*hnadCUY+vD4|=Yz=4k5y!B zo|*54(nYOT&oC~273fK9n3RB%;IYc%^NP=QXC2Jdu2PeVt$YCgc;evZbb-I^Lf;k|3N)nY#dPgW5C8Ne9fcm<*_%;pmuGKUFYosPasO+NK?ZpRFdq#qegj> zfWlbS3-D}f)vbr_k9m~{36#{H+<#j4BPE3!v*h-Q$C|*V^)fo#q^v>cOwk7Si|gdQ z;cyB|a>Cg8{z}aWPEluQ3+tJ8`^>UY4?<&_W47l9)*>e*kkn~os z_kp#)9%6S;={5B2Pjudp4JVpHNCuJdYlooMMuXtp`}g?NS4j;-j4 z^(tM7xny^{>k`;?14?OPudT8zZh(5d^V|q@y^OoCueVPjGfiUR`B+ANR7G*q-etWi z+XC!8!h&?k?Zr2FMipM{Bh@PwD^d#%fvd&_`Dw(NcSy;9^cECY$)XE04squK5-rmG zCQmj$f@VZu7@U4G_uf>qvG<(n1cGtSIuo-pG+`g>(45A|1MRyT0FHO3gcmHS5$}8p zwQrPKwIGsgIFkmV=GM}KB0xrY= z;*mZ(Q0?pv`-^!fSy2(v_Fce9tPXw{Uv&tfRXUoph`B(B0@oz9_2C|hy%FEo|| zm86;?)JbEZS~9R1`S8!yfr{rZ4NCv0o#b0o?l+1_iw4FmZGB%_S6}BwvawUpvIo-f zCI#H^40imD4XXvdN5XuC{>*wk@>Oiu*pk=yyKNW%FK7IrPu1k<-O_~>CVr$MoN zUr12!i&n1$$H_r^c(hj*7~$q(nq5dKH%`>!m1cBIGGqFgZO4FYyTb5wd_4S`O#^Ff zZEd&C_xIuGR{}}ikqGtm*X|L%+uaQxRI=MB0G&y%^T?h8HcOq5 z+OjflBojdqc#078bAq{qxI;%FR$}H;r-@mUWEvJkoh3@wJ`Xa1e^V2DagqzBAOIE= z!y0(qA!ALU2>&^10(|{mVuv&A{MS>Fw9H z-%6wwSP+d%3S$m4Sjo{dRCzy|Pfj}zQ^w0p!+ELI7n_S7-z;vFx`6&@Vs*#^Qb$## zpW@zDSZFJH(x9>!gJE1g*moO-QEn~IlYjaCNxC{e6L&%69_2fan*|84nIJ$vA6F$s 
zQ#BLLk(HIv&exE2Lf{!dEW>0)%-5&ACCAiiYh?-7GoX)XSgbAw+MU1iwMzvFqh)mKb?xj)p)%TV6ul7p>n0?~z~ z2%#QlOn;ub@z~RpOP!u~6-N+;Y@E7?4eEs2#%cOe|I$5|Be#iT>x%cw_VsH#dT++S zE4qWYjGG=>Tf?9^{4_LgVAABtL1)gXfY~fdmLWG3iTNlF@q zku$PqDcEsZ6%>?JJt;aq-d4B1qg^1*@qoJ(KZeQ0z;h2TeA0N~%c#oC#S335G5+9X z{~v^pkeP5&4h&4GHM^@rHLn1%#CH^41R*W}-F|@f;+QmMHb5^5N21eb^%0h5IZmG@ zmU;Jy@H<#sLv--ZrT)J+{x7Njk#Ge?%e`xAQlq7%Eu`w{S-1V~u=Om! z!g{DGgyJpix1Y*!uODdjGYUs?VEjd~Cu-y#`WX7-WTEs8#yGQ&{4b(`!(P$EY2R^M! z@#Nj&%ct)y<4>gj5N}Mi1IaD^a$kP{PnXWper3X5f{cJ-Sp*mdw1{1I3NTCb@u%iD zqsP`8?gFDLfwv80mw;#cKceG+|KP~cgCh;>giL*R&hs&#U$6B*SE_ygYr_k)!EG_Y5JoFI5{ zmKS*RQ+>H_;x(4zLwGsfY=F?F6ev>ftN9mp!s(zY$Vv{(MSLE&*A=|3b29^b!!}fN z4u2V+pO#Me-v24Vf4~4pcS{|*b@QiTxEHwTsgmzRj;B4aZs4gMp80sBh(1gG&~gd;wT)NVCridF#YrdV^ zeQ9Ps;gab3{ZN;m_sIZ}bWH*K7QO%o$o&tZx&OW0e_n6mX*7z6d-H}J0!bYB_Kn(y z>?dDN5x!-MefyRpP2A(v__#hhC%2tYKn>}aO-I)nzzLamZp0t8qEl#Z^#02}vrwqx z?XyAsCM%vhVW*t`I&_~D=>M6psMoOh`F9zK#Txc2e>w&aisy=WNzLLeAOe!=xnrYs z?O$=%cYEseSFhL&>)jHMj=T#B3ZfhT^iZb=x-Wm}=}D@naAoIaPpqpOf4Uvs6?mOo zG2G@S(ZXfi-7VPK2fyEFS0#Ie#ETqoWgjm+&y!G$zJ-|byo_g?+^Ij$0G`F~@)L<^ zsfj3kU_mMP2*0zptG-#*B?Z2rCPYS}3AD7VTdG)}2JB1^XkM8Kg-4sCAzQh3 z*8BG@rtiLetC3HeqV=)+i+WrHX<}}12?4%q$YQm%XM6+`BSRj?Gjhyg0;p9 z>3=DkevFJwr^|vTs*;ZF&m+Xn|8?p;rBkB+ucPS$np?d4sb76Ug!);)t4`!{dvO5b zjVzDm+JabGmN@7eam7m$Pi1SED|&j$w1gVETSEtwR%q37s7NX3 zYx>La1z8e*go10c{F|T56b^WPKCIuo`oHIA1K=msPg(|5-J&Q4akM?AD*G&+ki*5Q zUVnucmnEkCTvZkIc<8T5P4L$%7iz72|JBHtGYoGtemk5**#3zzr z*Szo&`>zN5&0lNq%Q$#!ufhob@g-!KXaP+Xa_Te{etIx8>R#eM zB?h43f6d3gB;!wk`mg!;9}@jv^YK4T&HshwV?ZCWI$2x(Fx3$jd0kZ5$j2_pcOM%( z;(c}T=Iak@)l)24)l<{kL&23c3tOB#4;;xn4Oj^k3Yt9H)5ss|F?uiCA8vK^774ye zy7YQ?z!rpATa(8bC%#;RdD7!$!TNIuuv_oOm7A$IRFaN4K*c6b8v?0p)w+H8_pK*W zu!-kj@oXn}%Rrj@xabdu%%&!zBp&DuFy)z8+;r41MRV|TZ#r0byc<1g^&LNJr91l(6xu7^$O_H3QZc%yMC0hCofr z91A+nlJt0cE7DN;FS>Xd^`k|;TcUP*r7YqK4QdCJg{jUbXzfco>fZO0O|HkR&o5+N z;H_rZoH@ZN8hOs?aZVYrh_4AMF!;_Y!H)piuaX{sv*MgG__zYr%*L!2hlYk~me>~^ zql_8lC!fxLUSso^c4~$g$hPQi~qwa 
z{7;?umnW8iU;4n;aDFWkX_V9w60Sg0mv0$A={i`s1>Yh>?PWo$?fEg?wBaVv|C7q?QmR#sNJbxiSdwTSxM&edV~y(KIr{NznepCIHENWV>JUuyi zsQE!m1W%nowA896bPqo+Jok~s4tYb#lI5`-DJlRI477X1Ph3y5=!I|t5e~}D{#Z%0 zF%-(8c)X~tQA(Vk8(miBku_5)0SW! z#r;xGMqNXR(h!SPzPak8jGGmQ``gk0iM#jGW$ zRzFZuQu>C;Z1g-dAN?{H&S*X*WYL2Huk-PfQBrO6yB=Vvq9^t|@T%;~Kia0n>630Y z*19I~G=>cLByt#fC*F`J#I0V)yv@{jn~5X1BFAHT^1-?qzks+v7Z2YLVdU~;%H$+# z#@A?MWU~Z|X?xUwA1yIc;328e7f?*gAu>KnEF^t08F4x-xC}j=6=FMi5WukS5>_L5ajQf# zG}C4T#>!EHGDvlqCU4oexK`hJj`8Yno_>+H=;8iMfZL328n3%9<|2t ztl70=RDoK$)F_XZuw_nVZDC$ zF~*3nP5LmqxB)i~Jt{mRKhPd2)GNZ2_P^fzbgv_UJzg~N$ULZIAoR@JkqOL9hSJ{p z6nBxn@aB=qnZ+oM=}S?Wt^0RGr2~3Gp43PyN6MZ$O-Ld0_kJ+=o(`U?MwbdWdTo9u zs`VA+pe}4pegB@Fjg|G|n&Eqbk2zgeU&LG2*q^Hpp@fyW&Ocv@6SDIj+nGO0BbeXo z=hVZZ7@6G8$UEz94y)U!bwNf3?olem->hU+ef!n!>n>`PEz`+P)OGpYgpkE^asI8y zNx{~Ik;~T479^+CE2p5AqoPPG1!;zWPieM#Mt=j>ZWFhb5C^Nhp^a|zfNz~_8DD- zXdlHeQ!~Wd?0Md))(v~c`pR5obhX6q9a`^h)Hb$Q%EN$>L@D8R=^^J?op&={QmXs*7O%3wK3addSBdSx4E^%Wm@n%qr#l zk)!DhRu0%CPM|`lNqM#NpI11Hsv4v6HRKk$*eBiDnRR56rJwaKxyr8j zCHT;oMi%waTbFXne!MXKx|HwZVi%#+5v$t}*ic0ggJ4Z@p?>9L6;CXM$NFSUheeiD zb!e01)S%2#MOtLPUgZ|FJh-H?$?=tN(bGud2ue=&BB!5)c-GW# z|BbLiZe#}ExLaa-mG`4RzV+Q!689E1(vS7xXe}t ziHNrMeK)bx3$@p7Qa4?{Mnl6+$&p0rO5)_NR;yOxf^@e!bb5y3fMV1_=I$I8I)%-d z*8ky&HVRo-Og0`K)yuVg6)2sRiVkpSaYQk7nhbq8AIA;7Xp?v-=J8Q768_ zs-8Z1Oy?-I@!d)-crIQ0arkdA;-mCW^HD3{xWY-zz+j{f)4ec?evLi1r!@cB&2?tX zb1`u=TNZmW?#{eioP-yNQY=%D=>1U0{@Mihm0MBq^y2<*>q{d6o1c=wn-Esx1QgYj zQiK{W(!#w!KtoQmJ4P|qG_@@eayBZVeOub3r=T^g5~Urlm8Vum-=_aQ8d7Zk%&a$E zOZQ8o``X*<;_gEtNJ)NdM91elF^}8M_!sGwR}I1RMyrhDZ@QVy;QXrG(#7P$Ba`yd zy%ikBs{@J!t^Ua1?4$4q-!ohcA%D3l8p9~fGi;ME>Nia6sy3Q7e}j6udFB!5+e_Xt zQ4Xaf5&8Cm^z&**TY$tzqO$l#@~4KRkI1%lNQgS zcrk+Hp3jy>i$7(s?mlDS7;+NKi7)B!?hA|-j_7XN*c6KqsxdgIlWeV zR($59DR^$=^iq!@$4cZryz;u_QT5PKe%$a~<85RYZ%9-#nav>%bE4FYlPp54K_IL# zd<`y=N3RcOUa%_RFy;iiZ|G>e^q+oBUE0*;8e+wrZ5y_DiZ(jaeU7OxB*5<3*l+;) z@!xe~KR*|2j1K^OmO%hO5>{<9Do;YikvnsG#=l<2dv7%tqr;+-#I>aq;~&R4YC6JE zTx>InNNU1L?bbDVmFrx-_LQ8%hxW&s!AZBB52jv%W=odC_Q!;Xs 
zp(K>A_SQ1j=W=<7>ZGEqwtGayAnqaoai{FQjOwKjVxwF(gsX4+>-*wK+R!bYNO(`ly6#2xcm09%i zOn47Fh5d1kY?a$B61KH6XyHl+*GUz5PjEu{b$|L}uf=o94{jHAn3tve>bs71ZPHQ* z)&m|l#R*yu8K6h&)vKwyQOBv8E!YJ1lM4#)4#Db_2B4>@}{}+#p6Qi|UBKRqWL?5^~u(3ua)9 z%6fTPSFc+2;kn=N%`&r1mzb#iF*+h=lKY9;uCOVyx;*85C1^FUOjppF55~s&{NQ(d zMs9S(&N0JL+iuehZ}|Q1X~*`Bo6LT=q`5JMEJF0^s<+4l1kb@b(K}Y#Y+AVwrD9CT zUJKb}>Pb$nVzyzeA61%;$Av2VL*6E`! zp$<(fQ+2x|E(T4$O0CLNBY#;TQ*mlspB#C|SDJ_&W?#w2@6L(Q_B{DEeUGc{qb|KezptJw zdg8g*Ioxe5SaNZ)(dXP)$)*n+wGZFMq8<1Q=%1{ovc>Tjdixd92;_Z9V9x-7T#F;p zZ8@z`P$IM|E#OQ~?}=iW)Q$Oe_0i_zcezcTo58+Ujr4j_Uyzjfh_b>j{mo$aPdjlR zZzt#$Ne#R&gypHFDSVDhHzUkr9I1XBs>li9S<`W=G@4S-taT=nRJ*JFX|y{O#5IfpI;(elq#Vl1Jb3N}I+SQ68_eWw7;XCj(($77w zMIjtCvJUXLDiVI(!mCf=&whw)W`=UwLg0|QKYG$KIc9v|)Kcc1wqC3@LXwGMJ&Xa@ z`P&+S2S^+>^3zK9-Lldk=X@NI7hAX8q-Q!jGp;UeCkW%abtj~UnVEUy<^&3d^)XMm zbhS65IZ$HrW4t{iB~Lwrbucnu-Au2_HHuaBXhx3oRA6W+^IIJNt<$dH>^;a~&DH&3BgWR$Kp2O&LyZ z^zlf+6r8hAPUJGJvKc9~@dni0r}GJb)NO?K~S zXF*V(%lt6K-f7zoAMR48gGhznYvAEL^VYaHaFw!;PKig9``H%~v;~~$_z0!%p6JR2h5v7Lj_R7?-d= zhQm%KHj2Plu2PH!q!8JoXMJH77S%)2%kfOgX6eR68?E&5M!-NN)JIbH9oa75r|cI3 zyY|yuyMdOpAWK1=PTE_md+`3=r=9dwbhCZqY9kVe=)0SqWX_tDvNwE25GqKoLsp8Wu( zmW`4U=Ya-28^3%>A9tnlB{J6;UA{@Z-4lQcY2w;0X9iCoRN`+ks*Le7u0pz!xZ~zn z&Z3%tkdcJJdn*e*pP^>g${jE*Hmtu%M(1bdWK74&|xeil9&zdGDnggD-t@MmE%hVJ{_wH_(tu8g#9 z9Vs(yFWz#3f7TJ!F~X8L!F>eWznVdrbkbyyJa%6>RuH%5(WwZ`S4?OJ32hv5&rF5kUzts*PC z;>9lAxaSHQiA8De9V~8-Kz9$9y7iS_Yxs@n^B9Qmns>>E78LEWsU#&OagXa6mcOjc zYZjLLpqM3#{jAxa|G}P{TVE)_u-;vND$pn`LraIxfN0!;j@%|&B}J_lS?G!8q((@= zr84;B(8br`+T5tkZB=@Qx!__}LBSjb`xG5j^&f;c!%H6Rabs{Hc zwfTHkD_-!str|@Dd_USn4GegmtW8yqx_ONMU4-oC=g6Ob@M9#^9fmi*mYT_@#+4{Z z+3dN#5wuEG#Bo4ibniEaT_#op@^TcSwiE)_-?o?bB=&sBajCCvB2kAq9wc$6G?Kk= zZ-gzw2%_Kcn-9#VQieuc<+*7$;od(1F`cTr%Rjf>y8veSDvM$e^PJRDZk^XVte$Pr|QHwwytGdm7PM)e$}2b+S;@~lPyOT741FQ zg!PFE=gDMHD-n`7vgDaKgd3CR+xsScbaleoYACQNH&G21e01Zv`+|~GcWttkQGmhm zb>_*<;kw1H<^+?rbFkb4k}z@lQG_A4AI8sm9kpWYharsvQ%w*D&BQXuC)-grMDSeC 
zzU}?_apw=7)HiQNxFCR!>!tKHI!j9Zp4{jAN2hs{xB2@oi$QYmiFo(T_qyvMCB4M8 zN*|COn)iLs zh!bGc`l(Ir8VDrW@2DX7X`p~ySa>AKuobKP%SSe}*j3W7Z~)o>JAFTt zI4we~S5kp;EK1s!7i4qOMRA2FyaD2+DlSV?Pe$0859UT8v_9&Ka}}lvI*jP|?qksB zvQ4rar(&YQ1U{~zQ(?3Yo@<>9a{E1^Xb4|}{I-xLOk2-SX?ImOxhCUwytHDA&F}&Kz`?>9Ux<7=;W$9~`^@;~AsqyBp_q>B} zsh0ueQIw^DVusv1JZ5RbXV49R43+|eF`Nwnw{&!Dns_yaG>rVZ$UPD%OoJ5LuE$2q zyuc#|L0XX7#U`+8tw8<_ds=^l@J;TPKTNae&_qu}(>-WE#F4X78&y1TvKq`SmrC7E zzJf<3q6Z^k(|ToQ!!D1b0G3E=vM*V-(#~jxJ#ttWDu^GsS*?ql37j}(D*qRPDrxrz z{cj1$nHv;WTO7@cTIcWx3rKBmHrj|Z=yZ!|4jw3S-#H%&$NCcS3{%@^K>on&(AZwE zhR-MQXnmy3G=}h%Xk=XjuVs^hvKdQ$kZS|!iCkv_&n?vy1)>7UA2(rJWhCj*N4C$~ zt;-Q6%9YLKod}yi2w3f`^8g5C;oH(CiT%k4{&y#QlIQ}hTiMQu`R~f79nX*TBi_qQ#F?x#I8@Cg`V8O^w?EO!7R0- zq*n!~aR>86W(&_Zy?bQnR{e~!m!aOmKYhxM&n41;xzOYs;~C~7gFMxY+k<)PBUl+a zoUfWF=ShZifEtGm2R8&h6supE$&u{7uB_K9BZG1W8;0kH)8-YB%m8roHY400Z?->Z* zirT<5i2PrmtjpH`gh%(Jkot~u3728rm%*DO%)e1QA(mhwki9~xJAJsln2f`QqZ|(^ ztgf(We7~>Rs9)nHt}Rk8*B&Domf3x}|F|pTLk?=a)ZtA~N2J{8s`$wMaQK#Db{ zi4Pb?|759oXM$3$+8a1L0d8vh?I7*?w`Sh)CpDBW<@t`m zwz_yD$#bi157Q-jh##usFM$slDcjOaj?BBRb$~L!E<0eUZU|&|H(m1F!S1R4Dm!Sl z>crC{YS`Q{U5Q=8RIS%`uAt>(fzQocvsP-q$I(e+0$Jd5Tw&uSFu^o{yr!5vlt0}yLpL>grvVpKPO#HnMB5cj-@J-7 ztuI~Z8m_`nKlmWvwkr6vM8c$%P$9?l4m?MuQr7J}Aj)KBnXHVLM z-TZd|4d-m341$o$Vm$D%_1%3S2seRg(k)W z=PvJz>|`m>%BvC@Gx{r!F8p*T{B8uUytzsxWWcEKA~G@oq#~UlKPoizlK*?vyg#!C z&p^Up9%{r7h^JHEhf8sSR{YyK`O1BE<3hqABX=5wac6l9k?6^lD_;9kn;)Jg<)_R6 z;VHtvE>X2?=m}7bb5x19RaRYbT&WP?P^5A%xL(9ycH1!bc^C0X3K+D$1M6=t@;Laj z`lPx47yljaj=t`X2XzZy?{QI=rV`07@7AFOCHK7Rf%wrL?5?7BJt^aK-}_M>M0b1# zD$U!n5gSXPSG7Y&f8Le6>(nVuXQlhH+F!As4kjFCn#>>aXrQlrK@M4Ds3!P&ZKM3s z$dEpxG~bN?g0-_U(N+tU@k6G198rXb9RRswHLTlN^2*(%ytUA*@S#3+c=z$02G1A!*{&E06AN($CN!o=n|8^Ykc^L^v6ZfRtGn1%zp! 
zQojs*gjBF~izvg-s94N%L)fZsgJo);k`z5snb)v8;^Te1Ip3b`9hJXPIsHUXTO?pP zw!iMiQP9jD-P}$cLe95pd!fVTS@c@VHi$Tt;3KM;Q@U%y&L0VtQbkG`sx3DqXe7OB z*5L{Ugl(8zdM@6Ejh9l<{aIPZGvM{k zGh?@PtAc+>m-OaWHEY>V0t~{p`D|OnHEwoLRUJZ#{e_6Ew`X7af=nQx>`mXzS@T1B zKeXU$crf?JK$RVp-I2XwoqCbP1!UJ#Uws z3Z*$A@@pVO9g!1uz{!faTW4YJSc%M>;0W7LL!i0{AF80YUl|DH|6_q-LJSWbZym&| zOL4~_d6~AUzE)vZgYLd5Cuo388fTgfSyTepV0YW`L*Se}IoTmb<=n{T?->Q+i$U+m z!Z@cv=tu(zbMPb`AnIg%Fw1Y!`fQ-kijy4zD_r9X3ExQs@O}zvb<~CnAR&Vj$=p!Z zHbv4AKltWI3i`n)qR|y%g2#@H>lSQ&I)I2sdhvVLxKiO5oh;Q9HO-0#+7CQO5%dF@ z<}5blAiF{Ko;-`B6_t(AjRFfjRA&4$#zzWz4x9#AbrF-^jny38-hs&qC;oDaz}}YG zL4x+&*PvejEM|*>D0;aq_f1`MhfwB$B%2dtbrn${WU9&I!vMpWW7-~Lj;i9WnukZj zmZXl#JF2!JC_ke2!_?{#Bp4opl6wy0HJd&wBaI<|tDw>?7BXku0|IzuOH1XLL0#DO zrx(s*u8=zPH&kx!Ux92sCNgB94ey$33!jRfBb{5Hs)xQ6f)}@o%9Tz}M}$h5fue7{pU8??cJLB_82+&vw!4NR`Ovhy)Nn$oc=zZRxIUUH$tu|@Ytmkwdvd^1g~ni9!-LnMH5 zR&A!(#AGqst9;}n?b#+e=!{RtYO(IB|3g|3Pq?-?0=zxbi*CN<^)0*bmywZz{-sP` zi&h(ppBy}&GhKXfD}BI46#b%dE|khB$=@&Q9x|ENI1T^}Mcww4ZH53?$Z^`Wp#ZLE zWFA!DvYtBvU7Ohv`@QCy7G8>>mW^)<9%p%~X_#k1!hgK49V*boSH@noGjy4PIDh+5 z%ZR24S>I;eHhBu9&_?F71bJgA+z-*n@U0wNXW;R-b(t^Jxmqs8Sl2>GayS9bjsD6(?^>L zR$=Qt5jXHAIy4*JB8sFi?TVS`V$mzFz4(fRQboxu+b4EH=!@Jfs{u3qfN4!(bxMQD zYKNH3Ui%jkbXM9od#rR`sAouJO>j-aWEo*HM@S!F#LGrSkSqb$ zg*ZM~+$G3RSI&J%KC%8{oIO|8v=SY@$f=a2&+X2Foi&vtYNHdj zX9k5~*OU|CEKT36=i^noz#Kq6ZI6{-qhVfP^){w_n=bB=>uaYyD($`F!1CqoXVTNn z+dUZXNNW-FK9-i$gKQ8UevLzy-LyXXwQFjI7EsPO5KtcU`Zu9Xl%LGAw%}gez(Br9ckr8YPbuiH`+;rs7FWE?azAyZ*E)&BS-t1H9RT zOLSvGYCb6aLYdW|LbYEebnXaFY>m(~L3a(qM>8irA%&2blbsc{bB1buUJj46^3apXs$U6L^&K{6=Vm%I@l1 zRk+GU>fvn`+R=W@;St><^883fVZ)QwknzpqLzmA!WjpkkmH2l-ztM@noWwk2}C|hEQ90&Os1mx6AdZv!4DS&^T}5X;c)BW7guh%Pvk~x zR2evFFgx39Lhcwub%Bm6J#IY+`JRVH%chXs+e7h5yHuFAl-Y(w)oj)T6EEioHS>4e z8E>yiTHO{$H?(%X9;A@mwbpwVHk5z(zNpom$%T}jf1LCS+)taS@_xs@-M&a{ZDDE8 zlft|wHQuo%{jG`i%pU5Az1Z_%a8oKy9Zmh}Rm-$6Bwh&vbM6__`)O1AxRi#1~8f`ajY8T4<5H`WlSN+)gGgUZ2;&EdM`kG zpLD>PRoU#^&%A~;JmT)Q@iEZ5PKNSyzf*=~dj6AtUI1-u-rw0;R8c5Fl(t)!@-`pC 
zde|=IXeI;M!I8)xm{PwE}XAC2l@x&n{l|V+75IR$nn~vv8Kq*{lUg z1gNzhlZC-IK@Bz$pqf6U7oppF#jqPbn2X$t8*^SFWyZ z|1f(N9<8zusOkcbvL)iy9HV`=gvFY9Dob!UWii;Zu-qL&zuH;dMXm)wyS(7m#1R zIqlUN?%S9jx5{jO-{WhpYy`pNv406OfSCF1RwZn7x2dk2z4)UDbH@%JarBf2oEWWq zD{!l5PM8I_b##vZrU?KvjO1n!$gJRH!@7!B+|U3gLv9?7YzHO&7 zUz$&Qy-rB`kw;q?xMDVjFtCi;2NJZj2;LIB!+W#d?t)pUH?Qhj z5iRuhHNVVr2V<7s-o4Pye*}ryyGH7vgD|!v+-{GgdB0yy+j9zyZf1-Pi)sZW+=4~Z)4G+t8 zrOQODEK3Kd?60p)1s~amL{S0{8h^B#QiIjJ*A>HJhU{5OcXxtP{neR;ny9i{s?@Az zSWs-s0EUQSadnu}d3{oJtimc}vsyw4q<8Z!?vzIi0%EIZ+-%ECj%=I0J*J@fO|Ghb zR|IbpSGs?ZES4<4IjhHTymPGll|2T75sn2g%o|hfXS#C%HX0d7MxEOxMk$riy1!R2 zy$aeelGmXR9z1V2XzfyP`{wI>^wnytZr4K8c_D_HUJ?S+jzg9XC)in8Uo3R(Ka)tp+);YV_2$P!y`ukAld3kKAW!ng7x7=RVx%}?B$L!N z^VGOuu%qe5`uPP~(|IxNzA~AhTir4~XIZq1*EeSZuhz@|;9eTXJW2#W=d{}l|#noIF%>fyQflnI5xRJ#)nm>iA!Yv)h|jInSl5N`u?L(N#V z@n{%t8tc?q9E0#3d0`i6iab&UkQWYpzdxSJ`>hB70G>Ap+q|flC1oHzqm{!dF|J(bz_!zvc^dc54`E}7~|u( z_Kwg^Nw3rre)eCLzXQ(&iJT9+^)TU)W{xr&NW&$s$3cE=y+Jh&R8?Mq7Q~KjI$_hr zj|b@zI@E6nf0ZDEg)HFmc}giG#G&ETi8_dgBYteyq5M(NDYc0AzrN4MN*RLrQJyPr z<<&0VQnU5V@PpjwO?{CVvL)$N$j5*FSC1oeilCH%R6U97O}28HKB%DH1TZ1VtTy$p zW6TeYmmZ``gyuUQ)RtRBp{saj`js{$59v2c1%7!KQldX3snf+&X_ejz78o>mw&Y*+ zX9>&uWdG|Ynv@Iz{8M^)^}X|AC#5Ku8c;m9 zL_OARN!4FW{kpTR215V}HTb^$FCABIp_gZU)%_KHe?wtK3@`zvjmyDhy zp&*Tc8b4M1)uRw!23dlvbC!teKR;;Gd2ka4k5tsJs-p~C_*7&ByI}oG=KpaU#Ghe~ zKf->+Y)4~a3esj?`!lzGnTt3U;}H*(_=nA3QfwXHbe^_c%KIydeK^A+!*?81n`l7% zi;qz7;sfoji5_{DUnL26^~Q{|Er5^@W}8@){{Yh$(HZ0z)3w_%WJiNAs(}*fwJIEO>6U< z|D_;10`3Q;l%D(5!+yZxX^yL!4$`Tz{3JqycC0QIrXB53u(_fHq_SQTi&cq%b8 z8}}bMTT_jvMUk|k;;byitk220IUZ-S|5d$ZHh?yePy4jLIaeUFwZNV&6O_ja1~fnq z#YLspd_il<_R|e)inwDre~ex8kLgA&9>Y4z3AJCqZr>H~+E>HRNkCqAY6c^{?C~1) zA52EDBKX+GB>dd!!wjEwB6iK1nnIj+sO{L+_8B+3X`Y)--#>1Bk0{2YXB?o(e>!Yz z#JJI0-v3*LhClA`R;|-~ogE$hf2RL)LqOY7REO=RN+240I4$kpIA5#(zkYTWca^yA ziDzd5dyY=G_tP37^UfiK4?-_!UQ2qcr(T!F#c(|q)D^NnmeOBi4pCHjD`+JdQ|qSH zV=Tf8ihXfzsL?OgGl*S@p2sM$-nVx-%|?E#7`5xu9qrJvl3Vmxy9=Kjhkyp7*71ey 
z-MMgqh;ie~tULUBNhXdDtI*$WO52(v!467PJlVKU$7e(wegOE;$$x{H*CUpe}^D4Sp1&XKixniL!8aEuE$ zkwlF4|7ZCR`bJq}95Tle9+Q0aJ^SvbXd3Z{LJl|$&<0)h)bGBxzgVA8A($9<#vO}J zh>3X^1M;Dpylm-Tp|PeaabEvPBmU50$`78qbg4VXv!4$PP|8-ERgA%LhlK6C^26+T zUavOW9F;NJfG)a6(*v811$Eab}?u(?}Op%R)S4V!T9d}9qPDARjB7WF! z)+hr^iU2?|oAu$r3}z3!X)vJ@)wFkz?%a65rB{X@k5BxnkZ-^Z#*z~t@Z>Ipp}0YK z|9>_nL60xvNrjk^hpR}sE_-bI*aYy_yX2`a4C_vpkdYT_$*ZMH0QecXeqp2WWJ)+D zie;-Y!1gx(f6{0f17bo@t0QVTx5vnHxJ|V&F}u{AGEfG2c3t8iWkRZlw}0xah>k(6 zv))@lIP%(pTN=`zGz{MLC~+vIx{&vl+N8Ac2G#A2gdMc_Lr#zNaRq;@8+={*K~$awm?^kGoroQG zw*Jb^hqo^*apM8WZ-mEFGgVj89#atuQX|xWAedr>2nCsEFy}sgdh);O(y5?&F3?`N zp~L7AyFblsmIY=*6ADlE=tNyNJix$xlmymQ)@$xCIqH=CJxMj9I@A!H$D&tZ)eSaB zyR4H4(J9o=t+wb%l_!zFB`kC%Lhs#+TiwOigI^}Lxf#!)>u3uQO2{AxA z6anc5X%qzk0qF*jQo3P)A>;;Bx}{rU=pIr*>BgZ$x^tMJneQB*=RMwM?-#!P=iYzZ z2gf+qea&^PbM;!kwU*$ZEe8~gqCoTcs)I6S@KLYPVOIwSaH5CS1T0S!j?l+1_O}86 zWQq{L@9SBnJ*%c}7GT`P6P5E*B#-vUGEIZWA|AJUVVYk~1`tp*g!bD_xz1TZHV_!* zBi(v8wQpWrTEC|+UC?{{Gg*QYlst;=-KH9vUiuXay0Ody1RN=(IVXbWlk;t_Hy=)6 zk|(Y=5_asLd8%;TIY;;Zp}sY-|6?1QOgutUznhCg|9Sz?*D+Q}tC_FkWTkG?a4_ge;6!` zbNDpefW({i@jb?>CMSv%ek-9k5xZ}NmojjERy47|gNYisQR~nvOuc3iFt_`9R)!M3c9DPS7ZWy(BEF3!Z zFB4lUlCex0FMz4a1z1|8^q-(Ll#S`lA2P_(!DdiKw1?Zy)SaOA4Y(bKR7cCqV)=g0 z!EomSqM2C~MRuZ$dt)Rczz7DsB@WVK3{zxw5C9*a*^Vn-8?JTQV7qniF=rKO4fB~& z7#%wPwBivcx0@n1^0lbBd@LWjSA-R#{K(R!`UK$MLPvDHoWtA$fZ! 
zlJ7HyL<2xCM1&1REBSfWaIwOp;$3c{2&8GQ8uWhU>QsgQ^X(>I6)bONYC=TeyX-mg z1(@lRI_+0bMSU*i$*-wiq(6g--<@w~k-QPQQ!NVYSdHUKF!LNi*Ou8}rhig<6Y_AlJ+{V^X>Y27d2VeipQy*w-sukN*F6$YRc0n-eA9w zmnZ~PZqTZgjp4uytU;t+@Wy&TQaH0GjfrBls^2Gr#=>ZqDYxmTz4#9^K{3CW!y+c>;X?C_*yCoMlK-_IKRcEKyLT*F|z|H%pp=|-uLiGN=yBA;Oo3wX} zMb4(}C!e$~EpQ>IO@Q1nu|oic#}8xixmrWQrkTcKLnE|T3k({q6jD=o5`ajD*+mx?V)c0lVK_6)q?@{5 z9^T{1&c)ot<4d&IC}tWPX_+A30tP_oT?adZipXj*vxNI{|oQtML@bx758& z!B<5Vla;ya<5kl81yWUo^=E{( zC36}@*?Uv&e)C{UFn1NE{O|_Y5Ih8-f~|icVz;)Q1|X!v#f0U?@B2b`DbBOvcO<^c z(~f+c0jAsP`qxSJdGr@-hwv6tl2P5YX5d$Fpn^Ed)kd1()8GkA{`}G3?c_*EN|^6_ z38M#HT8l@w?n}#MeL>L6uku>Yg`7Y%$pvy*XzC1JXji^ps?;lZ>a?2q+u}{L#Sa35 zX&Qq%*JtEX|8#=d)8t|br*(~SPvS4##>tzt*|O0s_&GXP)V>1+a1V6)-X;?@kEqc0 z9IKNN+Zff_*P}2Bdc-pt^p3||>6$#V1{+;0yPhM}YzfsX&WV(*Q{;lXE`Uie#8G$5 z@3@+0)dbv?H+L^@|H_rIw=bcoF;tf}!HKgy1h^sQr&p23md$?Zd6Ok<_pffQ zy>%^lmz62=LauAf9TZ*)go6g(gbBnTwd>S7_oWd9=-vm4g9tk%^{L- zu5(q0E53DX#rkSDlChHM}{3&Rqd2x_aE-cqWt%h6@nyeHAdvy=^YMk0iqP0+`e+hgQWp|E2=C z+BO{0c^!)v2QxcX!L*R+cAaUZ@#~YD>k}1{xFS2o<*i4X$0+W%Z#k+a6Zou5i29z) zR8GMk?XHvMYBYY^z!2te75=bQ)BJ^7rgN)CCO;$H@A|qn8MdZ{33!MYEz(3F{y{t!v-~~jhsR@J@@90m%^5v z`KmTRd)!#L#kba-`o?dZ6#su`d`5HW5(aZkJ0tnOHgT;_x_l-2HS~Q(l1M{B28a3C zRidDrVO!#|kC$JBrG89(+)k^+o%8AD%}0V}?HbSaB!5vB-DG~J{!upU&$Y+w+fIDW zJUqf>mRoyWj!u|G!f!@S2=|_{P(@S;gLGQcuh7qdDeugZKU7wl^=?+{SoPNx{I1T9 zne4ejh={0n-yb_3opfCgw|NEp`>t{Ev-vAzd#^D$@=115590)(K2Yml6rCo&1DiKa z^DB*}-&(RJHojI6q)&k?A8$2AvrmrsCHFWU?{*eNT|T`&J}e?&83q=t$hwQ$Ox4-{ zBG!-rp$3@Paf03fu)mH3n+EGGSV?a(Toae*$y=J^cPOug?ke@R{pq5}cs_GElsR9J z=OSM(jj;1;b3`G7bSQO2(bo9Vhr{7$jq&56`i9prjp$O#v2vNS)2+OLTuF`g$fbkR z8-&ym$7_=&2JY?jv7iC@cE!H1ixmri#wOJp95>YLzKV=NpQVA3BpIRNVJ=fpupIe- z8-Ca_o@qPNgg&gAhN$?9do}$M3r{B?o_4kpj#I?+Z0lC8FGQjC?d%pza{3=(PLqA7%3jO|Y zAqN-|#pS-;o=wv+=zCYDqCMeIMym1N(CkyIhF4xh>S@%ua8)70q^vgzo>w= z{bJV#YB@!5R`X31fswVK*BOreu@z!r%%ZTWXu9E45t6xb1>G>_3TekZ_N*uxbKO8` zn9O}cDQ|?-W}QHYf6o;6R*F}(C&ICF(U39~<69BHKO0>d92{I|5JGG~E7DprTTG$9 
zPmrxCbvAoQK6vr&FMFhXm0Wv-nO3$5a#1|f1lE$i2ruaUxxEANEwAkm)%r6AcrU5+Ow-GCq;CLz7@@ew^i*T_C%XV= zz4g`}k40^>NOQ!rKgZXI*2K{cHjRbN_+mdCuWu zMcRN^yk>V4CGD^-G$}XjOTr=ZyVE#Qw1Mt&)uF4lT1JD{+VKk)jL8Uh%U1 zvVjojjyQ$&1Ao}(bvo&gk}IvmEXw zF^6ByX?X#Shhu`dZcR0R#V0(~efL_7aCtxI zq~4~8Yv-(XZ*@`)bXFMaM~q%AVb(UN+vc0gYS}5t;c{M`ki{k_Y9UVDiCS{&v-?+W zpyeiy_|}oRQ%|(Zzw&;V=vr^7-KuB9oNh4|I)vu^)Ms^jQCzYu_`P)rB+{Ymn|$*xjg_9vOxOx4lCyNsF+6FsZqLp&az{Y~=Aj56rnC z%J*CMq`AhhH(7M|ZS!%y9R>A~vYs8`wXvdXDqs)y? zMpg_Rj*VIy-;%h&kl}ta2G9y^H`Gd=g2_Qn;#VV0?UeUhL{g1UwPa(Njr7xJm5$G9 zn{fLJ+%urk5B$wsrVJ_yrgByD9K#d!w;d1XLZ`bw9`u>TFso^|)II$rmicAHfRwLm zisq#2W=8edBL^?c8C1FGnf!6$SX;tAQS-a-L;ZK-l?CZ%X1<_v*-XE=&Wkj9j%x9g zD$?%CIgzgf{$)iHe{lihOkLYEKht44jieB+WLB!%Y|H8_#)JY4MFqWrOHnZ_Mb%@8 zs)Tyc+68x%vp4k&BiGV4UL@dGj^Up=mvM@ePre_YCcj?_+Tw9pOhfAw0FO4Uw)MuD zJ&0|;Z#Eu4LiTH6^_LGyi_<1oOSKTKW*IM{&Cv;ioAE#M=W01e=);gRbKMJgzO8%^%d~vB>UK5j|2d z+wX64?MI`(`D>d<%JZT^Mq2Oo17<8Ii}<@#`oD0WuAQ6(PA> zTt?F5;=^#>y*K_=6&)4vQ5-5Y`6Usqf(~kTEbI;RPF8m@ZPhy*WBa1w9+o4$o_X!a ztp<$x6l7oia5-atp{UV$bXdyTnE%;{Yc_R@B6P}h`X1ao7~0fKv6QT%uu3YvkSC_a zgby0{f=(j~Il2?*8&4lJ)X2}jk;H2zVRw@mD#GyX-b0@o!%NUo;M5TL_d-9IYT6-t zl7usN=vtQ^BZ>oVL!R}BHq$&h0eD zF9VWZ-Q4zP2S5BPOnQ?i4fl0~BN=-lczRh4H>Dmp1Qe1sK#x;)Uk%>KA5Zrj&aq1= ze?QbBw(t6P=+tz=arjaVYB_91?<3SGD!8r5R$%EA46fxM(RckECA=@96gaDE`@_%P zw1j6v|HXqX>B5^u>=Si|gplJTfq7rT>1eRkZ){)6nsbYWy{XQt`Ucnv_&;kF?f17G z4P9$1Ln&C8rcEq<=Q|zCXsA?j^cd5XrwT`QcBz=(Lx0u}mDWQEmM5ta0ZsDP!|Eu0 zNW`K4e6wavKUWWCJ+cE_vG#qb8RzAtRTzeiU{wGZ_MQqaH=aF@WoKi z)GEXo(#PxL`&Awdt2}W+Rv&Xdqxuw0@Bd5_r(a#f;NLkqd0_)1+DoNwR`jVp0&yV;)4S!dckHcRGb=QO4sUtk(}4I;08LaqK=JqDeFO;DyUZckgDWv_)6n*CVAC^0F-WhZ@$K zMC0|`x@Jdx{27~w5i}8aUB}5(BFfAfHGP&%%$Gu0Ney;d?8#8GwrgBkN6rpYn3}ab z+5JN328M~Y^a?YL>Xc>*5M3t`g9epg2*$@AOjz{OvzRo3N{2*4yyO-S1dwMl)a&Q@ zL5~%`5BklRC2N6Uc!^dP9%a@Ad#>pj^)wb4+7-3)B@Z?*g3#Cgp``cJ-%xA!B_X04 zGDI!f(}#m*S40+b8_&k*gUJJm2B;vR%&Ezo*8WgeRJ@uL7sSOKV@#pI@JUSvx!Az^ 
z;X`&@_t{SC7Rb^YIUIjsDU3LSDxR?B2vyq+UO#l(Aa|$;wP`$+sL%B{Tlc>7UF|KMPIrQykv6u5a`yPd&b|ObW-*sz;(X#+|s%mOf6jbS8>|xo!tvgli7^6wi%p z>Yk*k7*_AD4ZyP}T$|Y#va~ZZtWU|gEn08{QNMYSBZ}gfCrQ*cCf49=J;$vOS-xCb z@1X!3N2gO|RpM;qxiwX#f1GX{@NC(XeWd_4+r9X_Y)zO5)}>?dUr||-0pW>dc;V!D!mm!W z&l8HY$QpCygEKc53fM=Q(jrF~r^nE1ROA=4k&U5TShB-Al-==9aSB2)(uUOzSW!Le zTo7?e*^!;lGZD{qNB55|(+!PTXIEM^KCg_V)lRr|enu$$X682Mi|Y$@v*W~M-4~yU z(=v2jhMFJG269BRhwkGCj=Rn=O2(3jQ{{;Pc@_Y}Kg@@6tn{j0=t|so$G;am($e)LQ%PI($Xh%%0eJV;v5{jUYmSF z6#)JiQ@bTZ-HIV+8EZV=?9hz0r#z?L0BYQnxXbEb+{SyZI3Y-(s4NQ4Kl&_AK<&mc z!SX``j~zHftgLAp*<18jG9d9QELjMe&Z;lcYb^ky_{@&Q9tQueUJ|Pn1{OY=#`B!#Q*8=7z@Q8! z3z8HY9^91#ew(kO8%n)!zAYL!Z5N9iNhbjMbBj0(b-iqH4<78EizD`zp2~T!yE4)M zgGW?5ZOYBnKew4X{dmN=Ws^&ZF`W{h@D)8r0DR_exi)atDI^^Y%+D-Vema7*wbqu0 zV~u_5a{-VLWUZQZ&W1j5d`YyHs^d3P?{+)PqOhUITpd9ESgpqR2w0w1DX4RMzO(OB zX$~T8anK3Kl&f-%dw@Sqa#l8D{86(&p-}zo4eXzsP;CP6)f_Ab4ChklNYa@SX#g{} zA=NUX4#In&o}Ek2mQYne=m!)d;zgcDW2wNd?BxkFYh9SaFrT7g00hGUi8b09}B$5NNmWUIDU=vn!jCFQvv zZaba4Hu;%g7x6m~=@FG#n5*sk6Y|}yNbg-29OR*Dr3)>OdgO5aPX-6+$tRT3&iFlyiZw&!9#6Svn7%C&HrTyX#h4;kWgcx)6c$(ZE#9vr?*J|v&61@x zsBRykq-hCN{bAcXm1MFpR+KNfSOvHy@4&68n#|B@YotWav^$Q02D;afqgEpqjh&MsRDSo$3LPd|e$$?G_T zZG+@0P$@7AM#V&WU<#jG#ixXdA4()s9F>D9(IXR$=&`jd>8gfGv+^a3gN1CPb>pc5 zNal2QiFu%>6@z5ttFi zCC^;pLO)=O;~4xVUmThO97OjjG5b>|fw5t){Zj!j^zH`8Ddh;dt_>3+&Jt|u+35^O z^Z2_J)A7LgVFtx`HnRsTo+mynjW%_M@j+DkA4R?LJ^OcaeR@Zct*fFw+I{qnC2@dj zfG|OJn?CKJ%L)ikK_HVDxx(>9dsxb|7HAN`EwP_pRlG2#_Z<>oTw#u056`B?p<7qLdNP{)M>6k9zlKCHMP*>Np}+&A#>C z0GfJ68m|+>$|L7VCjnBHeoBKR&;89wZfJ|KpzF3o<=8C4If@lqcw|pCGOQ+VZ#wMc z?#dprc*B#Qvv2G-V{YxzwajD~Sd;@d;^U}w-#4&Fk7m(>v@-t$@X0fO_*OJ|KI0+t zn*oD{V?NKB8Ix|Mw7g@@rf?`pLw4kq8MTB&4eJ}2@9S6@S8?i23U_MS9XlE=Z1gFX z8?Y0Iri@l%9SAbH2ldowg?CFmR3MYdpX&D{3t^jRs|rxvC+92mm@EF5il0D}|4-@cB87U$zXXFToo|Fkh!WJbB8b zTiqrjox{Mg#F0o>W~LbER*>K~vwArkW+V+=7Qdl(8I}6uiZLRum%EmA6~6xZ&ejB6 zMM&)ItEiv_pUCWCiM>7?cgWh`hA%gTMZ?er444GDh7u-#b>BBfHFtM$;<)MOaM>QlW;&`AFk>Wq0|>I`{Lqc0 z;;hP>#Edi3 
z$>D*y&kBZD#I+2oYo3C#fNB3;`{E2D{RzMrg}Y2?k)eZ(iTq(V@(fm0SzM$a2n9H` zjKnp*4cY0;PUE<@6{ZM4ojW>%hMYoR@(yzgEtKukqUa z*F6+Z^gbfbjOY_d4{i3BxJLcsEPN_-P`!||e_+rRYZ8B$Sx!o}Wq*Ef6qAFsKcW%= zQM=Qy8i?um6y6)h@*ZZSAg+}}s{pd}ULXVIvA1)~ZL)e*(EhRLYDNvIIEOasD1n>E z1#IWgx-K#*=TAswmEQ-=gV@aXhUED!C28o7Rj#lu^mb~#Ej%jrY<622tV&p)tYut3 z48Q+#)l4L#AXz_mz;B>IFSeN2TvgJ108&+Qboe`WZlNHWrD+U3+w+n{?-2qvY2~uf zmnONp!${K+Yf#9&J6jADag?n0hOd{hbg4BV6kYQ|xhx$_5}ekC{PjEd&!G(H2(5nn z2hp_M_tB!l_UA;bsef>FYRN}bJ{jHydUM+*Yer0>2M}s zt7$W^ivvXcu3aGE%M)*VUhDdI=;Z?3X32mQ5c(qLd!`jdb?Q9ax?Y|ywP5hw69cb$hSh&OO*eAX z3e+gFw}Xm*M858Y_9&h<_1Ho=nJ*i4UVJt4na_`Ze)3W4#&F| zN1qMf#U)D1LY}xJ4`Nh7+L7jaW5GS+&cXh)fXT+ENH0dPetk^l`!e<$04CDc6ZcE?YE|dUp)P)$G%`omPp2D}0 zT1AMCH5QGtl=TTXPg&>F^MW{#inGIgLrTr~_LB7$!ogeD0p)~#MYA;^<$1E>?w%6- z_7=$8<-78n)6gk#<1%iu)2DmTy=aNUU1IEtCq50G_9S7&-z8LFc?QUcsx<(`ak3le zqLC0f2D$y{e`fw}(x&EK@|%gj&i-SvP_ z=R0wCBxtk171>eKk%Wy0vvMXN)M+{bwM?!!UX%Q~-HzGfx%Kmz?GjGzgH%ot#N9uK z=cktdC6n)@a!%Yg+*V~|R()!q&2K|@h({C!3Sgaz;-^oFSoI!*s>HUc_zM2{8jUvf z>JxlM!t;X_tkvekeot`W_m1ERb(fa=UH&I!{%!Mz!3|u3Bm6JCg)*&#p(}15@1KjV*Z{;F7W)VP9H;GHy)54ez)wuR$-Hm?x{vCu zH>AnTkmSINU=)9tz%4|HZwpP@11P$5Oz5B7h`y2 z!EY!si4$Ik;o1HIZlVAA>gfd?Pgn1>VTm=WG)H}*wwMNQx>*~>O?$x=v~Peiy}>+z z|KLJ(8v}l$#Q5oje3R-6E)aV0S?oe`Kh_0|^UrUjk0(|5B%&hH#E4)LoYEg-k4EI8|Kx5Tn1C`5#Z2?H}ypp=%%X#coD*@KI|7>VXOo0VL50=chO^br(hpcg_rDtE^pd9B7i%6NgSju zhS*_6`N={d`Q79TD@mDFCv9RF(3t@dL0?t1y}qLzzXUErIrizVi+5^qU=|T`~_@r z35OaeoC!LgREi6)$R!4>RdFUlb)gW6fghPe&RJjdrk7K00zr+G;Ksk;3ffexpz?0x zUrcYTl-UxVn=kyUTlek(au>Pw>s;^?mwc{b%?zz({6&DjJFl=RNyc#T;f18o#~R$V zyXjsRUJ-}=k+|DS7uswbI>1?PpLa`K#Crxn)Hgeexu0D0BA2rPv4n}|+%CL; z(qsTd2E`DVuJcOtx9PpONRU7@2sMub`3~#^m5DbfQyDqo?mAOYI$5aSpN^4|8jW%qg$ zrlJR4=be603$McwjuHo{C+1WDlJ68*>^yQ22>8ycU?2s#c1yVz^7!UW*BZ_`u);vO zeoS=n6+i-T+-C@mL0g3uHBc3B)`1kg@mq{p_nKP2(^_Td7v}9sF%^dH@OQs@cwLUs zTkN3uuv8&#`hH7qSOk3!cOd)*!6qX>hlNAWR$)ba>TF&E6Vr7Igdh5M4M78$W}Ph@ zq#iV+f~nbA)L8|3>+6jDv|gXRC~C!~{=2~52y{%|@bqJ_Uw5%%VY~|SfqIGNBI)q? 
zjkTrPUdb2Q{5vV$*?8DQ26NSa~kt z-ed@sy6-uUBFf1o}-3$V$O8+Jw?&i;~X%e+Izm;MAVPQl;7Y9B-KMH6@!|EL+Kz*~Q^v z1>nW(p_J?kVY@#Wv8@yqMFq@Y-A@FV|MZ*E1k`MK*s$&Jx$~Vac_(MBHoKZI1|<`z z(D=uHQzbYMY)7QU>Jan|we>*`oTjF!3hGm$)Zp|&w)(hGgcFgIXDRA{ca zofq=|{kJeEwtg&qn*|g^%gcG=1onxMF_^l#Hw=noB#>J8bcQ<(d~;eLejG)U!*q<^ z;Hmnr9@z-b$9kJEJ<-+r)jtEV)4aXVLxed%aKWH@YwA?q3e$N4|IDx>$n10Seq=mGGC=!ZvpG$ShN1xKvRZU3cGlihb$#CAu` zXcESOMv8v->q|QroS*dp8IV2&yRASet|A-t{H7T3GSJ}57rWyP zL>6?MeGv`>g^a}=^WHj?nomYHmYwBze=E|Bz+9?cgDO$fA)JWSwFZ<^^#OX~0aD0m z7C^J2TeZu1idF7%4GQE*shiuNplDeaW$P5M)P+2mMwpFlae?5*1SOJ^@ghHq<7U_D zzO$wP`m|p0uPeYBCk(8%N`l$LFv`m)G5TmPjR+pukI(yX#ljylv*U8&9N?+S0 zY2$F;Uq`*)T7H1Tc(n}yn)D$$-nuAfk7d2;ONZo}KayRpwW)2j_$T|}g~33J7DNMl zZEd-%C3QLbum;_1z{4NWL&&&3_o|4+H>MlXz zU5%3OG+u&`H$G$otRW&_$EEm5(VH^3H?%gAP9mA1H7@%nV;G=_n&Ck;*WmnmS#NlT zbf|2k^x8PgN9Q3x0LHTFAH>gh#i{(;@3_AfrOK_2NAoV9yiknBb1xa+(>WE|GxmNQ zZI|nOthZ<_U{tYf6Rg&p0i}N5_lnvhwFJ)W8f(2VA3)O(2{zonNCv&jkbLzGb5k0d z>KW6OF?cK}sJ}9w_NtqFzhSaOicJ{|&QWJ`4DXQ=*Y9Oqg0Hsb967w6&l8_p9aai2 z@(j+$w7e`l;`ruCbr-%m+;(K6JD-Pnx4Lv!uv-JfQ;7sY1ea}YW*)sv6d+pmi@nyS z#33EfpoKG^OaG$^d{p_5j~d|HA}N1QKishFG;&LKHChB@71p$Uy!Jb`g-}4i=oB@^ zk!4oYV`#TEl9m8Elc*L;R(KdXnqDHCdV0N9h_TIIWv;bq3DRA&c55GjlN5Dp?i&S5 z#8Nn@d&tyZ=POV#oCeM$)@b}%!a{jkf~X9W!A>?76N$zj5uQIabL-GM(H$97cowW7 z0E0019eGfoU{DD0cbq8!77i1`%jpe}Y&a76E=EobU(I-X)Xs~Yg6UwSu+ za=Ye|;4127zc{@_a>Naxo0&q2P*8WrX?Q!O+#b87%7bQ^(qso<-B{2PBjbBSu<(t- zh9{KfX|ciTP?4c^z?n+X=9PgQLltCSvS_Wzc9_p%-XBauWnQ^kzHg%Q*oQ@UTp2^> z$u$Q_n~JJz`8bxBUx*@;z=oDN2GqvfhYugPZ8ljJJwbEq0`M^Iz}`Sxdo~8^+& ztSvzp<)1DvZhK_kosf~L@$3$*hVLB{ub^;Ulr*I;H|{Rqlkg$0#QD54F8q$0ZHUwt z+8DbC*ikFzD>~cwA`p-=36rwma%T-_ z#ZQVcyAc}I5zQXtra+~;zacPCFCCDxL}M|ku-UMt@9=A&}y7g8)AY&by13}zMS`V z`EDwqTS?p1ynb@s%=q6ZkbNj?==z4+*X~i6kdJEPC%MTsNGUtc|N0`4*)=biM@}Cr z`cipONhyY`-l0Ewl^k*9?|D}+<8x?kJH`R4L^e!T_O0#XHHY+;>IkKgQ`c5%<@6Wr z#Ymk>%NVe{FahQrkaTktJI)kTvE1PsL#q+OF4o7d-P~&G4u{rEFIO?Mhn|4$Y-NU! 
zl!w?3VhGZ#J%4oY5eJtByVs82XXWBQWN3I%gFaoon@h?5NQ?-d)4gs*Q$!< zKiC=-4YsKyWor}r3lmtLom|TVYcLnB#Lo_V;j%93<8Q9bV*R`b`ra6@gGoRG6QX`6 z8G(LY#G;&;qnhvPldp}6qZD#{vKM7BPUK%JwF37~Y|uqcTv~qf7U=cw|7aikM8L)Z zINw~MHMl}-U2sFL(44A4!YNsoK()GpniZbsbbvDq`Fn(al>fO*7v}i)2#>rWsm|aC zogd;E2ec1L`0Ld^i_9MLV-2HXP`pCsTyR}J#GJVByK{^Pf#q@qxoaNs_pslLw#moN zq%6(}lQ)EXOMsjKH3}Ly41k=hf5-`AbrDZpFRu$7l7CON5Hhw`8~lE)czoXcl5gQk zSi7416_P&dzvR3U{{)}H=P@ppoc97cGI)t;rTGYSzt_kogB!))juk=h+FoAchmaS0 zTRhdZS8YzcR@^!N0o-UN43Lw^AMDlATH*Ga(NY>fl4F%PC4D{G7bx+7C)bVe=gm21 z*JG6;iabAOVp^XS9*MVjl0Ag4#=bi8q6A9ZXg&L`&|f7^1a0}~EpScw^ifX&^InG> zph#@`GlO@x!m>fSca#3|?r*vNRF0CXZG)QJl>1+RCX0U67up3rcR4;?BKA>TVgmW; z_N%eS&kUo2+Ryz3zostT#zV_B7(EQFv%?{7A+r}Aj+U4_pHtg6opZCoN?`>5( z;?i}!!H1DBP@qhmbpESHc$G-`ab$?FdgSr#Xz35k5+UZ~)!t4CBG?;Iu|x7c{J|Nr z+U0$FdDymkKF9wV-dlji1UG7a2=oXO{xIi<(g72Y$xF{%;sc6(MNrmlD2_hc}kzPc!{pi}PW_6UyfJBAp}lL5`V@g05Y*!5&Yd zfZB3&>{VdeUivotyhp2O)NVTgsFeGkR3g4?|C=Q|l_m~QEpElO2PnfH6;QQ`xW&Hj z+hiO2O!Rl+V5|l8s*v8Tub1H1Llx1rSDX8Dt$1zT;=j4$yLBY&GJefBNd^tT4HC5* zf33{>bWY2w5l^sIMuw6(bU+nYng8YvA>Cb7z-Ia5^B=zB6wcVTD-rlU_ztWIxRJl4 zf>W|GX`$hOaaJdhzu8NfUJD>SgAP%9V}!um(WH|GafQKtd@3`Ll4&@ik2s^b0bj+C z9-I6#pJ(uB1!ETmI}0sIky^$+$iB)RNH$MlkLRgPULOm`t*{LJFV~@;fmv&bn5hO- zQu$9Ru>fAuc=Sx1`#!BA&)`g=##Lo3=epD1*WJ9~H^c7hmLmu_SLC--%{|V`_I%(_ zU90EwT7KZY9c~}~Z|?A(WYD-m>L?%Zx)93^&f^~%lu4=kUKozte5^!^;Fyhgs!E-e z3;fgn<_^c|`CPzenpN$C-q&=SXCgAlud{jU0m&wHBnU`c70Ke%ZO&|$jb*~`{SuKp z{1giRw)AABg63+Qn)nMYQX^mi?wU0QMR=g(9-oTewnt#N(49Bi)f!(6VCI1BpfM5; z?!!LFX7)1AgR6B+*yEW}P1Df7M=M=VL~wLZ&RZmyzS#~?Df>UEqyc!T=*34*V+M(2 z({Ig!MOb$L5LcF=dV&zQS?@aPjRV#t$ysxxe~UA34i0t4wp}gQm*jh4WYK?f$E8~N z!t0#N@wel2vD~mxIp~qaDU$lRsrc@zbcsBJpY(P)BC;x%?f-*2R&3iRly!lUGDRuI^YtxF_G`l#{f@R&VLS2 zN%23aRCE*Y5-4)Dz4jLAejVVTfAblOXUME3q{adw%MMEDO_+!&i z)0hK!^?XA>^3*iiGS?~OiS*|+~VBQQhXnpinTCbO!@>t^ovr6>4 zOlurB-_o@{_^(+2^a4}1WsnBU_vBJ@gxO-Hxlu zV~h+Xbov|CKuT~_;AC%*a3I&g!d={P4m%pa z6M7vxR39u!8MkRf`r~_@X}M3gw;(hOSVU`3(O}y9&pImr(%YRMgwBIY+HJdYNEpcH 
zo(_vN*zV}Yi9t)@&<>_U&}(Z#Rv0Q02C`r^PFu9VO@Yocq3?cT#!9!6@ULfApI>e3 zzk|;&$|8~M#?R}>`odd;;K$p+GGGzRBqdwV?$S!jT%!V-kShHJ&0lso=Dh*L`@`E{ zLqRBf(cqTUq7yJk*y2dcCAhiFc7gAu!#v5T<&1l@8tqr~fCAEc^isTC&0c~LM53~W z2kjX;Wu{h;Ogp@cJHjp_ze;eMSwIdo8EX2^d|SBW0D#udDT;;J3_>191)D2dJlxxB9;mTW z83h2tW4DSlb?&Xc!oU+$W8-{o_HhR&Ty!A;POcFE=o?P0lb{A*_CS_u?tnd7Yw2t2 z?`mp`(@&O@HKPsw1XthP!n!s2o_3U%<-92?9v`4fFF{rpmiq*NO8n1{^b#T1a40Mr zTQ~mpR||f)4TOI1J9)D`aMT;`1$C_fGc`>20>cVu7V$?1L{)0osES^qFyclHP)jv0 zzIaB!=Dihg6p+zP+g%y%{V!wNK!2(LhmhM|4y)!<>CFNy>D>ikwRgn+gi!0KM1zuu z7dl$C*!+xDhlm6H&ZHFAzGU!bZ{3xLW`hF*#_0TC5yHbeIH=q~#O{Ti2s*1Dr+VWGeBP-pt7HQ4VQOk1!A8hZeqtab{$Hg_A`ck_8Y15xya)0_bB*WS z6Cgh?Cwka%WbOm`km~lG1@ba?bsn9G3*={E(a;W1!tRfqh4QsFM5aBm_$g44Pq(Z2 zR{l*DN!6?lV(wy0S*b~<>B?kP0{l7pBSg*!06U^9%s}S3@WI)?Dn)zvgzr(B?z#FK z)JWF^XZeMyX?~MxhOx_P+t3ZZ;e>23TH(1=5aqoMZsbVY-MOp@_)^@ssJlxHKv4`d z01S%5!jMj0dx=qN)0f6F;Czr{w?h)Kt7>}cVFJsy`kJg`|8Mh%4nPsX7h;5s&rU|s zegGmO0Nvxtb=o>jS0^db!B7i2E{8!4>L@rkvd96+QJ4U!9S!&qo5 z8E0`e6uL2nnh$Sj#jCNHa8H1W!2D($g482b9r*e{ z=fb|YXJo53UpR$zwHAPDurqm@-Wml3BnZa0YGKkvLx`P}L%%Nbbv)nb*Ga7o2)11jw|Xs~+i4H9CPRO21> zyhWUnqORYk1qrZA(F}SZzR_FAEV&Atp<5rbv^1{$bp>n56}ET*Hpb=MjI$zmL5JhR z@&E~Sz{L5oc6xc>ud1~7lJH$xgQWL%R?OJXt4K>m@Rs-43gxpd!Q*A2$s4NQ|N3F7 znvX%fgNs8UMmeja%>@j6Imup#b?9MAnz;-LUIPWvHx2;|k|#0H>Cd+W>&TuYx3Ad& z_)z63FU+K7Z1SF~I&Sl{E7B=W)GU5d9dxnJ*Q{Ab30v2yV6&?aDyu9Jng9?p_kmUeS=A9hk0eTb`*)Nfm z%5%xuiz`QjzoWn`OOt9|FrSbtMC%6V-ZZz81+Tpkm|+QK-aJe8s0p>4I+2blqkK-A zf%PIy*6&K+xk*AR-7GqS{08)RcXc1k_VO+-K(E<=T;5a=UcJymiIC*tx1F_sb6t!i zdlmlW(u&{3Gq6%3-Gtft6SS;!Rum_UKBN5%T-tjj5LI39SJz`A(fIN%U;7^s_HppJ za;2K99sG^>0nx1lhYi|ajBu|eF0%N|^!P?;86Nz*vSXTIZnFJD@_HM{Wm$^Pr`@2C z^F0dgI%U)0mqW(1R&C(Dvvgb?(X)mhIwLI$#t^_v(iv*#yG#-cWavy68x8gJ=m}@p zMY;`HPXH`b8j8X*JS0h*;`CM3xXJ$m@@nwZD=yJqR;;WaOe5ItH_j-XB15}#R9a;ly z1G6g5JToLU@De;vE_0a?E_-|V@n4V5g?h3J%z)|9e+1eT?o?=3GarSZbG1=YD??eK zkUSgAW+~Y7_CD5+s&=Dtbs$ZT1UWKGk2F~=zh$_jD9R__BU+_9Qu4z$IhIn;zU^PN zskxU6s~^WI*4pWUb-oS{Rqyj3Q>*nA;*KNC+PxmMMdi+II1FKz>zd4evQw_?+A$N| 
z?t_-PY*DZ6$ut7X|9GRkd}J7;5T&w>QkH^ZaKLoB@FZcE_oFUCRQMq0Pii*=jhfHh zyo)b|5sl0~5isGr$DWX}_d40M3^TFl1+*ZD6Ps_gtBAko#~6JQP~87lT_x7)3CK#w zFKti&U(nfOb|neq19n6L-S2*q_SOxS4t0+NE-!;hg4r(?iG~)Iw$pY%$VmEbhgsk< z&k@g`83PU99o<5@1C?h@eW_M6@AX;)aA4P zaV>ftZYCYK5&Z93QdReLoc^oI_0Ltx#q98X-T)$~`dSqM$=46YKgA`cRg~l#LFWO* zfN1OE_}~3KkmLs;CGo8ks0!(3nYTZFE6@XBI-@UWbAg;I!y%fK;4h`?M7$%=11s|) zoC)AF|4cR83ivzF4tdLu)dNj-I)SlJ3 zzYCDwkdqk(Cljy^jjZsq^V_BaGUd{rT8p{=9u7v0^;#qe`D4Jo;J9%-lHTZ*LB{dn z79G-L0ASWC|KrMaP9=D|Ze<%cUq5cL0MlR|J)BTq2Ff7l57KJBNG&aO& zZ%{R$Sjv`b@W{qG>Y4h!@rFx0+!MJ9v3(CCacA;XY@>r@Mg8#SOsLF zq40yieTC*{^2T&f@$MOK?EQza_TE2| zm9@Syzd7fcoQksyyF~`&OiKWMK8r=YQRwJA=8p?+_XVtK3zR2;8mT3i0m{kfzKcwE za9A9?(5tm)2`t}wZv%~L+_c)t)^kdq<_{5;PlXae7Xg#pyQ|}25aN#ENSg=)Z%?!Y zs#%7BQFLq*8NC>b{p_#-qW%a`9=`nHRj;`cO048zg*z!L!yQ%cIgA6_z3zB^KuIsU66hzd?-DuJ6f->kc=61|-5t z4p65G@UC-efi8FRK|8pd86xA?a})tu!_M8eiUQYusP9t&#do|D_P!3T3CdN@cwKJ* z9r4T?I-I`!8dPaF%oxr^0o;?4QG{K?%T0m*N^nat`1j^w7I*T&UwtcvD06@S4%iJw z6MbuXOV^lM<97Vsq@>52c@MEJAd&i z(&s_$j0QOT1xZPI2VgheJo&3-yTr<89^i)rUT?dlp0lq72J_Riht{TAxd6K`3{b&B z0RgVXhm=M1Yy%gfYpql37)_l?rpt1BQ+Tp=uj8sqauy7S$5 z@HgIT5UKj@I5{cT*Vijvyqx+qDNO5~@&AIkQLFv9XJD$8 zG0#AI8k9Jn6QELResjt^S5c{AL=u=61i0B7{-!Emds3AXjX&|)fS-(`dCsVt%H2FdVjk zIW@Ity}Ej>;DuC)xqns$-Yj&u25%PHKRPvW;&KjBYe^s4I)h2k$NEn*Jpe86fAS%J zeCAY8rmp^)tbhNYpbTxj#+K_&b5HM{;=!XW_>&N4Z9wI4dHOXGphO@H&oN!C#t&oD z0kpPym6Fg#E$a4Xm`w!TCt1)rmP1GBbIv^*T{$u9F10r6(gaaoO4!%->ps;)emk5% zH1?84zr6}*XYuy{#C0~gNQWVi=Z3o8WZwWc|Ha)D1#k~k@b>_J{<{{OZNP6$u0~df zCN4hubgL)TuBXn>-x z;QjlX3;-X6o+d7TX#kGz?8Z~aNO79+>nG~6UqSvi5FsF9Govc4-#4`~Us ze|)m}a{N@*0LAbMzY2b^KD;TAgL2$`h#dw$o*KWyds@5j%mESZn@&1H!rcdniS_oDr( z_+G^X?C`Bg15m8Dd`_|i6A6IAE*O9h_Qz5U{MW3IBt#PU-04RKbk14DrdhOyfikxH zBN&(6dC+wH>*zqj{^#Z8yI*75K_E;#+9y-b40k{Kk>KPrw?Fo(_HuLg?~U|2S^xde zK?MFibpIZ@|C2+v1N`Igwa;s3S^(~I0=CF;zz&H5FXc*m?9Rx<2s#GGk%_<>V}1C&E6{rNnm22rh3aq9@Q69I6b*imF- zVB81ul0gp7JL%XUfm`xpzQSl zFJR+GC_1qsDu(Sb3=YGA^lZ69ch?PRK`L>wJ-g0%4Ac=|pcl*+nzjF2@9wQ@)qfCY 
zs0qtx!f($>DcYzVP#rNQR$S{|IKJeR&2cgqes^gn2Hq>~k>|3szNGxt;Y!fe*Psl# zf$l)z%jme+!iY-%MpMc!oWsLvschz~*S+Wzo^qO(^IS!i^T@frIQ>25?4T~Yee@3l z;}(9yx8U)ju(xj0q+co=0d*WhN!6Mdgq$n9Fct z7(G|3cQ@Byle>NqDgE@0)Lg;Sf9N7y{|jBjkW&x&noChE?&A!}jj9J*K)OAV*P~F^ zZ*A&(+^mG9sy7IP=wVty=m|ei;QzT96Q;jeL~n+{YifSQ!+NZ6Y!Kp;%aR~s2)P{8 z3kbeWhq2X(&h~(6l(ngNz}sNrJARBs4(voY_d+@7Kgu&?HD7>QmV#Bj!UDh!7A76) z7x5dJ-jNDG!;FE~g;B@87cFHuC+cm1XD4u3F+{!J+atrsMWGhz9;$;Qj~}0{_2o_< zZ4qCpq;~N5feaY(;T_o~!eEm5?Fe^lQAfpe*wj8~9d7$ak_}9EHI6-NVTBC;6O0AVvmfX&%)m&4GPMp z%1&FCGlC#FJtFUQtenWK&Ler)j_zf;rQOaKmgUmO^Sp`;?oDM6ZMuV+r7n53(f^|+ z5&ygC0ddX;p6!whJRO??I>C3I91oUiJ}r=SZ^ys;Ai`rWF}Ix(7=E%TL>nyT=`>pMc5NOPl{)3I|%TkL>#Woi{fbBr_YHR+Niac9?t6p z7cChz9gS9lj-T(+0X(S^_R4X4P!Z@{t6DPB(O%kWn5_bJPhS7%74Y;llgyt-0^F_6 zKkwEt0927@7S0rdyH$K?f%ST=F|r&ATLhF-o5{AYkoI7aalv_%nfc&^lI`*9)@X-#61gYJEGOQ*&9^aKsi&<>ox&PhQxq4r4d1FNZR{{sG9TSoifRdXJr87M{J~3oD`|qx>yLAt zg6Q~kJ|wszclS1RjF%r|=h`NT|D^3hYWBJ5#fnctS-p`AY?KJQ40kg?KIlM6lawvj z`KOpp>rAaA)gM6vG^Q{|w(AH*--}QM_X?15-c&cOGo^!+bC)>>Q;JsGdw70=AuQ)w zP_>R8I~?l=f9WcZIwHF55}2UX&82f~DZA6n-nC9##}km5&vos1Om&0!`gR!9xM#EI zgP31l0@vWr6e9-+>VF2-)8G~*$;DX!;NQZ(L+2tuSu+OKCaSqk!mpJtmealg-fK$Z zMZjF2dRZUxrDSO-cMY#Yz$>;AJuSjy;-FLxymm(0h0eguiu_Bty+3$M=JI;@H$KJ0 z2dleug$m$-o%3d8(L=H zQ}p?Ytm|>d=bx^o7FvSZ2#zjL^NcXunu@J8T4<=cQ`{1GXjHtaFY1MX>#mVI~k zYTrSDc>dN7f864PqEkr;kIQq`m+KA8h2b1 zv0Hbhhe|f7^BZYQ*+dm%#DG9^r%gGvB!G&CUd-&0KV|RjlEtGDZ8=VtN0Aw_fnN$R zQJvcfg=3Rq7_uuPBXyYhsQ8Mlj%%wO@{s1GBia`=EsL@XAAP~RO8G1AZlwhnV`Hok8|=^==bAGX*_)L?s#SWTJR}6 zn9W)^yj1!23l%czL0@|TiowA{V<@gVDu33Bfe3YMB%Le2t4NjrX5R58l8eY`){ooX z{7kJ%oM6V}6M~@fS8@si#LHd6O0#854`wqK>}}L&>n$tyg}>;BC4N|)Y?=I6u+!|i zRHeyY0EAfzjR6dZzP>7T-SMpwo3FYxJlc#dSgV;z+-kLpGpHmfDytMpxiiZq$CXf$<8%XCHJZ}x! 
zRDFzIxox90bD#=`%iFiiejE_Dz$l*ZWLguWtYiI@|8lQaNNDub)#=ZtYr7x4KIL&& zi&U~M{m61-Z(U8x>QbyyhLOZu5fqJM+o3%2+U~wHWR5yoq#_km;v!i4o{werm%H7x znS~A-iXg&1y)T%{A!a9gsmo?OpLfbW&TwP%N|sEMw&Wm`P>@}o5g?SniqgBtA0IK6 zdjdloA4$(ae}an=O7Nt;%gZ6Y12xdw+GG*MZIYe$cu{eG%56PqU9<@9?Z|-1DzSqW zYT9HwCXVYb4hC0?OXBQiY(_4iSFEdoULGbn-{o3uKHz{hTpD}k%TSHD6;xg}44ru~ zU{NODqiMNUmlFXA*9#Jt5QVzW>Y)vnk5e?E3ys6{zp9DIH3cP1B$wGvgiAs@WwPD! zz040MN8P-GpwT&XO+^Xp%%z9wd@QKU=X|jjmW5)|y&2EKy@M~u5?cLUWD?$SFRB_z zpcJO9UpsSh$GNAwz73M)+PSH$PTwY7dybIK{$^yk@bQrhh1f2en*=cJkQ9^xk&{Un zTani3S|v)=4C6QgWIvl6loD)3QI#Sn3hL8f?uE-FL#-1-ju?*Jhg%t>Q{6?TBBIqs zOB4M#@1xCGIo0uWmO5KS%ev1Hq>%l!l9~ez6oVLxDDOccc(_S(W61SjqN}zp=UI)$ zR`;^c{>>Ws|D+#{dk4W)T#w$%*G#w+fQZMbGmiMzO62^QbVNP zXRlMsM$g8Pz_A9I&KFe><`Ac8gO&G+W%Rs!3FjT+VoK zIHYDbU&pWnBc|S??N~K|l&)BWPrGdUkGDhtdtaEi&eTU-u3eQvoW1_U!gy%l^v`I6 zieNnJ$+{a!?kEQwLMJ~rF+^%wR`8Wo?rn&F01ujJPcI4Ep(@ppdgpW3KCSI&bVm7L zIzJ^C5|w6(YHJsaW}+mDVBR_X6g>irs9Ch~4ohh!R2RB7nu+Ari+iiCJ8H%6T7j(ynN zf!iFZsN)E=J^zBk4KmSv-}%FP zvG!C%V^GW`Ajb~Dkxa?9>&T?0rIeT7XH#&yjPvO6s^&NC8L%-tc$`1Yu+vchrv%(Du7Hx~{=KE@Bsf`ATb!lRlAFs|pa>#r z?FbfD(}SD0m6H;EaNwP?;JW6a zODGPx=9wtsZoDOaJi(aVE9>X4&+j9M_JRWXqz#e0BE&lUEv-H@_z{2bf+m*HcNjmG&fVL`*oJv zTH*pnosawMNzBdaPDgcD&A4)8otnNd;s+AyMS1Nw@RivVOv)~LrJQB#;qkE}E2rp% zUv4b)Mu0c7SZel%KKU#!@q}9E+0QY`=jFr*Bnr%OBx`x6Y1CSr_V)`*g^-?;7P#?} z*h;9m@3k2XJUIZ`m$V|(_w}_Y9X#1zqVKWw@5R^03g5#&!`}=49(twzpRT8)i5!k` z?By~jAZVoKbC%B%uTs=@@1D%awK+C2`(gAWB2StCv0(Ma&y}cj7Rl%N?i8U`QGE#K z;_;D8*T&t$uEgInlj9j-pJ`UAjkUK~z6l8R&D5oow_@?WUR*?1{w!ns2#|m7I+Is2 zZ@O})FxYj4a2MWV(Jm&g3GsoWyvo>DY$SaQPe;-ZgMgSUe2L`ot>Ysm$5j3H9UI9!dg0UchVLIt+d7$AB%e>>&>faE z4R|kg;f$*+`{C5p7=obf_)}FW#LI%L@6y`ADVVnV%ERiF#&KNI9i0DaHfh8p@p57O zk*4Z1m$6+NVxqM5?n2B)2<`{d#9vp&!7msrlO!}rY^CO0>(^MXc2oD(wBoBC;MgH`y{tYd)Gx{ug(c31j@5CX7|L)SiyY%l_`uE2D8<+l#-v8#f-}TqO zmB7FC(*LSny7y&mz`Vr!W87vLzHc%hd;6y19vmV23ci}i=MKr9*x0f?eVXp(lhemW zjE`}AW=4)(Jq8N9r{QCZUhXr+t&oqRYmay?xXgicZ7!@Q5b)*vCqiB-3R7SkrG>lUI!&VEpt6a3cP80nQN7u>5=r!mvz 
z1|_%7nLAVBgj!hWg>8VSnaDk92b*gOHdil%05x{)5@=k)-+ZNjTQji`{b;m4^ahS* zz0LQFcb>Y6YqfBVyrq?||ANlp{eH^Qrfp@oW8{j1MShbSp%=W zB1i!w;7Rc*#DVAvz?JCNMl*uwieQh^0OY{Oh@5R!T048?*TE5jJ^Y3{3;S>t`5!h* zd3U~%-`)5t>pT5widr=c#p{}I0#qu?*2%{N)jluJK+hi^;beGvod7N^>nCY_?&$T4 zKQVB(L8|@Qb?S~n<4*m;7|3qU?pGP9P(`VsJNd#s2-nuud}G};EFK>O=vq>)^Y-|e zNWiOYF)s;_751&!{ov2PRc9;TOCz3+LPZ`QPm}HDR6}Pw2GVi>oDH>hZzZa={a}c7 zbuDor9=wm5tGoMEk1WR)))TcFING_Ccu{Y$Jb5^KGHy6cF^)yADKOI!hNO6#HJvoQ zRFJyF>dBrYkWFl*YMNg@Ri~ouY?5#UR>YQN$}7?ZCIz`9%B;U{3ERN zK2pGTOgEmBsF z?e}VQ$6k14uq^&O7S)z_8DASCs%jZ+CStbiJ+(A?Ar#ZB<(*iqjMaj zq}D{s?3!`dms-vZ_W9kB1Gg0v0_nbtzjEmx*jINCbjwvN(H|W_@9uhiJ?cztcQ|q4h10dYr5?K@HTE{e70YXa7P9Lum(Loox1^4wj~62*n<6kyhM`{z8RP<^9Kpz)!#( z)3|(Y$sD|4zf=VvR4r}ESaBJ}na+YF&DjJ~kTn7roMATm!NF1&LAZ3Ua^-}E80l?+ zaRe7!%fWm#jb^8NRc?o)CfFWpL*o*pfRT4$GYSRU{OFQw!w@^NF~Rdv=eY5&#j(!u zN57Kij~{UPG&hbAn(wuD=+)uX=QHvRoo8Z1=_!N1eic5uZe->x`lkBlQ$_l5b%+$s`4{ zFY4FeT6OqG-d;mj)uXEJ?5{?pak-$9;yAxh?Qc;8qo2AqMKUaX(mnLWAPUhU>AF|E zZ2DCp6akZ(>_V>MrV}F;^Q>#j%SrwAyFvKJi4f8Q&@}m11lu|5^eeOp!5;sa6Mu#O zihmUT<<=UP=^eJpwA@C|p&Mu0C3o&9UdfPYD0NA6E4LF}emSwv>`KQ1kWwtt9AKkW zwlgmP4?o3M8Qr+t(Mn3UyMtkDHh^ngvYqzms_k}5y9aM?YVS=0zgl(MHG~T|fx~7( z3O;GE9bMui$k|r)PJO3L4uyn*k@PmLB86vZ{b#yGgMXgDPA*+SP2=2Wd0@=sB6~Sz ziP)-p^XXRzt5+7lx1^c8axT2N5qy)V)bD5#m_XePeGSuUc>MC%50r{4o3`G+rWCsC zBmM?}w*b3MInYRJp8vwA8Zh@(pgj!gfDtb{c!}Ei(2*iPCsEPDTY-M{n6g_m$^*QL zRJ}gm&9fKls^^sV2@oLIjgZ|XpJdE4P=SYfy3Oyd=lIOHq5+h9Tbz)t8<`t~TUM0` z>QAdxSvWoyO$M`{NOwL#*By^Sepyz9MaHu8{ea&h`WuLfJ06he#Z(Kfhd$uP01R^J z_-BM10bJ#bo6l7Og!Jtf4F1Gc=g6*K@XAVJxtUn%cjyB7O7Ou|m-pzJH4^ zD#4-u=ElG@fre2+F&gKHahGY6*y-wk3ZJ?MFS+))h6dz~1GZPp3m|X&ij0XwuwNl_ zio;6Pw%oF@n%8PVDI_ii$nd7laj>F()U1_E2)Nx&4 z-`hzDlMpfA)-N-iZx$1}Uzi>lDBNG#R+g1hc`a7Z_#&WJyoh&Nb&UJqFJiaQuMo8K zMZBz}<(+E;lEXr)#&WeDy*aZ93*mDdQYw#R5H#;s^}#q*n3cYB^vr7d?EB&MO_I)* zKo9jFW8fVz{1{llNN4(bH1YlOHV5UYY#*m)#7+=hp}G0-9odVtcQRBB1jA1Ex1<^; zFdWHu7mt$81g1%)yE1@bdQjs_n=OGc<1OmO%a1eC3Jlw_y;G*Ev#<}Pzgh8@ zDVx?$~*btP!ZPDm45)KZ+VQe$96ft~4a;3%FakfBmNa8lBE 
z)rne4*DS+f7~JT!GwW;4po&aX`4nLYs>q*Rh7jyY+OPIwj^#?GkcPJHXa<;PD(d~L zB8C206F%EkM^|CYy!KjOpT8rsk-O@MxJ+bd>LJy*kc*x+Loq4 z4d4NjcBBCjxKS0``ZR5uzxxU~(&UaB=8jV)M8%w9G`;wSeVcchm~FRk#Lo!antl-H z`;RJQL9>&LPKyt|VBs~gcilkmXtNgxH+bm0RHb1rY;H8Xd5 zSZd$ZU~r$;jgj(1?mkRId}w#reP*{b%<+=Yv*(L3Ps1SZV7IR^~AikB_EB zA~gX$M}H414m!;tX)%X+;BE4=X>&Eh*FoN|J%zv?>?>TA=?8nT@znmKZArKzGgFKEOmJfRU?PoUsJ&@X6frWRr#u4Sy?EN*I&MQXfjYlM{`uJItLW? z{B}y z#>@S~u1oCDpm`2ZD`YsjFE%lvo0UqMfecY|>Hf<5Yue=>(CkPh?A`48Uhp0h;nCD>yJk5zF$09Cxru~=mV0vWZYEW;}j zY7{B6*Opc7D@}arRfLo>>4{~!Z)KTWmQXvF*01}kiZ}U3RXqONueCi|!%eluwfN9l za4!t(-2e<5l-u&VqxnwOfvr+`%BFd!YGv!3LMg*-o%_atE51z2p2SvX&s_&IXnd{W z1QPk_+RoF;eibrwf#L^uQ=2;PhWb^&zEL&`__Q)SxgT(m7GI8>k(Y)XXIo1kKy4#W z@1+2sg7uvdfP;jdhJVewO#okEItwPqa?0y3vaXyyL->O}0usw#qK_2t4c^RT=+Y+~ z@IFmf^kBQgs_Tfsg-@3Vwam|wi#UO17<&APFc??n{5h_?No-Yg@y;NSacNmH4O8eXDk@t2D7DX- zQ?uf?yK5xg(xYh}w@c*2*DM>x-n@0(u^Vj{XI>oD;(JoQ?VRW2PK@X7&HYWBkwK?o zf&s>T17a@7_^XX^-O1K77_#pT+mv#G9YH##(&c(m&NvUE^WAVZqU*VP`ZFCV=si4k zJ1Ah`clO{=eY_~6Tw-`VMKW8tOUeJkgbEFL&v~8Q5EH>wL(pcRp5n(JT26z762MX8gf@ zB>|+7C!9j&fSojOBV+oB*SCtI+x%qBMlAWWz`;2Ag^vEZBMbYhZ2(HPG40Nb2%s0> z_%gBw>`Iye$4;kQU4qKFX{e)m2o~Bp2*^ZvyW@jxh5)#T7W?o1-=JFm4)<;VUYD@`=$5&>lDW zwRo!?_ni#l_}9oFTHTGyW3Ts~x~8(#oZtU>zFX$IB#oLqb{L}LrLr+ER!4mg6oUZs zoIP142o{8JTX8P?8hQz-D|t&2H!qg z6fg%}kqZ)^dOEScirC_Xe9F^fFB>h_3{!CoW@J`L9n>tBu$X7js8BO?Ud^Dk8DG}T zUuleMUZ!2Vjbi*VK2isH>KSKtA7Nf}{u@0J&=gaQwCQra7M^4>3V|5uVW&XJ2T)Ba z6*5~xEmDD!dlikn_rl05Ewo4>1^2`oyMHaB#f87)5gUImNM;6(Mv+pp4OkRZeZ+m0 zp0PE7;5oJiiB+3X#dC#WWsIIIp18S#X299)9`RXefmGZo{^BB@p?#mr*I@`EFz3tQ zbg^t>l?(D=gruEksCTn#e|2E5oCPxtiDu94xi5~JTphCpY|RfZ<$XUm3@pAhJ1tdg zB%)FHNOwkq@+kL%idVBT3Se@?sNH!p090L#UG?$Q@cNHDJ*X9-o{9Q6T6ld6cTbS@ z@5NNmcV1f*^zZT7O!T(0MKBz5IW>=W*RnAavOzal*YIDF2a2Xv?)Tw*54mUSy5WVXuE10F|Qx5^qmdD08JXlJjW60+iki(b8 z*fU>Bqpri}=%4j#k>_aoT@tYSS0w>U)Q*X7i+b+c+ib6Q!t{rCo%65xk^+*gSeg2+ z+A`sEJlj&$rp3(gXXYk(z{Q*>XqSwjP!!9w^Jwt4)?-KaXWwrDk3?~UaA4Tb@(n8s z65rP{Y;_SYGs1RzvN%#AjeFjfRj#j+*o@vhGzT3YBPQwNP2X1P1W`by*N&VSM1fxy 
zzsS@rPzbj?)!4nv?_Z>+^?OmK_oM!9|C><&xgYDump%sZMnLD4@ncuHGJ1Da*o?@AMT$b4u=3z-k1@1N6jEd5K^Ohz?Fxql653`dE;FX3ys=3a zOitC5;g`Zr!_`(1|4A@K!qYyxDw-OOpU>64i%m`i#MEHa%*9)vt!nWzhycGHOh^a+7?W!7jfkFlw^P7`}9_%S; zw7U``UfwCNB@cAbw=7V#{$$tU6Kg(JeL&9tJkNwbr23`r!DR(DSWMr@nzLfQSAs7R z(;izbxQp~z6a=%t)|uH@Qn>wOhC7WLz<7~3D>YbTKl?j1fbqtV!Lj<(^(;U3A^LCb z10JKx-=39{^Wdp0ovBFjbQr?+@-N4egvnEC+aN2q#y(CseeJGKS)0;q#sk2wq&Fk@UXbh0yHx;r=0u1&{O?44F4$)rg z9S7yDLovN&r+FVoKAW#WWgK@T5DWGpK;5FSU-lj|Sth?Fuh5V+Jt=jdePvfb4b+B!nP78GXO$%g)vWf2}k zFOHV6oo?oEcS#Mtj|1KDiCYkVAoZxEdy@K^$tz3g{o9APwbaP<1f@qz1(l9UT|ymU z)D=85V)ARDdg#Y|!t5^=P7t3>sd23@rtR)w@h%CUw5M8kDs>E-=pA3vS>FquaKjy0 zZY8NXFZp&kYqY_0e`8asrCZvUrD^iOc6-LCrP_5VF`nigP{@do)X)rnl7f~qa>-!7 z#_Voyi5}VkTYaEXJZNKDHtRPRX6tJ=1P6W!Y`~GQ0wfV4+{OTF2X=XBR##Eh?4IDIlI?@~I^VjAnufkK(f&z}OI>M9sFS&fO>lSgO12XlC^tA(;O6 z6k_&QQ^-J(DTLXwMjWTM_;bu57E@$WQ($zKY8$X|Gh_KE5=9|JlwvQ(4MEqCO6k(uD}1pxuc*r zB*nD5yPMy5%p1K~aAZ9~qhL5vV23Omy4fc))q0@iLXWAk%L&9QAnvXg_<%&y&heG= z2uL*2AH)pbtI>-#@*l4L-H-7T{@I*8{=q8vt1TF0e{3@$xlz8{3v>5TyX2;0*{+{I zO*xBn4g+t32+P-SHONOo?lZf*^Q7BH_kv zH7An+%Mgs}^l&BOpJ5I&<*P=C9Yaue0cQ{(=7j869t-c%0n6k5o!}} z1&*RoHQPmC238pU89XU?pQmJ&MZ&A?_L%NFeY*f^N>)S{c12H5T-dvoXF+?tjQ0D)CCz(oF@dHC@ufSiyxfS3S`n}Wgg26 zWn!kT|IV7^`+M>E-(C7YQF8vfOaJcD-%#$jH@wgNmHV?*T|(a zPp=Zd@T}~Wvq!&Exz0KUU!if_Pr4JFmc(+-G4U4z|KOi7@H-oqd5GMk0h@@)e9lgstx4pBN#7^f~$WJw?fSGTr03CJPabLLC~mL)URP*iDlNq(d9k?a0Yw<;4dzYBz z_VueKw7@`lea5xYZ52KEe_sR8MWR4lXq9)xl5jgT$sP5a)9oRSvQH(N;U|H{^tJ!6b#GE$AfQKCY-1 z7y2octNunB|1YI-BsEX=k@25v9RIJ2TaqkKLfWLhOuuc$nGRw0CVq%@1t-VvmSFa4)_;^Yu*Y_3`$JI#cPR8>~`#F zS2%=?yLyx)Rp7NCZSZow-Ql{NNJ`u7<@zr+v&QV<$ddodx~uxeWmBIamN@f%jYw{d zi)r!wDv}4g1!p1`iU+RoR&AMpN$ra})t0XZZQB!4Ofin1ip^cAp{vM=NGZwv=Yt>y zVjOoiby^De+~%@NBDi=fgflRGIgy^06x5U&@OA=#9`)H54F*R4~ozCw@g4hoy%WO)0Ag~ z29APQ8GWbJ(QM|MRh6~_u4ulX*D`FMwtroxT(pUBZdP=}c{d$Mv&NMkDR2exQ zSxe}6tFh*CyLx+89yfH@s09emx+)^=UvHw*R2No^eZDT{G*JV^$AGo9x3sRFK$urj$*E$PZ$*5di{76JTlqk z{C=ZxnL1dCbDTYZV=wR_ulPct|9nY1VSbyKdDt4;UepO8o7!?0W-p>Dvu&Ldc6a(u 
zMH2)aq0_mMJ#KtM_oxIX^7lfFGw2A7mcIG8vHs(uVyGYag(3Y)U{NcPgWl7+DvRxo zV~)yVWI!a~gM1*dH*O-KVH$lFA@qi->0~L5OdCz=do@yyM!rL+#*l(Yjvq8Ny|1+O z!J_4G-9wvv$9?$pmR`Z4;72EOz&W&G{9S+2<)Q9%KM+%yLhCqd;Xx1 zgMNahAMIz6{zv0t)2QmzBlytdwmrjcGls^Pb3%qc{j}a3<~_Qk=3>~b1`pd|lZtms z^f)+6ZQDzqVmfmToOgq*z!IRNaN8_#p>t2?zF&B`YVfO(y?v2t$*x7Go3x1IV0FUT zSc|V`ris{q3+qm=P{f{?N3C9mODzatBg+>}=wZw!#7C5>R_t&#`w9FHg0)H(1Jh7P zQ#^9P{{c+sOz2?Ae&T()3eW^0TZ;q@(`r;mEZv-j*~uJlm3B&DpE)2(?Aa2PDD9^y7c1P%*+jyWz$f$^0$&3qd2YiN@Vi*{4pJayvWh1|J z-TFkArezJsbo@Zhu1(eW>}({Mo*zQvehKiP|3d|MS)V-l+}ZY+3&#xqEcfeG@9CiTjc^}VmjE^?d9!3xzF z5%bppw2nqI_x}{->C7~4bTC>@fQk1lg2OIf)~n8bjV`3WOh`w1rSkdxrq1Ver<_zJ zelP4)=|}}joBJC(n~BKmw0;O_B<@<%n8qERZ#?$Qb!a_>eDRGPgBY+XfWC9uZt8Z* z@43uTZE~A0h}udhC|>B!i!2)D0KFqxfy_ocV2u0C>+9PUs^q_`+sWnEQX8{_fjy%Mp27%9$oM3_c6#RpoXZ}H13%6 zk`CKW!-^{zw(q~vA(L7so|}~lkH@sfR7ZW|pPi{y0?x5<$6>%A*7~AEcXVcT%(5yX zwV$m{39suq3i}r3e@chqlnzA5{!#z+3>d-yr^l8)z8cu&vm%z5gIf31_hFvlqPF`y zaTeuez+X6SetiwW)?0%!%%ii`r)1Wb=B4KxFt3na{rJCC@69Y>oH{Q%&t^2u?FNDj zcQF2?27QQ3kq26W7ct}QJrGOfzIO9{9HIyAHGVoSpO%4ZgY<*PgBBoE>T~z2-XM@U z_bkKdtb`gJ>hcpoAKGxCKOac*9zQ z{Ura!cn3^Q3)uH2up zJ!(J0*>H2ANV6!i!Jo1vE2oMDu4Dh=EM+r%H|_r=IdN@hDXS^)N}x?DISkV5@Tu#1 zvp{mP{k;3+^DL9s0S`~R1zt2fCq0(wN0m%P1W$gh(eSb~^J3#8pC6KwPu~jQ8pLRC z?asywu!j&tzgHSmg?`;Fm`HFO{!p2gMA9I%A;{m-aai0B>0pgxmE_!)CGqz1?aj}}H)DZu=%DP3genj*o7h-m1JkT0>1PX_ zelNxfuh2X+S-}RRtr#$jz5fPm1V7Zl04^XGt(b!2rEBi#?X7hHx| z6EGi%%;>D%X^2=Qah=aAiSItBTvb7@{3L?>mal|8!(m|MGHiq5s5X0Az?Uh$_PLz~ z$PWPKkVOUL2PZk^qRHX*6I3l{w|={%wh;dZ%hVsd%ln{szUbX5JfPJkxU<69-G#q17yRZFb+L~Q{B}(ZO^ld5mUIpS30^b88tySJ^F;W3r=eyhM z3u!vB`xe8;)&Ny;M+^imJ$zqtc@OVn1v7^??090FapC#>iZ`&fv6_( zs&XSRM7Q^g8jJjHr>cbo7a9^UDDDw^mNKp70Nqa|xf)avc6ffwa1k7_d;2TFHBk%$ zzmzflHBKEn;p1|>en&eOU2gz6JTJ3FnU3K{Czi4e;Ms#VT*YWx72_4v=(5Mtry{jI zv9uZ4*%4td(c)zi_C(fc^8*l1|A{CBWesFQY<$pmz*{N&@AFpFEKk;Kz2-7#qW3`V znrtQqLs@nXHeS8=_7V0eF&|I_F87IAo+}O0ldt8EblVz&{A^H9Dy-O+*tJ82n+OY* z-+TkCFb*JB5Kg1GiQRSM;BP)d2byhTW%w4;M~hz&km@~Ca6pMC-##GufN(%q$#WJs 
zRP4^gD?VffIsjIN#mZ6|3kN=jpd7wbhN+LGG=>&@4uM&}7I_Zl-$_n`e~;u;L|b3d zZ$ewPx;hv7aeo>XjDNL=rwS&?gB$+Txco~Iym=_N#0(

V{z7CZjYF{s(BDiO9WKKu4T4u5fQT_TqDc4cVxtdWqBVk* za1j523zRiMr!6{wUT@((N1T_l8=9K7AYmcI+|<+*u)La|VoLc9yFCGv^a6?f{~}$+ zUAKv~W7`h*sD))ad9ay%ET8ge1)RTqE$6A zwzmsRYIAclW3S2XDmrqpHyz4WgNDOsBXoa;{aF-1O|A^)dowAe0Zhh?;kQW0)Q@}i zi8`M@x$DR20VHszg|B`9>67kHy*?Z0P|S=!Db1szs(AyE3=q{^i~auky9B zK!gG?9pZWbuoBb=Cg61tMx@s3Tzi5@EW{2%okA8WbaNx7Mj^U3WsH`D^UTEhW|hROhMx@U%pM0bIT zj_fO-P;S5>Oa0k0FJE4TK0|-_rx6fYutA}dE~StUX&`e@@|pe`br_d}#DD(c#X936 zYrqcBcQ^icl?j!=XlaH+2zm&8xEa|Po&Y%Y&vP|Jo(9v%fgTd@>61R3-ip037di%p zskyl(pe{{^-C91Dln9jBF6t`0rUi-#a!2CqAB(ZEvBfHuwvYJ4lzBj^U=1=rlL2OA z8-NjdJuo+y9U(G%s12It1#`P<00}~@hp;rXk?U(}xcq`p&8q8SAm%pry3{ z5X#wWn&+GAHOXvkAxp=ja-ph6VPNmRcNk=h%648j%{Ypm(f z{-$b`!QH1pzc9Nf%UoHgQ&@ zvlEmZB1Xn^qOR51Sv?kwGM?VOA&wpo6jx}Nf#YA27nG$It~tn54|=i4oy@vKAG)Xl7^5JwL`5}hK z=n?Ga=Eg=12!P1-YtEA7L5~R-?z6cl;Ig&?yXDt3E-RdW+32`>sJ-xs!Xf&CA*Lo~ zXMI3bdG+JvWB_Br1cJ;91Rb6tu?(%eA@t|;(&06uqw&UlKR7{S&Qf0>P)cc; zrDZfN9XW-WL&t#i4*&;3;D{BEy9jqfXevebigRCvk{`f61hzQuEQm`=o`)=mh-}o50PG zwP+k196+ThfmL!^OpOHj)3Y*qr^9rl#0qkI94JK3%WRECMn=Aa#DrurV6=;ZwXCx% z+!Hz+@!@dRfcmo!Nz-YgmSA<>(#|gRWriXT)GN}vPxC-cGA;k%FIoGJGw2N08Bpr4 zJ^<+PbC20$KHh)&e9rGx${a(#4C`hQa_pgKi2%Kt^~s zY$gtkUdk<{D-Q4n`9P?_AJ#&tq7ZWzu0Gl#ydLNS-AGT8_}OQEaIrgz-!lM+OIVvi zIfeFuu>%E{R%|In3+AbHDS121K&t)Qf3Ku@N%%fPJHX%n z79Cwf0;487{en~YNY-dqm%;_-`>!*vv9q(orFKYsW&#rEIRpQDc_-Y3?^C^)iboT3 zt6x<7YnSkklK;OuCun!5=r}n^Gy9enILyg2`~HD`CqF+Q3Qvr+JkTw1@$qtkk$*2D zDV~fcl1SlO3nKn`LP8tcd;H-#q6-xKe877!4C znW+F|`svcVue7xE9Z^vl5)u+6GFu#gOgQjI&$P9ZV#@y-M(Tp0j&9- z5C4nU;A5wArT_n0P0mH~;`7O(jynx2&%f}}JP!tt7)bXqkb6llUw#OpF(}P4O8RgN z|2w3JKLXloGcgRXb;8({LKT+l*jpyx%}8-r9|IEe65PHqxE+Ada~mBXCM0|dt#UY< z1|@uQNUx2+B(Gq>{~*y8!MeG#1Dw$Z|<2~?>xClqat=D=LVKxwCRp&#Pp;69*0#Z}R*T;Ti-_hVufUk2JQ@l#|EDp%s-Erskd|pi_ zEk6G~a{bcrMJ3~5cV%-z)~B8(zhioa8-L?UcEYCkfnIzD2!U4!D~n+82eu0}@H4<# z-ymayz@=E_>ICi#JOC#+Kz!^U9+F+X`c6y>>K%okb&8wYV4e<=9oMU{pPCT6O&J2^ zAgd#M65=@U1Q3dgnt{$`aR)&u^?vAJ4%s`I&V=M0q94p?dHQy()myX@C$glRh;hntO#5h!<2VIP=-yU9la80wxZ z0s;aX=mc@r%GX8H=uY5X0+WtB_cNJ0$|r 
zcYlAMT5F(}dzs`hWK@Wrsc3@)sp4FKVXD>Ie{ax=sNS>0e}{L>4(sKisusOY>s?*d zbwnNUvNGfNagxL}wYpVh%+mUww%HoyTdA&X^GXrfzfri8p|M2{F5?k#w1<8D1eJ|8 zPkuy~lkMC=oDxvLegpFGC8DTfEgq>$5OG2LGUn#ykmJ&rs&(q|ft2X1ClMnW3|$1e zSzS5X9(G0mxC?N7b>&^J1StWDXRr^=tTsYIhA%;)v$||(G7A%;4})b*WK@^NMMFb_ znxCbLq13&@@kR?vR*TEEetZym)uXW#5k4*b+DAvo`bKW1vb3CQASsG+n_yl;uJajC zMHdmDSy)t*B;7=1P)iC*Wua8PxqQMRsKQ9!AagPXHsgNEFwI0{DZ2Pw8xWur?Usu#I z82G#p^9Mf5bD(Nb>>w!cjh=x2Z8*{ea3IhUTq4Rq8Sig*iS?QioX^C{?^!|2R5 zLQR8bVNc0oFz+_pb^F$XzX^`NNFtN9w9_Y$O5FeJT)Vbpy#snZR0+>EN3kH@*Y zZ6lH3g@d+j3NO7m;nmINs5Fs#j>*PVE7Q6hA~*f2wr62%w!TpEi_S{vG-~vAGEcg6!5cw4 zD~jSs6TIm52|6FY{~VbiCaP~0@o&rc7t@ap@QDQz%P@^&slL0Xn+Yxn#|?!QbT!_6 zHKlRA^^e?;a@Lt3VZ&wYYPhj6S1A)HEXa`$iB*_U6)1#r;T9M8r3O}cDqNrHTMmp_%l!Yl52pJ>_5CSF!zn^e zrTrq$tE;nD-e1czRGFJEc=}lJ8|!YmwigMQ^HnAL=s!-)^JmfBs6ZSuUz8^|9 z*?O=e<s-OOg7pY~)JYBePNHq-6n{9-Y@)EtJfvG<+@2O4Crvl)=vhxBqRa9=8U%05G8ffzY_WIlYO&pd!nGbO1jC z6{AAJ3=lD3fMd~`hCYBaM3{@npI9N$HnLPik7j*zypH5-p{T;6ifpFR#xvJqhSF}e znbs8j;YEKz54%gy-K1YQH?mbN&+Wb`D(}}kTzn4e!huD7aVjkluIcPey%97~?kYbtEWTaO*u6WSFEJ3}r003s%)I!qjhUitvx5 z58p2DDM)pP#YUH7)1^ytrOK2PSTWrW2Zsd`=GS2X@@75a=UUiLYfAL7(HklWM_VNN z!s_f=VYbkvf!hXW;omM#2SghTo*NheKlu1?>bH}F9y4#-+!f1@z@}I*^&)yj& zg#=bWJduA5RUj3xf<JHSJ~>lP_V?iV{ljdFri1w)H1DQLC*{?(Ww>0nfz?hfqHD zp}_0|QWLaWx;o#L$jHYRK}IkA{Krcs<3HcU!Jqj3b#}~9B-@5=R=^+X!n$x9ikfJD z72{yF=;SRdgKb?K>z>6PaCy=E7Bs;2-36r?F)JGN(6vUgC+c{eAh(S&Qv*uwBUl>x ze!O}M$QI#BH4zKI1n4bEun{;ff4mF;hWaC*{=W$=-bsbv|D4q)KOt6=jwoa8?heP7=LR@rU(tkHD)s4L-B2c;|~; zjEp7Qt4A(ocdYAYnZ$zw0w#g?oCVwLG;l1KcO!7%L3LLucqet{%$bdyof#0S!EJ;f zQ=5|`C(zvM!$qQ%K_XW{NVd=b{**mmKN~swL_j+MHhquoekeX6QW$^ z1~ksV&0HkGWlVij3^mWiIbYR*I{sy`v2BRPM9EOlqUY!>%O7liw8&dk&mdcsNW6ui zsg~_vxM|6x%s*Q)yax$qCM|p!&kZE#io>PMxCeF@>p8x^%Dj%~=Ae3mHD9a^Zy7XT z=#P97-46qR)7gX@`YS=69`A8mO-TTVwx)XhUO!*OdZfMLNuMGf*2R)Rn6KV_;rgv# zQci+n#bqY`Dbn?v9hS|vxfl0p- zSg0cqM<_HAU`IDjr@%coN8E|Jc;4RqT(i4^8Wqir1nc%y67gUXvo`roOM8V@QRUp7 zj`lFck3u^OU8vfA#iDz1Q1dHHDR_U3pHpTA>cu)f2_z%Lqyn^H!xaVLTC?KOi0QZB 
z+>iv!hnOM!KYf~mH#UIw66saowm87bGUEH{oq&)kxr>7wk`GxvGe{|%q`R$*@@~6{ z?gY^>PPVYAm*5PBZp}D#`w>$$flH#^|BI_iE z?Mne`9)s|Xo3!HE3zpUJw*%5Nb{Z0NRHgFWh@K|HUKu_=Yz}=mjnfS2A!fA=dN2c8 zdi5lmY;7cwj|wk{^BdCaR2cT8T!40vX1!wj61cz-eYgT#+ZC8$6ATyNS>2XUdZfih z#tN2}T9M9UI#_J2<`>**N{zI%*8(90<0xr*O`(ABhEKNXBck?lvE}V23394AL&Var z3+YErl(Ftue1kZ3mwoZROLLWMq#>EG35;DnX5eJ1JKi(c4qlM^4tMt850UQjq)c33 z5PkQ6Hyn`zO&ieSSEwV+Ku_>>>!fwx*GX_*VuFQRK|c*54@r{+Mr+{@lfm&8azp23 zq(6KwTqID>?5DWTSNvsYgDtD&+n0ES=dA_LIPVQyu8>Z%a9>lte&nTe=zsRliaC~) z)aq7;x8`}+S;IG_ACl`YBZV_wf>|O#D!Q#)KRBFWQftq**JBt%6|fk*yCEahecXz3 z9W6OWK|gw;r}U{%Wn#1M;J8&nLN@hZaBAH}X<{P3raFRhpS)#yOlQ`+{k<@S;)TJ^ ziwzciwNJuk%!JxjC|+iTc-Kg?&(`XzE?+y3na-96_+RBrV^{z|Y7$ZpVoev`Zs%5w z0DZ^-5!FC)H4V90gbWO&ND3S?#*Ic=jjt~dKF5ld%yX*^4O$K!S{>R5l%~;*ZYnF1 z?Fcarli5^rOFxi(*pyD)Ys`aJ^-hg1bYiMD+)b0dXTX-~X=~}T z6T+N?xds^MO5@-aM>xeBZ0bO?l4)s;v$Q>Z`jm`@M*iVL&*@at(Yh-JGAL);X^T0|<7r3*^{0M0S z&cwBs2>(s_AzJ8ioYJw=>%Fh8F43_68&NSyd&>jCz`-L1OpuA{YTln+1j8~Y7d5*1 zmNUOd5#VW%==Rjql%;sgnHl_sQLyS#hro(}3lsaum?sd#lZzB41o6D=Q61-`i4 zF~f(p>HS+yB_?FDxrqhYXso624eC)m+%H@-vlS{n;ai%x9NJEzV|YMfwadzNYiMYw zf)4}|?G)(8h&LU1Ew)nliUU?drj6Lx(tMxqWW}C08T-J;{y1}9DZ3gk`|0d*Dk8Ul z3<~BdM6v;?C=RL&2e@r3FhQEE? zm=Jb6kd;=4lUMrA*`6BQ^#H@7z$*S^v3FD|lxv;fz7&7@Yii04zg2K)Id2w4-jEqFkODK(mv z*)0NHBexPmiM)Vrng&h^2akXNTpppcku2&{x$i2+herEPO*>+ZiWwQ_Tly<5boizQ z)x{hU^c0O$*wC8Xhp{d2M`91bzv2W#5f_MwXY-qB5CR;WLwZ*Duos`nrj&ROE-55R zV!ymsa~W>c6vhiol+t2gx;^V!f8t5@#jFi2>V4X;@4m`qrxYP_R0Hh8R$QDUHkGa? 
zBR1Y4URSLeRiT96TNz=Yi)~?5W=9*Ydj8hM!;;WUMi9*F3~j9Kew>2b6YAq?p`{5V zGSUu=M>468v4nv~LLYr{*?3z~TF%LrD9VnJ=#7cm$Ex#T_tV+=*n}hBry<^KuwnhR zogq-*r^&!2&?^v%8mewBa7d1z8g99=g32^Ge30J}$1YTql9Jg`K3!A1(`V9HSLKs& z_{LAGcm2Uge`MK8U)7($L|?J$&Lhe%=rpW)l$!Pq*0kxFyj{uZaU7bsDSmI~>29&5|pFx3ax^L=I}Gc?V$ zeSN&W1u$H6`_3INAoUbM5$)BzOv+@d*b&*$a0hZeJK*dqv4aCuX+I zgyV^{ zi->E%q%PnNTEIgni-_B!;p*VmnFpmt7ltYkqY0ncaMaDGwGq9dlLHv>>i$EiZp>`B zU0~zSlR2FK!2UDDM;8Dyd;qPvR}TRq$nQ%DpoaKFfAt*{IIZ)ct6I&zE#H8g&FRl#?7PMX(S^UX}|qAdBw#* znFNiW9sbjHx&I^Lt$`sLvg_9)a#p~5i?oQqLR1`(?ZlO#NSssqjR=N~wtGH)I*-_o zfXtx)ANoIF4KM-C8m1TY;i!TQP>7BJt}p5D{fqng(US1M4U#%9T#By9QM>yRaX7Jc5Y!n*<8lGgxTVwPV5HMFj(uKah3sfjXFanmIB4QP0#&D?k z&D;BmkcnZ)`5W8Y^<7=TF!-|t94vZoZ+)CLurYKlT`KHna&Z$l-M|(B4PXGsT>yZm z8!j~O>~%iRe1SUX%6}NEJQ0$pm|Y#WzMW*y1RW$Y#|GC@Oqaij%n|`h?FjpKlPnm< zqp#V~K-aWQi{gE8>i)2`>SBj!uO~011~H5 z{k#hisA*n0HA9to9;K`E4fZvXL$p>Gr;^QVY=p`mZKwCPN6HoN>HL1)5y=Y0Xb9k( z7@ROeJSvjE>?)K(v%=HoI)r(F zrj2vp6&!FKg0~M9RJvjdhC{L|S8QJP8y5kz`;?n&k&qi374-`l3InRgaQJ728x~Xq z7)MTlk#F#Y{)5~W7{PGB6ZF2%I2CM3m@R}Fd!79TOw>%hcp|{(fLRLbEFtmXcvmG? 
zzC84hm(o`pbSD_sa&nLNk=nv}-;z7d0{=a|Ik z9n;9pdT5k*S4h;8F2&l=fxX&4cp{+GA_Ei9C?Z8sZwQQl;8_1W{i2S$z@Uo;PLec$ zEFf}@_NRmYWT{^96k4wcL(KUg10lY1O}E`w!!Yy%sKFA(o}j{l5d|cmO`dtgeq_)Y z*Rm6X%yJ+T$;ECU+&Vg?niVWeO+5l78fMMFn=yZAcU?auBqS|?T~st7DvG=x9v`5; zB+7IM>o&V^ImJIuOvd-URy(KH9edKNRbf-VPlwv*-l(gTI^tgFL=BtEe9R!fena4F z+|Y%Uip}>j?~^?u8;G1gHLy5GT=ZR7SP;HxVeB=Qqag%i(pre3tO-J~Y+GYP!$ut; zyLP_H)}P2ypso-%ADA$~gM^Gwg0ZMhWhKOCzw+3&==$`Ls(>S`+mACJOGy`c~G~ zb-MZQ9II0edtKN{uRGyma%-3lxi0?$rH_q1b1+TVX4gyF%+#$V)2^;%fX_VOLARv* z!ZZi!}L@-bIFS!4DfmEf@+p2K~&giy0U$;7J64T80&=>*(nC zGdFfl6;cOy#K4jpqv&l;jt}3*iGixZm~N{y7vOKXD7J4#xuS&|lHR+>)20j#s(BMpv0MJ+p$DP}h!Cj;?K|}_o ziv-;cdB770zvBlVs29c&U`Bed(0~M-PxLG-K`>tc)Iko$_mV=o}xr@1pt&dxpoy56FU?d+Y<_ZwlB z2j^d!nyc{>4RWGUD@(**&<~3`pBLTTk6>sqrq>zbdiI1FN!r=2Qr)%vm?|(!0&-y0 z>O`a+**zy`=K?5ord^YhleO@E(V}i;(O*+xDD)}33vAJ^&8xOgAl2$Yn1Bhc=VuiM zSTn%MH^P&BSz-nTX)T@8vwe@o6DQlvH(1Yhtril`z5d4alahFrQoK>9=G9p#kfkIi zLoF_prIb(*@Y>khGu*z7OF|MHyJQ#W-!hdvSzVNOYM`b|W5?GgPq-iHy&yB8QHRRt zkon|~n133s^7Duzh!-@1x}l+H@aDDstQv!c_(OvhF24nO>!5lvc8m!-;+kZw_T8QF z{3inhqJX0^7cH;X3HtZ($=ayU;{Y(moEz}5CjKfb@_mfrJ`X`;#YY`j&^wCHqhpfTVRjFONqfNi z$AdoIpC$n_$i;`Sa$t*`h8|ZoK{y(`eBIzs<_4}U#EXsX{;S^IFP5|Ft)hWfB;xR^ zwJoi^nw5oEWC7EKP__K|npbRw#(!9DzuIGZiVVOaj4GUPTIdLn2Wh}_VgQvI>z{{P z=#C%0aD}<{$WMDd5;Zf@al;tnnLu0r%NsSL4(m3<@Yb045Qq?%6})}#-n|YClxCPv zA{TM~>E@gjsp%X*BD(JhVIqN`sD&Lr!@qjrkpfr@L^zDW!8!Q(m+ZOQ_@oYK z7Pl6XtPcx>$_^??^>}E*WwtdATcLgYp|ocJ9!W`SX7EOvoBcL6EVEu`x*2D~!wk;A z16g`)XL*k!{3S`PHZQZ>#C^v1D3n*Pyj<+15kcRB^!P}1xj}?P?g3<64Wm3T%0dVQ zBN@^R4l;=Z=+bHPC%aLg4~d#JFuo#&=XVQ-?4dSu?|v5n|{XSA^mszbhAIZ zTXJR{L&!Dh)PUdtC%7&ga$y(-fSnLSQX{P9^vp~HaFmg0D^MB04+k@me?#Z!GF1X< z(Yc-8(%ug`-8q)M4L{AqhtF@`Q|!@8?hjh=OTK9<)%LFcrBFBo=pC76(^q*g?BTlU*ysSCAiC!?-1R4 z8ynhSLy?Vt@CBI)gyZ}h=KY=b*5cFDp+12>3NcNC$NM+3%XGzLS0=V5!znjD2&kMq zYCR!)W!BN3Y3KWyVZwCD1A47HclbqmPs_G8Hz&U+Yqk^wf$JW^kqg^%=Z2vlJ3UtF2h3)L=Y1T^hf%Qba(C? 
zEY^vmM`o~*^}pq`>C#rVSrSm!E0?b}H{64_wZ77-P0p*ny#lJM9T>C zLE&qrx#i)3+GP~OxD2N)8|*!L&Z-uAGPC}J9x+lddCDFTW;lHN2W?oDx(QLCIY*n8MKV1_gouQR zl@7RH#FC7xK5`bpoqreJu%V?T0C~s~7{{T(k3G61C6O|b|I|BE`OpsGMG;WpN=Pn+ z;`)d$p1(CQLqy_cLFF_$I>5P!SVbih1V?`bzRG^{1-5V#-pA^2$4C^r!VO~;UJ#oQ z;BP@x$eG6rXuw7NRa!) z)jzls0U`iG;Az+3K%s_C#b-|OMo4Th?{Gv=ZXzC|!^6Lgm%vV@nMYg5#=x2LX3;&i zl`2lnO)SVo4yJq=7XSe^4GmF%KOD;ks|hrRRq|tyT7((-OWu~S3X1J8%Vrp%HA`7} zXfGL@dcE+g9c>PIfn9D3A!LF5HJ`R$$5e$1E#xaPDpN7A^V2~@; z$q8!H-QO35=4y7F^Q`WV@G`BQn6INlx3=WM4$s1<1-;i(J)pwu9b078=MRZ}sd4~<<92RSz` zeSku=zK)J51}kXSTH&$i4zsD_K>su3oJyEqOv8wa!IRr?ibt$vVpb8~DtHv3a}GnM zz&*hifXLqkhQIGI zj?m*(g8rB#=7%H{8YcwPa+_!oT}TP?tJu-j?|ygQvq#o#NchCbRh#*5FT@J!9;8kN zZYNbQcC$>Z1ekreO%ESX+qn}32FFYH5PVT*BC{j4D32pXN1Tg3QZ~Bm&)W7QF{1o8 zgYJ0v)3Ah;g*WAm1KG7ThmF`6AU{^$wTlfd5~Q38)FwDa``w#q#KUx%U__Fuov*qr zQ!u+W<#?*I%9M?>MbWyCXX!V{kR3AKRHleUk>r~sN}h$VP87u@Y`stG#J+r|AfSgT zIU4&87z(QjF_tnJTb?GJb4{Z@aLl>$oBuJ#N6UK;Hyns3JGmyC)ydH1y+|vuU2N(e zhe&H}oHF3S7Y3R<7+Uey`QA5OnmUT+W0U8Q$BtfHe$Gk%B{IEy&_X5}}dr%H6H`%$}H_3P>0&!2{ukjq_<-sH{s z9U@@za!9_1c;-jXpWjg#85R#bFiFT-tCc3@2(o&~sBGln+O-6KM@ea8Oe_f>Tu)^{*2rPud#E`A&ZW6tJ+Tyn4K==IKr8BDvciXk?`bRi7T4~_wwdd z&ZAFuUTG7}2?)~qv+$bZc^g`*)LB+5hGWmiPUaW;Vt#g(ji29zL`xl0QjXfMXg2Ju zAG-1_VDWW6?$h}dvsp}x5q30QVULa)$e|($S30uh_lV0;7Y9-!r0fx>u@1R6<6OVO z$yKo2wJLUI2nxCN z#koGQQ&D0wnGo@3QWP~!@rtP6tF7WmqpYb;F zFu%HCdX95LiTmPf61oV4FKBKgo9e`ev+_pJ`9&Tj!-rssxL_gwQl4o&NLG$~;ofoV z-&+hhrp~wZUcM-8v$VBLAT4e6>MNM<(V91OocmD`D4Bva9RW6*D+By^HruJfvx!28 z#jqP8x^(E-C`48Tva}6erpKn3GN;@fQir?cHq7U3OH3e(TJwt4+^{BLO<=zxtO?k> zv{n8>I`v4JnF3*yFFuD`&MBoM83*Vt9WJNKRAZG*PC3pdErRP+eW z+?DYE=$nRUHNSH1G=hDPPpx9W6B!`|@SXZd{sB{V=vmgT>p8d;?FRYA;L;@%6HzAv zoU1CR5&p);?kCq0Hk@=0yn@qW0)9K4^I0*`1A7{dzMT+rnth&>LPPuJ{+6RYQ{GNa zWcCS(G|^TI=@qPd>`{Mn+}-z&SajSy=cNzZl2Y1->G$1;^nhiuObAq0K|EO>7@t7g+6tSgV0qL!IfG_^Q6|-<{VM zFtk&}aQM}KyV3lmq~9agVPT11AI8`U$h+{td(!Y3JuU6T=fcKG;B39uw^#3SQ8C>3 zXuQR$e!XP{^{Q{RVHQ+w@AFqamGlu)SlAAQUbfO!fT@H$;rGueU9dhWOCSjbT?u(1 
ztc{3ti2|rUo9Ia+BEAwH8NeQbM3V1^-qzLDNJ#ITSs5kueOO*b2fJr*rlOmlz_X|8 z$Sd*)yJ)<+==c|7f^HfaaUts`c`Pw+!)E;zJRwh<34|Ajfqwyr!yX)Vgz5cQ)Mt3* zmj{Dicp1@gb3*}CUk^Ok49G&)0;O*u(^jn-*l6G%BZvW-m***WiE`0p6bPSk1J3P1n?va z2REnsK!`{2KYEbU%oihVhN}obw*)-T^Rt}3^w8wo=%!EIet+H2AkU-PrQqzKvaO*( zvMeFJHAvsLPd;CZDYI1SVKoXFyL5DP9?0U*B4v(;fPFE)K%{c)DM5xq==+IZ{-4S? zypc7)?1(ER04k!VB3wEJqIJO`dkq3ZC>Mx`bYI%m_|9Z0AqrS4lAG>`d_3FRC(wKM z(EhB7xrL}9+J=Oikob_07dw)<@8Ak*8XDq<$g*vk6_JB9k*OmQB)&X&c#mhReHPYz zT*fv(ISO18o_{k2h)#qIwy4?>87DII*A;#~e(Wk>?vd&wW^g0^l8BCwh{JpGU84-@ zHi19CYE;gCD}Lz78($v_l8%Qsck&8a{Hqr;#ahk)_o*z zQb@tlLBI=cz{HpZL|O%sl@N+D*eC_2zdm$fX9Oxq{A4e}N@wFnF+qQ9>;4!#=xS{I z@-Ul~Ue1xD9i(QGpWzs_0j-%lAvhXKgB3hK5+NuIXr5siGP1B7gCNZR>yuUYU3>V0 z;pcH4>^g&d^Sxk(>6EP(+GX%#&)cnZzf&Bwt9SB*xU!b|?i|mD1>VAOaOH>t;-A66 zCmI3)j|tBb){>}QY~LF6TN&s(YA1ae?j(?7O)MjwiE~z ziB;IoT?ZyBnzo6B3hnE^Z_l%@yz=pRE*4ciO%C>J8$&f1Tcoo82Tf(&=)C30(9062 z%0SmO)ILH*U{&1HIyt$Ou+ktS=m7mTGE9mnyl5{?BJLvGDGF|w9`G=1J}%!O${X2= zkC^g=%iiK_0U=89+V-9^i`A6J!u(%X-8_C=W>D(RvyOt7M+0lJ75~uB420?~dwY^~ zYl>@od!evx^WR@hp5)K^(xBoFjr{P}ZgLF=Z?+JA0@#?1U#hkPz9uQ3^w*GB}yYxy>Lq5oHNtV*^L0)vIGus%LY}x0)H@ z^~~A|7x6^|MJU-NG`6?>?{`rkTvG5VPc1#O97QH5ZzwqE|J5#lQPMjV4WM}OZ4wbp zVf!rxIyw@Q6Og*~YKD?eEK-l7C-?gJd}khV@$vR>D5g@yCE57#~T1|mBTpTJO_@{m_f*E>&NdLqcfLO6P z;RH1D1xE_{%-+tf2;Us3k^HXM(BUX3jU?dmM`RxelMBMIA>fRy-|oH7E|d(4>U-pre|h_aK~KAhXH2y_};P6w{J;EqV7TskdNp?v0J4n z`<_Q6LzYS9uWu%Zk^nsWZT0-f3_&Nk9jHU`@5!bXqp(~faSy2AAI8UFdgX0Y>0cJG zd1E&BuO0 zqaWZQeD*7`TG9uGRA{VMl(=(C`RsLF$j=@=q{OPM~O z-?9)V zV*a^;f6n04g0x;_i`y4T!}v& zA=gY@lvD4k;OC`UnsI+d9@cshHKSJ={Y7&Zqy6jfQKC5kBM#!i^WY0HPlQdG6$Hh2 zcD|sXAmS#2lHSDk@A8{yY+Bn88CmrwX9uJAK(0aM>1^~_@?q}j~WR?gN`BMBuH+y;y3Ax$g z)x>))mAaxvj0~RG;}A2$qg(c;X)YK3zGM4zZr)+Pl92+t6z722@}_lIo<252B9Jy3EdRa`^ z7oI*Dj6Abzy`EQI>Jq386Td;TnJ@C;&Sm@dg$KIcB)i3vA65#eAHr4S9B~JP+VrCD z$@tWkv2n|nyq4~vf}NQ*iTCH*ZZAM@*EOZR&y@n2rNLr`>KNc*N?+?fqs^_wUH&KL z7QrC()65`wz)1im8#hf4w`@B#aJW_B$45cU=!jdZuY3`YjnyPCRDCiYHE8(9N1r++ 
zC@1%(q2+X!KYM>)r^cFo;e;Hf@BZ}rUd6`3Blz$4&Ow5JY9c9sY@WI=B%hI?QFFJa zrkmwt&@%qt|GDZ#;9A7f2wPJGD)aO>-}(I~c5Kp>^6=bxkL`E^wS-%v4ymiKEkN{u zQRjXh14rxzB6DJ~@$g4~3~}cK|5&HqL}Dd;BJGcJ2}Kw_QOc~hiLRnL_9#~8d?u@5xY>o3*JDf74Ew;ef~w2W}4ZaO_U>WD=fOW^Ee@II&owvcitBAQ+e%r= zPs5z^Cx|^PSAzS{vdrp$cyM*UVO}yO6 z8p!vi^603~V$YHDr*B?^cO^Z0-h|n96A%9V++&wN4o5G%x$6|Y(2eJNlBy^gK{WCf z!@stiSn1=XzOe1+Q6Lzr)9pM~Q`|^d+BGGXaBJ+}j|IQ*7MLam1Vr|1aL4qbZRZSJ zkN^XUmRLn|nrZ|^9c}DP>a3)08~1WjUc6rBpyDD94tdi^u^G$WLXll>iW}F|F>Ucv zjbV$81tl9_Or@slhrv3ghQs*wZaFrplYz`Timo)Pc0OFFWe5o!e(oLm9_~wg&KzYo zx#)!-hvYJBEw5f(yZY~taI*6V?|ZD1dZV`C8|(3g=EZ^16ud@j_E>_!p!U;UH(M4y z(=Wqzi*Mmvop&fdbW65;$D5m*XFbg0{k^}k`fsP45zhI(q&2AZ@6yv0yf8{zfn`T( z-JtXwr0%E;P(lJV2u<0RL4P569Vc4s#N}Q}aKP;=I5NRAUw`_P6|H&om*PlJSK0;HCkMHM`;;t{- zL`{0`eqEf)SCU3)w;cuvXBNhSpZClGSm-EqUyES9{s&kCt+|GAP(HSl`?$K4e<^+# zxu&0Mk5bd^$AW)KzC%-6BECL?^+k*CT8G}RmcaWAA26;S) zfNaeS2A;++(ilp)xziZxE|$fFteINaryV0ABiVU)E?H!+azCRk-VpRwF8y7AXLWYM zwY7^|m5@*myH~uB{S$3B8=BHt)oHrFs&{LivoA!>9}H@U$jf+}knUf+TAqitD%94F}`-4!w20q zI?|UKwdRuq?VE6ZfshL-atr@vt#XtHaia%+zfcWCSzB4U0{-nmoCkQGmH&{XUB^!dCzj?TDV5$$$ zqsX4Gf0Lc`IdGpP*4W4zu_x7~mTLZZ!16e9N2`LHhxgx^;c-E_h-D6*{?TX8zI31E zSijE=?q%fLE3OOw_{zBTkNgkX8XAkx+^B!serov6)YA3B!o3OgVz~r}g%w>i`1$Zg zCM9bG`0{%%{{A%&^U&8nO-*VA&>RI2mgwPo!Z?HoI)+afnA@^Puy}0sk7JBm%WJ|c zLM#gYja6!NE&DVkH`u08;Md)WwE%gK;J+X@5L#V$I4$hFo};YtI$;#&6pZGI|45h= zOT7GzTzd)#!6dSz0<=lEdi5%)5+mFw3v||7^2LO9^nI~p&$eYRwz&P>(>z-04sv`) zPU{XQh|QhoBhvxn%SbDm6KAZr2qp=uL)a+6W90aqI~8CiY#znVCL}F=4)mNRxRK5u zUzApT@qiEFfh%fsaMo^srMwnSO9G*QulwKd zijV#T#6a%MZ7%*_>{wa~jHGhgE)BKo=?nM7sJZKEbu-1?pCG_lEkwcGKyb%U8yb z5;(r%h1Mu4UP6N<^o)qj$msDFaQ(E|F;01M2_eZ(C3Ik}bHS(48d%9;ks-?eiD22M zKq|uIbrPa5EyPU3z0lBA^}tbhbHUjL_gLLl)cbOs%j}e+Y0|?zdgx%r~3#qALH0RSq z&tYh^{%=DQ`LL1!ah?2Yc>kwB-A-amV&bdv0c1kd*x3Kyn33=5RnT$i4ElMY5WDsMZ!G3)w9rXL>X3u=xi?VdUJIRxSFJXt)MhLn=eo!#X z-^ElAzV%13lzMuI-j>WEjoyKQ;hcq|UY&A*W7_@0Q>(Jr%7=9a^MJ}Ux;QBTSwM$` z_%FWyg(Ossab0JXVK$oNOM6St|0MIfvfKBU-=r5O67Q46+Q(s$VXzg6=M%sr;-~|_ 
z^vK7-U9u%J9B*gV8Kw1B9%kKN_cuO?zHSBh0p$>oq_87Eb`IN(YT~u2u6!a7@ z_bMuB7Z23XMyshz4M-(du8b@%FTbXC?y6JmpWg9Zy`Sl?F>S%O(q4~lLFHN14?UAm;?tRt<$CaETDuIxU!|C@x6Ireww{Dm@ck%u(sEN ze=L>e)t!GCq3|;lMPM-3pq#Gx>afKADuJ|)WfemqK8*j)eh)J~xzpv*BM!%-Hmt6G zS^qa|jW(4pl1hH`Wk6tJd@_NgO5{sVbI@_w96p*vpopAk*5FWq?%fR)x{pIg2W{DW zS62Ei5z?& z`YPGm$5K&6&$3-ZW1+eubbdY7KIznk?*rEt!6@+Np6huDk>FB&_uJ$ySOtwg4D7?yfGop2}^+ zcadnFBCX!WvDL-b@>c*^iK{R-n_wB2gs6xdV9tB8{8{&Zg#p_TB?r1$?I4Rvj(iKcaw~KooV;R20oV>EiTi^Q(!LuWr8(}(g{>x zPzBg`WAGz_Yk2$Z#pwgFPB^)}sd0}VzMKtXOhez5{%r^TAFMytk*EyJ%sQkfT+)^l zgg05R98HikA*z0G<%t{uN}pPEfI@&c@iEoa_QV~e$|>BJICl^IiJ*(4p>Or0)jH() zZ`sV^p#w1D-`oiyA_ThjEGX~591BNIdF0XKV(dH zznK#%L5W?s$LOtKmOQZHV&;+#CT)k>3$GpMi2y7#HtTkRM!9gQY1hAr4Le69`~h=b zh=^!Z5ig8@97=?OPj*R;tmEjkeH42*m~QOyjlUq4i}4NHSBL|_`$LVg&ru`+t z3iG__$ffgEvoTFWy#Jm=uXjLkM-Uoy6xaWzNu#~1W(uD&lY^w0K{N%>%Eso^dC zkq0mQPMjdeunU$KkY^oGYS8#XZj-S4xyLR7ZWDS!SGL{gAGrQqe_`VgR>IH_$8@P< zo9Rq!=Ly-DbZ7&h@B?QsB7?|_R&{3+QYf!B!V#h8=-B$!!j!*h7n>L-_Nil#r-Mkr zA)?cZoSxLSai|$r9smg?l)8?g zam0P+&Lkv@K=uVD;PjlzeQBWNNUGI{rslmzm&Y?}*8FEIbHHKhB85`0&T5I^-ri$5 zNzYW~hSwe+fBzDk4H{Jd`$Tpr!yE?ENh){JK*~eJ3sKby4QCD9gKB6*nv_ z(RejbZ+>B;L_pj>knqbsJv|+csspI;escz#LE3PlHkN|BSXCSRq_KL$te2a@5=iIr z{$IdxeLptD@870j}0 z2U@`C--Gc?iCntnM}s;N~m8)$yl(wQryLQ{%5QZtDca7^UP7wscU} zOdF!WAdLcTBjzcgp+#CkXHaP70tLleS6doaO2oaOuGe2P15J)^GtvC5MVWeVk1zmmRlICc4^>w6P< z{pBYOn`dwBm~6aQMVtf&Z|v;*_Wmyv51$@Qos8QzOvvSB;)jjIl{Okh&wda-Wh5(l zq4}hi8sD*(bH-m!?GYB??qEn~xt08F&{|)aO@@(sEG~M_8re5mi~`?z`40canR~e0 zgROF8*&Dl@R{D32X$uyyV#cys(VK45?bFe@@Y&W>L_wjRhYD}SXN9E1`~E0_z+P-EH84E|UiuX-!lYlEAA(*k zl%q%oGO*%rAnEhRRzvz?e3OL=i756+jS5&4IK|I!5lv9Y?mMCmS`Bs#aWNr1J)onoL|RDvuZZgG)6yuRb^V;CD%372P4m1H zOm2%|phr5Uw_x3Kp~HK7{>WdmX-^VeZ+jSek8~H4zP-^0+i8#KA52SYuuY)Nmby?$ zuW&8=ZFk$Z&L;|^11+#cB+l&*a{2Gv*_A8DUET6BBHP%IzDabf-4P4v@EAO(k0G5P zq$4D#Xf&$|8K|ds9^min!x?DZ3WO1-dKf@<bbQCIEn_S;312NP2 zKLK&4&-Aab?B&_|hlTcy+v3?2p|+b5I@i>+hr6FWnbJ+6iwkPJU}N;T&gIg8oZyG> 
ztpOJs==-$xJao0(_Rd>-U-ZHCB^TE{F8*@3ed5MGmu`=9tQLNGpV_U;zIK_Vb9dch z-}z2|&nA|W2ee<+4A549C)1HcBEUxFDP~er#)E+J)t%r7sXGlTt7Y!sHV}BBvL*zZ zZ?ZtvhzuQk1GLm3&0tZ7+Ochk>`O;xVCbGeF#-8_p;TXYly3XionIE3c& zJ8I`hjx@SE-%GQ9S(p{P?lJvtJ)t+()ioGA%v_TG^q5Kp$46(Bb8ww1HD>zg_{vdd z`N)1gw5cUyiw({fsV<=bOnIq*4=`3XL^2+NMN;q{s5A|CT7d9?#=;I#A3lCeaxwpg zG4713db3D~8ez~f!J&h*)c53Sx{#7u=O;4gO!Jf6UySByQTONnwpqg#T{gE?KK6HzI2T$UO?f@6-Tf2b>Iwwr{wVAC z?rS2QN^#twjB?~bK>Kn_ORn9;VLUd6J`#~McsPam8tUq-?CivC`RmkF{jO;`O*M&E z7GAYtf@eH7CNq9&THY1l>Ae9M5L&>1O|din^HO}~qG^9!u6kO^-EXXgDxK*!SOzZS znS9XEt29@-#?vq1c;ZXZ!&4&;{+l+W9+oU{u8P0W;pK15{)YO!jbd5k)~vew>uPyS z+Z`QGVBapj?z|!0>a54py}1{Hl#ZQa8|@KZXa}&aEW+DGL1G8Gm;$mT@wamNF=B!a z*(v&nW^?;)6BS)j&~_ev_2RNUDyHS* zTURjq2C(oYWsYOJfh5x}T(&0kNcKNVr+c3daBG35DM+N^DKzg+QnfMtG)QZj zCh=4D>I!n8Pw?Qswg?&?77w^?G^@M*GQSQd>zA-&Wn23$*DZN%|B-i};m1qiZ)Yya z?h+P6GpXl%=QCZ=3wp6QV~0*5cV5ZZJZB|@P2KIk!t?X1kyH>7HW>Mdb=gjF08aavfcca8$0)D>V?~iar%>ES^jcoX{qFRD8pRa~Cdz z0rJ|pgsRB+>?-pUR}SPC*}mkVsOssRe>pil?S-h7n<5QW-0;jv(rgpFGkWZ2`~)R= z%{NRT(X(hGY46Q{vVxyQd4htx3MLyNfxnasd}Y^2RCia=ja?iY1G;)(>9M;@5)NIu z6>;=PQLwYx&6=-MpE`L>-{zSJCf69|hR5w@w8&$s0LbHIZ>RX9Uv_j@Kk=n7gwoINRd zATEB7jV$-;*RRhdC4F?`%`=Dr1APUw4i48|H1{W((F`zUI#Wyg?`msxP*?YQCY#bw zQtNTnz~LruX-%MGagoUh`>eVwn&^>MrcSp2aie(${gOvTlLn;*3d~)`i4R}xuug9_ zyd<6Fdi&+JVS|HnPaoObRb8fgaRWq!#vAaS!AMsOlF4Gn0gwEM0c7sZ>wiS+c3llz z_S2nZjUm;BymIZG>a7mL;(PZ$9~8YQ{m3vkw9YzYU$?(c(#6~&6^=ml!Y#pwR2)=g7$~qj4|U_|*FI{S^IfPxAoI@AzZp zfo{QV|o=@1D80gF)ozoOLRSr*T5dk_<^9wQlZV<8E}fN}hOQyU z-$-_u#}r-p)TgDwleJggRZP>-DQx$_S9qQ%cAEvW7|H%U?@J&rdH%E@77}?`9=kQ%ui<>%QNs`m1Xd9Sa)Xk z+Q6;T%ShE@xHauH_7>A9s$cs&XJ{w-?wjb+;vNpH-mrHiX9Cau{nqo(rEccTmAzAR zJhs54u|W%z{QIfpEou}>4@yKvlE4~`z#}eNH0tcTn=@=Hx8d1 zt)8!Tu+J_Hq^(g#cQ)9>=_AE6X7zWw84*&kK?-tg9gV)3_2IM8+cpuOta1jwrYg!@A<^y+*VbR6xD6KF}W^n-?J5&2iG>lg?W%A z>%4KfU#%U$!(B`SiPQ)3z%5B*#YQm^Z8p89kaP*IhTI?PT(Bsn#-4QMt_P zuR>niP?%pAs-}NB-kTI!{6;Q%-i|f%U|V_ns_YNITCP6zuvfJ}-ivgA9#UV@YYwQf zdY&^ppU%Lmlo=2Y>1R>^^$gKx-I1WDIwUz{kw>ZYrz`bZa$;gdZ305SP1h@< 
zmrU<2hF|Nx@7vAxTdVqJmA!wKSQl*Rx<$=@^OC8S zcj4pG7M*v7DtWTH-?&_Bj&7WcyW*!5l%xOBFGhD}r1Vx}*ef?e<#|=QIWq%J${P}y zZr;4dVo!hMUqj4ulzHvv|s#ESqt!c*uU0 zYEz_d?uxe1;$j0IPe%&Nq`}Ucc89wkd4Ia~$2u%vGo{+)3qC}DeKNgQg*i+5K8CD& zW3?L+wT8VEaWWXL47s)GQ%GFKeY0<0FI2A`m$}Zt#w@bwPNKoP48_j|l#=7}`I`@a z3NVc@jbs%l;=J$J-4Y<~74^feH?Pkr1D94e*!ZhnR9Tr%K|z5u(?c|w?vk3Geiju7 z#gc2g@4XKTbTK?s@-03$_t9*bS+!xkFN8?#n8-zYPzz)qXG1 z+ZlF)<)HY$l9{$Ok6(FNZ7EN6^!-B=hzANWS&1r!G1ea3H|EBXB@T!KafR?9BYiTv}P| zsMBMj!h|Ha&L*^UOK4Ni?{D(N(cq2*b(?-6uTpst$FI*4D?=XkGntA?Jd1cFYqKip zL;sAdsnn+aygEkB?dOjARC!w*J91KwPhU&HvUFR3QA++7(T`h9D#9wZ>=##9!%ZGt z`@>|j@8!Tbdk70i>o(#Egbf_B9P7fps0!W%6&;;zy4s|4)dyB$6(paYKGZjFu%v3g zk->@=`N^HDZSoG5N-=4RAGlr8qiB9x{z`-8(_-%Lf~wh}abY911ar&rwoB^s)HuTx zf_`hg(2_iFXiyIfRH3YqkYI+^fRqkBA4*^(Zb@H?_pIPs#Z(_}Ch2b>DzS5H*|vt2 z&%OtW56Y_3jj(du-Z^jSs?IRdB-6#)7ij#$SL{oq*LJCOKP&Vxw3~M=u z`&5c0dQzb!ml_-!xSpQM*(J6m<$BWi=SQ-px4(@i_A_OB`a2%-cuecNR?#TxTaDsl zT0iX&e%>J=n(-!6&zCUoXvSMieKIxgmrS*7K5_-2v{z+eWDMzWO4{wbUdV$%kakj=x$VK#o=>rXqrQ70rr4Up zD%$jhlO9~}nUmi?GyB5Sq53K}m5B`l&)$95l&1-8TtdJZx>8WU3XaGf zWq3O;E8NxV5&!$R;q>U>^Gbu6@rXwr!)I-?=QVyfvaPn^t;|&`TVDAtu2R56BbW5WCA0UYMgpC&#B@CY{=J#6ozMfCLzg6;ZxF3L#VgC{A3#;Mi~i;o z<)PgQXKbIzhl>x$-d{FxUd`taC7-FUd55B1hvHtchwwkmq6&cDIhpk^fM&W-7mi|< z5SqXxO6^e3)A7x!8>{H;Xn)~8c!U2C%{B300p0d@hI!}67^P&Id=HNrzHacfIK+H^ z2Y12(b@=(gvdx)rDWIVMdAbX|3VQGnH!E~;3sbYI!Ulf93ey?K44ZFC>d7OAhVRH} zlzMwj|6~tq3GjBHqfQj@au$gFc0~m*-0#}ijFqS9m%msbln&g)(|`Zmab&8?Np7Lj zbHIb+^~s633?@QXGcs7I+qJ~qzn;9fSNp_qtG32ngS)fm8HV)_bvK4NWv`E6{1#cY zJMz!(?^Tcplq*oykqKJlfb$4;A6|of(a*z-B&dC#tSH&NOR8$7w0ncO!HEC<;{6sc zzf7#YzEkJ<7yB1Ky<@{lnunFU7O2QYE18GYg=H>DmU``U-K6p3$!Lk%j~NgBOA^b! z<6P1V$~XBQ(!7DN=1rmsvWn#14;+!n=JA%JG~GdyDLAwKkk7DZgSNRU6@0GQ?EnQTS-Y(T*&$?#mp? 
zB5zH2QY0T)EM2Fud%>B(VdYDnCZsuTFy>Jl7lh@w%PVWvBBnVljyGf5;tnY_YS#bV zR;j7$(jOHJU${`Z-zfIcJ+16#lWn4G9M;Qv6vfV!uzcN}k{=l+zrYbG3#q3$>FcWi zxGs`}h3H_O=jnG*hyFs{z$aW@k*SPtgyG)jJ0?DBcw0C=oHH|u9W8m}TTn8GHNx|U z{*yj)ADONT^R*%;X9;4bXQ*DU3I8x}l$$$#T-|4bAZ_f>d-dpi`X?vnEo@he?nQ3U zH-6J+?XZFL_cjl$1$ph&q{o=>K-GUB3eD8JiDIG-F|dg~b0%D`&ZPSk&nE|WI+d*n zF)uGFh~AO5>Ul_o7y545NT9hE?Wfk^KQa5|6A~7N(6lD2&KD*wI3IqX>uoIXfUd+S zkq2ur_-|*9sL0BC77Tf*+@`tZm%C!CUlHg3bq8zhrMN6SlVl?DF%a3-B-hKpS<^ zW}AbVuhCV5o7W#j>&BYx*loK!)>PtrQq-J-^o?UmXSD62O7{z-25kP{cPh3bf2;zO zrfV@qZX>VPq2Bfe596>Rob|`XQAIq<*N(5^O}sBD6htHg7LkHm1ex}vtEIo%a_697 zfYX14JnHIs;dmf$>k80*TEhGx7i#O?gI7)&-_L!r;;qNlRjHdiBcto_H24|A`6KKp!uVzES(gZe>XZ{B`Uv=UV$mq3rmiJ2Mi(mWlJTI_5*Cd@){ zo*x|;@jw%ORzozEWErus9cmv=IG3Jomj8um>Cyv6M#FB~OjvGiv1vFclGRuET1RMl_APq<&Z&b`4JEt%Km5Jaz=pj{!@%sZ z@6pmI78xI#(k>XWT&|+%!Pu_l%JL(Ll_UCVg+edy!u^-+yKo_b_0fvKHIrQHR_yja z`I$+sjcY>ku|}9o#XD<%HO{{`A(VWiMS#r(KOcWFOgFBLf5I96cz)~O>ugy|@Acr? z;3~sjn&m$hIsE*=ROw?|cNfx?2R3Lt-O)+aS1s6VCelp&L!=a*t|x9Qr~+4hom}nkp(PNViiG^zc%e;f#8x|_98O~|3!zl`%cfB4`a0gTzISxC zE@k`Yjgg)UZ}h>3`)^lH-@7LZeg+MS3pFT~es6`r(yji&!5gu1ugU8)Kd z6tTkK=Zb>&8rYQn`5yEq3ja%~23N;&GY=_}*v~(Iu89)X3Wnu;ma4AT3um+5rd;G@+ACSzG6GQoo8Asnv0Au&=-7B!Hp%z}h{sR#7 zCcDkg>+8de^LC>Z?8`t9Iq|=qCrra?C}A~gwn=T=NKfH9yPNCm(~k<=EQXjxRoe&p z!fKP9>HVh;F`m(me){8mbM(@g$|C))yW<>NVv8yZ-tRwncVm2R!Nz#;0)FzlZ_c3}iN^1Bn-Y?f|kTCL0lXK(PgOy(?5UugI(9&=MF=T zICOB+Zj6bEVZ+$fgfYM*JAZax^-j(Nu8HR6`nMiG*k4(co6TB>d2@BCT?Q0Pwz-q`q0P zn&rovOFgUReE+`l|Nh=1Z~R_v?4&9sDftACl{Iwu@L^(ygeGcm%MHJOe=`(h^w?>i zzk2o5vvFG#+zZa|Y+<6n+%=AX2H+&^aC4K!_fHIM|HBRK{Ds}#RW0586JOZxB)+d4 zzx?l{DKn4RYY1EnY`oy^>?G5azR+~9KvR}WYft%Ze!eFpRmYB%+* z(SE*ZbmjzFAK`F&pmcKiv|bov#_ICWy%!a>di6!#SAMK6f zc31l&{5aP{y>t$zhkWv4(33OcaF;6mlZJ&oHWQ6K5p0#lE0B(PeY$1g7r2K`McJQ` z8@FSzU*t=R;A7qoei~NO`R{}pbVu)8T?m-jx({$QnaG4SpDFTdT>DFPCBjK$jxtE= ze!s1v`90So@b$gVD&Eb1Pb9W20`$`cS{C+?-c=0Ubr|BdKYaInI0DgXA=c$>eS1=x>=OC~wzTz3IR9&&$qs6KqDJj^____LRzs45a$ZsrC)sZrsf1Yh#m!n$xbzQQ-QER3a)5KDFN 
zV%(=bK*iPcmshGxp;<@=$bDO?E(G#ILX#9^CW~L>2wymdqo31AXD$u{n=gfa!Q2WL z{+}-z{JNXYZX}$Q65QW!EuZ|j5c?Z8iEE{$TpJQ!bU(SW^32~Yg4GoqnwnHAt1#$zEOt&@2pD$mNksLDM6OR2KUxmX;p0o5w1H*S;bN4xr6LG{$$k6KVFUxJ@&kkl= zHAxW*UwF++|Br9nbc3YiGCbGEW}JIIt743a?+H8cgn{b~K(hkxQG?VOwI$cHvd9UK zf*NRagp=EK&}h=myDbYX3fdp+>-HP37!#B5y^UQz6NMUtvxJzZr>ExzKgm?*6J}TE za&*o5y-$6*he{NjHq9=LKGe-n`7s{!M?7z6CT9k7B6$>6(7=*}wqc_g8xuOPbiaW? zB>%RPmS8O%Z41M`QzroRX;-O9lcu^soNr->6X3GlAFsh*0}sM(Ghv55ocrgJlcR8=Y1eP& zSq4?RI}AGHlg=350was{B5RHJS2)pt3ukoO@Qc^5Xi0!hnx>6pqThhqf zoS(8rS-H#B_r!_P`>{Edk;`}^340x0Rgb{KB!h#9q}EcpwA^msN;oS4Zsul2qtKe0 zntc2-b2Dl(BNSk$$E}|!Mwv6@a8`-N5uHU$!wcQ7i6re$9<%xy!X>k6^@7fI9yzD=@hfnD^Gl-wL zMX`9vOEkPswXeGs2M@L8HYY#um!6xuH~#Jj}Ih&g><6v|?ycUuQerL|>YqGCg^0w#xTN);SmA{m2t zkaX*Ek=WvU38JT71pl>ggX$d}6))16ou99*uU{&A@eE#m@`F!UQDV5=Y&fMgmVcT?TQ<~niLMj!I<@|azc)!?7d1slih zxgf$Q7!EF`HCo7CMQKJ25Ii^oFY=8_a+&FI~v_So6#lULe|25+N<@RfD!lwS~ zfO;SH|I=CL#c-j{2DlBZnBvM~`bl4t_*c0_#Ow+MaQJO~9=ocEI`|rtHWCsBFiV$R z{xRzU4GR7l2>H<1RaQ1%VH}`WS61_c-j0J8;G42W@aUYs)xCP<@;mf>-|5q*QAUqM z?8v!dMOpb)9L24VM7zx>$X`oA6i|kf+46RaN`G!_2EOUfMo8xw)$EqC+fF|Bu3i-) zRuGh1#^p5B)LkZ-8KeaT#G~QhppYpY45^7-kumHP%F9K8i=Dy=pF@XL8e*OgUXT*u zDdgmcmUlX1x3Mkch9<`6oyTuTV&x9*8QJ%B;DT;H+-JOeWO5?jmU(8E$Ed0gG{1d9 zu7Xy{PeE69_;}3!q+b zgo=aR#?!uS{VQYWU^xE_r&J8i%~c2rNX~}P9Q*iPR5+2K8P6knfa{0BNq$~l$4&IX z6+2py;(UF|>uL$I)Djk!sNL7ldXE+o`7k*42#Sl>kCX*~4~p}dbU5)+Tn!zb3lzvq zD`T>Dj34kuIg<-=P>6G}n81k-#t2wLMobEv`wtwjDy+D+qyM(|6%`i0nMNE8*my~S zn0&uME>_NI!JU!B3;ptmNUFzWkHBJCqpNvc6ZDh~If&@K$$HwHfl9G8lhCGnIktUUTC6MheA%=$_ zncezMRDGw(d*8iVEzbacp{&4FOH3N4rl#;5nEVrvo+xb_Lj?~1!zv))TKG^U>~R1b zGl_{4B+}?RvJ6FKudyy}fOW`8{rk>)3JOgc!D>|bJ*3SqW1P&6 z&Pv{&^r=)q2GjifzQTmUN>Mj3FvxSx8^ts*b2K}A_#&K|((W&#^kIM7%yV}7A^h0o z<_QupXjR(yGD1LAiOzBDAo@YIfp3(QqXkk#T4hP5siC zv0duH@h=;kr7qN|!`T=`rq}nj{~FPinLSqlP!FinNJ-y%e zMt6EYo16Ihl}$1ImF?k|-y4kOIXF0+lU%z>n?L?^RJoO0X8WtYzMcbKv~e#-=jP`- zl?4<$u1j+>1N_OlSmAKotB}PmgE@Kg_1kKWOZg!)X7%G2f3Vd~c 
z{ru_lDi?ubKLQB3`DM5Yg%y-jZzH*wCqmXoj?hu!lag!;Um1U#o}M$D3u}^i^?gBq!bg8Vn->5M1*zL zAg6MC?r+caT+ctxb6xLyz5Q2zbj|$c`}=(F&;7YS_l*_gTw@vsz3Xl}Li7aqoio38Cr zo9_Ah@6!-(+aWo}7;9d}R|ReiJsSxlbMqAdnhUsBw|VW!ZoIQv_@TIM&vw67S=`aG zlpdZa5vS@~T3(>*p027gAx`dOv_%#g6QC2)+O)~ycD*P0tN&=XDI=VYXw|PthUM54 zT+F>+PETNq$(TO`nm?+nOxNQbyxhvMBBKJgk8hE*aVS42g)2N$x}Oe<0(>RhqQJ6h z1JKMcsnZZ^MFBZjU%GsG5hJ6262BK(fUA@u#>IW&i7E6#kw5UoC8VWgQHjdjRV5kW z@HbDjR!Q5v9?#F(YaSeeiL)}(K2Hy!VEPi4HvFwyx9T4`l1C{*(Qcb5bZFavVnWk~ zg^BIoJw15H0M+!SrlxUCT$bO0h)ON8m{ousCWGjSZaH%$7Ss8eODW}E;a^A-elS|QPbu+ zi{s!BINooQi>~NGe0+Z%DITeipt%$!^(W5NsxE^c+(#}S!=v5R7aLI^YaoSjHf3iq3EI!q>)Qsv0< z*)!BB$tZ^eS&Rboc`yE<@haC>O9o^Ow62>I*w7&@r}W@NmddlSa&xc+}SL(rD6^_59v#G`kB_;Fn5by{NAfA+^71ZgjuCC9X zJ&U>GxG*w7lkFLMZXP0Q8bvY-FyMM*SxM&X@4EE$^577win2QmPiW+{r&zqjT%E3l?^lOcs_@Vo^#D zmGpz}YZw<|`IF*)2m%lzcPd48FdN^#*uTlBF%^A$Lr(xj4K(INRvZ91_@}Ck3rDr< z-xbq!)eklD6R^&UhQd-3*N2g@vB#@i83lEUI^LTcDPO?HpeT4^&?E32>`c;VC7{cD zhvm?iFR)mqy7I@zR%l!j2hdr2WoKq(mE%PwNSxf}ne}g_Jwb)YP|pMcpTpOXdCmpX zBH}(X-3%|DXI&HXQs>W{5vBI>LyL7P71R$a-gRJ}Q_u*5(d-)Nd)*RG)@`f9lxif7 zKO^=O-xcZE=QOe7yP99uXu0ewd<#w=gO6#=0X_?UDQr> zW-}n$^(#ykYqg7u%h~U@AAVTV(4#Pzd!rJ|w!*O;SZAQ|CW7qv3UAS{XKQ{0CM6Uo zxEQn;Ig=XpIU4xBVS6&OCqqrO$Z>rVM2;&~7!4QzEClSKG=6LPPz}zH;hov67M6a-@?-!%i#x82iafUz3@sriO*f7|(Y(-g-@}EF%xS!QV&mf0oz-@3+bD;Fx6y$i`ft%_sPr&}B925f8;%o0G=_TRiEtfBDZ|mW$ zA%74uF5=;zG`O(N#?Ps2*dVoj{d(LPkKqL}eE!7D6XCJ?hYcq{57BK7cBykz z13b5E_+_X#h28McG>KL0+cV)yS~(PZESlWe|i?@3j zi1>uPK-yZ~MU6}n7k_Zs^cX#(67%v+c8M%M-Z@%k+w^zi#%GysW zph3T-$SmC&{ycwL=CH9oq}gb*@jRxL($6?*UadGUE^Aj{dlop)k#zRup^5$gH!McOZF56a1vJQt_NK#Rx;o6 zI_-C^Sp0d{{eDs(7JWffS5PoP^e3a3X}?|=nV+|WH-THO^1I5+J5W&Uo}9#{p=L`> z@C$dW17n;bKkiT?PS3xSITvA|qeFut08j3{5BQnbd+F)Hg`tMo17It5Pj+@6Ke3Og zqO#=YD?gMXffQ=lLcg5J5)5a6|0=MKNr#z-IqS1%XlUq?LK$gkJt%f)s=p4GoXRkN z7gOD(HvS$?BqA2|MME_q*Rt6SO&Wu53#!{S2ZCVCZ(o$gO9m{cy=A_JDs-&Po}cU9 z&cEPWaC_sxwqNjhmh5z%xZm;buUZIg8=lNUj8QHWC92iKxl@t?86k>8+IjJygP5o& z84XfgTc@B3p;j{TqO`Enynx!heeUU}=C(Pv$@8f= 
z^btHTtvUI*bIrU2ufIB>NH_SWg=Lu_0yv-!%(J_eEn5b^TMdeQI<-kl^XuwgfyV_r z#r*Wt>sldmdGu?|m$D6-1HB4k!CJ%{JaXExtrOky^S)ygIM}runq6$cG7ef80@>;= zwq6x}z})|oZn8G0DNL{JhGzG&RnX(7e0DnwC4p_ahDtbKS9}12U+;ffy*Q$40Gq9tVeR-6?ptRk_N56CIDN_T0hK0nkFQ^v)I{#Wv zadOY#(9p(oyM;p+(dmu+lhy6H(@^2Aqg0 za$io>Jq9DvSJ$g1x|RHz?`2bpu&%Tpc(VtL{;R#sfZ-i5L|#EKwXTGvz5RN#Bo-5n zNEHr8A3OHQB&m8_d@`CdG{n1!aF1QMP0h#xrrOcyIH+yl1tL-403ZyGwt29}ef1co ze#ZPr?1|^Jb`hLP+9*o#l1bX}xz9Zn*y&Ci2aq`cH-`i8d{t6ZzdX`1NUQ5U%hUO;#j9`T!50~Q-$;wW=+ygXRrCc{@S&kD?-PA z0iOkv-~2%wn*HFmXWD}oF8{Q&2z2x4mI~IaDedOXSQI9 zXa@Snowhwi1EGhNJ=1;p|LSowM@M~Tm)1-XG>l`Xs}k4z!wZ3cZFerd@ib;A>deCY z7w4WIBAxqf8yU7{q9D#!KwhvL+kMlt=7tOP-B47L>`9%A!O<_^R{yw8h;^KA!@Fmz$4^Dzkt(U?@d z*_9)1ZR&LlMQjFSv>w8uVB%v5;u#bMj@?4+K0M8`&yNq7Tbv6e1KNQ3$M{0S)y{KE zA#F}eOC!S)jN*p@07#|^9+~e;f97&2NOk(hEc2WFK{}F+Q@rnFO%shwu zs@4>t~TkR1R3 literal 0 HcmV?d00001 From 99e02fefbb611d64d07f840e67269dff7767cd92 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Tue, 17 Oct 2023 12:43:51 -0700 Subject: [PATCH 162/648] Address Feedback Signed-off-by: Sean Smith --- 3.test_cases/6.stable-diffusion/README.md | 57 +++++++++++-------- .../multi-node/2.train.sbatch | 6 +- .../6.stable-diffusion/single-node/Dockerfile | 3 + .../6.stable-diffusion/single-node/build.sh | 3 - .../calculate_number_of_parameters.py | 2 + .../single-node/create-conda.sh | 10 ++-- .../6.stable-diffusion/single-node/run.sh | 3 - 7 files changed, 47 insertions(+), 37 deletions(-) delete mode 100755 3.test_cases/6.stable-diffusion/single-node/build.sh delete mode 100644 3.test_cases/6.stable-diffusion/single-node/run.sh diff --git a/3.test_cases/6.stable-diffusion/README.md b/3.test_cases/6.stable-diffusion/README.md index 0a95481b..26420cc7 100644 --- a/3.test_cases/6.stable-diffusion/README.md +++ b/3.test_cases/6.stable-diffusion/README.md @@ -1,6 +1,6 @@ # Stable Diffusion Test Case -DISCLAIMER: The scripts presented in this repo work but I believe there is room for optimization to further accelerate distributed 
training +DISCLAIMER: The scripts presented in this test case serve as a working examples and are not optimized for performances. We will follow MosaicML's stable diffusion benchmarking scripts provided [here](https://github.com/mosaicml/diffusion-benchmark/tree/main). It uses the `'stabilityai/stable-diffusion-2-base'` model. You can check the number of parameters by executing: @@ -10,26 +10,36 @@ Model has 1289.952427 M parameters and 865.910724 M trainable_params ``` -Just for simplifaction of testing, we have separate scripts for Single node and Multi node Distributed Training. We will also present a comparison of throughput (images/second) achieved with P4de (A100 80GB) and P5 (H100 80GB) nodes. +Just for simplifaction of testing, we have separate scripts for Single node and Multi node Distributed Training. We will also present a comparison of throughput (images/second) achieved with P4de (A100 80GB) and P5 (H100 80GB) instances. ## 0. Conda and Docker Make sure you are able to create conda environments and docker containers. For example, to install Miniconda, please follow the steps below: ```bash +# install in the shared directory so compute nodes can source the environment: +cd /apps + # Get the appropriate Miniconda_version from https://repo.anaconda.com/miniconda/ wget -O miniconda.sh "https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-Linux-x86_64.sh" \ && bash miniconda.sh -b -p ./.conda \ - && ./.conda/bin/conda init bash - -# Amazon Linux 2 instance -source /home/ec2-user/.bashrc - -# Ubuntu instance -source /home/ubuntu/.bashrc + && ./.conda/bin/conda init bash + +# Detect the OS based on /etc/os-release +os=$(grep '^ID=' /etc/os-release | awk -F= '{print $2}' | tr -d '"') + +if [[ "$os" == "amzn" ]]; then + source /home/ec2-user/.bashrc +elif [[ "$os" == "ubuntu" ]]; then + source /home/ubuntu/.bashrc +else + echo "Unknown OS: $os" +fi ``` -## 1. 
Single Node Setup +## 1 Single Node Training + +### 1.1 Single Node Setup When testing the latest version of MosaicML's Composer, we found that different ways to set up the environment with a PyTorch Nightly conda environment or a Nvidia PyTorch Docker container. For single or multi-node testing, you could use either to run distributed training. Next, we present both approaches. @@ -42,8 +52,6 @@ install_requires = [ 'tqdm>=4.62.3,<5', 'torchmetrics>=0.10.0,<1.1', 'torch_optimizer>=0.3.0,<0.4', - #'torchvision>=0.13.1,<0.17', - #'torch>=1.13.1,<2.1.1', 'requests>=2.26.0,<3', 'numpy>=1.21.5,<1.27.0', 'psutil>=5.8.0,<6', @@ -57,24 +65,30 @@ install_requires = [ ``` Once this change is done, you can install composer as `pip3 install -e .` -The `single-node` folder also has the Dockerfile and `build.sh` and `run.sh` commands to build the image and run the container. +The `single-node` folder also has the Dockerfile and a Makefile with commands to build the image and run the container. ```bash +# build the image docker build -t mosaicml-stable-diffusion . +# or you can do +# make build -``` -```bash +# run it docker run --gpus all --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 -it mosaicml-stable-diffusion /bin/bash +# or you can do +# make run ``` -### 1.1 Single Node Training +### 1.2 Single Node Training Once you are in the conda environment or the container, run the following to kickstart training. 
In all these tests, we are using synthetic data generated by `diffusion-benchmark/data.py` ```bash composer benchmark.py --use_ema --use_synth_data --device_train_microbatch_size 4 ``` + And you should see training starts as: + ```bash Namespace(batch_size=2048, image_size=512, remote=None, local='/tmp/mds-cache/mds-laion-2/', use_synth_data=True, model_name='stabilityai/stable-diffusion-2-base', use_ema=True, wandb_name=None, wandb_project=None, device_train_microbatch_size=4) wandb: Tracking run with wandb version 0.13.11 @@ -90,8 +104,7 @@ num_nodes: 1 rank_zero_seed: 3179589898 ****************************** -train Epoch 0: 38%|█████████▍ | 18/48 [03:28<05:29, 10.99s/ba, loss/train/total=0.1463] - +train Epoch 0: 38%|█████████▍ | 18/48 [03:28<05:29, 10.99s/ba, loss/train/total=0.1463] ``` To see GPU utilization, start another shell on the EC2 instance and run either `watch nvidia-smi` or `nvidia-smi -l 2` if you get a segmentation error which happens frequently if you launched the EC2 instance with a DLAMI. You can also run nvidia-smi in docker as: @@ -100,7 +113,7 @@ To see GPU utilization, start another shell on the EC2 instance and run either ` docker run --rm -it --gpus all nvidia/cuda:12.2.0-devel-ubuntu20.04 watch nvidia-smi ``` -### 1.2 Single Node Training Benchmarks +### 1.3 Single Node Training Benchmarks We ran tests on P4de (A100 80GB) and P5 (H100 80GB) machines and here is a comparison. @@ -117,7 +130,7 @@ We ran tests on P4de (A100 80GB) and P5 (H100 80GB) machines and here is a compa | 20 | 8 | No | 197.90 | 361.15 | 1.83x | | 32 | 8 | Yes | 204.22 | 385.31 | 1.89x | -#### Scaled Dot Product Attention +### 1.4 Scaled Dot Product Attention HuggingFace Diffusers has a set_attn_processor method that you can use to plug and play different attention processors. 
A list of attention processors can be found [here](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py) @@ -136,13 +149,11 @@ The older self.unet.set_attn_processor(AttnProcessor()) gives Cuda OOM error wit More details on this can be found here: https://pytorch.org/blog/accelerated-diffusers-pt-20/ - - ## 2. Multi Node Tests ### 2.1 Multi-Node Training -For the multi-node training we've created a `Dockerfile`, and Slurm submit script and a `Makefile` to build the docker image and convert it to an enroot image. To get started please follow the guide [AWS ParallelCluster Distributed Training](https://github.com/aws-samples/awsome-distributed-training/tree/main/1.architectures/2.aws-parallelcluster). Before starting this section make sure you have the following setup: +For the multi-node training we've created a `Dockerfile`, and Slurm submit script and a `Makefile` to build the docker image and convert it to an enroot image. To get started please follow the guide [AWS ParallelCluster Distributed Training](../../1.architectures/2.aws-parallelcluster). Before starting this section make sure you have the following setup: * AWS ParallelCluster >= 3.7.0 * Pyxis diff --git a/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch b/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch index ba2afc27..05d2372e 100644 --- a/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch +++ b/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch @@ -1,7 +1,7 @@ #!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 #SBATCH --nodes=2 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs #SBATCH --job-name=mosaicml-stable-diffusion # name of your job @@ -60,6 +60,7 @@ function run_compose() { benchmark.py \ --use_ema --use_synth_data --device_train_microbatch_size 4 } + NODE_RANK=1 for (( NODE_RANK=1; NODE_RANK<${NNODES}; NODE_RANK++ )) do @@ -67,8 +68,9 @@ do echo "Run compute node ${NODE} for rank: ${NODE_RANK}" run_compose & done + NODE_RANK=0 NODE=${HEAD_NODE} echo "Run master node ${NODE} for rank: ${NODE_RANK}" run_compose -wait \ No newline at end of file +wait diff --git a/3.test_cases/6.stable-diffusion/single-node/Dockerfile b/3.test_cases/6.stable-diffusion/single-node/Dockerfile index 0964e1c9..64432fe3 100644 --- a/3.test_cases/6.stable-diffusion/single-node/Dockerfile +++ b/3.test_cases/6.stable-diffusion/single-node/Dockerfile @@ -1,3 +1,6 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT-0 + FROM nvcr.io/nvidia/pytorch:23.08-py3 RUN git clone https://github.com/mosaicml/diffusion-benchmark.git /wd diff --git a/3.test_cases/6.stable-diffusion/single-node/build.sh b/3.test_cases/6.stable-diffusion/single-node/build.sh deleted file mode 100755 index 859c8a44..00000000 --- a/3.test_cases/6.stable-diffusion/single-node/build.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker build -t mosaicml-stable-diffusion . diff --git a/3.test_cases/6.stable-diffusion/single-node/calculate_number_of_parameters.py b/3.test_cases/6.stable-diffusion/single-node/calculate_number_of_parameters.py index f2da4532..7bbb7a93 100644 --- a/3.test_cases/6.stable-diffusion/single-node/calculate_number_of_parameters.py +++ b/3.test_cases/6.stable-diffusion/single-node/calculate_number_of_parameters.py @@ -1,3 +1,5 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT-0 import composer import torch import torch.nn.functional as F diff --git a/3.test_cases/6.stable-diffusion/single-node/create-conda.sh b/3.test_cases/6.stable-diffusion/single-node/create-conda.sh index d4c28d9c..10831b42 100644 --- a/3.test_cases/6.stable-diffusion/single-node/create-conda.sh +++ b/3.test_cases/6.stable-diffusion/single-node/create-conda.sh @@ -1,5 +1,7 @@ #!/bin/bash -# + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT-0 conda create -n pt-nightlies python=3.10 @@ -15,8 +17,4 @@ pip3 install diffusers["torch"] transformers pip3 install wandb # We will install Composer from source. First clone the Repo -git clone https://github.com/mosaicml/composer.git - - - - +git clone https://github.com/mosaicml/composer.git \ No newline at end of file diff --git a/3.test_cases/6.stable-diffusion/single-node/run.sh b/3.test_cases/6.stable-diffusion/single-node/run.sh deleted file mode 100644 index ebbad77c..00000000 --- a/3.test_cases/6.stable-diffusion/single-node/run.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run --gpus all --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 -it mosaicml-stable-diffusion /bin/bash From 2831296f18a751bce693eb120a7730000e72fb36 Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Tue, 17 Oct 2023 16:02:41 -0700 Subject: [PATCH 163/648] First commit --- .../860m_res_256_pretrain.yaml | 199 ++++++++++++++++++ 3.test_cases/8.nemo-multimodal/Dockerfile | 98 +++++++++ 3.test_cases/8.nemo-multimodal/README.md | 80 +++++++ 3.test_cases/8.nemo-multimodal/bcm.yaml | 11 + 3.test_cases/8.nemo-multimodal/config.yaml | 64 ++++++ .../download_multimodal.yaml | 72 +++++++ 6 files changed, 524 insertions(+) create mode 100644 3.test_cases/8.nemo-multimodal/860m_res_256_pretrain.yaml create mode 100644 3.test_cases/8.nemo-multimodal/Dockerfile create mode 100644 3.test_cases/8.nemo-multimodal/README.md create mode 100644 
3.test_cases/8.nemo-multimodal/bcm.yaml create mode 100644 3.test_cases/8.nemo-multimodal/config.yaml create mode 100644 3.test_cases/8.nemo-multimodal/download_multimodal.yaml diff --git a/3.test_cases/8.nemo-multimodal/860m_res_256_pretrain.yaml b/3.test_cases/8.nemo-multimodal/860m_res_256_pretrain.yaml new file mode 100644 index 00000000..803426ab --- /dev/null +++ b/3.test_cases/8.nemo-multimodal/860m_res_256_pretrain.yaml @@ -0,0 +1,199 @@ +run: + name: stable_diffusion_860m_res_256_pretrain + results_dir: ${base_results_dir}/${.name} + time_limit: "2-00:00:00" + dependency: "singleton" + +name: stable-diffusion-train + +trainer: + devices: 8 + num_nodes: 8 + accelerator: gpu + precision: 16 + logger: False # logger provided by exp_manager + enable_checkpointing: False + replace_sampler_ddp: False + max_epochs: -1 # PTL default. In practice, max_steps will be reached first. + max_steps: 82500 # consumed_samples = global_step * micro_batch_size * data_parallel_size * accumulate_grad_batches + log_every_n_steps: 10 + accumulate_grad_batches: 1 # do not modify, grad acc is automatic for training megatron models + gradient_clip_val: 1.0 + benchmark: False + enable_model_summary: True + +exp_manager: + explicit_log_dir: ${training.run.results_dir}/results + exp_dir: null + name: nemo_stable_diffusion + create_wandb_logger: False + wandb_logger_kwargs: + project: stable-diffusion + group: nemo-sd + name: nemo_stable_diffusion + resume: True + create_checkpoint_callback: True + create_tensorboard_logger: True + checkpoint_callback_params: + every_n_train_steps: 1000 + every_n_epochs: 0 + monitor: reduced_train_loss + filename: 'nemo-stable-diffusion--{reduced_train_loss:.2f}-{step}-{consumed_samples}' + resume_if_exists: True + resume_ignore_no_checkpoint: True + ema: + enable: True + decay: 0.9999 + validate_original_weights: False + every_n_steps: 1 + cpu_offload: False + + +model: + precision: ${training.trainer.precision} + # specify micro_batch_size, 
global_batch_size, and model parallelism + # gradient accumulation will be done automatically based on data_parallel_size + micro_batch_size: 128 # limited by GPU memory + global_batch_size: 8192 # will use more micro batches to reach global batch size + + linear_start: 0.00085 + linear_end: 0.012 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: images + cond_stage_key: captions + image_size: 64 + channels: 4 + cond_stage_trainable: false + conditioning_key: crossattn # check + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False + scale_by_std: False + ckpt_path: + ignore_keys: [] + parameterization: eps + clip_denoised: True + load_only_unet: False + cosine_s: 8e-3 + given_betas: + original_elbo_weight: 0 + v_posterior: 0 + l_simple_weight: 1 + use_positional_encodings: False + learn_logvar: False + logvar_init: 0 + beta_schedule: linear + loss_type: l2 + + concat_mode: True + cond_stage_forward: + text_embedding_dropout_rate: 0 + fused_opt: True + inductor: True + inductor_cudagraphs: False + capture_cudagraph_iters: -1 # -1 to disable + channels_last: True + + unet_config: + _target_: nemo.collections.multimodal.modules.stable_diffusion.diffusionmodules.openaimodel.UNetModel + from_pretrained: + from_NeMo: True #Must be specified when from pretrained is not None, False means loading unet from HF ckpt + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: + - 4 + - 2 + - 1 + num_res_blocks: 2 + channel_mult: + - 1 + - 2 + - 4 + - 4 + num_heads: 8 + use_spatial_transformer: true + transformer_depth: 1 + context_dim: 768 + use_checkpoint: False + legacy: False + use_flash_attention: True + + first_stage_config: + _target_: nemo.collections.multimodal.models.stable_diffusion.ldm.autoencoder.AutoencoderKL + from_pretrained: /path/to/vae.bin + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 #Never used + in_channels: 
3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + _target_: nemo.collections.multimodal.modules.stable_diffusion.encoders.modules.FrozenMegatronCLIPEmbedder + restore_from_path: /path/to/nemo_clip.nemo + device: cuda + freeze: True + layer: "last" + # For compatibility of history version that uses HF clip model + # _target_: nemo.collections.multimodal.modules.stable_diffusion.encoders.modules.FrozenCLIPEmbedder + # version: openai/clip-vit-large-patch14 + # device: cuda + # max_length: 77 + + # miscellaneous + seed: 666 + resume_from_checkpoint: null # manually set the checkpoint file to load from + apex_transformer_log_level: 30 # Python logging level displays logs with severity greater than or equal to this + gradient_as_bucket_view: True # PyTorch DDP argument. Allocate gradients in a contiguous bucket to save memory (less fragmentation and buffer memory) + + optim: + name: fused_adam + lr: 0.00008192 + weight_decay: 0. 
+ betas: + - 0.9 + - 0.999 + sched: + name: WarmupHoldPolicy + warmup_steps: 10000 + hold_steps: 10000000000000 # Incredibly large value to hold the lr as constant + + # Nsys profiling options + nsys_profile: + enabled: False + start_step: 10 # Global batch to start profiling + end_step: 10 # Global batch to end profiling + ranks: [ 0 ] # Global rank IDs to profile + gen_shape: False # Generate model and kernel details including input shapes + + data: + num_workers: 16 + train: + dataset_path: + - ${data_dir}/your_dataset/wdinfo.pkl + augmentations: + resize_smallest_side: 256 + center_crop_h_w: 256, 256 + horizontal_flip: False + filterings: + + webdataset: + infinite_sampler: False + local_root_path: ${data_dir}/your_dataset/tarfiles_reorganized/ # each tarfile in wdinfo is relative to this diff --git a/3.test_cases/8.nemo-multimodal/Dockerfile b/3.test_cases/8.nemo-multimodal/Dockerfile new file mode 100644 index 00000000..caa69dc6 --- /dev/null +++ b/3.test_cases/8.nemo-multimodal/Dockerfile @@ -0,0 +1,98 @@ +FROM nvcr.io/ea-bignlp/ea-mm-participants/bignlp-mm:23.05-py3 + +ARG EFA_INSTALLER_VERSION=latest +ARG AWS_OFI_NCCL_VERSION=v1.7.3-aws +ARG NCCL_TESTS_VERSION=master +ARG NCCL_VERSION=v2.19.3-1 +RUN apt-get update -y +RUN apt-get remove -y --allow-change-held-packages \ + libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 libnccl2 libnccl-dev + +RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \ + git \ + gcc \ + vim \ + kmod \ + openssh-client \ + openssh-server \ + build-essential \ + curl \ + autoconf \ + libtool \ + gdb \ + automake \ + python3-distutils \ + cmake \ + apt-utils \ + devscripts \ + debhelper \ + libsubunit-dev \ + check \ + pkg-config + +RUN mkdir -p /var/run/sshd +RUN sed -i 's/[ #]\(.*StrictHostKeyChecking \).*/ \1no/g' /etc/ssh/ssh_config && \ + echo " UserKnownHostsFile /dev/null" >> /etc/ssh/ssh_config && \ + sed -i 's/#\(StrictModes \).*/\1no/g' /etc/ssh/sshd_config +ENV LD_LIBRARY_PATH 
/usr/local/cuda/extras/CUPTI/lib64:/opt/amazon/openmpi/lib:/opt/nccl/build/lib:/opt/amazon/efa/lib:/opt/aws-ofi-nccl/install/lib:/usr/local/lib:$LD_LIBRARY_PATH +ENV PATH /opt/amazon/openmpi/bin/:/opt/amazon/efa/bin:/usr/bin:/usr/local/bin:$PATH +RUN curl https://bootstrap.pypa.io/get-pip.py -o /tmp/get-pip.py \ + && python3 /tmp/get-pip.py \ + && pip3 install awscli pynvml + +################################################# +# Install NVIDIA GDRCopy +RUN git clone https://github.com/NVIDIA/gdrcopy.git /opt/gdrcopy \ + && cd /opt/gdrcopy \ + && make lib_install install \ + && cd /opt/gdrcopy/tests \ + && make \ + && mv gdrcopy_copylat gdrcopy_copybw gdrcopy_sanity gdrcopy_apiperf /usr/bin/ + +################################################# +## Install EFA installer +RUN cd $HOME \ + && curl -O https://efa-installer.amazonaws.com/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz \ + && tar -xf $HOME/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz \ + && cd aws-efa-installer \ + && ./efa_installer.sh -y -g -d --skip-kmod --skip-limit-conf --no-verify \ + && rm -rf $HOME/aws-efa-installer + +################################################### +## Install NCCL +RUN git clone https://github.com/NVIDIA/nccl -b ${NCCL_VERSION} /opt/nccl \ + && cd /opt/nccl \ + && make -j src.build CUDA_HOME=/usr/local/cuda \ + NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_90,code=sm_90" + +################################################### +## Install AWS-OFI-NCCL plugin +RUN apt-get install libtool autoconf cmake nasm unzip pigz parallel nfs-common build-essential hwloc libhwloc-dev libjemalloc2 libnuma-dev numactl libjemalloc-dev preload htop iftop liblapack-dev libgfortran5 ipcalc wget curl devscripts debhelper check libsubunit-dev fakeroot pkg-config dkms -y +RUN export OPAL_PREFIX="" \ + && git clone https://github.com/aws/aws-ofi-nccl.git /opt/aws-ofi-nccl \ + && cd /opt/aws-ofi-nccl \ + && git checkout 
${AWS_OFI_NCCL_VERSION} \ + && ./autogen.sh \ + && ./configure --prefix=/opt/aws-ofi-nccl/install \ + --with-libfabric=/opt/amazon/efa/ \ + --with-cuda=/usr/local/cuda \ + --with-nccl=/opt/nccl/build \ + --with-mpi=/opt/amazon/openmpi/ \ + && make && make install + +################################################### +## Install NCCL-tests +RUN git clone https://github.com/NVIDIA/nccl-tests.git /opt/nccl-tests \ + && cd /opt/nccl-tests \ + && git checkout ${NCCL_TESTS_VERSION} \ + && make MPI=1 \ + MPI_HOME=/opt/amazon/openmpi/ \ + CUDA_HOME=/usr/local/cuda \ + NCCL_HOME=/opt/nccl/build \ + NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_90,code=sm_90" + + +############################################## +## Nemo-multimodal dependencie +COPY requirements.txt /workspace/ +RUN pip3 install -r /workspace/requirements.txt diff --git a/3.test_cases/8.nemo-multimodal/README.md b/3.test_cases/8.nemo-multimodal/README.md new file mode 100644 index 00000000..e8a5dfba --- /dev/null +++ b/3.test_cases/8.nemo-multimodal/README.md @@ -0,0 +1,80 @@ +# Train Stable Diffusion with NeMo-Multimodal + +## Prerequisites +0. You have access to nemo-multimodal +1. Have a cluster ready +2. Generate API Key: https://ngc.nvidia.com/setup/api-key +3. Install NGC CLI: https://ngc.nvidia.com/setup/installers/cli +4. 
Login +``` +docker login nvcr.io +Username: $oauthtoken +Password: API_KEY +``` +To install libnvidia-container cli: +https://github.com/NVIDIA/libnvidia-container +https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html + +``` +curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \ + && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \ + sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ + sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list \ + && \ + sudo apt-get update + +sudo apt-get install libnvidia-container1 +sudo apt-get install libnvidia-container-tools +``` + + +## Pull Image + +``` +docker pull nvcr.io/ea-bignlp/ea-mm-participants/bignlp-mm:23.05-py3 +``` + +## Run container + +``` + docker run -it --gpus all --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 nvcr.io/ea-bignlp/ea-mm-participants/bignlp-mm:23.05-py3 bash +``` + +## Copy launcher scripts to host + +``` +docker cp -a :/opt/NeMo-Megatron-Launcher/ ./nemo-src + +``` +## Build customized docker image + +``` +docker build -t nemo-multimodal . 
+``` + +## Convert image + +``` +enroot import -o /apps/nemo-multimodal.sqsh dockerd://nemo-multimodal + +``` + +## Create Conda env + +``` +# Create conda env +# Create +wget -O miniconda.sh "https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-Linux-x86_64.sh" \ + && bash miniconda.sh -b -p /apps/.conda \ + && /apps/.conda/bin/conda init bash + +source /home/ubuntu/.bashrc +conda create --name nemo-multimodal python=3.10 + +source activate nemo-multimodal + +pip3 install -r requirements.txt + +``` + +## Update config files in nemo-src diff --git a/3.test_cases/8.nemo-multimodal/bcm.yaml b/3.test_cases/8.nemo-multimodal/bcm.yaml new file mode 100644 index 00000000..0b27cdec --- /dev/null +++ b/3.test_cases/8.nemo-multimodal/bcm.yaml @@ -0,0 +1,11 @@ +partition: awsankur-p5 +account: null +exclusive: True +gpus_per_task: null +gpus_per_node: 8 +mem: 0 +job_name_prefix: "nemo-multimodal-" +gres: "gpu:8" +srun_args: + - "--no-container-mount-home" +stderr_to_stdout: True diff --git a/3.test_cases/8.nemo-multimodal/config.yaml b/3.test_cases/8.nemo-multimodal/config.yaml new file mode 100644 index 00000000..c84f56a7 --- /dev/null +++ b/3.test_cases/8.nemo-multimodal/config.yaml @@ -0,0 +1,64 @@ +defaults: + - _self_ + - cluster: bcm # Leave it as bcm even if using bcp. It will be ignored for bcp. + - data_preparation: multimodal/download_multimodal + - training: stable_diffusion/860m_res_256_pretrain + - conversion: null + - fine_tuning: null + - evaluation: null + - fw_inference: null + - export: stable_diffusion/export_stable_diffusion + - external_conversion: null + - override hydra/job_logging: stdout + +stages: + - data_preparation + +cluster_type: bcm # bcm or bcp. If bcm, it must match - cluster above. +launcher_scripts_path: /apps/nemo-src/launcher_scripts # Path to NeMo Megatron Launch scripts, should ends with /launcher_scripts +data_dir: ${launcher_scripts_path}/data # Location to store and read the data. 
+base_results_dir: ${launcher_scripts_path}/results # Location to store the results, checkpoints and logs. +container_mounts: # List of additional paths to mount to container. They will be mounted to same path. + - /fsx:/fsx +container: /apps/nemo-multimodal.sqsh + +wandb_api_key_file: null # File where the w&B api key is stored. Key must be on the first line. + +env_vars: + NCCL_DEBUG: INFO # Logging level for NCCL. Set to "INFO" for debug information + ##NCCL_PROTO: simple # Protocol NCCL will use. Set to "simple" for AWS + TRANSFORMER_OFFLINE: 1 + FI_EFA_USE_DEVICE_RDMA: 1 + FI_PROVIDER: efa + NCCL_LAUNCH_MODE: parallel + FI_EFA_FORK_SAFE: 1 + FI_EFA_ENABLE_SHM_TRANSFER: 1 + +# GPU Mapping +numa_mapping: + enable: True # Set to False to disable all mapping (performance will suffer). + mode: unique_contiguous # One of: all, single, single_unique, unique_interleaved or unique_contiguous. + scope: node # Either node or socket. + cores: all_logical # Either all_logical or single_logical. + balanced: True # Whether to assing an equal number of physical cores to each process. + min_cores: 1 # Minimum number of physical cores per process. + max_cores: 8 # Maximum number of physical cores per process. Can be null to use all available cores. + +# hydra settings +hydra: + run: + dir: . + output_subdir: null + +# Do not modify below, use the values above instead. 
+data_preparation_config: ${hydra:runtime.choices.data_preparation} +training_config: ${hydra:runtime.choices.training} +fine_tuning_config: ${hydra:runtime.choices.fine_tuning} +prompt_learning_config: ${hydra:runtime.choices.prompt_learning} +adapter_learning_config: ${hydra:runtime.choices.adapter_learning} +ia3_learning_config: ${hydra:runtime.choices.ia3_learning} +evaluation_config: ${hydra:runtime.choices.evaluation} +conversion_config: ${hydra:runtime.choices.conversion} +export_config: ${hydra:runtime.choices.export} +fw_inference_config: ${hydra:runtime.choices.fw_inference} +external_conversion_config: ${hydra:runtime.choices.external_conversion} diff --git a/3.test_cases/8.nemo-multimodal/download_multimodal.yaml b/3.test_cases/8.nemo-multimodal/download_multimodal.yaml new file mode 100644 index 00000000..d6a7696a --- /dev/null +++ b/3.test_cases/8.nemo-multimodal/download_multimodal.yaml @@ -0,0 +1,72 @@ +run: + name: download_multimodal + results_dir: ${base_results_dir}/${.name} + time_limit: "4:00:00" + dependency: "singleton" + bcp_preproc_npernode: 2 # 2 should be safe to use and x2 times faster. + +dataset_repo_id: laion/laion-art # huggingface dataset repo id, in the format of {user_or_company}/{dataset_name} +# See https://huggingface.co/datasets?task_categories=task_categories:text-to-image&sort=downloads +dataset_output_root: /fsx/laion-art + +download_parquet: + enable: True + output_dir: ${..dataset_output_root}/parquet + parquet_subpartitions: 3 # increase the number of partitions to reduce the download time of each job + parquet_pattern: "*.parquet" # files to be included in the hugging face repo + +download_images: + enable: True + parquet_pattern: ${..download_parquet.parquet_pattern} + input_dir: ${..download_parquet.output_dir} + output_dir: ${..dataset_output_root}/tarfiles_raw + num_parquets_downloaded: 1 # Number of parquet files in hugging face repo. 
+ # To fill in this parameter, go to the hugging face repo -> Files and versions -> count the number of parquet files + # in all directories. + # Only used if download_images is queued as a dependency before download_parquet finishes. + # Otherwise script will automatically count the number of parquet files downloaded. + # Note that node_array_size for this stage will be parquet_subpartitions x num_parquets_downloaded + download_num_processes: -1 # set to number of CPUs in the job (-1 defaults to slurm cpus_per_task) + download_num_threads: 64 # tune by monitoring CPU usage + img2dataset_additional_arguments: # see https://github.com/rom1504/img2dataset for complete list of parameters + encode_quality: 95 # jpeg compression quality (100 is best, but still lossy) + resize_mode: no # optionally resize after downloading to save disk space + # you can also override these arguments to suit your own needs: + # input_format (default is parquet), caption_col (default is TEXT), url_col (default is URL) + # see https://github.com/rom1504/img2dataset/tree/main/dataset_examples for some examples + +reorganize_tar: + enable: True + input_dir: ${..download_images.output_dir} + output_dir: ${..dataset_output_root}/tarfiles_reorganized + node_array_size: 2 # increase the number of concurrent jobs to reduce the time for each job + tar_chunk_size: 1000 # number of training examples in each output tar file + file_ext_in_tar: # target extensions in each + - .jpg + - .txt + +precache_encodings: + enable: True + input_dir: ${..reorganize_tar.output_dir} + output_dir: ${..dataset_output_root}/tarfiles_precached + tar_chunk_size: ${..reorganize_tar.tar_chunk_size} + node_array_size: 4 # increase the number of concurrent jobs to reduce the time for each job + precache_config_path: ${launcher_scripts_path}/conf/data_preparation/multimodal/precache_sd.yaml + +generate_wdinfo: + enable: True + input_dir: ${..precache_encodings.output_dir} + # use ${..precache_encodings.output_dir} if you're 
doing precaching, otherwise use ${..reorganize_tar.output_dir} + tar_chunk_size: ${..precache_encodings.tar_chunk_size} + file_ext_in_tar: + - .pickle + output_wdinfo_path: ${..dataset_output_root}/wdinfo.pkl + +merge_source_tar: + enable: False + append_tar_dir: ${..precache_encodings.output_dir} + source_dir: ${..precache_encodings.input_dir} + source_extensions: # objects in the source tar files that are to be added to the precached tar files + - .json + - .txt + node_array_size: 1 # increase the number of jobs to reduce the time for each job From d242c71c481d9e37a038aa1983a98da9b2f75cfe Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Wed, 18 Oct 2023 12:19:40 -0500 Subject: [PATCH 164/648] remove export NIL --- 3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml b/3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml index 32b0aa7e..bb0f5703 100644 --- a/3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml +++ b/3.test_cases/2.nemo-launcher/conf.template/cluster/bcm.yaml @@ -6,7 +6,6 @@ gpus_per_node: 8 mem: 0 job_name_prefix: "nemo-megatron-" gres: "gpu:8" -export: "NIL" srun_args: - "--no-container-mount-home" stderr_to_stdout: True From 2453442462fdf2d1f9667e0e3c7ca8a8f5ae7066 Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Wed, 18 Oct 2023 13:02:49 -0700 Subject: [PATCH 165/648] Added updated requirements for img2dataset --- 3.test_cases/8.nemo-multimodal/requirements.txt | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 3.test_cases/8.nemo-multimodal/requirements.txt diff --git a/3.test_cases/8.nemo-multimodal/requirements.txt b/3.test_cases/8.nemo-multimodal/requirements.txt new file mode 100644 index 00000000..83d904a4 --- /dev/null +++ b/3.test_cases/8.nemo-multimodal/requirements.txt @@ -0,0 +1,10 @@ +dask +huggingface_hub>=0.13.0 +hydra-core>=1.2.0,<1.3 +img2dataset +omegaconf>=2.2,<2.3 
+pynvml==11.4.1 +requests==2.26.0 +tqdm==4.62.3 +zstandard==0.15.2 +opencv-python-headless==4.8.0.74 From b6154059a419eb2e0618defa276e0f9f94982bb3 Mon Sep 17 00:00:00 2001 From: Maxime Hugues Date: Tue, 24 Oct 2023 17:28:28 -0600 Subject: [PATCH 166/648] Fix docker role to auto accept install Close #25 --- 2.amazon_machine_images/roles/docker/tasks/main.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/2.amazon_machine_images/roles/docker/tasks/main.yml b/2.amazon_machine_images/roles/docker/tasks/main.yml index cb6f45aa..9b644b5b 100644 --- a/2.amazon_machine_images/roles/docker/tasks/main.yml +++ b/2.amazon_machine_images/roles/docker/tasks/main.yml @@ -1,11 +1,6 @@ --- - name: "Install docker" - ansible.builtin.shell: amazon-linux-extras install docker - -- name: "Enable docker" - ansible.builtin.systemd: - name: docker.service - enabled: true + ansible.builtin.shell: amazon-linux-extras install -y docker - name: "Add remote user to docker group for non-privileged users to run docker" user: @@ -13,6 +8,11 @@ groups: "docker" append: yes +- name: "Enable docker" + ansible.builtin.systemd: + name: docker.service + enabled: true + - name: "(Re)start docker" ansible.builtin.systemd: name: docker.service From 36b35fd40e9550765dea0865443ffdb7be9c36e5 Mon Sep 17 00:00:00 2001 From: EC2 Default User Date: Mon, 23 Oct 2023 09:18:02 +0000 Subject: [PATCH 167/648] Add basic test infrastructure CI is fundamental recipe for maintaining repocitory healthy. Currently many dockerfiles are broken, because of some small mistakes. This patch add basic verification procedures for docker containers. Idealy running CI script should be part of precommit check. 
Signed-off-by: Dmitry Monakhov --- .../1.megatron-lm/test_megatron_lm.py | 12 +++++++ .../2.nemo-launcher/test_nemo_launcher.py | 7 ++++ README.md | 15 +++++++- conftest.py | 35 +++++++++++++++++++ 4 files changed, 68 insertions(+), 1 deletion(-) create mode 100644 3.test_cases/1.megatron-lm/test_megatron_lm.py create mode 100644 3.test_cases/2.nemo-launcher/test_nemo_launcher.py create mode 100644 conftest.py diff --git a/3.test_cases/1.megatron-lm/test_megatron_lm.py b/3.test_cases/1.megatron-lm/test_megatron_lm.py new file mode 100644 index 00000000..4462d7de --- /dev/null +++ b/3.test_cases/1.megatron-lm/test_megatron_lm.py @@ -0,0 +1,12 @@ +#!/usr/bin/env python3 +import pytest +import os + +def test_img_data_processing(docker_build): + print(f"module file {os.path.dirname(__file__)}") + print(f"cwd {os.getcwd()}") + docker_build('megatron-preprocess', '0.data-preprocessing.Dockerfile') + +def test_img_megatron_training(docker_build, docker_run): + img = docker_build('megatron-preprocess', '2.distributed-training.Dockerfile') + docker_run(img, ['python3', '-c', 'import torch']) diff --git a/3.test_cases/2.nemo-launcher/test_nemo_launcher.py b/3.test_cases/2.nemo-launcher/test_nemo_launcher.py new file mode 100644 index 00000000..46ce7356 --- /dev/null +++ b/3.test_cases/2.nemo-launcher/test_nemo_launcher.py @@ -0,0 +1,7 @@ +import pytest +import os + + +def test_0_aws_nemo_megatron(docker_build, docker_run): + img = docker_build('aws-nemo-megatron', '0.NemoMegatron-aws-optimized.Dockerfile') + docker_run(img, ['python3', '-c', 'import torch']) diff --git a/README.md b/README.md index 58e580c3..6f4914d4 100644 --- a/README.md +++ b/README.md @@ -50,7 +50,19 @@ All test cases are under `3.test_cases/`. You can go in each test case directory Utilities scripts and micro-benchmarks examples are set under `4.validation_scripts/`. -## 5. Contributors +## 5. CI + +Integration tests are written in [pytest](https://docs.pytest.org). Just run: +``` +pytest . 
+``` + +Alternatively you can run tests with out capturing stdout and keeping all docker images an other artifacts. +``` +pytest -s --keep-artifacts=t +``` + +## 6. Contributors Thanks to all the contributors for building, reviewing and testing. @@ -64,3 +76,4 @@ Thanks to all the contributors for building, reviewing and testing. - Sean Smith - seaam@ - Jianying Lang - langjian@ - Maxime Hugues - maxhaws@ +- Dmitry Monakhov dmonakhov@ \ No newline at end of file diff --git a/conftest.py b/conftest.py new file mode 100644 index 00000000..73a74819 --- /dev/null +++ b/conftest.py @@ -0,0 +1,35 @@ +import pytest +import subprocess +import os + + +def pytest_addoption(parser): + parser.addoption("--keep-artifacts", action="store") + +@pytest.fixture +def change_test_dir(request): + _orig_dir = os.getcwd() + os.chdir(os.path.dirname(request.path)) + yield + os.chdir(_orig_dir) + +@pytest.fixture +def docker_build(change_test_dir, request): + img_list = [] + def _build(name, dockerfile, test_tag=".test"): + img_name=name + test_tag + subprocess.check_call(['docker', 'build', '-t', img_name, '-f', dockerfile, '.']) + img_list.append(img_name) + return img_name + + yield _build + if request.config.option.keep_artifacts == None: + for img in img_list: + subprocess.check_call(['docker', 'image', 'rm', img]) + +@pytest.fixture +def docker_run(change_test_dir): + def _run(name, cmd, args=["--rm"]): + subprocess.check_call(['docker', 'run'] + args + [name] + cmd) + + yield _run From 001af1d3ca7f61088619f0772b19d7df3045a2ba Mon Sep 17 00:00:00 2001 From: EC2 Default User Date: Thu, 26 Oct 2023 17:03:12 +0000 Subject: [PATCH 168/648] 3.test_cases/1.megatron-lm: add a note to simplify guide flow Signed-off-by: Dmitry Monakhov --- 3.test_cases/1.megatron-lm/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/3.test_cases/1.megatron-lm/README.md b/3.test_cases/1.megatron-lm/README.md index 6bdb335a..93fdd669 100644 --- a/3.test_cases/1.megatron-lm/README.md +++ 
b/3.test_cases/1.megatron-lm/README.md @@ -91,6 +91,7 @@ Below are the steps you need to follow: wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt xz -d oscar-1GB.jsonl.xz + cd ${TEST_CASE_PATH} # return to original testcase directory ``` 6. Now you copy the file `1.data-preprocessing.sbatch` or its content on your cluster then submit a preprocessing jobs with the command below: From 0f97095c67d3773d181f47268867f3cc390a2c33 Mon Sep 17 00:00:00 2001 From: EC2 Default User Date: Mon, 23 Oct 2023 09:18:04 +0000 Subject: [PATCH 169/648] update: 3.test_cases/1.megatron-lm - update base image to latest image version: nvcr.io/nvidia/pytorch:23.09-py3 - update EFA installer to latest version: 1.28.0 - update aws_ofi_nccl to latest version: 1.7.3-aws Fixes: - do not cleanup all hpcx libraries, otherwise torch complais like this: python -c 'import torch' File "", line 1, in File "/usr/local/lib/python3.10/dist-packages/torch/__init__.py", line 234, in from torch._C import * # noqa: F403 ImportError: libucm.so.0: cannot open shared object file: No such file or directory In fact we do not have to remove all hpcx libraries, we just need to remove openmp to avoid conflict with /opt/amazon/ompi - remove NCCL library update hank since nvcr.io/nvidia/pytorch:23.09-py3 already has latest NCCL - EFA: add --no-verify to installation script, otherwise build fails on instances with EFAs - install libhwloc-dev, required for aws_ofi_nccl plugin Signed-off-by: Dmitry Monakhov --- .../2.distributed-training.Dockerfile | 26 ++++++------------- 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile b/3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile index c9feb9e1..6ba0ec9c 100644 --- a/3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile +++ b/3.test_cases/1.megatron-lm/2.distributed-training.Dockerfile @@ -1,12 
+1,10 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -FROM nvcr.io/nvidia/pytorch:23.08-py3 +FROM nvcr.io/nvidia/pytorch:23.09-py3 -ARG EFA_INSTALLER_VERSION=1.27.0 +ARG EFA_INSTALLER_VERSION=1.28.0 ARG AWS_OFI_NCCL_VERSION=v1.7.3-aws -ARG NCCL_TESTS_VERSION=master -ARG NCCL_VERSION=v2.18.5-1 ARG OPEN_MPI_PATH=/opt/amazon/openmpi ###################### @@ -14,12 +12,11 @@ ARG OPEN_MPI_PATH=/opt/amazon/openmpi ###################### RUN apt-get update -y RUN apt-get remove -y --allow-change-held-packages \ - libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 \ - libnccl2 libnccl-dev -RUN rm -rf /opt/hpcx \ + libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 + +RUN rm -rf /opt/hpcx/ompi \ && rm -rf /usr/local/mpi \ && rm -rf /usr/local/ucx \ - && rm -f /etc/ld.so.conf.d/hpcx.conf \ && ldconfig ###################### @@ -39,6 +36,7 @@ RUN DEBIAN_FRONTEND=noninteractive apt install -y --allow-unauthenticated \ autoconf \ libtool \ gdb \ + libhwloc-dev \ automake \ cmake \ apt-utils && \ @@ -66,15 +64,8 @@ RUN cd $HOME \ && curl -O https://efa-installer.amazonaws.com/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz \ && tar -xf $HOME/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz \ && cd aws-efa-installer \ - && ./efa_installer.sh -y --skip-kmod + && ./efa_installer.sh -y --skip-kmod --no-verify -################################################### -## Install NCCL -RUN git clone https://github.com/NVIDIA/nccl /opt/nccl \ - && cd /opt/nccl \ - && git checkout ${NCCL_VERSION}} \ - && make -j$(nproc) src.build CUDA_HOME=/usr/local/cuda \ - NVCC_GENCODE="-gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_60,code=sm_60" ################################################### ## Install AWS-OFI-NCCL plugin @@ -89,11 +80,10 @@ RUN export OPAL_PREFIX="" \ --with-cuda=/usr/local/cuda \ 
--with-mpi=/opt/amazon/openmpi/ \ --enable-platform-aws \ - && make && make install + && make -j $(nproc) && make install ################################################### RUN rm -rf /var/lib/apt/lists/* -ENV LD_PRELOAD=/opt/nccl/build/lib/libnccl.so RUN echo "hwloc_base_binding_policy = none" >> /opt/amazon/openmpi/etc/openmpi-mca-params.conf \ && echo "rmaps_base_mapping_policy = slot" >> /opt/amazon/openmpi/etc/openmpi-mca-params.conf From 39f5c2643b241b1b2d4ef5a3847ffe949b30afa0 Mon Sep 17 00:00:00 2001 From: EC2 Default User Date: Mon, 23 Oct 2023 11:02:24 +0000 Subject: [PATCH 170/648] update 3.test_cases/2.nemo-launcher - update from to latest image version: nvcr.io/ea-bignlp/ga-participants/nemofw-training:23.08.03 - update EFA installer to latest version: 1.28.0 - update aws_ofi_nccl to latest version: 1.7.3-aws - cleanup hpcx openmpi, to avoid conflict with /opt/amazon/openmpi - install nccl library from official repository instead of compilation - EFA: add --no-verify to installation script, otherwise build fails on instances with EFAs - install libhwloc-dev package, required for aws_ofi_nccl plugin Signed-off-by: Dmitry Monakhov --- .../0.NemoMegatron-aws-optimized.Dockerfile | 48 ++++++++++++------- 3.test_cases/2.nemo-launcher/README.md | 6 +-- 2 files changed, 34 insertions(+), 20 deletions(-) diff --git a/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile b/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile index 139763ab..f879be08 100644 --- a/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile +++ b/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile @@ -4,38 +4,53 @@ # DOCKER_BUILDKIT=1 docker build --progress plain -t aws-nemo-megatron:latest . 
# Customized from: https://github.com/NVIDIA/NeMo-Megatron-Launcher/blob//csp_tools/aws/Dockerfile -FROM nvcr.io/ea-bignlp/nemofw-training:23.07-py3 +FROM nvcr.io/ea-bignlp/ga-participants/nemofw-training:23.08.03 ARG DEBIAN_FRONTEND=noninteractive -ENV EFA_INSTALLER_VERSION=latest -ENV NCCL_VERSION=inc_nsteps -ENV AWS_OFI_NCCL_VERSION=1.4.0-aws +ENV EFA_INSTALLER_VERSION=1.28.0 +ENV NCCL_VERSION=2.18.5-1+cuda12.2 +ENV AWS_OFI_NCCL_VERSION=1.7.3-aws + + +RUN apt-get update -y \ + && apt-get remove -y --allow-change-held-packages \ + libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 \ + && rm -rf /opt/hpcx/ompi \ + && rm -rf /usr/local/mpi \ + && rm -rf /usr/local/ucx \ + && ldconfig + +RUN echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64 /" >> //etc/apt/sources.list.d/cuda.list \ + && curl https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/3bf863cc.pub > /tmp/3bf863cc.pub \ + && echo "34bb9f7e66744d7b2944d0565db6687560d5d6e3 /tmp/3bf863cc.pub" | sha1sum --check \ + && apt-key add /tmp/3bf863cc.pub \ + && unlink /tmp/3bf863cc.pub \ + && apt-get update -y \ + && apt-get install -y libnccl2=${NCCL_VERSION} libnccl-dev=${NCCL_VERSION} \ + && apt-get clean -# Install AWS NCCL -RUN cd /tmp \ - && git clone https://github.com/NVIDIA/nccl.git -b ${NCCL_VERSION} \ - && cd nccl \ - && make -j src.build BUILDDIR=/usr/local \ - # nvcc to target p4 instances - NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80" \ - && rm -rf /tmp/nccl # EFA RUN apt-get update && \ + apt-get install -y libhwloc-dev && \ cd /tmp && \ curl -O https://efa-installer.amazonaws.com/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \ tar -xf aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \ cd aws-efa-installer && \ - ./efa_installer.sh -y -g -d --skip-kmod --skip-limit-conf && \ + ./efa_installer.sh -y -g -d --skip-kmod --skip-limit-conf --no-verify && \ ldconfig && \ rm -rf /tmp/aws-efa-installer /var/lib/apt/lists/* && \ + apt-get 
clean && \ /opt/amazon/efa/bin/fi_info --version -ENV LD_LIBRARY_PATH=/opt/amazon/efa/lib:$LD_LIBRARY_PATH -ENV PATH=/opt/amazon/efa/bin:$PATH +ENV LD_LIBRARY_PATH=/opt/amazon/openmpi/lib:/opt/amazon/efa/lib:$LD_LIBRARY_PATH +ENV PATH=/opt/amazon/openmpi/bin/:/opt/amazon/efa/bin:$PATH # NCCL EFA Plugin (Dockefile original) # NOTE: Stick to this version! Otherwise, will get 'ncclInternalError: Internal check failed.' +RUN apt-get update -y \ + && apt-get install -y libhwloc-dev + RUN mkdir -p /tmp && \ cd /tmp && \ curl -LO https://github.com/aws/aws-ofi-nccl/archive/refs/tags/v${AWS_OFI_NCCL_VERSION}.tar.gz && \ @@ -52,8 +67,7 @@ RUN mkdir -p /tmp && \ rm -rf /tmp/aws-ofi/nccl # NCCL -RUN echo "/usr/local/lib" >> /etc/ld.so.conf.d/local.conf && \ - echo "/opt/amazon/efa/lib" >> /etc/ld.so.conf.d/efa.conf && \ +RUN echo "/opt/amazon/efa/lib" >> /etc/ld.so.conf.d/efa.conf && \ ldconfig ENV OMPI_MCA_pml=^ucx \ diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index fcf781c9..dde56fdc 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -22,9 +22,9 @@ The following pre-requisites are needed to run this example: You will need to setup the following environment variables before running the scripts. 
: ```bash -export NEMO_VERSION=23.07 +export NEMO_VERSION=23.08.03 export REPO=aws-nemo-megatron -export TAG=$NEMO_VERSION-py3 +export TAG=$NEMO_VERSION export TARGET_PATH=/fsx/nemo-launcher-$NEMO_VERSION # must be a shared filesystem export TEST_CASE_PATH=/home/ec2-user/2.nemo-launcher # where you copy the test case or set to your test case path export ENROOT_IMAGE=/apps/${REPO}_${TAG}.sqsh @@ -73,7 +73,7 @@ cd $TARGET_PATH enroot start --mount $TARGET_PATH:/workspace/mount_dir \ --env NVIDIA_VISIBLE_DEVICES=void \ $ENROOT_IMAGE \ - cp -a /opt/NeMo-Megatron-Launcher/launcher_scripts /opt/NeMo-Megatron-Launcher/auto_configurator /opt/FasterTransformer /workspace/mount_dir/ + cp -a /opt/NeMo-Megatron-Launcher/launcher_scripts /opt/NeMo-Megatron-Launcher/auto_configurator /workspace/mount_dir/ ``` The `NVIDIA_VISIBLE_DEVICES` variable is set to void to prevent the process to check for the Nvidia driver presence (since we don't need GPUs here). From 30a2e9c3cbcc77e44c133179d63e76c875f729ec Mon Sep 17 00:00:00 2001 From: EC2 Default User Date: Thu, 26 Oct 2023 17:03:12 +0000 Subject: [PATCH 171/648] 3.test_cases/2.nemo-launcher: add note about nemo images access requirements Signed-off-by: Dmitry Monakhov --- 3.test_cases/2.nemo-launcher/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index dde56fdc..ea244f1f 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -35,7 +35,7 @@ cd $TEST_CASE_PATH You will retrieve the container image from Nvidia, build an optimized container for EFA and, convert it into an Enroot file so we can run it on our cluster. -1. You have a registered account with Nvidia and can access NGC. Retrieve the NGC API key following [instructions from Nvidia](https://docs.nvidia.com/ngc/gpu-cloud/ngc-user-guide/index.html#generating-api-key). +1. 
You have a registered account with Nvidia and can access NGC. Retrieve the NGC API key following [instructions from Nvidia](https://docs.nvidia.com/ngc/gpu-cloud/ngc-user-guide/index.html#generating-api-key) and request access [here](https://developer.nvidia.com/nemo-framework/join) in order to be able to pull NeMo images. 2. Configure NGC as shown below using the command below, when requested use `$oauthtoken` for the login and the API key from NGC fro the password. ```bash From 585f8838159a561011812ab6e3d6f81a117498c0 Mon Sep 17 00:00:00 2001 From: EC2 Default User Date: Thu, 26 Oct 2023 17:18:31 +0000 Subject: [PATCH 172/648] 3.test_cases/2.nemo-launcher: add llama2 pre-train example It is almost identical to gpt3 example, the only difference is that we have to download tokenizer.model first. Originaly tokenizer.model file is part of Meta's pretrained model bundle which requires permission to download, Let's fetch it from microsoft/Llama-2-Onnx which use the same tokenizer. Signed-off-by: Dmitry Monakhov --- .../5.bmk-pretrain-llama-7b.sh | 90 ++++++++++++++++ .../6.bmk-pretrain-llama-70b.sh | 101 ++++++++++++++++++ 3.test_cases/2.nemo-launcher/README.md | 21 +++- 3 files changed, 211 insertions(+), 1 deletion(-) create mode 100644 3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh create mode 100644 3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh diff --git a/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh b/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh new file mode 100644 index 00000000..0e307998 --- /dev/null +++ b/3.test_cases/2.nemo-launcher/5.bmk-pretrain-llama-7b.sh @@ -0,0 +1,90 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +set -exo pipefail +[[ -z "${TARGET_PATH}" ]] \ + && { echo Please set environment variable TARGET_PATH ; exit 1 ; } \ + || echo TARGET_PATH=$TARGET_PATH + +################################################################################ +# 000: Modify this section to define pre-training configuration: model size, +# number of nodes, max. pre-training steps, job's max. runtime. +################################################################################ +## Pre-train llama2-7b on 2 nodes for 5 steps +export MODEL=llama +export MODEL_SIZE=llama2_7b +export NUM_NODES=2 +export RUNTIME=4h +export MAX_STEPS=30 +export MBS=2 # setting for A100 80GB (p4de, p5), reduce to 1 for A100 40GB (p4d) +declare -a MODEL_ARGS=( + training.model.micro_batch_size=${MBS} + + training.model.tokenizer.model=${TARGET_PATH}/data/llama2/tokenizer.model + # When node_count < 8, needs full activations checkpointing. These're settings found on + # Nemo repo's Jenkin script. + # + # Below settings is similar to 22.09, except that 22.09 funnily didn't OOM with + # activations_checkpoint_num_layers=0. + training.model.activations_checkpoint_granularity='full' + training.model.activations_checkpoint_method='block' + training.model.activations_checkpoint_num_layers=1 +) + + +################################################################################ +# 010: Advance users can modify this stanza to customize benchmarking behavior. +################################################################################ +declare -a BMK_ARGS=( + # Disable validation, as we're only interested to measure the training time. 
+ training.trainer.limit_val_batches=0.0 + + # Disable wandb_logger + training.exp_manager.create_wandb_logger=False + + # Ignore checkpoints + training.exp_manager.create_checkpoint_callback=False + training.exp_manager.resume_if_exists=False + + # https://github.com/NVIDIA/NeMo/pull/6181/files + training.model.data.data_impl=mock + training.model.data.data_prefix=[] +) + + +################################################################################ +# 020: Internal settings. +################################################################################ +WORKSPACE_CONT=$TARGET_PATH +CONT_RESULT_DIR=${WORKSPACE_CONT}/results + + +# Dev/test feature (off by default) to force each pre-training run outputs to a separate directory. +: "${UNIQUE_OUTPUT_DIR:=0}" +if [[ ${UNIQUE_OUTPUT_DIR} -eq 1 ]]; then + # For debugging: each run has its own output dir. + TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) + CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} + + BMK_ARGS+=(base_results_dir=${CONT_RESULT_DIR}) + + echo " + #################### + This run will write to directory ${CONT_RESULT_DIR} + #################### + " +fi + + +################################################################################ +# 030: Here we go... +################################################################################ +HYDRA_FULL_ERROR=1 python3 $TARGET_PATH/launcher_scripts/main.py \ + stages=[training] \ + training=${MODEL}/${MODEL_SIZE} \ + training.trainer.num_nodes=$NUM_NODES \ + training.trainer.max_steps=$MAX_STEPS \ + training.trainer.val_check_interval=$MAX_STEPS \ + "${BMK_ARGS[@]}" "${MODEL_ARGS[@]}" "$@" diff --git a/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh b/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh new file mode 100644 index 00000000..5ee47452 --- /dev/null +++ b/3.test_cases/2.nemo-launcher/6.bmk-pretrain-llama-70b.sh @@ -0,0 +1,101 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +set -exo pipefail +[[ -z "${TARGET_PATH}" ]] \ + && { echo Please set environment variable TARGET_PATH ; exit 1 ; } \ + || echo TARGET_PATH=$TARGET_PATH + +################################################################################ +# 000: Modify this section to define pre-training configuration: model size, +# number of nodes, max. pre-training steps, job's max. runtime. +################################################################################ +## Pre-train llama2-7b on 2 nodes for 5 steps +export MODEL=llama +export MODEL_SIZE=llama2_70b +export NUM_NODES=16 +export TIME_LIMIT="7-00:00:00" +export MAX_STEPS=100 +export MBS=1 + +declare -a MODEL_ARGS=( + training.model.micro_batch_size=${MBS} + training.model.tensor_model_parallel_size=4 + training.model.pipeline_model_parallel_size=4 + training.model.virtual_pipeline_model_parallel_size=20 + training.model.overlap_p2p_comm=True + training.model.batch_p2p_comm=False + training.model.gc_interval=0 + + training.model.tokenizer.model=${TARGET_PATH}/data/llama2/tokenizer.model + + ## Activation checkpointing + #training.model.activations_checkpoint_granularity='full' + #training.model.activations_checkpoint_method='block' + #training.model.activations_checkpoint_num_layers=1 + # + ## Not applicable for A100 + #training.model.transformer_engine=False + #training.model.ub_tp_comm_overlap=False +) + + +################################################################################ +# 010: Advance users can modify this stanza to customize benchmarking behavior. +################################################################################ +declare -a BMK_ARGS=( + # Disable validation, as we're only interested to measure the training time. 
+ training.trainer.limit_val_batches=0.0 + + # Disable wandb_logger + training.exp_manager.create_wandb_logger=False + + # Ignore checkpoints + training.exp_manager.create_checkpoint_callback=False + training.exp_manager.resume_if_exists=False + + ################################ + + # https://github.com/NVIDIA/NeMo/pull/6181/files + training.model.data.data_impl=mock + training.model.data.data_prefix=[] +) + + +################################################################################ +# 020: Internal settings. +################################################################################ +WORKSPACE_CONT=$TARGET_PATH +CONT_RESULT_DIR=${WORKSPACE_CONT}/results-v2 +CONT_TOKENIZER_DIR=${WORKSPACE_CONT}/data/bpe + +# Dev/test feature (off by default) to force each pre-training run outputs to a separate directory. +: "${UNIQUE_OUTPUT_DIR:=0}" +if [[ ${UNIQUE_OUTPUT_DIR} -eq 1 ]]; then + # For debugging: each run has its own output dir. + TIMESTAMP=$(date +'%Y%m%d-%H%M%Sutc-%N')-$((RANDOM)) + CONT_RESULT_DIR=${CONT_RESULT_DIR}-${TIMESTAMP} + + BMK_ARGS+=(base_results_dir=${CONT_RESULT_DIR}) + + echo " + #################### + This run will write to directory ${CONT_RESULT_DIR} + #################### + " +fi + + +################################################################################ +# 030: Here we go... 
+################################################################################ +HYDRA_FULL_ERROR=1 python3 $TARGET_PATH/launcher_scripts/main.py \ + stages=[training] \ + training=${MODEL}/${MODEL_SIZE} \ + training.run.time_limit=$TIME_LIMIT \ + training.trainer.num_nodes=$NUM_NODES \ + training.trainer.max_steps=$MAX_STEPS \ + training.trainer.val_check_interval=$MAX_STEPS \ + "${BMK_ARGS[@]}" "${MODEL_ARGS[@]}" "$@" diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index ea244f1f..c8ac2997 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -217,9 +217,28 @@ training.trainer.num_nodes=$NUM_NODES | └── key 'trainer -> num_nodes' in the `.yaml` file. ``` +## 8. Pre-Training llama2 +This section assumes that you went through the previous sections and 1/ retrieved and built the AWS optimized NemoMegatron container, 2/ setup the NemoMegatron environment, and 3/ download the vocabularies. Actions will be almost the same as for 5/ Pre-training GPT3, let do it. -## 8. References +1. Download llama2 tokenizer +``` +mkdir -p $TARGET_PATH/data/llama2 +curl -L https://github.com/microsoft/Llama-2-Onnx/raw/main/tokenizer.model > $TARGET_PATH/data/llama2/tokenizer.model + +``` +2. Source the NemoMegatron environment created earlier. + ```bash + source ${TARGET_PATH}/.venv/bin/activate + ``` +3. To pre-train a llama2-7b on two instances with mock dataset, run the commands below to let : + ```bash + cd $TARGET_PATH + $TEST_CASE_PATH/5.bmk-pretrain-llama-7b.sh + ``` +4. Next stests are absolutely the same as for 5/ Pre-training GPT3, the only difference is that result directory is `$TARGET_PATH/results/llama2_7b` + +## 9. 
References - Nvidia NemoMegatron Documentation: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/megatron.html - Train Large Scale NLP with Nemo Megatron from Nvidia: https://docs.nvidia.com/launchpad/ai/base-command-nemo/latest/index.html From 35a788a5bad8a3dfd9729cefbeda07e6727e5989 Mon Sep 17 00:00:00 2001 From: EC2 Default User Date: Mon, 23 Oct 2023 11:03:26 +0000 Subject: [PATCH 173/648] update 4.validation_scripts/0.nccl-tests - update base image to nvidia/cuda:12.2.0-devel-ubuntu22.04 - update EFA installer to latest version: 1.28.0 - update aws_ofi_nccl to latest version: 1.7.3-aws - install libhwloc-dev package, required for aws_ofi_nccl plugin - add build unittest Signed-off-by: Dmitry Monakhov --- .../0.nccl-tests/0.nccl-tests.Dockerfile | 15 ++++++++------- .../0.nccl-tests/test_nccl_tests.py | 7 +++++++ 2 files changed, 15 insertions(+), 7 deletions(-) create mode 100644 4.validation_scripts/0.nccl-tests/test_nccl_tests.py diff --git a/4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile b/4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile index e36e49b1..3a7a55dd 100644 --- a/4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile +++ b/4.validation_scripts/0.nccl-tests/0.nccl-tests.Dockerfile @@ -1,11 +1,11 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -FROM nvidia/cuda:12.2.0-devel-ubuntu20.04 +FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 -ARG EFA_INSTALLER_VERSION=1.26.0 -ARG AWS_OFI_NCCL_VERSION=v1.7.1-aws +ARG EFA_INSTALLER_VERSION=1.28.0 +ARG AWS_OFI_NCCL_VERSION=v1.7.3-aws ARG NCCL_TESTS_VERSION=master -ARG NCCL_VERSION=2.18.1 +ARG NCCL_VERSION=2.18.5 RUN apt-get update -y RUN apt-get remove -y --allow-change-held-packages \ @@ -72,7 +72,7 @@ RUN cd $HOME \ ## Install NCCL RUN git clone https://github.com/NVIDIA/nccl -b v${NCCL_VERSION}-1 /opt/nccl \ && cd /opt/nccl \ - && make -j src.build CUDA_HOME=/usr/local/cuda \ + && make -j $(nproc) src.build CUDA_HOME=/usr/local/cuda \ NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_90,code=sm_90" ################################################### @@ -88,14 +88,15 @@ RUN export OPAL_PREFIX="" \ --with-cuda=/usr/local/cuda \ --with-nccl=/opt/nccl/build \ --with-mpi=/opt/amazon/openmpi/ \ - && make && make install + && make -j $(nproc) && make install ################################################### ## Install NCCL-tests RUN git clone https://github.com/NVIDIA/nccl-tests.git /opt/nccl-tests \ && cd /opt/nccl-tests \ && git checkout ${NCCL_TESTS_VERSION} \ - && make MPI=1 \ + && make -j $(nproc) \ + MPI=1 \ MPI_HOME=/opt/amazon/openmpi/ \ CUDA_HOME=/usr/local/cuda \ NCCL_HOME=/opt/nccl/build \ diff --git a/4.validation_scripts/0.nccl-tests/test_nccl_tests.py b/4.validation_scripts/0.nccl-tests/test_nccl_tests.py new file mode 100644 index 00000000..2637b9c4 --- /dev/null +++ b/4.validation_scripts/0.nccl-tests/test_nccl_tests.py @@ -0,0 +1,7 @@ +import pytest +import os + + +def test_0_nccl_test(docker_build, docker_run): + img = docker_build('nccl-test', '0.nccl-tests.Dockerfile') + #docker_run(img, ['python3', '-c', 'import torch']) From 6d161cce227717191adc41ceafcd47b01416a5b9 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Fri, 27 Oct 2023 19:09:52 -0500 Subject: 
[PATCH 174/648] Quick Create Link --- 1.architectures/1.vpc_network/README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/1.architectures/1.vpc_network/README.md b/1.architectures/1.vpc_network/README.md index 8a25dea3..8d1f30b1 100644 --- a/1.architectures/1.vpc_network/README.md +++ b/1.architectures/1.vpc_network/README.md @@ -6,6 +6,14 @@ You will find here a collection of cloudformation templates to deploy a VPC with The architectures each deploy a VPC, public and private subnets, gateways and endpoints. You can deploy them through the AWS Console or AWS CLI. +### 1. Quick Create + + + +[
 1-Click Deploy 🚀 
](https://console.aws.amazon.com/cloudformation/home?#/stacks/quickcreate?templateURL=https%3A%2F%2Fawsome-distributed-training.s3.amazonaws.com%2Ftemplates%2F1.vpc-multi-az.yaml&stackName=ML-VPC) + + + ### 1. Template VPC Multiple AZs This template deploys a VPC with private subnets in multiple Availability zones. Public subnets can be optionally created in every AZ (done by default). This template serves most use cases. From d2eb8ff460f56f6a5d2b5a47e2b0da11e6e3761d Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Fri, 27 Oct 2023 17:45:33 -0700 Subject: [PATCH 175/648] Quick Create for S3 Bucket Signed-off-by: Sean Smith --- 1.architectures/0.s3/README.md | 7 +++++++ 1.architectures/1.vpc_network/README.md | 4 ---- 2 files changed, 7 insertions(+), 4 deletions(-) create mode 100644 1.architectures/0.s3/README.md diff --git a/1.architectures/0.s3/README.md b/1.architectures/0.s3/README.md new file mode 100644 index 00000000..a141ba3e --- /dev/null +++ b/1.architectures/0.s3/README.md @@ -0,0 +1,7 @@ +# S3 Bucket + +This template creates a S3 Bucket with all public access disabled. To deploy it, click the link below: + +## 1. Quick Create Link + +[
 1-Click Deploy 🚀 
](https://console.aws.amazon.com/cloudformation/home?#/stacks/quickcreate?templateURL=https://awsome-distributed-training.s3.amazonaws.com/templates/0.private-bucket.yaml&stackName=ML-S3) \ No newline at end of file diff --git a/1.architectures/1.vpc_network/README.md b/1.architectures/1.vpc_network/README.md index 8d1f30b1..896f3e2f 100644 --- a/1.architectures/1.vpc_network/README.md +++ b/1.architectures/1.vpc_network/README.md @@ -8,12 +8,8 @@ The architectures each deploy a VPC, public and private subnets, gateways and en ### 1. Quick Create - - [
 1-Click Deploy 🚀 
](https://console.aws.amazon.com/cloudformation/home?#/stacks/quickcreate?templateURL=https%3A%2F%2Fawsome-distributed-training.s3.amazonaws.com%2Ftemplates%2F1.vpc-multi-az.yaml&stackName=ML-VPC) - - ### 1. Template VPC Multiple AZs This template deploys a VPC with private subnets in multiple Availability zones. Public subnets can be optionally created in every AZ (done by default). This template serves most use cases. From c99383e02fbd8b718b529ec116b9219c8e45e9bf Mon Sep 17 00:00:00 2001 From: AWS ParallelCluster user Date: Tue, 31 Oct 2023 18:13:07 +0000 Subject: [PATCH 176/648] Updated training and data preparation configs Signed-off-by: AWS ParallelCluster user --- .../860m_res_256_pretrain.yaml | 6 +- 3.test_cases/8.nemo-multimodal/Dockerfile | 7 +- 3.test_cases/8.nemo-multimodal/config.yaml | 2 +- .../download_multimodal.yaml | 22 +-- ...diffusion_860m_res_256_pretrain_hydra.yaml | 175 ++++++++++++++++++ 5 files changed, 188 insertions(+), 24 deletions(-) create mode 100644 3.test_cases/8.nemo-multimodal/stable_diffusion_860m_res_256_pretrain_hydra.yaml diff --git a/3.test_cases/8.nemo-multimodal/860m_res_256_pretrain.yaml b/3.test_cases/8.nemo-multimodal/860m_res_256_pretrain.yaml index 803426ab..f246160c 100644 --- a/3.test_cases/8.nemo-multimodal/860m_res_256_pretrain.yaml +++ b/3.test_cases/8.nemo-multimodal/860m_res_256_pretrain.yaml @@ -8,7 +8,7 @@ name: stable-diffusion-train trainer: devices: 8 - num_nodes: 8 + num_nodes: 1 accelerator: gpu precision: 16 logger: False # logger provided by exp_manager @@ -187,7 +187,7 @@ model: num_workers: 16 train: dataset_path: - - ${data_dir}/your_dataset/wdinfo.pkl + - /fsx/laion-art/wdinfo.pkl augmentations: resize_smallest_side: 256 center_crop_h_w: 256, 256 @@ -196,4 +196,4 @@ model: webdataset: infinite_sampler: False - local_root_path: ${data_dir}/your_dataset/tarfiles_reorganized/ # each tarfile in wdinfo is relative to this + local_root_path: /fsx/laion-art/tarfiles_reorganized/task0001 # each tarfile in 
wdinfo is relative to this diff --git a/3.test_cases/8.nemo-multimodal/Dockerfile b/3.test_cases/8.nemo-multimodal/Dockerfile index caa69dc6..95f9852d 100644 --- a/3.test_cases/8.nemo-multimodal/Dockerfile +++ b/3.test_cases/8.nemo-multimodal/Dockerfile @@ -3,7 +3,7 @@ FROM nvcr.io/ea-bignlp/ea-mm-participants/bignlp-mm:23.05-py3 ARG EFA_INSTALLER_VERSION=latest ARG AWS_OFI_NCCL_VERSION=v1.7.3-aws ARG NCCL_TESTS_VERSION=master -ARG NCCL_VERSION=v2.19.3-1 +ARG NCCL_VERSION=v2.18.5-1 RUN apt-get update -y RUN apt-get remove -y --allow-change-held-packages \ libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 libnccl2 libnccl-dev @@ -92,6 +92,11 @@ RUN git clone https://github.com/NVIDIA/nccl-tests.git /opt/nccl-tests \ NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_90,code=sm_90" + +RUN rm -rf /var/lib/apt/lists/* +ENV LD_PRELOAD /opt/nccl/build/lib/libnccl.so + + ############################################## ## Nemo-multimodal dependencie COPY requirements.txt /workspace/ diff --git a/3.test_cases/8.nemo-multimodal/config.yaml b/3.test_cases/8.nemo-multimodal/config.yaml index c84f56a7..eaf8d47c 100644 --- a/3.test_cases/8.nemo-multimodal/config.yaml +++ b/3.test_cases/8.nemo-multimodal/config.yaml @@ -28,7 +28,7 @@ env_vars: NCCL_DEBUG: INFO # Logging level for NCCL. Set to "INFO" for debug information ##NCCL_PROTO: simple # Protocol NCCL will use. 
Set to "simple" for AWS TRANSFORMER_OFFLINE: 1 - FI_EFA_USE_DEVICE_RDMA: 1 + #FI_EFA_USE_DEVICE_RDMA: 1 FI_PROVIDER: efa NCCL_LAUNCH_MODE: parallel FI_EFA_FORK_SAFE: 1 diff --git a/3.test_cases/8.nemo-multimodal/download_multimodal.yaml b/3.test_cases/8.nemo-multimodal/download_multimodal.yaml index d6a7696a..3b4f7232 100644 --- a/3.test_cases/8.nemo-multimodal/download_multimodal.yaml +++ b/3.test_cases/8.nemo-multimodal/download_multimodal.yaml @@ -7,7 +7,7 @@ run: dataset_repo_id: laion/laion-art # huggingface dataset repo id, in the format of {user_or_company}/{dataset_name} # See https://huggingface.co/datasets?task_categories=task_categories:text-to-image&sort=downloads -dataset_output_root: /fsx/laion-art +dataset_output_root: /fsx/laion-art-data download_parquet: enable: True @@ -45,28 +45,12 @@ reorganize_tar: - .jpg - .txt -precache_encodings: - enable: True - input_dir: ${..reorganize_tar.output_dir} - output_dir: ${..dataset_output_root}/tarfiles_precached - tar_chunk_size: ${..reorganize_tar.tar_chunk_size} - node_array_size: 4 # increase the number of concurrent jobs to reduce the time for each job - precache_config_path: ${launcher_scripts_path}/conf/data_preparation/multimodal/precache_sd.yaml - generate_wdinfo: enable: True - input_dir: ${..precache_encodings.output_dir} + input_dir: ${..reorganize_tar.output_dir} # use ${..precache_encodings.output_dir} if you're doing precaching, otherwise use ${..reorganize_tar.output_dir} - tar_chunk_size: ${..precache_encodings.tar_chunk_size} + tar_chunk_size: ${..reorganize_tar.tar_chunk_size} file_ext_in_tar: - .pickle output_wdinfo_path: ${..dataset_output_root}/wdinfo.pkl -merge_source_tar: - enable: False - append_tar_dir: ${..precache_encodings.output_dir} - source_dir: ${..precache_encodings.input_dir} - source_extensions: # objects in the source tar files that are to be added to the precached tar files - - .json - - .txt - node_array_size: 1 # increase the number of jobs to reduce the time for each 
job diff --git a/3.test_cases/8.nemo-multimodal/stable_diffusion_860m_res_256_pretrain_hydra.yaml b/3.test_cases/8.nemo-multimodal/stable_diffusion_860m_res_256_pretrain_hydra.yaml new file mode 100644 index 00000000..42dcc0f4 --- /dev/null +++ b/3.test_cases/8.nemo-multimodal/stable_diffusion_860m_res_256_pretrain_hydra.yaml @@ -0,0 +1,175 @@ +run: + name: stable_diffusion_860m_res_256_pretrain + results_dir: /apps/nemo-src/launcher_scripts/results/stable_diffusion_860m_res_256_pretrain + time_limit: 2-00:00:00 + dependency: singleton +name: stable-diffusion-train +trainer: + devices: 8 + num_nodes: 2 + accelerator: gpu + precision: 16 + logger: false + enable_checkpointing: false + replace_sampler_ddp: false + max_epochs: 5 + max_steps: 82500 + log_every_n_steps: 10 + accumulate_grad_batches: 1 + gradient_clip_val: 1.0 + benchmark: false + enable_model_summary: true +exp_manager: + explicit_log_dir: /apps/nemo-src/launcher_scripts/results/stable_diffusion_860m_res_256_pretrain/results + exp_dir: null + name: nemo_stable_diffusion + create_wandb_logger: false + wandb_logger_kwargs: + project: stable-diffusion + group: nemo-sd + name: nemo_stable_diffusion + resume: true + create_checkpoint_callback: true + create_tensorboard_logger: true + checkpoint_callback_params: + every_n_train_steps: 1000 + every_n_epochs: 0 + monitor: reduced_train_loss + filename: nemo-stable-diffusion--{reduced_train_loss:.2f}-{step}-{consumed_samples} + resume_if_exists: true + resume_ignore_no_checkpoint: true + ema: + enable: true + decay: 0.9999 + validate_original_weights: false + every_n_steps: 1 + cpu_offload: false +model: + precision: 16 + micro_batch_size: 128 + global_batch_size: 8192 + linear_start: 0.00085 + linear_end: 0.012 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: images + cond_stage_key: captions + image_size: 64 + channels: 4 + cond_stage_trainable: false + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 
0.18215 + use_ema: false + scale_by_std: false + ckpt_path: null + ignore_keys: [] + parameterization: eps + clip_denoised: true + load_only_unet: false + cosine_s: 0.008 + given_betas: null + original_elbo_weight: 0 + v_posterior: 0 + l_simple_weight: 1 + use_positional_encodings: false + learn_logvar: false + logvar_init: 0 + beta_schedule: linear + loss_type: l2 + concat_mode: true + cond_stage_forward: null + text_embedding_dropout_rate: 0 + fused_opt: true + inductor: true + inductor_cudagraphs: false + capture_cudagraph_iters: -1 + channels_last: true + unet_config: + _target_: nemo.collections.multimodal.modules.stable_diffusion.diffusionmodules.openaimodel.UNetModel + from_pretrained: null + from_NeMo: true + image_size: 32 + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: + - 4 + - 2 + - 1 + num_res_blocks: 2 + channel_mult: + - 1 + - 2 + - 4 + - 4 + num_heads: 8 + use_spatial_transformer: true + transformer_depth: 1 + context_dim: 768 + use_checkpoint: false + legacy: false + use_flash_attention: true + first_stage_config: + _target_: nemo.collections.multimodal.models.stable_diffusion.ldm.autoencoder.AutoencoderKL + from_pretrained: null + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + cond_stage_config: + _target_: nemo.collections.multimodal.modules.stable_diffusion.encoders.modules.FrozenCLIPEmbedder + version: openai/clip-vit-large-patch14 + device: cuda + max_length: 77 + seed: 666 + resume_from_checkpoint: null + apex_transformer_log_level: 30 + gradient_as_bucket_view: true + optim: + name: fused_adam + lr: 8.192e-05 + weight_decay: 0.0 + betas: + - 0.9 + - 0.999 + sched: + name: WarmupHoldPolicy + warmup_steps: 10000 + hold_steps: 10000000000000 + nsys_profile: + enabled: false + 
start_step: 10 + end_step: 10 + ranks: + - 0 + gen_shape: false + data: + num_workers: 16 + train: + dataset_path: + - /fsx/laion-art-data/wdinfo.pkl + augmentations: + resize_smallest_side: 256 + center_crop_h_w: 256, 256 + horizontal_flip: false + filterings: null + webdataset: + infinite_sampler: false + local_root_path: /fsx/laion-art-data/tarfiles_reorganized/task0000/ From 14283fc829a590d5a31b1ac9c8b6dfec33d1fd83 Mon Sep 17 00:00:00 2001 From: AWS ParallelCluster user Date: Tue, 31 Oct 2023 18:33:40 +0000 Subject: [PATCH 177/648] Updated Readme and bcm.yaml Signed-off-by: AWS ParallelCluster user --- 3.test_cases/8.nemo-multimodal/README.md | 21 +++++++++++++-------- 3.test_cases/8.nemo-multimodal/bcm.yaml | 2 +- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/3.test_cases/8.nemo-multimodal/README.md b/3.test_cases/8.nemo-multimodal/README.md index e8a5dfba..9d7ce280 100644 --- a/3.test_cases/8.nemo-multimodal/README.md +++ b/3.test_cases/8.nemo-multimodal/README.md @@ -1,8 +1,11 @@ # Train Stable Diffusion with NeMo-Multimodal +This project provides a guide to run NemoMultimodal on AWS using a container from Nvidia GPU Cloud (NGC). NemoMultimodal supports multiple models including Vision Transformers (ViTs), CLIP, Stable Diffusion, InstructPix2Pix, DreamBooth, ControlNet and Imagen. The test cases can be executed on Slurm and use Nvidia Enroot and Nvidia Pyxis. In this project we will showcase a working example with multi-node training for Stable Diffusion + + ## Prerequisites -0. You have access to nemo-multimodal -1. Have a cluster ready +0. You have access to nemo-multimodal. You can request access to the open beta [here](https://developer.nvidia.com/nemo-framework) +1. Have a slurm based parallelcluster ready for use. 2. Generate API Key: https://ngc.nvidia.com/setup/api-key 3. Install NGC CLI: https://ngc.nvidia.com/setup/installers/cli 4. 
Login
@@ -10,6 +13,9 @@ docker login nvcr.io
 Username: $oauthtoken
 Password: API_KEY
+
+
+If you have created your cluster with DLAMI or your custom AMI, please make sure `libnvidia-container cli` is installed. You can follow the instructions below to install it.
 ```
 To install libnvidia-container cli: https://github.com/NVIDIA/libnvidia-container
@@ -21,10 +27,9 @@ curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dear
   sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
     sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list \
   && \
-    sudo apt-get update
-
-sudo apt-get install libnvidia-container1
-sudo apt-get install libnvidia-container-tools
+    sudo apt-get update \
+  && sudo apt-get install libnvidia-container1 \
+  && sudo apt-get install libnvidia-container-tools
 ```
@@ -34,10 +39,10 @@ sudo apt-get install libnvidia-container-tools
 docker pull nvcr.io/ea-bignlp/ea-mm-participants/bignlp-mm:23.05-py3
 ```
-## Run container
+## Run container on Head Node
 ```
- docker run -it --gpus all --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 nvcr.io/ea-bignlp/ea-mm-participants/bignlp-mm:23.05-py3 bash
+ docker run -it --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 nvcr.io/ea-bignlp/ea-mm-participants/bignlp-mm:23.05-py3 bash
 ```
 ## Copy launcher scripts to host

diff --git a/3.test_cases/8.nemo-multimodal/bcm.yaml b/3.test_cases/8.nemo-multimodal/bcm.yaml
index 0b27cdec..2e7d8717 100644
--- a/3.test_cases/8.nemo-multimodal/bcm.yaml
+++ b/3.test_cases/8.nemo-multimodal/bcm.yaml
@@ -1,4 +1,4 @@
-partition: awsankur-p5
+partition:
 account: null
 exclusive: True
 gpus_per_task: null

From c23debc8bcccdf5e8b430c257e9807d2e0b6afc3 Mon Sep 17 00:00:00 2001
From: AWS ParallelCluster user
Date: Tue, 31 Oct 2023 20:50:09 +0000
Subject: [PATCH 178/648] Updated README

Signed-off-by: AWS ParallelCluster user

---
 3.test_cases/8.nemo-multimodal/README.md   | 42 ++++++++++++++++++-
3.test_cases/8.nemo-multimodal/config.yaml | 1 + .../download_multimodal.yaml | 2 +- 3 files changed, 42 insertions(+), 3 deletions(-) diff --git a/3.test_cases/8.nemo-multimodal/README.md b/3.test_cases/8.nemo-multimodal/README.md index 9d7ce280..a2b4775b 100644 --- a/3.test_cases/8.nemo-multimodal/README.md +++ b/3.test_cases/8.nemo-multimodal/README.md @@ -1,6 +1,6 @@ # Train Stable Diffusion with NeMo-Multimodal -This project provides a guide to run NemoMultimodal on AWS using a container from Nvidia GPU Cloud (NGC). NemoMultimodal supports multiple models including Vision Transformers (ViTs), CLIP, Stable Diffusion, InstructPix2Pix, DreamBooth, ControlNet and Imagen. The test cases can be executed on Slurm and use Nvidia Enroot and Nvidia Pyxis. In this project we will showcase a working example with multi-node training for Stable Diffusion +This project provides a guide to run NemoMultimodal on AWS using a container from Nvidia GPU Cloud (NGC). NemoMultimodal 23.05 supports multiple models including Vision Transformers (ViTs), CLIP, Stable Diffusion, InstructPix2Pix, DreamBooth, ControlNet and Imagen. The test cases can be executed on Slurm and use Nvidia Enroot and Nvidia Pyxis. In this project we will showcase a working example with multi-node training for Stable Diffusion ## Prerequisites @@ -46,6 +46,7 @@ docker pull nvcr.io/ea-bignlp/ea-mm-participants/bignlp-mm:23.05-py3 ``` ## Copy launcher scripts to host +We need to copy NeMo launcher scripts to head node that we will use to submit multiple slurm jobs for downloading, preparing data and running training. Once the container is running, exit out of it and copy the launcher scripts like below: ``` docker cp -a :/opt/NeMo-Megatron-Launcher/ ./nemo-src @@ -81,5 +82,42 @@ source activate nemo-multimodal pip3 install -r requirements.txt ``` +## Submitting slurm jobs +Next we will show how to submit slurm jobs for data-preparation and training. 
The NeMo config provides the following config files which we have modified:
+
+1. config.yaml: NeMo config with information about different stages and environment variables
+2. bcm.yaml: Cluster setup config
+3. download_multimodal.yaml: Config to download and prepare data
+4. 860m_res_256_pretrain.yaml: Config to pre-train stable diffusion model
+
+You can run one or more stages like below:
+
+```
+HYDRA_FULL_ERROR=1 python3 /apps/nemo-src/launcher_scripts/main.py
+```
+This will create separate folders for different slurm jobs and create folders with the relevant slurm submission script and config file.
+
+## Download and prepare data
+ We will use the popular [laion-art](https://huggingface.co/datasets/laion/laion-art) data for training the stable diffusion model which contains >8M images and their captions. Please review the [download_multimodal](https://github.com/aws-samples/awsome-distributed-training/blob/nemo-multimodal/3.test_cases/8.nemo-multimodal/download_multimodal.yaml) file which contains the following sections:
+
+1. dataset_repo_id: laion/laion-art # huggingface dataset repo id, in the format of {user_or_company}/{dataset_name}
+2. download_parquet: Downloads and partitions the parquet files and stores the partitioned parquet files in `/fsx/laion-art-data/parquet/`
+3. download_images: Uses [img2dataset](https://github.com/rom1504/img2dataset/tree/main) to download the images specified in the parquet files and store the raw data in `/fsx/laion-art-data/tarfiles_raw`. Each partitioned parquet file will run in an array of slurm jobs sequentially.
+4. reorganize_tar: This section will reorganize the tar files and create new tarfiles with tar_chunk_size number of images stored in each tar file. Make sure `node_array_size` is set to 1, otherwise additional preprocessing will be needed to merge the tarfiles from the two tasks in one folder. The reorganized tarfiles will be stored in `/fsx/laion-art-data/tarfiles_reorganized`.
+5. 
generate_wdinfo: This task will generate a pickle file with the necessary paths for the reorganized tarfiles. Make sure you are reading from reorganized tarfiles and not from precache_encodings which is included in the original version of NeMo 23.05. + +## Run Distributed Training +Once the data is downloaded, the training job runs next. Make sure the trainer inputs such as `num_nodes` and number of gpus per node in `trainer.devices` is set correctly. Also, set `max_epochs` to -1 if training needs to run till max_steps have completed. The model by default will create a tensorboard events log, but wights and biases is not switched on by default. Also make sure the datasets path at the bottom point to the right paths for `wdinfo.pkl` and `tarfiles_reorganized`. + +Once training starts you will see logs like: + +``` +Epoch 0: 0%| | 1/605 [01:58<19:52:10, 118.43s/it, loss=1, v_num=, reduced_train_loss=1.000, lr=8.19e-9, global_step=1.000, consumed_samples=8192.0] +Epoch 0: 0%| | 2/605 [02:02<10:14:49, 61.18s/it, loss=1, v_num=, reduced_train_loss=1.000, lr=8.19e-9, global_step=1.000, consumed_samples=8192.0] +Epoch 0: 0%| | 2/605 [02:02<10:14:49, 61.18s/it, loss=1, v_num=, reduced_train_loss=1.000, lr=1.64e-8, global_step=2.000, consumed_samples=16384.0] +``` + + + + -## Update config files in nemo-src diff --git a/3.test_cases/8.nemo-multimodal/config.yaml b/3.test_cases/8.nemo-multimodal/config.yaml index eaf8d47c..a52866f1 100644 --- a/3.test_cases/8.nemo-multimodal/config.yaml +++ b/3.test_cases/8.nemo-multimodal/config.yaml @@ -33,6 +33,7 @@ env_vars: NCCL_LAUNCH_MODE: parallel FI_EFA_FORK_SAFE: 1 FI_EFA_ENABLE_SHM_TRANSFER: 1 + FI_EFA_USE_HUGE_PAGE: 0 # GPU Mapping numa_mapping: diff --git a/3.test_cases/8.nemo-multimodal/download_multimodal.yaml b/3.test_cases/8.nemo-multimodal/download_multimodal.yaml index 3b4f7232..14f74865 100644 --- a/3.test_cases/8.nemo-multimodal/download_multimodal.yaml +++ b/3.test_cases/8.nemo-multimodal/download_multimodal.yaml @@ 
-39,7 +39,7 @@ reorganize_tar: enable: True input_dir: ${..download_images.output_dir} output_dir: ${..dataset_output_root}/tarfiles_reorganized - node_array_size: 2 # increase the number of concurrent jobs to reduce the time for each job + node_array_size: 1 # increase the number of concurrent jobs to reduce the time for each job tar_chunk_size: 1000 # number of training examples in each output tar file file_ext_in_tar: # target extensions in each - .jpg From 81bb2e1cd566a8535a8890468c246d7f717d6ad4 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Wed, 1 Nov 2023 16:27:29 -0700 Subject: [PATCH 179/648] Makefile Signed-off-by: Sean Smith --- 3.test_cases/3.MPT/Makefile | 12 ++++++++++++ 3.test_cases/3.MPT/README.md | 2 ++ 2 files changed, 14 insertions(+) create mode 100644 3.test_cases/3.MPT/Makefile diff --git a/3.test_cases/3.MPT/Makefile b/3.test_cases/3.MPT/Makefile new file mode 100644 index 00000000..75d9c2c3 --- /dev/null +++ b/3.test_cases/3.MPT/Makefile @@ -0,0 +1,12 @@ +ENROOT_IMAGE=llm-foundry + +all: build clean import + +build: + docker build -t ${ENROOT_IMAGE} -f 0.llm-foundry.Dockerfile . + +clean: + -rm ${ENROOT_IMAGE}.sqsh + +import: + enroot import -o ${ENROOT_IMAGE}.sqsh dockerd://llm-foundry:latest \ No newline at end of file diff --git a/3.test_cases/3.MPT/README.md b/3.test_cases/3.MPT/README.md index 2455ff68..6142b5b8 100644 --- a/3.test_cases/3.MPT/README.md +++ b/3.test_cases/3.MPT/README.md @@ -75,6 +75,8 @@ Before running training jobs, you need to use an [Enroot](https://github.com/NVI It will take around 5 minutes to convert the container image from Docker to the Enroot format. Once done proceed to the next stage. +For ease of testing we've included a `Makefile` that automatically builds and imports the latest image. To run this, execute `make` or you can individually specify `make build` to build the Docker image, `make clean` to remove the squash file and `make import` to import the Dockerfile into enroot squash file. + ## 3. 
Run the processing job You need to retrieve input data and preprocess it before running the training job. From 28d3a70481bd6b0112b7e69960ed1a307de3a25f Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Wed, 1 Nov 2023 16:32:42 -0700 Subject: [PATCH 180/648] Remove Multi-node for now Signed-off-by: Sean Smith --- 3.test_cases/6.stable-diffusion/README.md | 41 ------- .../6.stable-diffusion/multi-node/0.Makefile | 0 .../multi-node/1.Dockerfile | 104 ------------------ .../multi-node/2.train.sbatch | 76 ------------- 4 files changed, 221 deletions(-) delete mode 100644 3.test_cases/6.stable-diffusion/multi-node/0.Makefile delete mode 100644 3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile delete mode 100644 3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch diff --git a/3.test_cases/6.stable-diffusion/README.md b/3.test_cases/6.stable-diffusion/README.md index 26420cc7..aadbc99a 100644 --- a/3.test_cases/6.stable-diffusion/README.md +++ b/3.test_cases/6.stable-diffusion/README.md @@ -147,44 +147,3 @@ AttnProcessor2_0 which is a Processor for implementing scaled dot-product attent The older self.unet.set_attn_processor(AttnProcessor()) gives Cuda OOM error with a batch size of 32 while with `AttnProcessor2_0()` is able to run with a batch size of 32 and yield 385 images/sec throughput More details on this can be found here: https://pytorch.org/blog/accelerated-diffusers-pt-20/ - - -## 2. Multi Node Tests - -### 2.1 Multi-Node Training - -For the multi-node training we've created a `Dockerfile`, and Slurm submit script and a `Makefile` to build the docker image and convert it to an enroot image. To get started please follow the guide [AWS ParallelCluster Distributed Training](../../1.architectures/2.aws-parallelcluster). Before starting this section make sure you have the following setup: - -* AWS ParallelCluster >= 3.7.0 -* Pyxis -* Enroot -* FSx Lustre Filesystem - -1. 
To get started, clone this repo and cd into the multi-node directory: - -``` -git clone https://github.com/aws-samples/awsome-distributed-training.git -cd awsome-distributed-training/6.stable-diffusion/multi-node -``` - -Next build the docker image and convert it to a enroot sqsh file: - -```bash -make # this will build the docker image and convert it to enroot -``` - -Now we can start training - -``` -sbatch 2.train.sbatch -``` - -### 2.1 Multi Node Results - - -## 3. What's Next? -TODO: -1. Investigate why single node performance on A100 80 GB instances is sub-par. -2. Implement distributed training following original implementation of stable diffusion -3. Explore the impact of MosaicML's Exponential Moving Average implementation on training performance. -4. Test the impact of xFormers diff --git a/3.test_cases/6.stable-diffusion/multi-node/0.Makefile b/3.test_cases/6.stable-diffusion/multi-node/0.Makefile deleted file mode 100644 index e69de29b..00000000 diff --git a/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile b/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile deleted file mode 100644 index 393fe5e3..00000000 --- a/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile +++ /dev/null @@ -1,104 +0,0 @@ -FROM nvcr.io/nvidia/pytorch:23.08-py3 - -ARG EFA_INSTALLER_VERSION=latest -ARG AWS_OFI_NCCL_VERSION=v1.7.2-aws -ARG NCCL_TESTS_VERSION=master -ARG NCCL_VERSION=v2.18.5-1 -RUN apt-get update -y -RUN apt-get remove -y --allow-change-held-packages \ - libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 libnccl2 libnccl-dev - -RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \ - git \ - gcc \ - vim \ - kmod \ - openssh-client \ - openssh-server \ - build-essential \ - curl \ - autoconf \ - libtool \ - gdb \ - automake \ - python3-distutils \ - cmake \ - apt-utils \ - devscripts \ - debhelper \ - libsubunit-dev \ - check \ - pkg-config - -RUN mkdir -p /var/run/sshd -RUN sed -i 's/[ #]\(.*StrictHostKeyChecking \).*/ \1no/g' 
/etc/ssh/ssh_config && \ - echo " UserKnownHostsFile /dev/null" >> /etc/ssh/ssh_config && \ - sed -i 's/#\(StrictModes \).*/\1no/g' /etc/ssh/sshd_config -ENV LD_LIBRARY_PATH /usr/local/cuda/extras/CUPTI/lib64:/opt/amazon/openmpi/lib:/opt/nccl/build/lib:/opt/amazon/efa/lib:/opt/aws-ofi-nccl/install/lib:/usr/local/lib:$LD_LIBRARY_PATH -ENV PATH /opt/amazon/openmpi/bin/:/opt/amazon/efa/bin:/usr/bin:/usr/local/bin:$PATH -RUN curl https://bootstrap.pypa.io/get-pip.py -o /tmp/get-pip.py \ - && python3 /tmp/get-pip.py \ - && pip3 install awscli pynvml - -################################################# -# Install NVIDIA GDRCopy -RUN git clone https://github.com/NVIDIA/gdrcopy.git /opt/gdrcopy \ - && cd /opt/gdrcopy \ - && make lib_install install \ - && cd /opt/gdrcopy/tests \ - && make \ - && mv gdrcopy_copylat gdrcopy_copybw gdrcopy_sanity gdrcopy_apiperf /usr/bin/ - -################################################# -## Install EFA installer -RUN cd $HOME \ - && curl -O https://efa-installer.amazonaws.com/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz \ - && tar -xf $HOME/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz \ - && cd aws-efa-installer \ - && ./efa_installer.sh -y -g -d --skip-kmod --skip-limit-conf --no-verify \ - && rm -rf $HOME/aws-efa-installer - -################################################### -## Install NCCL -RUN git clone https://github.com/NVIDIA/nccl -b ${NCCL_VERSION} /opt/nccl \ - && cd /opt/nccl \ - && make -j src.build CUDA_HOME=/usr/local/cuda \ - NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_90,code=sm_90" - -################################################### -## Install AWS-OFI-NCCL plugin -RUN apt-get install libtool autoconf cmake nasm unzip pigz parallel nfs-common build-essential hwloc libhwloc-dev libjemalloc2 libnuma-dev numactl libjemalloc-dev preload htop iftop liblapack-dev libgfortran5 ipcalc wget curl devscripts debhelper check libsubunit-dev fakeroot 
pkg-config dkms -y -RUN export OPAL_PREFIX="" \ - && git clone https://github.com/aws/aws-ofi-nccl.git /opt/aws-ofi-nccl \ - && cd /opt/aws-ofi-nccl \ - && git checkout ${AWS_OFI_NCCL_VERSION} \ - && ./autogen.sh \ - && ./configure --prefix=/opt/aws-ofi-nccl/install \ - --with-libfabric=/opt/amazon/efa/ \ - --with-cuda=/usr/local/cuda \ - --with-nccl=/opt/nccl/build \ - --with-mpi=/opt/amazon/openmpi/ \ - && make && make install - -################################################### -## Install NCCL-tests -RUN git clone https://github.com/NVIDIA/nccl-tests.git /opt/nccl-tests \ - && cd /opt/nccl-tests \ - && git checkout ${NCCL_TESTS_VERSION} \ - && make MPI=1 \ - MPI_HOME=/opt/amazon/openmpi/ \ - CUDA_HOME=/usr/local/cuda \ - NCCL_HOME=/opt/nccl/build \ - NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_90,code=sm_90" - -RUN git clone https://github.com/mosaicml/diffusion-benchmark.git -RUN pip3 install -r diffusion-benchmark/requirements.txt -RUN pip3 install mosaicml==0.15.0 --force -RUN pip3 install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu121 --force -RUN pip3 uninstall transformer-engine -y -RUN pip3 install protobuf==3.20.3 - -RUN rm -rf /var/lib/apt/lists/* -ENV LD_PRELOAD /opt/nccl/build/lib/libnccl.so - -WORKDIR /workspace/diffusion-benchmark \ No newline at end of file diff --git a/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch b/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch deleted file mode 100644 index 05d2372e..00000000 --- a/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash - -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: MIT-0 - -#SBATCH --nodes=2 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs -#SBATCH --job-name=mosaicml-stable-diffusion # name of your job -#SBATCH --gpus-per-node=8 # Number of GPU per node -#SBATCH --gres=gpu:8 # number of GPU we reserve -#SBATCH --gpus-per-task=8 # Number of GPU per node -#SBATCH --exclusive # job has exclusive use of the resource, no sharing -#SBATCH --wait-all-nodes=1 -#SBATCH --output jobs/slurm-%j.out - -# default variables for Enroot -: "${APPS_PATH:=/fsx}" -: "${DATA_PATH:=/fsx}" - -# default variables for Enroot -: "${IMAGE:=$APPS_PATH/mosaicml/stable-diffusion.sqsh}" -: "${FSX_MOUNT:=$DATA_PATH:$DATA_PATH}" - -## Plenty of EFA level variables -export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d -export FI_EFA_FORK_SAFE=1 -export FI_LOG_LEVEL=1 -export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons -export FI_EFA_ENABLE_SHM_TRANSFER=1 -export NCCL_DEBUG=INFO -export WANDB_MODE=offline - -declare -a ARGS=( - --container-image $IMAGE - --container-mounts $FSX_MOUNT -) - -NODES=( $( scontrol show hostnames $SLURM_JOB_NODELIST ) ) -NNODES=${#NODES[@]} -NODES_ARRAY=($NODES) -HEAD_NODE=${NODES_ARRAY[0]} -MASTER_ADDR=$(srun --nodes=1 --ntasks=1 -w "$HEAD_NODE" hostname --ip-address) -MASTER_PORT=$RANDOM -NPROC=8 -WORLD_SIZE=$((NNODES * NPROC)) - -srun -l "${ARGS[@]}" python -c "import streaming; streaming.base.util.clean_stale_shared_memory()" -function run_compose() { - # if [ ${NODE_RANK} -eq 0 ]; then - # OPTION="nodelist" - # else - # OPTION="exclude" - # fi - srun --nodelist=${NODE} --ntasks=1 -l "${ARGS[@]}" composer \ - --world_size ${WORLD_SIZE} \ - --nproc ${NPROC} \ - --node_rank ${NODE_RANK} \ - --master_addr ${MASTER_ADDR} \ - --master_port ${MASTER_PORT} \ - --verbose \ - benchmark.py \ - --use_ema --use_synth_data --device_train_microbatch_size 4 -} - -NODE_RANK=1 -for (( NODE_RANK=1; NODE_RANK<${NNODES}; NODE_RANK++ )) -do - NODE=${NODES[$NODE_RANK]} - echo "Run compute node 
${NODE} for rank: ${NODE_RANK}" - run_compose & -done - -NODE_RANK=0 -NODE=${HEAD_NODE} -echo "Run master node ${NODE} for rank: ${NODE_RANK}" -run_compose -wait From 462c503af51c77dfc7196bb08f97e428a50ee327 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Thu, 2 Nov 2023 03:26:20 +0000 Subject: [PATCH 181/648] use tab instead of spaces --- 3.test_cases/3.MPT/Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/3.test_cases/3.MPT/Makefile b/3.test_cases/3.MPT/Makefile index 75d9c2c3..8e55bd30 100644 --- a/3.test_cases/3.MPT/Makefile +++ b/3.test_cases/3.MPT/Makefile @@ -3,10 +3,10 @@ ENROOT_IMAGE=llm-foundry all: build clean import build: - docker build -t ${ENROOT_IMAGE} -f 0.llm-foundry.Dockerfile . + docker build -t ${ENROOT_IMAGE} -f 0.llm-foundry.Dockerfile . clean: - -rm ${ENROOT_IMAGE}.sqsh + -rm ${ENROOT_IMAGE}.sqsh import: - enroot import -o ${ENROOT_IMAGE}.sqsh dockerd://llm-foundry:latest \ No newline at end of file + enroot import -o ${ENROOT_IMAGE}.sqsh dockerd://llm-foundry:latest From 129e067189e41a790fdb196b00584768522bbf1f Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Thu, 2 Nov 2023 05:12:33 +0000 Subject: [PATCH 182/648] resolve issue #31 Co-authored-by: Verdi March --- 3.test_cases/3.MPT/0.llm-foundry.Dockerfile | 31 ++++++++++++------- .../2.train-mpt-manual-distributed.sbatch | 1 + 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/3.test_cases/3.MPT/0.llm-foundry.Dockerfile b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile index 0a7a2a83..12e46dbb 100644 --- a/3.test_cases/3.MPT/0.llm-foundry.Dockerfile +++ b/3.test_cases/3.MPT/0.llm-foundry.Dockerfile @@ -1,8 +1,9 @@ FROM mosaicml/pytorch:2.0.1_cu118-python3.10-ubuntu20.04 -ARG EFA_INSTALLER_VERSION=1.26.1 -ARG AWS_OFI_NCCL_VERSION=v1.7.2-aws + +ARG EFA_INSTALLER_VERSION=latest +ARG AWS_OFI_NCCL_VERSION=v1.7.3-aws ARG NCCL_TESTS_VERSION=master -ARG NCCL_VERSION=v2.12.7-1 +ARG NCCL_VERSION=2.18.5-1 ARG LLM_FOUNDRY_VERSION=v0.3.0 ARG 
OPEN_MPI_PATH=/opt/amazon/openmpi @@ -52,11 +53,20 @@ RUN cd $HOME \ ################################################### ## Install NCCL -RUN git clone https://github.com/NVIDIA/nccl /opt/nccl \ - && cd /opt/nccl \ - && git checkout -b ${NCCL_VERSION} \ - && make -j src.build CUDA_HOME=/usr/local/cuda \ - NVCC_GENCODE="-gencode=arch=compute_90,code=sm_90 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_80,code=sm_80" +RUN apt-get remove -y libnccl2 libnccl-dev \ + && cd /tmp \ + && git clone https://github.com/NVIDIA/nccl.git -b v${NCCL_VERSION} \ + && cd nccl \ + && make -j src.build BUILDDIR=/usr/local \ + # nvcc to target p5 and p4 instances + NVCC_GENCODE="-gencode=arch=compute_90,code=sm_90 -gencode=arch=compute_80,code=sm_80" \ + && rm -rf /tmp/nccl + +# NCCL +RUN echo "/usr/local/lib" >> /etc/ld.so.conf.d/local.conf && \ + echo "/opt/amazon/efa/lib" >> /etc/ld.so.conf.d/efa.conf && \ + echo "/opt/amazon/openmpi/lib" >> /etc/ld.so.conf.d/efa.conf && \ + ldconfig ################################################### ## Install AWS-OFI-NCCL plugin @@ -68,7 +78,7 @@ RUN export OPAL_PREFIX="" \ && ./configure --prefix=/opt/aws-ofi-nccl/install \ --with-libfabric=/opt/amazon/efa/ \ --with-cuda=/usr/local/cuda \ - --with-nccl=/opt/nccl/build \ + --with-nccl=/usr/local/nccl \ --with-mpi=/opt/amazon/openmpi/ \ && make -j && make install @@ -86,5 +96,4 @@ RUN git clone https://github.com/mosaicml/llm-foundry.git llm-foundry \ && cd llm-foundry \ && git checkout $LLM_FOUNDRY_VERSION \ && pip install -e ".[gpu]" \ - && pip install xformers nvtx 'flash-attn==v1.0.3.post0' - + && pip install xformers nvtx 'flash-attn==v1.0.3.post0' \ No newline at end of file diff --git a/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch b/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch index 8e6ba806..ef579eff 100644 --- a/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch +++ b/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch @@ -29,6 +29,7 @@ export 
FI_EFA_FORK_SAFE=1 export FI_LOG_LEVEL=1 export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons export FI_EFA_ENABLE_SHM_TRANSFER=1 +export FI_EFA_USE_HUGE_PAGE=0 # https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 # https://github.com/pytorch/pytorch/issues/68893 #export NCCL_SOCKET_IFNAME=ens From 305f50d28512826e3a68ae676ad702a6be59ff74 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Thu, 2 Nov 2023 20:43:00 +0800 Subject: [PATCH 183/648] EFA cheatsheet --- 1.architectures/efa-cheatsheet.md | 47 +++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 1.architectures/efa-cheatsheet.md diff --git a/1.architectures/efa-cheatsheet.md b/1.architectures/efa-cheatsheet.md new file mode 100644 index 00000000..7ea0524f --- /dev/null +++ b/1.architectures/efa-cheatsheet.md @@ -0,0 +1,47 @@ +# EFA Cheatsheet + +## 1. Settings via environment variables + +| Setting | Explanation | +| ------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `FI_EFA_USE_HUGE_PAGE=0` | Set to 0 when you see `os.fork()` causes `OSError: Cannot allocate memory`. Typically happen by multi-process PyTorch data loader. Disabling huge page causes minor performance hit, but it's needed to prevent fork fails due to the operating system running out of huge pages. | +| `FI_EFA_FORK_SAFE=1` | Not needed for kernel>=5.15. Still fine to set it though no effect. See [ref](https://github.com/ofiwg/libfabric/pull/9112). | +| `FI_EFA_USE_DEVICE_RDMA=1` | Do not set for libfabric>=1.18.0 and aws-ofi-nccl>=1.7.0. 
It's not harmful to set it on p4/p5 on the newer software, but you just don't have to set it. | +| `FI_EFA_ENABLE_SHM_TRANSFER=1` | Not needed. This is really a no-op, the default already to enable SHMEM | +| `FI_PROVIDER=efa` | Use for aws-ofi-nccl<=1.5.0 AND p4/p5 instances. | +| `NCCL_PROTO=simple` | Use for aws-ofi-nccl<=1.5.0 and p4/p5 instances. | +| `NCCL_SOCKET_NTHREADS` | Not applicable for EFA. | +| `NCCL_NSOCKS_PERTHREAD` | Not applicable for EFA. | +| `NCCL_MIN_CHANNELS=xxx` | Recommend to leave it out to use the default. For e.g., on p4d/p4de, the number of channels should be 8, which is the minimum for a 4-NIC platform. The reduction message is split by number of GPUs in the job, then the number of channels, so having more channels than necessary causes smaller messages which causes EFA to be starved for data. | +| `NCCL_BUFFSIZE=xxx` | Recommend to leave it out to use the default. | +| `RDMAV_FORK_SAFE=1` | Do not use. This is a RDMA-core environment variable. Prefer `FI_EFA_FORK_SAFE` (if it still makes sense for your Linux kernel version). The two looks the same, but actually behaves very differently, especially on newer kernels, where `RDMAV_FORK_SAFE=1` can break things. | +| `RDMAV_*` | Do not use | +| NCCL version | Recommend one of the stable releases. | + +## 2. A word on p5.48xlarge instances + +Use cuda>=12.0, nccl>=2.18.0 (recommend at least 2.18.5), aws-ofi-nccl>=1.7.2 (recommend at least 1.7.3). + +## 3. Sample Presets + +### 3.1. libfabric>=1.18.0 and aws-ofi-nccl>=1.7.0 + +```bash +export FI_EFA_USE_HUGE_PAGE=0 +``` + +### 3.2. aws-ofi-nccl>=1.6.0,<1.7.0 AND p4/p5 instances + +```bash +export FI_EFA_USE_HUGE_PAGE=0 +export FI_EFA_USE_DEVICE_RDMA=1 +``` + +### 3.3. 
aws-ofi-nccl<=1.5.0 AND p4/p5 instances + +```bash +export FI_EFA_USE_HUGE_PAGE=0 +export FI_EFA_USE_DEVICE_RDMA=1 +export FI_PROVIDER=efa +export NCCL_PROTO=simple +``` From 474385bd79b3d03db03eb09cbc814815b0789c2a Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Wed, 1 Nov 2023 16:38:57 -0700 Subject: [PATCH 184/648] Multi Node Signed-off-by: Sean Smith --- 3.test_cases/6.stable-diffusion/README.md | 40 +++++++ .../6.stable-diffusion/multi-node/0.Makefile | 0 .../multi-node/1.Dockerfile | 104 ++++++++++++++++++ .../multi-node/2.train.sbatch | 76 +++++++++++++ 4 files changed, 220 insertions(+) create mode 100644 3.test_cases/6.stable-diffusion/multi-node/0.Makefile create mode 100644 3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile create mode 100644 3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch diff --git a/3.test_cases/6.stable-diffusion/README.md b/3.test_cases/6.stable-diffusion/README.md index aadbc99a..80267ff0 100644 --- a/3.test_cases/6.stable-diffusion/README.md +++ b/3.test_cases/6.stable-diffusion/README.md @@ -147,3 +147,43 @@ AttnProcessor2_0 which is a Processor for implementing scaled dot-product attent The older self.unet.set_attn_processor(AttnProcessor()) gives Cuda OOM error with a batch size of 32 while with `AttnProcessor2_0()` is able to run with a batch size of 32 and yield 385 images/sec throughput More details on this can be found here: https://pytorch.org/blog/accelerated-diffusers-pt-20/ + +## 2. Multi Node Tests + +### 2.1 Multi-Node Training + +For the multi-node training we've created a `Dockerfile`, and Slurm submit script and a `Makefile` to build the docker image and convert it to an enroot image. To get started please follow the guide [AWS ParallelCluster Distributed Training](../../1.architectures/2.aws-parallelcluster). Before starting this section make sure you have the following setup: + +* AWS ParallelCluster >= 3.7.0 +* Pyxis +* Enroot +* FSx Lustre Filesystem + +1. 
To get started, clone this repo and cd into the multi-node directory: + +``` +git clone https://github.com/aws-samples/awsome-distributed-training.git +cd awsome-distributed-training/6.stable-diffusion/multi-node +``` + +Next build the docker image and convert it to a enroot sqsh file: + +```bash +make # this will build the docker image and convert it to enroot +``` + +Now we can start training + +``` +sbatch 2.train.sbatch +``` + +### 2.1 Multi Node Results + + +## 3. What's Next? +TODO: +1. Investigate why single node performance on A100 80 GB instances is sub-par. +2. Implement distributed training following original implementation of stable diffusion +3. Explore the impact of MosaicML's Exponential Moving Average implementation on training performance. +4. Test the impact of xFormers diff --git a/3.test_cases/6.stable-diffusion/multi-node/0.Makefile b/3.test_cases/6.stable-diffusion/multi-node/0.Makefile new file mode 100644 index 00000000..e69de29b diff --git a/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile b/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile new file mode 100644 index 00000000..393fe5e3 --- /dev/null +++ b/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile @@ -0,0 +1,104 @@ +FROM nvcr.io/nvidia/pytorch:23.08-py3 + +ARG EFA_INSTALLER_VERSION=latest +ARG AWS_OFI_NCCL_VERSION=v1.7.2-aws +ARG NCCL_TESTS_VERSION=master +ARG NCCL_VERSION=v2.18.5-1 +RUN apt-get update -y +RUN apt-get remove -y --allow-change-held-packages \ + libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 libnccl2 libnccl-dev + +RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \ + git \ + gcc \ + vim \ + kmod \ + openssh-client \ + openssh-server \ + build-essential \ + curl \ + autoconf \ + libtool \ + gdb \ + automake \ + python3-distutils \ + cmake \ + apt-utils \ + devscripts \ + debhelper \ + libsubunit-dev \ + check \ + pkg-config + +RUN mkdir -p /var/run/sshd +RUN sed -i 's/[ #]\(.*StrictHostKeyChecking \).*/ \1no/g' 
/etc/ssh/ssh_config && \ + echo " UserKnownHostsFile /dev/null" >> /etc/ssh/ssh_config && \ + sed -i 's/#\(StrictModes \).*/\1no/g' /etc/ssh/sshd_config +ENV LD_LIBRARY_PATH /usr/local/cuda/extras/CUPTI/lib64:/opt/amazon/openmpi/lib:/opt/nccl/build/lib:/opt/amazon/efa/lib:/opt/aws-ofi-nccl/install/lib:/usr/local/lib:$LD_LIBRARY_PATH +ENV PATH /opt/amazon/openmpi/bin/:/opt/amazon/efa/bin:/usr/bin:/usr/local/bin:$PATH +RUN curl https://bootstrap.pypa.io/get-pip.py -o /tmp/get-pip.py \ + && python3 /tmp/get-pip.py \ + && pip3 install awscli pynvml + +################################################# +# Install NVIDIA GDRCopy +RUN git clone https://github.com/NVIDIA/gdrcopy.git /opt/gdrcopy \ + && cd /opt/gdrcopy \ + && make lib_install install \ + && cd /opt/gdrcopy/tests \ + && make \ + && mv gdrcopy_copylat gdrcopy_copybw gdrcopy_sanity gdrcopy_apiperf /usr/bin/ + +################################################# +## Install EFA installer +RUN cd $HOME \ + && curl -O https://efa-installer.amazonaws.com/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz \ + && tar -xf $HOME/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz \ + && cd aws-efa-installer \ + && ./efa_installer.sh -y -g -d --skip-kmod --skip-limit-conf --no-verify \ + && rm -rf $HOME/aws-efa-installer + +################################################### +## Install NCCL +RUN git clone https://github.com/NVIDIA/nccl -b ${NCCL_VERSION} /opt/nccl \ + && cd /opt/nccl \ + && make -j src.build CUDA_HOME=/usr/local/cuda \ + NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_90,code=sm_90" + +################################################### +## Install AWS-OFI-NCCL plugin +RUN apt-get install libtool autoconf cmake nasm unzip pigz parallel nfs-common build-essential hwloc libhwloc-dev libjemalloc2 libnuma-dev numactl libjemalloc-dev preload htop iftop liblapack-dev libgfortran5 ipcalc wget curl devscripts debhelper check libsubunit-dev fakeroot 
pkg-config dkms -y +RUN export OPAL_PREFIX="" \ + && git clone https://github.com/aws/aws-ofi-nccl.git /opt/aws-ofi-nccl \ + && cd /opt/aws-ofi-nccl \ + && git checkout ${AWS_OFI_NCCL_VERSION} \ + && ./autogen.sh \ + && ./configure --prefix=/opt/aws-ofi-nccl/install \ + --with-libfabric=/opt/amazon/efa/ \ + --with-cuda=/usr/local/cuda \ + --with-nccl=/opt/nccl/build \ + --with-mpi=/opt/amazon/openmpi/ \ + && make && make install + +################################################### +## Install NCCL-tests +RUN git clone https://github.com/NVIDIA/nccl-tests.git /opt/nccl-tests \ + && cd /opt/nccl-tests \ + && git checkout ${NCCL_TESTS_VERSION} \ + && make MPI=1 \ + MPI_HOME=/opt/amazon/openmpi/ \ + CUDA_HOME=/usr/local/cuda \ + NCCL_HOME=/opt/nccl/build \ + NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_90,code=sm_90" + +RUN git clone https://github.com/mosaicml/diffusion-benchmark.git +RUN pip3 install -r diffusion-benchmark/requirements.txt +RUN pip3 install mosaicml==0.15.0 --force +RUN pip3 install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu121 --force +RUN pip3 uninstall transformer-engine -y +RUN pip3 install protobuf==3.20.3 + +RUN rm -rf /var/lib/apt/lists/* +ENV LD_PRELOAD /opt/nccl/build/lib/libnccl.so + +WORKDIR /workspace/diffusion-benchmark \ No newline at end of file diff --git a/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch b/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch new file mode 100644 index 00000000..05d2372e --- /dev/null +++ b/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch @@ -0,0 +1,76 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT-0 + +#SBATCH --nodes=2 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs +#SBATCH --job-name=mosaicml-stable-diffusion # name of your job +#SBATCH --gpus-per-node=8 # Number of GPU per node +#SBATCH --gres=gpu:8 # number of GPU we reserve +#SBATCH --gpus-per-task=8 # Number of GPU per node +#SBATCH --exclusive # job has exclusive use of the resource, no sharing +#SBATCH --wait-all-nodes=1 +#SBATCH --output jobs/slurm-%j.out + +# default variables for Enroot +: "${APPS_PATH:=/fsx}" +: "${DATA_PATH:=/fsx}" + +# default variables for Enroot +: "${IMAGE:=$APPS_PATH/mosaicml/stable-diffusion.sqsh}" +: "${FSX_MOUNT:=$DATA_PATH:$DATA_PATH}" + +## Plenty of EFA level variables +export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d +export FI_EFA_FORK_SAFE=1 +export FI_LOG_LEVEL=1 +export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons +export FI_EFA_ENABLE_SHM_TRANSFER=1 +export NCCL_DEBUG=INFO +export WANDB_MODE=offline + +declare -a ARGS=( + --container-image $IMAGE + --container-mounts $FSX_MOUNT +) + +NODES=( $( scontrol show hostnames $SLURM_JOB_NODELIST ) ) +NNODES=${#NODES[@]} +NODES_ARRAY=($NODES) +HEAD_NODE=${NODES_ARRAY[0]} +MASTER_ADDR=$(srun --nodes=1 --ntasks=1 -w "$HEAD_NODE" hostname --ip-address) +MASTER_PORT=$RANDOM +NPROC=8 +WORLD_SIZE=$((NNODES * NPROC)) + +srun -l "${ARGS[@]}" python -c "import streaming; streaming.base.util.clean_stale_shared_memory()" +function run_compose() { + # if [ ${NODE_RANK} -eq 0 ]; then + # OPTION="nodelist" + # else + # OPTION="exclude" + # fi + srun --nodelist=${NODE} --ntasks=1 -l "${ARGS[@]}" composer \ + --world_size ${WORLD_SIZE} \ + --nproc ${NPROC} \ + --node_rank ${NODE_RANK} \ + --master_addr ${MASTER_ADDR} \ + --master_port ${MASTER_PORT} \ + --verbose \ + benchmark.py \ + --use_ema --use_synth_data --device_train_microbatch_size 4 +} + +NODE_RANK=1 +for (( NODE_RANK=1; NODE_RANK<${NNODES}; NODE_RANK++ )) +do + NODE=${NODES[$NODE_RANK]} + echo "Run compute node 
${NODE} for rank: ${NODE_RANK}" + run_compose & +done + +NODE_RANK=0 +NODE=${HEAD_NODE} +echo "Run master node ${NODE} for rank: ${NODE_RANK}" +run_compose +wait From 641199aa22207f1f6a06504134db0a82d2745962 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Thu, 2 Nov 2023 10:32:34 -0700 Subject: [PATCH 185/648] Switch path Signed-off-by: Sean Smith --- 3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch b/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch index 8e6ba806..e81117a3 100644 --- a/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch +++ b/3.test_cases/3.MPT/2.train-mpt-manual-distributed.sbatch @@ -68,7 +68,7 @@ function run_compose() { --master_port ${MASTER_PORT} \ --verbose /llm-foundry/scripts/train/train.py \ /llm-foundry/scripts/train/yamls/pretrain/${MODEL}.yaml \ - data_local=/fsx/c4-dataset \ + data_local=${DATA_PATH}/c4-dataset \ train_loader.dataset.split=train_small \ eval_loader.dataset.split=val_small \ max_duration=3ba \ From ed46289ae52c42c25dae2b1b15783855f3d7891b Mon Sep 17 00:00:00 2001 From: Verdi March Date: Fri, 3 Nov 2023 15:35:23 +0800 Subject: [PATCH 186/648] updated efa cheatsheet --- 1.architectures/efa-cheatsheet.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/1.architectures/efa-cheatsheet.md b/1.architectures/efa-cheatsheet.md index 7ea0524f..ec0f4c59 100644 --- a/1.architectures/efa-cheatsheet.md +++ b/1.architectures/efa-cheatsheet.md @@ -2,6 +2,9 @@ ## 1. Settings via environment variables +For optimized performance, you may need to set additional environment variables depending on the +versions of your libfabric. 
+ | Setting | Explanation | | ------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `FI_EFA_USE_HUGE_PAGE=0` | Set to 0 when you see `os.fork()` causes `OSError: Cannot allocate memory`. Typically happen by multi-process PyTorch data loader. Disabling huge page causes minor performance hit, but it's needed to prevent fork fails due to the operating system running out of huge pages. | @@ -20,7 +23,8 @@ ## 2. A word on p5.48xlarge instances -Use cuda>=12.0, nccl>=2.18.0 (recommend at least 2.18.5), aws-ofi-nccl>=1.7.2 (recommend at least 1.7.3). +Use cuda>=12.0, nccl>=2.18.0 (recommend at least 2.18.5), aws-ofi-nccl>=1.7.2 (recommend at least +1.7.3). ## 3. Sample Presets From 035787e4518bf60268e7f692336a2b28d36114c9 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Fri, 3 Nov 2023 18:14:03 +0800 Subject: [PATCH 187/648] Template pytorch docker file --- 2.docker_images_template/Dockerfile | 229 ++++++++++++++++++++++++++++ 1 file changed, 229 insertions(+) create mode 100644 2.docker_images_template/Dockerfile diff --git a/2.docker_images_template/Dockerfile b/2.docker_images_template/Dockerfile new file mode 100644 index 00000000..f5a251e7 --- /dev/null +++ b/2.docker_images_template/Dockerfile @@ -0,0 +1,229 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +#################################################################################################### +# This is a sample Dockerfile, with optional stanzas. Please read through this Dockerfile, +# understand what it does, then create your own Dockerfile. 
+# +# Sample build instructions: +# +# docker build --progress=plain -t nvidia-pt-od:2310 -f Dockerfile-nvidia.dockerfile . +# rm /fsx/nvidia-pt-od__2310.sqsh ; enroot import -o /fsx/nvidia-pt-od__2310.sqsh dockerd://nvidia-pt-od:2310 +# +# Compute nodes (aka build nodes) are transient, so we need to keep the docker image on shared fs, +# which head node can load into its local registry. +# +# # Build node: save image to file +# docker save nvidia-pt-od:2310 > /fsx/nvidia-pt-od__2310.tar +# +# # Load image to local docker registry -> on head node, or new compute/build node. +# docker load < /fsx/nvidia-pt-od__2310.tar +#################################################################################################### +FROM nvcr.io/nvidia/pytorch:23.10-py3 +ENV DEBIAN_FRONTEND=noninteractive + +# The three must-be-built packages. +ENV EFA_INSTALLER_VERSION=1.28.0 +ENV AWS_OFI_NCCL_VERSION=1.7.3-aws +ENV NCCL_TESTS_VERSION=master + +RUN apt-get update -y +RUN apt-get remove -y --allow-change-held-packages \ + libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 + +# We noticed that since 23.09, we can't just delete the whole /opt/hpcx/, otherwise `import torch` +# complains about missing libuc?.so. 
+RUN rm -rf /opt/hpcx/ompi \ + && rm -rf /usr/local/mpi \ + && ldconfig +ENV OPAL_PREFIX= +RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \ + git \ + gcc \ + vim \ + kmod \ + openssh-client \ + openssh-server \ + build-essential \ + curl \ + autoconf \ + libtool \ + gdb \ + automake \ + cmake \ + apt-utils \ + libhwloc-dev \ + aptitude && \ + DEBIAN_FRONTEND=noninteractive apt autoremove -y + +# EFA +RUN apt-get update && \ + cd /tmp && \ + curl -O https://efa-installer.amazonaws.com/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \ + tar -xf aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz && \ + cd aws-efa-installer && \ + ./efa_installer.sh -y -g -d --skip-kmod --skip-limit-conf --no-verify && \ + ldconfig && \ + rm -rf /tmp/aws-efa-installer /var/lib/apt/lists/* +ENV LD_LIBRARY_PATH=/opt/amazon/efa/lib:$LD_LIBRARY_PATH +ENV PATH=/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:$PATH + + +#################################################################################################### +# [CUSTOM_NCCL_OPTION_1] Uncomment below stanza to install another NCCL version using the official +# binaries. +# +# NCCL EFA plugin (aws-ofi-nccl) depends on mpi, hence we must rebuild openmpi before building the +# aws-ofi-ccnl. +#################################################################################################### +##ENV NCCL_VERSION=2.19.3-1 # libfabric gave NCCL error. 
Known issue on efa-installer<=1.28.0
+#ENV NCCL_VERSION=2.18.6-1
+#RUN cd /opt && \
+#    wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-keyring_1.0-1_all.deb && \
+#    dpkg -i cuda-keyring_1.0-1_all.deb && \
+#    apt update && \
+#    apt install -y libnccl2==${NCCL_VERSION} libnccl-dev==${NCCL_VERSION} && \
+#    echo NCCL_SOCKET_IFNAME=^docker0,lo >> /etc/nccl.conf
+
+
+####################################################################################################
+# [CUSTOM_NCCL_OPTION_2] Install NCCL from source to the same location as the built-in ones. The
+# benefits of installing to the same location as the built-in version are:
+#
+# 1. There's only ever a single libnccl version offered by this image, preventing applications from
+#    mistakenly choosing a wrong version.
+# 2. No longer needing extra settings for LD_LIBRARY_PATH or LD_PRELOAD.
+#
+# NCCL EFA plugin (aws-ofi-nccl) depends on mpi, hence we must rebuild openmpi before building the
+# aws-ofi-nccl.
+####################################################################################################
+#ENV NCCL_VERSION=2.19.3-1  # libfabric gave NCCL error. Known issue on efa-installer<=1.28.0
+ENV NCCL_VERSION=2.18.6-1
+RUN apt-get remove -y libnccl2 libnccl-dev \
+    && cd /tmp \
+    && git clone https://github.com/NVIDIA/nccl.git -b v${NCCL_VERSION} \
+    && cd nccl \
+    && make -j src.build BUILDDIR=/usr \
+       # Build for p4 & p5.
+       NVCC_GENCODE="-gencode=arch=compute_90,code=sm_90 -gencode=arch=compute_80,code=sm_80" \
+    && rm -rf /tmp/nccl \
+    && echo NCCL_SOCKET_IFNAME=^docker0,lo >> /etc/nccl.conf
+
+
+####################################################################################################
+# Rebuild OpenMPI with custom PMIX version. E.g., to match what host's Slurm is built with (see
+# /opt/pmix/ on host, or run pmix_info on host).
+#
+# May be needed on rare occasions when `srun --mpi=pmix --container-image=... `
+# mysteriously crashes.
+#
+# NCCL EFA plugin (aws-ofi-nccl) depends on mpi, hence we must rebuild openmpi before building the
+# aws-ofi-nccl.
+####################################################################################################
+ENV OPEN_MPI_PATH=/opt/amazon/openmpi
+
+# OpenMPI build script claims PMIX_VERSION, and complains if we use it.
+ENV CUSTOM_PMIX_VERSION=4.2.6
+RUN apt-get update && apt-get install -y libevent-dev \
+    && cd /tmp \
+    && wget https://github.com/openpmix/openpmix/releases/download/v${CUSTOM_PMIX_VERSION}/pmix-${CUSTOM_PMIX_VERSION}.tar.gz \
+    && tar -xzf pmix-${CUSTOM_PMIX_VERSION}.tar.gz \
+    && rm pmix-${CUSTOM_PMIX_VERSION}.tar.gz \
+    && cd pmix-${CUSTOM_PMIX_VERSION}/ \
+    && ./autogen.pl \
+    && ./configure --prefix=/opt/pmix \
+    && make -j \
+    && make install \
+    && echo /opt/pmix/lib > /etc/ld.so.conf.d/pmix.conf \
+    && ldconfig \
+    && cd / \
+    && rm -fr /tmp/pmix-${CUSTOM_PMIX_VERSION}/
+# To silence this runtime error message:
+#   [p4de-st-p4de-2:110912] PMIX ERROR: ERROR in file gds_ds12_lock_pthread.c at line 168
+# NOTE: `export` is not a valid Dockerfile instruction; these must be ENV to take effect.
+ENV PMIX_GDS_MODULE=^ds12
+ENV PMIX_MCA_gds=^ds12
+
+# Rebuild openmpi with DLC style (which it remarks as "without libfabric"), with the above pmix.
+ENV OMPI_VERSION=4.1.6
+RUN rm -fr ${OPEN_MPI_PATH} \
+    && mkdir /tmp/openmpi \
+    && cd /tmp/openmpi \
+    && wget --quiet https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-${OMPI_VERSION}.tar.gz \
+    && tar zxf openmpi-${OMPI_VERSION}.tar.gz \
+    && rm openmpi-${OMPI_VERSION}.tar.gz \
+    && cd openmpi-${OMPI_VERSION} \
+    && ./configure --enable-orterun-prefix-by-default --prefix=$OPEN_MPI_PATH --with-cuda=${CUDA_HOME} --with-slurm --with-pmix=/opt/pmix \
+    && make -j $(nproc) all \
+    && make install \
+    && ldconfig \
+    && cd / \
+    && rm -rf /tmp/openmpi \
+    && ompi_info --parsable --all | grep mpi_built_with_cuda_support:value \
+    # Verify pmix from /opt/pmix/
+    && ldd /opt/amazon/openmpi/lib/openmpi/mca_pmix_ext3x.so | grep '/opt/pmix/lib/libpmix.so.* ' > /opt/amazon/openmpi-pmix.txt
+####################################################################################################
+
+
+# NCCL EFA Plugin
+RUN mkdir -p /tmp && \
+    cd /tmp && \
+    curl -LO https://github.com/aws/aws-ofi-nccl/archive/refs/tags/v${AWS_OFI_NCCL_VERSION}.tar.gz && \
+    tar -xzf /tmp/v${AWS_OFI_NCCL_VERSION}.tar.gz && \
+    rm /tmp/v${AWS_OFI_NCCL_VERSION}.tar.gz && \
+    mv aws-ofi-nccl-${AWS_OFI_NCCL_VERSION} aws-ofi-nccl && \
+    cd /tmp/aws-ofi-nccl && \
+    ./autogen.sh && \
+    ./configure --prefix=/opt/amazon/efa \
+        --with-libfabric=/opt/amazon/efa \
+        --with-cuda=/usr/local/cuda \
+        --enable-platform-aws \
+        --with-mpi=/opt/amazon/openmpi && \
+    make -j$(nproc) install && \
+    rm -rf /tmp/aws-ofi-nccl
+
+# Do this to minimize the ld path env vars that users need to define when running this image.
+RUN echo "/usr/local/lib" >> /etc/ld.so.conf.d/local.conf && \ + echo "/opt/amazon/openmpi/lib" >> /etc/ld.so.conf.d/efa.conf && \ + ldconfig + +ENV OMPI_MCA_pml=^cm,ucx \ + OMPI_MCA_btl=tcp,self \ + OMPI_MCA_btl_tcp_if_exclude=lo,docker0 \ + OPAL_PREFIX=/opt/amazon/openmpi \ + # https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 + # https://github.com/pytorch/pytorch/issues/68893 + NCCL_SOCKET_IFNAME=^docker,lo + +ENV FI_PROVIDER=efa \ + LD_LIBRARY_PATH="/usr/local/lib:/usr/local/cuda/lib64:${LD_LIBRARY_PATH}" + +# NCCL-tests: always good to include this as a diagnostic tool. +RUN git clone https://github.com/NVIDIA/nccl-tests.git /opt/nccl-tests \ + && cd /opt/nccl-tests \ + && git checkout ${NCCL_TESTS_VERSION} \ + && make MPI=1 \ + MPI_HOME=/opt/amazon/openmpi \ + CUDA_HOME=/usr/local/cuda \ + NVCC_GENCODE="-gencode=arch=compute_90,code=sm_90 -gencode=arch=compute_80,code=sm_80" + + +#################################################################################################### +# Custom packages. Disable as you like. NOTE: always check `pip list` what's been installed. For +# example, the base container comes pre-installed with Transformer Engine, flash attention, triton +# (https://github.com/openai/triton/), etc. +#################################################################################################### +# Install the xformers dependency from source, because pip install either breaks or try to pull +# its own pt + cuda. +# +# Pre-requisite: build node has enough memory to compile xformers. More info on the stanza. +RUN export TORCH_CUDA_ARCH_LIST="8.0;9.0+PTX" && \ + # On p4de.24xlarge: + # - MAX_JOBS=16 => 145GB memory + # - MAX_JOBS=32 => 241GB memory + # - MAX_JOBS=48 => 243GB memory, 542.5s + # + # NOTE: must export MAX_JOBS. For some reason, `MAX_JOBS=16 pip install ...` doesn't seem to + # work to prevent OOM. 
+ export MAX_JOBS=32 && \ + export NVCC_PREPEND_FLAGS="-t 32" && \ + pip install -v -U git+https://github.com/facebookresearch/xformers.git@main#egg=xformers From 3357f09789b38026441f1ede0470ec2d18c68692 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Mon, 6 Nov 2023 12:20:24 -0800 Subject: [PATCH 188/648] Update 1.nccl-tests.sbatch Remove `#SBATCH --export=nil` from submit file. --- 4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch b/4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch index 353b5191..7654e21e 100644 --- a/4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch +++ b/4.validation_scripts/0.nccl-tests/1.nccl-tests.sbatch @@ -9,7 +9,6 @@ #SBATCH --gres=gpu:8 # number of GPU we reserve #SBATCH --exclusive #SBATCH --wait-all-nodes=1 -#SBATCH --export=NIL # do not export env vars from the host env ### Disable hyperthreading by setting the tasks per core to 1 #SBATCH --ntasks-per-core=1 @@ -42,4 +41,4 @@ export NCCL_DEBUG=INFO declare -a ARGS=( --container-image $IMAGE ) -srun -l "${ARGS[@]}" --mpi=pmix $NCCL_TESTS_PATH/scatter_perf -b 8 -e 2G -f 2 -g 1 -c 1 -n 100 \ No newline at end of file +srun -l "${ARGS[@]}" --mpi=pmix $NCCL_TESTS_PATH/scatter_perf -b 8 -e 2G -f 2 -g 1 -c 1 -n 100 From 5ffb11889f147ee649197242ca1ea291ce2019fc Mon Sep 17 00:00:00 2001 From: guilhermemaiaribeiro <66248786+guilhermemaiaribeiro@users.noreply.github.com> Date: Tue, 7 Nov 2023 13:53:29 +0000 Subject: [PATCH 189/648] Update README.md --- 2.amazon_machine_images/README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/2.amazon_machine_images/README.md b/2.amazon_machine_images/README.md index 5bd146c0..24cb4b00 100644 --- a/2.amazon_machine_images/README.md +++ b/2.amazon_machine_images/README.md @@ -9,6 +9,11 @@ To build images you will need: - **Packer**: it can be downloaded via [Hashicorp](https://www.packer.io/)'s website, you can also 
use [`brew`](https://formulae.brew.sh/formula/packer#default) on OSX. - **Ansible**: get it via your package manager, we recommend via [`brew`](https://formulae.brew.sh/formula/ansible#default) if using OSX. +Initialize Packer (will install plugins): +```bash +packer init -upgrade packer-ami.pkr.hcl +``` + ### Build a custom AMI Assuming that GNU Make, Packer and Ansible installed, you can build AMIs by typing `make` in your terminal with an argument corresponding to the desired AMI you want to build. From ea573ce519997ef7030b12728bdf4a12558dac65 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Thu, 9 Nov 2023 17:13:35 +1100 Subject: [PATCH 190/648] Fix syntax error on mkdir --- 3.test_cases/1.megatron-lm/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/1.megatron-lm/README.md b/3.test_cases/1.megatron-lm/README.md index 93fdd669..370e0457 100644 --- a/3.test_cases/1.megatron-lm/README.md +++ b/3.test_cases/1.megatron-lm/README.md @@ -84,7 +84,7 @@ Below are the steps you need to follow: ```bash #!/bin/bash - mkdir -p cd ${DATA_PATH}/gpt2 + mkdir -p ${DATA_PATH}/gpt2 cd ${DATA_PATH}/gpt2 wget https://huggingface.co/bigscience/misc-test-data/resolve/main/stas/oscar-1GB.jsonl.xz From 1970b41de59382499097280a57f2d85f77aa68bc Mon Sep 17 00:00:00 2001 From: Verdi March Date: Thu, 9 Nov 2023 17:18:38 +1100 Subject: [PATCH 191/648] Move dockerfile template together with ami template --- {2.docker_images_template => 2.ami_and_docker}/Dockerfile | 0 {2.amazon_machine_images => 2.ami_and_docker}/Makefile | 0 {2.amazon_machine_images => 2.ami_and_docker}/README.md | 0 .../inventory/group_vars/all.yml | 0 {2.amazon_machine_images => 2.ami_and_docker}/inventory/hosts | 0 {2.amazon_machine_images => 2.ami_and_docker}/packer-ami.pkr.hcl | 0 .../playbook-dlami-gpu.yml | 0 .../playbook-dlami-neuron.yml | 0 .../playbook-eks-gpu.yml | 0 .../playbook-pcluster-cpu.yml | 0 .../playbook-pcluster-gpu.yml | 0 .../playbook-pcluster-neuron.yml | 0 
.../roles/aws_cliv2/molecule/default/converge.yml | 0 .../roles/aws_cliv2/molecule/default/molecule.yml | 0 .../roles/aws_cliv2/molecule/default/prepare.yml | 0 .../roles/aws_cliv2/molecule/default/verify.yml | 0 .../roles/aws_cliv2/tasks/main.yml | 0 .../roles/aws_efa/defaults/main.yml | 0 .../roles/aws_efa/tasks/main.yml | 0 .../roles/aws_efa_ofi/tasks/main.yml | 0 .../roles/aws_lustre/defaults/main.yml | 0 .../roles/aws_lustre/tasks/main.yml | 0 .../roles/base/defaults/main.yml | 0 .../roles/base/tasks/main.yml | 0 .../roles/docker/defaults/main.yml | 0 .../roles/docker/tasks/main.yml | 0 .../roles/neuron_driver/defaults/main.yml | 0 .../roles/neuron_driver/tasks/main.yml | 0 .../roles/nvidia_cuda/defaults/main.yml | 0 .../roles/nvidia_cuda/tasks/main.yml | 0 .../roles/nvidia_docker/defaults/main.yml | 0 .../roles/nvidia_docker/tasks/main.yml | 0 .../roles/nvidia_driver/defaults/main.yml | 0 .../nvidia_driver/files/nvidia-persistenced-override.service | 0 .../roles/nvidia_driver/tasks/main.yml | 0 .../roles/nvidia_enroot_pyxis/defaults/main.yml | 0 .../roles/nvidia_enroot_pyxis/tasks/main.yml | 0 .../roles/nvidia_enroot_pyxis/templates/enroot.conf | 0 .../roles/nvidia_gdrcopy/defaults/main.yml | 0 .../roles/nvidia_gdrcopy/tasks/main.yml | 0 .../roles/nvidia_nccl/defaults/main.yml | 0 .../roles/nvidia_nccl/tasks/main.yml | 0 .../roles/observability/defaults/main.yml | 0 .../roles/observability/tasks/main.yml | 0 .../roles/packages/defaults/main.yml | 0 .../roles/packages/tasks/main.yml | 0 .../roles/pytorch_neuron/defaults/main.yml | 0 .../roles/pytorch_neuron/tasks/main.yml | 0 48 files changed, 0 insertions(+), 0 deletions(-) rename {2.docker_images_template => 2.ami_and_docker}/Dockerfile (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/Makefile (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/README.md (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/inventory/group_vars/all.yml (100%) rename {2.amazon_machine_images => 
2.ami_and_docker}/inventory/hosts (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/packer-ami.pkr.hcl (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/playbook-dlami-gpu.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/playbook-dlami-neuron.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/playbook-eks-gpu.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/playbook-pcluster-cpu.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/playbook-pcluster-gpu.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/playbook-pcluster-neuron.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/aws_cliv2/molecule/default/converge.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/aws_cliv2/molecule/default/molecule.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/aws_cliv2/molecule/default/prepare.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/aws_cliv2/molecule/default/verify.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/aws_cliv2/tasks/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/aws_efa/defaults/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/aws_efa/tasks/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/aws_efa_ofi/tasks/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/aws_lustre/defaults/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/aws_lustre/tasks/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/base/defaults/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/base/tasks/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/docker/defaults/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/docker/tasks/main.yml (100%) rename {2.amazon_machine_images => 
2.ami_and_docker}/roles/neuron_driver/defaults/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/neuron_driver/tasks/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/nvidia_cuda/defaults/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/nvidia_cuda/tasks/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/nvidia_docker/defaults/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/nvidia_docker/tasks/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/nvidia_driver/defaults/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/nvidia_driver/files/nvidia-persistenced-override.service (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/nvidia_driver/tasks/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/nvidia_enroot_pyxis/defaults/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/nvidia_enroot_pyxis/tasks/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/nvidia_enroot_pyxis/templates/enroot.conf (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/nvidia_gdrcopy/defaults/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/nvidia_gdrcopy/tasks/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/nvidia_nccl/defaults/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/nvidia_nccl/tasks/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/observability/defaults/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/observability/tasks/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/packages/defaults/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/packages/tasks/main.yml (100%) rename {2.amazon_machine_images => 
2.ami_and_docker}/roles/pytorch_neuron/defaults/main.yml (100%) rename {2.amazon_machine_images => 2.ami_and_docker}/roles/pytorch_neuron/tasks/main.yml (100%) diff --git a/2.docker_images_template/Dockerfile b/2.ami_and_docker/Dockerfile similarity index 100% rename from 2.docker_images_template/Dockerfile rename to 2.ami_and_docker/Dockerfile diff --git a/2.amazon_machine_images/Makefile b/2.ami_and_docker/Makefile similarity index 100% rename from 2.amazon_machine_images/Makefile rename to 2.ami_and_docker/Makefile diff --git a/2.amazon_machine_images/README.md b/2.ami_and_docker/README.md similarity index 100% rename from 2.amazon_machine_images/README.md rename to 2.ami_and_docker/README.md diff --git a/2.amazon_machine_images/inventory/group_vars/all.yml b/2.ami_and_docker/inventory/group_vars/all.yml similarity index 100% rename from 2.amazon_machine_images/inventory/group_vars/all.yml rename to 2.ami_and_docker/inventory/group_vars/all.yml diff --git a/2.amazon_machine_images/inventory/hosts b/2.ami_and_docker/inventory/hosts similarity index 100% rename from 2.amazon_machine_images/inventory/hosts rename to 2.ami_and_docker/inventory/hosts diff --git a/2.amazon_machine_images/packer-ami.pkr.hcl b/2.ami_and_docker/packer-ami.pkr.hcl similarity index 100% rename from 2.amazon_machine_images/packer-ami.pkr.hcl rename to 2.ami_and_docker/packer-ami.pkr.hcl diff --git a/2.amazon_machine_images/playbook-dlami-gpu.yml b/2.ami_and_docker/playbook-dlami-gpu.yml similarity index 100% rename from 2.amazon_machine_images/playbook-dlami-gpu.yml rename to 2.ami_and_docker/playbook-dlami-gpu.yml diff --git a/2.amazon_machine_images/playbook-dlami-neuron.yml b/2.ami_and_docker/playbook-dlami-neuron.yml similarity index 100% rename from 2.amazon_machine_images/playbook-dlami-neuron.yml rename to 2.ami_and_docker/playbook-dlami-neuron.yml diff --git a/2.amazon_machine_images/playbook-eks-gpu.yml b/2.ami_and_docker/playbook-eks-gpu.yml similarity index 100% rename from 
2.amazon_machine_images/playbook-eks-gpu.yml rename to 2.ami_and_docker/playbook-eks-gpu.yml diff --git a/2.amazon_machine_images/playbook-pcluster-cpu.yml b/2.ami_and_docker/playbook-pcluster-cpu.yml similarity index 100% rename from 2.amazon_machine_images/playbook-pcluster-cpu.yml rename to 2.ami_and_docker/playbook-pcluster-cpu.yml diff --git a/2.amazon_machine_images/playbook-pcluster-gpu.yml b/2.ami_and_docker/playbook-pcluster-gpu.yml similarity index 100% rename from 2.amazon_machine_images/playbook-pcluster-gpu.yml rename to 2.ami_and_docker/playbook-pcluster-gpu.yml diff --git a/2.amazon_machine_images/playbook-pcluster-neuron.yml b/2.ami_and_docker/playbook-pcluster-neuron.yml similarity index 100% rename from 2.amazon_machine_images/playbook-pcluster-neuron.yml rename to 2.ami_and_docker/playbook-pcluster-neuron.yml diff --git a/2.amazon_machine_images/roles/aws_cliv2/molecule/default/converge.yml b/2.ami_and_docker/roles/aws_cliv2/molecule/default/converge.yml similarity index 100% rename from 2.amazon_machine_images/roles/aws_cliv2/molecule/default/converge.yml rename to 2.ami_and_docker/roles/aws_cliv2/molecule/default/converge.yml diff --git a/2.amazon_machine_images/roles/aws_cliv2/molecule/default/molecule.yml b/2.ami_and_docker/roles/aws_cliv2/molecule/default/molecule.yml similarity index 100% rename from 2.amazon_machine_images/roles/aws_cliv2/molecule/default/molecule.yml rename to 2.ami_and_docker/roles/aws_cliv2/molecule/default/molecule.yml diff --git a/2.amazon_machine_images/roles/aws_cliv2/molecule/default/prepare.yml b/2.ami_and_docker/roles/aws_cliv2/molecule/default/prepare.yml similarity index 100% rename from 2.amazon_machine_images/roles/aws_cliv2/molecule/default/prepare.yml rename to 2.ami_and_docker/roles/aws_cliv2/molecule/default/prepare.yml diff --git a/2.amazon_machine_images/roles/aws_cliv2/molecule/default/verify.yml b/2.ami_and_docker/roles/aws_cliv2/molecule/default/verify.yml similarity index 100% rename from 
2.amazon_machine_images/roles/aws_cliv2/molecule/default/verify.yml rename to 2.ami_and_docker/roles/aws_cliv2/molecule/default/verify.yml diff --git a/2.amazon_machine_images/roles/aws_cliv2/tasks/main.yml b/2.ami_and_docker/roles/aws_cliv2/tasks/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/aws_cliv2/tasks/main.yml rename to 2.ami_and_docker/roles/aws_cliv2/tasks/main.yml diff --git a/2.amazon_machine_images/roles/aws_efa/defaults/main.yml b/2.ami_and_docker/roles/aws_efa/defaults/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/aws_efa/defaults/main.yml rename to 2.ami_and_docker/roles/aws_efa/defaults/main.yml diff --git a/2.amazon_machine_images/roles/aws_efa/tasks/main.yml b/2.ami_and_docker/roles/aws_efa/tasks/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/aws_efa/tasks/main.yml rename to 2.ami_and_docker/roles/aws_efa/tasks/main.yml diff --git a/2.amazon_machine_images/roles/aws_efa_ofi/tasks/main.yml b/2.ami_and_docker/roles/aws_efa_ofi/tasks/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/aws_efa_ofi/tasks/main.yml rename to 2.ami_and_docker/roles/aws_efa_ofi/tasks/main.yml diff --git a/2.amazon_machine_images/roles/aws_lustre/defaults/main.yml b/2.ami_and_docker/roles/aws_lustre/defaults/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/aws_lustre/defaults/main.yml rename to 2.ami_and_docker/roles/aws_lustre/defaults/main.yml diff --git a/2.amazon_machine_images/roles/aws_lustre/tasks/main.yml b/2.ami_and_docker/roles/aws_lustre/tasks/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/aws_lustre/tasks/main.yml rename to 2.ami_and_docker/roles/aws_lustre/tasks/main.yml diff --git a/2.amazon_machine_images/roles/base/defaults/main.yml b/2.ami_and_docker/roles/base/defaults/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/base/defaults/main.yml rename to 
2.ami_and_docker/roles/base/defaults/main.yml diff --git a/2.amazon_machine_images/roles/base/tasks/main.yml b/2.ami_and_docker/roles/base/tasks/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/base/tasks/main.yml rename to 2.ami_and_docker/roles/base/tasks/main.yml diff --git a/2.amazon_machine_images/roles/docker/defaults/main.yml b/2.ami_and_docker/roles/docker/defaults/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/docker/defaults/main.yml rename to 2.ami_and_docker/roles/docker/defaults/main.yml diff --git a/2.amazon_machine_images/roles/docker/tasks/main.yml b/2.ami_and_docker/roles/docker/tasks/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/docker/tasks/main.yml rename to 2.ami_and_docker/roles/docker/tasks/main.yml diff --git a/2.amazon_machine_images/roles/neuron_driver/defaults/main.yml b/2.ami_and_docker/roles/neuron_driver/defaults/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/neuron_driver/defaults/main.yml rename to 2.ami_and_docker/roles/neuron_driver/defaults/main.yml diff --git a/2.amazon_machine_images/roles/neuron_driver/tasks/main.yml b/2.ami_and_docker/roles/neuron_driver/tasks/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/neuron_driver/tasks/main.yml rename to 2.ami_and_docker/roles/neuron_driver/tasks/main.yml diff --git a/2.amazon_machine_images/roles/nvidia_cuda/defaults/main.yml b/2.ami_and_docker/roles/nvidia_cuda/defaults/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/nvidia_cuda/defaults/main.yml rename to 2.ami_and_docker/roles/nvidia_cuda/defaults/main.yml diff --git a/2.amazon_machine_images/roles/nvidia_cuda/tasks/main.yml b/2.ami_and_docker/roles/nvidia_cuda/tasks/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/nvidia_cuda/tasks/main.yml rename to 2.ami_and_docker/roles/nvidia_cuda/tasks/main.yml diff --git 
a/2.amazon_machine_images/roles/nvidia_docker/defaults/main.yml b/2.ami_and_docker/roles/nvidia_docker/defaults/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/nvidia_docker/defaults/main.yml rename to 2.ami_and_docker/roles/nvidia_docker/defaults/main.yml diff --git a/2.amazon_machine_images/roles/nvidia_docker/tasks/main.yml b/2.ami_and_docker/roles/nvidia_docker/tasks/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/nvidia_docker/tasks/main.yml rename to 2.ami_and_docker/roles/nvidia_docker/tasks/main.yml diff --git a/2.amazon_machine_images/roles/nvidia_driver/defaults/main.yml b/2.ami_and_docker/roles/nvidia_driver/defaults/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/nvidia_driver/defaults/main.yml rename to 2.ami_and_docker/roles/nvidia_driver/defaults/main.yml diff --git a/2.amazon_machine_images/roles/nvidia_driver/files/nvidia-persistenced-override.service b/2.ami_and_docker/roles/nvidia_driver/files/nvidia-persistenced-override.service similarity index 100% rename from 2.amazon_machine_images/roles/nvidia_driver/files/nvidia-persistenced-override.service rename to 2.ami_and_docker/roles/nvidia_driver/files/nvidia-persistenced-override.service diff --git a/2.amazon_machine_images/roles/nvidia_driver/tasks/main.yml b/2.ami_and_docker/roles/nvidia_driver/tasks/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/nvidia_driver/tasks/main.yml rename to 2.ami_and_docker/roles/nvidia_driver/tasks/main.yml diff --git a/2.amazon_machine_images/roles/nvidia_enroot_pyxis/defaults/main.yml b/2.ami_and_docker/roles/nvidia_enroot_pyxis/defaults/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/nvidia_enroot_pyxis/defaults/main.yml rename to 2.ami_and_docker/roles/nvidia_enroot_pyxis/defaults/main.yml diff --git a/2.amazon_machine_images/roles/nvidia_enroot_pyxis/tasks/main.yml b/2.ami_and_docker/roles/nvidia_enroot_pyxis/tasks/main.yml similarity 
index 100% rename from 2.amazon_machine_images/roles/nvidia_enroot_pyxis/tasks/main.yml rename to 2.ami_and_docker/roles/nvidia_enroot_pyxis/tasks/main.yml diff --git a/2.amazon_machine_images/roles/nvidia_enroot_pyxis/templates/enroot.conf b/2.ami_and_docker/roles/nvidia_enroot_pyxis/templates/enroot.conf similarity index 100% rename from 2.amazon_machine_images/roles/nvidia_enroot_pyxis/templates/enroot.conf rename to 2.ami_and_docker/roles/nvidia_enroot_pyxis/templates/enroot.conf diff --git a/2.amazon_machine_images/roles/nvidia_gdrcopy/defaults/main.yml b/2.ami_and_docker/roles/nvidia_gdrcopy/defaults/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/nvidia_gdrcopy/defaults/main.yml rename to 2.ami_and_docker/roles/nvidia_gdrcopy/defaults/main.yml diff --git a/2.amazon_machine_images/roles/nvidia_gdrcopy/tasks/main.yml b/2.ami_and_docker/roles/nvidia_gdrcopy/tasks/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/nvidia_gdrcopy/tasks/main.yml rename to 2.ami_and_docker/roles/nvidia_gdrcopy/tasks/main.yml diff --git a/2.amazon_machine_images/roles/nvidia_nccl/defaults/main.yml b/2.ami_and_docker/roles/nvidia_nccl/defaults/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/nvidia_nccl/defaults/main.yml rename to 2.ami_and_docker/roles/nvidia_nccl/defaults/main.yml diff --git a/2.amazon_machine_images/roles/nvidia_nccl/tasks/main.yml b/2.ami_and_docker/roles/nvidia_nccl/tasks/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/nvidia_nccl/tasks/main.yml rename to 2.ami_and_docker/roles/nvidia_nccl/tasks/main.yml diff --git a/2.amazon_machine_images/roles/observability/defaults/main.yml b/2.ami_and_docker/roles/observability/defaults/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/observability/defaults/main.yml rename to 2.ami_and_docker/roles/observability/defaults/main.yml diff --git a/2.amazon_machine_images/roles/observability/tasks/main.yml 
b/2.ami_and_docker/roles/observability/tasks/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/observability/tasks/main.yml rename to 2.ami_and_docker/roles/observability/tasks/main.yml diff --git a/2.amazon_machine_images/roles/packages/defaults/main.yml b/2.ami_and_docker/roles/packages/defaults/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/packages/defaults/main.yml rename to 2.ami_and_docker/roles/packages/defaults/main.yml diff --git a/2.amazon_machine_images/roles/packages/tasks/main.yml b/2.ami_and_docker/roles/packages/tasks/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/packages/tasks/main.yml rename to 2.ami_and_docker/roles/packages/tasks/main.yml diff --git a/2.amazon_machine_images/roles/pytorch_neuron/defaults/main.yml b/2.ami_and_docker/roles/pytorch_neuron/defaults/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/pytorch_neuron/defaults/main.yml rename to 2.ami_and_docker/roles/pytorch_neuron/defaults/main.yml diff --git a/2.amazon_machine_images/roles/pytorch_neuron/tasks/main.yml b/2.ami_and_docker/roles/pytorch_neuron/tasks/main.yml similarity index 100% rename from 2.amazon_machine_images/roles/pytorch_neuron/tasks/main.yml rename to 2.ami_and_docker/roles/pytorch_neuron/tasks/main.yml From fd39a5d128fc0d26f0a833190fdb8dd021555467 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Mon, 13 Nov 2023 09:26:05 +0900 Subject: [PATCH 192/648] Update .gitignore Avoid accidental commit of `sqsh` files. 
--- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 53a44a27..2ba0f997 100644 --- a/.gitignore +++ b/.gitignore @@ -36,3 +36,6 @@ spark-warehouse # slurm outputs *.out *.err + +# Enroot container image +*.sqsh From adb06c9986bb1e005ee5d428c114e100094e5ca4 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Mon, 13 Nov 2023 00:29:45 +0000 Subject: [PATCH 193/648] add tensorflow distributed training example update readme add slurm script add sample training scripts --- .../0.tensorflow.Dockerfile | 89 +++++++++++++++ .../1.run-training.sbatch | 71 ++++++++++++ .../7.tensorflow-distributed/Makefile | 12 ++ .../7.tensorflow-distributed/README.md | 104 ++++++++++++++++++ .../7.tensorflow-distributed/src/worker.py | 67 +++++++++++ 5 files changed, 343 insertions(+) create mode 100644 3.test_cases/7.tensorflow-distributed/0.tensorflow.Dockerfile create mode 100644 3.test_cases/7.tensorflow-distributed/1.run-training.sbatch create mode 100644 3.test_cases/7.tensorflow-distributed/Makefile create mode 100644 3.test_cases/7.tensorflow-distributed/README.md create mode 100644 3.test_cases/7.tensorflow-distributed/src/worker.py diff --git a/3.test_cases/7.tensorflow-distributed/0.tensorflow.Dockerfile b/3.test_cases/7.tensorflow-distributed/0.tensorflow.Dockerfile new file mode 100644 index 00000000..52e29861 --- /dev/null +++ b/3.test_cases/7.tensorflow-distributed/0.tensorflow.Dockerfile @@ -0,0 +1,89 @@ +FROM nvcr.io/nvidia/tensorflow:23.10-tf2-py3 + +ARG EFA_INSTALLER_VERSION=latest +ARG AWS_OFI_NCCL_VERSION=v1.7.3-aws +ARG NCCL_TESTS_VERSION=master +ARG NCCL_VERSION=2.18.5-1 +ARG LLM_FOUNDRY_VERSION=v0.3.0 +ARG OPEN_MPI_PATH=/opt/amazon/openmpi + +RUN apt-get update -y +RUN apt-get remove -y --allow-change-held-packages \ + libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 \ + libnccl2 libnccl-dev libibnetdisc5 libibmad5 libibumad3 +RUN rm -rf /opt/hpcx \ + && rm -rf /usr/local/mpi \ + && rm -rf /usr/local/ucx \ + && rm -f 
/etc/ld.so.conf.d/hpcx.conf \ + && ldconfig +ENV OPAL_PREFIX= + +RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \ + git \ + gcc \ + vim \ + kmod \ + openssh-client \ + openssh-server \ + build-essential \ + curl \ + autoconf \ + libtool \ + gdb \ + automake \ + cmake \ + apt-utils \ + libhwloc-dev \ + aptitude && \ + DEBIAN_FRONTEND=noninteractive apt-get autoremove -y + +ENV LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu/:/usr/local/cuda/extras/CUPTI/lib64:/opt/amazon/openmpi/lib:/opt/nccl/build/lib:/opt/amazon/efa/lib:/opt/aws-ofi-nccl/install/lib:$LD_LIBRARY_PATH +ENV PATH=/opt/amazon/openmpi/bin/:/opt/amazon/efa/bin:/usr/bin:/usr/local/bin:$PATH + +RUN pip install awscli pynvml + +################################################# +## Install EFA installer +RUN cd $HOME \ + && curl -O https://efa-installer.amazonaws.com/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz \ + && tar -xf $HOME/aws-efa-installer-${EFA_INSTALLER_VERSION}.tar.gz \ + && cd aws-efa-installer \ + && ./efa_installer.sh -y -g -d --skip-kmod --skip-limit-conf --no-verify \ + && rm -rf $HOME/aws-efa-installer + +################################################### +## Install NCCL +RUN cd /tmp \ + && git clone https://github.com/NVIDIA/nccl.git -b v${NCCL_VERSION} \ + && cd nccl \ + && make -j src.build BUILDDIR=/usr/local \ + # nvcc to target p5 and p4 instances + NVCC_GENCODE="-gencode=arch=compute_90,code=sm_90 -gencode=arch=compute_80,code=sm_80" \ + && rm -rf /tmp/nccl + +# NCCL +RUN echo "/usr/local/lib" >> /etc/ld.so.conf.d/local.conf && \ + echo "/opt/amazon/efa/lib" >> /etc/ld.so.conf.d/efa.conf && \ + echo "/opt/amazon/openmpi/lib" >> /etc/ld.so.conf.d/efa.conf && \ + ldconfig + +################################################### +## Install AWS-OFI-NCCL plugin +RUN export OPAL_PREFIX="" \ + && git clone https://github.com/aws/aws-ofi-nccl.git /opt/aws-ofi-nccl \ + && cd /opt/aws-ofi-nccl \ + && git checkout ${AWS_OFI_NCCL_VERSION} \ + && ./autogen.sh \ + 
&& ./configure --prefix=/opt/aws-ofi-nccl/install \ + --with-libfabric=/opt/amazon/efa/ \ + --with-cuda=/usr/local/cuda \ + --with-nccl=/usr/local/nccl \ + --with-mpi=/opt/amazon/openmpi/ \ + && make -j && make install + +RUN rm -rf /var/lib/apt/lists/* + +RUN echo "hwloc_base_binding_policy = none" >> /opt/amazon/openmpi/etc/openmpi-mca-params.conf \ + && echo "rmaps_base_mapping_policy = slot" >> /opt/amazon/openmpi/etc/openmpi-mca-params.conf + +COPY src /src \ No newline at end of file diff --git a/3.test_cases/7.tensorflow-distributed/1.run-training.sbatch b/3.test_cases/7.tensorflow-distributed/1.run-training.sbatch new file mode 100644 index 00000000..4bc3e7a6 --- /dev/null +++ b/3.test_cases/7.tensorflow-distributed/1.run-training.sbatch @@ -0,0 +1,71 @@ +#!/bin/bash +#SBATCH --nodes=2 # number of nodes to use, 24 p4d(e) = 192 A100 GPUs +#SBATCH --ntasks=2 +#SBATCH --job-name=train-resnet-tf # name of your job +#SBATCH --output=logs/%x_%j.out # logfile for stdout +#SBATCH --error=logs/%x_%j.err # logfile for stderr, remove it to merge both outputs +#SBATCH --ntasks-per-node 1 # Number of GPU per node +#SBATCH --gpus-per-node=8 # Number of GPU per node +#SBATCH --gpus-per-task=8 # Number of GPU per node +#SBATCH --gres=gpu:8 # number of GPU we reserve +#SBATCH --exclusive + +set -euxo pipefail + +# default variables for Enroot +: "${APPS_PATH:=/apps}" +: "${IMAGE:=$APPS_PATH/tensorflow.sqsh}" +: "${DATA_PATH:=/fsx}" +: "${FSX_MOUNT:=$DATA_PATH:$DATA_PATH}" +: "${APPS_MOUNT:=$APPS_PATH:$APPS_PATH}" + +## EFA settings +export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d +export FI_EFA_FORK_SAFE=1 +# export NCCL_ALGO=Ring +export FI_LOG_LEVEL=1 +export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons +export FI_EFA_ENABLE_SHM_TRANSFER=1 +export FI_EFA_USE_HUGE_PAGE=0 +# https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352 +# https://github.com/pytorch/pytorch/issues/68893 +#export 
NCCL_SOCKET_IFNAME=ens +export NCCL_ASYNC_ERROR_HANDLING=1 +export NCCL_DEBUG=INFO + +# variables for Enroot +declare -a ARGS=( + --container-image $IMAGE + --container-mounts ${FSX_MOUNT},${APPS_MOUNT} +) + +NODES=( $( scontrol show hostnames $SLURM_JOB_NODELIST ) ) +NODES_ARRAY=($NODES) +HEAD_NODE=${NODES_ARRAY[0]} +MASTER_ADDR=$(hostname --ip-address) +MASTER_PORT=$RANDOM +NNODES=$SLURM_JOB_NUM_NODES +NPROC=$SLURM_GPUS_PER_NODE +WORLD_SIZE=$(( $NNODES * $NPROC )) +SCRIPTPATH="/apps/awsome-distributed-training/3.test_cases/7.tensorflow-distributed" + +function run_worker() { + # TODO TF_CONFIG + srun --nodelist=${NODE} --ntasks=1 -l "${ARGS[@]}" python /src/worker.py ${NODE_RANK} $( scontrol show hostnames $SLURM_JOB_NODELIST ) +} + + +echo "DUBUG" $NODES_ARRAY +# run the workers +NODE_RANK=1 +for (( NODE_RANK=1; NODE_RANK<${NNODES}; NODE_RANK++ )) +do + NODE=${NODES[$NODE_RANK]} + echo "Run worker node ${NODE} for rank: ${NODE_RANK}" + run_worker & +done +NODE_RANK=0 +NODE=${HEAD_NODE} +echo "Run main node ${NODE} for rank: ${NODE_RANK}" +run_worker +wait diff --git a/3.test_cases/7.tensorflow-distributed/Makefile b/3.test_cases/7.tensorflow-distributed/Makefile new file mode 100644 index 00000000..8916dc8a --- /dev/null +++ b/3.test_cases/7.tensorflow-distributed/Makefile @@ -0,0 +1,12 @@ +ENROOT_IMAGE=tensorflow + +all: build clean import + +build: + docker build -t ${ENROOT_IMAGE} -f 0.tensorflow.Dockerfile . 
+ +clean: + -rm ${ENROOT_IMAGE}.sqsh + +import: + enroot import -o ${ENROOT_IMAGE}.sqsh dockerd://tensorflow:latest diff --git a/3.test_cases/7.tensorflow-distributed/README.md b/3.test_cases/7.tensorflow-distributed/README.md new file mode 100644 index 00000000..088aad01 --- /dev/null +++ b/3.test_cases/7.tensorflow-distributed/README.md @@ -0,0 +1,104 @@ +# Tensorflow MultiWorkerMirroredStrategy test case + +`MultiWorkerMirroredStrategy` in TensorFlow is a strategy designed for synchronous training across multiple workers, typically in a multi-node setup. This strategy is a part of TensorFlow's distributed training API. Consult the [official Tensorflow documention](https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/MultiWorkerMirroredStrategy) for more information. + +This project contains: + +* AWS optimized `tensorflow` container image. +* Slurm scripts for the distributed training. + +## 1. Preparation + +This guide assumes that you have the following: + +* A functional Slurm cluster on AWS. +* Docker, [Pyxis](https://github.com/NVIDIA/pyxis) and [Enroot](https://github.com/NVIDIA/enroot) installed. +* An FSx for Lustre filesystem mounted on `/fsx`. + +We recommend that you setup a Slurm cluster using the templates in the architectures [directory](../../1.architectures). Before creating the Slurm cluster, you need to setup the following environment variables: + +```bash +export APPS_PATH=/apps +export ENROOT_IMAGE=$APPS_PATH/tensorflow.sqsh +export FSX_PATH=/fsx +export DATA_PATH=$FSX_PATH/mnist +export TEST_CASE_PATH=${HOME}/7.tensorflow-distributed # where you copy the test case or set to your test case path +cd $TEST_CASE_PATH +``` + +then follow the detailed instructions [here](../../1.architectures/2.aws-parallelcluster/README.md). + +## 2. Build the container + +Before running training jobs, you need to use an [Enroot](https://github.com/NVIDIA/enroot) container to retrieve and preprocess the input data. 
Below are the steps you need to follow: + +1. Copy the test case files to your cluster. You will need `0.tensorflow.Dockerfile`, +2. Build the Docker image with the command below in this directory. + + ```bash + docker build -t tensorflow -f 0.tensorflow.Dockerfile . + ``` + +3. Once the Docker image is built, you can check if it is present with `docker images`. You should see an output similar to this one: + + ```bash + REPOSITORY TAG IMAGE ID CREATED SIZE + tensorflow latest a94ca0003efb 23 minutes ago 15.3GB + ... + ``` + +4. Convert the Docker image to a squash file with the command below. + + ```bash + enroot import -o ${ENROOT_IMAGE} dockerd://tensorflow:latest + ``` + + The file will be stored in the `/apps` directory (default). The output should look as below. + + ```bash + [INFO] Fetching image + + 36a8c752c28a2db543d2a632a3fc1fcbd5789a6f3d45b9d3a24632420dedcfa8 + + [INFO] Extracting image content... + [INFO] Creating squashfs filesystem... + + Parallel mksquashfs: Using 32 processors + Creating 4.0 filesystem on /apps/llm-foundry.sqsh, block size 131072. + [========================================================================================================================================================================================================================-] 291068/291068 100% + + Exportable Squashfs 4.0 filesystem, gzip compressed, data block size 131072 + uncompressed data, uncompressed metadata, uncompressed fragments, uncompressed xattrs + duplicates are not removed + ... + ``` + +It will take around 5 minutes to convert the container image from Docker to the Enroot format. Once done proceed to the next stage. + +For ease of testing we've included a `Makefile` that automatically builds and imports the latest image. To run this, execute `make` or you can individually specify `make build` to build the Docker image, `make clean` to remove the squash file and `make import` to import the Dockerfile into enroot squash file. + +## 3. 
Run the train job + +Here, we will conduct simple NN against mnist dataset. + +1. Run a training job by submitting script `1.run-training.sbatch` to Slurm via `sbatch` as shown below. + ```bash + sbatch 1.run-training.sbatch + ``` + +2. When the training job completes successfully, it should produce a log output similar to the below in the `logs/` directory of `$TEST_CASE_PATH` + + ```console + ... + 56/70 [=======================>......] - ETA: 1s - loss: 4.0206 - accuracy: 1.0957 + 62/70 [=========================>....] - ETA: 0s - loss: 4.0104 - accuracy: 1.1046 + 62/70 [=========================>....] - ETA: 0s - loss: 4.0104 - accuracy: 1.1046 + 69/70 [============================>.] - ETA: 0s - loss: 3.9982 - accuracy: 1.1101 + 69/70 [============================>.] - ETA: 0s - loss: 3.9982 - accuracy: 1.1101 + 70/70 [==============================] - 6s 82ms/step - loss: 1.9969 - accuracy: 0.5576 + 70/70 [==============================] - 6s 82ms/step - loss: 1.9969 - accuracy: 0.5576 + ``` + +## 4. Authors / Reviewers + +* [A] Keita Watanabe - mlkeita@ diff --git a/3.test_cases/7.tensorflow-distributed/src/worker.py b/3.test_cases/7.tensorflow-distributed/src/worker.py new file mode 100644 index 00000000..bd6ed873 --- /dev/null +++ b/3.test_cases/7.tensorflow-distributed/src/worker.py @@ -0,0 +1,67 @@ +import os +import sys +import json + +import numpy as np +import tensorflow as tf + +"""Tensorflow distributed training example +This code is retrived from https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras +""" + +def mnist_dataset(batch_size): + (x_train, y_train), _ = tf.keras.datasets.mnist.load_data() + # The `x` arrays are in uint8 and have values in the [0, 255] range. + # You need to convert them to float32 with values in the [0, 1] range. 
+ x_train = x_train / np.float32(255) + y_train = y_train.astype(np.int64) + train_dataset = tf.data.Dataset.from_tensor_slices( + (x_train, y_train)).shuffle(60000).repeat().batch(batch_size) + return train_dataset + +def build_and_compile_cnn_model(): + model = tf.keras.Sequential([ + tf.keras.layers.InputLayer(input_shape=(28, 28)), + tf.keras.layers.Reshape(target_shape=(28, 28, 1)), + tf.keras.layers.Conv2D(32, 3, activation='relu'), + tf.keras.layers.Flatten(), + tf.keras.layers.Dense(128, activation='relu'), + tf.keras.layers.Dense(10) + ]) + model.compile( + loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), + optimizer=tf.keras.optimizers.SGD(learning_rate=0.001), + metrics=['accuracy']) + return model + +def main(): + per_worker_batch_size = 64 + tf_config = json.loads(os.environ['TF_CONFIG']) + num_workers = len(tf_config['cluster']['worker']) + + strategy = tf.distribute.MultiWorkerMirroredStrategy() + + global_batch_size = per_worker_batch_size * num_workers + multi_worker_dataset = mnist_dataset(global_batch_size) + + with strategy.scope(): + # Model building/compiling need to be within `strategy.scope()`. 
+ multi_worker_model = build_and_compile_cnn_model() + + + multi_worker_model.fit(multi_worker_dataset, epochs=3, steps_per_epoch=70) + +if __name__ == "__main__": + worker_rank = sys.argv[1] + workers = [*map(lambda x: f"{x}:12345", sys.argv[2:])] + os.environ["TF_CONFIG"] = json.dumps({ + 'cluster': { + 'worker': workers + }, + 'task': {'type': 'worker', 'index': worker_rank} + }) + + print("Hello from Python") + print(workers, worker_rank) + print(os.environ["TF_CONFIG"]) + main() \ No newline at end of file From f1c63e5547ca22978124a3a1bd93ece7dd4a75c9 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Tue, 14 Nov 2023 06:34:16 +0900 Subject: [PATCH 194/648] Update README.md add reviewer --- 3.test_cases/7.tensorflow-distributed/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/3.test_cases/7.tensorflow-distributed/README.md b/3.test_cases/7.tensorflow-distributed/README.md index 088aad01..892d446e 100644 --- a/3.test_cases/7.tensorflow-distributed/README.md +++ b/3.test_cases/7.tensorflow-distributed/README.md @@ -102,3 +102,4 @@ Here, we will conduct simple NN against mnist dataset. ## 4. Authors / Reviewers * [A] Keita Watanabe - mlkeita@ +* [R] Pierre-Yves Aquilanti - pierreya@ From 96aac32c9a64d2f797830d9099ab21b1e9936602 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Tue, 14 Nov 2023 10:47:16 +0900 Subject: [PATCH 195/648] Update README.md update support matrix --- README.md | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 6f4914d4..3b63c799 100644 --- a/README.md +++ b/README.md @@ -37,14 +37,15 @@ Custom machine images can be built using [Packer](www.packer.io) for AWS Paralle All test cases are under `3.test_cases/`. You can go in each test case directory to learn how to run it. 
-| Test cases | Slurm | EKS | AWS Batch | -| ------------------- | ----- | --- | ---------- | -| `1.megatron-lm` | ✅ | ❓ | ❓ | -| `2.nemo-launcher` | ✅ | ❌ | ❌ | -| `3.MPT` | ✅ | ❓ | ❓ | -| `4.DDP` | ✅ | ❓ | ❓ | -| `5.param-benchmark` | ✅ | ❓ | ❓ | - +| Test cases | Slurm | Kubernetes | AWS Batch | +| -------------------------- | ----- | ---------- | ---------- | +| `1.megatron-lm` | ✅ | ❓ | ❓ | +| `2.nemo-launcher` | ✅ | ❌ | ❌ | +| `3.MPT` | ✅ | ❓ | ❓ | +| `4.DDP` | ✅ | ❓ | ❓ | +| `5.param-benchmark` | ✅ | ❓ | ❓ | +| `6.stable-diffusion` | ✅ | ❓ | ❓ | +| `7.tensorflow-distributed` | ✅ | ❓ | ❓ | ## 4. Validation scripts @@ -76,4 +77,4 @@ Thanks to all the contributors for building, reviewing and testing. - Sean Smith - seaam@ - Jianying Lang - langjian@ - Maxime Hugues - maxhaws@ -- Dmitry Monakhov dmonakhov@ \ No newline at end of file +- Dmitry Monakhov dmonakhov@ From 5d14d657862edfb547d8699acfc4be6d2c929938 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 14 Nov 2023 20:39:24 +0800 Subject: [PATCH 196/648] Dockerfile template: bump efa-installer and nccl --- 2.ami_and_docker/Dockerfile | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/2.ami_and_docker/Dockerfile b/2.ami_and_docker/Dockerfile index f5a251e7..c385cf71 100644 --- a/2.ami_and_docker/Dockerfile +++ b/2.ami_and_docker/Dockerfile @@ -23,7 +23,8 @@ FROM nvcr.io/nvidia/pytorch:23.10-py3 ENV DEBIAN_FRONTEND=noninteractive # The three must-be-built packages. -ENV EFA_INSTALLER_VERSION=1.28.0 +# Efa-installer>=1.29.0 required for nccl>=2.19.0 to avoid libfabric NCCL error. +ENV EFA_INSTALLER_VERSION=1.29.0 ENV AWS_OFI_NCCL_VERSION=1.7.3-aws ENV NCCL_TESTS_VERSION=master @@ -76,8 +77,7 @@ ENV PATH=/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:$PATH # NCCL EFA plugin (aws-ofi-nccl) depends on mpi, hence we must rebuild openmpi before building the # aws-ofi-ccnl. 
#################################################################################################### -##ENV NCCL_VERSION=2.19.3-1 # libfabric gave NCCL error. Known issue on efa-installer<=1.28.0 -#ENV NCCL_VERSION=2.18.6-1 +#ENV NCCL_VERSION=2.19.3-1 #RUN cd /opt && \ # wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-keyring_1.0-1_all.deb && \ # dpkg -i cuda-keyring_1.0-1_all.deb && \ @@ -97,8 +97,7 @@ ENV PATH=/opt/amazon/efa/bin:/opt/amazon/openmpi/bin:$PATH # NCCL EFA plugin (aws-ofi-nccl) depends on mpi, hence we must rebuild openmpi before building the # aws-ofi-ccnl. #################################################################################################### -#ENV NCCL_VERSION=2.19.3-1 # libfabric gave NCCL error. Known issue on efa-installer<=1.28.0 -ENV NCCL_VERSION=2.18.6-1 +ENV NCCL_VERSION=2.19.3-1 RUN apt-get remove -y libnccl2 libnccl-dev \ && cd /tmp \ && git clone https://github.com/NVIDIA/nccl.git -b v${NCCL_VERSION} \ From 6fa4882d56418bbac22a03cf5e5b13e6de6f8113 Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Wed, 15 Nov 2023 12:43:02 -0800 Subject: [PATCH 197/648] Made changes requested by perifaws Signed-off-by: Ankur Srivastava --- 3.test_cases/6.stable-diffusion/README.md | 8 -------- .../6.stable-diffusion/multi-node/1.Dockerfile | 10 ++++++---- .../6.stable-diffusion/multi-node/2.train.sbatch | 5 ----- 3 files changed, 6 insertions(+), 17 deletions(-) diff --git a/3.test_cases/6.stable-diffusion/README.md b/3.test_cases/6.stable-diffusion/README.md index 80267ff0..0b957280 100644 --- a/3.test_cases/6.stable-diffusion/README.md +++ b/3.test_cases/6.stable-diffusion/README.md @@ -178,12 +178,4 @@ Now we can start training sbatch 2.train.sbatch ``` -### 2.1 Multi Node Results - -## 3. What's Next? -TODO: -1. Investigate why single node performance on A100 80 GB instances is sub-par. -2. Implement distributed training following original implementation of stable diffusion -3. 
Explore the impact of MosaicML's Exponential Moving Average implementation on training performance. -4. Test the impact of xFormers diff --git a/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile b/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile index 393fe5e3..e8414e34 100644 --- a/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile +++ b/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile @@ -1,5 +1,7 @@ FROM nvcr.io/nvidia/pytorch:23.08-py3 +ARG MOSAICML_VERSION=0.15.0 + ARG EFA_INSTALLER_VERSION=latest ARG AWS_OFI_NCCL_VERSION=v1.7.2-aws ARG NCCL_TESTS_VERSION=master @@ -37,8 +39,8 @@ RUN sed -i 's/[ #]\(.*StrictHostKeyChecking \).*/ \1no/g' /etc/ssh/ssh_config && ENV LD_LIBRARY_PATH /usr/local/cuda/extras/CUPTI/lib64:/opt/amazon/openmpi/lib:/opt/nccl/build/lib:/opt/amazon/efa/lib:/opt/aws-ofi-nccl/install/lib:/usr/local/lib:$LD_LIBRARY_PATH ENV PATH /opt/amazon/openmpi/bin/:/opt/amazon/efa/bin:/usr/bin:/usr/local/bin:$PATH RUN curl https://bootstrap.pypa.io/get-pip.py -o /tmp/get-pip.py \ - && python3 /tmp/get-pip.py \ - && pip3 install awscli pynvml +&& python3 /tmp/get-pip.py \ +&& pip3 install awscli pynvml ################################################# # Install NVIDIA GDRCopy @@ -93,7 +95,7 @@ RUN git clone https://github.com/NVIDIA/nccl-tests.git /opt/nccl-tests \ RUN git clone https://github.com/mosaicml/diffusion-benchmark.git RUN pip3 install -r diffusion-benchmark/requirements.txt -RUN pip3 install mosaicml==0.15.0 --force +RUN pip3 install mosaicml==${MOSAICML_VERSION} --force RUN pip3 install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu121 --force RUN pip3 uninstall transformer-engine -y RUN pip3 install protobuf==3.20.3 @@ -101,4 +103,4 @@ RUN pip3 install protobuf==3.20.3 RUN rm -rf /var/lib/apt/lists/* ENV LD_PRELOAD /opt/nccl/build/lib/libnccl.so -WORKDIR /workspace/diffusion-benchmark \ No newline at end of file +WORKDIR /workspace/diffusion-benchmark diff --git 
a/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch b/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch index 05d2372e..caba4a2e 100644 --- a/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch +++ b/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch @@ -45,11 +45,6 @@ WORLD_SIZE=$((NNODES * NPROC)) srun -l "${ARGS[@]}" python -c "import streaming; streaming.base.util.clean_stale_shared_memory()" function run_compose() { - # if [ ${NODE_RANK} -eq 0 ]; then - # OPTION="nodelist" - # else - # OPTION="exclude" - # fi srun --nodelist=${NODE} --ntasks=1 -l "${ARGS[@]}" composer \ --world_size ${WORLD_SIZE} \ --nproc ${NPROC} \ From 38f1e9f630cbf1425e25f2044dbdd2b1c788a6ce Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Wed, 15 Nov 2023 20:18:33 -0800 Subject: [PATCH 198/648] Reorganized files by numbers. Added versions aas ENV variables Signed-off-by: Ankur Srivastava --- .../{Dockerfile => 0.Dockerfile} | 4 +- .../860m_res_256_pretrain.yaml | 199 ------------------ 3.test_cases/8.nemo-multimodal/README.md | 48 +++-- .../1.config.yaml} | 0 .../{bcm.yaml => nemo_configs/2.bcm.yaml} | 0 .../3.download_multimodal.yaml} | 0 ...iffusion_860m_res_256_pretrain_hydra.yaml} | 0 7 files changed, 32 insertions(+), 219 deletions(-) rename 3.test_cases/8.nemo-multimodal/{Dockerfile => 0.Dockerfile} (97%) delete mode 100644 3.test_cases/8.nemo-multimodal/860m_res_256_pretrain.yaml rename 3.test_cases/8.nemo-multimodal/{config.yaml => nemo_configs/1.config.yaml} (100%) rename 3.test_cases/8.nemo-multimodal/{bcm.yaml => nemo_configs/2.bcm.yaml} (100%) rename 3.test_cases/8.nemo-multimodal/{download_multimodal.yaml => nemo_configs/3.download_multimodal.yaml} (100%) rename 3.test_cases/8.nemo-multimodal/{stable_diffusion_860m_res_256_pretrain_hydra.yaml => nemo_configs/4.stable_diffusion_860m_res_256_pretrain_hydra.yaml} (100%) diff --git a/3.test_cases/8.nemo-multimodal/Dockerfile b/3.test_cases/8.nemo-multimodal/0.Dockerfile similarity index 97% 
rename from 3.test_cases/8.nemo-multimodal/Dockerfile rename to 3.test_cases/8.nemo-multimodal/0.Dockerfile index 95f9852d..910e8c36 100644 --- a/3.test_cases/8.nemo-multimodal/Dockerfile +++ b/3.test_cases/8.nemo-multimodal/0.Dockerfile @@ -1,4 +1,6 @@ -FROM nvcr.io/ea-bignlp/ea-mm-participants/bignlp-mm:23.05-py3 +ARG NEMO_MULTIMODAL_VERSION + +FROM nvcr.io/ea-bignlp/ea-mm-participants/bignlp-mm:${NEMO_MULTIMODAL_VERSION}-py3 ARG EFA_INSTALLER_VERSION=latest ARG AWS_OFI_NCCL_VERSION=v1.7.3-aws diff --git a/3.test_cases/8.nemo-multimodal/860m_res_256_pretrain.yaml b/3.test_cases/8.nemo-multimodal/860m_res_256_pretrain.yaml deleted file mode 100644 index f246160c..00000000 --- a/3.test_cases/8.nemo-multimodal/860m_res_256_pretrain.yaml +++ /dev/null @@ -1,199 +0,0 @@ -run: - name: stable_diffusion_860m_res_256_pretrain - results_dir: ${base_results_dir}/${.name} - time_limit: "2-00:00:00" - dependency: "singleton" - -name: stable-diffusion-train - -trainer: - devices: 8 - num_nodes: 1 - accelerator: gpu - precision: 16 - logger: False # logger provided by exp_manager - enable_checkpointing: False - replace_sampler_ddp: False - max_epochs: -1 # PTL default. In practice, max_steps will be reached first. 
- max_steps: 82500 # consumed_samples = global_step * micro_batch_size * data_parallel_size * accumulate_grad_batches - log_every_n_steps: 10 - accumulate_grad_batches: 1 # do not modify, grad acc is automatic for training megatron models - gradient_clip_val: 1.0 - benchmark: False - enable_model_summary: True - -exp_manager: - explicit_log_dir: ${training.run.results_dir}/results - exp_dir: null - name: nemo_stable_diffusion - create_wandb_logger: False - wandb_logger_kwargs: - project: stable-diffusion - group: nemo-sd - name: nemo_stable_diffusion - resume: True - create_checkpoint_callback: True - create_tensorboard_logger: True - checkpoint_callback_params: - every_n_train_steps: 1000 - every_n_epochs: 0 - monitor: reduced_train_loss - filename: 'nemo-stable-diffusion--{reduced_train_loss:.2f}-{step}-{consumed_samples}' - resume_if_exists: True - resume_ignore_no_checkpoint: True - ema: - enable: True - decay: 0.9999 - validate_original_weights: False - every_n_steps: 1 - cpu_offload: False - - -model: - precision: ${training.trainer.precision} - # specify micro_batch_size, global_batch_size, and model parallelism - # gradient accumulation will be done automatically based on data_parallel_size - micro_batch_size: 128 # limited by GPU memory - global_batch_size: 8192 # will use more micro batches to reach global batch size - - linear_start: 0.00085 - linear_end: 0.012 - num_timesteps_cond: 1 - log_every_t: 200 - timesteps: 1000 - first_stage_key: images - cond_stage_key: captions - image_size: 64 - channels: 4 - cond_stage_trainable: false - conditioning_key: crossattn # check - monitor: val/loss_simple_ema - scale_factor: 0.18215 - use_ema: False - scale_by_std: False - ckpt_path: - ignore_keys: [] - parameterization: eps - clip_denoised: True - load_only_unet: False - cosine_s: 8e-3 - given_betas: - original_elbo_weight: 0 - v_posterior: 0 - l_simple_weight: 1 - use_positional_encodings: False - learn_logvar: False - logvar_init: 0 - beta_schedule: linear - 
loss_type: l2 - - concat_mode: True - cond_stage_forward: - text_embedding_dropout_rate: 0 - fused_opt: True - inductor: True - inductor_cudagraphs: False - capture_cudagraph_iters: -1 # -1 to disable - channels_last: True - - unet_config: - _target_: nemo.collections.multimodal.modules.stable_diffusion.diffusionmodules.openaimodel.UNetModel - from_pretrained: - from_NeMo: True #Must be specified when from pretrained is not None, False means loading unet from HF ckpt - image_size: 32 # unused - in_channels: 4 - out_channels: 4 - model_channels: 320 - attention_resolutions: - - 4 - - 2 - - 1 - num_res_blocks: 2 - channel_mult: - - 1 - - 2 - - 4 - - 4 - num_heads: 8 - use_spatial_transformer: true - transformer_depth: 1 - context_dim: 768 - use_checkpoint: False - legacy: False - use_flash_attention: True - - first_stage_config: - _target_: nemo.collections.multimodal.models.stable_diffusion.ldm.autoencoder.AutoencoderKL - from_pretrained: /path/to/vae.bin - embed_dim: 4 - monitor: val/rec_loss - ddconfig: - double_z: true - z_channels: 4 - resolution: 256 #Never used - in_channels: 3 - out_ch: 3 - ch: 128 - ch_mult: - - 1 - - 2 - - 4 - - 4 - num_res_blocks: 2 - attn_resolutions: [] - dropout: 0.0 - lossconfig: - target: torch.nn.Identity - - cond_stage_config: - _target_: nemo.collections.multimodal.modules.stable_diffusion.encoders.modules.FrozenMegatronCLIPEmbedder - restore_from_path: /path/to/nemo_clip.nemo - device: cuda - freeze: True - layer: "last" - # For compatibility of history version that uses HF clip model - # _target_: nemo.collections.multimodal.modules.stable_diffusion.encoders.modules.FrozenCLIPEmbedder - # version: openai/clip-vit-large-patch14 - # device: cuda - # max_length: 77 - - # miscellaneous - seed: 666 - resume_from_checkpoint: null # manually set the checkpoint file to load from - apex_transformer_log_level: 30 # Python logging level displays logs with severity greater than or equal to this - gradient_as_bucket_view: True # PyTorch DDP 
argument. Allocate gradients in a contiguous bucket to save memory (less fragmentation and buffer memory) - - optim: - name: fused_adam - lr: 0.00008192 - weight_decay: 0. - betas: - - 0.9 - - 0.999 - sched: - name: WarmupHoldPolicy - warmup_steps: 10000 - hold_steps: 10000000000000 # Incredibly large value to hold the lr as constant - - # Nsys profiling options - nsys_profile: - enabled: False - start_step: 10 # Global batch to start profiling - end_step: 10 # Global batch to end profiling - ranks: [ 0 ] # Global rank IDs to profile - gen_shape: False # Generate model and kernel details including input shapes - - data: - num_workers: 16 - train: - dataset_path: - - /fsx/laion-art/wdinfo.pkl - augmentations: - resize_smallest_side: 256 - center_crop_h_w: 256, 256 - horizontal_flip: False - filterings: - - webdataset: - infinite_sampler: False - local_root_path: /fsx/laion-art/tarfiles_reorganized/task0001 # each tarfile in wdinfo is relative to this diff --git a/3.test_cases/8.nemo-multimodal/README.md b/3.test_cases/8.nemo-multimodal/README.md index a2b4775b..891e9462 100644 --- a/3.test_cases/8.nemo-multimodal/README.md +++ b/3.test_cases/8.nemo-multimodal/README.md @@ -1,6 +1,6 @@ # Train Stable Diffusion with NeMo-Multimodal -This project provides a guide to run NemoMultimodal on AWS using a container from Nvidia GPU Cloud (NGC). NemoMultimodal 23.05 supports multiple models including Vision Transformers (ViTs), CLIP, Stable Diffusion, InstructPix2Pix, DreamBooth, ControlNet and Imagen. The test cases can be executed on Slurm and use Nvidia Enroot and Nvidia Pyxis. In this project we will showcase a working example with multi-node training for Stable Diffusion +This project provides a guide to run Nemo-Multimodal on AWS using a container from Nvidia GPU Cloud (NGC). NemoMultimodal 23.05 supports multiple models including Vision Transformers (ViTs), CLIP, Stable Diffusion, InstructPix2Pix, DreamBooth, ControlNet and Imagen. 
The test cases can be executed on Slurm and use Nvidia Enroot and Nvidia Pyxis. In this project we will showcase a working example with multi-node training for Stable Diffusion ## Prerequisites @@ -14,8 +14,9 @@ docker login nvcr.io Username: $oauthtoken Password: API_KEY +Please make note that the Username is exactly "$oauthtoken". -If you have createdyour cluster with DLAMI or your custom AMI, please make sure `libnvidia-container cli` is installed. You can follow the instructions below to install it. +If you have created your cluster with DLAMI or your custom AMI, please make sure `libnvidia-container cli` is installed. You can follow the instructions below to install it. ``` To install libnvidia-container cli: https://github.com/NVIDIA/libnvidia-container @@ -31,45 +32,53 @@ curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dear && sudo apt-get install libnvidia-container1 \ && sudo apt-get install libnvidia-container-tools ``` +You can set the Nemo Multimodal version and others as environment variables: + +``` +export NEMO_MULTIMODAL_VERSION=23.05 +export TARGET_PATH=/apps/nemo-src # Must be a shared filesystem. This is where Nemo launcher scripts will reside. +export DOCKER_IMAGE_NAME=nemo-multimodal +export TAG=$NEMO_MULTIMODAL_VERSION +``` ## Pull Image ``` -docker pull nvcr.io/ea-bignlp/ea-mm-participants/bignlp-mm:23.05-py3 +docker pull nvcr.io/ea-bignlp/ea-mm-participants/bignlp-mm:${NEMO_MULTIMODAL_VERSION}-py3 ``` ## Run container on Head Node +Once the above image is pulled, you can run the container on the head node like below. Here we are running the container just to be able to copy launcher scripts on the host machine. If you need to run the container on the compute nodes, you would need to add `--gpus all` flag to the run command. 
``` - docker run -it --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 nvcr.io/ea-bignlp/ea-mm-participants/bignlp-mm:23.05-py3 bash + docker run -it --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 nvcr.io/ea-bignlp/ea-mm-participants/bignlp-mm:${NEMO_MULTIMODAL_VERSION}-py3 bash ``` ## Copy launcher scripts to host We need to copy NeMo launcher scripts to head node that we will use to submit multiple slurm jobs for downloading, preparing data and running training. Once the container is running, exit out of it and copy the launcher scripts like below: ``` -docker cp -a :/opt/NeMo-Megatron-Launcher/ ./nemo-src +docker cp -a :/opt/NeMo-Megatron-Launcher/ ${TARGET_PATH} ``` ## Build customized docker image +To get optimal performance of Nemo-Multimodal with EFA on P5 and P4de instances, we provide a customized [Dockerfile](https://github.com/aws-samples/awsome-distributed-training/blob/nemo-multimodal/3.test_cases/8.nemo-multimodal/Dockerfile) and we can build an image like below: ``` -docker build -t nemo-multimodal . +docker build --build-arg NEMO_MULTIMODAL_VERSION=23.05 -t ${DOCKER_IMAGE_NAME}:${TAG} -f 0.Dockerfile ``` ## Convert image - +Convert the Docker container image to an [Enroot](https://github.com/NVIDIA/enroot) squash file that will be stored in /apps. This step takes a few minutes. ``` -enroot import -o /apps/nemo-multimodal.sqsh dockerd://nemo-multimodal +enroot import -o /apps/${DOCKER_IMAGE_NAME}.sqsh dockerd://${DOCKER_IMAGE_NAME} ``` ## Create Conda env - +We need a conda environment that has the necessary dependencies for submitting multiple arrays of slurm jobs via [HYDRA](https://github.com/facebookresearch/hydra) which NeMo uses to configure both NeMo models and the PyTorch Lightning Trainer.
``` -# Create conda env -# Create wget -O miniconda.sh "https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-Linux-x86_64.sh" \ && bash miniconda.sh -b -p /apps/.conda \ && /apps/.conda/bin/conda init bash @@ -85,33 +94,35 @@ pip3 install -r requirements.txt ## Submitting slurm jobs Next we will show how to submit slurm jobs for data-preparation and training. The NeMo config provides the following config files which we have modified: -1. config.yaml: NeMo config with information about different stages and environment variables -2. bcm.yaml: Cluster setup config -3. download_multimodal.yaml: Config to download and prepare data -4. 860m_res_256_pretrain.yaml: Config to pre-train stable diffusion model +1. `1.config.yaml`: NeMo config with information about different stages and environment variables +2. `2.bcm.yaml`: Cluster setup config +3. `3.download_multimodal.yaml`: Config to download and prepare data +4. `4.stable_diffusion_860m_res_256_pretrain.yaml`: Config to pre-train stable diffusion model You can run one or more stages like below: ``` -HYDRA_FULL_ERROR=1 python3 /apps/nemo-src/launcher_scripts/main.py +HYDRA_FULL_ERROR=1 python3 ${TARGET_PATH}/launcher_scripts/main.py ``` This will create separate folders for different slurm jobs and create folders with the relevant slurm submission script and config file. ## Download and prepare data We will use the popular [laion-art](https://huggingface.co/datasets/laion/laion-art) data for training the stable diffusion model which contains >8M images and their captions. Please review the [download_multimodal](https://github.com/aws-samples/awsome-distributed-training/blob/nemo-multimodal/3.test_cases/8.nemo-multimodal/download_multimodal.yaml) file which contains the following sections: -1. dataset_repo_id: laion/laion-art # huggingface dataset repo id, in the format of {user_or_company}/{dataset_name} +1. 
dataset_repo_id: `laion/laion-art` # huggingface dataset repo id, in the format of {user_or_company}/{dataset_name} 2. download_parquet: Downloads and partitions the parquet files and stores the partitioned parquet files in `/fsx/laion-art-data/parquet/` 3. download_images: Uses [img2dataset](https://github.com/rom1504/img2dataset/tree/main) to download the images specified in the parquet files and store the raw data in `/fsx/laion-art-data/tarfiles_raw`. Each partitioned parquet file will run in an array of slurm jobs sequentially. 4. reorganize_tar: This section will reorganize the tar files and create new tarfiles with tar_chunk_size number of images stored in each tar file. Make sure `node_array_size` is set to 1, otherwise additional preprocessing will be needed to merge the tarfiles from the two tasks in one folder. The reorganized tarfiles will be stored in `/fsx/laion-art-data/tarfiles_reorganized`. 5. generate_wdinfo: This task will generate a pickle file with the necessary paths for the reorganized tarfiles. Make sure you are reading from reorganized tarfiles and not from precache_encodings which is included in the original version of NeMo 23.05. ## Run Distributed Training -Once the data is downloaded, the training job runs next. Make sure the trainer inputs such as `num_nodes` and number of gpus per node in `trainer.devices` is set correctly. Also, set `max_epochs` to -1 if training needs to run till max_steps have completed. The model by default will create a tensorboard events log, but wights and biases is not switched on by default. Also make sure the datasets path at the bottom point to the right paths for `wdinfo.pkl` and `tarfiles_reorganized`. +Once the data is downloaded, the training job runs next. Make sure the trainer inputs such as `num_nodes` and number of gpus per node in `trainer.devices` is set correctly. Also, set `max_epochs` to -1 if training needs to run till max_steps have completed.
The model by default will create a tensorboard events log, but weights and biases is not switched on by default. Also make sure the datasets path at the bottom point to the right paths for `wdinfo.pkl` and `tarfiles_reorganized`. Once training starts you will see logs like: ``` +tail -f ${TARGET_PATH}/launcher_scripts/results/stable_diffusion/860m_res_256_pretrain/log-nemo-multimodal-stable_diffusion_860m_res_256_pretrain_xx.out + Epoch 0: 0%| | 1/605 [01:58<19:52:10, 118.43s/it, loss=1, v_num=, reduced_train_loss=1.000, lr=8.19e-9, global_step=1.000, consumed_samples=8192.0] Epoch 0: 0%| | 2/605 [02:02<10:14:49, 61.18s/it, loss=1, v_num=, reduced_train_loss=1.000, lr=8.19e-9, global_step=1.000, consumed_samples=8192.0] Epoch 0: 0%| | 2/605 [02:02<10:14:49, 61.18s/it, loss=1, v_num=, reduced_train_loss=1.000, lr=1.64e-8, global_step=2.000, consumed_samples=16384.0] @@ -120,4 +131,3 @@ Epoch 0: 0%| | 2/605 [02:02<10:14:49, 61.18s/it, loss=1, v_num=, redu - diff --git a/3.test_cases/8.nemo-multimodal/config.yaml b/3.test_cases/8.nemo-multimodal/nemo_configs/1.config.yaml similarity index 100% rename from 3.test_cases/8.nemo-multimodal/config.yaml rename to 3.test_cases/8.nemo-multimodal/nemo_configs/1.config.yaml diff --git a/3.test_cases/8.nemo-multimodal/bcm.yaml b/3.test_cases/8.nemo-multimodal/nemo_configs/2.bcm.yaml similarity index 100% rename from 3.test_cases/8.nemo-multimodal/bcm.yaml rename to 3.test_cases/8.nemo-multimodal/nemo_configs/2.bcm.yaml diff --git a/3.test_cases/8.nemo-multimodal/download_multimodal.yaml b/3.test_cases/8.nemo-multimodal/nemo_configs/3.download_multimodal.yaml similarity index 100% rename from 3.test_cases/8.nemo-multimodal/download_multimodal.yaml rename to 3.test_cases/8.nemo-multimodal/nemo_configs/3.download_multimodal.yaml diff --git a/3.test_cases/8.nemo-multimodal/stable_diffusion_860m_res_256_pretrain_hydra.yaml b/3.test_cases/8.nemo-multimodal/nemo_configs/4.stable_diffusion_860m_res_256_pretrain_hydra.yaml similarity 
index 100% rename from 3.test_cases/8.nemo-multimodal/stable_diffusion_860m_res_256_pretrain_hydra.yaml rename to 3.test_cases/8.nemo-multimodal/nemo_configs/4.stable_diffusion_860m_res_256_pretrain_hydra.yaml From 07e72814da8f527ba0e495a017dc63d3097cfae0 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Thu, 16 Nov 2023 09:19:08 +0900 Subject: [PATCH 199/648] add neuronx-nemo-megatron example --- .../1.convert-weight.sbatch | 10 ++ .../8.neuronx-nemo-megatron/2.tokenize.sh | 17 ++ .../3.precompile-model.sh | 5 + .../4.pretrain-model.sh | 5 + .../8.neuronx-nemo-megatron/README.md | 154 ++++++++++++++++++ neuronx-nemo-megatron | 1 + 6 files changed, 192 insertions(+) create mode 100644 3.test_cases/8.neuronx-nemo-megatron/1.convert-weight.sbatch create mode 100644 3.test_cases/8.neuronx-nemo-megatron/2.tokenize.sh create mode 100644 3.test_cases/8.neuronx-nemo-megatron/3.precompile-model.sh create mode 100644 3.test_cases/8.neuronx-nemo-megatron/4.pretrain-model.sh create mode 100644 3.test_cases/8.neuronx-nemo-megatron/README.md create mode 160000 neuronx-nemo-megatron diff --git a/3.test_cases/8.neuronx-nemo-megatron/1.convert-weight.sbatch b/3.test_cases/8.neuronx-nemo-megatron/1.convert-weight.sbatch new file mode 100644 index 00000000..03e70976 --- /dev/null +++ b/3.test_cases/8.neuronx-nemo-megatron/1.convert-weight.sbatch @@ -0,0 +1,10 @@ +#!/bin/bash +#SBATCH --exclusive +#SBATCH --output=slurm-%x-%j.out +#SBATCH --cpus-per-task 96 +#SBATCH --nodes 1 + + +source ~/aws_neuron_venv_pytorch/bin/activate +python /home/ec2-user/aws_neuron_venv_pytorch/lib/python3.8/site-packages/transformers/models/llama/convert_llama_weights_to_hf.py \ + --input_dir /fsx/Llama2-meta --model_size 7B --output_dir /fsx/Llama2-7b-hf diff --git a/3.test_cases/8.neuronx-nemo-megatron/2.tokenize.sh b/3.test_cases/8.neuronx-nemo-megatron/2.tokenize.sh new file mode 100644 index 00000000..f690fe26 --- /dev/null +++ b/3.test_cases/8.neuronx-nemo-megatron/2.tokenize.sh @@ -0,0 +1,17 @@ 
+#!/bin/bash +#SBATCH --exclusive +#SBATCH --output=slurm-%x-%j.out +#SBATCH --cpus-per-task 96 +#SBATCH --nodes 1 + +source ~/aws_neuron_venv_pytorch/bin/activate +python /home/ec2-user/neuronx-nemo-megatron/nemo/scripts/nlp_language_modeling/preprocess_data_for_megatron.py \ + --input=/fsx/data/books/book.jsonl \ + --json-keys=text \ + --tokenizer-library=huggingface \ + --tokenizer-type=/fsx/Llama2-7b-hf \ + --dataset-impl=mmap \ + --output-prefix=/fsx/data/books/book-tokenized \ + --append-eod \ + --need-pad-id \ + --workers=32 diff --git a/3.test_cases/8.neuronx-nemo-megatron/3.precompile-model.sh b/3.test_cases/8.neuronx-nemo-megatron/3.precompile-model.sh new file mode 100644 index 00000000..f8652ab3 --- /dev/null +++ b/3.test_cases/8.neuronx-nemo-megatron/3.precompile-model.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +cd ~/neuronx-nemo-megatron/nemo/examples/nlp/language_modeling +source ~/aws_neuron_venv_pytorch/bin/activate +sbatch --nodes 4 compile.slurm ./llama_7b.sh \ No newline at end of file diff --git a/3.test_cases/8.neuronx-nemo-megatron/4.pretrain-model.sh b/3.test_cases/8.neuronx-nemo-megatron/4.pretrain-model.sh new file mode 100644 index 00000000..d254082a --- /dev/null +++ b/3.test_cases/8.neuronx-nemo-megatron/4.pretrain-model.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +cd ~/neuronx-nemo-megatron/nemo/examples/nlp/language_modeling +source ~/aws_neuron_venv_pytorch/bin/activate +sbatch --nodes 4 run.slurm ./llama_7b.sh \ No newline at end of file diff --git a/3.test_cases/8.neuronx-nemo-megatron/README.md b/3.test_cases/8.neuronx-nemo-megatron/README.md new file mode 100644 index 00000000..3a85831b --- /dev/null +++ b/3.test_cases/8.neuronx-nemo-megatron/README.md @@ -0,0 +1,154 @@ +# NeuronX Nemo Megatron Llama2 training on Trn1 Test Case + +[AWS Neuron Reference for NeMo Megatron](https://github.com/aws-neuron/neuronx-nemo-megatron/tree/main)(`neuronx-nemo-megatron`) is a set of modified versions of the open-source packages [Nemo](https://github.com/NVIDIA/NeMo) and
[Apex](https://github.com/NVIDIA/apex) that have been adapted for use with [AWS Neuron](https://awsdocs-neuron.readthedocs-hosted.com/) and [Amazon EC2 Trn1 instance](https://aws.amazon.com/ec2/instance-types/trn1/). This test case describes how to run Llama2 training on Slurm with Trn1 instances. + +## 1. Preparation + +This guide assumes that you have the following: +* A functional Slurm cluster on AWS. +* Neuron SDK and Torch-neuronx installed. +* An FSx for Lustre filesystem mounted on `/fsx`. +* `torch-neuronx` environment set up as virtual environment as `aws_neuron_venv_pytorch`. See [NeuronSDK documentation](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/general/setup/neuron-setup/pytorch/neuronx/ubuntu/torch-neuronx-ubuntu20.html#setup-torch-neuronx-ubuntu20) for the setup. +* `neuronx-nemo-megatron` cloned on home directory of the slurm headnode (`cd ~ && git clone https://github.com/aws-neuron/neuronx-nemo-megatron.git`). + +We recommend that you setup a Slurm cluster using the template in the architectures directory. + +## 1. Prepare Llama2 model + +This test case requires the Llama2 model, which is governed by the Meta license and must be downloaded and converted to the standard [Hugging Face](https://huggingface.co/) format prior to running this sample. +You can submit an access request from [here](https://ai.meta.com/resources/models-and-libraries/llama-downloads/), we need "Llama 2 & Llama Chat" to be checked. Use the [download.sh](https://github.com/facebookresearch/llama/blob/main/download.sh) in the official repository. You will be asked to input a URL from the email you receive from meta.
+ +We will assume that you have placed the model and tokenizer as follows on cluster: + +``` +/fsx/Llama2-meta/ +├── 7B/ +│ ├── checklist.chk +│ ├── consolidated.00.pth +│ └── params.json +├── tokenizer.model +└── tokenizer_checklist.chk +``` + +To convert the model to the standard Hugging Face format, the following script in transformers can be called with the following (example) command: + +``` +sbatch 1.convert-weight.sbatch +``` + +Note: For the purposes of this sample we assume you have saved the Llama-2-7b model in a directory called `Llama2-7b-hf` with the following format: + +``` +/fsx/Llama2-7b-hf/ +├── config.json +├── generation_config.json +├── pytorch_model-00001-of-00002.bin +├── pytorch_model-00002-of-00002.bin +├── pytorch_model.bin.index.json +├── special_tokens_map.json +├── tokenizer.json +├── tokenizer.model +└── tokenizer_config.json +``` + +## 2. Download and Tokenize dataset +This tutorial makes use of a [Red pyjama dataset](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T). The dataset can be downloaded to your cluster by running the following commands on the head node: + +``` +mkdir -p /fsx/data/llama2 +wget https://data.together.xyz/redpajama-data-1T/v1.0.0/book/book.jsonl # Note: Dataset download is 50G and will take approximately 3-4 hours to download. +or +wget https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T-Sample/resolve/main/book_sample.jsonl -O /fsx/data/llama2/book.jsonl +``` + +Once you have the tokenizer and the dataset, you can tokenize the dataset following the below command: +``` +sbatch 2.tokenize.sh +``` + +Post tokenizing the dataset, you will have a path to the tokenizer and the dataset which will be used for pretraining.
+ +## Llama2 training configurations +We tested with the following model sizes: 7B +### Llama2 7B + +- Model configuration + - Attention heads: 32 + - Layers: 32 + - Sequence length: 4096 + - Hidden size: 4096 + - Hidden FFN size: 11008 + - Microbatch size: 1 + - Global batch size: 256 + +- Distributed training configuration + - Number of nodes: 4 + - Tensor parallel degree: 8 + - Pipeline parallel degree: 1 + - Data parallel degree: 16 + +## Pre-compile the model +By default, PyTorch Neuron uses a just in time (JIT) compilation flow that sequentially compiles all of the neural network compute graphs as they are encountered during a training job. The compiled graphs are cached in a local compiler cache so that subsequent training jobs can leverage the compiled graphs and avoid compilation (so long as the graph signatures and Neuron version have not changed). + +An alternative to the JIT flow is to use the included [neuron_parallel_compile](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/frameworks/torch/torch-neuronx/api-reference-guide/training/pytorch-neuron-parallel-compile.html?highlight=neuron_parallel_compile) command to perform ahead of time (AOT) compilation. In the AOT compilation flow, the compute graphs are first identified and extracted during a short simulated training run, and the extracted graphs are then compiled and cached using parallel compilation, which is considerably faster than the JIT flow. 
+ +Before starting the compilation you need to update your path to the dataset and tokenizer in the llama_7b script as below : + +``` +cd ~/neuronx-nemo-megatron/nemo/examples/nlp/language_modeling +vi llama_7b.sh +``` +Update the below lines to +``` +# For tokenizer +model.tokenizer.type='/fsx/Llama2-7b-hf' \ + +# For Dataset +model.data.data_prefix=[1.0,/fsx/data/books/book.jsonl-processed_text_document] \ +``` + +Run the following command to launch an AOT pre-compilation job on your ParallelCluster: +``` +bash 3.precompile-model.sh +``` + +Once you have launched the precompilation job, run the `squeue` command to view the SLURM job queue on your cluster. If you have not recently run a job on your cluster, it may take 4-5 minutes for the requested trn1.32xlarge nodes to be launched and initialized. Once the job is running, `squeue` should show output similar to the following: +``` + JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON) + 10 compute1 compile.slurm ubuntu R 5:11 4 compute1-dy-queue1-i1-[1-4] +``` + +You can view the output of the precompilation job by examining the file named `slurm-compile.slurm-ZZ.out` where ZZ represents the JOBID of your job in the `squeue` output, above. Ex: +``` +tail -f slurm-compile.slurm-10.out +``` + +Once the precompilation job is complete, you should see a message similar to the following in the logs: +``` +2023-06-11 23:04:08.000738: INFO ||PARALLEL_COMPILE||: Total graphs: 22 +2023-06-11 23:04:08.000738: INFO ||PARALLEL_COMPILE||: Total successful compilations: 22 +2023-06-11 23:04:08.000738: INFO ||PARALLEL_COMPILE||: Total failed compilations: 0 +``` + +At this point, you can press `CTRL-C` to exit the tail command. + +## Launch a pretraining job + +Submit the training job + +``` +bash 4.pretrain-model.sh +``` + + +As outlined above, you can again use the `squeue` command to view the job queue. 
Once you see that your pretraining job is running, you can view the output of the training job by examining the file named `slurm-run.slurm-ZZ.out` where ZZ represents the JOBID of your job: +``` +tail -f slurm-run.slurm-11.out +``` + +Once the model is loaded onto the Trainium accelerators and training has commenced, you will begin to see output indicating the job progress: +``` +Epoch 0: 22%|██▏ | 4499/20101 [22:26:14<77:48:37, 17.95s/it, loss=2.43, v_num=5563, reduced_train_loss=2.470, gradient_norm=0.121, parameter_norm=1864.0, global_step=4512.0, consumed_samples=1.16e+6, iteration_time=16.40] +Epoch 0: 22%|██▏ | 4500/20101 [22:26:32<77:48:18, 17.95s/it, loss=2.43, v_num=5563, reduced_train_loss=2.470, gradient_norm=0.121, parameter_norm=1864.0, global_step=4512.0, consumed_samples=1.16e+6, iteration_time=16.40] +Epoch 0: 22%|██▏ | 4500/20101 [22:26:32<77:48:18, 17.95s/it, loss=2.44, v_num=5563, reduced_train_loss=2.450, gradient_norm=0.120, parameter_norm=1864.0, global_step=4512.0, consumed_samples=1.16e+6, iteration_time=16.50] +``` diff --git a/neuronx-nemo-megatron b/neuronx-nemo-megatron new file mode 160000 index 00000000..6038bb80 --- /dev/null +++ b/neuronx-nemo-megatron @@ -0,0 +1 @@ +Subproject commit 6038bb80b5ae59d3f4a99ed38a4f68fa0e22665a From 1d581889a5712002d0a3f5fb60d36c2df683410a Mon Sep 17 00:00:00 2001 From: Ankur Srivastava <101727556+awsankur@users.noreply.github.com> Date: Wed, 15 Nov 2023 20:39:13 -0800 Subject: [PATCH 200/648] Update README.md --- 3.test_cases/8.nemo-multimodal/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/8.nemo-multimodal/README.md b/3.test_cases/8.nemo-multimodal/README.md index 891e9462..d49e3908 100644 --- a/3.test_cases/8.nemo-multimodal/README.md +++ b/3.test_cases/8.nemo-multimodal/README.md @@ -66,7 +66,7 @@ docker cp -a :/opt/NeMo-Megatron-Launcher/ ${TARGET_PATH} To get optimal performance of Nemo-Multimodal with EFA on P5 and P4de instances, we provide a customized 
[Dockerfile](https://github.com/aws-samples/awsome-distributed-training/blob/nemo-multimodal/3.test_cases/8.nemo-multimodal/Dockerfile) and we can build a image like below: ``` -docker build --build-arg NEMO_MULTIMODAL_VERSION=23.05 -t ${DOCKER_IMAGE_NAME}:${TAG} -f 0.Dockerfile +docker build --build-arg NEMO_MULTIMODAL_VERSION=23.05 -t ${DOCKER_IMAGE_NAME}:${TAG} -f 0.Dockerfile . ``` ## Convert image From 0dc218bc1675d3df54e3e2a31fbca8aab73192d5 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Thu, 16 Nov 2023 15:21:04 +0900 Subject: [PATCH 201/648] Revert "Add neuronx-nemo-megatron training usecase" --- .../1.convert-weight.sbatch | 10 -- .../8.neuronx-nemo-megatron/2.tokenize.sh | 17 -- .../3.precompile-model.sh | 5 - .../4.pretrain-model.sh | 5 - .../8.neuronx-nemo-megatron/README.md | 154 ------------------ neuronx-nemo-megatron | 1 - 6 files changed, 192 deletions(-) delete mode 100644 3.test_cases/8.neuronx-nemo-megatron/1.convert-weight.sbatch delete mode 100644 3.test_cases/8.neuronx-nemo-megatron/2.tokenize.sh delete mode 100644 3.test_cases/8.neuronx-nemo-megatron/3.precompile-model.sh delete mode 100644 3.test_cases/8.neuronx-nemo-megatron/4.pretrain-model.sh delete mode 100644 3.test_cases/8.neuronx-nemo-megatron/README.md delete mode 160000 neuronx-nemo-megatron diff --git a/3.test_cases/8.neuronx-nemo-megatron/1.convert-weight.sbatch b/3.test_cases/8.neuronx-nemo-megatron/1.convert-weight.sbatch deleted file mode 100644 index 03e70976..00000000 --- a/3.test_cases/8.neuronx-nemo-megatron/1.convert-weight.sbatch +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -#SBATCH --exclusive -#SBATCH --output=slurm-%x-%j.out -#SBATCH --cpus-per-task 96 -#SBATCH --nodes 1 - - -source ~/aws_neuron_venv_pytorch/bin/activate -python /home/ec2-user/aws_neuron_venv_pytorch/lib/python3.8/site-packages/transformers/models/llama/convert_llama_weights_to_hf.py \ - --input_dir /fsx/Llama2-meta --model_size 7B --output_dir /fsx/Llama2-7b-hf diff --git 
a/3.test_cases/8.neuronx-nemo-megatron/2.tokenize.sh b/3.test_cases/8.neuronx-nemo-megatron/2.tokenize.sh deleted file mode 100644 index f690fe26..00000000 --- a/3.test_cases/8.neuronx-nemo-megatron/2.tokenize.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -#SBATCH --exclusive -#SBATCH --output=slurm-%x-%j.out -#SBATCH --cpus-per-task 96 -#SBATCH --nodes 1 - -source ~/aws_neuron_venv_pytorch/bin/activate -python /home/ec2-user/neuronx-nemo-megatron/nemo/scripts/nlp_language_modeling/preprocess_data_for_megatron.py \ - --input=/fsx/data/books/book.jsonl \ - --json-keys=text \ - --tokenizer-library=huggingface \ - --tokenizer-type=/fsx/Llama2-7b-hf \ - --dataset-impl=mmap \ - --output-prefix=/fsx/data/books/book-tokenized \ - --append-eod \ - --need-pad-id \ - --workers=32 diff --git a/3.test_cases/8.neuronx-nemo-megatron/3.precompile-model.sh b/3.test_cases/8.neuronx-nemo-megatron/3.precompile-model.sh deleted file mode 100644 index f8652ab3..00000000 --- a/3.test_cases/8.neuronx-nemo-megatron/3.precompile-model.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -cd ~/neuronx-nemo-megatron/nemo/examples/nlp/language_modeling -source ~/aws_neuron_venv_pytorch/bin/activate -sbatch --nodes 4 compile.slurm ./llama_7b.sh \ No newline at end of file diff --git a/3.test_cases/8.neuronx-nemo-megatron/4.pretrain-model.sh b/3.test_cases/8.neuronx-nemo-megatron/4.pretrain-model.sh deleted file mode 100644 index d254082a..00000000 --- a/3.test_cases/8.neuronx-nemo-megatron/4.pretrain-model.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -cd ~/neuronx-nemo-megatron/nemo/examples/nlp/language_modeling -source ~/aws_neuron_venv_pytorch/bin/activate -sbatch --nodes 4 run.slurm ./llama_7b.sh \ No newline at end of file diff --git a/3.test_cases/8.neuronx-nemo-megatron/README.md b/3.test_cases/8.neuronx-nemo-megatron/README.md deleted file mode 100644 index 3a85831b..00000000 --- a/3.test_cases/8.neuronx-nemo-megatron/README.md +++ /dev/null @@ -1,154 +0,0 @@ -# NeuronX Nemo Megatron 
Llama2 trainining on Trn1 Test Case - -[AWS Neuron Reference for NeMo Megatron](https://github.com/aws-neuron/neuronx-nemo-megatron/tree/main)(`neuronx-nemo-megatron`) is a modified versions of open-source packages [Nemo](https://github.com/NVIDIA/NeMo) and [Apex](https://github.com/NVIDIA/apex) that have been adapted for use with [AWS Neuron](https://awsdocs-neuron.readthedocs-hosted.com/) and [Amazon EC2 Trn1 instance](https://aws.amazon.com/ec2/instance-types/trn1/). This test case describes how to run Llama2 training on Slurm with Trn1 instances. - -## 1. Preparation - -This guide assumes that you have the following: -* A functional Slurm cluster on AWS. -* Neuron SDK and Torch-neuronx installed. -* An FSx for Lustre filesystem mounted on `/fsx`. -* `torch-neuronx` environment set up as virtual environment as `aws_neuron_venv_pytorch`. See [NeuronSDK documentation](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/general/setup/neuron-setup/pytorch/neuronx/ubuntu/torch-neuronx-ubuntu20.html#setup-torch-neuronx-ubuntu20) for the setup. -* `neuronx-nemo-megatron` cloned on home directory of the slurm headnode (`cd ~ && git clone https://github.com/aws-neuron/neuronx-nemo-megatron.git`). - -We recommend that you setup a Slurm cluster using the template in the architectures directory. - -## 1. Prepare Llama2 model - -This test case requires Llama2 model, which governed by the Meta license and must be downloaded and converted to the standard [Hugging Face](https://huggingface.co/) format prior to running this sample. -You can submit access request from [here](https://ai.meta.com/resources/models-and-libraries/llama-downloads/), we need "Llama 2 & Llama Chat" to be checked. Use the [download.sh](https://github.com/facebookresearch/llama/blob/main/download.sh) in the official repository. You will be asked to input an URL from the email you recieve from meta. 
- -We will assume that you had placed the model and tokenizer as follows on cluster: - -``` -/fsx/Llama2-meta/ -├── 7B/ -│ ├── checklist.chk -│ ├── consolidated.00.pth -│ └── params.json -├── tokenizer.model -└── tokenizer_checklist.chk -``` - -To convert the model to the standard Hugging Face format, the following script in transformers can be called with the following (example) command: - -``` -sbatch 1.convert-weight.sbatch -``` - -Note: For the purposes of this sample we assume you have saved the Llama-2-7b model in a directory called `Llama2-7b-hf` with the following format: - -``` -/fsx/Llama2-7b-hf/ -├── config.json -├── generation_config.json -├── pytorch_model-00001-of-00002.bin -├── pytorch_model-00002-of-00002.bin -├── pytorch_model.bin.index.json -├── special_tokens_map.json -├── tokenizer.json -├── tokenizer.model -└── tokenizer_config.json -``` - -## 2. Download and Tokenize dataset -This tutorial makes use of a [Red pyjama dataset](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T). The dataset can be downloaded to your cluster by running the following commands on the head node: - -``` -mkdir -p /fsx/data/llama2 -wget https://data.together.xyz/redpajama-data-1T/v1.0.0/book/book.jsonl # Note: Dataset download is 50G and will take approximately 3-4 hours to download. -or -wget https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T-Sample/resolve/main/book_sample.jsonl -O /fsx/data/llama2/book.jsonl -``` - -Once you have the Tokenizer and the dataset. You can tokenize the dataset following the below command: -``` -sbatch 2.tokenize.sbatch -``` - -Post tokenizing the dataset, you will have a path to the tokenizer and the dataset which will be used for pretraining. 
- -## Llama2 training configurations -We tested with the following model sizes: 7B -### Llama2 7B - -- Model configuration - - Attention heads: 32 - - Layers: 32 - - Sequence length: 4096 - - Hidden size: 4096 - - Hidden FFN size: 11008 - - Microbatch size: 1 - - Global batch size: 256 - -- Distributed training configuration - - Number of nodes: 4 - - Tensor parallel degree: 8 - - Pipeline parallel degree: 1 - - Data parallel degree: 16 - -## Pre-compile the model -By default, PyTorch Neuron uses a just in time (JIT) compilation flow that sequentially compiles all of the neural network compute graphs as they are encountered during a training job. The compiled graphs are cached in a local compiler cache so that subsequent training jobs can leverage the compiled graphs and avoid compilation (so long as the graph signatures and Neuron version have not changed). - -An alternative to the JIT flow is to use the included [neuron_parallel_compile](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/frameworks/torch/torch-neuronx/api-reference-guide/training/pytorch-neuron-parallel-compile.html?highlight=neuron_parallel_compile) command to perform ahead of time (AOT) compilation. In the AOT compilation flow, the compute graphs are first identified and extracted during a short simulated training run, and the extracted graphs are then compiled and cached using parallel compilation, which is considerably faster than the JIT flow. 
- -Before starting the compilation you need to update your path to the dataset and tokenizer in the llama_7b script as below : - -``` -cd ~/neuronx-nemo-megatron/nemo/examples/nlp/language_modeling -vi llama_7b.sh -``` -Update the below lines to -``` -# For tokenizer -model.tokenizer.type='/fsx/Llama2-7b-hf' \ - -# For Dataset -model.data.data_prefix=[1.0,/fsx/data/books/book.jsonl-processed_text_document] \ -``` - -Run the following command to launch an AOT pre-compilation job on your ParallelCluster: -``` -bash 3.precompile-model.sh -``` - -Once you have launched the precompilation job, run the `squeue` command to view the SLURM job queue on your cluster. If you have not recently run a job on your cluster, it may take 4-5 minutes for the requested trn1.32xlarge nodes to be launched and initialized. Once the job is running, `squeue` should show output similar to the following: -``` - JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON) - 10 compute1 compile.slurm ubuntu R 5:11 4 compute1-dy-queue1-i1-[1-4] -``` - -You can view the output of the precompilation job by examining the file named `slurm-compile.slurm-ZZ.out` where ZZ represents the JOBID of your job in the `squeue` output, above. Ex: -``` -tail -f slurm-compile.slurm-10.out -``` - -Once the precompilation job is complete, you should see a message similar to the following in the logs: -``` -2023-06-11 23:04:08.000738: INFO ||PARALLEL_COMPILE||: Total graphs: 22 -2023-06-11 23:04:08.000738: INFO ||PARALLEL_COMPILE||: Total successful compilations: 22 -2023-06-11 23:04:08.000738: INFO ||PARALLEL_COMPILE||: Total failed compilations: 0 -``` - -At this point, you can press `CTRL-C` to exit the tail command. - -## Launch a pretraining job - -Submit the training job - -``` -bash 4.pretrain-model.sh -``` - - -As outlined above, you can again use the `squeue` command to view the job queue. 
Once you see that your pretraining job is running, you can view the output of the training job by examining the file named `slurm-run.slurm-ZZ.out` where ZZ represents the JOBID of your job: -``` -tail -f slurm-run.slurm-11.out -``` - -Once the model is loaded onto the Trainium accelerators and training has commenced, you will begin to see output indicating the job progress: -``` -Epoch 0: 22%|██▏ | 4499/20101 [22:26:14<77:48:37, 17.95s/it, loss=2.43, v_num=5563, reduced_train_loss=2.470, gradient_norm=0.121, parameter_norm=1864.0, global_step=4512.0, consumed_samples=1.16e+6, iteration_time=16.40] -Epoch 0: 22%|██▏ | 4500/20101 [22:26:32<77:48:18, 17.95s/it, loss=2.43, v_num=5563, reduced_train_loss=2.470, gradient_norm=0.121, parameter_norm=1864.0, global_step=4512.0, consumed_samples=1.16e+6, iteration_time=16.40] -Epoch 0: 22%|██▏ | 4500/20101 [22:26:32<77:48:18, 17.95s/it, loss=2.44, v_num=5563, reduced_train_loss=2.450, gradient_norm=0.120, parameter_norm=1864.0, global_step=4512.0, consumed_samples=1.16e+6, iteration_time=16.50] -``` diff --git a/neuronx-nemo-megatron b/neuronx-nemo-megatron deleted file mode 160000 index 6038bb80..00000000 --- a/neuronx-nemo-megatron +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 6038bb80b5ae59d3f4a99ed38a4f68fa0e22665a From 6c65be2750b5d5a0874c5f8dc41f8081e42f4d51 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Thu, 16 Nov 2023 09:19:08 +0900 Subject: [PATCH 202/648] add neuronx-nemo-megatron example --- .../1.convert-weight.sbatch | 10 ++ .../8.neuronx-nemo-megatron/2.tokenize.sh | 17 ++ .../3.precompile-model.sh | 5 + .../4.pretrain-model.sh | 5 + .../8.neuronx-nemo-megatron/README.md | 154 ++++++++++++++++++ 5 files changed, 191 insertions(+) create mode 100644 3.test_cases/8.neuronx-nemo-megatron/1.convert-weight.sbatch create mode 100644 3.test_cases/8.neuronx-nemo-megatron/2.tokenize.sh create mode 100644 3.test_cases/8.neuronx-nemo-megatron/3.precompile-model.sh create mode 100644 
3.test_cases/8.neuronx-nemo-megatron/4.pretrain-model.sh create mode 100644 3.test_cases/8.neuronx-nemo-megatron/README.md diff --git a/3.test_cases/8.neuronx-nemo-megatron/1.convert-weight.sbatch b/3.test_cases/8.neuronx-nemo-megatron/1.convert-weight.sbatch new file mode 100644 index 00000000..03e70976 --- /dev/null +++ b/3.test_cases/8.neuronx-nemo-megatron/1.convert-weight.sbatch @@ -0,0 +1,10 @@ +#!/bin/bash +#SBATCH --exclusive +#SBATCH --output=slurm-%x-%j.out +#SBATCH --cpus-per-task 96 +#SBATCH --nodes 1 + + +source ~/aws_neuron_venv_pytorch/bin/activate +python /home/ec2-user/aws_neuron_venv_pytorch/lib/python3.8/site-packages/transformers/models/llama/convert_llama_weights_to_hf.py \ + --input_dir /fsx/Llama2-meta --model_size 7B --output_dir /fsx/Llama2-7b-hf diff --git a/3.test_cases/8.neuronx-nemo-megatron/2.tokenize.sh b/3.test_cases/8.neuronx-nemo-megatron/2.tokenize.sh new file mode 100644 index 00000000..f690fe26 --- /dev/null +++ b/3.test_cases/8.neuronx-nemo-megatron/2.tokenize.sh @@ -0,0 +1,17 @@ +#!/bin/bash +#SBATCH --exclusive +#SBATCH --output=slurm-%x-%j.out +#SBATCH --cpus-per-task 96 +#SBATCH --nodes 1 + +source ~/aws_neuron_venv_pytorch/bin/activate +python /home/ec2-user/neuronx-nemo-megatron/nemo/scripts/nlp_language_modeling/preprocess_data_for_megatron.py \ + --input=/fsx/data/books/book.jsonl \ + --json-keys=text \ + --tokenizer-library=huggingface \ + --tokenizer-type=/fsx/Llama2-7b-hf \ + --dataset-impl=mmap \ + --output-prefix=/fsx/data/books/book-tokenized \ + --append-eod \ + --need-pad-id \ + --workers=32 diff --git a/3.test_cases/8.neuronx-nemo-megatron/3.precompile-model.sh b/3.test_cases/8.neuronx-nemo-megatron/3.precompile-model.sh new file mode 100644 index 00000000..f8652ab3 --- /dev/null +++ b/3.test_cases/8.neuronx-nemo-megatron/3.precompile-model.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +cd ~/neuronx-nemo-megatron/nemo/examples/nlp/language_modeling +source ~/aws_neuron_venv_pytorch/bin/activate +sbatch --nodes 4 
compile.slurm ./llama_7b.sh \ No newline at end of file diff --git a/3.test_cases/8.neuronx-nemo-megatron/4.pretrain-model.sh b/3.test_cases/8.neuronx-nemo-megatron/4.pretrain-model.sh new file mode 100644 index 00000000..d254082a --- /dev/null +++ b/3.test_cases/8.neuronx-nemo-megatron/4.pretrain-model.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +cd ~/neuronx-nemo-megatron/nemo/examples/nlp/language_modeling +source ~/aws_neuron_venv_pytorch/bin/activate +sbatch --nodes 4 run.slurm ./llama_7b.sh \ No newline at end of file diff --git a/3.test_cases/8.neuronx-nemo-megatron/README.md b/3.test_cases/8.neuronx-nemo-megatron/README.md new file mode 100644 index 00000000..3a85831b --- /dev/null +++ b/3.test_cases/8.neuronx-nemo-megatron/README.md @@ -0,0 +1,154 @@ +# NeuronX Nemo Megatron Llama2 training on Trn1 Test Case + +[AWS Neuron Reference for NeMo Megatron](https://github.com/aws-neuron/neuronx-nemo-megatron/tree/main)(`neuronx-nemo-megatron`) provides modified versions of the open-source packages [Nemo](https://github.com/NVIDIA/NeMo) and [Apex](https://github.com/NVIDIA/apex) that have been adapted for use with [AWS Neuron](https://awsdocs-neuron.readthedocs-hosted.com/) and [Amazon EC2 Trn1 instance](https://aws.amazon.com/ec2/instance-types/trn1/). This test case describes how to run Llama2 training on Slurm with Trn1 instances. + +## 1. Preparation + +This guide assumes that you have the following: +* A functional Slurm cluster on AWS. +* Neuron SDK and Torch-neuronx installed. +* An FSx for Lustre filesystem mounted on `/fsx`. +* `torch-neuronx` environment set up as a virtual environment named `aws_neuron_venv_pytorch`. See [NeuronSDK documentation](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/general/setup/neuron-setup/pytorch/neuronx/ubuntu/torch-neuronx-ubuntu20.html#setup-torch-neuronx-ubuntu20) for the setup. +* `neuronx-nemo-megatron` cloned into the home directory of the Slurm head node (`cd ~ && git clone https://github.com/aws-neuron/neuronx-nemo-megatron.git`).
+ +We recommend that you set up a Slurm cluster using the template in the architectures directory. + +## 2. Prepare Llama2 model + +This test case requires the Llama2 model, which is governed by the Meta license and must be downloaded and converted to the standard [Hugging Face](https://huggingface.co/) format prior to running this sample. +You can submit an access request [here](https://ai.meta.com/resources/models-and-libraries/llama-downloads/); make sure "Llama 2 & Llama Chat" is checked. Use the [download.sh](https://github.com/facebookresearch/llama/blob/main/download.sh) in the official repository. You will be asked to input a URL from the email you receive from Meta. + +We will assume that you have placed the model and tokenizer as follows on the cluster: + +``` +/fsx/Llama2-meta/ +├── 7B/ +│ ├── checklist.chk +│ ├── consolidated.00.pth +│ └── params.json +├── tokenizer.model +└── tokenizer_checklist.chk +``` + +To convert the model to the standard Hugging Face format, the conversion script in transformers can be called with the following (example) command: + +``` +sbatch 1.convert-weight.sbatch +``` + +Note: For the purposes of this sample we assume you have saved the Llama-2-7b model in a directory called `Llama2-7b-hf` with the following format: + +``` +/fsx/Llama2-7b-hf/ +├── config.json +├── generation_config.json +├── pytorch_model-00001-of-00002.bin +├── pytorch_model-00002-of-00002.bin +├── pytorch_model.bin.index.json +├── special_tokens_map.json +├── tokenizer.json +├── tokenizer.model +└── tokenizer_config.json +``` + +## 3. Download and Tokenize dataset +This tutorial makes use of a [RedPajama dataset](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T). The dataset can be downloaded to your cluster by running the following commands on the head node: + +``` +mkdir -p /fsx/data/llama2 +wget https://data.together.xyz/redpajama-data-1T/v1.0.0/book/book.jsonl # Note: Dataset download is 50G and will take approximately 3-4 hours to download.
+or +wget https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T-Sample/resolve/main/book_sample.jsonl -O /fsx/data/llama2/book.jsonl +``` + +Once you have the tokenizer and the dataset, you can tokenize the dataset with the following command: +``` +sbatch 2.tokenize.sh +``` + +After tokenizing the dataset, you will have a path to the tokenizer and the dataset which will be used for pretraining. + +## Llama2 training configurations +We tested with the following model sizes: 7B +### Llama2 7B + +- Model configuration + - Attention heads: 32 + - Layers: 32 + - Sequence length: 4096 + - Hidden size: 4096 + - Hidden FFN size: 11008 + - Microbatch size: 1 + - Global batch size: 256 + +- Distributed training configuration + - Number of nodes: 4 + - Tensor parallel degree: 8 + - Pipeline parallel degree: 1 + - Data parallel degree: 16 + +## Pre-compile the model +By default, PyTorch Neuron uses a just in time (JIT) compilation flow that sequentially compiles all of the neural network compute graphs as they are encountered during a training job. The compiled graphs are cached in a local compiler cache so that subsequent training jobs can leverage the compiled graphs and avoid compilation (so long as the graph signatures and Neuron version have not changed). + +An alternative to the JIT flow is to use the included [neuron_parallel_compile](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/frameworks/torch/torch-neuronx/api-reference-guide/training/pytorch-neuron-parallel-compile.html?highlight=neuron_parallel_compile) command to perform ahead of time (AOT) compilation. In the AOT compilation flow, the compute graphs are first identified and extracted during a short simulated training run, and the extracted graphs are then compiled and cached using parallel compilation, which is considerably faster than the JIT flow.
+ +Before starting the compilation you need to update your path to the dataset and tokenizer in the llama_7b script as below : + +``` +cd ~/neuronx-nemo-megatron/nemo/examples/nlp/language_modeling +vi llama_7b.sh +``` +Update the below lines to +``` +# For tokenizer +model.tokenizer.type='/fsx/Llama2-7b-hf' \ + +# For Dataset +model.data.data_prefix=[1.0,/fsx/data/books/book.jsonl-processed_text_document] \ +``` + +Run the following command to launch an AOT pre-compilation job on your ParallelCluster: +``` +bash 3.precompile-model.sh +``` + +Once you have launched the precompilation job, run the `squeue` command to view the SLURM job queue on your cluster. If you have not recently run a job on your cluster, it may take 4-5 minutes for the requested trn1.32xlarge nodes to be launched and initialized. Once the job is running, `squeue` should show output similar to the following: +``` + JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON) + 10 compute1 compile.slurm ubuntu R 5:11 4 compute1-dy-queue1-i1-[1-4] +``` + +You can view the output of the precompilation job by examining the file named `slurm-compile.slurm-ZZ.out` where ZZ represents the JOBID of your job in the `squeue` output, above. Ex: +``` +tail -f slurm-compile.slurm-10.out +``` + +Once the precompilation job is complete, you should see a message similar to the following in the logs: +``` +2023-06-11 23:04:08.000738: INFO ||PARALLEL_COMPILE||: Total graphs: 22 +2023-06-11 23:04:08.000738: INFO ||PARALLEL_COMPILE||: Total successful compilations: 22 +2023-06-11 23:04:08.000738: INFO ||PARALLEL_COMPILE||: Total failed compilations: 0 +``` + +At this point, you can press `CTRL-C` to exit the tail command. + +## Launch a pretraining job + +Submit the training job + +``` +bash 4.pretrain-model.sh +``` + + +As outlined above, you can again use the `squeue` command to view the job queue. 
Once you see that your pretraining job is running, you can view the output of the training job by examining the file named `slurm-run.slurm-ZZ.out` where ZZ represents the JOBID of your job: +``` +tail -f slurm-run.slurm-11.out +``` + +Once the model is loaded onto the Trainium accelerators and training has commenced, you will begin to see output indicating the job progress: +``` +Epoch 0: 22%|██▏ | 4499/20101 [22:26:14<77:48:37, 17.95s/it, loss=2.43, v_num=5563, reduced_train_loss=2.470, gradient_norm=0.121, parameter_norm=1864.0, global_step=4512.0, consumed_samples=1.16e+6, iteration_time=16.40] +Epoch 0: 22%|██▏ | 4500/20101 [22:26:32<77:48:18, 17.95s/it, loss=2.43, v_num=5563, reduced_train_loss=2.470, gradient_norm=0.121, parameter_norm=1864.0, global_step=4512.0, consumed_samples=1.16e+6, iteration_time=16.40] +Epoch 0: 22%|██▏ | 4500/20101 [22:26:32<77:48:18, 17.95s/it, loss=2.44, v_num=5563, reduced_train_loss=2.450, gradient_norm=0.120, parameter_norm=1864.0, global_step=4512.0, consumed_samples=1.16e+6, iteration_time=16.50] +``` From 8fbcc193f8cf8b11671474de7b1ca0dfaf41f38f Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Thu, 16 Nov 2023 13:01:12 -0800 Subject: [PATCH 203/648] Updated README Signed-off-by: Ankur Srivastava --- 3.test_cases/6.stable-diffusion/README.md | 62 +++++++++++++++++------ 1 file changed, 46 insertions(+), 16 deletions(-) diff --git a/3.test_cases/6.stable-diffusion/README.md b/3.test_cases/6.stable-diffusion/README.md index 0b957280..238c7164 100644 --- a/3.test_cases/6.stable-diffusion/README.md +++ b/3.test_cases/6.stable-diffusion/README.md @@ -1,7 +1,5 @@ # Stable Diffusion Test Case -DISCLAIMER: The scripts presented in this test case serve as a working examples and are not optimized for performances. - We will follow MosaicML's stable diffusion benchmarking scripts provided [here](https://github.com/mosaicml/diffusion-benchmark/tree/main). It uses the `'stabilityai/stable-diffusion-2-base'` model. 
You can check the number of parameters by executing: ```bash @@ -9,10 +7,21 @@ python3 calculate_number_of_parameters.py Model has 1289.952427 M parameters and 865.910724 M trainable_params ``` +To simplify testing, we have separate scripts for Single node and Multi node Distributed Training. We will also present a comparison of throughput (images/second) achieved with P4de (A100 80GB) and P5 (H100 80GB) instances. + +You can export the following environment variables: + +``` +export CUDA_VERSION=12.1 +export MOSAICML_VERSION=0.15.0 +export PYTORCH_INDEX_URL=https://download.pytorch.org/whl/nightly/cu121 +export PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:23.08-py3 +export DOCKER_IMAGE_NAME=mosaicml-stable-diffusion +export TAG=$MOSAICML_VERSION +``` -Just for simplifaction of testing, we have separate scripts for Single node and Multi node Distributed Training. We will also present a comparison of throughput (images/second) achieved with P4de (A100 80GB) and P5 (H100 80GB) instances. -## 0. Conda and Docker +## 0. Create Conda Environment Make sure you are able to create conda environments and docker containers. For example, to install Miniconda, please follow the steps below: @@ -35,6 +44,24 @@ elif [[ "$os" == "ubuntu" ]]; then else echo "Unknown OS: $os" fi + +conda create -n pt-nightlies python=3.10 + +conda activate pt-nightlies + +# Install PyTorch Nightly distribution with specified Cuda version +pip3 install --pre torch torchvision torchaudio --index-url ${PYTORCH_INDEX_URL} + +# Install Diffusers and Transformers +pip3 install diffusers["torch"] transformers + +# Install Weights and Biases +pip3 install wandb + +# We will install Composer from source. 
First clone the Repo +git clone https://github.com/mosaicml/composer.git + + ``` ## 1 Single Node Training @@ -68,15 +95,13 @@ Once this change is done, you can install composer as `pip3 install -e .` The `single-node` folder also has the Dockerfile and a Makefile with commands to build the image and run the container. ```bash +cd awsome-distributed-training/3.test_cases/6.stable-diffusion/single-node # build the image -docker build -t mosaicml-stable-diffusion . -# or you can do -# make build +docker build --build-arg MOSAICML_VERSION=${MOSAICML_VERSION} PYTORCH_IMAGE=${PYTORCH_IMAGE} PYTORCH_INDEX_URL=${PYTORCH_INDEX_URL} -t ${DOCKER_IMAGE_NAME}:${TAG} -f 0.Dockerfile . + +# run container +docker run --gpus all --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 -it ${DOCKER_IMAGE_NAME} /bin/bash -# run it -docker run --gpus all --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 -it mosaicml-stable-diffusion /bin/bash -# or you can do -# make run ``` ### 1.2 Single Node Training @@ -142,9 +167,9 @@ from diffusers.models.attention_processor import AttnProcessor self.unet = UNet2DConditionModel.from_pretrained(''stabilityai/stable-diffusion-2-base'', subfolder='unet') self.unet.set_attn_processor(AttnProcessor()) ``` -AttnProcessor2_0 which is a Processor for implementing scaled dot-product attention is enabled by default if you're using PyTorch 2.0. +`AttnProcessor2_0` which is a Processor for implementing scaled dot-product attention is enabled by default if you're using PyTorch 2.0. 
-The older self.unet.set_attn_processor(AttnProcessor()) gives Cuda OOM error with a batch size of 32 while with `AttnProcessor2_0()` is able to run with a batch size of 32 and yield 385 images/sec throughput +The older `self.unet.set_attn_processor(AttnProcessor())` gives Cuda OOM error with a batch size of 32 while with `AttnProcessor2_0()` is able to run with a batch size of 32 and yield 385 images/sec throughput More details on this can be found here: https://pytorch.org/blog/accelerated-diffusers-pt-20/ @@ -166,10 +191,15 @@ git clone https://github.com/aws-samples/awsome-distributed-training.git cd awsome-distributed-training/6.stable-diffusion/multi-node ``` -Next build the docker image and convert it to a enroot sqsh file: +Next build the docker image: -```bash -make # this will build the docker image and convert it to enroot +``` +docker build --build-arg MOSAICML_VERSION=${MOSAICML_VERSION} PYTORCH_IMAGE=${PYTORCH_IMAGE} -t ${DOCKER_IMAGE_NAME}:${TAG} -f 1.Dockerfile . +``` +Convert the Docker container image to an [Enroot](https://github.com/NVIDIA/enroot) squash file that will be stored in /apps. This step takes a few minutes. 
+ +``` +enroot import -o /apps/${DOCKER_IMAGE_NAME}.sqsh dockerd://${DOCKER_IMAGE_NAME} ``` Now we can start training From 0b586b06714dc32f5b4ae7006c31e50906af1e54 Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Thu, 16 Nov 2023 13:03:05 -0800 Subject: [PATCH 204/648] Updated based on PY comments Signed-off-by: Ankur Srivastava --- .../6.stable-diffusion/multi-node/0.Makefile | 0 .../multi-node/1.Dockerfile | 6 ++++-- .../single-node/{Dockerfile => 0.Dockerfile} | 11 +++++++--- .../single-node/create-conda.sh | 20 ------------------- 4 files changed, 12 insertions(+), 25 deletions(-) delete mode 100644 3.test_cases/6.stable-diffusion/multi-node/0.Makefile rename 3.test_cases/6.stable-diffusion/single-node/{Dockerfile => 0.Dockerfile} (68%) delete mode 100644 3.test_cases/6.stable-diffusion/single-node/create-conda.sh diff --git a/3.test_cases/6.stable-diffusion/multi-node/0.Makefile b/3.test_cases/6.stable-diffusion/multi-node/0.Makefile deleted file mode 100644 index e69de29b..00000000 diff --git a/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile b/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile index e8414e34..98440b0b 100644 --- a/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile +++ b/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile @@ -1,6 +1,8 @@ -FROM nvcr.io/nvidia/pytorch:23.08-py3 +ARG PYTORCH_IMAGE -ARG MOSAICML_VERSION=0.15.0 +FROM PYTORCH_IMAGE + +ARG MOSAICML_VERSION ARG EFA_INSTALLER_VERSION=latest ARG AWS_OFI_NCCL_VERSION=v1.7.2-aws diff --git a/3.test_cases/6.stable-diffusion/single-node/Dockerfile b/3.test_cases/6.stable-diffusion/single-node/0.Dockerfile similarity index 68% rename from 3.test_cases/6.stable-diffusion/single-node/Dockerfile rename to 3.test_cases/6.stable-diffusion/single-node/0.Dockerfile index 64432fe3..a6ac4ea5 100644 --- a/3.test_cases/6.stable-diffusion/single-node/Dockerfile +++ b/3.test_cases/6.stable-diffusion/single-node/0.Dockerfile @@ -1,12 +1,17 @@ # Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 -FROM nvcr.io/nvidia/pytorch:23.08-py3 +ARG PYTORCH_IMAGE + +FROM PYTORCH_IMAGE + +ARG MOSAICML_VERSION +ARG PYTORCH_INDEX_URL RUN git clone https://github.com/mosaicml/diffusion-benchmark.git /wd RUN pip3 install -r /wd/requirements.txt -RUN pip3 install mosaicml==0.15.0 --force -RUN pip3 install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu121 --force +RUN pip3 install mosaicml==${MOSAICML_VERSION} --force +RUN pip3 install --pre torch torchvision torchaudio --index-url ${PYTORCH_INDEX_URL} --force RUN pip3 uninstall transformer-engine -y RUN pip3 install protobuf==3.20.3 diff --git a/3.test_cases/6.stable-diffusion/single-node/create-conda.sh b/3.test_cases/6.stable-diffusion/single-node/create-conda.sh deleted file mode 100644 index 10831b42..00000000 --- a/3.test_cases/6.stable-diffusion/single-node/create-conda.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: MIT-0 - -conda create -n pt-nightlies python=3.10 - -conda activate pt-nightlies - -# Install PyTorch Nightly distribution with Cuda 12.1 -pip3 install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu121 - -# Install Diffusers and Transformers -pip3 install diffusers["torch"] transformers - -# Install Weights and Biases -pip3 install wandb - -# We will install Composer from source. 
First clone the Repo -git clone https://github.com/mosaicml/composer.git \ No newline at end of file From d20cee319902a2d1bca248756bb533c09a35c76e Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Fri, 17 Nov 2023 23:45:12 -0800 Subject: [PATCH 205/648] Made changes to resolve PR comments Signed-off-by: Ankur Srivastava --- 3.test_cases/6.stable-diffusion/README.md | 61 +++++++++++++++++------ 1 file changed, 45 insertions(+), 16 deletions(-) diff --git a/3.test_cases/6.stable-diffusion/README.md b/3.test_cases/6.stable-diffusion/README.md index 238c7164..1c62b1c6 100644 --- a/3.test_cases/6.stable-diffusion/README.md +++ b/3.test_cases/6.stable-diffusion/README.md @@ -12,6 +12,9 @@ To simplify testing, we have separate scripts for Single node and Multi node Dis You can export the following environment variables: ``` +export PYTHON_VERSION=3.10 +# We are using Python version 3.10 in this work. For a different Python version select the right Miniconda file from https://repo.anaconda.com/miniconda/ +export MINICONDA_INSTALLER=Miniconda3-py310_23.5.2-0-Linux-x86_64 export CUDA_VERSION=12.1 export MOSAICML_VERSION=0.15.0 export PYTORCH_INDEX_URL=https://download.pytorch.org/whl/nightly/cu121 @@ -20,17 +23,22 @@ export DOCKER_IMAGE_NAME=mosaicml-stable-diffusion export TAG=$MOSAICML_VERSION ``` +## 1 Single Node Training + +For the single node training use case, we provide both a Conda environment and a Dockerfile for your convenience and preference. However, you do not need both for setting up training. + -## 0. Create Conda Environment +## 1.1. Create Conda Environment -Make sure you are able to create conda environments and docker containers. 
For example, to install Miniconda, please follow the steps below: + +To install Miniconda, please follow the steps below: ```bash # install in the shared directory so compute nodes can source the environment: cd /apps # Get the appropriate Miniconda_version from https://repo.anaconda.com/miniconda/ -wget -O miniconda.sh "https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-Linux-x86_64.sh" \ +wget -O miniconda.sh "https://repo.anaconda.com/miniconda/${MINICONDA_INSTALLER}.sh" \ && bash miniconda.sh -b -p ./.conda \ && ./.conda/bin/conda init bash @@ -45,7 +53,7 @@ else echo "Unknown OS: $os" fi -conda create -n pt-nightlies python=3.10 +conda create -n pt-nightlies python=${PYTHON_VERSION} conda activate pt-nightlies @@ -64,13 +72,12 @@ git clone https://github.com/mosaicml/composer.git ``` -## 1 Single Node Training -### 1.1 Single Node Setup +### 1.2 Single Node Setup with Conda When testing the latest version of MosaicML's Composer, we found that different ways to set up the environment with a PyTorch Nightly conda environment or a Nvidia PyTorch Docker container. For single or multi-node testing, you could use either to run distributed training. Next, we present both approaches. -The `single-node` folder has the shell script [`create-conda.sh`](https://github.com/aws-samples/awsome-distributed-training/blob/stable-diffusion/3.test_cases/6.stable-diffusion/single-node/create-conda.sh) which installs the PyTorch nightly distribution for Cuda 12.1 and the `diffusers, transformers` and weights and biases libraries and also clones the Composer repository. Before installing Composer, we need to avoid some torch and torchvision version checks by commenting out those lines in [`composer/setup.py`](https://github.com/mosaicml/composer/blob/dev/setup.py) so it looks like: +The conda environment installs the PyTorch nightly distribution for Cuda 12.1 and the `diffusers, transformers` and weights and biases libraries and also clones the Composer repository. 
Before installing Composer, we need to avoid some torch and torchvision version checks by commenting out those lines in [`composer/setup.py`](https://github.com/mosaicml/composer/blob/dev/setup.py) so it looks like: ```bash @@ -92,19 +99,21 @@ install_requires = [ ``` Once this change is done, you can install composer as `pip3 install -e .` -The `single-node` folder also has the Dockerfile and a Makefile with commands to build the image and run the container. +### 1.3 OPTIONAL - Single Node Setup with Docker + +The `single-node` folder also has the Dockerfile with commands to build the image and run the container. If you are opting to setup training with a Conda environment, then this setup is not needed. Run this setup only if you need to run MosaicML Composer from within a Nvidia PyTorch container. ```bash cd awsome-distributed-training/3.test_cases/6.stable-diffusion/single-node # build the image -docker build --build-arg MOSAICML_VERSION=${MOSAICML_VERSION} PYTORCH_IMAGE=${PYTORCH_IMAGE} PYTORCH_INDEX_URL=${PYTORCH_INDEX_URL} -t ${DOCKER_IMAGE_NAME}:${TAG} -f 0.Dockerfile . +docker build --build-arg MOSAICML_VERSION=${MOSAICML_VERSION} --build-arg PYTORCH_IMAGE=${PYTORCH_IMAGE} --build-arg PYTORCH_INDEX_URL=${PYTORCH_INDEX_URL} -t ${DOCKER_IMAGE_NAME}:${TAG} -f 0.Dockerfile . # run container docker run --gpus all --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 -it ${DOCKER_IMAGE_NAME} /bin/bash ``` -### 1.2 Single Node Training +### 1.4 Single Node Training Once you are in the conda environment or the container, run the following to kickstart training. 
In all these tests, we are using synthetic data generated by `diffusion-benchmark/data.py` @@ -138,7 +147,7 @@ To see GPU utilization, start another shell on the EC2 instance and run either ` docker run --rm -it --gpus all nvidia/cuda:12.2.0-devel-ubuntu20.04 watch nvidia-smi ``` -### 1.3 Single Node Training Benchmarks +### 1.5 Single Node Training Benchmarks We ran tests on P4de (A100 80GB) and P5 (H100 80GB) machines and here is a comparison. @@ -155,7 +164,7 @@ We ran tests on P4de (A100 80GB) and P5 (H100 80GB) machines and here is a compa | 20 | 8 | No | 197.90 | 361.15 | 1.83x | | 32 | 8 | Yes | 204.22 | 385.31 | 1.89x | -### 1.4 Scaled Dot Product Attention +### 1.6 Scaled Dot Product Attention HuggingFace Diffusers has a set_attn_processor method that you can use to plug and play different attention processors. A list of attention processors can be found [here](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py) @@ -169,7 +178,7 @@ self.unet.set_attn_processor(AttnProcessor()) ``` `AttnProcessor2_0` which is a Processor for implementing scaled dot-product attention is enabled by default if you're using PyTorch 2.0. 
-The older `self.unet.set_attn_processor(AttnProcessor())` gives Cuda OOM error with a batch size of 32 while with `AttnProcessor2_0()` is able to run with a batch size of 32 and yield 385 images/sec throughput +The older `self.unet.set_attn_processor(AttnProcessor())` gives [Cuda OOM](https://medium.com/@snk.nitin/how-to-solve-cuda-out-of-memory-error-850bb247cfb2) error with a batch size of 32 while with `AttnProcessor2_0()` is able to run with a batch size of 32 and yield 385 images/sec throughput More details on this can be found here: https://pytorch.org/blog/accelerated-diffusers-pt-20/ @@ -177,7 +186,7 @@ More details on this can be found here: https://pytorch.org/blog/accelerated-dif ### 2.1 Multi-Node Training -For the multi-node training we've created a `Dockerfile`, and Slurm submit script and a `Makefile` to build the docker image and convert it to an enroot image. To get started please follow the guide [AWS ParallelCluster Distributed Training](../../1.architectures/2.aws-parallelcluster). Before starting this section make sure you have the following setup: +For the multi-node training we've created a [Dockerfile](https://github.com/aws-samples/awsome-distributed-training/blob/multi-node/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile), and Slurm submit script to submit the training job. To get started please follow the guide [AWS ParallelCluster Distributed Training](../../1.architectures/2.aws-parallelcluster). Before starting this section make sure you have the following setup: * AWS ParallelCluster >= 3.7.0 * Pyxis @@ -191,21 +200,41 @@ git clone https://github.com/aws-samples/awsome-distributed-training.git cd awsome-distributed-training/6.stable-diffusion/multi-node ``` -Next build the docker image: +#### 2.1.1 Next build the docker image: ``` docker build --build-arg MOSAICML_VERSION=${MOSAICML_VERSION} PYTORCH_IMAGE=${PYTORCH_IMAGE} -t ${DOCKER_IMAGE_NAME}:${TAG} -f 1.Dockerfile . 
``` + +#### 2.1.2 Convert Docker image to Squash file + Convert the Docker container image to an [Enroot](https://github.com/NVIDIA/enroot) squash file that will be stored in /apps. This step takes a few minutes. ``` enroot import -o /apps/${DOCKER_IMAGE_NAME}.sqsh dockerd://${DOCKER_IMAGE_NAME} ``` -Now we can start training +#### 2.1.3 Now we can start training ``` sbatch 2.train.sbatch ``` +```bash +Namespace(batch_size=2048, image_size=512, remote=None, local='/tmp/mds-cache/mds-laion-2/', use_synth_data=True, model_name='stabilityai/stable-diffusion-2-base', use_ema=True, wandb_name=None, wandb_project=None, device_train_microbatch_size=4) +wandb: Tracking run with wandb version 0.13.11 +wandb: W&B syncing is set to `offline` in this directory. +wandb: Run `wandb online` or set WANDB_MODE=online to enable cloud syncing. +wandb: WARNING URL not available in offline run +****************************** +Config: +enabled_algorithms/EMA: true +node_name: unknown because NODENAME environment variable not set +num_gpus_per_node: 8 +num_nodes: 2 +rank_zero_seed: 3179589898 +****************************** +rain Epoch 0: 100%|█████████████████████████| 48/48 [09:57<00:00, 12.45s/ba, loss/train/total=0.1521] + +``` From 4c20e82a421fb931cc7b0f9a37db28ca02953a92 Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Fri, 17 Nov 2023 23:48:52 -0800 Subject: [PATCH 206/648] Updated README Signed-off-by: Ankur Srivastava --- 3.test_cases/6.stable-diffusion/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/6.stable-diffusion/README.md b/3.test_cases/6.stable-diffusion/README.md index 1c62b1c6..94181cdc 100644 --- a/3.test_cases/6.stable-diffusion/README.md +++ b/3.test_cases/6.stable-diffusion/README.md @@ -203,7 +203,7 @@ cd awsome-distributed-training/6.stable-diffusion/multi-node #### 2.1.1 Next build the docker image: ``` -docker build --build-arg MOSAICML_VERSION=${MOSAICML_VERSION} PYTORCH_IMAGE=${PYTORCH_IMAGE} -t 
${DOCKER_IMAGE_NAME}:${TAG} -f 1.Dockerfile . +docker build --build-arg MOSAICML_VERSION=${MOSAICML_VERSION} --build-arg PYTORCH_IMAGE=${PYTORCH_IMAGE} -t ${DOCKER_IMAGE_NAME}:${TAG} -f 1.Dockerfile . ``` #### 2.1.2 Convert Docker image to Squash file From 974c1e838e1dfcc9bb7c8536824b5a9453bbacca Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Sat, 18 Nov 2023 01:26:41 -0800 Subject: [PATCH 207/648] Made changes to resolve PR comments Signed-off-by: Ankur Srivastava --- 3.test_cases/8.nemo-multimodal/README.md | 110 ++++++++++++------ .../nemo_configs/1.config.yaml | 9 +- .../8.nemo-multimodal/nemo_configs/2.bcm.yaml | 2 +- .../nemo_configs/3.download_multimodal.yaml | 4 +- ...diffusion_860m_res_256_pretrain_hydra.yaml | 8 +- 5 files changed, 85 insertions(+), 48 deletions(-) diff --git a/3.test_cases/8.nemo-multimodal/README.md b/3.test_cases/8.nemo-multimodal/README.md index d49e3908..b2f069e5 100644 --- a/3.test_cases/8.nemo-multimodal/README.md +++ b/3.test_cases/8.nemo-multimodal/README.md @@ -3,21 +3,29 @@ This project provides a guide to run Nemo-Multimodal on AWS using a container from Nvidia GPU Cloud (NGC). NemoMultimodal 23.05 supports multiple models including Vision Transformers (ViTs), CLIP, Stable Diffusion, InstructPix2Pix, DreamBooth, ControlNet and Imagen. The test cases can be executed on Slurm and use Nvidia Enroot and Nvidia Pyxis. In this project we will showcase a working example with multi-node training for Stable Diffusion -## Prerequisites +## 0. Prerequisites + 0. You have access to nemo-multimodal. You can request access to the open beta [here](https://developer.nvidia.com/nemo-framework) -1. Have a slurm based parallelcluster ready for use. -2. Generate API Key: https://ngc.nvidia.com/setup/api-key -3. Install NGC CLI: https://ngc.nvidia.com/setup/installers/cli -4. Login +1. Have a slurm based parallelcluster created with a FSx for Lustre filesystem mounted. + +## 1. 
Install NGC CLI and Login + +Follow the steps below to install the NGC CLI and login to NGC Container Registry. This is needed before you can pull the Nemo-Multimodal container. + +0. Generate API Key: https://ngc.nvidia.com/setup/api-key +1. Install NGC CLI: https://ngc.nvidia.com/setup/installers/cli +2. Login ``` docker login nvcr.io Username: $oauthtoken Password: API_KEY - +``` Please make note that the Username is exactly "$oauthtoken". -If you have created your cluster with DLAMI or your custom AMI, please make sure `libnvidia-container cli` is installed. You can follow the instructions below to install it. -``` +## 2. Install Nvidia Container CLI + +If you have created your cluster with [DLAMI](https://aws.amazon.com/machine-learning/amis/) or your custom AMI, please make sure `libnvidia-container cli` is installed. You can follow the instructions below to install it. + To install libnvidia-container cli: https://github.com/NVIDIA/libnvidia-container https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html @@ -35,88 +43,118 @@ curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dear You can set the Nemo Multimodal version and others as environment variables: ``` +export PYTHON_VERSION=3.10 +# We are using Python version 3.10 in this work. For a different Python version select the right Miniconda file from https://repo.anaconda.com/miniconda/ +export MINICONDA_INSTALLER=Miniconda3-py310_23.5.2-0-Linux-x86_64 export NEMO_MULTIMODAL_VERSION=23.05 export TARGET_PATH=/apps/nemo-src # Must be a shared filesystem. This is where Nemo launcher scripts will reside. export DOCKER_IMAGE_NAME=nemo-multimodal export TAG=$NEMO_MULTIMODAL_VERSION +export ENROOT_IMAGE=/apps/${DOCKER_IMAGE_NAME}.sqsh +export HUGGINGFACE_DATASET_REPO_ID=laion/laion-art +export DATASET_PATH=/fsx/laion-art-data ``` -## Pull Image +## 3. 
Pull Image +SSH into the head node of your cluster and run ``` +cd /apps/ docker pull nvcr.io/ea-bignlp/ea-mm-participants/bignlp-mm:${NEMO_MULTIMODAL_VERSION}-py3 ``` -## Run container on Head Node -Once the above image is pulled, you can run the container on the head node like below. Here we are running the container just to be able to copy launcher scripts on the host machine. If you need to run the container on the compute nodes, you would need to add `--gpus all` flag to the run command. +## 4. Run container on Head Node +Once the above image is pulled, you can run the container on the head node like below. Here we are running the container just to be able to copy launcher scripts on the host machine. If you need to run the container on the compute nodes, you would need to add `--gpus all` flag to the run command. It is recommended to have the docker run flags like below, as recommended by Nvidia PyTorch containers, otherwise you may potentially run into an error like [this](https://github.com/NVIDIA/Megatron-LM/issues/516) ``` docker run -it --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 nvcr.io/ea-bignlp/ea-mm-participants/bignlp-mm:${NEMO_MULTIMODAL_VERSION}-py3 bash ``` -## Copy launcher scripts to host +## 5. Copy launcher scripts to host We need to copy NeMo launcher scripts to head node that we will use to submit multiple slurm jobs for downloading, preparing data and running training. 
Once the container is running, exit out of it and copy the launcher scripts like below: ``` docker cp -a :/opt/NeMo-Megatron-Launcher/ ${TARGET_PATH} ``` -## Build customized docker image -To get optimal performance of Nemo-Multimodal with EFA on P5 and P4de instances, we provide a customized [Dockerfile](https://github.com/aws-samples/awsome-distributed-training/blob/nemo-multimodal/3.test_cases/8.nemo-multimodal/Dockerfile) and we can build a image like below: +To get the `container-id` above you can list the containers like `docker ps -a` which lists all running containers and their ids. + +## 6. Build customized docker image +To achieve target performance of Nemo-Multimodal with EFA on P5 and P4de instances, we provide a customized [0.Dockerfile](https://github.com/aws-samples/awsome-distributed-training/blob/nemo-multimodal/3.test_cases/8.nemo-multimodal/Dockerfile) and we can build a image like below: ``` -docker build --build-arg NEMO_MULTIMODAL_VERSION=23.05 -t ${DOCKER_IMAGE_NAME}:${TAG} -f 0.Dockerfile . +docker build --build-arg NEMO_MULTIMODAL_VERSION=${NEMO_MULTIMODAL_VERSION} -t ${DOCKER_IMAGE_NAME}:${TAG} -f 0.Dockerfile . ``` -## Convert image -Convert the Docker container image to an [Enroot](https://github.com/NVIDIA/enroot) squash file that will be stored in /apps. This step takes a few minutes. +## 7. Convert image +Convert the Docker container image to an [Enroot](https://github.com/NVIDIA/enroot) squash file that will be stored in `/apps`. This step takes a few minutes. ``` -enroot import -o /apps/${DOCKER_IMAGE_NAME}.sqsh dockerd://${DOCKER_IMAGE_NAME} +enroot import -o ${ENROOT_IMAGE}.sqsh dockerd://${DOCKER_IMAGE_NAME} ``` -## Create Conda env +## 8. Create Conda env We need a conda environment that has the necessary dependencies for submitting multiple arrays of slurm jobs via [HYDRA](https://github.com/facebookresearch/hydra) which NeMo uses to configuring both NeMo models and the PyTorch Lightning Trainer. 
``` -wget -O miniconda.sh "https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-Linux-x86_64.sh" \ +wget -O miniconda.sh "https://repo.anaconda.com/miniconda/${MINICONDA_INSTALLER}.sh" \ && bash miniconda.sh -b -p /apps/.conda \ && /apps/.conda/bin/conda init bash source /home/ubuntu/.bashrc -conda create --name nemo-multimodal python=3.10 +conda create --name nemo-multimodal python=${PYTHON_VERSION} source activate nemo-multimodal pip3 install -r requirements.txt ``` -## Submitting slurm jobs -Next we will show how to submit slurm jobs for data-preparation and training. The NeMo config provides the following config files which we have modified: +All package versions in the above requirements.txt file is recommended from Nvidia. An older version of the package `opencv-python-headless==4.8.0.74` has to be installed to avoid this [error](https://github.com/rom1504/img2dataset/issues/355) with [img2dataset](https://github.com/rom1504/img2dataset) package. + +## 9. Pull this github repo + +```bash +cd /apps/ +git clone https://github.com/aws-samples/awsome-distributed-training.git +cd awsome-distributed-training/3.test_cases/8.nemo-multimodal + +``` -1. `1.config.yaml`: NeMo config with information about different stages and environment variables -2. `2.bcm.yaml`: Cluster setup config -3. `3.download_multimodal.yaml`: Config to download and prepare data -4. `4.stable_diffusion_860m_res_256_pretrain.yaml`: Config to pre-train stable diffusion model +## 10. Submitting slurm jobs +Next we will show how to submit slurm jobs for data preparation and training. The NeMo config provides the following config files which we have modified: + +1. `nemo_configs/1.config.yaml`: NeMo config with information about different stages and environment variables. Refer to the [EFA cheatsheet](https://github.com/aws-samples/awsome-distributed-training/blob/main/1.architectures/efa-cheatsheet.md) here for more information about the EFA environment variables. +2. 
`nemo_configs/2.bcm.yaml`: Cluster setup config which contains SBATCH variables and [Pyxis](https://github.com/NVIDIA/pyxis) settings to run containers in Slurm. +3. `nemo_configs/3.download_multimodal.yaml`: Config to download the `laion/laion-art` data from Huggingface and prepare data for training +4. `nemo_configs/4.stable_diffusion_860m_res_256_pretrain.yaml`: Config to pre-train stable diffusion model. Currently Nemo Multimodal 23.05 supports the 860M parameter Stable Diffusion model with 256x256 and 512x512 resolution support + +Run the following next to substitute the environment variables in the yaml file and place it in the right location: + +```bash +envsubst < ./nemo_configs/config.yaml > ${TARGET_PATH}/launcher_scripts/conf/config.yaml +envsubst < ./nemo_configs/bcm.yaml > ${TARGET_PATH}/launcher_scripts/conf/cluster/bcm.yaml +envsubst < ./nemo_configs/download_multimodal.yaml > ${TARGET_PATH}/launcher_scripts/conf/data_preparation/multimodal/download_multimodal.yaml +envsubst < ./nemo_configs/stable_diffusion_860m_res_256_pretrain.yaml > ${TARGET_PATH}/launcher_scripts/conf/training/stable_diffusion/stable_diffusion_860m_res_256_pretrain.yaml +``` You can run one or more stages like below: ``` HYDRA_FULL_ERROR=1 python3 ${TARGET_PATH}/launcher_scripts/main.py ``` -This will create separate folders for different slurm jobs and create folders with the relevant slurm submission script and config file. +This will create separate folders for different slurm jobs and create folders with the relevant slurm submission script and config file. For more information on using HYDRA please refer [here]((https://github.com/facebookresearch/hydra)). -## Download and prepare data +## 11. Download and prepare data We will use the popular [laion-art](https://huggingface.co/datasets/laion/laion-art) data for training the stable diffusion model which contains >8M images and their captions. 
Please review the [download_multimodal](https://github.com/aws-samples/awsome-distributed-training/blob/nemo-multimodal/3.test_cases/8.nemo-multimodal/download_multimodal.yaml) file which contains the following sections: -1. dataset_repo_id: `laion/laion-art` # huggingface dataset repo id, in the format of {user_or_company}/{dataset_name} -2. download_parquet: Downloads and paritions the parquet files and stores the partioned parquet files in `/fsx/laion-art-data/parquet/` -3. download_images: Uses [img2dataset](https://github.com/rom1504/img2dataset/tree/main) to download the images specified in the parquet files and store the raw data in `/fsx/laion-art-data/tarfiles_raw`. Each partitioned parquet file will run in an array of slurm jobs sequentially. -4. reorganize_tar: This section will reorganize the tar files and create new tarfiles with tar_chunk_size number of images stores in each tar file. Make sure `node_array_size` is set to 1, otherwise additional preprocessing will be needed to merge the tarfiles from the two tasks in one folder. The reorganized tarfiles will be stored in `/fsx/laion-art-data/tarfiles_reorganized`. -5. generate_wdinfo: This task will generate a pickle file with the necessary paths for the reorganized tarfiles. Make sure you are reading from reorganized tarfiles and not from precache_encodings which is included in the original version of NeMo 23.05. +1. `dataset_repo_id`: `laion/laion-art` Huggingface dataset repo id, in the format of `{user_or_company}/{dataset_name}` +2. `download_parquet`: Downloads and paritions the parquet files and stores the partioned parquet files in `${DATASET_PATH}/parquet/` +3. `download_images`: Uses [img2dataset](https://github.com/rom1504/img2dataset/tree/main) to download the images specified in the parquet files and store the raw data in `${DATASET_PATH}/tarfiles_raw`. Each partitioned parquet file will run in an array of slurm jobs sequentially. +4. 
`reorganize_tar`: This section will reorganize the tar files and create new tarfiles with `tar_chunk_size` number of images stores in each tar file. Make sure `node_array_size` is set to 1, otherwise additional preprocessing will be needed to merge the tarfiles from the two tasks in one folder. The reorganized tarfiles will be stored in `${DATASET_PATH}/tarfiles_reorganized`. +5. `reorganize_tar`: This task will generate a pickle file with the necessary paths for the reorganized tarfiles. Make sure you are reading from reorganized tarfiles and not from `precache_encodings` which is included in the original version of NeMo 23.05. -## Run Distributed Training -Once the data is downloaded, the training job runs next. Make sure the trainer inputs such as `num_nodes` and number of gpus per node in `trainer.devices` is set correctly. Also, set `max_epochs` to -1 if training needs to run till max_steps have completed. The model by default will create a tensorboard events log, but weights and biases is not switched on by default. Also make sure the datasets path at the bottom point to the right paths for `wdinfo.pkl` and `tarfiles_reorganized`. +## 12. Run Distributed Training +After downloading the data, you run the training job next. Make sure the trainer inputs such as `num_nodes` and number of gpus per node in `trainer.devices` is set correctly. Also, set `max_epochs` to -1 if training needs to run till max_steps have completed. The model by default will create a tensorboard events log, but weights and biases is not switched on by default. Also make sure the datasets path at the bottom point to the right paths for `wdinfo.pkl` and `tarfiles_reorganized`. 
Once training starts you will see logs like: diff --git a/3.test_cases/8.nemo-multimodal/nemo_configs/1.config.yaml b/3.test_cases/8.nemo-multimodal/nemo_configs/1.config.yaml index a52866f1..e3ce1e75 100644 --- a/3.test_cases/8.nemo-multimodal/nemo_configs/1.config.yaml +++ b/3.test_cases/8.nemo-multimodal/nemo_configs/1.config.yaml @@ -7,7 +7,7 @@ defaults: - fine_tuning: null - evaluation: null - fw_inference: null - - export: stable_diffusion/export_stable_diffusion + - export: null - external_conversion: null - override hydra/job_logging: stdout @@ -15,20 +15,19 @@ stages: - data_preparation cluster_type: bcm # bcm or bcp. If bcm, it must match - cluster above. -launcher_scripts_path: /apps/nemo-src/launcher_scripts # Path to NeMo Megatron Launch scripts, should ends with /launcher_scripts +launcher_scripts_path: ${TARGET_PATH}/launcher_scripts # Path to NeMo Megatron Launch scripts, should ends with /launcher_scripts data_dir: ${launcher_scripts_path}/data # Location to store and read the data. base_results_dir: ${launcher_scripts_path}/results # Location to store the results, checkpoints and logs. container_mounts: # List of additional paths to mount to container. They will be mounted to same path. - /fsx:/fsx -container: /apps/nemo-multimodal.sqsh +container: ${ENROOT_IMAGE}.sqsh wandb_api_key_file: null # File where the w&B api key is stored. Key must be on the first line. env_vars: NCCL_DEBUG: INFO # Logging level for NCCL. Set to "INFO" for debug information - ##NCCL_PROTO: simple # Protocol NCCL will use. 
Set to "simple" for AWS TRANSFORMER_OFFLINE: 1 - #FI_EFA_USE_DEVICE_RDMA: 1 + FI_EFA_USE_DEVICE_RDMA: 1 FI_PROVIDER: efa NCCL_LAUNCH_MODE: parallel FI_EFA_FORK_SAFE: 1 diff --git a/3.test_cases/8.nemo-multimodal/nemo_configs/2.bcm.yaml b/3.test_cases/8.nemo-multimodal/nemo_configs/2.bcm.yaml index 2e7d8717..9ffeaa43 100644 --- a/3.test_cases/8.nemo-multimodal/nemo_configs/2.bcm.yaml +++ b/3.test_cases/8.nemo-multimodal/nemo_configs/2.bcm.yaml @@ -1,4 +1,4 @@ -partition: +partition: null account: null exclusive: True gpus_per_task: null diff --git a/3.test_cases/8.nemo-multimodal/nemo_configs/3.download_multimodal.yaml b/3.test_cases/8.nemo-multimodal/nemo_configs/3.download_multimodal.yaml index 14f74865..e9bb51f2 100644 --- a/3.test_cases/8.nemo-multimodal/nemo_configs/3.download_multimodal.yaml +++ b/3.test_cases/8.nemo-multimodal/nemo_configs/3.download_multimodal.yaml @@ -5,9 +5,9 @@ run: dependency: "singleton" bcp_preproc_npernode: 2 # 2 should be safe to use and x2 times faster. -dataset_repo_id: laion/laion-art # huggingface dataset repo id, in the format of {user_or_company}/{dataset_name} +dataset_repo_id: ${HUGGINGFACE_DATASET_REPO_ID} # huggingface dataset repo id, in the format of {user_or_company}/{dataset_name} # See https://huggingface.co/datasets?task_categories=task_categories:text-to-image&sort=downloads -dataset_output_root: /fsx/laion-art-data +dataset_output_root: ${DATASET_PATH} download_parquet: enable: True diff --git a/3.test_cases/8.nemo-multimodal/nemo_configs/4.stable_diffusion_860m_res_256_pretrain_hydra.yaml b/3.test_cases/8.nemo-multimodal/nemo_configs/4.stable_diffusion_860m_res_256_pretrain_hydra.yaml index 42dcc0f4..ed57e81f 100644 --- a/3.test_cases/8.nemo-multimodal/nemo_configs/4.stable_diffusion_860m_res_256_pretrain_hydra.yaml +++ b/3.test_cases/8.nemo-multimodal/nemo_configs/4.stable_diffusion_860m_res_256_pretrain_hydra.yaml @@ -1,6 +1,6 @@ run: name: stable_diffusion_860m_res_256_pretrain - results_dir: 
/apps/nemo-src/launcher_scripts/results/stable_diffusion_860m_res_256_pretrain + results_dir: ${TARGET_PATH}/launcher_scripts/results/stable_diffusion_860m_res_256_pretrain time_limit: 2-00:00:00 dependency: singleton name: stable-diffusion-train @@ -20,7 +20,7 @@ trainer: benchmark: false enable_model_summary: true exp_manager: - explicit_log_dir: /apps/nemo-src/launcher_scripts/results/stable_diffusion_860m_res_256_pretrain/results + explicit_log_dir: ${TARGET_PATH}/launcher_scripts/results/stable_diffusion_860m_res_256_pretrain/results exp_dir: null name: nemo_stable_diffusion create_wandb_logger: false @@ -164,7 +164,7 @@ model: num_workers: 16 train: dataset_path: - - /fsx/laion-art-data/wdinfo.pkl + - ${DATASET_PATH}/wdinfo.pkl augmentations: resize_smallest_side: 256 center_crop_h_w: 256, 256 @@ -172,4 +172,4 @@ model: filterings: null webdataset: infinite_sampler: false - local_root_path: /fsx/laion-art-data/tarfiles_reorganized/task0000/ + local_root_path: ${DATASET_PATH}/tarfiles_reorganized/task0000/ From bb9afed0c2e7acfd63f84c2fa67b186b5abf523d Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Mon, 20 Nov 2023 11:14:08 -0800 Subject: [PATCH 208/648] Quick Create Link AWS Batch Signed-off-by: Sean Smith --- 1.architectures/3.aws-batch/README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/1.architectures/3.aws-batch/README.md b/1.architectures/3.aws-batch/README.md index 64b920ad..25cf773e 100644 --- a/1.architectures/3.aws-batch/README.md +++ b/1.architectures/3.aws-batch/README.md @@ -18,6 +18,11 @@ This template deploys AWS Batch and EC2 resources. It can be deployed via the co - **Template file**: [`0.aws-batch-distributed-training.yaml`](./0.aws-batch-distributed-training.yaml) +### Quick Create + +[
 1-Click Deploy 🚀 
](https://console.aws.amazon.com/cloudformation/home?#/stacks/quickcreate?templateURL=https://awsome-distributed-training.s3.amazonaws.com/templates/0.aws-batch-distributed-training.yaml&stackName=AWS-Batch) + + ## List of Parameters The templates takes parameters that are mandatory and optional, see below for more details. @@ -34,8 +39,7 @@ The templates takes parameters that are mandatory and optional, see below for mo ## Deploy with the AWS CLI -The command to deploy the template through the CLI is shown below. Please edit the parameters values with your own configuration. - +If you'd like to deploy through the AWS CLI instead of the quick create link above, the command to deploy the template is shown below. Please edit the parameters values with your own configuration. ```bash aws cloudformation create-stack --stack-name batch-distributed-training \ From 91d1129587da4088c439306b65aede81bc5b964a Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Tue, 21 Nov 2023 08:19:21 +0530 Subject: [PATCH 209/648] updated 8.nemo-multimodal to 9.nemo-multimodal Signed-off-by: Ankur Srivastava --- .../{8.nemo-multimodal => 9.nemo-multimodal}/0.Dockerfile | 0 3.test_cases/{8.nemo-multimodal => 9.nemo-multimodal}/README.md | 0 .../nemo_configs/1.config.yaml | 0 .../nemo_configs/2.bcm.yaml | 0 .../nemo_configs/3.download_multimodal.yaml | 0 .../4.stable_diffusion_860m_res_256_pretrain_hydra.yaml | 0 .../{8.nemo-multimodal => 9.nemo-multimodal}/requirements.txt | 0 7 files changed, 0 insertions(+), 0 deletions(-) rename 3.test_cases/{8.nemo-multimodal => 9.nemo-multimodal}/0.Dockerfile (100%) rename 3.test_cases/{8.nemo-multimodal => 9.nemo-multimodal}/README.md (100%) rename 3.test_cases/{8.nemo-multimodal => 9.nemo-multimodal}/nemo_configs/1.config.yaml (100%) rename 3.test_cases/{8.nemo-multimodal => 9.nemo-multimodal}/nemo_configs/2.bcm.yaml (100%) rename 3.test_cases/{8.nemo-multimodal => 9.nemo-multimodal}/nemo_configs/3.download_multimodal.yaml (100%) rename 
3.test_cases/{8.nemo-multimodal => 9.nemo-multimodal}/nemo_configs/4.stable_diffusion_860m_res_256_pretrain_hydra.yaml (100%) rename 3.test_cases/{8.nemo-multimodal => 9.nemo-multimodal}/requirements.txt (100%) diff --git a/3.test_cases/8.nemo-multimodal/0.Dockerfile b/3.test_cases/9.nemo-multimodal/0.Dockerfile similarity index 100% rename from 3.test_cases/8.nemo-multimodal/0.Dockerfile rename to 3.test_cases/9.nemo-multimodal/0.Dockerfile diff --git a/3.test_cases/8.nemo-multimodal/README.md b/3.test_cases/9.nemo-multimodal/README.md similarity index 100% rename from 3.test_cases/8.nemo-multimodal/README.md rename to 3.test_cases/9.nemo-multimodal/README.md diff --git a/3.test_cases/8.nemo-multimodal/nemo_configs/1.config.yaml b/3.test_cases/9.nemo-multimodal/nemo_configs/1.config.yaml similarity index 100% rename from 3.test_cases/8.nemo-multimodal/nemo_configs/1.config.yaml rename to 3.test_cases/9.nemo-multimodal/nemo_configs/1.config.yaml diff --git a/3.test_cases/8.nemo-multimodal/nemo_configs/2.bcm.yaml b/3.test_cases/9.nemo-multimodal/nemo_configs/2.bcm.yaml similarity index 100% rename from 3.test_cases/8.nemo-multimodal/nemo_configs/2.bcm.yaml rename to 3.test_cases/9.nemo-multimodal/nemo_configs/2.bcm.yaml diff --git a/3.test_cases/8.nemo-multimodal/nemo_configs/3.download_multimodal.yaml b/3.test_cases/9.nemo-multimodal/nemo_configs/3.download_multimodal.yaml similarity index 100% rename from 3.test_cases/8.nemo-multimodal/nemo_configs/3.download_multimodal.yaml rename to 3.test_cases/9.nemo-multimodal/nemo_configs/3.download_multimodal.yaml diff --git a/3.test_cases/8.nemo-multimodal/nemo_configs/4.stable_diffusion_860m_res_256_pretrain_hydra.yaml b/3.test_cases/9.nemo-multimodal/nemo_configs/4.stable_diffusion_860m_res_256_pretrain_hydra.yaml similarity index 100% rename from 3.test_cases/8.nemo-multimodal/nemo_configs/4.stable_diffusion_860m_res_256_pretrain_hydra.yaml rename to 
3.test_cases/9.nemo-multimodal/nemo_configs/4.stable_diffusion_860m_res_256_pretrain_hydra.yaml diff --git a/3.test_cases/8.nemo-multimodal/requirements.txt b/3.test_cases/9.nemo-multimodal/requirements.txt similarity index 100% rename from 3.test_cases/8.nemo-multimodal/requirements.txt rename to 3.test_cases/9.nemo-multimodal/requirements.txt From 79d4babbafc5b89cb8f5794bcaca8a6f20c9b996 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 21 Nov 2023 15:05:40 +0800 Subject: [PATCH 210/648] Relocate to new structure --- .../1.amazon_machine_image}/Makefile | 0 .../1.amazon_machine_image}/README.md | 0 .../inventory/group_vars/all.yml | 0 .../1.amazon_machine_image}/inventory/hosts | 0 .../packer-ami.pkr.hcl | 0 .../playbook-dlami-gpu.yml | 0 .../playbook-dlami-neuron.yml | 0 .../playbook-eks-gpu.yml | 0 .../playbook-pcluster-cpu.yml | 0 .../playbook-pcluster-gpu.yml | 0 .../playbook-pcluster-neuron.yml | 0 .../aws_cliv2/molecule/default/converge.yml | 0 .../aws_cliv2/molecule/default/molecule.yml | 0 .../aws_cliv2/molecule/default/prepare.yml | 0 .../aws_cliv2/molecule/default/verify.yml | 0 .../roles/aws_cliv2/tasks/main.yml | 0 .../roles/aws_efa/defaults/main.yml | 0 .../roles/aws_efa/tasks/main.yml | 0 .../roles/aws_efa_ofi/tasks/main.yml | 0 .../roles/aws_lustre/defaults/main.yml | 0 .../roles/aws_lustre/tasks/main.yml | 0 .../roles/base/defaults/main.yml | 0 .../roles/base/tasks/main.yml | 0 .../roles/docker/defaults/main.yml | 0 .../roles/docker/tasks/main.yml | 0 .../roles/neuron_driver/defaults/main.yml | 0 .../roles/neuron_driver/tasks/main.yml | 0 .../roles/nvidia_cuda/defaults/main.yml | 0 .../roles/nvidia_cuda/tasks/main.yml | 0 .../roles/nvidia_docker/defaults/main.yml | 0 .../roles/nvidia_docker/tasks/main.yml | 0 .../roles/nvidia_driver/defaults/main.yml | 0 .../nvidia-persistenced-override.service | 0 .../roles/nvidia_driver/tasks/main.yml | 0 .../nvidia_enroot_pyxis/defaults/main.yml | 0 .../roles/nvidia_enroot_pyxis/tasks/main.yml | 0 
.../nvidia_enroot_pyxis/templates/enroot.conf | 0 .../roles/nvidia_gdrcopy/defaults/main.yml | 0 .../roles/nvidia_gdrcopy/tasks/main.yml | 0 .../roles/nvidia_nccl/defaults/main.yml | 0 .../roles/nvidia_nccl/tasks/main.yml | 0 .../roles/observability/defaults/main.yml | 0 .../roles/observability/tasks/main.yml | 0 .../roles/packages/defaults/main.yml | 0 .../roles/packages/tasks/main.yml | 0 .../roles/pytorch_neuron/defaults/main.yml | 0 .../roles/pytorch_neuron/tasks/main.yml | 0 .../2.docker}/Dockerfile | 0 3.test_cases/2.nemo-launcher/README.md | 35 ++++++++++++++----- README.md | 9 ++--- 50 files changed, 32 insertions(+), 12 deletions(-) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/Makefile (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/README.md (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/inventory/group_vars/all.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/inventory/hosts (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/packer-ami.pkr.hcl (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/playbook-dlami-gpu.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/playbook-dlami-neuron.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/playbook-eks-gpu.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/playbook-pcluster-cpu.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/playbook-pcluster-gpu.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/playbook-pcluster-neuron.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/aws_cliv2/molecule/default/converge.yml (100%) rename {2.ami_and_docker => 
2.ami_and_containers/1.amazon_machine_image}/roles/aws_cliv2/molecule/default/molecule.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/aws_cliv2/molecule/default/prepare.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/aws_cliv2/molecule/default/verify.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/aws_cliv2/tasks/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/aws_efa/defaults/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/aws_efa/tasks/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/aws_efa_ofi/tasks/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/aws_lustre/defaults/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/aws_lustre/tasks/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/base/defaults/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/base/tasks/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/docker/defaults/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/docker/tasks/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/neuron_driver/defaults/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/neuron_driver/tasks/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/nvidia_cuda/defaults/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/nvidia_cuda/tasks/main.yml (100%) rename {2.ami_and_docker => 
2.ami_and_containers/1.amazon_machine_image}/roles/nvidia_docker/defaults/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/nvidia_docker/tasks/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/nvidia_driver/defaults/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/nvidia_driver/files/nvidia-persistenced-override.service (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/nvidia_driver/tasks/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/nvidia_enroot_pyxis/defaults/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/nvidia_enroot_pyxis/tasks/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/nvidia_enroot_pyxis/templates/enroot.conf (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/nvidia_gdrcopy/defaults/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/nvidia_gdrcopy/tasks/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/nvidia_nccl/defaults/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/nvidia_nccl/tasks/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/observability/defaults/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/observability/tasks/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/packages/defaults/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/packages/tasks/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/pytorch_neuron/defaults/main.yml (100%) rename 
{2.ami_and_docker => 2.ami_and_containers/1.amazon_machine_image}/roles/pytorch_neuron/tasks/main.yml (100%) rename {2.ami_and_docker => 2.ami_and_containers/2.docker}/Dockerfile (100%) diff --git a/2.ami_and_docker/Makefile b/2.ami_and_containers/1.amazon_machine_image/Makefile similarity index 100% rename from 2.ami_and_docker/Makefile rename to 2.ami_and_containers/1.amazon_machine_image/Makefile diff --git a/2.ami_and_docker/README.md b/2.ami_and_containers/1.amazon_machine_image/README.md similarity index 100% rename from 2.ami_and_docker/README.md rename to 2.ami_and_containers/1.amazon_machine_image/README.md diff --git a/2.ami_and_docker/inventory/group_vars/all.yml b/2.ami_and_containers/1.amazon_machine_image/inventory/group_vars/all.yml similarity index 100% rename from 2.ami_and_docker/inventory/group_vars/all.yml rename to 2.ami_and_containers/1.amazon_machine_image/inventory/group_vars/all.yml diff --git a/2.ami_and_docker/inventory/hosts b/2.ami_and_containers/1.amazon_machine_image/inventory/hosts similarity index 100% rename from 2.ami_and_docker/inventory/hosts rename to 2.ami_and_containers/1.amazon_machine_image/inventory/hosts diff --git a/2.ami_and_docker/packer-ami.pkr.hcl b/2.ami_and_containers/1.amazon_machine_image/packer-ami.pkr.hcl similarity index 100% rename from 2.ami_and_docker/packer-ami.pkr.hcl rename to 2.ami_and_containers/1.amazon_machine_image/packer-ami.pkr.hcl diff --git a/2.ami_and_docker/playbook-dlami-gpu.yml b/2.ami_and_containers/1.amazon_machine_image/playbook-dlami-gpu.yml similarity index 100% rename from 2.ami_and_docker/playbook-dlami-gpu.yml rename to 2.ami_and_containers/1.amazon_machine_image/playbook-dlami-gpu.yml diff --git a/2.ami_and_docker/playbook-dlami-neuron.yml b/2.ami_and_containers/1.amazon_machine_image/playbook-dlami-neuron.yml similarity index 100% rename from 2.ami_and_docker/playbook-dlami-neuron.yml rename to 2.ami_and_containers/1.amazon_machine_image/playbook-dlami-neuron.yml diff --git 
a/2.ami_and_docker/playbook-eks-gpu.yml b/2.ami_and_containers/1.amazon_machine_image/playbook-eks-gpu.yml similarity index 100% rename from 2.ami_and_docker/playbook-eks-gpu.yml rename to 2.ami_and_containers/1.amazon_machine_image/playbook-eks-gpu.yml diff --git a/2.ami_and_docker/playbook-pcluster-cpu.yml b/2.ami_and_containers/1.amazon_machine_image/playbook-pcluster-cpu.yml similarity index 100% rename from 2.ami_and_docker/playbook-pcluster-cpu.yml rename to 2.ami_and_containers/1.amazon_machine_image/playbook-pcluster-cpu.yml diff --git a/2.ami_and_docker/playbook-pcluster-gpu.yml b/2.ami_and_containers/1.amazon_machine_image/playbook-pcluster-gpu.yml similarity index 100% rename from 2.ami_and_docker/playbook-pcluster-gpu.yml rename to 2.ami_and_containers/1.amazon_machine_image/playbook-pcluster-gpu.yml diff --git a/2.ami_and_docker/playbook-pcluster-neuron.yml b/2.ami_and_containers/1.amazon_machine_image/playbook-pcluster-neuron.yml similarity index 100% rename from 2.ami_and_docker/playbook-pcluster-neuron.yml rename to 2.ami_and_containers/1.amazon_machine_image/playbook-pcluster-neuron.yml diff --git a/2.ami_and_docker/roles/aws_cliv2/molecule/default/converge.yml b/2.ami_and_containers/1.amazon_machine_image/roles/aws_cliv2/molecule/default/converge.yml similarity index 100% rename from 2.ami_and_docker/roles/aws_cliv2/molecule/default/converge.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/aws_cliv2/molecule/default/converge.yml diff --git a/2.ami_and_docker/roles/aws_cliv2/molecule/default/molecule.yml b/2.ami_and_containers/1.amazon_machine_image/roles/aws_cliv2/molecule/default/molecule.yml similarity index 100% rename from 2.ami_and_docker/roles/aws_cliv2/molecule/default/molecule.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/aws_cliv2/molecule/default/molecule.yml diff --git a/2.ami_and_docker/roles/aws_cliv2/molecule/default/prepare.yml 
b/2.ami_and_containers/1.amazon_machine_image/roles/aws_cliv2/molecule/default/prepare.yml similarity index 100% rename from 2.ami_and_docker/roles/aws_cliv2/molecule/default/prepare.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/aws_cliv2/molecule/default/prepare.yml diff --git a/2.ami_and_docker/roles/aws_cliv2/molecule/default/verify.yml b/2.ami_and_containers/1.amazon_machine_image/roles/aws_cliv2/molecule/default/verify.yml similarity index 100% rename from 2.ami_and_docker/roles/aws_cliv2/molecule/default/verify.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/aws_cliv2/molecule/default/verify.yml diff --git a/2.ami_and_docker/roles/aws_cliv2/tasks/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/aws_cliv2/tasks/main.yml similarity index 100% rename from 2.ami_and_docker/roles/aws_cliv2/tasks/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/aws_cliv2/tasks/main.yml diff --git a/2.ami_and_docker/roles/aws_efa/defaults/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/aws_efa/defaults/main.yml similarity index 100% rename from 2.ami_and_docker/roles/aws_efa/defaults/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/aws_efa/defaults/main.yml diff --git a/2.ami_and_docker/roles/aws_efa/tasks/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/aws_efa/tasks/main.yml similarity index 100% rename from 2.ami_and_docker/roles/aws_efa/tasks/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/aws_efa/tasks/main.yml diff --git a/2.ami_and_docker/roles/aws_efa_ofi/tasks/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/aws_efa_ofi/tasks/main.yml similarity index 100% rename from 2.ami_and_docker/roles/aws_efa_ofi/tasks/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/aws_efa_ofi/tasks/main.yml diff --git a/2.ami_and_docker/roles/aws_lustre/defaults/main.yml 
b/2.ami_and_containers/1.amazon_machine_image/roles/aws_lustre/defaults/main.yml similarity index 100% rename from 2.ami_and_docker/roles/aws_lustre/defaults/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/aws_lustre/defaults/main.yml diff --git a/2.ami_and_docker/roles/aws_lustre/tasks/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/aws_lustre/tasks/main.yml similarity index 100% rename from 2.ami_and_docker/roles/aws_lustre/tasks/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/aws_lustre/tasks/main.yml diff --git a/2.ami_and_docker/roles/base/defaults/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/base/defaults/main.yml similarity index 100% rename from 2.ami_and_docker/roles/base/defaults/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/base/defaults/main.yml diff --git a/2.ami_and_docker/roles/base/tasks/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/base/tasks/main.yml similarity index 100% rename from 2.ami_and_docker/roles/base/tasks/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/base/tasks/main.yml diff --git a/2.ami_and_docker/roles/docker/defaults/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/docker/defaults/main.yml similarity index 100% rename from 2.ami_and_docker/roles/docker/defaults/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/docker/defaults/main.yml diff --git a/2.ami_and_docker/roles/docker/tasks/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/docker/tasks/main.yml similarity index 100% rename from 2.ami_and_docker/roles/docker/tasks/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/docker/tasks/main.yml diff --git a/2.ami_and_docker/roles/neuron_driver/defaults/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/neuron_driver/defaults/main.yml similarity index 100% rename from 2.ami_and_docker/roles/neuron_driver/defaults/main.yml rename to 
2.ami_and_containers/1.amazon_machine_image/roles/neuron_driver/defaults/main.yml diff --git a/2.ami_and_docker/roles/neuron_driver/tasks/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/neuron_driver/tasks/main.yml similarity index 100% rename from 2.ami_and_docker/roles/neuron_driver/tasks/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/neuron_driver/tasks/main.yml diff --git a/2.ami_and_docker/roles/nvidia_cuda/defaults/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_cuda/defaults/main.yml similarity index 100% rename from 2.ami_and_docker/roles/nvidia_cuda/defaults/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/nvidia_cuda/defaults/main.yml diff --git a/2.ami_and_docker/roles/nvidia_cuda/tasks/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_cuda/tasks/main.yml similarity index 100% rename from 2.ami_and_docker/roles/nvidia_cuda/tasks/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/nvidia_cuda/tasks/main.yml diff --git a/2.ami_and_docker/roles/nvidia_docker/defaults/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_docker/defaults/main.yml similarity index 100% rename from 2.ami_and_docker/roles/nvidia_docker/defaults/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/nvidia_docker/defaults/main.yml diff --git a/2.ami_and_docker/roles/nvidia_docker/tasks/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_docker/tasks/main.yml similarity index 100% rename from 2.ami_and_docker/roles/nvidia_docker/tasks/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/nvidia_docker/tasks/main.yml diff --git a/2.ami_and_docker/roles/nvidia_driver/defaults/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_driver/defaults/main.yml similarity index 100% rename from 2.ami_and_docker/roles/nvidia_driver/defaults/main.yml rename to 
2.ami_and_containers/1.amazon_machine_image/roles/nvidia_driver/defaults/main.yml diff --git a/2.ami_and_docker/roles/nvidia_driver/files/nvidia-persistenced-override.service b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_driver/files/nvidia-persistenced-override.service similarity index 100% rename from 2.ami_and_docker/roles/nvidia_driver/files/nvidia-persistenced-override.service rename to 2.ami_and_containers/1.amazon_machine_image/roles/nvidia_driver/files/nvidia-persistenced-override.service diff --git a/2.ami_and_docker/roles/nvidia_driver/tasks/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_driver/tasks/main.yml similarity index 100% rename from 2.ami_and_docker/roles/nvidia_driver/tasks/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/nvidia_driver/tasks/main.yml diff --git a/2.ami_and_docker/roles/nvidia_enroot_pyxis/defaults/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_enroot_pyxis/defaults/main.yml similarity index 100% rename from 2.ami_and_docker/roles/nvidia_enroot_pyxis/defaults/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/nvidia_enroot_pyxis/defaults/main.yml diff --git a/2.ami_and_docker/roles/nvidia_enroot_pyxis/tasks/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_enroot_pyxis/tasks/main.yml similarity index 100% rename from 2.ami_and_docker/roles/nvidia_enroot_pyxis/tasks/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/nvidia_enroot_pyxis/tasks/main.yml diff --git a/2.ami_and_docker/roles/nvidia_enroot_pyxis/templates/enroot.conf b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_enroot_pyxis/templates/enroot.conf similarity index 100% rename from 2.ami_and_docker/roles/nvidia_enroot_pyxis/templates/enroot.conf rename to 2.ami_and_containers/1.amazon_machine_image/roles/nvidia_enroot_pyxis/templates/enroot.conf diff --git a/2.ami_and_docker/roles/nvidia_gdrcopy/defaults/main.yml 
b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_gdrcopy/defaults/main.yml similarity index 100% rename from 2.ami_and_docker/roles/nvidia_gdrcopy/defaults/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/nvidia_gdrcopy/defaults/main.yml diff --git a/2.ami_and_docker/roles/nvidia_gdrcopy/tasks/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_gdrcopy/tasks/main.yml similarity index 100% rename from 2.ami_and_docker/roles/nvidia_gdrcopy/tasks/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/nvidia_gdrcopy/tasks/main.yml diff --git a/2.ami_and_docker/roles/nvidia_nccl/defaults/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_nccl/defaults/main.yml similarity index 100% rename from 2.ami_and_docker/roles/nvidia_nccl/defaults/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/nvidia_nccl/defaults/main.yml diff --git a/2.ami_and_docker/roles/nvidia_nccl/tasks/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/nvidia_nccl/tasks/main.yml similarity index 100% rename from 2.ami_and_docker/roles/nvidia_nccl/tasks/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/nvidia_nccl/tasks/main.yml diff --git a/2.ami_and_docker/roles/observability/defaults/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/observability/defaults/main.yml similarity index 100% rename from 2.ami_and_docker/roles/observability/defaults/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/observability/defaults/main.yml diff --git a/2.ami_and_docker/roles/observability/tasks/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/observability/tasks/main.yml similarity index 100% rename from 2.ami_and_docker/roles/observability/tasks/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/observability/tasks/main.yml diff --git a/2.ami_and_docker/roles/packages/defaults/main.yml 
b/2.ami_and_containers/1.amazon_machine_image/roles/packages/defaults/main.yml similarity index 100% rename from 2.ami_and_docker/roles/packages/defaults/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/packages/defaults/main.yml diff --git a/2.ami_and_docker/roles/packages/tasks/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/packages/tasks/main.yml similarity index 100% rename from 2.ami_and_docker/roles/packages/tasks/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/packages/tasks/main.yml diff --git a/2.ami_and_docker/roles/pytorch_neuron/defaults/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/pytorch_neuron/defaults/main.yml similarity index 100% rename from 2.ami_and_docker/roles/pytorch_neuron/defaults/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/pytorch_neuron/defaults/main.yml diff --git a/2.ami_and_docker/roles/pytorch_neuron/tasks/main.yml b/2.ami_and_containers/1.amazon_machine_image/roles/pytorch_neuron/tasks/main.yml similarity index 100% rename from 2.ami_and_docker/roles/pytorch_neuron/tasks/main.yml rename to 2.ami_and_containers/1.amazon_machine_image/roles/pytorch_neuron/tasks/main.yml diff --git a/2.ami_and_docker/Dockerfile b/2.ami_and_containers/2.docker/Dockerfile similarity index 100% rename from 2.ami_and_docker/Dockerfile rename to 2.ami_and_containers/2.docker/Dockerfile diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index c8ac2997..059989dd 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -10,6 +10,9 @@ Table of contents: - [4. Prepare Input Data](#4-prepare-input-data) - [5. Pre-training GPT3](#5-pre-training-gpt3) - [6. Customizing Pre-Training](#6-customizing-pre-training) +- [7. Pre-Training llama2](#7-pre-training-llama2) +- [8. References](#8-references) +- [9. Authors / Reviewers](#9-authors--reviewers) ## 1. 
Pre-requisites @@ -17,7 +20,7 @@ The following pre-requisites are needed to run this example: - You are using p4de.24xlarge instances with A100 80GB or newer, with at least 80GB of memory per GPU. - You have access to the base image [`nemofw-training`](https://registry.ngc.nvidia.com/orgs/ea-bignlp/containers/bignlp-training) is available through NVIDIA's open-beta [here](https://developer.nvidia.com/nemo-framework-open-beta). -- Docker, [Enroot](https://github.com/NVIDIA/enroot) and [Pixys](https://github.com/NVIDIA/pyxis) installed on the cluster and available on all nodes. It is assumed you are using a Custom AMI ([example](../../2.amazon_machine_images)) +- Docker, [Enroot](https://github.com/NVIDIA/enroot) and [Pixys](https://github.com/NVIDIA/pyxis) installed on the cluster and available on all nodes. It is assumed you are using a Custom AMI ([example](../../2.ami_and_containers/1.amazon_machine_image)) You will need to setup the following environment variables before running the scripts. : @@ -130,15 +133,20 @@ That's all needed to pre-train with a mock dataset generated on-the-fly. This section assumes that you went through the previous sections and 1/ retrieved and built the AWS optimized NemoMegatron container, 2/ setup the NemoMegatron environment, and 3/ download the vocabularies. Here you start a pre-training on a small model of 126M parameters, this serves as a quick sanity check. 1. Source the NemoMegatron environment created earlier. + ```bash source ${TARGET_PATH}/.venv/bin/activate ``` + 2. To pre-train a GPT3-126m on two instances with mock dataset, run the commands below to let : + ```bash cd $TARGET_PATH $TEST_CASE_PATH/1.bmk-pretrain-gpt3-126m.sh ``` + 3. Check the file `$TARGET_PATH/launcher_scripts/main.py`. The `launcher_scripts/main.py` interacts with Slurm on our behalf to generate an `.sbatch` file and submits it to Slurm. Nemo-launcher logs all the invocation commands, output, and error to `$TARGET_PATH/results//` described below. 
+ ```bash $TARGET_PATH/results/gpt3_126m ├── gpt3_126m_hydra.yaml # The fully interpolated pre-training configuration @@ -155,6 +163,7 @@ This section assumes that you went through the previous sections and 1/ retrieve ├── nemo_error_log.txt # Stderr of pre-training step └── nemo_log_globalrank-*.txt # Log of each rank ``` + Please note that except for `log-nemo-megatron-gpt3_126m_.out`, the other files will be overridden when you launch another pre-training of that same model size. To completely separate the output among jobs, edit `TEST_CASE_PATH/bmk-pretrain-gpt3-126m.sh` and uncomment the `#export UNIQUE_OUTPUT_DIR=1` line to produce this output dir instead: 4. You can use Slurm command `squeue` to monitor the job status in the queue. The ample output below shows a `nemo-megatron` job with job id `1234` is in running state (`ST` = `R`). A queued job will have state `ST` = `PD` (pending). Please refer to the complete of job states in this [Slurm documentation](https://slurm.schedmd.com/squeue.html#SECTION_JOB-STATE-CODES). @@ -163,7 +172,9 @@ This section assumes that you went through the previous sections and 1/ retrieve JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON) 1234 my-cluste nemo-mega ec2-user R 00:19:40 1 p4de-dy-p4de-24xlarge-[1-2] ``` + 5. Once a job finishes, check the `log-nemo-megatron-__.err`, and see it should contains ``Trainer.fit` stopped: `max_steps=40` reached`` (disregard the warnings). + ```console $ tail -5 $TARGET_PATH/results/gpt3_126m/log-nemo-megatron-gpt3_126m_72.err @@ -172,7 +183,9 @@ This section assumes that you went through the previous sections and 1/ retrieve `Trainer.fit` stopped: `max_steps=40` reached. ``` + 6. Review the output file (`log-nemo-megatron-gpt3_126m_.out`) which contains the `stdout` output of the job. 
The end of the file should be similar to the snippet below + ```console [NeMo I 2023-09-11 22:31:28 lr_scheduler:910] Scheduler "" will be used during training (effective maximum steps = 40) - @@ -189,8 +202,7 @@ Congratulations! You've successfully run this test case to completion. > **Note**: Should you run into an OOM error, you can adjust the minimum batch size by setting the MBS in `bmk` launch scripts. You can tune the NemoMegatron and PyTorch parameters in such way as well. - -## 7. Customizing Pre-Training +## 6. Customizing Pre-Training To pre-train for a different model size on different instance count, open `$TEST_CASE_PATH/1.bmk-pretrain-gpt3-126m.sh` and edit section `000` to choose the right hyperparameters. Be aware that pre-training LLM requires understanding on the hyperparameters such as parallelism and batches. Please refer to the NeMO project ([website](https://developer.nvidia.com/nemo), [GitHub](https://github.com/NVIDIA/NeMo), [NeMo-Megatron-Launcher](https://github.com/NVIDIA/NeMo-Megatron-Launcher)) and the Megatron papers ([Shoeybi20](https://arxiv.org/abs/1909.08053), [Narayanan21](https://arxiv.org/abs/2104.04473)). @@ -217,33 +229,40 @@ training.trainer.num_nodes=$NUM_NODES | └── key 'trainer -> num_nodes' in the `.yaml` file. ``` -## 8. Pre-Training llama2 + +## 7. Pre-Training llama2 This section assumes that you went through the previous sections and 1/ retrieved and built the AWS optimized NemoMegatron container, 2/ setup the NemoMegatron environment, and 3/ download the vocabularies. Actions will be almost the same as for 5/ Pre-training GPT3, let do it. 1. Download llama2 tokenizer + ``` mkdir -p $TARGET_PATH/data/llama2 curl -L https://github.com/microsoft/Llama-2-Onnx/raw/main/tokenizer.model > $TARGET_PATH/data/llama2/tokenizer.model ``` + 2. Source the NemoMegatron environment created earlier. + ```bash source ${TARGET_PATH}/.venv/bin/activate ``` + 3. 
To pre-train a llama2-7b on two instances with mock dataset, run the commands below to let : + ```bash cd $TARGET_PATH $TEST_CASE_PATH/5.bmk-pretrain-llama-7b.sh ``` + 4. Next stests are absolutely the same as for 5/ Pre-training GPT3, the only difference is that result directory is `$TARGET_PATH/results/llama2_7b` -## 9. References +## 8. References -- Nvidia NemoMegatron Documentation: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/megatron.html -- Train Large Scale NLP with Nemo Megatron from Nvidia: https://docs.nvidia.com/launchpad/ai/base-command-nemo/latest/index.html +- Nvidia NemoMegatron Documentation: +- Train Large Scale NLP with Nemo Megatron from Nvidia: -## Authors / Reviewers +## 9. Authors / Reviewers - [A] Verdi March - marcverd@ - [R] Pierre-Yves Aquilanti - pierreya@ diff --git a/README.md b/README.md index 6f4914d4..8047311c 100644 --- a/README.md +++ b/README.md @@ -6,8 +6,8 @@ The major components of this directory are: ```bash reference-architectures/ -|-- 1.architectures # CloudFormation templates for reference arch -|-- 2.amazon_machine_images/ # Scripts to create AMIs +|-- 1.architectures/ # CloudFormation templates for reference arch +|-- 2.ami_and_containers/ # Scripts to create AMIs and container images |-- 3.test_cases/ # Reference test cases and/or benchmark scripts |-- 3.validation_scripts/ # Tools to measure performance or troubleshoot `-- ... @@ -45,7 +45,6 @@ All test cases are under `3.test_cases/`. You can go in each test case directory | `4.DDP` | ✅ | ❓ | ❓ | | `5.param-benchmark` | ✅ | ❓ | ❓ | - ## 4. Validation scripts Utilities scripts and micro-benchmarks examples are set under `4.validation_scripts/`. @@ -53,11 +52,13 @@ Utilities scripts and micro-benchmarks examples are set under `4.validation_scri ## 5. CI Integration tests are written in [pytest](https://docs.pytest.org). Just run: + ``` pytest . 
``` Alternatively you can run tests with out capturing stdout and keeping all docker images an other artifacts. + ``` pytest -s --keep-artifacts=t ``` @@ -76,4 +77,4 @@ Thanks to all the contributors for building, reviewing and testing. - Sean Smith - seaam@ - Jianying Lang - langjian@ - Maxime Hugues - maxhaws@ -- Dmitry Monakhov dmonakhov@ \ No newline at end of file +- Dmitry Monakhov dmonakhov@ From 270d579cd72f8fa7650566f9f117daec193406c0 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 21 Nov 2023 15:52:23 +0800 Subject: [PATCH 211/648] Add README to the template Dockerfile --- 2.ami_and_containers/2.docker/Dockerfile | 2 +- 2.ami_and_containers/2.docker/README.md | 70 ++++++++++++++++++++++++ 2 files changed, 71 insertions(+), 1 deletion(-) create mode 100644 2.ami_and_containers/2.docker/README.md diff --git a/2.ami_and_containers/2.docker/Dockerfile b/2.ami_and_containers/2.docker/Dockerfile index c385cf71..d39bc77a 100644 --- a/2.ami_and_containers/2.docker/Dockerfile +++ b/2.ami_and_containers/2.docker/Dockerfile @@ -7,7 +7,7 @@ # # Sample build instructions: # -# docker build --progress=plain -t nvidia-pt-od:2310 -f Dockerfile-nvidia.dockerfile . +# docker build --progress=plain -t nvidia-pt-od:2310 . # rm /fsx/nvidia-pt-od__2310.sqsh ; enroot import -o /fsx/nvidia-pt-od__2310.sqsh dockerd://nvidia-pt-od:2310 # # Compute nodes (aka build nodes) are transient, so we need to keep the docker image on shared fs, diff --git a/2.ami_and_containers/2.docker/README.md b/2.ami_and_containers/2.docker/README.md new file mode 100644 index 00000000..7e741370 --- /dev/null +++ b/2.ami_and_containers/2.docker/README.md @@ -0,0 +1,70 @@ +# Template of PyTorch image optimized for GPU EC2 instances + +This directory assumes some level of familiarities with Docker, AWS CLI, AWS EFA, libfabric, AWS OFI +plugin to NCCL, enroot, and NVidia GPU stack (particularly NCCL). + +The sample `Dockerfile` are intended as a reference. 
It provides optional stanzas (commented or +active).Instead of building this Dockerfile directly, we strongly recommend you to read through +this`Dockerfile`, understand what it does, then create your own `Dockerfile` cherry-picking just the +necessary stanzas for your use cases. + +Before running a command, _make it a habit to always review_ scripts, configurations, or whatever +files involved. Very frequently, this repo requires you to edit files, or provides explanations, +tips and tricks in the form of comments within various files. + +With that said, feel free to explore the example. Happy coding, and experimenting! + +## 1. Essential software + +In principle, the reference `Dockerfile` does the following: + +- Provide PyTorch built for NVidia CUDA devices, by using a recent NVidia PyTorch image as the + parent image. +- Remove unneccessary networking packages that might conflict with AWS technologies. +- Install EFA user-space libraries. It's important to avoid building the kernel drivers during + `docker build`, and skip the self-tests, as both of these should've been done on the host. +- **OPTIONAL** -- On rare cases when your MPI application crashes when run under Slurm as `srun + --mpi=pmix ...`, you might need to rebuild the OpenMPI to match with the PMIX version in the host. +- **OPTIONAL** -- Install NCCL in case the parent image hasn't caught up with the NCCL version you + want to use. +- Typical environment variables for OpenMPI, EFA, and NCCL. Best practice to enforce these in the + image, otherwise it can be error prone to manually set these variables when starting containers. +- Install [nccl-test](https://github.com/NVIDIA/nccl-tests) by default, as a useful diagnostic tool + when using this container. +- **OPTIONAL** -- Additional packages that worth to mention due to special requirements during + build, e.g., xformers are known to be notoriously resource hungry during compilation. +- And more reference stanzas may be added in future. + +## 2. 
Frequently-used commands + +Build an image: + +```bash +# Build a Docker image +docker build --progress=plain -t nvidia-pt-od:latest . + +# If Dockerfile is named differently, e.g., Dockerfile-nvidia.dockerfile +docker build --progress=plain -t nvidia-pt-od:latest -f Dockerfile-nvidia.dockerfile . + +# Verify the image has been built +docker images + +# Convert to enroot format. Attempt to remove an existing .sqsh, otherwise enroot refuses to +# run when the output .sqsh file already exists. +rm /fsx/nvidia-pt-od__2310.sqsh ; enroot import -o /fsx/nvidia-pt-od__latest.sqsh dockerd://nvidia-pt-od:latest +``` + +Tips: when building on a compute node (or a build node), you save the built Docker image on a shared +shared filesystem such as `/fsx`, to allow other nodes (e.g., head node, or other compute nodes) to +load the image to their local Docker registry. + +```bash +# Build node: save image to file +docker save nvidia-pt-od:latest > /fsx/nvidia-pt-od__latest.tar + +# Load image to local docker registry -> on head node, or new compute/build node +docker load < /fsx/nvidia-pt-od__2310.tar + +# Verify the image has been loaded +docker images +``` From 10f65b1b39b9821922d3f8f191bb4bab05f01213 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 21 Nov 2023 15:52:44 +0800 Subject: [PATCH 212/648] Template dockerfile: bugfix pmix env vars --- 2.ami_and_containers/2.docker/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/2.ami_and_containers/2.docker/Dockerfile b/2.ami_and_containers/2.docker/Dockerfile index d39bc77a..e23ef2d7 100644 --- a/2.ami_and_containers/2.docker/Dockerfile +++ b/2.ami_and_containers/2.docker/Dockerfile @@ -139,8 +139,8 @@ RUN apt-get update && apt-get install -y libevent-dev \ && rm -fr /tmp/pmix-${CUSTOM_PMIX_VERSION}/ # To silence this runtime error message: # [p4de-st-p4de-2:110912] PMIX ERROR: ERROR in file gds_ds12_lock_pthread.c at line 168 -export PMIX_GDS_MODULE=^ds12 -export PMIX_MCA_gds=^ds12 +ENV 
PMIX_GDS_MODULE=^ds12 \ + PMIX_MCA_gds=^ds12 # Rebuild openmpi with DLC style (which it remarks as "without libfabric"), with the above pmix. ENV OMPI_VERSION=4.1.6 From c7a6c82db4a4628a281beeb20aab516dd6cc98ac Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 21 Nov 2023 16:14:19 +0800 Subject: [PATCH 213/648] Practice what we preached: remove unnecessary EFA env vars See: https://github.com/aws/aws-ofi-nccl/blob/master/doc/efa-env-var.md --- 2.ami_and_containers/2.docker/Dockerfile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/2.ami_and_containers/2.docker/Dockerfile b/2.ami_and_containers/2.docker/Dockerfile index e23ef2d7..74df0b89 100644 --- a/2.ami_and_containers/2.docker/Dockerfile +++ b/2.ami_and_containers/2.docker/Dockerfile @@ -193,8 +193,7 @@ ENV OMPI_MCA_pml=^cm,ucx \ # https://github.com/pytorch/pytorch/issues/68893 NCCL_SOCKET_IFNAME=^docker,lo -ENV FI_PROVIDER=efa \ - LD_LIBRARY_PATH="/usr/local/lib:/usr/local/cuda/lib64:${LD_LIBRARY_PATH}" +ENV LD_LIBRARY_PATH="/usr/local/lib:/usr/local/cuda/lib64:${LD_LIBRARY_PATH}" # NCCL-tests: always good to include this as a diagnostic tool. RUN git clone https://github.com/NVIDIA/nccl-tests.git /opt/nccl-tests \ From be2f0dc40c9e61de305583913bce767d86247ef4 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 21 Nov 2023 16:32:28 +0800 Subject: [PATCH 214/648] Template dockerfile: README to explain why minimalistic EFA env vars --- 2.ami_and_containers/2.docker/README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/2.ami_and_containers/2.docker/README.md b/2.ami_and_containers/2.docker/README.md index 7e741370..d7410e79 100644 --- a/2.ami_and_containers/2.docker/README.md +++ b/2.ami_and_containers/2.docker/README.md @@ -29,6 +29,12 @@ In principle, the reference `Dockerfile` does the following: want to use. - Typical environment variables for OpenMPI, EFA, and NCCL. 
Best practice to enforce these in the image, otherwise it can be error prone to manually set these variables when starting containers. + + **NOTE**: recent version of aws-ofi-nccl simplifies a lot of environment variables (see the [the + official EFA cheatsheet in + aws-ofi-nccl](https://github.com/aws/aws-ofi-nccl/blob/master/doc/efa-env-var.md)). Hence, the + provided template `Dockerfile` has (almost) no environment variables for EFA anymore (and this is + one major simplification for those who've been exposed to older EFA examples elsewhere). - Install [nccl-test](https://github.com/NVIDIA/nccl-tests) by default, as a useful diagnostic tool when using this container. - **OPTIONAL** -- Additional packages that worth to mention due to special requirements during From 70f28660e064dec001242b66729a81dc05bba423 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Tue, 21 Nov 2023 23:45:32 +0800 Subject: [PATCH 215/648] Template dockefile: update readme --- 2.ami_and_containers/2.docker/README.md | 35 ++++++++++++------------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/2.ami_and_containers/2.docker/README.md b/2.ami_and_containers/2.docker/README.md index d7410e79..ee4c4860 100644 --- a/2.ami_and_containers/2.docker/README.md +++ b/2.ami_and_containers/2.docker/README.md @@ -1,16 +1,13 @@ # Template of PyTorch image optimized for GPU EC2 instances -This directory assumes some level of familiarities with Docker, AWS CLI, AWS EFA, libfabric, AWS OFI -plugin to NCCL, enroot, and NVidia GPU stack (particularly NCCL). - -The sample `Dockerfile` are intended as a reference. It provides optional stanzas (commented or -active).Instead of building this Dockerfile directly, we strongly recommend you to read through -this`Dockerfile`, understand what it does, then create your own `Dockerfile` cherry-picking just the -necessary stanzas for your use cases. +The directory provides a sample `Dockerfile` intended as a reference. 
It provides optional stanzas +(commented or active). Instead of building this Dockerfile directly, we strongly recommend you to +read through this `Dockerfile`, understand what it does, then create your own `Dockerfile` +cherry-picking just the necessary stanzas for your use cases. Before running a command, _make it a habit to always review_ scripts, configurations, or whatever -files involved. Very frequently, this repo requires you to edit files, or provides explanations, -tips and tricks in the form of comments within various files. +files involved. Very frequently, this directory requires you to edit files, or provides +explanations, tips and tricks in the form of comments within various files. With that said, feel free to explore the example. Happy coding, and experimenting! @@ -22,7 +19,8 @@ In principle, the reference `Dockerfile` does the following: parent image. - Remove unneccessary networking packages that might conflict with AWS technologies. - Install EFA user-space libraries. It's important to avoid building the kernel drivers during - `docker build`, and skip the self-tests, as both of these should've been done on the host. + `docker build`, and skip the self-tests, as both of these steps fail are expected to fail when run + during container build. - **OPTIONAL** -- On rare cases when your MPI application crashes when run under Slurm as `srun --mpi=pmix ...`, you might need to rebuild the OpenMPI to match with the PMIX version in the host. - **OPTIONAL** -- Install NCCL in case the parent image hasn't caught up with the NCCL version you @@ -35,15 +33,16 @@ In principle, the reference `Dockerfile` does the following: aws-ofi-nccl](https://github.com/aws/aws-ofi-nccl/blob/master/doc/efa-env-var.md)). Hence, the provided template `Dockerfile` has (almost) no environment variables for EFA anymore (and this is one major simplification for those who've been exposed to older EFA examples elsewhere). 
-- Install [nccl-test](https://github.com/NVIDIA/nccl-tests) by default, as a useful diagnostic tool - when using this container. -- **OPTIONAL** -- Additional packages that worth to mention due to special requirements during - build, e.g., xformers are known to be notoriously resource hungry during compilation. +- Install [aws-ofi-nccl](https://github.com/aws/aws-ofi-nccl) to get NCCL to utilize EFA. +- Install [nccl-test](https://github.com/NVIDIA/nccl-tests) as a built-in diagnostic tool. +- **OPTIONAL** -- Additional packages that worth to mention due to special build requirements, e.g., + installing [xformers](https://github.com/facebookresearch/xformers#install-troubleshooting) from + source may encounter OOM unless special care is taken. - And more reference stanzas may be added in future. ## 2. Frequently-used commands -Build an image: +Once you've created your own Dockerfile, it's time to build an image out of it: ```bash # Build a Docker image @@ -61,15 +60,15 @@ rm /fsx/nvidia-pt-od__2310.sqsh ; enroot import -o /fsx/nvidia-pt-od__latest.sqs ``` Tips: when building on a compute node (or a build node), you save the built Docker image on a shared -shared filesystem such as `/fsx`, to allow other nodes (e.g., head node, or other compute nodes) to -load the image to their local Docker registry. +filesystem such as `/fsx`, to allow other nodes (e.g., head node, or other compute nodes) to load +the image to their local Docker registry. 
```bash # Build node: save image to file docker save nvidia-pt-od:latest > /fsx/nvidia-pt-od__latest.tar # Load image to local docker registry -> on head node, or new compute/build node -docker load < /fsx/nvidia-pt-od__2310.tar +docker load < /fsx/nvidia-pt-od__latest.tar # Verify the image has been loaded docker images From 7edb2e0eceebdd12af0e2f6e600bcdb557261577 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Aquilanti Date: Tue, 21 Nov 2023 10:49:21 -0600 Subject: [PATCH 216/648] Fix readme typo and formatting --- README.md | 51 +++++++++++++++++++++------------------------------ 1 file changed, 21 insertions(+), 30 deletions(-) diff --git a/README.md b/README.md index 3b63c799..6441009e 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ reference-architectures/ |-- 1.architectures # CloudFormation templates for reference arch |-- 2.amazon_machine_images/ # Scripts to create AMIs |-- 3.test_cases/ # Reference test cases and/or benchmark scripts -|-- 3.validation_scripts/ # Tools to measure performance or troubleshoot +|-- 4.validation_scripts/ # Tools to measure performance or troubleshoot `-- ... ``` @@ -17,17 +17,17 @@ reference-architectures/ ## 1. Architectures -Architectures are located in `1.architectures` and consists of utilities and service related architectures +Architectures are located in `1.architectures` and consists of utilities and service related architectures. 
-| Name | Category | Usage | -| ----------------------- | -------- | --------------------------------------------------- | -| `0.s3` | Storage | Create an S3 bucket | -| `1.vpc_network` | Network | Create a VPC with subnets required resources | -| `2.aws-parallelcluster` | Compute | Cluster templates for GPU & custom silicon training | -| `3.aws-batch` | Compute | AWS Batch template for distributed training | -| `4.amazon-eks` | Compute | Manifest files to train with Amazon EKS | +| Name | Category | Usage | +| ------------------------------------------------------------------ | -------- | --------------------------------------------------- | +| [`0.s3`](./1.architectures/0.s3) | Storage | Create an S3 bucket | +| [`1.vpc_network`](./1.architectures/1.vpc_network) | Network | Create a VPC with subnets required resources | +| [`2.aws-parallelcluster`](./1.architectures/2.aws-parallelcluster) | Compute | Cluster templates for GPU & custom silicon training | +| [`3.aws-batch`](./1.architectures/3.aws-batch) | Compute | AWS Batch template for distributed training | +| [`4.amazon-eks`](./1.architectures/4.amazon-eks) | Compute | Manifest files to train with Amazon EKS | -More will come, feel free to add new ones (EKS, Ray?) +More will come, feel free to add new ones (EKS, Ray?). You will also find [documentation](./1.architectures/efa-cheatsheet.md) for EFA and the recommended environment variables. ## 2. Custom Amazon Machine Images @@ -37,15 +37,16 @@ Custom machine images can be built using [Packer](www.packer.io) for AWS Paralle All test cases are under `3.test_cases/`. You can go in each test case directory to learn how to run it. 
-| Test cases | Slurm | Kubernetes | AWS Batch | -| -------------------------- | ----- | ---------- | ---------- | -| `1.megatron-lm` | ✅ | ❓ | ❓ | -| `2.nemo-launcher` | ✅ | ❌ | ❌ | -| `3.MPT` | ✅ | ❓ | ❓ | -| `4.DDP` | ✅ | ❓ | ❓ | -| `5.param-benchmark` | ✅ | ❓ | ❓ | -| `6.stable-diffusion` | ✅ | ❓ | ❓ | -| `7.tensorflow-distributed` | ✅ | ❓ | ❓ | +| Test cases | Slurm | Kubernetes | AWS Batch | +| --------------------------------------------------------------------- | ----- | ----------- | ---------- | +| [`1.megatron-lm`](./3.test_cases/1.megatron-lm) | ✅ | ❓ | ❓ | +| [`2.nemo-launcher`](./3.test_cases/2.nemo-launcher) | ✅ | ❌ | ❌ | +| [`3.MPT`](./3.test_cases/3.MPT) | ✅ | ❓ | ❓ | +| [`4.DDP`](./3.test_cases/4.DDP) | ✅ | ❓ | ❓ | +| [`5.param-benchmark`](./3.test_cases/5.param-benchmark) | ✅ | ❓ | ❓ | +| [`6.stable-diffusion`](./3.test_cases/6.stable-diffusion) | ✅ | ❓ | ❓ | +| [`7.tensorflow-distributed`](./3.test_cases/7.tensorflow-distributed) | ✅ | ❓ | ❓ | +| [`8.neuronx-nemo-megatron`](./3.test_cases/8.neuronx-nemo-megatron) | ✅ | ❓ | ❓ | ## 4. Validation scripts @@ -67,14 +68,4 @@ pytest -s --keep-artifacts=t Thanks to all the contributors for building, reviewing and testing. 
-- Pierre-Yves Aquilanti - pierreya@ -- Verdi March - marcverd@ -- Uros Lipovsek - lipovsek@ -- Keita Watanabe - mlkeita@ -- Ankur Srivastava - awsankur@ -- Alex Iankoulski - iankouls@ -- Tom McDonald - tjm@ -- Sean Smith - seaam@ -- Jianying Lang - langjian@ -- Maxime Hugues - maxhaws@ -- Dmitry Monakhov dmonakhov@ +[![Contributors](https://contrib.rocks/image?repo=aws-samples/awsome-distributed-training)](https://github.com/aws-samples/awsome-distributed-training/graphs/contributors) From 0fb85ec668dae089fe81ed14439fc9d7ca568638 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Wed, 22 Nov 2023 16:02:08 +0800 Subject: [PATCH 217/648] Rename template Dockerfile => 0.nvcr-pytorch-aws.dockerfile --- .../2.docker/{Dockerfile => 0.nvcr-pytorch-aws.dockerfile} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename 2.ami_and_containers/2.docker/{Dockerfile => 0.nvcr-pytorch-aws.dockerfile} (100%) diff --git a/2.ami_and_containers/2.docker/Dockerfile b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile similarity index 100% rename from 2.ami_and_containers/2.docker/Dockerfile rename to 2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile From b81fb5d022ab0a31973de758772d3ab6d5b11fd1 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Wed, 22 Nov 2023 16:04:30 +0800 Subject: [PATCH 218/648] Template dockerfile: sync docs with renamed dockerfile --- 2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile | 2 +- 2.ami_and_containers/2.docker/README.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile index 74df0b89..70b78c14 100644 --- a/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile +++ b/2.ami_and_containers/2.docker/0.nvcr-pytorch-aws.dockerfile @@ -7,7 +7,7 @@ # # Sample build instructions: # -# docker build --progress=plain -t nvidia-pt-od:2310 . 
+# docker build --progress=plain -t nvidia-pt-od:2310 -f 0.nvcr-pytorch-aws.dockerfile . # rm /fsx/nvidia-pt-od__2310.sqsh ; enroot import -o /fsx/nvidia-pt-od__2310.sqsh dockerd://nvidia-pt-od:2310 # # Compute nodes (aka build nodes) are transient, so we need to keep the docker image on shared fs, diff --git a/2.ami_and_containers/2.docker/README.md b/2.ami_and_containers/2.docker/README.md index ee4c4860..e05c8523 100644 --- a/2.ami_and_containers/2.docker/README.md +++ b/2.ami_and_containers/2.docker/README.md @@ -48,8 +48,8 @@ Once you've created your own Dockerfile, it's time to build an image out of it: # Build a Docker image docker build --progress=plain -t nvidia-pt-od:latest . -# If Dockerfile is named differently, e.g., Dockerfile-nvidia.dockerfile -docker build --progress=plain -t nvidia-pt-od:latest -f Dockerfile-nvidia.dockerfile . +# If Dockerfile is named differently, e.g., 0.nvcr-pytorch-aws.dockerfile +docker build --progress=plain -t nvidia-pt-od:latest -f 0.nvcr-pytorch-aws.dockerfile . # Verify the image has been built docker images From de32dbdd81c4555c5b79f8deb08dc09e29e8f3b4 Mon Sep 17 00:00:00 2001 From: Verdi March Date: Wed, 22 Nov 2023 16:08:15 +0800 Subject: [PATCH 219/648] Template dockerfile: explain about gdcropy --- 2.ami_and_containers/2.docker/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/2.ami_and_containers/2.docker/README.md b/2.ami_and_containers/2.docker/README.md index e05c8523..de328c9b 100644 --- a/2.ami_and_containers/2.docker/README.md +++ b/2.ami_and_containers/2.docker/README.md @@ -33,6 +33,8 @@ In principle, the reference `Dockerfile` does the following: aws-ofi-nccl](https://github.com/aws/aws-ofi-nccl/blob/master/doc/efa-env-var.md)). Hence, the provided template `Dockerfile` has (almost) no environment variables for EFA anymore (and this is one major simplification for those who've been exposed to older EFA examples elsewhere). 
+- User-space of gdrcopy -- **NOTE**: no-op (like this example) when already built-in in the parent + image. - Install [aws-ofi-nccl](https://github.com/aws/aws-ofi-nccl) to get NCCL to utilize EFA. - Install [nccl-test](https://github.com/NVIDIA/nccl-tests) as a built-in diagnostic tool. - **OPTIONAL** -- Additional packages that worth to mention due to special build requirements, e.g., From b6016ce673f2cf628de7ff69878532754ba74cdb Mon Sep 17 00:00:00 2001 From: Alex Iankoulski Date: Wed, 22 Nov 2023 15:04:19 -0800 Subject: [PATCH 220/648] Add mpijob manifest for EKS --- .../0.nccl-tests/test-nccl-efa-p5.yaml | 101 ++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 4.validation_scripts/0.nccl-tests/test-nccl-efa-p5.yaml diff --git a/4.validation_scripts/0.nccl-tests/test-nccl-efa-p5.yaml b/4.validation_scripts/0.nccl-tests/test-nccl-efa-p5.yaml new file mode 100644 index 00000000..39ed754e --- /dev/null +++ b/4.validation_scripts/0.nccl-tests/test-nccl-efa-p5.yaml @@ -0,0 +1,101 @@ +apiVersion: kubeflow.org/v2beta1 +kind: MPIJob +metadata: + name: test-nccl-efa +spec: + runPolicy: + cleanPodPolicy: Running + backoffLimit: 20 + slotsPerWorker: 8 + mpiReplicaSpecs: + Launcher: + replicas: 1 + template: + spec: + imagePullPolicy: IfNotPresent + restartPolicy: OnFailure + containers: + - image: .dkr.ecr.us-west-2.amazonaws.com/cuda-efa-nccl-tests:ubuntu22.04 + name: test-nccl-efa-launcher + env: + - name: LD_LIBRARY_PATH + value: /opt/amazon/openmpi/lib:/opt/nccl/build/lib:/opt/amazon/efa/lib:/opt/aws-ofi-nccl/install/lib:/usr/local/nvidia/lib:$LD_LIBRARY_PATH + - name: PATH + value: $PATH:/opt/amazon/efa/bin:/usr/bin + - name: XLA_FLAGS + value: "--xla_gpu_cuda_data_dir=/usr/local/cuda" + - name: TF_XLA_FLAGS + value: "--tf_xla_cpu_global_jit" + - name: NCCL_DEBUG + value: INFO + command: + - /opt/amazon/openmpi/bin/mpirun + - --allow-run-as-root + - --tag-output + - -np + - "16" + - -bind-to + - none + - -map-by + - slot + - -x + - PATH + - -x + 
- LD_LIBRARY_PATH + - -x + - XLA_FLAGS + - -x + - TF_XLA_FLAGS + - -x + - NCCL_DEBUG=INFO + - -x + - NCCL_NVLS_ENABLE=1 + - --mca + - pml + - ^cm + - --mca + - pml_rsh_agent=ssh + - --oversubscribe + - /opt/nccl-tests/build/all_reduce_perf + - -b + - "1" + - -e + - 2G + - -f + - "2" + - -t + - "1" + - -g + - "1" + - -c + - "1" + - -n + - "100" + Worker: + replicas: 2 + template: + spec: + nodeSelector: + node.kubernetes.io/instance-type: "p5.48xlarge" + imagePullPolicy: IfNotPresent + containers: + - image: .dkr.ecr.us-west-2.amazonaws.com/cuda-efa-nccl-tests:ubuntu22.04 + name: test-nccl-efa-worker + volumeMounts: + - name: shmem + mountPath: /dev/shm + resources: + limits: + nvidia.com/gpu: 8 + hugepages-2Mi: 5120Mi + vpc.amazonaws.com/efa: 32 + memory: 8000Mi + requests: + nvidia.com/gpu: 8 + hugepages-2Mi: 5120Mi + vpc.amazonaws.com/efa: 32 + memory: 8000Mi + volumes: + - name: shmem + hostPath: + path: /dev/shm From 17f0f70557519151c2e5f56a1cbd893d1b21712e Mon Sep 17 00:00:00 2001 From: Verdi March Date: Wed, 22 Nov 2023 21:14:27 +0800 Subject: [PATCH 221/648] Bump nemo launcher to ga image --- .../0.NemoMegatron-aws-optimized.Dockerfile | 98 +++++++++++-------- 3.test_cases/2.nemo-launcher/README.md | 51 +++++++--- .../2.nemo-launcher/conf.template/config.yaml | 6 +- README.md | 2 + 4 files changed, 97 insertions(+), 60 deletions(-) diff --git a/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile b/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile index f879be08..cb6daafd 100644 --- a/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile +++ b/3.test_cases/2.nemo-launcher/0.NemoMegatron-aws-optimized.Dockerfile @@ -2,33 +2,53 @@ # SPDX-License-Identifier: Apache-2.0 # DOCKER_BUILDKIT=1 docker build --progress plain -t aws-nemo-megatron:latest . 
-# Customized from: https://github.com/NVIDIA/NeMo-Megatron-Launcher/blob//csp_tools/aws/Dockerfile FROM nvcr.io/ea-bignlp/ga-participants/nemofw-training:23.08.03 -ARG DEBIAN_FRONTEND=noninteractive -ENV EFA_INSTALLER_VERSION=1.28.0 -ENV NCCL_VERSION=2.18.5-1+cuda12.2 +ENV DEBIAN_FRONTEND=noninteractive +ENV EFA_INSTALLER_VERSION=latest ENV AWS_OFI_NCCL_VERSION=1.7.3-aws +ENV NCCL_TESTS_VERSION=master +RUN apt-get update -y +RUN apt-get remove -y --allow-change-held-packages \ + libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 -RUN apt-get update -y \ - && apt-get remove -y --allow-change-held-packages \ - libmlx5-1 ibverbs-utils libibverbs-dev libibverbs1 \ - && rm -rf /opt/hpcx/ompi \ +RUN rm -rf /opt/hpcx/ompi \ && rm -rf /usr/local/mpi \ - && rm -rf /usr/local/ucx \ + && rm -fr /opt/hpcx/nccl_rdma_sharp_plugin \ && ldconfig +ENV OPAL_PREFIX= +RUN apt-get install -y --allow-unauthenticated \ + git \ + gcc \ + vim \ + kmod \ + openssh-client \ + openssh-server \ + build-essential \ + curl \ + autoconf \ + libtool \ + gdb \ + automake \ + cmake \ + apt-utils \ + libhwloc-dev \ + aptitude && \ + apt autoremove -y -RUN echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64 /" >> //etc/apt/sources.list.d/cuda.list \ - && curl https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/3bf863cc.pub > /tmp/3bf863cc.pub \ - && echo "34bb9f7e66744d7b2944d0565db6687560d5d6e3 /tmp/3bf863cc.pub" | sha1sum --check \ - && apt-key add /tmp/3bf863cc.pub \ - && unlink /tmp/3bf863cc.pub \ - && apt-get update -y \ - && apt-get install -y libnccl2=${NCCL_VERSION} libnccl-dev=${NCCL_VERSION} \ - && apt-get clean - +# Uncomment below stanza to install the latest NCCL +# Require efa-installer>=1.29.0 for nccl-2.19.0 to avoid libfabric gave NCCL error. 
+#ENV NCCL_VERSION=2.19.3-1 +#RUN apt-get remove -y libnccl2 libnccl-dev \ +# && cd /tmp \ +# && git clone https://github.com/NVIDIA/nccl.git -b v${NCCL_VERSION} \ +# && cd nccl \ +# && make -j src.build BUILDDIR=/usr/local \ +# # nvcc to target p5 and p4 instances +# NVCC_GENCODE="-gencode=arch=compute_90,code=sm_90 -gencode=arch=compute_80,code=sm_80" \ +# && rm -rf /tmp/nccl # EFA RUN apt-get update && \ @@ -39,18 +59,9 @@ RUN apt-get update && \ cd aws-efa-installer && \ ./efa_installer.sh -y -g -d --skip-kmod --skip-limit-conf --no-verify && \ ldconfig && \ - rm -rf /tmp/aws-efa-installer /var/lib/apt/lists/* && \ - apt-get clean && \ - /opt/amazon/efa/bin/fi_info --version - -ENV LD_LIBRARY_PATH=/opt/amazon/openmpi/lib:/opt/amazon/efa/lib:$LD_LIBRARY_PATH -ENV PATH=/opt/amazon/openmpi/bin/:/opt/amazon/efa/bin:$PATH - -# NCCL EFA Plugin (Dockefile original) -# NOTE: Stick to this version! Otherwise, will get 'ncclInternalError: Internal check failed.' -RUN apt-get update -y \ - && apt-get install -y libhwloc-dev + rm -rf /tmp/aws-efa-installer /var/lib/apt/lists/* +# NCCL EFA Plugin RUN mkdir -p /tmp && \ cd /tmp && \ curl -LO https://github.com/aws/aws-ofi-nccl/archive/refs/tags/v${AWS_OFI_NCCL_VERSION}.tar.gz && \ @@ -62,20 +73,29 @@ RUN mkdir -p /tmp && \ ./configure --prefix=/opt/amazon/efa \ --with-libfabric=/opt/amazon/efa \ --with-cuda=/usr/local/cuda \ - --with-mpi=/usr/local/mpi && \ + --enable-platform-aws \ + --with-mpi=/opt/amazon/openmpi && \ make -j$(nproc) install && \ rm -rf /tmp/aws-ofi/nccl # NCCL -RUN echo "/opt/amazon/efa/lib" >> /etc/ld.so.conf.d/efa.conf && \ +RUN echo "/usr/local/lib" >> /etc/ld.so.conf.d/local.conf && \ + echo "/opt/amazon/openmpi/lib" >> /etc/ld.so.conf.d/efa.conf && \ ldconfig -ENV OMPI_MCA_pml=^ucx \ - OMPI_MCA_btl=^openib,uct - -ENV RDMAV_FORK_SAFE=1 \ - FI_PROVIDER=efa \ - FI_EFA_USE_DEVICE_RDMA=1 \ - NCCL_PROTO=simple +ENV OMPI_MCA_pml=^cm,ucx \ + OMPI_MCA_btl=tcp,self \ + OMPI_MCA_btl_tcp_if_exclude=lo,docker0 \ 
+ OPAL_PREFIX=/opt/amazon/openmpi \ + NCCL_SOCKET_IFNAME=^docker,lo \ + FI_EFA_USE_HUGE_PAGE=0 -ENV LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/local/lib" +# NCCL-tests +RUN git clone https://github.com/NVIDIA/nccl-tests.git /opt/nccl-tests \ + && cd /opt/nccl-tests \ + && git checkout ${NCCL_TESTS_VERSION} \ + && make MPI=1 \ + MPI_HOME=/opt/amazon/openmpi \ + CUDA_HOME=/usr/local/cuda \ + # nvcc to target p5 and p4 instances + NVCC_GENCODE="-gencode=arch=compute_90,code=sm_90 -gencode=arch=compute_80,code=sm_80" diff --git a/3.test_cases/2.nemo-launcher/README.md b/3.test_cases/2.nemo-launcher/README.md index c8ac2997..4bacd19e 100644 --- a/3.test_cases/2.nemo-launcher/README.md +++ b/3.test_cases/2.nemo-launcher/README.md @@ -10,13 +10,16 @@ Table of contents: - [4. Prepare Input Data](#4-prepare-input-data) - [5. Pre-training GPT3](#5-pre-training-gpt3) - [6. Customizing Pre-Training](#6-customizing-pre-training) +- [7. Pre-Training llama2](#7-pre-training-llama2) +- [8. References](#8-references) +- [9. Authors / Reviewers](#9-authors--reviewers) ## 1. Pre-requisites The following pre-requisites are needed to run this example: - You are using p4de.24xlarge instances with A100 80GB or newer, with at least 80GB of memory per GPU. -- You have access to the base image [`nemofw-training`](https://registry.ngc.nvidia.com/orgs/ea-bignlp/containers/bignlp-training) is available through NVIDIA's open-beta [here](https://developer.nvidia.com/nemo-framework-open-beta). +- You have access to the base image [NeMo Framework Training](https://registry.ngc.nvidia.com/orgs/ea-bignlp/teams/ga-participants/containers/nemofw-training). To gain access to this image, go to [Get Access to NeMo Framework](https://developer.nvidia.com/nemo-framework) to enroll to organization/team `ea-bignlp/ga-participant`. - Docker, [Enroot](https://github.com/NVIDIA/enroot) and [Pixys](https://github.com/NVIDIA/pyxis) installed on the cluster and available on all nodes. 
It is assumed you are using a Custom AMI ([example](../../2.amazon_machine_images)) You will need to setup the following environment variables before running the scripts. : @@ -27,7 +30,7 @@ export REPO=aws-nemo-megatron export TAG=$NEMO_VERSION export TARGET_PATH=/fsx/nemo-launcher-$NEMO_VERSION # must be a shared filesystem export TEST_CASE_PATH=/home/ec2-user/2.nemo-launcher # where you copy the test case or set to your test case path -export ENROOT_IMAGE=/apps/${REPO}_${TAG}.sqsh +export ENROOT_IMAGE=/fsx/${REPO}_${TAG}.sqsh cd $TEST_CASE_PATH ``` @@ -48,13 +51,13 @@ docker login nvcr.io docker build --progress plain -t ${REPO}:${TAG} -f 0.NemoMegatron-aws-optimized.Dockerfile . ``` -4. Convert the Docker container image to an [Enroot](https://github.com/NVIDIA/enroot) squash file that will be stored in `/apps`. This step takes a few minutes. +4. Convert the Docker container image to an [Enroot](https://github.com/NVIDIA/enroot) squash file that will be stored in `/fsx`. This step takes a few minutes. ```bash [[ -e $ENROOT_IMAGE ]] && rm $ENROOT_IMAGE ; /usr/bin/time enroot import -o $ENROOT_IMAGE dockerd://${REPO}:${TAG} ``` -The Enroot squash file will be placed into the `/apps` directory. +The Enroot squash file will be placed into the `/fsx` directory, backed by FSx Lustre to provide high read throughput by multiple compute nodes upon job starts. ## 3. Set-up the NemoMegatron environment @@ -73,7 +76,7 @@ cd $TARGET_PATH enroot start --mount $TARGET_PATH:/workspace/mount_dir \ --env NVIDIA_VISIBLE_DEVICES=void \ $ENROOT_IMAGE \ - cp -a /opt/NeMo-Megatron-Launcher/launcher_scripts /opt/NeMo-Megatron-Launcher/auto_configurator /workspace/mount_dir/ + cp -a /opt/NeMo-Megatron-Launcher/launcher_scripts /opt/NeMo-Megatron-Launcher/auto_configurator /opt/nemo-data-curator /opt/nemo-rlhf /workspace/mount_dir/ ``` The `NVIDIA_VISIBLE_DEVICES` variable is set to void to prevent the process to check for the Nvidia driver presence (since we don't need GPUs here). 
@@ -91,7 +94,7 @@ pip3.8 install -r <(curl -fsSL https://raw.githubusercontent.com/NVIDIA/NeMo-Meg Next, you need to prepare the configuration files as follow: -1. Review and update the partition name in the .yaml config file `conf.template/cluster/bcm.yaml`. Here is a summary of the values. +1. Review and update the partition name in the .yaml config file `$TEST_CASE_PATH/conf.template/cluster/bcm.yaml`. Here is a summary of the values. | Value | Default | Definition | | ------------------ | ----------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | @@ -130,15 +133,20 @@ That's all needed to pre-train with a mock dataset generated on-the-fly. This section assumes that you went through the previous sections and 1/ retrieved and built the AWS optimized NemoMegatron container, 2/ setup the NemoMegatron environment, and 3/ download the vocabularies. Here you start a pre-training on a small model of 126M parameters, this serves as a quick sanity check. 1. Source the NemoMegatron environment created earlier. + ```bash source ${TARGET_PATH}/.venv/bin/activate ``` + 2. To pre-train a GPT3-126m on two instances with mock dataset, run the commands below to let : + ```bash cd $TARGET_PATH $TEST_CASE_PATH/1.bmk-pretrain-gpt3-126m.sh ``` + 3. Check the file `$TARGET_PATH/launcher_scripts/main.py`. The `launcher_scripts/main.py` interacts with Slurm on our behalf to generate an `.sbatch` file and submits it to Slurm. Nemo-launcher logs all the invocation commands, output, and error to `$TARGET_PATH/results//` described below. 
+ ```bash $TARGET_PATH/results/gpt3_126m ├── gpt3_126m_hydra.yaml # The fully interpolated pre-training configuration @@ -155,6 +163,7 @@ This section assumes that you went through the previous sections and 1/ retrieve ├── nemo_error_log.txt # Stderr of pre-training step └── nemo_log_globalrank-*.txt # Log of each rank ``` + Please note that except for `log-nemo-megatron-gpt3_126m_.out`, the other files will be overridden when you launch another pre-training of that same model size. To completely separate the output among jobs, edit `TEST_CASE_PATH/bmk-pretrain-gpt3-126m.sh` and uncomment the `#export UNIQUE_OUTPUT_DIR=1` line to produce this output dir instead: 4. You can use Slurm command `squeue` to monitor the job status in the queue. The ample output below shows a `nemo-megatron` job with job id `1234` is in running state (`ST` = `R`). A queued job will have state `ST` = `PD` (pending). Please refer to the complete of job states in this [Slurm documentation](https://slurm.schedmd.com/squeue.html#SECTION_JOB-STATE-CODES). @@ -163,7 +172,9 @@ This section assumes that you went through the previous sections and 1/ retrieve JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON) 1234 my-cluste nemo-mega ec2-user R 00:19:40 1 p4de-dy-p4de-24xlarge-[1-2] ``` + 5. Once a job finishes, check the `log-nemo-megatron-__.err`, and see it should contains ``Trainer.fit` stopped: `max_steps=40` reached`` (disregard the warnings). + ```console $ tail -5 $TARGET_PATH/results/gpt3_126m/log-nemo-megatron-gpt3_126m_72.err @@ -172,7 +183,9 @@ This section assumes that you went through the previous sections and 1/ retrieve `Trainer.fit` stopped: `max_steps=40` reached. ``` + 6. Review the output file (`log-nemo-megatron-gpt3_126m_.out`) which contains the `stdout` output of the job. 
The end of the file should be similar to the snippet below + ```console [NeMo I 2023-09-11 22:31:28 lr_scheduler:910] Scheduler "" will be used during training (effective maximum steps = 40) - @@ -189,12 +202,11 @@ Congratulations! You've successfully run this test case to completion. > **Note**: Should you run into an OOM error, you can adjust the minimum batch size by setting the MBS in `bmk` launch scripts. You can tune the NemoMegatron and PyTorch parameters in such way as well. +## 6. Customizing Pre-Training -## 7. Customizing Pre-Training - -To pre-train for a different model size on different instance count, open `$TEST_CASE_PATH/1.bmk-pretrain-gpt3-126m.sh` and edit section `000` to choose the right hyperparameters. Be aware that pre-training LLM requires understanding on the hyperparameters such as parallelism and batches. Please refer to the NeMO project ([website](https://developer.nvidia.com/nemo), [GitHub](https://github.com/NVIDIA/NeMo), [NeMo-Megatron-Launcher](https://github.com/NVIDIA/NeMo-Megatron-Launcher)) and the Megatron papers ([Shoeybi20](https://arxiv.org/abs/1909.08053), [Narayanan21](https://arxiv.org/abs/2104.04473)). +To pre-train for a different model size on a different instance count, open `$TEST_CASE_PATH/1.bmk-pretrain-gpt3-126m.sh` and edit section `000` to choose the right hyperparameters. Be aware that pre-training LLM requires understanding on the hyperparameters such as parallelism and batches. Please refer to the NeMO project ([website](https://developer.nvidia.com/nemo), [GitHub](https://github.com/NVIDIA/NeMo), [NeMo-Megatron-Launcher](https://github.com/NVIDIA/NeMo-Megatron-Launcher)) and the Megatron papers ([Shoeybi20](https://arxiv.org/abs/1909.08053), [Narayanan21](https://arxiv.org/abs/2104.04473)). -At the very least, you'd want to review and customize one or more YAML files under `$TARGET_PATH/launcher_scripts/conf/`. Nemo-launcher organizes its config files in an opinionated hierarchy. 
Below is an example of relevant YAML files when launching `$TARGET_PATH/launcher_scripts/main.py` for `training` stage for `gpt3/126m` (see `$TEST_CASE_PATH/1.bmk-pretrain-gpt3.sh`). +At the very least, you'd want to review and customize one or more YAML files under `$TARGET_PATH/launcher_scripts/conf/`. Nemo-launcher organizes its config files in an opinionated hierarchy. Below is an example of relevant YAML files when launching `$TARGET_PATH/launcher_scripts/main.py` for `training` stage for `gpt3/126m` (see `$TEST_CASE_PATH/1.bmk-pretrain-gpt3-126m.sh`). ```bash $TARGET_PATH/launcher_scripts/conf @@ -206,7 +218,7 @@ $TARGET_PATH/launcher_scripts/conf    └── 126m.yaml # Config for model size "126m" ``` -You can edit directly the `gpt3/.yaml` to customize the number of instances, tensor parallelism, pipeline parallelism, batch sizes (micro and global), experiment tracking, etc. on this file. Alternatively, you can override the settings through the CLI options of `$TARGET_PATH/launcher_scripts/main.py` (refer to `1.bmk-pretrain-gpt3.sh`). For example, this CLI arg `training.trainer.num_nodes=$NUM_NODES` is equivalent to editing file `$TARGET_PATH/launcher_scripts/training_scripts/conf/training//.yaml` to set key `trainer -> num_nodes` to `$NUM_NODES`. +You can edit directly the `gpt3/.yaml` to customize the number of instances, tensor parallelism, pipeline parallelism, batch sizes (micro and global), experiment tracking, etc. on this file. Alternatively, you can override the settings through the CLI options of `$TARGET_PATH/launcher_scripts/main.py` (refer to `1.bmk-pretrain-gpt3-126m.sh`). For example, this CLI arg `training.trainer.num_nodes=$NUM_NODES` is equivalent to editing file `$TARGET_PATH/launcher_scripts/training_scripts/conf/training//.yaml` to set key `trainer -> num_nodes` to `$NUM_NODES`. 
```text +-- file `training//.yaml` under `$TARGET_PATH/launcher_scripts/conf` @@ -217,33 +229,40 @@ training.trainer.num_nodes=$NUM_NODES | └── key 'trainer -> num_nodes' in the `.yaml` file. ``` -## 8. Pre-Training llama2 + +## 7. Pre-Training llama2 This section assumes that you went through the previous sections and 1/ retrieved and built the AWS optimized NemoMegatron container, 2/ setup the NemoMegatron environment, and 3/ download the vocabularies. Actions will be almost the same as for 5/ Pre-training GPT3, let do it. 1. Download llama2 tokenizer + ``` mkdir -p $TARGET_PATH/data/llama2 curl -L https://github.com/microsoft/Llama-2-Onnx/raw/main/tokenizer.model > $TARGET_PATH/data/llama2/tokenizer.model ``` + 2. Source the NemoMegatron environment created earlier. + ```bash source ${TARGET_PATH}/.venv/bin/activate ``` + 3. To pre-train a llama2-7b on two instances with mock dataset, run the commands below to let : + ```bash cd $TARGET_PATH $TEST_CASE_PATH/5.bmk-pretrain-llama-7b.sh ``` + 4. Next stests are absolutely the same as for 5/ Pre-training GPT3, the only difference is that result directory is `$TARGET_PATH/results/llama2_7b` -## 9. References +## 8. References -- Nvidia NemoMegatron Documentation: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/megatron.html -- Train Large Scale NLP with Nemo Megatron from Nvidia: https://docs.nvidia.com/launchpad/ai/base-command-nemo/latest/index.html +- Nvidia NemoMegatron Documentation: +- Train Large Scale NLP with Nemo Megatron from Nvidia: -## Authors / Reviewers +## 9. 
Authors / Reviewers - [A] Verdi March - marcverd@ - [R] Pierre-Yves Aquilanti - pierreya@ diff --git a/3.test_cases/2.nemo-launcher/conf.template/config.yaml b/3.test_cases/2.nemo-launcher/conf.template/config.yaml index 33b5dbc8..d85985cb 100644 --- a/3.test_cases/2.nemo-launcher/conf.template/config.yaml +++ b/3.test_cases/2.nemo-launcher/conf.template/config.yaml @@ -37,13 +37,9 @@ wandb_api_key_file: null # File where the w&B api key is stored. Key must be on env_vars: NCCL_DEBUG: null # Logging level for NCCL. Set to "INFO" for debug information - NCCL_PROTO: simple # Protocol NCCL will use. Set to "simple" for AWS TRANSFORMER_OFFLINE: 1 - FI_EFA_USE_DEVICE_RDMA: 1 - FI_PROVIDER: efa NCCL_LAUNCH_MODE: parallel - FI_EFA_FORK_SAFE: 1 - FI_EFA_ENABLE_SHM_TRANSFER: 1 + NCCL_ASYNC_ERROR_HANDLING: 1 # GPU Mapping numa_mapping: diff --git a/README.md b/README.md index 6441009e..782abbb2 100644 --- a/README.md +++ b/README.md @@ -55,11 +55,13 @@ Utilities scripts and micro-benchmarks examples are set under `4.validation_scri ## 5. CI Integration tests are written in [pytest](https://docs.pytest.org). Just run: + ``` pytest . ``` Alternatively you can run tests with out capturing stdout and keeping all docker images an other artifacts. 
+ ``` pytest -s --keep-artifacts=t ``` From fec27a1984cea21bd71845ac19d48f2bf27187ca Mon Sep 17 00:00:00 2001 From: Alex Iankoulski Date: Fri, 24 Nov 2023 19:24:51 -0800 Subject: [PATCH 222/648] Add NCCL test instructions for EKS --- 4.validation_scripts/0.nccl-tests/README.md | 163 ++++++++++++++++-- ...st-nccl-efa-p5.yaml => nccl-test-eks.yaml} | 10 +- 2 files changed, 155 insertions(+), 18 deletions(-) rename 4.validation_scripts/0.nccl-tests/{test-nccl-efa-p5.yaml => nccl-test-eks.yaml} (89%) diff --git a/4.validation_scripts/0.nccl-tests/README.md b/4.validation_scripts/0.nccl-tests/README.md index 880196cf..ed846a22 100644 --- a/4.validation_scripts/0.nccl-tests/README.md +++ b/4.validation_scripts/0.nccl-tests/README.md @@ -1,10 +1,11 @@ # NCCL Tests -[NCCL Tests](https://github.com/NVIDIA/nccl-tests) enable you to evaluate the performance of the network using the Nvidia Collective Communication Library. This test case contains a Docker file and a Slurm submission scripts so you can run NCCL tests on Slurm. +[NCCL Tests](https://github.com/NVIDIA/nccl-tests) enable you to evaluate the performance of the network using the Nvidia Collective Communication Library. This test case contains a Docker file and scripts to submit NCCL tests on Slurm or [Amazon EKS](https://aws.amazon.com/eks/). Please refer to the relevant instructions below, depending on your environment. -## 0. Preparation +## 0. Prepare the runtime environment -This guide assumes that you have the following: +### Slurm +If you are using Slurm, this guide assumes that you have the following: - A functional Slurm cluster on AWS. - Docker, [Pyxis](https://github.com/NVIDIA/pyxis) and [Enroot](https://github.com/NVIDIA/enroot) installed. @@ -13,10 +14,35 @@ This guide assumes that you have the following: It is recommended that you use the templates in the architectures [directory](../../1.architectures) +### Amazon EKS +If you are using EKS, this guide assumes that you have the following: -## 1. 
Build the container and the Squash file +- A functional EKS cluster on AWS.
+To set up one, please refer to [aws-do-eks](https://bit.ly/do-eks), [Amazon EKS Blueprints for Terraform](https://github.com/aws-ia/terraform-aws-eks-blueprints/tree/main), [Amazon EKS Blueprints for CDK](https://aws-quickstart.github.io/cdk-eks-blueprints/), or others. +- NVIDIA device plugin deployed to your cluster.
+If you need to deploy it, please refer to [deployment/nvidia-device-plugin](https://github.com/aws-samples/aws-do-eks/blob/main/Container-Root/eks/deployment/nvidia-device-plugin) or [k8s-device-plugin/deployments](https://github.com/NVIDIA/k8s-device-plugin/tree/main/deployments). +- EFA devide plugin deployed to your cluster.
+If you need to deploy it, please refer to [deployment/efa-device-plugin](https://github.com/aws-samples/aws-do-eks/tree/main/Container-Root/eks/deployment/efa-device-plugin) or [aws-efa-eks](https://github.com/aws-samples/aws-efa-eks). +- Kubeflow MPI operator deployed to your cluster.
+If you need to deploy it, please refer to [deployment/kubeflow/mpi-operator](https://github.com/aws-samples/aws-do-eks/tree/main/Container-Root/eks/deployment/kubeflow/mpi-operator) or [kubeflow/mpi-operator](https://github.com/kubeflow/mpi-operator). +- [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html#cliv2-linux-install) -The NCCL tests are packaged in a container for reproducibility purposes, to run it on Slurm you will need to build your container then convert it into a Squash file using Enroot. +## 1. Prepare the container image and other artifacts + +The NCCL tests are packaged in a container. + +> You can set versions and the branch for NCCL and EFA by editing the variables below in the Dockerfile. + +> | Variable | Default | +> |-----------------------|-------------| +> |`EFA_INSTALLER_VERSION`| `latest` | +> |`AWS_OFI_NCCL_VERSION` | `aws` | +> |`NCCL_TESTS_VERSION` | `master` | +> |`NCCL_VERSION` | `v2.12.7-1` | + +### Slurm + +To run the NCCL tests on Slurm, you will need to build the container then convert it into a Squash file using Enroot. To build the container: @@ -38,19 +64,47 @@ To build the container: ``` The file will be stored in the `/apps` directory. -> You can set versions and the branch for NCCL and EFA by editing the variables below in the Dockerfile. +### Amazon EKS -> | Variable | Default | -> |-----------------------|-------------| -> |`EFA_INSTALLER_VERSION`| `latest` | -> |`AWS_OFI_NCCL_VERSION` | `aws` | -> |`NCCL_TESTS_VERSION` | `master` | -> |`NCCL_VERSION` | `v2.12.7-1` | +To run the NCCL tests on EKS, you will need to build the container image, then push it to a container registry, such as the private [ECR](https://aws.amazon.com/ecr/) in your AWS account. + +1. 
Build the container URI: + ```bash + export AWS_REGION=$(aws ec2 describe-availability-zones --output text --query 'AvailabilityZones[0].[RegionName]') + export ACCOUNT=$(aws sts get-caller-identity --query Account --output text) + export REGISTRY=${ACCOUNT}.dkr.ecr.${REGION}.amazonaws.com/ + export IMAGE=nccl-tests + export TAG=:latest + ``` + +2. Build the container image: + ```bash + docker image build -t ${REGISTRY}${IMAGE}${TAG} -f ./0.nccl-tests.Dockerfile . + ``` + +3. Create the ECR repository if it does not exis + ```bash + REGISTRY_COUNT=$(aws ecr describe-repositories | grep ${IMAGE} | wc -l) + if [ "$REGISTRY_COUNT" == "0" ]; then + aws ecr create-repository --repository-name ${IMAGE} + fi + ``` +4. Login to the container registry + ```bash + aws ecr get-login-password | docker login --username AWS --password-stdin $REGISTRY + ``` + +5. Push the container image to the registry + ```bash + docker image push ${REGISTRY}${IMAGE}${TAG} + ``` ## 2. Running the NCCL Tests -Now you copy the file `1.nccl-tests.sbatch` or its content on your cluster then submit a preprocessing jobs with the command below: +### Slurm + +Copy the file `1.nccl-tests.sbatch` or its content on your cluster then submit a preprocessing jobs with the command below: ```bash sbatch 1.nccl-tests.sbatch @@ -90,6 +144,89 @@ You can validate your environment for NCCL using the batch file `3.nccl-validate sbatch 3.nccl-validate.sbatch ``` +### Amazon EKS + +1. Prepare the MPIJob manifest + Edit file `nccl-test-eks.yaml` and adjust the following values: + + - slotsPerWorker: 8 <- set to the number of GPUs per node in your cluster + - image: .dkr.ecr..amazonaws.com/: <- set to your container image URI. Note: change both locations in the file. You may use `echo ${REGISTRY}${IMAGE}${TAG}` to print the image URI. 
+ - -np 16 <- set -np option in mpirun to (number_of_worker_nodes * number_of_gpus_per_node) + - other mpirun parameters if needed for your instance type, please refer to [aws-ofi-nccl](https://github.com/aws/aws-ofi-nccl/blob/master/doc/efa-env-var.md) + - replicas: 2 <- set to number of worker pods you would like the test to run on. This must be less than or eaqual to the number of nodes in your cluster. + - node.kubernetes.io/instance-type: "p5.48xlarge" <- set to the instance type of the nodes in your cluster against which you would like the nccl test to be run + - nvidia.com/gpu: 8 <- set to the number of GPUs per node in your cluster, adjust in both the limits and requests section + - vpc.amazonaws.com/efa: 32 <- set to the number of EFA adapters per node in your cluster, adjust in both the limits and requests section + + Please note that the current default settings have been specified for instance type p5.48xlarge. Only the image URI is required to be set for running the test on this instance type. + The current manifest executes the `all_reduce_perf` test. If you wish to execute other NCCL tests, change the section between lines 59 and 73 in this MPIJob manifest file. + +2. Apply the MPIJob manifest to the cluster + ```bash + kubectl apply -f ./nccl-test-eks.yaml + ``` + +3. Wait until pods to enter the Running state + To monitor the state of the pods, execute the following command: + ```bash + watch kubectl get pods -o wide + ``` + Once the state of the launcher and worker pods becomes "Running", press `Ctrl-C` to return to the command prompt. + +4. 
View test logs + To follow the test logs, execute the following command: + ```bash + kubectl logs -f $(kubectl get pods | grep launcher | cut -d ' ' -f 1) + ``` + + The following is an example exerpt from the logs of a NCCL all_reduce_perf test, executed on a cluster with two p5.48xlarge instances: + ```log + [1,0]:# out-of-place in-place + [1,0]:# size count type redop root time algbw busbw #wrong time algbw busbw #wrong + [1,0]:# (B) (elements) (us) (GB/s) (GB/s) (us) (GB/s) (GB/s) + [1,0]: 0 0 float sum -1 15.51 0.00 0.00 0 15.52 0.00 0.00 0 + [1,0]: 0 0 float sum -1 15.51 0.00 0.00 0 15.50 0.00 0.00 0 + [1,0]: 4 1 float sum -1 202.2 0.00 0.00 0 179.4 0.00 0.00 0 + [1,0]: 8 2 float sum -1 175.5 0.00 0.00 0 178.2 0.00 0.00 0 + [1,0]: 16 4 float sum -1 177.6 0.00 0.00 0 176.1 0.00 0.00 0 + [1,0]: 32 8 float sum -1 175.8 0.00 0.00 0 173.1 0.00 0.00 0 + [1,0]: 64 16 float sum -1 175.7 0.00 0.00 0 172.9 0.00 0.00 0 + [1,0]: 128 32 float sum -1 171.8 0.00 0.00 0 174.8 0.00 0.00 0 + [1,0]: 256 64 float sum -1 176.7 0.00 0.00 0 172.4 0.00 0.00 0 + [1,0]: 512 128 float sum -1 174.4 0.00 0.01 0 176.8 0.00 0.01 0 + [1,0]: 1024 256 float sum -1 172.0 0.01 0.01 0 175.1 0.01 0.01 0 + [1,0]: 2048 512 float sum -1 175.9 0.01 0.02 0 174.6 0.01 0.02 0 + [1,0]: 4096 1024 float sum -1 174.1 0.02 0.04 0 174.7 0.02 0.04 0 + [1,0]: 8192 2048 float sum -1 175.7 0.05 0.09 0 176.5 0.05 0.09 0 + [1,0]: 16384 4096 float sum -1 224.9 0.07 0.14 0 183.8 0.09 0.17 0 + [1,0]: 32768 8192 float sum -1 193.8 0.17 0.32 0 191.2 0.17 0.32 0 + [1,0]: 65536 16384 float sum -1 194.9 0.34 0.63 0 194.8 0.34 0.63 0 + [1,0]: 131072 32768 float sum -1 203.8 0.64 1.21 0 204.2 0.64 1.20 0 + [1,0]: 262144 65536 float sum -1 218.7 1.20 2.25 0 217.7 1.20 2.26 0 + [1,0]: 524288 131072 float sum -1 225.7 2.32 4.36 0 225.9 2.32 4.35 0 + [1,0]: 1048576 262144 float sum -1 239.3 4.38 8.22 0 245.5 4.27 8.01 0 + [1,0]: 2097152 524288 float sum -1 269.9 7.77 14.57 0 306.0 6.85 12.85 0 + [1,0]: 4194304 1048576 float sum 
-1 305.7 13.72 25.72 0 302.2 13.88 26.02 0 + [1,0]: 8388608 2097152 float sum -1 336.1 24.96 46.79 0 335.2 25.02 46.92 0 + [1,0]: 16777216 4194304 float sum -1 530.9 31.60 59.25 0 564.3 29.73 55.74 0 + [1,0]: 33554432 8388608 float sum -1 859.2 39.05 73.23 0 856.8 39.16 73.43 0 + [1,0]: 67108864 16777216 float sum -1 996.0 67.38 126.33 0 1001.7 66.99 125.62 0 + [1,0]: 134217728 33554432 float sum -1 1950.5 68.81 129.02 0 1725.6 77.78 145.83 0 + [1,0]: 268435456 67108864 float sum -1 3010.8 89.16 167.17 0 3020.7 88.87 166.62 0 + [1,0]: 536870912 134217728 float sum -1 3608.0 148.80 279.00 0 3599.7 149.14 279.64 0 + [1,0]: 1073741824 268435456 float sum -1 6426.3 167.09 313.29 0 6426.1 167.09 313.29 0 + [1,0]: 2147483648 536870912 float sum -1 9197.5 233.49 437.79 0 9195.2 233.54 437.89 0 + [1,0]:# Out of bounds values : 0 OK + [1,0]:# Avg bus bandwidth : 52.9753 + ``` + Press `Ctrl-C` to return to the command prompt if you do not wish to wait until the launcher pod enters the "Completed" state. + +5. Clean up test run + Before running a subsequent test, the current MPIJob needs to be deleted: + ```bash + kubectl delete -f nccl-test-eks.yaml + ``` + ## 3. Understanding NCCL Bandwidth The NCCL tests reports metrics for the time to execute a given communication collective operation, the Algorithmic bandwidth and the bus bandwidth. 
diff --git a/4.validation_scripts/0.nccl-tests/test-nccl-efa-p5.yaml b/4.validation_scripts/0.nccl-tests/nccl-test-eks.yaml similarity index 89% rename from 4.validation_scripts/0.nccl-tests/test-nccl-efa-p5.yaml rename to 4.validation_scripts/0.nccl-tests/nccl-test-eks.yaml index 39ed754e..72091674 100644 --- a/4.validation_scripts/0.nccl-tests/test-nccl-efa-p5.yaml +++ b/4.validation_scripts/0.nccl-tests/nccl-test-eks.yaml @@ -1,7 +1,7 @@ apiVersion: kubeflow.org/v2beta1 kind: MPIJob metadata: - name: test-nccl-efa + name: test-nccl spec: runPolicy: cleanPodPolicy: Running @@ -15,8 +15,8 @@ spec: imagePullPolicy: IfNotPresent restartPolicy: OnFailure containers: - - image: .dkr.ecr.us-west-2.amazonaws.com/cuda-efa-nccl-tests:ubuntu22.04 - name: test-nccl-efa-launcher + - image: .dkr.ecr..amazonaws.com/: + name: test-nccl-launcher env: - name: LD_LIBRARY_PATH value: /opt/amazon/openmpi/lib:/opt/nccl/build/lib:/opt/amazon/efa/lib:/opt/aws-ofi-nccl/install/lib:/usr/local/nvidia/lib:$LD_LIBRARY_PATH @@ -79,8 +79,8 @@ spec: node.kubernetes.io/instance-type: "p5.48xlarge" imagePullPolicy: IfNotPresent containers: - - image: .dkr.ecr.us-west-2.amazonaws.com/cuda-efa-nccl-tests:ubuntu22.04 - name: test-nccl-efa-worker + - image: .dkr.ecr..amazonaws.com/: + name: test-nccl-worker volumeMounts: - name: shmem mountPath: /dev/shm From 5d5b9b00bf6f27e6063615a1598e984a0c359432 Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Sun, 19 Nov 2023 03:16:32 +0000 Subject: [PATCH 223/648] revise neuronx-nemo-megatron example --- .../1.convert-weight.sbatch | 10 -- .../8.neuronx-nemo-megatron/1.setup-venv.sh | 30 +++++ .../2.setup-neuronx-nemo-megatron.sh | 16 +++ .../8.neuronx-nemo-megatron/2.tokenize.sh | 17 --- .../3.convert-weight.sbatch | 13 ++ .../3.precompile-model.sh | 5 - .../4.pretrain-model.sh | 5 - .../8.neuronx-nemo-megatron/4.tokenize.sbatch | 20 +++ .../5.precompile-model.sh | 4 + .../6.pretrain-model.sh | 4 + .../8.neuronx-nemo-megatron/README.md | 120 
+++++++++++++----- README.md | 1 + 12 files changed, 176 insertions(+), 69 deletions(-) delete mode 100644 3.test_cases/8.neuronx-nemo-megatron/1.convert-weight.sbatch create mode 100644 3.test_cases/8.neuronx-nemo-megatron/1.setup-venv.sh create mode 100644 3.test_cases/8.neuronx-nemo-megatron/2.setup-neuronx-nemo-megatron.sh delete mode 100644 3.test_cases/8.neuronx-nemo-megatron/2.tokenize.sh create mode 100644 3.test_cases/8.neuronx-nemo-megatron/3.convert-weight.sbatch delete mode 100644 3.test_cases/8.neuronx-nemo-megatron/3.precompile-model.sh delete mode 100644 3.test_cases/8.neuronx-nemo-megatron/4.pretrain-model.sh create mode 100644 3.test_cases/8.neuronx-nemo-megatron/4.tokenize.sbatch create mode 100644 3.test_cases/8.neuronx-nemo-megatron/5.precompile-model.sh create mode 100644 3.test_cases/8.neuronx-nemo-megatron/6.pretrain-model.sh diff --git a/3.test_cases/8.neuronx-nemo-megatron/1.convert-weight.sbatch b/3.test_cases/8.neuronx-nemo-megatron/1.convert-weight.sbatch deleted file mode 100644 index 03e70976..00000000 --- a/3.test_cases/8.neuronx-nemo-megatron/1.convert-weight.sbatch +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -#SBATCH --exclusive -#SBATCH --output=slurm-%x-%j.out -#SBATCH --cpus-per-task 96 -#SBATCH --nodes 1 - - -source ~/aws_neuron_venv_pytorch/bin/activate -python /home/ec2-user/aws_neuron_venv_pytorch/lib/python3.8/site-packages/transformers/models/llama/convert_llama_weights_to_hf.py \ - --input_dir /fsx/Llama2-meta --model_size 7B --output_dir /fsx/Llama2-7b-hf diff --git a/3.test_cases/8.neuronx-nemo-megatron/1.setup-venv.sh b/3.test_cases/8.neuronx-nemo-megatron/1.setup-venv.sh new file mode 100644 index 00000000..71257a58 --- /dev/null +++ b/3.test_cases/8.neuronx-nemo-megatron/1.setup-venv.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +set -euxo pipefail +APPS_PATH="$1" + +cd ${APPS_PATH} +# Install Python venv +sudo apt-get install -y python3.8-venv g++ + +# Create Python venv +python3.8 -m venv aws_neuron_venv_pytorch + +# 
Activate Python venv +source ${APPS_PATH}/aws_neuron_venv_pytorch/bin/activate +python -m pip install -U pip + +# Install Jupyter notebook kernel +pip install ipykernel +python3.8 -m ipykernel install --user --name aws_neuron_venv_pytorch --display-name "Python (torch-neuronx)" +pip install jupyter notebook +pip install environment_kernels + +# Set pip repository pointing to the Neuron repository +python -m pip config set global.extra-index-url https://pip.repos.neuron.amazonaws.com + +# Install wget, awscli +python -m pip install wget +python -m pip install awscli + +# Install Neuron Compiler and Framework +python -m pip install neuronx-cc==2.* torch-neuronx torchvision \ No newline at end of file diff --git a/3.test_cases/8.neuronx-nemo-megatron/2.setup-neuronx-nemo-megatron.sh b/3.test_cases/8.neuronx-nemo-megatron/2.setup-neuronx-nemo-megatron.sh new file mode 100644 index 00000000..75d78384 --- /dev/null +++ b/3.test_cases/8.neuronx-nemo-megatron/2.setup-neuronx-nemo-megatron.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +set -euxo pipefail +APPS_PATH="$1" + +cd ${APPS_PATH} +source ${APPS_PATH}/aws_neuron_venv_pytorch/bin/activate +git clone https://github.com/aws-neuron/neuronx-nemo-megatron.git +cd neuronx-nemo-megatron +pip3 install wheel +./build.sh +pip3 install ./build/*.whl +pip3 install -r requirements.txt torch==1.13.1 protobuf==3.20.3 +# You also need following to run the weight conversion +pip install accelerate +python3 -c "from nemo.collections.nlp.data.language_modeling.megatron.dataset_utils import compile_helper; \ +compile_helper()" diff --git a/3.test_cases/8.neuronx-nemo-megatron/2.tokenize.sh b/3.test_cases/8.neuronx-nemo-megatron/2.tokenize.sh deleted file mode 100644 index f690fe26..00000000 --- a/3.test_cases/8.neuronx-nemo-megatron/2.tokenize.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -#SBATCH --exclusive -#SBATCH --output=slurm-%x-%j.out -#SBATCH --cpus-per-task 96 -#SBATCH --nodes 1 - -source ~/aws_neuron_venv_pytorch/bin/activate 
-python /home/ec2-user/neuronx-nemo-megatron/nemo/scripts/nlp_language_modeling/preprocess_data_for_megatron.py \ - --input=/fsx/data/books/book.jsonl \ - --json-keys=text \ - --tokenizer-library=huggingface \ - --tokenizer-type=/fsx/Llama2-7b-hf \ - --dataset-impl=mmap \ - --output-prefix=/fsx/data/books/book-tokenized \ - --append-eod \ - --need-pad-id \ - --workers=32 diff --git a/3.test_cases/8.neuronx-nemo-megatron/3.convert-weight.sbatch b/3.test_cases/8.neuronx-nemo-megatron/3.convert-weight.sbatch new file mode 100644 index 00000000..bb5e274d --- /dev/null +++ b/3.test_cases/8.neuronx-nemo-megatron/3.convert-weight.sbatch @@ -0,0 +1,13 @@ +#!/bin/bash +#SBATCH --exclusive +#SBATCH --output=slurm-%x-%j.out +#SBATCH --cpus-per-task 96 +#SBATCH --nodes 1 + +: "${APPS_PATH:=/fsx}" +: "${MODEL_PATH:=/fsx}" +: "${DATA_PATH:=/fsx/data/books}" + +source ${APPS_PATH}/aws_neuron_venv_pytorch/bin/activate +python ${APPS_PATH}/aws_neuron_venv_pytorch/lib/python3.8/site-packages/transformers/models/llama/convert_llama_weights_to_hf.py \ + --input_dir ${MODEL_PATH}/Llama2-meta --model_size 7B --output_dir ${MODEL_PATH}/Llama2-7b-hf diff --git a/3.test_cases/8.neuronx-nemo-megatron/3.precompile-model.sh b/3.test_cases/8.neuronx-nemo-megatron/3.precompile-model.sh deleted file mode 100644 index f8652ab3..00000000 --- a/3.test_cases/8.neuronx-nemo-megatron/3.precompile-model.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -cd ~/neuronx-nemo-megatron/nemo/examples/nlp/language_modeling -source ~/aws_neuron_venv_pytorch/bin/activate -sbatch --nodes 4 compile.slurm ./llama_7b.sh \ No newline at end of file diff --git a/3.test_cases/8.neuronx-nemo-megatron/4.pretrain-model.sh b/3.test_cases/8.neuronx-nemo-megatron/4.pretrain-model.sh deleted file mode 100644 index d254082a..00000000 --- a/3.test_cases/8.neuronx-nemo-megatron/4.pretrain-model.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -cd ~/neuronx-nemo-megatron/nemo/examples/nlp/language_modeling -source 
~/aws_neuron_venv_pytorch/bin/activate -sbatch --nodes 4 run.slurm ./llama_7b.sh \ No newline at end of file diff --git a/3.test_cases/8.neuronx-nemo-megatron/4.tokenize.sbatch b/3.test_cases/8.neuronx-nemo-megatron/4.tokenize.sbatch new file mode 100644 index 00000000..1edf5e52 --- /dev/null +++ b/3.test_cases/8.neuronx-nemo-megatron/4.tokenize.sbatch @@ -0,0 +1,20 @@ +#!/bin/bash +#SBATCH --exclusive +#SBATCH --output=slurm-%x-%j.out +#SBATCH --cpus-per-task 128 +#SBATCH --nodes 1 + +: "${APPS_PATH:=/fsx}" +: "${MODEL_PATH:=/fsx}" +: "${DATA_PATH:=/fsx/data/books}" +source ${APPS_PATH}/aws_neuron_venv_pytorch/bin/activate +python ${APPS_PATH}/neuronx-nemo-megatron/nemo/scripts/nlp_language_modeling/preprocess_data_for_megatron.py \ + --input=${DATA_PATH}/book.jsonl \ + --json-keys=text \ + --tokenizer-library=huggingface \ + --tokenizer-type=${MODEL_PATH}/Llama2-7b-hf \ + --dataset-impl=mmap \ + --output-prefix=${DATA_PATH}/book-tokenized \ + --append-eod \ + --need-pad-id \ + --workers=128 diff --git a/3.test_cases/8.neuronx-nemo-megatron/5.precompile-model.sh b/3.test_cases/8.neuronx-nemo-megatron/5.precompile-model.sh new file mode 100644 index 00000000..48367ac4 --- /dev/null +++ b/3.test_cases/8.neuronx-nemo-megatron/5.precompile-model.sh @@ -0,0 +1,4 @@ +#!/bin/bash +cd ${APPS_PATH}/neuronx-nemo-megatron/nemo/examples/nlp/language_modeling +source ${APPS_PATH}/aws_neuron_venv_pytorch/bin/activate +sbatch --cpus-per-task 1 --nodes 4 --output ${TEST_CASE_PATH}/slurm-%x-%j.out compile.slurm ./llama_7b.sh diff --git a/3.test_cases/8.neuronx-nemo-megatron/6.pretrain-model.sh b/3.test_cases/8.neuronx-nemo-megatron/6.pretrain-model.sh new file mode 100644 index 00000000..07c5eb84 --- /dev/null +++ b/3.test_cases/8.neuronx-nemo-megatron/6.pretrain-model.sh @@ -0,0 +1,4 @@ +#!/bin/bash +cd ${APPS_PATH}/neuronx-nemo-megatron/nemo/examples/nlp/language_modeling +source ${APPS_PATH}/aws_neuron_venv_pytorch/bin/activate +sbatch --nodes 4 --output 
${TEST_CASE_PATH}/slurm-%x-%j.out run.slurm ./llama_7b.sh diff --git a/3.test_cases/8.neuronx-nemo-megatron/README.md b/3.test_cases/8.neuronx-nemo-megatron/README.md index 3a85831b..cbe0f1a8 100644 --- a/3.test_cases/8.neuronx-nemo-megatron/README.md +++ b/3.test_cases/8.neuronx-nemo-megatron/README.md @@ -5,13 +5,38 @@ ## 1. Preparation This guide assumes that you have the following: -* A functional Slurm cluster on AWS. -* Neuron SDK and Torch-neuronx installed. +* A functional Slurm cluster on AWS. We also assume that Ubuntu AMI is used. +* Neuron SDK is installed on the cluster (see [AWS Neuron SDK documentation](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/general/setup/torch-neuronx.html#setup-torch-neuronx) for the steps). * An FSx for Lustre filesystem mounted on `/fsx`. -* `torch-neuronx` environment set up as virtual environment as `aws_neuron_venv_pytorch`. See [NeuronSDK documentation](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/general/setup/neuron-setup/pytorch/neuronx/ubuntu/torch-neuronx-ubuntu20.html#setup-torch-neuronx-ubuntu20) for the setup. -* `neuronx-nemo-megatron` cloned on home directory of the slurm headnode (`cd ~ && git clone https://github.com/aws-neuron/neuronx-nemo-megatron.git`). -We recommend that you setup a Slurm cluster using the template in the architectures directory. +We recommend that you setup a Slurm cluster using the template in the architectures directory. Before creating the Slurm cluster, you need to setup the following environment variables: + +```bash +export APPS_PATH=/fsx +export FSX_PATH=/fsx +export MODEL_PATH=/fsx +export DATA_PATH=$FSX_PATH/data/books +export TEST_CASE_PATH=${APPS_PATH}/awsome-distributed-training/3.test_cases/8.neuronx-nemo-megatron # where you copy the test case or set to your test case path +``` + +1. First of all, you need to have a Python virtual environment for `torch-neuronx` under `APPS_PATH`. 
+ +```bash +bash 1.setup-venv.sh ${APPS_PATH} # The argument specifies APPS_PATH +``` + +2. `neuronx-nemo-megatron` library need to be installed (and initialized) in the environment. + + +```bash +bash 2.setup-neuronx-nemo-megatron.sh ${APPS_PATH} # +``` +You will see the following ERROR line during the script execution. This is safe to ignore. + +```console ++ python3 -c 'from nemo.collections.nlp.data.language_modeling.megatron.dataset_utils import compile_helper; compile_helper()' +2023-Nov-18 09:17:45.728072 175272:175272 ERROR TDRV:tdrv_get_dev_info No neuron device available +``` ## 1. Prepare Llama2 model @@ -21,7 +46,7 @@ You can submit access request from [here](https://ai.meta.com/resources/models-a We will assume that you had placed the model and tokenizer as follows on cluster: ``` -/fsx/Llama2-meta/ +${MODEL_PATH}/Llama2-meta/ ├── 7B/ │ ├── checklist.chk │ ├── consolidated.00.pth @@ -33,13 +58,26 @@ We will assume that you had placed the model and tokenizer as follows on cluster To convert the model to the standard Hugging Face format, the following script in transformers can be called with the following (example) command: ``` -sbatch 1.convert-weight.sbatch +sbatch 3.convert-weight.sbatch ``` -Note: For the purposes of this sample we assume you have saved the Llama-2-7b model in a directory called `Llama2-7b-hf` with the following format: +You can check progress of with `tail` command. + +``` +$ tail -f slurm-3.convert-weight.sbatch-xxx.out +``` +```console +Fetching all parameters from the checkpoint at /fsx/Llama2-meta/7B. +Loading the checkpoint in a Llama model. +Loading checkpoint shards: 100%|██████████| 33/33 [00:12<00:00, 2.65it/s] +... 
``` -/fsx/Llama2-7b-hf/ + +Once the job completed, you will have the Llama-2-7b model weights and tokenizer in a huggingface format under a directory called `Llama2-7b-hf` with the following format: + +```console +${DATAPATH}/Llama2-7b-hf/ ├── config.json ├── generation_config.json ├── pytorch_model-00001-of-00002.bin @@ -54,16 +92,16 @@ Note: For the purposes of this sample we assume you have saved the Llama-2-7b mo ## 2. Download and Tokenize dataset This tutorial makes use of a [Red pyjama dataset](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T). The dataset can be downloaded to your cluster by running the following commands on the head node: -``` -mkdir -p /fsx/data/llama2 -wget https://data.together.xyz/redpajama-data-1T/v1.0.0/book/book.jsonl # Note: Dataset download is 50G and will take approximately 3-4 hours to download. -or -wget https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T-Sample/resolve/main/book_sample.jsonl -O /fsx/data/llama2/book.jsonl +```bash +mkdir -p ${DATA_PATH} +wget https://data.together.xyz/redpajama-data-1T/v1.0.0/book/book.jsonl -O ${DATA_PATH}/book.jsonl # Note: Dataset download is 50G and will take approximately 3-4 hours to download. You can also use https://aria2.github.io/ for faster download +# or +# wget https://huggingface.co/datasets/togethercomputer/RedPajama-Data-1T-Sample/resolve/main/book_sample.jsonl -O ${DATA_PATH}/book.jsonl # Smaller sample dataset for quick testing ``` Once you have the Tokenizer and the dataset. You can tokenize the dataset following the below command: -``` -sbatch 2.tokenize.sbatch +```bash +sbatch 4.tokenize.sbatch ``` Post tokenizing the dataset, you will have a path to the tokenizer and the dataset which will be used for pretraining. 
@@ -94,26 +132,33 @@ An alternative to the JIT flow is to use the included [neuron_parallel_compile]( Before starting the compilation you need to update your path to the dataset and tokenizer in the llama_7b script as below : +```bash +cd ${APPS_PATH}/neuronx-nemo-megatron/nemo/examples/nlp/language_modeling +vi test_llama.sh ``` -cd ~/neuronx-nemo-megatron/nemo/examples/nlp/language_modeling -vi llama_7b.sh -``` -Update the below lines to -``` -# For tokenizer -model.tokenizer.type='/fsx/Llama2-7b-hf' \ -# For Dataset -model.data.data_prefix=[1.0,/fsx/data/books/book.jsonl-processed_text_document] \ +Update the below lines + +```bash +: ${TOKENIZER_PATH=$HOME/llamav2_weights/7b-hf} +: ${DATASET_PATH=$HOME/examples_datasets/llama_7b/book.jsonl-processed_text_document} ``` -Run the following command to launch an AOT pre-compilation job on your ParallelCluster: +to +```bash +: ${TOKENIZER_PATH=${MODEL_PATH}/Llama2-7b-hf} +: ${DATASET_PATH=${DATA_PATH}/book-tokenized_text_document} ``` -bash 3.precompile-model.sh + +Then, run the following command to launch an AOT pre-compilation job on your ParallelCluster: + +```bash +bash 5.precompile-model.sh ``` Once you have launched the precompilation job, run the `squeue` command to view the SLURM job queue on your cluster. If you have not recently run a job on your cluster, it may take 4-5 minutes for the requested trn1.32xlarge nodes to be launched and initialized. 
Once the job is running, `squeue` should show output similar to the following: -``` + +```console JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON) 10 compute1 compile.slurm ubuntu R 5:11 4 compute1-dy-queue1-i1-[1-4] ``` @@ -124,7 +169,8 @@ tail -f slurm-compile.slurm-10.out ``` Once the precompilation job is complete, you should see a message similar to the following in the logs: -``` + +```console 2023-06-11 23:04:08.000738: INFO ||PARALLEL_COMPILE||: Total graphs: 22 2023-06-11 23:04:08.000738: INFO ||PARALLEL_COMPILE||: Total successful compilations: 22 2023-06-11 23:04:08.000738: INFO ||PARALLEL_COMPILE||: Total failed compilations: 0 @@ -137,18 +183,28 @@ At this point, you can press `CTRL-C` to exit the tail command. Submit the training job ``` -bash 4.pretrain-model.sh +bash 6.pretrain-model.sh ``` As outlined above, you can again use the `squeue` command to view the job queue. Once you see that your pretraining job is running, you can view the output of the training job by examining the file named `slurm-run.slurm-ZZ.out` where ZZ represents the JOBID of your job: -``` + +```bash tail -f slurm-run.slurm-11.out ``` Once the model is loaded onto the Trainium accelerators and training has commenced, you will begin to see output indicating the job progress: -``` + +```console Epoch 0: 22%|██▏ | 4499/20101 [22:26:14<77:48:37, 17.95s/it, loss=2.43, v_num=5563, reduced_train_loss=2.470, gradient_norm=0.121, parameter_norm=1864.0, global_step=4512.0, consumed_samples=1.16e+6, iteration_time=16.40] Epoch 0: 22%|██▏ | 4500/20101 [22:26:32<77:48:18, 17.95s/it, loss=2.43, v_num=5563, reduced_train_loss=2.470, gradient_norm=0.121, parameter_norm=1864.0, global_step=4512.0, consumed_samples=1.16e+6, iteration_time=16.40] Epoch 0: 22%|██▏ | 4500/20101 [22:26:32<77:48:18, 17.95s/it, loss=2.44, v_num=5563, reduced_train_loss=2.450, gradient_norm=0.120, parameter_norm=1864.0, global_step=4512.0, consumed_samples=1.16e+6, iteration_time=16.50] ``` + +## 5. 
Authors / Reviewers + +* [A] Keita Watanabe - mlkeita@ +* [R] Verdi March - marcverd@ +* [R] Brad Doran +* [R] Justin Pirtle +* [R] Pierre-Yves Aquilanti - pierreya@ diff --git a/README.md b/README.md index 3b63c799..db4f3d95 100644 --- a/README.md +++ b/README.md @@ -46,6 +46,7 @@ All test cases are under `3.test_cases/`. You can go in each test case directory | `5.param-benchmark` | ✅ | ❓ | ❓ | | `6.stable-diffusion` | ✅ | ❓ | ❓ | | `7.tensorflow-distributed` | ✅ | ❓ | ❓ | +| `8.neuronx-nemo-megatron` | ✅ | ❓ | ❓ | ## 4. Validation scripts From 8cb54ccf72843af60dbdd2174ddba901f700d944 Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Mon, 27 Nov 2023 09:23:02 +0530 Subject: [PATCH 224/648] Updated Readme to resolve PR comments Signed-off-by: Ankur Srivastava --- 3.test_cases/9.nemo-multimodal/README.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/3.test_cases/9.nemo-multimodal/README.md b/3.test_cases/9.nemo-multimodal/README.md index b2f069e5..bc65254c 100644 --- a/3.test_cases/9.nemo-multimodal/README.md +++ b/3.test_cases/9.nemo-multimodal/README.md @@ -1,6 +1,6 @@ # Train Stable Diffusion with NeMo-Multimodal -This project provides a guide to run Nemo-Multimodal on AWS using a container from Nvidia GPU Cloud (NGC). NemoMultimodal 23.05 supports multiple models including Vision Transformers (ViTs), CLIP, Stable Diffusion, InstructPix2Pix, DreamBooth, ControlNet and Imagen. The test cases can be executed on Slurm and use Nvidia Enroot and Nvidia Pyxis. In this project we will showcase a working example with multi-node training for Stable Diffusion +This project provides a guide to run Nemo-Multimodal on AWS using a container from Nvidia GPU Cloud ([NGC](https://ngc.nvidia.com)). 
The latest version of NemoMultimodal supports multiple models including [Vision Transformers (ViTs)](https://github.com/google-research/vision_transformer), [CLIP](https://github.com/openai/CLIP/tree/main), [Stable Diffusion](https://stability.ai/stable-diffusion/), [InstructPix2Pix](https://github.com/timothybrooks/instruct-pix2pix), [DreamBooth](https://dreambooth.github.io/), [ControlNet](https://github.com/lllyasviel/ControlNet) and [Imagen](https://imagen.research.google/). The test cases can be executed on Slurm and use [Nvidia Enroot](https://github.com/NVIDIA/enroot) and [Nvidia Pyxis](https://github.com/NVIDIA/pyxis). In this project we will showcase a working example with multi-node training for Stable Diffusion ## 0. Prerequisites @@ -20,7 +20,7 @@ docker login nvcr.io Username: $oauthtoken Password: API_KEY ``` -Please make note that the Username is exactly "$oauthtoken". +Please make note that the Username is exactly `"$oauthtoken"`. ## 2. Install Nvidia Container CLI @@ -81,7 +81,8 @@ docker cp -a :/opt/NeMo-Megatron-Launcher/ ${TARGET_PATH} To get the `container-id` above you can list the containers like `docker ps -a` which lists all running containers and their ids. ## 6. Build customized docker image -To achieve target performance of Nemo-Multimodal with EFA on P5 and P4de instances, we provide a customized [0.Dockerfile](https://github.com/aws-samples/awsome-distributed-training/blob/nemo-multimodal/3.test_cases/8.nemo-multimodal/Dockerfile) and we can build a image like below: +To achieve target performance of Nemo-Multimodal with EFA on P5 and P4de instances, we provide a customized +`nemo-multimodal/3.test_cases/9.nemo-multimodal/0.Dockerfile` and we can build a image like below: ``` docker build --build-arg NEMO_MULTIMODAL_VERSION=${NEMO_MULTIMODAL_VERSION} -t ${DOCKER_IMAGE_NAME}:${TAG} -f 0.Dockerfile . 
@@ -109,15 +110,14 @@ source activate nemo-multimodal pip3 install -r requirements.txt ``` -All package versions in the above requirements.txt file is recommended from Nvidia. An older version of the package `opencv-python-headless==4.8.0.74` has to be installed to avoid this [error](https://github.com/rom1504/img2dataset/issues/355) with [img2dataset](https://github.com/rom1504/img2dataset) package. +All package versions in the above `requirements.txt` file is recommended from Nvidia. An older version of the package `opencv-python-headless==4.8.0.74` has to be installed to avoid this [error](https://github.com/rom1504/img2dataset/issues/355) with [img2dataset](https://github.com/rom1504/img2dataset) package. ## 9. Pull this github repo ```bash cd /apps/ git clone https://github.com/aws-samples/awsome-distributed-training.git -cd awsome-distributed-training/3.test_cases/8.nemo-multimodal - +cd awsome-distributed-training/3.test_cases/9.nemo-multimodal ``` ## 10. Submitting slurm jobs @@ -126,7 +126,7 @@ Next we will show how to submit slurm jobs for data preparation and training. Th 1. `nemo_configs/1.config.yaml`: NeMo config with information about different stages and environment variables. Refer to the [EFA cheatsheet](https://github.com/aws-samples/awsome-distributed-training/blob/main/1.architectures/efa-cheatsheet.md) here for more information about the EFA environment variables. 2. `nemo_configs/2.bcm.yaml`: Cluster setup config which contains SBATCH variables and [Pyxis](https://github.com/NVIDIA/pyxis) settings to run containers in Slurm. 3. `nemo_configs/3.download_multimodal.yaml`: Config to download the `laion/laion-art` data from Huggingface and prepare data for training -4. `nemo_configs/4.stable_diffusion_860m_res_256_pretrain.yaml`: Config to pre-train stable diffusion model. Currently Nemo Multimodal 23.05 supports the 860M parameter Stable Diffusion model with 256x256 and 512x512 resolution support +4. 
`nemo_configs/4.stable_diffusion_860m_res_256_pretrain.yaml`: Config to pre-train stable diffusion model. Currently Nemo Multimodal supports the 860M parameter Stable Diffusion model with 256x256 and 512x512 resolution support Run the following next to substitute the environment variables in the yaml file and place it in the right location: @@ -142,16 +142,16 @@ You can run one or more stages like below: ``` HYDRA_FULL_ERROR=1 python3 ${TARGET_PATH}/launcher_scripts/main.py ``` -This will create separate folders for different slurm jobs and create folders with the relevant slurm submission script and config file. For more information on using HYDRA please refer [here]((https://github.com/facebookresearch/hydra)). +This will create separate folders for different slurm jobs and create folders with the relevant Slurm submission script and config file. For more information on using HYDRA please refer [here]((https://github.com/facebookresearch/hydra)). ## 11. Download and prepare data - We will use the popular [laion-art](https://huggingface.co/datasets/laion/laion-art) data for training the stable diffusion model which contains >8M images and their captions. Please review the [download_multimodal](https://github.com/aws-samples/awsome-distributed-training/blob/nemo-multimodal/3.test_cases/8.nemo-multimodal/download_multimodal.yaml) file which contains the following sections: + We will use the popular [laion-art](https://huggingface.co/datasets/laion/laion-art) data for training the stable diffusion model which contains >8M images and their captions. Please review the [download_multimodal](https://github.com/aws-samples/awsome-distributed-training/blob/nemo-multimodal/3.test_cases/9.nemo-multimodal/download_multimodal.yaml) file which contains the following sections: 1. `dataset_repo_id`: `laion/laion-art` Huggingface dataset repo id, in the format of `{user_or_company}/{dataset_name}` 2. 
`download_parquet`: Downloads and paritions the parquet files and stores the partioned parquet files in `${DATASET_PATH}/parquet/` 3. `download_images`: Uses [img2dataset](https://github.com/rom1504/img2dataset/tree/main) to download the images specified in the parquet files and store the raw data in `${DATASET_PATH}/tarfiles_raw`. Each partitioned parquet file will run in an array of slurm jobs sequentially. 4. `reorganize_tar`: This section will reorganize the tar files and create new tarfiles with `tar_chunk_size` number of images stores in each tar file. Make sure `node_array_size` is set to 1, otherwise additional preprocessing will be needed to merge the tarfiles from the two tasks in one folder. The reorganized tarfiles will be stored in `${DATASET_PATH}/tarfiles_reorganized`. -5. `reorganize_tar`: This task will generate a pickle file with the necessary paths for the reorganized tarfiles. Make sure you are reading from reorganized tarfiles and not from `precache_encodings` which is included in the original version of NeMo 23.05. +5. `reorganize_tar`: This task will generate a pickle file with the necessary paths for the reorganized tarfiles. Make sure you are reading from reorganized tarfiles and not from `precache_encodings` which is included in the original version of NeMo. ## 12. Run Distributed Training After downloading the data, you run the training job next. Make sure the trainer inputs such as `num_nodes` and number of gpus per node in `trainer.devices` is set correctly. Also, set `max_epochs` to -1 if training needs to run till max_steps have completed. The model by default will create a tensorboard events log, but weights and biases is not switched on by default. Also make sure the datasets path at the bottom point to the right paths for `wdinfo.pkl` and `tarfiles_reorganized`. 
From 6f0eb1e7b60297e675aa2be6829df195c7cdfc25 Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Mon, 27 Nov 2023 09:25:38 +0530 Subject: [PATCH 225/648] Updated Readme to resolve PR comments Signed-off-by: Ankur Srivastava --- 3.test_cases/9.nemo-multimodal/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/3.test_cases/9.nemo-multimodal/README.md b/3.test_cases/9.nemo-multimodal/README.md index bc65254c..ec9f63c0 100644 --- a/3.test_cases/9.nemo-multimodal/README.md +++ b/3.test_cases/9.nemo-multimodal/README.md @@ -24,9 +24,9 @@ Please make note that the Username is exactly `"$oauthtoken"`. ## 2. Install Nvidia Container CLI -If you have created your cluster with [DLAMI](https://aws.amazon.com/machine-learning/amis/) or your custom AMI, please make sure `libnvidia-container cli` is installed. You can follow the instructions below to install it. +### 2.1 If you have created your cluster with [DLAMI](https://aws.amazon.com/machine-learning/amis/) or your custom AMI, please make sure `libnvidia-container cli` is installed. You can follow the instructions below to install it. 
-To install libnvidia-container cli: +### 2.2 To install libnvidia-container cli: https://github.com/NVIDIA/libnvidia-container https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html @@ -40,7 +40,7 @@ curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dear && sudo apt-get install libnvidia-container1 \ && sudo apt-get install libnvidia-container-tools ``` -You can set the Nemo Multimodal version and others as environment variables: +### 2.3 You can set the Nemo Multimodal version and others as environment variables: ``` export PYTHON_VERSION=3.10 From cb23fc7b7548f8db2fc3bb50d853995acd7ac05e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Nov 2023 03:57:50 +0000 Subject: [PATCH 226/648] Bump requests from 2.26.0 to 2.31.0 in /3.test_cases/9.nemo-multimodal Bumps [requests](https://github.com/psf/requests) from 2.26.0 to 2.31.0. - [Release notes](https://github.com/psf/requests/releases) - [Changelog](https://github.com/psf/requests/blob/main/HISTORY.md) - [Commits](https://github.com/psf/requests/compare/v2.26.0...v2.31.0) --- updated-dependencies: - dependency-name: requests dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- 3.test_cases/9.nemo-multimodal/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/9.nemo-multimodal/requirements.txt b/3.test_cases/9.nemo-multimodal/requirements.txt index 83d904a4..5dba838b 100644 --- a/3.test_cases/9.nemo-multimodal/requirements.txt +++ b/3.test_cases/9.nemo-multimodal/requirements.txt @@ -4,7 +4,7 @@ hydra-core>=1.2.0,<1.3 img2dataset omegaconf>=2.2,<2.3 pynvml==11.4.1 -requests==2.26.0 +requests==2.31.0 tqdm==4.62.3 zstandard==0.15.2 opencv-python-headless==4.8.0.74 From 21d9da684512cc97a4e3af5feeb330a3f028fd94 Mon Sep 17 00:00:00 2001 From: Alex Iankoulski Date: Mon, 27 Nov 2023 09:01:42 -0800 Subject: [PATCH 227/648] Fix typo, specify versions, shorten outpu --- 4.validation_scripts/0.nccl-tests/README.md | 31 +++------------------ 1 file changed, 4 insertions(+), 27 deletions(-) diff --git a/4.validation_scripts/0.nccl-tests/README.md b/4.validation_scripts/0.nccl-tests/README.md index ed846a22..a0a6c813 100644 --- a/4.validation_scripts/0.nccl-tests/README.md +++ b/4.validation_scripts/0.nccl-tests/README.md @@ -82,7 +82,7 @@ To run the NCCL tests on EKS, you will need to build the container image, then p docker image build -t ${REGISTRY}${IMAGE}${TAG} -f ./0.nccl-tests.Dockerfile . ``` -3. Create the ECR repository if it does not exis +3. 
Create the ECR repository if it does not exist ```bash REGISTRY_COUNT=$(aws ecr describe-repositories | grep ${IMAGE} | wc -l) if [ "$REGISTRY_COUNT" == "0" ]; then @@ -179,37 +179,14 @@ sbatch 3.nccl-validate.sbatch kubectl logs -f $(kubectl get pods | grep launcher | cut -d ' ' -f 1) ``` - The following is an example exerpt from the logs of a NCCL all_reduce_perf test, executed on a cluster with two p5.48xlarge instances: + The following is an example exerpt from the logs of a NCCL all_reduce_perf test, executed on a cluster with two p5.48xlarge instances (using EFA_INSTALLER_VERSION=1.28.0, AWS_OFI_NCCL_VERSION=v1.7.3-aws, NCCL_TESTS_VERSION=master, ARG NCCL_VERSION=2.18.5): + ```log [1,0]:# out-of-place in-place [1,0]:# size count type redop root time algbw busbw #wrong time algbw busbw #wrong [1,0]:# (B) (elements) (us) (GB/s) (GB/s) (us) (GB/s) (GB/s) [1,0]: 0 0 float sum -1 15.51 0.00 0.00 0 15.52 0.00 0.00 0 - [1,0]: 0 0 float sum -1 15.51 0.00 0.00 0 15.50 0.00 0.00 0 - [1,0]: 4 1 float sum -1 202.2 0.00 0.00 0 179.4 0.00 0.00 0 - [1,0]: 8 2 float sum -1 175.5 0.00 0.00 0 178.2 0.00 0.00 0 - [1,0]: 16 4 float sum -1 177.6 0.00 0.00 0 176.1 0.00 0.00 0 - [1,0]: 32 8 float sum -1 175.8 0.00 0.00 0 173.1 0.00 0.00 0 - [1,0]: 64 16 float sum -1 175.7 0.00 0.00 0 172.9 0.00 0.00 0 - [1,0]: 128 32 float sum -1 171.8 0.00 0.00 0 174.8 0.00 0.00 0 - [1,0]: 256 64 float sum -1 176.7 0.00 0.00 0 172.4 0.00 0.00 0 - [1,0]: 512 128 float sum -1 174.4 0.00 0.01 0 176.8 0.00 0.01 0 - [1,0]: 1024 256 float sum -1 172.0 0.01 0.01 0 175.1 0.01 0.01 0 - [1,0]: 2048 512 float sum -1 175.9 0.01 0.02 0 174.6 0.01 0.02 0 - [1,0]: 4096 1024 float sum -1 174.1 0.02 0.04 0 174.7 0.02 0.04 0 - [1,0]: 8192 2048 float sum -1 175.7 0.05 0.09 0 176.5 0.05 0.09 0 - [1,0]: 16384 4096 float sum -1 224.9 0.07 0.14 0 183.8 0.09 0.17 0 - [1,0]: 32768 8192 float sum -1 193.8 0.17 0.32 0 191.2 0.17 0.32 0 - [1,0]: 65536 16384 float sum -1 194.9 0.34 0.63 0 194.8 0.34 0.63 0 - [1,0]: 131072 
32768 float sum -1 203.8 0.64 1.21 0 204.2 0.64 1.20 0 - [1,0]: 262144 65536 float sum -1 218.7 1.20 2.25 0 217.7 1.20 2.26 0 - [1,0]: 524288 131072 float sum -1 225.7 2.32 4.36 0 225.9 2.32 4.35 0 - [1,0]: 1048576 262144 float sum -1 239.3 4.38 8.22 0 245.5 4.27 8.01 0 - [1,0]: 2097152 524288 float sum -1 269.9 7.77 14.57 0 306.0 6.85 12.85 0 - [1,0]: 4194304 1048576 float sum -1 305.7 13.72 25.72 0 302.2 13.88 26.02 0 - [1,0]: 8388608 2097152 float sum -1 336.1 24.96 46.79 0 335.2 25.02 46.92 0 - [1,0]: 16777216 4194304 float sum -1 530.9 31.60 59.25 0 564.3 29.73 55.74 0 - [1,0]: 33554432 8388608 float sum -1 859.2 39.05 73.23 0 856.8 39.16 73.43 0 + ... [1,0]: 67108864 16777216 float sum -1 996.0 67.38 126.33 0 1001.7 66.99 125.62 0 [1,0]: 134217728 33554432 float sum -1 1950.5 68.81 129.02 0 1725.6 77.78 145.83 0 [1,0]: 268435456 67108864 float sum -1 3010.8 89.16 167.17 0 3020.7 88.87 166.62 0 From b70322076e72573f1fa79c43d18cff17c41e24e8 Mon Sep 17 00:00:00 2001 From: johnbensnyder Date: Tue, 28 Nov 2023 14:14:56 +0000 Subject: [PATCH 228/648] Added FSDP model --- .../0.create_conda_env-checkpoint.sh | 18 + .../1.distributed-training-checkpoint.sbatch | 60 +++ .../.ipynb_checkpoints/README-checkpoint.md | 57 +++ 3.test_cases/10.FSDP/0.create_conda_env.sh | 18 + .../10.FSDP/1.distributed-training.sbatch | 60 +++ 3.test_cases/10.FSDP/README.md | 57 +++ 3.test_cases/10.FSDP/model_utils/__init__.py | 0 3.test_cases/10.FSDP/model_utils/arguments.py | 157 +++++++ .../10.FSDP/model_utils/checkpoint.py | 114 +++++ .../10.FSDP/model_utils/concat_dataset.py | 39 ++ .../10.FSDP/model_utils/train_utils.py | 412 ++++++++++++++++++ 3.test_cases/10.FSDP/train.py | 270 ++++++++++++ 12 files changed, 1262 insertions(+) create mode 100755 3.test_cases/10.FSDP/.ipynb_checkpoints/0.create_conda_env-checkpoint.sh create mode 100755 3.test_cases/10.FSDP/.ipynb_checkpoints/1.distributed-training-checkpoint.sbatch create mode 100644 
3.test_cases/10.FSDP/.ipynb_checkpoints/README-checkpoint.md create mode 100755 3.test_cases/10.FSDP/0.create_conda_env.sh create mode 100755 3.test_cases/10.FSDP/1.distributed-training.sbatch create mode 100644 3.test_cases/10.FSDP/README.md create mode 100644 3.test_cases/10.FSDP/model_utils/__init__.py create mode 100644 3.test_cases/10.FSDP/model_utils/arguments.py create mode 100644 3.test_cases/10.FSDP/model_utils/checkpoint.py create mode 100644 3.test_cases/10.FSDP/model_utils/concat_dataset.py create mode 100644 3.test_cases/10.FSDP/model_utils/train_utils.py create mode 100644 3.test_cases/10.FSDP/train.py diff --git a/3.test_cases/10.FSDP/.ipynb_checkpoints/0.create_conda_env-checkpoint.sh b/3.test_cases/10.FSDP/.ipynb_checkpoints/0.create_conda_env-checkpoint.sh new file mode 100755 index 00000000..cecbc699 --- /dev/null +++ b/3.test_cases/10.FSDP/.ipynb_checkpoints/0.create_conda_env-checkpoint.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +set -ex + +wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh +chmod +x Miniconda3-latest-Linux-x86_64.sh +./Miniconda3-latest-Linux-x86_64.sh -b -f -p ./miniconda3 + +source ./miniconda3/bin/activate + +conda create -y -p ./pt_fsdp python=3.10 + +source activate ./pt_fsdp/ + +# Install PyTorch +pip install torch==2.0.1 torchvision torchaudio transformers datasets + +# Create checkpoint dir +mkdir checkpoints diff --git a/3.test_cases/10.FSDP/.ipynb_checkpoints/1.distributed-training-checkpoint.sbatch b/3.test_cases/10.FSDP/.ipynb_checkpoints/1.distributed-training-checkpoint.sbatch new file mode 100755 index 00000000..cbedafe7 --- /dev/null +++ b/3.test_cases/10.FSDP/.ipynb_checkpoints/1.distributed-training-checkpoint.sbatch @@ -0,0 +1,60 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +#SBATCH --nodes=4 # number of nodes to use, 2 p4d(e) = 16 A100 GPUs +#SBATCH --job-name=FSDP # name of your job +#SBATCH --exclusive # job has exclusive use of the resource, no sharing + +set -ex; + +########################### +###### User Variables ##### +########################### + +########################### +## Environment Variables ## +########################### + +## Plenty of EFA level variables +export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d +export FI_EFA_FORK_SAFE=1 +export FI_LOG_LEVEL=1 +export FI_PROVIDER=efa +export NCCL_DEBUG=INFO + +########################### +####### Torch Dist ####### +########################### + +declare -a TORCHRUN_ARGS=( + --nproc_per_node=8 \ + --nnodes=$SLURM_JOB_NUM_NODES \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$(hostname):29501 \ +) + +export TORCHRUN=./pt_fsdp/bin/torchrun +export TRAIN_SCRIPT=./train.py + +############################ +# Llama 2 Training Params ## +############################ + +declare -a TRAINING_ARGS=( + --max_context_width=4096 \ + --num_key_value_heads=32 \ # 7b: 32 13b: 40 70b: 8 + --llama_intermediate_size=11008 \ # 7b: 11008 13b: 13824 70b: 28672 + --hidden_width=4096 \ # 7b: 4096 13b: 5120 70b: 8192 + --num_layers=32 \ # 7b: 32 13b: 40 70b: 80 + --num_heads=32 \ # 7b: 32 13b: 40 70b: 64 + --model_type=llama_v2 \ + --checkpoint_freq=50 \ + --validation_freq=500 \ + --checkpoint_dir=./checkpoints \ + --resume_from_checkpoint=./checkpoints +) + +srun -l ${TORCHRUN} "${TORCHRUN_ARGS[@]}" $TRAIN_SCRIPT "${TRAINING_ARGS[@]}" diff --git a/3.test_cases/10.FSDP/.ipynb_checkpoints/README-checkpoint.md b/3.test_cases/10.FSDP/.ipynb_checkpoints/README-checkpoint.md new file mode 100644 index 00000000..5a2a4bd8 --- /dev/null +++ b/3.test_cases/10.FSDP/.ipynb_checkpoints/README-checkpoint.md @@ -0,0 +1,57 @@ +# Get Started Training Llama 2 with PyTorch FSDP in 5 Minutes + +These scripts provide an easy way to get started with 
multinode [FSDP](https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html) training on Slurm. It is designed to be as simple as possible, requires no data preparation, and uses a simple Conda environment. + +## 0. Prerequisites + +Before running this training, you'll need to create a Slurm cluster with an FSx for Lustre file system. Instructions can be found in [1.architectures](../../1.architectures). + +## 1. Create Environment + +On your cluster head node, navigate to your shared FSx for Lustre file system, and clone this repo. If you followed the tutorial linked above, it will be location at `/fsx`. + +``` +cd /fsx +git clone https://github.com/aws-samples/awsome-distributed-training/ +cd awsome-distributed-training/3.test_cases/10.FSDP +``` + +Next, run the `0.create_conda_env.sh` script. This script will first download and install [Miniconda](https://docs.conda.io/projects/miniconda/en/latest/), then create a Conda env called `pt_fsdp`. + +``` +bash 0.create_conda_env.sh +``` + +By creating this environment on the shared FSx for Lustre volume, all compute nodes in our cluster will have access to it. + +## 2. Data + +For this example, we'll be using the [C4 dataset](https://huggingface.co/datasets/allenai/c4). Because C4 is several hundred gigabytes, we'll stream the data directly from [HuggingFace](https://huggingface.co/datasets). The `create_streaming_dataloaders` function in `train.py` is already setup to do this, so there's no data prep required for running this training. If you'd like to instead use your own dataset, you can do so by [formatting it as a HuggingFace dataset](https://huggingface.co/docs/datasets/create_dataset), and passing its location to the `--dataset_path` argument. + +## 3. Launch Training + +The script to launch a Slurm batch training job can be found in `1.distributed_training.sbatch`. You can adjust the number of training nodes by modifying `#SBATCH --nodes=4`. You can also adjust the training parameters in `TRAINING_ARGS`. 
Additional parameters can be found in `model/arguments.py`. Note that we use the same directory for both `--checkpoint_dir` and `--resume_from_checkpoint`. If there are multiple checkpoints, `--resume_from_checkpoint` will automatically select the most recent one. This way if our training is interupted for any reason, it will automatically pick up the most recent checkpoint. + +``` +declare -a TRAINING_ARGS=( + --num_key_value_heads=32 \ + --llama_intermediate_size=11008 \ + --max_context_width=4096 \ + --hidden_width=4096 \ + --num_layers=32 \ + --num_heads=32 \ + --model_type=llama_v2 \ + --checkpoint_freq=1000 \ + --validation_freq=500 \ + --checkpoint_dir=./checkpoints \ + --resume_from_checkpoint=./checkpoints +) +``` + +To launch your training, run + +``` +sbatch 1.distributed_training.sbatch +``` + +You'll find a new file in the FSDP directory of the form `slurm-[job-number].out`. This will be continuously updated with your training logs. To stop training, get your job number using `squeue` and run `scancel [job-number]`. 
diff --git a/3.test_cases/10.FSDP/0.create_conda_env.sh b/3.test_cases/10.FSDP/0.create_conda_env.sh new file mode 100755 index 00000000..cecbc699 --- /dev/null +++ b/3.test_cases/10.FSDP/0.create_conda_env.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +set -ex + +wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh +chmod +x Miniconda3-latest-Linux-x86_64.sh +./Miniconda3-latest-Linux-x86_64.sh -b -f -p ./miniconda3 + +source ./miniconda3/bin/activate + +conda create -y -p ./pt_fsdp python=3.10 + +source activate ./pt_fsdp/ + +# Install PyTorch +pip install torch==2.0.1 torchvision torchaudio transformers datasets + +# Create checkpoint dir +mkdir checkpoints diff --git a/3.test_cases/10.FSDP/1.distributed-training.sbatch b/3.test_cases/10.FSDP/1.distributed-training.sbatch new file mode 100755 index 00000000..cbedafe7 --- /dev/null +++ b/3.test_cases/10.FSDP/1.distributed-training.sbatch @@ -0,0 +1,60 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +#SBATCH --nodes=4 # number of nodes to use, 2 p4d(e) = 16 A100 GPUs +#SBATCH --job-name=FSDP # name of your job +#SBATCH --exclusive # job has exclusive use of the resource, no sharing + +set -ex; + +########################### +###### User Variables ##### +########################### + +########################### +## Environment Variables ## +########################### + +## Plenty of EFA level variables +export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d +export FI_EFA_FORK_SAFE=1 +export FI_LOG_LEVEL=1 +export FI_PROVIDER=efa +export NCCL_DEBUG=INFO + +########################### +####### Torch Dist ####### +########################### + +declare -a TORCHRUN_ARGS=( + --nproc_per_node=8 \ + --nnodes=$SLURM_JOB_NUM_NODES \ + --rdzv_id=$SLURM_JOB_ID \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$(hostname):29501 \ +) + +export TORCHRUN=./pt_fsdp/bin/torchrun +export TRAIN_SCRIPT=./train.py + +############################ +# Llama 2 Training Params ## +############################ + +declare -a TRAINING_ARGS=( + --max_context_width=4096 \ + --num_key_value_heads=32 \ # 7b: 32 13b: 40 70b: 8 + --llama_intermediate_size=11008 \ # 7b: 11008 13b: 13824 70b: 28672 + --hidden_width=4096 \ # 7b: 4096 13b: 5120 70b: 8192 + --num_layers=32 \ # 7b: 32 13b: 40 70b: 80 + --num_heads=32 \ # 7b: 32 13b: 40 70b: 64 + --model_type=llama_v2 \ + --checkpoint_freq=50 \ + --validation_freq=500 \ + --checkpoint_dir=./checkpoints \ + --resume_from_checkpoint=./checkpoints +) + +srun -l ${TORCHRUN} "${TORCHRUN_ARGS[@]}" $TRAIN_SCRIPT "${TRAINING_ARGS[@]}" diff --git a/3.test_cases/10.FSDP/README.md b/3.test_cases/10.FSDP/README.md new file mode 100644 index 00000000..5a2a4bd8 --- /dev/null +++ b/3.test_cases/10.FSDP/README.md @@ -0,0 +1,57 @@ +# Get Started Training Llama 2 with PyTorch FSDP in 5 Minutes + +These scripts provide an easy way to get started with multinode [FSDP](https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html) training on 
Slurm. It is designed to be as simple as possible, requires no data preparation, and uses a simple Conda environment. + +## 0. Prerequisites + +Before running this training, you'll need to create a Slurm cluster with an FSx for Lustre file system. Instructions can be found in [1.architectures](../../1.architectures). + +## 1. Create Environment + +On your cluster head node, navigate to your shared FSx for Lustre file system, and clone this repo. If you followed the tutorial linked above, it will be location at `/fsx`. + +``` +cd /fsx +git clone https://github.com/aws-samples/awsome-distributed-training/ +cd awsome-distributed-training/3.test_cases/10.FSDP +``` + +Next, run the `0.create_conda_env.sh` script. This script will first download and install [Miniconda](https://docs.conda.io/projects/miniconda/en/latest/), then create a Conda env called `pt_fsdp`. + +``` +bash 0.create_conda_env.sh +``` + +By creating this environment on the shared FSx for Lustre volume, all compute nodes in our cluster will have access to it. + +## 2. Data + +For this example, we'll be using the [C4 dataset](https://huggingface.co/datasets/allenai/c4). Because C4 is several hundred gigabytes, we'll stream the data directly from [HuggingFace](https://huggingface.co/datasets). The `create_streaming_dataloaders` function in `train.py` is already setup to do this, so there's no data prep required for running this training. If you'd like to instead use your own dataset, you can do so by [formatting it as a HuggingFace dataset](https://huggingface.co/docs/datasets/create_dataset), and passing its location to the `--dataset_path` argument. + +## 3. Launch Training + +The script to launch a Slurm batch training job can be found in `1.distributed_training.sbatch`. You can adjust the number of training nodes by modifying `#SBATCH --nodes=4`. You can also adjust the training parameters in `TRAINING_ARGS`. Additional parameters can be found in `model/arguments.py`. 
Note that we use the same directory for both `--checkpoint_dir` and `--resume_from_checkpoint`. If there are multiple checkpoints, `--resume_from_checkpoint` will automatically select the most recent one. This way if our training is interupted for any reason, it will automatically pick up the most recent checkpoint. + +``` +declare -a TRAINING_ARGS=( + --num_key_value_heads=32 \ + --llama_intermediate_size=11008 \ + --max_context_width=4096 \ + --hidden_width=4096 \ + --num_layers=32 \ + --num_heads=32 \ + --model_type=llama_v2 \ + --checkpoint_freq=1000 \ + --validation_freq=500 \ + --checkpoint_dir=./checkpoints \ + --resume_from_checkpoint=./checkpoints +) +``` + +To launch your training, run + +``` +sbatch 1.distributed_training.sbatch +``` + +You'll find a new file in the FSDP directory of the form `slurm-[job-number].out`. This will be continuously updated with your training logs. To stop training, get your job number using `squeue` and run `scancel [job-number]`. diff --git a/3.test_cases/10.FSDP/model_utils/__init__.py b/3.test_cases/10.FSDP/model_utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/3.test_cases/10.FSDP/model_utils/arguments.py b/3.test_cases/10.FSDP/model_utils/arguments.py new file mode 100644 index 00000000..766af75f --- /dev/null +++ b/3.test_cases/10.FSDP/model_utils/arguments.py @@ -0,0 +1,157 @@ +"""FSDP binary script arguments.""" + +import argparse +import os + + +def parse_args(): # pylint: disable=too-many-statements + """Parse args.""" + parser = argparse.ArgumentParser() + + # hyperparameters sent by the client are passed as command-line arguments to the script. 
+ + opt_grp = parser.add_argument_group( + title="optimization", description="arguments for optimization" + ) + opt_grp.add_argument( + "--train_batch_size", + type=int, + default=2, + help="batch size per dp rank, for tensor parallelism degree 8 with pipeline parallel degree 1 this means 8*this batch size per node", # pylint: disable=line-too-long + ) + opt_grp.add_argument("--val_batch_size", type=int, default=4) + opt_grp.add_argument("--max_steps", "--max_training_steps", type=int, default=5000) + opt_grp.add_argument("--seed", type=int, default=12345) + opt_grp.add_argument("--same_seed", type=int, default=0) + opt_grp.add_argument("--bf16", default=1, type=int, help="automatic mixed precision training") + opt_grp.add_argument("--grad_clip", default=1.0, type=float, help="gradient clipping") + opt_grp.add_argument("--weight_decay", default=0.2, type=float, help="weight decay") + opt_grp.add_argument( + "--beta1", default=0.9, type=float, help="beta1 parameter for Adam optimizer" + ) + opt_grp.add_argument( + "--beta2", default=0.95, type=float, help="beta2 parameter for Adam optimizer" + ) + opt_grp.add_argument( + "--activation_checkpointing", + type=int, + default=1, + help="enable gradient checkpointing to reduce memory consumption", + ) + opt_grp.add_argument( + "--llama_intermediate_size", + type=int, + default=11008, + help="intermediate_size for Llama v2, a dimension associated with MLP", + ) + opt_grp.add_argument( + "--num_key_value_heads", + type=int, + default=None, + help="num_key_value_heads for Llama v2", + ) + parser.add_argument( + "--logging_freq", type=int, default=1, help="number of iterations between logging" + ) + parser.add_argument("--tensorboard_dir", type=str, nargs="+", default=None) + + model_grp = parser.add_argument_group( + title="model", description="arguments to describe model configuration" + ) + model_grp.add_argument("--max_context_width", type=int, default=2048) + model_grp.add_argument("--vocab_size", type=int, 
default=50432) + model_grp.add_argument("--hidden_width", type=int, default=768) + model_grp.add_argument("--num_layers", type=int, default=12) + model_grp.add_argument("--num_heads", type=int, default=12) + model_grp.add_argument("--resid_pdrop", type=float, default=0.1) + model_grp.add_argument("--embd_pdrop", type=float, default=0.1) + model_grp.add_argument("--attn_pdrop", type=float, default=0.1) + model_grp.add_argument("--summary_first_pdrop", type=float, default=0.1) + model_grp.add_argument("--initializer_range", type=float, default=0.02) + model_grp.add_argument("--model_type", type=str, default="gpt_neox") + model_grp.add_argument("--rotary_pct", type=float, default=0.25) + model_grp.add_argument("--rotary_emb_base", type=int, default=10000) + + fsdp_grp = parser.add_argument_group( + title="fsdp", description="arguments for fully sharded data parallel" + ) + fsdp_grp.add_argument("--offload_activations", type=int, default=0) + fsdp_grp.add_argument("--activation_loading_horizon", type=int, default=2) + fsdp_grp.add_argument("--limit_all_gathers", default=1, type=int) + + # learning rate + lr_grp = parser.add_argument_group( + title="lr", description="arguments for learning rate schedule" + ) + lr_grp.add_argument("--lr", type=float, default=0.0001, help="Initial learning rate.") + lr_grp.add_argument( + "--lr_decay_style", + type=str, + default="cosine", + choices=["constant", "linear", "cosine", "exponential", "plateau"], + help="Learning rate decay function.", + ) + lr_grp.add_argument( + "--lr_decay_iters", + type=int, + default=None, + help="number of iterations to decay learning rate over," " If None defaults to train iters", + ) + lr_grp.add_argument( + "--min_lr", + type=float, + default=1e-05, + help="Minumum value for learning rate. 
The scheduler" "clip values below this threshold.", + ) + lr_grp.add_argument( + "--warmup", + type=float, + default=0.0032, + help="Percentage of total iterations to warmup on " + "(.01 = 1 percent of all training iters).", + ) + lr_grp.add_argument( + "--plateau", + type=float, + default=0.0, + help="Percentage of total iterations to keep at max if using plateau lr", + ) + io_grp = parser.add_argument_group(title="io", description="location for input and output") + io_grp.add_argument("--dataset_path", type=str, default="c4") + io_grp.add_argument("--tokenizer", type=str, default="EleutherAI/gpt-neox-20b") + io_grp.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help="Checkpoint folder name to load from", + ) + io_grp.add_argument( + "--checkpoint_dir", + type=str, + default=None, + help="Saves partial checkpoints (model, optimizer) to this dir.", # pylint: disable=line-too-long + ) + io_grp.add_argument( + "--epochs", type=int, default=3, help="times of iterating over the training dataset" + ) + + parser.add_argument( + "--checkpoint_freq", + type=int, + default=1000, + help="number of iterations between checkpointing", + ) + parser.add_argument( + "--validation_freq", + type=int, + default=None, + help="number of iterations to print validation loss", + ) + parser.add_argument( + "--validation_batches", + type=int, + default=10, + help="number of batches to estimate validation loss", + ) + + return parser.parse_known_args() \ No newline at end of file diff --git a/3.test_cases/10.FSDP/model_utils/checkpoint.py b/3.test_cases/10.FSDP/model_utils/checkpoint.py new file mode 100644 index 00000000..f247fae5 --- /dev/null +++ b/3.test_cases/10.FSDP/model_utils/checkpoint.py @@ -0,0 +1,114 @@ +import os +import re +import pickle +import statistics +import time +import warnings +from pathlib import Path + +import torch +import torch.distributed as dist + +# pylint: disable=import-error,no-name-in-module +from torch.distributed import 
checkpoint +from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict +from torch.distributed.fsdp import FullyShardedDataParallel as FSDP +from torch.distributed.fsdp import StateDictType +from model_utils.train_utils import get_logger + +logger = get_logger() + +def save_checkpoint(model, optimizer, scheduler, user_content, root_dir, sub_dir): + torch.cuda.empty_cache() + + save_dir = os.path.join(root_dir, sub_dir) + + with FSDP.state_dict_type( + model, + StateDictType.SHARDED_STATE_DICT): + state_dict = { + "model": model.state_dict(), + "optimizer": FSDP.optim_state_dict(model, optimizer), + "scheduler": scheduler.state_dict(), + "total_steps": user_content["total_steps"], + "start_batch_index": user_content["start_batch_index"], + } + if dist.get_rank() == 0: + logger.info("Writing checkpoint to {0}.".format(save_dir)) + checkpoint.save_state_dict( + state_dict=state_dict, + storage_writer=checkpoint.FileSystemWriter(save_dir) + ) + dist.barrier() + if dist.get_rank() == 0: + logger.info("Completed checkpoint.") + +def get_last_checkpoint(checkpoint_paths, model_type): + steps = [int(re.findall(r'\d+steps', checkpoint.stem)[0].replace('steps','')) \ + for checkpoint in checkpoint_paths] + checkpoints = sorted([(step, path) for step,path in zip(steps, checkpoint_paths)]) + return checkpoints[-1][1].as_posix() + +def load_checkpoint(model, optimizer, scheduler, checkpoint_dir, model_type, device): + checkpoint_paths = list(Path(checkpoint_dir).glob(f"{model_type}*")) + if len(checkpoint_paths)==0: + if dist.get_rank() == 0: + logger.info("No Checkpoints Found") + return( + model, + optimizer, + scheduler, + 0, + 0, + ) + last_checkpoint = get_last_checkpoint(checkpoint_paths, model_type) + if dist.get_rank() == 0: + logger.info("Loading checkpoint from %s ...", last_checkpoint) + with FSDP.state_dict_type( + model, + StateDictType.SHARDED_STATE_DICT, + ): + state_dict = { + "model": model.state_dict(), + "scheduler": 
scheduler.state_dict(), + "total_steps": 0, + "start_batch_index": 0, + # cannot load the optimizer state_dict together with the model state_dict + } + checkpoint.load_state_dict( + state_dict=state_dict, + storage_reader=checkpoint.FileSystemReader(last_checkpoint), + ) + model.load_state_dict(state_dict["model"]) + scheduler.load_state_dict(state_dict["scheduler"]) + if dist.get_rank() == 0: + logger.info("Loaded model state from disk") + logger.info("Loading optimizer state from disk") + optim_state = load_sharded_optimizer_state_dict( + model_state_dict=state_dict["model"], + optimizer_key="optimizer", + storage_reader=checkpoint.FileSystemReader(last_checkpoint), + ) + if dist.get_rank() == 0: + logger.info("Loaded and sharded optimizer state from disk") + with warnings.catch_warnings(): + warnings.simplefilter("ignore", UserWarning) + # UserWarning to replace all_gather_base with all_gather_into_tensor floods the logs + flattened_osd = FSDP.optim_state_dict_to_load( + optim_state["optimizer"], model, optimizer + ) + + if dist.get_rank() == 0: + logger.info("Converted optimizer state dict for FSDP") + optimizer.load_state_dict(flattened_osd) + dist.barrier() + if dist.get_rank() == 0: + logger.info("Checkpoint loaded from %s.", last_checkpoint) + return ( + model, + optimizer, + scheduler, + state_dict["total_steps"], + state_dict["start_batch_index"], + ) + diff --git a/3.test_cases/10.FSDP/model_utils/concat_dataset.py b/3.test_cases/10.FSDP/model_utils/concat_dataset.py new file mode 100644 index 00000000..70277453 --- /dev/null +++ b/3.test_cases/10.FSDP/model_utils/concat_dataset.py @@ -0,0 +1,39 @@ +import os +import numpy as np +import datasets as hf_datasets +from torch.utils.data import IterableDataset +from typing import Dict, Iterable, Union +from transformers import PreTrainedTokenizerBase + +class ConcatTokensDataset(IterableDataset): + def __init__( + self, + hf_dataset: Union[hf_datasets.IterableDataset, hf_datasets.Dataset], + tokenizer: 
PreTrainedTokenizerBase, + max_length: int, + wrap: bool, + ): + os.environ['TOKENIZERS_PARALLELISM'] = 'false' + self.hf_dataset = hf_dataset + self.tokenizer = tokenizer + self.max_length = max_length + self.should_wrap = wrap + + def __iter__(self) -> Iterable[Dict[str, bytes]]: + + buffer = [] + mask_buffer = [] + for sample in self.hf_dataset: + encoded = self.tokenizer(sample['text'], + truncation=False, + padding=False) + iids = encoded['input_ids'] + mask = encoded['attention_mask'] + buffer = buffer + iids + [self.tokenizer.eos_token_id] + mask_buffer = mask_buffer + mask + [1] + while len(buffer) >= self.max_length: + concat_sample = buffer[:self.max_length] + buffer = buffer[self.max_length:] if self.should_wrap else [] + concat_sample_mask = mask_buffer[:self.max_length] + mask_buffer = mask_buffer[self.max_length:] if self.should_wrap else [] + yield np.array(concat_sample) diff --git a/3.test_cases/10.FSDP/model_utils/train_utils.py b/3.test_cases/10.FSDP/model_utils/train_utils.py new file mode 100644 index 00000000..322db857 --- /dev/null +++ b/3.test_cases/10.FSDP/model_utils/train_utils.py @@ -0,0 +1,412 @@ +import os +import math +import functools +import numpy as np +import torch +import torch.distributed as dist +from datetime import datetime +import tqdm +import logging +from torch.distributed.fsdp import BackwardPrefetch, ShardingStrategy + +g_gigabyte = 1024**3 + +def setup(): + # initialize the process group + dist.init_process_group("nccl") + + +def cleanup(): + dist.destroy_process_group() + +def get_date_of_run(): + """create date and time for file save uniqueness + example: 2022-05-07-08:31:12_PM' + """ + date_of_run = datetime.now().strftime("%Y-%m-%d-%I:%M:%S_%p") + print(f"--> current date and time of run = {date_of_run}") + return date_of_run + + + +def format_metrics_to_gb(item): + """quick function to format numbers to gigabyte and round to 4 digit precision""" + metric_num = item / g_gigabyte + metric_num = round(metric_num, 
ndigits=4) + return metric_num + +def train(args, model, rank, world_size, train_loader, optimizer, epoch, sampler=None): + model.train() + local_rank = int(os.environ['LOCAL_RANK']) + fsdp_loss = torch.zeros(2).to(local_rank) + + if sampler: + sampler.set_epoch(epoch) + if rank==0: + inner_pbar = tqdm.tqdm( + range(len(train_loader)), colour="blue", desc="r0 Training Epoch" + ) + for batch in train_loader: + for key in batch.keys(): + batch[key] = batch[key].to(local_rank) + optimizer.zero_grad() + output = model(input_ids=batch["source_ids"],attention_mask=batch["source_mask"],labels=batch["target_ids"] ) + loss = output["loss"] + loss.backward() + optimizer.step() + fsdp_loss[0] += loss.item() + fsdp_loss[1] += len(batch) + if rank==0: + inner_pbar.update(1) + + dist.all_reduce(fsdp_loss, op=dist.ReduceOp.SUM) + train_accuracy = fsdp_loss[0] / fsdp_loss[1] + + + if rank == 0: + inner_pbar.close() + print( + f"Train Epoch: \t{epoch}, Loss: \t{train_accuracy:.4f}" + ) + return train_accuracy + + +def validation(model, rank, world_size, val_loader): + model.eval() + correct = 0 + local_rank = int(os.environ['LOCAL_RANK']) + fsdp_loss = torch.zeros(2).to(local_rank) + if rank == 0: + inner_pbar = tqdm.tqdm( + range(len(val_loader)), colour="green", desc="Validation Epoch" + ) + with torch.no_grad(): + for batch in val_loader: + for key in batch.keys(): + batch[key] = batch[key].to(local_rank) + output = model(input_ids=batch["source_ids"],attention_mask=batch["source_mask"],labels=batch["target_ids"]) + fsdp_loss[0] += output["loss"].item() # sum up batch loss + fsdp_loss[1] += len(batch) + + if rank==0: + inner_pbar.update(1) + + dist.all_reduce(fsdp_loss, op=dist.ReduceOp.SUM) + val_loss = fsdp_loss[0] / fsdp_loss[1] + if rank == 0: + inner_pbar.close() + print(f"Validation Loss: {val_loss:.4f}") + return val_loss + +def get_model_config(args): + if "gpt_neox" in args.model_type: + from transformers import GPTNeoXConfig + + model_config = GPTNeoXConfig( + 
vocab_size=args.vocab_size, + hidden_size=args.hidden_width, + num_hidden_layers=args.num_layers, + num_attention_heads=args.num_heads, + hidden_act="gelu", + intermediate_size=4 * args.hidden_width, + rotary_pct=args.rotary_pct, + rotary_emb_base=args.rotary_emb_base, + max_position_embeddings=args.max_context_width, + layer_norm_epsilon=1e-05, + initializer_range=args.initializer_range, + use_cache=False, + parallel_attn_output=True, + ) + elif "llama_v2" in args.model_type: + from transformers import LlamaConfig + + model_config = LlamaConfig( + vocab_size=args.vocab_size, + hidden_size=args.hidden_width, + intermediate_size=args.llama_intermediate_size, + num_hidden_layers=args.num_layers, + num_attention_heads=args.num_heads, + num_key_value_heads=args.num_key_value_heads, + hidden_act="silu", + max_position_embeddings=args.max_context_width, + initializer_range=args.initializer_range, + rms_norm_eps=1e-5, + use_cache=False, + pretraining_tp=1, + tie_word_embeddings=False, + rope_scaling=None, + ) + else: + raise NotImplementedError + return model_config + +def compute_num_params(model): + """Get num params.""" + num_params = 0 + seen = set() + for p in model.parameters(): # pylint: disable=invalid-name + if p not in seen: + seen.add(p) + if hasattr(p, "ds_shape"): + num_params += np.prod(p.ds_shape) + else: + num_params += np.prod(p.size()) + + return num_params + +_logger = None +def get_logger(): + global _logger + if _logger is None: + logging.getLogger("torch.distributed.checkpoint._dedup_tensors").setLevel(logging.ERROR) + logging.getLogger("torch.distributed.distributed_c10d").setLevel(logging.ERROR) + _logger = logging.getLogger(__name__) + _logger.setLevel(logging.INFO) + _logger.handlers = [] + ch = logging.StreamHandler() + formatter = logging.Formatter( + "%(asctime)s %(levelname).1s " "[%(filename)s:%(lineno)d] %(message)s", + "%Y-%m-%d %H:%M:%S", + ) + ch.setFormatter(formatter) + _logger.addHandler(ch) + _logger.propagate = False + return 
_logger + +def get_transformer_layer(model_type="gpt2"): + """Get transformer layer.""" + if model_type == "gpt2": + from transformers.models.gpt2.modeling_gpt2 import GPT2Block + + transformer_layer = GPT2Block + + elif model_type == "gpt_neox": + from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXLayer + + transformer_layer = GPTNeoXLayer + + elif model_type == "bloom": + from transformers.models.bloom.modeling_bloom import BloomBlock + + transformer_layer = BloomBlock + + elif model_type == "flash_gptneox": + from flash_attn.modules.block import ParallelBlock + + # TODO: Add support for Block + transformer_layer = ParallelBlock + elif model_type == "llama_v2": + from transformers.models.llama.modeling_llama import LlamaDecoderLayer + + transformer_layer = LlamaDecoderLayer + return transformer_layer + +def get_sharding_strategy(strategy: str): + """Get sharding strategy.""" + sharding_strategy = getattr(ShardingStrategy, strategy.upper()) + _logger.debug("Translating %s to %s.", strategy, sharding_strategy) + return sharding_strategy + + +def get_backward_fetch_policy(policy: str): + """Get backward fetch policy.""" + backward_fetch_policy = getattr(BackwardPrefetch, policy.upper()) + _logger.debug("Translating %s to %s.", policy, backward_fetch_policy) + return backward_fetch_policy + +def apply_activation_checkpoint(args, model=None): + from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( + CheckpointImpl, + apply_activation_checkpointing, + checkpoint_wrapper, + ) + + transformer_layer = get_transformer_layer(args.model_type) + check_fn_gpt = lambda submodule: isinstance( + submodule, transformer_layer + ) + entrant_wrapper = functools.partial( + checkpoint_wrapper, checkpoint_impl=CheckpointImpl.NO_REENTRANT + ) + apply_activation_checkpointing( + model, checkpoint_wrapper_fn=entrant_wrapper, check_fn=check_fn_gpt + ) + +def get_param_groups_by_weight_decay(module): + """Get param groups.""" + weight_decay_params = 
{"params": []} + no_weight_decay_params = {"params": [], "weight_decay": 0.0} + param_ids = set() + + from torch.nn import LayerNorm + + for module_ in module.modules(): + # if isinstance(module_, FusedLayerNorm) or + if isinstance(module_, LayerNorm): + for p in list( + module_._parameters.values() + ): # pylint: disable=invalid-name,protected-access + if p is not None and id(p) not in param_ids: + no_weight_decay_params["params"].append(p) + param_ids.add(id(p)) + else: + for n, p in list( + module_._parameters.items() + ): # pylint: disable=invalid-name,protected-access + if p is not None and n != "bias" and id(p) not in param_ids: + weight_decay_params["params"].append(p) + param_ids.add(id(p)) + for n, p in list( + module_._parameters.items() + ): # pylint: disable=invalid-name,protected-access + if p is not None and n == "bias" and id(p) not in param_ids: + no_weight_decay_params["params"].append(p) + param_ids.add(id(p)) + return weight_decay_params, no_weight_decay_params + +class AnnealingLR: # pylint: disable=too-many-instance-attributes + """Anneals the learning rate.""" + + def __init__( # pylint: disable=too-many-arguments + self, + optimizer, + start_lr, + warmup_iter, + plateau_iter, + total_iters, + decay_style, + last_iter, + min_lr=0.0, + use_checkpoint_lr_scheduler=True, + override_lr_scheduler=False, + ): + + # Class values. + self.optimizer = optimizer + self.start_lr = start_lr + self.min_lr = min_lr + self.warmup_iter = warmup_iter + self.plateau_iter = plateau_iter + self.num_iters = last_iter + self.end_iter = total_iters + assert self.end_iter > 0 + self.decay_style = decay_style + self.override_lr_scheduler = override_lr_scheduler + self.use_checkpoint_lr_scheduler = use_checkpoint_lr_scheduler + if self.override_lr_scheduler: + assert not self.use_checkpoint_lr_scheduler, ( + "both override and " "use-checkpoint are set." 
+ ) + # Set the learning rate + self.step(self.num_iters) + self.rank = dist.get_rank() + + def get_lr(self): + """Learning rate decay functions from: + https://openreview.net/pdf?id=BJYwwY9ll pg. 4""" + + num_iters_ = min(self.num_iters, self.end_iter - self.warmup_iter) + # Warmup. + if self.warmup_iter > 0 and self.num_iters <= self.warmup_iter: + return float(self.start_lr) * num_iters_ / self.warmup_iter + + num_iters_ = num_iters_ - self.warmup_iter + if self.decay_style == "linear": + lr = self.start_lr * (self.end_iter - num_iters_) / self.end_iter + elif self.decay_style == "plateau": + if self.num_iters <= self.plateau_iter: + lr = self.start_lr + else: + lr = ( + self.start_lr + * (self.end_iter - self.num_iters) + / (self.end_iter - self.plateau_iter) + ) + elif self.decay_style == "cosine": + lr = self.start_lr / 2.0 * (math.cos(math.pi * num_iters_ / self.end_iter) + 1) + elif self.decay_style == "exponential": + # exp(-0.693) = 1/2 + lr = self.start_lr * math.exp(-0.693 * num_iters_ / self.end_iter) + else: + lr = self.start_lr + return max(lr, self.min_lr) + + def step(self, step_num=None): + """Set lr for all parameters groups.""" + if step_num is None: + step_num = self.num_iters + 1 + self.num_iters = step_num + new_lr = self.get_lr() + for group in self.optimizer.param_groups: + group["lr"] = new_lr + + def state_dict(self): + """State dict.""" + state_dict = { + "start_lr": self.start_lr, + "warmup_iter": self.warmup_iter, + "num_iters": self.num_iters, + "decay_style": self.decay_style, + "end_iter": self.end_iter, + "min_lr": self.min_lr, + } + return state_dict + + def _check_and_set(self, cls_value, sd_value, name): + """Auxiliary function for checking the values in the checkpoint and + setting them.""" + if self.override_lr_scheduler: + if self.rank == 0: + _logger.info(f"Overriding {name} value to {cls_value}") + return cls_value + + if not self.use_checkpoint_lr_scheduler: + assert ( + cls_value == sd_value + ), f"AnnealingLR: class 
input value and checkpoint values for {name} do not match" + if self.rank == 0: + _logger.info(f" > using checkpoint value {sd_value} for {name}") + return sd_value + + def load_state_dict(self, sd): + """Load state dict.""" + self.start_lr = self._check_and_set(self.start_lr, sd["start_lr"], "learning rate") + self.min_lr = self._check_and_set(self.min_lr, sd["min_lr"], "minimum learning rate") + self.warmup_iter = self._check_and_set( + self.warmup_iter, sd["warmup_iter"], "warmup iterations" + ) + self.end_iter = self._check_and_set( + self.end_iter, sd["end_iter"], "total number of iterations" + ) + self.decay_style = self._check_and_set(self.decay_style, sd["decay_style"], "decay style") + + self.num_iters = sd["num_iters"] + self.step(self.num_iters) + +def get_learning_rate_scheduler(optimizer, args): + """Get learning rate scheduler.""" + use_checkpoint_lr_scheduler = args.resume_from_checkpoint is not None + + # Add linear learning rate scheduler. + if args.lr_decay_iters is not None: + num_iters = args.lr_decay_iters + else: + num_iters = args.max_steps + num_iters = max(1, num_iters) + init_step = 0 + warmup_iter = args.warmup * num_iters + plateau_iter = warmup_iter + args.plateau * num_iters + lr_scheduler = AnnealingLR( + optimizer, + start_lr=args.lr, + warmup_iter=warmup_iter, + plateau_iter=plateau_iter, + total_iters=num_iters, + decay_style=args.lr_decay_style, + last_iter=init_step, + min_lr=args.min_lr, + use_checkpoint_lr_scheduler=use_checkpoint_lr_scheduler, + override_lr_scheduler=False, + ) + + return lr_scheduler \ No newline at end of file diff --git a/3.test_cases/10.FSDP/train.py b/3.test_cases/10.FSDP/train.py new file mode 100644 index 00000000..87129b1e --- /dev/null +++ b/3.test_cases/10.FSDP/train.py @@ -0,0 +1,270 @@ +import datetime +import functools +import math +import re +import time + +import numpy as np +import torch +from torch import optim +import torch.distributed as dist +import torch.utils.data + +import transformers 
+from transformers import AutoModelForCausalLM, AutoTokenizer +from datasets import load_dataset + +from torch.distributed.fsdp import FullyShardedDataParallel as FSDP +from torch.distributed.fsdp import MixedPrecision +from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy +from torch.utils.data import DataLoader + +from model_utils.concat_dataset import ConcatTokensDataset +from model_utils.train_utils import (get_model_config, + compute_num_params, + get_transformer_layer, + get_sharding_strategy, + get_backward_fetch_policy, + apply_activation_checkpoint, + get_param_groups_by_weight_decay, + get_logger, + get_learning_rate_scheduler) +from model_utils.checkpoint import save_checkpoint, load_checkpoint +from model_utils.arguments import parse_args + +logger = get_logger() + + +def create_streaming_dataloaders(dataset, + tokenizer, + global_rank=0, + train_batch_size=1, + val_batch_size=1, + max_context_width=4096, + workers=4): + tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b") + data = load_dataset(dataset, 'en', streaming=True).shuffle(42+global_rank) + train_concat_dataset = ConcatTokensDataset(data['train'], tokenizer, max_context_width, True) + val_concat_dataset = ConcatTokensDataset(data['validation'], tokenizer, max_context_width, True) + train_dataloader = iter(DataLoader(train_concat_dataset, + batch_size=train_batch_size, + num_workers=workers, + pin_memory=True, + prefetch_factor=4)) + val_dataloader = iter(DataLoader(val_concat_dataset, + batch_size=val_batch_size, + num_workers=workers, + pin_memory=True, + prefetch_factor=4)) + return train_dataloader, val_dataloader + + +def eval_model(model, dataloader, num_batches): + """Eval step.""" + model = model.eval() + n_batches = 0 + loss = 0.0 + + with torch.no_grad(): + for batch_idx, input_data in enumerate(dataloader): + if batch_idx >= num_batches: + break + + loss += model(input_ids=input_data, attention_mask=None, 
labels=input_data)["loss"] + n_batches += 1 + + if n_batches > 0: + detached_loss = loss.detach() + torch.distributed.all_reduce(detached_loss) + loss = detached_loss.item() / dist.get_world_size() + loss /= n_batches + ppl = math.exp(loss) + else: + loss = -1.0 + ppl = -1.0 + + return loss, ppl + +def train( + model, + optimizer, + train_dataloader, + val_dataloader, + lr_scheduler, + model_config, + num_params, + args, + global_rank, + world_size, + total_steps=0, + start_batch_index=0 + ): + model.train() + for index in range(args.epochs): + for batch_idx, input_data in enumerate(train_dataloader): + if batch_idx < start_batch_index: + continue + optimizer.zero_grad(set_to_none=True) + step_start = time.time() + loss = model(input_ids=input_data, attention_mask=None, labels=input_data)["loss"] + loss.backward() + model.clip_grad_norm_(args.grad_clip) + optimizer.step() + lr_scheduler.step() + total_steps += 1 + loss_metric = loss.item() + step_time = time.time() - step_start + sample_processed = input_data.shape[0] * world_size + throughput = sample_processed / step_time + loss_scalar = loss.item() + current_lr = lr_scheduler.get_lr() + if global_rank==0 and batch_idx%args.logging_freq==0: + logger.info( + "Batch %d Loss: %s, Speed: %.2f samples/sec, lr: %.6f", # pylint: disable=line-too-long + batch_idx, + loss_scalar, + throughput, + current_lr, + ) + if args.validation_freq and not total_steps % args.validation_freq: + val_loss, val_ppl = eval_model( + model, val_dataloader, args.validation_batches + ) + model = model.train() + if global_rank == 0: + logger.info( + "Batch %d Validation loss: %s", + batch_idx, + val_loss, + ) + if args.checkpoint_dir and not total_steps % args.checkpoint_freq: + user_content = { + "cli_args": args.__dict__, + "num_params": num_params, + "total_steps": total_steps, + "model_config": model_config, + "start_batch_index": batch_idx + 1, + } + sub_dir = f"{args.model_type}-{total_steps}steps" + + save_checkpoint( + model, + 
optimizer, + lr_scheduler, + user_content, + args.checkpoint_dir, + sub_dir, + ) + if total_steps >= args.max_steps: + break + + +def main(args): + dist.init_process_group() + global_rank = dist.get_rank() + device = global_rank % torch.cuda.device_count() + world_size = dist.get_world_size() + + if args.bf16: + dtype = torch.bfloat16 + else: + dtype = torch.get_default_dtype() + + model_config = get_model_config(args) + if global_rank == 0: + logger.info( + "Creating Model" + ) + model = AutoModelForCausalLM.from_config(model_config) + + num_params = compute_num_params(model) + if global_rank == 0: + logger.info( + "Created model with total parameters: %d (%.2f B)", num_params, num_params * 1e-9 + ) + transformer_layer = get_transformer_layer(args.model_type) + + gpt_auto_wrap_policy = functools.partial( + transformer_auto_wrap_policy, + transformer_layer_cls={ + transformer_layer, + }, + ) + + torch.cuda.set_device(device) + mixed_precision_policy = MixedPrecision( + param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype + ) + + model = FSDP( + model, + auto_wrap_policy=gpt_auto_wrap_policy, + mixed_precision=mixed_precision_policy, + limit_all_gathers=args.limit_all_gathers, + device_id=torch.cuda.current_device(), + use_orig_params=False + ) + + if global_rank == 0: + logger.info("Wrapped model with FSDP") + + if args.activation_checkpointing > 0: + apply_activation_checkpoint(args, model=model) + + if args.offload_activations > 0: + from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import offload_wrapper + + model = offload_wrapper(model) + + param_groups = get_param_groups_by_weight_decay(model) + + optimizer = optim.AdamW( + param_groups, betas=(args.beta1, args.beta2), lr=args.lr, weight_decay=args.weight_decay + ) + + if global_rank == 0: + logger.info("Created optimizer") + + lr_scheduler = get_learning_rate_scheduler(optimizer, args) + + if args.resume_from_checkpoint: + ( + model, + optimizer, + lr_scheduler, + total_steps, + 
start_batch_index, + ) = load_checkpoint(model, + optimizer, + lr_scheduler, + args.resume_from_checkpoint, + args.model_type, + device) + else: + total_steps = 0 + start_batch_index = 0 + + train_dataloader, val_dataloader = create_streaming_dataloaders(dataset=args.dataset_path, + tokenizer=args.tokenizer, + global_rank=global_rank, + train_batch_size=args.train_batch_size, + val_batch_size=args.val_batch_size, + max_context_width=4096, + workers=4) + + train(model, + optimizer, + train_dataloader, + val_dataloader, + lr_scheduler, + model_config, + num_params, + args, + global_rank, + world_size, + total_steps, + start_batch_index) + +if __name__ == "__main__": + args, _ = parse_args() + main(args) \ No newline at end of file From 20a07442716c64bf938e9fb4fed68d71ba5337a2 Mon Sep 17 00:00:00 2001 From: johnbensnyder Date: Tue, 28 Nov 2023 14:23:11 +0000 Subject: [PATCH 229/648] cleaned up ipynb files --- .../0.create_conda_env-checkpoint.sh | 18 ------ .../1.distributed-training-checkpoint.sbatch | 60 ------------------- .../.ipynb_checkpoints/README-checkpoint.md | 57 ------------------ 3 files changed, 135 deletions(-) delete mode 100755 3.test_cases/10.FSDP/.ipynb_checkpoints/0.create_conda_env-checkpoint.sh delete mode 100755 3.test_cases/10.FSDP/.ipynb_checkpoints/1.distributed-training-checkpoint.sbatch delete mode 100644 3.test_cases/10.FSDP/.ipynb_checkpoints/README-checkpoint.md diff --git a/3.test_cases/10.FSDP/.ipynb_checkpoints/0.create_conda_env-checkpoint.sh b/3.test_cases/10.FSDP/.ipynb_checkpoints/0.create_conda_env-checkpoint.sh deleted file mode 100755 index cecbc699..00000000 --- a/3.test_cases/10.FSDP/.ipynb_checkpoints/0.create_conda_env-checkpoint.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -set -ex - -wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -chmod +x Miniconda3-latest-Linux-x86_64.sh -./Miniconda3-latest-Linux-x86_64.sh -b -f -p ./miniconda3 - -source ./miniconda3/bin/activate - -conda create -y 
-p ./pt_fsdp python=3.10 - -source activate ./pt_fsdp/ - -# Install PyTorch -pip install torch==2.0.1 torchvision torchaudio transformers datasets - -# Create checkpoint dir -mkdir checkpoints diff --git a/3.test_cases/10.FSDP/.ipynb_checkpoints/1.distributed-training-checkpoint.sbatch b/3.test_cases/10.FSDP/.ipynb_checkpoints/1.distributed-training-checkpoint.sbatch deleted file mode 100755 index cbedafe7..00000000 --- a/3.test_cases/10.FSDP/.ipynb_checkpoints/1.distributed-training-checkpoint.sbatch +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/bash - -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 - -#SBATCH --nodes=4 # number of nodes to use, 2 p4d(e) = 16 A100 GPUs -#SBATCH --job-name=FSDP # name of your job -#SBATCH --exclusive # job has exclusive use of the resource, no sharing - -set -ex; - -########################### -###### User Variables ##### -########################### - -########################### -## Environment Variables ## -########################### - -## Plenty of EFA level variables -export FI_EFA_USE_DEVICE_RDMA=1 # use for p4d -export FI_EFA_FORK_SAFE=1 -export FI_LOG_LEVEL=1 -export FI_PROVIDER=efa -export NCCL_DEBUG=INFO - -########################### -####### Torch Dist ####### -########################### - -declare -a TORCHRUN_ARGS=( - --nproc_per_node=8 \ - --nnodes=$SLURM_JOB_NUM_NODES \ - --rdzv_id=$SLURM_JOB_ID \ - --rdzv_backend=c10d \ - --rdzv_endpoint=$(hostname):29501 \ -) - -export TORCHRUN=./pt_fsdp/bin/torchrun -export TRAIN_SCRIPT=./train.py - -############################ -# Llama 2 Training Params ## -############################ - -declare -a TRAINING_ARGS=( - --max_context_width=4096 \ - --num_key_value_heads=32 \ # 7b: 32 13b: 40 70b: 8 - --llama_intermediate_size=11008 \ # 7b: 11008 13b: 13824 70b: 28672 - --hidden_width=4096 \ # 7b: 4096 13b: 5120 70b: 8192 - --num_layers=32 \ # 7b: 32 13b: 40 70b: 80 - --num_heads=32 \ # 7b: 32 13b: 40 70b: 64 - 
--model_type=llama_v2 \ - --checkpoint_freq=50 \ - --validation_freq=500 \ - --checkpoint_dir=./checkpoints \ - --resume_from_checkpoint=./checkpoints -) - -srun -l ${TORCHRUN} "${TORCHRUN_ARGS[@]}" $TRAIN_SCRIPT "${TRAINING_ARGS[@]}" diff --git a/3.test_cases/10.FSDP/.ipynb_checkpoints/README-checkpoint.md b/3.test_cases/10.FSDP/.ipynb_checkpoints/README-checkpoint.md deleted file mode 100644 index 5a2a4bd8..00000000 --- a/3.test_cases/10.FSDP/.ipynb_checkpoints/README-checkpoint.md +++ /dev/null @@ -1,57 +0,0 @@ -# Get Started Training Llama 2 with PyTorch FSDP in 5 Minutes - -These scripts provide an easy way to get started with multinode [FSDP](https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html) training on Slurm. It is designed to be as simple as possible, requires no data preparation, and uses a simple Conda environment. - -## 0. Prerequisites - -Before running this training, you'll need to create a Slurm cluster with an FSx for Lustre file system. Instructions can be found in [1.architectures](../../1.architectures). - -## 1. Create Environment - -On your cluster head node, navigate to your shared FSx for Lustre file system, and clone this repo. If you followed the tutorial linked above, it will be location at `/fsx`. - -``` -cd /fsx -git clone https://github.com/aws-samples/awsome-distributed-training/ -cd awsome-distributed-training/3.test_cases/10.FSDP -``` - -Next, run the `0.create_conda_env.sh` script. This script will first download and install [Miniconda](https://docs.conda.io/projects/miniconda/en/latest/), then create a Conda env called `pt_fsdp`. - -``` -bash 0.create_conda_env.sh -``` - -By creating this environment on the shared FSx for Lustre volume, all compute nodes in our cluster will have access to it. - -## 2. Data - -For this example, we'll be using the [C4 dataset](https://huggingface.co/datasets/allenai/c4). 
Because C4 is several hundred gigabytes, we'll stream the data directly from [HuggingFace](https://huggingface.co/datasets). The `create_streaming_dataloaders` function in `train.py` is already setup to do this, so there's no data prep required for running this training. If you'd like to instead use your own dataset, you can do so by [formatting it as a HuggingFace dataset](https://huggingface.co/docs/datasets/create_dataset), and passing its location to the `--dataset_path` argument. - -## 3. Launch Training - -The script to launch a Slurm batch training job can be found in `1.distributed_training.sbatch`. You can adjust the number of training nodes by modifying `#SBATCH --nodes=4`. You can also adjust the training parameters in `TRAINING_ARGS`. Additional parameters can be found in `model/arguments.py`. Note that we use the same directory for both `--checkpoint_dir` and `--resume_from_checkpoint`. If there are multiple checkpoints, `--resume_from_checkpoint` will automatically select the most recent one. This way if our training is interupted for any reason, it will automatically pick up the most recent checkpoint. - -``` -declare -a TRAINING_ARGS=( - --num_key_value_heads=32 \ - --llama_intermediate_size=11008 \ - --max_context_width=4096 \ - --hidden_width=4096 \ - --num_layers=32 \ - --num_heads=32 \ - --model_type=llama_v2 \ - --checkpoint_freq=1000 \ - --validation_freq=500 \ - --checkpoint_dir=./checkpoints \ - --resume_from_checkpoint=./checkpoints -) -``` - -To launch your training, run - -``` -sbatch 1.distributed_training.sbatch -``` - -You'll find a new file in the FSDP directory of the form `slurm-[job-number].out`. This will be continuously updated with your training logs. To stop training, get your job number using `squeue` and run `scancel [job-number]`. 
From 58d77630c6984d7a92e6275b975e2ee76304191c Mon Sep 17 00:00:00 2001 From: Ben Snyder Date: Tue, 28 Nov 2023 21:39:31 -0800 Subject: [PATCH 230/648] added license and improved readme --- 3.test_cases/10.FSDP/0.create_conda_env.sh | 3 ++ .../10.FSDP/1.distributed-training.sbatch | 2 +- 3.test_cases/10.FSDP/README.md | 53 +++++++++++++++++-- 3.test_cases/10.FSDP/model_utils/arguments.py | 5 +- .../10.FSDP/model_utils/checkpoint.py | 3 ++ .../10.FSDP/model_utils/concat_dataset.py | 3 ++ .../10.FSDP/model_utils/train_utils.py | 3 ++ 3.test_cases/10.FSDP/train.py | 3 ++ 8 files changed, 67 insertions(+), 8 deletions(-) diff --git a/3.test_cases/10.FSDP/0.create_conda_env.sh b/3.test_cases/10.FSDP/0.create_conda_env.sh index cecbc699..38cce21f 100755 --- a/3.test_cases/10.FSDP/0.create_conda_env.sh +++ b/3.test_cases/10.FSDP/0.create_conda_env.sh @@ -1,6 +1,9 @@ #!/usr/bin/env bash set -ex +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT-0 + wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh chmod +x Miniconda3-latest-Linux-x86_64.sh ./Miniconda3-latest-Linux-x86_64.sh -b -f -p ./miniconda3 diff --git a/3.test_cases/10.FSDP/1.distributed-training.sbatch b/3.test_cases/10.FSDP/1.distributed-training.sbatch index cbedafe7..ce34b835 100755 --- a/3.test_cases/10.FSDP/1.distributed-training.sbatch +++ b/3.test_cases/10.FSDP/1.distributed-training.sbatch @@ -3,7 +3,7 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -#SBATCH --nodes=4 # number of nodes to use, 2 p4d(e) = 16 A100 GPUs +#SBATCH --nodes=4 # number of nodes to use, 4 p4d(e) = 32 A100 GPUs #SBATCH --job-name=FSDP # name of your job #SBATCH --exclusive # job has exclusive use of the resource, no sharing diff --git a/3.test_cases/10.FSDP/README.md b/3.test_cases/10.FSDP/README.md index 5a2a4bd8..1b918456 100644 --- a/3.test_cases/10.FSDP/README.md +++ b/3.test_cases/10.FSDP/README.md @@ -8,7 +8,10 @@ Before running this training, you'll need to create a Slurm cluster with an FSx ## 1. Create Environment -On your cluster head node, navigate to your shared FSx for Lustre file system, and clone this repo. If you followed the tutorial linked above, it will be location at `/fsx`. +On your cluster head node, +1. Navigate to your shared FSx for Lustre file system. +* If you followed the tutorial linked above, it will be located at `/fsx`. +2. Clone this repo. ``` cd /fsx @@ -16,17 +19,20 @@ git clone https://github.com/aws-samples/awsome-distributed-training/ cd awsome-distributed-training/3.test_cases/10.FSDP ``` -Next, run the `0.create_conda_env.sh` script. This script will first download and install [Miniconda](https://docs.conda.io/projects/miniconda/en/latest/), then create a Conda env called `pt_fsdp`. +3. Run the `0.create_conda_env.sh` script. +* This script will first download and install [Miniconda](https://docs.conda.io/projects/miniconda/en/latest/), then create a Conda env called `pt_fsdp`. ``` bash 0.create_conda_env.sh ``` -By creating this environment on the shared FSx for Lustre volume, all compute nodes in our cluster will have access to it. +* By creating this environment on the shared FSx for Lustre volume, all compute nodes in our cluster will have access to it. ## 2. Data -For this example, we'll be using the [C4 dataset](https://huggingface.co/datasets/allenai/c4). 
Because C4 is several hundred gigabytes, we'll stream the data directly from [HuggingFace](https://huggingface.co/datasets). The `create_streaming_dataloaders` function in `train.py` is already setup to do this, so there's no data prep required for running this training. If you'd like to instead use your own dataset, you can do so by [formatting it as a HuggingFace dataset](https://huggingface.co/docs/datasets/create_dataset), and passing its location to the `--dataset_path` argument. +For this example, we'll be using the [C4 dataset](https://huggingface.co/datasets/allenai/c4), which is several hundred gigabytes. Instead of downloading the whole thing, the `create_streaming_dataloaders` function will stream the dataset from [HuggingFace](https://huggingface.co/datasets), so there's no data prep required for running this training. + +If you'd like to instead use your own dataset, you can do so by [formatting it as a HuggingFace dataset](https://huggingface.co/docs/datasets/create_dataset), and passing its location to the `--dataset_path` argument. ## 3. Launch Training @@ -54,4 +60,41 @@ To launch your training, run sbatch 1.distributed_training.sbatch ``` -You'll find a new file in the FSDP directory of the form `slurm-[job-number].out`. This will be continuously updated with your training logs. To stop training, get your job number using `squeue` and run `scancel [job-number]`. +You'll find a new file in the FSDP directory of the form `slurm-[job-number].out`. This will be continuously updated with your training logs. Don't be worried if you see a long stream of NCCL logs (we prefer to use verbose logging). After about a minute, you should see your model training, with an output similar to below. 
+ +``` ++ TORCHRUN=./pt_fsdp/bin/torchrun ++ export TRAIN_SCRIPT=./train.py ++ TRAIN_SCRIPT=./train.py ++ TRAINING_ARGS=(--max_context_width=4096 --num_key_value_heads=32 \ # 7b: 32 13b: 40 70b: 8 --llama_intermediate_size=11008 \ # 7b: 11008 13b: 13824 70b: 28672 --hidden_width=4096 \ # 7b: 4096 13b: 5120 70b: 8192 --num_layers=32 \ # 7b: 32 13b: 40 70b: 80 --num_heads=32 \ # 7b: 32 13b: 40 70b: 64 --model_type=llama_v2 --checkpoint_freq=50 --validation_freq=500 --checkpoint_dir=./checkpoints --resume_from_checkpoint=./checkpoints) +... +0: 2023-11-29 04:17:52 I [train.py:175] Creating Model +0: 2023-11-29 04:19:17 I [train.py:182] Created model with total parameters: 6889410560 (6.89 B) +0: 2023-11-29 04:19:28 I [train.py:209] Wrapped model with FSDP +0: 2023-11-29 04:19:28 I [train.py:226] Created optimizer +... +2: ip-10-1-41-139:6171:8092 [0] NCCL INFO NET/OFI Initializing aws-ofi-nccl 1.7.3-aws +3: ip-10-1-44-54:6168:6168 [7] NCCL INFO cudaDriverVersion 12020 +0: ip-10-1-14-81:6158:9214 [2] NCCL INFO NET/OFI Selected Provider is efa (found 4 nics) +... 
+0: ip-10-1-14-81:6158:9214 [2] NCCL INFO comm 0x8b6b550 rank 2 nranks 32 cudaDev 2 busId 201c0 - Init COMPLETE +0: ip-10-1-14-81:6157:9213 [1] NCCL INFO comm 0x8494480 rank 1 nranks 32 cudaDev 1 busId 101d0 - Init COMPLETE +0: 2023-11-29 04:19:48 I [train.py:122] Batch 0 Loss: 11.6533041, Speed: 3.98 samples/sec, lr: 0.000006 +0: 2023-11-29 04:19:54 I [train.py:122] Batch 1 Loss: 11.620493, Speed: 10.72 samples/sec, lr: 0.000013 +0: 2023-11-29 04:20:00 I [train.py:122] Batch 2 Loss: 11.3152923, Speed: 10.71 samples/sec, lr: 0.000019 +0: 2023-11-29 04:20:06 I [train.py:122] Batch 3 Loss: 10.461415, Speed: 10.11 samples/sec, lr: 0.000025 +0: 2023-11-29 04:20:12 I [train.py:122] Batch 4 Loss: 11.8934202, Speed: 10.71 samples/sec, lr: 0.000031 +0: 2023-11-29 04:20:18 I [train.py:122] Batch 5 Loss: 13.9545879, Speed: 10.70 samples/sec, lr: 0.000038 +``` + +To modify training for a 13 or 70B Llama 2 model, just change the corresponding parameters based on the values in the [Llama 2 paper](https://arxiv.org/abs/2307.09288). + +| Param | 7B | 13B | 70B | +| ------------------------ | ----------- | ----------- | ----------- | +| llama_intermediate_size | 11008 | 13824 | 28672 | +| num_key_value_heads | 32 | 40 | 8 | +| hidden_width | 4096 | 5120 | 8192 | +| num_layers | 32 | 40 | 80 | +| num_heads | 32 | 40 | 64 | + +If you need to cancel or modify your job, see the Slurm commands available in the [Slurm documentation](https://slurm.schedmd.com/quickstart.html). \ No newline at end of file diff --git a/3.test_cases/10.FSDP/model_utils/arguments.py b/3.test_cases/10.FSDP/model_utils/arguments.py index 766af75f..e829f018 100644 --- a/3.test_cases/10.FSDP/model_utils/arguments.py +++ b/3.test_cases/10.FSDP/model_utils/arguments.py @@ -1,4 +1,5 @@ -"""FSDP binary script arguments.""" +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT-0 import argparse import os @@ -17,7 +18,7 @@ def parse_args(): # pylint: disable=too-many-statements "--train_batch_size", type=int, default=2, - help="batch size per dp rank, for tensor parallelism degree 8 with pipeline parallel degree 1 this means 8*this batch size per node", # pylint: disable=line-too-long + help="batch size per dp rank", # pylint: disable=line-too-long ) opt_grp.add_argument("--val_batch_size", type=int, default=4) opt_grp.add_argument("--max_steps", "--max_training_steps", type=int, default=5000) diff --git a/3.test_cases/10.FSDP/model_utils/checkpoint.py b/3.test_cases/10.FSDP/model_utils/checkpoint.py index f247fae5..0bfb8ed2 100644 --- a/3.test_cases/10.FSDP/model_utils/checkpoint.py +++ b/3.test_cases/10.FSDP/model_utils/checkpoint.py @@ -1,3 +1,6 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT-0 + import os import re import pickle diff --git a/3.test_cases/10.FSDP/model_utils/concat_dataset.py b/3.test_cases/10.FSDP/model_utils/concat_dataset.py index 70277453..687f683a 100644 --- a/3.test_cases/10.FSDP/model_utils/concat_dataset.py +++ b/3.test_cases/10.FSDP/model_utils/concat_dataset.py @@ -1,3 +1,6 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT-0 + import os import numpy as np import datasets as hf_datasets diff --git a/3.test_cases/10.FSDP/model_utils/train_utils.py b/3.test_cases/10.FSDP/model_utils/train_utils.py index 322db857..83dc3d4d 100644 --- a/3.test_cases/10.FSDP/model_utils/train_utils.py +++ b/3.test_cases/10.FSDP/model_utils/train_utils.py @@ -1,3 +1,6 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: MIT-0 + import os import math import functools diff --git a/3.test_cases/10.FSDP/train.py b/3.test_cases/10.FSDP/train.py index 87129b1e..ce061ca5 100644 --- a/3.test_cases/10.FSDP/train.py +++ b/3.test_cases/10.FSDP/train.py @@ -1,3 +1,6 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT-0 + import datetime import functools import math From dbd10c8d7097e16109e5acd3d355f8de8ee0587e Mon Sep 17 00:00:00 2001 From: Ben Snyder Date: Tue, 28 Nov 2023 21:41:28 -0800 Subject: [PATCH 231/648] get right license on sbatch --- 3.test_cases/10.FSDP/1.distributed-training.sbatch | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/10.FSDP/1.distributed-training.sbatch b/3.test_cases/10.FSDP/1.distributed-training.sbatch index ce34b835..b6fd3def 100755 --- a/3.test_cases/10.FSDP/1.distributed-training.sbatch +++ b/3.test_cases/10.FSDP/1.distributed-training.sbatch @@ -1,7 +1,7 @@ #!/bin/bash # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: MIT-0 #SBATCH --nodes=4 # number of nodes to use, 4 p4d(e) = 32 A100 GPUs #SBATCH --job-name=FSDP # name of your job From 84fe043fae71841708f408068676d12170e4630b Mon Sep 17 00:00:00 2001 From: Ankur Srivastava Date: Wed, 29 Nov 2023 20:11:44 +0530 Subject: [PATCH 232/648] Made changes to address Keita comments Signed-off-by: Ankur Srivastava --- 3.test_cases/6.stable-diffusion/README.md | 2 +- .../multi-node/1.Dockerfile | 21 ++++++------------- .../multi-node/2.train.sbatch | 2 +- .../single-node/0.Dockerfile | 2 +- 4 files changed, 9 insertions(+), 18 deletions(-) diff --git a/3.test_cases/6.stable-diffusion/README.md b/3.test_cases/6.stable-diffusion/README.md index 94181cdc..e7b31e5b 100644 --- a/3.test_cases/6.stable-diffusion/README.md +++ b/3.test_cases/6.stable-diffusion/README.md @@ -211,7 +211,7 @@ docker build --build-arg MOSAICML_VERSION=${MOSAICML_VERSION} --build-arg PYTORC Convert the Docker container image to an [Enroot](https://github.com/NVIDIA/enroot) squash file that will be stored in /apps. This step takes a few minutes. 
``` -enroot import -o /apps/${DOCKER_IMAGE_NAME}.sqsh dockerd://${DOCKER_IMAGE_NAME} +enroot import -o /apps/${DOCKER_IMAGE_NAME}.sqsh dockerd://${DOCKER_IMAGE_NAME}:${TAG} ``` #### 2.1.3 Now we can start training diff --git a/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile b/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile index 98440b0b..ec47b9aa 100644 --- a/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile +++ b/3.test_cases/6.stable-diffusion/multi-node/1.Dockerfile @@ -1,11 +1,13 @@ ARG PYTORCH_IMAGE -FROM PYTORCH_IMAGE +FROM ${PYTORCH_IMAGE} ARG MOSAICML_VERSION -ARG EFA_INSTALLER_VERSION=latest -ARG AWS_OFI_NCCL_VERSION=v1.7.2-aws +ARG PYTORCH_INDEX_URL + +ARG EFA_INSTALLER_VERSION=1.28.0 +ARG AWS_OFI_NCCL_VERSION=v1.7.3-aws ARG NCCL_TESTS_VERSION=master ARG NCCL_VERSION=v2.18.5-1 RUN apt-get update -y @@ -84,21 +86,10 @@ RUN export OPAL_PREFIX="" \ --with-mpi=/opt/amazon/openmpi/ \ && make && make install -################################################### -## Install NCCL-tests -RUN git clone https://github.com/NVIDIA/nccl-tests.git /opt/nccl-tests \ - && cd /opt/nccl-tests \ - && git checkout ${NCCL_TESTS_VERSION} \ - && make MPI=1 \ - MPI_HOME=/opt/amazon/openmpi/ \ - CUDA_HOME=/usr/local/cuda \ - NCCL_HOME=/opt/nccl/build \ - NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_90,code=sm_90" - RUN git clone https://github.com/mosaicml/diffusion-benchmark.git RUN pip3 install -r diffusion-benchmark/requirements.txt RUN pip3 install mosaicml==${MOSAICML_VERSION} --force -RUN pip3 install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu121 --force +RUN pip3 install --pre torch torchvision torchaudio --index-url ${PYTORCH_INDEX_URL} --force RUN pip3 uninstall transformer-engine -y RUN pip3 install protobuf==3.20.3 diff --git a/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch 
b/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch index caba4a2e..4194067a 100644 --- a/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch +++ b/3.test_cases/6.stable-diffusion/multi-node/2.train.sbatch @@ -13,7 +13,7 @@ #SBATCH --output jobs/slurm-%j.out # default variables for Enroot -: "${APPS_PATH:=/fsx}" +: "${APPS_PATH:=/apps}" : "${DATA_PATH:=/fsx}" # default variables for Enroot diff --git a/3.test_cases/6.stable-diffusion/single-node/0.Dockerfile b/3.test_cases/6.stable-diffusion/single-node/0.Dockerfile index a6ac4ea5..ffe4a783 100644 --- a/3.test_cases/6.stable-diffusion/single-node/0.Dockerfile +++ b/3.test_cases/6.stable-diffusion/single-node/0.Dockerfile @@ -3,7 +3,7 @@ ARG PYTORCH_IMAGE -FROM PYTORCH_IMAGE +FROM ${PYTORCH_IMAGE} ARG MOSAICML_VERSION ARG PYTORCH_INDEX_URL From 49ee366b92ad109f92ef6945656eadce435b09c4 Mon Sep 17 00:00:00 2001 From: Ben Snyder Date: Wed, 29 Nov 2023 09:12:42 -0800 Subject: [PATCH 233/648] updated readme and port --- 3.test_cases/10.FSDP/1.distributed-training.sbatch | 2 +- README.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/3.test_cases/10.FSDP/1.distributed-training.sbatch b/3.test_cases/10.FSDP/1.distributed-training.sbatch index b6fd3def..2d5fc8ca 100755 --- a/3.test_cases/10.FSDP/1.distributed-training.sbatch +++ b/3.test_cases/10.FSDP/1.distributed-training.sbatch @@ -33,7 +33,7 @@ declare -a TORCHRUN_ARGS=( --nnodes=$SLURM_JOB_NUM_NODES \ --rdzv_id=$SLURM_JOB_ID \ --rdzv_backend=c10d \ - --rdzv_endpoint=$(hostname):29501 \ + --rdzv_endpoint=$(hostname):0 \ ) export TORCHRUN=./pt_fsdp/bin/torchrun diff --git a/README.md b/README.md index 6441009e..ae34b229 100644 --- a/README.md +++ b/README.md @@ -47,6 +47,7 @@ All test cases are under `3.test_cases/`. 
You can go in each test case directory | [`6.stable-diffusion`](./3.test_cases/6.stable-diffusion) | ✅ | ❓ | ❓ | | [`7.tensorflow-distributed`](./3.test_cases/7.tensorflow-distributed) | ✅ | ❓ | ❓ | | [`8.neuronx-nemo-megatron`](./3.test_cases/8.neuronx-nemo-megatron) | ✅ | ❓ | ❓ | +| [`10.FSDP`](./3.test_cases/10.FSDP) | ✅ | ❓ | ❓ | ## 4. Validation scripts From a6c97396883474f50846ff9e91f1ce59cdba85bb Mon Sep 17 00:00:00 2001 From: Ben Snyder Date: Wed, 29 Nov 2023 09:14:09 -0800 Subject: [PATCH 234/648] added note about NCCL logging --- 3.test_cases/10.FSDP/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3.test_cases/10.FSDP/README.md b/3.test_cases/10.FSDP/README.md index 1b918456..4983cc84 100644 --- a/3.test_cases/10.FSDP/README.md +++ b/3.test_cases/10.FSDP/README.md @@ -60,7 +60,7 @@ To launch your training, run sbatch 1.distributed_training.sbatch ``` -You'll find a new file in the FSDP directory of the form `slurm-[job-number].out`. This will be continuously updated with your training logs. Don't be worried if you see a long stream of NCCL logs (we prefer to use verbose logging). After about a minute, you should see your model training, with an output similar to below. +You'll find a new file in the FSDP directory of the form `slurm-[job-number].out`. This will be continuously updated with your training logs. Don't be worried if you see a long stream of NCCL logs (we prefer to use `NCCL_DEBUG=INFO` for verbose logging). After about a minute, you should see your model training, with an output similar to below. 
``` + TORCHRUN=./pt_fsdp/bin/torchrun From b307d4fe241fb824574af1e70db1f1c8803e45fe Mon Sep 17 00:00:00 2001 From: Ben Snyder Date: Wed, 29 Nov 2023 10:35:42 -0800 Subject: [PATCH 235/648] Added SageMaker HyperPod --- .gitignore | 3 +- ...rClustersExecutionRoleTrustedEntities.json | 14 + ...nSageMakerClustersExecutionRolePolicy.json | 84 ++++ .../5.sagemaker-hyperpod/2.SageMakerVPC.yaml | 383 ++++++++++++++++++ .../5.sagemaker-hyperpod/3.FSxLustre.yaml | 100 +++++ .../LifecycleScripts/base-config/add_users.sh | 62 +++ .../base-config/lifecycle_script.py | 167 ++++++++ .../LifecycleScripts/base-config/mount_fsx.sh | 133 ++++++ .../LifecycleScripts/base-config/on_create.sh | 41 ++ .../base-config/provisioning_parameters.json | 12 + .../base-config/setup_mariadb_accounting.sh | 100 +++++ .../base-config/setup_rds_accounting.sh | 125 ++++++ .../base-config/shared_users_sample.txt | 4 + .../base-config/start_slurm.sh | 35 ++ .../base-config/utils/enroot.conf | 72 ++++ .../base-config/utils/install_docker.sh | 17 + .../base-config/utils/install_enroot_pyxis.sh | 47 +++ .../5.sagemaker-hyperpod/README.md | 320 +++++++++++++++ .../5.sagemaker-hyperpod/easy-ssh.sh | 18 + 1.architectures/5.sagemaker-hyperpod/env.sh | 5 + README.md | 1 + 21 files changed, 1742 insertions(+), 1 deletion(-) create mode 100644 1.architectures/5.sagemaker-hyperpod/0.AmazonSageMakerClustersExecutionRoleTrustedEntities.json create mode 100644 1.architectures/5.sagemaker-hyperpod/1.AmazonSageMakerClustersExecutionRolePolicy.json create mode 100644 1.architectures/5.sagemaker-hyperpod/2.SageMakerVPC.yaml create mode 100644 1.architectures/5.sagemaker-hyperpod/3.FSxLustre.yaml create mode 100644 1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/add_users.sh create mode 100644 1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py create mode 100644 1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/mount_fsx.sh create mode 100644 
1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/on_create.sh create mode 100644 1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/provisioning_parameters.json create mode 100644 1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/setup_mariadb_accounting.sh create mode 100644 1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/setup_rds_accounting.sh create mode 100644 1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/shared_users_sample.txt create mode 100644 1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/start_slurm.sh create mode 100644 1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/enroot.conf create mode 100755 1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh create mode 100755 1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_enroot_pyxis.sh create mode 100644 1.architectures/5.sagemaker-hyperpod/README.md create mode 100755 1.architectures/5.sagemaker-hyperpod/easy-ssh.sh create mode 100644 1.architectures/5.sagemaker-hyperpod/env.sh diff --git a/.gitignore b/.gitignore index 2ba0f997..278a3fae 100644 --- a/.gitignore +++ b/.gitignore @@ -24,7 +24,7 @@ derby.log metastore_db spark-warehouse -# pytorch, tensorflow, tensorboard +# pytorch, tensorflow, tensorboard, jupyter *.pt *.pt[ch] *.ckpt @@ -32,6 +32,7 @@ spark-warehouse *.h5 *.tfevents.* *.venv* +.ipynb_checkpoints # slurm outputs *.out diff --git a/1.architectures/5.sagemaker-hyperpod/0.AmazonSageMakerClustersExecutionRoleTrustedEntities.json b/1.architectures/5.sagemaker-hyperpod/0.AmazonSageMakerClustersExecutionRoleTrustedEntities.json new file mode 100644 index 00000000..d44297d3 --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/0.AmazonSageMakerClustersExecutionRoleTrustedEntities.json @@ -0,0 +1,14 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": 
[ + "sagemaker.amazonaws.com" + ] + }, + "Action": "sts:AssumeRole" + } + ] +} \ No newline at end of file diff --git a/1.architectures/5.sagemaker-hyperpod/1.AmazonSageMakerClustersExecutionRolePolicy.json b/1.architectures/5.sagemaker-hyperpod/1.AmazonSageMakerClustersExecutionRolePolicy.json new file mode 100644 index 00000000..c606a417 --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/1.AmazonSageMakerClustersExecutionRolePolicy.json @@ -0,0 +1,84 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "logs:PutLogEvents", + "logs:CreateLogStream", + "logs:DescribeLogStreams" + ], + "Resource": [ + "arn:aws:logs:*:*:log-group:/aws/sagemaker/Clusters/*:log-stream:*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "logs:CreateLogGroup" + ], + "Resource": [ + "arn:aws:logs:*:*:log-group:/aws/sagemaker/Clusters/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "cloudwatch:PutMetricData" + ], + "Resource": [ + "*" + ], + "Condition": { + "StringEquals": { + "cloudwatch:namespace": "/aws/sagemaker/Clusters" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "s3:ListBucket", + "s3:GetObject" + ], + "Resource": [ + "arn:aws:s3:::sagemaker-*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "ssmmessages:CreateControlChannel", + "ssmmessages:CreateDataChannel", + "ssmmessages:OpenControlChannel", + "ssmmessages:OpenDataChannel" + ], + "Resource": "*" + }, + { + "Sid": "AdditionToEnableVpcConfig", + "Effect": "Allow", + "Action": [ + "ec2:CreateNetworkInterface", + "ec2:CreateNetworkInterfacePermission", + "ec2:DeleteNetworkInterface", + "ec2:DeleteNetworkInterfacePermission", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeVpcs", + "ec2:DescribeDhcpOptions", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "ec2:DetachNetworkInterface" + ], + "Resource": "*" + }, + { + "Sid": "Addition2ToEnableVpcConfig", + "Effect": "Allow", + "Action": "ec2:CreateTags", + "Resource": [ + "arn:aws:ec2:*:*:network-interface/*" 
+ ] + } + ] +} \ No newline at end of file diff --git a/1.architectures/5.sagemaker-hyperpod/2.SageMakerVPC.yaml b/1.architectures/5.sagemaker-hyperpod/2.SageMakerVPC.yaml new file mode 100644 index 00000000..7ae825a8 --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/2.SageMakerVPC.yaml @@ -0,0 +1,383 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +AWSTemplateFormatVersion: '2010-09-09' +Description: > + Setup for tightly coupled workloads on AWS. A public subnet and a private + subnet are created in an Availability Zone that you provide as a parameter. + As part of the template you'll deploy an Internet Gateway and NAT Gateway in + the public subnet. In addition you can deploy endpoints for Amazon S3 and + Amazon DynamoDB. The VPC contains 2 CIDR blocks with 10.0.0.0/16 and 10.1.0.0/16. + The first CIDR is used for the public subnet, the second is used for the private. + Author: Pierre-Yves Aquilanti - pierreya@ + + +#################### +## Stack Metadata ## +#################### + +Metadata: + AWS::CloudFormation::Interface: + ParameterGroups: + - Label: + default: General configuration + Parameters: + - VPCName + - Label: + default: Availability Zone configuration for the subnets + Parameters: + - SubnetsAZ + - Label: + default: Network and S3 endpoints configuration + Parameters: + - CreateS3Endpoint + - CreateDynamoDBEndpoint + ParameterLabels: + VPCName: + default: Name of your VPC + SubnetsAZ: + default: Availability zone id to deploy the subnets + CreateS3Endpoint: + default: Create an S3 endpoint + CreateDynamoDBEndpoint: + default: Create a DynamoDB endpoint + +###################### +## Stack Parameters ## +###################### + +Parameters: + VPCName: + Description: Name of your VPC + Default: 'ML VPC' + Type: String + + SubnetsAZ: + Description: Availability zone id in which the subnets will be created. 
+ Type: String + Default: usw2-az4 + + CreateS3Endpoint: + AllowedValues: + - 'true' + - 'false' + Default: 'true' + Description: + Set to false if to avoid creating an S3 endpoint on your VPC. + Type: String + + CreateDynamoDBEndpoint: + AllowedValues: + - 'true' + - 'false' + Default: 'true' + Description: + Set to false if to avoid creating a DynamoDB endpoint on your VPC. + Type: String + +############################### +## Conditions for Parameters ## +############################### + +Conditions: + S3EndpointCondition: !Equals [!Ref 'CreateS3Endpoint', 'true'] + DynamoDBEndpointCondition: !Equals [!Ref 'CreateDynamoDBEndpoint', 'true'] + +######################### +## VPC & Network Setup ## +######################### + +Mappings: + Networking: + VPC: + CIDR0: 10.0.0.0/16 + CIDR1: 10.1.0.0/16 + +Resources: + # Create a VPC + VPC: + Type: AWS::EC2::VPC + Properties: + EnableDnsSupport: true + EnableDnsHostnames: true + CidrBlock: !FindInMap [Networking, VPC, CIDR0] + Tags: + - Key: Name + Value: HPC VPC + + VpcCidrBlock: + Type: AWS::EC2::VPCCidrBlock + DependsOn: VPC + Properties: + VpcId: !Ref VPC + CidrBlock: !FindInMap [Networking, VPC, CIDR1] + + FlowLogsRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Principal: + Service: vpc-flow-logs.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: flowlogs-policy + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: + - logs:CreateLogStream + - logs:PutLogEvents + - logs:DescribeLogGroups + - logs:DescribeLogStreams + Resource: !GetAtt FlowLogsGroup.Arn + FlowLogsGroup: + Type: AWS::Logs::LogGroup + Properties: + RetentionInDays: 7 + + FlowLogVPC: + Type: AWS::EC2::FlowLog + Properties: + DeliverLogsPermissionArn: !GetAtt FlowLogsRole.Arn + LogGroupName: FlowLogsGroup + ResourceId: !Ref VPC + ResourceType: VPC + TrafficType: ALL + + # Create an IGW and add it to the VPC + InternetGateway: + 
Type: AWS::EC2::InternetGateway + + GatewayToInternet: + Type: AWS::EC2::VPCGatewayAttachment + Properties: + VpcId: !Ref VPC + InternetGatewayId: !Ref InternetGateway + + # Create a NAT GW then add it to the public subnet + NATGateway: + Type: AWS::EC2::NatGateway + Properties: + AllocationId: !GetAtt ElasticIP.AllocationId + SubnetId: !Ref PublicSubnet + + ElasticIP: + Type: AWS::EC2::EIP + Properties: + Domain: vpc + + SecurityGroup: + Type: AWS::EC2::SecurityGroup + Properties: + GroupDescription: Allow EFA communication for Multi-Node Parallel Batch jobs + VpcId: !Ref VPC + EFASecurityGroupIngress: + Type: AWS::EC2::SecurityGroupIngress + Properties: + Description: All to all communication for EFA Ingress within Security Group + IpProtocol: -1 + FromPort: -1 + ToPort: -1 + GroupId: !Ref SecurityGroup + SourceSecurityGroupId: !Ref SecurityGroup + EFASecurityGroupEgress: + Type: AWS::EC2::SecurityGroupEgress + Properties: + Description: All to all communication for EFA Egress within Security Group + IpProtocol: -1 + FromPort: -1 + ToPort: -1 + GroupId: !Ref SecurityGroup + DestinationSecurityGroupId: !Ref SecurityGroup + EFASecurityGroupEgressECS: + Type: AWS::EC2::SecurityGroupEgress + Properties: + Description: All to all communication for Egress to all + IpProtocol: -1 + FromPort: -1 + ToPort: -1 + GroupId: !Ref SecurityGroup + CidrIp: 0.0.0.0/0 + + FSXSecurityGroup: + Type: AWS::EC2::SecurityGroup + Properties: + GroupDescription: Allow FSX to mount to the head node + VpcId: !Ref VPC + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 988 + ToPort: 988 + CidrIp: !FindInMap [Networking, VPC, CIDR0] + - IpProtocol: tcp + FromPort: 1021 + ToPort: 1023 + CidrIp: !FindInMap [Networking, VPC, CIDR0] + - IpProtocol: tcp + FromPort: 988 + ToPort: 988 + CidrIp: !FindInMap [Networking, VPC, CIDR1] + - IpProtocol: tcp + FromPort: 1021 + ToPort: 1023 + CidrIp: !FindInMap [Networking, VPC, CIDR1] + SecurityGroupEgress: + - IpProtocol: tcp + FromPort: 988 + ToPort: 
988 + CidrIp: !FindInMap [Networking, VPC, CIDR0] + - IpProtocol: tcp + FromPort: 1021 + ToPort: 1023 + CidrIp: !FindInMap [Networking, VPC, CIDR0] + - IpProtocol: tcp + FromPort: 988 + ToPort: 988 + CidrIp: !FindInMap [Networking, VPC, CIDR1] + - IpProtocol: tcp + FromPort: 1021 + ToPort: 1023 + CidrIp: !FindInMap [Networking, VPC, CIDR1] + + + # Build the public subnet + PublicSubnet: + Type: AWS::EC2::Subnet + DependsOn: VPC + Properties: + MapPublicIpOnLaunch: true + VpcId: !Ref VPC + CidrBlock: !Select [ 0, !Cidr [ !GetAtt VPC.CidrBlock, 2, 15 ]] + AvailabilityZoneId: !Ref SubnetsAZ + Tags: + - Key: Name + Value: !Join [ ' ', [ !Ref VPCName, 'Public Subnet -', !Ref SubnetsAZ ] ] + + # Create the private subnets + PrivateSubnet: + Type: AWS::EC2::Subnet + DependsOn: [VpcCidrBlock] + Properties: + VpcId: !Ref VPC + CidrBlock: !Select [ 0, !Cidr [ !FindInMap [Networking, VPC, CIDR1], 2, 15 ]] + AvailabilityZoneId: !Ref SubnetsAZ + Tags: + - Key: Name + Value: !Join [ ' ', [ !Ref VPCName, 'Private Subnet -', !Ref SubnetsAZ ] ] + + # Create and set the public route table + PublicRouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref VPC + + PublicRoute: + Type: AWS::EC2::Route + Properties: + RouteTableId: !Ref PublicRouteTable + DestinationCidrBlock: 0.0.0.0/0 + GatewayId: !Ref InternetGateway + + # Then the private route table + PrivateRouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref VPC + + PrivateRouteToInternet: + Type: AWS::EC2::Route + Properties: + RouteTableId: !Ref PrivateRouteTable + DestinationCidrBlock: 0.0.0.0/0 + NatGatewayId: !Ref NATGateway + + # Associate the public route table to the public subnet + PublicSubnetRouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + SubnetId: !Ref PublicSubnet + RouteTableId: !Ref PublicRouteTable + + # and the private subnets to the private route table + PrivateSubnetRTAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + 
SubnetId: !Ref PrivateSubnet + RouteTableId: !Ref PrivateRouteTable + + # S3 endpoint + S3Endpoint: + Condition: S3EndpointCondition + Type: AWS::EC2::VPCEndpoint + Properties: + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: '*' + Action: + - '*' + Resource: + - '*' + RouteTableIds: + - !Ref PublicRouteTable + - !Ref PrivateRouteTable + ServiceName: !Join + - '' + - - com.amazonaws. + - !Ref AWS::Region + - .s3 + VpcId: !Ref VPC + + # DynamoDB endpoint + DynamoDBEndpoint: + Condition: DynamoDBEndpointCondition + Type: AWS::EC2::VPCEndpoint + Properties: + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Principal: '*' + Action: + - '*' + Resource: + - '*' + RouteTableIds: + - !Ref PublicRouteTable + - !Ref PrivateRouteTable + ServiceName: !Join + - '' + - - com.amazonaws. + - !Ref AWS::Region + - .dynamodb + VpcId: !Ref VPC + +############# +## Outputs ## +############# +Outputs: + VPC: + Value: !Ref VPC + Description: ID of the VPC + Export: + Name: !Sub ${AWS::StackName}-VPC + PublicSubnet: + Value: !Ref PublicSubnet + Description: ID of the public subnet + Export: + Name: !Sub ${AWS::StackName}-PublicSubnet + PrivateSubnet: + Value: !Ref PrivateSubnet + Description: ID of the private subnets + Export: + Name: !Sub ${AWS::StackName}-PrivateSubnet + SecurityGroup: + Value: !Ref SecurityGroup + Description: SecurityGroup for Batch + Export: + Name: !Sub ${AWS::StackName}-SecurityGroup diff --git a/1.architectures/5.sagemaker-hyperpod/3.FSxLustre.yaml b/1.architectures/5.sagemaker-hyperpod/3.FSxLustre.yaml new file mode 100644 index 00000000..2d2021a0 --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/3.FSxLustre.yaml @@ -0,0 +1,100 @@ +AWSTemplateFormatVersion: '2010-09-09' +Description: Creates an FSxL filesystem of PERSISENT_2 type plus the Security Group needed for use with SageMaker + +### Stack metadata +Metadata: + AWS::CloudFormation::Interface: + ParameterGroups: + - Label: + default: 
Filesystem Options + Parameters: + - Capacity + - PerUnitStorageThroughput + - Compression + - LustreVersion + +Parameters: + NetworkStack: + Description: Name of the Networking stack + Type: String + Default: SageMakerVPC + Capacity: + Description: Storage capacity in GiB (1200 or increments of 2400) + Type: Number + Default: 1200 + PerUnitStorageThroughput: + Description: Provisioned Read/Write (MB/s/TiB) + Type: Number + Default: 250 + AllowedValues: + - 125 + - 250 + - 500 + - 1000 + Compression: + Description: Data compression type + Type: String + AllowedValues: + - "LZ4" + - "NONE" + Default: "LZ4" + LustreVersion: + Description: Lustre software version + Type: String + AllowedValues: + - "2.15" + - "2.12" + Default: "2.15" + +Resources: + + LambdaExecutionRole: + Type: "AWS::IAM::Role" + Properties: + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - "sts:AssumeRole" + Path: / + ManagedPolicyArns: + - 'arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess' + - 'arn:aws:iam::aws:policy/AmazonS3FullAccess' + - 'arn:aws:iam::aws:policy/IAMFullAccess' + - 'arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole' + + FSxLFilesystem: + Type: AWS::FSx::FileSystem + DeletionPolicy: Delete + UpdateReplacePolicy: Delete + Properties: + FileSystemType: LUSTRE + StorageType: SSD + FileSystemTypeVersion: !Ref LustreVersion + StorageCapacity: !Ref Capacity + SecurityGroupIds: + - Fn::ImportValue: + !Sub "${NetworkStack}-SecurityGroup" + SubnetIds: + - Fn::ImportValue: + !Sub "${NetworkStack}-PrivateSubnet" + LustreConfiguration: + DataCompressionType: !Ref Compression + DeploymentType: PERSISTENT_2 + PerUnitStorageThroughput: !Ref PerUnitStorageThroughput + +Outputs: + FSxLustreFilesystemMountname: + Description: The ID of the FSxL filesystem that has been created + Value: !GetAtt FSxLFilesystem.LustreMountName + Export: + Name: !Sub 
${AWS::StackName}-FSxLustreFilesystemMountname + FSxLustreFilesystemId: + Description: The ID of the FSxL filesystem that has been created + Value: !Ref FSxLFilesystem + Export: + Name: !Sub ${AWS::StackName}-FSxLustreFilesystemId diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/add_users.sh b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/add_users.sh new file mode 100644 index 00000000..4c123aa1 --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/add_users.sh @@ -0,0 +1,62 @@ +#!/bin/bash +# Creates users from `shared_users.txt` file +# each line in the `shared_users.txt` file should be of the format: +# ``` +# username1,uid1 +# username2,uid2 +# ``` +# +# The script should be run as root user +# see `shared_users_sample.txt` for an example + +set -e +set -x + +SHARED_USER_FILE="shared_users.txt" + +# takes in username and uid as parameters +# if user with username and uid does not exists, +# creates user with uid and creates a directory for user at /home/$username +create_user() { + local username=$1 + local uid=$2 + + # check if username already exists + if id -u "$username" >/dev/null 2>&1; then + echo "User $username already exists. Skipping..." + return + fi + + # check if uid already exists + if getent passwd "$uid" >/dev/null 2>&1; then + echo "UID $uid is already in use. Skipping adding user: $username..." + return + fi + + # create user with uid and directory + if useradd -m $username --uid $uid -d "/home/$username"; then + echo "Created user $username with uid $uid" + else + echo "Failed to create user $username with uid $uid" + fi +} + +main() { + if [[ ! -f $SHARED_USER_FILE ]]; then + echo "Shared user file $SHARED_USER_FILE does not exist. Skipping adding users." + exit 0 + fi + + if [[ ! -s $SHARED_USER_FILE ]]; then + echo "Shared user file $SHARED_USER_FILE is empty. Skipping adding users." 
+ exit 0 + fi + + while IFS="," read -r username uid; do + echo "Requested create user: $username with uid: $uid" + create_user "$username" "$uid" + done < $SHARED_USER_FILE +} + +main "$@" + diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py new file mode 100644 index 00000000..e26f6a5f --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/lifecycle_script.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python + +import argparse +from enum import Enum +import json +import os +import socket +import subprocess +import sys +import time +from typing import Any, Dict, List, Optional, Tuple + + +SLURM_CONF = os.getenv("SLURM_CONF", "/opt/slurm/etc/slurm.conf") + +class SlurmNodeType(str, Enum): + HEAD_NODE = "controller" + LOGIN_NODE = "login" + COMPUTE_NODE = "compute" + + +class ExecuteBashScript: + def __init__(self, script_name: str): + self.script_name = script_name + + def run(self, *args): + print(f"Execute script: {self.script_name} {' '.join([str(x) for x in args])}") + result = subprocess.run(["sudo", "bash", self.script_name, *args]) + result.check_returncode() + print(f"Script {self.script_name} executed successully") + + +class ResourceConfig: + INSTANCE_GROUP_NAME = "Name" + INSTANCE_NAME = "InstanceName" + CUSTOMER_IP_ADDRESS = "CustomerIpAddress" + + def __init__(self, path: str): + with open(path, "r") as f: + self._config = json.load(f) + + def find_instance_by_address(self, address) -> Tuple[Optional[Dict[str, Any]], Optional[Dict[str, Any]]]: + for group in self._config["InstanceGroups"]: + for instance in group["Instances"]: + if instance.get(ResourceConfig.CUSTOMER_IP_ADDRESS) == address: + return group, instance + return None, None + + def get_list_of_addresses(self, group_name) -> List[str]: + for group in self._config["InstanceGroups"]: + if group.get(ResourceConfig.INSTANCE_GROUP_NAME) != 
group_name: + continue + return [i.get(ResourceConfig.CUSTOMER_IP_ADDRESS) for i in group["Instances"]] + return [] + + +class ProvisioningParameters: + WORKLOAD_MANAGER_KEY: str = "workload_manager" + FSX_DNS_NAME: str = "fsx_dns_name" + FSX_MOUNT_NAME: str = "fsx_mountname" + + def __init__(self, path: str): + with open(path, "r") as f: + self._params = json.load(f) + + @property + def workload_manager(self) -> Optional[str]: + return self._params.get(ProvisioningParameters.WORKLOAD_MANAGER_KEY) + + @property + def fsx_settings(self) -> Tuple[str, str]: + return self._params.get(ProvisioningParameters.FSX_DNS_NAME), self._params.get(ProvisioningParameters.FSX_MOUNT_NAME) + + @property + def controller_group(self) -> Optional[str]: + return self._params.get("controller_group") + + @property + def login_group(self) -> Optional[str]: + return self._params.get("login_group") + +def get_ip_address(): + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + try: + # doesn't even have to be reachable + s.connect(('10.254.254.254', 1)) + IP = s.getsockname()[0] + except Exception: + IP = '127.0.0.1' + finally: + s.close() + return IP + + +def wait_for_slurm_conf(controllers: List[str]) -> bool: + """ + SageMaker agents do a slurm configuration. Wait for a signal that slurm is ready to start. + This means that we can start controller, or do additional setup + Function return True if walid slurm configuration found + """ + sleep = 5 # sec + timeout = 60 # sec + for i in range(timeout // sleep): + if not os.path.exists(SLURM_CONF): + print("slurm.conf is not present. It is fine for login/compute nodes") + return True + with open(SLURM_CONF, "rt") as f: + data = f.read() + # check if controller information is present + for ip in controllers: + if ip in data: + print("slurm.conf found. 
It contains at least one controller address") + return True + time.sleep(sleep) + return False + + +def main(args): + params = ProvisioningParameters(args.provisioning_parameters) + resource_config = ResourceConfig(args.resource_config) + + ExecuteBashScript("./add_users.sh").run() + + fsx_dns_name, fsx_mountname = params.fsx_settings + if fsx_dns_name and fsx_mountname: + print(f"Mount fsx: {fsx_dns_name}. Mount point: {fsx_mountname}") + ExecuteBashScript("./mount_fsx.sh").run(fsx_dns_name, fsx_mountname) + + if params.workload_manager == "slurm": + # Wait until slurm will be configured + controllers = resource_config.get_list_of_addresses(params.controller_group) + wait_for_slurm_conf(controllers) + + print("This is a slurm cluster. Do additional slurm setup") + self_ip = get_ip_address() + print(f"This node ip address is {self_ip}") + + group, instance = resource_config.find_instance_by_address(self_ip) + if instance is None: + raise ValueError("This instance not found in resource config. 
Can't process") + print(group) + + node_type = SlurmNodeType.COMPUTE_NODE + if group.get("Name") == params.controller_group: + node_type = SlurmNodeType.HEAD_NODE + elif group.get("Name") == params.login_group: + node_type = SlurmNodeType.LOGIN_NODE + + if node_type == SlurmNodeType.HEAD_NODE: + ExecuteBashScript("./setup_mariadb_accounting.sh").run() + + ExecuteBashScript("./start_slurm.sh").run(node_type, ",".join(controllers)) + + # Note: Uncomment the below lines to install docker and enroot + # ExecuteBashScript("./utils/install_docker.sh").run() + # ExecuteBashScript("./utils/install_enroot_pyxis.sh").run(node_type) + + print("[INFO]: Success: All provisioning scripts completed") + + +if __name__ == "__main__": + parser=argparse.ArgumentParser() + parser.add_argument("-rc", "--resource_config", help="Resource config JSON file containing Ip_address of head, login and compute nodes") + parser.add_argument("-pp", "--provisioning_parameters", help="Provisioning Parameters containing the head, login and compute ID/names") + args=parser.parse_args() + + main(args) diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/mount_fsx.sh b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/mount_fsx.sh new file mode 100644 index 00000000..f376d4d8 --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/mount_fsx.sh @@ -0,0 +1,133 @@ +#!/bin/bash + +# must be run a sudo + +set -x +set -e + +# FSx Lustre Endpoints +FSX_DNS_NAME="$1" +FSX_MOUNTNAME="$2" + +: "${MOUNT_POINT:=/fsx}" + +is_mounted() { + mountpoint -q "$1" + return $? +} + +check_already_mounted() { + # Check if FSx is already mounted to $MOUNT_POINT + if is_mounted $MOUNT_POINT; then + if grep -qs "$FSX_MOUNTNAME $MOUNT_POINT lustre" /proc/mounts; then + echo "FSx Lustre already mounted to $MOUNT_POINT. Exiting." + exit 0 + else + echo "$MOUNT_POINT is mounted, but not to mountname: $FSX_MOUNTNAME from provisioning_parameters.json. Exiting." 
+ exit 1 + fi + fi +} + +is_fsx_reachable() { + if lctl ping "$FSX_DNS_NAME"; then + echo "FSx is reachable" + else + echo "FSx is not reachable, Trying to mount system anyway" + fi +} + +add_to_fstab() { + # Add FSx to /etc/fstab + echo "$FSX_DNS_NAME@tcp:/$FSX_MOUNTNAME $MOUNT_POINT lustre defaults,noatime,flock,_netdev 0 0" | tee -a /etc/fstab +} + +mount_fs() { + if [[ ! -d $MOUNT_POINT ]]; then + mkdir -p $MOUNT_POINT + chmod 644 $MOUNT_POINT + fi + + if mount -t lustre -o noatime,flock "$FSX_DNS_NAME"@tcp:/"$FSX_MOUNTNAME" "$MOUNT_POINT"; then + if ! is_mounted $MOUNT_POINT ;then + echo "Mounting FSx to $MOUNT_POINT directory successful, but mountpoint was not detected. Exiting." + exit 1 + fi + else + echo "FAILED to mount, FSX to $MOUNT_POINT directory. Exiting." + exit 1 + fi +} + + +load_lnet_modules() { + modprobe -v lnet +} + +# create a systemd service to check mount periodically and remount FSx if necessary +# To stop the service, run: +# `systemctl stop check_mount.service` +# To disable the service, run: +# `systemctl disable check_mount.service` +install_remount_service() { + + if [[ ! -d /opt/ml/scripts ]]; then + mkdir -p /opt/ml/scripts + chmod 644 /opt/ml/scripts + echo "Created dir /opt/ml/scripts" + fi + + CHECK_MOUNT_FILE=/opt/ml/scripts/check_mount.sh + + cat > $CHECK_MOUNT_FILE << EOF +#!/bin/bash +MOUNT_POINT=$MOUNT_POINT +if ! grep -qs "$MOUNT_POINT" /proc/mounts; then + mount -t lustre -o noatime,flock "$FSX_DNS_NAME"@tcp:/"$FSX_MOUNTNAME" "$MOUNT_POINT" + echo "Mounted FSx to $MOUNT_POINT" +else + echo "FSx Lustre already mounted to $MOUNT_POINT. 
Stopping services check_fsx_mount.timer and check_fsx_mount.service" + systemctl stop check_fsx_mount.timer +fi +EOF + + chmod +x $CHECK_MOUNT_FILE + + cat > /etc/systemd/system/check_fsx_mount.service << EOF +[Unit] +Description=Check and remount FSx Lustre filesystems if necessary + +[Service] +ExecStart=$CHECK_MOUNT_FILE +EOF + + cat > /etc/systemd/system/check_fsx_mount.timer << EOF +[Unit] +Description=Run check_fsx_mount.service every minute + +[Timer] +OnBootSec=1min +OnUnitActiveSec=1min + +[Install] +WantedBy=timers.target +EOF + + systemctl daemon-reload + systemctl enable --now check_fsx_mount.timer +} + +main() { + echo "Mount_fsx called fsx_dns_name: $FSX_DNS_NAME, fsx_mountname: $FSX_MOUNTNAME" + echo "Using mount_point: $MOUNT_POINT" + load_lnet_modules + check_already_mounted + is_fsx_reachable + add_to_fstab + mount_fs + install_remount_service + echo "FSx Lustre mounted successfully to $MOUNT_POINT" +} + +main "$@" + diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/on_create.sh b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/on_create.sh new file mode 100644 index 00000000..0a4f3d1e --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/on_create.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +set -ex + +LOG_FILE="/var/log/provision/provisioning.log" +mkdir -p "/var/log/provision" +touch $LOG_FILE + +# Function to log messages +logger() { + echo "$@" | tee -a $LOG_FILE +} + +PROVISIONING_PARAMETERS_PATH="provisioning_parameters.json" + +if [[ -z "$SAGEMAKER_RESOURCE_CONFIG_PATH" ]]; then + logger "Env var SAGEMAKER_RESOURCE_CONFIG_PATH is unset, trying to read from default location path" + SAGEMAKER_RESOURCE_CONFIG_PATH="/opt/ml/config/resource_config.json" + + if [[ ! 
-f $SAGEMAKER_RESOURCE_CONFIG_PATH ]]; then + logger "Env var SAGEMAKER_RESOURCE_CONFIG_PATH is unset and file does not exist: $SAGEMAKER_RESOURCE_CONFIG_PATH" + logger "Assume vanilla cluster setup, no scripts to run. Exiting." + exit 0 + fi +else + logger "env var SAGEMAKER_RESOURCE_CONFIG_PATH is set to: $SAGEMAKER_RESOURCE_CONFIG_PATH" + if [[ ! -f $SAGEMAKER_RESOURCE_CONFIG_PATH ]]; then + logger "Env var SAGEMAKER_RESOURCE_CONFIG_PATH is set and file does not exist: $SAGEMAKER_RESOURCE_CONFIG_PATH" + exit 1 + fi +fi + +logger "Running lifecycle_script.py with resourceConfig: $SAGEMAKER_RESOURCE_CONFIG_PATH, provisioning_parameters: $PROVISIONING_PARAMETERS_PATH" + +python3 -u lifecycle_script.py \ + -rc $SAGEMAKER_RESOURCE_CONFIG_PATH \ + -pp $PROVISIONING_PARAMETERS_PATH > >(tee -a $LOG_FILE) 2>&1 + +exit_code=$? + +exit $exit_code diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/provisioning_parameters.json b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/provisioning_parameters.json new file mode 100644 index 00000000..d3eff3c4 --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/provisioning_parameters.json @@ -0,0 +1,12 @@ +{ + "version": "1.0.0", + "workload_manager": "slurm", + "controller_group": "controller-machine", + "login_group": "my-login-group", + "worker_groups": [ + { + "instance_group_name": "compute-nodes", + "partition_name": "dev" + } + ] +} diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/setup_mariadb_accounting.sh b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/setup_mariadb_accounting.sh new file mode 100644 index 00000000..eea7b590 --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/setup_mariadb_accounting.sh @@ -0,0 +1,100 @@ +#!/bin/bash + +set -ex + +SLURM_ACCOUNTING_CONFIG_FILE=/opt/slurm/etc/accounting.conf +SLURMDB_CONFIG_FILE=/opt/slurm/etc/slurmdbd.conf 
+SLURMDB_SERVICE_FILE=/etc/systemd/system/slurmdbd.service + +LOG_DIR=/var/log/provision +if [ ! -d "$LOG_DIR" ]; then + mkdir -p "$LOG_DIR" +fi + +# Setup MariaDB using secure_installation and default password. +# Use expect to for the interactive shell. +setup_mariadb() { + echo "Running mysql_secure_installation" + SECURE_MYSQL=$(expect -c " + set timeout 10 + log_file /var/log/provision/secure_mysql.log + spawn mysql_secure_installation + expect \"Enter current password for root (enter for none):\" + send \"\r\" + expect \"Change the root password?\" + send \"n\r\" + expect \"Remove anonymous users?\" + send \"y\r\" + expect \"Disallow root login remotely?\" + send \"y\r\" + expect \"Remove test database and access to it?\" + send \"y\r\" + expect \"Reload privilege tables now?\" + send \"y\r\" + expect eof + ") +} + +# Create the default database for SLURM accounting +create_slurm_database() { + echo "Creating accounting database" + SETUP_MYSQL=$(expect -c " + set timeout 15 + log_file /var/log/provision/setup_mysql.log + match_max 10000 + spawn sudo mysql -u root -p + expect \"Enter password:\" + send \"\r\" + sleep 1 + expect \"*]>\" + send \"grant all on slurm_acct_db.* TO 'slurm'@'localhost' identified by 'some_pass' with grant option;\r\" + sleep 1 + expect \"*]>\" + send \"create database slurm_acct_db;\r\" + sleep 1 + expect \"*]>\" + send \"exit\r\" + expect eof + ") +} + +# Setup the configuration for slurmdbd to use MariaDB. +create_slurmdbd_config() { + SLURM_DB_USER=slurm SLURM_DB_PASSWORD=some_pass envsubst < "$SLURMDB_CONFIG_FILE.template" > $SLURMDB_CONFIG_FILE + chown slurm:slurm $SLURMDB_CONFIG_FILE + chmod 600 $SLURMDB_CONFIG_FILE +} + +# Append the accounting settings to accounting.conf, this file is empty by default and included into +# slurm.conf. This is required for Slurm to enable accounting. 
+add_accounting_to_slurm_config() { + cat >> $SLURM_ACCOUNTING_CONFIG_FILE << EOL +# ACCOUNTING +JobAcctGatherType=jobacct_gather/linux +JobAcctGatherFrequency=30 +AccountingStorageType=accounting_storage/slurmdbd +AccountingStorageHost=localhost +AccountingStoragePort=6819 +EOL +} + +main() { + echo "[INFO]: Start configuration for SLURM accounting." + + # Start mariadb and check status + systemctl start mariadb + systemctl status mariadb + + setup_mariadb + create_slurm_database + + create_slurmdbd_config + add_accounting_to_slurm_config + + systemctl enable --now slurmdbd + + # validate_slurm_accounting + echo "[INFO]: Completed configuration for SLURM accounting." +} + +main "$@" diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/setup_rds_accounting.sh b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/setup_rds_accounting.sh new file mode 100644 index 00000000..dbcec167 --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/setup_rds_accounting.sh @@ -0,0 +1,125 @@ +#!/bin/bash + +# +# This script sets up slurm accounting to an RDS endpoint +# 1. Writes the accounting config to the slurm.conf file +# 2. Creates the slurmdbd.conf in /opt/slurm/etc +# 3. Creates the slurmdbd.service file in /etc/systemd/system +# 4. 
Restarts the slurmctld and slurmdbd daemons +# +# It assumes that the user has created a connection to their RDS database +# The script takes the RDS endpoint as param +# +# Usage: +# ./setup_rds_accounting.sh +# + +set -e +set -x +set -o pipefail # trace ERR through pipes +set -o errtrace # trace ERR through 'time command' and other functions +set -o nounset ## set -u : exit the script if you try to use an uninitialised variable + + +RDS_ENDPOINT="$1" +DB_HOST="head-node" +DB_USER="admin" +DB_PASS="XXXXX" +echo "RDS_ENDPOINT=$RDS_ENDPOINT" + + +create_slurmdbd_config() { + cat >/opt/slurm/etc/slurmdbd.conf <>/opt/slurm/etc/slurm.conf </etc/systemd/system/slurmdbd.service <& /dev/null" +EnvironmentFile=-/opt/slurm/etc/default/slurmctld +ExecStart=/opt/slurm/sbin/slurmdbd -D \$SLURMD_OPTIONS +ExecReload=/bin/kill -HUP \$MAINPID +PIDFile=/run/slurmdbd.pid +KillMode=process +LimitNOFILE=131072 +LimitMEMLOCK=infinity +LimitSTACK=infinity +Delegate=yes +TasksMax=infinity + +[Install] +WantedBy=multi-user.target graphical.target +EOF + +} + +restart_slurm_daemons() { + systemctl daemon-reload + systemctl restart slurmctld + systemctl restart slurmdbd +} + +main() { + read -s -p "Enter the password to connect to the RDS database: " DB_PASS + create_slurmdbd_config + write_accounting_to_slurm_conf + create_slurmdbd_service + restart_slurm_daemons +} + +main "$@" + diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/shared_users_sample.txt b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/shared_users_sample.txt new file mode 100644 index 00000000..e561b6aa --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/shared_users_sample.txt @@ -0,0 +1,4 @@ +user1,2001 +user2,2002 +user3,2003 +user4,2004 diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/start_slurm.sh b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/start_slurm.sh new file mode 100644 index 
00000000..b41b09ba --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/start_slurm.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# must be run as sudo +# USAGE: start_slurm.sh [] +# - Where NODE_TYPE is one of follow values: controller, compute, login + +set -ex + +LOG_FILE="/var/log/provision/provisioning.log" +CONTROLLER_IP_VALUES=($2) + +main() { + echo "[INFO] START: Starting Slurm daemons" + + if [[ $1 == "controller" ]]; then + echo "[INFO] This is a Controller node. Start slurm controller daemon..." + + systemctl enable --now slurmctld + fi + + if [[ $1 == "compute" ]] || [[ $1 == "login" ]]; then + echo "[INFO] Running on $1 node. Start slurm daemon..." + + SLURMD_OPTIONS="--conf-server $CONTROLLER_IP_VALUES" envsubst < /etc/systemd/system/slurmd.service > slurmd.service + mv slurmd.service /etc/systemd/system/ + + systemctl daemon-reload + systemctl enable slurmd + systemctl restart slurmd + fi + + echo "[INFO] Start Slurm Script completed" +} + +main "$@" diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/enroot.conf b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/enroot.conf new file mode 100644 index 00000000..02b4d98a --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/enroot.conf @@ -0,0 +1,72 @@ +#Please refer https://github.com/NVIDIA/enroot/blob/master/doc/configuration.md for details about individual options. +#ENROOT_LIBRARY_PATH /usr/lib/enroot +#ENROOT_SYSCONF_PATH /etc/enroot +ENROOT_RUNTIME_PATH /tmp/enroot/user-$(id -u) +ENROOT_CONFIG_PATH ${HOME}/enroot +ENROOT_CACHE_PATH /opt/enroot +ENROOT_DATA_PATH /tmp/enroot/data/user-$(id -u) +#ENROOT_TEMP_PATH ${TMPDIR:-/tmp} + +# Gzip program used to uncompress digest layers. +#ENROOT_GZIP_PROGRAM gzip + +# Options passed to zstd to compress digest layers. +#ENROOT_ZSTD_OPTIONS -1 + +# Options passed to mksquashfs to produce container images. 
+ENROOT_SQUASH_OPTIONS -noI -noD -noF -noX -no-duplicates + +# Make the container root filesystem writable by default. +ENROOT_ROOTFS_WRITABLE yes + +# Remap the current user to root inside containers by default. +#ENROOT_REMAP_ROOT no + +# Maximum number of processors to use for parallel tasks (0 means unlimited). +#ENROOT_MAX_PROCESSORS $(nproc) + +# Maximum number of concurrent connections (0 means unlimited). +#ENROOT_MAX_CONNECTIONS 10 + +# Maximum time in seconds to wait for connections establishment (0 means unlimited). +#ENROOT_CONNECT_TIMEOUT 30 + +# Maximum time in seconds to wait for network operations to complete (0 means unlimited). +#ENROOT_TRANSFER_TIMEOUT 0 + +# Number of times network operations should be retried. +#ENROOT_TRANSFER_RETRIES 0 + +# Use a login shell to run the container initialization. +#ENROOT_LOGIN_SHELL yes + +# Allow root to retain his superuser privileges inside containers. +#ENROOT_ALLOW_SUPERUSER no + +# Use HTTP for outgoing requests instead of HTTPS (UNSECURE!). +#ENROOT_ALLOW_HTTP no + +# Include user-specific configuration inside bundles by default. +#ENROOT_BUNDLE_ALL no + +# Generate an embedded checksum inside bundles by default. +#ENROOT_BUNDLE_CHECKSUM no + +# Mount the current user's home directory by default. +ENROOT_MOUNT_HOME no + +# Restrict /dev inside the container to a minimal set of devices. +ENROOT_RESTRICT_DEV no + +# Always use --force on command invocations. 
+#ENROOT_FORCE_OVERRIDE no + +# SSL certificates settings: +#SSL_CERT_DIR +#SSL_CERT_FILE + +# Proxy settings: +#all_proxy +#no_proxy +#http_proxy +#https_proxy diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh new file mode 100755 index 00000000..59d3d8dc --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_docker.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +set -e + +sudo apt-get -y update +sudo apt-get -y install ca-certificates curl gnupg +sudo install -m 0755 -d /etc/apt/keyrings +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg +sudo chmod a+r /etc/apt/keyrings/docker.gpg + +echo \ +"deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ +"$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \ +sudo tee /etc/apt/sources.list.d/docker.list > /dev/null +sudo apt-get -y update + +sudo apt-get -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin diff --git a/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_enroot_pyxis.sh b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_enroot_pyxis.sh new file mode 100755 index 00000000..2d2ef61b --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/LifecycleScripts/base-config/utils/install_enroot_pyxis.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +set -e + +BIN_DIR=$(dirname $(readlink -e ${BASH_SOURCE[0]})) +################################################################################ +# Install enroot & pyxis +################################################################################ +# Modify cgroup.conf to avoid runtime error due to incorrect GPU ID mapping +# 
https://github.com/NVIDIA/pyxis/issues/47#issuecomment-842065289 +if [[ -f /opt/slurm/etc/cgroup.conf ]]; then + grep ^ConstrainDevices /opt/slurm/etc/cgroup.conf &> /dev/null \ + || echo "ConstrainDevices=yes" >> /opt/slurm/etc/cgroup.conf +fi + +SLURM_INSTALL_DIR='/opt/slurm' +PYXIS_TMP_DIR='/tmp/pyxis' + +if [ ! -d $SLURM_INSTALL_DIR ]; then + echo "Slurm installation not found. Skipping pyxis and enroot installation.\n" + exit 1 +fi + +rm -fr $SLURM_INSTALL_DIR/pyxis +mkdir -p $SLURM_INSTALL_DIR/enroot/ $SLURM_INSTALL_DIR/pyxis/ $PYXIS_TMP_DIR + +PYXIS_VERSION=v0.15.0 +ENROOT_VERSION=3.4.1 +arch=$(dpkg --print-architecture) +cd $PYXIS_TMP_DIR +curl -fSsL -O https://github.com/NVIDIA/enroot/releases/download/v${ENROOT_VERSION}/enroot_${ENROOT_VERSION}-1_${arch}.deb +curl -fSsL -O https://github.com/NVIDIA/enroot/releases/download/v${ENROOT_VERSION}/enroot+caps_${ENROOT_VERSION}-1_${arch}.deb # optional +apt install -y ./enroot_${ENROOT_VERSION}-1_${arch}.deb +apt install -y ./enroot+caps_${ENROOT_VERSION}-1_${arch}.deb +cp $BIN_DIR/enroot.conf /etc/enroot/enroot.conf + +git clone --depth 1 --branch $PYXIS_VERSION https://github.com/NVIDIA/pyxis.git $SLURM_INSTALL_DIR/pyxis +cd $SLURM_INSTALL_DIR/pyxis/ +CPPFLAGS='-I /opt/slurm/include/' make -j $(nproc) +CPPFLAGS='-I /opt/slurm/include/' make install +mkdir -p $SLURM_INSTALL_DIR/etc/plugstack.conf.d/ +echo -e "include $SLURM_INSTALL_DIR/etc/plugstack.conf.d/*" >> $SLURM_INSTALL_DIR/etc/plugstack.conf +ln -fs /usr/local/share/pyxis/pyxis.conf $SLURM_INSTALL_DIR/etc/plugstack.conf.d/pyxis.conf + +mkdir -p /run/pyxis/ /tmp/enroot/data /opt/enroot/ +chmod 777 -R /tmp/enroot /opt/enroot +################################################################################ diff --git a/1.architectures/5.sagemaker-hyperpod/README.md b/1.architectures/5.sagemaker-hyperpod/README.md new file mode 100644 index 00000000..e96eb8a8 --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/README.md @@ -0,0 +1,320 @@ +# AWS 
SageMaker HyperPod Distributed Training Reference Architectures + +## 1. Architectures + +SageMaker HyperPod clusters provide the ability to create customized clusters, typically with one or more head and login nodes, and multiple compute nodes (typically P4/P5 or Trn1 instances), and optionally a shared FSX for Lustre file system. When configured with Slurm, SageMaker HyperPod provides resiliency tools to automatically identify and replace unhealthy compute nodes. Additionally, HyperPod has access to SageMaker training tools, such as SageMaker Model and Data Parallel packages, and are automatically configured for EFA. + +The example that follows describes the process of setting up a SageMaker HyperPod cluster with an attached FSX for Lustre volume. + +## 2. Pre-requisites + +Before creating a cluster, we need to install the latest [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html), and setup the appropriate IAM role, VPC, FSx for Lustre volume, and S3 bucket. + +### 2.1. Create IAM Role + +First, we need to create a role with SageMaker and related resources as trusted entities. + +``` +aws iam create-role \ + --role-name AmazonSagemakerExecutionRole \ + --assume-role-policy-document file://0.AmazonSageMakerClustersExecutionRoleTrustedEntities.json +``` + +Next, we create a role policy... + +``` +aws iam create-policy \ + --policy-name AmazonSagemakerExecutionPolicy \ + --policy-document file://1.AmazonSageMakerClustersExecutionRolePolicy.json +``` + +...and attach it to the role we just created. + +``` +POLICY=$(aws iam list-policies --query 'Policies[?PolicyName==`AmazonSagemakerExecutionPolicy`]' | jq '.[0].Arn' | tr -d '"') +aws iam attach-role-policy \ + --role-name AmazonSagemakerExecutionRole \ + --policy-arn $POLICY +``` + +### 2.2. Create S3 Bucket + +Next, we'll need an S3 bucket. This bucket will be used to store the lifecycle scripts used to setup and configure our cluster. 
+ +``` +# generate a unique name for the bucket +BUCKET="lifecycle-$(python3 -S -c 'import uuid; print(str(uuid.uuid4().hex)[:10])')" + +# create the bucket +aws s3 mb s3://${BUCKET} +``` + +### 2.3. Create VPC (Optional) + +Now we can create a VPC. This is only necessary if you want to attach your cluster to VPC specific resources. For example, to attach a shared FSx for Lustre volume to your cluster. + +You can create a VPC using the configuration in [2.SageMakerVPC.yaml](./2.SageMakerVPC.yaml). Which is also available via [
 1-Click Deploy 🚀 
](https://us-west-2.console.aws.amazon.com/cloudformation/home?region=us-west-2#/stacks/quickcreate?templateURL=https://awsome-distributed-training.s3.amazonaws.com/templates/Vpc.yaml&stackName=SageMakerVPC) + + + +Feel free to change the stack and VPC names. Make sure to select an availability zone that supports your preferred instance type ([Find an Amazon EC2 instance type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-discovery.html)). Leave both S3 and DynamoDB endpoints set to True. You can leave the IAM role blank. + +Wait for this CloudFormation script to complete before continuing to the next step (it takes about 5 minutes). + +### 2.3. Create FSx for Lustre (Optional) + +FSx for Lustre provides a shared high performance file system that's accessible across all nodes in your cluster. + +Similar to the VPC we just created, you can create an FSx for Lustre volume using [3.FSxLustre.yaml](./3.FSxLustre.yaml), or by using [
 1-Click Deploy 🚀 
](https://us-west-2.console.aws.amazon.com/cloudformation/home?region=us-west-2#/stacks/quickcreate?templateURL=https://awsome-distributed-training.s3.amazonaws.com/templates/FSxLustre.yaml&stackName=FSxLustre) + + + +Change the name, capacity, throughput, and compression configurations as you wish. Select the latest Lustre version (2.15 by default). On NetworkStack, make sure this matches the name (case sensitive) of the VPC CloudFormation stack you used in the previous step. Once again, you can leave the IAM role blank. + +Your FSx for Lustre volume will take about 10 minutes to deploy. In the meantime, we can setup our lifecycle scripts. + +## 3. Cluster Setup + +Now that we have all our infrastructure in place, we can create a cluster. + +### 3.1 Lifecycle Scripts + +Lifecycle scripts tell SageMaker HyperPod how to setup your cluster. HyperPod clusters can be launched as plain EC2 clusters with nothing installed, or can be created with configurations and users customized to fit a particular machine learning development workflow. We provide a [base configuration](./LifecycleScripts/base-config) to get started, which creates a basic Slurm cluster. Below is a brief description of what each script is doing. + +| Script | Description | +| ----------- | ----------- | +| add_users.sh | [Optional] creates posix users specified in a file shared_users.txt | +| lifecycle_script.py | This is the main entrypoint, sets everything else up. | +| mount_fsx.sh | Mounts an FSx for Lustre filesystem. | +| on_create.sh | Entrypoint for clusters. This script calls lifecycle_script.py | +| provisioning_parameters.json | Defines scheduler type Slurm and sets the partitions up also specifies FSx for Lustre Filesystem to attach. We'll modify this in a later step. | +| setup_mariadb_accounting.sh | Sets up Slurm Accounting with a local mariadb server running on the HeadNode. | +| setup_rds_accounting.sh | Sets up Slurm Accounting with a RDS endpoint. 
| +| shared_users_sample.txt | Sample of how to specify users for the add_users.sh script. | +| start_slurm.sh | Starts the Slurm scheduler daemon. | + +Also note that there are two scripts in `utils` to install [Docker](https://www.docker.com/), [Enroot](https://github.com/NVIDIA/enroot), and [Pyxis](https://github.com/NVIDIA/pyxis). These scripts can be enabled by uncommenting these lines in `lifecycle_script.py`: + +``` + # Note: Uncomment the below lines to install docker and enroot + # ExecuteBashScript("./utils/install_docker.sh").run() + # ExecuteBashScript("./utils/install_enroot_pyxis.sh").run(node_type) +``` + +You can follow this same pattern for further customizations. For example, if you'd like to install Miniconda as part of your lifecycles scripts, you can add the script under `utils` and call it using `ExecuteBashScript` in `lifecycle_script.py`. + +For now, let's just use the base configuration provided. Upload the scripts to the bucket you created earlier. This needs to be the same S3 bucket and prefix where we uploaded the other lifecycle scripts earlier. + +``` +aws s3 cp --recursive LifeCycleScripts/base-config s3://${BUCKET}/LifeCycleScripts/base-config +``` + +If you created an FSx for Lustre volume in the previous section, we'll need to update one file in the lifecycle scripts to attach it to our cluster. + +First, get your `DNSName` and `MountName` for your file system. This can be found either in the AWS console, or by using the CLI command + +``` +aws fsx describe-file-systems +``` + +Add both to your `provisioning_parameters.json` file. 
For example, + +``` +{ + "version": "1.0.0", + "workload_manager": "slurm", + "controller_group": "controller-machine", + "login_group": "my-login-group", + "worker_groups": [ + { + "instance_group_name": "compute-nodes", + "partition_name": "dev" + } + ], +"fsx_dns_name": "fs-12345678a90b01cde.fsx.us-west-2.amazonaws.com", +"fsx_mountname": "1abcdefg" +} +``` + +And copy the updated `provisioning_parameters.json` to S3. + +``` +aws s3 cp LifeCycleScripts/base-config/provisioning_parameters.json s3://${BUCKET}/LifeCycleScripts/base-config/ +``` + +Lifecycle scripts can be reused across multiple cluster. This can be handy particularly if you want to move the work saved on your FSx for Lustre volume to a new cluster. + +### 3.2 Cluster Configuration + +Next we can configure our actual cluster. + +In this case, we'll create a cluster with one C5 controller node, and Trn1.32xlarge compute nodes. We'll use the IAM role we created earlier. Note that `InstanceGroupName` for the controller node needs to match `controller_group` from your `provisioning_parameters.json`, while `InstanceGroupName` for your compute nodes needs to match `instance_group_name` under `worker_groups` also in `provisioning_parameters.json`. 
+ +``` +ROLE=$(aws iam list-roles --query 'Roles[?RoleName==`AmazonSagemakerExecutionRole`]' | jq '.[0].Arn' | tr -d '"') +cat > cluster-config.json << EOL +[ + { + "InstanceGroupName": "controller-machine", + "InstanceType": "ml.c5.xlarge", + "InstanceCount": 1, + "LifeCycleConfig": { + "SourceS3Uri": "s3://${BUCKET}/LifeCycleScripts/base-config/", + "OnCreate": "on_create.sh" + }, + "ExecutionRole": "${ROLE}", + "ThreadsPerCore": 1 + }, + { + "InstanceGroupName": "compute-nodes", + "InstanceType": "ml.trn1.32xlarge", + "InstanceCount": 4, + "LifeCycleConfig": { + "SourceS3Uri": "s3://${BUCKET}/LifeCycleScripts/base-config/", + "OnCreate": "on_create.sh" + }, + "ExecutionRole": "${ROLE}", + "ThreadsPerCore": 1 + } +] +EOL +``` + +And finally, if you created a VPC and FSx for Lustre volume, we need to create a configuration to make sure your cluster is created in the correct VPC. + +``` +cat > vpc-config.json << EOL +{ + "SecurityGroupIds": ["$SECURITY_GROUP"], + "Subnets":["$SUBNET_ID"] +} +EOL +``` + +Your `SUBNET_ID` can be found using + +``` +aws fsx describe-file-systems + +{ + "FileSystems": [ + { + ... + "StorageType": "SSD", + "VpcId": "vpc-0123456789012345a", + "SubnetIds": [ + "subnet-01a2bc3456d78efgh" + ], + + +``` + +Your `SECURITY_GROUP` was configured by the VPC CloudFormation stack, and begins with `SageMakerVPC-SecurityGroup`. Using the VPC ID from the `aws fsx describe-file-systems` output, you can find your `SECURITY_GROUP` using the CLI command + +``` +aws ec2 describe-security-groups \ + --filters 'Name=group-name,Values="SageMakerVPC-SecurityGroup*"' \ + 'Name=vpc-id,Values=vpc-0123456789012345a' +``` + +### 3.3 Launch Cluster + +Now that everything is in place, we can launch our cluster with the command from the `5.sagemaker-hyperpod` directory. 
+ +``` +aws sagemaker create-cluster \ + --cluster-name ml-cluster \ + --instance-groups file://cluster-config.json \ + --region us-west-2 \ + --vpc-config file://vpc-config.json +``` + +You can see the current state of the cluster with + +``` +aws sagemaker describe-cluster --cluster-name ml-cluster --region us-west-2 +``` + +Or list all your cluster with + +``` +aws sagemaker list-clusters +``` + +You can see information on all your cluster nodes with + +``` +aws sagemaker list-cluster-nodes --cluster-name ml-cluster --region us-west-2 +``` + +### 3.4 SSH Into Your Cluster + +To log into your cluster, you need the cluster id from the cluster arn, instance ID of your controller node, and instance group name of your controller group. You can your cluster ID with + +``` +aws sagemaker describe-cluster --cluster-name ml-cluster --region us-west-2 + +{ + "ClusterArn": "arn:aws:sagemaker:us-west-2:123456789012:cluster/2hd31rmi9mde", + "ClusterName": "ml-cluster", +``` + +In this case, the cluster ID is `2hd31rmi9mde` + +Get your controller machine instance ID with + +``` +aws sagemaker list-cluster-nodes --cluster-name ml-cluster --region us-west-2 + +{ + "NextToken": "", + "ClusterNodeSummaries": [ + { + "InstanceGroupName": "controller-machine", + "InstanceId": "i-09e7576cbc230c181", + "InstanceType": "ml.c5.xlarge", + "LaunchTime": "2023-11-26T15:28:20.665000-08:00", + "InstanceStatus": { + "Status": "Running", + "Message": "" + } + }, +``` + +And login with + +``` +CLUSTER_ID=2hd31rmi9mde +CONTROLLER_GROUP=controller-machine +INSTANCE_ID=i-09e7576cbc230c181 +TARGET_ID=sagemaker-cluster:${CLUSTER_ID}_${CONTROLLER_GROUP}-${INSTANCE_ID} +aws ssm start-session --target $TARGET_ID +``` + +To make this process easier, we've included an `easy-ssh.sh` script that takes your cluster name and logs you in. + +``` +./easy-ssh.sh ml-cluster +``` + +If you used the base-config lifecycle scripts, you should be able to log in as ubuntu and run Slurm commands. 
+ +``` +sudo su ubuntu +sinfo + +PARTITION AVAIL TIMELIMIT NODES STATE NODELIST +dev* up infinite 4 idle ip-10-1-4-190,ip-10-1-5-138,ip-10-1-18-53,ip-10-1-20-15 +``` + +You'll also find your FSx for Lustre volume mounted at `/fsx`. + +### 3.5 Deleting your Cluster + +When you're done with your cluster, you can delete it down with + +``` +aws sagemaker delete-cluster --cluster-name ml-cluster --region us-west-2 +``` + +Your FSx for Lustre volume will retain anything saved to it, and can be reattached to a future cluster. \ No newline at end of file diff --git a/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh b/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh new file mode 100755 index 00000000..0d98a828 --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/easy-ssh.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +(( ! $# == 1 )) && { echo "Must define cluster name" ; exit -1 ; } + +cluster_id=$(aws sagemaker describe-cluster --cluster-name $1 | jq '.ClusterArn' | awk -F/ '{gsub(/"/, "", $NF); print $NF}') +node_group="controller-machine" +instance_id=$(aws sagemaker list-cluster-nodes --cluster-name $1 --region us-west-2 --instance-group-name-contains ${node_group} | jq '.ClusterNodeSummaries[0].InstanceId' | tr -d '"') + +echo "Cluster id: ${cluster_id}" +echo "Instance id: ${instance_id}" +echo "Node Group: ${node_group}" + +echo "aws ssm start-session --target sagemaker-cluster:${cluster_id}_${node_group}-${instance_id}" + +aws ssm start-session --target sagemaker-cluster:${cluster_id}_${node_group}-${instance_id} diff --git a/1.architectures/5.sagemaker-hyperpod/env.sh b/1.architectures/5.sagemaker-hyperpod/env.sh new file mode 100644 index 00000000..e4b63502 --- /dev/null +++ b/1.architectures/5.sagemaker-hyperpod/env.sh @@ -0,0 +1,5 @@ +export BUCKET="lifecycle-82d7af9cbe" +export FSX_DNS_NAME="fs-07f690208828a3e9e.fsx.us-west-2.amazonaws.com" +export 
FSX_MOUNTNAME="e7uexbev" +export SUBNET_ID="subnet-05d6ab3130f10bfdd" +export SECURITY_GROUP="sg-01c8d2e8ce26ff0f0" diff --git a/README.md b/README.md index 6441009e..db1f7d56 100644 --- a/README.md +++ b/README.md @@ -26,6 +26,7 @@ Architectures are located in `1.architectures` and consists of utilities and ser | [`2.aws-parallelcluster`](./1.architectures/2.aws-parallelcluster) | Compute | Cluster templates for GPU & custom silicon training | | [`3.aws-batch`](./1.architectures/3.aws-batch) | Compute | AWS Batch template for distributed training | | [`4.amazon-eks`](./1.architectures/4.amazon-eks) | Compute | Manifest files to train with Amazon EKS | +| [`5.sagemaker-hyperpod`](./1.architectures/5.sagemaker-hyperpod) | Compute | SageMaker HyperPod template for distributed training| More will come, feel free to add new ones (EKS, Ray?). You will also find [documentation](./1.architectures/efa-cheatsheet.md) for EFA and the recommended environment variables. From e539573e33ca3a0f8e2551a74b6d931f4a2ee105 Mon Sep 17 00:00:00 2001 From: Ben Snyder Date: Wed, 29 Nov 2023 10:43:47 -0800 Subject: [PATCH 236/648] string cleanup --- 1.architectures/5.sagemaker-hyperpod/env.sh | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 1.architectures/5.sagemaker-hyperpod/env.sh diff --git a/1.architectures/5.sagemaker-hyperpod/env.sh b/1.architectures/5.sagemaker-hyperpod/env.sh deleted file mode 100644 index e4b63502..00000000 --- a/1.architectures/5.sagemaker-hyperpod/env.sh +++ /dev/null @@ -1,5 +0,0 @@ -export BUCKET="lifecycle-82d7af9cbe" -export FSX_DNS_NAME="fs-07f690208828a3e9e.fsx.us-west-2.amazonaws.com" -export FSX_MOUNTNAME="e7uexbev" -export SUBNET_ID="subnet-05d6ab3130f10bfdd" -export SECURITY_GROUP="sg-01c8d2e8ce26ff0f0" From a371efe87a8f43f5c6daa02a8374349de59fbe5a Mon Sep 17 00:00:00 2001 From: Keita Watanabe Date: Thu, 30 Nov 2023 00:42:10 -0800 Subject: [PATCH 237/648] add --exclusive flag --- 3.test_cases/8.neuronx-nemo-megatron/3.convert-weight.sbatch 
| 1 - 3.test_cases/8.neuronx-nemo-megatron/5.precompile-model.sh | 2 +- 3.test_cases/8.neuronx-nemo-megatron/6.pretrain-model.sh | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/3.test_cases/8.neuronx-nemo-megatron/3.convert-weight.sbatch b/3.test_cases/8.neuronx-nemo-megatron/3.convert-weight.sbatch index bb5e274d..119171f9 100644 --- a/3.test_cases/8.neuronx-nemo-megatron/3.convert-weight.sbatch +++ b/3.test_cases/8.neuronx-nemo-megatron/3.convert-weight.sbatch @@ -1,7 +1,6 @@ #!/bin/bash #SBATCH --exclusive #SBATCH --output=slurm-%x-%j.out -#SBATCH --cpus-per-task 96 #SBATCH --nodes 1 : "${APPS_PATH:=/fsx}" diff --git a/3.test_cases/8.neuronx-nemo-megatron/5.precompile-model.sh b/3.test_cases/8.neuronx-nemo-megatron/5.precompile-model.sh index 48367ac4..52999ebb 100644 --- a/3.test_cases/8.neuronx-nemo-megatron/5.precompile-model.sh +++ b/3.test_cases/8.neuronx-nemo-megatron/5.precompile-model.sh @@ -1,4 +1,4 @@ #!/bin/bash cd ${APPS_PATH}/neuronx-nemo-megatron/nemo/examples/nlp/language_modeling source ${APPS_PATH}/aws_neuron_venv_pytorch/bin/activate -sbatch --cpus-per-task 1 --nodes 4 --output ${TEST_CASE_PATH}/slurm-%x-%j.out compile.slurm ./llama_7b.sh +sbatch --exclusive --nodes 4 --output ${TEST_CASE_PATH}/slurm-%x-%j.out compile.slurm ./llama_7b.sh diff --git a/3.test_cases/8.neuronx-nemo-megatron/6.pretrain-model.sh b/3.test_cases/8.neuronx-nemo-megatron/6.pretrain-model.sh index 07c5eb84..ae4689df 100644 --- a/3.test_cases/8.neuronx-nemo-megatron/6.pretrain-model.sh +++ b/3.test_cases/8.neuronx-nemo-megatron/6.pretrain-model.sh @@ -1,4 +1,4 @@ #!/bin/bash cd ${APPS_PATH}/neuronx-nemo-megatron/nemo/examples/nlp/language_modeling source ${APPS_PATH}/aws_neuron_venv_pytorch/bin/activate -sbatch --nodes 4 --output ${TEST_CASE_PATH}/slurm-%x-%j.out run.slurm ./llama_7b.sh +sbatch --exclusive --nodes 4 --output ${TEST_CASE_PATH}/slurm-%x-%j.out run.slurm ./llama_7b.sh From 8c967c3c4891f699f5a4751dcb3aff2867d3a321 Mon Sep 17 00:00:00 2001 
From: Keita Watanabe Date: Thu, 30 Nov 2023 00:42:34 -0800 Subject: [PATCH 238/648] add --exclusive flag --- 3.test_cases/8.neuronx-nemo-megatron/4.tokenize.sbatch | 1 - 1 file changed, 1 deletion(-) diff --git a/3.test_cases/8.neuronx-nemo-megatron/4.tokenize.sbatch b/3.test_cases/8.neuronx-nemo-megatron/4.tokenize.sbatch index 1edf5e52..e08f1b62 100644 --- a/3.test_cases/8.neuronx-nemo-megatron/4.tokenize.sbatch +++ b/3.test_cases/8.neuronx-nemo-megatron/4.tokenize.sbatch @@ -1,7 +1,6 @@ #!/bin/bash #SBATCH --exclusive #SBATCH --output=slurm-%x-%j.out -#SBATCH --cpus-per-task 128 #SBATCH --nodes 1 : "${APPS_PATH:=/fsx}" From aa2ab232b5c7200f6b3aed28409975f5b2a1c1e0 Mon Sep 17 00:00:00 2001 From: johnbensnyder Date: Thu, 30 Nov 2023 07:34:12 -0800 Subject: [PATCH 239/648] Added hyperpod readme images --- 0.docs/fsx-lustre-template.png | Bin 0 -> 294548 bytes 0.docs/vpc-template.png | Bin 0 -> 330494 bytes 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 0.docs/fsx-lustre-template.png create mode 100644 0.docs/vpc-template.png diff --git a/0.docs/fsx-lustre-template.png b/0.docs/fsx-lustre-template.png new file mode 100644 index 0000000000000000000000000000000000000000..793d16f4ebf2c07c42b4b9fd50c1bc1d053317c8 GIT binary patch literal 294548 zcmeFZXH-+&);0_%f}j+oDOIr1oAj!PprJ{xp$SM0NN)j@q7<>*1f;46NbemIIw(S< zmjIF8dkGMdd^?`|KIa+l_e0LVcZ~O8jEroOz4zK{t~uv5=QZcbD;+Jhi&R&sNJvO7 zK2*Q2OF}|LM?ykDMtKhS#AjX>3w)68sG_3tP(_7H=NZJ_(Z!C0MEzAlB8ATTF{aQ> zLQYipyKqYN^NkN7q+h~+-b2(+^vToNfDKoKt9%UkD z9lluh^7g7tbL4ky*tHt$LZj5?k5m~;tzL!22{}w}Dkn*qn2~^1R03%i-Mo4Wu7Rc0 zWBbAc7Z(ZDN3t`a3p-`)1tle1pRHryi!0>M%>Lbz*zlqBL>Y8P_B zjw^_)k6FI-It9rfxUCp*T}{53opRv1`U^^omirC^t+xBj^~~lc8c(EIt*?^!T}tGC zcrLU}`6SjHEcPyH?o~AbJ`u+ z&!PvFoRa;=)(#yPJR8r;=N2XOMvUN?*tN|A{<*G^ zg)jGyyD`0K%a&X=J##-^Cty05{jfjU?thzYmjUmX&g_Z!H*igAZy4w``MY6>?p>5ld*779?f4=er@I0+xB4e!;IY}>t~N>UPLsAkiOuVG3m7) zHARh+ifn^(idxN%=j6hdj2wbiUy{_k>geo5*$0JXkV=1|CV4D5E51RhOkayhtls(Z 
zZh{v*XVyZ(twVO_BTYq^q6jHd6O%NV(XaDC$|RKJZ@Fj$$mHLUiM{Df3}v*sc>8R- z74<0T_q&Xvv`$U*?icR7kaXuTCmU(fbf=I%D<(n$eonu|5qp;|{6!ZXqi&QJmr55~ zSwzwsxw$jscc*pO{vx@d!mUnK7&?5nUFW<$&H4+cPnI9`e)=y_4PKm#I{tWV>Ehm% z2hG=$&N(+fC}I2&ZsN|xc3wFA{j`8Itx%{`vz@y{5R2P$Wb4Q;xsnUe^YNca9+9f5 zEq@l!(O_U@jE+{j@#*?kgRk8Bym?oLLY-8-Kf8T;^3{6q)*U8IRq>xpPmL;+WT|>o z96lL+lKR};C$V(Xi{hB6j#lA0KJuFF9nl3A<_i>;V;*)0G&9&d5~UEmCCV)7D{>|Y zV=MrBEzu!sjVtvS)n9wI_F&D;h39~+iWL>l-ahTVBJG*VD#Xy)KL3l#quxEr!@)zL zhIB%$fecImiNUmY&Z-_(9_j|M<1fG|lwNZ3+`fF_yim9fSw=~0$p`n)pO0$pvT()H z8@%keFJ*IGG@&Na_<;)-__`uD+(1X?r=GiEQVy4Xm(guK={$FnPukO$U)>YqDSb$- zuG@dJ|60FHe{jE<*aw&U2iH5_dv>LCRnAe*-I|M!)>j3ozUH>{sI-Z&d2bUx!++!X zyHA%uj&F6-b#wbYvREIrem`r+V^<|+j(3te9tV(|MuA@&=(awQnb7rEmLejG&VkAH!&aDNJCJN<>UwaI9^)G*A%$3NSe2gvV z)xkX0)iSvNdZpZKsn1h*hAnLWtqI30lQTs+^V{Ky-G(Nca%+R$_>qeFO zRwK1rYOicx%)WUaruSSgYe3;<+?9@ru7--diJS@ zU#e-ZdZ=F0_t4kMx>onC{C^E_m2*ARaTZ}?qiYa6eEib-T>B7NcFQh1ow>nu0$~cl+pcQa8)~Wfmac?#P#0)n1QhGbMZ@K+ZOl?g3 zEvrj5msT#p-BoIbCi?Dc+1Slc#`mDU1%AWKK^AGxPn^B+y!m?dmv>(lzqx%wy^GS7 z*L|cH8R24GyXBo1;$*NARq?Yz#c_67bUE(IVQhG;9(VL@&B3?1X|;FC&0K_p7`n^M z%Y2^1sgvEHDF3t;F(xQ;=ZQ$2ki=U&*Lt@%vakzRb3NeOb`~E<6;v!NZdPSIW|a<^ zLU#9c`*)*^mWqtaKSORf&%+IV=Cv8D=kFUa7jSf8x{FOz%->fnR96l=gNET|%;iOu z%K2P~j|2(?=L$6=^^FM3aD7qnF+DDGtF!2$537&sp<$bFovDk3x#@`=4`g;I!v)HR zbqvz$&o1hDoq};`cB6N#tsk;=)_nEoA^0Jo>X)k~>Iy2R3V|y1>rBwa_UEOwrS+$k zm3=IOS)8k{S~~{(2Q;T8|8uEs`7@RIsL7K_g+O`C9eqawMx)><{s~k)5@M-W`Ph3I zx`v;?A!+V&E}eWh@?5_g_oAHw!EuTG-B9?5*P!gs@UY{sYf;o{*1k#(_d|^>V_k+*>ipAG7T=d3tzz%3w9H4L#DF zaWe}uZM@Ub>LAce7P|3};C7JEo>D_OOSQK-$QUhJ!v`=Tl>!O%2 zW?{>nMz2rrhx`x9Q1ey%LPlB^c3@Zdf?wfsUF-gPgv^OgS$Apva^yf9ETvb*~&vw2uOZ!fS zKEdrcF!RS_pE_jy=JCMG99pbDPMFYzpY2v|7T+oFznj)PzF37sMFvH=!IKC!zp@VJ zN8P70_(BZfV*cJU!bn6>eB0{_4X_4|kn-)qW0cC~8oDw*GEUJyV;j1r(x%U>QDhJ>Lx;v8b*V`?vL%-s&X$Ds9y>{rhq)ME zfbPd!Na#Z{!mAwnjfWm*Kaz@Gwx!3ik*Lv2d{=wMBP)7IHGpiiSF32Cd~+^U@tW~m zj9oG2>?FEMg!4ot+x~F#7@tFa*<`(0X&ph5*hIn76J){wM6c0y#t-c^HA(n@V@i^< 
zq*qDEfg@7jrAW&9@8bugw@J?Y{+x`2B+8NG?BC~T0q?}WH^7T{&A;Byyo)5E0RFoK zy!<|r{c|=I-N!Tk98&=IAtAY|r}FS2@UCb3%+Aiu^C`p&$!E_Ed~n`f-NciGgy{zH zMfygDAwD=h5m>nr3dE(CezAS@~)BO@##CM+f<2%I74>F?%c?I-Bw$@8z9{BxiCcAmD+ z9NoPfA#Pm6`&!#TyuB2-xrqb)`|n@RY3JwopOM@=|DF~wL1E%I!lFVV!vDTEaH%}; zQ&}BHKRXwb`;M-F%z!ZzMWtl!$p60J|M%5@hWsCw8vo}~F$pQj|9R>E`0D?>)WFm3 znF_=e7}QJgKMVHv#sBlmzb}**CeHnT$l_mu{{1N+X+Y>xM} z^?~=_>On$sCJZ>f{jYc6n9TI%y6<;Dt&~X~-oLBwN4hrYoysv(OR;t9p8F+r%CpoP zG|d)|cxhCl8FGwExda(%%#&@Ogtw{RN&{hvXwK^d2PG%X=q8-Gy>KOS-Y`!`P6qfZ}Kr~x(U`> z>>%Dj7T$CIjHH8BK-WTVKIY!>>1F{8)pW@BbK;k!=YoWTmL3h_w(@A|M@v1={7h~FH?B(hq`?kyoTYrU8nFwzRxpv)n%n1KA0o#*i9gCeXi67%Rq2W95tbrp((ekh(=Tr8I4 zSE;uQFo~S;e zqmTXzHM&Rtm3bzAga37mYe01*E;7l|HKY|KHEijorjEA9 zmED+C_L0?mR;~i(e*R780L1n$D-Up~CapePDd=!3Iq^29_O8U~RpHus{V1%iP*E`U zAj{}Br|?0_lq9^u!vkFBcLen)-+?ESST{C-{Vf}Dk?;iyiWp~xux2mwlkRlsEGi!4 z!(ran#;Ilvv4W>xu8gzaRRXdWzn6KK$O>g0@5GuX>z#mVYq)icQ&A1;U%9MB5z|aZ zX**_#m__y@o;$HdXDL@BcE4kzaW*MJ!zE}s4uRwD>UnJY$)Hr8lFi}E>9L4@!Xjay zL{ne&jY!&8`hVNf9}dFK>u|-dX8p2)z3#RRA9E{pMHdC9^J)|)v_RVtGc}`3cbj%x zTk&7%tVOGX%o;OZ>veCwCqH_?gzc8P$iqXZraa1OwLv^HGx077Dwx7=O)qos*ceyo zZ+8_~wR#Wm%=tIpnR4&hb8naE$Nd)z^NEV#LrwHgOO6k$SFUOZ!{(P{=RmC0-F%m_ zo2I)yT5>451Srwog7xtC_6UpZ$hJdEd-&;wsaFy(jMxO!c;1Wo*?FvE?u^$29hb{j z=}|f}7s{JA-|UsK%lX98u*SoYyKDFA6mRO5= zE&Y_&ODyxg_^E|~E>@lKydLO7U8IXep3dlmi1ktBgET#3#t~k22igR&SjIw1LRPO0 zBFKybpT8z-nOt*CM4Hi{BquaPP?OP7lcUte;=H6>weXk+*upP&KTC*Vu zdv({U>&-P$t6v{k#QfAvN2QQsmD*S8_k^kGCD(Fnf-VMsyUxiflXFsIjj(Q55=IZN z8BtKiC|e~PuO&kATVEvD^4=NMaTQ5Zaxt#TzzauE=&23g{P4wAL-gp$y%!hR@QHuK z;eV8Fj*M4hG^DACMeNB_Xe!ly7M=Amf`R-CIK2;Eq5g7VN?QpN z-)v&gGI*8jxugGV6wa%WgyTQXuU=*vb61=yvDCliGP)3~ zO5Uf?%hvt6?e853cTVlfls|ftohh@6 z8|d#)Eh-MKD)7Mite3Yy6S}Wd-?^V;#3dkP3PGiAS;<01OQzorvHds`4w%AA$9gZf z$rRa`Q%{oaEB#>+8UyHkiM({u5XTRG`s|-pNAEfW&(l}2O1g}89X}s3pwRQJmoH5s zd%lm)`u@V-PXxu3Kyhn*kT*L$;CnK~$lX)pDCLP*7-z_d%(;sksg~CbxYmBcgW)#r zde>SQ_@G2(>nxx(fZ&f=-dn+TTEa_SF zx6YH4I_^`$0GH0SEl&ces8v%_Lou$)E*9ncl+zIraAo#!r`>e(onl$#QM#K22rrMv 
z-XgA1aprJoK?`|RD%w1G<82SN(|d$c-Zy;o2grNjxm4Cv=dtA_5kD!Pnr$%`b-Q)H?zakvzaS*xhFL}tYT<95J=gkXYNv+e!u z?QwP9X+Gu+DS@Y!9VwY6We~2%H>)Ld9&{zA$*NJr8vd7uX^V8k8CLvIvEpcnm&adZ z;f@>ndRp}FwG~Y+$J@RPr}xs`xXQ<4CF9Av*L_*LcV=hOCSII=z^9SQh|Q3fz>OCe zDde6wU&+aeRU<#Qs?RMz7H;yOgj;!3=WWR~=_Cru@?TaemTg`GKE05iv5TS+ryqoc zf-G10S2a2~BjQ~7yXRwsFOR~Hs`%86sTd{CQu4RH*w3fUCb6CyqL9-T-$Wr3C{@y_X{7lqm)Gr2V#j=^K~0?Kd-AXH$GOHe=lyWrzH!wciX2I`4V< zfK!^`b{?I5Zj*rW=rv2%n&7Hb5A53^O3)BRdAN0b0BOq(Rd{RU#h7W67I(mZ`tLUj zrR7|SDxNVsy}Bxc&K?I7RKO7>Dp3c0ih_3LRpB10Z``+jT{9tXp*P8DuDHMbvL6w- z@&fq(E#L))vvvnfUb?@7$w$?mew-$01^{x^_pta*^`Ubybk7(Q+p+Z@<+h8>PLtOI+{NYbN|(J*Tm3$@KT3rlDqzX zv-KEHMT{Fv7*C$jl6z`FTTM^}r3u=yjos^5NBMTaL^&8a1R^NA&xEV`i4-eJyBJ^4 zHlQr03TKzCh+ocL7jw&)nEWqwLs{eD+x9X22_T|pRq;vMhmyHF$aeR7Kk9| zc_KTnzt^~wmh+)*h<5t^=@*drAvw=glck)q&SGV9!9E2i5niDcnJ)6ETJ(_8c= zFGce2CAsD#+3X~VVRKHMQIL{dj%-5WDDD&1EU`gHGm&<&fDP$|x;KS}nSkBze+ikt z+hPrPw~8RZjoGj8+V(%Zl_oMZs&{IZhLlY7>Ccac)14W+b}@2Z>IRf3BOz;ZVP#_h zOY^f-IqPDYUTdTDrzae^M8YU905}L=#i(#x)c`U3cJb3ohEwhWNJ(N%&m_P{pG_5h zF__*WXZ2lzh>KEKLs{qbQ>)%t-E&(h5Ud?(OP6>kAZrdbjc(>dVQcfVlrgR_U?1vj+(ZLZ^;HzFvCj6Iw!f zi3SPaSP0hUAn{7g=F~w({J*CE*@s$1;A*r?rJ7oE*H8x0R(!Pvjd;twj|EVF0mKzov?+hH$QrSH}3@u(T|1D+|W0 znAilW#?F$qR2_S6fln(6ky1)ZIxPXq@xW6XMwl2WX^Yr}`qK`fY-K}F$CYXf-(!Iy z@4h(KdBp+8zfp(9vxrzJ3p5VScq~~S#9wh(Qhc!C)r=61cgP4XT`-N}9G0_B&sEbz z(2xz0VfPgU&e5>x_bIeq80hR%W7F+hJeBS=wefKQGKL;m9!`afx>eU491{%9?Ou;L z#4x8rMSwgZFB(70Hdqg>)2hRNju9A52FW^bUrx$Kv271%D;YKHH|+jkgfyJoSIszc zs`b8{-*lgmYr=tm=s;~YNR2}#i!ADsayw&1Hyb=VgT@f5Okl2s)Xc@F$Q0w7K}QDY zB~jEpZ%fQ~5Hd!|rPgt>6y^mm%lEq{0suYT*WWvpUu3x<@|c`5u&~5O9&5f{R+A~c zvYDkkT@`W|oa)u`*4E|K)Uj`}NPs1De&5(-*vICe_&GDSfQzk8Qm0)JFXcLv!c{4_skKK6Wx_HLVoa8>mW^kp(lSk=a6SV{B94^k!_AFVrZS1oO z=DL%-Ra!IedRb*0ClsaEW45@v-T3vdHz<$V^O`(lIN|Yr?-wqFeiYCcTGaQEu=$FL@!KNLo(tV_pyQ2~QU^k6>KdaCecLt(bDs##_@y ztUlUTgENQADT;=@W6v(70Swx#9fQfWX-c8N9zCM;TpEaxAi+IZboGqA?XuN9^AL}xRp(NG^b}3YJG*`r~XK%==UEq+fnUNCd{vqD3 zwh7niOXOpMAyb}x=d~G0tIAK>omR_eQ;C=7&{jOs0DxbVW~I^hi}R3k^~q+h-@+L4 
z%+SbG7rybzCLDA3hqMaD_-XS|S;%hZJ?b7#GgtUS&HXm7LTyEyfZHLmP$DtSrRcaoV_ z;lx<}XwY+eZC7}KwBCR513yy|B>ix}O|EUHTZkQTT{#Jsfk&_VcIFqu>5HHE6gT4S zq1dB^G`R)jM(LUuX#QMgVDF9t$0VMD6Urtv<>lP4HN(F>=3v>jXc=@RBN(rGveg_LK=`x16I81PLsoq~Ws^*-ONRh+$cNe#hErJ<}5!;4e7S%CnK|9}UvN^;T z2!^H}W1~UkaoiDP-6$l$iZO!&Zd6S|G3to!onhOKD?4a=f6H;gBxEFdukgFsaSs~P z#N**`U@_pm%|@#8O7;uGZ(^)?t-O+ z2+L}Vd*iFq@bCd~mx{aHaDztd63?GwK#dE4<&GJ-@hK-k&r%Cj*)+V30Oz56E#a5b z%szahlFxs!vNK_`L@ zWQBZ+>efz1onkb1TJ4NXZ>2*Qj%>bG;HFnGwt2bP0_=&KUqlw*6l_-)E!-XbeS4r( zguQjFru@|6a&jpE$0au@jWix5bm_KtwZ_X{BLcqH$fQtegfLPLJ7{cO3-<0zLHTbW zgkHsn=QHfNxPeIP#I=>KL9ryM$! z%s=kpOi%UHt0%+neD>!F(SqexW=@-p1Zy_W30{wGS6@}CR}wi{KV7JF(!Dz#`geJF zEt*4z_Dcz2A>A3ZxwXrXwHa!{l~sr7($5A2g={>7Y2fVqcOA+Ou!nCnG_IckN0<+sPRc9dfdJasnyKMRyMB zX)_Y=BOgk~Zmvpsj~(K%oHL;>XyRjpS%c?C7!KrDV=tRAXt?YwoI3?Q;i|73DuOM^ zq^1+9+RO9zE?Gf0F>@kdM%4VrtK=}ZI((V@mju3*1fyF5y}2}W*^exMh^)YEt5`ZG z00j@mjye=u%(fu_=N_@H9eD7X8XxSxzbD^NxtQTou&m9_`%-duEoM-kS%Wkispxm( zW{OMyNdsV{SSU`@1wN!D!8o>}c>i)*966=leEqu5C=W;Q$+w%mMQo2qkpvtjPrdtb zb(LV*-1p3j`a|^xcTCzV5;Fp&i$5Ddg1mxxQ!D#dR)=sFl0I&aCBzM4LFgKZLV4vR zK6AIy{ocY_jWq;v!(-)J=bZqzymK{JFh$3@?-Xh&btP;-OGfgbn~#mDa0+cpAIZPK zfLiyN5HD&tj+`l}Nb~B~^1!T14W9J^T}~d&W#-ZdL+kellWPMoHQqLs85#X^`fGY* zW9hV|FaTs#3oHDK1~>CFK9$b)NTEbBGFtL_9G4No#fUw>A3uYS>jpS~@WI>=15^Gr zbsOQbX#-lQ8{pvSz6K}fH0x9Gk{swAn45Oh*waL&NpPT&7uR~h`Q;5vdjD8){1MhZ&5F@~Dhs}J~ zYxcAGV_KtKZAm|`(CLBBgqyjQgUmx_1sO3;J$G$MTZV?mzZy)pF)Rff)v7&e?t&@m zIRgQeBCk4(EVj_m%(dRx+_$wyP}Vz$iZ1)-s;!9uLf^W$E(?)xBhiA#Y#0-_^66&0 z87r6kd`0=}8PXqfBt6gg&}%W`}J;8#yr zvDo68ZDZM|0{X|AO!rcKKiSO!ja!w6y~`*Ik8ibtipcYS&PT#tsS?$^Uz41&7=EBs z+H+7pa>8L~>77-xSwzs^8=RsdK?Q!O41^psi*# z55kN1;lj2A7>$-e12H{W*4l?zrUTL2>?(39k2P8D;81V2%T|jo&2nwef38>Ey6bZ# z5s@Kan-4>)=XeHmTUHAiAN&wUrf#=0TecHJK|Q-Q|?~~5) zX|`cUi*cLB`!Qw^XXEGlIT_HRq<}s8sJ7{-gQ6wpjk`ch1Tx97`@)A!0K%8`f?iT$GP+d>opaDFttvxy2>TKRbFfIp2!cD zlItCUU&=6nPOy~QSbt7NtaBH?^X@-9Wu&?}DLow}xAwfZvNgx=^tKXg7 zGj}|{ zr-ENEZnT}f7?LR5nGS7Tv@>8Q`1eLgj6k=hQfw0vV8Tk1r)imJQU_Ax3!h6#rW-Lz 
zslqYXd81%ws0hVcz1j)-VS+(-GGHKY45)mvbPO>YliU%b{kmm|=6;4u7tBS3>gK+W zdSk{*bg3dHJyt9ImGk{xazYP9B`0m71k?}J`wYenjV5G|2FquJqlSbSQ;KSP$MX|= z`F&u>d$GBfZ2Uo=J{hGKHyDA)!c*KzgPEurGEd$e;j>wbnT@~!P>yqaoF@Qa*tBX^ z$Ek6w>P@9>0`*yn3oIP0xwXlf_z{MxlD}Kdt9|- zw!bblqSG01wyg+Y)!u;wt}bQ-I|(F^b99@Pt@e3-VgYhje_uFabE>}GdJJt;t^%ECv%#L#8WyTdt=iz{%YI#ItzUtv57$Pr(|tCYJ`!A7C&h&-Wc%BXDMPf zeq=z3lwLm!svXx)C@h8DbBDn9wR*N(r7YQ3i^}aaL#Q@>$aiwTCG#3u&-Ar?$sn&G z7{TC>;=CPAqP3zuojyR=tf??_$~6VYk1^%tcKGa^S8ES5v8&%1o*zpdII=d3~r7B%7#5-LNaE&N6BYxo-5`SL8gSC)a>rRqb-JTofptWEt5 z$@L10nG_^CFJ1OQyk0^BLu|eqp?0IcAbPxRXi}3S$7(*$+09t|mS9vWOx5in7V!D7 ziCYHS8bk8kW>gl6guyfO79PK@sdYFy3<}BAcR2BPHu_E(i~4#|?Bh8A`jl!~C_|7L zf6Q8mRxrS7g!t}5l``ucj{E@JqVAPb@R_ns*~!hqbv)0GGjJA2lTJ=sIF@4KnmC|E z#CMm?r><6_U26;`{4qEQ6)AAwLzlA0>z7t?$rdyen2U}!HS%JUTPEfOBl7fW>Kbq! zDS|WaVtV;D`PN6wsuUDd0+>$2C4Mwi4lNmeQlT(n;r;joI&jfxVTf8Fi_4N~Phv>- zrE|1U`B3#mBtk4@g0AsT@hS88OnhS>ObOFT1?y^oOlwF>Q>hz5+C0E%J93UXJB#r>aj)=<%LoQ!175VVRfeC!d$x@>Q-BK76RAy5ofJwYiS zr58WR4srl3R(%=525KO%6+bW?2!d_ZmwgL~L=Z?+>A1=DE$jm zKG}@WPh#zfGrP>CkszVBRaz&(#HFzt(A#*wM{I4MES$d>4wXM>(_>78uaxcD<_k*Z zF7eaoR*o_}7lJIOz)vQ4<}D6R5%r#ZKt)Mc^(*HM?5lzmdXmRrE`MZ&9F|NRm{`bK zsnRsuU)_0-2hFM_!1wY-=e4m08B7jo9=dBsIqV>u)YKkg^n_L};dM+}wPhfmZYBKl z5cPT?p&s2PNT~gsq&qGtrR+Cc%iOMrB~*HWOW(tyG!r;ko%hpk^b{5;*9=wFcX&%p zn0tH>ltN1zTDoRM!*^a>E-x7XGIR5(1fv!Z<93HWD1k=KYe}ch+;ql!r#At#ncWOv z=72wcVHKE|Jyn5bxjtWxiX1@XJxw?>3=A#KeTjOIcYs-|+JYFHEbjWb{ zmOTH}%Zx0(d+z*j?4qS#t#siK zsu~K&x%Jm8IQ(eXPMNRkvvpaF+6zlkV+#Bg^)m zK!+DVoY-$Fg08=)p7Iw?4>+!kRx1RuEW50nXGS7rh$|@n!3NYA_;Hl|Zd7SMq&6o+AqNnEIC<$kVp49S<@jt&tFcMf zlPsLqD)G6`(E69z_}CXTqxp69UK7TnIpCotTmUDNLwfMt<%x>-Ep`O#rhz}YHC~ZE zzp(OgyB`3e?YA}&3~937c}v<9fbViEJK~&OFxxDUA$(KYbh>@7)SEEC&+0KhCDAfKNun^oa(%eE9C3>U21`zECYYlHia9NK_H z*+6tZ@f;H9hbTbz@ol8-)_Y~6hq{QS@dDIb!XeRoOA`0ky(2n^q03P#Cv;MKPll4_4+J)>knq2IBn7v%oIUn0sPUZypc5}3043<_)g8rI;?#P2l5qgAmvDc zGX>?qvkD?S^w4f=*Sl~orh7lq4C(X0#Z|Y#P;0tuuqrins5_g-Xjs;)P0rLaL^b-E 
z=E;GSbK7T7;U&i_Q#%I}?=fmrX+zF{oHpPiVcW$qPW<)69P@XMTJJSA0*E$w)#Yj~ zm6<=tt$wY-)!*nnmRG&@7ul|aOwN>*Eoo`(LaIU)+ATjR_+X5id=Gw71_|13Pn2Nf z8juKa3@!01V&G^vt|PROGh3ubHAUlBT)l5sR+6?f0&0iK+M9gc*HsefI9|pX*^w_ksFb zQt+XrobsUoy4beEgt_48d{Ozq3X%94-eU$TL_R+7AU6+lm*T(KL!fAIkg;gag5_|3 zx-7kZ;*YeL3B?`6$}CuveGc*Jh7v(_PV`oLX=jG1$GX=~e|r{n+=Sy=weQk&XZx$1R1?ugK4~4ZLD@NndboQ8$art@gm0Y z$j9qi8W1HyfXH&x>ixKrbM7OwS+U|3!2RdfK#1!1Wp5xU^XvE_bqqWjx=Lh=5exzk5 z=SpW)6f$q{U$mFof1U!+)0fe~M_NY}{ZP*{;RuV6m>TR91!$2`&LJIoZ>>?y)edgc zFjbXHGEFQdFdAVk8uoUaC31QPZ9DvSO6zSFJ_HQLR(`Q{IY-$EBOEeu0i;#o?n=V^ zMy`!hUTWTHs;4K<0GIs3J&5u@2z2zBVguoj8q@-W97NnNsP5x*IZs)~W-w z3};9G!IkgM)PmBM4aegtSG3$Lk#-EwB;X{B!;DGbZ1Z@?rSw9} z>(bmOraijCLW(x<20%qPZ*VAoL8?`NdQd<$75s6$?j&mk2i6PSiirouc*U;}Gq%3Y zv`fpD-XO#(vD~?k;=8f(=|MbD@XCI#2rx8Dl_#cepH~jKzG`QF6}4F}p{v?5T=lnl znuV5>qFW;t2|PaI(A8n+)y31uFqT+o2?8`DN{7Ped@(6==z>MB9D=S}g zDQ~Mb8C_l=OkN4ZAAOYn%kT$pRVMZD9VU5!UTu`76j<1;W?^QhC>aN+;=n7E3nFlG z-b)W?x~oFXU|7|tL;;#s1b*0`?{QGe+#yJ)=kFJ59x4QX6*Q36NTA6_t>hOmgvj`z zjqS4Lo$t9?hU~h^@9%l`Eq-&b0a}ARzQ0m4$e5bjj#t7nSkxg6c4K)blF~~Xf`KBu z^wJxi+30-8?af|{S$X(&oD@!&+GvU`zo6k>K{C$FJc7D8cB&duHF)rx`Uf+wx$jTQ*U=1b^4#+xJz}D!>ehkbcK(<1L!~afI@69 z$Za39LfTUxaP}AnWF@LML?BJ7j>J0out_DpLw41bKKGKXH_9D{wg*{TUIO6Ud3^K& z-D^R@wjfXzqICos)3(bSyn)(f)tFQHQX>Ie+H|seAE2=xwJsxAIfG9e<@*+wPtZ0o zf&wna{iUbqfd5Kdb9v&L9}9%YtR+bUtP~$YXY4Uh(a5w!Y<36m`34F$-rxJyVjU+) zooY|ZDk(rrPaa93{rp`@rexfKj;H>U*6ZXS0Y=0VWWTk}-i`_XEg4TWCu3mZRV(7OJN z^(#l+Pp&l*`zgTWJGnXOF{}hEB6&VH#?$mNSHsdfe%p~rjIaA;mR&(ayG=7Eb(v+2|#E)?!ZQ zqS8kkYxSG>sM&3SE`#DBYPxLyIE(rziSO9{{yxX^%bz7~DJL!9cT=ft(PwA`wem+f z-Ux2uLT<5J^o>ls0Bp|X$5P^PJcP9638080od@HWEx{q*`+VK3se^i z)BD$Ef$fU9&nHX~lLEywqiZpCmw|GC24>X1I(T-byQnCz-V-T%17Fd7;N6+L;<6?c z((Iqp*(>LwhZ(GB@H4eg7aJKpI}Jih%tn@X_+oZ@#f{o=F)wW6l_XxvFMOFP z>2n2${mUCYdDKPaQwy26bc>u6$pa+c5zvk|%U^^t4i(XSWVnoZ_5-c`D(nn}LF&k8 zv+#L3nyYsJ5+N|N4kVK!vQ8xboQrFnY^#es#H2`gdL1uA|8Wj5+NlZ7vTrj_4&n(DW6#RC~kJ2b&#(1G&ZoSgZoauCmO zX=U7YD%sADh*H$R?j0(6uqQ5L*LiT_OMxx^XyKJ06!F-l_Ne1+-fm?!4&bWfN4<_e 
zERHmyiMgsns!xzh!*rc@qK&I2u&-y5NR-v=tgM!=H^vD~rcOBkRp=l5yZciRq|2)j z$K;@PhHwo=#tfh{AX&#MM=Q;yeLnG3EA1+Keqou-vTQEj1Q^T4CXCn#$Sody2vQ>u ziNC*G&i`VtN(~GHGa?p&5VBoDcXYo=<#nv0w&K835ii&Whv@Zcl$g(y2P&y?v7kM- zZ{kL5%w44MnOn8LjaoYOP;)@-E zPuBPFxXOy_$B+00SW_HA&?%5AYp)ErnXTUbDRv6qbP9XLr!0L9x(q?!guubL9>4uc zso6B&shBGcsn?jmPx10|i7k$oov>S(+P@3qfR0M{mwk&7Xt3hIKl)TI+NVoi&j1ga z?Wr!uHn3(uk4?OK?zt!g{Gb!xPZHHlXk9RLap~Rt5~x7LjWi-FB2e1)x%5Xr$iBJF z-;`~`r56`1vy9mu6i0UEWCW)-&Ms~rEn0Rk=xK0x+HWRKm5%g-?&pz=zLNy@s7!2R z#w&d*mH_%r#;QLEqKkdgo&*CGw~kdv10%#C3~|MNDP1^g(mA)?wvK74eri7k4k`21 zM2q7Gw%gh2YXC2JXRZ5i{1uR0rTv5iT?v&z`| z6r3igxg*G>JX#U|_|KWn-;K3DZvs9SHRrIe`{#n>-&D*;M1Aqj`?-lzfRr%`6NAesOS$UKKP>t{^ucnclEiv&lm!oMu~q);Sma`{>Q{g8cPiy^;|98 zj$G9eVLUlY3Cj3M^Uvh|KV)soOKr0UlBy%E;xUy59MMu}h%MafI z+R-&~Avx?-y?o4CMzZ)ypR<%9g8rCOqHa?-zsTdWzq?EF3mYF3Gshmhu2L}8T0^vb zj&av_+&iOYZpZmiaIri=U7)5ZC~MZzSuJ#HgS+N3ZP12tXS*k(|mzzDKq_?o2oL=v_#0 zWf|~U)?JaCt(e`Vv!E)U1Tr`eWFu`yIkJ(3$PfhWEIVZR6L&il0Hy5vn*jIjn8WT3 zb!_QMBUWjMt&zlRVkUyw@)Z;Zk52~*p*NV!uH#FFvP*ZOyH$a$PMJVS3n*d>6xAMN zN1JKL%rg+%FZK6^nLf7j`hi;?t(xERw9Qe2s< zr+kdsee0%^V{8ZFM)H^PecgM!rP_x>PV^c-CBtpTX3x;b%Hy|@yUrc?a&-W~Oe^NI z++S<+6!_r}4`Q=(@BzVDHDte;SStlGFOQ0U5n?b?x1Pi8&I~>w4^)p)XWeRnNuSXs zmmnb(7A9m$r8woa>-v`LbsVvsMA9DF@>;cic>!fxF6eukCS~FP=uRY-+_PkcNWaM} zE`6{FtO9-=BU!)FbLkqk-Ok9|h<>6V`O zPlavBPU$hBs$s9VfxhCcp8B6As5VBNvDY7L?wxL30Er#rg>}!mDW(X-l zCHpcW+l+lTg9+KU?8_LEWo$EcgE739`+1(<`~3CZdH-=Z4zBNYUEk%ioag!ZT;?oC zz}q*PsaXS4geU$ENTm~4} z465p_i(aqUxsvLPhr_CS6v;0)jVy>^br%)Yuaqrr1Ei>GTzCJ&mPj5{=`c@F-Dvu3@~Z`(@~Oj$pt-@{z?T zsQ&$C9`G2u*AvGJ0sRHbuarZK$dM=P^EuKVs#G~2fja=x_VsRR>}f4>G9dg_hcNl=nSNl?tlq{&nyDMyR1xQz?ayJm|DgQ$P(KaIfI`&X z$q`RL>KR5zh3DS&2UPMigLJxTwPe48T>6*ADjb4c+F*8FnxdL5>T*}jeal|2b~~Gv zE1{8$rvS0Qk@|3FroSg_Va*XidAZiaF2a+|ViiYnkBQ_kwW{}?TA){N@%&e4;J;3; z*p)LHdbACj7H`jSvjaBQD;ZvY-OZq4)7erOyofnigvY%#=Hl1rSj@WaX70Tf<1KtLir%~7tnUu3@qn)GLRFQb&L9@JbB@)`|IkCput^lwY}V{F#wO`+g8A+O z#7g(VMr%s3M&WgoHXm^Shl;-O$Kxj$?<^ZnEnpbT|g!W7U16ytd5_592HNLAuPCeYcM!fM(E-whGW 
zPV)1WN(U7}cmEFn>*Q4+n5F+#1Z)|^GZoeyX8%s@vgeD-K)EH`28tO_<7w|xF092h zM&`zkLHC^3W-PtbdK}w+#g>_K@m~fyYw&hc0O$Us63`|p{O<81CW6!#&3z)agzm>p z0}2G*ys?8uuhb|*n}q7k&ObdRKVwfF#U+5jNRkGX)3;CC)4CSM`+v(!2}Win8{%rU5kgUe>~pYuC$XKK~U| z=LE}a4d^yeTok$cHKm=zYpsn%m39`HqW)oO5B4f8h32x6zEL0#KMjel0XC{8&wd}x z)HF0T@`x(<&3>c<;LW)$i<+biGqC&*uNtlv*2qlcQR8!av@r6g2V(5=_lE$560_4y z9ll=N{-mSjHBcI<;MdKS%ViFDir+x(Vi-N2?_E;WQeD^zdj2X?Gm|#x`6A_aN8!t? zRHnGd{_Z0gxF4xbxAV@1MsMZHaPdO(Qp>IE6gD*;zt?tVj5M&VHtwT1{v{C4yDfsc z-ycaN?c|AH=oz%-rpIMgKVB1R?%#^Kw3CE(o8(d>?{^rfxm{+lndgQNoh%*q z{?x0OF=*fKbjlfqe1`|e#+{qRl%Pe3wM$DCFp`LAxfuXUD+ zHm_8U6~DY0!r&)d{!pH}yaDMEWd=$ertXIqUOFnZYm!{#`4(WwC@ZBO=o)z2^{9u# zbBjTUP%ORH@ZLr$<_Bjs_%!Dd{@NB5u$8r|uh1lWRNf_koPLb=0$Te8x50`t4Z2Rg zS=dz_K2HK_a5K}75n7xGhSZI<30!77(sp#U9#um7t>AV(R{-Vj(7grABWE^j1}~l4 zZT{uh;3jYddwAtMAPYaDw1r6E?V#)FKrr7Kz@^$v0(DC)0B8{8z}NjyvMB{NgNsr>hs--U6@W z6_2&MjhK_81`SMeeOtxVc^o+@9x{}$lo}T+51VQ-Sf6zbwW@Dy;xLr~GEma|Nb&T%h^fQ` zdCBEB(55S#_9T7tCLY~a0M)gwG%szKBR1Ltl#oA~YV8KvKZznnAN-+nO{+e^3ed{cH|PHF0-zglg$TeU z4PlGOe^6)q-#`iE6woU)ZT`>6Cnr;YcFBiZto%`9iGK)+MW8XskaF*z^E6{EfSbso{N7}pZd?BY54%-Sg|F# zO#Q_B9fMg921Mgau>Y1UYwss_zkpW*u_b(_Y=loWt@p z+_a}ylxY6-$4wG2nc)<+x&1%+@BcgCBfxlz(K1FK@2Y?9Hf%Qasv{2f4Uvu8?XcP0LYF}uPT_ycH8{OOM~r(VB)owuL$D&Ib)T_2-=&Y|p;sY8j= zH6eov$2Xbb6W{*g<4>lv_)}hXdUKQUe>c2QSvi<}cj5oO&Aj`9%aQqCXZwHMC+!qNnSC{?+^N*-lS2Procwx& z^&DpKN$Z~7j8-$5^Ouh^*o$eVf&AKP9={!0l@czC_e?C^@mom|{s-rbOFRry<+81} zkQEQ2^PNzqA=IY4=U;1b$NxTeoI9k=oXImRZ9Xt88PF?p9_t+4p8l3A<#Ny<&%)Lo zS9-pM$J%(PFxtkj>vkJNtQqttLM zh^Pp|GH{1!zEe`IWft4er@t=n>kT)N}LWmR2N$=pP#+WGt1ee~CT}8sNGgj99KVm|!A;p+)`GJPmHAk2mel4Fc;M zRh#v$$RFgK%wLF%7>1NCL8!pfe^{xj+M4JhrVR-X3kh_&a*{ zseI+02F9y_|G1f?K!{pR_mE$>i9DbC>R19oGn1pUvyYviHd+x15{dnVIW|r+`FY^!Z9ubb_ z4-#+w+ChPocYK8S5{%4B+eaAjYr=e}Q?>Q(vL^tX_`GANjY{|?o-d`ED z2w3geWm?{0?J0@^QLoPKS=MRDfg$1-T&yIRx|KTe{iC zcyu{&Q&(#wr=_9$IFoV4F|Wp`Vo#61Q(vUXw^wPxItg4N7$Zc$G!HyBr_fCGWgd=+ zbQ$9Y?6AQdD^(9z9eVR}V}y-|=JpkbVaYqXGT3jY8I1W{zd-kPZBgYunn@Dw_idYw 
za}J7%fq2sS_U_*l{Bw@Kx%fN3rBBMOK&_mjb_&xcaZ8}H1(8_Gthi0^eMJa&{O%;337zo5D-0X}tsAiZ;d7X=sZic8 zHu%bk-S4V!$sz5@x;9VGhBe~8Yefsn$u7yBQf6;Qzl<&9i##wadhzvK^@=hmS3f_| z^WYv&MyXTV=S->nsTN}S@MJ|BNPCO$v&iBku5PWu(*x5;EUKV4Y6@dpj^iv{Cfw?G z{Qa#B!1_%aoi-QOCX3KMb-zoP905LStwVt_-T|xLZsB<)=Que6OQ9wy9R7CLF2!jt z^kovWNW(^%PuLorpj%rqexwis$9*qW$IcO#d|;|7a0PO>ucS-eGE^%eFjBAT_Q_+I z3?;o6ma~KDEg##&*cc>w+h2@2p7bG@xmGLew6emqkp%ZxyXlWX+c<-MSO9M^BRRoX((2I`f5V zT7G-IT~a8k@Pl}+&XNUeXBqXpqm68Z6>*}TN_^wqQ*;u?Ys2TiX;;$Juz>B_sjtW> zuddx$=k^?bUv1HZSwRoRicp8qCc_dYS|{sEgbWJGl3XwyzS|@Y36N}`FQRri#76CT zxji@|np5={W}@aFD{;*+kLj}$|4Oj{qCC>Z^s*qS*iQoEuEgh1KFm8+W_1!}P*_o+ zmV8NHOnxSfN{lc_tw>J3jBOVu9@>av_paYzE8!Z9;vgZt49y$nW zXo|G+igzmA(wBlCobcP}{WP^HB!c?z=ADGjo%wT zm^qVSYFqeJrp~&HE9u^n0kC|@g*PvC?i_@^A1Z5t!Ss}s=}xnjKcsCCF@Tidjqh$; zX#$J5RvUD%OF8YG#aUDEjhg=J^(gVct}`I5l9Z(K^Msy*SwB>*O8)VpBPAd$Wg3MR z6^lH&`!SBMO&E$EEjKXdDsBDU1Rv!jnEV!+Z|Zz5w~p1-E-NsV#hu4VYu&{ebjnd` zI`-v5PEEp>=;YFf#oNQ?aEjd;?LEy8yen@RfyC%u2bWVEx95OcH$r0cNt_5VN*oIs zJ;lJ5=zmD$$Wx~J`bgn-+n0zWzqh*!?+Ze$O+1?*Qacks+*Ca!Kr|?r>e@Hn%O%5Yst*)RS5sXI#i@ny;7p z%x8|z>t#zY?GNW+^H?=)VA_CC)K0FAWJ1=`U?W%sKPM$QeJa_TyX^#r2N z0IxBavXNA%7;Wp$PE_^jBb$$S;+#`#swuqQKSb&_xj@hz>Pcc=WcG~V+cSS|gLF#h z7+Ba#mazXUh!3g)3K>`AsahIsdP`dctg>@_toiCU8E<)y`h|7*EXPlW zQgc)=+ZiT6@PMh0{JNCOGiJPSDzFo<-sPgIQEM$ehqYcK)ix=-?wucwgB5TK6~^Yv zvoVy$s27`61lH_JVO};d`uWe?)Q!U^d;KtikqTM!ts@qF^dp!FvmE{%+ukdjdmkZ- z$;Q1p%5C%{j6{#yzWl31$q74ckf|w~=$@s0=bZ;6zn>ztH4dG3BkoQ(b1%Uiv>m)= zzKU-!-chQuYI}y#tF`Fd){Ur`C`vAckPc64y84Yqp+`Ftr^WfdV0NY%5QB} zTEp}rJ&{^zUPLvnWK^XtrK^NH#m!Mh^UQ5R`yHIj`eBz36Tm2a$z=AkR)EjGz&{|q zFS91#=lvr(tKdz>)T1uH)j(y>xkFjopOtUUvJ{xrjays$ulI^*h4J~=hDPe5>5w3Nx+zC5f{(ZUT>%nz1?K(~9B#Bqaxw;rPrUgkZf3pZg~qnWRz9Pv5&l?%PqAjB1ie4OFf>|(D=t8L|c9kQKxIj zQKda^!`+%B?jAds_q0(^?=E-OHw$SS z1{G8YCn!PGt$4x0FmRYi5s{~n{F^#-a$bJ}?!@0`+!mE~I*SNm@ z<88YP5NE8LY7|$0WlZR?Li|~=l<~!ZiL2I}$AL})^#IGSd&cS4F89(KEAG~%fn6hA zFjk5}v~=2@Y~i|_V^c0wXvg?lGfA=Wt>WsV5q!bT!e#$Y=c&TsfY#nj&If$7GEt@IX5&xCNkMdRtInujM_c-+60gGe_i%i)EN 
zlTvDL!{@7d0M9yzGH3w;`)EAKF*t%pS}~dL982Ba9?sJN8|YcYL?-szYom;9m{33& z9F$Cr+WyAGqqz6b`Ij~U#v^6(wM6^3&IN(Jfk_+2HtvIFq3! zQtjbXyj^{3yy!%^HHP@vGeBnvx;`4`IU}AO%*Gt#blsP_5Gv|~bE8FRcV#{ zd>Lg{X}FT)LhiWLDebW6J?Z4K>D?FsysA3F*^@XJw|Bkn&>|foL#>g&a^LCjZGnMN zc6fJjBQC{Jw$e60hmFmT%=3a#@rHdwC9c_-h*S8Qf;?g7Ww{d1^0fl*LEzC0Y|Q6! zvI87`0W+OFZWqjdb~~ShkTdtGRiG}1*MEyQpu5r|JADzZ6(b1#kkG>L$51!}e^K$e z{b;*xpqO#>R?UPtoa)4C!tveRG)J?(Vm-;(e$Z4>nQnRj>Z}8CRp3({OERzXuZGGU z&{KTc@ACaM?iPAus1*`q0tj8c;C2Xz>sh2An?tFVgJkS?gQEx;NSxX#ER^nyst%;A zX$(8Ljg}-muSG*{3kV2|rrT+omssU$CdlG%9LfX9fT?}CgUb}=j&q-P(wCO#ZLQ}l zf1W9ZrTi21KMo(GyJh8n`TQ}aAwVhdgo<#XDqX`}(c9ry{@B+Z#l$9I0OE9Tjucrcn*N;+>l4|4x79HKF&KO za}39J^rm^JfY)UZ(qa(|Te_aFx`OL7eVsc?&eD z@bHt{E0X%3(brgEB?L1c(Xv9rS|mia^1=Vd=wjX@kCLR}7F7w;QL;Z1Nh30sML(Qu zQdFvP+qNSi1K2(+L*6+7Bvgt@hZ z{<+D`%^l|t;E>hN?pji&e{&sc$DJ5u%&EeEoYHR6=UDPre;D9O;@cpmhNebV z*9m*#>&JM|@MF^-tq*_AN`uP8Cl4-_wl~?NSOHF(7ob-nzr3UQ`fGCUmpjFAvr>FG z?Y|*;%PjF@g2uA{f#f^W^NmRMPPk_^LdT2=lPH zoY8O`LmHrThZbVI-)_t|n=AkPzqe$YzK}=1x?X^eDJMvNUFC>e>kP>OHV$zT8A2?X z_7v~u{BzXp$P)4kuHu8+wx?_?9VhlfdfW8B`rnf)Z4_5YU;6#)`_Zig6fB2aq}F$( z*Mk`(wUrxq5*?k4C3BWUfy8AtSL5cTj}k_<8!tVDu%V>P}PqniL81IO}!v-Q!{R zpU=$9%sl^ZE|!AGR8SBWq@c}?NfKmj<0{)?VXD`?uMITs432S& zwBR#0Z$F3im*|&2d4)1Xy_Fm*DUckk_yo{g70(|5d
lr|5r2#{W0Oz%N02A~?aP z`0nuzchh}9%TQePA^Fttu1f%fbDf>xna5kHo9stYf~Hq9fB)}C|923qRRefRYqQ%C z$Fk~5#elr!`E8MF0>^W*O4h(rN^HEeJI0a%?+^xjv#;T$8|S?<$Gh&%5mS`Fq5kLT z{?RPx+yl3v&Jp6EUtlsDRpL0R$++A5+Plf{MOUI^#mlBA@USGuXU%}FV~Dcg zoC95~DJ$&DRIfgKQb^tqbe7wBHRqV5IBg&V7HlypcYF&3TfbsZ`lE|rRIOdYomY%6 z!k6Hztnfs8?BQBAXbwuYFIG;yuymNmXw9s$=4<_WZW@cQVqzDrXiox{S!;TAS`D77 zSa!i4T7anX_?1G#exC7S{jnF62^~4Xd5Yr$=;;>9#Z8|dE^uf>$5OOt^T^_$gJZ`Y z#1Zv&tAA{e!yIba5nEVf4;C`+Pf7e%exB)H!cLu!C$8U7%Hg|V)4kUf@>T!&AQGJC zTP~=d_3`an#e;nZzgZ1$ssHH(Fuz9Fm&7i9E_DPd6op!sdMd)=zngG$@9rh7I05aS zOW&SbbpDxf=w5#xlVuFT_B>!7AJ$Slc(+w|SiLc4Je+!R9~LQN1(%zOe=yvkD2l0w zq5=UO%D{$c3HpGrRTA|Z{gm!TfhW1?O-Oj2|E?yHgmdZTay-}c)ISk1vrCUyTDwn!N25m1bPv&>CCmF{GCS5nVfqU9`4SPLXa6uDp#^91}T8GQl{|9kuv>%W+?cMM;0XudXhi(=R6x+w0-!TesXn0)TB z-cY{h^Cx_PWHyt4FtZh+%+S8NAINl*zc*Li#TdP5jwRMpF~)QpBV#(kT>SE`*xKdI z=t#2_OypaR^Y>ox$z@VMExkf01O1TaB$tPH4f5ll7l$Jvc|i52ClBL6wX9JJo##Q` z*x+ngzit!ELPh;l%|R_XPnWLf!Zq_UZR}-MXVJm$A88dBopj`Ti`9aZn9nYo{*9DH zezwULsr7#sPJXrwjyRo8s?Eqgl2UjEUX{-x|E5RyFZ86@468$D%jXwr_V@g@x9JhP zA3eoPEvBU?Wt(4DA-UElLVI+E86<`-JB00ZIWr#4fpE7^wXAmfbyCW1{Vx2{NtfDs zDU_bL%(s_oMn8KZ&TVmCjxyJq6gBnTBp*h74RF1IxQG0OLrt>uxc)rw!`TT`gbP^w zi+m|?OHk+PiC{)Eg{^&u?IAzJTzJCGi0vW?2V$H8Skw`f%BjF1f`q>%v}yRWIpsg2uGVwE}M>&{d=li0#5DPW9A|e zZs`~lnDjES<)+68`sFVEV&SmY^gOREPYJ7?zW-2JSMo79rn92WU@*1MiHu<}v&g@j zG=G^7Fz2wFe+}raVg|!^9k}QA#%|^L#i=*Y zZ5c{B{*c&i8lHUH|C>IMH2wp8TV4T0o=cu27)BY>H=UxNu7tN$XB&p&JsAa1NkmxC z+ugMX%&%iSqKpq?JQ`D60s`N_oLTra4%4C1Qv#Tc)KDs;8jo)P&!os20ahZsT>~lV zikHN}V+F^Jitq1Qh2P=%f1B&T6C5`$&^EN`X5oarCRWz;*sQSnej_4sQro5+J+JW# z)H@pn35$funZP;Hc<#~>eP z-^fk;#zJl~n6&DM6=o*ul&VTDdG$+9d1svjX{SuR33rb!*v?>Lgsu%r!|GBeslKL` zOV1W=FGLRYe|U5LM1FQ-XX$7YoJu_TD)9o}&5lynGl@DTCxd&x+Y*@M#tP(I_{_lI z>w1v;V;`X` zLi7Aq<6>;Deck9OmVVH)*na^C_zRWZGq5O9_m{StF?^KLsKY=x(Z|ad!K_cg7fu8h zi+qTYKZ@^6H<$|MNkW#h9Q=BfsoLmve|r^ir4jNr#j(AN^I*qi#XTR4GHPOOIozFU zY4fCf4BfY)`%p)PrSp*>_*eQ^GvZFuu+RQcd{5h*hFMXRHgkqJ3>y)M&aLMzda&3& z;H3cHWn6iM4K+hMTtqDb(Zer`zuzBk)J;}JR0nW}-{Mrt7orr5a7 
zkEN@?4TsGtDKu$l3EuHS<4$z_>o&8&S8MrR3cqXw7)3!Vf@<_v?nCHN&K5!23vrfC zI~hHDz0=+$AQGAo7Js#;u_X7)4C{)D?OGm>6v(Q4X?2T^SXgzy zuFHV)3S1W4i@l|LqvTZ3zl0Tepg=%Nw>%9SNLqxA>jF?8kYj!-%z^z#+FiQL+MS_Y zI}h*FtxWD^yr=19{}RwAPFH5w%UWVnehnKIxqdqGK@|xo`E#}aM-JWWenb1JHSUNR)SHlF=cq09_z;^<;M=Oha#n3 ziu>by?XY}LGFzac7aJPpNim>njuA5073(QhyZoi)5g1nI_7!oaKz#^A5Z#Ok(y#vc z-P2gQOo~!b=2S6uXYOJK#lF^y3Fp!OdV7sv31|_pJBtqh7`i;GAZ()E$!Pay0PNdA zCpD*VdIw69H{dBjmV35T*@S0_wq3B4)rQ!urLhb2D}>OX**>Ty zgr_Wpj)?;o?*cE%6%mt!*IzZR+7ZS5pjKgUB69KfU-r5N#~u#y3|Fmz_FfHu)9-5{ zWP%nXtP=#yV}&vAB`2~z@l4muMr-wNeTMr~`rl>w+8&G!wI4>>VWe@RUbj%r*KG2( z@{Gz$Bqt#Pa<)?xdyLChLAT(r~6 zBF3oPeuuLD^CVf5(n9ViAX+Yl>Gqsfxb|1M$95?IyClt~@}LoX-ZOUCgguCZaYrjO zkKFMY(D7UzCaxH-Kwq-n&|K>%ms_Uc;2QbCJdD!qNR-5j&xtj|L2b?2@iQcZIdnd& zz&NYg*t!Sr6=5h+vtlwU%sGE=Z8qKoPq8Y1i}3VHRpMTNSA8cTy$7{^rxy$j8bkaN z9dWIrk|p`vlJ9ay*h+)SuL+rEib8VpV+CI*>>RXc4909;jw!tVm;HNm1X*HeO`BzZ z$XW}uV2ed~NL7pURBIq|>(_3k9enkR&E>YG%iWyl_46B0cd71);oE=7Tt$jgj}zg^ z`@AXHZ==_DPQ`j&A=f@?xBOY=>%4qF_-TtcAOy2v?95$o*JYb4ahjqork7TWVQ-Ykez~mQk?wpj!}j!l7%y(C@SMGFv-i^?ooHutm!=|)iD#h&&ZCx|RiKg) zSe4d+eb`|?7O7cLj1pB4Nan%EMLMkyEYxusSA8D~tlb{zqF;W)r!h_Nk~w(f;P*>W zJesYlS8_0Fcug}VvY=+>He7yfC$v@t=@LGprXe@uGPxUmOw|8=> zJaR(?@_g))8Mq}C30ac+dH6k_f2dX@_^+~GEuM5Af#XsiBJSSkTfhISJccPD zm4Y2|(FM9o;4AFzGPW4tF08wWwpDHHrQC}q*jDs>n|CkZ2VvgWlxdkM zuiO*4$v3G7S+*ye=?9384d4q>Io}WUzHX_M`vLBi<2Si(AG^$%My?}~fAKOIn8~Gm zPkH0DTTU?d!I{CPq1bnI1`8DM&x_5zi`B#XwwoArG4sOSj}}>`73XVQ9zJioYv@(1 z99Oc2h#MnU$KmIDOjk-ND~sh?J!$Ft;~!Nj5cLVEUN;bOz2xwNwFfL&Lu;zW=__{= zU~2oXfx3ykpRM!OjTW^6p%bG*m$ju>{Il#1(0W5qVm>o*G;U%O(+d*mh5s8ECl!1Y z3pI!O4us4VtMM+Q_OIX9Zz{migXV2=Wz!O2Ruj#1SKfH;T%lmv-EkP59%jWfU7yk? 
zS&Ir0Tm$alZ|w=lkXtV@vKXyu3$UHc^l;zt*Oi4ovfZdkqa$FRJpIg)!uyg(xm0t= zn%;Xsu2T%KVDnkQ&@H~Z^5b0wKH$k_CzMwTD!Uj02sm zjm@2iYF<&Du7hviWZB1LqklR7Ob2&w9`x51HPAVL+wL%o<=jUI8fV(?(lJPkMjA2xD$4Gyo}{{#UHyc-N+jN| zbSF?3NQL54XvW!F$=U4i$=&y)s8k6?6!OF`lneBi(70VHG8LJQy#E&YJa;utu=&<_ zlDQwziNnKNTzZ}b4th9DS?w=e{v?1PN25*Mi*t>P?#{&7Rj&6swJ;+XMSnwIYxc}P-k{8Gtp;uy^c89>BE7_l#HoomJkp z0Sm&kSLsUyYB5mm?&z2Si~g(0d7BA*CH@i&mV3u7jzp!}Acu5cO*>Ui^g#~}|8hq> zM6EU@*(sFIOZ=(`WDZ6o_!chhN=$+)2PF*HUT>M@PYKcyhWE~KvBP5NC$-dC_Z9WgR!{|j@g$|pSw?Rt)o z+0P*wB3P2Od8Vi#N6Ma?IX-0}bl0}u11vre^?8jpOw@F^$R%UzDc{4{JHfJo*Bt~ zzucDX+kD-?_1~{JJU=Y7-y3V1uP?xK16>@h-r#p{ldNrU)!S?tsb`kw)lXko609C{ z3;dCG>r3Y0fL^Vp1@H-!8^g-Ox?kfT;-9JsU7@4BMU?g>4zO)32=K95?rd%pbYQ&P zF*Um6@)jP*BUtDXA(NF~e(Q&)=Zhr;%Z9)3F1$1Muek3^q0QXtRR#N& z6-72Yj*{Pf0}=_LiriZ%EBc&zL$%Sbm*(sJvc=N|`13;YBUppDc=C%f+-q3x>-Ej}X4bR;W8(Y_uMEQahh}v^-@zN||_l4&TZqqn|NmkFmyhH(j z8R2b|Q&x2bo63=Lr&mTjfRFCVXEg9w%cMc^iq8=bKorTX&R%!n_xqN!;B`gN5LbodNua%bs+$d@@K;D9__Db!F5 zQi)#hv01NXg<`GNQ*QW9W^VATq1*~ETT|8e{1z;>uV!D-A70Ofr~cl92YLvy-@sKI zT)0m*KD|4-r?G$|hW!cP!gf=f_I~by+Ln!(cVlQ|de~m??z;!u8?*(<$;tD#)!Fzz z7|J+Xjb!u$$0U(3(>H{SJz23o0x+SWv`XL`dMaouVXjkEGvocs89jA(AU#=Gt7)(u zk(Bg1;3A@}*i}WEtXJuvxok>fCK?;_X-ru}>2)I0=bX&3VO+tT-spn+pN!HM{en#6 zU*;|G8WFqe3QA&7cXHT+pZ#vsOU!PMklwCV4^n(|VREY&abY*Q4e~B8)@GH7g8{4r zCYCVC?zMm6iZsLQENUbchP_9g$Yej)vU}$7!8f3rJo&rS77 zh}s6d%ZViF{>+Y_yb-NaYXGl4tpD{I%I2=(%bD=HlCS#DB%eTTndu1Q2qq;q_8xx1 zNz&y^J*1BWoebr8542c!X<%!xRZmXqRnBvY)Hh5ZY&174vQVGf@#a{q-6kh*r#4w~ zkea-FFOev53rk3jytn7rHd89Z_3p{$2YueAgw09e-p3>)P!A}sab93 zRs4RDSRfZ;=%GM;zz7k~=l9GK_I9>c2+2x6{FuH%B#(Zvli0tp!Q_U(FZg0VpdoeD zlo*&y3dq;Rl!&~rRQoM!5y==2ARl9)?^>$cF?;lvmBR~bQ1G2JIL%j2(xKK}NQm)I z;XTEk8A|V4e`hNSXEFj@ve7-`E-*;BKI)aIfxUJxS0_DDwC$IX+C=b_zcLGwc?Tlf z4mN76Oz$`?q}I+OQ(+|&(TtOQ|H9FYu$;IqsB}{YocS~c;RV{4Kf6Zv^Sz)&CO1Ez zbc8!*p;o0Pid)9@0TM|CbRt#UMfuVVT^6;#tn(dQ#o8 z&GUZuW9(h378`I%eriLe70> zq;;aR?w}-;<6U7%-c|;lp1Ef0G%oL1lYA05WO!ppAE)yd^Kiqa+2v*%6j+*{F5^m3*AuCPKS@UZ)Flr$sd3R?`$!f{8bJR 
zV!OW_Jj__$Xb=uBq%Vno__b>G-JxZslWoNQ7w9d%d;?V3!ql>{Z85E|s;YI;sfCVKX=F6YD>e6=G zCdv39x430GwZlBUxc7B6YF4=Fw}&)3>-M&9(aO?XwIVQz{~n{nV+T-9xlU zJ&0SOJ4$df+dj{p@p~@Ps3c1;+jr=c=3s%|tQf-`yl4MlmI$HLj*gn#FaEc0R^8qZ zz0_i+i631(SV5%j6)S(|3};MkX{5{E2lIRb?3Du@FpoOhG$5ru-+gVY*2$k-Tz_Yw zCJT*-R=8FIy)jEp?q9Rs7|q;DUx|Cj#!#;Dv&rG^M-KT6Nf4HuO3#wpS;k`u=h6M? z^t#9iLZ25FzfcqUhLANBv63Y_*5p${vyk2d-z}IgKQP0r*b@*wCD0B0Lk`8uUmu^r zb>R!j#*7Ef5`-Z=pQIzc@=!1+w5f?VB~_T4;n^?t~~{qk73T+cD#^v&h;tpcJr|9E92#v%v}t3(IRb*2zPhrI&C##D-t zY{VfCdhFywIDp;s4#b$igrk?l4*U$Jmg~kDZN{=HOe;Nwjn`KIy>(@^g>;w-@JXHb z$}qC_W++ADM6hy`a0nRShCUNsUuU9M!CU~7n~;~|^LWZ8CvKgjQD(VOcjCWy)9umVi#x0?1xO1Aap*l*O3FHK3M4NNnt=gsr??pHyB|jxBC6YMdXal-TWu=-t$!40?Ks-&5S*p6Y{ftGGZGE%4pU~W~0c}3r z!=9gH&QXzwFjt3w1pHL3C>rf6_p+x=5VQ@y?p>rhv&h&dB7<)@Ag!_%ZR~S9({Vam zT3{=EaA(@W9@2Pv=LrHpz9hLmgfZI-4UjiC3Mne0*j9*Jo$iE4cgFC1YlV}>3h{`4 zTWwLL!?bSrcD*8eV{;w|3&aBe{T}jLZ-fdWzYYB&N(A9pl><7mdEIk0CmTH!y-EP) zvjc!KV{@MUlH>E|W&L}=W+`A|c#gvJa`g2uaWzJPx}egyU%z{vbNgYZRM3L2(_CFk zs{iH2F=?oT#*A3P*HEG|@N2MgP>pe1>q`gmP){U*f zc-kS)x;Niy*_(1aw0g~xj(2M|I7?Z}zBL5@=?cH}pS^gO;V-@hHjNU-;6N0~BILvG zm07E)G+U0MxMaV?@)4(ON0*@SB~K?l3I%ap@!ch2gA9&(5s=aiY6dy~3_UwNg>Os-;X$`wa4+PH6PT-d4)_KE}P`zs#XP+1##JF%wQl8J4(yMDwlmWnj%K)}$ix zme>d_d2tJmT%prKEY=u4umk4zp7Fj#*$7Zd$gAn0(fct4En!eO*SptQaKRx8j~4`onw4$GB0>1_azdoQdhbH zIm4`smo)R=(u7rOU>x*@zbUxzELWnSH+KvR97lsGk8OVMd3W1k>m}6jwy6KZ-CKu6 z*>&y1(j|hD0tyDGv`9A?sHDKqsdU$XNViHV(ozCL4_!kE+)_h#Bb`Hs#K5eeJzhoolUgy-bMreRL5|2SGG9x)KVIyO+CpC21#`;zZL< zvC_9?s7jmNBTueJWQ`e}vY4D5C+t@)*hpwX))6jMcXggnT^?))FM6-Svn8WcJil)qOi}?02iuR zNdNX~r6260%{%;@V0#a}!_C1TF3b!a>cdUL2-8a6aRd$i;hG7cIk;j{(4U*cCU8)T z+&djQks1#>hm(`KKV@@YjoUA%mJxuqq(fE8miHfIZOV)-1-|=NoYd)huS9OIiUMu( zISaY@M3+`(a%f-V+lH)|%3vQ+|7yweP(8ZZQ;u( z*1)-5Mm$KU%;Xk&fpuRsPh%=aZK|V7n;yG2Jc_t5>*O_5J1@tuyLP9Xtl8{M@5j6K zopF`|dsS=`EkrF07H+7zgcJMPZ=FXTNu5rUA9JujA$H{`ag{PsY$`7EK!jArEe~~3 
z?&^u$5Lv2_p5qUmrD6ED$i(dQ^_k@)^440E@ovHtl2=@6>)6OG?%CEJW{0|OKG_deY|iXc97lwJYO2);b&i$9gWm;FZwJlklpM+HkfY!x*cBH1dzwtQPP1my=Nt?(|rj{&-8plXp3uHsOr$RlVwD{>Hj* z{t=kB*5atJ^a(_9D>+>-R%;95hNPJdG>4A*M18egrF0o0Osd+;z?LoYwoX*%Bi0d5 z&jN%5G&yI*@rBg%6Kh8ovM&!*n!A;M>R{a|`;}8he8xIL#VEW_a_?8Hn^+sU) zhX+?KV5B;XZa?ZSvmf2`qbcYpx8zWmAm}D z&V$wNP%8<)l7EGV#iZmW$2WSt6l0Ha`RtMd_H~yPZ#j9Cv(nP5j>zwmc{r6k$;ERK zK6kQAlCqPiIhy!>M^W6C(epSenKJ*bi0OuCb+3o`+Q_xZ{$6YDKg5ZW{MdkQNpLUq z6E!Iji=Q%$+o}cE*tf~jr5G?c<0|vdZ3zR7SN7T0)lCVNx8Y}~x4W+gm_yJn zIYuAd(NHgt8dM@(6E=5~dj6`PQ?%Nz%xX#%WU9=*AH%ibWRDel;qoU_ZviAf12aM; zY_g1{q~1BTLaCf641P;3QbpqLsswBe0a-Xv*pnM{!gDBC^N^u+>MJ$k7%Z}^1?knc z1DeE}PO0>X(Pn~l<;t`QhpF*7eymK{uoO#jM8SWRv6@PV)k|b=EG^fj7!)UQNg)^i z`|>wzGrtqt9%+)Bbd}K_coYkdu-y}*CtZc{1mokCiPfI(GlY7=_BSVg1fBV!ncoe% z+?br!5)glqw*dLz^s`5%DaGZRwbfu@UBOh|o7QymWU1%cYs|+ruL4ZO6la?%^g~a& zHg24yPbVVC)GrB{^(wjWey8dP_u^RXCQVf2|CjFs@~flxgiw8>og3S(hh!dm;UT`T z2sk!uEG*u)pkmSqT4(rKJ0w^ zRg|sxiNjY?x4r9~4ph?$6=p8!#J$jJR|8rNQ4q_bM(&UuAX$#nJ_e{yM2zR>eD4)NbZ=-2sgz(Ah+9r8uxw*X_V4LCx{N-g_uB!yE%bpBseS>Js z=1Q~9Z-uNKA0(1V9ImyTztUI`RgY7WIED51Rb9z(-dk4T3e?pl7>E3t@!FVL8MJ+$ zOi4<<=z=QUI>ifKU$vZFW?>34W}~_i`|8KowndKH2m}nj;=g-tyoc!wMpo;pZmfq>EIUXs5bL zS@NO8F%OG()`maT>W!W6b9D$KAYyAA8kHj(Uh@wUcwOV4y@{Wa0Kn z7jDwLCCYh--@l*ZHWS-KMqEuM51lqoJh{nWu3yb*?&o0nTbr8N)xtKuq29D=%{mHBV1TX%{tL6Q^~*&;gJ{;XqZ6$xDXa@R;Ln&H!7dE(Cl# z`OYH~^dcg$3pFNKsjrNZ7(_GF(#$5OjfzGN4V$WOg$ykrq%Dc@o=KeS*GiugGHKcg z^wwSb-h(~SSIi?~@u+K7;?u+Tl3ayO+3OaI9ZwIFmWdHu z$!`y++|K8?n%qQV3%u^y!`+(KlDbRr4o_r~JrYf;i$xp7!pEvMpL~cv+YU%Vj%`Y+ zL$wA;{P@3n1j^fO%9K5s9`{}NR`3DdiEx+bD;F4WydxNHCGPh~>;h@@Jq!-x>IYd= zcNtZx5r6}VFEVD}vJeaD|IBe4c7X`@oAE*Jy zDK9WNkfWZv-Y8qfGgcAyYW$U~8Yx=>`o7X(xLdcX{Y*njuzG!DD`SA6FK1G2CA>G1 zkK+*lmdd|FnDW;VS_N>oDQM`|&PR`Lk<_{j)K+)p0_qL5Zs<(x@=^5qTVAoO*z`5g zO8vu-&fW2Y=Gc3|r0B0IBR26QKh@avr)q62jG$sTxuNI~WoAXlo@y{zkk{U*x_Dli ztAI;&-()o(U!~qSLOgP4sbNJ64U!|v)$gZ}+3`sftN6OQ=bX^)C<_Q{X06RPYqsx# zEgpf#1`c6IoTXIpAHCrBv;<aUQf(tDhLGuUgz~V^ 
z{ldgK@t6wpk?%VvHar)_t!p_=In1lhLRqx&3N&54Z!TPwpE^7M+1aZhS#`W8ICZM` z2n5dUX~&G;0yY?#m#afJdgYRVjT}!que@FVCZuc4?Zm?{OF*kvK;zjo^InINL2TfE zzE_e`B%p=CP-?D740I@^q?0_Ub`nGVquR~R-{DD_=yuBZDI=w zI`00Q;hbIVHON`Nb=eZ(7-@GChcNw`(({9MdfWZUJ~sciht>kH8=oPSYE-bngUQ^0 zM2~kTs?~4A|5Q}R3Yw54g+^&F`k#t%DdU$X$_h5aAW`)jcBx;@%<_!PzpR&__mD;W zApaufi(s5K?OuGyBxX}J4ski{6lU-;8d-4ezz(5&fdw(x7<2TNaE8jdpIyI4)xN6~ z)yOG!eDY?za(KiJVfETz&Sk)Efa+JDj&zsyb|# z+CWDrfWoKeC!heT-t1gYH}Bbg&PrZ0&mSt2DPut!7IvD^WAOhZ+bCm!0YLK_BP5Q> z2{zs=v`2Hqku#s>UDVWj#tDrRJP#K%su#t|GYl7>LlB~ooM0|n?mXfrjEWH}KxTOqLHpnz%}9fdabY#h~)sT+z3!SSOUtDnDb1tGp3uUMN(! zT*L7*?V%D~Fpg;GYW-m^4U9OJF9%Yl=Eg5;d4pQ|KZ7wTOcw+x=`7=a_xmq$MI}m8-NEwD8gbRASqM3q5~GdH>du`}{UJ7%>Q2Eqg2;N}(%f_5lv#cWoVc0WXx^(Kf zTBCq=Q>9fQ6f+W2@asm{7d(5Qygx{nI)-%t!5Ie1E=b99*yfnzD%%5y)C+)9R=HzB zm9JCf(00(^gjokG`F$g7(6&2eoA$6DWQ~eM z`K|mx@BZE_tJI1MvS(JS%_DLU0!KFTz(In+D0?LzZo(`01-Zk{rg=6YWv@e52Z=|s z?FBLX@d&w}fS^=I@vo}ul6VBNPZ3-PQjJfrq#18JVKoAmtC}-Jz1n(7qZ1UMM&5e# zhhDAUVmY5i7H&{pu@Ny;X%j4~38v&>H8MbMUiS4rKdZLt1S=+nZ=d5eN#Oxv?7=6$+ z*JHlU{UP`8cXM=q1zYOy$0}T~0y)dThLZL_4=uQ%S8jC_KgfBX7MF0JE+iM*kQ>f4 z!;PNPT+4Yq9hTO(v=GuU0n^ONtz{mLW%$ak3wz`T+`mw zp=atKrJ;im$}Y?QFvqApozl}M-o|Sn;vu|hS zw)@r18h8#8P~FxklGdfYS0I@cCq*SqTA)=nrhaRn*>WgLP1#CF5#v{>k)ZJ(tpknn zM_}Fi4Wg5@;LHd4?5*B&*Ab_ZU4xTS0I@5JTpi4z)*`=h`9BNUZ~y8u%gOE4Ag#}! zw3NN-w+gFN?UA=nCpPQ}wf_ZFT&IKkG50GwuvvhEb&yk%w(Ic2m4Q~3863EK5Odux>7&d(Y0~v_&*d? zJsNn69dT^CH0->ol48>NuH2B5lrTh!ftHz)pWi`C#~cA66c1w9sG3#SI3P;P!B)^? 
z4i|koUTjRFnFGxoAUyxgL;X1I;P>BL0KeP0-~3K*$p&0d^>J@^iK}pJ_>v(jr3O1Q z%ai3sKR6@JZQemrH9SHnM|MieTD2BYlL!5M-dziQk^B!zG5uljgd8~MfBOBus~IJ3 zEcM+K8_CDMTDvLE{P&lSrR4oqd>WbTviQhXeK~QnG{2jL`q_x8(i<-D2zQcQ<+mJD z9V@d;%T32fEdyUii!xTov7DWiqs-E2MoPWkz*(NgGKIVB@zaHc1s0sx9H|~Wyt#ND z1w1_6k{?lJ*W#g-#yX+?h@8)#U$2a$dT`w2$;B+BF=(Uy<#v4e0apPbws$J*1F1au z-O{pa)IrUYyphMbtl{{CQ36!NPG~!dW=?L}z0OkzAH^z2#54_rm0FA{gGR+v%wLoN zcd=8OX?L+(v5eQsf{YhRI7V)mgD*`%$-Cx+)zE<+a7U0hi<6SCzQ@OILbK!en+WMD zCp;XQH(-@BQ#U^E9U~ksvwma2%%06^MEIL5|LG$SajbiC$5+1bA>n0{fyJP*MXAuM z)HIe~xW~6rhMnw<>6^=m-c-DncoIv;NgRLwiUq^KcY}#=%@5Jlun-j==?ariF!_Y% z2#tN#!UT? zb7Kcnb9@HO8M2z1t{F0{Zt9RVgPak3!p;;DT$9zcYlxbzNILi`jk&iHi6z?;vtdY; zI1P)_t;PZViRLo3Mp$3*Q^!XCkI#ZIdGo@*h|J9F&U^!4$Dk+R5;`8I z>{whKPEs{f>1pxK;cbtC3uu|K_mS`#iORtC7G~y4CczENO!%1+NWkW`UbDz3W+%cx zOup(~K5|NNId(>sG{DBwWy?n47_-~Kdl##7AfUuf+6Qqsre zouEPSva0LkQEg`MI0#R6#taqc2y)(Cj+RY|uFi-ApO)%QJ}Yl^!;b1=&|=LYxeUZptoFGZrljB}VB{%It3Z(-6n zP5s263N34HUab;Rr)pY}j^Dgybxz0-Q>K#9KAbCA+X+lS_eugdbd?IA$C1c zC+OwEFiX=I0jX-GXIwlMrhCZQKo#SeF+!-6fGyO+ORrkWR#RZ(*hbgp>z~lj-xjw2 z;(Gx4+X*GdVaudl=P_hPs^Y87Bl8hdc!Hi0wrlF8)Gl%rO4i-XOfpl(yCF76{h0CI zN*;kwOrN9s9q-^2fUL8Dx|;6wz>i;Z+BKqc9>3CQiT%zTOFHo(Tu^H!jyJ*Yw-E|M2m&AKUXe7lNzT{Os*SA)D;JihaGErUePS!AYpp{ku{rectQ+@Z$|opiEgUjBRsmwez+k19s@%j z>u~DUyHjHo6_xYmDAUX2tFK?&sxxziZ4Z>*uBJJ2KeLlBH&*{k;t%ien`Ikt^r`%r zwBb-X!B{}2V3Sjcfc|2KtnJEnipP@cTTwKhS;d)rP}prj8|v!671eoUcN=q}#Q{OSF)U4#S5l!|HKdM?T4(@b@Nq`Iy> zI$~&2Iw#^R{pdc1p(-aXPr&{uO1(3Nfp*|1cTtu@ce1iRv~r;zmcXPpc=JhSbBJlY zn2lDsF(SLtRi!$Dn5KDMLF?6yaGy!W-$U!)K(>GT=tD>H#LqoWot&{(R!c_5wcOZ7 zVs_D`|I~AckNlWq#SWID*H;=JEmNU6`L^#kS33+tP-x%@(|Cj)bWKi6Q632ud4ar( zdfWw!g(1IBt=z6r{Gzj9g6l-h-X92~ep`5!E%qqx2~TIFdA!Jw)uR0_!cz}Ry5btj zlwuEQl4Lc!V4Glwq4o8ZESIXcS_EI0V=1F%8VEI_CwCH2?LV4yz;JbDa@c&NI73cn zg1K6xUjo<)Dr8?qyx-%rS>TZxP9^oWD5w0*1hPHb^C#Bra&xVZn zCX*Arg9mnif-i=0yn-D-eEC_0>JnDt@|C-I#F6c3$VXwAxLDqjkAd9avqaGxgE89F zTCw|>*<1tM&%VAd76P8-?q0gZM?!u4rPvdbqxA`7S5o(B^d%r9y39WxbL#}7)o&@1 
zP_im%6kalfH#F$PoMbAJk7Fj!O!PiFI6hUJF{O%SbWC~XzY1Wx_io@WV5aAIjeOz* zaV&+szRKr=l4l|ulSknjM&-LdZjc*e_DcimK30BSqa87wqHzp6#N;MGWOx-}<~Ern zaV?#)jDHGN{s!y&9J55o_i*)##=)xIbeh~0C|Fx!VCtY;myK8Jcc_RQJ%}7iM(uQ9 z^Ar=Q+F$SW`h;iOOE??}t_svAEFPA?eF_lp5oSlJQHGvJe zZ@YL4EMH@2ndIWc96I$KU=(FOSy*X?5Kwis{mFyNiYtK_S8AXh)C9TV9plzTHMK zovsK=+`#OhF1(_RejOcqdwV;=Gbe$sRml^jMcNAL8)Vg?N5a&v^B>$eT5xcJyTe+mxvE`1fYSH zGdhPUH9?|`sG7OjyncduMEAKo86M$WqpN2ey|ag|riCEYO{WFVX-4N=OHD}C^B=Rs zCAZPjXOXLHQ6$$^eVs6p2SQ#+Ec)U8pD8a|Mrl*8JE3R4y-s*A(9~C$d}2Qb-}LqN zl;G2$y6v=k{9nF(#T#Gus3`QPp2f?Bdd~UGG#kMQvSHz6#bA1PR*yA%v_rqDy|Eg1 zncgH`0?oimHgvVC3c7r-@;4pz#~W^zh0RGpLyR76)iE|xBI}r6ereU;{1o;~nv_8e zA8qxEd$*AE-n-o@W7JF5;kiKF=^;s4nt@&9gk5E_-dc*pB;ye5BUF@%Np4Uu_9F+( z_XF`ag7Neh6b}F@0F!@d1r5<+PRot%MR8+zGwD8in=DT#nrl{liHQNwNL=TDc{wK) z>wtI4`=RGqi6*Pcqr&;A+~eG-_3#|yuTk2H+&0cHABCiL1)P`mfGAkvl!o&#fkgoi zpC)1P16v1naHolr-i@Ke9$m|^yzEk|UY&S;Yq~MJ2%Csb+Dx~`uiC%<0W?s~dCU}u3~J3KK5y_~5exT_&HBbzc!elz9(SQ_Nd zLIRY-5Utqcr)7Y+K(KP7IS5H%g1>0~cxZp~2-#8$8$ky$z^~$N41YZ4eUfz^^(Ebc zWne3%e0@#YUpS3^$kRf!n%wXcBa@S%q^=MB!3ijs+S&tcN{{`^homCuoca^u#_(e- zCO@)#K&{MRnA)4gjROADTp(-;Ca#)S={)B$u&5u`_Ckm$(iOE() zkJEgl9vN8PkP9GCSD93@LOwY&PaW6N zf&}hJ)ZRd0ZX)+!g)tmoT7o@`;?%wqAHhRIyLVFFEY&Y(|KqjFZpw;frbwDRRRymE zhJwr#k?`HUKd*@2dfma}2TXGgNU}v$Jbs0VB(}?@pZYbg{~zgeK7g8(i@qWg+y4mI~1{EZg7w4&+Fy!p3;G3Bv%EN$CXJaj7fv zh=h8KYWVIGGe>`uY^Emw%U27)&z1I%zbAK@%)+~eS2OQc(g)NMv8kqhK01Bm7lIGxL`Sk%`PAHB9l zR7M37WV>=ZInZKO8I%%%twk{-N^yHYi;(YbR_Ap9Q|s2VipIvb*tS<@<>hs#iD*Db z3>`ZCSggz}nFo3dj%JB9t&Tb31!DNEwP%gA8yoY72@S-YHrh^SJl(c4WM+(Z~& zhE-{Uvr{}CptH0|3?bg!5PHL$p$66dW=Io^y0NcMnVyN)FAB7h#`YArD&hFi-DVfC zrm=MGhsmdVRF^~Tmiij@3gzDJ?%I6u_xz7=LP>{z%Kx49jMth7l|0}1&gV~eO8p9WK#|L`p; zBeRGo{8l_VyYtbZgzV{nxJKloh8oNkKalF38_<)EE$?0US2@3qRs17PcwQ)XmPgOs z%fYkQ{+^`%AoZqfcr9>YfvWtV;Q%!@G?QBR0K?mr_2GL+T!|HjKQr?Wm0RfOQ(AXl@SF4T)>$C0UHcE(Wh{brfNE3_IlS`QPM~ky}JIr-B_} z*Dx#gg~-!Za~ncXehVfvAJZRo(k$7tYEWXdXdeVstVR4o>i!52XM8Yku4EOZ^(RCF 
zFNADti~xzukhZ+V%SRslg;KWV3Tn-{dlZwICKn;@Jp0z+-gPPb!Vz%Em6cyx-ntCZ zVp!4=qFGIF{Kg-_Z0Y9xus*nT!dDkxJj}2?$rKwrldgCUpd4PkvRA?}R60o^qNFzW z=BB$@3albK|1D`1lGK$7i$P_r1V#+qCUP^{e;Gm7#mo_Y{jV0yf>|C)IQ}4a&+Xdq z@m;7L6)*>;quOynQyp(EL zH-bkDEDoD|Jg*N0MrVqJOZQ$9`2WD{AOrEsR%WkhQy8*7F-zca#*(I?cH+BgLF%8) z^GV^xcmnI?VEWcuB?%ZxQOfo2=3d?0Skgt;CG&lH@CnL}Ue^=7p6{O#Yo6SBk1*vWqMCLyP$p@}mI zk-b|^G?#KT`;oYJgbAyWd10Z!9;4UbRr4|PE^K8mv$rpzq@sG!3pRx&p=*HO%-0WY zYr-TsqM?6z9u$ z#Qvdk@V86j6L`Du2_E5zb#blx-nJ>9xB%UK(O=&^7Iu(bF&mSJp|`Hoe+z#9xJfB> zL<5478bxo5R8>`DlYbm&Ae=Tdro$fCdpVGjBdE%)$DhGzp*{>Af8Bz=_q;<*>%8&9 z0WJULvCP|P-8V+zV2DB#M)SgWpI}KyYUt>&|9};Xp)51pp6)Jo>RSoZ(>kGuBFwQ?ImcDJr{E!!uXNc{)>6$v`T(H@B|^t zXm^1bf{XF=Y{dH~d6hNp#|^oCIS{0h(4A1(6vb)JojFB{#{^Hkx&LawU{D#a0k~s} z^PNX-ueE4_?CRLiJyIyWuB}4xX6!+o!C!F>Q$m@UQ7S!xxV)kW)fQuGRKnTSt4w=* zwEw(}9i%KIy?ega0+S(JWy)Urs5|nIU~Z5U9A)}g0KfI);TVO- zpP8AN@uqKxIoAiatnzF0ACVN*b=UuTIkZv%8OyP}BdIao5>p<@cRwq4Sd!HFgXTXM zkd!bW9dye8$p9gNdB3@XY)PH8A=dI%xHzZJw<0Ip&hPB(3rvq5-E%)r17Vf28ea6hrTBon6yJJ6k%0`i=W9ITNitnK`i0D>KC)cwGUd~C?X?Ng4)vCRD4}Q0iJod z3<+=R+d)2owqUM$PCEY%SpU|~trCT(enqic&Ieuz`-%1kc&X+s46a#1fv+4ULo3>r z*$QY##aW!7_ReT&52TzeGhbbKGWQug?=j|EleC8H}P>>Lq$) z+V#1=i}u#sMoyhctC;h6+eQh#u&zth=pnM8Mxj{D%SMQl#dCjD+lF@;Y_1y`(ew%J%uljJxUGLbs5YB->g;g*320t5@%#~1_ zunVT{T<~$Tq2T-W&&!s4n%OSIczhMx-_o`)!+pEHpkEI!(i8m>;k;snK8#*P}V4)(L8NSiV4eb-6x&Jr2ICB?ZBO5q~f?q%3S&O>Y8nvq0HC& z^=Aie6Z0cHlS^C@9x4czHH%C}fnQEl`v=vpmpWBxX_)dt(c2Z*L5*W+3uB#Zsn9$Q znGlPrNUC-h9%0Kg+MB_Ck6isL*{#-B zK!f(eEFa39eejHwoZoUtj(@@_D9f2Fs8=rumK+C>_ z|Lig&D@~G1G<_L5{gR;>to1o~nZQ;#mGZUxYBdWl1oRWYj4r~5c_ddof7!<54f;B_ z?J61LgnWK7>su<|ibQCs{Iu{;|D;#@uGMYR>uz`b*2~x5<<;}#C2gzs7mFu~kVEV5%gR z*Ymu^M-Iz=^|y@afAZrc=dLkHnsu2fdvE=S(T{n2MM957@rVtTm+ZW=&2O~8;=YT* zJz5;!G~VqzV@?!yw)iQHY9-8=Xrto!kDAw>A-7u?Zn7!@ z1}i4(`5#NqH#6J<7QHsk^`dnT1esJ9pKZ{&M6OpH-uz}LQ7>ZDL*%fD4*&Jk`PX;! 
zVYJ(34YTUm5siM^@%flqI}+*VKx$tHW`Xll(o(8jxh%YTw`wiJp6x$V7D zT;K+0?PHEx83a)-Ms3!(II3zn0S5cs$ohd1if) z^DF0~o6g+_0vcX7Tn|H+p5P?gC-Mi2dYxg@FNY*3T>s~uIPaAUDme&Cv_$NJN{sTJU-Rek1(P!|woon^aKV$p9ke`ZW|Q<~s~&TBFpbDYf(xM$t0~WRHzsU`gkr6fHM%J8hceT_VUN zj(JPlrGjqWub6&kos|azHBTBiZgdmD&M&YU8NK4~dmJAGcip6Cns8s6oX>ZeBI|Sm z&i1@F7W%QV%@fPW^`eedn}LI}wiPb%D?#hPQ>V7@0tle&;p&6>Q*v?BRGde5T9UX` zZK~%s%!)kFNnRUqcJT}+Vy%C7d^{&Q_TF#VA=3WQ4}@|M$eiKk-b(pl`FTO#$~-teUv z60TYnkqw0Y^2o&dq;ufD$%ToSM66exD5Po$PKshsfR(aM45q2>OxTRridCJhBA+C9 zbq;W76e`{HTz}wt`oVO5PO&ZmkVHP$)U4WD@70agH8Iz;8J*ghJd|iZVRUEElD~!H zAIYLvB+nvoG~l&RO(f9@lviA##s~I(x0QAet!GId8&8ksx?iMv4~|u7D#wfYw^ZhGQ4#N-Yxa!zy_M2-RtZ`dimG4J7NfzNR>c_dw$^}--hCU zgxUDn|1p63p9EHxKDdjWN6WQN1xnWLS`U*0+toCy!t<;=hjh{k)@$au#1SsQXLardF+#tI{@S8QeG@{zGL}&iQnkXWeaCRZsr{cP*i*Jai#qR; zL)SFLE*y$lP@j$fsYDs2!nOui<5Erlb z>3On#i)>0{D0Iw|6RS~rLs1+{+QopYb|>(w*G$&PV)~@$*f0#y5?1x>ip2b-`dGt% zzL#c37kf$6DIh5bRHLR%2GqL$``KC=@V<-BJ>A+^=xy)c7^{fP?mq)G;b+C2cwVP% zOJ{k?<5-PAx>YaSh#r2YblvK5hxr*CM%CpjAJdoL@kTG|M@UH(VFASndpD*dD&Gary%$VBE>OjErxHk0@2zvsos>Ualwl_`Xz7zMP$ z&ab8!wA0A|fN-l((1`rr-Wx#&E@OuDt_Rr!r{CzvEc?61aGgr&y-r9SmcL{5&KlGx zov47J=;JGZUF31JzN<$s9VL2W!Z5S#=~6O^=1;WM|9hdKfTti;DDDWK@MO~^OY~^Z zF*POtpYR@fCyLj0id_|cvC{RP{P2#iLTsfdxduca9aGKLXSOF^-zG;M4!2$nx8%dsheXx}1h!Ek$ zpa!U?^6t8hit!s;=<*vxJy-C%s7^J#*}v@2OiW zx*}@8c>Ud~!AkY&OJ!x{YjVTiC%Ed*I5%rfe9)S1D89bNK;Wl-T(5C^w_6J)4@X{Y z3Tm(CACuH7HRDu)$wh$yc(_O!iwjh0L+mFvXau4H&K!SqC;#Yk-Vr(!01aLV{o0yK z`%z9ULz>w4|4SYK%Zd{I4XE-MTe`=Ib8)v`bzAwk)h*yBC)VuktnHK&<^O<|-Cz=( zUB1P0wy|g89;01e!C>SI^-$Lx#lM_<*1fya6fV(%QuYH}abeRQ(@hQ|SN-0EOOf!2 zj+Dr_P$XI2QOP{x@RCoYwt&0*(#&jSrny4KA+a4ZZ4>|*q9LVM8wNgfUtQpYd%#FC zn5>D+s|+)v_%TLBLnC`=qnn{!!P%E@Nz=fLM8a-D;CP# zS`%#RGt>AsfWv{fMjButSc>4JV`Bg7Y~Y!9A!2STbNbyb#9<)C4}Cij12E|m7uFo@;ivYf744m_twB>ht(RsO}Sug#}TDgxO zKh6Umbp9if>kEk1E-cnOuw3}NRJSFw_+~<@*F1&Xem&)Zj!id|Pk7@`cirGu4NP*V zv2J!A6<DousdlL8ERDTargy&F!~sP1nX*7SG$wM zWIzyLQ8lwTZoDz!nrbdwEVr3qJe*h&Nnb#p$S+S~VR<1kfGkPzBHU;dgT*bS!*V9W 
zY-@V)d3I(UFTfP&Q%szwt0WiTHbO8QbffKnc&L4N+Wj5{%PG?&0vbhnl7A09q%R{J zKSpr=!RC1H`VIgYkOI>R5&E7-@@kqH;ggxrVLcVCa;t5{b>)T%A=@eUqpn|GCzFQ< zm1S|}Vl-m!oFV~Q9z28?nG7)b7ln}5LXpPAaonJ5ys5B)SHd)RJ4RaPsZY0b;i z-;(TZb9jCzarQ-`qq`%nB*SG>uXvUi*~SXc_WdDUwqjfil0ZGNfGLK5u>@K4&o#(} z-j86C^ybD2eErXzj0>Qit^{O8%3 z3x?bUzF?8Kv>*SuEMGXKW*tDc(C>~ejQn$l@Zx*V$-!{iOOGG^^HB66qzTmov_Nmf zs~PAq`yLTkB!$8Ke~!OjsMC~(B;-t19i}mF{yE@x@jWIfut@C7KmNJ1a}kd5nt(_< z^l06|3bXIs1uQ(4I*X_T zNsVj`olljWtFUprBZ7Zj*JkXR&7oNK(u*R2FY%11`1o!PdE%`q=2EF=;5NS^Jf%1s z557QKF0$#xo+#q-Wl&xBfpy)nUPSIR$mlHebJazI0t)0a z^w12JnTJGgsM9&1Oy{@`9qc?<2D#CS31_r%=-%_SO+(bkId`w*2j|0S`+VSXX8#na z-n@0M6%;A0J3rHRFVCwupi8w@6An&b!L6%AQ#K~SqF-)k)2%jc(UlQ!Xgeu+$yv`XAP-nhJk^(|O31a9s~zL=5t zKtF7Jpkl~>dI+Snngd0nV8!nL{TkPn;)E^N_VPY1_$x7?tLymxrXWw}uw~`Ew^HsX zMnCY9M-sdDfQ)hqmfO$^3Lj0B(w(@KU9S31hjN{dqHxW4`nm$g1k=TR4>!W`(3jy#+}NR-x= z5U1*xRCq@2Xum>Y{gy?~(NW8$6=BeL)`9(m7#S#Kuga@6(I2%#m(dRv8M|=@T;W*` zZKdcwO`c3lET2|>cewfq1?PCUi>4ss&2@`Vo7`hwoac?eL1z{iZ+h*;T?V0q7`7mnMlor&Or@oop*4#upFvbBQ< z&^0POWqaO#Xt@8OPF!oN3MO7FJwo$u_W9R6yy(_Th9xHQ86bo0bE2cd_R*5Ahw7$t zkGMlC7JBkNT8q4LBYJyhpoD9TZ_<9wOcUnPt);=1n?(ZJzU)TV6Q)&r@R}4r46_ka zK4RV^)4p_mG8R4TO#0rOo=s8;?nx%PQyNmcCRHt?ZCh9L)v@^}_WF%nxoffY20rWJ zKktiZHu=RQc|GgP3q{oQNCj3*pFHoPcP--+-Hv^1TrlZuHjq@spmU4Sj0L3#CF{#c zNGjE9U(-tw*FjPP6bDUklMP6zmbblte((LJ#{YMl(3~{K9u;+M1dwb_D@)i!QEo= z{8M(d5$J;Y^9N~ggl1jh)*GcM4MrP$0*w}&B^{@Xn!}k)=UiOEHCl=8&ILN}T$>W( zg@t03^CsWXo3$q_=EHDn?hUfjt$wb)a(PR@#5KeBFe!YRF_Hx+)HwF>>X_~ZyBmaN zsBkTkbY~!}XyxO(H*qG2iCs+(!|-yBFuP%tIAoHzq&XIkrm7#Ih_90m+w zu6+GpS#?o_3~oDiz2vcA-Igt-@S)F&mpJ>yEn*EaYstt=T-T?x<(}JR8`KnCrfe@L z`ZX{-_iMg{Ya!%nb|C{+VH_e)DIUrh-ex<80Q!Ro1 zRW)x*%}H0T7l#O)YLY|5ZOPbV4y?x0Qw@YOE-NdmQBdubGB!{UAOZ`}lTF>Is3OZ1 zcHAHI?dF3ly}$Ku5};8GT;*q7=K*Q(D1oW7@6pHdft-3Z8g;*R=w0U$$&7uo#f(>{ z`xiD>9?A8$Q2i?#{-!L-dcu26+4>EwY5HK zMh``zDn>gvLAKT_7FExcjNYV=*}-V_iNXXiaZhx9c-n zZ);t}Uw@Imay|PoynqguC@HJ$h!v8J);2L;F^!)QUH@L20kVmu2Ng?tZ{@|0PGyh4 z#GHi!b1zvpm6L>_uB@51XpYa8K}Yp5(}iV3$Hb^-b%%XoM*DSa 
z!8hlW{~xyAJ1hytdmqln(#p)rr{Sb+WsY*?f=o+$%u$wGv0Nx_MH5siODjt&%Yi%1 zEtq?RWM<|-R1{ojW{Qe}iim)~%kv%Y?|rY&?;kGy;Ns!}&N=sepL6b0O-gYgC2sR0vrY>8+asW=nt;)MXnUCmox<~Znfc)HXH zTA<_3tCJBz7Mh&$FYZA)LHyb7+3J1PmzN{td%jAtRHOIC^*vR|YT-w`ulH*sSkv6_kX!gi6q@aL?cjTDf~gJ&`t}ZW!R5$jTFO)-jr!t*yQ-aoUD0 z$kz#0uA5fAEV_R^=$x|w7t9iH&js#_&`1QcC5j=d<%@5?w~;pq9TYVd3SA6F z2}QGFdsosXZtyk(=f1|WtM}U&F`rb0ve_V&o1&526&S2hsqw9j1Ncl`WwMv7SLvco z$YXsOI8dO;kd{{-rd3CBfi(gH=;(dE`E;$WCMFhF(Ov%otnSyOJ2g$=sH&-b5E=g>ydj{PaG= zTfu(+5@tZ_NO9z-kXqk3m5OQ0^+>XJitZfcovd2hnza%7QU}Q_9|bnk;h?-Sf*N03 zOukEOaS3R6TiLesPr+i48C4EJHhmEEr>L~aV*eBlx!`WbkOIVQJ0bAIX>gTsWagCd z3W6m1HF6$Ongh4RKU#}spEd2w)XF}D4UYRZnHuBkFF|G-VoMgrKcxn=3`&qJI*!F1 zZ8@sRaQWAmND>hHV=8sqi>@~uagXUdNPav8{SIr+O^l(#W;Z)D5d13koMkxpiU~#~ zwPch%P)}Y~g(Hh`05}NahUU?ORkL`%W6+dlL0l7CGb%^zJFkfGD?ykGBz{TAjV!$E z!EjrlKF$BCkg-K$tXDQ~La9?wm?Kyr7)3}1WDq=&+xnd`QyJ>vcvjo@n6rcIRx4rs z<9!P_%hlHWUWfyQ7u3f7r>v?-Fv6}7A{M0)ptas$oFoI36;zkojGR2bl6x&3wNBP< zrU|%#T8`)H##!_sO8s4+;n+KdM~mk9cJ&umJrzm*Wz@~4f8p}endRaaw$b^ z!;xjUiyw3XZ3#DSiNxicy@FABl&$Sd%QE(QnWcvOTWR@w`oPY z8TJC_>kpDC(bI(=+1gy?=mjx^WM1p+yw!!?%@!(JdMu{vcf&m*FQUqLl_CSYl;;af z1K+~`S;`k(AzDNX!J(M@^j(`Z?4clqaj!MAaBxK)tZO=pd+Q@HMk^Me=mq4NgQVux zc?%6$WqG~&?CGp<{;f~E_Y79gv`kb=KIS0y=rG|LJZFWX%2FD9JkLI92i?V)45o$| z6Ij4o6o0Psi1%ff4!<(Z&rP6h41zOwq*efm_r7csahl(U>l-r08w-AH);5t0-0YHK zfE`=B^c7Z73qeN!yGO?5a=t7f8H>TG_y42lUd=xt5+7b=V`*KJex`SwC&@-dO@bb& z6-BNGy}c{Devtjn_ENKX!#9pEi4a^-`rSaFMTR#holxJ9W`C zd5_gP;QewPB~AB{E_5?{z$nPek{RM^`b3Ws*?1aH#JU`9=8-Cfl;syY>XOXCN1q<1 z81UN+mDO6KmyLC={IW?=05acunu@9LzimvDQ*^}e>rG>U8PxTp{&yo^61N2%4ITIn zPwC8scoiJNW0Ix&btcaj*`h#7+#-PDN)+&;m?~yuXJ<#B0H_8)rtLLaxKXbQtomTn ziW;AIRM>Ye36uBSr)-pPV-{m6-cJlx-A{q;2%D z`+0%D%diss3!uMs(NK}^HZq+mmMGmo+~9_M1-5pJaFi4_p^&VA6dx4jO+P`ku(fPX zlohxP8To+WfjS{?KG+UQB_=w+b1YE44zhn4!B!6n9u)gfWS>mqDUstha-->U0})6w z@F}kInxU}%2>EHKI{7M}Po`ak26ubY0=|k_ut%g^5X=F zyc9o+X*6+0qJR$#mq&>!fHPZjL5_fLi_uYTR-&OPSU{l$Z~}NbikBZNgdNs%qYfU( 
zY14y@o-QNiznC(tFRP5TXVgdF>XQVy&9E)9vh-JA1XVid&Hx+wgoyYn_G{j7j&4j* z?AIzH*(qG7Iqc8ss1xhNpX=#h-x9w^D`k>FQpy4vf-py(JhWY4Hn~^(bS3ZiHE>X_ z5)W`>+nN^pdkEXoD!RCYoHkmrwKHvqw#Y5(WjUwy>i5kB8ImFdJ8Bh(DgVNr8i?L0)DMfRv6vqS^rGTvW8xHSi(LY)4i*4Bg8DE-G%fwZ%nF3rl}RIScb2 zczlDIfoiwe&m2~M%R9DlYYytZ|H1^uZ{Pc|pXQ5R(*qxLwTviUnu=m#9fkrIbbhBz zFzS(Y`Lm%p0-!sB!`V!kn~X3{Kbq1u_4>zVa2xmC0D$)$U!-G1UaJpXHFnXed$lWY`-@0G&>Pwxm?Y2zos(JzO?(H`>Um zNYF+u#(@y@V*2_bjXlI9RI|f^c<4L=zd;~&(+Wdvo=TkKI>Wl;$b`9C=SlcSZ*F1< z7yEepEDTO)!R+Z_c+p`Mv~8Y1luH*P*PCtHF;@I%r60o9`Ry}@8hP@&6M}-cM zEyogsVTLvOhD(1P;*wXm`EG{N`xfHw@sr?z&Q4Ik8sMpWI48O$p=|IcOb~y_p_n&B zsVo!?gUA6;3u_lgTI#EweO0T7jkTEd|1uQHSN-+aJm=6(u?RjbQ~q0Bu{1x)XS!Xj zqQ2L7{W}u2_9_RkFo-*g+gdM|v}-}q#mY#1B|2gd@~3d~W@+8I;eVZt;rf! zN=B*lH!t*vt}dBZu^voqa~g_GrI8Dty%mmEt}u@*XUK(sKh8g3U{r|JB>1QLznZOa z55a@Ohe~wn{iKxyG;8V$iQTM|VZRREYuBwZ3E{)Y)iT%WN!C$s2fbM_i=4a2sy(OE z)$JY)fb)1tL*obdo&zhixGgs{M&Hf6Elv<;mRKv=gX>*1HLfbNgGRlI%M>n9^j?sD z*||lHem1=hB@6^<*i|gN=qg6eU}S!5h-K20FVXD7W2^QX%;?jK?-ndcm%^g4Sc}Bp+#A`+}~paCaBj@znamjvl^cQYShVU2|UfJ-)z2L zq7m`mVeuCfX6WgLSq9L8P&?c83V>Aioe#5t)S) zLGrDk$0=P;AMmX)r<~)fX>|#5(#}o&Sa#DI1k6zDS8!srt!~A77UAzy71lnRYOTA` zPP8@}>Yo+vZ+f$Q)O^yU7$0y_mZHEy6Wy=)J!|56!4 zdV9qToFXvJLE7Z^E^vq_%@yA^i(gizjtVH}d^d-M9rxAA*d}QF1bC&D;ke#x1KQpq zuEf@~0sK~Gol_&inUtPjMN6)JYlonZQ#- z-wTbm+Q=0}`~l5^DTgJP3OBmD5ypE<;76*HnXdGX_2{aW*;h0kql0dcvBG{8N(=X< zvpZfxlXg>DVMnSsW3lPBsXGdgIc#t%SKyM$05h?jzfBZoqy9PBH>*uQ| zQBx$+mrt}MpEfUA5Q<4m{0+#EZ>{6}fTWsnt5SmT5$8pe!&0 zkM$kNK-P^}@ju!a3VT5=9)dMM-;_v8ov6&qn&)z_(;k^hO3I9ET#-bbjc`lN=2lx( zQN%ACy*oTNt2_z`d=1U;nc9#-#yx+AnQdr#T~qW!G zrClKzewUrVh2wtVh(dIm{$0bo1XGsDn@G-=dH)Uq-Rv7kXU_rua9kH^e0C!Vv9f{U zH*s=?Q8(P8I?r^hFdiw`EJ5u^+&6a&vAK|Q^% z%hvQcnphl7FhJf7$ytjZH?TROe7Ou?tWc%VyGcKjghz6#C*0VRaUwdfwQ;4Mm{`iy z)Is-B*+e9AAhF50@YQrct!_KI9@T6;4cahyMe?8Zzm${!Qa7O99s90`p=}2)i2F)c zm%X5E@b8zFJ;q~k3%YfYno<3HsAyk7GY^_ij|YonwVI}MzXW~JdppZE0v+sK7z{YY zaBBy%%^y7jS0h_bS4hYhYgE4`Wgn3~ZVvdqWJ>%T1@u5-CIzI^R~=ot=0O(WyqSM9 
zESSs4`ZGtKYN)N~T1e_|U~~mV3v*3+wa+b~R5t+@%82hKHFc-umxkUCF9p|#&HOi2Y}>31gn6edG|4N}rYcQkq|6Lvkm z5u2v~W2v>rCZZX+pU>V3rt@5>(}3&ZsX@6Y($$4DVq62cCZ<0Vhb@{35P!ATmz66yKtKL_ME;2thuzL?Ev+x3x6G2-VRsn2)SH%3zfzt?v_rn;>-NXv zl8X=&j}lpl?eUHc_qk)?*>JZAwi-9{s?g#pLxv(66bZ5dn>J4!^$lM^zn+#6I@^`9 zyPw*f7$n$)Hsr+!K4gc|KSdsfS0UyLCRS+H7*#@6@nZ*YeafC^?0tdPO3GSE^CdYX z`lq}K*Z+|N9|<~h2dzrNH^`F-8o^51;uPkWqrCi$FHNe zx-;rB9r)tA*iK)S?CJp0@K$Jf79%X25;OBbk5anQ$`5(w;2xD_#QqJ992UkO6~gmn zY5$dsBPK+kQ`tB;6w^g$+j36G)w-h>$he*x7gC9hGz0fK+tD&r0J6g|Wz*u>arB?Z z|1Ozc6J3pLC0~qqq?u03MFtMcT5*%ll;Y{3KuU*4eZmLfc$Cck49Hwg@xHoj$-$9vZ18p*ruCTvU0 zyNuue>DxuR#uDYSApTuZ@jtBg3gCgEpu|AkqZ9&!D)f%jc5Q6*yU{a}c98$TgfgvpA6z>K^ZS4)XL|h@kU=-@X7u=!m2rcIf;_SYv6DT2bC0G z{B(KW2VpjHab#U2Ta10ag$HqZ|7c~eL=(>z-^h)+5m0?zlpXm1kxXZzWuJNNNZccW z%z>r!a{2xdYd`=fhWy%3>?%(8JTC3LTd}AGDt~#f`lcw^N3iNw41{7{KumKCe^PLo zB|?cmp0z~e$Y+cOetp}%RCprG~Sj=MPFnS==RfxBG?T<|BpA2;)^ zBaWA7M^xqj&%vw?^2lOUflg4jNXk#up!W__54&EE3+A&nQkev6>`}kRfamfNt8tq$ z2d_zef3-ypmwo_cuEbHnjoa*A$WMoq_Fbd=Io64a*2QGq&c$g!=dNoJ#?SU2m`C() zds@kFg1c^79i!C@9!PM>6NtSK3?@g)e+05$$x@1JWq*jE9^9D1{Ob7x>aG3MQGwbH zHXp(URTD3X%bSg`9mr?gWGNA3cB2cCY>Q@h3RtK7K+swCDPsMN6rApqN{*-NG)|HP z@*je}oaW@UZxhZ4j6ypEyr2ziK?~bWC@;tVGT)%HMxLJg+3q`b8*9$~_g2_j@y)4` zr*~x{l6J)g7LV>yG|!D{fk3F4RQEJ!P)kRDYFRVeF*74kcGb^Yk2fuTqt|7ZbTUz1# z*bVlnpi!am&$(>#c!a<=zG>ymhdBiHCEa|;_=&LDiam!*NtP01kc8Ot-}wzlbRjvM zauhjznSa~)1h}FFaMKNzH}X4;DPGQ1E}R;C6F3<*=KNiamd;@x7f@@*n{pSZx!!5? 
zGhO@Sj>I(Hd^kY%{m)q(`PxT_v}RFOsMRkx)qh#0RnH1Jnp0OP+!eSnlOUXnoHX4k zg5BulOCB0boD*!ODrZl5`2UUgHhi(zO|=+Rh=IZAa#kGm{Trj#?m3V0= zHP6uH%?v<@I#i{Dln8yaK!nT=UGWcE_Ssv-8qqK=U8WxQtJKxsc{^Yh%@Uc~QNjvc60LJQi zuPap&vrS9S%7udRJbO1<)(Ppz*U#xCfRI5yg0b!tDWS+p4-m6a*1jw-^}t3#td2V~ z7r{h916hyqgDCS_dVV_g>+N>doN%mFD&%~R`lDI%Z#(`zym-K*==SnA?!LS458T|d zsCiEO!1dFAi@kkw1FQ(o#fPm_O+$2mDlSG zUq&l_m*1Z2KYh(yn_6SWc6!>`8s9g2C3nZQT74&Ji;=H)OES8`nxJAzQ3I=?m#MF# zR=T%&s&c&cZ1X0ln{Q6MJ04)PAkHC|q_DgmN}+Dh`sRMX_jMxN6!Fc5a*PxUs(e?+ z_V!C^_)w%O#Y!yf`EY9|Udtz3v zR8-X`nvX;UxiYfigLFe1wx?|0@mK6GNAb-k25Aw9*;CtEb z0NbOTsKBC4|BQ?Uk7pfLe>AYo`poed*>`M65;gvJh^g9jdVUKy+hR=Q(sXpr-VP9FM5I2!u>4ccPvIp z9(!EoQ+A@NGe#!2&S~+&hu~_|!d|yxt-vDk-IYeK(L|SErURY8T0FAZBy;rY6^j3q zV=dJ0c1H*vNITl>ye(&&e0N8Iy5XewW3%LDGkyg2v2NEWt-lkP_^d-6rjwc-S zRB^F#!QCf(>szk5UpSJ@<0oc?r)=bR_w6je+m2M%yztk*Ba^xO?9-YCN>cGf8>o;I zqgfmDOzM?p?ba4xpLy-RPY;*N1VQ1m78213Xa+e60Wg|MLuW!xBH z(BfW%0*vGHCLZLWHpDwK$x*Ee3`;0^FmY1J0bW%=R&?&mKj0gi?SOZUu^TaUdRiaU zH{iQm{G)zi-|i(M|9n?rlw?43;S2vusRVyUU7 znJF9c_~wgpyo*By3!2O;$tsU*YeQ(T^0bHIyIG0a+<194!R)F3jcvrteL?2qVgXA^ z>xEmOx*o~%+!XCMq@U9L5NT#WzH;_OoV5XS4}9cm?FABVy1U27KLjwy+o$I9E6{fS zj*@vTUTy17c&p#E63N(IU3H>-3gkn`wbJg*JA(Xr(>bs8g}W-np6RalQ#6-;*|*0F zlgJ6~Z2*|XH`hZ_f5DMuonO$yao8K%c^^JmV-&toBo`i<%pIix8(TiiOr4kC9VGb_ zrw`5rM3y!?!~)7v-GejRfBU2s&FduftD<-hln6#DneEr+L$k_xw~3e109y-KVh#+| z%Cm`ASBlaK`GM33pfkl{l~eb6Tb?(uxKz7_dTjmwi~e0h(8IA9j9cUFh5%tZcN^5% zHg(QJr(gn0xEcliu5cAX&vKkjIbk?Zs!3HK0QKU3M>M=e+r92gh&T&xd+BZ52Y7QO z(=Ig{t3=?qntW{DZ~5c1{1ZmSbGAEPPoa^#ZT+=u7_fD>A}kdWIhp_J>7e5k<`CJD zasOQ?@KUmXPIQuNXyZhR-DThIi_g}nso^h=)Btub#veeAwK9&j z-u!~Wxy=Lq9s&gKD2UUq$~5r7Zbtqz;vY6Qc(j^IXnWs>&PfM68Zhh0n*KFxFww&P#t{jj#w7Wc5d}wD;xN-Dd{FU#qLQ-ege`O*^h>t)8x8 zTQ(g}44L>dToU$g2+j*y)`z4%b$Kdf-`g9l1K3323v4njn{L=7No;{ex$Zo!MePs& z!SEE_#?b<1L2*LP)juyVeeXh3izjBa{cJ6lD6!{u51=h(r@vhXztp9ccLo$YW;v03 zPSkHJ+|S%Z0U~zXQGc;Wb|sF+6L9!|LfkOTIYH{GU)KtIj|2%U&JJlC^tlEZb=ew` zI|s8{Q`ZE~BhaC)hxpkM^Rjf<*Eg5ee@rkY;_;OM#<_B*0cx%Ru#Vao)4xqZK`ZcN!5%`cML`7ZN 
zrw*N_F!W)svd)0y`Y7SiKUf*iXBwVyYMWF=CfbrjQl`@s?a(Y+F&oE@~OB~C5tL!zZ@4?eaWS5yk~o!@$E$D%?;OZhE%=gx-&8dfbLWf^@G>!y3@RcCGQjvcO_0h{Qnvd?5ebc?QI)rr-%#Y;E zGx|Iut}@Vf3S9l7t}(TWoC?S9P|=2y_k9v2PAoK8U((`aNUZG?mBM}O|0#u&>p5`d z6`-@&(_k#GU_(K>$Nh}fl^d$>J_GuqpSF@gcgq{Su%gpwQYe01cUSvxc5b8E_k_$Us6QYA*5Xc{4mTqwB*+4*p`4HfE zU$po81!_7myMay^v%As4{w#=9Ud$zIe>oSp($P+88-CZbY;&Q7rl)u%81Z&YiguQ06t#C&4@ zTOUbc*#O*`S-9a_W^+(Hr|q&9Vo9^)bXcVUb2l5cfOT%-7_d+|%n|+I?ikfaW8Ea$ zJ}6~bnZQ$*moko<h&(%dO~6#{%ZlnFT-A6b>vH0D=lt+f~8Bc>)QnYOsA z7|K>0VvHNo7XT-qU;WPGglLJdu2%T9ro^{1M-H!uXXP*I1&^KetSnvw3O1yoRy}1X zD&%`DlPfYyR}S&iL^J?Funfsd5kyh$4K4rG{!+{n$ z?!Pj8F>FQto>|rz()`JKh`l%dxc?C?+s?k7-pPd&pLs)-<3n;?Uw~n#2ayT-7`NOQ z`wg*%G#}}ztDUI!$1nT0=0$w2#%o0@yjc+R-(ic&xl?=Be+~5A=0}_+^&s7u%y_Vw zMQzES#VeARbax?e9Ind@ey)3dd1vy@ z=%-n1_ud(_MBs8eSB|r6m-FcptHW3=r4H&nEeqAWIVb;mg#)$aQbvMnYyV!lGQw!O zKCT;_pA&Q`#V`==l;yGi!n}U>zT>z4Ono&<=Vl$$C7+nYe$clrqNmA3h9W1o0to8% zZzl54FMPI#F2Z~+v72Qbz4u48=y=XUgOz%2REg!lt1xFih`?4ZyDxBWdZ*!A+QwQc z_(QsT?&3hxmVmLMVB@h=HZmSe@iW&4V#w%^vxBG#1_$4 zwoOv48-~1GgH^(S4tWrC$02?DQ!DXalNHxdIQ?ajvD&++DKJE+2^T&V-^`!%%{g!P zV}F==(vOpG5gA_DHXF*G-;?Yn*$q>k`x<~9IrG0MuM5QgT{-%9k?;RJT-m>C_wiQ` zN>KQj#dzu1tFhnqs5~sbrTu82uQFe2e-A2LTYNVk{4-f`*yZJ6wcnR4D!O8t^x;2= zcj_z(-rVdvE2UHWi)aHpblJM;B?K_~G|Qcam)te}G)?!@;WEV+{Iq?%kO>dvM(c#; z{Ll{LKbMh%7&*-i?0p~b9#pzhy7FJUW%cI|v9#2lw4_AtC_vj`d=qXH`U2``6?-pG3VLCXdjtyfmN<YDi-M&!j` zx8r%wB~E<0*Ee^TdF*Ax|5rvj^FKjb-|ZM^V4sv*ft>toc_da)PXRsf&@fcZ6jeG9 z^Pxl~%vQkN^lW2Lb9*y=L+RVv&cc#~V z48|4L=NDw-7IaqJ*mVsUCM-j)GDt*ms-5f;Iuyrh{Ypw8J{Be2K}_zH);o;K6w_Us znqY<|;(cmz0y5g}v0b}37+4=+!~*p7RwwT^S!*yhw-Ql)3Kr}uur+NI;vK*d3_Dru zl&DssX(#o0orIIH#t( zu$(Kys`VWWx9Qwgf;ijn`@ih44`uBTdwP5Mt%J{(vxPsf0V!p2Cw;8sX7(K4h6%-@ zn_K{c8ZYXgZ7F;|Af^VyecErP1E(pcr z!i`FJ7R!2W_8&L;#)!3X1n zK{6Hgb3TSruPL&fxQzo>Gk#u5F9kcBiHheyj7O;qE<5qbOigcw$z_zcWi2Qhb*ieoGp%BA!=B@P$8%uQUD5{LXKco(Ix3saR z;YEovFTQ{Zqqj049#5NJiBzgLx_J4A{S^Ok=GX~(ysz{VlWw6c zKT(Xy{_vHJB$h}FXfqc9BNH)cpX%!7()6o0rQSiT0f0UG`Ww*4Up{ 
zrXj~Wfr7e4?fT0=8)1W&2<+^hESwl3^r)w)O`I3=1fc)<(E`Ywm_b|oFS*qOb=)}& zK|W0Ch+3=I+?u=6#2X04Q=dkJn zjB9mee!gxWrT<4LHfcOHMT-b;$*0*F##qsAEtbHeu4B%-L@oH}$l!?RP2mCULE4I0KegrBJVNyz`+afZ*a}X9 z1xWOZB|!RB$Ag&6J0+J0y9OOI7%v=gNUM2{8FFKE{V|%Z(-OBylabKwZMNG0!dqCL z#Wq0RP>Vy0Yxy(Ir3@_~=06tj9$keWJ^o1ZrSZl{qh-~O_7ch^osb(@ngaH8{Q8H( zwtC)NS9VgE(s@xo0D1gg_o2>ZAEx#W^Q`y7;2=Z%?84>b8#@Yw?T>+VevXX^IdXG# z#IWZ+)#G*EtuYUb54bemx3kbf;u(yKG`%3w$Wi|9ySOcUs$qJ7@O~oJczh>1O(ieR z{YcDuGo?zXUHS{9s!=a~LCEC=(@eO^9G~vJPE89^gY&y|AXb)jG68knC0f>m^XbmL zhK-|Ro8t50l5wdTIH!}S&~JO*_)j$@8zT|(_XSEX_{B-q!@$0!iHY%TfjK3Ar;?4< zj}PbKlDDBSB$IO4rncW;QUvMZgyPZC>QzoG-Sc-f^H>$ai!}%WAu^=OXV=d>jT9xb zk3_#U9tUtvQQq=<%+KP-hvhwH-kSjqEyescp5Ji1IZPN!&L@k~PbtmYCm@UzzV$Vi z#49N!(%MjX4ZlyhD)J+(mV1xP_g86eN9W18?Jlo)?x@)3(;gZLpWBJ_n7<0S?lwn zscfMA!1>Q{k&6+heppA={_qp(IJo)y77gd0r!dC1^V7`wbvF0?CHfW#Yzwic+|}mA z*pw#n0j|eCAmZMmeZi8`i@pWS*`2Ue}^j*EaBD0rN=3MejA>`BS_lcO#))k#N zx^u|{+GNwDGhqF8V#UaagX_i2%icAFC?DD9ErQtuHgl&P%IbFcGavF06kzNdci$}w zqWRXTuhP~hwuufQcnemNqw=1)q?|e|a+VSnE1=UZpRpVzfn)eN2qRjEF~LaiQ@p=) z6MA{mw2s`8yNd_!`*lgFh`KzX3f4eLn~O`~8sj30#KdIk{7-W|kE*?>M0^otrrcK9 zSft_$!cB!sFOfdPhW>>aRIqN!E@Orc^zK9(V*qhRI^VG^?6p?Xo3pP@#nPAbPYy!Q zNFPIcdrR)5fB0W703}^QJJ-nt-50%y9n-+k(*ejyZ@nzE(~-nY#)<2Dk5(fiirAk} z-F2>+@51cSTFbQRGnNiLe-^_he<|^^2a2B;stKTaIo9!-^xDfzgSkW{!?|3al^l28 zJ&cS__-C9V9uW^=nBx%&LhJ`5;u{O37g6~I$gmCX=)VHb9%)vsdx5~ZLnq5q5rmSi z+kEGHD`)kE9BMwlnd-$EiS|pgl71fbQd#Rm1%OV@&y|mwdXN{dZEIco!BB30j$dE? 
zi4yfmsL_APcr9NZVuRvFegoR-!e)LX}tSS*LNX$c{!hM#%>nI$_2~*c#$jWs}@X@PiwjQ?t(6|YUp5oXl6K&I3qaT zY=&u+)z71G%@9gQ7@x~~cROr%-8W+Un=l0)?I-_BnKZt=wjD+?c2V!X&xeGtP%Qh^ zCElhkEdm~_#r`lzmmz-qY_XwyHj|nvoz*ZpeO-V0;F8YRa}(_3tC!qUSNJB+>?D8` zydWk*G_3mnCn^8Zm1G?;mHnseie;Cn*_oAT(X8l_h5^Kr!*{)78C#eI%$*!^ypRPj zDxJBWSaVQGN+D3<>-Tpf){Rz;;MzY6>9d3HMM4Oy4F$7A7ARTi%Zoj2apxXz&C;^0 zSUCDD_!e?d3kBNhP%IU@A-#0Bu|gYCbgva1G=DPO2pN?%FI|*y_)Y$qaY^0z4hi9Q zcAWI2BJbMt*W%3V&}{NuhMuqfi%~=7Zum&z@z*ZKe*`rK5XIVl;TubWv{LKl5$Sg~ zD_r4&oFU$ej?9nSV93A9^TQN<%9I)ZWag)IQHJ-)2*Jy zOmyHUJdQB6hRgCijra9L_H^%wja>Y)Ez_GVGBe1$bH>YdT4lLhlxOCR@bpt_9Od(m zY{{L%<9Jt`NWKu?ElbfwYG$I7e5wP;#V@eVe$wQ(eaH7BK57cLO)BOg0?93bk1xy> zX=~H8V{drd+m;dQ_(M6%Ee1hNKKEf65)+fc0oji-rKb+ZE{sn5w>Z}JWM0quy{rd$ z?{s)5Fqi#6RJ!L}(_Y0MsaY4^Q(WBfEreE(cOULP2sN%f?FlkNR6*7CN_k~%@s!GH zlaBiSz8z^lTg;hq2Su!8rP*+ho!A^##?9X6yNuDRy=>T#O}JJv9X%3Ymcll8e}AA% z3(Vqnb`Q@Ju9nVT=T2H}INzjV@{Hv-epSYko~fd&xXa&mnfBM4%+ezLWLpGG*MTOV z9J%~LR%{C>K(aVvHlynaDUo+6DO#EM_2&^bNym3KGK+dz)YksOmKdB$IeY4nt+>mE z`B|T6*l*45=y2?9`1fQh2|*w3m*QMiHn}n zN=Xc%O863gH(QfE^W$nGMH4y^`hp$Ts+d9xD_Zu)JX3g1#hF}op;n#+=EZV2@|XJ; z5nTE=Ob_}YMBngXXvk;W=`d{00>>zgR{bzpWbw@9$#U9yKpG{}0XHaRQgD{wpEp%<&S7(v{AHMu2T(GPPR2G@;R8j~!F?)* zDo(h(k+iiGH5q_B=-k*Hzp07babMV8NLl=5eC|@#39a}Yea|w%k2vWwuNwP$G6sq8 z>P1B(Yb>+eN!Q;IJWgumS(o(0jr@|qua@*`0_+z}yGi(^3Ea=9vje~K-_90~${A!E z2rDEC7sU%cUf(+w4|LtitWS}cpir~j9B)Vivpf5;?X%3HVVf?wuaf6OC_(tMzk|8` zie1artT?Uo!x*f7N!K}a5O@rj- z;GFy1^2|Ltwi}|(Sy%;2%1aC3BUWzP)}<+6T6=QLwY%Dq6&Ftw>`$R)5EtuC zcQ6LBzX)}hrGgr863FFFT^HiAvaNqYzadRK%0YcHUM^~9g{juKA2G(-?#{fX^wYtw zCP<=h{}t|;*mRXV={=Lg@ca1k#vWr>`Aynp%2>OBv#$v3%i5370Q>)W#lD|jtFE9M z^f`+?l2~Uw$kK2h_WyyMPPP8PmnVqe2_eybMY%TRlA8s)U&c8TufBHC0WlsG*Mr~w zE!=k*P#xJY1(K=^$Oj;~&wQ0ztfXnfZwz!yYe@^wQEn!HCkBtRxML-F#k`pHo9}+= z4>;|SzJb`}+|Ioi#tkW6w<7;TD`w>kE|VYj>*UI9!SC=nxgwbX-H!mooix2+z3OE% z2*)2<4{1%?j6WYpzmo*|*^b`F)OZ_6Re=rKX_Tj>E?3kOb`#Bd8A5`SUfo5^`KDqtND46wZ|F;ao7bPY3$or1xmX zO;Y{0tj&uX5A*ozKi_yan{*5jerwyiMZ4Ecc9pG2Hj56s0Z^)Op 
zWg+GqRGEvOHmpX;F8QmavYlb-mJxZJ#`Z9x8eggI1L(Y?@~8Q$@oJ)*Ouo)k9TDA16OHCH&$#P&-2Ww|A>oc$LKH(w8t!Z? zo07kZ(9FsIMl;d>qM7ax-tp2YSAeiS6B)z5KUzH~Yl?oYlawi84O$I};>BG!_vl}a zrLOW8ddAmS=juP=)gAoZ78vQErul#TJa_$pXpOg6bAj5mM+})m4pL8qc{ ziF7MiUX%Ve{{%`T{l`isht#$$^5-|w-8Cg!DdxI&ya#qvO*ItY3wRNn| zn;a@Q_UIMV6iyEf)>>s+*V;$X^uMMW6Ek1v%1snIi5nKy`HT)9g6=4=mhFuu#Aw_Q zZJ1F-sfrd68@y572HuKhw3%vg7&sr-3(` zQ%zy6Sw~7>g+6}M!CmWd5yh4?ovB#m72j~@Uh>R~Y|OUUQ@E=!b28)-10Nv=w_X@3 zd+iGG?vQdg)coI?jBGE=(WuM6DUie!iK84U26S?O(AWu%qXE3wG*%F;nEl)*els0% z@lbPOXj*z7^M#M&2rsZ?VQsUgsXY>^AnhDc4WhENEEy)>c7+-e*I)i0cc}k;R^%(D zBC*^=xSCO!lRxII8(=WJ4+9(}#M-01@Z&itaW~7Yb}(w&iZp_BKD&JP|7K@*t+=s< z%sm;~QCwBi-P2bJeoE_r^Dnz*?SFN5<3vf^iPFs2y-xUkug}HO4jV^l~j4WS2mjK)yMoA zjO)#*&uXqk>3Dlt*dL1ef7pBPaJaj!ZCD8*Y9a^;ks=~mqPIb!MHgZiHAL^dGlUS4 zAbK4o7@~~cOG0#`8@;!vqmSlCAwr;4bhx3y;FOXB92?er@0in24;9pxT>AC6#UtVePAbwzTcg+ zul_ls8oK*=#|Q3CVg^V0f)%?uvmLO|LuQ+e;a~1`J+@gGVR)1)j_p6XYp=yJXU8px zo58E45NRH<3O(6^C%)EEH!X<4AIntRjW-;`tj;ebV@5AW#&1wEVZj%gu{ z+zf@E{`Y%*$qHSl3?EWn&E5I7f7^L<2#7%raTO+x0ba?2RZW>tRglf{-Z8rVT%S)N z7gOP`myU5gOB*TQa~-L42&FqfAPdY@)66DtkCq&AcbV*M!d9#RBHjanZ5E^B`SfM* zz-f*5obj`6Pg-%drJ)}SzqpG1NUyhNp{PlMKWOk=K^)wp)6 z1Bhg)MSmq76MMuKM_&!cKM}iU>UDAbb^%KC^Qu?L26KcRVv$dc0u=MZ#oV1OjB6=d z9Nt#EgG&F<$mFKeTjta^jktWfF5b-8ZFKwxaEfJmEtx8%A3dynv>owrw>>J zZL>67`%Fb}h2ev{fz-v+vyi9QK)Uco zIZzJARh`W^A#9aqxf=!#{mh2Ek?;&lzj?UZ(&z*S$cbbCh=#KGK#)S+nRDEPXBAgD zJlRu%JzryTd4VSh2+PJQx)A5Q#ln%H&?sE-h&%uGtSy`;*t5j8aIGMocrXp=ubBLp zd*y+4MA@@W0^fTKVpua*#f-216*mIgcETivbte$#qMMnIbjsh-P__olx(k-vWrJoV z#aFVQPvKl5&qSX#j?#JgUIed4KcdMXpE@|QAA+i#h_m5$PYHolo$V7^0K-b!C|!?h z@%mh*--X3Z4i$ZRS$&!r=KhGBpZ*d7gE0O6-Le1H9RKf1o541hfLzQqca0f!F|?2W z84iVB9Ag9=87+E#(xGdR`sOfw&I_60IT9{yN}?gpiRUiN)!P2hEi0%xaIEyYRe01J zESx|*?lJR@%hm53UgVA>Ze)4-UBvf`jFIlH5>>~-c^?+E57$e|+i*aA-%8KYkL%_|vlg6k!x&zy*#W7TcLK;oU!u=4Z$C zumAJW^TB#nPjLEE5&KIMz`uXAAr^3lxV|bHQ~b4@{Lkz9Gsmz=L@xG&I`t>jK|ep5 z|JpiY;2!AX54Ty~{Y&eX8 
zSgCscKgn1Amlyu&Pfi~|6Vtq6Dh~R+12_Nz%Dcfmp?^P+|8XDw{?TIi>J^H<~Zp8xR*w`nj^TAIn(BGHzll4ynY<&;MGtu8WYZ@^htOnwOh!Kh3 zI5XfV{&U&>ykEZmxom$Cy#HLbf4Q#zxom&FG=QD>k7fH8Q~Ce8WoxdC5IvjcayyKo zDRLZ<%?9uBW@}Xk%p)VlxXO8^5D{?qy&V9J55MrcFBi>vXYVD@fc^w$3ZcUhktrYJ zvIKoN^s?&N%3wCLQAgwlXn=gsPmJNe!nHqM^3{$V07SQhT=&Pz(~l-dK&n(6;FopB z+xG+(A|ob_l&HMefyt=VV`#^dI-B4`*V!ON4g^8E_H-yPB*3O#>rq0vW${^Adk7Fx z`@q!|*6{Yq^O**`g*R6g?tAQ8GV6uDWD_gT)^3vB2Bz#)1@~ZvV}chcB!0Z_@%`0B z`lvHBULqI7mYPI{)*+0F1NzF9Ts+4ffZQTtz-+NcTEgtwAJn{6v?p@3+cMkJ+|+>WHwl)+PbpmI{U&c z*CFu2NBJM%)7=HJWHk}vBrr@Fm?ROcihne=&UdWoiOV{D1)guJhcFaX$5sv?1#$zfhP|8 zH|^Een_+IbkUEW$5mVVLAAD_8_LTf$eLaKYt4JQo_LxsJsx5Mb4qYMk@8?5M`LES! zxE&*f`k&zuwPu!1Rh*sy0^%a$mP0s~RP9{t$NhCTp<9&Sd~7f@R#jJV%)$@Z#I0TJj-#OFm`i~78(z9VQ03f%m0L7Oo6=U$CP;9hv6M`;n zc$DoUNYV4HmgFMCWqY?%q)Ylxe>WaJ#SyNF-(tdhK`U$R1UeU4Zh#T1_!(96Z`La2 zh(jadvx%%kw6gK(+)e_2cnt{bJ&o92j_M<-!;@eHNSqK@&@c$N>QQ}^`H?^l$qw-V z@telL{TTecjs1ZH5E^<=IFLUi@4PQ!nwRzHg5xJRw$=*w-cEd3s)Gn}-6nK}=P zfg>Yr>L3Q_Znd1D_GNgoF+#fE1N|w%rlyOI4;BJXrn|3GwMQJcTj>p2_;#%6KXA;l zSi4gAm{nH9tbJP(*lAPD z)bO!H!u_dgm*hxxCs-t#s%(2S8>3p@Vq1T_G(V9}e*V}82x6q=?Aj`SFUFxO5nhF! 
z8+$Z$!)#B2OR|n%|2=C#e1@(~k_WHVr1|Y7JfMQF;VqFYFjcuAL3wBg7w^vs`cLlh zpY*z~2H=^0xaVU#n&Xh}d}>U;RGj;&f=)}_ePxz}!4vI==ZrPjte_>l!{x!9qg~FP zl>WLJ70zP;b_kou!+w#sv1*8C>qX7kncG9`pd@nkP8e0`=Lr3R*!?lsearIlqxI1; zO+}Hz`%1O_JElYQedXumKnM&}{|s{IzIE(ejz4LpJT^@Yq$6IEdQ{mZERM=Qd8R0| z{_=2&jc5KI%d3@S=vfeVkje0()0FHtoI{WkE&oXRA@26f8zterGnf=+Ale)3fT|)r zv+v8sTL4nb?Q=>YQ}8O6XzXsX2wy*G|4V#=_so5UglrVcaX~E~5?E{VjNg=7k8Kc> zx>*g@e5!eTFcNlT%EDeZ+Z=Y9H&(2V90~_{lw}z^%oy=$_(sct=FB(8_dm)le5WA0 z?7u?bO--U=X=Wwf!AocKtw9pz77Joa;0a&4yu010r1MBeujz{Ygk=bA;qmzFQtOIQ z^2prUc&XMl)gwC2vv35JhOZB!_)4k4*#7M`r$L6X=aQJYchpB6>*bTzow@6qaIF%8 z@vZBGoiZK0bK!6S<8%4lVGq0AS5P2n*{U@kCQD^%&g6QZW~p1F z^TzIeq~+|qEtd67i*3{~Ob@#EH);lR-+$4^BD6d zCKbd#VK>qI_Wl@yF6UZ&+xb;JRpD;8hgbZ~;R@#oeC8L6DpMMtJSYem1}qaWn!Xvm zYvLws@!VgS?gf&B$;WXDsBPp-J>ybZL`G zr?fBEO;86jQ*Cb78O%8_U;atD&!4W>KdvD|4WPf59|B1i0beEu84PVGA8Ck{#OWbp z%gpyMuo>F5GS`_gm9011)~Cqw!AO^>9p1t<;sIkt3|e%WP}Uwuzqn<^evou{Tk!K3 zB2SAuI@RyUeWgUYxS5jOxel(IzrB%1i2yK^V#0P_)_6BnmdY@k@Rpm|#A|wSO?l9jb9gy3rbDeUk(*%y`!Z9EpRT+=*b-s#6isJ&3#h zFia_)H{V7{)al4F+aZ+CC*L;pP0`jdVtr<@zHp97t~h!fuwV9LNKTVxiR%2AK|`DJ zjmfolF5}+8!u`i~z0({dyaTVKklYofTQNmFnlawp!e)H>YuqF?E@+YJ>{s2-a$(Mx z^6Ps~f>CBZ>Cw#!=m^4y%qqX6%=p`0K=bKJ!B>0nrS=|!RcYTW6nhsVTp$o~r-{UC z*KcKZWIL&=RZfgf0yW!(xK~^1cZ+Hs&q|tbV$`@$%THcbgL#eQvHZ*^dBuZ9c6< zmW5?{H##Ao8rcAcJQh9pLE-kJmubz*N)a(uWjz?PFVg?2?oMO#uwVVDb#`a$UX{QX zFZl!98$5IRV_=1~Ed#TL3bipsPQbP3J}H$BO*Th)unonTXvX2zN9Iqsw}lEhyEpfH zm76c$6*%2ZgdG6@W9G;L5iLcs`aDi$X(Y2>H;L_@loA2qS^wO|5}n0_8$R9?6|?s9 z%z9yPy9Rc_NKul(ZHFpvJ@36F){}XzY~6L|-p-KgzbH}Im(nO(H?jTAp8n14x!i4< zO~5eVQ%OqZw8LsL9($*+z?)4eX}_cneTRj>tVTvwzBjay9Gcu zH!Ld`S6GCdKDh%n{H~;&_6^SV_=Vs`d~ff)Js117C_cA0(;HSA>dhZKMGQqoJMC%% z*g$)<44E{ylK?r`Z%&NK*hcd4f383*CAy>@B(UVWOB=-ruyMc2J683|kVbfKlqj}L zCasQ(J%_XBsV9YYb?Z6X=+Rv>UB2Bim5i{B3tL3+^f=E}=)+$4X-~RaeHr(hbAoM^ zEJa8rcMJ|q(5p-V9_I2u-AA}Ct|$-I0BihPhyGEU&PPfvCD{IQ(fMKTV|3ANU$eQ~ zGcgp~%7JAtw+oEQUUdRZdzx#!y!pyPsAn*jsOSdBAjPz}<*ctrLAuvR3EKNrjpY3R 
zr{{pHnZvm_>_wG}hXy@*v`tl-?3bO1po{oSz~rYZ47uJe;&C7hn~N29*mK~_Wk84> zs970B&ufPA`kf}r^n-qEmz1mytKpN-Esze8BE|Tvcc-)bMrR2bh_5sg#{}6lHX~JX z=OtlIyL`ttuiv6#SI&3K+2=GKzu$e*fJg3aY4ovrF+fOo&b%9T8|80UK}6H0-dorI zdix~o6HF7CcCdsP6L#v|eOd8?w%X0aK^yt-AaiiM9AaSCyUYz(3qqtyjtu zj1e%}In-S{UXlx9ty3>?0ji2eH|pvqK65%AXeia<_nC|3K@18BhQ@b8wZOw8%uaP& zCiQn*2CXgG8@fibM?i8>tTWm0KL&XHdrbHbqlM+`3WP-D#W$Ygk(=){b*SGWy-~9k zPP8D&(!-wRSusr^M3gGfNgT!k)o09cn{C{&Z?2x7ecxB+q*}c^k@AY!q$Z;S{2u>P zoaE3_KVBg2Ex&jTtNv|Wn(Y&=4a4{(<>Ko{asUC4Vdp2ycX5fXoYzwcxPo%Fm|r@@eCtW4cCDNe~NwCjM1~?^E z)v=nrcEz~&!P`d4J+vVYT}~XKpR6#pfBCp~c@5WjW6Fr68lR)Gp4miNp^!D}H+^Xq zALWGagPC@x3u7E2k=qNh$AI}*eG&^857=U|)BHYJN%)zUhihhVlO)qEA@3q?i;*s_ z&|>=}W}&kD-HWX*u#eK*-mkvx8jX1lKh986ve}vGA^v)mrlKlbpeKJh(em{PBwj#qLUt$1#B8$v-atk93FOvrl zg$ElYc@Ni&lWg=aH!FEIkv=@$>E3)A6fcw4ac@0O)J8c*QS_)Os4vaw^15ROKsK&* z`fAz%8o4b-&uf*me_E2nU^nI>_Wy-vgV3xBB?>c6>u0rHih8&~bG^$Lx!t5vqw%7< zLQ!c;g}MXqwi72~^-5)QtTcz13oixI0SVMYldf7DC5fjCrI3iv)>WEtopSHbiCRb3 z&MoKTCY4&n!71=Rv_M1NoQ+4hE6%hE(6nVNbIg>AnF&By4A!+^d36Sufy1%KHvHe6&@km3+P&)g7+ zaR`awq)3!cqJUCFviFIIAva0g>WGJrsTeq2tO1O?*+%4-yeEGS@S3u|X$ywuXSmj0 zAI-}{|<(zNbnO-hZB=6^}x=5gujLQPRjlbKxdh z7=?){L&uuajmyr(Sr&DAOL02NnSw}9=fTo)w`Pyk(rnIe=D@hD4n)=7=%o@~JUY3N zN{jN{bv}(FGS6>Z5P+MS|Jt1(p;!8yLTorgOUdnM zWG3BqHVe~nRcXYA^E_@Z@9jFkoGQ*nbBxYvXkdbHliA4SzRgKqMZ{^msGjafm3|PM zohle;sg;Cx20(I7@7a#N3+H@D4S-O6gNEe`gi`Aer25yT3 zJJcHYGBA>WIXgB}PcM|Iv6@O1k5IDlGM8M0)Slt)J743d37vccMo67OlorC*bo6>& z9JW>8!Hi0|Gl@&0hTtP@$B(9BQ4sEgRO8J4?YWR7XNR^$+jZ312fkCZ<^?$cpdYiz zFSRDI`N{bv!i>?=1)pieD%?82)ike39&^d%}u12u1D1Bo@a z2XUCS7HuPjITT=vZLn+#(|~rK4(baAo;255uwEl!aEv|{ zJMQ8%9KHb%QFNltS=?DJfq-bNAH8DfXhMb>K&W3+)0z7Fmtv)hE_yO)U210$l)fmo*MQpT@bBl z$RGHWNlJy`)FV&dblFZU)(Q=|p+GHhCc)Nl2!J6ZrRp9t6f7H)JYa!r*dHD))h^!e zs~u|uVEQr6kec+#MZ~2V==Y{Qq5|s-y|7G3tY;JW=VyI_aN&W}yn!55)%Hk{?#Vj8 z%VeW1T^2SqY&y5mY|yi165#;=psL|Q{A9!ATAZ!YwPHf-N6p|>5vH5dppRM|i9?h2 zt0m$ILAs@mpRRL&2UP11b`bO1vQZ$X(H%!b%LVKD&Uk4z1}NI$@hNAHqO{M7iyJ9CB;!hXKt8oo;y{4y`)l)N)21 
ze|MY-v}vV`(XkBqSqGPY6P(b$z_Y=RwbQ~bDN7?%!QRI#%-uOJi+x4v`VW0z>PE3(4e}m z@nxz-WI~HBs{Z_pB{)~!A%5Am8!`akScX#hO%@IR!R6$ zlEvk-_2Ejn$)f#*j2LZGrDMRg=mT5~*rBNs0R4h^E48zfu?0G=SPn3VmulA<=dqs6 zvW(Q9`>ajYjZ(zkkh2{I`os))dA?q#epa*?M^5YGV!h>HIG4$GM`1dCF3D;gfv!7w zXo;$4pia^)zNG7JC&}#Yd{v15B`%KB`GH(9K&*di#6QR}DtEG6WVhwFND+9G?~xtO zXo#Nl+OI+nyFc_W0{lfmL<+0F=7kSkc~+If7!yUe%e7i@+No&+t=x#unXfH3Im(Hk z7aQK&((QwzcT@_*yel*NDD8EFW>FqXJ;$yX7-)N|C3tPU=XCwL)rPj84Ug8nUuU~b z0k-?%yf=JSt#cpU_Bt_}uH%QBUcE4rF2ev&OJBoNZv(QMXHb&;FH${efEq+8t|YDy zV?7T{o;nDENz^~m%+Xbi6KT9Tutcl}uw?dwlre&}Fm)1!flRy4Y_#P3E{pdo_oC0; z9xRVR-wBVF2Ey61OnTpxk7G3e;-K|N*S(fi%e&%mA;s*?O2`6m<>;;U4>trA=I4|| zh(>F&R=6KKpmqtJeT*vz0Y5U}=2Y&`^oUK}!BeCjNj zccjq(U^Y{#Wrx(SBEx9pdOyJQy!K&{9Mq#P?ZYJR(hwwJHE5vFy@B*$lS*&#kn5q_jwz$)ss*IL$7=Lg6X zyEm*MhQPI-FyG9>pZN_y0tNud^~>ZdAw9(YY-5g*YB}{T7`Zb~w}r)}~kE7~xd2 zHd5T?hbawHTpt6XRZ9b33M3O=v4gi~!@d^KD+xaeb=n|z7PQiJJCWn)h*UQohHN!_ zbd4Kavjyg&;3gI2z2(-9!7LrV`0wE-xu&`*;xK*k1^*WdNrgrgZV0We)p&0$@Nq6S$#5F25Ra6j zso{e2YUTA(rDGYA#Q__I*Ez!xcUu<}R!?^?ApcSeEWC7;*EO@ph`YMH4hT)uPG}dH zyb=rLEj+PZ+TeVRT6N|c?0V?F77c{D9>Mf<14!IZ`5}zBsa&_ck^tI9DD1Iz&u8Aa z@gm4Qp<=xS``E2+2sKi(0;v6fRGNRJ^UUgBD#jt> zIVgDL>uK-(wn#sL4&`P!QKl2ks-s_TJyA7rgR`3kRHfvE-2fF``giX1%hKQbo8VSHf%iTO_Q5 zma*|sb{)}H+jF5p^SQZ~Vt2R9aScc-BSZQfKbr5pi2wz|Ux0P9mYP9HF zWma9O;r3JW1B)emM!kAQf;H$g*qP*h!dU#Hu+VYjijam4psZCg9c5Ua7p8lCi2s(} zrfpoZqtXZ@-jq)XjpbrO^Oc^gf^yMwa_nCo(#}7B`V+|hQ|_LD9!OW1d(rW#e{Hl@ z`u%lLJ@4~VGM5fS{l0mes7=6_l5{i0a~7Y+b3p}7zkHy#v@X!C*70gquGJ2B-Keud z24+;hJ4>WeZ>^w22cnS!F6)QV`Vq}vM0P-(1n`bIo8$*2lB3^?c)1dRIOpN!XQlbl zp{2G4C2EhwY$O!|$o(iXY@5x7Grhh5f`$8hTETx6x1Z%cFxZ{|?{Ga?@AoguGD*hX zK8R3~PHdU>RgwcZ^SdF9U*~|-@rF6*?c%pT*@jCc6d`}_1@Kdjw51$~GPPjNKDw>; zU16wM$Xvak7Z?GM`J_96FoXj*g;2z48|tTP`w2uc6b9bGH7OEW(ew$20FmdI(HSrD z)P7NzZC3b3?30V6?iAsEn#nrm9|!yJ5tjP2+lwX4N6({3S~*@g&zX+ z3A@XrUCztu9aP0)n?wuz>*kBO^|{9JDk1B7$Q9~Grr=-a)v$1uUjlpSSoKI9(k zRMrxUjkf)-+AlLw)4uNc<@DYQ`)td4#c&wl!m2T?{y~V=?S&k}wj#>yLu$RTWAuQk zLn|Y-S|kt?f4Zzp-UHs=Pb!Xu(? 
zg9q2yuEulcGnp(1_OHW#(gX9SVCxT1#&Ak#n1$6KdUl|?!ee;{2m@Z-gSEC|`@bKk zQS-P+s$1ppUQ;$xMq8Kb3B_|m#hPn(i?qsjSMe?&V0rSdW@Y?gGktfN!cuVvjzO3= z!sC@5!?h_-ZJ3fh!!1XO6r9(_jY4C3aS6^(fx&}rOA9RO9i>tEkwEqW;a2t9iq`mp z6ek93|2q;uGWRdSDByL=iWjNS)&MNOLM)Ed_s+k>$A^L8ouf2xP_aZRAwB&JDc811057U!BN;$R%TI@@ykZ-2dn zobL?^2G7GmDozKKcDI7*nI?rBbWPc9+V`?>2QD&E$OUMGR{xxe<4m&T?vyWXV502X zDut0g#`#K}8tR(EFk_LyxcDnCA*L=UR}Phl{m8A zbVr0kyW*j$XguRsnN^Hc{rMf}#<0;NqQ%awSYg%rvVmM($hG^fJ(fez9$NvR19@@i zd&+hVFY~r3s9xBh1z+n5&uRwRC$-Bx$+hgMmD@u%dy|nLpZpd+hnjRWf7D7c(px5v;PP}G4IRv!Z;1go8%^6MCDGMU9{P-?Q#)iR}K2RPu)bi zM@$*aXCg)q%0xn2H#;_n_Do5Qfoc}X>w49e-V(7NKs#62b}^~S6DY&I5S4N*;O(xj zx}3+w91CwB*qFvV9&6&dJntvdpRPJo2dhgMVvF*0bMT0ELh~pG5Z|1GWR5Y($9^sT zK=V7=(^rV%-Bs)c3MM6)QF$fTbj{ic`4UcD*U{INSK21MF25O_-x=vo_nx3t)SJRe zExsE`IOS!E^#Vg7-|-2Qa8hX%1Y0wg46+MRCaINWzTxAg>7TCZGZVm9NOM>xeBA!~2oze7UnNO`OVNPa-ePv^?!CQG2`5)L60okIk!XCmPr$ zx=hh*aU6`n)LX2t@&B1s@xMQs?!q&W@Y#ILqCJ$pJVu{~SVBf>@_ccPe#_%<@?FO5 zzJhuEgjC3xn4t6WTyBm^cA}>!G=vL%lVRZXsz8NS%*LtVaO>8FWYI1hk>S{8U>ryK zEm!{;wDC~4)?J~`uycX%$ywE|7PD?i*vNk4a&qgcP1(*`wfI=M(WVdmH9l+YZhMS; z3RdcbNnuT%&d}7)SXI`YwEm0!+gMPvUr%zK2gM>j7E24BW&;rcw@8< z#r2`MHbm+FKyJ4vGG%khbU-RZ^~c(eqB+(f&P-gIrwDLua~FGw?DTdrHUCZ>c7waq zmyQ~N{-ixYyMxvjZINvCCL@6nQgZzdCVP_Lein<%29!o~nR#8;Fg#_}Lki7ao2kX@ zPG2I6phFK-X@D@V$z4ude?FVZNMgu#Tbn98=^iq6zpm!sWL-bX>m&Z|`U6mvyLf0< zfc(Ez%0GKz{L_Vy9|au0WEK76R%_>Q3=M5R`j}w!S?$zj3Yfvcy9%icd7ax$oZO?>s3C#f)<~8Sb}4EHr<-8P2Jr*j(cQ^^r}5P>exOT);B&oKSQ4zex1Gw!-DnqcKZv79XxLyhuP}KgyhX9`PQCTF6dkz z!EOL~VC444--0ez+Mg8=N<=?W?}zA?ITAvOHswrUguf*`t&|r^uOkNWeD-=A$iC8H zArcsvC%ZOW2m*|w>E?R&-31WkHqPGQ!c|~=IR0rsUB9|una9|zFIIu{oW>{fJ*2CU zB~sui?1Xxh#DfpFHjdCqh)ypQpaL#-wO@xV?%_*Pk=)TD(d~u3KT-ORT@P_C5YU|C@~*FVhgrZeNkShAuy{V$kn*wZ8b{0@#>5Lr z_pNGFS-!O#FByb5pAG2B0Opaa`_fODrhj8<#pbU7_H|}Au6SW^NGq`Crhui-g`#J< zbc5-8)lEL@R%F8Y^Ea2s-rEe;$&|w8KPcn8FExRtUF1BEMfq~%gd}$^2Vh2P|y1=&lk6xYgIzpmG)}% zZIME+cv{qT&F`x+h1%IPaR1hCzE5=Vn}OCG_nJChVlJ&&c3gm@RQQfu*R4>QT#dr{ 
z;X;a{z4GJPsNoU2AFoL2ED5cE%Ei!V)bns;8p+%<9E3e|gF$;p`H_TXn;`@j9)T|I{8 zO9WBYA1TvV7MUcdX>;fpEc@%KKX^gk!!1y(aSBdiE(JXs&=*#|`A<|b2jVw@n!u%yo0Ntt(kV@(9( zAm1C*3@>Hrx?I~!LXB2-A>M)9WtF8>dZE_q8P6(JjC+;jqT6y1<LXt@iA>>)s34kAV{{4vmoCsb{CtY7@fsh;!Nx zP2jU>zZ{V%*Dc})iXu>j%bEpfoE>~7+_rrmxVF&+pY&U_DRo(NGH9{MVLW~f0Rc_c z|7myr_ebwMeQ(e(IC8C!*R+HrhXHn^W4YgCJ~c2LqF;VJe4$r9oZs%Qi#nIr;Wp(+ z%^n|+_7i$vE`H$BJUue;iG=7B-rpfT)!$SMJ1r0`PZm8d7tk%sh1~Z(aa9zBQj9_Y z>o-52-M=ld(ZZ+)8xMc1j|0^a*U9w#4loGqA8+#Iu2X0>UWP}_w?zQ%mIe!tw%f{p zUig#O?lOnA@0F{S61K9LHhjhuXHcJ0Wu zN}>evbT9n~rVrsmx@MN$+rLMEP~#DhPfBWI@(Ib`1FX9y=`CT_>fDu&4vXGKU-(C* z1RZxid5xWby-JQZf9sRU-Eb}u-0S8X09|Fco~?!`^gKSjTd#=d)1GkT=c^6d9a zIyHmXjl)cD%yXZ|wmt=(BP!bye95vku3IOgh2+19WC+Nm>awRmq}~`1WP8WnM5pbV zWV#KatIJb9_J^oxMQ1#yo2E)5kj;8*g=Bd+|LqU%-vUe1^%M+Z2U8G-bSPfFPBpta zLO05=E!+TT!Sw6uhFlWZz|RD*#@~_?2J)zjx2Yt*{w-&W;XmK@my`MbZ70PduOx~> zpYopX!vz98mFB|&!`vr!yWw5B<_ZsC!B@bK^a$bA4z9uqk zzE_2`Uq2#QO^SZG4f?brQgN3Z`+vV1)7FIFTd`)4(6dkCfuU`H9=l&jc|&$fnKOvO z&kx^awS$rZPjlF6JP$Q^H2wr!>(O(UpfRv4$>_$ zlo4J{+mUy4w@xdjSX$JZ4n8C@`W7k&V!7@8u#)&@&qy(A35}O}Qo-aDt;7IE?8+({ z-*-|yb)89g_3lxPZngKs6*>B-`#}_dJs>*GjW*{+My_F7;x`wk z*d3@|#Y3xV65dn$rOpR*TD6NV2~+38jC!mZ_X9)e`BwGJHKb*!*nX5Fyf1@uM;`9v ziba6=9LuFu;E_Y|!Ym{7$RE0E@)pyZz?i-mm{hzbeO3*bezGA1$7=qa@V6xR$gx1d~7c$wIy+JK(TKcrNySZbv%rI{9x{iCKKND zQFGhGmWi89O_}tl#?VK`In+gqONR&?_YjhR>JPN7m|x5@cW?I?i=3_9?5#I3^*Ao2 zo^KNlJzl5pU2~T=YClV%08c@LIo<*%8BTj)S{FDHoj=ji@`70o;vyI-@lk zx=q|T^w@c?25YQrdX=p1U3LP@ez>65SqC~?NH{I(M+wg0gf*c0BWS?n(t%yXIXqc9 z5c8MptByCHc*mP6D{1Y-BuAvZ-A*tgIdXZ-M|15hL4iuQ}^;G$5V3uoajC#Av zrwDb%>evP7J7#x#U7D*D#^!?$gC!bcH!@T2dZ{1ql3?w7xFp^ZvTJvVGO2@>qr&f; z-h&y@8sFkH55FbqHd;Im`uz0kN>qCVbsTy$lE%Sk?}!byuo36!W-r?@r1zmr4czE% zJwDX13*~rhH)?r!+C2gOQT}cq2eMo*>iak(D-{G(<#~L&(iTeSvl5riC6=N?z?=Bt#lx4U-_GxHOu$@E5)NSM2*lgF)&Nfy?>`2U=t1mX+OH^QXQhqPX`ULB}n9 zi;8aCzz_{+h%9~1QHqV2N%I=Qa2a8dm#WstcgukIOtI;Q+z@%&m(qm$p=wv#%k2lD zr#gLK*SuG&b@C1M+hFUx)!usUWb|_{m;3!F-5((gOTVmVC?vJNywv{%QoVmG%xcU< zHp9LNg#C0ooByqJ^wm`)ZuC 
z$RX?NS&2L<@YXJD2+7Me|5TL+#=kXQU28&-H~Hh{trDS2SFY=n1dv%zIE5{PA4BOa z9xk>pss@x40sg?D{+7K7G>$VpqE1I^w3;hr% zCxg9f7HbogZ-wKTYm4)QUZ#sn@R_|F${jK{zYXP`%Ens0Iqc|6uJGWQhjm&TbMtV2 z4;Dfs&`9=rcuX>adi_>FXwsuGmo?rc9(_a6AZiZlo~7Z`rRqhRUXvoj!Z(?MKOhza zbICCXI~-(HF`3kU)WOy)-Z(-6V|kIn{GEL4NYamlM~5>rZi#tB`R$balxQ@?=N5;eEmE$zKDE)9Z0#0 z$-A+4=18G(SL}yUpwZ0LisX7ZO()wv>0}zxJpaM{ya(Y=yheS5HD@>gGlV@f#xjom z42pdtk0C4SQmI)f2g&-Sen)~{j=c7?(Q!($b*m}Up)ulnx%$AA<=})BuQ|A<0r>J2 za@{IV$!5hwW)9w5Hkv*+7GjvNmsHh0oi$ol^p8zoVJ#zD<}D|LV1gXp%b#Ph*^UQ|TQq!RPIPx9w5O5Vj@ zH%B`JGW&xFFEx-CB)|PZ%P`QMB$Ri4KgDXLVoW?dA&$ZOLm_NP1#H{H=ej18>l{jI z^S+JL^}$LAFL`tk(sJ?{vM&|me7<)?77RY8`SdxEvI5c=u{w+_Q@eQGU+;D-{f^NbdVCL=J2xq<})j*Ha zNI>#+IBLEcA)GGGZgdPsrjE9X5K6?I)W!C}qrKC*116?$v>Akq-CSdaR&wxcIw$wd>i4|Wh-OY)Lg(A=F?>%HE&BYZy3b;lnXdDkj2U+?txM8cLL zzc^_c!tv(~O z38R$dRYZ6SWM)Y^*`--?s_nHg@?gfR^VHtu*scw+c>GySw$!SyF8MQ4=i`1~9Die@ zhoWodCZkPW@^lZ}zJ-b=u<3af`bM&9KK`ydPIPbPPS%})lfZ}tCl^lFS?&L zUvsLPw#HFZJ+(G?FR@|AyQCZRaZh-;`Z=zydC~dTtGxwg`<8J?E_*-NJu}Bs2XUO9=AhF$Y5L`s4V1Jf1tL7kdN5mU{m{1DN+S7pV09Ei zZDjei2R~7Rz*SAt?pl`9iO+*ZrIZ}nVqncB1d{LKa;qh0lnd>1O@a34cdV6)p_otw zaK^X7b1*HdR?$1PppuZ{Q7^irF>Wp>lO34zSK5d9Fp}TH@kc}XexD`@ryZS#jAMy*+sA7p6rt!)gMVIPvw&y85auOS{PGWa`+o)&9xs}C96YKLLt5h*o~8X z@I%*-+`dXDuH(q0?u8swaKO_NkUAp%Wu9_7WJxIXDcm^E=CT4eAgLc?64_}h`lpjf zIdYMtLxR9pZK}t0mxp#)e9b|J6&gnc&nu?C|(lR7)E;?g9DY4?UIaY0!|KRGQEG}kKhJH<&N=YkCyf%Bw z`ul-d7kTRkcxBH7K8T zyav$jIY_6_TLXh!p7{ZI@Xix$Udy^uA*9PaFFj||!!HzBj{hHf?-|xqo3#xK2qN7A z2uf1{1*G>bhzimKLKP5^UZuBClx77{dK08W013ScsB}ULJp`neB-BI_0^jDId*+^J z=6T=2_s{od{^dB*_P+MI*0t8T&UG%!%*Y>m@*_Vihmmptkjs)suIQ{5MQlZw!jwk+ zJ-yV~`EL$T)b|XXrskKQ@qr2CDTB+FNV3Be#r(2HUD$#(!2c18{--zo7WKPbQYrodhddlK_~clJur-ygIt04Q8C!p-btlnD}VztR6a4*ZRkcDJ{_`R zKrPXEkN52A$9#7A7AknYgUYgUP5!IF+VGl=sW*D=B7Ij_`#rU&H&v+=aPjgyEm#vM zkwn|?p6U};i^0%V+x4K`S|0$i-;YhDR^%_=@6da6= zmvG+I=}CtN&^eTB{xm5T&8E0olpw^-x-^n^Tf;m<(S_Ahr_$r2VVhY+P?aYes|J)G zy+2uK!c{DrOaT?YWNj2W1Xehy!REZbw(`avkn%h+v==Y&83+i*)3MmquU0ud7Af(` 
zCux%~gg3q%UQTNNc=MsLMR0SeXD2(^-Q@0otO$s>w3N-#Bj{VUjkLXe(hKsV5o7_C zBCr_fa0u_NjN{lmcC!<&DdqH&c48kJvqI=);{>LimZ`Vgvtl7AN?{c@7%~*ftiJI= zaK+7Mq}&MY;od0kW-cM$Q<^YU)kJ#k2aY8%>`FJ&OLQwDAwfu9?BO)#XO8-XC<1XM z5Y0N%?*bx@C~uqcUvx0qsEot7-r?eX6!(Y9_5}H1OR?8gb@p;*w43SOGcJk~aR9zf1TLbP*q2A`b z)f@-4Sh6~fAa9#0=p%Pb{kOknU0cGv50+|oL{kM!8u2?S?&|8The)TzzcnPcXz+GB z$`cvde`ff-iJDocD|lVn#~4)B;~HajyE|YMvmY%EuBpU`d~?Qcs&?6ohxT*%e740m z!HZtviL9=~{?V^cw$nIThP%7|aE%1kjj<&Hp%<)oj2N^W0fn@y021P9Sa)>jnV+R$ zmw%FwecFgrM zJZoF+v44K3Aec5V9;xH|SWT5%L9UT|b(kpR_p{+W-SR3dq^km+ty=ErX{6+Zu_O~m z%>;k9UZ9I!-I932%}AR9_OK9<~Mbgm4IBK@M+%8;iME&=sf@U!EI<_37-Rb$z0rC^4UCc00-b|WX^zaFFs)0nzn}Y+8PDJ zorax8mgt;|stmKKFV=du=#VzOz6K1r$!w521zFA~`H?|}99Fj;nF@%?uPWM~=u;3V z4ZJ@&&#V0aM{-T8=K224{nxfa?) zMtvIPOP}0rJhSI$mxtm&Ki(U8_5$_YcUQ)%_u{_ivIR6Ia5+@Oaq0{1j>b1nKbxZ3orBATafzu*GPX{>#`Io%*RcUw+ZH|Me9 zV3F43bk;lN_VWQg3u2$G?$fF{IuY+)(5HQvzh}()WMHXGBfTLGcuv32D_s@VV~YAV z-M+OCX~R}Gi&>*-Y@;VRn1m8x%?C4Ss-HF^Ojv%<(|#;ysiv}drEblyoOA1voUid` zox}u^VyzT5QzcS1{>DG-x8v-3LypzMz)nY61&4uNS2V**w@Y2<`9m5W!)Re3 z*+zd4&PQ<7y&A=7)X)~Ii7OYt-WiHjMU{Dn#GtGvRlhh!)VDTGO}$pgL^G6u`(NDE zj8!1er38PNSr-DBUR%!Au+z|x2Ddv%JM^h6w!8`)ORwFhF$gYN*rYMNe)XF2iliYuq<)=X@k}WOO}JnhDx3`~ zmivS{)S;yN+H!#mk~NK*TR~vRAawhNQO$XYk<}8;nyUY~vZ?&_)^WKxV2%}V7EfP? 
zm@J;wbY_6i6&xvJTmg%c-3=O}(Jz5R#LJh12jri|!; z_tqXi7c9?9SzJ;7z?Ta!vqov=3W(DrZKLKCDWOk4IobmGo-9pnq?%VlcPUp0YRA|f zbZcZ!cEJwQe{17=uo6PmnS#|6?yE{`>cA}mDbKzAUA&M(vi)|=s%)p1_nETiUxo>U zuIYVSvk=hMa|J`cGHzW44V~*Hm^i>#-rc2JUm+eK;A1R@) zE3eR5)~g1WA_93N1d47yaA}?s@vTyV-5q^n*j2$#0T|cFnum_UemFE0e1@*;2V^RJ z?t#ShJa~_|Jfo>u?b8U*%N?`f#kv4^j$?#gmbcXURaQ=~sg)gM{sZde6x0jH+Iy;s zVHqZ<RV}~p&%CohTnSDfMsT3OcxncVW)G05Y5dKqIx@@Z(q1PNnts>QtM}t-VHBx~ zaWjKj6>FyX1X6mjDVp$;5+rF~IvN|8<&TynbB&=C0Wz1Dk{uo~m{qfPSevR?oE|_g z(PAeNi*s#xr(G^ldCKZerjalAjFg4L4YiEmA+T(JC9$EShBV8k{D|&+@6PN->2<6NUu(>%}=kc)4(- z2;5PdmGj{T!P{<|#cYYw?&~2hKFXH$v^yTzgbuK=dz!7wIC?{SMmKwWhPhHCgmaF6I0e+5 z3>+@t)qDz_d#0eV%AqVtS`et%cvABWeohhjIC5c>c;~wEuMdn*()Zk=K`-vI=It?W zug`?!9K+`N68*SoBgmA%2PJ&3KrkxTF6Yks{NCohOOq5PprL8i*_4RU7@)us>w`dw zK-D0#4?X!qFPcJ;WOLxmIxVnjP8J)?f_bIxEPpuj4P|@FC_4SJc)W^JYvXGaF`}Bo zd^uPt(i;tMw(5(=BYXtWpNxgae0fP!APLeltlDO(XtZJ89V`Fl+qB1lcHpz<)%{V1 z70W}p)Ae$v*U&46P4JwD<~PkM%Av9qq~WZG`j%i{$Ki5Fq$_wTybx-G`Z0wmXJ?!F z=xib?W3EEYu-3qN74S~s5V&3yX<>`)QScT-bcH=ZvGQnT)-6-u1a2vsg;YdV*&g}C z_j^^n1fK@VaS;WwGJFe-%A&b-(bnjar%?(O&&Zd(8n*43@R&5uK`!lKg*a0|%O*ac zo+aLnUeoXm9iYqo6-l{~Ui;z_n(GZ}eE?NtaKpiuek)nVI-o0E%q)5lO^y{#sdpav zCC~CoXWOJiHDnWI_gEeK;p|Vw+3sAH`~+RWT2&p1D%X0#g2mnxWaww*Z@)4O zq8XWHWV?1vxuD}&6rqwdJu+6sDYc|1!kQQCTwhCOndLIC_~SJU&cx;;Uzta8jh5A6 zc(y#!p9~>|b8iSEtwG0vmL>ZZwl++(;P(Ox+;N*t=$%REQ7lbGq$He4^Vdi)eK?kF z52yIrl|+RgOWJe>aTl-#1c4e$^pgVi9`1b$RXEn$LASSbfw^}Np&qImQ+V5gjrAZu zw^;tRibBjBS$#O`Rj(GLskoB>`OZ;UcL7)d%@^d>JbB-%_GyFK-B4YAB$h?=$Jw5ssW!z*du2GH)Uo2;Y)QXD&;ku!sP<6 z4JZTuqewYd>xB$!rMpzawviuMcxDLkb5P9+GnBtU!{E+4#tf&y7r;^tD11ZDTA*y6 zgVMKV=RU{n4ZazI&YQVrgbo9~Wit+BRSsGv>93Jf& z(s3?-a~Vt}y9f+G!mmf(n~%c1KW(UdtAq)#5{J+MFda}7k7a?jcoYpZ6DO&+y~5)OZwl>bcw z&N2XB{5JQwqfhLo1=Fb4VPjoM?9qdB!4}?~Piaw()Z)E$Q@Src%U-NROour@N=rZq z4kxe%%36SHWXj6wGms@*)pQVKKA6RfvORvV^i%7*p51TiDm+j;&`wL`4s-J^wpePO zk-_>NW&OVU?eeV8pVcUnw7D@0#`_}$#LXotv*5EEy7Nk-?iFITAuf?>qjYGhf1sJp zW9?LhHqbW5b1rD%!4LlIQXOY2g>cWR9ODdiJZb>I&gKErd!DyzwNfIWBvTU~qtZtb 
zJ*ZDwe;Nk)1x}h>^9rG#72Y$qI1?_Xsv_^Mrj%;1D18Y{B1dK5|Hwp=DES z0KYc6?q8A9oX_nG_zu+|rwCD(#-PJ@3;hJKi_Q^lxC357NZb)tQHBs(xeT8O;MBz5IT* z2t5*S=b&FlnpFPGK$IH82LOj?JHr(qI3J0P8~j9BzBs!hv!LFnCg^ z&x>dpet{8}1NXWuVxyq@RVApP?G%<|M^#(xN@>^OeBla{ip*&MjAhw8#|m*z%qA8<-CrBt zZThh69VZdA@91>N(!wg^#@BdhI^&oYzx|uCEfP`n3Bgy(%HM2#5!^}_S?$~GCSa? zH)3$4R~W?yfH|XoOfq?So?94>*EVh6@IlEvu>)wox(^4N!JNM4jA!ne`d?$(K>!W<0Pl!>%)FoNO?jB6`OfO zuxjP`_iP^rXb~hTIN_JFyHB7zu#|xpk@OO^&%PFg=EqNYQQFX|hL4JWg3nM%PIL^N z(u5Q(UI%qw1 zg_{4At=p018A=J`&sJt~3!ic2Kzto{>%m?9x5BQ|wH~8XRA7;iEPc?hIg2Zn8Trc4 zVni}W=gi<-YL$a`dA}j0c4{4L)b|^JZ~!-4>|EJA<^wI(n%oIfM7{E;R~#sBMZ3H> z%YdGQTF$UYbWLpdI~6FpJl^o~5nQAqs<^Yjq z2yQm8KexzkFn`AkgS?a74ri|js<7$0UI_8k6UO|EZsfoKM`z0!mj-&Dwn5r;BW6fS zF@_wxL~jN^+Esv9Jqt`iJuVf}4ILn|5BkO)vTLOB#2C~7$(^sl;SZp5P<6f#z2?R1>5il9i-AHPW`7VG`6oJ^PG_obZHO*EE%l?oSSQ?Zx zgSw;zIezrA)slQPMv;`&xYS!Fs?^FE>j~!GI7!@HRKQm zs(~!R-)`aCl-MY&f}7ebo@rfn$4aaAg%Xw_1uN-e^MH2M!5CKaHuc~oqaG0ZJAAWVfiA`mjSl>)KdKn7#Rud*^ zGx1!0ZFq$?Z`Iecw?QGj6rav6eL%+hYrVu>>ld{SqS0Ufu1?yj0f(ozFOP&c1k_VW zWhLC_@|g(ahk(IlWz+=zvIWjE5?$Gr)50`^n+J~XHGhaniyDu1=Ddtr zQ;m>)vw%#QWR&hi4K25H#c;(lL@{Z0RzsY};q{7;X0Hlob&BuL;WI?k(?K0RkX$ z^^d|~{j#?jJ1V4Tu%;WQvRbTOy&wusbZiXln9BsB8eBu*i9|ew`Cq{AP7nqs0|6-9RhS{s z6n4-Uq!qnvNaoR3^0+cGs%^N9plDWS3pD4iilCoTS_7qc1+QYvKvjT%ulpCoR9Uzm z%+b4h6`*;A=XqbQ>iozB@lSO4Uj2CHdwsfO2qtFA$+cx{8q-nUj|gtoS3HP4`(uJI zsDoqpfiD4ur68M30a}MWDDPmzU#Zrp$SU2DF~?(nDs1m@DU6QM$-DKo;mRbUl4|AB zdSD^w7Ag;Ex!3HqU%{ehLikm^>oDATxlH^c&FYa`5=h-YfZ;!WUY^%zL(~SWfxUicBpKR>VuZ+1fR+4F44vNpW|o ziRcl%<7xT%@C*ephDQ7({PUpWuD5|^ds=HYC4bX?lQ{)QQNHThT6vA^8yjP-s0tzh z^E&UUC21zreQY%=jkFE>l!30>)MNjEAlXg184M_|z`?4~?nY);$z zlA6nB3X?EO{?Ns#U0JVa4cb2Qx&SS9jf4^7T)!K;YY&4{BmTV9z4gm{e)rAi&We`ENVyE2QX-BW;>ZeMdjd48 zvh?}_^$)VUtnO^n4d71*wW&o`9(n(@H;lGTk3q0E{9O>tdpZvG8R02Jy)tefqe-rY zf%b!1RNn1j@R^t-oYr0b6{cQ-I?OE73~U>dF3s(rmn*K?J@ax85s;}C@kMb*cG@*f z`b*zHgRFSq9kZt`9M=Na>(dWi2ar;-@_s|#Ufw|t#u#&bm8)x3UiO6J&E2~Klz5?S zFPbq%VBU30o_>=sOIqPI9Ok|FVHAe0G9<8$7(9v`V=W!wd?N4Ei6KA`T{}fHXXx3g 
zBK@^FV{ElK%8O<{ox7kpZh#UmFKN|b{m(JsDbi+A5kBRWz%3CMix=uH^|X?6U7LnZ zue1#hNy7(M53bHXdd?y$CHn5d2i&ti<8jr^rx_h;QM-RMeovu4dB%Scpc{eHY-n_{)aC%w5O1*d5!x((ZE4dfIFPw9yACnJK zX4Y@*8xA{e37=)Zm?Z5|T{2M)PpbIEgL9g+C*wNu>=@SeT{;F^dxQ>iEP;2JO0Y`9x4gcs% z3e@y1*J)`z$P)*BurB`;q8NptX3ho2a4qff$MXx)u7g^PguZ*usNU6zOAo}R+MGQv zf5YY#bKLS8XlK{zcN(mt1xQH4-*dCj;yh84-WRTp=s;wXZdg6H_e)5NGgDU0OoS9Y zWEsx)7P4ji&y4ziJo{*`;ru!{n5QgZ(<0g(Hg`YIP`Mv-C-M>Ga|34PI%F%|p1@&`2uk zg}$3=)Jq?L4&!N6JN9h@Yp+oExgijM%_PL~3H(-To-!v**w2W}kKHz}BL?~LrSIMY z@sjK>I8Lc&a6Ug9lRnq`Pc)qW>`JqnlmSYSI>K=g-Q^fABB<=QxhaxNT%`Bd@2+C7 zEctjd?m=mq3HO5^UQ4z-8cE8P)7O8iaf@^5>~y{NgX5+Z(@Ixl*5~bxcTFDn-Z}<` zvlqT4-rXzG0AMrKlGpvzPYgnJ-G1&)SVmFOJGz%^Zat5a^2M$Ml}Ste)|0)#G+qr( zJgD$cjpodXq_lJ}Sk5TAa|x*XeX#AN7IA*o%Im4#*PUA^e(5_sW7O}At4A}EKs0L~ z&xuZyFVe$quDk!}OBLS~c-=21!lPX4zRG7(=kt2m;v*mSqrnZ(_-{&!keq&g&bToM zB4AlHI9%m7$D8Xh@g&1$(3=O~>U*#J#N~sR>du(@mm9{5X4v%pPMa;z=fobiuXK1m z`~2LLR%Qclr06?T;!ZG>mPmgHAz%erF2-S8f#sbPIGf zlD|$zS$}`UZ9XM_%$1kv$)5iP=HQ&J2j>TUdHmA441UH;(rKYr>?B;xzi!}bI@Oz0 z(JB3i^5DMhI3eG6atM?;;;@;X0Z(W7sAybYFUw%A;Z=A4Zyg*@62AUibh6nmdfTst zq;9{2#F4bAi!F5lyEH9zXrV3zz{P3$*6?WbvN?cE5@E=JJP>==?Pwi$uSP3r7XcW@?<3Xiq)2VkY^|fbm_BO zn5lg&n7r6O-Qx+&`cCB6rUavF%9mJt)4?bB{9hmW<_AO%gB$$rWr$e&A04ZIy>o(B zr*FEKEy4?wFD<;yy?m1R_^-RlI?KRGqs2=#ig1#;P8BVE{dc~ue4TRI$zOtx;clCj z9rIs5dXiTRoNsKAVS%W~@E^WF6TghlLHKp;kN4wvw9cE3g{}jFJN!V~ege=-OR;fb z^3`!``O6Ra*HPCT7Q)W3Q$qOHf8#s}yTtvs75c>iRsh)H944EHtNVVVb{<4Lt>*~W zivab~4$traiu@zlYuE0+O|&#Bc@K!U$bx|;_M3a_qp{6R4bj?Bqrr!ZM#=kIb`=|A zpbB3k$*{ydtg~L8!83=Ml9v~Q*185H69F-j&hq#3Pxs4;ZMudQ^W5UDzs&U;RMBgG zRQC;lHb=na=TBgrfB#4=5~!!#h00xlOD*8{)-q?K=RA+* ztjXcwHwR!u7_(inA)kO{=|_9%XLC9a6k5N*Y{55RBV~)b@RvJ4Q|xm!erXW3x5;1| zIk)8u?^EKBK0tNQ(EO=sb7P$iqFc@2YUS-;Csn2el9@}q%Avw3i0y)Ut_O*b*zNdr ze{n~H;_JxZmQE8U%}`;dgkI4nE2jz{Lb8DxLWgrPQO2!f=6S42#!P;0$}49s&3lLiVe zB)h&GB&xP6eIlUZPrK*jp}GUIxRpI>!1<=&(WNOsSO-uZ=}PUhe4ogw#Lps@li1uTCCw zpH0|nxqt9$D{W6U>-IN5cR-HuJaaIv&OpF&au-l|47qNXk=F^C@00j>Eiz3ZP+QV}ngpRy{!xRE@tFAL%DBDB;ZeNEJ@ 
z-%AXldc_E`p9bv%6xUQ}-k0#luqkn^a`dh5PL&6Ix!8Hs;%`U(J7?Wjupk4-zbd*y zFHAVwO$kH zFq0hkd3yAOa~>j|+}rpGTZXhQKd3aX#JFm-gwCK)c%WL~w*o)rKh6#9yET{3n~=2G z0GdPREMMGQgT0riq}y{#*M~1W(y|p6PbBzY7+b_WaHP${_Z)wetRRtuW%s zzuMcl^_qOHEtee7vK*jruX6wav^*U^FvkYRv7UB#$OG{Cs`#0@hsk$(hQIT51LK*s za3*3r8lRWf_NPV!E(7-O00`|Qmx4aO3Q};h{n}jeJ6jQL^oK$90|w>6OVWm{!NmTw zDe3}_legQ{Z&^Dvi)9}JPE39+RnqMP;D?iJ0HvseoqTM-XoFp;MHA224|ZNmY|~Ja zN=K_EyVTCHoe<%kju;j`H;It%tMhV$ZnMIJN|Q4|L>oZI66Jsl?)@G83LI3~tEV`^ zMvJ_yJp8IZ_zqb(fEFgSedq51wA}hwj?b3h1E13Cnn|4BSlH!)Rfd#O#!P~VKOY{q z@QPtFHc^47wsS8pOO;mssSme0*`i~#Iss`46oYyB;R7U}{tlj08*}>ja86}GxZb}* z8h#u?aQBSH0;-yq5P+^@4~)7cE{a_PZA1Wu!kia+CToWXL!@2@%F+FefhtiqN9SHu zWU>BVkF`84%l!RulSR z!U9?MEjVUQ&DD7>=RT>T$iA)QB2!`a!ICNDnuA_G)n4y(NAhm#eTUIqwHD$TS{Cjm ze6#ha1E_sbNl17Y(1ziLwTw(_I+bsB9=aUMPcRT_gZ5ut*M2`?ayU%ClLP3p{9d}C zta%O)c?U>xH)mn5o8u~Da?aMiZTg9r zv(7bxk=R!2MTpwgS_h^R#6q$vD!ivqMK926Va!4nal0nluQFa#D=*{s4mj=rh4`LW z#qliT&0mV0bo1{k?{6#Mdnl1JN%$<^IKwz#X`RkcG- zq_FU(*ZDVNbPg>ys7tAM0-tneBLcF3IeV{!~+>U&JYVbmftzIl{EpK9>D1Q0HgD#`WS}_*$~!P zHx!I}uA{JOByg3a4VYncf6CC!4||-Bh-q#`<7Vj3k&et-j`h2!IL}XnNkFZaT^%M% z;*Sl~H~!--bCtXhX+u$)UWO-Z)PLW5e-GLJFFyBwt_Z=tiB7!{h+d0_9y?!nMB{Y- zDe^%Ju*IB-E)5$qP#q6j7?1zXLc$r2H<+on{=We^ao(jmPP1x=77bj3Z(@dj3(=$0 zo3-dEei$a)dHKO_;anuARgTv>Mf+k&!?!J9z%^alK(FT z^uO5HtTs6ott23(Xg0gSgSb|h4Cp<$n97Ce4nYp{e=X3W8O|3EoSe|0l|oDXQk-Fj4aU|KZ1lUAvuSz)NY+RJ=sb zgXU?OwEY$7X8;mYX5J#GdROPe@zXr~6Q+~fIZ0>QKcOZJ5B~wv_5Cj}oq-q9>P;1u z6#z26nz{Zvp)R0aa*lD}r_a1n+I*p+pAd~@$=GF}(fSyg{{s?jAQiCTA#S8ZnWf4%G?_T zzTX3NPoE#Kdd)2Gh$(yf@iC0Q!$V8z!h}lbC^(fxXCn{BjvXU{u($Vjlr}-W1=@hB!ACFatU#fFeY@aZxtD)z=YqDRR%D7rW+Q&)C(U~8xIgan{`}b;~YuNf>R!4Hp#`*WF5{+ zAKo^(N@xz>AT&P+KBs=Z(Ej1sm(C^?7_-rmmmdpmsGFGPTJ*%)_~1NuxU|#v zCfO6#gi8w)LT}?0*>M>r%{%GI%e^uoO&pn+BJWh2_5@V+BF~8^!hh}1aPC;n+oU%^ zC7}ia`tpW`Y+gvCMtX+HONoM(38)4vpgte9&^dSksLq(wxo?SPsa%f!oweZ1IBRb# zY%=sMP8Ouml_rwDP|sU@u`?A=fY-jld!3Tu$IFgRt-r44B(E6K0K{UpaplC5zW>;t z%4x?(6fgTVsoyIFCjm%_djj*zYp$TPrv`}xPM)d 
z|L(YdbzH}L=)bP=--ECJo^k)SCjUL-{(bZOH#qsXHTipR;Judq=d373_loz6FjKESlVy24&Wm6V&D?*f3) zX|V4r`^%|euIXR(Vd9B0p38E)&bmid$%HMhKQ1S=#yLK2u_(~T)p+N{#~T*kpl=yZ z4C7=!;G8*4nc6&V+Mh<0)2wke5qofW;GgSN_)`z!m@7;`ykg_%#~Uc_?@})t0Ptw6 zzo)JAdNXzVJ+0WxU<;IUeqA;!0DmoB}(52K?vH*?2 ztK<=C9VX&H5?U)Pp08ebOkh+XLWAXg$NIdVTQACz2LZdZ}$1IOKsGi)eYG z-P9SwUt0rN*KExIdE;D{^`#6VwmY8RC})wsOoovbg|Y$%H*H=h1VQWVaNC)NUEQD- z>8X^a&B`2|#8DGFE_agj6j}3xgD{TS{N)&=P;5~=KLh8k^@Yy7+DkxNBaV}S1OICywbYhUW8??p2)h3pp=-d#a2XXu&K9MxOZT@*C< ziilV|1@IUsD7`NQl=ZsrT_&CoSH!or|s^X*<~8mY%E8N)7oyc7{IUNkFyw zwdAdt9Nj55Y-G|U&XK@T2iL@)7R}>+ytW{Rp&YDkT2)@OcDhIm^tdsmRW*vcqlr5$ z7^{*#DUsux92a-v;Z>R7gYXSRjU}MoI%Mz;m^M|STUA{`w0QxRCQ-R}^;g7jKJ5Nv z@BVY80q<|Ga$o0oeD^Tm^h!Pry);G|9(Krn+WL;=3Aabs3ghvO%&8L8m)}9&E>$^= zEi8ka79$cZ_6){{)0edmxqK1v0-AfVu?+h-WC%CVAZCR<>(hY(`iT_m-G!{$!dop$ zheL>weR|b;T6PM+eJvDX9;{cJrv=wTyQY_+;)vk#GCZg(*yQ_eD za>fl49y@d9=Md!rTu=v_FEEegto;1+8^)#?YG%P#+Ur*~_D7Us#+=@244<-(yWZNA z52(e)seI=T)RwawZP@mEaZi3~nqNm@_|Z~tI>y=Y?_R^4`Ksrw3=+~ub)e<5>mGuZ z%~v+vR5?l7R{WTs*X5O?`D0PXD<#SVJ}Ii}`tB5oO5*weQ2nvXY%RTYb=?rmo2$XJ z<2(gWU*6Q@Ara-zULRC?yq)lML#OFw`<+`IDue($g&)I~Mqb@BfJ0420YkTA(dJXB z4Df>p%N~#7?hJ}5RwcNuh4bW{Y(Q>@IJCIkl_r$t+Gwex#_!Wa(5ydxALIo%ygI|Euda7JF(=ZR-6Jcg=!l@4%P6qSJ123 zB;Ujf-3-*O$OT063g>e*CUf+?7iz&9O1k1fz!*iBcV&P>-||~rv|Sn&TiJ_$oRLX! zAu;Iab5O(6!sGEb26|pbwFc08?4{i4vXx9ozIWI<@9q_m`58dW+T!Yikb^GgQCv}} z_oL-^E>Abh2R(LILiE-0wNgzZX*O&))`A?zohNtc`@B~giiUNZ3ZT&veLJd#RrlC< z8{JFjE}E0H#n@`T2Tqr+HnrDU@cZdYre(PSReYjex@EfhI!>KlpS}AgfwPPA^F5ll z9Q6;;%{|DY^66S8z9i(N;=t~gBjT5%&^M{V--D#aT*jXk8BHSl)^JAghPN^j0E276kqBoq2kL=k7i;|mBP#AMpFTGNAsdt1- zi*_;i2pped6r;VJ?v3I%Eb*XYkuT5BX;9P(Fb3Ky-f0J|v+XEU@w+;b!xQ>aHKs=U zR1q-*^QPsDjis6;)%8Fb9CW|J11@Cy>Y2h}GaqB_j+0A{@?m5D-pX?db2%Suvra(i zQ0^-^;%?yhXB=mp$6^swtD`%W*LR@hO#dl?le*by4*(?*^q|cHl^S+bsVxjoVAR_? 
z)>2$d3pXJ*hjU$`Tr(7#`I{i0bdMCv0wwdi-OJ8in7Dj2UUfSo7;VEFS}-M{EBVB@ z#h#u)JAk0zUqyVo?qBf`?DT5(1J4^9%NWcuFNfBLyR_>vsWF+-H(8*JUZdVtt&Q|r z1G>d$9cTHo^GIqlLk_yBerP3Vn-l!%eVxZNBI!|1MVtXXze;)!pJvD-c5N~theina zwIg|VUDM|6l%O3H{h^#CQr1sdXY~nrR!}jKeCNPoeaLL)%8^g)@?iQg=LlI-@^Pe4 z-yfeL>peMbZ9G;KCQVB=c`LBGq#^`&HxM3#25MR z_#%()n^vx^Y~X4>an-J3G-I>X8Zi|ZuW`1?<|}+@Niz4WE;K_s*klW88fWStO^!u2 zShSQuk!d?FRj@;@EsL3XyZRWzqWhpv=RO^pJ=;dFOpLF<#`vc%O ztgn^0GM;B{G{z(nCe@HV3yjawy3sSt^(KB%N#G*H*-i?n8`Fd!`WFM5A)b*{C36+# zQ=_sCpT(4GJBv;km$i!Zk7J)l=1x{S29n`YTsFq$`JL!9yFF-a7+aBH=YJ#TnN(hXa(QIn^bF6$DRUymSzV?J1Vs&8{#roXF=STx%SbL`)$FL&ODZRdenHy65wvx?SNcY92r zPd)Ik6dAZ?pj_~f+|Xead%utQz~;m}?OGQwqDWKi4ousLTQp06uYv~$>#SD{L`5l6~mDx>>iW1{+;vSvH{d2gb2{Wlo9uKa3yDm$`v(ls z;Y=WvHGwm|+9)4Wm0A|oWUgdXW-e64?s}1rW)c0l2cPEkYE+N=wZ9LlU$?u!z6}J=?duuE{Y(FEd3b@S?K7h)Qm3Moz zb@Z^}ws^^sYa)!z1CQMhGmfO=Sf9CtUj8H(MD`wiY7WQf6pA?qVH*VJV$b_#P*exSPr2ei?o$3!MEp zn0~|h#BAN#D$*@U9inMy_GMm%`$xl{W=%iu#>+w|^FUkEeZKM` z!&I0J&X~FSk1MTjrqZ~S(HGFiBifEkyBqtxyjyED1dm%*y3 z0nOJ!T)pd0?;PxMLIU#RX{CUmc+?DjUMEeSyUt@N_Ko59h8bcca2wC%l#MGMdF#LI zM{fo!SKCh71)3|hQ#0TjEOB2B6A;5=cytTElx=PQ;ddqbJm)UaRS~y)Vf(P*fX40k!di<)2$qicu4QvN1Vje7t6j)i0Z1^!r0$zy{QjGFz`{{|cz^bf`Qxt=>S3PX z*DVJl2TKpb2HM+Bh}kgYlmGMvG{T?w^f*K`P%-xk*+AC7eHkI9fD*4GMl>_o^J2p1 zj(368YL!3fBFQf(@6bY5{e$=^>S5+bTgy*dvoABAxuzes=|e$@3IyhFcy6yYk^@s4 zuMdWPa3gGdz%72cx1Q7&MnRbmOS=WYLOV2zr8EwhO{3hcOnpK+jx!06b1hl6a7Ov_ zVEqrk^~eiIT!|&7bSU4>4rVn6*cqa1`LPneY;#73zmQjH64rVksS+VLg&JPoOa?Ve}Y5rl5TqMPed&OVax zLUUCnut<*yj``gV?e!nwfMaPr&VI3FdkjskzJb>FEmyZqFGFt*08@YXB_6N3hHOl7 z0#ry(w>YW0?-GHYiGXVJrp?6b_hY|0Q}6DtRwWi1x9h77h!tcq0wQ)&ol9l1kNtqz zA~>^JT@od`rY{b_kvOy|g4u1x&Wr=V3(>XOajQ|2RjI9+e5Vk$*uP*RKjp>hu?JsT zpUFb?hz5k?z?-yubZjGLl`1*a7T~DPSg9HD9=uRUv-~z8As*Pd-*QgP0VOemopU^D6JnMlcw6<> zc4a6Az}~w|1taHJ9$<H{7POQlT)5&W}y8`LObH^Bd+| zm_}TfUFN$GU_hx|i=Z(VQWv~izftY5c8?8D8V+S^eeEZ4f&N2TMV+*T~9GRZuF%Ne8A&?)_BAnp%>7kqx<*08O zkR`bP7$gS+ikCXW%w~c3Uub&%Q?g#ajA8rYNlUKy@7qo6zM<|5NiVaoIvKJE78OLv 
z+Q$q<^dsje%Jetiz_pEo;l|jbIg0(W8CPP zhGWMhSD#|4lACh5O|L+MQ;DCVhOlFL3%8`ak+;`r~B>)FC7KHT%ufyuFf@{ zqg#7WZK~b^L$4c}DFX6`S;H-_1lb)ZM}_^>G0JX~Gyg zG6t6bh<2|vPl2J4X@;YoIg$<9nm3SXRT}Bm13j+N>QX1U!nXUlS z^EB~@Z^!+?W4(gC)fz$X&j120JnT-<2bBjYKLjZZ*4UHc9NiFq$X6o zJ3I|h2zIYJa`bB4+6sB~!~5cifqi20epTT_wjP=wl>pbIXx!dQiN5Q4Pk-xJ&}(s+ zFa4OVJ0XfrnL(-f!=>K6WO_yrBNmzm>ikHY;>*_H8%pU#dnGS$P^m#f(OHE`& zmG$C{hf!3=Y$F9Pjb9m}O0E>*MpEWc9*1jBLM)M;6EO-&4mv9@qbD(GbB0j$QJZhw zv3xfT{ie8YE2vWJroBXV(RGW@Up$1&u>2q)HnG6VK;cAbPVzw}BJ){(xS+$~CmF+I zxNRaPAx=3pt>IhxXOa&(7J@ZZa*WISatt!bo0YUsCe5~0A382#BN+(!!=B@0uZcrL$+cn}afswy`v;znvpP!vd(HR}2eD6_ajB!7{jPnG`8i!Mz zZ`OpWb(@DJ2f>1uA@JIhw#bI`)vvXxjf01i&2|PK)p`8K&-~s*#gnWPg?*jKA>YH4 z8Nx&fxZS(cOWk9NS1h088>3XJKa<;toom%*%ybkG(!6z|tSD&G!}}frEh^v1`CWNJ z-E)!H{Da{=S|>nUAV7_$e@qq1>AM@3us-}w)}6B7`5N>E0Bi?&g{p}WTtXNY;^O%q z`I1n76(=_W{mS!(;8~d*7v<9^^>9$FPP2wjDZAilg4`K8Xoac>QIuZGFXT$*IZ)~M zd5yE|Ouk!&1_OtlQcS00Z(-w1(#0BYJhLr{J@(p)Fs#B>M{Mxxs#XyoRh@ub_D5fO zQ$tvDsVU9Q0)o(2z_k@+Y-ya)YN(l$!WA&}_#U-)UMo3HJ>Jb`@KHN{x{BF2qttsBam}cn!?PX#+CZm<83b&i9p54MkXA4@4UtYcrwEwBq%s;i^ zaG0tMi#{VA8k84na~0?G?9W_75Bev%*2$a#u$$WmhKyQZb*R2!P-ud%tC_D&!}yy= z>-7M39!@fQgp`Q{P<9LOrCqe7gQQTsAReQ3H=dt?+Ub^4!%1AYea%nFC&|~dri4gUmVHNRt z=Vs3!5evK7!X95#-26VJAg^5g`D`;80Je9zoHUc=nUpoS*6~$m)T{ZLR6nId`k}Z& zciC_%>8*py^H)#;n%5_oeNo!StOKYhFOZK7SRjKbei)W6!cJi`Lc9tAniLdv-BwxCNovhHMVUu`}rzT$T!$Wcnkot_vqFOFgEl(y6MY7QJTgUBA)nls#_w#x&Ap z$_f>o6`^LfC6hf?j=EKI9Y{D_Sn=Uu2@+FyN^lEa3`_A7S>7Kly{})Fd;LMAm=;~@ z=eEtNQ{V!#czi|1!=u6{FHa@?(SSbQ_!nVVBl869+8N6g_@*oI<^FHk$ShhbYA z-K`(MeQq#pS5J+t^Kwx>pKoie=cnLTG7w1|-mLO+^Esf_V-jbJi zT-<`t_TN}1wixuY-&x(BQ`cvTr%E&HV~x+CpN3(94Gt)F|E_wnE>F=<+Lw@nU*b~u z-p)2ZQ;NO+4W+TP*qxB=6K8NQ)f)7?zP4g9q(%rcY)!b#BX`4r^EC&t4&@Qh(CJ~H z)CSrMGznM9mdIyV67O}3LuNu~Q)^yo6m_!5JM7u5pBhGPVCX`tTo&)J;u* z^m^ojopirU56CrMI@m*4oHw2w>NH=-W=Ox3%b(_0^aF_GvlMZU&!;Vn-3xou28khA zWwz%e`Op;H&rTd+`*R}zO3&lUmix?e-|281t)}2^)z#i4g(!`x!CwHu%I$UKPbvHl zU3%`I$Lm2KIK5BCpl^0Q2LNZ@Dg{9yhxB}NN3JuGoOfDTAXCtWT<$Y&e;okiX_L>* 
z7DOy}t%|HI{%Df#8; z`R|nzdpy%MobQ5J6;Voj7%=JF&MG#0S-`2IV{#jrFV@khTl5?dPRi1oJrcm>n03%O zoVX)WC0uo0O{W>XpyOweFXEo-e&n{0yFPjR39Se7yt#I$`P4__g0y2>;?-2anW;~w z68Fw@B9AlI{77ZQS&8GH1nBVrPL5vs2LPRP@H6|z-6Gpo>SNKr?Y;ehIoOSK*tJ^^T9EMwokW2!Zw$QMwPmLAV+|46AUWP)0zYCQCU+q6->|70BEMKPUYlO&%_Q_XK5^Zp{f%XZkc={u2i9pR54I zPlf=`s~l%7Bi+~^cHx9nqaT&L=}mnZOy9?OWw_uT_|6NU8Wb~qM+ zCkUE8Wv}7UANb0rO!|51@N_xyuI5F?NrbsZd;3lw_Tkh}3F+OJ=a8?Y)zW0Ht&Rx$ zB;GVO=yX>X%{zTdt&||Tdb2=-^GgtQ)ATl>OeVoXwot$dJ5rLpfQ(U|CTs}l?{+Jc zVHO#fm2G5)_k^=5oV!H_6^*;%xtUg1cUdvN&2x>aJzwrXL_GA=m4HH$T)zh9_#(aA zK(Ba5(a{1E{dC8j&rS)34Kn6?45e&NOM@RZ!mGCsjkd4g(FxWt;JUgIEo=FZq z>*aP)q^seW+~UW^m!(9lJSY0tuQ31#K@_Q%jQ(lP{6B?#V-oBl+-=Y9VS>nKg# zuE5iLGkDM;i};DLoy4qh-`)nv2jdAbvEw3Rswjt%>p$~H9Nng63l$y}IB{B~7Sq@6 zusMeJFVUeVb2UKOQ(Uw|qOVR@;lu|yBs*@UG zD+!z_0HT${xTJG|f**7;dV#)T02;)lXsCv@NyOk=8~CYL!u&Im|`3ANQOls%EUV+fD~%dC(}+B|aZ>U$4H6=^kX?B|ypa*zj9 z40jYovoWcOjpSF7OJWnGK09|UK(>G!PnWDTkl`@!jodGySLg-I*^?4O5 zd(yjX_q=v%%eT^}p$#mILOEs4u{t6GYbM7Z6YMj9lQvk#Y0_3(ZQ^JGx3n*0bt2j> z>*>Gv5SlIx#>&azE)b z8aATE2HjVG2h6JR+C^BAH#bw#I!=@rDukw3)-rS?7@9btnq`K3F1j8wQ4oBW*~WI( zJPy`=JBz!o78up~=nLu(FT%9>IPZqgUEWU4^hR3so$#_kE{lv7;1rN*g;Myuyi7FE-|jfMERnQx5+l;0Y=#ZZM03BW06 zJ56n@Wa~=~+;wd~{*1E|O$MX0w8dYcjD1f<_PyF!1GMJ`@!*C1({%i?c31bdDx1)9 z<>?qMq`oL)m%YgJWOBkyza^0A)==a(HReKIpagi&AJ>9>bI=8Hdnr-wZ4%7D`#S#k z*cCs#oR1uH3^je!aCi-`_QTej0h3Hp5c1(D^D195dwnvUC^RfyEY zI$aw5&POQo^Wk8Nb=9T8y&5rHnz9BfR0hD+4%U$$gjr*AzEvv?%D8aL*5c6zCU)E3 zS7HaTl{5J`i|Oxov8sKgY?=66-dcdY#pUKJuhe96N}kb^fb%z6G=k&a2i>gbnlv7P zBA&HN^B-B}`Bc6xVblrqXFBIrg^|Pu>)an&Vx~Eo>&87j-7y}(E0Dfx&}79;BS04; zlI_F6pBC2fdeE6dpfOobGGzXk+xYwEy74(io=Ajzn@X}{_^?eDd+}j!;&IJ`iYMPq zMevY9-%0#3B-F&Nu?QZ-_}m?zQMF;H|5_DVfXTafMH$1SJ%wlmq#m^DBzDU#dhFIS z;}!UkuCCKYNdo(P=aD z;Bc@0WnsA+(>M(+=^eak`>2)RV(hJ+H9J*lL?eS~rK0LMp}NKQT;>6?^4}W^4ucUV7Ew z=ByNRFI$@n8>>024&L)Dj9aM3D_97D*z2dC9Bd~BhZuBf;IvZ^+-XH?ZZ#4PMWPi2 zuJv;>wLrJ_pt17o=Y9#HY?HltZ1{=FG(p=rh-C&_=HseZE=@gc%t^e6&gTJ|E4H7w 
zbjX?||F|pY+d??=NHDn-4`=eVTdv0*yX8CRfkarixa*seBJrp3$K@kQGJ2$TOh#pf zZc}B_&q``l*`|;J7065v53tSaiK`C*9GD6ai86LzT3< z+1Ty!Td*!W?hycuicjoys7uB&`;FWK3!AlSqv@pWkLU#34K$zR7ghpr`k_>%{xB^D zZ;Vrh#?$22$iJs%^U_$A-T~bgPehTUN?Mo(t1{~Sgh)RGc)OvcC%r%y-JGmo`s1U- zNCf@kaxP~LT=C;OPe?yyp^7hshpRQh0>m7!?^1&(nTLFTQLbdiT)6Z{;;~|(jUQr{ z*(P3OLFlWZR-;6UQ2T_fMauQ;!<#D|1SRa{ywo;+R7{-BQY1=^lK~ zwcS*7@G%+1sAF-n=HLmHI@&GY;Hp4D&P^77Itq7!o%E;9u|{N)JOpfm(NZ3w#?q#! zJ(H{kD=uHJjJVi5Ug6Uxq+Oz8X}@a#g5t_I16ivsY0A32{}Kpsk9?3MByqeR46glV zzQ*vh#bA7WBc2bxHZmgQTdu|^ZQpaf-^RY;nTVX{(N8Ym&UDYNYdKr(<5RGwpx~%~ z6D8zc#gp_sggM{PA@pVyptYSceB|Y#R29yPG46LkGMNpd-72*6Gz?6LS*kHF+-k?@F_4tEIXVM_K;pIGc8{q;vaERy#|Ryy)`a?gEl*<&>4&j0%Xq5{ zZ>=r8VeV@+&1omxwBw_@+pE-DeLdoq6}!snA!_F9j=F_i5DAg8i`u&cC>_hLarwm8 zD5o#RbeeFU?|GrS;=jMiQ%_rVM|ZMy`HbVfHpCn{u1Z#!<7LX6dLprw5-xarmCby6 zmiO@1!20lMvp*JfPP)U`8UFg@^httlAy)_3FivV0lBdd}xjHn4MGy4SjLJ$bU4 zg&y7YA2gTI-Wc)%%D2=`TJOq8U9d0SjUyeg=+70&-n1*^O^>Ty}x3?G{`g~uX@c*^r{^@W%PvJNX7wN8E zUxd<}4|2~QA=Or?E|JIIv}CDA>hv-p_>m6~^1eW}yMVMh(CV;mQWZKpjCRM+ukTK@ z19V~SJ4n5_>r=R-ixQdh=_2o-kEH<7Rt@~_1JFT;b=*@231jz)uEhmp;HPXHpx|Pd zQRHxERC;XTEX>Uv5bY06;w_B)CMv0CrBmP!5E%`17F+Ioxi$e1EpjhVobGQ&?)c9) zZonIH84%m80Oxy$DL__lO58b$=XYEy(Z8Y8#sOZx(I0A z*k{l5Pb6l%e&PY51&j-2enaxiV64!$swu}M8R88p93QQ9_1E-O;bB{K>+;xU;284V z+bORt817mfszZKgEWee4_(;d$rmlSm05{J9H5wdd#grGz$U=IT_%J4dgV)zyJe4_l z;_IJ5XrDx|!2_Uwv{J5qhxQ&E*_E9<$E*$%oA4rqt4xhE8(qCF?v|O6Hy^DFrU>uk z#o^X%t!o7YM{#VReCD&;_N8aG_f=4ZB~HB$hpz#PEUCvZ1{vYVx_GiPhCm77cYk+d z38jH)A&sX>mj0C;b!8qFk0obp)d>ZfC#yq^<38|hK+#XvQ2Q1t{iH;^O6L%0xK^lN zd5XS1sjOQdns3kwmur~Z<7>>|2d|qqH!vLz;$Ti?Ckm+983mkNgG0E-fG-10P5O5Im06E6Td(gd1>R2JtPyQ$x`Q}2cy z-`?6}SRF*`lN)MwUmRGcja0iU1wQdV-=^kZvZE4bA2E+J+G{A5!N}?TVP|VvsF#Cj zZVma~q~XBC!6IFq0)?5sDFU?*z#MTsiRP*$wT8OKqTqKAC9q!EyVI%_)3Oc`vIfel zBQVGKmP0G`+5E>tUv3|EBq|N6dJ3syAz4;Xou%Tt-8x;0uJnnEotcF0We{H!JmE-8 z6%U|5o;e#0V}L9Of_X%GY{Grjq0ofEqzq++nJ-GTVK;B&2gJW1OsTD3mPDC@c7yxM*D`{iM+tgxJ9}%`d$fG zJY#-3or}53UTAH8dd1?2P3>Izy`67N_|6#W)sEaRxK5`Dk6<-_3Cp)&tINg|JaV~& 
z^6%eto*0$7Wph5yJzpI)A`D}?1*F4PXg*ZfoP@>#*W6D#yEeIZH%S^Cl^45gmrHQ+ z0CZ2Em8h{7`I%NQ4Rec|ju#)gt;u$IDV&ge+9S*XalYQ5r60F!p$YEt8na87{W}eE zH_+gZ^bz46eMgm+I#(pSpxe&GQk{Xg7LVMa-P!%GolGKU3UyLPcm9dAt|uE7HBVER zo&dCQ{GC8_NcN5$2+903CR1)vcKAx+ln<5=bW4#9@J4PR+;wMnjEB&$+y=mU%bbAS zH2U3t0jN2)DHki<4$T}+V67h+IE2ZKPr)0%K!WCb%ikY!IC#wP0d{tgUZgs=us^e6 zan=Bz0$`Q;I?vb7;LDUlcRL>Z8>CtMKTHpRC4$S>f3>r~wg1R)+Das7`6b{nviC`Y z)P)I5Qd?h#AS?dg4i6-CbJb*+*X0tO_AvvC-CG7oQx_nM>iYAm3F{rZrBF+8z_Zd= zSRj-2FYZ_`*F-nSx2McNf!JagB69|)V~>NxBh;EED~w%{;>U&6$UIH&3FKrIQ;zg7 z{Gtmc*vVi@17!$CoEe)i()1%}YV=xvDWrCDuQ2(=m+#Z)3IJgelk;3!N9k0w;Z-BW z4C6!3_zpDyhuIAjgA?x2EI_B|>JL7$TN}k_uR$0O@$J5>wJamJz$tz~^VO0nZn+!j zpCpZLjNnRqGl-?;Qhes}%b@{Ev>zT+ezd+loOpISv_vT$QiqX+NL zeTxktjFU>BwBKB;8%DC(NXJc=V<_*7d$5^2g%Ap(%jJ4j2GRcvx{h7_fTR zdk$&V5bKUuoQI!XEzWVI9QD#B!*4Up=Kv!p^z>h$m{iLca!O2Y1-)Q(xAc6oe;+8a z(wJQ0b}dh$Ny8ER63ZH1$T)dO(CoD?6!etp#E*R^F*nqUhvdVo*UGM#(`ffmAG#DL znLlAoY_8MfJU6Yk^pnD8*CPiv)Hw6ICX$5wQ_S6A6HLQAovdH}0Qv(cz8T8Gr~0y* zl;jLOm`i&#D3%Hh{6MF+#lSI1Xg_~2j|_}%fl0+n+uggiJ}4k^VR!7kQ?H1$x$2lv z3+y^{LgaO3;hT~t1K7pop3)Ntp?<8?d&;t`XpppV!%ZH@6umkiOv+lKMlguphU8(M z?!hoN0f7H}E2H=cK%@U>tPz^2J!sIdk)=6{TSqNF<(WkfA>w#q<3mT~VYk~G?LLlp zB$*38*gG1ez7r#2fdP$_{{Zpp^L6WrVnN1Mn)m>Zlnjm?t?Ccn$&h5C*9_ZlT9&?Znz5wE$DPe47;TXpw>H71GS zyi#97prp%@#hDd9W0 zNHO19;U!-%xFHEJZ!ez&&For30DxG|djZq*UUU5=+6U81ZibrWIbK=9KPHe1=Im{c z0)^!(IF>z8e>OWY`>3j%UO{8GVAr(ORANaGv<-4~6tw9QzB+)Qz2zNSSq<%U+x@ru z+(q@QwO6hdeqLr?cX1a8F>rXhzQCwf@euEqR;mN6oF;bMcXF`!UK#8}^~Uss6G&1n z*Y{rdiaE__u#bdDk>qEqh!kGsdtL`0p?4saJ(e-Rqw;TL4<@Ts3w-hP^+ci(MS6-?pv$J z)`PDzbo6?nx$1iok3Wt9&iogR9BlYUI;OXz;j}z@zS-%7%0{P1<3+M8{sa1t^<6uW zN4|2r? 
zuEz9W6>U$|nMStRXQ*)C`=xB&|CP$?!3C(DhsSXf-6nCC3djJP?qBFf=(d>pvpssi zQ24@9U(kv+jY*eOMGS!!h8{jtH+;-ZeZ#UV(nQcOpteJ}uz3$k_gTt9^;-U8Uh2`i zYCcwPf+$xKfu0auAb-{bdByN#IlW}zyih~W^BmMSF%>ZQUG;bsK?{}T3!Nkqrov## zrNiA?03+pYS4%CQjtI4+Tq=H_-H^bzB8|vjd3N1rS6WfUWE2-#s6pzjH($9XSMp}Wy=tZU z1nH9VV}H*oGJWyLxWZOE{|A=V*%0HowWo^R&4!vi#G8GaGZ;1 zb5(%NanMw*2GkQ7?#K-!5W?l#p+8r=MOxvycOo)*YNVGz{xg*RZDB8?(2P^^5H zn0b#OTG(>H4eGL*`*`Y(>S}D&)=nxFk=(Efz>%6`GUuDGZt(vCuXyIaUob6mtrKDO z@QhGw%AI11nJgVW!jPpcLM)i^gvGdR`yh=K&H>j^E7WaHbA9*gCo|v6dOZs zL|)F!>Vedw-YG>5aWnSc3C;Hmz@yhOD*hQk<*)xoyLLv8J5=nT{_D^D84HHC4tUC$ z-ba5^GfIyipaPA^K5_huyz5`AQ^3qw_Zz3StHefZ|NPufZ{VI(a&py$^zV(kaQ2j* zLgD{Cw}0_i|Gq*TBUC`o@ZE?==D#`aOu(us#rbcx8Z9|f<0$a|F!{GAbmvH#fTyf; z^gwS}j5XM00HEjda2DyCK&4|G6$8KW^XJbuU@U*D$11Re{yELu%b*g7}%_ZVkJpb4!GfS7ObBnb~ zNM4nwI0MWVOs`}=?+tMiata21V=h_vm>X51I2m!y?w{LP&gfdMQNxEJmgrePJQw_|5v2>&}%4A4hQE&Dhkh7Ujm1qwF+oSSfk zC4OwXA%2^I@1wWt092LtBjqGdjq4kcD?$gxolm<}iH6?(u@UDoU~B1Dw~w$Xm^SEcvW`1eMl$ICLqA*iGFe5)q4+FRtt>3+K5^9{Fp1s?Te0KREXdx6=xcV0cBa zJpP(58Cm|$d~^6+@~QF(djP7Lu0j8P80MS`^zlg&&;`tNNh&r+T?VKe?7|k5{n>f- z*Z-5#A_FwoIfP{NS1!khxL&jw+i|v%{^vAoY~nf(s1jHReGbVl0M4EG8qwW~{&}3z zY6rNUrY1yPvwGBWW}I#ggzsf+*Upx)`1Ett-8*-Fmf$89Edcsuz~AAI|KmJ+`F{#9 zwl2^wlYTf3Qs3LhuZ?$=^%0wk``5ndzezi>WeW%@&*uqU{9BH+?kwhTl)2IC^Xa5hWj34BGS1P0VEL2PHbNJ0L!PeoxF&F!{&iHG-O{?S_$Ca_`Qa2f|^X@%exL@!FL^ zLoU(kloaOuSV2ZaB52axNhL)_XsKREcG#v2lwZ9MUTzamJeKM$9P`|(*%VlbpBPn1 zxN*9O993vZV~YKaKdNS!!$R0XH~CdwG@3?Cfx`kRC4br+A3mKBEmZbVi~j4M|Mv&# z;@247wOu?NI1Mc)HGS6FCGNM!>^fIR#TN!mXVhG}R z$VP+Y{BICTQXA7>EYJV_gunGj<}H1%>V_w>hco~^1$0HHobw-*DYrZ-&`x{NTUK;M zSG#_^)(_4~F{+J~Oz)4@^L;~r!71z1^Vp0G=zl?PeZWN%GcrDUf){e`92}Q z`SbVYH6dr0tj%>azry3kP2fHUV%K>j;m4{!-Lu)d@YZ1XX}w9#WX0p&8_p&nyMnsU zIA6-?y)2=YYJkz-5%;Se{6?;@UB7pKlR;+h=U{)m6(H8MIB?~=_wK4qs<7I@uG3r* z0*72M$&k2dAdC`%HWtjJH%hbk9;PQTMqejagIE|1<(63V-R`9MfT*%&2%-0n?uIxvo@V?&2`$D@e@tBQ z%L{$1qGD}5dav@ptRuB*MiU7A?4{hEmIDL0&bU)4si}=1^CN%jWvqb5(a#=3Z`M(1 
zEVJL5fzzZ_V{@Vip;zSu#3K{_{{4HsdT)1N13DCzv|h{b+w&T-+s8P!gZxfRh6-x5 zH*eZNnh5MqW2W|$=ZCu)+UO1Kqlh=F8zi6yAXY_V5us>8tAncAtQf6rW*2>%ib(xWTi^jq%1S^H|tFd=ASscaMDWrqVKc+c%LQUlz zxi-UEsVJ0LHdF<-L7y_HxPTW(WaZi(pqxhNj*8B(|kMXIG8`II|WfSK%w zTUxo-=9C59FWasxPrt z0?*C8-?t+B&=KM4DY2H%ANJ*V&mpOe&ZbWIV0gvlDY7T3t<#k=&C};{y}5>Ea2~w( zb0xW3PkGE-Ge14DyIlEjdsvqzb7U`Uun51@K;K#Pk&noOMl0^e3Ob|x$87efG~H>D z&!gH$1Zhf&vOm0BkO^cp>sLkcZ_}{yK~oIJYO8=P;)NLsw=jOwCd=;59&R3&nMb6?#r$_(8` z1N%FYq`BZ}`eeTMW9-31OFtBsl-H&zb|lazQwCzv&Dq2rk@ednE78|mBiN@44^9Md zX$MIS6DKb6J8KX#i6YPWuKbhh2}dXXLU|dLP7K%5{YyQoME^1#`u8V~0ej1=0|QkKy0)xs(!z6_k4|(^^i)O2xuykIwgW=Pqe@*=Pr_ZvD}IOl zsXMuI`EcgzjfnkV6_hFKm0&9cnOGb6ffefbN;q2Rq-<>_t!nDJFX70g_of-K8z|{V zP0LbSBBQd)w7=e(gtG(gNF3Ni&i~v*a2T1_`WI*>1Syl({FNCVTz0Y+D|}LC?%2YjaZD%prs*CX}_zCIT%?RtZ~W4ii@fa33Fi#KFlM^gGr zRZaUkG}R<-a`JIi&yO$x9m9-yqxSDf5g%(M3KiI^< z>9dhmR78I&i)}YdHXb)LvgPoZTbciyzM zS9IlC%J}<&FD3S42?F40#AbhUg!v|CM%&(m1G6gF=8xWF(JuxSwngG~J^ND!RAya6 zXw4#%HJXE4sm0EXfvDgKF5Q&99s#|3bH(= zyG}q4exhQ~fG$dV+g)1^j)L`INIFqN&L+?2_Sbh0ktfXdwp7tpJ~E$(9oWTHoHv|5 z7Eqi@Wp{(vRiM4E3@n4bg!V7+SVfh>%a<QnvJ*%q~2?bvUhkT$3nNbD?8Xz9G3M&Q~v$TL)+How1` zq>expnU>AxAt1T^{Xui{t{t&ggyubm@8+h6zxTX1giKvj5{2)k<*4S@&*c`8L!D(r z8}MWwfI^lpis?Rl6>vn$UVkG9{`bCjf14HG&Y8ih1U-0GC7-DsB0cbkof$XdCtLZO=;0WG;m^y2I31_1Kc!B^G8mDu_3-~UWSz7xh|=$N#$RkB&n z&!O^bRI2UK*vE|m&4`FMQAR}G6FB5v815&5R#Xio5#GnxR*J=3-j`T0H z9lq{6taO-?0y-3FsHqWYOJ{rR*2Yhv`8fbDaAgxO!#TffoTd@4@wqG-LehM}Z|Jwu z=G=mfxShuD235BQ_6af9hzv2`oeS8a2u*q@eorMiTBuweG17l9vj==LwP*@S^PBUI z%rduRjH(}5@kXanAlzksIk$;(lMs1^(%x6)5USF4_V~VS$gnvinTH#P@+66p4S7}Nk!!GC-+Xy`5$ZO%-_J2;ZvFADPcQY zaFr7;;T>}p@Xe-ZpEupgL_tn7t55r(qujSt4x^tmFF`~@a>AG`>7CaML@+9eKdOoR zy<$enJCf9XH9pVoq>R>=&9o#QFV$60l3$C@@+{WH7j0&yfyc9AMG?TvxktAi5j2r- z_tedM6)rOI#F{9~=xoXoGuTU&Fj9%nCLT~xl*#_r0+0PlBldeJVhz9kYc&_yGMq@o z)_{i+h@PSVdAob7q-|Q>nNY1I^1Xq1wjlwWVhN~Qdj4Hyp=hTudB|x|DJ8`z`?+(c zr%%^C?f$Df^ZJxmozCD1`G!+9_0tzQS)Y5JeEwd&l+?2=g)Dn&IVxp5my9;~S-+oz 
z6$$ClTnG-m`t3>1E$T)qhxBC`Bs#TEeAqc%0(b1~zGq?wuiqQT?e3tKD|9kT%-dD_ zSOoyJi;fJ=W0Q1Y8<#Belaa#G?Vgk6gw1*L74jCd-_YB0-BFZ1QyzvLo?k891{P|{y%wYkdN8ASiBev3*Zv&lZh8HweP|OinUOkcH&3)kX@kY20Mm^68RbzD29@N z6lz-kY9i4iV&TAugTAmRBI8rQ6G{MHvp~pzmDoTM@SSwD91l=60)oVPL=Fur5|R(@ z3i;;5k7xi!EJ}(gC=k2q7gkl|-WB&*yQ>gAqDvPT;j`S`Y|8tW3|L7zcyG(yW*3ZXh3k}o| zbf-*j`& z8PaH%*}a;U@2f^ig?s2irz*G^{~0+7wb+bvl{@53Wo*6ACj=Ws8suX+$2gzh*8l zy?M8h0YGQg_aQ36&mf){Y4Gq+?xYs5wSyDMBWv@ z-n=9XNL}zZhDI3Ur5%p=rP3u*ZrnO4TSg_7bf+bKU$cFi!a@u zJjEh#^HX@4&wV`b8~%rb8I-$m##wRHDZ(~JV}%Bz!&&g;@}%()DE=UC*Vu>u>QDz;xZulIjaSUWIK?eC5uz1U`aS_jwgUuhz=m3F&@`cH1c z_eZDj?RN0Sq(YzH9#NwGnUK-yjk>|i-e#)S6?o&9hSA9z>ayEDb)qZ1`?H_e%>xb$ z+eHl>=$BK6A;I8d%%S0YnAmjJw9tfo*jzLfGF%MhEm?Fhp}IN118;}m{1c@c)gl~A zkS2(>d(4HpH#ASa&*@!}bvxBFJ+PR=+?1b3U6Ex&Hg3EEdW8x&;=m4jn1ki%)0xcb zQrxbWE<#dsPxJEVY)G~J62 zlsf0L$H=LOiwWn`w}8*-Atj;e59&7Fm*D+AB}mD7|NLb=46{&47Un9d!NxzDTQRwd z=b=B`k>Tr;!uVfA*8qOtKK5|*^nDy%hSDO!v9W}9AL2c;y}3+Xl~+k+$>--NEuYJ$>)VeuBF*Qc+S9p<(&r8*%hbMH3zsf_>u%at+y6YnuE?I|7g|7{!puiIdN1f4bQ`Q~zisb)+(e}GDU zqp>p@^kNmpw!V|Bm2=a~1BQ#c(!PJiRy-KIjSW4`}h?8qY_j&hr{kL<; zTf(Ap^Vc|Oqlt5-<1o+GK50rG8l4d$)~K{K6X~5K z@_zo~GQ&S!$=$w+EhT;?I}SBK`L;sb8(VBOiT}}ZH^ROF9aOSsVAp1N%KAqYz7ert zV(<|=%)rK9prxyQpWh)OrSh8E#3?7nJYLM9Q*vXB;kE<6kq?XOLQ{=HFp^tK zo1Q~u;+9j{aL)f??@I%r?!Wd+X`!M8Wr=dP*r{wK+!ZQYC1WQ$S;rXr7>a~YNtCTr zl(8=}_9+?4z7GatiLsBd4Q9stKXX6N@Bi|7=6?UY_cfpOoaJX2`? 
z(Grqp6;%GIiez2X(4L`}hP-%*oa*%&AcRT;`hcg_1RB zeCSIB(r>7f%9#ymFeNFqQgi&_iS@sqa!-}L9!2gyEFHnyb2l)bAZ>N*79p@Hg9R?Q zJHT1XChHL`LehF^traufbvR>%F$yiKX~?Rys=4v~lXz4i`mXO>SxA{#z{it{kA3-z zsyCilOr}g4%B&?l@%O5^^x@uV#=3Ix_p>c0nq6r#WNpNoq-3*Vx8J!)%y5A5LM0(S z63r*~;QJ+n&Ccx~zii3a)1_@n7InGZtGyf%$1fPXfN;a56&7pm<^*h0X!Zq_)2-Vm z=?HV-;XG;00BSnsR!6?F)C*DuGx#E!7?^oO)oQJJYi9hF~JcU!-zHNc~PO@$l zurD0(k4g2axFm)D>kWSXP3c6vZh@qljaiuY{Nl#*{ilOIcRHDKM9hx!ulip!gOkgQ z;yciL45*JU_4mq(@L{(wL*JN;g~=a-lFt$}w)EV~%%-H4za$+`xW4_;G$$DIy1m=4 z*H!C#jL!Q+aUKo%?&g=P?S!m98{i-rUT3)HVJYZyi%~mN+)RZ-)a$BmjD3%f0izH(xAu%-8*sf3Ber&8P>jECzt6Atl04((V4jGlth6{4In zA0!q}W5N3&_%;sTEg$oPMezBp6`Vx->`r3BsKo>3oSRoepn&yh>L```dOKu1!k#CV zd&eiJ?xY6eic&%Ccz?z6gcRz~v3NRt#O1mTYT?~CU_c+oyBar5OeXT|!)190NEF0y z`;f10^_J?+osXwU3MxMSOVczfi#Ory5aq9V33y z-%z>UBXNw0v7hmBmFYHGayF`gD}P+De;_+az15-WTzVU}4t&Y;eDuh5w?t+lL`)BM2|y(KMa$d8Epw<_Lq}9SZZg`G ziH-5qOQI4p?=+66KPs%)ZjLRP01Nx=$5OB;3wm8+y7;NDmL=LXEn?zDk#vdp>mLmd zg)HG-)0lNwqFCOW+2W~b zp(cbs)9V@t{ogmc0yTOzCZJh)0B{GkH;}=_vN~y~o*2oivAvo2di&vR)VLLa`g#DMJ)>b=vKk0`bmuNG)Ji^w zY?}!*3?eH(-^07&rY7P{_MK>+waXdVNI;GvkRN4hr6Q+Rba1p07=ht8UlnBOO-b51 zKd2hF&I*@6NL2V)oewze7#{r;`7_)mr;**Wd|rIbv%yEMJ|f?Eq`=AHRkyw&`7}ys z8f~RcPE&t%BWm`xg_w=cV4rb;UrOEV8|$XVS5YD_36hm}#%ta3YN5X^Gkxht&9IUC zur?vf<4uFYhXbZ7V=yH?nWIfp=rZ;5@2_mx$X^+`J!$UwrTlKjskIfO;s-fX-0)d{ zYEC7;?3*4OD7fR5SeRjpy9c%3qB5Ogdm?bZ5-%9WgCJT`o$g}#;xt3Q%zg0ev|FDf zzfGRi6)$xYXpcmJPQ0MVO(IR~8RC-KkaW#G>C&~=gO&F`X&aR>#HA$n9SHWUH5*2d zWL|NE&R)aPmS_uvw=+eXC;qNDrO*!4F13b^ZGz`e{&5rkqWsB*O0{sw-O@;Misl;yW`Jxi+h9W&hRb!8Dob)(2+KOS_{U z-+za`6-s9Clp<&SUvBG59cfo+5-y+7GGY(r9C%Zx`IHA_$9nZ>zjC>F4T2Lg*e%iu zH`CV)^M<5$4O0*Bwm(UnOJoy@%RdEqSZ8?`Ro@GmrbkL29bylR@?P{}C89GjT?Ny3 zjbAAae9qSG+0AkIE*K|&+7i~%J5X(__j`SEer>-XR6FxHx1#`?zb&Sz8P@5=md*KR zR97^?Sof*Ss=s4%kb4gt;N3pVp05WWD16^VG0lNp7e+3yoi*OQ`v#T`llkA{?c4=6 zNoedd&4JC6#483~s#RtWpRs;J@B>Jc?@9B`WItQH_W)xbn`aE9efON;6>|?uTuNk< z!qyB6+x3yi_IB~Qgn>p~_@5ak#1b`||K^X~vDHWxeeA9BU3 zjv;HHKL7ge1w?rNsun?syAiOgtlLlWpkBiFU-xZv3NIsRhh=NV=upp0 
z`|O-+9c-3jKB{6-4Bl&aVq3e(9J#@1a~XI01CB2u3bCd-v~Qv2M| zt_g83<66?=0Mb^p^O!3&#U)0mLJNy?P7$oHP&OO9soU~ZXnJzsgftLXeRhSm3>s6EVI>FyzYg7b*Q-V+oO~=jJ;j>u<~cG z;egt5RvzV-a!dR1^9C<+G6EHLQek?CHf5WZG@@PQMyw<8<&Equ@wRnwLv^7{B*mCT z`M))-v64<}MJL4;YELyPUlg(lau&3qX$eH1 zwlay`GHY}>AL>I&x7EA*H2Z7KHNVm$fKbkPit&MZX3CmjhY+1kTCv3`h5S>uj@TB zv<=sEBAxs3W&bVbH1^C_N)<4tuMCGCt9YoU#hDwO-UVxzw33=%dg*;KYt`KDVHyxeSNdD(TYuEaYAhBBv9*xFz#?C1TK>I)^UeTH^ z>B4_`)~=35LHpJ*xC+bXMJ3{#jebLZr8nd+l~+z@Un>c-=IZ>iY{ivttkzvCf*HKB z!x*?sSxg~Ij>bE{XDe$#6&n!}K))(5eeGjMq*?VB9}8L?b-YL-;|`x&L7mS6wrS(s z?qBo({$HG`_zq41as};UeQCOhx%v@}N0>bNnE0NFjQyu&hNbFUy6l!zCL4Z=r}wJR(At? zr=Y@~bG}y#_Kx$@PXiSx?%t_AMq31Bq8|v_#{1m0D$67-!O?5yY&+l7(^322z;BP5 zouOi*j<%A%{mf7jf-)a*P+_5`qg3{ROTl72_jGj&`o3phiB@kmvHqt~r3#})%6^k% z;~>+mES+QlFLI0xJe%*CjLx|}{+wY~>=)}GMH9i3o=8h43+C`~^~|xN^kZ^$+uGR~&SK3+F@@DBN*ZHID!Qs{OC)l>U z-sz5c<80##ha^L?^j6ibucks0^V)`!4rzz81u??EfvPpgn8cAWE7ZNKaJaBEP2yqwPq`w7eDtG!UU+CK<~ zewX%O2Dg3;c9v3gQidVb8Sb~zpgYE93hTjBkqEnZA|_V$wBe9aAT=EwbI#`@j_%hj zF%Y0fUM<;Wzx}N2t^=3AbOKDaF1yHw2v5#T@^DOpwCLL3{n&x>A?K=)ww!HFz2U}; zY+A@e3$4(<+n~wxe2I3DfiYqf=cHwFp6j`Z@Nsf&nWn#jysr67ZZj*M$W7VWWxJgr z&NKYULAG0|bH8YCP+e*Tr5T2z$KUugj9ZmE93!-|(~&pV$?%ISi-S^LHKyJ`OI$H~ zXGn>8Pnw1d>aA+zuU?aZnW#bV?8OXvR-m@=Nr$5e@tD`)KsgjcH`Ct!Bd3Lli-My< z)|!GG;)!ub`?nn}xV7xH9`_zix+D<;It#dvZNh?8uk) z$8F{49hD7UewE{6v7`R>dYz3YVXVA=Ce2D<|3=K7w_8+U-&WVb$B3l-AZ4F5<*xY` zv8CCr*Cxu!J+H>>aoRGTFA>s0c0*dW`Zf(TW|!T|8+@y)D4+TEbkvMtmTk@Bmf3wonw6X zwb%~r5LK()^+RhGmp)|GLvyTOdgP=*K38v)l*TN*vEJF7G=w@7!b+R_t`(k)TBz1_ zeQGJ;QJW^+{R&MVu}#%4nV;hb-s-e>++ANh85;i+hQfdOgx+0$AtUsRCylgcu++82 zb$Irxry34*si4{~IRy(xEmYDo%k-DSoNbz8E{MQ2(MhY9JFv$v!=YYvck(KRAG|{6 z7!dZDSjNeFWdQTMr8M#(cx?t9WQ%JIxd4v6O^}DTWiX7HH>h6iAv985@D};mJ9PQg z?n-%*mxW}e_n*)~mF~7VjtJbxmU-CjdV21Q$atyBuXn=a84yD?M%KdYL|_Q>li@^- zZeiP8tcgS3ov@;kt@O8*s6UAh*{f^17s>KqDIph8b_cv#{fSM789QdCTWP^By)l#hWd)GSULmC2kACMCU1 z_{lRM<7Q;_sIY(|qOg|Yw^lojc0Yu(kTp@x*uS&tFih*5GzzhCCb=@eOl$7z;XM-@tHnyS^6!C_od 
z5Ve?_h~T9feXvVJ^xbwQL~3Vgr2}sqCclncI){wq(b&1t&5y@h`#JF&)U3w`PtKC8 zf6r}O`tf*|L1KYjwOh=I??DUhAQ&${rsrLiF6QIK&AX5{4(k zd1)HD6^M;I7)vG<#$rbG|HhP=TjInKZiScSe&_b&2Drj&d>;x7pnxC~40u=N9fr-| z$qk}?)C{^&Qc=*5!jE%3(k*NZ&m(?vJF}cT^x62FXz<)t0Hsg4#N*v+$70pbx5YoM zTCL(1sFa1+LfqsY>x*>$?zXb)YdGi3H|u7`Vat@^<`@jKu4KZNM&?Hq)P#H_(s79$ zH78j>G%o-t-&b@$iuC19S_ z2qaq}bRp?zIRnDUoKJNe5zH#`t4*rCp|icpRK-*KXdI!>MGZ&1Z>%Xd?;?qOjZZYh zp7duBk!Lw~liXCjEPXeklhi37(8>a6GWH5)R>c=N%!+)z z@V)(w4yap>5h{TK)tE8UlAm6yiIN}zn&%$s+%W98XNA4K#_+p<`awDePH&k^yd{v^ z?z%?)Xz93i4&*XkC&iLIuyENw<+iTBP|dr>sTvImwH+BVOv@Q;1600Vu{eU zFV43T8?hcnsUxk7-&BInC_nVK>V+v(>FNMf1NecoB&)*8?Plnh^))9vZg(k*9oZNT zoPSMvV7mA`u{|1EN$TmMPV{iYj79>qkqnQtfNiZon66B*>Dtxl?+ZcZJC^?QR+I@T zFNXx{Btn*dyOX}u;&fAHqZ4XRcIsBr1(bQXyoW7fL4xGWcGg2qFz`>8eU@^!_pP%969h{% zpK|lcV%ClU3Lw=1ram|CW!L-F%dXRc+z=&-G~X0isGEeaqHOLd@F2Xr?Dl+|Ss-P@ zW3oYAs7cH4Sbvwr|Qvbs}&BxlbmGKrmEisG$tu5-0Fg@D>*$TE7+lc=REk#EgHl zYO?d0MyJZLu_S>s!OTOVuV~aj>Gy7c-*{q{a;YuQT!I3kz?O`b{i?A@Zk!Zvwl;or zSi>6lz<%|7NCmZk;56PK8cLU_R(6<)8mv9szsL{D!?gU;cY3wLmcc5;(CR?W$o zVQHsFt2>dG?!c_nboP_0VZO7&5=)5XVrlnxUTKZV5Hmg7AHJXSW`B zl@nxI%&K8@W$g3?#oUu((z74F-W#O%`z^&(!S1ldrE4m(3Q#ryl`vUhge06tXMr=T z+yO~&OSaJROgvmgg3P;F7vF&xpn9z+m{6844~jXJuoiFcBeesPaQf)&k+QW^GRU0GKaFaT0ZzkJYJ~o(aR>y`m5bq+Xxxy@ zYvY`I7UmLi`6(t8Q-^WR$>)3gJL@ug0TM_C&k0FysCe-yp_XiYui_L zw7w!3-)EOOf^PE^*xw2v*VsX*>N4B!KpO{+yvLzR!P_T&PO|H^nP8bPs+c$oUY0 zIT>O+p5|WlXEb_KOH5~H_u<4JS855J;EkxuZe$G z?hMJ6-19@JXr@3;sa>JU;B?-j%HZCigqexp-+ysh7`-gsRN4N6b-*??KS6!7f>x|- zOSQ)X7O{4dB3an;IL?IVt8>TYQX7fWSy)3WOg=v?1#(YMXK4DWhrbmiC1Xn;(dV=z zS!cmsDkD6XMKc%8brg=IpWqOp;C|h<5S6RxQgflg*=$*QR| z3B%r8NzDk=y_$RPw$xwon?4Rph*D>Z^CpV@plG>Kz$QLMPA5LTa{4j9>-ij6!4S*M#f}vo5Z%tqoWDtVVZ=g0v%)iy7k)0SQJKCf$9<;t z#4Y#XO~djzdu>Gn+rc%0yF$mX@lLxmcgL&o>H&o_W%b=gHKE#VUiu`@nB8C5 z%RPR00P;h3%^vQ&;z;6b-T6fg#+M!Z>``Q>N-L2Lc>g5P#A+a?`@_erbR`=@w$S8P zH}D^;`1_IN92`!4$CC7)mYGk1TW@FQ=~&PFgRWx*1%m_VQFq2|rsTC&FGtBawixnW zEPzyuj=!IZhKCKdt{x8{|FnhfIO3BIRRA3i=wRgq5<0^3!Glb>6k<)pWp=V2;VZ{L 
z17qQq%gaw&J5i#O14iu~b%>uD{;}ynp8aAXPR|l{ya$$5s1t5nHy4WrvWlAwEOcmE zgcEa;)+xl8v(0Q7<>s3p0#}>p2bk9D831=^Ja!(S# z^tq3>*s@svq_!f!iakwn5@pZGu|8ozF5T;I*xC?Ql2G$0FUW53_AH3AM-Q_;`36Sy zEsFhCVHaLjg3Inb;xBgg^q;@E$ZAYoXSc-wO#JW-utk6Cl_s#8bq3NPU{qgD-(~9r zu-5a#-ypTRquvIbvFJ^Wll(u`MUL9U)2}v$P!TU^fgySsYL$P}c|+Kq$ulO%_jZw-VQRl$s?DRFckj0mp1KEJca| zjXQdY0lG|D_5q&kv9YTlaiaVmEr1b79AEOhX*&yVt=1I694t(4BILClM02 zDyZ!i9S^0BDuKo$IAbZpjxb?tVy=W9_!^b8Z+=T73D4%DJ#spFTk5#_=FDZvP%8P& zAyp6Okg*IxQg0IMrr6-**H_uBPz6W(-tq7Y`|5;2pj-Rd6(cK! z)1LQ}`6h+G@lVBPY+>D2!E4pqQaqlOl&k09Y}Wj;Zj2HKkJ`%G38|x}t2*K@;~gQEf`ix_?-9?-uV*sRjqdq6K%bEv(79OvISr)aU%{vt?$fK1WqV10vwz3fH|KVvU5%6#fao04o2;-u z5_j6oKmS#0AAVFgL2cV1ZfIIa(XIc=Hgj@21e!|<@2EW~U^ArbcG(8?7TU9pOn)4# z%r3(C;KZ?>WP%!;$#!?-te4Pty4@z|<|WMPKAoR$CwM*=K@)Pd8K*;g@4UJZQLZ`- z`UOLj=RQ3a-P#~16uakmD){uy5OJ0M$YbJI8oWxo zc=L(=WM>?q;aR%5fr&=Xwl`v`FB@J15le$qKF&z(-({;f;)IC;)xPN0kLniTcuPSV zxiq-WbapHL8aaAo<7&m93{ZX>#b){Kx|qD}#Gjc_Qrt=9(C$Nlv}4?gvDFDSp3l~d z2?l!)xJf~p7U$h=%t>l)w#ZG|=fjm_L78*=pYHx&uCafzk9auefdDGHlcYR_| z<|zTIA!Q*Cu3dGPH@92{eMTj9G;Hf5Op%iw_G2(PCy#;}I-OPJ8%e?ntAB2G9%0kA zeHT5mJdjFkJcwkvJgb>|UHkU=x=G$uBgLE6540|DL>PbXHGw1aZv%Qx6cM87UV@9~ zY1s^3@usN>!J$DzmZMMBz5`vbJW3Se~Aq2z$_W{fzf;zG&ld zuef&wCSDZ}S}I%;j`Z|3t*Kes1eUvsl=28iJD30*&7M+!*_rC3k?tw zR%BL9MkiL4^qdwfBRXqo6Sd10@!Nzly8Zs;sZ(NUpwq;C5Qf?85I1zNsf6HVDZTbg z0a)w(bshvLC+!N1Tc4<x`RIYaKue!#$qVpSw-%+aA50iXrH38 z6NYENKcQk{PPfEucYSU{FMF8k-Vu(Tk|Zd?%PP(PxLB+1o8Ogn=N+^;5A3g{K=P`l zw>jGY<)?rq`nBAsEQu|9Fg|TTemyb=`gl)*lHpfRqGox;T2Hc@Kh)!VT%80}b+<~( z$t$ce-(i$rw+Jsn+bOzhhy&^iw6t*ZOk>W-THh(kwS6J1c=f6*{#@whcvIf*(mkhR zJlzVcU1T*)KX|j}mNegSsVQv|R6Y`Q6a}qVKPw6f*sZhl+JR*QOkcpVZBr9md86g8 zS#LfHG`yzXu?OryAgl5W< z2%~FC)|HNyMZr~d$}VP{Gx|`K|Dwfp zexe4X*#3JBdX9_HL8WxD9qDwW$MeF9H92U*N1BP$b|+|_mI+>hcvLqF{-;m!p`R|B zH+=7$)>yGJ%;FTYcICf*Q_TR__7!sB;viQeJ)owI%@rmqMA4fAI0 zD1xSyv+V#HF%y!m%82U9V6{J5Md1idRoD9^-=L~eVC_y6d`mlO&hJj`zwmFD!@uj2 zk)k?8y6xxXF|A;~=X``V@u#)NASO4pDzp@828<^uHw{3s 
zC#J_Xe{Q^(hP9df%ZO^y(0+2Fj=?q3I+%ASEucX?(nr<0b!B(`{Pu_l;7^Q;h_EJ^ zteH!T!95UL(Iy<$DQI+n;u>(8H#`$;8M z{=4-Z%N5#gV#V%NgMP8=Sv@|w{|PUVr!>^tu@lp*-TQ&8mg!Gm@h|^ssU?^DUs5NR z_GeyH8wHvrGw@#ZuFufP8&?0d+!5ALp~*6?aa-Er3D?)c>|Y#n&h806gjY?e!}^bR zwzP@0T0mu}`1C46*W3J)TFJRmGC~%fH3oOm*#zX^r@zL1-;>#ws%s`5?< zExPJMO*%%*N7BJHEQ>WqDO^x?wRZ>hJ1bCA&Qg2nGfx^Mv!?Kzw zP8am2nGDi53H0TRZ%*xx*u%Gg2pi?>M&k(X+oX4or+%oJKRv`dt+CU!TdrYM4*O2- zDtmyH75ocw`_7jWvIF>#y=Wf7tbq`_8|t ze*zT1;{z>Pk8{h&+er5p=u6lBv146Gvl}ZXK84rC-<{&8Qy{fO4_;XE*4ctSkja2x^C>Ud}8vtAob9G ztY=1?ZGWh_2>hWN=?k0f{b0T+=fD@v(XMMp*`E9dKY%C7SG(V{_UC^d+A?!t#6N|v zkFkGD?yJ~y4*rJ?qe7AmqrvzO#pi#F1}jVQFW8y57gLW-bnshTxyZl3iF;yoYcJ=u zCt}#(^OcFg$yR=H)k1Hl(n+???e8u-x7UzXh$Pkju0?X;2SaJI6GZI&Ux)Iade5&c znbAxC;y`JZvXW)||7sf?!LX77UDBz)}d|v7w4%{8fS+DkpIJ2BiPh#b^e@l49#P{r9-8a09;1Ng+<1eQ5KL)iw3* zU)=ByX5jKE#r~^%aS05oxu%RwT_kXO+F1EZvwvl(l7JO~O#h1u{(o2G|E|dYY^4AD zR{h@<`9CkhMv=<@_ae`q@o$FA2q?H!`*nT1Nqe?rj{INHz<=C|%Q4^{_3Z=?Wqmsk z!f5+CkliF|KNef)aB~QmIWiH`vVA3K{M3FnUAmy&qpE|v8^e>|H<)n6qd{5($3Or}(A5 zl4Qe=RGnQbIdeD&t|?vJ`r480ymv}JXLjl?TUsT5v}ow2LT2Gjo}mk}bz`kH!zbkS zsa=!L5Z&}qFT3tfRGdz;->9KuYX&?zp0Y}$C4#4Fsl&nqpa5ZJ<@&M8dEb$MzaVCn z7Oth|;fT9r*yLKN>_wrTo79n`EZ)$J?PxO_4m{F%wWq;LFqh$gmh`Oy#2^n%pgqxR zc91--lldGJEuItAtr~Fd&}iy2c@wo5bGR`DnnEFrO)MWi4TzzE2xn+txl&xRb*PN!c<&+Jcp+uk z>GxjWN;JpvO3|_Re?~PNy4?CQp*Vmt{5{z&AIJ;wm#(^A5EZC3P|3|Jkl+W^p(2KK zH_b9PWnK4*ouRH=V^p?^o;{nwq?)w=ZOr48r+AnvEdwqiRmO2BiayZ)adNrrKgcb3 z85Fo2(v#W)5!fevWTHsi@Oeu?1=_T=9Tcs)n&%#TtjZ4~2Ci0``b6W-yISOZ zZ8;g^k+VjhFT?^#T$u~BbfY2aK=EXWYI-+GH`DD`-CY`-O)MCm1rXQ$tY@n!3fP9D zI++js{E4-}+L)xOX*f&rmnFFc)PrwW`>(y)kF$%FO-nY?P#y^~8MYjFy}1Z<2TfLV zE&dc#ZcBn}Jtn$Mc|<;RCs6ND2qo#{W_=5EZk9CFkrOKdNM^s8mv}3maWk_=i@Dg* z6NOmyizu(b5-rOC(u3)g>b2r7o_FS!rHA4KY_2cLpjM=Ty*@D=GbMiCiiYpHHI3E#^C$dS zUs+{J^RL3b*xx4lQm<2|Iz&UpMvI3_ba8~I#8{--N=LxbmOx2h1!Ec|p8qoCNy@jK z7*s)ru=Wf3)nIgwB}|*@Ddn(4@Thhy^U|<@FwK8Z=AJmuz8W4vdl;D~Spklqvtn(< z?dUJJ5^XNZ8`w#R&8o$IZrOL^=P)tL= zh39+atwjvc?jx?+aU5;u2>~UUc{~Tr7 
z;_0Uj&g3TAP4!lCCUoc^kQ$C6O_dohuZ;A!x6Doqq2%JP`x6uk34Szd<(5RLclgn1`xkcAR7C=Yi=`yjKYX(!N`o*JN9`OnjYiIFGkAD$qFo&}<0 zlztEOp*hAAGX+v#p(1)7a?P=T)mD_SP)ucS+U5ud*-EAX(2c6QDat=J?`J+p)=}gbuX00bw=! zr6omWrD)Ft9cC!SUpi*PwTnL?-GAXIh8O}bPDwYh46F@+b&l0|67rQ}gUOYuQcU`4 ziiO>2`(g@-`-1Ga8_Hd3nE&q~Q+%KOi+LVMb23mrjc*OM!a z^RfvU$YBLh9k@U_=K-2E93kwpNpVaVg|;Xb_Z-1?%)MScQoUg2l;#<-HAqnKd? zGA2pIF<h@I1;`swyt4rU!*l21A8_-HPIXys5L&3RnHZzBNFW6YD@TlVwuP`l^IHjnL)3L-E_$^R%~0 z3E+_^t<;SvDLrc@Ue}(gK~5`20ZEuvm~4!7PDVOaN;Ac%f;~3#LpXppZsG3Fca8fK z5uyZgnU2~ZQZvqL9szRt6BM0WvHUV6N^wWU=wm+np%f!7$jK<7uIb!m3!4P$(Tlnh5PFfw->v{LGs7G zysubQR(&tgk9fU#rX6!yFvs{%X|-4AkPkQv3R0+ehTi2nm&4y z7}uX~F$G^2xp{7P{e-Sf=+t`a^(_;BATRwlbjd8o0rMF2n3IE9b|IBP9h51k$TPGy z5IBBb@A*5YcbL4T?%4MDr#u5vsN-VNpYF4_S&t-HlrV>lRoLaGO{^6#8Ox|j4 zyq{b@(?x6)UX{Jy@5{n|EZl(qviOc>9b|1SYu)R7$mp1ubR9L;)~0b?dGs{NrWfN5 z|K7bA?#9i#1aL>i>QZp56#zw<@&N`rGI2!|>rQ8L2c_yPiwClx$jQxDEU9JVHKwX4sJt7-ZFs?AA@*;|ZTt(oEs7 zLFHN2-L*7ss!L82VtJo@XBnH1a-58AbooeiKJU{}L1Bb!g&K$pixwyoToIz7Yj@IM zqvwj-uevqNp>|~XEkEe8aH@h+tK1;E$~g^TyJIP6#P#Wm{ica{*6Nd%DQdSChzc56 zqWuzjN&`kh@jG=7p$+I_sT+ z0OYSI3V1rA>2ah2kWF{OWjm6}&Mj|TTeOClNdNVT1xPQq%HJ%O)WAC*O4@sXIzW3l zGm5~O0-sm1Y)wFWlq^Pqkw*_WRclO5-Vw+|Yz9r6gWK=L6Ds$XjP@HW)s?Puu?$|v zDUj7*O(5)+JfgChQbI%JcVF)&e{^hlYlg6z)(tCqagV(;VYoJLiszmPc(L#{CABMX zG|=wblt~8%93GfGkbP1tWD@8)eKGErgE*7SoIOm?k)D)ksE+6{7QB^W$-G|mH7^_T z!);_93M}W{o_}!vzvS=h$^H4ORN>&>AJF!97X)m~{C2iT3SFKX%z_$|pjEl1)BMgG z>cwS}*n$q0^+Tss8tt_Iw)*^Nx}}48+3kTvn7rcExq-yVEZxHQmdTD(>yVA&O{+7% zOx5-th&{$;AE=%_2-ttm2<(YSwt{*9k=TQYuBYxRo=~aWWeXSi(zk!V`l?H-YSY5j z;#|GYR$D#p&E5k&t1PdnL=4LL{p@%w`JH8$eEFa=Q+%RI$V*D8qoWYpt5GG3)w3=L zQut#3wfzsqM*HESx)fefbCVvXW7o&b(>6=qHO?{~WM(Jqweba%P(!72`Z3p*`EDQ= z{qQH7`6MG<78N5~G!vt+7Hi9=3g&^`*>)3#uHAW6?`E99vYZC9aCgg~EyYNgXu02B z0w(FS4FY+B9=qLGjwcF=a}kc1_sUz+)a4z|%Hn13<37raPvk zDPP%&57s)Wuv13c4W*?sR&;{htF=HyL}QKR2o5-b$%^AaCPF(>XyT*V^2r9Q@X`6g zYDn?p*Q<$){SDfw9ySncYqxHrMd9I#C{%&`Ir|DAOm9!z6E8hqkPU?1Q3VM?o>kYa 
z!Alh-8@T#Lp@P)>%@l~3^l8^%phG+I1uz1ip7BH<5gAJEW9gU{Y|raMe~mXK_X%)I zs0EJnB|8u-1?FQ&RjnqO+%*1aQrU#~{i<5H^(=eg>*5#W;8NgRhF8(0n zz+LWATp;8 zl5S46$YEndk=qi44OIk@g_g+0OyKn+>#)!Li9*M0ke4OrVZZCs{1yg2jw0sSAP@iX z8gUYU2ui8iX3zbz{;U2&5C%pYys%}zUcQl~->7GWU_Wzp9yIC=+`7o5%KK{C z17xCuW*`Zw2ozXSnnor>_P>Z`4XZ0!L7MG6UuOyc%5mRN zUhIEy3k)lDrr{(%n=~zGThRl?t+BUHnf))aSVXa}-J#h3J=Of*faFRxhv(#~tgWei=qaJh&>gLi%X5U8E2M>D z&E$60U7j89L5Q=tG}%C}Z~S5AlUdlV!a7xzg1Yhc&2>D*aaVFWUdqXpkez2>J9Hui zygyPyAF`mdfCpZN$PnHm=}I|WU)edW?~A|tf)@-Fx2rdG$=oHbmax91E*QxpmGVZ3 zVMv=LDL@A(&W&DJ##h&zj$W|rJazk$x7EN@lq}C!H4!`C~(w-=q|8 zHO~J(_TDS1skUnywTlJBf+$E60qG*WBR-0B1qn4Egd$x^=tY#GBA_C@sWj=m1wxc6 z(xir(NDUB3=mA36cYOcn`}ZE>8~cw(`(S@39I=v>b#}LFdPP|=_B>B(5h{|A zJs+Y%TIn!zpEG|1kZAG@rFHfKt0n!c2uh=141PQIp5(Cx> zNRyonNNol3^_EB$#*{OA?)>Xz3q;6|(f*k)l`C?)MW!3I%St+L-EB8` zwhwfiCc`yWF^|$w;lg22uUgZ5gnAA7-a{Ax0~0w6$3sM$WkObe35VL;4@Axr^6S-c z-WD+pgMG~~WCi3k&df)i+_JD6xI2%VJLm{ET$}EL4|9b_S>yT-@Xp92Xc441!7Va! z1t_gH*WOilBYApi$8H)e;gv=&xDSrxT)jjT`Mn*t<~o0GBv;aL z*mx|lYTa}mUINXPU9T2BeYhbFwxn#Z99IJ4JkA{6>QuX1PyfxB>r5}yw;ekP-ZVZ8 zpR91%^cR=@@4oi|dhv51kH6n)w;89SwD=*M^O9~u`||A#AF!7iqFUEwgzewI zNaOV*1AP*RzZieEw6+`Z|Gn`3H~M|D2EmQV;f;Bf+L2(kKp8PDqT9Ve*lFQ{`qeif z-wczs%IOc0{pR9A7-$&ZUfNMVoL_!q;ulE#L_+k?y(K@C*ILhp9D>U9F1U<3)Zg)d zo^`hh`JiZPLSC$d_A?VN5^>2e2qA7^{kit4$ztm0feCP5u2^-R4Gjci{C54FWyP7bVZKzX0R z-WEyq+05KE?p6E9o2)OlALr(_Ys)x2_-`zgdb?<{i_B792{@f(>b=J@ym#rxs9!q# zHX3*OaQj5afCz9YU*){8idN`(ZAWasZPze($U5M+6>qvUqKRsB z&v-`QvPrul+C3)2A9sSL>`g=3B^{{OXQ7p;D8<%q4Y{A@NpX>vssZ0IYK7jXuD|e) z2JD8=qqCNJ0O1M!r4y&8-<(Gf&*YzgVsO)P5A;VW!8y5B&7`-+GPjO}2tEzZJ4JV} zfR~zBKU1cHD5^HOR!%D1o|@v(c(oTL#dSt6^Al#ChcylzvW)ksr!vRQgvi{ygFRs!x1+WnS^k{m)K{hx zn*5KmN-|&}4Uq4)Lfn^T_g`J?>Vgz$OnhJW)d;GabZObvZ&69}OJyjksrIaoadE^x zZgb86CErK`c}usu8PqFYm4U2~Rsx`M8lST-1*oZXrv#cAtT@Lo z`5bt{0Tfx@fR)a^CaxNI;I){uHM$pNy)gj9)+^s>-YLvA7S!&2^cfsZn&*7Q3g~^X z8c%jx=y?yx;+I#`7uCqSk)?MuT;=XD8eOwydXv*aS@b3{frbXdD@}dEKTRt@-Z?DM zm&X998m%feg2|{=t;?68&p%>PYG-=9#&i6t?x&K4YnK{5^zh+OvFWj09U3WCuXS5P 
zv|gi&EqkezS#qIl!PF8OT#WfW@?ud^Hy1WhcJ{}qX`BC!aeu*K7+`cYT#Dk3>8WjeA!Yqm&N8e*YVpYy~uadJLMI*>*Ky2I2*FK z?-I6jQC+D1nFrMPdGQDA15IE+u2x3hZ?gb#Mw7?#T{UxwK!`!RbirO&p7Tr?g@uU- z9|hXR*6!3fn)rN*!OBq#7K+^KdRIO7GpY2qNeRCEHl{Feh_5%-Hj`p-;Zp)jRR}yH zUnxE{55Gl{+X`M3j~->(UPdLy9A6nJPmtcZT=`Upm6m~hccL|IvUsYy0|8h`aai0^ zm4Jh}FO-rr4!K5eSaj;u&Wy@t3;OAP6LNG;j-=#~J`NQwK$XppJWg_1>Lgiq&&KEp z^KDtqqP05u^j-2*0hifZCK0yc4 zE0y$J!Y9{F;9zqXS$(3WLjM3-A~oy=Z9#!t-p06Nu>6CW&z;lw2o_l(8IPxIjL9x0 z`(1zz&!P)odj~nAVJt`V-D)%RS`$_wFop}4KB>PB9ntZr93E+&L60RyOyx&MbwFNe zgiyq>%zVtQaNZAy9#?lmEFXb>fz}#c-#fVtDy*^{0Mti_NH#Bh>^mKf)Rq!s^#WoWoY0-xAMiq&?wa`&w!H^0xfK&W9jXHEEHBhZQ zp$1+uy6ZPTP}PKgiD~<3duD2#%)s8^P2FZPusIauFIU13Hvp;Og23?a6+nwN6I#DA zSnD#yLTd5E?!D79Ga9mxINsOW11KWd9r;U92l(N9RAPg{a3}AsZr_%+0>0P57<;LB z7tfV#GddevZ!uPF20P%?Zp)1|bnTJad52sbrM=3oflWQz3zkv@zNJ)mj-I}q&B(H~ z0m-qeKm)@%V|iAfto$|D>nk{VyULwB$-(B&WMk$9A5;;Ek7L+h!+k6hZdVQl9R$oU zr~Q>7-IXdbTj%{)Q0BOXurIuz zEl;+P=*s2zUWB^J(wf@KQ(FZp&}NEGd0_XnkR2I0u~5U`aE~_(Q-k{n(I(Ht zmGqDhqf<+u99-&_nqQui)6QN*VE3Y$??!EO7xry77&{?M!MX_!vi@0t!OL?y0OfJn zI%Q+^y}G|gYt->Pb3v_7$F0V*!;t#BVg3x16d&*Q5)Ns{==G(kwJ3fmt{|>tr>4PU z&eDyOx=T8A!mVcXO#@{M`SN#3pl-D~VQW8riNMba2aDQlNDAv2bq5snWYInA{wl-x zGEgNq6TVb4S(I1+*ZejDZ6?aUztz|M{;Yab`GCuOY(25=zeVeZkD^`%8fI4~wN^94@n^Z(uqET6BeDf241G*~kwSMp<6ij4ZVt(f zo|dXmU5g7~q%0eOnE4e}VvV`}oSm&x)B@b8p3mlVoKrPsK?F7-DZ3s%;qsk9a+=gz z=Lz*_bMOKJ)rPKA0m{E_9qEHUFP5mBRCD3I37>&`_7BP#4--JSYO_SCjxPV?Q|K?G zm(n;~Ir(1sh3~FQt%ZnMRaYnf8OF@K!hy?kYTXR_!~9E}AwK~c5vMQsE7L96XZO2z z4I&);N~Y%;dzpufPlv^@W}4Sq_(K6vyL1(2c9jk6Jy|K)^#d7i%Wd_2+5C7uwtZZm zOZS01C{XazoUl|ax7nE~?OJxw>c-8Vt;gDD7nKQSP^Tt=uzB0v_<2)3)MI^+9m)4S zbc?1mUcF6h2x{gG;fi(};0!sYJ?C#eoSadN3qFO48HjP}p*ZCKBm!bxy}r#Z`g?l< zsE!LR0H0!X29gh2)Gd(%OF%Uk4>$B+1K3nj+_h4^d2H=jmu%n1TRQ$*;J2sR_gT+6 zh&MQ+$O^_OLdQD`>wfp=ccz1-as^uPm2Cw+`PI9eM-M z%O}n4k|G-qTp^W$&I8Lvwruk|VEaip|LLGs@Rj--kPH#$+U?Miwi5@YG0q=O9OT4< ziX`Sr-)4tx1+teD>2}aC*ur}ZvT+l%yF7%QmV|g_IC=>qcVa0iRe)@PWxTT0nXUt@ 
zO|K*yytl?MX&qC}WH#@D!?L|;P4$z=8}SII<=o5j$r#uYs#%!rKzIpvHgAqPT$oo> zJ+LuNzM-^*N=}-L@AaN+dhKFh&OBuywUMkF)dlGkaMZ2SLhac*A^0)Ot1f^p%_bUW zj;@vv39>Wr3V&T-#=Y-;6KXeK!3Pbb1GNzk@@To&E}hNy$tcL^J7@&$wbNB5_ z1^Gkaq;6z4Q@QquLjFuqSeC*f3{{Z!AG#=-!byGTvv^27mE%Gof6)3w>aS3x+*X^d z$W7|0oN5aBgU|D%{)!tE@@KmI$|dTMb8z=hO(^8g#(*sKSA3w5Kdwe+-~0<^{U7-m zu|iOuOzkYBP=AFgh5V5gJnp9i7_eu41`O}{3BOPOWl;nMO zUQ9)xTnT63S`q@-5{Ra2T@0Na0?1BYk>)#5ziM44F(+^ST`f?4gvwXwiOj_ZpBrLg z&2B^R7;%|SakALnMj!n7a>;&otDA7Wh^F?Z?^J#kP<1$O0E=O;h~9vt$p||?uhNPG zmF&Xr1Nq&eD-wQ|N9h-+<(!MplzBrYg*7}c8IbjN*AhJZTRv~J~ z`G7g z&YT5{h*EtBF{*#GCdpl+TDtt_wSf4Ny%RyQt+IJs-$15v#FB-dR%_o-ihmEXBeS7I9MyQr7163(;#88XkUv}JIB1KZ}YHqw zzVlwcd$PF=UoU@?_ekchyqyXp#Yu|2il+W+<9=GWWaQmzZ$K1zc9MH;slL*<0xS)EG#(?Zq3GiP1eaI*{zNCJ$H_m18N|&WcPMBV>p#@ z8u)}KZ;qd!WxcS+>UTY`{$BWHmtAw;fAElkRYaCo&*ITwXSx8z{r%t3H59SQ-$>7{ zc53_2KveQSE`-|bD%GQ@8nt^lhv>Mlu~hje&3O|@WQ~GmLZ7OkXG?la40 z;>OTk`_I1+N98z{RZ*9C$oy^xLV@ac9>E9_2Mfg@TSw)@D}Nz1$#R_XWeha;z(sQErx$(2wwFG)u2b5+6l#cA`MOhs#juA$>{J2AIy3d@iYcxZJaKV4>GnYnV zsm>^7qTi_eh z(A>urC&*-~uv;@?PkTCU4 zQ?|7QDy!wQ7Wh!KV%1J}#}C_md@}HnN2R6)05#sN{>burvwd#}b3R!hh`V-HxDO{(_c;yudDtTd#M*S1T;xm}zI2=x;<|O;C z=SKj2^|rzn$qH-P8)Qcc$!I6dd9L54?bXjr0dwlDbQ}e% zS5DEJx^9cRH1+UUb^q6J3E73p946=>^V^j3+enzx_SZY6-rt?VTSD8>g5d+~!UFSP z7sIi1UK5FdQ#EtFd6QpZ+Fh>$$X5RZ$Y(NwmSEpB?|@{V9q-}1eV2ojl3{LUjd^={ z6)H0hpEK9sePJt2pPcan-7Wm?ZxsSfGfe&aaT8gI`g!w7`AP=S8k}h7V$~?h@X)Za z2c|@2EMRS5BLD`-u}h{J94+p0V4SFAgl`vZj+h1X^gMj2QY*tvX1@+k2h?@_nWrPb5qOTSeX5?>^tJORQd)LfuX#T# zSQ4Z53EZX8XoIxq`r;=iJjqD2({0|>{#i4*308bRy1Uvd41%0ynUB6 z(?5A14o%qFtiNV7^AVpVi0Qj84slqg*1*Budq+Pob_ten#IpW7FQb-cLS@ST>CrL1 z@t@Va4p5Wer)u66lkwc1VAO2dooN|ENw1&3jCyTfSSwB3hzZiRG(L`F*r@w@LrJPe zKa+k!C~yP_nQy5L3LIw%^WTAhNg`L*s2Xam@Wgqq-Oe7Dj!w3LoT98^xQolDBfEh6&Zj=(rP!q0>abzmn@trS3$lWZO)n8uAjq?pIS zT9b;pwV_AgD-OdkG$~1lrS8_DIDw$aj9**TyGWhuPIxa_^ zqYAT9HCNLbRMW!6n92}#t=iMn=|^}d-G4Q457RrNwikxHTUlwPP5h}e<0qv@qEL^9 zes0AsRNZz`GiGE|w6MbsyqWrn2i6RaNqpf!TCY{~*qI7!d=YV#Rin}V$DxVkqWk~g 
zj>om5cEBMwfj20P>fG;lAjh+QO(3!RvR=@4;6JX!Nd|oHNieMP=ckHukmQ#itPg#* zDSS5EjBC0>CMLw$*Lo56&V3=qpqn*BJ1?DNDb}aFuodz;xI+{AocCKOlmuurFBws2 z1$}3-1cbia@~`yaJuSD+t7ZY49-yfHGO`Kg0jPT4ei|>mr)4yTYh9RNGjSMY@UZoLa~w*mm!XX$WsZv zFzNjc;lQ>|Ncvsjl^e>&T`$=^QYN-Q&MF5hAbDf#GjLfg!IZf&hw|Y~hQJ_FDpDTY#(TX;#0V&E<**UYK3KXCcD?ALI{`z0TJ9+B5j#MVTx1eVI$Y-yt^cfzT2p!txw9c7sP9+>p)W=iuNY zZ-~DZSOqy;G-8ACA&*y2=He$SWyv!fpzjWCY|}aC)|JzP96hb)%uA)CdJc9<9SI)z zq|uoRl5w>Hh26R6c$b!|usU3quTDXrMGjR%FKfPsXIWk?(-Bb9`73oe!K~%TDM6__k8@|svU&g zfe&?UiYj9U8FG-V(Vr|GTQ0$&`8Qghkw^NBC|aNJztZ}g2BVEU*`f@DltJ7A1p(Wi z)r}k$HY$Jmo@z(&r%3!G^2reOQCHo&BFj&V%s|s!yBNCV-7(}x7pd^oraURr(ey7? z94zoJPiZT*Zu>iT)q>t@_%+`jtll-eregoMN_kV>bByn@;aBy){NNs*yHm5ZY#p`p za@{5+&fQCEuDW$kK{-!mn`Z+%I-HzPn+xL>jTQQBe^=0^URky{>$N{nm-N5kjXx#0 zpW8Bdg>*8lW9bigQb+3Y9;Q}v82~*+-)7L5s{jC#AX?WJNW3a+7kD3+GDpuld*7Jz=G{>|C(GF%b!da&-h{Cal<&?Zel;^KUPam7!3;-4?zUe5)~ zHT7uDjo4>Dh@Pg_jt?Kv<$HIz3hz4(waPoEb!L1Ds*G8);Tfsq7_`TghJF@+=cC>MF9#u-GOe<6lv^G zy~+1iA1(w|fOv_VkUOw5>=TC=a!43DZ`Ntf6XZInw@z*rTEI*^($Ru3P{t=ZP7I> z+k~)pkD7@ytjqrksn3t6|3xvkeEO?02YMqXlYL=-+ICT42jXoWEq(nRR|TjHm^2!2$|EC6ngu2rc^AKu zfS#ZUXFK>8GrgQbWrhF%yp-dE238?MmdFA8SbdIoav>;2Z}-}P76xxoQ|5Kkj2Uhg zD$90YgWN%s*F>#d_-Y=UD5F^@W3U75^M79aiqf9$MSQ6}8QmS)ynG(pR0YPx!DtH4 zV}oPV5$OOYb*G4ZI$Aq1u#9wHUULV1>PDa31Zswqdk}hg)*KxFqXbA@CHy~1fd9W1 z%XAspdIdBfn!zs0)$f2mKTFZHs*^+h8P#ryAdB08GUHykt*9H-$wN>mg=>DG>8}ZS zlC4N!p_E(BWnhmEn4L9Eh|6A|#5><_UY_6tiWI{ZUDC6!5^S90GohLTxWek4P^qor zsa25<8T;BTr)d@H<3E$m|3izdHdfHMX#;GrVUhLtfmaiDLQXlmbYltad7DFy=&e18 zB%Bpv%o(xA&!17+Dgfr!bewS`!X_=y5rz!DcHXqE2N0-~aVE~-4 z3*_xW|VbRHsPGmhOk zb^g?0)l7AzQ>W(X=HJ~{o=mAQ50%EH`bvK^A?y)w6Up+vn@!stCi^?`_Kd;;!(~uC@&~Lv5^L!)~a3{K4ube$8ihOI0*W{aD~gab@d1uHw$~ zW$!jO=rMYwLx-vPqnxeum5zRa6N_-kwCs2%^sO&&@NmP5;_gj0|K6b*0@`dN4w2UH zUU}C7r+PFPT?mc4&h@ixM^R$C(`{zV{Pi{I53S%HZvU7Uwdpi?X2czT=yNN~;wV+$ z{^u@JOO(Ezqqmzu9aq$*{kEHbGxNm(?P(%i#iBMCD8l}~AUY6reT9_eJN zaa1{tf6&eD`VpB@o01uU1oQ9tR{5|d&6(sEhd$rEbCUY6R!*RN^}<@J2)yni|5{~h 
zdf?ch2{7}_Wzj^#id(ICRBxe~AZ}A%#xHf8#u-~L=t;!HEKfAbl1O4Q+D1XeKMxkX z1jzZMavQRds$1P8o^3>DPUA*BH@VPJ4wfS znKsi(BTWkJgCteDJOEr37fg3KH5jG8L0E3W*q`YzDAnV zyWVT@mu$P*rOxbr){?RG=*YI~4|J(7Nx?=S2kvK# zmm^dzR8_2wZ?%ap&pz6j-__<$3&w_a%L1<`MvUXrcN^{W3zQYUil3xPn86u(3BHp zfa|f@z{M_Ed!EM2gq%Tg=m{pR7iEgW_K(r11dvl4M#=}KE)191#QIbyu3v%ug>}<= z#UBC%n6i9rh35;H%(V9NX9q!db@H?>UZg*BR>ta$>x$c9RpVH;L?_!wa4SS*3yv^TgwJg%h**DbUznzsA zQ2MI3dE2_(nU4<9J&{+0Hdm{3o_pP#viP z&M3VO4Qa1AVbu*IAz#F{X~nLMH>a6XA4$?6J9-IgPFHs@!NTrNEF*)%Z}pRWWQShM zfAf{$M4<6XcoTxns6RKtf9G6!1^cQlxJ zP~d&?_8Z@=e&7)6nb%^j`WiehK8S0~geP@%tr(qhn)`(L(tcF9^2KZalV>-+c98JB z(jSBz?Q%vx)ks?pCU19@jPzK2besJ#`4;YgRx7n>7Z5apzq@l+MP`a%(r26Af*l5rJw1Ni`{Qd3w9BWUg6z?iS;t`h$tq zLx&Obx-F{Pu0*L;xrjp(iEzL?C}$?SjgU%QmUL8OZr6y`J0ILpO%f?8TN>~2MQ47% z4;J!X&F9J(*UTde8r5lR5@fG@%KZePFCVp^?|o22VCl#@K~rQ>!&4-}IR`(Q;E_WUZJe?iN`(WADe?yH z+eQV-jE|o$gNf%09Cpf4Q+16dKA6KVLuK8{?LI-yl^YMmO^ra;mXKIX%`SRCXR%;) zGizG`?w;M`Enc}>X^p&e^TM-j%(daNA8aUL2{%RO(R(|zb&PNNgIZ`6-gB63-X`-s z-hW`-vD3W16eT~3PMnE_lRJh{TsD|V*6-r9vyP+Xy{SSN5EzCTc$3XatvZ2Q&Wtmz za#G*ja^Z3&TvLdEhgu>dYs}d^gJSYJ)4&8*xhm5=P0UT-hX1_ z7AJk~(cqr+duNKzx)4l;J4 z@PX#MscTEDfHAK#axJLLQFzM!9C!!&i!Bz9*~;gRcP6a7{3xWS>_cL{?kMYCr9Hp0 zM|v%PLDc#zufaMr%F3N_|F$MZ0P>?lGm$SzMXER;<($>luK-qD57$fh{1eYwm+knv z^YIDkdVTh?c5F>x~dMBzndF$Q((`nF^1 zZo_4sC|@Zhvpg9)ILZ>D_n=l@?fEZ-7Bs1Q?XqgZ>Ocx~+pK%)@dden$&4FXE1wcG zw3tNl)B5xFK8s@YTlJqmr;Q(widOhmYN& zq%IN%H3P}q_f|7jNm!hG>T6jg;E$MiP3rRfRo9UIYkaB)Mn+L0L|WpZ&!m9yL9TZj zq~LRG7_?K%U&NWisN>51&mU&~)ca~4(sNnrDN}hL*R!yQD@$){_~|3b0j2%9eLzi4 zI~!Kpo#7TX*wwFcihd{8qsnVvn|0z_Cq7O+(=JU)Z`-Ki=?w<{kI{15Sl7bsM-hBq zCLTDFH`zXxUtN`9$v;~z_Rm#9jsNeI9Yz3) zFu3l&onwVzm_Y=C+uA|DB!>Qlfym_?I;^bzOFzDgc@S83@#j#walyMg?u5=mdxa_W z??_nwrMHS|&zDCk@E5qOb#MktPhS=NlXKcyhAIw_Q3TLKT=4zMU(tq6E>hF5ELt`S>2YF-brJsFg=@+*7AUFA= z>)E)>8$$;=RqPmrOg?@fLQWUoSan8qMf-WXCc15dfExJw-SiLR4jc9!v}VoGU{Ray zXcE7-kiOqj;(gV`={w>N%y7#*O7XYv-8QWC`P>xA(1+%|a+kW&=#S~_US?PFO3U@n zPVFu0{-iW9cDMm#$EvZ(A;~pN3nNu-$g(AE+@mAGKJ|9u__k>_8>ws|!!KXU+>4MJ 
z#E&_mnRXEf1>D^47`HI_T(WoAf!Op=z2+I5*V67S+r<8@Ave{Dwd_sR^|_u7IilNH zmVRTVfub1~I^T2%(4vC)fByK7N^Frln@cR z(2FKy=D-6$PHcrq=Di?daf~#vexCX`7^DK0!+E9QS7-P?aNlK>_MgqyR`Ptlyf6?r z9W&G>Kf8UA-i_@y3%kR3X~{aFr$@dyz398F=aWukAKem*w2#Jz%q{|EW}V;vp%%CH zR?G09*7q;Op9@&DG>j>DtYX8HpPdnNmEREYMZl=EVR(>z9K_t@C7x6CkUX6`C)Stv z4Bal%{Vr4&#D8cfc3$I$Y*5d~-C_N^?6>Ce%L7a7lX8w2V9N$46wt1WoEpFqOG1Bb_Ylb?)<}O|O!EVyx=bF9|y+2j0o03=C*2XpaE8V^%EY{+)sg{msf@w2^Oli zc4wL-=v)wXx+Wbs@nUhf*35%AEL`ic^!~(#Lae*3t}KtA;2Pc z2CvC7t<7$@2h=JUt{rl^E})yo@IsGLI^@_`u}ipsS&7=LpuuDZI#Ot+&U??*#$o4e zjZR6fO~^k`iS3u9cD4SmkEUlk0}E`nDlZ9{4yND!0#Q#^uu7JW3e`vwbLLdkN}5SV z17~xGNV?m4dbXRVGfAR2_O{jy9<9{d-3ROQqP=;(C&wLQ)0{y3>_eaP=MhSkW=Dv* z)xU7|trJF=<%unY*q=g)}sM?VPZD)^G_ zqHrBschuteCOYy~Q8KKD#cW#ly3S$!1@-t*L=lrhW!i+bwIfj?v)_%Qn=wl5o?EZhJ(2=JQ@obW8lh4EY}4ZCl^5eXZo1pLZ()%W`z)pluKN#kfHQ z#A)!94adShhO|k`=w4;tecHXY;g8#~6J@*H+-(S+;$DpmQYlU{`$)E`9mJBll4cgO*1Xi(eKw-)mnDz_KP=`;rW%M z2;??OVH@?D@Y4$cYsj$GS(^%Fb^G~Ih_@{3+A;TopVf%7k{;?C*d zM36-E*7HOy-fGA0^xF1kR2cH1k;FU>|I9n%QDb+rP%xa6?XT*X?p*iHX}*}B>fKID zTs0Nbg|(e#)sGV^92lS28qub25QZPEkB+;~j{$hBI?QtUTk5K9<4D&~(919`_|8~Y zrALdO=sya%kD6f}NSFCutDc!4)c&k2skjeq6%j_a021zIc*|l}`Z1aV(xamZQr-yv zuaAM}b3X;EwY~RGLFpXwtV_;^<{6w}wB>eDp4FT__QzX8)KJ*gZsGbJ>9VtKEobNb z2xzV^vH%%4@`-SY3F3$Eynfs7siJ6|*L)Fo@jOdmM+$$fpL{;b@U$VaV*51xLiLIB zNr8hHSdBdkI_tBBf3^>4?rA0RXcz)psPvQ@G+2;n-&e9AMw4cZ`R@`imG{4>M9qqH zouC@bqG6~1s07r-PJ}FrZO-?WGF|QqWC{*v6pm~PXGR+ZNQV6Lik{<#da|siDcu45 zu|@M6Ue)LuA8wg99FsmUT&nY4%UV=?_QOcXGi{r8zBeltc=-p@Ki;PfaIxX*C^%&b zzq)+eX^aU=p=|io1Yu)0*byi8I zyzfU{)&wQ-HFg@N-|TEhJqcqXdaT-M3j9@0SP4?nO>@6fYzlQz!)O1--*lS-8MZRX z+0)eGegFBtV_*m>_&eeKv%7zHot!VYt{W52PbIqezdoIQ5%7g4YE>F&|K_*RurDuy z5U1b^14C7RVFS;Via{^Ul||N5lEHs<-M2DEF!Qt+QFG*PzU|PF?_dziyx8}~82#T} z_dkdC-#oAXuMSTbLkq3;Qr(wvRW3BG<$iskscgTQ0!zuSoU4J8E5~a5RT@HRIVT#! 
z0w>p8Uf?*NfhZ4vW#idNVdN!K(EPjR=XB$vSqv)s;r@~3H7avnHqb)g|LN8=>YKLP z<)356+vVwAv51DQ;|E7zi!KuP-zMC^OFD^%Medg7J>7b{IeBW=fB!}K!Y}lgSDVH0 zj8!sGZe3-pvLQXtVT2p)JoeGArc|xz)p=H*vlm}n{!Qe4%C>o(x)$zM-7cvrh zh7n>K!JzBo{*>@m(W#Zii%F!o?4P&G*svd7o~)#Kd8sZIb^*xEwA~_|;ghmQ=LO9w zv|6-NZ8z%;r)hIGzt;wJSPq#5{az+b6Op&iQOUUcMxCc&INVScm=-sfvLpSaC{vTS zAgSM@ay&(F)U*B5dD4+!L1uq}t9{un#4O^4_~j`F!yP=6s7`^!RbI%gqKdI+2I#k+ zY6VA0C9#!z`V0P#hqK+D(a6aYJ#psu@0>Xqg^aq$Zc;btnIHv^+&=6)-6m9F4>`-K zID3jW^07wcAVi7+BSGF?oK4j+QP^JIWJxErd${vz&iS+3NX25$1tHA?Y%! zv%l4z-#VoF>F8cqXaHBKIJ@)PA?AxS;7oI`i&`hXhrvepwVO1*c+eqGDV`Lv&kligY{ul0hcxIO=x2| z(ri}HMy=tP3ztgl*{;f3x9y*LZu^Q=#1F^SAqh|;|6ltN!l-MH)@z=w6~=q?+%`m7 zzr3d4W1^L|Z*0-@id(wW`r~BCsw$`*J46Uk{DT#-1x7Z0k;dK1X-HFCisYGudS9m} z{+`Dhc)UFq*PMqdjVl+21jF`axFwy&E{*az7v^tXT4}wRCLUO`FHa<3LWyf|;E5SUw?ip;;V+WhYU(Ip1ajrDmmx z-8Vpbq;FKYv^mtra%W~}B|P3&@VpaXh8lir)xMs1L+cWD9a+REu4XCfj16j>4R9Vu zIL_9Sv|L^@^HlnuiXv_>SN+T?7$laZ1}XG?Z2R!w^%v(KZMrsAMex-4LEjifXSyiZ zka=aqHYYz=Y4-_aLM8^@wO)790(H9%yHkrK zDhDflJwp!(IAAffK0c$BwmmBMtw(b4BLB~4AUnQhbEK~icT?kUFZaNRX|E1(&aT%!_T5J-KSoRCE zzBTypF>WwokFITLtjsLbf9<77S%XD=;)$L9PCDTMyRkvEn2o?)ME~k=c15zIr_Jtk zg4S}Uyc5aHhqvx6h*87tCsW04R`;BC-m!C@?gWnV@%m5&yp{E;g3p&z^WKZa5+IE7 z3o-$Z+?%#eK>!&$4SeSE)D_~>kT)l@ZY_Uu$)GwkdDr&Sz3jU@ro_)()W<+a6QIO%@!QCV>(YEPZm)3^f9!rYPIr#oQ z|3`myHI!b{wZ@ZFdQ7DsmubK)Z(~9a^$?0MraM!TCI6L0zp>E0z22{j-1el#3#AawiU|I4|+)u1;Exp zt`ydZWA*#&+)ze(S1MklLy-^po9|TxgjL>zT5%TTcigvn>=l zDphsQ45o}K0C+jjRGUAf6y zae5qUU-dG}?&gKgUiDM7irpsn=SGjL%WEy>(ijq__%85UyW?1Te!)s_yv`3im5+5r zKNcMDG)jWpE-=|=0jL=5!%a4sk>NqLXc8YA=Jo*1#inqWa+=KdJ(A@~nvJ>?A6oJ6 zswwH_v!3dgdxU7I%-h&6(FFfIWd8cgc1m%i0<54bOK+t7_+|DwUs7SkC49u80n~c@ zceALfSeEfj=tZVd{&IlE6F=T`2ys)3U+}d4vhk?MTZQL2`l|=rzUKGaO`)>s9G2By zrS}lszAU}|#HuB$BZ^Fj-Zz#^p{2&8ea`7`pE<4F`acz(qy6D8Z(XW;wM1uI$9i4| z1m?F6WY(7&8*;S4w!gKCY^yM4kssvA=Mp+a8hu6@1N;vvJ)-sAJLe{hE=SvaX}{tt z>G@kVE>Q?1kYQ%6sdqIVs6~;#ctguf@;1l6BUmADN$BYjr?LRU>pR-UYoTnav*aL6 zUla)dg3B%#7ARAdV^z>>KjMaa`p7_X`&enJ%^o@CnD}m{9)5B27AGyzHeNT0JX~}1 
z#b(Wk4LiJC?Y(wLd&azC5;RS+fRl*{&X(S7!0(1g+lUgKZQJGI?-U_q0@_B|Uq#J$ zLKLbq;-Bvui3~2OeUa4#4?DSW9Z)yo*RnYb_bNvt$I1Ch<5$XZ0>hXl`?AyzOK#WI zZu|C;7sqolRp840=#8tk?1O2CKC|2$bhC$dw7_%iC4Xhz8J}J9N!SZn!^NJNs z{~*wN__ULcaBIQm-<41zQV}6nT8|%bZ<~U!kQ2V;J4t@k({siB$cLtN?vXMMH{c-; z4EWD2thSDQui^Ci8~}K}^{Lq!eHSV|Wd1P9EH5>J zNwQhPR;MV-vVAvkzV>Kvn$ex?MTnk~XCIf5_{~hmr&ziRT&_1r0BS2Ot5Kviusx(P zI>FhRsh(z@Rm5?Fma}H`y8WqP)xO!p&rXg>YF~m^#%hnQ+e_Lyzj$Eu-nm$>xm&hQ z%=7zg`<&@}v(?&me(c+8v(LI#%udiOy|L|TE{GJG$iE}z*vaH~(=dM_cw`Yf%`({M z->RJ&yE%T+RzJ7S%m~95i(NlTUS?qT>-WUPg#a1PVGC7FnRN#?dk1IV2 z_fg{S46AHUie`LoU}Z^6sy_kU@U&lpCr6I9%aAX!_ba*VT~2u$cEWEoaPS+aSPsrj z5?o5y#`x2+El95?*lj%;eGlW&*ic%s(MY>*WYq3@a$pQt;yK^5yCXoRiueCw?=8ck z-n#!`K#-DDKxt4w8bmq`L?tDM9z;MIq+@7MLQs${5$W!30cokBK|nf2a)2S8&AHF{ z-@og*fB*Z;dG)+`e8uH8^Zo8wd+k-9wLbg82A_RTXujq1>G<3KEwKO3{|k8Rs{*j~ zw`;`b=1V*UKM#sA=KB&a5`OmQ414#;leNm%ixgdUeR}SbPa80a(q%Ua9A(j=(FeSORfGnw{#uA^tA&%0MkvS`o(luExeI5a}#?ZCd!n7R-(8pW6;uSZgERiY6yk zim93P3n$j9=kZJc)>=?@K91Pk4mvW_`=M09X#_zMyWBzcoDt3s@XrBK|C*Njf$Qh8s{M%!6^ z6W!2^y8k{g?s<`__81wmc3YG>vOKl{&iFD`ND(Ul_aM6yK;aZ{pT!r(NSwJyEz$5-79-*OWh6K^Gl)k5RH^VsB=0 zpNhQx=Cb_EQkJ)1(?pnvN%6?O^14rH=gi!In2d~*BB zJRPz)MgpE0eN@hz>f(T#S|4lSgxfvcB8+f2nmlBqT_KhL$gSfAtKpba#9Hb1T3TBb z?qZBI`b9+DuyONP>BUdO2l^eG9wP(+?jOV-XEJpwz)?D#xy@M`xVpjx+@1Lt^CX&r zf8@1ACrKmCJhoFVbkg9k6yTPGE(pcEvj_yg zYVZZ(|CIVJK|TfVe(mFTzg3}iO92&0wsokUW6J(iIjQ^;1D_fj2A6iRwo4~?TG1lFi(s$KC%qZ9_@m{#Rv>L6poD``7gOHazAF9^)IKB22K^J)rY~GaFw$W8njiFD za?@Dz$`vl2h&yR7{(5#j-u&|PMScf`tsAhca`U+RvP0iYwVlqCbz(W;K*f1MPA4Et@x8NPsw$&Uqd-X)RM;NJ zN*vm3Z*Hma;W$~!ma8lB z&V3kCuFl4=J6O$3K}0F?lY(N$MB_^x=L&2lP#NVq!MCEsy}t(Sf_QI z?r@*Db#e?g4TN6LgMU6e-QG{Wu>IU0$a-7S4&qKkYpxV6V&rN3GF7Wd{D9ixVF0j9 zQ`x;!vKgm!EYokXSAmmGZhFn7<1}QTR12c9UFRJ?%y1)g;Y7w)grI>rrReIKl;vD! 
z4)bg2H!dnah>grm_{Akm22i{}xH5_2cTDGc(!GRj$9k~4_p3qW>ch>!bho5d;;gmL zQ;(`r0lQ+I2HAEZieg?WUuTw}dYc#KW$oei|5nzoxm%W7&Le>A3u(;@-Rt1~}fFsWUXmL_{f=RBN|Qg?{z#$GSyFai1J zrhde!H7=Pk$kEPfD}*`?>m9W(gW(fUT8w;Bk>{2)b{6ibQuwuA?Mrc|mm%iEsyv<- zl3!iK@u_%{I_Kc?f20w4?ZlsY)l?-?%akAd9U7#VA~?@ zoxr3|{*mAEHj>3yY*1yWL)ztgKu%dQ>1>%nE2K#JXI@DuY{g(JB&ea>J&p%AZ>zjw zdab2RZmC#9_C+TH{5~WTQ@~nhCSSHKqPI$uHBrDjUL)Q!_!aSKmM8y4E-WcV!1~wAsa^85;n!qqg4>K6J?grw+x4w z^?!ixgE}F1ly_}ax#iF|{ETh6=-xNOwQ6lnI!3!7Uw}sziQd`HE9vF;g-L+k;7-ZS znHN2#)*jszmScS;98DMD_1Z^H*AIu5JR@}G1!1OD1&n$6=E`g*#$WdUy$m~%jx)S) zxVFNQ1AqzwQHaTU-LpNT-eDQrL9p}}>`p)Q{rrYhI^*r+hGLwVGY$naWOZufy zXJd=gOhVoK1fZ4bsT7nGGSU5)MJu$gjZ*i5Wo$N0%uLwBWGJ)Lk(FUX-K1s9X2U1Ej}! z)a!F2b1OV?jElaYt(_y2J(48`u4oY|#YRN0l-i)8Sug<&chX`U@il_K*`TS~WGpe;8<I>x`>;5`{KWt`D5qvEg9#iM%o5MPh9I~qBgN0QNB((#m zvmg8jjQN^9stm<3O!~1=H|JrKf~L*E4yzLY!-wm8|Xid;^?{~^+HLT12FDaw;EIL$(OGu*O_M*K3RG*RDsYk z9j9YFwccVqK1Ai1o(|VuL~Ux{t0svqkv_e{>3z`_MueDSZ&s}<5Zf1|m~{*u!<>;u zifDQdXCpF2<~fl{M6jic}@$9`5@nZzk{GX@xr^aKXdBp1)%HPaOrnYbZDr`;kMlo4*Ks|Zb8j;x} zJyybTz4nU1tb-%Uyk7j!4asoYlZ!3VCAK2#@%^m5!NMH5G&fFOq()M{8CuM(4bsTb7Wa9^AyAA6A8)0omQU0sBc zc20)?o>a(o!W=7>-}rHqqq*wS62D`G&l*>fXCEk`>bGr)w#^xpQ*&H~&2o@Q;vS|E z`qWQGb3^4KQXM;-y3AoNQ7q41=c?p3oJ+qFKAXx_IpiL^$UeBB2c50`bteOYWseVo z`ldG9daF{zs}3$?4njZRv(%lGx`Pc0$(c4OBG379v-xd z;1Q(}vby=qhTHIDw}&isd5z&%kJ?qtYqvZ}3MuUaz4yX-u$q_aZjR>FqH660L7vgUMMY7}lb`E$Px5Hz>>5 zyY{AQAa4yJ2?E-rCf8?QWZ%D(6xc|~U$XK)*%}!79_w$Q9NXMr(_o0d(8+H~ZAzG< z=pW{k=yYG2*KpfP71{RO>uiJ*^mIhQQ6>-5Gz;&gP?V4UuokggTWLUu>Lm@mi<&C9`jO=wvV-Uc0nrISNi>;eXbC7g0{mnkN_GSIWkf>I|G z{j-$&A6JJKb=Xxn@d{^tXDOsM{7`T4T+8^qYcfztKA{s7hjuHk%u~=KadRk@uL7~e;`RjqF#f&Zj5cyUGL2%uDS^QDA1tyeaW(Fseh&pbM)o|bTgF8&*AN08MOs` z)M5^ddR)T%y!P?OJ6gF~U!QiYlC!+5Cf%8O1oN?*3%1;C%N@RFIWrf4^XMCAU&MdN z;lHR~O0rK7LC;b!_bB1F=n;Ho$vVO_Tr=WP%vN8A@a`jutn(|TJpW{*gOPLy+k!Hj zjl27fmyx$-_BB$A(Ux#E;-2tyEyHo#&i800!~Y5_B)omE1*bn9>+Ul6ZV`~iBU8Qh zlfqocxk>MIMbk%^O*<(z<8e$JTx|3Oi__^-a!w0Zd>SDIAGd4&yY2YD9H{_LUvZc1 
z?i$XjS94vKyPfBprLTM3L_L??eHiE^-?-k|x98|eVYcvrwYV{=b^L zVOQ)GjIGKcm;duLzgiegSNE@DzQV<4yC{0>J{XMg;2{xNxXp*dz}&|-a{*1$)h&Wz zs~wb^Ve}zIq^kLE(Tf-MZvWunKV`{&bO+32U9d6QdtLFaha5?%Bf_20A_TlBT=e3gy=MsM8WbQ?Hu zr?mx=tDC=FuL3?1A-CagNFM~O% z-A&uHyt}!yJz_UTfa<^K^?wu^8GU?s%=8-Ph{3MbmKD`&{45+MRQbvC4HvIywwfDf z>Vn;6d)A3wsmOx;t9m%3UkKg6<HfsIey{*mi{F_}r#BMX- z2xEQ#?r*;0&Zh&jgRk83Uo88X@*hk5Z>GUO#rF-Y(nFnT(}<#$ldhL=Ch!RK2Jg04 zgtC3;$RKb0Ty(PvZP#>p3`l8lg8IaH=6FQsrzb;;!|dOYkZRJD%RAFHtV?_<+Q|Iq zX`U`Rg?6vOzkZIZs;{62&$F`^9?8S6p3=&)3VHNR5Qis|GB9#Y?`g7 zT9n5oS&N~YKKyA7a+)XPFS--;t34j$heMKE3Cp!KGFb~TCBDvqzT#E{rbT*UdIrw`wh8I8oWFmhn6qz{}Ei5eL& zKba6UUE_7qrQC4~?R|88fMZZzPmS41wJ*C^NrtBc{Jj70@J|DDpO6v@sw68c4Bwp$ z%W=uS4?u7FLNB8hh%~(oF`9?rr!=|i;I4%_<7tG!H6DMNr(me)W@IW)6S@FwvmuB) z&!3;k?HQEg?lP3v-NH`E1?$S{?&?DKhX8G+;x2lE^Djx9QB?!*R z{lg%_J94BIB}B7Xp*i$Y3~`c8Klf#L3>%qac?@GjiaKKO$e6JOaz1#7n_Wn;*FQ@U(?0=lQnBjfYCi%9ee+ArZHP1?0Mf352KgCT=0P5{h9X&2k&|zt9L<7OL?FBDWr;OId4zj7I)r`QB17r25cBC za)(@zENWM~KFkYt#Rx{#AN1ocABm4Cqa;YsMp5wsu3q+kZ?a1l;l9~`bDPg(hF+r< z+f8GB)VRs=p2l?kIiwVca{`ks%+;@+>$1%S!Ro9NXd3 z-Z=ivzRq*vLV$Rzv@3mpAM3;1og+25sYR!Jor2!qmRMyS7#Py&#LVWoR&!$_u)n~XdVZ~`}h(? 
z^^=R#YQDeim+0S2zpbF|LSD}jE4%+nqxuI8xwxi+sMARp?fFKw)pU9LCP^1&>xq-p zr};-HQDR@GS|s&dy1M_;%GK9Sg86=W!#dzRy1*dLBcE;m>0y5#Uk z?+i~Q!wQ&vi1z$QfaV`t)g>K)yPzLj1Sl_tO7uC^So-_Y_uz$duVV@n!T7t!7~ur+ zeC9A>yk<%GS*%b+!>ylFbq_yFGTq0X<(CyClcje&OJ^mCc?$RW8GBYI z--2~~$H?!9T*wL{U>KV}TZZ&Gg>5I@a4So?m5!BW^b_q$-zNfzi2BJW6|iT|_OV0{ zSKLCsDjcty+IHXQdW8sgB4<^IlQDgpG~bLBpurIx8@zbMu)9VPWiym@@dJZ2jY%eZ z^xW~5am6q^-BNt8*&Mpn!(=EAMqvAF3b3m!HxT_JZ*lOpf+;w>?+BQly%cIl?VDTa zPxSc$lLqtCoau7JF|P*=N=T5$bcajTrb!rS%Jf&!E_MSFpjPPN=hV(CYlA5HwI4OH zAAh@O>)2L0XnwGqUsx*jY_|Zk5XIWNN8h^C$k&!)729i6ZJXFDqzie-m>>U^8nK$I zaggf>PrbvLzv!)7HXF9o9p9((M6^lNW@pIIq^(1`&$Y*th(+~l?3JXIqu{7}?wcf~ z)+i+bBp7~Uv5X0eWOE_D99ADRc{9@o>R_lX#>pT-`8yt!z1wS<-kV4G&m!+x`qF@#m_*RQVOWk`V?r2HwS`N+@f!Fv`~5qoZ0BK5E>bsw3P zp(W0xsyT${TFXVA=8H%+`GusFlLMN2?$actMs21O)o!6RHbMJqk5g^cdYHjDb-&)$ zDtXft+z3A7_47mB_TKUGTMn-}!}T)_SE3RkMb+GozvyeNi9_S!^H!!S%`?_eC*O5H zf6WJz?GvMQa&qQiuv4IK4rBh$LDyvw1nHNc>w;fjS1rD+dUooHZ1p{iE3(B3XjDgg z>=w@&Yv6X#;hhe3Mo;nvnea2O1@+@%&D&;(<%1r@AHBC+pRO$TCGNf{|3(te5(&si zv`b!9f=PUNdIw&;>bneHLy+au`ex(l*&jcqqB*senp@gTzl&#Q`AhemYD*RFPTqT&2gw$08S>E|Ls-t^jq^Vk zK#>}`3MSgcuj3u^i^78_t87j+B3a#jB0;+Y07sXSOEr>)*CHFQcf7aLtFoG58nu8& z(TTf=JJgHMMbPn#=6U_RR#&9mI4wT(*?@bn+uQlGDx1=uEGwsf4%3NhUJ0PSvu}eU zp^dzMYgeT0?V-*7QS$j!yjZ|y!17$fu79zR`bcMatiU7~nFwo)dEiXEyADh+meF-| zO|UvB++Q<|ICGQu@aH@oT`;iZ(P{qfp;xX6kJZO6{^YjeCjpR|0D5ZMq#rlaHyLx#FZ#9So|Xu3m)GkH7SD z^u@sg=sPE;RuO+RiG%3?2mbXAVTxOVzcUw|IklFp;{jn)1N$~#Dse3*sr%ct`Zj+y z<>Z)vu6v>@Lo7KJpqtO@)M*q^YClYAHfM+u4wd4yo*i>!tr2tfQF(jMgm)GDMxEoS zo>;l{T7p~Al7ZYIx_;&Gy|6dZ^|91*Tnt+`mXR!6mGe_p=j{5`J4TBhXO6G&spaaYJ z=T14QtW|HXc+NF=jHzRcuy_j=FWXQvTfs|{FNbrlS>Clj`DEY!Njjg!dGD8Z&jh_t zb;SLAWJy_p{B0y1--z}-EthJbILyO*iTZ&`{t@n;m%Zkq<+Vm1$(J7I?V6+3IuXAb zN;4GZfaUQObAfPQY`DEzDy=as^m-lrqyV0+STy{-^jWf$%V!Ear{e{HyCzzM1Gq(? 
zbAn;p#LcOfj(neZZvu561M;iy%^~|YafxhkYC&`E+fUvLI9Nb>s3UH|4VmS_UsYam zWgXDKOk;%LzRQa1JwA>4eNqU^b!Y_##n-PCQst>IKo`XVSjzDXaXz4ZHg?u^v?8?%<59k=0QSx+y?p|+^{mik@>dTxUcQ%wqy77860G5bnsY^sF#i3@zaUO53=7)>R zjDLMWXFir0ScU?F`U-c;btBH{HSW$Jm-OH7bl0>gL54(MFw&u5Z+PF*epe4d*Ta(H zm0%5U79Cc926kIdR61e`urut_@F_fb?^XP@5T!yieD-^04AakiTXYuYBBIE7V|%J* zHvY@#5@5PDySV>g7Pj(0@S$1cXvx2%>|B9m>bz>dpWU)+--j!IYpT7tas~R%T@cL zla+);e-b$cE)q5I+e*G1b;Svd>1sT~T?)`3;ti)zk^d->b4d~QdR!sV#pefc*;=FY z1%wH30gc)CnBq1%z6G2Rh0tt@gGew#@pD{0mREdUT0h36wVKRS35p3Cxjl z3N8ZoCaQ~e^iZ2Q&T*2zOC&5Wl z4)7$lY>au3nZaJWswm%Et_xPJGZ1i)D%g(`b|D@{o)^a&!S^goWURrk766wj)e8-# zXc1?6GxOhN_cG;oz96xZOn1*nyn#V;q`m+7t$N{6N~d_$pL6IA-RZbhPg}8flD9sK z9ikrl@ZDuOM0Z{_z|r?-q}JY8JORk4CEMLNHP2ZOs4MTwcTZ+j#2!{unf0E?Qr|!C z#s4<3I6JrfdEzn~$Swil5b@T`T`+_U=GExLe`gqPV;BLt3U@C14XF;TCX=$Dp&}Qv zGEUIUW?B#*{TCO&_I{y_8F8@`V4C7JnS)KuUVDRYtY2H>d}gXdIw<;G4#+Ca<76I| znE&AwGxEdm*z+)*kAriWqZCeSMwb%W_wu@={Mwwjr)$(T^9A#vHTAi;^Xt5UZXcj< zL8?75ZpDww=o3X=U=1?9IvE=}(MvcwJtiaKHG?7>xRi-IxB+>9e#M7R8t z_5h5^@_BT(9QYY{etg?-ml)GXEc%}TdTE(v{k?lzzs z7|Z#*0;nzO!Nj{rTH8SrcVv=i>AKxtN(agGbUJpMWfm~ZBQ)(^t@BHRwff=g1_zCn zL}%Rke2>7K_!_OH%hhYtJOz!bRze2`4OiR_B~aqpL*G;@c2TB# z_JDZ-diEq$v`2qR{+bIyP({pn&ANXIkg}{zt`P$51ifSpu`-7#G~<tm?FrqHZ!Ent;{=tk>9w( zZhLjQW^dP`)zNfHTn3W1EB9rTu;NeRzgH)-49-90I|0WJWr5gt;4J(05gnc^g1H*| zQulvsfuzZ4ZTXTPv6EN#z8V3QXB*&Afnv6{v^{yYH!vygfV48?%u&m*tE`9uY(o=( zh3*z1hs;qYA~a)F7a_`|KxD>q>X!RBg1&Fjb3Hp{(gFriY@`$YzJGS?3ivR)M=ihF z?UXK)2VZBoQSc#7s)t6YqT4!AGlU1VnJ4)kBh5e;@gPx?b7W(pM;=w1DQViIjE!ea z)`+g}c$of2b~fU_cg3EUYPw{`Y^dguWxCnh634~P;PfTHA>x{Sm4b2e&(tlxX!Jm+ zP@eyJUVyC+1fPiQsYyJTd*~t_=CF95*sB-CV&o)>p-4cEC z6(ft7OJp zONLE>Dy0JIFD+s&;*Lk}ewF3YtgCiT`sK<1nhwCq;3~4te zV*|F{EKFr0_mg3IVVUfe?aA~e5NioHn*kIKhR|@0ofx61}Cn2)04bf-=9#eakm~M@L>s|`Z$uK z2El9;VU2S>{q`*We3&-ECq^?yx6Z;hfRL(xHJVHR1tqt((Mhdd!PT*QUzIrlp=wOY zoT6jQT{u2qOroQadXseCxOrhk$>EX_z&_iGxTfUQ8_)p1q-0^(y#_YEi#jB(s~U8)yt* zEv~jk@f;t=2AcL71f4v?3II%F_giFFz97Z3!nZ$xn(QIHb7iWgk6mZ~OeA}q*=zN= 
zRId1YaZ6FxS)Mb-g4x?PZ@Q1sd2o?5#RgFF>RKMNTR-B-i^ zfjT0&(9(84SY<>sYAFIfrG+xkVtr4)&=tOE_-lX7qvyo5Cq{eLz3kl`#Yl)jaPo8t z>lhEc6UX+1KN$G^Bz5-oNbmVrO1f`NrQ!Y9OAae07azW2XgbUaI_Jx6l)pDtFn70N zU5JR(s`tX3{?@JuFXu1e;o6n7<&kZSc`$Q*$FzQ=pJj_BEEzLTz+&GuXKuA+_Zui0 zzw9?37*u(^b<&3gOVNohHrac!Yg+Pfnbf<`JauXeS!K)?hmqQV$YN-ULUzZL3d6iW z^Dp2XivomyCrK-a&p~XR5DMNV8fooHtEu_DD$BTvRu4pXHG(a1n4EE6E7lENsWQFz z>Go?h<=aPs0Oc>p55{0>m}SakGv+#*a=eLwAT%rbs?+h5I(w8n2sHg9+D7v(z8B7T zEPWqbT#f@Iy&o?D2e&Y3@q?sWrOU!<43zuBb9Sqj7wRBZ>DJox0!7 zHc2CQvmN%m4mW3bFY7DgCC1M(uN56%gG#NtoyB_to`^lJgA;E$QC2`V8FjR?m|&J3 zedm$;a7Lcftu611Gc9JhkURGIb4mA~F$35;KL93Lczk?ehLBq1V()ImoVzZxqJQr$ zBi{2H*s_=o7!+h}8^#aSAIm)UTkvOoaE&qKZb&xc%VxZgFRT7|e!+})@%*qKlr}Hd ziwoFlrLGCUT6_Ds#CzgrHdZGJFaJ3l{NB2#llQSxyx{zeLgzZ9tK_7Nv`<%_RSOLn zd|zi2+uASsCMV^tdY%n4IP0!HYDMISce-~_b1}+PUv1Kpevbx0I`jwDZSJB>tohG| zTi%` zS)mOR9}gdD;>=xDo?xB3cPz_0E?GHsc&lXAeWSwk|k9EGG@2haMpq^D@%b12Zn6wy7?fRwpBh zSE-k&eII~JO^HLg7=b7GyUV(K1@*wSM(qZ_`xzRUYCYU@^J?eI#N|9t3HtVk*^6lg z>C>Hoh1S$_W}CXR@O9_*jeQwV-U5Wp*Oj^H<&-_1R8Bic&j zlTin>?>uGv{TQdwbIsqre~CmRYXVWT@eSX0viz@Q_t2Pjbn+azjCu__b#}BfOjQ0u zi8N--k|U*RtHt`VW^3K*-B;E|_cX2YcQ9LVI}5_Q&eoD70fcOE001K-lsNIEElaFOq>sORvn!W2@*O# zL{~q!*?tEz%+XNk??L)%z258#l`I7n zQc+rX=lBk1-P{$MuK(?c{7Zf+l-2t|@%D!%B<&3mJUlL)pLUY%x0!U`zXMkl1r}*w zw(h6fo+3|07V7J$9|o-H5Rf-|zBMoDp}cO~L)6i}ADql_o%QA=(-1l;lplzdGxW3f zxM#8dEmix0OJ78fO~NQCtpXRjglk7#<-25|RFN;A;DCn3T|9?;en+~#%1C^X>|mKI z=I^BS*ZWf(FK;Vio2_4amn#AJTo8@fy3m<=9*SC=YV!Un;&eymuOI(+fA~|5|GB64 zR(A@7>~`@^|c{+*lflrbEWg+`G6mbzWoxnLn$nGrG1NpD+Je+C)h2tbdFtJWXQ1K z0L=^ipc&Yz5WAu3@Nw6ZxN-MId$4a&3$jy6!T82vnih?kbvd+*o2#quDy@9CwzdXi z&bBK938@ZVrE+nN(~TMnK^3o&R!)YN(4)D(q>Ow?;0nRE97`LtzwzS>_y+gMORi9K zcNp(l7NDlHTTe(t`y1uimkzt}W)JhC}{p;J?31))8s42w6vRqq?vq1 zTCd_;ZwlL3DJ1cEQE+#Geu%$}(BJ*hY)9B@Sj<-%21QVI&}sV=TmQr`B4g#^Abqm` zYl=!a`JP+#n1vVGJ0%09!0$~F5FQfhAj?^Yf|1gS6X;}jb+!xy8#mP01>xLbPP6E^ z7`8U7`HzLXC}3tHPNsFwhh1dcV6st!gj|S_beXA@OJ;m@G}+o{I=P^P8y?RjBQnj~ 
zbBeYQpc?oRhe!EVxhYmt3x}(#+1Pkfs!J`*Ppd(*NXqv1jT@U?O|o^_GI7B12sm|r zy$LQ+m_0<>+LAK(i$w-3c&6AfW}$u1_kG%hY$l|o7@mHajb zS~f6rMO|D;cmv$~xD~>wOIal=N6EY?jEOTVKF{Pv&oGB?4rl_>A0{UA0-HMB88Lg=AY!Z=bwg#pV z%B*V(;179<(s4K&(urD*>N2m+*L%wLR<|9}X^vu*?Y; z-<4;Sa0C=wut5XS7dQT=JQC9b+_K-$!>xjcatWUl+0 zCoUYmOk7Zu09GMF7<(a4xw9l5z z=Oygr-gqi`AQiIa$sf?DpHKoy%{G5_Xtn3@F4uJk_7yEXU=q~hY|Zs@9zt^TL~J(W zz%{WCds-l*XnQ1x8qV^7A!#TjZ`VWLHLA>R|LAr9wQ@WHp@6Guc6=A6#@(2$_a7L> z6_ayp_H&THkxouoma^V~;4aJ(e*4y=?qKhK;Ioac$2CEXxOB=zQl)Y5IMbRBSHC|D zkaaB<5h5frUAl?_H?hV%S1A^u)WEllaQnsC_FQ`9*`>pFnv#5ZOmT1VI4SVTK0}4f$Vh3AAh%*;K19d^iyw*QThaO8 zDqb0CC>cZspJ?*|$!yN9x2j+(dlZ|A2EO=-Sj9hhH6~7}r(tirIB$^{Or zoG+2?>YDppgrnm<-ED=qAD-w+99nSwXMz|=>Wf?=Ul!hPJo-8mLPtk8I5BZ6 znQrTtKjRLkhagotE!u{n`8)a3_M;(}+9VoyC>d)(wTVIz`=d9)U4iE zw5lvjv1QuXeQ@69e|*3!6Q>l?Y*?eeuYu*W_S{5|3@^VS1qWBnwHTQg(Rcn#TEcb= z8*jL}9&po5exHmc2y8!E=%l($hyJRbl0S{Fo#fIpHZ3Hl-OLk#gz;8#Dm7JIe1_H* zA#`um`Td4c8-mO`F+>J>xe`C$wddCFiqGqvJ(u2QdWW9hxT_Fa(5r<{p2=W{e|p|$s-8^58G-uic`(Y@KOlm6jWwb9m3)%juE7rJ4bNON?f z91LGgvZwQnC(0q+6;)ZS9?@ZXwFY69B8RinTz4STV;2?YE8N>#$x@T0G6p9%Rz{6$ zie8V=qd4#~6Nf5x56&k?OGeoQgYe~Ja~)z(<^MqbWOuRrmJTRk?fm-kC2b4!p0u8I zwt2sHCEFcB8`s?^_j`u@Wq%e`e+XY{`~p}50`NjhmMoW?%QkX&VY!POS%R>PLQK>~ zrzjiR@>b52fkBEO@-E1IZM41hgDbF}q}uAZvO z**AzB9tdjQ<|?ExMu}4{4)Mc6?4!%}qcpeGqzAhD7I@GO##G4ewt74|xa;TF4qI|d zs0eM`Etyu@`?~ks(9o`Um6u^-YE1&Riz@r9ulS^B`#u9?+Y(;5KdN~f_%~xMN3MuI z21sp{2yb}1UVF`LG#v~c>1!5W=1r}p%#-t7w?X>J5?%?&bZp9mVI4%-zJZ(Y*C;qg zKCJqJu%(UnTP?Ww8>t#8d63s-jSC6?;3ZDJ%*9#;W;I3Hb*q+Mn}o!fqM`g(DniZ1 z!uqh>i9pyGGwo2Gq5N6J zu3MS&pq8lSf>#ABn(YRxeQ$k0%^PMoOc2nHeHM7oea2~n#K9k~2i8I4N7;Jk4dZ`g zA+kvj85Ltlik0>fTLo*o;v(q^ixu`c-5Qdefv4bCU-SHbw*D; za?_Oh=#8$8_K^uCyid5D!7_%79Y9T<*=p2(5$)!J?L~W(3BF=#%ml@Xw?y~9p6eXf z17#Ua8PjC7sy8rrkfHD4z33!~b_DgA0{et;i^4Lm$v_klx+mCVP~-90vi-EZ#LH?h zHQb+oG}D)p05NK4IOw*R^qzuXIPP5PU=Jy*gnmH;LWgw)$b0}nGv*EDdyk;es_~Fr zSu(p{*EH`=`E5h@!PzM5$`vA8XX|3v+Kavp%@`GYDj{v9M_LyS_pgnjkLUz(sWAMa 
zarIj!YdBwi4VNBod`Z{WK{eLh`9O=2YwpwJ6`X^UI-!yC6SKnA`le$o$J7n9MFqW! zLcly)t7VI3%VD{P2(e4y*p=G?phcL;YY^ z-rri)+0n%n$h@gbo2ued9CwEf_xe6fZ(K{ihF7J~-#p~sVeCzOu)-4}(`jhOwO6Ix zSRncFC&Hs7q+UN-kdi~&A{DdZUlY70GP2W`uW9a6eCiWTgj=`?TQal!2i}7MGVyz^ zdv+|ftr!O{6qf*-k~|!JQLkdd!LZrbq~4>9A4 z7fz52$)PQE9Svx4OH)f2PnUOU?%Q!@e)ZC;kS1I?pkdrZsMdJ5Z%sb; z2ksrh_4QohKzM_*Lmb@ig`PJ0eq{c^^-XI|D~YOee~M%4d2dlk)9wapgLmpv=Z!OK z?WaY+F8(`inHa@@4r_!wT-;F6kY=bDZ{tLW zS}xyi(yOS2ckK=B5%%a~papkSh}Sg6qD>Y{9rd0cEZwTPpLvw2r$=^|YnyJmg!d^V ztVn{{TlIYPjrZ(F-n2IjtTN+VNm|>SlkNP9n&w)q{<7Z35F4sRHA6%1$1ch)LN89d z&y|Yh4EQr1O+$ehy3T9h;!;qCz?s)qzz%6>t~)a0mBZW&PlK-aN^in_XC0rxMQQ@Pc`oRD~7 zZl(dk%#CQ#;e`;LWJwls1WC^l9F}kv3pH&C5O@qsC#BCKv^mw_^hlS z_Kn5G#naQ%Z$EKz{@Q6oumO@`&zXTK_CDdX|ouXBY z2=JbK4GCC_y5|&arJt)JGR|k@R@mr4koPiW-K{eFlTrThjrpb#?ed<^H%CR4P4$BI zEPUfp*2>e#Ge4pvS>G#emA@-5e~~WNUR`4R67;wU4!EG*kAV%p>TA*y)BdGh{^i7w ziMjEdgxOfiJ?FK=Ks?SZ^{}F$ScP572IKCPXy+yOK|?8_@{KD2vW$F0T$4-2FxTQk z9ReayKF>te)s{M+7!r_VhHna3sGEf`2y4DBZk;zo)8CRZejxnHH{U87dWl;cyK#)f z0+h*{jxM@8fL$Wa`H=fzfH`zfsk8g|=feQ&C4ggGRk(g*YqvxjEW(}T5|nZqZQ&Ax z=rJT~&?Jc%)pG~$jV+>IH~OK+X!3iR7jjd+EH_|H4IeLa>pkaui!WBdBYZM2QYrg` zS2F)~Y{Xe0?r_|`qHjz7M=iSi09?+qoQ0o&b`}TsdOrGC*v+pp1myk6!qP=gA?mjj zm|E+p0V-@d_oeAPj{c*5f05RttW#?@KY|l5(zf}nJv^2$xf0sQ%mI0jqA{Lo1W{T?2O}^i5w)$^DUbYb$Z%M2r%J(n$f%%+PNKuFB>bL z?W`Z9)X`k(S+30Z;A{%wri98nzCePq08rE!D%`xMM?{**2tC=SRo@q@Iu(~z*c*!HJ*>8xD3y?j z6ZCi#5Y1;)vkL_Uc2Xm)N*DWe2Ekl`Or`jGF|Q~yw4)eIEPv8CNi!QR-Aaifk@1gP zCP|g|wh6BZDGrud ze?nB?)v`#x3F#biBd>fya?MA9z|~s27P}p^++Yl=Kl%BoD@wCXc?S<|&mjY<2oB5r zJ$a-+3y%>XKk+qaQYyjQQWt?tYIu@Myy~LUVP4e7?*)n zhG8VvIA!NinQZT2E7)pr(86Uj4(F1A6kc{*P|Rqz7U$)O7fFdYb^jVYQj-WlJG>{o z8FGo$?{NeGNNX^q@*a=1k>2$?dbN()pu>eN?|xk6-R&GN&QaVlf`Eqk7Dr%(pJ~6d z8&QE9as5wi{O^Xcm>MUBTbHtIr8iXFx)_fv^Cwi%6`*-~rFTHP2oZX^VYh}g7nK307uvfEHNmF$_SCElwttn@!u1$JKmrfI>%nYg7J&rdaRNj2*{*kW zkl$_hTeS9DkN>{b|G7K9qj&jSwofmOx`9*iZ9+1*79+3+>@pByVoD$lg;CtPS`J;B zsYX}9M$riLeUq}xXD^yX^*}yrb3HX*yPX9VZOvj^!plA-Em$$9?nB*tW7GoMZ;t8b 
z=%qJcFEopqjVFXUK3cz;o}K+@Z9ENZXwV;0kciV_*jx|U2Jg!Qo@tOCz~}l6oPgXL zeFD#(YKML?nX&l) zVedVon%uW{QHm4`#f6H1lqCvF5m0H;Q3O<~ARrw?dY4`Uf}m6p>0Rk9^cF%W0@9_1 zn$R(ngdS2kFKgd@&-jl!&OWTqcl(JkgyemHWzJ_l^OIN8S6FOsp|bGSymoV-gxR_WNPc#OZEgMoIau_i5J0NX)v-03K?fsG>?ee(Q$xTJ2PkFc`cnivO1^y@!#?_U)%dmCUZa!M<5TfUmhI=fX!?7fL!{8u*BAn zz;H?H+m~ilerndMoqhj!tSGp%C8aMiD`!yxKw*Eue5SRr1;{h=+=~V5pOwS1Cp=Z- z%Rew27F2O^;x!Fa7Dk>Im+e0SxB1?aP^H6mk1ISkEL^7Fo!KQTWVYo1@b+^h@N|Ex zN8}POgn53`IP(Xz(K9~-gvqGiMUG~32uPrt=?7N;$s4q{_vv*}h4C&p8Qwgzt-78E zXi0f;FZ}~K(wDXuH1v?>=4z*C|95mo*TK(0j6oOA`i+392DnC0sNigOBX63sh4FlA zBSc!;5n#)laQi#bXI%z!^{Yip%=K3CwNR)mP+IcM30MQ!Y5r326dFMCCk_;usFU1x zAZN9H=1xEXjIj}-_8%yCUI5bF+n3bOd@L*L>FJp(1YMeyg4Q*K@JbmcncP1*MA&@-B{ z>y*GV@-EyizoRNc;8^d;<2~i;F5;9Nu%j*b7KaBO@cn+C|!`2|c6k$!TxGb{3oFY{1x6 zgiTu;)$CTI|C|?rlOdGRZSI$yY{Xn!&~^Sj zO7c>$T-a5qA(_pL*Us80aH zwKbb?QV*CK#0IRYxiyi2`5FtLw!EVuz52rk13|v@kYs$kFhOX&EkqhzW$+HzVBwCk zjKi5Lf&@H_-RBv0TV%2Julv5coBdf7uh>u9PQv&PrHtkTJNtywt`E@jAyOV;yiqBq8XQ zrF28{rx)ZAuFLAazjl2g+w7YaBHwBBtj?eDM!fa~9%8_mJmAmK5e%?HZjcGWLUTCZ`Iwdw`pvESl7p+$}6j z4mu3D9|N3*v=;nsT=4et@VI1^%3!JAb%P;+vD(rztvu33 zHdQb5me|?Eih%HmnP=b^?FD8bqxyS%gHtj|LMDctklt>NE)W`Tq;7`0^`Ro@-H7m# z3kW7jcI$U?>o0Er{?DIhG;nyzuYZeh;De=fiQ=qB76cfnOFO0dPjHis$FQTUFrIoO zI(4G8_V$h#oFoP2ZmeJ}Fi!5s zCjk4Ny9Y`3a-Z89o?Wh=#eCdd?(V99Y<&eW$sW&FA(}VG=Rm8vplCA_KLg@^y>vAO zwv^?MiDH;E6F!l}_z+I|*TGG>9$)H(TFKxTYXZ8)+p%?8N0pb5X2*eYhaEzXVf_^! zK}dHOa?7jB51syO+3n^~p3#t+hYgpSv9a)L{DxGG``vWEXVxmyN}O<4tET`fP^ z4-Y5fV+m*wPtei6>hh$K%kT;9m;G-_QSZP`4Zg~G7**5So}Bu38do3mgCguMZigaj zK!@=)Hp8E|O?=$u0`|+@bdE?WM-s_dhX;3Ilt zn&kEF1`NX{WZBGn?kZi&dgXC{9muIs5B+;m-Nh5x zssaLa1lM&l8PZt%d#7xv)r)Q$>#bMr@aPAa*1`LizGRzx&Tc(Abcs_9Is|p)6OW<$ z!?F4;#4fjv=Y>BchG98th_sOD-bo`THW=yfL)ES_hw_D!OxMw3w=T?ymVDV7@wV`c zNYL&WMvNrOjr;xFZ3bml%$BVY6g1+!EC%*dQaq7p#brL)Wyy|-;0nrHGz{zk3VWC6 zkR;`nBQgI8W4~|id6i2)P%*g>8%&F(F}ASZK1&z$ZparI!xXp@mLlzKO3F{$e|7i( z3fIgIFz(cwSq({mPs;#)ObZ@S7JWg+3{^poA; zaQ)+*zy|SGzf~tTw6&Z8T7hAiu=(t9St+&6lS_(aPOM7? 
z>4W`Ym#DMuK-ELajtFj?OUq^((+`*Gl23}bJo`wb9Hwk)TXNXh&!~oC(ITRSUkJ9e z(PC&mmcAhcXsSSS#}QMGV};|!sS++hTf0m85+rFb+iNLTICis%o<) zJJHD~Kyh$t>8%n_lrZPYr4qw_m1)l1y-dUss4k`D;2{#!8-BQZft2#Lzenudp`!<^ zBx+iKg(qNe6TuF{AG$|s3!4dJx2P=(aUk})hPLn3DZ)0M( zNh?cftyePv{Ppao=a~>&xgd{13N#yFq>(3!O1t&mTys0u%!nPbOM6J;SKFA4jmXSw z!k6$1&KypSk2CgA%a+)6S$^%`JCh&;7q>K7$9JM}W6*%tNs$0Hs`vyLMmHs1!+rJ7svGnlvmbfIl zM2(3W)0A$?1?2!BA-k8rW+X2Q)F&<~w~oFf0(;)8PG(>4E2rV4|R=5=>tsTn`1 zkO{V(=`tIk5i22eOC1Jg(!z{{1(nD#Vj~%-G#`{Oo!9y#OjBP!?S^gKuK>ZLPbP*z z%>K}&+Y_xSFfiBYBLKvamZS99256SQh3!xflK`5wyGs0 z^XMZ4-O6vKh|2~nwWu5+mBqfkxOJy6`Nu5Nqer8GtC}`8HpJ~_fjw^nw97Rfwf0ua zSh;(0J7nw{<;32x$gmW@oX@yQzlpCj6e}5rFxiGJcBj8dUWb{Q`bX70)XB-W={XBe+@Gj8%*=~_zt8VPXf%yy9J zrG)n$hwWAFFmjgSxzxT__N)fd2i1^cTx!+8-VR5E7 zTkcP}fN_EYo{objVA|B>Bhtjza}WwaSL+z>N6cth3w3SrPnAe(A1N=dv4_Ll=Va{iH6I+Z(z57!= zMa~mOoO5{( z#wiv@^HnXkR@B+8+5b{F9^4oHNR9M<+9|6EjMQsYjEi*%*q3H-=i6tAmFBCTx84kQ zIOHsS6Dzu|MHR-MMVvnVQFwQPyML4Ici=n;%m_M+PR5)HxWa{Bd$7mUsaOo%dZpV#Fm7JDFUH4 zJ?4Q^w*NRC(@_04*{Zb)5aI|tb!6Z_eh|YgFQg&Ay^_|t*DKgCcg-$pdT-qmJTEPT zAHhvjOmOlT*E0NC>DK@XdhL+inlvOR#ZuVS`;Lgx@Rtp)#50|7^0%zMK3tQa?T=I?N%kJC+ zZ?%j^xw)|4K4>m*^kxOD>QOnDja-0=TBcOKpIioIhddcK<1=jvj(eqE1Jk_lt3WVD zo(wbzC42uu2A&mqs-GepsEk!Y46kmzfLw7C0QkC3(VvkOa=XFbdJBkgCZq3k7c8^3 zdU)Lk9N1m0NVEs-RE92H%qF}&MAS(tq`Y^eyW#UXuRKft;JMLu(019_eq`8c6>wT} z&(WXykbz8Ta7);;_HLXArW{o&k8&wRZIV3q46Eg6-X{{nh~}AyV?dKi&ulIH7fnl=ZY9SSgs_HV{SktZepA^=;KZ&9 zXR$}LVoWAB9o&N}6&i_=rOROn8Ypl7`8UQTvED=Qp^wyvU3n$_!{euG*Ixv?))ZoH zoHSv`8}2%%dWCDlP zN2WorV`a8rR=vSsQXN%T;5!g>5wjcP4r4){j3|+4XGgtY-bk;&7z^a3%K{=jfT;lc zOmHkWz#t{WpDm;X=*dSQ`T_&TYRb7#b{Rl2rWtEJFTX2yy*I!rlx5c{LO5nD=?uwAK>6S*4*TTu~dbCbgeuosn&0Z5Zr7On{I6^&A#xYk5+<>Y zblpA;k%+`^n8^cq0{|6Yj<|8OcswJy&RU3j;KT`aJp#Qs_&p*+vI$eUi9b*}MLKQRaT{}GZmP^eFXS*`AhS36kbgVb)ER52Zb zEKz;te=m4AhfAbM_Q~F}{UrIZ0`(Y}8OX}9dRKWBeOmw15`dBDo&XV5)j?8{+h19h zfAcbyqd?WvVe1*#`A5G$f483$wI2ALLYRW_IyG&bEwKUA*8t200$xJTZ0b#lshZ95 z7h{Oa2O5BLHiAysMKKVk9bpE9@KPb-(l(cdL9IfJr`*D(khE@3Svh3~`~>FRt*42+ 
zc{I1|Th+6l%>fk7Ris^sxftYX(LxGVj2zq8)t5hcymc6v{Nog&NqwcTU_U`)xww${ zAtvpbwf(Q^;;}A4%c{G?d9Ro&&5+f}*&6!~JzcB%#wH>{nT}0eYrAnAVCSL@31iBR ziy*(}#N&>rWg34udDE1@2H!Qkk;zHDnu?W5LX_!M5Db-+MqWoZyAyF6X7B^avz64C zzXiBsYmA*PP}8s}w-c!HcQB5WkoVe3X}Mr>3Kp>=J#QwkQyPDWfT zf;jDh)bIKrPt+SD=;;SnqB_$(>S9&4 zDY5j=6ES*by`Oq~MDJq)lH(Nd>h7PSP)933wccuWg2e2V1feh%_p@JR?&U3?NW{u5 zeQw!!Q{!)VN1<^j8nqiC07`Wcn|Cb7pDj!A@2lKLVX@n%6TI`=QjQBd=WjMLaq!4_ zZpvrER5(fkYJv3Gb~uUz!f-i<&1>D>h(pXV0t@;B30>$p<@c}LP>&n-M+@y1C&*tD z_G?Fr!GWOKt^;7F3;fpyQaMxtkC)xrnS`Tnt0omxglo=x!{Gp)wb*Y-q&~>6^YC2{ zu;*hlLB1%fR9sxmvWq2Zbj?BCAr&a^hHn zbvKYUhZfbweQFCj~X2N?D4P#>5&qLuCnwYf;w(BW>i| zH>bo21x~>|aoiGZ^QBmAyg&2^&$0g^jY(!lOXlDgFAkrT<-Ib#vkNRv8I7qKOwY#I zj@zk>q!O??j2BCvpN$U#{zAb4Kv?xaV^gWKMVr8*+`k9ps5(3Bwa@$)K)WoeuyRsR z*+0;~|94CNyCr8jVE?@(|214^6Z8GoaQ*+-)W9Uz1C4GM)84EOGw zpYuMUeqgusBSp%yZ%20JdD=7*o^TFxw%)2A0O(-2n-+mlL3r$!@0OXEz1k1{w+tao zj=$9Ov~5hc1J>V&xH)I_+5S~Q%ywrqeFL_?y8K852sSyMK+3CQv=aH|1Ry41lPRO*K)b31s@({oBa zb>_p!t9w%@gIS3KR?hXHP5)g)Xxu%G(jGc=lce~ZISF_zWs=lbN++YQW&rA9ww*Zf zIfiA&@DRLknsq)+5pDbs2B9vdlcAYApLCX2G(oLWLSjL_yI4!iJ=f-f0AgVI9hXte zS#-;`N9wuBW@Xa_PR1y0k7ryYG-4!Zoi zffP_Fw){L$7caIbh-WwpU@kxH95|dV`R}MUyY#X?`Dn?foom>^FA(5>`8AG_kI+-! 
z+0lgMY;cUM88I!KJihz(9MFu(C`rHC0r=Nk@NNIoxfboj9#IhJ@xT(B=8+qqAbDUt zz{0e~;FU!})PjtOQD*v}BL&KijhsKlX(37NYUyOXQE5jP7w&o|}C1qf}I*^MAge-^+GijKx9&p^zOftoI2wR(&`VGmy?k#d8?9dUZq+y{)IAT+aIwZ*P7MNa3-7S31{-+QAzGnrhI0x zgzn50rXCiBM!!KrfSFVwc@94!y+Q?$C95r#`EV;W;1vi41n86N`xg8$pZQ`qaQ&Pz zK3WJL0yS=_A8a+bg?u}*blciA%33|oIWuh(I$O6kX12Ja&_Zdt%&+#v8^OsdVl#Zz zL|+x!T(`2)&EF?Wjej=%FrUc<_dBrcb(5j8++oW=hM0cAaE4QJ)g(G(6J9Flq+xeQ z!W5LaGLR~wM3Wu3D3-cf{(UvYvbvYIGoo*+X@4cv#w^LKVDNrH8g%dLs1C_G!*bBOTq)Pb$H1U9dZTvoj8f{;Pq63O`e zKt1CiwB@AzHOODK+03RQR8yXt|9els5^f+Kckx;pE=JS-FDzrD$&lox2gq0Y{-Crt zv=!)K&exv=J^%}6g4bK1-kC>DhkV5^p(Q3=0p0$AG-U(yXB=7u3>kZRg(vv_xj5Pj zrXJIEUy-+#35g1RQ0u~~IU78V7J)|__M?vC|Ul*$-GM1u{X%Fe%o(xVA@BT!ILh=n5U zGvusa%0QS`&<@!HKTwQJGna-$2a$2>LQUUK%tv@2&$`O>^HxVKvuI!1PxfxQw1_RO zafp@igIkZkju(^6w6ZH)dsj_XsfCM%lEll6!nVH548iwDPhySKt|`mXUDdF)wNWD8 zIO(#LEO&B9$k(d5+HAFn@6s;PdJNLGk3*r>P2;nmn4Jdi-4C2r`)lRjLOv|^5nN!m zSiR5-!(`hnR!Sr%Ncm&P9r=K(%v0=}hgaX|s6F{lbDjUk#M}@-CeIvBF{<#G!LM2g z@)>&YO+&lL=c;sXKoEY!=+ByR8aigZQp4Lk%W2lRM)kJgE4}Vz)3LMy&C?8_$(rgE z778CSFLM+X&JRyIjZ5VO4ZXQTEB0}xA_z)CRnhvGjMXK#9Ig42{T69O@4GkI=rp?K zx<=*A9(W^9o;4d!3l?lgl-RgZSlr`aC!q!S6yN&MFc%!9yr-tOl;j(nJnngKNB+kC zs#v`03iJq+Ej?`}I=R_z7IMZ}f2T|F(IA+jgQvu4wqA8adxEOKzS8W&p4IKf^hKd$ zQy!OAyjoke)E_q#d{n{C1r9UMktZv{gaIAiJR^ANTLkfgDTSE;sF{`(;qQ$ME^|S# zDIBki3BSP1;t2#e<+k?mdkxOFgd@(qsh>8~Yp9=*^42e!s;veNUQ@LcsAUQb8f9Ot z%)HFR_+?Tsy*C57*8fo@>jJ%bi;F8LAywG?g9q1i-KMZ)uJv;8y_jaz?TSX@rs@KR z^=v*(hN2Hy`r8kulC&tpW@ItA^~~FeEji6*G7(sn6F6sOg?_2EXDVs)*7Synw}glM zSDfosq1m*0n&y1>rQZ&E?-PO=#$ zKQp;r*!AGU1s>x%kNU57O!~mff`PO@f4&qxUY4r#So8qK*Jh*^%XPNA+4YE{5 z$8Js6=)d?{%I3;IeuQ$jL)2Coh$n3dbSb9>pK;(K@Oux=15WRU$4n~g&mYetsAxSW zWl4mefSbCnS`K)XySffnD!YxqvI5B_qmt%^MHEj4U%@xV-Ma?g6q-=V-zT02nJ_#x zh7cui3!R#yqQS0f<1YC@&>3>jR$HHLY5tpuw&jsFS%wAoegjUScsDc9rl$RDL898R zeKcF#?Y7c@MA~Y=ae_!Foz{5T!_DJmcJnnargAoB8};;+VJ3-6*AZ^Y5S!ejv4vhL zn%IkC0_HO@vd+otW`6V81@k*XY1pV6?gJ0Vwb_Mg2}w>vyFH!@giqNo`q%i=?SO5_ 
zl*$scMEFk()PTkx%g*hM?rZ)O(>Gnyd{Ii#QTwO>xHc!~>ECc&I?+(`2#sVqHKRQO zSiATQF4%5SS;O8dZ6HOmCtH>rHS*4IcG6lV2{114|HQbc22(tVa2TBN)UJ3JXcPG{ zok873)OJo;w5T`9eMrhgj`Ukl+Fq`chFK%!7@FaB^ntR!nYWYs`mF|}U&1+?$7)Wr z9j=sN$RO*vxqxmS3vlyCPH7}ri=qu8oe=c zy*ia;uxQ2>V#l*4e5C4N?Q1G}&-V9Scj?Z_A*2_)*gQ1lYZyogdX!;yZfaHMm`l>p z{esT4Pozl=zOa6Zlj_LzT z)`RUWH2aEZIP@;~SZh1^^xfJ3N;Ca{q)D#i{ADi1P*QW-_;g$j0)KDBIV3d7n%fxa z5sPBPJ)>c!#i(r7`ffkDqn+ofaQK8Ko>b?Qf9jde7mVk;XlSI20^Kvb8?wsQj#U{(mH{8`2**s z8qA=z=%Y$u=tJfpJnj+7r~PXEg*EpsZjpTB>4%hKTz<#Tn>Nw8lw*ur2%hO`t0;D0 zwEL#GWUI$vu&2Ysr5j8ss2Gne*MdtN4|#mvMef#K;<+?*kL|{Ip|rf~Lh#Cf{@old zUT%Md1ETFGF#sx64qIo{a%lYP4%O20wB!#{ut{#v`1`z+KhG2W;>X#6TOKfcIxK>X zRS2(WKk1OgZ139L0w8TVCa$jqz*VFKzad$zA|h-*8Bm;!Xg`oURS-VdsQ;|(@dF8j++phG0~ct zdQFvLF&l(_$c0vJ#cu^lF4u~a^J*{fs6|=6aH3e0KOtIDJv(f4YAW~kMoldH6)$+2 zm5mwI$x7rHxl+-WU&%hRPs{_KE=XWq)I^9P*M(n`6)s}^rmm^?cl}czxNEen+=iXp zO}v{VF?N%Nd4mdgBk^`yFKVR=tVuI<Wo`q!1`vQk#0VGhC$@{g7%=|FrUozpQWqzWz4KSExfjkeZ*enH6V4v8yd!W@Q`fs~4odWi z>3hcSt@iySlL%HL+^SYUT(?KSMCGA+`FnlWv)C#F{Ne`EVOksF7Y4dtm)l?U-EHd0 zf7ScPLyINvQK$_v*xuHwlbb(KsON@zbSl>kg`(8k!jQ(Aj7jvB5hs$UZ^=PplUbX1 zU(y{xf#iV;DSChu-!#+?lO+l8{S-3L8?K!Z~|tdKd|~m6r#(Db4i%lJipB5jYl??d>*A7wMU>Whf=@bdE!@}Kn zqn}QuftaJaIu{6OZmYA?(VCm$VVkR@5zEdB@}$FXpH;F)EW1Jx$FuDR`QpK@r0^K+ z<|mm~dcK)Fb#Tq$sC8g{2gD9dty_KpH_v{?0#47EsrLc*Jzg%^@!Q~4cSu3q*xc~{ z%L3f0DNz67=jmWobScb_0@H%cH4d7Hq3rp?{%PnfdBU+1fZ~kX90#?Jj}wMz)W$9Y zfVy)gaT@?e=YNV8jwvR7zp79)O&Wsj^j%?NRh-z(CC~vKT(*;ZsOeQ=ZV1i0G|tS2 zv1Q5HWQjVyvyVi1J&~adE0!%d#k-j<8x#6jK#XrhD#~`I$Q8QLe|XjN`@J*2AO+Q( zbJdO%xn3g#bh_|$=E1hY)MLkD1{|)V-{o0DI{w@Mhxpszxe0JX3vS4^ zX)i6E=-Y6x!50^MeIrhV!v{->%j-{^CwBRH!8DZRj)LX6>=v*eqVn}f*C?X~GVtXn zjYsBhe*NTDP3mQscS!)ksVk!EzkUPQ^Vx~CbK+c+dwvSc9SCm5GObN(l^|<@YsHt} z4H*>9KCcG6mhfx$kLN27_-Pqz;Um=OW?c>0e5+p>ydr)JZImoqlbcD>is)bGKFVRk zGNy`xB5?o;EH@NQTpMKX2V_uem1SPf%ixh&aM#t+U6$1(Icn{Mt#{Jpy z+2ds|2jAWpi04&##kpPLgNsbm-a7uKo%d|9vL7?spqV6Sc89&TvFqa9@m1BNzl7Ku z4Jyq;DmOWM*-l{yybH}f5;{)(am#Rd}2|0`G&$&@$&S;HK@K4WYMJB 
zqsbNyw>Qjxm;X5=C0D%cak7A3g?QF#e_}k)li`Emy#l*WH+0lEV%`1}1NwF60u+e> zzzh9(xH0|(pj&@5)~8fd0vGEIg6HCyZ`S878hW0AC02@vY%i)sD*7>JCdV^_Bmhi3 z1Df8^j}3hhv~mM}s5z!-TjV|Kf`4v52Yk0VfAj|5smty4cP?W42e{>waLPZg!~6_r zG}sX~_4*$?b}Uc}9j0M0qdNAa@1lyGoJtZ3t~T?zA}B<{MjA*sYm(@ zl%X^8jZNnWof$v<{RKDv1RgVvSrl^Ck;DprlVUunz>!cZsCjJi&sOEp6W}qq{uk_}?Xbc_AOufQvk7hfXRG2G20W(BlV4l3|JVKc*L(eo@e6tT2XJQlrl&v)!9@QB zGs(3S$1l7pv45$GNPlgEod7BzEcgAIcdLDD_LmDdtwKWSnEK>Twj}C)riw(o?N8eN zI1bFkjH$Msr|n%MfdiUGwaaW5dFwH?GHuZw*$ae}B%3TJE9;QxAW)=-S){12NVZO_ z;6A-0vPwsqEh{7?hGoT{iZ(|6k=@3W07}$H;D42k7xsv0 zL#!u`#oI^Z+{SU{%U!K&40H4#3{O5dBm4Y%4)5j}b>fZHvy!-Xw^6}2t z?%s6cb4m_j!^Y3i*Jp&y4fVKuH$F7H?cVYYU+9X*`pSn%@#(njxI`UmuEz(#<6z-s zA^!oYsp#;i(lV1fRVVh)Ko=5O3jD`afBhQsX_6_pDSu%!g)S4cu8%aD}2S z_e_K~n7yWZ$E{E7o{8eSPZd z#fP_bsql>Q#80dwCtGvlqphBwyGj?_F^9TO=O{W@v#zJ1szvzvVS+dI^0;M2G5F_9 zPiX#k`sn{-EPzJ+sx-{#d59(gzo3J2>pP$bTZmUabWk1M6qZP^d&@NWTvBeAaA$9K zokeYDl@kiqtWu~J-Gj>FMN;phY;<7$c*L#5yI<8`>SyjWz>kqN7-mowZM4#b6g;}n zZf{1K8`p2*otC6dDZ+G8(;kEIau6HWDx8Iy|&tS<%@+4zy;GJkS!PtVXR{9mg ziwp2TV_+A$*ntut?*#RnV*fVTW>X=4@uF>wy$B$<6|3C(M2C+U{HyWZp%mM}LIN2t z^?8Dd9)bvHIA8rsEl82>fU)w?4Da`z*aYEL@(@_Lcd18per7^Rq~9)SG!umWqU$i( zADap48=SAW-wzm;r}rqsuJ$L#i?mdfbza>rkT9xuT(q`nMvb7|{MwfJ$+!CT%j0#7 z#N$hJ@ABoh9~kVv9glKJZI0&@B;I+kM$kDnEX-g*wO+9^?Gt>UM02z?Eg&&3H)THO zVZtg|Y>|28zWbWs*Ipf)7o5U1rGsxX-~7p!_hMm@-z*7t@#Oxj&(TQx7XkgDS5Cq; z6&WMc^xtK@6IPMec8%qMHCLmskg(F37KNFN* z)So{?m5+X#MfPL`>8=f#eYK;a$FLoRl^Ha&Z!R^~md#d<^o!S;+3U(bcE)%*J&RtT zcw+LqV}GW9+9u(J*@$5C1_vS8w8ERkN1WSS$*&S)v{J#)fi#Ry>gzgy?dIyEW#*eD-3f?M z9RBq8K|PpEY4##1HxXKU7@h*@kFTRD1yy{iA^^6{*1c>FD*0IwoN4= zcDCMA2Z!uhxB69@ZvP*krh*^++fNi7Bs#`t80m;!XRw2zAjuhhSFA)gwpXMvKCoxV zWG?|uvv)x_VX-^LC{HVw>YE8y!#?|)Cp2t!-bFK3drP+k-gcGLeUT+R(9SisD}Gs5 zoe`i-s`PyJR(3G@Xf+H2P^}Yus)kL~`D&l@tUK@O7rc$cmzzQm{a;1;#~n87x3yb* zUXBERIx=0Wx*_>4tUP__7vAogI5ZgH<5D)A?G@oW#O6di-0(G+?eq7fcU~zqD31)l z?u+VZGK`OHJzyW0-re$gw^h|dI-IB6MtcttKYTO!V6Q)(uX;6E`OscT)B_G*eEpEw 
zzM*+0F#np<>oNC&L-^jxj{hw)gpBnRiREsv7DSc{w=jRFncifJhEZ+-;AoRyQ`7U4 zj-$5l4h`eQ2D+lE8Ivv*>(yfrAtQ0=UY}Vfhwv-|_iHIuwMJ{zfb&uU&#vh}(TSlV zM?YqYQquNwke#}wM@nKr=n1TvSRDlJRh~Y5SJgz=At6ZK(V)R1p?58>rx>1Q`u%EO zV*Dhc>YAYTPoXuC{Sp7*PSG@#nB~e5r?k~fbd&Y`2Etp&_}X&_WrG8%MpIr%Xln{F z6nV@Rko_}LzzSaJ$H+(8iR#sEu->Ml*!a&(zjj6OyjuS#}=Lj#8O`pG|PmI>YZ5!x4y4VrdNosNo{#q z+;E|=Xj#9Lv^gcD1L;1$W_aQ`>9RLV>{|01isyyAlF$8eeslt};w&sO6^Fojp!Rgv zeD?juHhkeDLIxnEqX*ix9|z&fXwUHQeJK7ITGeUH#+XOx>n(v#?T{4Lt6EZFpZv1gpOD zkYH~&PrNnVfZ0#;8 zOkC`y|Bo_cP%;j7VuAkTC;Vt(p8t@uC?;9y6WES@(gr*e}eEJ)_OzEwXLQ^yM0dLkoZBMDu|yH=!?<7GnT-A3Xj?J z&v)d-!Suo z>=IvT5r<^}hu9yTYh5^jt<}CAl1uSSKSIzwL)kLPhk?VBkh0tD@>26$L5S0g-l0cWf1^2QS?f<9& z`!m>XQWwl#b}B-sc-WHln8tzvA!X(e>qCzbv zR^*@s%lekR7&)70)@UUJ*)e9BS`ptXhS7$8D6v_5LSyygb8A6-zq1Yp&Md&PRt@_; z;>p$R@Mg_n2c2^&;fCT#6dhQ3yml*ojjpDd_L6;QwKZhq-89YmPgG(3{Z2HuMDWg% zhRr8uMb#n-Q7Ge~-#A65o+LeT*mZ1z7wi?zF1QzN(sle}L< z@f9qRCcNdo+%e@*EcymF!f=2^A9HvG)hc(@K4MaBS*WmJlDG6`+uQ%5VMAztH*V)CjS)!mV^Wa#S?GP7`-#woj5TN48z&Qy9-?7R?EP_z6yZc^0_c+@Af(w8wdJVZn*k zY@cnF{1$8n#YR9tbljz!aqECZdHOGl!gN@S>>8IbJLsWmXY<((I*>y9b$y`3P)G@9 zVXtBh3goF$7OdutF*GyN4+C7ySc1P}RM`i(Dt<${wxJawHBR~^i%FhfmGt)4p&zq+ z-)gnp{Uk%Tqw0iS*awc#(q(|SDFPwDGA@Wq#jdqtowzOvJYG> zxzt=^6Uo!zQ{fI7)P!aH#T_Wu9!ndr0SOI+#{zk`I%k`Dhuh0AQZEv0l&gg~Mh9@l zt~YIFMSaJ)M4bk_C%G?ocBgC{%7)EJ51z(#&gap&^v9ZckW=4H+Ldn?;l|AuSxrwA zM1$|Ys*=xe$HcX>(aRqn3+(fXnwXtr-aj5Py?nxX-CkZz@&7Cn3W*l{e4C2CE(9~3 zj1;qZA>wQIS-Y%D95J(6_h(IOvH{pOzT=?-nu^8TdOaqrzWk;LF-Ll0eetok~ zRoag&RAX|2Hwnn%+9QEbvBqT9GhbR=gkm0qak_J~2+-|0YpSMg->qMaF9#6G5Te5q z4rHCGxFEZ6F}zLRH2jg^E-iCfVupM(coV8{bMOXsjgWmD@`?RNj&<5gZISzSNjC)9 zcXobIQxl`(;@Piy;((jJDdBijFQP25E>YqjdpAkIvH+ERR9N>g^>%2eRc&so>mrqD zc(xPe;}((&rs;}!WX79RSC%;vEgiFc zc)1)_W<{dNLHYV1ZH+(HR5)S$p8U=>(OqNzAxqt%cf9RFA{va$xlHytgcP)Q`nQ3tQ?P>3~U3i%vX*DR8gEi&2ZULVt1Kn1= z;nM1VWLmU;AYF9@Vwn_wiWn#5pVy)KvKZ=456FCLECe6)6% zaMVaB91;!r;ity_b@ykW_iyv5Z%DI2g~pkSAM3H|fy!^OXbwZsU!$h}pC%>dhFF?n z#Xhb~QZl7D*7z}vDJ=ehkQ{e>E6}25>gRl5>Y%FJBu`^DR!>HIx)joYy4|;DJbCAn 
zZw}UMM&Q-9_mvuoduFHl3ku;Cw5`M~li~VvbQ?W{)+M?AE%8GgZ1HM*u6IgKN-OqK zvXg1JAyi_6^tuL)g1Rp;=@9)3X}LGO6>HuFVysff9L<{p1@VFRe$T7auJk9vnb;Xw z6;gh?x5;WYgI^;bFZ=S3Z-?J)2h0OI+Jfv|2%O&jU1J1J zprbpAFKk(G8KfJ+f7Dp<@1)!WiVA__;BR&TZlJJhHz|4FV#6VRYfGN%u9v@-0p{Mommu(S7kf zjhfzzZ9gcO^-_82oq57{Am3ZX8ixPG8eO6UTW4P|{ArZ^pYn|csywRam}5yx9DnOA z9Ib_^8+Pmal{oX9N)t%owkB%`f(o9E7SnP_J9ZVU0e6lv3)nLv(bC(y1gEuJ}cJQEZY5-C5-{<7sUq*%m6CH3vZ3#ZjpG_}<9?iKPn zxBaN}cNQ}ko>Qj-E@)2DVC}m2ujyzN9Mn9tr=_jqfhq0JaUh({u_|=ez*0Fx;@Pg( zrPoT+$vT!4uIqCg9*$ju8`dCDV&%7uzkeH2-Q3TatxY96<`ihuC~-htICgTH3Ov$i zVa+DDnq$_|LNtay38JyO9A+opy}o=!VvQU|kQM^2XEgEM$@k&3=Oa|M^M@W&^DeFw zxwOKw_mt9dwM7o5zD)^hf;DY<2Fa z5FcfG1Y<=Ww?!iX?qw{$Qht9&p^>8FX*M<5H}a5EG|RhV0S{S5L(<8|Yu{pR@R+LR z41aE~8YtghrUy13OB84+3!f=(=D20GSzwUs)Q0WXoOj_TWPfT=+5BIOy?0zw-PSFv zG*LuR6a=IQ0wPVRB2|zk2nx~(pwfFy=tY_eN)wRYdy`&5v(Tg&S|lMvq!W_R1B8Tc z<9p7z-@Whqc<(>>LsHh>d(OFL8Dp&PwG*E~YW3O}$ros>4;#w01AftU9k%N!Q~SUr z%lR#l1hWz!jw<6#z?j1^Fvx4p_Oh&VGZ<|%2-wpXQPRj$CY=e8uGCO)oa>lM< zxjI1P_p}=~+^*$*TiEMEyZ}+9%6(8hfCdv}QP0puP=^W++bbe5U{DOv;wT1Qbl_Ff zX; z_PDguUpPPJVlUl|*armMdSOZu^cv$^zjBe3uPnXOGa8lII@>^AWja!)oj0pX=yh1N z^TXaCZut?gC=%Dr`lrirU{1OHJQe!g^mo_~Oz*N=%6YZ=&0u$mD0!xQe?!1yVOv6JMJNLeCdRA=oQv zj@oR-;<|kM?oDZrRWCLMjN11E>BtdRtddqos~NEz%p#$8IrxT{fzZ19xnF`Sf`m@V+an=@%X_Zx_1%%k$>InH;0cP*Ld> zBg!ZM;>nE{OvttYTP7$-^5h4j8ymJ?t&1vNx2~BlI}0QtG7MO`NdGhMmb0G337UIQ zHYj&MSA(6WY^68uC*Z9cf?9pNbB^;UXc>c!fz1<^Qj0G#wWN=rzo3VmwTnOd48FWQ zfARWSqD%s2rbB0!!*9MZnoK`;zZG3(+;&GtwAmfM?+Q|(R}a9c$uiIFW%Tc|ln39( zR=5Q-keIv)d>L%;z%3!IZer@)9e*K zuBV;Tww*A?Yx~BgxhK}1F$B~FEy{ylsZCw@0se} z+;~L!P?9n+#l4RKm%T{Cuog5pd}uGVLMmGU7SEhus^^vc;%wpDhFCVaOF;V(AL*|< z0i6Bn)cnd7wq56l^=FhQd$+{Pw1!tDk`BA0K&QIaK+92HHUG-ugHlB_^wYh zkZ<|~5rrn6ci^)ZecWc-{N}HG-uy2co|=y>`sRg=f{LyNa}U!Q#qIcZ{tOD*Mp^q| z!gYsBX9IT0$^FDfg?4GERMNz$d&579Sq64y%ccV*OIrzZ0bo;r%dqsvoS`{=hdTDc z%0X&$KgN7!D($7LL;zbBBG&nMqn|5x5T^sw+wthyi5mzp(M}x6YSle;X6oydnPMZx zxkC+raXqZ|B5lUf2s)l6WM9FHb9c;q>6p*>uD<^g4ShM_%5l3}aweFuRpG0_I4W91 
zzixk%4^u|tLt}S;>HwDgxh~Y+s#uM-%ejCSU)t?EW?i6YkI-E*r9N%t*CYwD=2!`8QIt7Rc=< zwh0<245Ifg{rhW+KKy08u-%4QO^hS7x$aCUirUfkGMmQUYAWMm!6^UF=}U@$a0;6v zb-;B86u_U*RmP5IF=-W~iEsjJ9?GkYPoKtsgUqlIZ*6~))8t%I&<5r?9B?XcepcYQ z!@X+Qq3q2G;_?)G26peNU`?$-q22}d=BFY8fkf-oc{$iVJ;VEST>twZv zOOzZW$CdAvG$zK0!;R)k6%yQ!CCFKW^=kuJTAM8|LQ*e9f5k!uwhV$HQ+5q5H&VeD zhAhh1CcE{9SxbAcuV&hyX<0+cz;2S7lYV!^2NHe>+3X^a>aBnTK#|dO_dFca9m>>5l_GvHy4Gm6tv@7 zBwv3KE@`lTQP;inJ$YQa2?$ zs(no5E!R7XN7MNWECL5(5!JIO3`~R>H{MzE&_AUW0|n`Qp5HY zTrdYl_Ib-o_$m%sPsIJM-UddGgAm2Xl1V$!cMij^5$=1W(sO{jl4{ont2u#q2qUZ* zuQ9l!Jp>|f>xd$!qUK3!GM=g{$+W~b=hEup63uk0;^n&C^MtggWS6@@57BQu4L)=+ ze!X@jAj4K4PH<>`2qYkN{_h;5*Q8oTLFfz6(-C{^t*95s#aD_zNg<1`Hgk&y=3#9~ ze~qR>w@#udbHFs3!*|`!%ijrJc^mPQ$w{+e)383&VP3Z-7f>(8ey?=TdzFuxan$sd zudgGh*Kawv$J+AOs9s?KZLAdkAU))$PuiT;{VyA_KI}BR_aA&r4^dORZrHBrKp(cM2^!D)oDoj zdQ?g9`#>Chnj|DFtq^prA_rM|?Yh#6tGMt;IPRWa=m-`5Fo@{RSIz+vO$DY6Qa(P% z+DE}}9jqoDSh*^uEgBvYJJTb-9KoEQ!FK`X#L`ICAl2>~gg@o7YnLnEBl^I$wYqJ6 zXV>{+VbA4X>O;yp_K9FizTZL)mey0(sHka_f66ct^I3#eiQs{W^n2 zDWNhtdiHs??tg{HroQVq@dPL7nIKKVB?pU^`z=!ryMeVt&+vVh=J_}6Cl)syv?>NU zANcd)KFrx2VN=HxBw*3yQO|OMET51&lb==MPVn+NHJf9Pj{vtm$$`CE^k%33GBH$S z%E1Osy&~kBIS^}ye(*BL?zN4jEkE$W(-Chv)%*0K?FVmOR{GfQgX^S3{nr?jeQ-Xc zml&%d+H6h7<&A$?YmuYi`0;cQKg=s1t^Of=JXV5GhzTEjxG zl`13>f}iGbg1hWAV`&%=+e@~MJJJjizh*giQF@f)jqzc3Xx~2l=b?06sZt6p*AWJf z8mmA*Ilgj{{fDe7ypueX8}^XR4c`$`xXP-_W^dv9!d@x0xs7i4g4etYtY_h0 z)7lqNC`H#S8q~Db_f%mUTXkS-N770ea1cGV`bM^w!crTLqE&pxuid81qcI2JvAS%LDn1vhkms1)F9XA||Afh#p-)D8 z-Zfv4kn*>j`Ze8dv1ep9Q;i$HeUgO!roRB2k7WWv*P&_OpW5%2%gTOwBiq6*L@}jD z%jn~@lzndpb77B{vUIiup+p1TAX9oLMtI-fn zsz9GhU83p?1OV}%S0;JP?kY+?7z-Y(4uhjZJ?kxCzI^>jKhubn#Fn# zbh;nhe0F-&|C&JPorS0iy4rw63RoBoB9zLCY{nTE5yv2RU$Un?a4*0dUT@u`09fU{02WGpTD)4FNB(#m0j`o8yBrHQmSO#J=IEd8upA2bU40 z70ZLdQtn@|Gu!Nx~z5Tc}qa1MUwwxCx<7^#WOS5bK;rVYMV6C_K`EcBA>w39M_yK z-20aEux6eiBM}rr67~n#*FN}Hu6ex$yr zakCwL+Rb6PIC+67LbArF7<9bE(%&pN>mY-L-P=xA^c3mI-UC@h^HX!y_=CeQbITRB zkV&UIEi2mwIrWE>;dbqIVZv_|&QOeu7tGf4!t*#qegf%j4)Wl0&R&qP9@rTQi1<%! 
z14yv~|HgHp?bhJ0wyTOePsPoPpPhwqF57D^$gbi9%^?G}^Rt|U>=)roSUHmR&dJQ< zp;<4Akn|Ow5a_qvVvqzhSfwAByEe4|d8_m-gXZ0zBuLSIo?iPXZ8L832+EtT*#bE_ z^^t*R&y=1p^VqxVjd{+Ea%PXQ(XOEnCmj}{m*FU%n+Wlr3Ovd-*+A=cWtK;T5WJrs zHahpHlvrTKPr8&0R;^|^i^|8F_gN1vol$Pv{%f`{#1n4=gtmE0JT&UyfjZ)zmuZHr zMRTW3k$r-4@l!)+npxY6Mu%@rVMeZBW7$frMZ?_RHM@qV(d^Fze!Z$!(Rl=j;O0$>dnhV(U`8%BBNECQCrh3M{j`vK4KALX(>P}tbIYr4au7@9QD z%&JgVtp=fMNE{#_y-Ratdb`y}BewbiQEE{G_Bj|I4E-o-$uS=3mH#dfk+mXB!V|*J ziL3@T4bY~_2Y_ETBL+2!eB^)GFpXFG!5#rozWKT0z3q?C$ISu8*b9fV!SX}v*1(S8 zR6Z(y@!+&mx=C3UBx`wN$gsvlG)|86RuP|qGfML+I#X*mYSFs7_j$=zqPr;-(&1C~?Ip0r1>$8(~vmZzXH0}_?M)AmxMXgL5|dBF>{+6W;TFlXT3gIg;fd^VM6X#l?~ z4-TzEN|u0Gf{~AnGOEAs9+Gz|+(^04d^o|QRpell8midhNd+LjTk{lB1<4s(HyO7b zk{mo4bAHf5Ln&Aue~_SHG$>#&vxc%^_RQ% zDbDq_>YD~_?$Z(eIX~88O((Po74H@u^U`z_kJg>MEMW(q31Rkx7Jz-(R<$E`lKx=C zmXbEN4)!(g+f(F@I-q-J6eoLWO8HDJ>tPHbKt?nyZa>K?uuKh376yHh%b7G5pK#!6 zW@5XQFg1D>1qNuV+@SF`v^y`%NqQ_uV65GJS7YbkH%-+_ljH_2R^QB*Ffp%+ma*{j zip?<*yN9uC#|z>*U)8T7<~_qBPwI&D0|RuAVYLOuruDj@R%(q{3=Mpd&|?vLz!*(2 zG?*;F3kP}Ron=6-$ummKSEK;T2b?-zeq6aDyqBnBf1BfX91&Mc7#dm5zVTS6(kWX| zORk_KJ7gS49?M(r}A*Jvp`?b18pGDy;m-h zRTx3gMtyRK36z{W<0aRYnX=rWy>iHLt*kIL1Bx2&z>{p4GlA0{$nEx{DEnpZKlLZ+ z5jDMbhK$i3UeT(~3L~32tG*ND|AD#C`8dl5jcWer`r2pGe$$o*Y+9s-@i^&_!K#ow}X_=5>y-y*MVM40{AHtee?d z+8WCTsw~`%3Qtn@hm>anfJGre~abv#kNXhG3aeznY% z#O{})XN1T=4_#Z0%O%smtYgagtODo#Y82S8{K=FEH{uRyNDIF|KSwjM*xDCGH8j=z zp2chFj5eL*3${`p3&3^WWdEyl&^VRHQNUK1H}7BLU2`|9nD-uloRinVH|GJ*rD<&N zR>CP|ESV6do-^)9eMOqhA%BH5hNcpXz%;HK!RuoU9^vsou=%K9rY?7ohz_c=z0yJ4!!5B7&_8o|*44Dr zPWH4LJpUJZjgsx%+nDVjD&e=+?Q(JC(a# zsKN9S-e=&;^3^3BO1FoPn9O(f?`KcyjM%eXi(qNbd>;)o6zlK!bnS`3oV^mhGtYM= z!E$27W6a%WSv&!FB1N5-J##Q(gGtK+`f%#h9eS!5L+4ai|E#FNnPTJ*=Wty)Vw;1| z`PpJyei=-|lt3HQY^P6bbj2$vQWL&9Gk3lQxgtegbMWEt4wnLu4@Pz1XasEqO4YIZ zkvnYL8@tH-5d$0)i6yRyu(=qEM+V>76J#{DqfA}DpPp3<3{W3+@gP+5XrkLUXZdFv z@1O$VyXKAzrcyP7(!y<>*>1zRz*YIf@R8Z$zqY=cane{{8awbGc9K%Yx*D<(!=c|N zY<6T1fYwO{moiegRHp`1U!EB<;vlv@XXpKF2GHH$2fDjY;ILs`*IS^PmVO~dtS}SFw`PuZz@_sG1SSbt@fR~2$ 
ztsXRKo$XeWSPeOE?1Y{XG=Q~^AlXcR8Vj)-%cD_ega){+C!@2pepcR(}Ztc>BOui;**v3i`e{9NakM@DmvcX<}Jvvz2 zXx0D;MA?;I#>={n;|@ zv?4c!0XK%iSB&L~t(txv;t6e^0UBoNS@$}SCADZ$y2tSS;wWT$cNxXOcACfwEq_o{ zQJ;*~WBDWpkfd3of7bfuh*w!+Phlb4rcmmZ+U@i(-Eht?AZhhOxfRK_?{u#xFs77} zJLx0If_8-}8ryztcpo;YG~8RQ0OCdX)!bQqGC2UEL?hfRhPb-gwwqzyK?oWb>1<@v z9yVqyvkwi;R*~mKFqi{HsYXo6#}c#~_6vxz`pcwj&!e;28d2LOOPVe~37f%x3PN6N(A^2J0 ztL@sotk*c4U@whiN#t+D3yJ++TvPt7gS=ZhE7f?QS;QG)KLwq1lJ-US%@mtW8NVlK zgz%$Yz9z^cKm>bwe$OmSW6RK>XQrqw2Oya-acstyHls(Jc?agu4+ttxo=ugo+8k9p zPE?LOr;(P}!!xU!FCX5hb0{wh@VE48&C{-gl_tC&uuzoov}PU#sJ{@2u|4l>vK4Oz zq01MS0Bafu{7dGep%5m-uJkdmB%Jlezln!w>RiuGJztV(8w$7y4AR8$Fi-2hfH+Uy zX6*tn?&|jrmRJ5w6bSQX1n6cc`*EMIz(i0`lui9V!-4-lEA2lNHmBTK9LJ_az~gArQZI9^7Y@Ji;^e6(Em8I9F6$T1O4wW z`X4?q9yo!Xp_opPP4)lfqW-g9u2z76v>kN2GXI;IL3 z=#}Q3E`s ze7)L0hq7R0)qyNQqhGJm0%Mn-@+p44&hgK%3fgZ%O2nr_&u%c!?805A8*HG?s7`&L zCeR`uVcbM7yr@|Rc#hZaZPYN(UrH3#dOWnm?gG%XFrFDkkeU5}laQ1y!x@_0mpb{T$uGfE}zu%W5oE2P)QX3CUjmPL%Yx0yteNEWHzu28*?P`XevIFFbV)I_Bl z(B720{A-;ujE5_J5s=TQw&2GH?<5~}1h}1>osYSam_8|}1u*p=yFcn|_j@?bssHDG zk6Hfu^*MYCj)tm+uccI(3equ&BoCW4m;CgYm1|ub)3Nh+sM9L?n3}-1A@^OwE~f1- zuGbA?=y$}bZ(bXUa$uq;zBW<{arac-lcQ2l%5(`yI0SepP z4@PMA3~t{ilxK7ug7X1-rzB;jSo!OdJ%v52b(?CPbhtyCr9glUr1jI_w$Ux&tAi;Y>Wbc8x^>O)4GWA);*xU4N6d7cM`0w32V#=9jMIRc+@oI0vjiJ51=R(*6+$`yqR@6@BNYkK!M_RqU;jt%Ks*o`AI>>Mi6K8v&-_Y|=FcV`%wAg#mAm z93NLa2Zh&?XhPaH(_k3oaq-WeeW5FfY_y3@8KSnV!@Z$>H|SRPyZHa0gq?tAl+ix#Pf|N^4@5RpR9h% z2Cunsu*JbPXlqOpCc(Im!OM%e?BQ=frK*2}QcEP9;%$j9RDgg*K7k}`B-QW@#L9FL z)BBhOZpv3fBV1#GF3|wDAT0Jq?&^wq#}07RZX2q`y}(~`p(Z{C5BU$AUXO9#KJw;AMv>~*Sn zE`MLt)#^PB+$vwgQpUe*3Dz`Wv54R+!wS!qJ_CvSBTO-U;r?c1e$_}I)%(}oy~D^w zqCmYhz!an0Q|J3~>Vg`BhGUpEr_wz;GWWqo{Ts68kM(eQsKwg96e;32sj~eyQK3Q+ zS)(q3yN(80Ym{?-A0|qozyBrCP@-Y(e~nd7=r0*D=jswRXZoa3%T3);>B>K5;}Lp} zXMigg#0uQBHfKZ|Pwsu6a-&2(dM6s~msEiH=yHvpVW5F9PnVnUIKay_x3bPUMw}!C z4E6uE>5bG;TGX{|xh-90%D)ERbUN#Adj;;ad2NxCzdn!aItcYG>EJ9{zpyioi+6Qr zcMuma_~Ig3?m{SbC+u{DRc_BCyt 
z)58z<7rzOdMzPp!mm@ZZ4D||scnp|0I7XhLNCXxYt$6Sj*bQnz%H8lDMGK>;@c*@_ z^+SxCpYH@FxMzE5K$zqYnvdh3KLz-@oZ!UYL`uYBl+>IR&)FO=Hh1;6(VJ1&W(Amf z2?Ph`odlBjN(wePCuIQ|%o?>8^cjb9Zt)GG9{6#UF-lNa~?_<{P z&l)tBXea3*<7$nChK+Ao7HCWPC;+55U>_G)$=XFjP!F{~p-ik;Ne$X%*KlY% zRPoC6Z!}#)rv85HT_Sl}lr!+KY8^$03ppnUY!fc&k{f+2N?bf=wbSLaQmTzHMlz7< zOwXs$`n*OX*U1th20|#K6nB`KKyyfBbOZ22c}u zW}b2UqXWTEqNGMVW2IK%mRS_AZy+!F^X)4o zHKvdA)IH=i>OMr`SGw4aoul!7W%~#F54ki?E-{huf#iQ(BICa=5%2s-;;)yuLoMbQ zFf>E0WrJ8p2X(jmc@7q_BM;`!bHyi6&$1KW{V`wKcNlnlo1`ES`2Wyu7&8}e&QcDg zW4d&Y)W5X=7#^G?HFW_5j*vI)e%t$BfmbR`G5u>H!;g&Mvq8j%7Xb2td8_B6Cg*9f z#W}m=`JoT|apC_cb^KbBtDvbbaKDF87ql#Ld~HBsMgLCxS*lm{3{1Dtq4)tY(M8XZvp{_A?eQpsGB69LynVuFw&Yj zqE@(l?#P9YFW5(Bt|)2CO${Z<+;Y z3#r!zj~gER&JZ{q177t`zn&p$)GeXL&|gXcun3cy71J4>km`|=$%#F0ss2RF_t_^^ zjC;m*c5vjyjdMUYVLgFE$jD;sqq17n9py2K9FD(E1R4aZTL(se zKpZntJOPd4JSKV>>@Y-a@JvD9;r49cE{lxFz$-plt{vTrCl`D(9!5U>@%%2R>v?_P zHp$v#r_a-*(H0bH|M^v)CerZn;)Ef;7CF$5a>-^XVG>X~=(rdw5&coz?t&k#_D0&4 zd;ND0+wVzz5B_o2_$Cy89((t$(cZ=@eLInOG?U1-QH6P4LxaQANrVkcc9Np<*2GLb z4eHtQWExvEB*ro3zjof9ok99B%Gk0?XN1q2r2{$ zxf94>oJNdwxgYvi?y*G>k;hBhK+q^2>KY@@pvd#U8ZcFj<%oaI#0xvRVXkqS3$I^6 zMO~5k3D1O##1bB}*ZL?B#Q0n{#Q4<&f#Iw$5FbQ<+T}+49aclWoUXu0|Fr$_^T)SV zdw-T#oeC=i^hEDu>8P{=YG^w0M#1TFlDSf7-J@rwQi?KG>9Ut^hi>#@Y7$FwuFr`u zdG2?f;YbcRqj>EO0K_=S4Eh@=(8K5(UwbR#-^r3SC+`~G_Nbhh=xoTesBtfEN?Q5; z=kn`26_3UAEdu8$b4;PY0j3Yw1#m^w=w+LF)F+RaAU+C08hynTRQ(qKQgpwQ^^=Zz{x}%b})=Q(mPThG1G_2dLMn?5I5NDsm=|)(FEb{Sp>NvIKwKkr1DoIfyB=3Y&je&tRMEi6`;c>>_rT+@Y2MX%eo_ zr1d%c4P{2O{kHDi`iI?;D~sA*@sZsx+6>Dk&#XY=wsy#yM2oFxAR_GcCHZjO3;p!4 z_Ddo^#8n^FMOaUMm(g5Q>Oaotq*YaZRZrVadFs;smqGK^*#UDu$L%P+vcR%R%I*a2 z_-`k^GCuHzKh%AN6xZK<<6>HREUS#U|IEy2&17|z;7zyLAD7^B6ttopElKU9Loc$_ zt24N^fKs`gjik>>k*B^~xv8zhrGL)+XeH330SrmMZkEQ~y0}gD8M8LqbX~BgUg$B? 
zg&x7m@uK6){a!WgPUQ)TmKJ_TVdQ)kS^@480a#1`dj5>j1$(|)H7c`=rCLi*qYr9SIw$?n2u+!Qr(?JT;aTZ&!aKa=HN{ye@fSC2tg_0G8AK zvsyvP_Z|1?L5zlb(>F?}v<9&6=?RrEPF0%$ zhZ=9C97(x_X#?jQ>|u%OYrts`k6EP?&HQ~Bci)uA5q>+>IgB$2tW8!B>5T%ANPhe~ z4#BRYt%{*+aowdODa-QgCnly6PTVAXyTPL?qD-Is=zo6P!)UV*cALfQaB?Q~wpl`; zCS@=Hiae2`Md?GJWXCeI65DS7`KiI+9$@SR=>`0)XHk$J%27<#i3$$@9)674ul0S-kOw5%~4UYqqR z#j{y-kz>MPSp$P zd#|_{1I)-sjSev5N6A7s(r@Pq?cGObtCh8xb2 zu?uS+QWG#Q&CQ%DZf$TvN%V9$!x9(8f~MpKaMY?`xst4fkz}nUVdFr1QY+n1eXe8j zInQC6tRg^FiQ;__DQb8uWTn71oyMT^-d1Pm0j|I9YK09TSn(Ynllol;Z&ERsDq!#h zv$Zc&W*{FpWeH)+iwemU0~z}DnKiiIkSTDXW1MISTx9jq2GW!ysj761cjd_>gWZlf zVopf1z__j7vQ@obF+jm91xgs5`;dDm>-ZgJ1-baxzA^*-L6-XEL2Z0j78IFYRXFsCq_i`FFDFD?=2Vkb$@s$0T#GvI zFaSM~EQygGXkv%9ACs@VqrGL!)n%{No7kh@duipEv&#{y7J>wLupfYwIXR?bRS@nR z4*>PQ?`1)6j@GtzG*!ORYgJx7Zieu?OBse-?+bZ}hA4w?$PSo(?~Hh6`YZ%7uBI8& zARtf_qzjdLbMu@Dx3yrM_2YMX~QObEo3n@ZM%adS>WljbdYa$^Dxk*Dg=_gFXPrZGpFVz zR>c>8FQp<(%x5<*vygZB#nz%d#%lGMp*1d;rLC|1HQXa(oLoX4V2@QlJo^|XkAJ-t zi)i}rl&4c;rzHRvWIP*;e+fNKD>>{}=et*&C&dmN1V6Ftx3~+z&KEx>KbVz-A01UZ z2b*<0t9)XSB69xdC6|JNg+(wHrM9UIvOfAbEm7t_da2IY6zEsl6-(eqcE?m@5IdB7@d;k){H zm9#SMDn3v&QlSE!y3t1(mCrq991H0l)SN}~b9M?KoAI9vgg~JZ3omR7M!yo?bk5GH zsAR!?uuCYreGe3QeZG0|(5WWY`*o0maW1?>(B}7-tP<}t!cI%6VW3x^i}l-nYRcVf zr10avYe0;jNXN&`m-K&1m|wpxv9pI84074PKg6l1IH^P(b1@c2kmH$y-^nfEm8;xW zdWV4HI&yV8n=+(9A(0CbR~gXPcUony*9GPhh523^S@h~;-d0hMJu9;$Ro(jgvIA;e zx!EC=GjQ+S(S5-uh!Cr;=mjDt|G)%GS7c@Z8=#7qWR!#DNfWo*dhbB%_M=1v+tlO| z{5$W*FQ}}?w~LI{6CX8Q#bI@$BuKx^qK=3&Y8$rvFg(SSeXEr}K(i~%ZP1p{0x~~h zrfs%}2(zU9VKq1C79=N_nr1N>fXCQ&+aap^HxLSz3$SM>04_iJ+#M2HCHf6UB)+RTx5 z_Ze` zyFK1*S(qp6%9dq0d?lfOo&CKyOT=e1mBR5@mT7CP5?M~nZO)VJ+>Va6u|Tlljn>i! 
z^d^UJ)&s9;N15x)%)xIz+_Eq>1wE671*2+F5vNN=(mA(G4|IoI6qZ{*1^WmX-Za)w zsu0yl=eYH~aCvQ+3J#i*d-~I@r`%@=@p)AH9Pbx2fy0JPD%!_>RR;$Qr>U{@4#OBF-yVEuB#Jp_Vse)$@(BFIgulUe?sec9NH5M^!4=M>6kA zO@C$yc|8hkF@a&0hco&NnRf6Y>j}igRsMhg=n7B-( z8>2Zls>*rfo7)@=1^|5X8^eHDi5*iP%tXr1pT=npw*O#6g@{^$$A|~iyVP)i;X@38gE^Kiiwp~VSfY4F#Rz%;a z(2xsnjhAj*f?6LuV%a~8o^8-d?~0N5VA!DXDYhMQ=gZruZcFLB(#V+90CmY*zB;XA z`B$}p3CYV4eXWa*SKIqITE=DZgKlbmc`EY7$#g#UjU9nY()ob?=zNx0r?Y^7ANVQ2 z6l+42Jm|XI6Z6)P?4_atu2DSxh-?^C(Cu;-++2+^G&XN~k&LQYcJvFR&uZQ5st`@K zEqQiZa5Z7!OvL9G<3;>;R_hH^StP4WU zw~5sz%%$y>*@uc3rqXYBxUK~3l&QBtA46mJ2y=Vv(1q4iwD|En7sJpLyu>v5Tq2K| zRGK^_k8n6Mw)m(uSD{OjdFyxGKS+f><>_8l7dX~C&msd0%=Y*Z@Y_km#a3|DPd4A1 zSLU>GRk!qu^@(`dmNpfA{bx7!WA;o{-M>trU80NQ8NW(3ys}D)YHaVAJFSfG7N~w$ zeyTNiTOMB~aWVDMBz%_J^v%=gs3Y6Q zY|D+^au%UazOu76nRRkM!K&ji)vnThWuz=wh-Oj>`(wH}rTuwiUXH(TNQWkG?eB}5 z4%A=!II+B-nyy~5ZK%S-@IV(?^Lc8lX|wa~mul3vT~;?0nMK8yos<2SbC@q*n0QIP zQ{p3jMCaJe%FD82FTsSpFjNO3TegN_FEoi?(lPbqVly0fiRBlJl+ZF*RZ50 z(R-^PX~qmSx&iEqxC349wDj$u-_vj#)S=ZuKvqCZ4BL0!DiNuo$?faghO^`itpk@? zZhR(N4b+@fS`JMN*u%JmeDFvro}Ne8?zagd1C|{n(<+`wOJcm?@6DFlBoF*_XF}CX1NN&|D`f z92XnGY7BS3j}@3P6OBo~b1Vz3;If=Syk6S-TG364!Q~K${so0pl*K3ew01x+6%0~w zk5lwb1t}(o0y6Li;i7bW>wv zz7w@ZP);j&mVC`|J=slVyL1B4E|78GxW=?_fe(;8lwgmG|PB(j|F8r zA(F(%l}YNmY|`MJ47up{co3z771?W=A6}M2lL6htyDkwv_K~0OOWvMN=|O56eCZJ+ zCkP|K{>#J)X0>w+z2ClRoYs%5DJ`QY6^?HN;ld(D4QEuv(nmj@OFLx=`*MJ-O+K!? 
zSQcN3Es%V;64fLy#jqe^>GQBks?^|;WMv7qMDo^Z6qA3;*i1o1$~`X}p!F8mC@a)h zp3Spta1oq1mNIA=ENgKgO{(|)PC6Xy?E02WwRJ^b8;VX|Qt)0OITWb|xeDvmIxm`< zH1b`Odj?H6_>S}{*o|RI{!#wH)mn^z+%1=KO#(=oh508*Y(!4xsXW6zXwx7TOp{g9 zY2JDLC|f!shfh023R}~J1ZKF2s@uv*2mV}op?Bn-lwji;g+I}(I8M5}s402gay7;d z$u$#liEE<#4JW-ns;Rg^_ZRWEn@{CjH`-QvgPp$8@%1O;vhvyB5H0jPHEnd(P?6%dk_X!K)=_bGIUxod0b3?eq@~Z@ zqjn8s)-fdGJiF8w2dmV=%hum@;8_Rq@)Q&79cx6&BL!>ngLq-xv-l%?7s(u9Irben zb(KE5w4y>o=4S&p*-}~X_b!{%qc?Su>j$Qd8#!0EwIx5b--tth@<_@A@%@mhA5dlP zFU(PkxNgT2M4PECa5&)#T3^r?k6&JL=N_EM#49iwb6UM}Ja+y0!T$TZwMixy`b^my z-8jVu6&E_U_V{P3HH0IGT|3oAnxByG;2NauaK&dnn1hJxI{RVjDj{(@?VyIST(*;T zazFq9k|9+{D0MhvUTXD;y?vyEs6uM@Iy$XPuj+Vjj6P^4=^~nOjC+dH{d|q{a*pM3 zA5PrBG^Rysy1NM4WH^k0FI(uVO6?OC_H#-Gjf$)4-_!Uw4Pd3II_(0OHG1FDi24M( zW*XI(pVhU%P~Sw6}dH&^Sl1MLs`s%&3}3c8ya zXV0zu@QY285WKl!+^A<#W(lp6?{w*Ng-K1iUb!6)SD1Bi``JDW{6iEJb`?j(slCs@ zUC>NRR4q!oyAvncm0@+xk?P!&Oi)97gR9EIN438{^SjE_|W58qU|&Zb(rXZ1ECmnPa(}km=FRkYk}@Mv`)X z!7OkODI>`4i$*#0iB8pRLm7si*-(%1J+UCQ+)2B_Q+XEJmV`?@l;}_r8FAm#l8wDj zY{0A~Gw7mY|VA6TH*%B z-&7!4X?k`6eM!*YHSVPbXYlPZn}j>`R=CD=0o{HR9`_qK)G%ehw{Wo=AC7ES4Lhohbzz%ysq1 z+rCuyYf!akj;z}XK{4RLLl^o=;a5Hn&oO}SPO^y;%cY~5pG>WV!Yw%vy!MS}@9U&1 zz~!^92z;>;ZtfsQJqD# z;OdwXh0VA^?=8m_)w!>taq;x092uUd`})={sxE5{{8nxE@>yio&wC;$y!OMrpy~Dd zy$3*;u)MJZDgH0BKiZ^-AfI!;AK8IC-bD1Z3GA*Myax0|qQTqb0Q?8$oRIJAb%TWz z5r@wM!h2n;N8G~P^blhNEJ2|u4(1pYGITIa+&J_;LkmE1ej9X;Pklo;N{?OJ-!j{B z&*>(ieiqdS?gZ!NPn>*F(~v1|O`!wkngY-(osZPht$a-U z!O3;BGA_C}GBe2|b9X%LW(1r`KErKJTaNlB5AF6r(r1?eT11(sc&i{I~a&hMO$`g`0z-yC+| z_uiSgGp~8g%p7m!-d1j^*}<1ix6c|urF%Pdm6NV8+ohci`-!sqbX&+bl4{%N_wgrs z^d$Vg4l};*X-^qT4D(rr)`qkXXB-s5;vw-xB^wWX)@JhFYu=$g+7cysY}2Tw&|hO; zR_M?|pTij=Xq77qD;celqrvcG&5KXL`6WWv;P+b!jlKE1?9bbYdnHDuhg-quPgZ>F zTb)>5LGwxGG37R4mv|}J(VVr>USV5dd%x8RzH_Zeq-Lrs<55*oZVAx)q}F1$X!Q&ofPugT?1Ibhm)?lN4m>NEW+Kl) z!&M(pFpM0wVyIf}l19}8)C$XTWfX)3zUG?93pWS&P2JMfXWke%UXyTL>WS*b$12zQ z6rR{{W<4!3O|qXgGHDq^(K3J*51TKNL~XJqs$2-*(!rPdTq>TiL(CcaVduVy5+M`e 
zx#@SngFROcfYD$bM8q3I73e&U9yp9|Rk|R?X1h{D;=L$;2sD;2&u3+i`!KvNT4ADO z7%?S>rLY6%a%ePicebR-$r>dzS3i86Ayjbwjx!};Ak)D`9nMsaEM>fDxK}g}M=DJ4 zVfCC#{A^a?i*oalaNua6wT?D%!uPYrecU@6FZe__YcK5N;S;(is4KdYW22oqcD_p) zD-jT)4xut-UZzgL+NGzaeM?IzeV*3p(l~)90SlkxH}sB|kI!Po$Jp4s?&@KCcGj&~ zC2ceAw~XnuC_pbr)J0sjOX^(S!fx<|B-r<`o#zfxKShuvg_gr`33f3PkpA4$1W=deYS&_1x;bIOAI!`&o6%xVAZ#XP?yTF7_~wPcG*Fc5W~ zpaL8m?!#tZFX@0F9k*wu z%(&TOxIw#EgY4c*r+#o!b9od>Jzw7@R+)A^h1-Gd*awr!xM)zA>tQ?2k>%97htap7 z{hhh@0)j1u<{pfUB4>6xli7YP#fgf>cp%P_2#BqMaVn^pI+uQ^ov`G^S&kPOj&8Pj~! zlfs!lEAgcII>~j>&J7{0B27o@q1?DNY>OAcA6_)IywmE70>MqSa);WZALo3e+O6+} zOI;@HWJXg59ywX-yFS@}FdSBkl@3Xcn_&y3*$pbQogNZL zKfZ8JGS9Up-|Y0OG>>fMjgkD@!#Std-YP-fU$|!%pU9^0kB^NwRc{iz-UZ$vFv?7A zxd=`=B}*$_7IkI*QpH9)F`3(!Q?Fx_9uH`dHg$n^xl?906h&h^AuW)Wkh};BJIBXq z1ap&CO&(}5jIc+>8%*i8g^~KUjFVY~SWqXz?$XAXcb~m&&_RDUvG_EV!p3$sK4bEVQ-kFjd0$oJ`1Vn86B$m6`Ol&&%y`6g5>6-u|h z(%b%i+j!l%ZlFuC}3MMao^5p0|P*mUtKH21ibWcegtOioPY z1cWIC0$STHxlwv+ud-1R+IX$)s0J1g+?cIwk_G^oF4^`IHt(%QxXmpZ3wV2tn%l|Y ze1uoxwe*|(E+zcR;-@$l_bCp~xg{0X*`cVy9!_x0ufe`O@_xC4WwOD7_*y<^c#b6_ zD(}znpF7&xj0bK^1>U?*Um%H(uk%}kKF%6AOX#9hQl{KjI2Y^&0p}5{v>Wl|_3A`R zq-cqCkjgoJcBBTUs2Nf6JE@Aifzj@m%03nwR?>ZMk-#2cZ!lT^da09=&}3ce%+y8P z0iOI+>iNLvs}CiPZWp>=vC1{|_3FEKtzK;K;%BqED^BwWpaz-s=RxrS%}owz5%)Q4 z(FT*W=4dG&jdU@|3NQ?>7;i`GJy+~G%IrSv2qeCOrwC?Z>t1~-ff29z;#i{bmQDl+ zcU?Us5IG#t^2|>a863fyLc`E60bE%00@}TMzhBCFJc62`_N=LWc~p4r^U8_-!As*a zvkJ5`WDv{b$X4Q@n{@e2_tYd+f;|__!Z1T>c+6$8@N35(T&jP_qh^HXpTGCsnag*3 z&uv-Jcyyc{yK4zkkb9Lq)@uJ^kOoLCv9bJyHw5!dfAFwU%tovDu4J|Arw#YR^$qmE zp-U>AMurCunY|j1af;_I?J*RgSCdD1DsX4{<=L*9ly^i7fPHB9;xNu-KA$RhFVkbN z7QU0tF@v7YB{^1K{%J86E2CL}7E9QTqm54)C1=mFFRm@~m`KvLd%<<)Fmj+zihcjE z?M7``wZS>cv~T-rM^hnltTL)7k5P6UU#G>&PTwGARiKb%620h~0Z9WcPVZEau7bd< zW$T`KrhEVNR_!T}I8SAyLY*OoCl`hpJd)ZR+rQXvh!b#vDX`Gv(J9t=@Fqp8qlOh^ z1AbZg;$OxNfY%&kY?6RoQGrqAAoMB>s_$S)Q3TF6_iWyHn!IMLRt(*SuvH8y_3!tX z*$SmQ%YQakF>Ie=#atajL>8XHBP--U;^t`U`j6p$qOOTMv0GJl83P^MHnt#^pd33L z2FR+qZD&`HBvg^MT5S-jGvM2oZ!p|_mc6*{^w|-`v+`_q 
zLGH+94($2UIZx*_ml^~wa%m{8e6Q3> zv~1q_!kAo(Ua1#ru@(ZR|Kj_@%3?%nz7#x`5YK!&1~cz)9ZHcdR@3SF8srblt(mMW zNHoTGV_OmWU~#?Bzc$k^!?hk$QDX-t7vysjd-qozr!>wNdO@PF{cgjO@0_%U`9{of zSW=63+ry{o6ub^S`)J=HjW<|<@xpwv+?J}UbcZ?UWI7+V!sFrDA5(7x82RY&F`rh9 z^@zec($s>dWTW&@>!$QxPH8$mB^Kq*%X8w@Q~RXv<|&4syt}d3kDlq??tIgLqxE#V z9^sYuDF3540;0b@gP?OMbC{JYS{-2jRzB~2Mx1D?_ia7bC@bTjc0;}fezw46U(MAoGoQ_B-CIwXR=( zgz9`g67|BI59Y|jq>Y|&(x}w_R-~TefY0i_Y>G#Sc(dp)iauM>sb~<%WS^%XXVSOE zHn+E87AsVYPma>K(46;~s^g8;MlIWWs4APai?5)Az77@FvHp)95;0eOQ&T z2%&}eLgc2(Y%Ioa`wR*-nzJXfZeR=@NE=XNAxW@)n9ZhTOXtmLlHn-2B>x=_sBTBx@flcu$#@$Uw!(H zPJ!M$)EMD&%INuRkU;g`>)=db{~dr&OjJ)KD%b*ig4;WBXn*#}JzcRkl$=+kC~Iix z>A8(zs1FYl_*%s0iX$#wpdt;#LdVXR*%ejvyP6&$A)TZU6}Kd}o$g&!+zt!S}EQiUYLP_hkUgl?+%<8#;AZ>X5-9%D}ZlaWJk@wLgPfdRE zuJ@*YV?&~gLRkYTRPwNPbE2xd4_zVwrO+}wsw(U~(vfPDvBkkW%p#96v zb6z>y<$C270jH<7X^MVQ%9%TpYtX~!hCN_|yp}&f`rjgwuv3Fp-4IrMo>DvUxfA@Ji6kMNZ#GAab8OgxP!B|%&ZL{)v;~!+TWLbNh358HRx@>&>WD+XI@50jOL6$B>Q|KFgqfB^sL5W zw$JR1tHDf3c8rh~>Qt7Wx#n>5^Z2ma;Gl$WvdAML`$@RUC+iPW8Wozn4oQ??_Nn9r z?7*B(sXVO3ReIQ4<>I4MpRpO1vJw!wy=7L8Gwo}hpR0r)mz$53IUb&tkui*rwcqa* z>o~aX@9@#9zh7ch;iH!1xG&BPar=zg(<$*eG~{?9AOm*VLhm#p)JL+O%3VFuGB+WR*r*`#qwMm#uJ^Fnpv87 zx?4zH;#K3_OGc>>6VYmr?I1;0E;d@gCOJJx+FqLCs@(WUTc|G-YhO8g2f!VrThhV~ zlk6cF19nIPvH%W6h55rZ09`SpRKj2~}YzGss zu(Ov{Z+Mt$+LcG3ytc9X)RHm|6M-Dj60fHfG?I@%O%@4@G-S{Ceiyu`@PZhI*K*=& z14bZ=5sp|pMImA| zV|nbc95EFeYye_l9gh6)k`^tylG}>owmI{CHkFP$0a9!*^)X7vrR6MPN4p(9A!^=7 zWoJlheY(41%;&zi7bz?1Y!)p6lE27T_yow!uyW@+`lSg1b!l!LRfA<$sHwRFg7HeZ z?2SN}%3kGU;huE&iZlb|PpZZ#K>0A}%GsIwb5K;)_r51h&8-^w_T<|VI<4HCxP-1l z#MGQoadzT~O3M0m&{t(cM=$XCPHMP;GGlfpqWil(_Y>&ds79MAkj83rzMJS(Nvc%b zAJgdol%|D<^ly^1;wK-Z8Q$|B!V4g*?2Uv}#zf%k4-WEP9j&_HKIC$R)iO1{OGPe9kU9qFZ?ZRq}kSV;E`+Z7vE7N?K zc!ts8c&aAA9FvjdhFb5QR!aKOMD%&qOUgv{+g#^^f|=)A%PbO)g_m+Mjh^E;YOgDf z5D^OSZ5A*>P3QvBW3bDqhWsNQs(-h5gR~kr=an%MsITA(F}}`IYhHYIVvpWWpYP5z zwy5rXrA(4*jVCewO;=5c13Bs5m;@%rdO?k8ghmJQZh(a8%oor6^AJ{emMI?8+m^&~ zp+|2=OC-o%VeAVQ+YkM8}I^Lzb$h5DZlrL 
zv%-T23t%CKV?>2;R}PuW9~NRQ&P|~CF!A%x9`oc?tzisU4swy&3)k!t6x0$JlPd9v zy;r|a3FULQzM&Wym6Oe<2tPAdRb+0LV`biab%?dNUVg)T=-^4~0}>M>6YQ)@xMZ&` zpWEOy*Bt`9c@I~=8K3sv(s`!u&Ob=wJ|cn_!iB?}^Mzx8$7>RL0i7Bd>@{o3F|o{P z&?luiy@V+@>)YjuUj5QgJzqi6&D>ypB{6w$W!KWIu~9m0bJ92B!8QT(=Dvxz3bY}G zRDvbdmyR$gFjeBYv~g9xCPB}&^f$wRozUfKSRJIvdy{|N)_R2D8X+j$-RG|7@Z%RcwNIC0*q)$lb94)Viw&&o2ZHerVb;*C7+$T456hik`0LR$E+BkMP(nDz`N zy$OmZ~2tg$z(*LL=)^KBRZ_u_Qds z9+|98v^o{DO}Rvmh_-{|rx(3hiQ$b!>vf7QW6$S|Cs7R)*Y8_@CCg4Jwu_!Fw|NS~ z0p08(KFj7)SMOxuQz0-Bf&zph7N^{No2Y^LuAz3Tgng$+Q6;+HPZ$fsW)VZpJ|`_{ zQ*ga$qnazd+p@y#RoP(&Pf@oX?yc{e4zoy@XFl-w-uq(+XkcQuPfh=bOGnytdCx~b)J3l{SDI;V|`!K`4> zhu95D%SsyAE}fBY9z5uMj)Y5JV^&M)u%kmbf=}s5YI*6m;s*2XF*}Vf>N0Fz+E?