From 1ef3057110f17d1da65ac8361366ecb2d2e6dc3e Mon Sep 17 00:00:00 2001 From: Sean Whalen Date: Wed, 2 Oct 2024 21:19:57 -0400 Subject: [PATCH] 8.15.1 - Proper IMAP namespace fix (Closes issue #557 and issue #563) - Require `mailsuite>=1.9.17` - Revert PR #552 - Add pre-flight check for nameservers (PR #562 closes issue #543) - Reformat code with `ruff` --- .github/workflows/python-tests.yml | 2 +- CHANGELOG.md | 9 + build.sh | 2 +- docs/source/conf.py | 33 +- parsedmarc/__init__.py | 886 ++++++++++-------- parsedmarc/cli.py | 1185 +++++++++++++------------ parsedmarc/elastic.py | 406 +++++---- parsedmarc/gelf.py | 23 +- parsedmarc/kafkaclient.py | 80 +- parsedmarc/loganalytics.py | 119 ++- parsedmarc/mail/__init__.py | 12 +- parsedmarc/mail/gmail.py | 78 +- parsedmarc/mail/graph.py | 207 ++--- parsedmarc/mail/imap.py | 69 +- parsedmarc/mail/mailbox_connection.py | 6 +- parsedmarc/mail/maildir.py | 27 +- parsedmarc/opensearch.py | 400 +++++---- parsedmarc/s3.py | 32 +- parsedmarc/splunk.py | 58 +- parsedmarc/syslog.py | 12 +- parsedmarc/utils.py | 184 ++-- parsedmarc/webhook.py | 9 +- pyproject.toml | 2 +- requirements.txt | 3 +- senders/README.md | 22 - senders/__init__.py | 0 senders/senders.sqlite | Bin 208896 -> 0 bytes senders/updatedb.py | 91 -- tests.py | 45 +- 29 files changed, 2093 insertions(+), 1909 deletions(-) delete mode 100644 senders/README.md delete mode 100644 senders/__init__.py delete mode 100644 senders/senders.sqlite delete mode 100644 senders/updatedb.py diff --git a/.github/workflows/python-tests.yml b/.github/workflows/python-tests.yml index 4c7d9439..d5946904 100644 --- a/.github/workflows/python-tests.yml +++ b/.github/workflows/python-tests.yml @@ -46,7 +46,7 @@ jobs: make html - name: Check code style run: | - ruff check *.py parsedmarc/*.py + ruff check . - name: Run unit tests run: | coverage run tests.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 0022640d..1a1d6cd1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,15 @@ Changelog ========= +8.15.1 +------ + +- Proper IMAP namespace fix (Closes issue #557 and issue #563) + - Require `mailsuite>=1.9.17` + - Revert PR #552 +- Add pre-flight check for nameservers (PR #562 closes issue #543) +- Reformat code with `ruff` + 8.15.0 ------ diff --git a/build.sh b/build.sh index 658ff78e..ce37510d 100755 --- a/build.sh +++ b/build.sh @@ -8,7 +8,7 @@ fi . venv/bin/activate pip install -U -r requirements.txt -flake8 parsedmarc +ruff format . cd docs make clean make html diff --git a/docs/source/conf.py b/docs/source/conf.py index 8a3acd3b..a83821ac 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -12,15 +12,16 @@ # import os import sys + sys.path.insert(0, os.path.abspath(os.path.join("..", ".."))) from parsedmarc import __version__ # -- Project information ----------------------------------------------------- -project = 'parsedmarc' -copyright = '2018 - 2023, Sean Whalen and contributors' -author = 'Sean Whalen and contributors' +project = "parsedmarc" +copyright = "2018 - 2023, Sean Whalen and contributors" +author = "Sean Whalen and contributors" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -36,13 +37,15 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
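A rough sketch of the nameserver pre-flight check described in the changelog entry above (PR #562) is shown below. It is illustrative only and not necessarily the code that PR adds; it assumes `get_reverse_dns()` from `parsedmarc.utils` (which `cli.py` imports later in this patch) accepts `nameservers` and `timeout` keyword arguments and returns `None` on failure. The helper name `preflight_dns_check` and the `1.1.1.1` test address are invented for the example.

```python
# Illustrative sketch only -- not necessarily the code added by PR #562.
# Assumes get_reverse_dns(ip, nameservers=..., timeout=...) returns None
# when the lookup fails; the helper name and test address are invented.
from parsedmarc.log import logger
from parsedmarc.utils import get_reverse_dns


def preflight_dns_check(nameservers=None, timeout=6.0):
    """Return True if the configured nameservers answer a test query."""
    result = get_reverse_dns("1.1.1.1", nameservers=nameservers, timeout=timeout)
    if result is None:
        logger.critical("DNS pre-flight check failed; check the nameservers setting")
        return False
    return True
```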
-extensions = ['sphinx.ext.autodoc', - 'sphinx.ext.doctest', - 'sphinx.ext.todo', - 'sphinx.ext.viewcode', - 'sphinx.ext.githubpages', - 'sphinx.ext.napoleon', - 'myst_parser'] +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.doctest", + "sphinx.ext.todo", + "sphinx.ext.viewcode", + "sphinx.ext.githubpages", + "sphinx.ext.napoleon", + "myst_parser", +] myst_enable_extensions = [ "amsmath", @@ -64,7 +67,7 @@ autoclass_content = "init" # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffixes of source filenames. @@ -81,13 +84,11 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'sphinx_rtd_theme' +html_theme = "sphinx_rtd_theme" -html_theme_options = { - 'globaltoc_collapse': False -} +html_theme_options = {"globaltoc_collapse": False} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] diff --git a/parsedmarc/__init__.py b/parsedmarc/__init__.py index 4354c0ff..3fa334bd 100644 --- a/parsedmarc/__init__.py +++ b/parsedmarc/__init__.py @@ -34,7 +34,7 @@ from parsedmarc.utils import parse_email from parsedmarc.utils import timestamp_to_human, human_timestamp_to_datetime -__version__ = "8.15.0" +__version__ = "8.15.1" logger.debug("parsedmarc v{0}".format(__version__)) @@ -43,8 +43,8 @@ xml_schema_regex = re.compile(r"", re.MULTILINE) text_report_regex = re.compile(r"\s*([a-zA-Z\s]+):\s(.+)", re.MULTILINE) -MAGIC_ZIP = b"\x50\x4B\x03\x04" -MAGIC_GZIP = b"\x1F\x8B" +MAGIC_ZIP = b"\x50\x4b\x03\x04" +MAGIC_GZIP = b"\x1f\x8b" MAGIC_XML = b"\x3c\x3f\x78\x6d\x6c\x20" MAGIC_JSON = b"\7b" @@ -72,12 +72,16 @@ class InvalidForensicReport(InvalidDMARCReport): """Raised when an invalid DMARC forensic report is encountered""" -def _parse_report_record(record, ip_db_path=None, - always_use_local_files=False, - reverse_dns_map_path=None, - reverse_dns_map_url=None, - offline=False, - nameservers=None, dns_timeout=2.0): +def _parse_report_record( + record, + ip_db_path=None, + always_use_local_files=False, + reverse_dns_map_path=None, + reverse_dns_map_url=None, + offline=False, + nameservers=None, + dns_timeout=2.0, +): """ Converts a record from a DMARC aggregate report into a more consistent format @@ -110,15 +114,19 @@ def _parse_report_record(record, ip_db_path=None, reverse_dns_map=REVERSE_DNS_MAP, offline=offline, nameservers=nameservers, - timeout=dns_timeout) + timeout=dns_timeout, + ) new_record["source"] = new_record_source new_record["count"] = int(record["row"]["count"]) policy_evaluated = record["row"]["policy_evaluated"].copy() - new_policy_evaluated = OrderedDict([("disposition", "none"), - ("dkim", "fail"), - ("spf", "fail"), - ("policy_override_reasons", []) - ]) + new_policy_evaluated = OrderedDict( + [ + ("disposition", "none"), + ("dkim", "fail"), + ("spf", "fail"), + ("policy_override_reasons", []), + ] + ) if "disposition" in policy_evaluated: new_policy_evaluated["disposition"] = policy_evaluated["disposition"] if new_policy_evaluated["disposition"].strip().lower() == "pass": @@ -128,10 +136,14 @@ def _parse_report_record(record, ip_db_path=None, if "spf" in policy_evaluated: new_policy_evaluated["spf"] = policy_evaluated["spf"] reasons = [] - spf_aligned = 
policy_evaluated["spf"] is not None and policy_evaluated[ - "spf"].lower() == "pass" - dkim_aligned = policy_evaluated["dkim"] is not None and policy_evaluated[ - "dkim"].lower() == "pass" + spf_aligned = ( + policy_evaluated["spf"] is not None + and policy_evaluated["spf"].lower() == "pass" + ) + dkim_aligned = ( + policy_evaluated["dkim"] is not None + and policy_evaluated["dkim"].lower() == "pass" + ) dmarc_aligned = spf_aligned or dkim_aligned new_record["alignment"] = dict() new_record["alignment"]["spf"] = spf_aligned @@ -155,7 +167,7 @@ def _parse_report_record(record, ip_db_path=None, if type(new_record["identifiers"]["header_from"]) is str: lowered_from = new_record["identifiers"]["header_from"].lower() else: - lowered_from = '' + lowered_from = "" new_record["identifiers"]["header_from"] = lowered_from if record["auth_results"] is not None: auth_results = record["auth_results"].copy() @@ -231,29 +243,30 @@ def _parse_smtp_tls_failure_details(failure_details): ) if "sending-mta-ip" in failure_details: - new_failure_details["sending_mta_ip"] = failure_details[ - "sending-mta-ip"] + new_failure_details["sending_mta_ip"] = failure_details["sending-mta-ip"] if "receiving-ip" in failure_details: - new_failure_details["receiving_ip"] = failure_details[ - "receiving-ip"] + new_failure_details["receiving_ip"] = failure_details["receiving-ip"] if "receiving-mx-hostname" in failure_details: new_failure_details["receiving_mx_hostname"] = failure_details[ - "receiving-mx-hostname"] + "receiving-mx-hostname" + ] if "receiving-mx-helo" in failure_details: new_failure_details["receiving_mx_helo"] = failure_details[ - "receiving-mx-helo"] + "receiving-mx-helo" + ] if "additional-info-uri" in failure_details: new_failure_details["additional_info_uri"] = failure_details[ - "additional-info-uri"] + "additional-info-uri" + ] if "failure-reason-code" in failure_details: new_failure_details["failure_reason_code"] = failure_details[ - "failure-reason-code"] + "failure-reason-code" + ] return new_failure_details except KeyError as e: - raise InvalidSMTPTLSReport(f"Missing required failure details field:" - f" {e}") + raise InvalidSMTPTLSReport(f"Missing required failure details field:" f" {e}") except Exception as e: raise InvalidSMTPTLSReport(str(e)) @@ -265,29 +278,26 @@ def _parse_smtp_tls_report_policy(policy): policy_type = policy["policy"]["policy-type"] failure_details = [] if policy_type not in policy_types: - raise InvalidSMTPTLSReport(f"Invalid policy type " - f"{policy_type}") - new_policy = OrderedDict(policy_domain=policy_domain, - policy_type=policy_type) + raise InvalidSMTPTLSReport(f"Invalid policy type " f"{policy_type}") + new_policy = OrderedDict(policy_domain=policy_domain, policy_type=policy_type) if "policy-string" in policy["policy"]: if isinstance(policy["policy"]["policy-string"], list): if len(policy["policy"]["policy-string"]) > 0: - new_policy["policy_strings"] = policy["policy"][ - "policy-string"] + new_policy["policy_strings"] = policy["policy"]["policy-string"] if "mx-host-pattern" in policy["policy"]: if isinstance(policy["policy"]["mx-host-pattern"], list): if len(policy["policy"]["mx-host-pattern"]) > 0: - new_policy["mx_host_patterns"] = policy["policy"][ - "mx-host-pattern"] + new_policy["mx_host_patterns"] = policy["policy"]["mx-host-pattern"] new_policy["successful_session_count"] = policy["summary"][ - "total-successful-session-count"] + "total-successful-session-count" + ] new_policy["failed_session_count"] = policy["summary"][ - "total-failure-session-count"] + 
"total-failure-session-count" + ] if "failure-details" in policy: for details in policy["failure-details"]: - failure_details.append(_parse_smtp_tls_failure_details( - details)) + failure_details.append(_parse_smtp_tls_failure_details(details)) new_policy["failure_details"] = failure_details return new_policy @@ -300,9 +310,13 @@ def _parse_smtp_tls_report_policy(policy): def parse_smtp_tls_report_json(report): """Parses and validates an SMTP TLS report""" - required_fields = ["organization-name", "date-range", - "contact-info", "report-id", - "policies"] + required_fields = [ + "organization-name", + "date-range", + "contact-info", + "report-id", + "policies", + ] try: policies = [] @@ -312,8 +326,9 @@ def parse_smtp_tls_report_json(report): raise Exception(f"Missing required field: {required_field}]") if not isinstance(report["policies"], list): policies_type = type(report["policies"]) - raise InvalidSMTPTLSReport(f"policies must be a list, " - f"not {policies_type}") + raise InvalidSMTPTLSReport( + f"policies must be a list, " f"not {policies_type}" + ) for policy in report["policies"]: policies.append(_parse_smtp_tls_report_policy(policy)) @@ -323,7 +338,7 @@ def parse_smtp_tls_report_json(report): end_date=report["date-range"]["end-datetime"], contact_info=report["contact-info"], report_id=report["report-id"], - policies=policies + policies=policies, ) return new_report @@ -346,18 +361,18 @@ def parsed_smtp_tls_reports_to_csv_rows(reports): organization_name=report["organization_name"], begin_date=report["begin_date"], end_date=report["end_date"], - report_id=report["report_id"] + report_id=report["report_id"], ) record = common_fields.copy() for policy in report["policies"]: if "policy_strings" in policy: record["policy_strings"] = "|".join(policy["policy_strings"]) if "mx_host_patterns" in policy: - record["mx_host_patterns"] = "|".join( - policy["mx_host_patterns"]) + record["mx_host_patterns"] = "|".join(policy["mx_host_patterns"]) successful_record = record.copy() successful_record["successful_session_count"] = policy[ - "successful_session_count"] + "successful_session_count" + ] rows.append(successful_record) if "failure_details" in policy: for failure_details in policy["failure_details"]: @@ -381,12 +396,25 @@ def parsed_smtp_tls_reports_to_csv(reports): str: Parsed aggregate report data in flat CSV format, including headers """ - fields = ["organization_name", "begin_date", "end_date", "report_id", - "result_type", "successful_session_count", - "failed_session_count", "policy_domain", "policy_type", - "policy_strings", "mx_host_patterns", "sending_mta_ip", - "receiving_ip", "receiving_mx_hostname", "receiving_mx_helo", - "additional_info_uri", "failure_reason_code"] + fields = [ + "organization_name", + "begin_date", + "end_date", + "report_id", + "result_type", + "successful_session_count", + "failed_session_count", + "policy_domain", + "policy_type", + "policy_strings", + "mx_host_patterns", + "sending_mta_ip", + "receiving_ip", + "receiving_mx_hostname", + "receiving_mx_helo", + "additional_info_uri", + "failure_reason_code", + ] csv_file_object = StringIO(newline="\n") writer = DictWriter(csv_file_object, fields) @@ -402,15 +430,16 @@ def parsed_smtp_tls_reports_to_csv(reports): def parse_aggregate_report_xml( - xml, - ip_db_path=None, - always_use_local_files=False, - reverse_dns_map_path=None, - reverse_dns_map_url=None, - offline=False, - nameservers=None, - timeout=2.0, - keep_alive=None): + xml, + ip_db_path=None, + always_use_local_files=False, + 
reverse_dns_map_path=None, + reverse_dns_map_url=None, + offline=False, + nameservers=None, + timeout=2.0, + keep_alive=None, +): """Parses a DMARC XML report string and returns a consistent OrderedDict Args: @@ -431,26 +460,27 @@ def parse_aggregate_report_xml( errors = [] # Parse XML and recover from errors if isinstance(xml, bytes): - xml = xml.decode(errors='ignore') + xml = xml.decode(errors="ignore") try: xmltodict.parse(xml)["feedback"] except Exception as e: errors.append("Invalid XML: {0}".format(e.__str__())) try: tree = etree.parse( - BytesIO(xml.encode('utf-8')), - etree.XMLParser(recover=True, resolve_entities=False)) + BytesIO(xml.encode("utf-8")), + etree.XMLParser(recover=True, resolve_entities=False), + ) s = etree.tostring(tree) - xml = '' if s is None else s.decode('utf-8') + xml = "" if s is None else s.decode("utf-8") except Exception: - xml = u'' + xml = "" try: # Replace XML header (sometimes they are invalid) - xml = xml_header_regex.sub("", xml) + xml = xml_header_regex.sub('', xml) # Remove invalid schema tags - xml = xml_schema_regex.sub('', xml) + xml = xml_schema_regex.sub("", xml) report = xmltodict.parse(xml)["feedback"] report_metadata = report["report_metadata"] @@ -461,20 +491,21 @@ def parse_aggregate_report_xml( new_report_metadata = OrderedDict() if report_metadata["org_name"] is None: if report_metadata["email"] is not None: - report_metadata["org_name"] = report_metadata[ - "email"].split("@")[-1] + report_metadata["org_name"] = report_metadata["email"].split("@")[-1] org_name = report_metadata["org_name"] if org_name is not None and " " not in org_name: new_org_name = get_base_domain(org_name) if new_org_name is not None: org_name = new_org_name if not org_name: - logger.debug("Could not parse org_name from XML.\r\n{0}".format( - report.__str__() - )) - raise KeyError("Organization name is missing. \ + logger.debug( + "Could not parse org_name from XML.\r\n{0}".format(report.__str__()) + ) + raise KeyError( + "Organization name is missing. 
\ This field is a requirement for \ - saving the report") + saving the report" + ) new_report_metadata["org_name"] = org_name new_report_metadata["org_email"] = report_metadata["email"] extra = None @@ -483,11 +514,10 @@ def parse_aggregate_report_xml( new_report_metadata["org_extra_contact_info"] = extra new_report_metadata["report_id"] = report_metadata["report_id"] report_id = new_report_metadata["report_id"] - report_id = report_id.replace("<", - "").replace(">", "").split("@")[0] + report_id = report_id.replace("<", "").replace(">", "").split("@")[0] new_report_metadata["report_id"] = report_id date_range = report["report_metadata"]["date_range"] - if int(date_range["end"]) - int(date_range["begin"]) > 2*86400: + if int(date_range["end"]) - int(date_range["begin"]) > 2 * 86400: _error = "Time span > 24 hours - RFC 7489 section 7.2" errors.append(_error) date_range["begin"] = timestamp_to_human(date_range["begin"]) @@ -540,8 +570,7 @@ def parse_aggregate_report_xml( if keep_alive is not None and i > 0 and i % 20 == 0: logger.debug("Sending keepalive cmd") keep_alive() - logger.debug("Processed {0}/{1}".format( - i, len(report["record"]))) + logger.debug("Processed {0}/{1}".format(i, len(report["record"]))) try: report_record = _parse_report_record( report["record"][i], @@ -551,7 +580,8 @@ def parse_aggregate_report_xml( reverse_dns_map_path=reverse_dns_map_path, reverse_dns_map_url=reverse_dns_map_url, nameservers=nameservers, - dns_timeout=timeout) + dns_timeout=timeout, + ) records.append(report_record) except Exception as e: logger.warning("Could not parse record: {0}".format(e)) @@ -565,7 +595,8 @@ def parse_aggregate_report_xml( reverse_dns_map_url=reverse_dns_map_url, offline=offline, nameservers=nameservers, - dns_timeout=timeout) + dns_timeout=timeout, + ) records.append(report_record) new_report["records"] = records @@ -573,18 +604,15 @@ def parse_aggregate_report_xml( return new_report except expat.ExpatError as error: - raise InvalidAggregateReport( - "Invalid XML: {0}".format(error.__str__())) + raise InvalidAggregateReport("Invalid XML: {0}".format(error.__str__())) except KeyError as error: - raise InvalidAggregateReport( - "Missing field: {0}".format(error.__str__())) + raise InvalidAggregateReport("Missing field: {0}".format(error.__str__())) except AttributeError: raise InvalidAggregateReport("Report missing required section") except Exception as error: - raise InvalidAggregateReport( - "Unexpected error: {0}".format(error.__str__())) + raise InvalidAggregateReport("Unexpected error: {0}".format(error.__str__())) def extract_report(content): @@ -618,14 +646,13 @@ def extract_report(content): file_object.seek(0) if header.startswith(MAGIC_ZIP): _zip = zipfile.ZipFile(file_object) - report = _zip.open(_zip.namelist()[0]).read().decode( - errors='ignore') + report = _zip.open(_zip.namelist()[0]).read().decode(errors="ignore") elif header.startswith(MAGIC_GZIP): - report = zlib.decompress( - file_object.read(), - zlib.MAX_WBITS | 16).decode(errors='ignore') + report = zlib.decompress(file_object.read(), zlib.MAX_WBITS | 16).decode( + errors="ignore" + ) elif header.startswith(MAGIC_XML) or header.startswith(MAGIC_JSON): - report = file_object.read().decode(errors='ignore') + report = file_object.read().decode(errors="ignore") else: file_object.close() raise ParserError("Not a valid zip, gzip, json, or xml file") @@ -637,8 +664,7 @@ def extract_report(content): raise ParserError("File objects must be opened in binary (rb) mode") except Exception as error: 
file_object.close() - raise ParserError( - "Invalid archive file: {0}".format(error.__str__())) + raise ParserError("Invalid archive file: {0}".format(error.__str__())) return report @@ -653,15 +679,16 @@ def extract_report_from_file_path(file_path): def parse_aggregate_report_file( - _input, - offline=False, - always_use_local_files=None, - reverse_dns_map_path=None, - reverse_dns_map_url=None, - ip_db_path=None, - nameservers=None, - dns_timeout=2.0, - keep_alive=None): + _input, + offline=False, + always_use_local_files=None, + reverse_dns_map_path=None, + reverse_dns_map_url=None, + ip_db_path=None, + nameservers=None, + dns_timeout=2.0, + keep_alive=None, +): """Parses a file at the given path, a file-like object. or bytes as an aggregate DMARC report @@ -695,7 +722,8 @@ def parse_aggregate_report_file( offline=offline, nameservers=nameservers, timeout=dns_timeout, - keep_alive=keep_alive) + keep_alive=keep_alive, + ) def parsed_aggregate_reports_to_csv_rows(reports): @@ -736,12 +764,23 @@ def to_str(obj): pct = report["policy_published"]["pct"] fo = report["policy_published"]["fo"] - report_dict = dict(xml_schema=xml_schema, org_name=org_name, - org_email=org_email, - org_extra_contact_info=org_extra_contact, - report_id=report_id, begin_date=begin_date, - end_date=end_date, errors=errors, domain=domain, - adkim=adkim, aspf=aspf, p=p, sp=sp, pct=pct, fo=fo) + report_dict = dict( + xml_schema=xml_schema, + org_name=org_name, + org_email=org_email, + org_extra_contact_info=org_extra_contact, + report_id=report_id, + begin_date=begin_date, + end_date=end_date, + errors=errors, + domain=domain, + adkim=adkim, + aspf=aspf, + p=p, + sp=sp, + pct=pct, + fo=fo, + ) for record in report["records"]: row = report_dict.copy() @@ -756,18 +795,20 @@ def to_str(obj): row["dkim_aligned"] = record["alignment"]["dkim"] row["dmarc_aligned"] = record["alignment"]["dmarc"] row["disposition"] = record["policy_evaluated"]["disposition"] - policy_override_reasons = list(map( - lambda r_: r_["type"] or "none", - record["policy_evaluated"] - ["policy_override_reasons"])) - policy_override_comments = list(map( - lambda r_: r_["comment"] or "none", - record["policy_evaluated"] - ["policy_override_reasons"])) - row["policy_override_reasons"] = ",".join( - policy_override_reasons) - row["policy_override_comments"] = "|".join( - policy_override_comments) + policy_override_reasons = list( + map( + lambda r_: r_["type"] or "none", + record["policy_evaluated"]["policy_override_reasons"], + ) + ) + policy_override_comments = list( + map( + lambda r_: r_["comment"] or "none", + record["policy_evaluated"]["policy_override_reasons"], + ) + ) + row["policy_override_reasons"] = ",".join(policy_override_reasons) + row["policy_override_comments"] = "|".join(policy_override_comments) row["envelope_from"] = record["identifiers"]["envelope_from"] row["header_from"] = record["identifiers"]["header_from"] envelope_to = record["identifiers"]["envelope_to"] @@ -798,7 +839,7 @@ def to_str(obj): for r in rows: for k, v in r.items(): if type(v) not in [str, int, bool]: - r[k] = '' + r[k] = "" return rows @@ -815,16 +856,45 @@ def parsed_aggregate_reports_to_csv(reports): str: Parsed aggregate report data in flat CSV format, including headers """ - fields = ["xml_schema", "org_name", "org_email", - "org_extra_contact_info", "report_id", "begin_date", "end_date", - "errors", "domain", "adkim", "aspf", "p", "sp", "pct", "fo", - "source_ip_address", "source_country", "source_reverse_dns", - "source_base_domain", "source_name", 
"source_type", "count", - "spf_aligned", "dkim_aligned", "dmarc_aligned", "disposition", - "policy_override_reasons", "policy_override_comments", - "envelope_from", "header_from", - "envelope_to", "dkim_domains", "dkim_selectors", "dkim_results", - "spf_domains", "spf_scopes", "spf_results"] + fields = [ + "xml_schema", + "org_name", + "org_email", + "org_extra_contact_info", + "report_id", + "begin_date", + "end_date", + "errors", + "domain", + "adkim", + "aspf", + "p", + "sp", + "pct", + "fo", + "source_ip_address", + "source_country", + "source_reverse_dns", + "source_base_domain", + "source_name", + "source_type", + "count", + "spf_aligned", + "dkim_aligned", + "dmarc_aligned", + "disposition", + "policy_override_reasons", + "policy_override_comments", + "envelope_from", + "header_from", + "envelope_to", + "dkim_domains", + "dkim_selectors", + "dkim_results", + "spf_domains", + "spf_scopes", + "spf_results", + ] csv_file_object = StringIO(newline="\n") writer = DictWriter(csv_file_object, fields) @@ -839,17 +909,19 @@ def parsed_aggregate_reports_to_csv(reports): return csv_file_object.getvalue() -def parse_forensic_report(feedback_report, - sample, - msg_date, - always_use_local_files=False, - reverse_dns_map_path=None, - reverse_dns_map_url=None, - offline=False, - ip_db_path=None, - nameservers=None, - dns_timeout=2.0, - strip_attachment_payloads=False): +def parse_forensic_report( + feedback_report, + sample, + msg_date, + always_use_local_files=False, + reverse_dns_map_path=None, + reverse_dns_map_url=None, + offline=False, + ip_db_path=None, + nameservers=None, + dns_timeout=2.0, + strip_attachment_payloads=False, +): """ Converts a DMARC forensic report and sample to a ``OrderedDict`` @@ -882,8 +954,7 @@ def parse_forensic_report(feedback_report, if "arrival_date" not in parsed_report: if msg_date is None: - raise InvalidForensicReport( - "Forensic sample is not a valid email") + raise InvalidForensicReport("Forensic sample is not a valid email") parsed_report["arrival_date"] = msg_date.isoformat() if "version" not in parsed_report: @@ -903,11 +974,12 @@ def parse_forensic_report(feedback_report, parsed_report["delivery_result"] = "other" arrival_utc = human_timestamp_to_datetime( - parsed_report["arrival_date"], to_utc=True) + parsed_report["arrival_date"], to_utc=True + ) arrival_utc = arrival_utc.strftime("%Y-%m-%d %H:%M:%S") parsed_report["arrival_date_utc"] = arrival_utc - ip_address = re.split(r'\s', parsed_report["source_ip"]).pop(0) + ip_address = re.split(r"\s", parsed_report["source_ip"]).pop(0) parsed_report_source = get_ip_address_info( ip_address, cache=IP_ADDRESS_CACHE, @@ -918,7 +990,8 @@ def parse_forensic_report(feedback_report, reverse_dns_map=REVERSE_DNS_MAP, offline=offline, nameservers=nameservers, - timeout=dns_timeout) + timeout=dns_timeout, + ) parsed_report["source"] = parsed_report_source del parsed_report["source_ip"] @@ -938,15 +1011,19 @@ def parse_forensic_report(feedback_report, auth_failure = parsed_report["auth_failure"].split(",") parsed_report["auth_failure"] = auth_failure - optional_fields = ["original_envelope_id", "dkim_domain", - "original_mail_from", "original_rcpt_to"] + optional_fields = [ + "original_envelope_id", + "dkim_domain", + "original_mail_from", + "original_rcpt_to", + ] for optional_field in optional_fields: if optional_field not in parsed_report: parsed_report[optional_field] = None parsed_sample = parse_email( - sample, - strip_attachment_payloads=strip_attachment_payloads) + sample, 
strip_attachment_payloads=strip_attachment_payloads + ) if "reported_domain" not in parsed_report: parsed_report["reported_domain"] = parsed_sample["from"]["domain"] @@ -966,12 +1043,10 @@ def parse_forensic_report(feedback_report, return parsed_report except KeyError as error: - raise InvalidForensicReport("Missing value: {0}".format( - error.__str__())) + raise InvalidForensicReport("Missing value: {0}".format(error.__str__())) except Exception as error: - raise InvalidForensicReport( - "Unexpected error: {0}".format(error.__str__())) + raise InvalidForensicReport("Unexpected error: {0}".format(error.__str__())) def parsed_forensic_reports_to_csv_rows(reports): @@ -1002,8 +1077,7 @@ def parsed_forensic_reports_to_csv_rows(reports): row["subject"] = report["parsed_sample"]["subject"] row["auth_failure"] = ",".join(report["auth_failure"]) authentication_mechanisms = report["authentication_mechanisms"] - row["authentication_mechanisms"] = ",".join( - authentication_mechanisms) + row["authentication_mechanisms"] = ",".join(authentication_mechanisms) del row["sample"] del row["parsed_sample"] rows.append(row) @@ -1022,14 +1096,31 @@ def parsed_forensic_reports_to_csv(reports): Returns: str: Parsed forensic report data in flat CSV format, including headers """ - fields = ["feedback_type", "user_agent", "version", "original_envelope_id", - "original_mail_from", "original_rcpt_to", "arrival_date", - "arrival_date_utc", "subject", "message_id", - "authentication_results", "dkim_domain", "source_ip_address", - "source_country", "source_reverse_dns", - "source_base_domain", "source_name", "source_type", - "delivery_result", "auth_failure", "reported_domain", - "authentication_mechanisms", "sample_headers_only"] + fields = [ + "feedback_type", + "user_agent", + "version", + "original_envelope_id", + "original_mail_from", + "original_rcpt_to", + "arrival_date", + "arrival_date_utc", + "subject", + "message_id", + "authentication_results", + "dkim_domain", + "source_ip_address", + "source_country", + "source_reverse_dns", + "source_base_domain", + "source_name", + "source_type", + "delivery_result", + "auth_failure", + "reported_domain", + "authentication_mechanisms", + "sample_headers_only", + ] csv_file = StringIO() csv_writer = DictWriter(csv_file, fieldnames=fields) @@ -1047,15 +1138,17 @@ def parsed_forensic_reports_to_csv(reports): def parse_report_email( - input_, - offline=False, - ip_db_path=None, - always_use_local_files=False, - reverse_dns_map_path=None, - reverse_dns_map_url=None, - nameservers=None, dns_timeout=2.0, - strip_attachment_payloads=False, - keep_alive=None): + input_, + offline=False, + ip_db_path=None, + always_use_local_files=False, + reverse_dns_map_path=None, + reverse_dns_map_url=None, + nameservers=None, + dns_timeout=2.0, + strip_attachment_payloads=False, + keep_alive=None, +): """ Parses a DMARC report from an email @@ -1088,8 +1181,7 @@ def parse_report_email( msg_headers = json.loads(msg.headers_json) date = email.utils.format_datetime(datetime.utcnow()) if "Date" in msg_headers: - date = human_timestamp_to_datetime( - msg_headers["Date"]) + date = human_timestamp_to_datetime(msg_headers["Date"]) msg = email.message_from_string(input_) except Exception as e: @@ -1099,8 +1191,7 @@ def parse_report_email( smtp_tls_report = None sample = None if "From" in msg_headers: - logger.info("Parsing mail from {0} on {1}".format(msg_headers["From"], - date)) + logger.info("Parsing mail from {0} on {1}".format(msg_headers["From"], date)) if "Subject" in msg_headers: subject = 
msg_headers["Subject"] for part in msg.walk(): @@ -1115,8 +1206,7 @@ def parse_report_email( feedback_report = payload else: feedback_report = b64decode(payload).__str__() - feedback_report = feedback_report.lstrip( - "b'").rstrip("'") + feedback_report = feedback_report.lstrip("b'").rstrip("'") feedback_report = feedback_report.replace("\\r", "") feedback_report = feedback_report.replace("\\n", "\n") except (ValueError, TypeError, binascii.Error): @@ -1130,13 +1220,15 @@ def parse_report_email( if "{" not in payload: payload = str(b64decode(payload)) smtp_tls_report = parse_smtp_tls_report_json(payload) - return OrderedDict([("report_type", "smtp_tls"), - ("report", smtp_tls_report)]) + return OrderedDict( + [("report_type", "smtp_tls"), ("report", smtp_tls_report)] + ) elif content_type == "application/tlsrpt+gzip": payload = extract_report(payload) smtp_tls_report = parse_smtp_tls_report_json(payload) - return OrderedDict([("report_type", "smtp_tls"), - ("report", smtp_tls_report)]) + return OrderedDict( + [("report_type", "smtp_tls"), ("report", smtp_tls_report)] + ) elif content_type == "text/plain": if "A message claiming to be from you has failed" in payload: @@ -1148,13 +1240,13 @@ def parse_report_email( field_name = match[0].lower().replace(" ", "-") fields[field_name] = match[1].strip() - feedback_report = "Arrival-Date: {}\n" \ - "Source-IP: {}" \ - "".format(fields["received-date"], - fields["sender-ip-address"]) + feedback_report = "Arrival-Date: {}\n" "Source-IP: {}" "".format( + fields["received-date"], fields["sender-ip-address"] + ) except Exception as e: - error = 'Unable to parse message with ' \ - 'subject "{0}": {1}'.format(subject, e) + error = "Unable to parse message with " 'subject "{0}": {1}'.format( + subject, e + ) raise InvalidDMARCReport(error) sample = parts[1].lstrip() @@ -1162,14 +1254,14 @@ def parse_report_email( else: try: payload = b64decode(payload) - if payload.startswith(MAGIC_ZIP) or \ - payload.startswith(MAGIC_GZIP): + if payload.startswith(MAGIC_ZIP) or payload.startswith(MAGIC_GZIP): payload = extract_report(payload) ns = nameservers if payload.startswith("{"): smtp_tls_report = parse_smtp_tls_report_json(payload) - result = OrderedDict([("report_type", "smtp_tls"), - ("report", smtp_tls_report)]) + result = OrderedDict( + [("report_type", "smtp_tls"), ("report", smtp_tls_report)] + ) return result aggregate_report = parse_aggregate_report_xml( payload, @@ -1180,23 +1272,28 @@ def parse_report_email( offline=offline, nameservers=ns, timeout=dns_timeout, - keep_alive=keep_alive) - result = OrderedDict([("report_type", "aggregate"), - ("report", aggregate_report)]) + keep_alive=keep_alive, + ) + result = OrderedDict( + [("report_type", "aggregate"), ("report", aggregate_report)] + ) return result except (TypeError, ValueError, binascii.Error): pass except InvalidAggregateReport as e: - error = 'Message with subject "{0}" ' \ - 'is not a valid ' \ - 'aggregate DMARC report: {1}'.format(subject, e) + error = ( + 'Message with subject "{0}" ' + "is not a valid " + "aggregate DMARC report: {1}".format(subject, e) + ) raise ParserError(error) except Exception as e: - error = 'Unable to parse message with ' \ - 'subject "{0}": {1}'.format(subject, e) + error = "Unable to parse message with " 'subject "{0}": {1}'.format( + subject, e + ) raise ParserError(error) if feedback_report and sample: @@ -1212,31 +1309,38 @@ def parse_report_email( reverse_dns_map_url=reverse_dns_map_url, nameservers=nameservers, dns_timeout=dns_timeout, - 
strip_attachment_payloads=strip_attachment_payloads) + strip_attachment_payloads=strip_attachment_payloads, + ) except InvalidForensicReport as e: - error = 'Message with subject "{0}" ' \ - 'is not a valid ' \ - 'forensic DMARC report: {1}'.format(subject, e) + error = ( + 'Message with subject "{0}" ' + "is not a valid " + "forensic DMARC report: {1}".format(subject, e) + ) raise InvalidForensicReport(error) except Exception as e: raise InvalidForensicReport(e.__str__()) - result = OrderedDict([("report_type", "forensic"), - ("report", forensic_report)]) + result = OrderedDict([("report_type", "forensic"), ("report", forensic_report)]) return result if result is None: - error = 'Message with subject "{0}" is ' \ - 'not a valid report'.format(subject) + error = 'Message with subject "{0}" is ' "not a valid report".format(subject) raise InvalidDMARCReport(error) -def parse_report_file(input_, nameservers=None, dns_timeout=2.0, - strip_attachment_payloads=False, ip_db_path=None, - always_use_local_files=False, - reverse_dns_map_path=None, - reverse_dns_map_url=None, - offline=False, keep_alive=None): +def parse_report_file( + input_, + nameservers=None, + dns_timeout=2.0, + strip_attachment_payloads=False, + ip_db_path=None, + always_use_local_files=False, + reverse_dns_map_path=None, + reverse_dns_map_url=None, + offline=False, + keep_alive=None, +): """Parses a DMARC aggregate or forensic file at the given path, a file-like object. or bytes @@ -1277,14 +1381,13 @@ def parse_report_file(input_, nameservers=None, dns_timeout=2.0, offline=offline, nameservers=nameservers, dns_timeout=dns_timeout, - keep_alive=keep_alive) - results = OrderedDict([("report_type", "aggregate"), - ("report", report)]) + keep_alive=keep_alive, + ) + results = OrderedDict([("report_type", "aggregate"), ("report", report)]) except InvalidAggregateReport: try: report = parse_smtp_tls_report_json(content) - results = OrderedDict([("report_type", "smtp_tls"), - ("report", report)]) + results = OrderedDict([("report_type", "smtp_tls"), ("report", report)]) except InvalidSMTPTLSReport: try: sa = strip_attachment_payloads @@ -1298,19 +1401,24 @@ def parse_report_file(input_, nameservers=None, dns_timeout=2.0, nameservers=nameservers, dns_timeout=dns_timeout, strip_attachment_payloads=sa, - keep_alive=keep_alive) + keep_alive=keep_alive, + ) except InvalidDMARCReport: raise ParserError("Not a valid report") return results -def get_dmarc_reports_from_mbox(input_, nameservers=None, dns_timeout=2.0, - strip_attachment_payloads=False, - ip_db_path=None, - always_use_local_files=False, - reverse_dns_map_path=None, - reverse_dns_map_url=None, - offline=False): +def get_dmarc_reports_from_mbox( + input_, + nameservers=None, + dns_timeout=2.0, + strip_attachment_payloads=False, + ip_db_path=None, + always_use_local_files=False, + reverse_dns_map_path=None, + reverse_dns_map_url=None, + offline=False, +): """Parses a mailbox in mbox format containing e-mails with attached DMARC reports @@ -1338,13 +1446,10 @@ def get_dmarc_reports_from_mbox(input_, nameservers=None, dns_timeout=2.0, mbox = mailbox.mbox(input_) message_keys = mbox.keys() total_messages = len(message_keys) - logger.debug("Found {0} messages in {1}".format(total_messages, - input_)) + logger.debug("Found {0} messages in {1}".format(total_messages, input_)) for i in range(len(message_keys)): message_key = message_keys[i] - logger.info("Processing message {0} of {1}".format( - i+1, total_messages - )) + logger.info("Processing message {0} of {1}".format(i + 1, 
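As a quick orientation for the reflowed `parse_report_file()` signature above, a minimal usage sketch follows. The report path is illustrative; the returned `OrderedDict` uses the `report_type`/`report` keys built in this module.

```python
# Minimal usage sketch; the report path is illustrative.
from parsedmarc import parse_report_file

results = parse_report_file(
    "aggregate_report.xml.gz",
    offline=True,  # skip online DNS lookups
    strip_attachment_payloads=True,
)
print(results["report_type"])  # "aggregate", "forensic", or "smtp_tls"
```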
total_messages)) msg_content = mbox.get_string(message_key) try: sa = strip_attachment_payloads @@ -1357,7 +1462,8 @@ def get_dmarc_reports_from_mbox(input_, nameservers=None, dns_timeout=2.0, offline=offline, nameservers=nameservers, dns_timeout=dns_timeout, - strip_attachment_payloads=sa) + strip_attachment_payloads=sa, + ) if parsed_email["report_type"] == "aggregate": aggregate_reports.append(parsed_email["report"]) elif parsed_email["report_type"] == "forensic": @@ -1368,27 +1474,33 @@ def get_dmarc_reports_from_mbox(input_, nameservers=None, dns_timeout=2.0, logger.warning(error.__str__()) except mailbox.NoSuchMailboxError: raise InvalidDMARCReport("Mailbox {0} does not exist".format(input_)) - return OrderedDict([("aggregate_reports", aggregate_reports), - ("forensic_reports", forensic_reports), - ("smtp_tls_reports", smtp_tls_reports)]) - - -def get_dmarc_reports_from_mailbox(connection: MailboxConnection, - reports_folder="INBOX", - archive_folder="Archive", - delete=False, - test=False, - ip_db_path=None, - always_use_local_files=False, - reverse_dns_map_path=None, - reverse_dns_map_url=None, - offline=False, - nameservers=None, - dns_timeout=6.0, - strip_attachment_payloads=False, - results=None, - batch_size=10, - create_folders=True): + return OrderedDict( + [ + ("aggregate_reports", aggregate_reports), + ("forensic_reports", forensic_reports), + ("smtp_tls_reports", smtp_tls_reports), + ] + ) + + +def get_dmarc_reports_from_mailbox( + connection: MailboxConnection, + reports_folder="INBOX", + archive_folder="Archive", + delete=False, + test=False, + ip_db_path=None, + always_use_local_files=False, + reverse_dns_map_path=None, + reverse_dns_map_url=None, + offline=False, + nameservers=None, + dns_timeout=6.0, + strip_attachment_payloads=False, + results=None, + batch_size=10, + create_folders=True, +): """ Fetches and parses DMARC reports from a mailbox @@ -1428,15 +1540,10 @@ def get_dmarc_reports_from_mailbox(connection: MailboxConnection, aggregate_report_msg_uids = [] forensic_report_msg_uids = [] smtp_tls_msg_uids = [] - folder_separator = connection.get_folder_separator() - aggregate_reports_folder = "{0}{1}Aggregate".format(archive_folder, - folder_separator) - forensic_reports_folder = "{0}{1}Forensic".format(archive_folder, - folder_separator) - smtp_tls_reports_folder = "{0}{1}SMTP-TLS".format(archive_folder, - folder_separator) - invalid_reports_folder = "{0}{1}Invalid".format(archive_folder, - folder_separator) + aggregate_reports_folder = "{0}/Aggregate".format(archive_folder) + forensic_reports_folder = "{0}/Forensic".format(archive_folder) + smtp_tls_reports_folder = "{0}/SMTP-TLS".format(archive_folder) + invalid_reports_folder = "{0}/Invalid".format(archive_folder) if results: aggregate_reports = results["aggregate_reports"].copy() @@ -1452,8 +1559,7 @@ def get_dmarc_reports_from_mailbox(connection: MailboxConnection, messages = connection.fetch_messages(reports_folder, batch_size=batch_size) total_messages = len(messages) - logger.debug("Found {0} messages in {1}".format(len(messages), - reports_folder)) + logger.debug("Found {0} messages in {1}".format(len(messages), reports_folder)) if batch_size: message_limit = min(total_messages, batch_size) @@ -1464,9 +1570,11 @@ def get_dmarc_reports_from_mailbox(connection: MailboxConnection, for i in range(message_limit): msg_uid = messages[i] - logger.debug("Processing message {0} of {1}: UID {2}".format( - i+1, message_limit, msg_uid - )) + logger.debug( + "Processing message {0} of {1}: UID {2}".format( + i + 1, 
message_limit, msg_uid + ) + ) msg_content = connection.fetch_message(msg_uid) try: sa = strip_attachment_payloads @@ -1480,7 +1588,8 @@ def get_dmarc_reports_from_mailbox(connection: MailboxConnection, reverse_dns_map_url=reverse_dns_map_url, offline=offline, strip_attachment_payloads=sa, - keep_alive=connection.keepalive) + keep_alive=connection.keepalive, + ) if parsed_email["report_type"] == "aggregate": aggregate_reports.append(parsed_email["report"]) aggregate_report_msg_uids.append(msg_uid) @@ -1494,27 +1603,30 @@ def get_dmarc_reports_from_mailbox(connection: MailboxConnection, logger.warning(error.__str__()) if not test: if delete: - logger.debug( - "Deleting message UID {0}".format(msg_uid)) + logger.debug("Deleting message UID {0}".format(msg_uid)) connection.delete_message(msg_uid) else: logger.debug( "Moving message UID {0} to {1}".format( - msg_uid, invalid_reports_folder)) + msg_uid, invalid_reports_folder + ) + ) connection.move_message(msg_uid, invalid_reports_folder) if not test: if delete: - processed_messages = aggregate_report_msg_uids + \ - forensic_report_msg_uids + \ - smtp_tls_msg_uids + processed_messages = ( + aggregate_report_msg_uids + forensic_report_msg_uids + smtp_tls_msg_uids + ) number_of_processed_msgs = len(processed_messages) for i in range(number_of_processed_msgs): msg_uid = processed_messages[i] logger.debug( "Deleting message {0} of {1}: UID {2}".format( - i + 1, number_of_processed_msgs, msg_uid)) + i + 1, number_of_processed_msgs, msg_uid + ) + ) try: connection.delete_message(msg_uid) @@ -1527,17 +1639,19 @@ def get_dmarc_reports_from_mailbox(connection: MailboxConnection, log_message = "Moving aggregate report messages from" logger.debug( "{0} {1} to {2}".format( - log_message, reports_folder, - aggregate_reports_folder)) + log_message, reports_folder, aggregate_reports_folder + ) + ) number_of_agg_report_msgs = len(aggregate_report_msg_uids) for i in range(number_of_agg_report_msgs): msg_uid = aggregate_report_msg_uids[i] logger.debug( "Moving message {0} of {1}: UID {2}".format( - i+1, number_of_agg_report_msgs, msg_uid)) + i + 1, number_of_agg_report_msgs, msg_uid + ) + ) try: - connection.move_message(msg_uid, - aggregate_reports_folder) + connection.move_message(msg_uid, aggregate_reports_folder) except Exception as e: message = "Error moving message UID" e = "{0} {1}: {2}".format(message, msg_uid, e) @@ -1545,46 +1659,52 @@ def get_dmarc_reports_from_mailbox(connection: MailboxConnection, if len(forensic_report_msg_uids) > 0: message = "Moving forensic report messages from" logger.debug( - "{0} {1} to {2}".format(message, - reports_folder, - forensic_reports_folder)) + "{0} {1} to {2}".format( + message, reports_folder, forensic_reports_folder + ) + ) number_of_forensic_msgs = len(forensic_report_msg_uids) for i in range(number_of_forensic_msgs): msg_uid = forensic_report_msg_uids[i] message = "Moving message" - logger.debug("{0} {1} of {2}: UID {3}".format( - message, - i + 1, number_of_forensic_msgs, msg_uid)) + logger.debug( + "{0} {1} of {2}: UID {3}".format( + message, i + 1, number_of_forensic_msgs, msg_uid + ) + ) try: - connection.move_message(msg_uid, - forensic_reports_folder) + connection.move_message(msg_uid, forensic_reports_folder) except Exception as e: - e = "Error moving message UID {0}: {1}".format( - msg_uid, e) + e = "Error moving message UID {0}: {1}".format(msg_uid, e) logger.error("Mailbox error: {0}".format(e)) if len(smtp_tls_msg_uids) > 0: message = "Moving SMTP TLS report messages from" logger.debug( - "{0} {1} 
to {2}".format(message, - reports_folder, - smtp_tls_reports_folder)) + "{0} {1} to {2}".format( + message, reports_folder, smtp_tls_reports_folder + ) + ) number_of_smtp_tls_uids = len(smtp_tls_msg_uids) for i in range(number_of_smtp_tls_uids): msg_uid = smtp_tls_msg_uids[i] message = "Moving message" - logger.debug("{0} {1} of {2}: UID {3}".format( - message, - i + 1, number_of_smtp_tls_uids, msg_uid)) + logger.debug( + "{0} {1} of {2}: UID {3}".format( + message, i + 1, number_of_smtp_tls_uids, msg_uid + ) + ) try: - connection.move_message(msg_uid, - smtp_tls_reports_folder) + connection.move_message(msg_uid, smtp_tls_reports_folder) except Exception as e: - e = "Error moving message UID {0}: {1}".format( - msg_uid, e) + e = "Error moving message UID {0}: {1}".format(msg_uid, e) logger.error("Mailbox error: {0}".format(e)) - results = OrderedDict([("aggregate_reports", aggregate_reports), - ("forensic_reports", forensic_reports), - ("smtp_tls_reports", smtp_tls_reports)]) + results = OrderedDict( + [ + ("aggregate_reports", aggregate_reports), + ("forensic_reports", forensic_reports), + ("smtp_tls_reports", smtp_tls_reports), + ] + ) total_messages = len(connection.fetch_messages(reports_folder)) @@ -1604,23 +1724,30 @@ def get_dmarc_reports_from_mailbox(connection: MailboxConnection, always_use_local_files=always_use_local_files, reverse_dns_map_path=reverse_dns_map_path, reverse_dns_map_url=reverse_dns_map_url, - offline=offline + offline=offline, ) return results -def watch_inbox(mailbox_connection: MailboxConnection, - callback: Callable, - reports_folder="INBOX", - archive_folder="Archive", delete=False, test=False, - check_timeout=30, ip_db_path=None, - always_use_local_files=False, - reverse_dns_map_path=None, - reverse_dns_map_url=None, - offline=False, nameservers=None, - dns_timeout=6.0, strip_attachment_payloads=False, - batch_size=None): +def watch_inbox( + mailbox_connection: MailboxConnection, + callback: Callable, + reports_folder="INBOX", + archive_folder="Archive", + delete=False, + test=False, + check_timeout=30, + ip_db_path=None, + always_use_local_files=False, + reverse_dns_map_path=None, + reverse_dns_map_url=None, + offline=False, + nameservers=None, + dns_timeout=6.0, + strip_attachment_payloads=False, + batch_size=None, +): """ Watches the mailbox for new messages and sends the results to a callback function @@ -1664,11 +1791,11 @@ def check_callback(connection): dns_timeout=dns_timeout, strip_attachment_payloads=sa, batch_size=batch_size, - create_folders=False) + create_folders=False, + ) callback(res) - mailbox_connection.watch(check_callback=check_callback, - check_timeout=check_timeout) + mailbox_connection.watch(check_callback=check_callback, check_timeout=check_timeout) def append_json(filename, reports): @@ -1706,13 +1833,16 @@ def append_csv(filename, csv): output.write(csv) -def save_output(results, output_directory="output", - aggregate_json_filename="aggregate.json", - forensic_json_filename="forensic.json", - smtp_tls_json_filename="smtp_tls.json", - aggregate_csv_filename="aggregate.csv", - forensic_csv_filename="forensic.csv", - smtp_tls_csv_filename="smtp_tls.csv"): +def save_output( + results, + output_directory="output", + aggregate_json_filename="aggregate.json", + forensic_json_filename="forensic.json", + smtp_tls_json_filename="smtp_tls.json", + aggregate_csv_filename="aggregate.csv", + forensic_csv_filename="forensic.csv", + smtp_tls_csv_filename="smtp_tls.csv", +): """ Save report data in the given directory @@ -1738,23 +1868,32 @@ def 
save_output(results, output_directory="output", else: os.makedirs(output_directory) - append_json(os.path.join(output_directory, aggregate_json_filename), - aggregate_reports) + append_json( + os.path.join(output_directory, aggregate_json_filename), aggregate_reports + ) - append_csv(os.path.join(output_directory, aggregate_csv_filename), - parsed_aggregate_reports_to_csv(aggregate_reports)) + append_csv( + os.path.join(output_directory, aggregate_csv_filename), + parsed_aggregate_reports_to_csv(aggregate_reports), + ) - append_json(os.path.join(output_directory, forensic_json_filename), - forensic_reports) + append_json( + os.path.join(output_directory, forensic_json_filename), forensic_reports + ) - append_csv(os.path.join(output_directory, forensic_csv_filename), - parsed_forensic_reports_to_csv(forensic_reports)) + append_csv( + os.path.join(output_directory, forensic_csv_filename), + parsed_forensic_reports_to_csv(forensic_reports), + ) - append_json(os.path.join(output_directory, smtp_tls_json_filename), - smtp_tls_reports) + append_json( + os.path.join(output_directory, smtp_tls_json_filename), smtp_tls_reports + ) - append_csv(os.path.join(output_directory, smtp_tls_csv_filename), - parsed_smtp_tls_reports_to_csv(smtp_tls_reports)) + append_csv( + os.path.join(output_directory, smtp_tls_csv_filename), + parsed_smtp_tls_reports_to_csv(smtp_tls_reports), + ) samples_directory = os.path.join(output_directory, "samples") if not os.path.exists(samples_directory): @@ -1790,6 +1929,7 @@ def get_report_zip(results): Returns: bytes: zip file bytes """ + def add_subdir(root_path, subdir): subdir_path = os.path.join(root_path, subdir) for subdir_root, subdir_dirs, subdir_files in os.walk(subdir_path): @@ -1806,13 +1946,12 @@ def add_subdir(root_path, subdir): tmp_dir = tempfile.mkdtemp() try: save_output(results, tmp_dir) - with zipfile.ZipFile(storage, 'w', zipfile.ZIP_DEFLATED) as zip_file: + with zipfile.ZipFile(storage, "w", zipfile.ZIP_DEFLATED) as zip_file: for root, dirs, files in os.walk(tmp_dir): for file in files: file_path = os.path.join(root, file) if os.path.isfile(file_path): - arcname = os.path.join(os.path.relpath(root, tmp_dir), - file) + arcname = os.path.join(os.path.relpath(root, tmp_dir), file) zip_file.write(file_path, arcname) for directory in dirs: dir_path = os.path.join(root, directory) @@ -1825,11 +1964,22 @@ def add_subdir(root_path, subdir): return storage.getvalue() -def email_results(results, host, mail_from, mail_to, - mail_cc=None, mail_bcc=None, port=0, - require_encryption=False, verify=True, - username=None, password=None, subject=None, - attachment_filename=None, message=None): +def email_results( + results, + host, + mail_from, + mail_to, + mail_cc=None, + mail_bcc=None, + port=0, + require_encryption=False, + verify=True, + username=None, + password=None, + subject=None, + attachment_filename=None, + message=None, +): """ Emails parsing results as a zip file @@ -1867,8 +2017,18 @@ def email_results(results, host, mail_from, mail_to, zip_bytes = get_report_zip(results) attachments = [(filename, zip_bytes)] - send_email(host, mail_from, mail_to, message_cc=mail_cc, - message_bcc=mail_bcc, port=port, - require_encryption=require_encryption, verify=verify, - username=username, password=password, subject=subject, - attachments=attachments, plain_message=message) + send_email( + host, + mail_from, + mail_to, + message_cc=mail_cc, + message_bcc=mail_bcc, + port=port, + require_encryption=require_encryption, + verify=verify, + username=username, + 
password=password, + subject=subject, + attachments=attachments, + plain_message=message, + ) diff --git a/parsedmarc/cli.py b/parsedmarc/cli.py index 05b4e0f9..f1f8ec42 100644 --- a/parsedmarc/cli.py +++ b/parsedmarc/cli.py @@ -16,21 +16,41 @@ import sys from tqdm import tqdm -from parsedmarc import get_dmarc_reports_from_mailbox, watch_inbox, \ - parse_report_file, get_dmarc_reports_from_mbox, elastic, opensearch, \ - kafkaclient, splunk, save_output, email_results, ParserError, \ - __version__, InvalidDMARCReport, s3, syslog, loganalytics, gelf, \ - webhook -from parsedmarc.mail import IMAPConnection, MSGraphConnection, \ - GmailConnection, MaildirConnection +from parsedmarc import ( + get_dmarc_reports_from_mailbox, + watch_inbox, + parse_report_file, + get_dmarc_reports_from_mbox, + elastic, + opensearch, + kafkaclient, + splunk, + save_output, + email_results, + ParserError, + __version__, + InvalidDMARCReport, + s3, + syslog, + loganalytics, + gelf, + webhook, +) +from parsedmarc.mail import ( + IMAPConnection, + MSGraphConnection, + GmailConnection, + MaildirConnection, +) from parsedmarc.mail.graph import AuthMethod from parsedmarc.log import logger from parsedmarc.utils import is_mbox, get_reverse_dns formatter = logging.Formatter( - fmt='%(levelname)8s:%(filename)s:%(lineno)d:%(message)s', - datefmt='%Y-%m-%d:%H:%M:%S') + fmt="%(levelname)8s:%(filename)s:%(lineno)d:%(message)s", + datefmt="%Y-%m-%d:%H:%M:%S", +) handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) @@ -42,12 +62,18 @@ def _str_to_list(s): return list(map(lambda i: i.lstrip(), _list)) -def cli_parse(file_path, sa, nameservers, dns_timeout, - ip_db_path, offline, - always_use_local_files, - reverse_dns_map_path, - reverse_dns_map_url, - conn): +def cli_parse( + file_path, + sa, + nameservers, + dns_timeout, + ip_db_path, + offline, + always_use_local_files, + reverse_dns_map_path, + reverse_dns_map_url, + conn, +): """Separated this function for multiprocessing""" try: file_results = parse_report_file( @@ -59,7 +85,8 @@ def cli_parse(file_path, sa, nameservers, dns_timeout, reverse_dns_map_url=reverse_dns_map_url, nameservers=nameservers, dns_timeout=dns_timeout, - strip_attachment_payloads=sa) + strip_attachment_payloads=sa, + ) conn.send([file_results, file_path]) except ParserError as error: conn.send([error, file_path]) @@ -71,20 +98,21 @@ def _main(): """Called when the module is executed""" def process_reports(reports_): - output_str = "{0}\n".format(json.dumps(reports_, - ensure_ascii=False, - indent=2)) + output_str = "{0}\n".format(json.dumps(reports_, ensure_ascii=False, indent=2)) if not opts.silent: print(output_str) if opts.output: - save_output(results, output_directory=opts.output, - aggregate_json_filename=opts.aggregate_json_filename, - forensic_json_filename=opts.forensic_json_filename, - smtp_tls_json_filename=opts.smtp_tls_json_filename, - aggregate_csv_filename=opts.aggregate_csv_filename, - forensic_csv_filename=opts.forensic_csv_filename, - smtp_tls_csv_filename=opts.smtp_tls_csv_filename) + save_output( + results, + output_directory=opts.output, + aggregate_json_filename=opts.aggregate_json_filename, + forensic_json_filename=opts.forensic_json_filename, + smtp_tls_json_filename=opts.smtp_tls_json_filename, + aggregate_csv_filename=opts.aggregate_csv_filename, + forensic_csv_filename=opts.forensic_csv_filename, + smtp_tls_csv_filename=opts.smtp_tls_csv_filename, + ) if opts.save_aggregate: for report in reports_["aggregate_reports"]: try: @@ -97,16 
+125,16 @@ def process_reports(reports_): index_prefix=opts.elasticsearch_index_prefix, monthly_indexes=opts.elasticsearch_monthly_indexes, number_of_shards=shards, - number_of_replicas=replicas + number_of_replicas=replicas, ) except elastic.AlreadySaved as warning: logger.warning(warning.__str__()) except elastic.ElasticsearchError as error_: - logger.error("Elasticsearch Error: {0}".format( - error_.__str__())) + logger.error("Elasticsearch Error: {0}".format(error_.__str__())) except Exception as error_: - logger.error("Elasticsearch exception error: {}".format( - error_.__str__())) + logger.error( + "Elasticsearch exception error: {}".format(error_.__str__()) + ) try: if opts.opensearch_hosts: @@ -118,24 +146,24 @@ def process_reports(reports_): index_prefix=opts.opensearch_index_prefix, monthly_indexes=opts.opensearch_monthly_indexes, number_of_shards=shards, - number_of_replicas=replicas + number_of_replicas=replicas, ) except opensearch.AlreadySaved as warning: logger.warning(warning.__str__()) except opensearch.OpenSearchError as error_: - logger.error("OpenSearch Error: {0}".format( - error_.__str__())) + logger.error("OpenSearch Error: {0}".format(error_.__str__())) except Exception as error_: - logger.error("OpenSearch exception error: {}".format( - error_.__str__())) + logger.error( + "OpenSearch exception error: {}".format(error_.__str__()) + ) try: if opts.kafka_hosts: kafka_client.save_aggregate_reports_to_kafka( - report, kafka_aggregate_topic) + report, kafka_aggregate_topic + ) except Exception as error_: - logger.error("Kafka Error: {0}".format( - error_.__str__())) + logger.error("Kafka Error: {0}".format(error_.__str__())) try: if opts.s3_bucket: @@ -158,12 +186,8 @@ def process_reports(reports_): try: if opts.webhook_aggregate_url: webhook_client.save_aggregate_report_to_webhook( - json.dumps( - report, - ensure_ascii=False, - indent=2 - ) - ) + json.dumps(report, ensure_ascii=False, indent=2) + ) except Exception as error_: logger.error("Webhook Error: {0}".format(error_.__str__())) @@ -171,8 +195,7 @@ def process_reports(reports_): try: aggregate_reports_ = reports_["aggregate_reports"] if len(aggregate_reports_) > 0: - hec_client.save_aggregate_reports_to_splunk( - aggregate_reports_) + hec_client.save_aggregate_reports_to_splunk(aggregate_reports_) except splunk.SplunkError as e: logger.error("Splunk HEC error: {0}".format(e.__str__())) @@ -188,12 +211,12 @@ def process_reports(reports_): index_prefix=opts.elasticsearch_index_prefix, monthly_indexes=opts.elasticsearch_monthly_indexes, number_of_shards=shards, - number_of_replicas=replicas) + number_of_replicas=replicas, + ) except elastic.AlreadySaved as warning: logger.warning(warning.__str__()) except elastic.ElasticsearchError as error_: - logger.error("Elasticsearch Error: {0}".format( - error_.__str__())) + logger.error("Elasticsearch Error: {0}".format(error_.__str__())) except InvalidDMARCReport as error_: logger.error(error_.__str__()) @@ -207,22 +230,22 @@ def process_reports(reports_): index_prefix=opts.opensearch_index_prefix, monthly_indexes=opts.opensearch_monthly_indexes, number_of_shards=shards, - number_of_replicas=replicas) + number_of_replicas=replicas, + ) except opensearch.AlreadySaved as warning: logger.warning(warning.__str__()) except opensearch.OpenSearchError as error_: - logger.error("OpenSearch Error: {0}".format( - error_.__str__())) + logger.error("OpenSearch Error: {0}".format(error_.__str__())) except InvalidDMARCReport as error_: logger.error(error_.__str__()) try: if 
opts.kafka_hosts: kafka_client.save_forensic_reports_to_kafka( - report, kafka_forensic_topic) + report, kafka_forensic_topic + ) except Exception as error_: - logger.error("Kafka Error: {0}".format( - error_.__str__())) + logger.error("Kafka Error: {0}".format(error_.__str__())) try: if opts.s3_bucket: @@ -245,10 +268,8 @@ def process_reports(reports_): try: if opts.webhook_forensic_url: webhook_client.save_forensic_report_to_webhook( - json.dumps( - report, - ensure_ascii=False, - indent=2)) + json.dumps(report, ensure_ascii=False, indent=2) + ) except Exception as error_: logger.error("Webhook Error: {0}".format(error_.__str__())) @@ -256,8 +277,7 @@ def process_reports(reports_): try: forensic_reports_ = reports_["forensic_reports"] if len(forensic_reports_) > 0: - hec_client.save_forensic_reports_to_splunk( - forensic_reports_) + hec_client.save_forensic_reports_to_splunk(forensic_reports_) except splunk.SplunkError as e: logger.error("Splunk HEC error: {0}".format(e.__str__())) @@ -273,12 +293,12 @@ def process_reports(reports_): index_prefix=opts.elasticsearch_index_prefix, monthly_indexes=opts.elasticsearch_monthly_indexes, number_of_shards=shards, - number_of_replicas=replicas) + number_of_replicas=replicas, + ) except elastic.AlreadySaved as warning: logger.warning(warning.__str__()) except elastic.ElasticsearchError as error_: - logger.error("Elasticsearch Error: {0}".format( - error_.__str__())) + logger.error("Elasticsearch Error: {0}".format(error_.__str__())) except InvalidDMARCReport as error_: logger.error(error_.__str__()) @@ -292,22 +312,22 @@ def process_reports(reports_): index_prefix=opts.opensearch_index_prefix, monthly_indexes=opts.opensearch_monthly_indexes, number_of_shards=shards, - number_of_replicas=replicas) + number_of_replicas=replicas, + ) except opensearch.AlreadySaved as warning: logger.warning(warning.__str__()) except opensearch.OpenSearchError as error_: - logger.error("OpenSearch Error: {0}".format( - error_.__str__())) + logger.error("OpenSearch Error: {0}".format(error_.__str__())) except InvalidDMARCReport as error_: logger.error(error_.__str__()) try: if opts.kafka_hosts: kafka_client.save_smtp_tls_reports_to_kafka( - smtp_tls_reports, kafka_smtp_tls_topic) + smtp_tls_reports, kafka_smtp_tls_topic + ) except Exception as error_: - logger.error("Kafka Error: {0}".format( - error_.__str__())) + logger.error("Kafka Error: {0}".format(error_.__str__())) try: if opts.s3_bucket: @@ -330,10 +350,8 @@ def process_reports(reports_): try: if opts.webhook_smtp_tls_url: webhook_client.save_smtp_tls_report_to_webhook( - json.dumps( - report, - ensure_ascii=False, - indent=2)) + json.dumps(report, ensure_ascii=False, indent=2) + ) except Exception as error_: logger.error("Webhook Error: {0}".format(error_.__str__())) @@ -341,8 +359,7 @@ def process_reports(reports_): try: smtp_tls_reports_ = reports_["smtp_tls_reports"] if len(smtp_tls_reports_) > 0: - hec_client.save_smtp_tls_reports_to_splunk( - smtp_tls_reports_) + hec_client.save_smtp_tls_reports_to_splunk(smtp_tls_reports_) except splunk.SplunkError as e: logger.error("Splunk HEC error: {0}".format(e.__str__())) @@ -356,76 +373,105 @@ def process_reports(reports_): dcr_immutable_id=opts.la_dcr_immutable_id, dcr_aggregate_stream=opts.la_dcr_aggregate_stream, dcr_forensic_stream=opts.la_dcr_forensic_stream, - dcr_smtp_tls_stream=opts.la_dcr_smtp_tls_stream + dcr_smtp_tls_stream=opts.la_dcr_smtp_tls_stream, ) la_client.publish_results( reports_, opts.save_aggregate, opts.save_forensic, - opts.save_smtp_tls) + 
opts.save_smtp_tls, + ) except loganalytics.LogAnalyticsException as e: - logger.error( - "Log Analytics error: {0}".format(e.__str__())) + logger.error("Log Analytics error: {0}".format(e.__str__())) except Exception as e: logger.error( - "Unknown error occurred" + - " during the publishing" + - " to Log Analytics: " + - e.__str__()) + "Unknown error occurred" + + " during the publishing" + + " to Log Analytics: " + + e.__str__() + ) arg_parser = ArgumentParser(description="Parses DMARC reports") - arg_parser.add_argument("-c", "--config-file", - help="a path to a configuration file " - "(--silent implied)") - arg_parser.add_argument("file_path", nargs="*", - help="one or more paths to aggregate or forensic " - "report files, emails, or mbox files'") - strip_attachment_help = "remove attachment payloads from forensic " \ - "report output" - arg_parser.add_argument("--strip-attachment-payloads", - help=strip_attachment_help, action="store_true") - arg_parser.add_argument("-o", "--output", - help="write output files to the given directory") - arg_parser.add_argument("--aggregate-json-filename", - help="filename for the aggregate JSON output file", - default="aggregate.json") - arg_parser.add_argument("--forensic-json-filename", - help="filename for the forensic JSON output file", - default="forensic.json") - arg_parser.add_argument("--smtp-tls-json-filename", - help="filename for the SMTP TLS JSON output file", - default="smtp_tls.json") - arg_parser.add_argument("--aggregate-csv-filename", - help="filename for the aggregate CSV output file", - default="aggregate.csv") - arg_parser.add_argument("--forensic-csv-filename", - help="filename for the forensic CSV output file", - default="forensic.csv") - arg_parser.add_argument("--smtp-tls-csv-filename", - help="filename for the SMTP TLS CSV output file", - default="smtp_tls.csv") - arg_parser.add_argument("-n", "--nameservers", nargs="+", - help="nameservers to query") - arg_parser.add_argument("-t", "--dns_timeout", - help="number of seconds to wait for an answer " - "from DNS (default: 2.0)", - type=float, - default=2.0) - arg_parser.add_argument("--offline", action="store_true", - help="do not make online queries for geolocation " - " or DNS") - arg_parser.add_argument("-s", "--silent", action="store_true", - help="only print errors") - arg_parser.add_argument("-w", "--warnings", action="store_true", - help="print warnings in addition to errors") - arg_parser.add_argument("--verbose", action="store_true", - help="more verbose output") - arg_parser.add_argument("--debug", action="store_true", - help="print debugging information") - arg_parser.add_argument("--log-file", default=None, - help="output logging to a file") - arg_parser.add_argument("-v", "--version", action="version", - version=__version__) + arg_parser.add_argument( + "-c", + "--config-file", + help="a path to a configuration file " "(--silent implied)", + ) + arg_parser.add_argument( + "file_path", + nargs="*", + help="one or more paths to aggregate or forensic " + "report files, emails, or mbox files'", + ) + strip_attachment_help = "remove attachment payloads from forensic " "report output" + arg_parser.add_argument( + "--strip-attachment-payloads", help=strip_attachment_help, action="store_true" + ) + arg_parser.add_argument( + "-o", "--output", help="write output files to the given directory" + ) + arg_parser.add_argument( + "--aggregate-json-filename", + help="filename for the aggregate JSON output file", + default="aggregate.json", + ) + arg_parser.add_argument( + 
"--forensic-json-filename", + help="filename for the forensic JSON output file", + default="forensic.json", + ) + arg_parser.add_argument( + "--smtp-tls-json-filename", + help="filename for the SMTP TLS JSON output file", + default="smtp_tls.json", + ) + arg_parser.add_argument( + "--aggregate-csv-filename", + help="filename for the aggregate CSV output file", + default="aggregate.csv", + ) + arg_parser.add_argument( + "--forensic-csv-filename", + help="filename for the forensic CSV output file", + default="forensic.csv", + ) + arg_parser.add_argument( + "--smtp-tls-csv-filename", + help="filename for the SMTP TLS CSV output file", + default="smtp_tls.csv", + ) + arg_parser.add_argument( + "-n", "--nameservers", nargs="+", help="nameservers to query" + ) + arg_parser.add_argument( + "-t", + "--dns_timeout", + help="number of seconds to wait for an answer " "from DNS (default: 2.0)", + type=float, + default=2.0, + ) + arg_parser.add_argument( + "--offline", + action="store_true", + help="do not make online queries for geolocation " " or DNS", + ) + arg_parser.add_argument( + "-s", "--silent", action="store_true", help="only print errors" + ) + arg_parser.add_argument( + "-w", + "--warnings", + action="store_true", + help="print warnings in addition to errors", + ) + arg_parser.add_argument( + "--verbose", action="store_true", help="more verbose output" + ) + arg_parser.add_argument( + "--debug", action="store_true", help="print debugging information" + ) + arg_parser.add_argument("--log-file", default=None, help="output logging to a file") + arg_parser.add_argument("-v", "--version", action="version", version=__version__) aggregate_reports = [] forensic_reports = [] @@ -433,136 +479,137 @@ def process_reports(reports_): args = arg_parser.parse_args() - default_gmail_api_scope = 'https://www.googleapis.com/auth/gmail.modify' - - opts = Namespace(file_path=args.file_path, - config_file=args.config_file, - offline=args.offline, - strip_attachment_payloads=args.strip_attachment_payloads, - output=args.output, - aggregate_csv_filename=args.aggregate_csv_filename, - aggregate_json_filename=args.aggregate_json_filename, - forensic_csv_filename=args.forensic_csv_filename, - forensic_json_filename=args.forensic_json_filename, - smtp_tls_json_filename=args.smtp_tls_json_filename, - smtp_tls_csv_filename=args.smtp_tls_csv_filename, - nameservers=args.nameservers, - dns_test_address='1.1.1.1', - silent=args.silent, - warnings=args.warnings, - dns_timeout=args.dns_timeout, - debug=args.debug, - verbose=args.verbose, - save_aggregate=False, - save_forensic=False, - save_smtp_tls=False, - mailbox_reports_folder="INBOX", - mailbox_archive_folder="Archive", - mailbox_watch=False, - mailbox_delete=False, - mailbox_test=False, - mailbox_batch_size=10, - mailbox_check_timeout=30, - imap_host=None, - imap_skip_certificate_verification=False, - imap_ssl=True, - imap_port=993, - imap_timeout=30, - imap_max_retries=4, - imap_user=None, - imap_password=None, - graph_auth_method=None, - graph_user=None, - graph_password=None, - graph_client_id=None, - graph_client_secret=None, - graph_tenant_id=None, - graph_mailbox=None, - graph_allow_unencrypted_storage=False, - hec=None, - hec_token=None, - hec_index=None, - hec_skip_certificate_verification=False, - elasticsearch_hosts=None, - elasticsearch_timeout=60, - elasticsearch_number_of_shards=1, - elasticsearch_number_of_replicas=0, - elasticsearch_index_suffix=None, - elasticsearch_index_prefix=None, - elasticsearch_ssl=True, - elasticsearch_ssl_cert_path=None, - 
elasticsearch_monthly_indexes=False, - elasticsearch_username=None, - elasticsearch_password=None, - elasticsearch_apiKey=None, - opensearch_hosts=None, - opensearch_timeout=60, - opensearch_number_of_shards=1, - opensearch_number_of_replicas=0, - opensearch_index_suffix=None, - opensearch_index_prefix=None, - opensearch_ssl=True, - opensearch_ssl_cert_path=None, - opensearch_monthly_indexes=False, - opensearch_username=None, - opensearch_password=None, - opensearch_apiKey=None, - kafka_hosts=None, - kafka_username=None, - kafka_password=None, - kafka_aggregate_topic=None, - kafka_forensic_topic=None, - kafka_smtp_tls_topic=None, - kafka_ssl=False, - kafka_skip_certificate_verification=False, - smtp_host=None, - smtp_port=25, - smtp_ssl=False, - smtp_skip_certificate_verification=False, - smtp_user=None, - smtp_password=None, - smtp_from=None, - smtp_to=[], - smtp_subject="parsedmarc report", - smtp_message="Please see the attached DMARC results.", - s3_bucket=None, - s3_path=None, - s3_region_name=None, - s3_endpoint_url=None, - s3_access_key_id=None, - s3_secret_access_key=None, - syslog_server=None, - syslog_port=None, - gmail_api_credentials_file=None, - gmail_api_token_file=None, - gmail_api_include_spam_trash=False, - gmail_api_paginate_messages=True, - gmail_api_scopes=[], - gmail_api_oauth2_port=8080, - maildir_path=None, - maildir_create=False, - log_file=args.log_file, - n_procs=1, - ip_db_path=None, - always_use_local_files=False, - reverse_dns_map_path=None, - reverse_dns_map_url=None, - la_client_id=None, - la_client_secret=None, - la_tenant_id=None, - la_dce=None, - la_dcr_immutable_id=None, - la_dcr_aggregate_stream=None, - la_dcr_forensic_stream=None, - la_dcr_smtp_tls_stream=None, - gelf_host=None, - gelf_port=None, - gelf_mode=None, - webhook_aggregate_url=None, - webhook_forensic_url=None, - webhook_smtp_tls_url=None, - webhook_timeout=60 - ) + default_gmail_api_scope = "https://www.googleapis.com/auth/gmail.modify" + + opts = Namespace( + file_path=args.file_path, + config_file=args.config_file, + offline=args.offline, + strip_attachment_payloads=args.strip_attachment_payloads, + output=args.output, + aggregate_csv_filename=args.aggregate_csv_filename, + aggregate_json_filename=args.aggregate_json_filename, + forensic_csv_filename=args.forensic_csv_filename, + forensic_json_filename=args.forensic_json_filename, + smtp_tls_json_filename=args.smtp_tls_json_filename, + smtp_tls_csv_filename=args.smtp_tls_csv_filename, + nameservers=args.nameservers, + dns_test_address="1.1.1.1", + silent=args.silent, + warnings=args.warnings, + dns_timeout=args.dns_timeout, + debug=args.debug, + verbose=args.verbose, + save_aggregate=False, + save_forensic=False, + save_smtp_tls=False, + mailbox_reports_folder="INBOX", + mailbox_archive_folder="Archive", + mailbox_watch=False, + mailbox_delete=False, + mailbox_test=False, + mailbox_batch_size=10, + mailbox_check_timeout=30, + imap_host=None, + imap_skip_certificate_verification=False, + imap_ssl=True, + imap_port=993, + imap_timeout=30, + imap_max_retries=4, + imap_user=None, + imap_password=None, + graph_auth_method=None, + graph_user=None, + graph_password=None, + graph_client_id=None, + graph_client_secret=None, + graph_tenant_id=None, + graph_mailbox=None, + graph_allow_unencrypted_storage=False, + hec=None, + hec_token=None, + hec_index=None, + hec_skip_certificate_verification=False, + elasticsearch_hosts=None, + elasticsearch_timeout=60, + elasticsearch_number_of_shards=1, + elasticsearch_number_of_replicas=0, + 
elasticsearch_index_suffix=None, + elasticsearch_index_prefix=None, + elasticsearch_ssl=True, + elasticsearch_ssl_cert_path=None, + elasticsearch_monthly_indexes=False, + elasticsearch_username=None, + elasticsearch_password=None, + elasticsearch_apiKey=None, + opensearch_hosts=None, + opensearch_timeout=60, + opensearch_number_of_shards=1, + opensearch_number_of_replicas=0, + opensearch_index_suffix=None, + opensearch_index_prefix=None, + opensearch_ssl=True, + opensearch_ssl_cert_path=None, + opensearch_monthly_indexes=False, + opensearch_username=None, + opensearch_password=None, + opensearch_apiKey=None, + kafka_hosts=None, + kafka_username=None, + kafka_password=None, + kafka_aggregate_topic=None, + kafka_forensic_topic=None, + kafka_smtp_tls_topic=None, + kafka_ssl=False, + kafka_skip_certificate_verification=False, + smtp_host=None, + smtp_port=25, + smtp_ssl=False, + smtp_skip_certificate_verification=False, + smtp_user=None, + smtp_password=None, + smtp_from=None, + smtp_to=[], + smtp_subject="parsedmarc report", + smtp_message="Please see the attached DMARC results.", + s3_bucket=None, + s3_path=None, + s3_region_name=None, + s3_endpoint_url=None, + s3_access_key_id=None, + s3_secret_access_key=None, + syslog_server=None, + syslog_port=None, + gmail_api_credentials_file=None, + gmail_api_token_file=None, + gmail_api_include_spam_trash=False, + gmail_api_paginate_messages=True, + gmail_api_scopes=[], + gmail_api_oauth2_port=8080, + maildir_path=None, + maildir_create=False, + log_file=args.log_file, + n_procs=1, + ip_db_path=None, + always_use_local_files=False, + reverse_dns_map_path=None, + reverse_dns_map_url=None, + la_client_id=None, + la_client_secret=None, + la_tenant_id=None, + la_dce=None, + la_dcr_immutable_id=None, + la_dcr_aggregate_stream=None, + la_dcr_forensic_stream=None, + la_dcr_smtp_tls_stream=None, + gelf_host=None, + gelf_port=None, + gelf_mode=None, + webhook_aggregate_url=None, + webhook_forensic_url=None, + webhook_smtp_tls_url=None, + webhook_timeout=60, + ) args = arg_parser.parse_args() if args.config_file: @@ -579,45 +626,44 @@ def process_reports(reports_): opts.offline = general_config.getboolean("offline") if "strip_attachment_payloads" in general_config: opts.strip_attachment_payloads = general_config.getboolean( - "strip_attachment_payloads") + "strip_attachment_payloads" + ) if "output" in general_config: opts.output = general_config["output"] if "aggregate_json_filename" in general_config: - opts.aggregate_json_filename = general_config[ - "aggregate_json_filename"] + opts.aggregate_json_filename = general_config["aggregate_json_filename"] if "forensic_json_filename" in general_config: - opts.forensic_json_filename = general_config[ - "forensic_json_filename"] + opts.forensic_json_filename = general_config["forensic_json_filename"] if "smtp_tls_json_filename" in general_config: - opts.smtp_tls_json_filename = general_config[ - "smtp_tls_json_filename"] + opts.smtp_tls_json_filename = general_config["smtp_tls_json_filename"] if "aggregate_csv_filename" in general_config: - opts.aggregate_csv_filename = general_config[ - "aggregate_csv_filename"] + opts.aggregate_csv_filename = general_config["aggregate_csv_filename"] if "forensic_csv_filename" in general_config: - opts.forensic_csv_filename = general_config[ - "forensic_csv_filename"] + opts.forensic_csv_filename = general_config["forensic_csv_filename"] if "smtp_tls_csv_filename" in general_config: - opts.smtp_tls_csv_filename = general_config[ - "smtp_tls_csv_filename"] + 
opts.smtp_tls_csv_filename = general_config["smtp_tls_csv_filename"] if "dns_timeout" in general_config: opts.dns_timeout = general_config.getfloat("dns_timeout") if "dns_test_address" in general_config: - opts.dns_test_address=general_config["dns_test_address"] + opts.dns_test_address = general_config["dns_test_address"] if "nameservers" in general_config: opts.nameservers = _str_to_list(general_config["nameservers"]) # nameservers pre-flight check - dummy_hostname=None + dummy_hostname = None try: - dummy_hostname=get_reverse_dns(opts.dns_test_address, - nameservers=opts.nameservers, - timeout=opts.dns_timeout) - except Exception as ns_error: + dummy_hostname = get_reverse_dns( + opts.dns_test_address, + nameservers=opts.nameservers, + timeout=opts.dns_timeout, + ) + except Exception as ns_error: logger.critical("DNS pre-flight check failed: {}".format(ns_error)) exit(-1) if not dummy_hostname: - logger.critical("DNS pre-flight check failed: no PTR record for " - "{} from {}".format(opts.dns_test_address,opts.nameservers)) + logger.critical( + "DNS pre-flight check failed: no PTR record for " + "{} from {}".format(opts.dns_test_address, opts.nameservers) + ) exit(-1) if "save_aggregate" in general_config: opts.save_aggregate = general_config["save_aggregate"] @@ -643,13 +689,12 @@ def process_reports(reports_): opts.ip_db_path = None if "always_use_local_files" in general_config: opts.always_use_local_files = general_config.getboolean( - "always_use_local_files") + "always_use_local_files" + ) if "reverse_dns_map_path" in general_config: - opts.reverse_dns_map_path = general_config[ - "reverse_dns_path"] + opts.reverse_dns_map_path = general_config["reverse_dns_path"] if "reverse_dns_map_url" in general_config: - opts.reverse_dns_map_url = general_config[ - "reverse_dns_url"] + opts.reverse_dns_map_url = general_config["reverse_dns_url"] if "mailbox" in config.sections(): mailbox_config = config["mailbox"] @@ -668,20 +713,20 @@ def process_reports(reports_): if "batch_size" in mailbox_config: opts.mailbox_batch_size = mailbox_config.getint("batch_size") if "check_timeout" in mailbox_config: - opts.mailbox_check_timeout = mailbox_config.getint( - "check_timeout") + opts.mailbox_check_timeout = mailbox_config.getint("check_timeout") if "imap" in config.sections(): imap_config = config["imap"] if "watch" in imap_config: - logger.warning("Starting in 8.0.0, the watch option has been " - "moved from the imap configuration section to " - "the mailbox configuration section.") + logger.warning( + "Starting in 8.0.0, the watch option has been " + "moved from the imap configuration section to " + "the mailbox configuration section." 
+ ) if "host" in imap_config: opts.imap_host = imap_config["host"] else: - logger.error("host setting missing from the " - "imap config section") + logger.error("host setting missing from the " "imap config section") exit(-1) if "port" in imap_config: opts.imap_port = imap_config.getint("port") @@ -692,65 +737,78 @@ def process_reports(reports_): if "ssl" in imap_config: opts.imap_ssl = imap_config.getboolean("ssl") if "skip_certificate_verification" in imap_config: - imap_verify = imap_config.getboolean( - "skip_certificate_verification") + imap_verify = imap_config.getboolean("skip_certificate_verification") opts.imap_skip_certificate_verification = imap_verify if "user" in imap_config: opts.imap_user = imap_config["user"] else: - logger.critical("user setting missing from the " - "imap config section") + logger.critical("user setting missing from the " "imap config section") exit(-1) if "password" in imap_config: opts.imap_password = imap_config["password"] else: - logger.critical("password setting missing from the " - "imap config section") + logger.critical( + "password setting missing from the " "imap config section" + ) exit(-1) if "reports_folder" in imap_config: opts.mailbox_reports_folder = imap_config["reports_folder"] - logger.warning("Use of the reports_folder option in the imap " - "configuration section has been deprecated. " - "Use this option in the mailbox configuration " - "section instead.") + logger.warning( + "Use of the reports_folder option in the imap " + "configuration section has been deprecated. " + "Use this option in the mailbox configuration " + "section instead." + ) if "archive_folder" in imap_config: opts.mailbox_archive_folder = imap_config["archive_folder"] - logger.warning("Use of the archive_folder option in the imap " - "configuration section has been deprecated. " - "Use this option in the mailbox configuration " - "section instead.") + logger.warning( + "Use of the archive_folder option in the imap " + "configuration section has been deprecated. " + "Use this option in the mailbox configuration " + "section instead." + ) if "watch" in imap_config: opts.mailbox_watch = imap_config.getboolean("watch") - logger.warning("Use of the watch option in the imap " - "configuration section has been deprecated. " - "Use this option in the mailbox configuration " - "section instead.") + logger.warning( + "Use of the watch option in the imap " + "configuration section has been deprecated. " + "Use this option in the mailbox configuration " + "section instead." + ) if "delete" in imap_config: - logger.warning("Use of the delete option in the imap " - "configuration section has been deprecated. " - "Use this option in the mailbox configuration " - "section instead.") + logger.warning( + "Use of the delete option in the imap " + "configuration section has been deprecated. " + "Use this option in the mailbox configuration " + "section instead." + ) if "test" in imap_config: opts.mailbox_test = imap_config.getboolean("test") - logger.warning("Use of the test option in the imap " - "configuration section has been deprecated. " - "Use this option in the mailbox configuration " - "section instead.") + logger.warning( + "Use of the test option in the imap " + "configuration section has been deprecated. " + "Use this option in the mailbox configuration " + "section instead." + ) if "batch_size" in imap_config: opts.mailbox_batch_size = imap_config.getint("batch_size") - logger.warning("Use of the batch_size option in the imap " - "configuration section has been deprecated. 
" - "Use this option in the mailbox configuration " - "section instead.") + logger.warning( + "Use of the batch_size option in the imap " + "configuration section has been deprecated. " + "Use this option in the mailbox configuration " + "section instead." + ) if "msgraph" in config.sections(): graph_config = config["msgraph"] opts.graph_token_file = graph_config.get("token_file", ".token") if "auth_method" not in graph_config: - logger.info("auth_method setting missing from the " - "msgraph config section " - "defaulting to UsernamePassword") + logger.info( + "auth_method setting missing from the " + "msgraph config section " + "defaulting to UsernamePassword" + ) opts.graph_auth_method = AuthMethod.UsernamePassword.name else: opts.graph_auth_method = graph_config["auth_method"] @@ -759,19 +817,23 @@ def process_reports(reports_): if "user" in graph_config: opts.graph_user = graph_config["user"] else: - logger.critical("user setting missing from the " - "msgraph config section") + logger.critical( + "user setting missing from the " "msgraph config section" + ) exit(-1) if "password" in graph_config: opts.graph_password = graph_config["password"] else: - logger.critical("password setting missing from the " - "msgraph config section") + logger.critical( + "password setting missing from the " "msgraph config section" + ) if "client_secret" in graph_config: opts.graph_client_secret = graph_config["client_secret"] else: - logger.critical("client_secret setting missing from the " - "msgraph config section") + logger.critical( + "client_secret setting missing from the " + "msgraph config section" + ) exit(-1) if opts.graph_auth_method == AuthMethod.DeviceCode.name: @@ -780,159 +842,154 @@ def process_reports(reports_): if opts.graph_auth_method != AuthMethod.UsernamePassword.name: if "tenant_id" in graph_config: - opts.graph_tenant_id = graph_config['tenant_id'] + opts.graph_tenant_id = graph_config["tenant_id"] else: - logger.critical("tenant_id setting missing from the " - "msgraph config section") + logger.critical( + "tenant_id setting missing from the " "msgraph config section" + ) exit(-1) if opts.graph_auth_method == AuthMethod.ClientSecret.name: if "client_secret" in graph_config: opts.graph_client_secret = graph_config["client_secret"] else: - logger.critical("client_secret setting missing from the " - "msgraph config section") + logger.critical( + "client_secret setting missing from the " + "msgraph config section" + ) exit(-1) if "client_id" in graph_config: opts.graph_client_id = graph_config["client_id"] else: - logger.critical("client_id setting missing from the " - "msgraph config section") + logger.critical( + "client_id setting missing from the " "msgraph config section" + ) exit(-1) if "mailbox" in graph_config: opts.graph_mailbox = graph_config["mailbox"] elif opts.graph_auth_method != AuthMethod.UsernamePassword.name: - logger.critical("mailbox setting missing from the " - "msgraph config section") + logger.critical( + "mailbox setting missing from the " "msgraph config section" + ) exit(-1) if "allow_unencrypted_storage" in graph_config: opts.graph_allow_unencrypted_storage = graph_config.getboolean( - "allow_unencrypted_storage") + "allow_unencrypted_storage" + ) if "elasticsearch" in config: elasticsearch_config = config["elasticsearch"] if "hosts" in elasticsearch_config: - opts.elasticsearch_hosts = _str_to_list(elasticsearch_config[ - "hosts"]) + opts.elasticsearch_hosts = _str_to_list(elasticsearch_config["hosts"]) else: - logger.critical("hosts setting missing from 
the " - "elasticsearch config section") + logger.critical( + "hosts setting missing from the " "elasticsearch config section" + ) exit(-1) if "timeout" in elasticsearch_config: timeout = elasticsearch_config.getfloat("timeout") opts.elasticsearch_timeout = timeout if "number_of_shards" in elasticsearch_config: - number_of_shards = elasticsearch_config.getint( - "number_of_shards") + number_of_shards = elasticsearch_config.getint("number_of_shards") opts.elasticsearch_number_of_shards = number_of_shards if "number_of_replicas" in elasticsearch_config: number_of_replicas = elasticsearch_config.getint( - "number_of_replicas") + "number_of_replicas" + ) opts.elasticsearch_number_of_replicas = number_of_replicas if "index_suffix" in elasticsearch_config: - opts.elasticsearch_index_suffix = elasticsearch_config[ - "index_suffix"] + opts.elasticsearch_index_suffix = elasticsearch_config["index_suffix"] if "index_prefix" in elasticsearch_config: - opts.elasticsearch_index_prefix = elasticsearch_config[ - "index_prefix"] + opts.elasticsearch_index_prefix = elasticsearch_config["index_prefix"] if "monthly_indexes" in elasticsearch_config: monthly = elasticsearch_config.getboolean("monthly_indexes") opts.elasticsearch_monthly_indexes = monthly if "ssl" in elasticsearch_config: - opts.elasticsearch_ssl = elasticsearch_config.getboolean( - "ssl") + opts.elasticsearch_ssl = elasticsearch_config.getboolean("ssl") if "cert_path" in elasticsearch_config: - opts.elasticsearch_ssl_cert_path = elasticsearch_config[ - "cert_path"] + opts.elasticsearch_ssl_cert_path = elasticsearch_config["cert_path"] if "user" in elasticsearch_config: - opts.elasticsearch_username = elasticsearch_config[ - "user"] + opts.elasticsearch_username = elasticsearch_config["user"] if "password" in elasticsearch_config: - opts.elasticsearch_password = elasticsearch_config[ - "password"] + opts.elasticsearch_password = elasticsearch_config["password"] if "apiKey" in elasticsearch_config: - opts.elasticsearch_apiKey = elasticsearch_config[ - "apiKey"] + opts.elasticsearch_apiKey = elasticsearch_config["apiKey"] if "opensearch" in config: opensearch_config = config["opensearch"] if "hosts" in opensearch_config: - opts.opensearch_hosts = _str_to_list(opensearch_config[ - "hosts"]) + opts.opensearch_hosts = _str_to_list(opensearch_config["hosts"]) else: - logger.critical("hosts setting missing from the " - "opensearch config section") + logger.critical( + "hosts setting missing from the " "opensearch config section" + ) exit(-1) if "timeout" in opensearch_config: timeout = opensearch_config.getfloat("timeout") opts.opensearch_timeout = timeout if "number_of_shards" in opensearch_config: - number_of_shards = opensearch_config.getint( - "number_of_shards") + number_of_shards = opensearch_config.getint("number_of_shards") opts.opensearch_number_of_shards = number_of_shards if "number_of_replicas" in opensearch_config: - number_of_replicas = opensearch_config.getint( - "number_of_replicas") + number_of_replicas = opensearch_config.getint("number_of_replicas") opts.opensearch_number_of_replicas = number_of_replicas if "index_suffix" in opensearch_config: - opts.opensearch_index_suffix = opensearch_config[ - "index_suffix"] + opts.opensearch_index_suffix = opensearch_config["index_suffix"] if "index_prefix" in opensearch_config: - opts.opensearch_index_prefix = opensearch_config[ - "index_prefix"] + opts.opensearch_index_prefix = opensearch_config["index_prefix"] if "monthly_indexes" in opensearch_config: monthly = 
opensearch_config.getboolean("monthly_indexes") opts.opensearch_monthly_indexes = monthly if "ssl" in opensearch_config: - opts.opensearch_ssl = opensearch_config.getboolean( - "ssl") + opts.opensearch_ssl = opensearch_config.getboolean("ssl") if "cert_path" in opensearch_config: - opts.opensearch_ssl_cert_path = opensearch_config[ - "cert_path"] + opts.opensearch_ssl_cert_path = opensearch_config["cert_path"] if "user" in opensearch_config: - opts.opensearch_username = opensearch_config[ - "user"] + opts.opensearch_username = opensearch_config["user"] if "password" in opensearch_config: - opts.opensearch_password = opensearch_config[ - "password"] + opts.opensearch_password = opensearch_config["password"] if "apiKey" in opensearch_config: - opts.opensearch_apiKey = opensearch_config[ - "apiKey"] + opts.opensearch_apiKey = opensearch_config["apiKey"] if "splunk_hec" in config.sections(): hec_config = config["splunk_hec"] if "url" in hec_config: opts.hec = hec_config["url"] else: - logger.critical("url setting missing from the " - "splunk_hec config section") + logger.critical( + "url setting missing from the " "splunk_hec config section" + ) exit(-1) if "token" in hec_config: opts.hec_token = hec_config["token"] else: - logger.critical("token setting missing from the " - "splunk_hec config section") + logger.critical( + "token setting missing from the " "splunk_hec config section" + ) exit(-1) if "index" in hec_config: opts.hec_index = hec_config["index"] else: - logger.critical("index setting missing from the " - "splunk_hec config section") + logger.critical( + "index setting missing from the " "splunk_hec config section" + ) exit(-1) if "skip_certificate_verification" in hec_config: opts.hec_skip_certificate_verification = hec_config[ - "skip_certificate_verification"] + "skip_certificate_verification" + ] if "kafka" in config.sections(): kafka_config = config["kafka"] if "hosts" in kafka_config: opts.kafka_hosts = _str_to_list(kafka_config["hosts"]) else: - logger.critical("hosts setting missing from the " - "kafka config section") + logger.critical( + "hosts setting missing from the " "kafka config section" + ) exit(-1) if "user" in kafka_config: opts.kafka_username = kafka_config["user"] @@ -941,64 +998,63 @@ def process_reports(reports_): if "ssl" in kafka_config: opts.kafka_ssl = kafka_config.getboolean("ssl") if "skip_certificate_verification" in kafka_config: - kafka_verify = kafka_config.getboolean( - "skip_certificate_verification") + kafka_verify = kafka_config.getboolean("skip_certificate_verification") opts.kafka_skip_certificate_verification = kafka_verify if "aggregate_topic" in kafka_config: opts.kafka_aggregate_topic = kafka_config["aggregate_topic"] else: - logger.critical("aggregate_topic setting missing from the " - "kafka config section") + logger.critical( + "aggregate_topic setting missing from the " "kafka config section" + ) exit(-1) if "forensic_topic" in kafka_config: opts.kafka_forensic_topic = kafka_config["forensic_topic"] else: - logger.critical("forensic_topic setting missing from the " - "kafka config section") + logger.critical( + "forensic_topic setting missing from the " "kafka config section" + ) if "smtp_tls_topic" in kafka_config: opts.kafka_smtp_tls_topic = kafka_config["smtp_tls_topic"] else: - logger.critical("forensic_topic setting missing from the " - "splunk_hec config section") + logger.critical( + "forensic_topic setting missing from the " + "splunk_hec config section" + ) if "smtp" in config.sections(): smtp_config = config["smtp"] if 
"host" in smtp_config: opts.smtp_host = smtp_config["host"] else: - logger.critical("host setting missing from the " - "smtp config section") + logger.critical("host setting missing from the " "smtp config section") exit(-1) if "port" in smtp_config: opts.smtp_port = smtp_config.getint("port") if "ssl" in smtp_config: opts.smtp_ssl = smtp_config.getboolean("ssl") if "skip_certificate_verification" in smtp_config: - smtp_verify = smtp_config.getboolean( - "skip_certificate_verification") + smtp_verify = smtp_config.getboolean("skip_certificate_verification") opts.smtp_skip_certificate_verification = smtp_verify if "user" in smtp_config: opts.smtp_user = smtp_config["user"] else: - logger.critical("user setting missing from the " - "smtp config section") + logger.critical("user setting missing from the " "smtp config section") exit(-1) if "password" in smtp_config: opts.smtp_password = smtp_config["password"] else: - logger.critical("password setting missing from the " - "smtp config section") + logger.critical( + "password setting missing from the " "smtp config section" + ) exit(-1) if "from" in smtp_config: opts.smtp_from = smtp_config["from"] else: - logger.critical("from setting missing from the " - "smtp config section") + logger.critical("from setting missing from the " "smtp config section") if "to" in smtp_config: opts.smtp_to = _str_to_list(smtp_config["to"]) else: - logger.critical("to setting missing from the " - "smtp config section") + logger.critical("to setting missing from the " "smtp config section") if "subject" in smtp_config: opts.smtp_subject = smtp_config["subject"] if "attachment" in smtp_config: @@ -1011,8 +1067,7 @@ def process_reports(reports_): if "bucket" in s3_config: opts.s3_bucket = s3_config["bucket"] else: - logger.critical("bucket setting missing from the " - "s3 config section") + logger.critical("bucket setting missing from the " "s3 config section") exit(-1) if "path" in s3_config: opts.s3_path = s3_config["path"] @@ -1037,8 +1092,9 @@ def process_reports(reports_): if "server" in syslog_config: opts.syslog_server = syslog_config["server"] else: - logger.critical("server setting missing from the " - "syslog config section") + logger.critical( + "server setting missing from the " "syslog config section" + ) exit(-1) if "port" in syslog_config: opts.syslog_port = syslog_config["port"] @@ -1047,68 +1103,59 @@ def process_reports(reports_): if "gmail_api" in config.sections(): gmail_api_config = config["gmail_api"] - opts.gmail_api_credentials_file = \ - gmail_api_config.get("credentials_file") - opts.gmail_api_token_file = \ - gmail_api_config.get("token_file", ".token") - opts.gmail_api_include_spam_trash = \ - gmail_api_config.getboolean("include_spam_trash", False) - opts.gmail_api_paginate_messages = \ - gmail_api_config.getboolean("paginate_messages", True) - opts.gmail_api_scopes = \ - gmail_api_config.get("scopes", - default_gmail_api_scope) - opts.gmail_api_scopes = \ - _str_to_list(opts.gmail_api_scopes) + opts.gmail_api_credentials_file = gmail_api_config.get("credentials_file") + opts.gmail_api_token_file = gmail_api_config.get("token_file", ".token") + opts.gmail_api_include_spam_trash = gmail_api_config.getboolean( + "include_spam_trash", False + ) + opts.gmail_api_paginate_messages = gmail_api_config.getboolean( + "paginate_messages", True + ) + opts.gmail_api_scopes = gmail_api_config.get( + "scopes", default_gmail_api_scope + ) + opts.gmail_api_scopes = _str_to_list(opts.gmail_api_scopes) if "oauth2_port" in gmail_api_config: - 
opts.gmail_api_oauth2_port = \ - gmail_api_config.get("oauth2_port", 8080) + opts.gmail_api_oauth2_port = gmail_api_config.get("oauth2_port", 8080) if "maildir" in config.sections(): maildir_api_config = config["maildir"] - opts.maildir_path = \ - maildir_api_config.get("maildir_path") - opts.maildir_create = \ - maildir_api_config.get("maildir_create") + opts.maildir_path = maildir_api_config.get("maildir_path") + opts.maildir_create = maildir_api_config.get("maildir_create") if "log_analytics" in config.sections(): log_analytics_config = config["log_analytics"] - opts.la_client_id = \ - log_analytics_config.get("client_id") - opts.la_client_secret = \ - log_analytics_config.get("client_secret") - opts.la_tenant_id = \ - log_analytics_config.get("tenant_id") - opts.la_dce = \ - log_analytics_config.get("dce") - opts.la_dcr_immutable_id = \ - log_analytics_config.get("dcr_immutable_id") - opts.la_dcr_aggregate_stream = \ - log_analytics_config.get("dcr_aggregate_stream") - opts.la_dcr_forensic_stream = \ - log_analytics_config.get("dcr_forensic_stream") - opts.la_dcr_smtp_tls_stream = \ - log_analytics_config.get("dcr_smtp_tls_stream") + opts.la_client_id = log_analytics_config.get("client_id") + opts.la_client_secret = log_analytics_config.get("client_secret") + opts.la_tenant_id = log_analytics_config.get("tenant_id") + opts.la_dce = log_analytics_config.get("dce") + opts.la_dcr_immutable_id = log_analytics_config.get("dcr_immutable_id") + opts.la_dcr_aggregate_stream = log_analytics_config.get( + "dcr_aggregate_stream" + ) + opts.la_dcr_forensic_stream = log_analytics_config.get( + "dcr_forensic_stream" + ) + opts.la_dcr_smtp_tls_stream = log_analytics_config.get( + "dcr_smtp_tls_stream" + ) if "gelf" in config.sections(): gelf_config = config["gelf"] if "host" in gelf_config: opts.gelf_host = gelf_config["host"] else: - logger.critical("host setting missing from the " - "gelf config section") + logger.critical("host setting missing from the " "gelf config section") exit(-1) if "port" in gelf_config: opts.gelf_port = gelf_config["port"] else: - logger.critical("port setting missing from the " - "gelf config section") + logger.critical("port setting missing from the " "gelf config section") exit(-1) if "mode" in gelf_config: opts.gelf_mode = gelf_config["mode"] else: - logger.critical("mode setting missing from the " - "gelf config section") + logger.critical("mode setting missing from the " "gelf config section") exit(-1) if "webhook" in config.sections(): @@ -1136,18 +1183,21 @@ def process_reports(reports_): log_file.close() fh = logging.FileHandler(opts.log_file) formatter = logging.Formatter( - '%(asctime)s - ' - '%(levelname)s - [%(filename)s:%(lineno)d] - %(message)s') + "%(asctime)s - " + "%(levelname)s - [%(filename)s:%(lineno)d] - %(message)s" + ) fh.setFormatter(formatter) logger.addHandler(fh) except Exception as error: logger.warning("Unable to write to log file: {}".format(error)) - if opts.imap_host is None \ - and opts.graph_client_id is None \ - and opts.gmail_api_credentials_file is None \ - and opts.maildir_path is None \ - and len(opts.file_path) == 0: + if ( + opts.imap_host is None + and opts.graph_client_id is None + and opts.gmail_api_credentials_file is None + and opts.maildir_path is None + and len(opts.file_path) == 0 + ): logger.error("You must supply input files or a mailbox connection") exit(1) @@ -1161,31 +1211,27 @@ def process_reports(reports_): es_smtp_tls_index = "smtp_tls" if opts.elasticsearch_index_suffix: suffix = opts.elasticsearch_index_suffix - 
es_aggregate_index = "{0}_{1}".format( - es_aggregate_index, suffix) - es_forensic_index = "{0}_{1}".format( - es_forensic_index, suffix) - es_smtp_tls_index = "{0}_{1}".format( - es_smtp_tls_index, suffix - ) + es_aggregate_index = "{0}_{1}".format(es_aggregate_index, suffix) + es_forensic_index = "{0}_{1}".format(es_forensic_index, suffix) + es_smtp_tls_index = "{0}_{1}".format(es_smtp_tls_index, suffix) if opts.elasticsearch_index_prefix: prefix = opts.elasticsearch_index_prefix - es_aggregate_index = "{0}{1}".format( - prefix, es_aggregate_index) - es_forensic_index = "{0}{1}".format( - prefix, es_forensic_index) - es_smtp_tls_index = "{0}{1}".format( - prefix, es_smtp_tls_index - ) - elastic.set_hosts(opts.elasticsearch_hosts, - opts.elasticsearch_ssl, - opts.elasticsearch_ssl_cert_path, - opts.elasticsearch_username, - opts.elasticsearch_password, - opts.elasticsearch_apiKey, - timeout=opts.elasticsearch_timeout) - elastic.migrate_indexes(aggregate_indexes=[es_aggregate_index], - forensic_indexes=[es_forensic_index]) + es_aggregate_index = "{0}{1}".format(prefix, es_aggregate_index) + es_forensic_index = "{0}{1}".format(prefix, es_forensic_index) + es_smtp_tls_index = "{0}{1}".format(prefix, es_smtp_tls_index) + elastic.set_hosts( + opts.elasticsearch_hosts, + opts.elasticsearch_ssl, + opts.elasticsearch_ssl_cert_path, + opts.elasticsearch_username, + opts.elasticsearch_password, + opts.elasticsearch_apiKey, + timeout=opts.elasticsearch_timeout, + ) + elastic.migrate_indexes( + aggregate_indexes=[es_aggregate_index], + forensic_indexes=[es_forensic_index], + ) except elastic.ElasticsearchError: logger.exception("Elasticsearch Error") exit(1) @@ -1197,32 +1243,27 @@ def process_reports(reports_): os_smtp_tls_index = "smtp_tls" if opts.opensearch_index_suffix: suffix = opts.opensearch_index_suffix - os_aggregate_index = "{0}_{1}".format( - os_aggregate_index, suffix) - os_forensic_index = "{0}_{1}".format( - os_forensic_index, suffix) - os_smtp_tls_index = "{0}_{1}".format( - os_smtp_tls_index, suffix - ) + os_aggregate_index = "{0}_{1}".format(os_aggregate_index, suffix) + os_forensic_index = "{0}_{1}".format(os_forensic_index, suffix) + os_smtp_tls_index = "{0}_{1}".format(os_smtp_tls_index, suffix) if opts.opensearch_index_prefix: prefix = opts.opensearch_index_prefix - os_aggregate_index = "{0}{1}".format( - prefix, os_aggregate_index) - os_forensic_index = "{0}{1}".format( - prefix, os_forensic_index) - os_smtp_tls_index = "{0}{1}".format( - prefix, os_smtp_tls_index - ) - opensearch.set_hosts(opts.opensearch_hosts, - opts.opensearch_ssl, - opts.opensearch_ssl_cert_path, - opts.opensearch_username, - opts.opensearch_password, - opts.opensearch_apiKey, - timeout=opts.opensearch_timeout) + os_aggregate_index = "{0}{1}".format(prefix, os_aggregate_index) + os_forensic_index = "{0}{1}".format(prefix, os_forensic_index) + os_smtp_tls_index = "{0}{1}".format(prefix, os_smtp_tls_index) + opensearch.set_hosts( + opts.opensearch_hosts, + opts.opensearch_ssl, + opts.opensearch_ssl_cert_path, + opts.opensearch_username, + opts.opensearch_password, + opts.opensearch_apiKey, + timeout=opts.opensearch_timeout, + ) opensearch.migrate_indexes( aggregate_indexes=[os_aggregate_index], - forensic_indexes=[os_forensic_index]) + forensic_indexes=[os_forensic_index], + ) except opensearch.OpenSearchError: logger.exception("OpenSearch Error") exit(1) @@ -1251,16 +1292,15 @@ def process_reports(reports_): if opts.hec: if opts.hec_token is None or opts.hec_index is None: - logger.error("HEC token and HEC 
index are required when " - "using HEC URL") + logger.error("HEC token and HEC index are required when " "using HEC URL") exit(1) verify = True if opts.hec_skip_certificate_verification: verify = False - hec_client = splunk.HECClient(opts.hec, opts.hec_token, - opts.hec_index, - verify=verify) + hec_client = splunk.HECClient( + opts.hec, opts.hec_token, opts.hec_index, verify=verify + ) if opts.kafka_hosts: try: @@ -1274,7 +1314,7 @@ def process_reports(reports_): opts.kafka_hosts, username=opts.kafka_username, password=opts.kafka_password, - ssl_context=ssl_context + ssl_context=ssl_context, ) except Exception as error_: logger.error("Kafka Error: {0}".format(error_.__str__())) @@ -1289,15 +1329,17 @@ def process_reports(reports_): except Exception as error_: logger.error("GELF Error: {0}".format(error_.__str__())) - if opts.webhook_aggregate_url or \ - opts.webhook_forensic_url or \ - opts.webhook_smtp_tls_url: + if ( + opts.webhook_aggregate_url + or opts.webhook_forensic_url + or opts.webhook_smtp_tls_url + ): try: webhook_client = webhook.WebhookClient( aggregate_url=opts.webhook_aggregate_url, forensic_url=opts.webhook_forensic_url, smtp_tls_url=opts.webhook_smtp_tls_url, - timeout=opts.webhook_timeout + timeout=opts.webhook_timeout, ) except Exception as error_: logger.error("Webhook Error: {0}".format(error_.__str__())) @@ -1333,26 +1375,29 @@ def process_reports(reports_): connections = [] for proc_index in range( - opts.n_procs * batch_index, - opts.n_procs * (batch_index + 1)): + opts.n_procs * batch_index, opts.n_procs * (batch_index + 1) + ): if proc_index >= len(file_paths): break parent_conn, child_conn = Pipe() connections.append(parent_conn) - process = Process(target=cli_parse, args=( - file_paths[proc_index], - opts.strip_attachment_payloads, - opts.nameservers, - opts.dns_timeout, - opts.ip_db_path, - opts.offline, - opts.always_use_local_files, - opts.reverse_dns_map_path, - opts.reverse_dns_map_url, - child_conn, - )) + process = Process( + target=cli_parse, + args=( + file_paths[proc_index], + opts.strip_attachment_payloads, + opts.nameservers, + opts.dns_timeout, + opts.ip_db_path, + opts.offline, + opts.always_use_local_files, + opts.reverse_dns_map_path, + opts.reverse_dns_map_url, + child_conn, + ), + ) processes.append(process) for proc in processes: @@ -1369,8 +1414,7 @@ def process_reports(reports_): for result in results: if type(result[0]) is ParserError: - logger.error("Failed to parse {0} - {1}".format(result[1], - result[0])) + logger.error("Failed to parse {0} - {1}".format(result[1], result[0])) else: if result[0]["report_type"] == "aggregate": aggregate_reports.append(result[0]["report"]) @@ -1390,7 +1434,8 @@ def process_reports(reports_): always_use_local_files=opts.always_use_local_files, reverse_dns_map_path=opts.reverse_dns_map_path, reverse_dns_map_url=opts.reverse_dns_map_url, - offline=opts.offline) + offline=opts.offline, + ) aggregate_reports += reports["aggregate_reports"] forensic_reports += reports["forensic_reports"] smtp_tls_reports += reports["smtp_tls_reports"] @@ -1399,8 +1444,9 @@ def process_reports(reports_): if opts.imap_host: try: if opts.imap_user is None or opts.imap_password is None: - logger.error("IMAP user and password must be specified if" - "host is specified") + logger.error( + "IMAP user and password must be specified if" "host is specified" + ) ssl = True verify = True @@ -1437,7 +1483,7 @@ def process_reports(reports_): username=opts.graph_user, password=opts.graph_password, token_file=opts.graph_token_file, - 
allow_unencrypted_storage=opts.graph_allow_unencrypted_storage + allow_unencrypted_storage=opts.graph_allow_unencrypted_storage, ) except Exception: @@ -1446,11 +1492,13 @@ def process_reports(reports_): if opts.gmail_api_credentials_file: if opts.mailbox_delete: - if 'https://mail.google.com/' not in opts.gmail_api_scopes: - logger.error("Message deletion requires scope" - " 'https://mail.google.com/'. " - "Add the scope and remove token file " - "to acquire proper access.") + if "https://mail.google.com/" not in opts.gmail_api_scopes: + logger.error( + "Message deletion requires scope" + " 'https://mail.google.com/'. " + "Add the scope and remove token file " + "to acquire proper access." + ) opts.mailbox_delete = False try: @@ -1461,7 +1509,7 @@ def process_reports(reports_): include_spam_trash=opts.gmail_api_include_spam_trash, paginate_messages=opts.gmail_api_paginate_messages, reports_folder=opts.mailbox_reports_folder, - oauth2_port=opts.gmail_api_oauth2_port + oauth2_port=opts.gmail_api_oauth2_port, ) except Exception: @@ -1504,9 +1552,13 @@ def process_reports(reports_): logger.exception("Mailbox Error") exit(1) - results = OrderedDict([("aggregate_reports", aggregate_reports), - ("forensic_reports", forensic_reports), - ("smtp_tls_reports", smtp_tls_reports)]) + results = OrderedDict( + [ + ("aggregate_reports", aggregate_reports), + ("forensic_reports", forensic_reports), + ("smtp_tls_reports", smtp_tls_reports), + ] + ) process_reports(results) @@ -1515,11 +1567,17 @@ def process_reports(reports_): verify = True if opts.smtp_skip_certificate_verification: verify = False - email_results(results, opts.smtp_host, opts.smtp_from, - opts.smtp_to, port=opts.smtp_port, verify=verify, - username=opts.smtp_user, - password=opts.smtp_password, - subject=opts.smtp_subject) + email_results( + results, + opts.smtp_host, + opts.smtp_from, + opts.smtp_to, + port=opts.smtp_port, + verify=verify, + username=opts.smtp_user, + password=opts.smtp_password, + subject=opts.smtp_subject, + ) except Exception: logger.exception("Failed to email results") exit(1) @@ -1544,7 +1602,8 @@ def process_reports(reports_): always_use_local_files=opts.always_use_local_files, reverse_dns_map_path=opts.reverse_dns_map_path, reverse_dns_map_url=opts.reverse_dns_map_url, - offline=opts.offline) + offline=opts.offline, + ) except FileExistsError as error: logger.error("{0}".format(error.__str__())) exit(1) diff --git a/parsedmarc/elastic.py b/parsedmarc/elastic.py index 93ce05d8..34d7953c 100644 --- a/parsedmarc/elastic.py +++ b/parsedmarc/elastic.py @@ -3,8 +3,20 @@ from collections import OrderedDict from elasticsearch_dsl.search import Q -from elasticsearch_dsl import connections, Object, Document, Index, Nested, \ - InnerDoc, Integer, Text, Boolean, Ip, Date, Search +from elasticsearch_dsl import ( + connections, + Object, + Document, + Index, + Nested, + InnerDoc, + Integer, + Text, + Boolean, + Ip, + Date, + Search, +) from elasticsearch.helpers import reindex from parsedmarc.log import logger @@ -76,24 +88,21 @@ class Index: spf_results = Nested(_SPFResult) def add_policy_override(self, type_, comment): - self.policy_overrides.append(_PolicyOverride(type=type_, - comment=comment)) + self.policy_overrides.append(_PolicyOverride(type=type_, comment=comment)) def add_dkim_result(self, domain, selector, result): - self.dkim_results.append(_DKIMResult(domain=domain, - selector=selector, - result=result)) + self.dkim_results.append( + _DKIMResult(domain=domain, selector=selector, result=result) + ) def 
add_spf_result(self, domain, scope, result): - self.spf_results.append(_SPFResult(domain=domain, - scope=scope, - result=result)) + self.spf_results.append(_SPFResult(domain=domain, scope=scope, result=result)) - def save(self, ** kwargs): + def save(self, **kwargs): self.passed_dmarc = False self.passed_dmarc = self.spf_aligned or self.dkim_aligned - return super().save(** kwargs) + return super().save(**kwargs) class _EmailAddressDoc(InnerDoc): @@ -123,24 +132,25 @@ class _ForensicSampleDoc(InnerDoc): attachments = Nested(_EmailAttachmentDoc) def add_to(self, display_name, address): - self.to.append(_EmailAddressDoc(display_name=display_name, - address=address)) + self.to.append(_EmailAddressDoc(display_name=display_name, address=address)) def add_reply_to(self, display_name, address): - self.reply_to.append(_EmailAddressDoc(display_name=display_name, - address=address)) + self.reply_to.append( + _EmailAddressDoc(display_name=display_name, address=address) + ) def add_cc(self, display_name, address): - self.cc.append(_EmailAddressDoc(display_name=display_name, - address=address)) + self.cc.append(_EmailAddressDoc(display_name=display_name, address=address)) def add_bcc(self, display_name, address): - self.bcc.append(_EmailAddressDoc(display_name=display_name, - address=address)) + self.bcc.append(_EmailAddressDoc(display_name=display_name, address=address)) def add_attachment(self, filename, content_type, sha256): - self.attachments.append(_EmailAttachmentDoc(filename=filename, - content_type=content_type, sha256=sha256)) + self.attachments.append( + _EmailAttachmentDoc( + filename=filename, content_type=content_type, sha256=sha256 + ) + ) class _ForensicReportDoc(Document): @@ -185,14 +195,18 @@ class _SMTPTLSPolicyDoc(InnerDoc): failed_session_count = Integer() failure_details = Nested(_SMTPTLSFailureDetailsDoc) - def add_failure_details(self, result_type, ip_address, - receiving_ip, - receiving_mx_helo, - failed_session_count, - sending_mta_ip=None, - receiving_mx_hostname=None, - additional_information_uri=None, - failure_reason_code=None): + def add_failure_details( + self, + result_type, + ip_address, + receiving_ip, + receiving_mx_helo, + failed_session_count, + sending_mta_ip=None, + receiving_mx_hostname=None, + additional_information_uri=None, + failure_reason_code=None, + ): _details = _SMTPTLSFailureDetailsDoc( result_type=result_type, ip_address=ip_address, @@ -202,13 +216,12 @@ def add_failure_details(self, result_type, ip_address, receiving_ip=receiving_ip, failed_session_count=failed_session_count, additional_information=additional_information_uri, - failure_reason_code=failure_reason_code + failure_reason_code=failure_reason_code, ) self.failure_details.append(_details) class _SMTPTLSReportDoc(Document): - class Index: name = "smtp_tls" @@ -220,27 +233,40 @@ class Index: report_id = Text() policies = Nested(_SMTPTLSPolicyDoc) - def add_policy(self, policy_type, policy_domain, - successful_session_count, - failed_session_count, - policy_string=None, - mx_host_patterns=None, - failure_details=None): - self.policies.append(policy_type=policy_type, - policy_domain=policy_domain, - successful_session_count=successful_session_count, - failed_session_count=failed_session_count, - policy_string=policy_string, - mx_host_patterns=mx_host_patterns, - failure_details=failure_details) + def add_policy( + self, + policy_type, + policy_domain, + successful_session_count, + failed_session_count, + policy_string=None, + mx_host_patterns=None, + failure_details=None, + ): + 
self.policies.append( + policy_type=policy_type, + policy_domain=policy_domain, + successful_session_count=successful_session_count, + failed_session_count=failed_session_count, + policy_string=policy_string, + mx_host_patterns=mx_host_patterns, + failure_details=failure_details, + ) class AlreadySaved(ValueError): """Raised when a report to be saved matches an existing report""" -def set_hosts(hosts, use_ssl=False, ssl_cert_path=None, - username=None, password=None, apiKey=None, timeout=60.0): +def set_hosts( + hosts, + use_ssl=False, + ssl_cert_path=None, + username=None, + password=None, + apiKey=None, + timeout=60.0, +): """ Sets the Elasticsearch hosts to use @@ -255,21 +281,18 @@ def set_hosts(hosts, use_ssl=False, ssl_cert_path=None, """ if not isinstance(hosts, list): hosts = [hosts] - conn_params = { - "hosts": hosts, - "timeout": timeout - } + conn_params = {"hosts": hosts, "timeout": timeout} if use_ssl: - conn_params['use_ssl'] = True + conn_params["use_ssl"] = True if ssl_cert_path: - conn_params['verify_certs'] = True - conn_params['ca_certs'] = ssl_cert_path + conn_params["verify_certs"] = True + conn_params["ca_certs"] = ssl_cert_path else: - conn_params['verify_certs'] = False + conn_params["verify_certs"] = False if username: - conn_params['http_auth'] = (username+":"+password) + conn_params["http_auth"] = username + ":" + password if apiKey: - conn_params['api_key'] = apiKey + conn_params["api_key"] = apiKey connections.create_connection(**conn_params) @@ -288,14 +311,12 @@ def create_indexes(names, settings=None): if not index.exists(): logger.debug("Creating Elasticsearch index: {0}".format(name)) if settings is None: - index.settings(number_of_shards=1, - number_of_replicas=0) + index.settings(number_of_shards=1, number_of_replicas=0) else: index.settings(**settings) index.create() except Exception as e: - raise ElasticsearchError( - "Elasticsearch error: {0}".format(e.__str__())) + raise ElasticsearchError("Elasticsearch error: {0}".format(e.__str__())) def migrate_indexes(aggregate_indexes=None, forensic_indexes=None): @@ -327,33 +348,31 @@ def migrate_indexes(aggregate_indexes=None, forensic_indexes=None): fo_type = fo_mapping["type"] if fo_type == "long": new_index_name = "{0}-v{1}".format(aggregate_index_name, version) - body = {"properties": {"published_policy.fo": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 + body = { + "properties": { + "published_policy.fo": { + "type": "text", + "fields": {"keyword": {"type": "keyword", "ignore_above": 256}}, } } } - } - } Index(new_index_name).create() Index(new_index_name).put_mapping(doc_type=doc, body=body) - reindex(connections.get_connection(), aggregate_index_name, - new_index_name) + reindex(connections.get_connection(), aggregate_index_name, new_index_name) Index(aggregate_index_name).delete() for forensic_index in forensic_indexes: pass -def save_aggregate_report_to_elasticsearch(aggregate_report, - index_suffix=None, - index_prefix=None, - monthly_indexes=False, - number_of_shards=1, - number_of_replicas=0): +def save_aggregate_report_to_elasticsearch( + aggregate_report, + index_suffix=None, + index_prefix=None, + monthly_indexes=False, + number_of_shards=1, + number_of_replicas=0, +): """ Saves a parsed DMARC aggregate report to Elasticsearch @@ -374,10 +393,8 @@ def save_aggregate_report_to_elasticsearch(aggregate_report, org_name = metadata["org_name"] report_id = metadata["report_id"] domain = aggregate_report["policy_published"]["domain"] - begin_date = 
human_timestamp_to_datetime(metadata["begin_date"], - to_utc=True) - end_date = human_timestamp_to_datetime(metadata["end_date"], - to_utc=True) + begin_date = human_timestamp_to_datetime(metadata["begin_date"], to_utc=True) + end_date = human_timestamp_to_datetime(metadata["end_date"], to_utc=True) begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%SZ") end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%SZ") if monthly_indexes: @@ -386,8 +403,7 @@ def save_aggregate_report_to_elasticsearch(aggregate_report, index_date = begin_date.strftime("%Y-%m-%d") aggregate_report["begin_date"] = begin_date aggregate_report["end_date"] = end_date - date_range = [aggregate_report["begin_date"], - aggregate_report["end_date"]] + date_range = [aggregate_report["begin_date"], aggregate_report["end_date"]] org_name_query = Q(dict(match_phrase=dict(org_name=org_name))) report_id_query = Q(dict(match_phrase=dict(report_id=report_id))) @@ -409,18 +425,20 @@ def save_aggregate_report_to_elasticsearch(aggregate_report, try: existing = search.execute() except Exception as error_: - raise ElasticsearchError("Elasticsearch's search for existing report \ - error: {}".format(error_.__str__())) + raise ElasticsearchError( + "Elasticsearch's search for existing report \ + error: {}".format(error_.__str__()) + ) if len(existing) > 0: - raise AlreadySaved("An aggregate report ID {0} from {1} about {2} " - "with a date range of {3} UTC to {4} UTC already " - "exists in " - "Elasticsearch".format(report_id, - org_name, - domain, - begin_date_human, - end_date_human)) + raise AlreadySaved( + "An aggregate report ID {0} from {1} about {2} " + "with a date range of {3} UTC to {4} UTC already " + "exists in " + "Elasticsearch".format( + report_id, org_name, domain, begin_date_human, end_date_human + ) + ) published_policy = _PublishedPolicy( domain=aggregate_report["policy_published"]["domain"], adkim=aggregate_report["policy_published"]["adkim"], @@ -428,7 +446,7 @@ def save_aggregate_report_to_elasticsearch(aggregate_report, p=aggregate_report["policy_published"]["p"], sp=aggregate_report["policy_published"]["sp"], pct=aggregate_report["policy_published"]["pct"], - fo=aggregate_report["policy_published"]["fo"] + fo=aggregate_report["policy_published"]["fo"], ) for record in aggregate_report["records"]: @@ -451,28 +469,33 @@ def save_aggregate_report_to_elasticsearch(aggregate_report, source_name=record["source"]["name"], message_count=record["count"], disposition=record["policy_evaluated"]["disposition"], - dkim_aligned=record["policy_evaluated"]["dkim"] is not None and - record["policy_evaluated"]["dkim"].lower() == "pass", - spf_aligned=record["policy_evaluated"]["spf"] is not None and - record["policy_evaluated"]["spf"].lower() == "pass", + dkim_aligned=record["policy_evaluated"]["dkim"] is not None + and record["policy_evaluated"]["dkim"].lower() == "pass", + spf_aligned=record["policy_evaluated"]["spf"] is not None + and record["policy_evaluated"]["spf"].lower() == "pass", header_from=record["identifiers"]["header_from"], envelope_from=record["identifiers"]["envelope_from"], - envelope_to=record["identifiers"]["envelope_to"] + envelope_to=record["identifiers"]["envelope_to"], ) for override in record["policy_evaluated"]["policy_override_reasons"]: - agg_doc.add_policy_override(type_=override["type"], - comment=override["comment"]) + agg_doc.add_policy_override( + type_=override["type"], comment=override["comment"] + ) for dkim_result in record["auth_results"]["dkim"]: - 
agg_doc.add_dkim_result(domain=dkim_result["domain"], - selector=dkim_result["selector"], - result=dkim_result["result"]) + agg_doc.add_dkim_result( + domain=dkim_result["domain"], + selector=dkim_result["selector"], + result=dkim_result["result"], + ) for spf_result in record["auth_results"]["spf"]: - agg_doc.add_spf_result(domain=spf_result["domain"], - scope=spf_result["scope"], - result=spf_result["result"]) + agg_doc.add_spf_result( + domain=spf_result["domain"], + scope=spf_result["scope"], + result=spf_result["result"], + ) index = "dmarc_aggregate" if index_suffix: @@ -481,41 +504,43 @@ def save_aggregate_report_to_elasticsearch(aggregate_report, index = "{0}{1}".format(index_prefix, index) index = "{0}-{1}".format(index, index_date) - index_settings = dict(number_of_shards=number_of_shards, - number_of_replicas=number_of_replicas) + index_settings = dict( + number_of_shards=number_of_shards, number_of_replicas=number_of_replicas + ) create_indexes([index], index_settings) agg_doc.meta.index = index try: agg_doc.save() except Exception as e: - raise ElasticsearchError( - "Elasticsearch error: {0}".format(e.__str__())) + raise ElasticsearchError("Elasticsearch error: {0}".format(e.__str__())) -def save_forensic_report_to_elasticsearch(forensic_report, - index_suffix=None, - index_prefix=None, - monthly_indexes=False, - number_of_shards=1, - number_of_replicas=0): +def save_forensic_report_to_elasticsearch( + forensic_report, + index_suffix=None, + index_prefix=None, + monthly_indexes=False, + number_of_shards=1, + number_of_replicas=0, +): """ - Saves a parsed DMARC forensic report to Elasticsearch - - Args: - forensic_report (OrderedDict): A parsed forensic report - index_suffix (str): The suffix of the name of the index to save to - index_prefix (str): The prefix of the name of the index to save to - monthly_indexes (bool): Use monthly indexes instead of daily - indexes - number_of_shards (int): The number of shards to use in the index - number_of_replicas (int): The number of replicas to use in the - index - - Raises: - AlreadySaved + Saves a parsed DMARC forensic report to Elasticsearch - """ + Args: + forensic_report (OrderedDict): A parsed forensic report + index_suffix (str): The suffix of the name of the index to save to + index_prefix (str): The prefix of the name of the index to save to + monthly_indexes (bool): Use monthly indexes instead of daily + indexes + number_of_shards (int): The number of shards to use in the index + number_of_replicas (int): The number of replicas to use in the + index + + Raises: + AlreadySaved + + """ logger.info("Saving forensic report to Elasticsearch") forensic_report = forensic_report.copy() sample_date = None @@ -560,14 +585,12 @@ def save_forensic_report_to_elasticsearch(forensic_report, existing = search.execute() if len(existing) > 0: - raise AlreadySaved("A forensic sample to {0} from {1} " - "with a subject of {2} and arrival date of {3} " - "already exists in " - "Elasticsearch".format(to_, - from_, - subject, - arrival_date_human - )) + raise AlreadySaved( + "A forensic sample to {0} from {1} " + "with a subject of {2} and arrival date of {3} " + "already exists in " + "Elasticsearch".format(to_, from_, subject, arrival_date_human) + ) parsed_sample = forensic_report["parsed_sample"] sample = _ForensicSampleDoc( @@ -577,25 +600,25 @@ def save_forensic_report_to_elasticsearch(forensic_report, date=sample_date, subject=forensic_report["parsed_sample"]["subject"], filename_safe_subject=parsed_sample["filename_safe_subject"], - 
body=forensic_report["parsed_sample"]["body"] + body=forensic_report["parsed_sample"]["body"], ) for address in forensic_report["parsed_sample"]["to"]: - sample.add_to(display_name=address["display_name"], - address=address["address"]) + sample.add_to(display_name=address["display_name"], address=address["address"]) for address in forensic_report["parsed_sample"]["reply_to"]: - sample.add_reply_to(display_name=address["display_name"], - address=address["address"]) + sample.add_reply_to( + display_name=address["display_name"], address=address["address"] + ) for address in forensic_report["parsed_sample"]["cc"]: - sample.add_cc(display_name=address["display_name"], - address=address["address"]) + sample.add_cc(display_name=address["display_name"], address=address["address"]) for address in forensic_report["parsed_sample"]["bcc"]: - sample.add_bcc(display_name=address["display_name"], - address=address["address"]) + sample.add_bcc(display_name=address["display_name"], address=address["address"]) for attachment in forensic_report["parsed_sample"]["attachments"]: - sample.add_attachment(filename=attachment["filename"], - content_type=attachment["mail_content_type"], - sha256=attachment["sha256"]) + sample.add_attachment( + filename=attachment["filename"], + content_type=attachment["mail_content_type"], + sha256=attachment["sha256"], + ) try: forensic_doc = _ForensicReportDoc( feedback_type=forensic_report["feedback_type"], @@ -611,12 +634,11 @@ def save_forensic_report_to_elasticsearch(forensic_report, source_country=forensic_report["source"]["country"], source_reverse_dns=forensic_report["source"]["reverse_dns"], source_base_domain=forensic_report["source"]["base_domain"], - authentication_mechanisms=forensic_report[ - "authentication_mechanisms"], + authentication_mechanisms=forensic_report["authentication_mechanisms"], auth_failure=forensic_report["auth_failure"], dkim_domain=forensic_report["dkim_domain"], original_rcpt_to=forensic_report["original_rcpt_to"], - sample=sample + sample=sample, ) index = "dmarc_forensic" @@ -629,26 +651,29 @@ def save_forensic_report_to_elasticsearch(forensic_report, else: index_date = arrival_date.strftime("%Y-%m-%d") index = "{0}-{1}".format(index, index_date) - index_settings = dict(number_of_shards=number_of_shards, - number_of_replicas=number_of_replicas) + index_settings = dict( + number_of_shards=number_of_shards, number_of_replicas=number_of_replicas + ) create_indexes([index], index_settings) forensic_doc.meta.index = index try: forensic_doc.save() except Exception as e: - raise ElasticsearchError( - "Elasticsearch error: {0}".format(e.__str__())) + raise ElasticsearchError("Elasticsearch error: {0}".format(e.__str__())) except KeyError as e: raise InvalidForensicReport( - "Forensic report missing required field: {0}".format(e.__str__())) + "Forensic report missing required field: {0}".format(e.__str__()) + ) -def save_smtp_tls_report_to_elasticsearch(report, - index_suffix=None, - index_prefix=None, - monthly_indexes=False, - number_of_shards=1, - number_of_replicas=0): +def save_smtp_tls_report_to_elasticsearch( + report, + index_suffix=None, + index_prefix=None, + monthly_indexes=False, + number_of_shards=1, + number_of_replicas=0, +): """ Saves a parsed SMTP TLS report to Elasticsearch @@ -666,10 +691,8 @@ def save_smtp_tls_report_to_elasticsearch(report, logger.info("Saving smtp tls report to Elasticsearch") org_name = report["organization_name"] report_id = report["report_id"] - begin_date = human_timestamp_to_datetime(report["begin_date"], - 
to_utc=True) - end_date = human_timestamp_to_datetime(report["end_date"], - to_utc=True) + begin_date = human_timestamp_to_datetime(report["begin_date"], to_utc=True) + end_date = human_timestamp_to_datetime(report["end_date"], to_utc=True) begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%SZ") end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%SZ") if monthly_indexes: @@ -698,15 +721,19 @@ def save_smtp_tls_report_to_elasticsearch(report, try: existing = search.execute() except Exception as error_: - raise ElasticsearchError("Elasticsearch's search for existing report \ - error: {}".format(error_.__str__())) + raise ElasticsearchError( + "Elasticsearch's search for existing report \ + error: {}".format(error_.__str__()) + ) if len(existing) > 0: - raise AlreadySaved(f"An SMTP TLS report ID {report_id} from " - f" {org_name} with a date range of " - f"{begin_date_human} UTC to " - f"{end_date_human} UTC already " - "exists in Elasticsearch") + raise AlreadySaved( + f"An SMTP TLS report ID {report_id} from " + f" {org_name} with a date range of " + f"{begin_date_human} UTC to " + f"{end_date_human} UTC already " + "exists in Elasticsearch" + ) index = "smtp_tls" if index_suffix: @@ -714,8 +741,9 @@ def save_smtp_tls_report_to_elasticsearch(report, if index_prefix: index = "{0}{1}".format(index_prefix, index) index = "{0}-{1}".format(index, index_date) - index_settings = dict(number_of_shards=number_of_shards, - number_of_replicas=number_of_replicas) + index_settings = dict( + number_of_shards=number_of_shards, number_of_replicas=number_of_replicas + ) smtp_tls_doc = _SMTPTLSReportDoc( org_name=report["organization_name"], @@ -723,10 +751,10 @@ def save_smtp_tls_report_to_elasticsearch(report, date_begin=report["begin_date"], date_end=report["end_date"], contact_info=report["contact_info"], - report_id=report["report_id"] + report_id=report["report_id"], ) - for policy in report['policies']: + for policy in report["policies"]: policy_strings = None mx_host_patterns = None if "policy_strings" in policy: @@ -739,7 +767,7 @@ def save_smtp_tls_report_to_elasticsearch(report, succesful_session_count=policy["successful_session_count"], failed_session_count=policy["failed_session_count"], policy_string=policy_strings, - mx_host_patterns=mx_host_patterns + mx_host_patterns=mx_host_patterns, ) if "failure_details" in policy: for failure_detail in policy["failure_details"]: @@ -752,11 +780,11 @@ def save_smtp_tls_report_to_elasticsearch(report, sending_mta_ip = None if "receiving_mx_hostname" in failure_detail: - receiving_mx_hostname = failure_detail[ - "receiving_mx_hostname"] + receiving_mx_hostname = failure_detail["receiving_mx_hostname"] if "additional_information_uri" in failure_detail: additional_information_uri = failure_detail[ - "additional_information_uri"] + "additional_information_uri" + ] if "failure_reason_code" in failure_detail: failure_reason_code = failure_detail["failure_reason_code"] if "ip_address" in failure_detail: @@ -772,12 +800,11 @@ def save_smtp_tls_report_to_elasticsearch(report, ip_address=ip_address, receiving_ip=receiving_ip, receiving_mx_helo=receiving_mx_helo, - failed_session_count=failure_detail[ - "failed_session_count"], + failed_session_count=failure_detail["failed_session_count"], sending_mta_ip=sending_mta_ip, receiving_mx_hostname=receiving_mx_hostname, additional_information_uri=additional_information_uri, - failure_reason_code=failure_reason_code + failure_reason_code=failure_reason_code, ) smtp_tls_doc.policies.append(policy_doc) @@ -787,5 +814,4 
@@ def save_smtp_tls_report_to_elasticsearch(report, try: smtp_tls_doc.save() except Exception as e: - raise ElasticsearchError( - "Elasticsearch error: {0}".format(e.__str__())) + raise ElasticsearchError("Elasticsearch error: {0}".format(e.__str__())) diff --git a/parsedmarc/gelf.py b/parsedmarc/gelf.py index a74928c5..9e5c9dac 100644 --- a/parsedmarc/gelf.py +++ b/parsedmarc/gelf.py @@ -5,8 +5,11 @@ import json import threading -from parsedmarc import parsed_aggregate_reports_to_csv_rows, \ - parsed_forensic_reports_to_csv_rows, parsed_smtp_tls_reports_to_csv_rows +from parsedmarc import ( + parsed_aggregate_reports_to_csv_rows, + parsed_forensic_reports_to_csv_rows, + parsed_smtp_tls_reports_to_csv_rows, +) from pygelf import GelfTcpHandler, GelfUdpHandler, GelfTlsHandler @@ -14,7 +17,6 @@ class ContextFilter(logging.Filter): - def filter(self, record): record.parsedmarc = log_context_data.parsedmarc return True @@ -33,23 +35,24 @@ def __init__(self, host, port, mode): """ self.host = host self.port = port - self.logger = logging.getLogger('parsedmarc_syslog') + self.logger = logging.getLogger("parsedmarc_syslog") self.logger.setLevel(logging.INFO) self.logger.addFilter(ContextFilter()) self.gelf_mode = { - 'udp': GelfUdpHandler, - 'tcp': GelfTcpHandler, - 'tls': GelfTlsHandler, + "udp": GelfUdpHandler, + "tcp": GelfTcpHandler, + "tls": GelfTlsHandler, } - self.handler = self.gelf_mode[mode](host=self.host, port=self.port, - include_extra_fields=True) + self.handler = self.gelf_mode[mode]( + host=self.host, port=self.port, include_extra_fields=True + ) self.logger.addHandler(self.handler) def save_aggregate_report_to_gelf(self, aggregate_reports): rows = parsed_aggregate_reports_to_csv_rows(aggregate_reports) for row in rows: log_context_data.parsedmarc = row - self.logger.info('parsedmarc aggregate report') + self.logger.info("parsedmarc aggregate report") log_context_data.parsedmarc = None diff --git a/parsedmarc/kafkaclient.py b/parsedmarc/kafkaclient.py index 68eef1f4..35d1c2dd 100644 --- a/parsedmarc/kafkaclient.py +++ b/parsedmarc/kafkaclient.py @@ -17,8 +17,9 @@ class KafkaError(RuntimeError): class KafkaClient(object): - def __init__(self, kafka_hosts, ssl=False, username=None, - password=None, ssl_context=None): + def __init__( + self, kafka_hosts, ssl=False, username=None, password=None, ssl_context=None + ): """ Initializes the Kafka client Args: @@ -37,10 +38,11 @@ def __init__(self, kafka_hosts, ssl=False, username=None, ``$ConnectionString``, and the password is the Azure Event Hub connection string. """ - config = dict(value_serializer=lambda v: json.dumps(v).encode( - 'utf-8'), - bootstrap_servers=kafka_hosts, - client_id="parsedmarc-{0}".format(__version__)) + config = dict( + value_serializer=lambda v: json.dumps(v).encode("utf-8"), + bootstrap_servers=kafka_hosts, + client_id="parsedmarc-{0}".format(__version__), + ) if ssl or username or password: config["security_protocol"] = "SSL" config["ssl_context"] = ssl_context or create_default_context() @@ -55,14 +57,14 @@ def __init__(self, kafka_hosts, ssl=False, username=None, @staticmethod def strip_metadata(report): """ - Duplicates org_name, org_email and report_id into JSON root - and removes report_metadata key to bring it more inline - with Elastic output. + Duplicates org_name, org_email and report_id into JSON root + and removes report_metadata key to bring it more inline + with Elastic output. 
""" - report['org_name'] = report['report_metadata']['org_name'] - report['org_email'] = report['report_metadata']['org_email'] - report['report_id'] = report['report_metadata']['report_id'] - report.pop('report_metadata') + report["org_name"] = report["report_metadata"]["org_name"] + report["org_email"] = report["report_metadata"]["org_email"] + report["report_id"] = report["report_metadata"]["report_id"] + report.pop("report_metadata") return report @@ -80,13 +82,11 @@ def generate_daterange(report): end_date = human_timestamp_to_datetime(metadata["end_date"]) begin_date_human = begin_date.strftime("%Y-%m-%dT%H:%M:%S") end_date_human = end_date.strftime("%Y-%m-%dT%H:%M:%S") - date_range = [begin_date_human, - end_date_human] + date_range = [begin_date_human, end_date_human] logger.debug("date_range is {}".format(date_range)) return date_range - def save_aggregate_reports_to_kafka(self, aggregate_reports, - aggregate_topic): + def save_aggregate_reports_to_kafka(self, aggregate_reports, aggregate_topic): """ Saves aggregate DMARC reports to Kafka @@ -96,38 +96,38 @@ def save_aggregate_reports_to_kafka(self, aggregate_reports, aggregate_topic (str): The name of the Kafka topic """ - if (isinstance(aggregate_reports, dict) or - isinstance(aggregate_reports, OrderedDict)): + if isinstance(aggregate_reports, dict) or isinstance( + aggregate_reports, OrderedDict + ): aggregate_reports = [aggregate_reports] if len(aggregate_reports) < 1: return for report in aggregate_reports: - report['date_range'] = self.generate_daterange(report) + report["date_range"] = self.generate_daterange(report) report = self.strip_metadata(report) - for slice in report['records']: - slice['date_range'] = report['date_range'] - slice['org_name'] = report['org_name'] - slice['org_email'] = report['org_email'] - slice['policy_published'] = report['policy_published'] - slice['report_id'] = report['report_id'] + for slice in report["records"]: + slice["date_range"] = report["date_range"] + slice["org_name"] = report["org_name"] + slice["org_email"] = report["org_email"] + slice["policy_published"] = report["policy_published"] + slice["report_id"] = report["report_id"] logger.debug("Sending slice.") try: logger.debug("Saving aggregate report to Kafka") self.producer.send(aggregate_topic, slice) except UnknownTopicOrPartitionError: raise KafkaError( - "Kafka error: Unknown topic or partition on broker") + "Kafka error: Unknown topic or partition on broker" + ) except Exception as e: - raise KafkaError( - "Kafka error: {0}".format(e.__str__())) + raise KafkaError("Kafka error: {0}".format(e.__str__())) try: self.producer.flush() except Exception as e: - raise KafkaError( - "Kafka error: {0}".format(e.__str__())) + raise KafkaError("Kafka error: {0}".format(e.__str__())) def save_forensic_reports_to_kafka(self, forensic_reports, forensic_topic): """ @@ -151,16 +151,13 @@ def save_forensic_reports_to_kafka(self, forensic_reports, forensic_topic): logger.debug("Saving forensic reports to Kafka") self.producer.send(forensic_topic, forensic_reports) except UnknownTopicOrPartitionError: - raise KafkaError( - "Kafka error: Unknown topic or partition on broker") + raise KafkaError("Kafka error: Unknown topic or partition on broker") except Exception as e: - raise KafkaError( - "Kafka error: {0}".format(e.__str__())) + raise KafkaError("Kafka error: {0}".format(e.__str__())) try: self.producer.flush() except Exception as e: - raise KafkaError( - "Kafka error: {0}".format(e.__str__())) + raise KafkaError("Kafka error: 
{0}".format(e.__str__())) def save_smtp_tls_reports_to_kafka(self, smtp_tls_reports, smtp_tls_topic): """ @@ -184,13 +181,10 @@ def save_smtp_tls_reports_to_kafka(self, smtp_tls_reports, smtp_tls_topic): logger.debug("Saving forensic reports to Kafka") self.producer.send(smtp_tls_topic, smtp_tls_reports) except UnknownTopicOrPartitionError: - raise KafkaError( - "Kafka error: Unknown topic or partition on broker") + raise KafkaError("Kafka error: Unknown topic or partition on broker") except Exception as e: - raise KafkaError( - "Kafka error: {0}".format(e.__str__())) + raise KafkaError("Kafka error: {0}".format(e.__str__())) try: self.producer.flush() except Exception as e: - raise KafkaError( - "Kafka error: {0}".format(e.__str__())) + raise KafkaError("Kafka error: {0}".format(e.__str__())) diff --git a/parsedmarc/loganalytics.py b/parsedmarc/loganalytics.py index 13996132..3192f4dc 100644 --- a/parsedmarc/loganalytics.py +++ b/parsedmarc/loganalytics.py @@ -9,7 +9,7 @@ class LogAnalyticsException(Exception): """Raised when an Elasticsearch error occurs""" -class LogAnalyticsConfig(): +class LogAnalyticsConfig: """ The LogAnalyticsConfig class is used to define the configuration for the Log Analytics Client. @@ -41,16 +41,18 @@ class LogAnalyticsConfig(): the SMTP TLS Reports need to be pushed. """ + def __init__( - self, - client_id: str, - client_secret: str, - tenant_id: str, - dce: str, - dcr_immutable_id: str, - dcr_aggregate_stream: str, - dcr_forensic_stream: str, - dcr_smtp_tls_stream: str): + self, + client_id: str, + client_secret: str, + tenant_id: str, + dce: str, + dcr_immutable_id: str, + dcr_aggregate_stream: str, + dcr_forensic_stream: str, + dcr_smtp_tls_stream: str, + ): self.client_id = client_id self.client_secret = client_secret self.tenant_id = tenant_id @@ -67,16 +69,18 @@ class LogAnalyticsClient(object): the generated DMARC reports to Log Analytics via Data Collection Rules. """ + def __init__( - self, - client_id: str, - client_secret: str, - tenant_id: str, - dce: str, - dcr_immutable_id: str, - dcr_aggregate_stream: str, - dcr_forensic_stream: str, - dcr_smtp_tls_stream: str): + self, + client_id: str, + client_secret: str, + tenant_id: str, + dce: str, + dcr_immutable_id: str, + dcr_aggregate_stream: str, + dcr_forensic_stream: str, + dcr_smtp_tls_stream: str, + ): self.conf = LogAnalyticsConfig( client_id=client_id, client_secret=client_secret, @@ -85,23 +89,20 @@ def __init__( dcr_immutable_id=dcr_immutable_id, dcr_aggregate_stream=dcr_aggregate_stream, dcr_forensic_stream=dcr_forensic_stream, - dcr_smtp_tls_stream=dcr_smtp_tls_stream + dcr_smtp_tls_stream=dcr_smtp_tls_stream, ) if ( - not self.conf.client_id or - not self.conf.client_secret or - not self.conf.tenant_id or - not self.conf.dce or - not self.conf.dcr_immutable_id): + not self.conf.client_id + or not self.conf.client_secret + or not self.conf.tenant_id + or not self.conf.dce + or not self.conf.dcr_immutable_id + ): raise LogAnalyticsException( - "Invalid configuration. " + - "One or more required settings are missing.") + "Invalid configuration. " + "One or more required settings are missing." + ) - def publish_json( - self, - results, - logs_client: LogsIngestionClient, - dcr_stream: str): + def publish_json(self, results, logs_client: LogsIngestionClient, dcr_stream: str): """ Background function to publish given DMARC report to specific Data Collection Rule. 
@@ -117,16 +118,10 @@ def publish_json( try: logs_client.upload(self.conf.dcr_immutable_id, dcr_stream, results) except HttpResponseError as e: - raise LogAnalyticsException( - "Upload failed: {error}" - .format(error=e)) + raise LogAnalyticsException("Upload failed: {error}".format(error=e)) def publish_results( - self, - results, - save_aggregate: bool, - save_forensic: bool, - save_smtp_tls: bool + self, results, save_aggregate: bool, save_forensic: bool, save_smtp_tls: bool ): """ Function to publish DMARC and/or SMTP TLS reports to Log Analytics @@ -148,39 +143,39 @@ def publish_results( credential = ClientSecretCredential( tenant_id=conf.tenant_id, client_id=conf.client_id, - client_secret=conf.client_secret + client_secret=conf.client_secret, ) logs_client = LogsIngestionClient(conf.dce, credential=credential) if ( - results['aggregate_reports'] and - conf.dcr_aggregate_stream and - len(results['aggregate_reports']) > 0 and - save_aggregate): + results["aggregate_reports"] + and conf.dcr_aggregate_stream + and len(results["aggregate_reports"]) > 0 + and save_aggregate + ): logger.info("Publishing aggregate reports.") self.publish_json( - results['aggregate_reports'], - logs_client, - conf.dcr_aggregate_stream) + results["aggregate_reports"], logs_client, conf.dcr_aggregate_stream + ) logger.info("Successfully pushed aggregate reports.") if ( - results['forensic_reports'] and - conf.dcr_forensic_stream and - len(results['forensic_reports']) > 0 and - save_forensic): + results["forensic_reports"] + and conf.dcr_forensic_stream + and len(results["forensic_reports"]) > 0 + and save_forensic + ): logger.info("Publishing forensic reports.") self.publish_json( - results['forensic_reports'], - logs_client, - conf.dcr_forensic_stream) + results["forensic_reports"], logs_client, conf.dcr_forensic_stream + ) logger.info("Successfully pushed forensic reports.") if ( - results['smtp_tls_reports'] and - conf.dcr_smtp_tls_stream and - len(results['smtp_tls_reports']) > 0 and - save_smtp_tls): + results["smtp_tls_reports"] + and conf.dcr_smtp_tls_stream + and len(results["smtp_tls_reports"]) > 0 + and save_smtp_tls + ): logger.info("Publishing SMTP TLS reports.") self.publish_json( - results['smtp_tls_reports'], - logs_client, - conf.dcr_smtp_tls_stream) + results["smtp_tls_reports"], logs_client, conf.dcr_smtp_tls_stream + ) logger.info("Successfully pushed SMTP TLS reports.") diff --git a/parsedmarc/mail/__init__.py b/parsedmarc/mail/__init__.py index 3256baa9..79939cc6 100644 --- a/parsedmarc/mail/__init__.py +++ b/parsedmarc/mail/__init__.py @@ -4,8 +4,10 @@ from parsedmarc.mail.imap import IMAPConnection from parsedmarc.mail.maildir import MaildirConnection -__all__ = ["MailboxConnection", - "MSGraphConnection", - "GmailConnection", - "IMAPConnection", - "MaildirConnection"] +__all__ = [ + "MailboxConnection", + "MSGraphConnection", + "GmailConnection", + "IMAPConnection", + "MaildirConnection", +] diff --git a/parsedmarc/mail/gmail.py b/parsedmarc/mail/gmail.py index 436e1f02..b426746f 100644 --- a/parsedmarc/mail/gmail.py +++ b/parsedmarc/mail/gmail.py @@ -25,45 +25,47 @@ def _get_creds(token_file, credentials_file, scopes, oauth2_port): if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: - flow = InstalledAppFlow.from_client_secrets_file( - credentials_file, scopes) - creds = flow.run_local_server(open_browser=False, - oauth2_port=oauth2_port) + flow = InstalledAppFlow.from_client_secrets_file(credentials_file, scopes) + creds = 
flow.run_local_server(open_browser=False, oauth2_port=oauth2_port) # Save the credentials for the next run - with Path(token_file).open('w') as token: + with Path(token_file).open("w") as token: token.write(creds.to_json()) return creds class GmailConnection(MailboxConnection): - def __init__(self, - token_file: str, - credentials_file: str, - scopes: List[str], - include_spam_trash: bool, - reports_folder: str, - oauth2_port: int, - paginate_messages: bool): + def __init__( + self, + token_file: str, + credentials_file: str, + scopes: List[str], + include_spam_trash: bool, + reports_folder: str, + oauth2_port: int, + paginate_messages: bool, + ): creds = _get_creds(token_file, credentials_file, scopes, oauth2_port) - self.service = build('gmail', 'v1', credentials=creds) + self.service = build("gmail", "v1", credentials=creds) self.include_spam_trash = include_spam_trash self.reports_label_id = self._find_label_id_for_label(reports_folder) self.paginate_messages = paginate_messages def create_folder(self, folder_name: str): # Gmail doesn't support the name Archive - if folder_name == 'Archive': + if folder_name == "Archive": return logger.debug(f"Creating label {folder_name}") - request_body = {'name': folder_name, 'messageListVisibility': 'show'} + request_body = {"name": folder_name, "messageListVisibility": "show"} try: - self.service.users().labels()\ - .create(userId='me', body=request_body).execute() + self.service.users().labels().create( + userId="me", body=request_body + ).execute() except HttpError as e: if e.status_code == 409: - logger.debug(f'Folder {folder_name} already exists, ' - f'skipping creation') + logger.debug( + f"Folder {folder_name} already exists, " f"skipping creation" + ) else: raise e @@ -93,44 +95,42 @@ def fetch_messages(self, reports_folder: str, **kwargs) -> List[str]: return [id for id in self._fetch_all_message_ids(reports_label_id)] def fetch_message(self, message_id): - msg = self.service.users().messages()\ - .get(userId='me', - id=message_id, - format="raw" - )\ + msg = ( + self.service.users() + .messages() + .get(userId="me", id=message_id, format="raw") .execute() - return urlsafe_b64decode(msg['raw']) + ) + return urlsafe_b64decode(msg["raw"]) def delete_message(self, message_id: str): - self.service.users().messages().delete(userId='me', id=message_id) + self.service.users().messages().delete(userId="me", id=message_id) def move_message(self, message_id: str, folder_name: str): label_id = self._find_label_id_for_label(folder_name) logger.debug(f"Moving message UID {message_id} to {folder_name}") request_body = { - 'addLabelIds': [label_id], - 'removeLabelIds': [self.reports_label_id] + "addLabelIds": [label_id], + "removeLabelIds": [self.reports_label_id], } - self.service.users().messages()\ - .modify(userId='me', - id=message_id, - body=request_body)\ - .execute() + self.service.users().messages().modify( + userId="me", id=message_id, body=request_body + ).execute() def keepalive(self): # Not needed pass def watch(self, check_callback, check_timeout): - """ Checks the mailbox for new messages every n seconds""" + """Checks the mailbox for new messages every n seconds""" while True: sleep(check_timeout) check_callback(self) @lru_cache(maxsize=10) def _find_label_id_for_label(self, label_name: str) -> str: - results = self.service.users().labels().list(userId='me').execute() - labels = results.get('labels', []) + results = self.service.users().labels().list(userId="me").execute() + labels = results.get("labels", []) for label in labels: - if 
label_name == label['id'] or label_name == label['name']: - return label['id'] + if label_name == label["id"] or label_name == label["name"]: + return label["id"] diff --git a/parsedmarc/mail/graph.py b/parsedmarc/mail/graph.py index 2fc4364d..f022ca25 100644 --- a/parsedmarc/mail/graph.py +++ b/parsedmarc/mail/graph.py @@ -4,9 +4,13 @@ from time import sleep from typing import List, Optional -from azure.identity import UsernamePasswordCredential, \ - DeviceCodeCredential, ClientSecretCredential, \ - TokenCachePersistenceOptions, AuthenticationRecord +from azure.identity import ( + UsernamePasswordCredential, + DeviceCodeCredential, + ClientSecretCredential, + TokenCachePersistenceOptions, + AuthenticationRecord, +) from msgraph.core import GraphClient from parsedmarc.log import logger @@ -21,15 +25,15 @@ class AuthMethod(Enum): def _get_cache_args(token_path: Path, allow_unencrypted_storage): cache_args = { - 'cache_persistence_options': - TokenCachePersistenceOptions( - name='parsedmarc', - allow_unencrypted_storage=allow_unencrypted_storage) + "cache_persistence_options": TokenCachePersistenceOptions( + name="parsedmarc", allow_unencrypted_storage=allow_unencrypted_storage + ) } auth_record = _load_token(token_path) if auth_record: - cache_args['authentication_record'] = \ - AuthenticationRecord.deserialize(auth_record) + cache_args["authentication_record"] = AuthenticationRecord.deserialize( + auth_record + ) return cache_args @@ -42,53 +46,57 @@ def _load_token(token_path: Path) -> Optional[str]: def _cache_auth_record(record: AuthenticationRecord, token_path: Path): token = record.serialize() - with token_path.open('w') as token_file: + with token_path.open("w") as token_file: token_file.write(token) def _generate_credential(auth_method: str, token_path: Path, **kwargs): if auth_method == AuthMethod.DeviceCode.name: credential = DeviceCodeCredential( - client_id=kwargs['client_id'], + client_id=kwargs["client_id"], disable_automatic_authentication=True, - tenant_id=kwargs['tenant_id'], + tenant_id=kwargs["tenant_id"], **_get_cache_args( token_path, - allow_unencrypted_storage=kwargs['allow_unencrypted_storage']) + allow_unencrypted_storage=kwargs["allow_unencrypted_storage"], + ), ) elif auth_method == AuthMethod.UsernamePassword.name: credential = UsernamePasswordCredential( - client_id=kwargs['client_id'], - client_credential=kwargs['client_secret'], + client_id=kwargs["client_id"], + client_credential=kwargs["client_secret"], disable_automatic_authentication=True, - username=kwargs['username'], - password=kwargs['password'], + username=kwargs["username"], + password=kwargs["password"], **_get_cache_args( token_path, - allow_unencrypted_storage=kwargs['allow_unencrypted_storage']) + allow_unencrypted_storage=kwargs["allow_unencrypted_storage"], + ), ) elif auth_method == AuthMethod.ClientSecret.name: credential = ClientSecretCredential( - client_id=kwargs['client_id'], - tenant_id=kwargs['tenant_id'], - client_secret=kwargs['client_secret'] + client_id=kwargs["client_id"], + tenant_id=kwargs["tenant_id"], + client_secret=kwargs["client_secret"], ) else: - raise RuntimeError(f'Auth method {auth_method} not found') + raise RuntimeError(f"Auth method {auth_method} not found") return credential class MSGraphConnection(MailboxConnection): - def __init__(self, - auth_method: str, - mailbox: str, - client_id: str, - client_secret: str, - username: str, - password: str, - tenant_id: str, - token_file: str, - allow_unencrypted_storage: bool): + def __init__( + self, + auth_method: str, + 
mailbox: str, + client_id: str, + client_secret: str, + username: str, + password: str, + tenant_id: str, + token_file: str, + allow_unencrypted_storage: bool, + ): token_path = Path(token_file) credential = _generate_credential( auth_method, @@ -98,156 +106,149 @@ def __init__(self, password=password, tenant_id=tenant_id, token_path=token_path, - allow_unencrypted_storage=allow_unencrypted_storage) - client_params = { - 'credential': credential - } + allow_unencrypted_storage=allow_unencrypted_storage, + ) + client_params = {"credential": credential} if not isinstance(credential, ClientSecretCredential): - scopes = ['Mail.ReadWrite'] + scopes = ["Mail.ReadWrite"] # Detect if mailbox is shared if mailbox and username != mailbox: - scopes = ['Mail.ReadWrite.Shared'] + scopes = ["Mail.ReadWrite.Shared"] auth_record = credential.authenticate(scopes=scopes) _cache_auth_record(auth_record, token_path) - client_params['scopes'] = scopes + client_params["scopes"] = scopes self._client = GraphClient(**client_params) self.mailbox_name = mailbox def create_folder(self, folder_name: str): - sub_url = '' - path_parts = folder_name.split('/') + sub_url = "" + path_parts = folder_name.split("/") if len(path_parts) > 1: # Folder is a subFolder parent_folder_id = None for folder in path_parts[:-1]: parent_folder_id = self._find_folder_id_with_parent( - folder, parent_folder_id) - sub_url = f'/{parent_folder_id}/childFolders' + folder, parent_folder_id + ) + sub_url = f"/{parent_folder_id}/childFolders" folder_name = path_parts[-1] - request_body = { - 'displayName': folder_name - } - request_url = f'/users/{self.mailbox_name}/mailFolders{sub_url}' + request_body = {"displayName": folder_name} + request_url = f"/users/{self.mailbox_name}/mailFolders{sub_url}" resp = self._client.post(request_url, json=request_body) if resp.status_code == 409: - logger.debug(f'Folder {folder_name} already exists, ' - f'skipping creation') + logger.debug(f"Folder {folder_name} already exists, " f"skipping creation") elif resp.status_code == 201: - logger.debug(f'Created folder {folder_name}') + logger.debug(f"Created folder {folder_name}") else: - logger.warning(f'Unknown response ' - f'{resp.status_code} {resp.json()}') + logger.warning(f"Unknown response " f"{resp.status_code} {resp.json()}") def fetch_messages(self, folder_name: str, **kwargs) -> List[str]: - """ Returns a list of message UIDs in the specified folder """ + """Returns a list of message UIDs in the specified folder""" folder_id = self._find_folder_id_from_folder_path(folder_name) - url = f'/users/{self.mailbox_name}/mailFolders/' \ - f'{folder_id}/messages' - batch_size = kwargs.get('batch_size') + url = f"/users/{self.mailbox_name}/mailFolders/" f"{folder_id}/messages" + batch_size = kwargs.get("batch_size") if not batch_size: batch_size = 0 emails = self._get_all_messages(url, batch_size) - return [email['id'] for email in emails] + return [email["id"] for email in emails] def _get_all_messages(self, url, batch_size): messages: list - params = { - '$select': 'id' - } + params = {"$select": "id"} if batch_size and batch_size > 0: - params['$top'] = batch_size + params["$top"] = batch_size else: - params['$top'] = 100 + params["$top"] = 100 result = self._client.get(url, params=params) if result.status_code != 200: - raise RuntimeError(f'Failed to fetch messages {result.text}') - messages = result.json()['value'] + raise RuntimeError(f"Failed to fetch messages {result.text}") + messages = result.json()["value"] # Loop if next page is present and not 
obtained message limit. - while '@odata.nextLink' in result.json() and ( - batch_size == 0 or - batch_size - len(messages) > 0): - result = self._client.get(result.json()['@odata.nextLink']) + while "@odata.nextLink" in result.json() and ( + batch_size == 0 or batch_size - len(messages) > 0 + ): + result = self._client.get(result.json()["@odata.nextLink"]) if result.status_code != 200: - raise RuntimeError(f'Failed to fetch messages {result.text}') - messages.extend(result.json()['value']) + raise RuntimeError(f"Failed to fetch messages {result.text}") + messages.extend(result.json()["value"]) return messages def mark_message_read(self, message_id: str): """Marks a message as read""" - url = f'/users/{self.mailbox_name}/messages/{message_id}' + url = f"/users/{self.mailbox_name}/messages/{message_id}" resp = self._client.patch(url, json={"isRead": "true"}) if resp.status_code != 200: - raise RuntimeWarning(f"Failed to mark message read" - f"{resp.status_code}: {resp.json()}") + raise RuntimeWarning( + f"Failed to mark message read" f"{resp.status_code}: {resp.json()}" + ) def fetch_message(self, message_id: str): - url = f'/users/{self.mailbox_name}/messages/{message_id}/$value' + url = f"/users/{self.mailbox_name}/messages/{message_id}/$value" result = self._client.get(url) if result.status_code != 200: - raise RuntimeWarning(f"Failed to fetch message" - f"{result.status_code}: {result.json()}") + raise RuntimeWarning( + f"Failed to fetch message" f"{result.status_code}: {result.json()}" + ) self.mark_message_read(message_id) return result.text def delete_message(self, message_id: str): - url = f'/users/{self.mailbox_name}/messages/{message_id}' + url = f"/users/{self.mailbox_name}/messages/{message_id}" resp = self._client.delete(url) if resp.status_code != 204: - raise RuntimeWarning(f"Failed to delete message " - f"{resp.status_code}: {resp.json()}") + raise RuntimeWarning( + f"Failed to delete message " f"{resp.status_code}: {resp.json()}" + ) def move_message(self, message_id: str, folder_name: str): folder_id = self._find_folder_id_from_folder_path(folder_name) - request_body = { - 'destinationId': folder_id - } - url = f'/users/{self.mailbox_name}/messages/{message_id}/move' + request_body = {"destinationId": folder_id} + url = f"/users/{self.mailbox_name}/messages/{message_id}/move" resp = self._client.post(url, json=request_body) if resp.status_code != 201: - raise RuntimeWarning(f"Failed to move message " - f"{resp.status_code}: {resp.json()}") + raise RuntimeWarning( + f"Failed to move message " f"{resp.status_code}: {resp.json()}" + ) def keepalive(self): # Not needed pass def watch(self, check_callback, check_timeout): - """ Checks the mailbox for new messages every n seconds""" + """Checks the mailbox for new messages every n seconds""" while True: sleep(check_timeout) check_callback(self) @lru_cache(maxsize=10) def _find_folder_id_from_folder_path(self, folder_name: str) -> str: - path_parts = folder_name.split('/') + path_parts = folder_name.split("/") parent_folder_id = None if len(path_parts) > 1: for folder in path_parts[:-1]: - folder_id = self._find_folder_id_with_parent( - folder, parent_folder_id) + folder_id = self._find_folder_id_with_parent(folder, parent_folder_id) parent_folder_id = folder_id - return self._find_folder_id_with_parent( - path_parts[-1], parent_folder_id) + return self._find_folder_id_with_parent(path_parts[-1], parent_folder_id) else: return self._find_folder_id_with_parent(folder_name, None) - def _find_folder_id_with_parent(self, - 
folder_name: str, - parent_folder_id: Optional[str]): - sub_url = '' + def _find_folder_id_with_parent( + self, folder_name: str, parent_folder_id: Optional[str] + ): + sub_url = "" if parent_folder_id is not None: - sub_url = f'/{parent_folder_id}/childFolders' - url = f'/users/{self.mailbox_name}/mailFolders{sub_url}' + sub_url = f"/{parent_folder_id}/childFolders" + url = f"/users/{self.mailbox_name}/mailFolders{sub_url}" filter = f"?$filter=displayName eq '{folder_name}'" folders_resp = self._client.get(url + filter) if folders_resp.status_code != 200: - raise RuntimeWarning(f"Failed to list folders." - f"{folders_resp.json()}") - folders: list = folders_resp.json()['value'] - matched_folders = [folder for folder in folders - if folder['displayName'] == folder_name] + raise RuntimeWarning(f"Failed to list folders." f"{folders_resp.json()}") + folders: list = folders_resp.json()["value"] + matched_folders = [ + folder for folder in folders if folder["displayName"] == folder_name + ] if len(matched_folders) == 0: raise RuntimeError(f"folder {folder_name} not found") selected_folder = matched_folders[0] - return selected_folder['id'] + return selected_folder["id"] diff --git a/parsedmarc/mail/imap.py b/parsedmarc/mail/imap.py index 4ffa55fd..403bbeb7 100644 --- a/parsedmarc/mail/imap.py +++ b/parsedmarc/mail/imap.py @@ -9,30 +9,30 @@ class IMAPConnection(MailboxConnection): - def __init__(self, - host=None, - user=None, - password=None, - port=None, - ssl=True, - verify=True, - timeout=30, - max_retries=4): + def __init__( + self, + host=None, + user=None, + password=None, + port=None, + ssl=True, + verify=True, + timeout=30, + max_retries=4, + ): self._username = user self._password = password self._verify = verify - self._client = IMAPClient(host, user, password, port=port, - ssl=ssl, verify=verify, - timeout=timeout, - max_retries=max_retries) - - def get_folder_separator(self): - try: - namespaces = self._client.namespace() - personal = namespaces.personal[0] - return personal[1] - except (IndexError, NameError): - return '/' + self._client = IMAPClient( + host, + user, + password, + port=port, + ssl=ssl, + verify=verify, + timeout=timeout, + max_retries=max_retries, + ) def create_folder(self, folder_name: str): self._client.create_folder(folder_name) @@ -55,8 +55,8 @@ def keepalive(self): def watch(self, check_callback, check_timeout): """ - Use an IDLE IMAP connection to parse incoming emails, - and pass the results to a callback function + Use an IDLE IMAP connection to parse incoming emails, + and pass the results to a callback function """ # IDLE callback sends IMAPClient object, @@ -67,18 +67,21 @@ def idle_callback_wrapper(client: IMAPClient): while True: try: - IMAPClient(host=self._client.host, - username=self._username, - password=self._password, - port=self._client.port, - ssl=self._client.ssl, - verify=self._verify, - idle_callback=idle_callback_wrapper, - idle_timeout=check_timeout) + IMAPClient( + host=self._client.host, + username=self._username, + password=self._password, + port=self._client.port, + ssl=self._client.ssl, + verify=self._verify, + idle_callback=idle_callback_wrapper, + idle_timeout=check_timeout, + ) except (timeout, IMAPClientError): logger.warning("IMAP connection timeout. Reconnecting...") sleep(check_timeout) except Exception as e: - logger.warning("IMAP connection error. {0}. " - "Reconnecting...".format(e)) + logger.warning( + "IMAP connection error. {0}. 
" "Reconnecting...".format(e) + ) sleep(check_timeout) diff --git a/parsedmarc/mail/mailbox_connection.py b/parsedmarc/mail/mailbox_connection.py index ba7c2cf7..ef94b797 100644 --- a/parsedmarc/mail/mailbox_connection.py +++ b/parsedmarc/mail/mailbox_connection.py @@ -6,15 +6,11 @@ class MailboxConnection(ABC): """ Interface for a mailbox connection """ - def get_folder_separator(self): - return "/" def create_folder(self, folder_name: str): raise NotImplementedError - def fetch_messages(self, - reports_folder: str, - **kwargs) -> List[str]: + def fetch_messages(self, reports_folder: str, **kwargs) -> List[str]: raise NotImplementedError def fetch_message(self, message_id) -> str: diff --git a/parsedmarc/mail/maildir.py b/parsedmarc/mail/maildir.py index 17d3f54d..dd6481a8 100644 --- a/parsedmarc/mail/maildir.py +++ b/parsedmarc/mail/maildir.py @@ -7,28 +7,30 @@ class MaildirConnection(MailboxConnection): - def __init__(self, - maildir_path=None, - maildir_create=False, - ): + def __init__( + self, + maildir_path=None, + maildir_create=False, + ): self._maildir_path = maildir_path self._maildir_create = maildir_create maildir_owner = os.stat(maildir_path).st_uid if os.getuid() != maildir_owner: if os.getuid() == 0: - logger.warning("Switching uid to {} to access Maildir".format( - maildir_owner)) + logger.warning( + "Switching uid to {} to access Maildir".format(maildir_owner) + ) os.setuid(maildir_owner) else: - ex = 'runtime uid {} differ from maildir {} owner {}'.format( - os.getuid(), maildir_path, maildir_owner) + ex = "runtime uid {} differ from maildir {} owner {}".format( + os.getuid(), maildir_path, maildir_owner + ) raise Exception(ex) self._client = mailbox.Maildir(maildir_path, create=maildir_create) self._subfolder_client = {} def create_folder(self, folder_name: str): - self._subfolder_client[folder_name] = self._client.add_folder( - folder_name) + self._subfolder_client[folder_name] = self._client.add_folder(folder_name) self._client.add_folder(folder_name) def fetch_messages(self, reports_folder: str, **kwargs): @@ -43,8 +45,9 @@ def delete_message(self, message_id: str): def move_message(self, message_id: str, folder_name: str): message_data = self._client.get(message_id) if folder_name not in self._subfolder_client.keys(): - self._subfolder_client = mailbox.Maildir(os.join( - self.maildir_path, folder_name), create=self.maildir_create) + self._subfolder_client = mailbox.Maildir( + os.join(self.maildir_path, folder_name), create=self.maildir_create + ) self._subfolder_client[folder_name].add(message_data) self._client.remove(message_id) diff --git a/parsedmarc/opensearch.py b/parsedmarc/opensearch.py index f8a7b1e8..6bb41367 100644 --- a/parsedmarc/opensearch.py +++ b/parsedmarc/opensearch.py @@ -2,8 +2,21 @@ from collections import OrderedDict -from opensearchpy import Q, connections, Object, Document, Index, Nested, \ - InnerDoc, Integer, Text, Boolean, Ip, Date, Search +from opensearchpy import ( + Q, + connections, + Object, + Document, + Index, + Nested, + InnerDoc, + Integer, + Text, + Boolean, + Ip, + Date, + Search, +) from opensearchpy.helpers import reindex from parsedmarc.log import logger @@ -75,24 +88,21 @@ class Index: spf_results = Nested(_SPFResult) def add_policy_override(self, type_, comment): - self.policy_overrides.append(_PolicyOverride(type=type_, - comment=comment)) + self.policy_overrides.append(_PolicyOverride(type=type_, comment=comment)) def add_dkim_result(self, domain, selector, result): - self.dkim_results.append(_DKIMResult(domain=domain, - 
selector=selector, - result=result)) + self.dkim_results.append( + _DKIMResult(domain=domain, selector=selector, result=result) + ) def add_spf_result(self, domain, scope, result): - self.spf_results.append(_SPFResult(domain=domain, - scope=scope, - result=result)) + self.spf_results.append(_SPFResult(domain=domain, scope=scope, result=result)) - def save(self, ** kwargs): + def save(self, **kwargs): self.passed_dmarc = False self.passed_dmarc = self.spf_aligned or self.dkim_aligned - return super().save(** kwargs) + return super().save(**kwargs) class _EmailAddressDoc(InnerDoc): @@ -122,24 +132,25 @@ class _ForensicSampleDoc(InnerDoc): attachments = Nested(_EmailAttachmentDoc) def add_to(self, display_name, address): - self.to.append(_EmailAddressDoc(display_name=display_name, - address=address)) + self.to.append(_EmailAddressDoc(display_name=display_name, address=address)) def add_reply_to(self, display_name, address): - self.reply_to.append(_EmailAddressDoc(display_name=display_name, - address=address)) + self.reply_to.append( + _EmailAddressDoc(display_name=display_name, address=address) + ) def add_cc(self, display_name, address): - self.cc.append(_EmailAddressDoc(display_name=display_name, - address=address)) + self.cc.append(_EmailAddressDoc(display_name=display_name, address=address)) def add_bcc(self, display_name, address): - self.bcc.append(_EmailAddressDoc(display_name=display_name, - address=address)) + self.bcc.append(_EmailAddressDoc(display_name=display_name, address=address)) def add_attachment(self, filename, content_type, sha256): - self.attachments.append(_EmailAttachmentDoc(filename=filename, - content_type=content_type, sha256=sha256)) + self.attachments.append( + _EmailAttachmentDoc( + filename=filename, content_type=content_type, sha256=sha256 + ) + ) class _ForensicReportDoc(Document): @@ -184,13 +195,17 @@ class _SMTPTLSPolicyDoc(InnerDoc): failed_session_count = Integer() failure_details = Nested(_SMTPTLSFailureDetailsDoc) - def add_failure_details(self, result_type, ip_address, - receiving_ip, - receiving_mx_helo, - failed_session_count, - receiving_mx_hostname=None, - additional_information_uri=None, - failure_reason_code=None): + def add_failure_details( + self, + result_type, + ip_address, + receiving_ip, + receiving_mx_helo, + failed_session_count, + receiving_mx_hostname=None, + additional_information_uri=None, + failure_reason_code=None, + ): self.failure_details.append( result_type=result_type, ip_address=ip_address, @@ -199,12 +214,11 @@ def add_failure_details(self, result_type, ip_address, receiving_ip=receiving_ip, failed_session_count=failed_session_count, additional_information=additional_information_uri, - failure_reason_code=failure_reason_code + failure_reason_code=failure_reason_code, ) class _SMTPTLSFailureReportDoc(Document): - class Index: name = "smtp_tls" @@ -216,27 +230,40 @@ class Index: report_id = Text() policies = Nested(_SMTPTLSPolicyDoc) - def add_policy(self, policy_type, policy_domain, - successful_session_count, - failed_session_count, - policy_string=None, - mx_host_patterns=None, - failure_details=None): - self.policies.append(policy_type=policy_type, - policy_domain=policy_domain, - successful_session_count=successful_session_count, - failed_session_count=failed_session_count, - policy_string=policy_string, - mx_host_patterns=mx_host_patterns, - failure_details=failure_details) + def add_policy( + self, + policy_type, + policy_domain, + successful_session_count, + failed_session_count, + policy_string=None, + 
mx_host_patterns=None, + failure_details=None, + ): + self.policies.append( + policy_type=policy_type, + policy_domain=policy_domain, + successful_session_count=successful_session_count, + failed_session_count=failed_session_count, + policy_string=policy_string, + mx_host_patterns=mx_host_patterns, + failure_details=failure_details, + ) class AlreadySaved(ValueError): """Raised when a report to be saved matches an existing report""" -def set_hosts(hosts, use_ssl=False, ssl_cert_path=None, - username=None, password=None, apiKey=None, timeout=60.0): +def set_hosts( + hosts, + use_ssl=False, + ssl_cert_path=None, + username=None, + password=None, + apiKey=None, + timeout=60.0, +): """ Sets the OpenSearch hosts to use @@ -251,21 +278,18 @@ def set_hosts(hosts, use_ssl=False, ssl_cert_path=None, """ if not isinstance(hosts, list): hosts = [hosts] - conn_params = { - "hosts": hosts, - "timeout": timeout - } + conn_params = {"hosts": hosts, "timeout": timeout} if use_ssl: - conn_params['use_ssl'] = True + conn_params["use_ssl"] = True if ssl_cert_path: - conn_params['verify_certs'] = True - conn_params['ca_certs'] = ssl_cert_path + conn_params["verify_certs"] = True + conn_params["ca_certs"] = ssl_cert_path else: - conn_params['verify_certs'] = False + conn_params["verify_certs"] = False if username: - conn_params['http_auth'] = (username+":"+password) + conn_params["http_auth"] = username + ":" + password if apiKey: - conn_params['api_key'] = apiKey + conn_params["api_key"] = apiKey connections.create_connection(**conn_params) @@ -284,14 +308,12 @@ def create_indexes(names, settings=None): if not index.exists(): logger.debug("Creating OpenSearch index: {0}".format(name)) if settings is None: - index.settings(number_of_shards=1, - number_of_replicas=0) + index.settings(number_of_shards=1, number_of_replicas=0) else: index.settings(**settings) index.create() except Exception as e: - raise OpenSearchError( - "OpenSearch error: {0}".format(e.__str__())) + raise OpenSearchError("OpenSearch error: {0}".format(e.__str__())) def migrate_indexes(aggregate_indexes=None, forensic_indexes=None): @@ -323,33 +345,31 @@ def migrate_indexes(aggregate_indexes=None, forensic_indexes=None): fo_type = fo_mapping["type"] if fo_type == "long": new_index_name = "{0}-v{1}".format(aggregate_index_name, version) - body = {"properties": {"published_policy.fo": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 + body = { + "properties": { + "published_policy.fo": { + "type": "text", + "fields": {"keyword": {"type": "keyword", "ignore_above": 256}}, } } } - } - } Index(new_index_name).create() Index(new_index_name).put_mapping(doc_type=doc, body=body) - reindex(connections.get_connection(), aggregate_index_name, - new_index_name) + reindex(connections.get_connection(), aggregate_index_name, new_index_name) Index(aggregate_index_name).delete() for forensic_index in forensic_indexes: pass -def save_aggregate_report_to_opensearch(aggregate_report, - index_suffix=None, - index_prefix=None, - monthly_indexes=False, - number_of_shards=1, - number_of_replicas=0): +def save_aggregate_report_to_opensearch( + aggregate_report, + index_suffix=None, + index_prefix=None, + monthly_indexes=False, + number_of_shards=1, + number_of_replicas=0, +): """ Saves a parsed DMARC aggregate report to OpenSearch @@ -370,10 +390,8 @@ def save_aggregate_report_to_opensearch(aggregate_report, org_name = metadata["org_name"] report_id = metadata["report_id"] domain = 
aggregate_report["policy_published"]["domain"] - begin_date = human_timestamp_to_datetime(metadata["begin_date"], - to_utc=True) - end_date = human_timestamp_to_datetime(metadata["end_date"], - to_utc=True) + begin_date = human_timestamp_to_datetime(metadata["begin_date"], to_utc=True) + end_date = human_timestamp_to_datetime(metadata["end_date"], to_utc=True) begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%SZ") end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%SZ") if monthly_indexes: @@ -382,8 +400,7 @@ def save_aggregate_report_to_opensearch(aggregate_report, index_date = begin_date.strftime("%Y-%m-%d") aggregate_report["begin_date"] = begin_date aggregate_report["end_date"] = end_date - date_range = [aggregate_report["begin_date"], - aggregate_report["end_date"]] + date_range = [aggregate_report["begin_date"], aggregate_report["end_date"]] org_name_query = Q(dict(match_phrase=dict(org_name=org_name))) report_id_query = Q(dict(match_phrase=dict(report_id=report_id))) @@ -405,18 +422,20 @@ def save_aggregate_report_to_opensearch(aggregate_report, try: existing = search.execute() except Exception as error_: - raise OpenSearchError("OpenSearch's search for existing report \ - error: {}".format(error_.__str__())) + raise OpenSearchError( + "OpenSearch's search for existing report \ + error: {}".format(error_.__str__()) + ) if len(existing) > 0: - raise AlreadySaved("An aggregate report ID {0} from {1} about {2} " - "with a date range of {3} UTC to {4} UTC already " - "exists in " - "OpenSearch".format(report_id, - org_name, - domain, - begin_date_human, - end_date_human)) + raise AlreadySaved( + "An aggregate report ID {0} from {1} about {2} " + "with a date range of {3} UTC to {4} UTC already " + "exists in " + "OpenSearch".format( + report_id, org_name, domain, begin_date_human, end_date_human + ) + ) published_policy = _PublishedPolicy( domain=aggregate_report["policy_published"]["domain"], adkim=aggregate_report["policy_published"]["adkim"], @@ -424,7 +443,7 @@ def save_aggregate_report_to_opensearch(aggregate_report, p=aggregate_report["policy_published"]["p"], sp=aggregate_report["policy_published"]["sp"], pct=aggregate_report["policy_published"]["pct"], - fo=aggregate_report["policy_published"]["fo"] + fo=aggregate_report["policy_published"]["fo"], ) for record in aggregate_report["records"]: @@ -447,28 +466,33 @@ def save_aggregate_report_to_opensearch(aggregate_report, source_name=record["source"]["name"], message_count=record["count"], disposition=record["policy_evaluated"]["disposition"], - dkim_aligned=record["policy_evaluated"]["dkim"] is not None and - record["policy_evaluated"]["dkim"].lower() == "pass", - spf_aligned=record["policy_evaluated"]["spf"] is not None and - record["policy_evaluated"]["spf"].lower() == "pass", + dkim_aligned=record["policy_evaluated"]["dkim"] is not None + and record["policy_evaluated"]["dkim"].lower() == "pass", + spf_aligned=record["policy_evaluated"]["spf"] is not None + and record["policy_evaluated"]["spf"].lower() == "pass", header_from=record["identifiers"]["header_from"], envelope_from=record["identifiers"]["envelope_from"], - envelope_to=record["identifiers"]["envelope_to"] + envelope_to=record["identifiers"]["envelope_to"], ) for override in record["policy_evaluated"]["policy_override_reasons"]: - agg_doc.add_policy_override(type_=override["type"], - comment=override["comment"]) + agg_doc.add_policy_override( + type_=override["type"], comment=override["comment"] + ) for dkim_result in record["auth_results"]["dkim"]: - 
agg_doc.add_dkim_result(domain=dkim_result["domain"], - selector=dkim_result["selector"], - result=dkim_result["result"]) + agg_doc.add_dkim_result( + domain=dkim_result["domain"], + selector=dkim_result["selector"], + result=dkim_result["result"], + ) for spf_result in record["auth_results"]["spf"]: - agg_doc.add_spf_result(domain=spf_result["domain"], - scope=spf_result["scope"], - result=spf_result["result"]) + agg_doc.add_spf_result( + domain=spf_result["domain"], + scope=spf_result["scope"], + result=spf_result["result"], + ) index = "dmarc_aggregate" if index_suffix: @@ -476,41 +500,43 @@ def save_aggregate_report_to_opensearch(aggregate_report, if index_prefix: index = "{0}{1}".format(index_prefix, index) index = "{0}-{1}".format(index, index_date) - index_settings = dict(number_of_shards=number_of_shards, - number_of_replicas=number_of_replicas) + index_settings = dict( + number_of_shards=number_of_shards, number_of_replicas=number_of_replicas + ) create_indexes([index], index_settings) agg_doc.meta.index = index try: agg_doc.save() except Exception as e: - raise OpenSearchError( - "OpenSearch error: {0}".format(e.__str__())) + raise OpenSearchError("OpenSearch error: {0}".format(e.__str__())) -def save_forensic_report_to_opensearch(forensic_report, - index_suffix=None, - index_prefix=None, - monthly_indexes=False, - number_of_shards=1, - number_of_replicas=0): +def save_forensic_report_to_opensearch( + forensic_report, + index_suffix=None, + index_prefix=None, + monthly_indexes=False, + number_of_shards=1, + number_of_replicas=0, +): """ - Saves a parsed DMARC forensic report to OpenSearch - - Args: - forensic_report (OrderedDict): A parsed forensic report - index_suffix (str): The suffix of the name of the index to save to - index_prefix (str): The prefix of the name of the index to save to - monthly_indexes (bool): Use monthly indexes instead of daily - indexes - number_of_shards (int): The number of shards to use in the index - number_of_replicas (int): The number of replicas to use in the - index - - Raises: - AlreadySaved + Saves a parsed DMARC forensic report to OpenSearch - """ + Args: + forensic_report (OrderedDict): A parsed forensic report + index_suffix (str): The suffix of the name of the index to save to + index_prefix (str): The prefix of the name of the index to save to + monthly_indexes (bool): Use monthly indexes instead of daily + indexes + number_of_shards (int): The number of shards to use in the index + number_of_replicas (int): The number of replicas to use in the + index + + Raises: + AlreadySaved + + """ logger.info("Saving forensic report to OpenSearch") forensic_report = forensic_report.copy() sample_date = None @@ -555,12 +581,12 @@ def save_forensic_report_to_opensearch(forensic_report, existing = search.execute() if len(existing) > 0: - raise AlreadySaved("A forensic sample to {0} from {1} " - "with a subject of {2} and arrival date of {3} " - "already exists in " - "OpenSearch".format( - to_, from_, subject, arrival_date_human - )) + raise AlreadySaved( + "A forensic sample to {0} from {1} " + "with a subject of {2} and arrival date of {3} " + "already exists in " + "OpenSearch".format(to_, from_, subject, arrival_date_human) + ) parsed_sample = forensic_report["parsed_sample"] sample = _ForensicSampleDoc( @@ -570,25 +596,25 @@ def save_forensic_report_to_opensearch(forensic_report, date=sample_date, subject=forensic_report["parsed_sample"]["subject"], filename_safe_subject=parsed_sample["filename_safe_subject"], - 
body=forensic_report["parsed_sample"]["body"] + body=forensic_report["parsed_sample"]["body"], ) for address in forensic_report["parsed_sample"]["to"]: - sample.add_to(display_name=address["display_name"], - address=address["address"]) + sample.add_to(display_name=address["display_name"], address=address["address"]) for address in forensic_report["parsed_sample"]["reply_to"]: - sample.add_reply_to(display_name=address["display_name"], - address=address["address"]) + sample.add_reply_to( + display_name=address["display_name"], address=address["address"] + ) for address in forensic_report["parsed_sample"]["cc"]: - sample.add_cc(display_name=address["display_name"], - address=address["address"]) + sample.add_cc(display_name=address["display_name"], address=address["address"]) for address in forensic_report["parsed_sample"]["bcc"]: - sample.add_bcc(display_name=address["display_name"], - address=address["address"]) + sample.add_bcc(display_name=address["display_name"], address=address["address"]) for attachment in forensic_report["parsed_sample"]["attachments"]: - sample.add_attachment(filename=attachment["filename"], - content_type=attachment["mail_content_type"], - sha256=attachment["sha256"]) + sample.add_attachment( + filename=attachment["filename"], + content_type=attachment["mail_content_type"], + sha256=attachment["sha256"], + ) try: forensic_doc = _ForensicReportDoc( feedback_type=forensic_report["feedback_type"], @@ -604,12 +630,11 @@ def save_forensic_report_to_opensearch(forensic_report, source_country=forensic_report["source"]["country"], source_reverse_dns=forensic_report["source"]["reverse_dns"], source_base_domain=forensic_report["source"]["base_domain"], - authentication_mechanisms=forensic_report[ - "authentication_mechanisms"], + authentication_mechanisms=forensic_report["authentication_mechanisms"], auth_failure=forensic_report["auth_failure"], dkim_domain=forensic_report["dkim_domain"], original_rcpt_to=forensic_report["original_rcpt_to"], - sample=sample + sample=sample, ) index = "dmarc_forensic" @@ -622,26 +647,29 @@ def save_forensic_report_to_opensearch(forensic_report, else: index_date = arrival_date.strftime("%Y-%m-%d") index = "{0}-{1}".format(index, index_date) - index_settings = dict(number_of_shards=number_of_shards, - number_of_replicas=number_of_replicas) + index_settings = dict( + number_of_shards=number_of_shards, number_of_replicas=number_of_replicas + ) create_indexes([index], index_settings) forensic_doc.meta.index = index try: forensic_doc.save() except Exception as e: - raise OpenSearchError( - "OpenSearch error: {0}".format(e.__str__())) + raise OpenSearchError("OpenSearch error: {0}".format(e.__str__())) except KeyError as e: raise InvalidForensicReport( - "Forensic report missing required field: {0}".format(e.__str__())) + "Forensic report missing required field: {0}".format(e.__str__()) + ) -def save_smtp_tls_report_to_opensearch(report, - index_suffix=None, - index_prefix=None, - monthly_indexes=False, - number_of_shards=1, - number_of_replicas=0): +def save_smtp_tls_report_to_opensearch( + report, + index_suffix=None, + index_prefix=None, + monthly_indexes=False, + number_of_shards=1, + number_of_replicas=0, +): """ Saves a parsed SMTP TLS report to OpenSearch @@ -659,10 +687,8 @@ def save_smtp_tls_report_to_opensearch(report, logger.info("Saving aggregate report to OpenSearch") org_name = report["org_name"] report_id = report["report_id"] - begin_date = human_timestamp_to_datetime(report["begin_date"], - to_utc=True) - end_date = 
human_timestamp_to_datetime(report["end_date"], - to_utc=True) + begin_date = human_timestamp_to_datetime(report["begin_date"], to_utc=True) + end_date = human_timestamp_to_datetime(report["end_date"], to_utc=True) begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%SZ") end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%SZ") if monthly_indexes: @@ -691,15 +717,19 @@ def save_smtp_tls_report_to_opensearch(report, try: existing = search.execute() except Exception as error_: - raise OpenSearchError("OpenSearch's search for existing report \ - error: {}".format(error_.__str__())) + raise OpenSearchError( + "OpenSearch's search for existing report \ + error: {}".format(error_.__str__()) + ) if len(existing) > 0: - raise AlreadySaved(f"An SMTP TLS report ID {report_id} from " - f" {org_name} with a date range of " - f"{begin_date_human} UTC to " - f"{end_date_human} UTC already " - "exists in OpenSearch") + raise AlreadySaved( + f"An SMTP TLS report ID {report_id} from " + f" {org_name} with a date range of " + f"{begin_date_human} UTC to " + f"{end_date_human} UTC already " + "exists in OpenSearch" + ) index = "smtp_tls" if index_suffix: @@ -707,8 +737,9 @@ def save_smtp_tls_report_to_opensearch(report, if index_prefix: index = "{0}{1}".format(index_prefix, index) index = "{0}-{1}".format(index, index_date) - index_settings = dict(number_of_shards=number_of_shards, - number_of_replicas=number_of_replicas) + index_settings = dict( + number_of_shards=number_of_shards, number_of_replicas=number_of_replicas + ) smtp_tls_doc = _SMTPTLSFailureReportDoc( organization_name=report["organization_name"], @@ -716,10 +747,10 @@ def save_smtp_tls_report_to_opensearch(report, date_begin=report["date_begin"], date_end=report["date_end"], contact_info=report["contact_info"], - report_id=report["report_id"] + report_id=report["report_id"], ) - for policy in report['policies']: + for policy in report["policies"]: policy_strings = None mx_host_patterns = None if "policy_strings" in policy: @@ -730,7 +761,7 @@ def save_smtp_tls_report_to_opensearch(report, policy_domain=policy["policy_domain"], policy_type=policy["policy_type"], policy_string=policy_strings, - mx_host_patterns=mx_host_patterns + mx_host_patterns=mx_host_patterns, ) if "failure_details" in policy: failure_details = policy["failure_details"] @@ -738,11 +769,11 @@ def save_smtp_tls_report_to_opensearch(report, additional_information_uri = None failure_reason_code = None if "receiving_mx_hostname" in failure_details: - receiving_mx_hostname = failure_details[ - "receiving_mx_hostname"] + receiving_mx_hostname = failure_details["receiving_mx_hostname"] if "additional_information_uri" in failure_details: additional_information_uri = failure_details[ - "additional_information_uri"] + "additional_information_uri" + ] if "failure_reason_code" in failure_details: failure_reason_code = failure_details["failure_reason_code"] policy_doc.add_failure_details( @@ -753,7 +784,7 @@ def save_smtp_tls_report_to_opensearch(report, failed_session_count=failure_details["failed_session_count"], receiving_mx_hostname=receiving_mx_hostname, additional_information_uri=additional_information_uri, - failure_reason_code=failure_reason_code + failure_reason_code=failure_reason_code, ) smtp_tls_doc.policies.append(policy_doc) @@ -763,5 +794,4 @@ def save_smtp_tls_report_to_opensearch(report, try: smtp_tls_doc.save() except Exception as e: - raise OpenSearchError( - "OpenSearch error: {0}".format(e.__str__())) + raise OpenSearchError("OpenSearch error: 
{0}".format(e.__str__())) diff --git a/parsedmarc/s3.py b/parsedmarc/s3.py index d7060467..1b6c3743 100644 --- a/parsedmarc/s3.py +++ b/parsedmarc/s3.py @@ -10,8 +10,15 @@ class S3Client(object): """A client for a Amazon S3""" - def __init__(self, bucket_name, bucket_path, region_name, endpoint_url, - access_key_id, secret_access_key): + def __init__( + self, + bucket_name, + bucket_path, + region_name, + endpoint_url, + access_key_id, + secret_access_key, + ): """ Initializes the S3Client Args: @@ -34,7 +41,7 @@ def __init__(self, bucket_name, bucket_path, region_name, endpoint_url, # https://github.com/boto/boto3/blob/1.24.7/boto3/session.py#L312 self.s3 = boto3.resource( - 's3', + "s3", region_name=region_name, endpoint_url=endpoint_url, aws_access_key_id=access_key_id, @@ -43,10 +50,10 @@ def __init__(self, bucket_name, bucket_path, region_name, endpoint_url, self.bucket = self.s3.Bucket(self.bucket_name) def save_aggregate_report_to_s3(self, report): - self.save_report_to_s3(report, 'aggregate') + self.save_report_to_s3(report, "aggregate") def save_forensic_report_to_s3(self, report): - self.save_report_to_s3(report, 'forensic') + self.save_report_to_s3(report, "forensic") def save_smtp_tls_report_to_s3(self, report): self.save_report_to_s3(report, "smtp_tls") @@ -67,19 +74,18 @@ def save_report_to_s3(self, report, report_type): report_date.year, report_date.month, report_date.day, - report_id + report_id, + ) + logger.debug( + "Saving {0} report to s3://{1}/{2}".format( + report_type, self.bucket_name, object_path + ) ) - logger.debug("Saving {0} report to s3://{1}/{2}".format( - report_type, - self.bucket_name, - object_path)) object_metadata = { k: v for k, v in report["report_metadata"].items() if k in self.metadata_keys } self.bucket.put_object( - Body=json.dumps(report), - Key=object_path, - Metadata=object_metadata + Body=json.dumps(report), Key=object_path, Metadata=object_metadata ) diff --git a/parsedmarc/splunk.py b/parsedmarc/splunk.py index 9d94feb0..cf6a1e04 100644 --- a/parsedmarc/splunk.py +++ b/parsedmarc/splunk.py @@ -22,8 +22,9 @@ class HECClient(object): # http://docs.splunk.com/Documentation/Splunk/latest/Data/AboutHEC # http://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTinput#services.2Fcollector - def __init__(self, url, access_token, index, - source="parsedmarc", verify=True, timeout=60): + def __init__( + self, url, access_token, index, source="parsedmarc", verify=True, timeout=60 + ): """ Initializes the HECClient @@ -37,8 +38,9 @@ def __init__(self, url, access_token, index, data before giving up """ url = urlparse(url) - self.url = "{0}://{1}/services/collector/event/1.0".format(url.scheme, - url.netloc) + self.url = "{0}://{1}/services/collector/event/1.0".format( + url.scheme, url.netloc + ) self.access_token = access_token.lstrip("Splunk ") self.index = index self.host = socket.getfqdn() @@ -46,12 +48,11 @@ def __init__(self, url, access_token, index, self.session = requests.Session() self.timeout = timeout self.session.verify = verify - self._common_data = dict(host=self.host, source=self.source, - index=self.index) + self._common_data = dict(host=self.host, source=self.source, index=self.index) self.session.headers = { "User-Agent": "parsedmarc/{0}".format(__version__), - "Authorization": "Splunk {0}".format(self.access_token) + "Authorization": "Splunk {0}".format(self.access_token), } def save_aggregate_reports_to_splunk(self, aggregate_reports): @@ -78,36 +79,26 @@ def save_aggregate_reports_to_splunk(self, aggregate_reports): for 
metadata in report["report_metadata"]: new_report[metadata] = report["report_metadata"][metadata] new_report["published_policy"] = report["policy_published"] - new_report["source_ip_address"] = record["source"][ - "ip_address"] + new_report["source_ip_address"] = record["source"]["ip_address"] new_report["source_country"] = record["source"]["country"] - new_report["source_reverse_dns"] = record["source"][ - "reverse_dns"] - new_report["source_base_domain"] = record["source"][ - "base_domain"] + new_report["source_reverse_dns"] = record["source"]["reverse_dns"] + new_report["source_base_domain"] = record["source"]["base_domain"] new_report["source_type"] = record["source"]["type"] new_report["source_name"] = record["source"]["name"] new_report["message_count"] = record["count"] - new_report["disposition"] = record["policy_evaluated"][ - "disposition" - ] + new_report["disposition"] = record["policy_evaluated"]["disposition"] new_report["spf_aligned"] = record["alignment"]["spf"] new_report["dkim_aligned"] = record["alignment"]["dkim"] new_report["passed_dmarc"] = record["alignment"]["dmarc"] - new_report["header_from"] = record["identifiers"][ - "header_from"] - new_report["envelope_from"] = record["identifiers"][ - "envelope_from"] + new_report["header_from"] = record["identifiers"]["header_from"] + new_report["envelope_from"] = record["identifiers"]["envelope_from"] if "dkim" in record["auth_results"]: - new_report["dkim_results"] = record["auth_results"][ - "dkim"] + new_report["dkim_results"] = record["auth_results"]["dkim"] if "spf" in record["auth_results"]: - new_report["spf_results"] = record["auth_results"][ - "spf"] + new_report["spf_results"] = record["auth_results"]["spf"] data["sourcetype"] = "dmarc:aggregate" - timestamp = human_timestamp_to_unix_timestamp( - new_report["begin_date"]) + timestamp = human_timestamp_to_unix_timestamp(new_report["begin_date"]) data["time"] = timestamp data["event"] = new_report.copy() json_str += "{0}\n".format(json.dumps(data)) @@ -115,8 +106,7 @@ def save_aggregate_reports_to_splunk(self, aggregate_reports): if not self.session.verify: logger.debug("Skipping certificate verification for Splunk HEC") try: - response = self.session.post(self.url, data=json_str, - timeout=self.timeout) + response = self.session.post(self.url, data=json_str, timeout=self.timeout) response = response.json() except Exception as e: raise SplunkError(e.__str__()) @@ -142,8 +132,7 @@ def save_forensic_reports_to_splunk(self, forensic_reports): for report in forensic_reports: data = self._common_data.copy() data["sourcetype"] = "dmarc:forensic" - timestamp = human_timestamp_to_unix_timestamp( - report["arrival_date_utc"]) + timestamp = human_timestamp_to_unix_timestamp(report["arrival_date_utc"]) data["time"] = timestamp data["event"] = report.copy() json_str += "{0}\n".format(json.dumps(data)) @@ -151,8 +140,7 @@ def save_forensic_reports_to_splunk(self, forensic_reports): if not self.session.verify: logger.debug("Skipping certificate verification for Splunk HEC") try: - response = self.session.post(self.url, data=json_str, - timeout=self.timeout) + response = self.session.post(self.url, data=json_str, timeout=self.timeout) response = response.json() except Exception as e: raise SplunkError(e.__str__()) @@ -179,8 +167,7 @@ def save_smtp_tls_reports_to_splunk(self, reports): json_str = "" for report in reports: data["sourcetype"] = "smtp:tls" - timestamp = human_timestamp_to_unix_timestamp( - report["begin_date"]) + timestamp = 
human_timestamp_to_unix_timestamp(report["begin_date"]) data["time"] = timestamp data["event"] = report.copy() json_str += "{0}\n".format(json.dumps(data)) @@ -188,8 +175,7 @@ def save_smtp_tls_reports_to_splunk(self, reports): if not self.session.verify: logger.debug("Skipping certificate verification for Splunk HEC") try: - response = self.session.post(self.url, data=json_str, - timeout=self.timeout) + response = self.session.post(self.url, data=json_str, timeout=self.timeout) response = response.json() except Exception as e: raise SplunkError(e.__str__()) diff --git a/parsedmarc/syslog.py b/parsedmarc/syslog.py index 0fc47002..c656aa8e 100644 --- a/parsedmarc/syslog.py +++ b/parsedmarc/syslog.py @@ -4,8 +4,11 @@ import logging.handlers import json -from parsedmarc import parsed_aggregate_reports_to_csv_rows, \ - parsed_forensic_reports_to_csv_rows, parsed_smtp_tls_reports_to_csv_rows +from parsedmarc import ( + parsed_aggregate_reports_to_csv_rows, + parsed_forensic_reports_to_csv_rows, + parsed_smtp_tls_reports_to_csv_rows, +) class SyslogClient(object): @@ -20,10 +23,9 @@ def __init__(self, server_name, server_port): """ self.server_name = server_name self.server_port = server_port - self.logger = logging.getLogger('parsedmarc_syslog') + self.logger = logging.getLogger("parsedmarc_syslog") self.logger.setLevel(logging.INFO) - log_handler = logging.handlers.SysLogHandler(address=(server_name, - server_port)) + log_handler = logging.handlers.SysLogHandler(address=(server_name, server_port)) self.logger.addHandler(log_handler) def save_aggregate_report_to_syslog(self, aggregate_reports): diff --git a/parsedmarc/utils.py b/parsedmarc/utils.py index 994fbb0d..37136ff7 100644 --- a/parsedmarc/utils.py +++ b/parsedmarc/utils.py @@ -38,7 +38,7 @@ import parsedmarc.resources.maps -parenthesis_regex = re.compile(r'\s*\(.*\)\s*') +parenthesis_regex = re.compile(r"\s*\(.*\)\s*") null_file = open(os.devnull, "w") mailparser_logger = logging.getLogger("mailparser") @@ -67,7 +67,7 @@ def decode_base64(data): data = bytes(data, encoding="ascii") missing_padding = len(data) % 4 if missing_padding != 0: - data += b'=' * (4 - missing_padding) + data += b"=" * (4 - missing_padding) return base64.b64decode(data) @@ -116,24 +116,35 @@ def query_dns(domain, record_type, cache=None, nameservers=None, timeout=2.0): resolver = dns.resolver.Resolver() timeout = float(timeout) if nameservers is None: - nameservers = ["1.1.1.1", "1.0.0.1", - "2606:4700:4700::1111", "2606:4700:4700::1001", - ] + nameservers = [ + "1.1.1.1", + "1.0.0.1", + "2606:4700:4700::1111", + "2606:4700:4700::1001", + ] resolver.nameservers = nameservers resolver.timeout = timeout resolver.lifetime = timeout if record_type == "TXT": - resource_records = list(map( - lambda r: r.strings, - resolver.resolve(domain, record_type, lifetime=timeout))) + resource_records = list( + map( + lambda r: r.strings, + resolver.resolve(domain, record_type, lifetime=timeout), + ) + ) _resource_record = [ resource_record[0][:0].join(resource_record) - for resource_record in resource_records if resource_record] + for resource_record in resource_records + if resource_record + ] records = [r.decode() for r in _resource_record] else: - records = list(map( - lambda r: r.to_text().replace('"', '').rstrip("."), - resolver.resolve(domain, record_type, lifetime=timeout))) + records = list( + map( + lambda r: r.to_text().replace('"', "").rstrip("."), + resolver.resolve(domain, record_type, lifetime=timeout), + ) + ) if cache: cache[cache_key] = records @@ -157,9 +168,9 
@@ def get_reverse_dns(ip_address, cache=None, nameservers=None, timeout=2.0): hostname = None try: address = dns.reversename.from_address(ip_address) - hostname = query_dns(address, "PTR", cache=cache, - nameservers=nameservers, - timeout=timeout)[0] + hostname = query_dns( + address, "PTR", cache=cache, nameservers=nameservers, timeout=timeout + )[0] except dns.exception.DNSException as e: logger.warning(f"get_reverse_dns({ip_address}) exception: {e}") @@ -256,9 +267,11 @@ def get_ip_address_country(ip_address, db_path=None): if db_path is not None: if os.path.isfile(db_path) is False: db_path = None - logger.warning(f"No file exists at {db_path}. Falling back to an " - "included copy of the IPDB IP to Country " - "Lite database.") + logger.warning( + f"No file exists at {db_path}. Falling back to an " + "included copy of the IPDB IP to Country " + "Lite database." + ) if db_path is None: for system_path in db_paths: @@ -267,12 +280,12 @@ def get_ip_address_country(ip_address, db_path=None): break if db_path is None: - with pkg_resources.path(parsedmarc.resources.dbip, - "dbip-country-lite.mmdb") as path: + with pkg_resources.path( + parsedmarc.resources.dbip, "dbip-country-lite.mmdb" + ) as path: db_path = path - db_age = datetime.now() - datetime.fromtimestamp( - os.stat(db_path).st_mtime) + db_age = datetime.now() - datetime.fromtimestamp(os.stat(db_path).st_mtime) if db_age > timedelta(days=30): logger.warning("IP database is more than a month old") @@ -288,12 +301,14 @@ def get_ip_address_country(ip_address, db_path=None): return country -def get_service_from_reverse_dns_base_domain(base_domain, - always_use_local_file=False, - local_file_path=None, - url=None, - offline=False, - reverse_dns_map=None): +def get_service_from_reverse_dns_base_domain( + base_domain, + always_use_local_file=False, + local_file_path=None, + url=None, + offline=False, + reverse_dns_map=None, +): """ Returns the service name of a given base domain name from reverse DNS. 
@@ -309,28 +324,27 @@ def get_service_from_reverse_dns_base_domain(base_domain, If the service is unknown, the name will be the supplied reverse_dns_base_domain and the type will be None """ + def load_csv(_csv_file): reader = csv.DictReader(_csv_file) for row in reader: key = row["base_reverse_dns"].lower().strip() - reverse_dns_map[key] = dict( - name=row["name"], - type=row["type"]) + reverse_dns_map[key] = dict(name=row["name"], type=row["type"]) base_domain = base_domain.lower().strip() if url is None: - url = ("https://raw.githubusercontent.com/domainaware" - "/parsedmarc/master/parsedmarc/" - "resources/maps/base_reverse_dns_map.csv") + url = ( + "https://raw.githubusercontent.com/domainaware" + "/parsedmarc/master/parsedmarc/" + "resources/maps/base_reverse_dns_map.csv" + ) if reverse_dns_map is None: reverse_dns_map = dict() csv_file = io.StringIO() - if (not (offline or always_use_local_file) - and len(reverse_dns_map) == 0): + if not (offline or always_use_local_file) and len(reverse_dns_map) == 0: try: - logger.debug(f"Trying to fetch " - f"reverse DNS map from {url}...") + logger.debug(f"Trying to fetch " f"reverse DNS map from {url}...") csv_file.write(requests.get(url).text) csv_file.seek(0) load_csv(csv_file) @@ -338,8 +352,9 @@ def load_csv(_csv_file): logger.warning(f"Failed to fetch reverse DNS map: {e}") if len(reverse_dns_map) == 0: logger.info("Loading included reverse DNS map...") - with pkg_resources.path(parsedmarc.resources.maps, - "base_reverse_dns_map.csv") as path: + with pkg_resources.path( + parsedmarc.resources.maps, "base_reverse_dns_map.csv" + ) as path: if local_file_path is not None: path = local_file_path with open(path) as csv_file: @@ -352,15 +367,18 @@ def load_csv(_csv_file): return service -def get_ip_address_info(ip_address, - ip_db_path=None, - reverse_dns_map_path=None, - always_use_local_files=False, - reverse_dns_map_url=None, - cache=None, - reverse_dns_map=None, - offline=False, - nameservers=None, timeout=2.0): +def get_ip_address_info( + ip_address, + ip_db_path=None, + reverse_dns_map_path=None, + always_use_local_files=False, + reverse_dns_map_url=None, + cache=None, + reverse_dns_map=None, + offline=False, + nameservers=None, + timeout=2.0, +): """ Returns reverse DNS and country information for the given IP address @@ -392,9 +410,9 @@ def get_ip_address_info(ip_address, if offline: reverse_dns = None else: - reverse_dns = get_reverse_dns(ip_address, - nameservers=nameservers, - timeout=timeout) + reverse_dns = get_reverse_dns( + ip_address, nameservers=nameservers, timeout=timeout + ) country = get_ip_address_country(ip_address, db_path=ip_db_path) info["country"] = country info["reverse_dns"] = reverse_dns @@ -410,7 +428,8 @@ def get_ip_address_info(ip_address, local_file_path=reverse_dns_map_path, url=reverse_dns_map_url, always_use_local_file=always_use_local_files, - reverse_dns_map=reverse_dns_map) + reverse_dns_map=reverse_dns_map, + ) info["base_domain"] = base_domain info["type"] = service["type"] info["name"] = service["name"] @@ -437,10 +456,14 @@ def parse_email_address(original_address): local = address_parts[0].lower() domain = address_parts[-1].lower() - return OrderedDict([("display_name", display_name), - ("address", address), - ("local", local), - ("domain", domain)]) + return OrderedDict( + [ + ("display_name", display_name), + ("address", address), + ("local", local), + ("domain", domain), + ] + ) def get_filename_safe_string(string): @@ -453,8 +476,7 @@ def get_filename_safe_string(string): Returns: str: A string 
safe for a filename """ - invalid_filename_chars = ['\\', '/', ':', '"', '*', '?', '|', '\n', - '\r'] + invalid_filename_chars = ["\\", "/", ":", '"', "*", "?", "|", "\n", "\r"] if string is None: string = "None" for char in invalid_filename_chars: @@ -498,7 +520,8 @@ def is_outlook_msg(content): bool: A flag that indicates if the file is an Outlook MSG file """ return isinstance(content, bytes) and content.startswith( - b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1") + b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1" + ) def convert_outlook_msg(msg_bytes): @@ -520,14 +543,16 @@ def convert_outlook_msg(msg_bytes): with open("sample.msg", "wb") as msg_file: msg_file.write(msg_bytes) try: - subprocess.check_call(["msgconvert", "sample.msg"], - stdout=null_file, stderr=null_file) + subprocess.check_call( + ["msgconvert", "sample.msg"], stdout=null_file, stderr=null_file + ) eml_path = "sample.eml" with open(eml_path, "rb") as eml_file: rfc822 = eml_file.read() except FileNotFoundError: raise EmailParserError( - "Failed to convert Outlook MSG: msgconvert utility not found") + "Failed to convert Outlook MSG: msgconvert utility not found" + ) finally: os.chdir(orig_dir) shutil.rmtree(tmp_dir) @@ -562,8 +587,7 @@ def parse_email(data, strip_attachment_payloads=False): if received["date_utc"] is None: del received["date_utc"] else: - received["date_utc"] = received["date_utc"].replace("T", - " ") + received["date_utc"] = received["date_utc"].replace("T", " ") if "from" not in parsed_email: if "From" in parsed_email["headers"]: @@ -579,33 +603,36 @@ def parse_email(data, strip_attachment_payloads=False): else: parsed_email["date"] = None if "reply_to" in parsed_email: - parsed_email["reply_to"] = list(map(lambda x: parse_email_address(x), - parsed_email["reply_to"])) + parsed_email["reply_to"] = list( + map(lambda x: parse_email_address(x), parsed_email["reply_to"]) + ) else: parsed_email["reply_to"] = [] if "to" in parsed_email: - parsed_email["to"] = list(map(lambda x: parse_email_address(x), - parsed_email["to"])) + parsed_email["to"] = list( + map(lambda x: parse_email_address(x), parsed_email["to"]) + ) else: parsed_email["to"] = [] if "cc" in parsed_email: - parsed_email["cc"] = list(map(lambda x: parse_email_address(x), - parsed_email["cc"])) + parsed_email["cc"] = list( + map(lambda x: parse_email_address(x), parsed_email["cc"]) + ) else: parsed_email["cc"] = [] if "bcc" in parsed_email: - parsed_email["bcc"] = list(map(lambda x: parse_email_address(x), - parsed_email["bcc"])) + parsed_email["bcc"] = list( + map(lambda x: parse_email_address(x), parsed_email["bcc"]) + ) else: parsed_email["bcc"] = [] if "delivered_to" in parsed_email: parsed_email["delivered_to"] = list( - map(lambda x: parse_email_address(x), - parsed_email["delivered_to"]) + map(lambda x: parse_email_address(x), parsed_email["delivered_to"]) ) if "attachments" not in parsed_email: @@ -622,9 +649,7 @@ def parse_email(data, strip_attachment_payloads=False): payload = str.encode(payload) attachment["sha256"] = hashlib.sha256(payload).hexdigest() except Exception as e: - logger.debug("Unable to decode attachment: {0}".format( - e.__str__() - )) + logger.debug("Unable to decode attachment: {0}".format(e.__str__())) if strip_attachment_payloads: for attachment in parsed_email["attachments"]: if "payload" in attachment: @@ -634,7 +659,8 @@ def parse_email(data, strip_attachment_payloads=False): parsed_email["subject"] = None parsed_email["filename_safe_subject"] = get_filename_safe_string( - parsed_email["subject"]) + parsed_email["subject"] + ) if 
"body" not in parsed_email: parsed_email["body"] = None diff --git a/parsedmarc/webhook.py b/parsedmarc/webhook.py index 632bf5a7..b54ab650 100644 --- a/parsedmarc/webhook.py +++ b/parsedmarc/webhook.py @@ -4,10 +4,9 @@ class WebhookClient(object): - """ A client for webhooks""" + """A client for webhooks""" - def __init__(self, aggregate_url, forensic_url, smtp_tls_url, - timeout=60): + def __init__(self, aggregate_url, forensic_url, smtp_tls_url, timeout=60): """ Initializes the WebhookClient Args: @@ -22,8 +21,8 @@ def __init__(self, aggregate_url, forensic_url, smtp_tls_url, self.timeout = timeout self.session = requests.Session() self.session.headers = { - 'User-Agent': 'parsedmarc', - 'Content-Type': 'application/json', + "User-Agent": "parsedmarc", + "Content-Type": "application/json", } def save_forensic_report_to_webhook(self, report): diff --git a/pyproject.toml b/pyproject.toml index 9a98018f..d6d1d719 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,7 +46,7 @@ dependencies = [ "imapclient>=2.1.0", "kafka-python-ng>=2.2.2", "lxml>=4.4.0", - "mailsuite>=1.6.1", + "mailsuite>=1.9.17", "msgraph-core==0.2.2", "opensearch-py>=2.4.2,<=3.0.0", "publicsuffixlist>=0.10.0", diff --git a/requirements.txt b/requirements.txt index 7483b230..ba292bb6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,7 @@ elasticsearch<7.14.0 elasticsearch-dsl>=7.4.0 opensearch-py>=2.4.2,<=3.0.0 kafka-python-ng>=2.2.2 -mailsuite>=1.6.1 +mailsuite>=1.9.17 pygelf nose>=1.3.7 wheel>=0.37.0 @@ -43,3 +43,4 @@ myst-parser[linkify] requests bs4 pytest + diff --git a/senders/README.md b/senders/README.md deleted file mode 100644 index 6559090b..00000000 --- a/senders/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# About - -A database of information about services that send email. - -## Updates - -To update the database with the latest information from[ dmarc.io][dmarc.io], start a -[Splash][splash] Docker container, then run `updatedb.py`. - -## Credit - -`senders.sqlite` contains information from [dmarc.io][dmarc.io] that is licensed under a -[Creative Commons Attribution-ShareAlike 4.0 International License][cc]. - -## License - -`senders.sqlite` is licensed under a -[Creative Commons Attribution-ShareAlike 4.0 International License][cc]. 
- -[dmarc.io]: https://dmarc.io/ -[splash]: https://splash.readthedocs.io/en/stable/ -[cc]: https://creativecommons.org/licenses/by-sa/4.0/ \ No newline at end of file diff --git a/senders/__init__.py b/senders/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/senders/senders.sqlite b/senders/senders.sqlite deleted file mode 100644 index 43e8e0ce61d91d46990cb7f18ee659dc55485af7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 208896 zcmeFa36xw_wKslG-I~Wx)sTb`QccrzH%aBDGY~=up}M-$X_8JS>5L498g6%2x~5cB zr@JFlRXZRm;(+3eGtMZAID!+RAmZ>4Q4}Wx72yd!dFs>ex6ip#-30Xg*MF_=z4bLN zQ+>|e_uO;#nfBTHxAz&{wLM=~<74G&FAjJzc>>E9e#5&qNjyfmLZ&w+Uk%yVF#1M?i1 z=fFG%<~i^`lLO&{EYNiNJPSKI*xX1xohhhQ^{85{sfTi#)b4@go`LwDWZ(9I z_!*WbXT-bxar}2iK6gfZXvdy`!GYcJy*q|>?H!2k7}*owv3L9S6@ns@rgF>0biQ;( ze9ypvJ3$7jAQ9>`=OpA1|pU8#T7&CiBJTdp21rPnQnW%ZDKSPp7NLy{ly(;$`GZ^>nsw z^paM*PC^B#rK4)0Trqn|>rjofHdU#VtMx-u)q-e@Uc+L#nmv>+<3YZqmE!ro)Lp5q z1F3D@XVk2G*c4wCFIVIC%wDaBwO3mCqrYR%wY_vahC|#!`33i zgwRP($a;Hu&qLJ9i^2fj>2xn#zMLHt!}mx5XTn1@^~jW3%Br>2q^o5(+a7k)SY3fP zdm>rsZT~aZElFAza4#I{U{W4S#f;X1^i;jf(}yhU)1lRt#M$fo|LSYKme+bB&q-e5 zM8fM{m|XIFU)7d{Lu<4@+zHRWLcUrfcP8Bb3VE+aE~7rYQZD?lAo4D%-|EO;BY%oK z6?r1^%g7^M3H{#z@UNVk<+g?KNE|^=7f5Xko@o%8%#=qW1FaGr$bl_h(@)P_kB_hv8 z9v6R7a{j-04$O04o&)n7nCHMe2j)32&w+Uk%yVF#1M?i1=fFG%{+DpzbdT(CI2St? zqrNfv+wqv=G5j5+zu7hH8an@bA_c9GK_8JO}ELlZh};DSddq)EFSR5k=`_E*(6O_c zKRPRu{e4k~JmeTwYqj*ax;>q##k(u%YCT>ai}zK_>0Gv4>~Rn_|0JJ$vSX+;IqRT< zdV5%2<`^v(j^<1G`mB5(|3WAzFL4Z%fbXxT*-m5yi7K@>-k-0k*}C)xWOYX5cE`Sa zI=*G9K2=RSw@4D9100cGNRdC{pZWjhIWW(Gc@E5TV4ef>9GK_8JO}1EFwcQ`4$O04 zo&)n7_+QF_utVIw<8~lTpu|S zIS|kn5W995rO{w4||G9F_p=f~z)v7X8%2T{g7@-s8u|&R5$d}6bT4Ji2Nte=z z>_oa)Nh2zy(nj9rzO&>J>&rzohmvxM%2cM1&n9ZwiE_E1#K_5g&8hNwhh7U5QytHb z%_a(ktRj6OB1@+{fBmJEy|QY&s#ekoM5WAE%cUaO#S4i_wOofaJYOy)U~(^IXBDG{|`#{NQm}#d*l_7Ok^OkF#Ngj zo5PocSBF{X>!J6Ct_%%_x`Y1_ygT^T;ERL%f(rsq2EG`0Lm(SC)BmLZe*XvjSNi_b z_n_}K-^+b3@@?>)=KW9aJ>K_uo8E)oUhe|WhdtMO_Ig%(PI5oy{<8aK_pE!Hdy(rI z*8{FkxSB52wcgd{e9HNN^8?PNbJV%g8Iivrzf+!(2jw#zzj1ud@lwa-jt%fuc!b@~ zUdB|`$2z3vQ012YRvz?A>wP|oX}CPOU(Kl1c&c11qQA0f>h798oYK?uQ0Y0~mpZ&& zqFz5Yml{<>LQL<1_YULikv_=ppNd%&-r)!h(?#ygFm#){#JqES*Na-B)OUnd(FVR=> zg|b;KBco%tUs@_^&WkPPkIh=k;^}p}{L(f-)JbLG9*BH4-k+|g<2$Qmg#WC8FDv3h zrR+-M-CRZK+}T=%Ou0O1G8yHFjE)hi0`C{9!o=wCo}ET9NY{$>in3;hUrGp~&B-Zz zP!;$WEF(FSnDP@D!o~3X<->ldQ@pf3IhD&(sM39Tb=rJE3%WWA0U z!D*1k%cXd~TC{$}c$UBtkrU6_w^@34&+egpLnFpV>cH~k%ZhT5Us^7P(}rZdkgmn| zrAzVc`I2h#gtF23SR|3zIRy5J7kc>%wY+6i5Gl`Cvehr0Eb85rtgC7ByZC$8AbL)e zaaj_}3-0kqVWdwLG5B|@wF>@Lx#9w&jq8)UhezY1<*_=J5p(KSi?y1bw`3FbB5z_>vRWvcrQz?y z`LsCj5)X*92=TY5x zX^CW|g5hbhT}yUsuuREHWjF2NW+7PfDBXLmF`zf09E8TL(-yk308u~_57K6Kg|b(y z2T#Pie!%jAo;jm*o?}@*K&{dWW2ICRd1NeGXIVNc73(AssgAWRt=Up8 zU^<#L9OLPpHI~)BqRzzk;#Dd z`uDWt`c;0ZSM*UT3AtmuT*59N-wIOeSB)nLDukXaQ7i+cG;WS2`a8SQFI^^TR!(A$ zgTJNtP_dXFuL?$QPfyQ69E&G0^0IlJXVgriPvlMdL_yFeu+|v#%MuoT7c=FW*?f^Y zYlUBG7j5fJ7SqSDgpLlFgOKOcFsZth8+2_ z3iW)ZYz}HIsdUoi^WTApZBfdv%WjD`F)V199F3UtIl!==Lv$W)rv(OuY;a$l> z!Q49O?~YDdw)h7XJgn8zb+fzlbk~^{CLlaM<`ASZ^E*b)*&QSR@Y1@H+eU}t`c7O! 
zKU>6-$y#1%Kf}U}aX>&VtF)GEZ?}?=U~W#Wagl7WctE;ZbY#Pm9K%wGA*1Fd`#*f4=p{yOXKWo_Hdjjvr8~<(W9Jsg-oeLOBnWU9_( z+AO+cB$-pOfuvW&vHiq#VDgrs)QG`&LBO{sdQPXnv3$0ybewEilyhn!U9wCLJ$-tc zvDEY=vvqX7QEs+gE-5`RV|1*;b8Jg{s&q`!o5r(HKV+?p8nb_cAdN$cpuu*RjCTBK~Og+Sam>UKz3U2Z*pXo}e1#l3tC@FztbSWb`Jp zN2W01_6(b)@{D>>Sr#HjakAx{%kZk(>M>KA6?Lw&>X z{b`KPl5vu#8YI2_OwPU&dwxS>0>ve(<&?6en}C9E6GfW zZpSNNLCoTgin_VxX%CboE{l|&+_}f7X}VHZI-O=Guj`NR%hxb2bc%Jk#8Hv4TsDt0 zYu1kLv`Y(X*48S9oUqv8mtvxNovT;n_0s6i1T&UvR$0S#+kHOXWYyUzgu1 zAC|Ysr#XJ@c&B61vDp!1KW6V{RW`)Bm&XAC$RkXp{N z1_Ua@oo0}XL z8Hol&tTozpl1lC|&kn7JY)H?Tv8r-T0}_`eO(s(~mRF_<^|-oSO;<~n-3H|RiCWMf ztZYC8(+<6fRC07H)}na#R_u7#+($~PF(I<)+60y)WmN;Bn5INgQpg<0VV?5Yv?WiQ zdCMCr9x6Qzh-MvPlCQ&ns^--yL_(9vmaN_eWVH3-q}Q8Dmmm~pF{`?_r>9EUiAvg> zp#?ohNi`s*Z4%{=;GHzYr&7ExT|JSI3wfA7Ph`^77NK@+1CpFJoj21$KUKq)TEZ$s zd$>L^Xsw^BK^vgwcQqj683PDUhv|Y4>*wh~>*uu`r2%=bM{sFlDqWbJIqp0PPS4IH zj1y@`BW~_zsq`ci9p=uO)zcjf2xz+Yrf;fHm?&3`c4hQ*R|CRXyCzokP0>D4z;Q=s zYsSo3)_{Db?_`8o8Z zeT|?UE+4ggo!%}|&S(R2S5_?7Lw!{>or`bDL+yt%8ixNgltnl|U|d5X#no;?Wm7X| zTUvXXfHp9#X=-XpvH?+TqbPN_uZlxlJc(^82l2iP5tVeBHIo&13=M+z7-}E!xT9ea z)KEq#*JtVxGnCFC-PwSoW^xZVaMi1-rJ;I8R|688$q{6aLU(V7U??wNs_~qj1_U-; z{fh7AOA}MH8e8gXWS!N3u+}N~oalp=(XRR7ipE)-I2(}Bv`M&u6Aqcf3-9IY(Ej7B z41uZvK}|p86Qybh7gQKh9Sw+P#>mHLQ>&(|oYB+E8<5Nty=vO&C2n7tK~6e9)DL2s zrm3**dV?Qm*EJx9DWd2ky)ZgC z8z0C`3Dr)^7ba&Ds_~$s0qM-(AKtU7NvL?9?rcEJ()HMVg>tz#1`*gSRnO>ZK)^C4 zI?<^Zn})JJW9D==xU|L9V`v~Kq3mxZnZT0sK5O)R(XC!O+7- zsH?H|=Z_iR$=B=jFk$Gprtz@j#TZ)Rbp@|O!)YGHGJ1OPF_Mt^H#+-<_E<_(md%=* zP#-3yRpH80oimnEiRH9j#fi!+LC@%%HU=f?Q7FXEitkJthl31qaIlzFI*+0>QQh9Y z{P=D)hdE@HHjV>S4H@lI#^RL#85WT-3lW66F{Ql@5>XP7jOA>d5YRqiZD?^)39KTSXLKUQc+RRvS#Ihu*Fq2fFd&J&oGKOd7Mc(p?vDB^a zvzn3hHOZQ{s%)OdL7bf+{YHGiDw4nyEwY86vaDoE;~90>BD-V+K~K>XOZ!yx4VN7a znLUH71Q!?6%F=>qG3bLVZ)p%E+9#=B_&`eZsp=@?5S=+0RfX*v3;SVXAglpNc7k+f z=8zLP387@IbYdK{RG$Hmu+!P8Tsl64ZJ89! 
zIyr6AJl#2F*}iphoeHwDcwbqqVtLUAYPvKzT*eqyabV5hd=0IFDJf6zGX#nGJvqxh z4#~NcP8G_S;(9mptSrx3mo+5$HGOF)ldEYV{PaN-gDOf*7Y&JrgfWq&bYv{Eon9+% z-E9_6>4~b+nYPSWtUgl}00vAdJ!Adlw7T)WO!a}R2FvLQmVE6Aq-A|GIQx*XhW7R) zQ`=J8hf+A<6*F6{9}$-N&KFtc`|7^R>kxL?HM_l3G0tN8y@O5D|yfM5CdVps_4+9PG;n3?sv!P2vn?p;X z4R|8>K=AXyH*!?KdBHP+zQFGS4+icEyg%^jz>&aEU{xUM|C|38{%`v~;lBymfdl?^ z{;=;E-xqvu@x9bH=^OR+`l3J!JmkI8`(E!$yan%G?`ChO*YEkg=ZBszdfw}KrKj$> z%+v2lcuscz)%|n#?d~_bU*ayg_qi`{ce?|wr(NHJ2H~BqYh4Fi=eXi7zw?jIA3Hzd ze7*B3=XPfTSb{&x56O4RZ<4Q;F9)Jvg&cA`==iwfO^$0ESx3sz?Fh3!L9=iNdn3CF z*n%$kF8MX`w8Y*dt!dRTu#dN<=TrZKIdG0#Zh;!%^pnm`Ox3VO)nJW)ew3``q^>4- zOsNc<%&ZbSdb+&1^|gFK0}SDW9;!?`Q@i9{)|wQ`r5x}GnJHi~0LU=FQaSr~VeKTPZY*p6Zh1Uk zGLDY1jQzX$5^2U_mo1eywR|^Uq8n|6)hc$Wy-@57)%vWa*NR=tp;wm1h{uT{f{g%r z1-2vR2oAZ1`6qS>M@U)T$d;7YL3^8^463U@h8OlYaH1W@%7@Ps z@;GN!(VP1@GRsnUIyVi@?6Wm}QmvM#^cS5hC#8*Y~eth9&I z7z^2SqMRWu8IG~B?TVakrNC~39^~8TY=U%;Ra92U=@=*5Y?XyhxPrEO$OE_oeQs>f)`gG))as13nZ^d}1=3TTPA8RE9|xD2{K}Wc%10r@LK~*UF0kWeWYcD}t1#R?rlVjJ!GllcU7$$9ot)H+I8ZyN<)=LRdnve(H32_>I6?PyTHN~w(8S6>WmUQ+tx`FdDKaX zb=d|iG#p29E>mJ1Hu`Z|F_hS;w(K$36pksesI3^_`yeEvIwzgrL8gZC7uZ@b#wQ#N zX1|T4W7I1V+sGJGbCALG@#M3O4A@@loLNfDWBZOelg6@`K}X284pr;0Psf<8f$HSs zR8fik#l}`O4}CoHpIas;t-Yz@%b1jj2{m1)Pbkr6Z8d{_yFS6kZ1fo$YjRMxWudW( zJ{6Z!)-rK%Jc1>a5n_pCy0Q{|^0+sl3Qp&h=T~5sg}UuI;%t{tbLlKA$hZI=Ub zLQoWda75u%>~>S7@mgYBt>L_bm($RkvFuG5d8K?8QPub!xZ$&f+EhT87Mz#Zor&D z`)N&y{>a`v=@Qo%MIW*koK~SL=MyXXLpvW=7nJCOHnwLm+bU@qSKqe{UKlRH_d9H} zBwI}%n-#r`dVSAUFF>-ed*CzQvF+ICbD${DAXCD~V-iNcZL2@%2&9Ue)|Kde#}!_L zX$JlBEqfaKvF61~UXhH?S)->@+vTf)G;+upDui9$5fu*gKeZvS>cEn+=YqF=QQ z=PcGpEYQ&}+ZzHlq^lL|#!B={XUf~GJ&cn!4ElgmCQ9lQh9Pa|(a+n)A&7)UNQr*# zxc7_DyhF90=v}ogQH72h^Xap;MrLNGl<22z&B#oRjiq5aZcEr7U>NXfe8yId4D>OC zFI}xfKV_SOnex=^I5eYt3i74vHrp^_Y4D9jHX#XXgZzh0&Ys<)mi|DzxO$ciK8MJyk1}$Cc1DFcTXEuY0a$}FYw}rl%hvg6U2~r%lEC#Q&btiy>1%T)=`buR$ zIU%>9TCcHJ3%aEVRcSlLo)1M3r19u0Z4y?xNV`TZT}<%5Fc=A9N+yhH6}xowI$J-d z3l;b$sN?91m)q-C$V_2ER`LJDVYos-tW=^eTP0^(np;R`Aoe9{QxF2gY{zPu2BaBS zGGGLvFSTtXfE`qTgh{|d0FD6BIeUFp(*j2Z-Dtzs(}Vf(G++Hs%=RUH+B)Wnl1ME|=t0 za$DrF$ODnjMcy8niKHXvMixh;@Xvq{cz5{K;c9pn`~((<{tleLw?ZEZy*_kBC>I(C ztqetj&j!C6{BZE)!D4W0Pzgo@{}uRA;7ft`1R8;fz_!4ez+(SD{15uS=zov@75*ds zJ$}XC=6lk2ukY=?rf!|`E$>Q@~j zjx~-&>>up+>_K)bdn;~1m|&yuDp1%WI?@Uj<5t9*DK-D!|9c#;EqbMN&5)8`ze&E* zx(Yz!SssUN2NEgP9f8TkCdW}`i859OAl0jo!1$bxz0S5K7iV)Sj#x^}zW5bqfYB%` zu~*r4pyCviwX=yyY@#T78b_*E+GMdJ6bzaurAzkL*_M-HRfsc6>=pLB7Y9T*~!@$dIay;ZRK%Z4zj*q?6wjaXt2~Z@l=Edf0XNF=P_Dv<$ zu&rA~d<)xW>`L33XUd+W)Zx->+c~x!qLn5Kr4J;Dass=0fiN6x>$>GpYv0=Fp?el! 
zg0{6UTyVOO$9c1qPs3ghwkd7BFIX@@LV%u=WZbsOCPcQ@07emL#UWX=DKDMY2)csNzEa9g>4g=D#AnWJUcDWbQ4P3hT~{+O53@1TIh)CO51XqEIO6NA+>GI zak&N5VYO}K0qVqexVCQl1I>8ecBXA45L_Q4q^-mLLcXZN#Y<^B!?ynl1fw=Lm9}NJ z(O93rc{?GTAKFg0DH!TFceOGZS{@qTO515GNwcBhl!zpAB7`reFARLkH7V>Zj4GP&Ao-+mMwt#KHfjg2|Gu!+& z;jA`G=Bk>tKa@6~O%|<9Ps5{Bz|YZiu@D15th9M;>m`5@HJsgx2?8aQRQMybd2AIG zBpfZMw7KmSovM!2%hQ-aF8gahyFxguU~f>`oX5RZfoO_@SDS2GP-{~eG#>CZrOlC$ zFSB-mU9zU74pmOf63)4e+4c%S4H9FhO17RiC}6F_{*_Bl!tqdv{li`(sLQwmkP`bZ zduPBnkF_pm9TTy?*%`sTi9tq+#Q(5OW1KN(4@1-WjGaO7ox(8;dda8lPfF>EkcOYK zO?{ZrGr+AVvESMjT=?bU_&ElQ6hw4$9xJiO>}BVnqr*a>#C~OeGLfE6xE-brYNz8Y`s=3Ph-wl>-eCpJF1xOg)|&}l<0fy zkH~V#@oKT}*fvPu#IuvFx`^2Qw#EGjHSgQDW*kZ5oHj;Y(-TU}uAu-FwWh+mFm|tP zU_d{J=;YA#WBg43-i33K68ol&r<{nk-q=01+TsbIMD;RUnpvtP#uzhF?KlGqqwJAcNv(9o1Hks&SU$IGl6-X$+Xw;4rl(t2-jjvKj zV>650Y1?Tkd5C?$4MR_vKdi*QWE(J*i3-j9*ca`krAq(~#Xf7x#&Ht+LG05u=?Ve^ z+{*YA#%NJupRzHxT&d$=Pv80EaZl9JG%$rq>=U-}gLRSu$k#AWKW-ba^r%z>_daH8 zEIlaX(iJXkDzT5+6tCF6XEA_rAtMxx@8;@l&2fg1mG|Jg*9*zU# z%&+ZfO6+~M%9f_G1#%;cy~kD??SX-#kG=9N8f7C z&7?6v-)I{FrSwrKKA;wh-AMTVz0%jE$d4i)1^Ry|{7m?L;j6_+y4&#QU68$Q+-eSzT~^sci7kGTjYJ* z`!(;0~+t`*MTJMVM8&)IP9b)Mr~ zApchWjC`G(k^AJOj=ws-?|7G^=Gf{ujs2Z{h26yJte?fDCsEbo|LbgW93cmG^!9Jv z9v|fAEe(W{n_!js4Zb857d zv%CTPp9bh%FGxqrgc{fRFKFSDN_k2HaK9eWyp3AHL!}zlSObS8=&?&n$ zyph;qI3a7qV>3Vj6A)08yj;+=)<8L<0VJRXHB9uUhIg-s@7Qk8WQ%ajE+|VIzya#< zA^OuZlLnSJ#}l0m001>ECJ5gRn*p#|hC?QSx{;-v-2ehm3j)!Vx;Va-o4-J*na%a@ zW{=^cs~W%pYJkg)DTqCA*EG;d+PKeBwuxctXaF0iM}y$6?8b4xN=vrejs{?X9b#f^tZAEcrDN8x60W_fIh2E8#swa%T$O2b^fdlxT4x>*2JV}7vm1fig$nQMs5cK%% z4WR$@$&kV}0a3%Km6klS0lc3EKv#?_O-!mB1_OYIOB+D_X;{6^6uc}E2gD$PVRBOg zP(RH9WhjN#!VI4)6K)R56z47vE~?mCv2#6t=w#2tJn%u#Ox_`(fh?ohoT zoWVnwDu@r*rV&*Sx%eiag}cnQl+>BoTD)(c&52R-wXqBu;I2WbY-|8-xJz5a@F_xL z?}xJ%wg8}BO&`!*Q{`H^V7z~XCzUk~pb#}qZr0vDejO2|nk$LY05q{j)On-!*08#- zOxO&4JrVB|)uTMOvjLc*IR?nj3g;Yth=!$Q^fY!_7JUj@HM)OjPs%{Za&)Zbhtk;?F&6Y*lnq*} zer6?*fePoq+a1JP;a9-vc8~5e;s@~(tF@yQY&gTVZx3Gx?6Toc#3@O&jjZL3?GQi2 zj9Jgi99CQWodvO$x4f~9gcV)`LA_-PldND48>;=7me2sS{;GcPDZ?8e1$2J9LO#Rb^B~OD7rwmMMiWNZE9Jv@)ABk<3}1 znW%}(HI06wp_^0OQ6o+cVu&$k`j2OMeuCV@M1DsjMNQ@G=}8gJhPHLUv9#(?IIkHD zfIgBEc{eG7Y9|ivI|=Ba5z;}P13y+Zl7vL(v|5Qn0Lt&g7Rfge#K0If#KnANqBsNB ztqU=0MDc4nhin8I&r0#PYc1fc-)jk0*2 zrikuoY_jz81jb}~g2V}vm}k+*M#<+}Mk@G6LG!HcWe^L@Kzdz`jh1zQo~R=&wV)iJ zsYot8&k(?SQ=Ae`4kW>@G^;_>aEDM5jSVy=cyDb0F}Yt2b3B)YmNJVXv!ijY(ZHV6 zAOd6LOJun>sL_bLvaPY60vzyCM^jsd5V!+NIO((FTL8O)1dR!1+hE7?x_$u1cAW;| zTvi!voa2{9L}{0$whY9($=5l)r9cu|39?g`PlKM;Q)G4w>U7X0#z2=)O*-vtnP{vt zT&I*2EP>opd24FeRxj-dgoU*f9)XX-F3@LqkSsOB)!y-8lAT&Q4-^>XYYc9!gP%GA zD$J5ZX>dzqk>IGD*;q|Omh&t@wAHEN%B4OZ_AMq`$|1x{N&cfv*Ne)oCq9`~v4pzA5umz=ZCOI#mwy&cGet6bx*{jMRG;yT6UcK*!y zdFO|m?{q58vz#|NuXWDK|BxS*?~`wn-^0}$_=(528t~TmP97{%bNQ>q);edUh(#Yl z+MhlRr4-*IY%$T>7RVirz8yQ^oyL(#(NFRWsN~|K9E}wH1l=78S2_gGnv8ywCx!;M zkMs>hKNx~CrcXl}Mc+rttUeW%r9ynK5&JXxUauT;^rc3lH1UGyO@tbf*kSqDYe{3r!;4{^ z9x@R`(bv#@mr0Hedg}9cMqf$l;-moz;(u{a5Ec)391B=b^g8~M4h4$7oEL7wf8rP6 zPzCr;^kt;4OA5$OeAEj0p-4>v@)Lb&RPJ&h6bZq6w06(Yjho5U`r#zX?b!h6VEwWJ zbfVXgRurHdaBbYY1a#}bU{CZ)j~rw0+PT;-FJ>YViuYt$VaJ6faeL*HSrXw#Tp@WG zGeV6(v)0M15k+Jw5TQaG3*;_eONbC^eb*FOb63Jz(}e?Unh@7*%c+A?JBFfnpCorW zQV4g)&&-g1qhIG;z9-(@2h7?>y#DDDFWpYPWX9}}K1Pf< zL1+%?qtx0Hh0>5d%qNO=Oq1S81)8A{q<8R%!=n~h!Wl?!<7DIx!jT2iTLn`n!X+Hw zq_^;SYK9z;-Yn?JQ<*{yNH>v<_`eZQKzb8tlrj2G6e~b_BX0st4Fmy@ZWMJ%17Qfm zxby~57bJzJzVtd#7i4fJf9dr!!~3}Ry!09xCiJ(i2bEsS7Zu7Gs!KQU57GG?Xcp;u zL2SCiORu7N)Z+Foy^_CzC5tS7(#!d`ks9T0?b3Cey_B|kr%Tt;0z*#g+A5-Xxl6C$ zO(W-UayOS=CQ3t+dzDKsH6BAw8JAwdM>zRH+g!w@IWiSlyrf4>M{MbusNBOW&mb;c zeTv*`&C*WJSHnH 
zD`+_p?$5KdUXTYfcXXZ!%k4ZR{F$c%@)9Juw{cw%YCgDgqD4^6D ztx(5#6<1G^yBPKlbiC#~e1sM!tAlWbb3^y`6$O)a?F8=z-RqTBGVUci&fBJEkDV%SV0|Zaf>n8KOkdaB zY4RqJ!V~ob4y{@7WpY#9x;rC2p?RaGMLq0ZsF%~~43G6=d;qohnjYfs>n^1);@xi# zIw8H9Zxh^w^fID1Tt_dZs^~7Fmrx@tagPrWOL=hP{lqsdj`2QT77tjwccHw7QBXo# zXyQHm8_4G&h)4N1kfO-KyF27<{FN7oD7;I59SoYR?bKf9QG!R>2yxKlq5Cx-6a!M`e?1h}|BD(BmUNOXp-NgA1baAzOK7TGExSq!w zV@6-4pseTeiddtruICKUBdeani$XB4bZH&`t`_)pgO=cNQ`hoQVa7*Y!#^s5n)cFg zO+sne8q9PRuPcQoT_In_{?)LgD|u}&E8xpWQ#fe^5?x_QB3|fnUIP&Z^lbh&RI&*F z*~1yz66dp9Jm!%+yLhb-rt>V`JS`k&Coh{!R@|ZSO#Xt1n0bcy8*5-b+rCM@n*Dzc zh#BW>2YQp6%1-CqVvXUrnAe===vw7P{NrRztfm((l{d5J4ZL^?Z;lo~@nqgx+-?Cu zEnkT7hdjM{?E?N8p3q|uM)|N2S^B3>(#gzFg%RF2Jo4>o94&Z?M`vu~Ez`pkhQ$Ml zKNvbg9)e{0LU9L!{5vh-1p~Y{i~xfE)8!-=Z%-U15D2gRc>FJf`SV0xBgHIdBKnhd z!4pP>Kgd}xVtQ}mePs>id;y=U6l575jadGU56s%Z`@jrcx~xkcW&c8q&?S5}A(m)s zFwj#`*R{5~|I;J^Ltf~NNc+?CMLRvsA3jUElRd%M6OOuLuOkVM_(cv!{f7Smf|NC*SulvXSyZv1_)dzqvxZC$x-!1SEc$IJ3 zm-p@QZSkG&OZYl`r}~24e|Ue2bNrXRx8fXsJ#HT;c`x(sz!`qI_ax7sJ-_ih?D>}G zGoH73UJXRT1kUeWo|s2=|J40W_b1)&a^K)?A|~Ns_r>rfxX`@}u?7C_`h)97uKQef zx^Ceo_m{Y)T>D(Zt_{x5yUuVeba|YAbAH$P4M!942Hxp>rSocM-8tsm?_BRZ+j+Y4 zWTzjo2Yx3%h}#T4BHsjGidpDXM&*8ao!lXxBD)>WIsV}Ix#LTY4>(@qXtH0!8{wPm z3+w~z4e0Sd{L^0l?-KAM&GA-c=})9H(3b-}9G&zYl8GsImS7x&yv?rYsfk2U4)%M-Z=cQxf%dg34V<4qhLbOJ4lw-W6nu8)42hL(b>`rd$B|k z`v7*60t}3RF=zqQ*$Bxx$WOzIeOzD9YHSCAy#`7-;a5(g9E^)x8Hm7Y90S6J>EJO? zN+U#Rm2IbkobxtE7sVB{N)0qrh1ueCbkb0%7E<%i{1z<%>}iGUBD|)C`=&;T#0R> z)73y>lCS{rk*~guolk5XDC8mD@d!+4HrvQ+DRlH}*Y&dV=wJd(ewLy(@6qEJz{`+v z)b;?O+7S{wS*lILtjo^jj4q^)mP>3sov;QV8TSwvsnC2i;Y9yv7pz zOF0A@V%Xl%`J}(Cv9o!TQ6==D27ZV>%Ric~!8BcAXYxYH`ImH_(P#L(iHo7BU{~;t z-7`8AV+2SZ*qMxX4_GgLAtNpzj65W7l3)DN!}jue3@3MQV=2zszWgzv-)3j=M}4EI z5!TKBgc&~1APy~dq`3cM^vhJ+{@GF*DD?O!tiS03JA?R!Jl0MVpr5pM&`*PVb<6`g z83AexMKz04L;9!6WJoBeY#CL)znZVGrThU!!r6*rE<24r-Cv%`7a8^o1%Doe0RUhE zV_uv}{Gn_(Jx8As36l7#jC+dp3$N#-=JgzXlA4K30G%laEL$kvKsud|{*Jc@Q3TLH zP0tv8g79;A0eM?stuCV--2`ilm&f9yoMNXCOZpLY0@Xd4dKm%pr-&V_O?)d0j3n%3 zRTdMJS@=Rnf1~}aX38we-{Js{pB26Ix#+{9WR50d4)IV&kugR~$BD3F z45E_pWEf+O*4ypmlO6pL?+Fv~CHU@qD(X7m5XIe;&&tJDtD1V{~99UuP%rk+@?tF_PS!iay3C^NzmgulV#F z9?i1_ym>mjkh%E-faaus@HaJlnDiVUimSI{%40tPSQB$T`XG_$s4M9iBH<}>AWF|t z+bobP>5tTIj;@maoj)^?RMOK_a}6#fJw@&hDN?{@(KIr85^v0@O4}4&PSO+oK8>jp zLZ77H)0j$;w|iX7_*(RPT<=?+k$y)b{a->gNx$Vy69A}T4T~f_!D}lpNYdjp#K0RJ zP-K9Oq~Fji;ZLCJvFQB2!3&+lIOIZu zfJpZlZwLU1^lgJg!+l8i@&QhTn%EBMTcRbc;0@`UqBO!*aCnAvk9fBQgCTu`k9rYr z!OHEg@=>p$FQmIUcdaN3>FfNXHXw!cHU3c(C?S27^OI-jm2th+R>*+# zSHjqJsnGSILTGKsAAAgU`+9Is za78d2_)6d{u*aVRTl_Emcf$UD#J|nI-0$-J()U&0YZ2Km<%@WK2QPq|y$$an?^;;U zf9d(S=ZI$zar}Pk{u(UhN8Cg1CAbCf`>qeVUh2BUwH9{({J?p;^LpoD=So@(~&Y?2MKHtBIx+WhbICZLyEeEuFp*_bfHSd*w*S9+Sj zS!xK{!~nEb@U@962yR)ePwDUpRb(rxn!r@*;A%Coi-I9z4~LL!V$MNp&24W2NU4Vj zMe*={G~kUO<;nIYV3RsbaR9eab1YZSyNyamr8=V9As~)a(Ua76^C!jAE67EN_Cuh(-^fjv**1 z@h0Gu8YfZXG$2d{>Jo8=W)a`!k|vOp`qdx*(pk^^@ISv0bXgNf%*CP$mg5kKc;LqC zB9&+Yb*W=X2MAe%Lu-yCPLoi}#L20kv^RmdG&&yiSU{|llw8&X+ETw@0%cn6JHfI= zsrDxDmBtVwKP_u1BDt~&T%{Hnw)c2FiuqAZ!=kKc0${0MbaI@6#X`?DFYq-1&T>!; zCzSsJiWl=$jR^Gz?otQ(YZ@7oiYsU^;DEN&T{l!7N*EK*pfWO+G=a0!Z`31&j!A;Q z*EfNvOlh@Li<g7fyuy18nc>4D4h>1G+3A@V|f!eNgevil|-5Xtc6t1>udrd z*-|QL!&*oIpTTu+O#mc~whA?wL8T>EGy#&-Tw#_E9Irhy=}EDMUIR)}_d^)abZRDz z(6K4qP2eLnr<64oU7JbB)o+RNl%g~Nj_eWZ*~V5aoJpMr2wT_9p?3h1)O!ayIjB_X z(9xOHM$X125R$sf#fh|cmUl?A*Xpy|+XP-xyJ&QiO~Yrt2*n@n_Cv4;Qv6|Q0eVvR zbAr+j3P6*Tr&ct9h}0Ie^(|^a3nA$WRlc5eHvx6jZsSa-8bk^jHmC^=`ok0nlD-X; zqw#I7Ni;}A>a->>jk+t*0O{FG0{q?H1dLH%_0&PI)x641PoCNAH6~g@-GXqVxM!+w zpHa>j&p}bEEF({7G7TCn+1o@!OPkBVK~Zkc0O?gtLP{)a 
zo^9D$xU$tGAk`~s)?-c}crS5itwEF3(o37&mhX^q)F8v@&H z#v7z1CdXo~C59xVYl$(emYEBxp)EGw!m?rjWT z>S@x4DNCA5&3% zgFjCmr_?rSg|^!e6S1!^Huv>hxtl24Q=15r&D7=@R|8 z6OA&pof9K~C%c*p&4Fnu$xSkRfUC_>|5FT(qcnWu#M*Jot7GKrwXAuvIT3}@+$7_>J(v2S2{j0c@T%-?Mht0ajYV~D5OHlj-#9v&VRMBLq(i$}>GDT#R0|m! z^}2qxz5dQHGHtpaAz{CXR#m}z+FH3Nn}%qCqX)GP+J7Z81d z`&THtnttPw^vnKfMG@J~lVc zvz{M&zTx?#=Vs4!K*=XP6uJLD-M@C<@4n6bc6Y-)<{kzzo^Jko!gW7l^}Wk=gX>Dy zm}>{y6!~4`LEJ*{k;t`?YUJ|BHlPkp#Z3fH0&(z_@T2X+R| z3!D`=$^TdQ4}8b}dH;v}Z}4B`KkOg%pYJ~n`irN05Bl!(eGoc~D||WM2+#!`&}2O0 zxWbV^+`w}ke)eT-UCt{#Gj0SI806iduW}i2*8xKMd?6*55!PyjnC6|xvgz*NaG$%ZI_WE&-x)XGpVX@ zcUIaiC1rl=BUMwPZpY>`J$p0)tuibSRD;^RNZV+FPySS^pYlnHetR!tPDZ}SLC zih+m#tqCa7DKH&CoxQEEOirf}N@|ZSYzl>eS1Eii?2^!3D{aHJ&N-UT@g-Sl+h!~B zs0!mSxPFmsfFVDdSK5YNFq9r*b7f(N$s@iqB5dQ>(w4LZIm5yVzQLM_2LiUJHl^bjxLcm+TO`(>S^VqmJjpS5MX_Qy)be$7llPy_$-i$xIZ~ez(=!Q z1X)x10A;W*I3UIU@1_*=`#l2*>k$Gg4-(9Og+a+MUvhE=j^`KG`vzR-=sfEZY~ ztluo`CQPDHb-GU2#TQ=fgRpW@FmCxRWKYq_M$?F4t;omtLZJ~36(J$>coYizvz2%d z?jCuCiO@?L@c;+aDC`O=JI$NGs@81t0%L@@TW?gg<+ftkI;iQ6V#i{6gHNw+>wU!5 zy7i)Ny^6ygiLiAdmS=Ey!n%0AEh1AZl^J)uZKJj4ZTE_mtv#$=C5BDS9M#r~!;01! zkl{Cn6|A#NzaDJTI-HORp%MZ?T{+xJEbjlxw-Q5#1NR$TS(&vC4}=#N7lUzM2Az@S zOLnVd2@l(MlwRV)JR<@VRIWE52n zwPYLjUA@T4TJ4If%dFK&(;ZiIrPZY^x%wSemskf8-CT8XOM&DDUR7{+jM-PvfZ+;M z3KKW39I*CP`Y6S1*iY6|op%U`1-*P;k$-OOlr*A~LzkrNYxxxShfwym)KYVdQ1)0G zkDzzLEkYT!6O(_0vfEC~ogft4Ib&G~u+7wp^@=h=z;L6w+${p(&B;{^vqafpoqTX3 z%2T*C9;X3!)eM^N!Sx32(s%~tB{p;@Z*$7_mL`J>unZ~NT2@(dqd>E1Gpi}s$}PK@ z?qQ(}wJcR|t5CL*OMp>j?q2~B3t?4&yTt7jwF>@*t&T^y2qf;%thr3yD|3_cT*T@?~Nw&PKxn(F9wlGKdXDFLn zxLqyQD4GcYY?KRHm;hNs+0^nSnE7CoYbjIsbSStR$I|N+3|n%(-$3APgN3HcM+Gh) z%DFAofEGgox6LT)Te^t!8{G5#oEA=SXAotb?Yp>z0NuZ~Wg@{NBth|ZF_G4^6j8?I zCN*VsOM8U7h|+6aJs>e?ej|pYzN&>&h2NTIiqgpiQeBKV;eF2%{G~0`hXzZyu_$M@ zd}9(SnU0pK2oD$KjMhx<>Y}uh{eQpoGYJv?-xfIvU;nP~e}#V-{%rWha6Y^_yfE}s z=pJDDuMF)EDIrhrTfw&l3&Askj=%$fj|E;8I2`B;ECOEtPXD|7s(*#w@B4x8W?#j( z%==gGZQg0`h&SeW!gHtRdQZ-Cf#+2BbI=3af*brUb}McMbp2;suXPn&Dc32^KRCYt z|M#8Fg#2^d)i)~l%09;vj;}h7ICeSCL=3?1u@A9p+2!n9b_%Ndul~1o4z`a$p=#_J z#6=oFRG4>fC-b;HSA@zlEEnUr_()kc2ZM;NG9E0bqX!Mm^EjfqBJNV>9Bd%ExoeQT z%yMNjAjr6p(K!bLh;FDK%;%uZx0oiz^EoK;6PcsfLyu!QKg z(ZMb7X~i;csPo3haIWJSJ#(;z=)u<#!= ziuBoYFp6kVnG&FirvYP`#!;9YIgMf=s$g$Xmd?Q%vQVg6dtk4@X-pf|I6NXTG6yTj zHqj#o@tn9B?>_*qdY0O` zMDKttQ^yb%t>t#@F=T0~Yvs@!Y$gXqttPjSgE;N6<_+6%V7F;22N;@s4@k$Ye{D>s zHm}`Iz`#n>31O7w)I)Qyx9Hdm{)IdMAO<=E4wg8h(Ggv1DK8czhT0u>n&>3)Iap*g z9xA-7mbY-Pi?VJG1{qzqzon{9AyzB7Q44nrliYZqg-_|7gONrH%Cj8r)h761DbD}Y zY}>|I8!Pgmrp%dUn~j!tQoDyOAJR%nWO0#h=N#-ddMEQYta{Wj`YczYo;?Q}j%MNP z<#h1QgYUW}<_#yNT%~Ich8kUw%ZXw8nl#29Clr!h`y5O)dPqKa2GmPt5J8+`c(P*- zCK!E65)GGO^v==K%jRH;(MKU(g(XIUW zUNMPSVE`}~*XI%`mN${tJqHtvW+_%c%|}aodp)<(@_C*Kw!r4nE{66RkY-g8MBHqO zp3^=D%ZqN~g(E60EM{>c*)a#3i*6a&Qkb2FX5N6?jOppFIapYXZz50+^`^N-QRp5m zXW1MqEjj}c`p0r#FsLZiJ_jp{4paaI?`ysCTC!sf_7$^M>I@$}29cKTnuCp{T`W** zwiMJEvSOQ&635J(m29m+{BE4Y__Lou7ah*ijy#*1M0ce}W1mL5AgfU~|?9mIr zedL&6u*Ya$S-yqi!&>7Xfe0I}vSAKJ7_FfLf{wg4vRDIE;Qx(E339T-AjldXQGEyI zV2s%;*tv`9r}7xODx6bP9xjH~J#+fyYgiyDsxAl+2#*}m1VQ^83@}Cq=MnjGuVL>* zUJB9+) zfmyZHYDqCE)LH1O`H8)E&TU$&h@PySv_ge>D}GR!uFntodRK1$T+mzlUhBaFB#O_ZTRI z&2ag^G3d07rm7;PG-nNAXd@^vQiP$^tUD-iY{{YD$)gKp&RD)WtKv{&d{v|dn$sqt zVPFR0BZAhU+r%C(tzlFld~-0bv1F+;bX_p5 zTg{reTMo?N-V_9AGE*A3>*ObTp5cyZFFNx<~?-NtP~> z#y~l{S)tXEm$nX6+|8a_42%NF>rNOEmNd&IHbdQ?JZKa*J(~t5v1BwrQ4{-cyo0=G zkmwirb~cL!S?>TsHiD<7><7_W%jj$ttaPxd%vyn0SW87lS98+b+6Is<9E8p98aX}9 z!=_XW$`TGH(54#eH0(;yz=^E(X5L(xp|;NB){gwPKk3j4k;EoBj>E9YV2zX>@uX2Z4n2_ph9}uj%Lo% z@f3EIPK)}7^>jxwYgSJkg|WDlH)Jy$&P2MmnK9RX)o>4L32v_%VR?DrK+LhUWscQw 
[Base85-encoded GIT binary patch data omitted: raw binary file contents carried by this diff, not human-readable.]