diff --git a/.flake8 b/.flake8
index 82defde..37321bd 100644
--- a/.flake8
+++ b/.flake8
@@ -17,5 +17,4 @@ ignore =
     ; line break after binary operator
     W504
 exclude =
-    src/archive/*
-    ; src/analysis.py
\ No newline at end of file
+    src/archive/*
\ No newline at end of file
diff --git a/.mypy.ini b/.mypy.ini
index 9bf4787..0451a0a 100644
--- a/.mypy.ini
+++ b/.mypy.ini
@@ -17,18 +17,12 @@ warn_unused_configs = True
 disallow_any_generics = True
 warn_return_any = True
 warn_unreachable = True
 
-; Pydantic integration
-; https://docs.pydantic.dev/latest/integrations/mypy/
-; plugins =
-;     pydantic.mypy
 
-; Do not type-check the specified subpkgs/modules
 ; Inline silencing of specific errors:
 ; https://mypy.readthedocs.io/en/stable/error_codes.html
 ; Exclude specific files or directories
 exclude = (?x)(
-    ; analysis.py
-    ; | archive/.*
     archive/.*
+    ; | colors.py
 )
\ No newline at end of file
diff --git a/_quarto.yml b/_quarto.yml
index ace1ca7..376e3a2 100644
--- a/_quarto.yml
+++ b/_quarto.yml
@@ -15,12 +15,9 @@ website:
   search: false
   navbar:
     title: false
-    # background: "#eee"
     left:
       - href: notebooks/report.ipynb
         text: "Report"
-      # - href: notebooks/sql_qs.ipynb
-      #   text: "SQL exercises"
   page-footer:
     center:
       - href: https://bainmatt.github.io/
@@ -29,7 +26,3 @@ website:
 format:
   html:
     theme: ember
-    # css: styles.css
-    # theme:
-    #   light: flatly
-    #   dark: darkly
diff --git a/environment.yml b/environment.yml
index 30f4805..38ce059 100644
--- a/environment.yml
+++ b/environment.yml
@@ -4,18 +4,19 @@
 # (2.0) $ conda activate {name}
 #
 # This env was pointed to, created and activated on, an external drive by:
-#
+#
 # (0.0) $ conda config --append envs_dirs /Volumes/Samsung_T5/conda-environments
 # (1.0) $ conda env create --prefix /Volumes/Samsung_T5/conda-environments/streamlit-geomaps --file environment.yml
 # (2.1) $ conda deactivate
 # (2.2) $ conda activate /Volumes/Samsung_T5/conda-environments/streamlit-geomaps
 # (2.3)
 # (3.0)
   - pip>=19.0
   - jupyter
   - pandas
-  - numpy
+  - requests
+  - python-dotenv
+  # - gdown
 
   # Dataviz
   - matplotlib
-  - seaborn
   - plotly
   - geopandas
-  - geopy
+  # - geopy
   - geojson
+  - seaborn
 
   # Stats
-  # - scikit-learn
-  # - scipy
-  # - statsmodels
-  # - arviz
-
-  # ETL
-  - requests
-  - gdown
-  # - pydantic
-  # - bs4
-  # - mysql
-
-  # Ensures pycurl and wptools are installed correctly
-  # - openssl
-  # - pycurl
-
-  # Secure API key retrieval
-  - python-dotenv
+  - scipy
 
   - pip:
-    # Stats
-    # - bambi
-
-    # Tables
-    # - great-tables
-    # - polars
-    # - pyarrow
-    # - tabulate
-
     # Dashboards
     - streamlit
-    # EDA
-    # - holidays
-
-    # SQL
-    # - mysql
-    # - mysqlclient
-    # - mysql-connector-python
-    # - PyMySQL
-    # - jsonschema
-
     # Typing / testing / linting
     # run `mypy --install-types` to install missing stubs. Pandas given below.
     - mypy
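The conda spec above now mirrors the app's actual imports: requests, python-dotenv, and scipy move into the main dependency list, seaborn is regrouped under Dataviz, and gdown/geopy are commented out. A throwaway check (not part of the repo) that the pruned environment still satisfies every runtime import:

    import importlib

    # Import names for the packages listed in environment.yml;
    # "dotenv" is the import name of the python-dotenv package.
    for mod in ("pandas", "requests", "dotenv", "matplotlib", "plotly",
                "geopandas", "geojson", "seaborn", "scipy", "streamlit"):
        importlib.import_module(mod)
        print(f"{mod}: OK")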
diff --git a/notebooks/report.ipynb b/notebooks/report.ipynb
index 2fcc6ff..bbcea9a 100644
--- a/notebooks/report.ipynb
+++ b/notebooks/report.ipynb
@@ -71,7 +71,6 @@
     "#| code-fold: true\n",
     "\n",
     "import pandas as pd\n",
-    "import matplotlib.pyplot as plt\n",
     "from IPython.display import display as display3\n",
     "\n",
     "from src.paths import get_path_to\n",
@@ -81,7 +80,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -89,16 +88,10 @@
     "\n",
     "customize_plots()\n",
     "%config InlineBackend.figure_format = 'svg'\n",
-    "# plt.style.use(\"_matplotlibrc\")\n",
     "\n",
     "import plotly.io as pio\n",
     "pio.renderers.default = \"notebook\"\n",
-    "# pio.renderers.default = \"plotly_mimetype+notebook_connected\"\n",
-    "\n",
-    "# pd.set_option('display.expand_frame_repr', False)\n",
-    "# pd.set_option('display.max_columns', 8, 'display.max_colwidth', 30)\n",
-    "# pd.reset_option('display.max_columns')\n",
-    "# pd.reset_option('display.max_colwidth')"
+    "# pio.renderers.default = \"plotly_mimetype+notebook_connected\""
    ]
   },
   {
@@ -120,11 +113,6 @@
     "#| code-summary: Load\n",
     "\n",
     "if 'data' not in locals():\n",
-    "    # data = pd.read_csv(\n",
-    "    #     get_path_to(\"data\", \"raw\", \"\"),\n",
-    "    #     encoding='ISO-8859-1',\n",
-    "    #     low_memory=False\n",
-    "    # )\n",
     "    pass\n",
     "else:\n",
     "    print(\"data loaded.\")"
diff --git a/pytest.ini b/pytest.ini
index fa05e36..9099b29 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -3,7 +3,6 @@
 [pytest]
 pythonpath = src
 ; minversion = "6.0"
-; addopts = -q
 addopts = -ra -vv --ignore=src/app.py --ignore-glob=*/archive/*
 testpaths = tests
diff --git a/requirements.txt b/requirements.txt
index dff26f5..f0f82bc 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,28 +1,22 @@
 jupyter>=1.1.1
 requests>=2.32.3
 python-dotenv>=1.0.1
+pandas>=2.2.3
-numpy>=1.24.2
-scipy>=1.9.3
-pandas>=2.0.0
-
-matplotlib>=3.6.2
-seaborn>=0.12.1
+matplotlib>=3.9.3
 plotly>=5.24.1
-geopandas>=0.12.1
-geopy>=2.4.1
+geopandas>=1.0.1
+# geopy>=2.4.1
 geojson>=3.1.0
+seaborn>=0.13.2
 
 streamlit>=1.40.2
-gdown>=5.2.0
-
-# great-tables>=0.11.1
-# polars>=0.14.28
-# pyarrow>=17.0.0
+scipy>=1.14.1
+# gdown>=5.2.0
 
 mypy>=1.13.0
 typing_extensions>=4.12.2
 pandas-stubs>=2.2.3.241126
 types-requests>=2.32.0.20241016
 pytest>=8.3.4
-coverage>=7.6.8
+coverage>=7.6.9
 flake8>=7.1.1
diff --git a/src/analysis.py b/src/analysis.py
deleted file mode 100644
index d8d3f00..0000000
--- a/src/analysis.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""
-Utility functions for data wrangling, preprocessing, and analysis.
-"""
-
-import pandas as pd
-
-from src.inspection import display
-
-# z: str = 4
-
-x = pd.DataFrame([1, 2, 3])
-display("x", globs=globals())
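The notebook keeps its load-once guard even though the template read is gone; re-running the cell skips the slow read once `data` exists in the kernel. A minimal sketch of the pattern, assuming a hypothetical CSV name (the real file name is elided in the template):

    import pandas as pd
    from src.paths import get_path_to

    if 'data' not in locals():
        data = pd.read_csv(
            get_path_to("data", "raw", "example.csv"),  # hypothetical file
            encoding='ISO-8859-1',
            low_memory=False
        )
    else:
        print("data loaded.")

Deleting src/analysis.py also removes its import-time side effects (it built a DataFrame and displayed it on import), which is why .flake8 and .mypy.ini above, and run_doctests.py below, drop their references to it.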
-""" - -import pandas as pd - -from src.inspection import display - -# z: str = 4 - -x = pd.DataFrame([1, 2, 3]) -display("x", globs=globals()) diff --git a/src/app.py b/src/app.py index 12cac22..8d9a614 100644 --- a/src/app.py +++ b/src/app.py @@ -5,11 +5,15 @@ $ streamlit run src/app.py +To locally run the GitHub-hosted app: + + $ streamlit run https://raw.githubusercontent.com/bainmatt/streamlit-geomaps/main/src/app.py + """ + import os import re import json -import gdown import geojson import requests import urllib.request @@ -30,109 +34,12 @@ from dotenv import load_dotenv load_dotenv() - -GDRIVE_URL = ( - "https://drive.google.com/drive/folders/" - "1Zdt8bkAWc_iIwSCUNhkXEjXxPLraKkxd?usp=sharing" -) DATA_PATH = Path(os.getenv('DATA_PATH', get_path_to('data'))) -lookup_dict_type = dict[str, dict[str, list[str]]] - - -def download_gdrive_folder( - folder_url: str = GDRIVE_URL, - download_path: Path = get_path_to('data'), - download_dir: str = "geojson_files" -): - """ - Download all files from a Google Drive folder. - """ - download_path = download_path / download_dir - download_path.mkdir(parents=True, exist_ok=True) - - print(f'Downloading folder from: {folder_url}') - gdown.download_folder( - folder_url, output=str(download_path), quiet=False - ) - - -def extract_zip_lookup( - input_file: str, - data_path: Path = DATA_PATH -) -> lookup_dict_type: - """ - Extract a lookup dictionary with the structure - {ste_name: {coty_name: [zcta5_code]}} from the GeoJSON data. - - Parameters - ---------- - input_file : str - Name of the GeoJSON file to parse. - - data_path : Path, default=DATA_PATH - Path to the directory where the GeoJSON file is located. - - Returns - ------- - dict - A nested dictionary with states as keys and counties as values, - containing lists of ZCTA codes. - """ - input_filepath = data_path / input_file - zcta_lookup: dict[str, dict[str, list[str]]] = defaultdict( - lambda: defaultdict(list) - ) - - # Open and read the local GeoJSON file - with open(input_filepath, 'r') as f: - geojson_data = json.load(f) - - # Extract states, counties, and ZCTA codes - for feature in geojson_data.get('features', []): - properties = feature.get('properties', {}) - state = properties.get('ste_name', [None])[0] - county = properties.get('coty_name', [None])[0] - zcta_code = properties.get('zcta5_code', [None])[0] - # Check for valid state, county, and zcta_code - if state and county and zcta_code: - zcta_lookup[state][county].append(zcta_code) - - return dict(zcta_lookup) - - -def save_lookup_dict( - lookup_dict: lookup_dict_type, - output_file: str, - output_path: Path = DATA_PATH, - output_dir: str | None = None -) -> None: - """ - Save the lookup dictionary to a JSON file. - """ - if output_dir: - output_filepath = output_path / output_dir / output_file - else: - output_filepath = output_path / output_file - with open(output_filepath, 'w') as f: - json.dump(lookup_dict, f, indent=4) +lookup_dict_type = dict[str, dict[str, list[str]]] -def load_lookup_dict( - input_file: str, - input_path: Path = DATA_PATH, - input_dir: str | None = None -) -> lookup_dict_type: - """ - Load the lookup dictionary from a JSON file. 
- """ - if input_dir: - input_filepath = input_path / input_dir / input_file - else: - input_filepath = input_path / input_file - with open(input_filepath, 'r') as f: - lookup_dict: lookup_dict_type = json.load(f) - return lookup_dict +# -- Get the data ------------------------------------------------------------ def download_gh_files( @@ -299,13 +206,21 @@ def download_data( # Data saved to /...geojson """ if output_dir: - output_filepath = output_path / output_dir / output_file + output_path = output_path / output_dir + output_filepath = output_path / output_file else: - output_filepath = output_path / "downloads" / output_file + # output_path = output_path / "downloads" + output_filepath = output_path / output_file + + if os.path.isfile(output_filepath): + print(f"File already exists at {output_filepath}.") + return response = requests.get(url) if response.status_code == 200: + os.makedirs(output_path, exist_ok=True) + with open(output_filepath, 'wb') as file: file.write(response.content) print(f"Data saved to {output_filepath}") @@ -316,6 +231,90 @@ def download_data( ) +# -- Process ----------------------------------------------------------------- + + +@st.cache_data +def extract_zip_lookup( + input_file: str, + data_path: Path = DATA_PATH +) -> lookup_dict_type: + """ + Extract a lookup dictionary with the structure + {ste_name: {coty_name: [zcta5_code]}} from the GeoJSON data. + + Parameters + ---------- + input_file : str + Name of the GeoJSON file to parse. + + data_path : Path, default=DATA_PATH + Path to the directory where the GeoJSON file is located. + + Returns + ------- + dict + A nested dictionary with states as keys and counties as values, + containing lists of ZCTA codes. + """ + input_filepath = data_path / input_file + zcta_lookup: dict[str, dict[str, list[str]]] = defaultdict( + lambda: defaultdict(list) + ) + + # Open and read the local GeoJSON file + with open(input_filepath, 'r') as f: + geojson_data = json.load(f) + + # Extract states, counties, and ZCTA codes + for feature in geojson_data.get('features', []): + properties = feature.get('properties', {}) + state = properties.get('ste_name', [None])[0] + county = properties.get('coty_name', [None])[0] + zcta_code = properties.get('zcta5_code', [None])[0] + + # Check for valid state, county, and zcta_code + if state and county and zcta_code: + zcta_lookup[state][county].append(zcta_code) + + return dict(zcta_lookup) + + +def save_lookup_dict( + lookup_dict: lookup_dict_type, + output_file: str, + output_path: Path = DATA_PATH, + output_dir: str | None = None +) -> None: + """ + Save the lookup dictionary to a JSON file. + """ + if output_dir: + output_filepath = output_path / output_dir / output_file + else: + output_filepath = output_path / output_file + with open(output_filepath, 'w') as f: + json.dump(lookup_dict, f, indent=4) + + +@st.cache_data +def load_lookup_dict( + input_file: str, + input_path: Path = DATA_PATH, + input_dir: str | None = None +) -> lookup_dict_type: + """ + Load the lookup dictionary from a JSON file. 
+ """ + if input_dir: + input_filepath = input_path / input_dir / input_file + else: + input_filepath = input_path / input_file + with open(input_filepath, 'r') as f: + lookup_dict: lookup_dict_type = json.load(f) + return lookup_dict + + def parse_geojson_zips( input_file: str, zip_lookup: lookup_dict_type, @@ -395,6 +394,7 @@ def parse_geojson_zips( print(f"Saved: {output_filepath}") +@st.cache_data def parse_geojson_states( input_file: str, zip_lookup: lookup_dict_type, @@ -532,6 +532,7 @@ def get_available_regions( ] +@st.cache_data def load_geojson_as_json( region: str, data_path: Path = DATA_PATH, @@ -551,6 +552,7 @@ def load_geojson_as_json( return geojson.load(f) +@st.cache_data def load_geojson_as_gdf( region: str, data_path: Path = DATA_PATH, @@ -572,7 +574,11 @@ def load_geojson_as_gdf( return None +# -- Plot -------------------------------------------------------------------- + + def plot_zips( + state_list: list[str], data_path: Path = DATA_PATH, data_dir: str = "geojson_files" ) -> None: @@ -581,15 +587,17 @@ def plot_zips( Parameters ---------- + state_list : list[str] + A list of valid state names to choose from a dropdown. + data_path : Path Path to the directory containing the data files. data_dir : str Subdirectory where parsed GeoJSON files are stored. """ - area_list = get_available_regions(data_path, data_dir) - state_list = [area for area in area_list if '_' not in area] - county_list = [area for area in area_list if area not in state_list] + # area_list = get_available_regions(data_path, data_dir) + # state_list = [area for area in area_list if '_' not in area] # Primary dropdown for state selection selected_state = st.selectbox( @@ -597,7 +605,13 @@ def plot_zips( sorted(state_list) # sorted(get_available_regions(data_path, data_dir)) ) + + # Dynamically parse the constituent counties and zip codes + extract_state_data(selected_state) + # Secondary dropdown for county selection + area_list = get_available_regions(data_path, data_dir) + county_list = [area for area in area_list if area not in state_list] filtered_county_list = [ cty for cty in county_list if selected_state in cty ] @@ -633,7 +647,9 @@ def plot_zips( # Generate discrete color palette discrete_palette = [ rgb_to_hex(color) - for color in get_palette("plasma", n_colors=len(df)) + for color in get_palette( + "plasma", n_colors=len(df) + ) # type: ignore[union-attr] ] discrete_color_scale = { df['id'][i]: discrete_palette[i] for i in range(len(df)) @@ -771,6 +787,8 @@ def extract_state_data( Perform parsing at the state and county level for the state given by name. """ output_path = data_path / output_dir + os.makedirs(output_path, exist_ok=True) + existing_states = [ os.path.splitext(region)[0].lower() for region in os.listdir(output_path) @@ -800,40 +818,36 @@ def main(): # exit() - # -- Run once ------------------------------------------------------------ - - # 0. Download data - - # download_gh_files( - # {"codeforgermany/click_that_hood/main": "public/data/*.geojson"}, - # save=True - # ) - # url = ( - # "https://public.opendatasoft.com/api/explore/v2.1/catalog/datasets/" - # "georef-united-states-of-america-zcta5/exports/geojson" - # ) - # download_data(url, output_file="georef_united_states.geojson") - - # 1. 
@@ -800,40 +818,36 @@ def main():
 
     # exit()
 
-    # -- Run once ------------------------------------------------------------
-
-    # 0. Download data
-
-    # download_gh_files(
-    #     {"codeforgermany/click_that_hood/main": "public/data/*.geojson"},
-    #     save=True
-    # )
-    # url = (
-    #     "https://public.opendatasoft.com/api/explore/v2.1/catalog/datasets/"
-    #     "georef-united-states-of-america-zcta5/exports/geojson"
-    # )
-    # download_data(url, output_file="georef_united_states.geojson")
-
-    # 1. Extract a {state: county: [zip]} lookup table
-
-    # zip_lookup = extract_zip_lookup(input_file="georef_united_states.geojson")
-    # save_lookup_dict(zip_lookup, 'zip_lookup.json')
-
-    # For use in a cloud-based application
-    geojson_folder = Path(DATA_PATH / 'geojson_files')
-    if not geojson_folder.exists() or not any(geojson_folder.iterdir()):
-        download_gdrive_folder()
-
     # -- Orchestrate ---------------------------------------------------------
 
-    # 2. Parse GeoJSON files
+    data_url = (
+        "https://public.opendatasoft.com/api/explore/v2.1/catalog/datasets/"
+        "georef-united-states-of-america-zcta5/exports/geojson"
+    )
+    data_file = "georef_united_states.geojson"
+    data_filepath = DATA_PATH / data_file
+
+    if not os.path.isfile(data_filepath):
+        # 0. Download data
+        download_data(url=data_url, output_file=data_file)
+        # download_gh_files(
+        #     {"codeforgermany/click_that_hood/main": "public/data/*.geojson"},
+        #     output_dir="click_that_hood_files",
+        #     save=True
+        # )
+
+        # 1. Extract a {state: county: [zip]} lookup table
+        zip_lookup = extract_zip_lookup(input_file=data_file)
+        save_lookup_dict(zip_lookup, 'zip_lookup.json')
+    else:
+        zip_lookup = load_lookup_dict('zip_lookup.json')
 
+    # 2. Parse GeoJSON files
     # extract_state_data('New Mexico')
 
-    # 4. Run app
-
+    # 3. Run app
+    plot_zips(state_list=list(zip_lookup.keys()))
     # plot_hoods(data_dir='click_that_hood_files_small')
-    plot_zips()
 
 
 if __name__ == "__main__":
diff --git a/src/colors.py b/src/colors.py
index ac8933e..325dd31 100644
--- a/src/colors.py
+++ b/src/colors.py
@@ -257,7 +257,12 @@ def create_gradient(
     r = np.linspace(rgb1[0], rgb2[0], n_colors)
     g = np.linspace(rgb1[1], rgb2[1], n_colors)
     b = np.linspace(rgb1[2], rgb2[2], n_colors)
-    gradient = list(zip(r, g, b))
+
+    # gradient = list(zip(r, g, b))
+    gradient = [
+        (float(r), float(g), float(b))
+        for r, g, b in zip(r, g, b)
+    ]
 
     if as_hex:
         return [rgb_to_hex(rgb) for rgb in gradient]
@@ -298,7 +303,11 @@ def get_palette(
     >>> print(pal)
     [(0.78, 0.86, 0.94), (0.42, 0.68, 0.84), (0.13, 0.44, 0.71)]
     """
-    return sns.color_palette(palette, n_colors=n_colors, as_cmap=as_cmap)
+    return sns.color_palette(  # type: ignore[no-any-return]
+        palette,
+        n_colors=n_colors,
+        as_cmap=as_cmap
+    )
@@ -389,6 +398,9 @@ def spherical_to_rgb(
     --------
     >>> from src.colors import spherical_to_rgb
     """
+    theta = float(theta)
+    psi = float(psi)
+
     d2r = np.pi / 180.
     theta_rad = theta * d2r
     psi_rad = psi * d2r
@@ -440,8 +452,8 @@ def get_spherical_palette(n_colors: int) -> list[RGBType]:
         new_color = spherical_to_rgb(
             shift=center,
             radius=radius,
-            theta=theta,
-            psi=psi
+            theta=float(theta),
+            psi=float(psi)
         )
         if new_color not in color_list:
             new_colors.append(new_color)
@@ -505,7 +517,7 @@ def get_cubic_palette(n_colors: int) -> list[RGBType]:
     for i in grid:
         for j in grid:
             for k in grid:
-                new_color = (i, j, k)
+                new_color = (float(i), float(j), float(k))
                 if new_color not in color_list:
                     new_colors.append(new_color)
@@ -572,11 +584,13 @@ def display_palette(
     ...     ])
     """
     if isinstance(gradient, ListedColormap):
-        gradient = gradient.colors
+        gradient = gradient.colors  # type: ignore[assignment]
 
     # palplot doesn't handle LinearSegmented cmaps like "Blues" and "PiYG"
     elif isinstance(gradient, LinearSegmentedColormap):
-        gradient = [gradient(i / (30 - 1)) for i in range(30)]
+        gradient = [
+            gradient(i / (30 - 1)) for i in range(30)
+        ]  # type: ignore[assignment]
 
     sns.palplot(gradient)
     plt.show()
@@ -621,6 +635,9 @@ def _process_rgb(
     ...     )
     [(255, 0, 0), (0, 128, 255)]
     """
+    if isinstance(gradient, ListedColormap):
+        gradient = list(gradient.colors)  # type: ignore[arg-type]
+
     if isinstance(gradient, tuple):
         gradient = [gradient]
@@ -658,7 +675,6 @@ def _process_rgb(
 
 
 def main():
-    # Comment out (2) to run all tests in script; (1) to run specific tests
     import doctest
     doctest.testmod(verbose=True)
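The float(...) coercions throughout colors.py address one family of mypy errors: indexing or iterating a NumPy array yields np.float64-style scalars, which the stubs may not accept where a plain float tuple (RGBType, presumably a tuple[float, float, float] alias) is expected. The shape of the fix in isolation:

    import numpy as np

    vals = np.linspace(0.0, 1.0, 3)
    rgb: tuple[float, float, float] = (
        float(vals[0]), float(vals[1]), float(vals[2])
    )

The remaining # type: ignore[...] comments silence specific error codes per line rather than excluding the whole module, which is why colors.py stays commented out in the .mypy.ini exclude block above.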
) [(255, 0, 0), (0, 128, 255)] """ + if isinstance(gradient, ListedColormap): + gradient = list(gradient.colors) # type: ignore[arg-type] + if isinstance(gradient, tuple): gradient = [gradient] @@ -658,7 +675,6 @@ def _process_rgb( def main(): - # Comment out (2) to run all tests in script; (1) to run specific tests import doctest doctest.testmod(verbose=True) diff --git a/src/inspection.py b/src/inspection.py index 436192f..a3d1793 100644 --- a/src/inspection.py +++ b/src/inspection.py @@ -217,7 +217,6 @@ def display2( def main(): - # Comment out (2) to run all tests in script; (1) to run specific tests import doctest doctest.testmod(verbose=True) diff --git a/src/main_notebook.py b/src/main_notebook.py index 2fe3e3a..a9ed4d8 100644 --- a/src/main_notebook.py +++ b/src/main_notebook.py @@ -6,17 +6,8 @@ # flake8: noqa: E403 -import re import sys -import numpy as np -import pandas as pd -import seaborn as sns -import geopandas as gpd -import plotly.express as px -import matplotlib.pyplot as plt - from pathlib import Path -from pandas.plotting import scatter_matrix src_path = Path('..') sys.path.append(str(src_path.resolve())) @@ -24,21 +15,10 @@ from src.paths import get_path_to from src.inspection import display from src.stylesheet import customize_plots -from src.inspection import make_df, display, display2 +from src.inspection import display, display2 customize_plots() # %config InlineBackend.figure_format = 'svg' # %% -# Load data -if 'data' not in locals(): - # data = pd.read_csv( - # get_path_to("data", "raw", "PBJ_Daily_Nurse_Staffing_Q1_2024.zip"), - # encoding='ISO-8859-1', - # low_memory=False - # ) - pass -else: - print("data loaded.") - -# %% +# xx diff --git a/src/run_doctests.py b/src/run_doctests.py index fcb3ab2..cca4b92 100644 --- a/src/run_doctests.py +++ b/src/run_doctests.py @@ -52,7 +52,7 @@ def run_doctest_suite(modules_to_test: tuple[str, ...]): 'src.stylesheet', 'src.workflow', 'src.inspection', - 'src.analysis', + 'src.util', ) run_doctest_suite(MODULES_TO_TEST) diff --git a/src/stylesheet.py b/src/stylesheet.py index 50aeed0..27cb240 100644 --- a/src/stylesheet.py +++ b/src/stylesheet.py @@ -2,10 +2,8 @@ Plotting customization. 
""" -# import numpy as np import matplotlib as mpl from cycler import cycler -# import matplotlib.pyplot as plt def customize_plots() -> None: @@ -264,7 +262,6 @@ def customize_plots() -> None: def main(): - # Comment out (2) to run all tests in script; (1) to run specific tests import doctest doctest.testmod(verbose=True) diff --git a/src/util.py b/src/util.py index 3626b1e..3781fa8 100644 --- a/src/util.py +++ b/src/util.py @@ -6,22 +6,6 @@ from typing import Optional, Tuple from contextlib import contextmanager -# from src.paths import get_path_to - -# -- I/O utilities ----------------------------------------------------------- - -# import pandas as pd -# print(pd.read_csv( -# "data/raw/PBJ_Daily_Nurse_Staffing_Q1_2024.zip", -# encoding='ISO-8859-1', -# low_memory=False -# )) -# print(pd.read_csv( -# get_path_to("data", "/raw/PBJ_Daily_Nurse_Staffing_Q1_2024.zip"), -# encoding='ISO-8859-1', -# low_memory=False -# )) - # -- SQL utilities ----------------------------------------------------------- @@ -129,25 +113,13 @@ def close(self) -> None: def main(): - # Comment out (2) to run all tests in script; (1) to run specific tests import doctest doctest.testmod(verbose=True) # from src.workflow import doctest_function # doctest_function(DBManager, globs=globals()) - # One-off tests - # import pandas as pd - # pd.read_csv( - # "data/raw/PBJ_Daily_Nurse_Staffing_Q1_2024.zip", - # encoding='ISO-8859-1', - # low_memory=False - # ) - - # from pathlib import Path - # print(Path(__file__)) - - return None + pass if __name__ == "__main__": diff --git a/src/validate.py b/src/validate.py index 98339b8..31e22f3 100644 --- a/src/validate.py +++ b/src/validate.py @@ -155,7 +155,6 @@ def validate_column_list(var, name, df=None): def main(): - # Comment out (2) to run all tests in script; (1) to run specific tests import doctest doctest.testmod(verbose=True) diff --git a/src/workflow.py b/src/workflow.py index e40b65e..5799be7 100644 --- a/src/workflow.py +++ b/src/workflow.py @@ -36,7 +36,6 @@ def doctest_function( def main(): - # Comment out (2) to run all tests in script; (1) to run specific tests import doctest doctest.testmod(verbose=True)