diff --git a/dlt/common/utils.py b/dlt/common/utils.py
index 692f8452c9..0214bc037a 100644
--- a/dlt/common/utils.py
+++ b/dlt/common/utils.py
@@ -144,7 +144,7 @@ def flatten_list_of_str_or_dicts(seq: Sequence[Union[StrAny, str]]) -> DictStrAn
     # return dicts
 
 
-def flatten_list_or_items(_iter: Union[Iterator[TAny], Iterator[List[TAny]]]) -> Iterator[TAny]:
+def flatten_list_or_items(_iter: Union[Iterable[TAny], Iterable[List[TAny]]]) -> Iterator[TAny]:
     for items in _iter:
         if isinstance(items, List):
             yield from items
diff --git a/dlt/helpers/streamlit_helper.py b/dlt/helpers/streamlit_helper.py
index d505ccaff3..7921e4e2e1 100644
--- a/dlt/helpers/streamlit_helper.py
+++ b/dlt/helpers/streamlit_helper.py
@@ -1,5 +1,5 @@
 import sys
-from typing import Dict, List
+from typing import Dict, List, Iterator
 import humanize
 
 from dlt.common import pendulum
@@ -253,7 +253,7 @@ def _query_data(query: str, chunk_size: int = None) -> pd.DataFrame:
         if "write_disposition" in table:
             table_hints.append("write disposition: **%s**" % table["write_disposition"])
         columns = table["columns"]
-        primary_keys = flatten_list_or_items([
+        primary_keys: Iterator[str] = flatten_list_or_items([
             col_name for col_name in columns.keys()
             if not col_name.startswith("_") and not columns[col_name].get("primary_key") is None
         ])
diff --git a/tests/helpers/streamlit_tests/test_streamlit_show_resources.py b/tests/helpers/streamlit_tests/test_streamlit_show_resources.py
index c550b45654..fcf232ea76 100644
--- a/tests/helpers/streamlit_tests/test_streamlit_show_resources.py
+++ b/tests/helpers/streamlit_tests/test_streamlit_show_resources.py
@@ -57,10 +57,13 @@ def test_multiple_resources_pipeline():
     )
     load_info = pipeline.run([source1(10), source2(20)])
 
-    assert load_info.pipeline.schema_names == ["source2", "source1"]
-    assert load_info.pipeline.schemas.get("source1").data_tables()[0]["name"] == "one"
-    assert load_info.pipeline.schemas.get("source1").data_tables()[0]["columns"]["column_1"].get("primary_key") is True
-    assert load_info.pipeline.schemas.get("source1").data_tables()[0]["columns"]["column_1"].get("merge_key") is True
-    assert load_info.pipeline.schemas.get("source1").data_tables()[0]["write_disposition"] == "merge"
+    source1_schema = load_info.pipeline.schemas.get("source1")  # type: ignore[attr-defined]
+
+    assert load_info.pipeline.schema_names == ["source2", "source1"]  # type: ignore[attr-defined]
+
+    assert source1_schema.data_tables()[0]["name"] == "one"
+    assert source1_schema.data_tables()[0]["columns"]["column_1"].get("primary_key") is True
+    assert source1_schema.data_tables()[0]["columns"]["column_1"].get("merge_key") is True
+    assert source1_schema.data_tables()[0]["write_disposition"] == "merge"
 
     # The rest should be inspected using the streamlit tool.