From dc5c0f5acb8df8c34d4c56e385aa38cc65b4582f Mon Sep 17 00:00:00 2001 From: Colin Rogers <111200756+colin-rogers-dbt@users.noreply.github.com> Date: Mon, 13 May 2024 15:44:51 -0700 Subject: [PATCH] delete unneeded list command (#94) --- tests/functional/list/fixtures.py | 213 -------- tests/functional/list/test_list.py | 798 ----------------------------- 2 files changed, 1011 deletions(-) delete mode 100644 tests/functional/list/fixtures.py delete mode 100644 tests/functional/list/test_list.py diff --git a/tests/functional/list/fixtures.py b/tests/functional/list/fixtures.py deleted file mode 100644 index ae5514c6..00000000 --- a/tests/functional/list/fixtures.py +++ /dev/null @@ -1,213 +0,0 @@ -import pytest -from dbt.tests.fixtures.project import write_project_files - - -snapshots__snapshot_sql = """ -{% snapshot my_snapshot %} - {{ - config( - target_database=var('target_database', database), - target_schema=schema, - unique_key='id', - strategy='timestamp', - updated_at='updated_at', - ) - }} - select * from {{database}}.{{schema}}.seed -{% endsnapshot %} - -""" - -tests__t_sql = """ -select 1 as id limit 0 - -""" - -models__schema_yml = """ -version: 2 -models: - - name: outer - description: The outer table - columns: - - name: id - description: The id value - data_tests: - - unique - - not_null - -sources: - - name: my_source - tables: - - name: my_table - -""" - -models__ephemeral_sql = """ - -{{ config(materialized='ephemeral') }} - -select - 1 as id, - {{ dbt.date_trunc('day', dbt.current_timestamp()) }} as created_at - -""" - -models__metric_flow = """ - -select - {{ dbt.date_trunc('day', dbt.current_timestamp()) }} as date_day - -""" - -models__incremental_sql = """ -{{ - config( - materialized = "incremental", - incremental_strategy = "delete+insert", - ) -}} - -select * from {{ ref('seed') }} - -{% if is_incremental() %} - where a > (select max(a) from {{this}}) -{% endif %} - -""" - -models__docs_md = """ -{% docs my_docs %} - some docs -{% enddocs %} - -""" - -models__outer_sql = """ -select * from {{ ref('ephemeral') }} - -""" - -models__sub__inner_sql = """ -select * from {{ ref('outer') }} - -""" - -macros__macro_stuff_sql = """ -{% macro cool_macro() %} - wow! -{% endmacro %} - -{% macro other_cool_macro(a, b) %} - cool! 
-{% endmacro %} - -""" - -seeds__seed_csv = """a,b -1,2 -""" - -analyses__a_sql = """ -select 4 as id - -""" - -semantic_models__sm_yml = """ -semantic_models: - - name: my_sm - model: ref('outer') - defaults: - agg_time_dimension: created_at - entities: - - name: my_entity - type: primary - expr: id - dimensions: - - name: created_at - type: time - type_params: - time_granularity: day - measures: - - name: total_outer_count - agg: count - expr: 1 - -""" - -metrics__m_yml = """ -metrics: - - name: total_outer - type: simple - description: The total count of outer - label: Total Outer - type_params: - measure: total_outer_count -""" - - -@pytest.fixture(scope="class") -def snapshots(): - return {"snapshot.sql": snapshots__snapshot_sql} - - -@pytest.fixture(scope="class") -def tests(): - return {"t.sql": tests__t_sql} - - -@pytest.fixture(scope="class") -def models(): - return { - "schema.yml": models__schema_yml, - "ephemeral.sql": models__ephemeral_sql, - "incremental.sql": models__incremental_sql, - "docs.md": models__docs_md, - "outer.sql": models__outer_sql, - "metricflow_time_spine.sql": models__metric_flow, - "sm.yml": semantic_models__sm_yml, - "m.yml": metrics__m_yml, - "sub": {"inner.sql": models__sub__inner_sql}, - } - - -@pytest.fixture(scope="class") -def macros(): - return {"macro_stuff.sql": macros__macro_stuff_sql} - - -@pytest.fixture(scope="class") -def seeds(): - return {"seed.csv": seeds__seed_csv} - - -@pytest.fixture(scope="class") -def analyses(): - return {"a.sql": analyses__a_sql} - - -@pytest.fixture(scope="class") -def semantic_models(): - return {"sm.yml": semantic_models__sm_yml} - - -@pytest.fixture(scope="class") -def metrics(): - return {"m.yml": metrics__m_yml} - - -@pytest.fixture(scope="class") -def project_files( - project_root, - snapshots, - tests, - models, - macros, - seeds, - analyses, -): - write_project_files(project_root, "snapshots", snapshots) - write_project_files(project_root, "tests", tests) - write_project_files(project_root, "models", models) - write_project_files(project_root, "macros", macros) - write_project_files(project_root, "seeds", seeds) - write_project_files(project_root, "analyses", analyses) diff --git a/tests/functional/list/test_list.py b/tests/functional/list/test_list.py deleted file mode 100644 index f932cba7..00000000 --- a/tests/functional/list/test_list.py +++ /dev/null @@ -1,798 +0,0 @@ -import json -from os.path import normcase, normpath - -from dbt.logger import log_manager -from dbt.tests.util import run_dbt -import pytest - - -class TestList: - def dir(self, value): - return normpath(value) - - @pytest.fixture(scope="class") - def project_config_update(self): - return { - "config-version": 2, - "analysis-paths": [self.dir("analyses")], - "snapshot-paths": [self.dir("snapshots")], - "macro-paths": [self.dir("macros")], - "seed-paths": [self.dir("seeds")], - "test-paths": [self.dir("tests")], - "seeds": { - "quote_columns": False, - }, - } - - def run_dbt_ls(self, args=None, expect_pass=True): - log_manager.stdout_console() - full_args = ["ls"] - if args is not None: - full_args += args - - result = run_dbt(args=full_args, expect_pass=expect_pass) - - log_manager.stdout_console() - return result - - def assert_json_equal(self, json_str, expected): - assert json.loads(json_str) == expected - - def expect_given_output(self, args, expectations): - for key, values in expectations.items(): - ls_result = self.run_dbt_ls(args + ["--output", key]) - if not isinstance(values, (list, tuple)): - values = [values] - assert 
len(ls_result) == len(values) - for got, expected in zip(ls_result, values): - if key == "json": - self.assert_json_equal(got, expected) - else: - assert got == expected - - def expect_snapshot_output(self, project): - expectations = { - "name": "my_snapshot", - "selector": "test.snapshot.my_snapshot", - "json": { - "name": "my_snapshot", - "package_name": "test", - "depends_on": {"nodes": [], "macros": []}, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "snapshot", - "post-hook": [], - "tags": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "persist_docs": {}, - "target_database": project.database, - "target_schema": project.test_schema, - "unique_key": "id", - "strategy": "timestamp", - "updated_at": "updated_at", - "full_refresh": None, - "database": None, - "schema": None, - "alias": None, - "check_cols": None, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "meta": {}, - "grants": {}, - "packages": [], - "incremental_strategy": None, - "docs": {"node_color": None, "show": True}, - "contract": {"enforced": False, "alias_types": True}, - }, - "unique_id": "snapshot.test.my_snapshot", - "original_file_path": normalize("snapshots/snapshot.sql"), - "alias": "my_snapshot", - "resource_type": "snapshot", - }, - "path": self.dir("snapshots/snapshot.sql"), - } - self.expect_given_output(["--resource-type", "snapshot"], expectations) - - def expect_analyses_output(self): - expectations = { - "name": "a", - "selector": "test.analysis.a", - "json": { - "name": "a", - "package_name": "test", - "depends_on": {"nodes": [], "macros": []}, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "view", - "post-hook": [], - "tags": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "persist_docs": {}, - "full_refresh": None, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "database": None, - "schema": None, - "alias": None, - "meta": {}, - "unique_key": None, - "grants": {}, - "packages": [], - "incremental_strategy": None, - "docs": {"node_color": None, "show": True}, - "contract": {"enforced": False, "alias_types": True}, - }, - "unique_id": "analysis.test.a", - "original_file_path": normalize("analyses/a.sql"), - "alias": "a", - "resource_type": "analysis", - }, - "path": self.dir("analyses/a.sql"), - } - self.expect_given_output(["--resource-type", "analysis"], expectations) - - def expect_model_output(self): - expectations = { - "name": ("ephemeral", "incremental", "inner", "metricflow_time_spine", "outer"), - "selector": ( - "test.ephemeral", - "test.incremental", - "test.sub.inner", - "test.metricflow_time_spine", - "test.outer", - ), - "json": ( - { - "name": "ephemeral", - "package_name": "test", - "depends_on": { - "nodes": [], - "macros": ["macro.dbt.current_timestamp", "macro.dbt.date_trunc"], - }, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "ephemeral", - "post-hook": [], - "tags": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "persist_docs": {}, - "full_refresh": None, - "unique_key": None, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "database": None, - "schema": None, - "alias": None, - "meta": {}, - "grants": {}, - "packages": [], - "incremental_strategy": None, - "docs": {"node_color": None, "show": True}, - "contract": {"enforced": False, "alias_types": True}, - "access": "protected", - }, - "original_file_path": normalize("models/ephemeral.sql"), - "unique_id": 
"model.test.ephemeral", - "alias": "ephemeral", - "resource_type": "model", - }, - { - "name": "incremental", - "package_name": "test", - "depends_on": { - "nodes": ["seed.test.seed"], - "macros": ["macro.dbt.is_incremental"], - }, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "incremental", - "post-hook": [], - "tags": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "persist_docs": {}, - "full_refresh": None, - "unique_key": None, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "database": None, - "schema": None, - "alias": None, - "meta": {}, - "grants": {}, - "packages": [], - "incremental_strategy": "delete+insert", - "docs": {"node_color": None, "show": True}, - "contract": {"enforced": False, "alias_types": True}, - "access": "protected", - }, - "original_file_path": normalize("models/incremental.sql"), - "unique_id": "model.test.incremental", - "alias": "incremental", - "resource_type": "model", - }, - { - "name": "inner", - "package_name": "test", - "depends_on": { - "nodes": ["model.test.outer"], - "macros": [], - }, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "view", - "post-hook": [], - "tags": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "persist_docs": {}, - "full_refresh": None, - "unique_key": None, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "database": None, - "schema": None, - "alias": None, - "meta": {}, - "grants": {}, - "packages": [], - "incremental_strategy": None, - "docs": {"node_color": None, "show": True}, - "contract": {"enforced": False, "alias_types": True}, - "access": "protected", - }, - "original_file_path": normalize("models/sub/inner.sql"), - "unique_id": "model.test.inner", - "alias": "inner", - "resource_type": "model", - }, - { - "name": "metricflow_time_spine", - "package_name": "test", - "depends_on": { - "nodes": [], - "macros": ["macro.dbt.current_timestamp", "macro.dbt.date_trunc"], - }, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "view", - "post-hook": [], - "tags": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "persist_docs": {}, - "full_refresh": None, - "unique_key": None, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "database": None, - "schema": None, - "alias": None, - "meta": {}, - "grants": {}, - "packages": [], - "incremental_strategy": None, - "docs": {"node_color": None, "show": True}, - "contract": {"enforced": False, "alias_types": True}, - "access": "protected", - }, - "original_file_path": normalize("models/metricflow_time_spine.sql"), - "unique_id": "model.test.metricflow_time_spine", - "alias": "metricflow_time_spine", - "resource_type": "model", - }, - { - "name": "outer", - "package_name": "test", - "depends_on": { - "nodes": ["model.test.ephemeral"], - "macros": [], - }, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "view", - "post-hook": [], - "tags": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "persist_docs": {}, - "full_refresh": None, - "unique_key": None, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "database": None, - "schema": None, - "alias": None, - "meta": {}, - "grants": {}, - "packages": [], - "incremental_strategy": None, - "docs": {"node_color": None, "show": True}, - "contract": {"enforced": False, "alias_types": True}, - "access": "protected", - }, - "original_file_path": 
normalize("models/outer.sql"), - "unique_id": "model.test.outer", - "alias": "outer", - "resource_type": "model", - }, - ), - "path": ( - self.dir("models/ephemeral.sql"), - self.dir("models/incremental.sql"), - self.dir("models/sub/inner.sql"), - self.dir("models/metricflow_time_spine.sql"), - self.dir("models/outer.sql"), - ), - } - self.expect_given_output(["--resource-type", "model"], expectations) - - # Do not include ephemeral model - it was not selected - def expect_model_ephemeral_output(self): - expectations = { - "name": ("outer"), - "selector": ("test.outer"), - "json": ( - { - "name": "outer", - "package_name": "test", - "depends_on": {"nodes": [], "macros": []}, - "tags": [], - "config": { - "enabled": True, - "materialized": "view", - "post-hook": [], - "tags": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "persist_docs": {}, - "full_refresh": None, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "database": None, - "schema": None, - "alias": None, - "meta": {}, - "grants": {}, - "packages": [], - "incremental_strategy": None, - "docs": {"node_color": None, "show": True}, - "access": "protected", - }, - "unique_id": "model.test.ephemeral", - "original_file_path": normalize("models/ephemeral.sql"), - "alias": "outer", - "resource_type": "model", - }, - ), - "path": (self.dir("models/outer.sql"),), - } - self.expect_given_output(["--model", "outer"], expectations) - - def expect_source_output(self): - expectations = { - "name": "my_source.my_table", - "selector": "source:test.my_source.my_table", - "json": { - "config": { - "enabled": True, - }, - "unique_id": "source.test.my_source.my_table", - "original_file_path": normalize("models/schema.yml"), - "package_name": "test", - "name": "my_table", - "source_name": "my_source", - "resource_type": "source", - "tags": [], - }, - "path": self.dir("models/schema.yml"), - } - # should we do this --select automatically for a user if if 'source' is - # in the resource types and there is no '--select' or '--exclude'? 
- self.expect_given_output( - ["--resource-type", "source", "--select", "source:*"], expectations - ) - - def expect_seed_output(self): - expectations = { - "name": "seed", - "selector": "test.seed", - "json": { - "name": "seed", - "package_name": "test", - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "seed", - "post-hook": [], - "tags": [], - "pre-hook": [], - "quoting": {}, - "column_types": {}, - "delimiter": ",", - "persist_docs": {}, - "quote_columns": False, - "full_refresh": None, - "unique_key": None, - "on_schema_change": "ignore", - "on_configuration_change": "apply", - "database": None, - "schema": None, - "alias": None, - "meta": {}, - "grants": {}, - "packages": [], - "incremental_strategy": None, - "docs": {"node_color": None, "show": True}, - "contract": {"enforced": False, "alias_types": True}, - }, - "depends_on": {"macros": []}, - "unique_id": "seed.test.seed", - "original_file_path": normalize("seeds/seed.csv"), - "alias": "seed", - "resource_type": "seed", - }, - "path": self.dir("seeds/seed.csv"), - } - self.expect_given_output(["--resource-type", "seed"], expectations) - - def expect_test_output(self): - expectations = { - "name": ("not_null_outer_id", "t", "unique_outer_id"), - "selector": ("test.not_null_outer_id", "test.t", "test.unique_outer_id"), - "json": ( - { - "name": "not_null_outer_id", - "package_name": "test", - "depends_on": { - "nodes": ["model.test.outer"], - "macros": ["macro.dbt.test_not_null"], - }, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "test", - "severity": "ERROR", - "store_failures": None, - "store_failures_as": None, - "warn_if": "!= 0", - "error_if": "!= 0", - "fail_calc": "count(*)", - "where": None, - "limit": None, - "tags": [], - "database": None, - "schema": "dbt_test__audit", - "alias": None, - "meta": {}, - }, - "unique_id": "test.test.not_null_outer_id.a226f4fb36", - "original_file_path": normalize("models/schema.yml"), - "alias": "not_null_outer_id", - "resource_type": "test", - }, - { - "name": "t", - "package_name": "test", - "depends_on": {"nodes": [], "macros": []}, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "test", - "severity": "ERROR", - "store_failures": None, - "store_failures_as": None, - "warn_if": "!= 0", - "error_if": "!= 0", - "fail_calc": "count(*)", - "where": None, - "limit": None, - "tags": [], - "database": None, - "schema": "dbt_test__audit", - "alias": None, - "meta": {}, - }, - "unique_id": "test.test.t", - "original_file_path": normalize("tests/t.sql"), - "alias": "t", - "resource_type": "test", - }, - { - "name": "unique_outer_id", - "package_name": "test", - "depends_on": { - "nodes": ["model.test.outer"], - "macros": ["macro.dbt.test_unique"], - }, - "tags": [], - "config": { - "enabled": True, - "group": None, - "materialized": "test", - "severity": "ERROR", - "store_failures": None, - "store_failures_as": None, - "warn_if": "!= 0", - "error_if": "!= 0", - "fail_calc": "count(*)", - "where": None, - "limit": None, - "tags": [], - "database": None, - "schema": "dbt_test__audit", - "alias": None, - "meta": {}, - }, - "unique_id": "test.test.unique_outer_id.2195e332d3", - "original_file_path": normalize("models/schema.yml"), - "alias": "unique_outer_id", - "resource_type": "test", - }, - ), - "path": ( - self.dir("models/schema.yml"), - self.dir("tests/t.sql"), - self.dir("models/schema.yml"), - ), - } - self.expect_given_output(["--resource-type", "test"], expectations) - - def 
expect_all_output(self): - # generic test FQNS include the resource + column they're defined on - # models are just package, subdirectory path, name - # sources are like models, ending in source_name.table_name - expected_default = { - "test.ephemeral", - "test.incremental", - "test.snapshot.my_snapshot", - "test.sub.inner", - "test.outer", - "test.seed", - "source:test.my_source.my_table", - "test.not_null_outer_id", - "test.unique_outer_id", - "test.metricflow_time_spine", - "test.t", - "semantic_model:test.my_sm", - "metric:test.total_outer", - } - # analyses have their type inserted into their fqn like tests - expected_all = expected_default | {"test.analysis.a"} - - results = self.run_dbt_ls(["--resource-type", "all", "--select", "*", "source:*"]) - assert set(results) == expected_all - - results = self.run_dbt_ls(["--select", "*", "source:*"]) - assert set(results) == expected_default - - results = self.run_dbt_ls(["--resource-type", "default", "--select", "*", "source:*"]) - assert set(results) == expected_default - - results = self.run_dbt_ls - - def expect_select(self): - results = self.run_dbt_ls(["--resource-type", "test", "--select", "outer"]) - assert set(results) == {"test.not_null_outer_id", "test.unique_outer_id"} - - self.run_dbt_ls(["--resource-type", "test", "--select", "inner"], expect_pass=True) - - results = self.run_dbt_ls(["--resource-type", "test", "--select", "+inner"]) - assert set(results) == {"test.not_null_outer_id", "test.unique_outer_id"} - - results = self.run_dbt_ls(["--resource-type", "semantic_model"]) - assert set(results) == {"semantic_model:test.my_sm"} - - results = self.run_dbt_ls(["--resource-type", "metric"]) - assert set(results) == {"metric:test.total_outer"} - - results = self.run_dbt_ls(["--resource-type", "model", "--select", "outer+"]) - assert set(results) == {"test.outer", "test.sub.inner"} - - results = self.run_dbt_ls(["--resource-type", "model", "--exclude", "inner"]) - assert set(results) == { - "test.ephemeral", - "test.outer", - "test.metricflow_time_spine", - "test.incremental", - } - - results = self.run_dbt_ls(["--select", "config.incremental_strategy:delete+insert"]) - assert set(results) == {"test.incremental"} - - self.run_dbt_ls( - ["--select", "config.incremental_strategy:insert_overwrite"], expect_pass=True - ) - - def expect_resource_type_multiple(self): - """Expect selected resources when --resource-type given multiple times""" - results = self.run_dbt_ls(["--resource-type", "test", "--resource-type", "model"]) - assert set(results) == { - "test.ephemeral", - "test.incremental", - "test.not_null_outer_id", - "test.outer", - "test.sub.inner", - "test.metricflow_time_spine", - "test.t", - "test.unique_outer_id", - } - - results = self.run_dbt_ls( - [ - "--resource-type", - "test", - "--resource-type", - "model", - "--exclude", - "unique_outer_id", - ] - ) - assert set(results) == { - "test.ephemeral", - "test.incremental", - "test.not_null_outer_id", - "test.outer", - "test.metricflow_time_spine", - "test.sub.inner", - "test.t", - } - - results = self.run_dbt_ls( - [ - "--resource-type", - "test", - "--resource-type", - "model", - "--select", - "+inner", - "outer+", - "--exclude", - "inner", - ] - ) - assert set(results) == { - "test.ephemeral", - "test.not_null_outer_id", - "test.unique_outer_id", - "test.outer", - } - - def expect_selected_keys(self, project): - """Expect selected fields of the the selected model""" - expectations = [ - {"database": project.database, "schema": project.test_schema, "alias": "inner"} - ] - 
results = self.run_dbt_ls( - [ - "--model", - "inner", - "--output", - "json", - "--output-keys", - "database", - "schema", - "alias", - ] - ) - assert len(results) == len(expectations) - - for got, expected in zip(results, expectations): - self.assert_json_equal(got, expected) - - """Expect selected fields when --output-keys given multiple times - """ - expectations = [{"database": project.database, "schema": project.test_schema}] - results = self.run_dbt_ls( - [ - "--model", - "inner", - "--output", - "json", - "--output-keys", - "database", - "--output-keys", - "schema", - ] - ) - assert len(results) == len(expectations) - - for got, expected in zip(results, expectations): - self.assert_json_equal(got, expected) - - """Expect selected fields of the test resource types - """ - expectations = [ - {"name": "not_null_outer_id", "column_name": "id"}, - {"name": "t"}, - {"name": "unique_outer_id", "column_name": "id"}, - ] - results = self.run_dbt_ls( - [ - "--resource-type", - "test", - "--output", - "json", - "--output-keys", - "name", - "column_name", - ] - ) - assert len(results) == len(expectations) - - for got, expected in zip( - sorted(results, key=lambda x: json.loads(x).get("name")), - sorted(expectations, key=lambda x: x.get("name")), - ): - self.assert_json_equal(got, expected) - - """Expect nothing (non-existent keys) for the selected models - """ - expectations = [{}, {}] - results = self.run_dbt_ls( - [ - "--model", - "inner outer", - "--output", - "json", - "--output-keys", - "non_existent_key", - ] - ) - assert len(results) == len(expectations) - - for got, expected in zip(results, expectations): - self.assert_json_equal(got, expected) - - @pytest.mark.skip("The actual is not getting loaded, so all actuals are 0.") - def test_ls(self, project): - self.expect_snapshot_output(project) - self.expect_analyses_output() - self.expect_model_output() - self.expect_source_output() - self.expect_seed_output() - self.expect_test_output() - self.expect_select() - self.expect_resource_type_multiple() - self.expect_all_output() - self.expect_selected_keys(project) - - -def normalize(path): - """On windows, neither is enough on its own: - normcase('C:\\documents/ALL CAPS/subdir\\..') - 'c:\\documents\\all caps\\subdir\\..' - normpath('C:\\documents/ALL CAPS/subdir\\..') - 'C:\\documents\\ALL CAPS' - normpath(normcase('C:\\documents/ALL CAPS/subdir\\..')) - 'c:\\documents\\all caps' - """ - return normcase(normpath(path))
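
Note (outside the patch itself): the deleted files exercised `dbt ls` through the shared pytest harness, i.e. `dbt.tests.fixtures.project` for project fixtures and `dbt.tests.util.run_dbt` for invoking commands. If an adapter still wants a minimal listing smoke test after this removal, a sketch along the following lines should work. It is only illustrative, not part of this change: the class name TestListSmoke and the model file my_model.sql are invented here, and it assumes the harness's default package name "test", which is what the deleted expectations (e.g. "test.outer", "test.sub.inner") relied on.

import pytest
from dbt.tests.util import run_dbt


class TestListSmoke:
    # The `models` fixture is picked up by the shared dbt test harness and
    # written into the temporary project; one trivial model is enough to
    # produce a single listed node.
    @pytest.fixture(scope="class")
    def models(self):
        return {"my_model.sql": "select 1 as id"}

    def test_ls(self, project):
        # `project` is provided by the harness. run_dbt(["ls", ...]) returns the
        # selected node names, e.g. "test.my_model" under the default package
        # name "test".
        results = run_dbt(["ls", "--resource-type", "model"])
        assert results == ["test.my_model"]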