diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 00000000..02ed72d4
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,3 @@
+# This codeowners file is used to ensure all PRs require reviews from the adapters team
+
+* @dbt-labs/adapters
diff --git a/tests/functional/defer_state/fixtures.py b/tests/functional/defer_state/fixtures.py
deleted file mode 100644
index 8b1d3d35..00000000
--- a/tests/functional/defer_state/fixtures.py
+++ /dev/null
@@ -1,424 +0,0 @@
-seed_csv = """id,name
-1,Alice
-2,Bob
-"""
-
-table_model_sql = """
-{{ config(materialized='table') }}
-select * from {{ ref('ephemeral_model') }}
-
--- establish a macro dependency to trigger state:modified.macros
--- depends on: {{ my_macro() }}
-"""
-
-table_model_now_view_sql = """
-{{ config(materialized='view') }}
-select * from {{ ref('ephemeral_model') }}
-
--- establish a macro dependency to trigger state:modified.macros
--- depends on: {{ my_macro() }}
-"""
-
-table_model_now_incremental_sql = """
-{{ config(materialized='incremental', on_schema_change='append_new_columns') }}
-select * from {{ ref('ephemeral_model') }}
-
--- establish a macro dependency to trigger state:modified.macros
--- depends on: {{ my_macro() }}
-"""
-
-changed_table_model_sql = """
-{{ config(materialized='table') }}
-select 1 as fun
-"""
-
-view_model_sql = """
-select * from {{ ref('seed') }}
-
--- establish a macro dependency that trips infinite recursion if not handled
--- depends on: {{ my_infinitely_recursive_macro() }}
-"""
-
-view_model_now_table_sql = """
-{{ config(materialized='table') }}
-select * from {{ ref('seed') }}
-
--- establish a macro dependency that trips infinite recursion if not handled
--- depends on: {{ my_infinitely_recursive_macro() }}
-"""
-
-changed_view_model_sql = """
-select * from no.such.table
-"""
-
-ephemeral_model_sql = """
-{{ config(materialized='ephemeral') }}
-select * from {{ ref('view_model') }}
-"""
-
-changed_ephemeral_model_sql = """
-{{ config(materialized='ephemeral') }}
-select * from no.such.table
-"""
-
-schema_yml = """
-version: 2
-models:
-  - name: view_model
-    columns:
-      - name: id
-        data_tests:
-          - unique:
-              severity: error
-          - not_null
-      - name: name
-"""
-
-no_contract_schema_yml = """
-version: 2
-models:
-  - name: table_model
-    config: {}
-    columns:
-      - name: id
-        data_type: integer
-        data_tests:
-          - unique:
-              severity: error
-          - not_null
-      - name: name
-        data_type: text
-"""
-
-contract_schema_yml = """
-version: 2
-models:
-  - name: table_model
-    config:
-      contract:
-        enforced: True
-    columns:
-      - name: id
-        data_type: integer
-        data_tests:
-          - unique:
-              severity: error
-          - not_null
-      - name: name
-        data_type: text
-"""
-
-modified_contract_schema_yml = """
-version: 2
-models:
-  - name: table_model
-    config:
-      contract:
-        enforced: True
-    columns:
-      - name: id
-        data_type: integer
-        data_tests:
-          - unique:
-              severity: error
-          - not_null
-      - name: user_name
-        data_type: text
-"""
-
-disabled_contract_schema_yml = """
-version: 2
-models:
-  - name: table_model
-    config:
-      contract:
-        enforced: False
-    columns:
-      - name: id
-        data_type: integer
-        data_tests:
-          - unique:
-              severity: error
-          - not_null
-      - name: name
-        data_type: text
-"""
-
-versioned_no_contract_schema_yml = """
-version: 2
-models:
-  - name: table_model
-    config: {}
-    versions:
-      - v: 1
-    columns:
-      - name: id
-        data_type: integer
-        data_tests:
-          - unique:
-              severity: error
-          - not_null
-      - name: name
-        data_type: text
-"""
-
-versioned_contract_schema_yml = """
-version: 2
-models:
-  - name: table_model
-    config:
-      contract:
-        enforced: True
-    versions:
-      - v: 1
-    columns:
-      - name: id
-        data_type: integer
-        data_tests:
-          - unique:
-              severity: error
-          - not_null
-      - name: name
-        data_type: text
-"""
-
-versioned_modified_contract_schema_yml = """
-version: 2
-models:
-  - name: table_model
-    config:
-      contract:
-        enforced: True
-    versions:
-      - v: 1
-    columns:
-      - name: id
-        data_type: integer
-        data_tests:
-          - unique:
-              severity: error
-          - not_null
-      - name: user_name
-        data_type: text
-"""
-
-versioned_disabled_contract_schema_yml = """
-version: 2
-models:
-  - name: table_model
-    config:
-      contract:
-        enforced: False
-    versions:
-      - v: 1
-    columns:
-      - name: id
-        data_type: integer
-        data_tests:
-          - unique:
-              severity: error
-          - not_null
-      - name: name
-        data_type: text
-"""
-
-constraint_schema_yml = """
-version: 2
-models:
-  - name: view_model
-    columns:
-      - name: id
-        data_tests:
-          - unique:
-              severity: error
-          - not_null
-      - name: name
-  - name: table_model
-    config:
-      contract:
-        enforced: True
-    constraints:
-      - type: primary_key
-        columns: [id]
-    columns:
-      - name: id
-        constraints:
-          - type: not_null
-        data_type: integer
-        data_tests:
-          - unique:
-              severity: error
-          - not_null
-      - name: name
-        data_type: text
-"""
-
-modified_column_constraint_schema_yml = """
-version: 2
-models:
-  - name: view_model
-    columns:
-      - name: id
-        data_tests:
-          - unique:
-              severity: error
-          - not_null
-      - name: name
-  - name: table_model
-    config:
-      contract:
-        enforced: True
-    constraints:
-      - type: primary_key
-        columns: [id]
-    columns:
-      - name: id
-        data_type: integer
-        data_tests:
-          - unique:
-              severity: error
-          - not_null
-      - name: name
-        data_type: text
-"""
-
-modified_model_constraint_schema_yml = """
-version: 2
-models:
-  - name: view_model
-    columns:
-      - name: id
-        data_tests:
-          - unique:
-              severity: error
-          - not_null
-      - name: name
-  - name: table_model
-    config:
-      contract:
-        enforced: True
-    columns:
-      - name: id
-        constraints:
-          - type: not_null
-        data_type: integer
-        data_tests:
-          - unique:
-              severity: error
-          - not_null
-      - name: name
-        data_type: text
-"""
-
-exposures_yml = """
-version: 2
-exposures:
-  - name: my_exposure
-    type: application
-    depends_on:
-      - ref('view_model')
-    owner:
-      email: test@example.com
-"""
-
-macros_sql = """
-{% macro my_macro() %}
-    {% do log('in a macro' ) %}
-{% endmacro %}
-"""
-
-infinite_macros_sql = """
-{# trigger infinite recursion if not handled #}
-
-{% macro my_infinitely_recursive_macro() %}
-  {{ return(adapter.dispatch('my_infinitely_recursive_macro')()) }}
-{% endmacro %}
-
-{% macro default__my_infinitely_recursive_macro() %}
-  {% if unmet_condition %}
-    {{ my_infinitely_recursive_macro() }}
-  {% else %}
-    {{ return('') }}
-  {% endif %}
-{% endmacro %}
-"""
-
-snapshot_sql = """
-{% snapshot my_cool_snapshot %}
-
-    {{
-        config(
-            target_database=database,
-            target_schema=schema,
-            unique_key='id',
-            strategy='check',
-            check_cols=['id'],
-        )
-    }}
-    select * from {{ ref('view_model') }}
-
-{% endsnapshot %}
-"""
-
-model_1_sql = """
-select * from {{ ref('seed') }}
-"""
-
-modified_model_1_sql = """
-select * from {{ ref('seed') }}
-order by 1
-"""
-
-model_2_sql = """
-select id from {{ ref('model_1') }}
-"""
-
-modified_model_2_sql = """
-select * from {{ ref('model_1') }}
-order by 1
-"""
-
-
-group_schema_yml = """
-groups:
-  - name: finance
-    owner:
-      email: finance@jaffleshop.com
-
-models:
-  - name: model_1
-    config:
-      group: finance
-  - name: model_2
-    config:
-      group: finance
-"""
-
-
-group_modified_schema_yml = """
-groups:
-  - name: accounting
-    owner:
-      email: finance@jaffleshop.com
-models:
-  - name: model_1
-    config:
-      group: accounting
-  - name: model_2
-    config:
-      group: accounting
-"""
-
-group_modified_fail_schema_yml = """
-groups:
-  - name: finance
-    owner:
-      email: finance@jaffleshop.com
-models:
-  - name: model_1
-    config:
-      group: accounting
-  - name: model_2
-    config:
-      group: finance
-"""
diff --git a/tests/functional/defer_state/test_defer_state.py b/tests/functional/defer_state/test_defer_state.py
deleted file mode 100644
index 45c1d93c..00000000
--- a/tests/functional/defer_state/test_defer_state.py
+++ /dev/null
@@ -1,329 +0,0 @@
-from copy import deepcopy
-import json
-import os
-import shutil
-
-from dbt.contracts.results import RunStatus
-from dbt.exceptions import DbtRuntimeError
-from dbt.tests.util import rm_file, run_dbt, write_file
-import pytest
-
-from tests.functional.defer_state import fixtures
-
-
-class BaseDeferState:
-    @pytest.fixture(scope="class")
-    def models(self):
-        return {
-            "table_model.sql": fixtures.table_model_sql,
-            "view_model.sql": fixtures.view_model_sql,
-            "ephemeral_model.sql": fixtures.ephemeral_model_sql,
-            "schema.yml": fixtures.schema_yml,
-            "exposures.yml": fixtures.exposures_yml,
-        }
-
-    @pytest.fixture(scope="class")
-    def macros(self):
-        return {
-            "macros.sql": fixtures.macros_sql,
-            "infinite_macros.sql": fixtures.infinite_macros_sql,
-        }
-
-    @pytest.fixture(scope="class")
-    def seeds(self):
-        return {
-            "seed.csv": fixtures.seed_csv,
-        }
-
-    @pytest.fixture(scope="class")
-    def snapshots(self):
-        return {
-            "snapshot.sql": fixtures.snapshot_sql,
-        }
-
-    @pytest.fixture(scope="class")
-    def other_schema(self, unique_schema):
-        return unique_schema + "_other"
-
-    @property
-    def project_config_update(self):
-        return {
-            "seeds": {
-                "test": {
-                    "quote_columns": False,
-                }
-            }
-        }
-
-    @pytest.fixture(scope="class")
-    def profiles_config_update(self, dbt_profile_target, unique_schema, other_schema):
-        outputs = {"default": dbt_profile_target, "otherschema": deepcopy(dbt_profile_target)}
-        outputs["default"]["schema"] = unique_schema
-        outputs["otherschema"]["schema"] = other_schema
-        return {"test": {"outputs": outputs, "target": "default"}}
-
-    def copy_state(self, project_root):
-        state_path = os.path.join(project_root, "state")
-        if not os.path.exists(state_path):
-            os.makedirs(state_path)
-        shutil.copyfile(
-            f"{project_root}/target/manifest.json", f"{project_root}/state/manifest.json"
-        )
-
-    def run_and_save_state(self, project_root, with_snapshot=False):
-        results = run_dbt(["seed"])
-        assert len(results) == 1
-        assert not any(r.node.deferred for r in results)
-        results = run_dbt(["run"])
-        assert len(results) == 2
-        assert not any(r.node.deferred for r in results)
-        results = run_dbt(["test"])
-        assert len(results) == 2
-
-        if with_snapshot:
-            results = run_dbt(["snapshot"])
-            assert len(results) == 1
-            assert not any(r.node.deferred for r in results)
-
-        # copy files
-        self.copy_state(project_root)
-
-
-class TestDeferStateUnsupportedCommands(BaseDeferState):
-    def test_no_state(self, project):
-        # no "state" files present, snapshot fails
-        with pytest.raises(DbtRuntimeError):
-            run_dbt(["snapshot", "--state", "state", "--defer"])
-
-
-class TestRunCompileState(BaseDeferState):
-    def test_run_and_compile_defer(self, project):
-        self.run_and_save_state(project.project_root)
-
-        # defer test, it succeeds
-        # Change directory to ensure that state directory is underneath
-        # project directory.
-        os.chdir(project.profiles_dir)
-        results = run_dbt(["compile", "--state", "state", "--defer"])
-        assert len(results.results) == 6
-        assert results.results[0].node.name == "seed"
-
-
-class TestSnapshotState(BaseDeferState):
-    def test_snapshot_state_defer(self, project):
-        self.run_and_save_state(project.project_root)
-        # snapshot succeeds without --defer
-        run_dbt(["snapshot"])
-        # copy files
-        self.copy_state(project.project_root)
-        # defer test, it succeeds
-        run_dbt(["snapshot", "--state", "state", "--defer"])
-        # favor_state test, it succeeds
-        run_dbt(["snapshot", "--state", "state", "--defer", "--favor-state"])
-
-
-class TestRunDeferState(BaseDeferState):
-    def test_run_and_defer(self, project, unique_schema, other_schema):
-        project.create_test_schema(other_schema)
-        self.run_and_save_state(project.project_root)
-
-        # test tests first, because run will change things
-        # no state, wrong schema, failure.
-        run_dbt(["test", "--target", "otherschema"], expect_pass=False)
-
-        # test generate docs
-        # no state, wrong schema, empty nodes
-        catalog = run_dbt(["docs", "generate", "--target", "otherschema"])
-        assert not catalog.nodes
-
-        # no state, run also fails
-        run_dbt(["run", "--target", "otherschema"], expect_pass=False)
-
-        # defer test, it succeeds
-        results = run_dbt(
-            ["test", "-m", "view_model+", "--state", "state", "--defer", "--target", "otherschema"]
-        )
-
-        # defer docs generate with state, catalog refers schema from the happy times
-        catalog = run_dbt(
-            [
-                "docs",
-                "generate",
-                "-m",
-                "view_model+",
-                "--state",
-                "state",
-                "--defer",
-                "--target",
-                "otherschema",
-            ]
-        )
-        assert "seed.test.seed" not in catalog.nodes
-
-        # with state it should work though
-        results = run_dbt(
-            ["run", "-m", "view_model", "--state", "state", "--defer", "--target", "otherschema"]
-        )
-        assert other_schema not in results[0].node.compiled_code
-        assert unique_schema in results[0].node.compiled_code
-
-        with open("target/manifest.json") as fp:
-            data = json.load(fp)
-        assert data["nodes"]["seed.test.seed"]["deferred"]
-
-        assert len(results) == 1
-
-
-class TestRunDeferStateChangedModel(BaseDeferState):
-    def test_run_defer_state_changed_model(self, project):
-        self.run_and_save_state(project.project_root)
-
-        # change "view_model"
-        write_file(fixtures.changed_view_model_sql, "models", "view_model.sql")
-
-        # the sql here is just wrong, so it should fail
-        run_dbt(
-            ["run", "-m", "view_model", "--state", "state", "--defer", "--target", "otherschema"],
-            expect_pass=False,
-        )
-        # but this should work since we just use the old happy model
-        run_dbt(
-            ["run", "-m", "table_model", "--state", "state", "--defer", "--target", "otherschema"],
-            expect_pass=True,
-        )
-
-        # change "ephemeral_model"
-        write_file(fixtures.changed_ephemeral_model_sql, "models", "ephemeral_model.sql")
-        # this should fail because the table model refs a broken ephemeral
-        # model, which it should see
-        run_dbt(
-            ["run", "-m", "table_model", "--state", "state", "--defer", "--target", "otherschema"],
-            expect_pass=False,
-        )
-
-
-class TestRunDeferStateIFFNotExists(BaseDeferState):
-    def test_run_defer_iff_not_exists(self, project, unique_schema, other_schema):
-        project.create_test_schema(other_schema)
-        self.run_and_save_state(project.project_root)
-
-        results = run_dbt(["seed", "--target", "otherschema"])
-        assert len(results) == 1
-        results = run_dbt(["run", "--state", "state", "--defer", "--target", "otherschema"])
-        assert len(results) == 2
-
-        # because the seed now exists in our "other" schema, we should prefer it over the one
-        # available from state
-        assert other_schema in results[0].node.compiled_code
-
-        # this time with --favor-state: even though the seed now exists in our "other" schema,
-        # we should still favor the one available from state
-        results = run_dbt(
-            ["run", "--state", "state", "--defer", "--favor-state", "--target", "otherschema"]
-        )
-        assert len(results) == 2
-        assert other_schema not in results[0].node.compiled_code
-
-
-class TestDeferStateDeletedUpstream(BaseDeferState):
-    def test_run_defer_deleted_upstream(self, project, unique_schema, other_schema):
-        project.create_test_schema(other_schema)
-        self.run_and_save_state(project.project_root)
-
-        # remove "ephemeral_model" + change "table_model"
-        rm_file("models", "ephemeral_model.sql")
-        write_file(fixtures.changed_table_model_sql, "models", "table_model.sql")
-
-        # ephemeral_model is now gone. previously this caused a
-        # keyerror (dbt#2875), now it should pass
-        run_dbt(
-            ["run", "-m", "view_model", "--state", "state", "--defer", "--target", "otherschema"],
-            expect_pass=True,
-        )
-
-        # despite deferral, we should use models just created in our schema
-        results = run_dbt(["test", "--state", "state", "--defer", "--target", "otherschema"])
-        assert other_schema in results[0].node.compiled_code
-
-        # this time with --favor-state: prefer the models in the "other" schema, even though they exist in ours
-        run_dbt(
-            [
-                "run",
-                "-m",
-                "view_model",
-                "--state",
-                "state",
-                "--defer",
-                "--favor-state",
-                "--target",
-                "otherschema",
-            ],
-            expect_pass=True,
-        )
-        results = run_dbt(["test", "--state", "state", "--defer", "--favor-state"])
-        assert other_schema not in results[0].node.compiled_code
-
-
-class TestDeferStateFlag(BaseDeferState):
-    def test_defer_state_flag(self, project, unique_schema, other_schema):
-        project.create_test_schema(other_schema)
-
-        # test that state deferral works correctly
-        run_dbt(["compile", "--target-path", "target_compile"])
-        write_file(fixtures.view_model_now_table_sql, "models", "table_model.sql")
-
-        results = run_dbt(["ls", "--select", "state:modified", "--state", "target_compile"])
-        assert results == ["test.table_model"]
-
-        run_dbt(["seed", "--target", "otherschema", "--target-path", "target_otherschema"])
-
-        # this will fail because we haven't loaded the seed in the default schema
-        run_dbt(
-            [
-                "run",
-                "--select",
-                "state:modified",
-                "--defer",
-                "--state",
-                "target_compile",
-                "--favor-state",
-            ],
-            expect_pass=False,
-        )
-
-        # this will fail because we haven't passed in --state
-        with pytest.raises(
-            DbtRuntimeError, match="Got a state selector method, but no comparison manifest"
-        ):
-            run_dbt(
-                [
-                    "run",
-                    "--select",
-                    "state:modified",
-                    "--defer",
-                    "--defer-state",
-                    "target_otherschema",
-                    "--favor-state",
-                ],
-                expect_pass=False,
-            )
-
-        # this will succeed because we've loaded the seed in other schema and are successfully deferring to it instead
-        results = run_dbt(
-            [
-                "run",
-                "--select",
-                "state:modified",
-                "--defer",
-                "--state",
-                "target_compile",
-                "--defer-state",
-                "target_otherschema",
-                "--favor-state",
-            ]
-        )
-
-        assert len(results.results) == 1
-        assert results.results[0].status == RunStatus.Success
-        assert results.results[0].node.name == "table_model"
-        assert results.results[0].adapter_response["rows_affected"] == 2
diff --git a/tests/functional/defer_state/test_group_updates.py b/tests/functional/defer_state/test_group_updates.py
deleted file mode 100644
index 5f3e8006..00000000
--- a/tests/functional/defer_state/test_group_updates.py
+++ /dev/null
@@ -1,108 +0,0 @@
-import os
-
-from dbt.exceptions import ParsingError
-from dbt.tests.util import copy_file, run_dbt, write_file
-import pytest
-
-from tests.functional.defer_state import fixtures
-
-
-class GroupSetup:
-    @pytest.fixture(scope="class")
-    def models(self):
-        return {
-            "model_1.sql": fixtures.model_1_sql,
-            "model_2.sql": fixtures.model_2_sql,
-            "schema.yml": fixtures.group_schema_yml,
-        }
-
-    @pytest.fixture(scope="class")
-    def seeds(self):
-        return {"seed.csv": fixtures.seed_csv}
-
-    def group_setup(self):
-        # save initial state
-        run_dbt(["seed"])
-        results = run_dbt(["compile"])
-
-        # add sanity checks for first result
-        assert len(results) == 3
-        seed_result = results[0].node
-        assert seed_result.unique_id == "seed.test.seed"
-        model_1_result = results[1].node
-        assert model_1_result.unique_id == "model.test.model_1"
-        assert model_1_result.group == "finance"
-        model_2_result = results[2].node
-        assert model_2_result.unique_id == "model.test.model_2"
-        assert model_2_result.group == "finance"
-
-
-class TestFullyModifiedGroups(GroupSetup):
-    def test_changed_groups(self, project):
-        self.group_setup()
-
-        # copy manifest.json to "state" directory
-        os.makedirs("state")
-        target_path = os.path.join(project.project_root, "target")
-        copy_file(target_path, "manifest.json", project.project_root, ["state", "manifest.json"])
-
-        # update group name, modify model so it gets picked up
-        write_file(fixtures.modified_model_1_sql, "models", "model_1.sql")
-        write_file(fixtures.modified_model_2_sql, "models", "model_2.sql")
-        write_file(fixtures.group_modified_schema_yml, "models", "schema.yml")
-
-        # this test is flaky if you don't clean first before the build
-        run_dbt(["clean"])
-        # only thing in results should be model_1
-        results = run_dbt(["build", "-s", "state:modified", "--defer", "--state", "./state"])
-
-        assert len(results) == 2
-        model_1_result = results[0].node
-        assert model_1_result.unique_id == "model.test.model_1"
-        assert model_1_result.group == "accounting"  # new group name!
-        model_2_result = results[1].node
-        assert model_2_result.unique_id == "model.test.model_2"
-        assert model_2_result.group == "accounting"  # new group name!
-
-
-class TestPartiallyModifiedGroups(GroupSetup):
-    def test_changed_groups(self, project):
-        self.group_setup()
-
-        # copy manifest.json to "state" directory
-        os.makedirs("state")
-        target_path = os.path.join(project.project_root, "target")
-        copy_file(target_path, "manifest.json", project.project_root, ["state", "manifest.json"])
-
-        # update group name, modify model so it gets picked up
-        write_file(fixtures.modified_model_1_sql, "models", "model_1.sql")
-        write_file(fixtures.group_modified_schema_yml, "models", "schema.yml")
-
-        # this test is flaky if you don't clean first before the build
-        run_dbt(["clean"])
-        # only thing in results should be model_1
-        results = run_dbt(["build", "-s", "state:modified", "--defer", "--state", "./state"])
-
-        assert len(results) == 1
-        model_1_result = results[0].node
-        assert model_1_result.unique_id == "model.test.model_1"
-        assert model_1_result.group == "accounting"  # new group name!
-
-
-class TestBadGroups(GroupSetup):
-    def test_changed_groups(self, project):
-        self.group_setup()
-
-        # copy manifest.json to "state" directory
-        os.makedirs("state")
-        target_path = os.path.join(project.project_root, "target")
-        copy_file(target_path, "manifest.json", project.project_root, ["state", "manifest.json"])
-
-        # update group with invalid name, modify model so it gets picked up
-        write_file(fixtures.modified_model_1_sql, "models", "model_1.sql")
-        write_file(fixtures.group_modified_fail_schema_yml, "models", "schema.yml")
-
-        # this test is flaky if you don't clean first before the build
-        run_dbt(["clean"])
-        with pytest.raises(ParsingError, match="Invalid group 'accounting'"):
-            run_dbt(["build", "-s", "state:modified", "--defer", "--state", "./state"])
diff --git a/tests/functional/defer_state/test_modified_state.py b/tests/functional/defer_state/test_modified_state.py
deleted file mode 100644
index e108fe9f..00000000
--- a/tests/functional/defer_state/test_modified_state.py
+++ /dev/null
@@ -1,964 +0,0 @@
-import os
-import random
-import shutil
-import string
-
-from dbt.exceptions import ContractBreakingChangeError
-from dbt.tests.util import get_manifest, update_config_file, write_file
-from dbt_common.exceptions import CompilationError
-import pytest
-
-from tests.functional.defer_state import fixtures
-from tests.functional.utils import run_dbt, run_dbt_and_capture
-
-
-class BaseModifiedState:
-    @pytest.fixture(scope="class")
-    def models(self):
-        return {
-            "table_model.sql": fixtures.table_model_sql,
-            "view_model.sql": fixtures.view_model_sql,
-            "ephemeral_model.sql": fixtures.ephemeral_model_sql,
-            "schema.yml": fixtures.schema_yml,
-            "exposures.yml": fixtures.exposures_yml,
-        }
-
-    @pytest.fixture(scope="class")
-    def macros(self):
-        return {
-            "macros.sql": fixtures.macros_sql,
-            "infinite_macros.sql": fixtures.infinite_macros_sql,
-        }
-
-    @pytest.fixture(scope="class")
-    def seeds(self):
-        return {"seed.csv": fixtures.seed_csv}
-
-    @property
-    def project_config_update(self):
-        return {
-            "seeds": {
-                "test": {
-                    "quote_columns": False,
-                }
-            }
-        }
-
-    def copy_state(self):
-        if not os.path.exists("state"):
-            os.makedirs("state")
-        shutil.copyfile("target/manifest.json", "state/manifest.json")
-
-    def run_and_save_state(self):
-        run_dbt(["seed"])
-        run_dbt(["run"])
-        self.copy_state()
-
-
-class TestChangedSeedContents(BaseModifiedState):
-    def test_changed_seed_contents_state(self, project):
-        self.run_and_save_state()
-        results = run_dbt(
-            ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"],
-            expect_pass=True,
-        )
-        assert len(results) == 0
-
-        results = run_dbt(
-            [
-                "ls",
-                "--resource-type",
-                "seed",
-                "--exclude",
-                "state:unmodified",
-                "--state",
-                "./state",
-            ],
-            expect_pass=True,
-        )
-        assert len(results) == 0
-
-        results = run_dbt(
-            [
-                "ls",
-                "--resource-type",
-                "seed",
-                "--select",
-                "state:unmodified",
-                "--state",
-                "./state",
-            ],
-            expect_pass=True,
-        )
-        assert len(results) == 1
-
-        # add a new row to the seed
-        changed_seed_contents = fixtures.seed_csv + "\n" + "3,carl"
-        write_file(changed_seed_contents, "seeds", "seed.csv")
-
-        results = run_dbt(
-            ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"]
-        )
-        assert len(results) == 1
-        assert results[0] == "test.seed"
-
-        results = run_dbt(
-            [
-                "ls",
-                "--resource-type",
-                "seed",
-                "--exclude",
-                "state:unmodified",
-                "--state",
-                "./state",
-            ]
-        )
-        assert len(results) == 1
-        assert results[0] == "test.seed"
-
- results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "state:unmodified", "--state", "./state"] - ) - assert len(results) == 0 - - results = run_dbt(["ls", "--select", "state:modified", "--state", "./state"]) - assert len(results) == 1 - assert results[0] == "test.seed" - - results = run_dbt(["ls", "--exclude", "state:unmodified", "--state", "./state"]) - assert len(results) == 1 - assert results[0] == "test.seed" - - results = run_dbt(["ls", "--select", "state:unmodified", "--state", "./state"]) - assert len(results) == 6 - - results = run_dbt(["ls", "--select", "state:modified+", "--state", "./state"]) - assert len(results) == 7 - assert set(results) == { - "test.seed", - "test.table_model", - "test.view_model", - "test.ephemeral_model", - "test.not_null_view_model_id", - "test.unique_view_model_id", - "exposure:test.my_exposure", - } - - results = run_dbt(["ls", "--select", "state:unmodified+", "--state", "./state"]) - assert len(results) == 6 - assert set(results) == { - "test.table_model", - "test.view_model", - "test.ephemeral_model", - "test.not_null_view_model_id", - "test.unique_view_model_id", - "exposure:test.my_exposure", - } - - shutil.rmtree("./state") - self.copy_state() - - # make a very big seed - # assume each line is ~2 bytes + len(name) - target_size = 1 * 1024 * 1024 - line_size = 64 - num_lines = target_size // line_size - maxlines = num_lines + 4 - seed_lines = [fixtures.seed_csv] - for idx in range(4, maxlines): - value = "".join(random.choices(string.ascii_letters, k=62)) - seed_lines.append(f"{idx},{value}") - seed_contents = "\n".join(seed_lines) - write_file(seed_contents, "seeds", "seed.csv") - - # now if we run again, we should get a warning - results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"] - ) - assert len(results) == 1 - assert results[0] == "test.seed" - - with pytest.raises(CompilationError) as exc: - run_dbt( - [ - "--warn-error", - "ls", - "--resource-type", - "seed", - "--select", - "state:modified", - "--state", - "./state", - ] - ) - assert ">1MB" in str(exc.value) - - # now check if unmodified returns none - results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "state:unmodified", "--state", "./state"] - ) - assert len(results) == 0 - - shutil.rmtree("./state") - self.copy_state() - - # once it"s in path mode, we don"t mark it as modified if it changes - write_file(seed_contents + "\n1,test", "seeds", "seed.csv") - - results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"], - expect_pass=True, - ) - assert len(results) == 0 - - results = run_dbt( - [ - "ls", - "--resource-type", - "seed", - "--exclude", - "state:unmodified", - "--state", - "./state", - ], - expect_pass=True, - ) - assert len(results) == 0 - - results = run_dbt( - [ - "ls", - "--resource-type", - "seed", - "--select", - "state:unmodified", - "--state", - "./state", - ], - expect_pass=True, - ) - assert len(results) == 1 - - -class TestChangedSeedConfig(BaseModifiedState): - def test_changed_seed_config(self, project): - self.run_and_save_state() - results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"], - expect_pass=True, - ) - assert len(results) == 0 - - results = run_dbt( - [ - "ls", - "--resource-type", - "seed", - "--exclude", - "state:unmodified", - "--state", - "./state", - ], - expect_pass=True, - ) - assert len(results) == 0 - - results = run_dbt( - [ - "ls", - "--resource-type", - "seed", 
- "--select", - "state:unmodified", - "--state", - "./state", - ], - expect_pass=True, - ) - assert len(results) == 1 - - update_config_file({"seeds": {"test": {"quote_columns": False}}}, "dbt_project.yml") - - # quoting change -> seed changed - results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "state:modified", "--state", "./state"] - ) - assert len(results) == 1 - assert results[0] == "test.seed" - - results = run_dbt( - [ - "ls", - "--resource-type", - "seed", - "--exclude", - "state:unmodified", - "--state", - "./state", - ] - ) - assert len(results) == 1 - assert results[0] == "test.seed" - - results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "state:unmodified", "--state", "./state"] - ) - assert len(results) == 0 - - -class TestUnrenderedConfigSame(BaseModifiedState): - def test_unrendered_config_same(self, project): - self.run_and_save_state() - results = run_dbt( - ["ls", "--resource-type", "model", "--select", "state:modified", "--state", "./state"], - expect_pass=True, - ) - assert len(results) == 0 - - results = run_dbt( - [ - "ls", - "--resource-type", - "model", - "--exclude", - "state:unmodified", - "--state", - "./state", - ], - expect_pass=True, - ) - assert len(results) == 0 - - results = run_dbt( - [ - "ls", - "--resource-type", - "model", - "--select", - "state:unmodified", - "--state", - "./state", - ], - expect_pass=True, - ) - assert len(results) == 3 - - # although this is the default value, dbt will recognize it as a change - # for previously-unconfigured models, because it"s been explicitly set - update_config_file({"models": {"test": {"materialized": "view"}}}, "dbt_project.yml") - results = run_dbt( - ["ls", "--resource-type", "model", "--select", "state:modified", "--state", "./state"] - ) - assert len(results) == 1 - assert results[0] == "test.view_model" - - # converse of above statement - results = run_dbt( - [ - "ls", - "--resource-type", - "model", - "--exclude", - "state:unmodified", - "--state", - "./state", - ] - ) - assert len(results) == 1 - assert results[0] == "test.view_model" - - results = run_dbt( - [ - "ls", - "--resource-type", - "model", - "--select", - "state:unmodified", - "--state", - "./state", - ] - ) - assert len(results) == 2 - assert set(results) == { - "test.table_model", - "test.ephemeral_model", - } - - -class TestChangedModelContents(BaseModifiedState): - def test_changed_model_contents(self, project): - self.run_and_save_state() - results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) - assert len(results) == 0 - - table_model_update = """ - {{ config(materialized="table") }} - - select * from {{ ref("seed") }} - """ - - write_file(table_model_update, "models", "table_model.sql") - - results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - results = run_dbt(["run", "--exclude", "state:unmodified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - -class TestNewMacro(BaseModifiedState): - def test_new_macro(self, project): - self.run_and_save_state() - - new_macro = """ - {% macro my_other_macro() %} - {% endmacro %} - """ - - # add a new macro to a new file - write_file(new_macro, "macros", "second_macro.sql") - - results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) - assert len(results) == 0 - - os.remove("macros/second_macro.sql") - # add a new macro to the existing file - with 
open("macros/macros.sql", "a") as fp: - fp.write(new_macro) - - results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) - assert len(results) == 0 - - results = run_dbt(["run", "--exclude", "state:unmodified", "--state", "./state"]) - assert len(results) == 0 - - -class TestChangedMacroContents(BaseModifiedState): - def test_changed_macro_contents(self, project): - self.run_and_save_state() - - # modify an existing macro - updated_macro = """ - {% macro my_macro() %} - {% do log("in a macro", info=True) %} - {% endmacro %} - """ - write_file(updated_macro, "macros", "macros.sql") - - # table_model calls this macro - results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) - assert len(results) == 1 - - results = run_dbt(["run", "--exclude", "state:unmodified", "--state", "./state"]) - assert len(results) == 1 - - -class TestChangedExposure(BaseModifiedState): - def test_changed_exposure(self, project): - self.run_and_save_state() - - # add an "owner.name" to existing exposure - updated_exposure = fixtures.exposures_yml + "\n name: John Doe\n" - write_file(updated_exposure, "models", "exposures.yml") - - results = run_dbt(["run", "--models", "+state:modified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "view_model" - - results = run_dbt(["run", "--exclude", "state:unmodified", "--state", "./state"]) - assert len(results) == 0 - - -class TestChangedContractUnversioned(BaseModifiedState): - MODEL_UNIQUE_ID = "model.test.table_model" - CONTRACT_SCHEMA_YML = fixtures.contract_schema_yml - MODIFIED_SCHEMA_YML = fixtures.modified_contract_schema_yml - DISABLED_SCHEMA_YML = fixtures.disabled_contract_schema_yml - NO_CONTRACT_SCHEMA_YML = fixtures.no_contract_schema_yml - - def test_changed_contract(self, project): - self.run_and_save_state() - - # update contract for table_model - write_file(self.CONTRACT_SCHEMA_YML, "models", "schema.yml") - - # This will find the table_model node modified both through a config change - # and by a non-breaking change to contract: true - results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - results = run_dbt(["run", "--exclude", "state:unmodified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - manifest = get_manifest(project.project_root) - model_unique_id = self.MODEL_UNIQUE_ID - model = manifest.nodes[model_unique_id] - expected_unrendered_config = {"contract": {"enforced": True}, "materialized": "table"} - assert model.unrendered_config == expected_unrendered_config - - # Run it again with "state:modified:contract", still finds modified due to contract: true - results = run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"]) - assert len(results) == 1 - manifest = get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - first_contract_checksum = model.contract.checksum - assert first_contract_checksum - # save a new state - self.copy_state() - - # This should raise because a column name has changed - write_file(self.MODIFIED_SCHEMA_YML, "models", "schema.yml") - results = run_dbt(["run"], expect_pass=False) - assert len(results) == 2 - manifest = get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - second_contract_checksum = model.contract.checksum - # double check different contract_checksums - assert first_contract_checksum != second_contract_checksum 
- - _, logs = run_dbt_and_capture( - ["run", "--models", "state:modified.contract", "--state", "./state"], expect_pass=False - ) - expected_error = "This model has an enforced contract that failed." - expected_warning = "While comparing to previous project state, dbt detected a breaking change to an unversioned model" - expected_change = "Please ensure the name, data_type, and number of columns in your contract match the columns in your model's definition" - assert expected_error in logs - assert expected_warning in logs - assert expected_change in logs - - # Go back to schema file without contract. Should throw a warning. - write_file(self.NO_CONTRACT_SCHEMA_YML, "models", "schema.yml") - _, logs = run_dbt_and_capture( - ["run", "--models", "state:modified.contract", "--state", "./state"] - ) - expected_warning = "While comparing to previous project state, dbt detected a breaking change to an unversioned model" - expected_change = "Contract enforcement was removed" - - # Now disable the contract. Should throw a warning - force warning into an error. - write_file(self.DISABLED_SCHEMA_YML, "models", "schema.yml") - with pytest.raises(CompilationError): - _, logs = run_dbt_and_capture( - [ - "--warn-error", - "run", - "--models", - "state:modified.contract", - "--state", - "./state", - ] - ) - expected_warning = "While comparing to previous project state, dbt detected a breaking change to an unversioned model" - expected_change = "Contract enforcement was removed" - - -class TestChangedContractVersioned(BaseModifiedState): - MODEL_UNIQUE_ID = "model.test.table_model.v1" - CONTRACT_SCHEMA_YML = fixtures.versioned_contract_schema_yml - MODIFIED_SCHEMA_YML = fixtures.versioned_modified_contract_schema_yml - DISABLED_SCHEMA_YML = fixtures.versioned_disabled_contract_schema_yml - NO_CONTRACT_SCHEMA_YML = fixtures.versioned_no_contract_schema_yml - - def test_changed_contract_versioned(self, project): - self.run_and_save_state() - - # update contract for table_model - write_file(self.CONTRACT_SCHEMA_YML, "models", "schema.yml") - - # This will find the table_model node modified both through a config change - # and by a non-breaking change to contract: true - results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - results = run_dbt(["run", "--exclude", "state:unmodified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - manifest = get_manifest(project.project_root) - model_unique_id = self.MODEL_UNIQUE_ID - model = manifest.nodes[model_unique_id] - expected_unrendered_config = {"contract": {"enforced": True}, "materialized": "table"} - assert model.unrendered_config == expected_unrendered_config - - # Run it again with "state:modified:contract", still finds modified due to contract: true - results = run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"]) - assert len(results) == 1 - manifest = get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - first_contract_checksum = model.contract.checksum - assert first_contract_checksum - # save a new state - self.copy_state() - - # This should raise because a column name has changed - write_file(self.MODIFIED_SCHEMA_YML, "models", "schema.yml") - results = run_dbt(["run"], expect_pass=False) - assert len(results) == 2 - manifest = get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - second_contract_checksum = model.contract.checksum - 
# double check different contract_checksums - assert first_contract_checksum != second_contract_checksum - with pytest.raises(ContractBreakingChangeError): - results = run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"]) - - # Go back to schema file without contract. Should raise an error. - write_file(self.NO_CONTRACT_SCHEMA_YML, "models", "schema.yml") - with pytest.raises(ContractBreakingChangeError): - results = run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"]) - - # Now disable the contract. Should raise an error. - write_file(self.DISABLED_SCHEMA_YML, "models", "schema.yml") - with pytest.raises(ContractBreakingChangeError): - results = run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"]) - - -class TestChangedConstraintUnversioned(BaseModifiedState): - def test_changed_constraint(self, project): - self.run_and_save_state() - - # update constraint for table_model - write_file(fixtures.constraint_schema_yml, "models", "schema.yml") - - # This will find the table_model node modified both through adding constraint - # and by a non-breaking change to contract: true - results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - results = run_dbt(["run", "--exclude", "state:unmodified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - manifest = get_manifest(project.project_root) - model_unique_id = "model.test.table_model" - model = manifest.nodes[model_unique_id] - expected_unrendered_config = {"contract": {"enforced": True}, "materialized": "table"} - assert model.unrendered_config == expected_unrendered_config - - # Run it again with "state:modified:contract", still finds modified due to contract: true - results = run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"]) - assert len(results) == 1 - manifest = get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - first_contract_checksum = model.contract.checksum - assert first_contract_checksum - # save a new state - self.copy_state() - - # This should raise because a column level constraint was removed - write_file(fixtures.modified_column_constraint_schema_yml, "models", "schema.yml") - # we don't have a way to know this failed unless we have a previous state to refer to, so the run succeeds - results = run_dbt(["run"]) - assert len(results) == 2 - manifest = get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - second_contract_checksum = model.contract.checksum - # double check different contract_checksums - assert first_contract_checksum != second_contract_checksum - # since the models are unversioned, they raise a warning but not an error - _, logs = run_dbt_and_capture( - ["run", "--models", "state:modified.contract", "--state", "./state"] - ) - expected_warning = "While comparing to previous project state, dbt detected a breaking change to an unversioned model" - expected_change = "Enforced column level constraints were removed" - assert expected_warning in logs - assert expected_change in logs - - # This should raise because a model level constraint was removed (primary_key on id) - write_file(fixtures.modified_model_constraint_schema_yml, "models", "schema.yml") - # we don't have a way to know this failed unless we have a previous state to refer to, so the run succeeds - results = run_dbt(["run"]) - assert len(results) == 2 - manifest = 
get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - second_contract_checksum = model.contract.checksum - # double check different contract_checksums - assert first_contract_checksum != second_contract_checksum - _, logs = run_dbt_and_capture( - ["run", "--models", "state:modified.contract", "--state", "./state"] - ) - expected_warning = "While comparing to previous project state, dbt detected a breaking change to an unversioned model" - expected_change = "Enforced model level constraints were removed" - assert expected_warning in logs - assert expected_change in logs - - -class TestChangedMaterializationConstraint(BaseModifiedState): - def test_changed_materialization(self, project): - self.run_and_save_state() - - # update constraint for table_model - write_file(fixtures.constraint_schema_yml, "models", "schema.yml") - - # This will find the table_model node modified both through adding constraint - # and by a non-breaking change to contract: true - results = run_dbt(["run", "--models", "state:modified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - results = run_dbt(["run", "--exclude", "state:unmodified", "--state", "./state"]) - assert len(results) == 1 - assert results[0].node.name == "table_model" - - manifest = get_manifest(project.project_root) - model_unique_id = "model.test.table_model" - model = manifest.nodes[model_unique_id] - expected_unrendered_config = {"contract": {"enforced": True}, "materialized": "table"} - assert model.unrendered_config == expected_unrendered_config - - # Run it again with "state:modified:contract", still finds modified due to contract: true - results = run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"]) - assert len(results) == 1 - manifest = get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - first_contract_checksum = model.contract.checksum - assert first_contract_checksum - # save a new state - self.copy_state() - - # This should raise because materialization changed from table to view - write_file(fixtures.table_model_now_view_sql, "models", "table_model.sql") - # we don't have a way to know this failed unless we have a previous state to refer to, so the run succeeds - results = run_dbt(["run"]) - assert len(results) == 2 - manifest = get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - second_contract_checksum = model.contract.checksum - # double check different contract_checksums - assert first_contract_checksum != second_contract_checksum - _, logs = run_dbt_and_capture( - ["run", "--models", "state:modified.contract", "--state", "./state"] - ) - expected_warning = "While comparing to previous project state, dbt detected a breaking change to an unversioned model" - expected_change = "Materialization changed with enforced constraints" - assert expected_warning in logs - assert expected_change in logs - - # This should not raise because materialization changed from table to incremental, both enforce constraints - write_file(fixtures.table_model_now_incremental_sql, "models", "table_model.sql") - # we don't have a way to know this failed unless we have a previous state to refer to, so the run succeeds - results = run_dbt(["run"]) - assert len(results) == 2 - - # This should pass because materialization changed from view to table which is the same as just adding new constraint, not breaking - write_file(fixtures.view_model_now_table_sql, "models", "view_model.sql") - 
write_file(fixtures.table_model_sql, "models", "table_model.sql") - results = run_dbt(["run"]) - assert len(results) == 2 - manifest = get_manifest(project.project_root) - model = manifest.nodes[model_unique_id] - second_contract_checksum = model.contract.checksum - # contract_checksums should be equal because we only save constraint related changes if the materialization is table/incremental - assert first_contract_checksum == second_contract_checksum - run_dbt(["run", "--models", "state:modified.contract", "--state", "./state"]) - assert len(results) == 2 - - -my_model_sql = """ -select 1 as id -""" - -modified_my_model_sql = """ --- a comment -select 1 as id -""" - -modified_my_model_non_breaking_sql = """ --- a comment -select 1 as id, 'blue' as color -""" - -my_model_yml = """ -models: - - name: my_model - latest_version: 1 - config: - contract: - enforced: true - columns: - - name: id - data_type: int - versions: - - v: 1 -""" - -modified_my_model_yml = """ -models: - - name: my_model - latest_version: 1 - config: - contract: - enforced: true - columns: - - name: id - data_type: text - versions: - - v: 1 -""" - -modified_my_model_non_breaking_yml = """ -models: - - name: my_model - latest_version: 1 - config: - contract: - enforced: true - columns: - - name: id - data_type: int - - name: color - data_type: text - versions: - - v: 1 -""" - - -class TestModifiedBodyAndContract: - @pytest.fixture(scope="class") - def models(self): - return { - "my_model.sql": my_model_sql, - "my_model.yml": my_model_yml, - } - - def copy_state(self): - if not os.path.exists("state"): - os.makedirs("state") - shutil.copyfile("target/manifest.json", "state/manifest.json") - - def test_modified_body_and_contract(self, project): - results = run_dbt(["run"]) - assert len(results) == 1 - self.copy_state() - - # Change both body and contract in a *breaking* way (= changing data_type of existing column) - write_file(modified_my_model_yml, "models", "my_model.yml") - write_file(modified_my_model_sql, "models", "my_model.sql") - - # Should raise even without specifying state:modified.contract - with pytest.raises(ContractBreakingChangeError): - results = run_dbt(["run", "-s", "state:modified", "--state", "./state"]) - - with pytest.raises(ContractBreakingChangeError): - results = run_dbt(["run", "--exclude", "state:unmodified", "--state", "./state"]) - - # Change both body and contract in a *non-breaking* way (= adding a new column) - write_file(modified_my_model_non_breaking_yml, "models", "my_model.yml") - write_file(modified_my_model_non_breaking_sql, "models", "my_model.sql") - - # Should pass - run_dbt(["run", "-s", "state:modified", "--state", "./state"]) - - # The model's contract has changed, even if non-breaking, so it should be selected by 'state:modified.contract' - results = run_dbt(["list", "-s", "state:modified.contract", "--state", "./state"]) - assert results == ["test.my_model.v1"] - - -modified_table_model_access_yml = """ -version: 2 -models: - - name: table_model - access: public -""" - - -class TestModifiedAccess(BaseModifiedState): - def test_changed_access(self, project): - self.run_and_save_state() - - # No access change - assert not run_dbt(["list", "-s", "state:modified", "--state", "./state"]) - - # Modify access (protected -> public) - write_file(modified_table_model_access_yml, "models", "schema.yml") - assert run_dbt(["list", "-s", "state:modified", "--state", "./state"]) - - results = run_dbt(["list", "-s", "state:modified", "--state", "./state"]) - assert results == 
["test.table_model"] - - -modified_table_model_access_yml = """ -version: 2 -models: - - name: table_model - deprecation_date: 2020-01-01 -""" - - -class TestModifiedDeprecationDate(BaseModifiedState): - def test_changed_access(self, project): - self.run_and_save_state() - - # No access change - assert not run_dbt(["list", "-s", "state:modified", "--state", "./state"]) - - # Modify deprecation_date (None -> 2020-01-01) - write_file(modified_table_model_access_yml, "models", "schema.yml") - assert run_dbt(["list", "-s", "state:modified", "--state", "./state"]) - - results = run_dbt(["list", "-s", "state:modified", "--state", "./state"]) - assert results == ["test.table_model"] - - -modified_table_model_version_yml = """ -version: 2 -models: - - name: table_model - versions: - - v: 1 - defined_in: table_model -""" - - -class TestModifiedVersion(BaseModifiedState): - def test_changed_access(self, project): - self.run_and_save_state() - - # Change version (null -> v1) - write_file(modified_table_model_version_yml, "models", "schema.yml") - - results = run_dbt(["list", "-s", "state:modified", "--state", "./state"]) - assert results == ["test.table_model.v1"] - - -table_model_latest_version_yml = """ -version: 2 -models: - - name: table_model - latest_version: 1 - versions: - - v: 1 - defined_in: table_model -""" - - -modified_table_model_latest_version_yml = """ -version: 2 -models: - - name: table_model - latest_version: 2 - versions: - - v: 1 - defined_in: table_model - - v: 2 -""" - - -class TestModifiedLatestVersion(BaseModifiedState): - def test_changed_access(self, project): - # Setup initial latest_version: 1 - write_file(table_model_latest_version_yml, "models", "schema.yml") - - self.run_and_save_state() - - # Bump latest version - write_file(fixtures.table_model_sql, "models", "table_model_v2.sql") - write_file(modified_table_model_latest_version_yml, "models", "schema.yml") - - results = run_dbt(["list", "-s", "state:modified", "--state", "./state"]) - assert results == ["test.table_model.v1", "test.table_model.v2"] diff --git a/tests/functional/defer_state/test_run_results_state.py b/tests/functional/defer_state/test_run_results_state.py deleted file mode 100644 index ae5941c7..00000000 --- a/tests/functional/defer_state/test_run_results_state.py +++ /dev/null @@ -1,481 +0,0 @@ -import os -import shutil - -from dbt.tests.util import run_dbt, write_file -import pytest - -from tests.functional.defer_state import fixtures - - -class BaseRunResultsState: - @pytest.fixture(scope="class") - def models(self): - return { - "table_model.sql": fixtures.table_model_sql, - "view_model.sql": fixtures.view_model_sql, - "ephemeral_model.sql": fixtures.ephemeral_model_sql, - "schema.yml": fixtures.schema_yml, - "exposures.yml": fixtures.exposures_yml, - } - - @pytest.fixture(scope="class") - def macros(self): - return { - "macros.sql": fixtures.macros_sql, - "infinite_macros.sql": fixtures.infinite_macros_sql, - } - - @pytest.fixture(scope="class") - def seeds(self): - return {"seed.csv": fixtures.seed_csv} - - @property - def project_config_update(self): - return { - "seeds": { - "test": { - "quote_columns": False, - } - } - } - - def clear_state(self): - shutil.rmtree("./state") - - def copy_state(self): - if not os.path.exists("state"): - os.makedirs("state") - shutil.copyfile("target/manifest.json", "state/manifest.json") - shutil.copyfile("target/run_results.json", "state/run_results.json") - - def run_and_save_state(self): - run_dbt(["build"]) - self.copy_state() - - def rebuild_run_dbt(self, 
expect_pass=True): - self.clear_state() - run_dbt(["build"], expect_pass=expect_pass) - self.copy_state() - - def update_view_model_bad_sql(self): - # update view model to generate a failure case - not_unique_sql = "select * from forced_error" - write_file(not_unique_sql, "models", "view_model.sql") - - def update_view_model_failing_tests(self, with_dupes=True, with_nulls=False): - # test failure on build tests - # fail the unique test - select_1 = "select 1 as id" - select_stmts = [select_1] - if with_dupes: - select_stmts.append(select_1) - if with_nulls: - select_stmts.append("select null as id") - failing_tests_sql = " union all ".join(select_stmts) - write_file(failing_tests_sql, "models", "view_model.sql") - - def update_unique_test_severity_warn(self): - # change the unique test severity from error to warn and reuse the same view_model.sql changes above - new_config = fixtures.schema_yml.replace("error", "warn") - write_file(new_config, "models", "schema.yml") - - -class TestSeedRunResultsState(BaseRunResultsState): - def test_seed_run_results_state(self, project): - self.run_and_save_state() - self.clear_state() - run_dbt(["seed"]) - self.copy_state() - results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "result:success", "--state", "./state"], - expect_pass=True, - ) - assert len(results) == 1 - assert results[0] == "test.seed" - - results = run_dbt(["ls", "--select", "result:success", "--state", "./state"]) - assert len(results) == 1 - assert results[0] == "test.seed" - - results = run_dbt(["ls", "--select", "result:success+", "--state", "./state"]) - assert len(results) == 7 - assert set(results) == { - "test.seed", - "test.table_model", - "test.view_model", - "test.ephemeral_model", - "test.not_null_view_model_id", - "test.unique_view_model_id", - "exposure:test.my_exposure", - } - - # add a new faulty row to the seed - changed_seed_contents = fixtures.seed_csv + "\n" + "\\\3,carl" - write_file(changed_seed_contents, "seeds", "seed.csv") - - self.clear_state() - run_dbt(["seed"], expect_pass=False) - self.copy_state() - - results = run_dbt( - ["ls", "--resource-type", "seed", "--select", "result:error", "--state", "./state"], - expect_pass=True, - ) - assert len(results) == 1 - assert results[0] == "test.seed" - - results = run_dbt(["ls", "--select", "result:error", "--state", "./state"]) - assert len(results) == 1 - assert results[0] == "test.seed" - - results = run_dbt(["ls", "--select", "result:error+", "--state", "./state"]) - assert len(results) == 7 - assert set(results) == { - "test.seed", - "test.table_model", - "test.view_model", - "test.ephemeral_model", - "test.not_null_view_model_id", - "test.unique_view_model_id", - "exposure:test.my_exposure", - } - - -class TestBuildRunResultsState(BaseRunResultsState): - def test_build_run_results_state(self, project): - self.run_and_save_state() - results = run_dbt(["build", "--select", "result:error", "--state", "./state"]) - assert len(results) == 0 - - self.update_view_model_bad_sql() - self.rebuild_run_dbt(expect_pass=False) - - results = run_dbt( - ["build", "--select", "result:error", "--state", "./state"], expect_pass=False - ) - assert len(results) == 3 - nodes = set([elem.node.name for elem in results]) - assert nodes == {"view_model", "not_null_view_model_id", "unique_view_model_id"} - - results = run_dbt(["ls", "--select", "result:error", "--state", "./state"]) - assert len(results) == 3 - assert set(results) == { - "test.view_model", - "test.not_null_view_model_id", - "test.unique_view_model_id", - } 
- - results = run_dbt( - ["build", "--select", "result:error+", "--state", "./state"], expect_pass=False - ) - assert len(results) == 4 - nodes = set([elem.node.name for elem in results]) - assert nodes == { - "table_model", - "view_model", - "not_null_view_model_id", - "unique_view_model_id", - } - - results = run_dbt(["ls", "--select", "result:error+", "--state", "./state"]) - assert len(results) == 6 # includes exposure - assert set(results) == { - "test.table_model", - "test.view_model", - "test.ephemeral_model", - "test.not_null_view_model_id", - "test.unique_view_model_id", - "exposure:test.my_exposure", - } - - self.update_view_model_failing_tests() - self.rebuild_run_dbt(expect_pass=False) - - results = run_dbt( - ["build", "--select", "result:fail", "--state", "./state"], expect_pass=False - ) - assert len(results) == 1 - assert results[0].node.name == "unique_view_model_id" - - results = run_dbt(["ls", "--select", "result:fail", "--state", "./state"]) - assert len(results) == 1 - assert results[0] == "test.unique_view_model_id" - - results = run_dbt( - ["build", "--select", "result:fail+", "--state", "./state"], expect_pass=False - ) - assert len(results) == 1 - nodes = set([elem.node.name for elem in results]) - assert nodes == {"unique_view_model_id"} - - results = run_dbt(["ls", "--select", "result:fail+", "--state", "./state"]) - assert len(results) == 1 - assert set(results) == {"test.unique_view_model_id"} - - self.update_unique_test_severity_warn() - self.rebuild_run_dbt(expect_pass=True) - - results = run_dbt( - ["build", "--select", "result:warn", "--state", "./state"], expect_pass=True - ) - assert len(results) == 1 - assert results[0].node.name == "unique_view_model_id" - - results = run_dbt(["ls", "--select", "result:warn", "--state", "./state"]) - assert len(results) == 1 - assert results[0] == "test.unique_view_model_id" - - results = run_dbt( - ["build", "--select", "result:warn+", "--state", "./state"], expect_pass=True - ) - assert len(results) == 1 - nodes = set([elem.node.name for elem in results]) - assert nodes == {"unique_view_model_id"} - - results = run_dbt(["ls", "--select", "result:warn+", "--state", "./state"]) - assert len(results) == 1 - assert set(results) == {"test.unique_view_model_id"} - - -class TestRunRunResultsState(BaseRunResultsState): - def test_run_run_results_state(self, project): - self.run_and_save_state() - results = run_dbt( - ["run", "--select", "result:success", "--state", "./state"], expect_pass=True - ) - assert len(results) == 2 - assert results[0].node.name == "view_model" - assert results[1].node.name == "table_model" - - # clear state and rerun upstream view model to test + operator - self.clear_state() - run_dbt(["run", "--select", "view_model"], expect_pass=True) - self.copy_state() - results = run_dbt( - ["run", "--select", "result:success+", "--state", "./state"], expect_pass=True - ) - assert len(results) == 2 - assert results[0].node.name == "view_model" - assert results[1].node.name == "table_model" - - # check we are starting from a place with 0 errors - results = run_dbt(["run", "--select", "result:error", "--state", "./state"]) - assert len(results) == 0 - - self.update_view_model_bad_sql() - self.clear_state() - run_dbt(["run"], expect_pass=False) - self.copy_state() - - # test single result selector on error - results = run_dbt( - ["run", "--select", "result:error", "--state", "./state"], expect_pass=False - ) - assert len(results) == 1 - assert results[0].node.name == "view_model" - - # test + operator selection on 
-        results = run_dbt(
-            ["run", "--select", "result:error+", "--state", "./state"], expect_pass=False
-        )
-        assert len(results) == 2
-        assert results[0].node.name == "view_model"
-        assert results[1].node.name == "table_model"
-
-        # single result selector on skipped. Expect this to pass because the underlying view is already defined above
-        results = run_dbt(
-            ["run", "--select", "result:skipped", "--state", "./state"], expect_pass=True
-        )
-        assert len(results) == 1
-        assert results[0].node.name == "table_model"
-
-        # add a downstream model that depends on table_model for skipped+ selector
-        downstream_model_sql = "select * from {{ref('table_model')}}"
-        write_file(downstream_model_sql, "models", "table_model_downstream.sql")
-
-        self.clear_state()
-        run_dbt(["run"], expect_pass=False)
-        self.copy_state()
-
-        results = run_dbt(
-            ["run", "--select", "result:skipped+", "--state", "./state"], expect_pass=True
-        )
-        assert len(results) == 2
-        assert results[0].node.name == "table_model"
-        assert results[1].node.name == "table_model_downstream"
-
-
-class TestTestRunResultsState(BaseRunResultsState):
-    def test_test_run_results_state(self, project):
-        self.run_and_save_state()
-        # run passed nodes
-        results = run_dbt(
-            ["test", "--select", "result:pass", "--state", "./state"], expect_pass=True
-        )
-        assert len(results) == 2
-        nodes = set([elem.node.name for elem in results])
-        assert nodes == {"unique_view_model_id", "not_null_view_model_id"}
-
-        # run passed nodes with + operator
-        results = run_dbt(
-            ["test", "--select", "result:pass+", "--state", "./state"], expect_pass=True
-        )
-        assert len(results) == 2
-        nodes = set([elem.node.name for elem in results])
-        assert nodes == {"unique_view_model_id", "not_null_view_model_id"}
-
-        self.update_view_model_failing_tests()
-        self.rebuild_run_dbt(expect_pass=False)
-
-        # test with failure selector
-        results = run_dbt(
-            ["test", "--select", "result:fail", "--state", "./state"], expect_pass=False
-        )
-        assert len(results) == 1
-        assert results[0].node.name == "unique_view_model_id"
-
-        # test with failure selector and + operator
-        results = run_dbt(
-            ["test", "--select", "result:fail+", "--state", "./state"], expect_pass=False
-        )
-        assert len(results) == 1
-        assert results[0].node.name == "unique_view_model_id"
-
-        self.update_unique_test_severity_warn()
-        # rebuild - expect_pass = True because we changed the error to a warning this time around
-        self.rebuild_run_dbt(expect_pass=True)
-
-        # test with warn selector
-        results = run_dbt(
-            ["test", "--select", "result:warn", "--state", "./state"], expect_pass=True
-        )
-        assert len(results) == 1
-        assert results[0].node.name == "unique_view_model_id"
-
-        # test with warn selector and + operator
-        results = run_dbt(
-            ["test", "--select", "result:warn+", "--state", "./state"], expect_pass=True
-        )
-        assert len(results) == 1
-        assert results[0].node.name == "unique_view_model_id"
-
-
-class TestConcurrentSelectionRunResultsState(BaseRunResultsState):
-    def test_concurrent_selection_run_run_results_state(self, project):
-        self.run_and_save_state()
-        results = run_dbt(
-            ["run", "--select", "state:modified+", "result:error+", "--state", "./state"]
-        )
-        assert len(results) == 0
-
-        self.update_view_model_bad_sql()
-        self.clear_state()
-        run_dbt(["run"], expect_pass=False)
-        self.copy_state()
-
-        # add a new failing dbt model
-        bad_sql = "select * from forced_error"
-        write_file(bad_sql, "models", "table_model_modified_example.sql")
-
-        results = run_dbt(
-            ["run", "--select", "state:modified+", "result:error+", "--state", "./state"],
"result:error+", "--state", "./state"], - expect_pass=False, - ) - assert len(results) == 3 - nodes = set([elem.node.name for elem in results]) - assert nodes == {"view_model", "table_model_modified_example", "table_model"} - - -class TestConcurrentSelectionTestRunResultsState(BaseRunResultsState): - def test_concurrent_selection_test_run_results_state(self, project): - self.run_and_save_state() - # create failure test case for result:fail selector - self.update_view_model_failing_tests(with_nulls=True) - - # run dbt build again to trigger test errors - self.rebuild_run_dbt(expect_pass=False) - - # get the failures from - results = run_dbt( - [ - "test", - "--select", - "result:fail", - "--exclude", - "not_null_view_model_id", - "--state", - "./state", - ], - expect_pass=False, - ) - assert len(results) == 1 - nodes = set([elem.node.name for elem in results]) - assert nodes == {"unique_view_model_id"} - - -class TestConcurrentSelectionBuildRunResultsState(BaseRunResultsState): - def test_concurrent_selectors_build_run_results_state(self, project): - self.run_and_save_state() - results = run_dbt( - ["build", "--select", "state:modified+", "result:error+", "--state", "./state"] - ) - assert len(results) == 0 - - self.update_view_model_bad_sql() - self.rebuild_run_dbt(expect_pass=False) - - # add a new failing dbt model - bad_sql = "select * from forced_error" - write_file(bad_sql, "models", "table_model_modified_example.sql") - - results = run_dbt( - ["build", "--select", "state:modified+", "result:error+", "--state", "./state"], - expect_pass=False, - ) - assert len(results) == 5 - nodes = set([elem.node.name for elem in results]) - assert nodes == { - "table_model_modified_example", - "view_model", - "table_model", - "not_null_view_model_id", - "unique_view_model_id", - } - - self.update_view_model_failing_tests() - - # create error model case for result:error selector - more_bad_sql = "select 1 as id from not_exists" - write_file(more_bad_sql, "models", "error_model.sql") - - # create something downstream from the error model to rerun - downstream_model_sql = "select * from {{ ref('error_model') }} )" - write_file(downstream_model_sql, "models", "downstream_of_error_model.sql") - - # regenerate build state - self.rebuild_run_dbt(expect_pass=False) - - # modify model again to trigger the state:modified selector - bad_again_sql = "select * from forced_anothererror" - write_file(bad_again_sql, "models", "table_model_modified_example.sql") - - results = run_dbt( - [ - "build", - "--select", - "state:modified+", - "result:error+", - "result:fail+", - "--state", - "./state", - ], - expect_pass=False, - ) - assert len(results) == 4 - nodes = set([elem.node.name for elem in results]) - assert nodes == { - "error_model", - "downstream_of_error_model", - "table_model_modified_example", - "unique_view_model_id", - } diff --git a/tests/functional/materializations/test_custom_materialization.py b/tests/functional/materializations/test_custom_materialization.py deleted file mode 100644 index 6aa69a4b..00000000 --- a/tests/functional/materializations/test_custom_materialization.py +++ /dev/null @@ -1,80 +0,0 @@ -from dbt.tests.util import run_dbt -import pytest - - -models__model_sql = """ -{{ config(materialized='view') }} -select 1 as id - -""" - - -@pytest.fixture(scope="class") -def models(): - return {"model.sql": models__model_sql} - - -class TestOverrideAdapterDependency: - # make sure that if there's a dependency with an adapter-specific - # materialization, we honor that materialization - 
-    @pytest.fixture(scope="class")
-    def packages(self):
-        return {"packages": [{"local": "override-view-adapter-dep"}]}
-
-    def test_adapter_dependency(self, project, override_view_adapter_dep):
-        run_dbt(["deps"])
-        # this should error because the override is buggy
-        run_dbt(["run"], expect_pass=False)
-
-
-class TestOverrideDefaultDependency:
-    @pytest.fixture(scope="class")
-    def packages(self):
-        return {"packages": [{"local": "override-view-default-dep"}]}
-
-    def test_default_dependency(self, project, override_view_default_dep):
-        run_dbt(["deps"])
-        # this should error because the override is buggy
-        run_dbt(["run"], expect_pass=False)
-
-
-class TestOverrideAdapterDependencyPassing:
-    @pytest.fixture(scope="class")
-    def packages(self):
-        return {"packages": [{"local": "override-view-adapter-pass-dep"}]}
-
-    def test_default_dependency(self, project, override_view_adapter_pass_dep):
-        run_dbt(["deps"])
-        # this should pass because the override is ok
-        run_dbt(["run"])
-
-
-class TestOverrideAdapterLocal:
-    # make sure that the local default wins over the dependency
-    # adapter-specific
-
-    @pytest.fixture(scope="class")
-    def packages(self):
-        return {"packages": [{"local": "override-view-adapter-pass-dep"}]}
-
-    @pytest.fixture(scope="class")
-    def project_config_update(self):
-        return {"macro-paths": ["override-view-adapter-macros"]}
-
-    def test_default_dependency(
-        self, project, override_view_adapter_pass_dep, override_view_adapter_macros
-    ):
-        run_dbt(["deps"])
-        # this should error because the override is buggy
-        run_dbt(["run"], expect_pass=False)
-
-
-class TestOverrideDefaultReturn:
-    @pytest.fixture(scope="class")
-    def project_config_update(self):
-        return {"macro-paths": ["override-view-return-no-relation"]}
-
-    def test_default_dependency(self, project, override_view_return_no_relation):
-        run_dbt(["deps"])
-        results = run_dbt(["run"], expect_pass=False)
-        assert "did not explicitly return a list of relations" in results[0].message