diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml
new file mode 100644
index 00000000..35f63a37
--- /dev/null
+++ b/.github/workflows/integration.yml
@@ -0,0 +1,85 @@
+name: Integration Tests
+on:
+  push:
+    branches:
+      - "main"
+      - "*.latest"
+      - "releases/*"
+    paths-ignore:
+      - "**.MD"
+      - "**.md"
+
+permissions:
+  id-token: write
+  contents: read
+
+defaults:
+  run:
+    shell: bash
+
+jobs:
+  integ:
+    name: integration test / python ${{ matrix.python-version }}
+
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
+
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.10"] # Use a single version to avoid resource conflicts in an AWS account
+
+    env:
+      TOXENV: "integration"
+      PYTEST_ADDOPTS: "-v --color=yes --csv unit_results.csv"
+      DBT_AWS_ACCOUNT: ${{ secrets.DBT_AWS_ACCOUNT }}
+      DBT_GLUE_ROLE_ARN: ${{ secrets.DBT_GLUE_ROLE_ARN }}
+      DBT_GLUE_REGION: ${{ secrets.DBT_GLUE_REGION }}
+
+    steps:
+      - name: Check out the repository
+        uses: actions/checkout@v3
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install python dependencies
+        run: |
+          sudo apt-get update
+          sudo apt-get install libsasl2-dev
+          python -m pip install --user --upgrade pip
+          python -m pip --version
+          python -m pip install tox
+          tox --version
+
+      - name: Generate session name
+        id: session
+        run: |
+          repo="${GITHUB_REPOSITORY#${GITHUB_REPOSITORY_OWNER}/}"
+          echo "name=${repo}-${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}" >> "${GITHUB_OUTPUT}"
+
+      - name: Configure AWS Credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          role-session-name: ${{ steps.session.outputs.name }}
+          role-to-assume: arn:aws:iam::${{ secrets.DBT_AWS_ACCOUNT }}:role/dbt-glue
+          aws-region: ${{ secrets.DBT_GLUE_REGION }}
+          mask-aws-account-id: true
+
+      - name: Run tox
+        run: |
+          export DBT_S3_LOCATION=${{ secrets.DBT_S3_LOCATION }}/${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}/${{ matrix.python-version }}
+          tox
+
+      - name: Get current date
+        if: always()
+        id: date
+        run: echo "date=$(date +'%Y-%m-%dT%H_%M_%S')" >> $GITHUB_OUTPUT # no colons allowed for artifacts
+
+      - uses: actions/upload-artifact@v3
+        if: always()
+        with:
+          name: unit_results_${{ matrix.python-version }}-${{ steps.date.outputs.date }}.csv
+          path: unit_results.csv
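A note on the Configure AWS Credentials step above: with `permissions: id-token: write`, `aws-actions/configure-aws-credentials` authenticates through GitHub's OIDC provider rather than long-lived access keys, so the `dbt-glue` role must trust that provider. A minimal sketch of such a trust setup, assuming the GitHub OIDC provider is already registered in the account (the account id and repo filter below are illustrative placeholders, not values from this change):

    # Sketch only: create the IAM role the workflow assumes via OIDC.
    aws iam create-role \
      --role-name dbt-glue \
      --assume-role-policy-document '{
        "Version": "2012-10-17",
        "Statement": [{
          "Effect": "Allow",
          "Principal": {"Federated": "arn:aws:iam::111122223333:oidc-provider/token.actions.githubusercontent.com"},
          "Action": "sts:AssumeRoleWithWebIdentity",
          "Condition": {
            "StringEquals": {"token.actions.githubusercontent.com:aud": "sts.amazonaws.com"},
            "StringLike": {"token.actions.githubusercontent.com:sub": "repo:my-org/dbt-glue:*"}
          }
        }]
      }'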
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
new file mode 100644
index 00000000..30532e65
--- /dev/null
+++ b/.github/workflows/main.yml
@@ -0,0 +1,218 @@
+# **what?**
+# Runs code quality checks, unit tests, and verifies python build on
+# all code committed to the repository. This workflow should not
+# require any secrets since it runs for PRs from forked repos.
+# By default, secrets are not passed to workflows running from
+# a forked repo.
+
+# **why?**
+# Ensure code for dbt meets a certain quality standard.
+
+# **when?**
+# This will run for all PRs, when code is pushed to a release
+# branch, and when manually triggered.
+
+name: Tests and Code Checks
+
+on:
+  push:
+    branches:
+      - "main"
+      - "*.latest"
+      - "releases/*"
+    paths-ignore:
+      - "**.MD"
+      - "**.md"
+  pull_request:
+  workflow_dispatch:
+
+permissions:
+  id-token: write
+  contents: read
+
+# will cancel previous workflows triggered by the same event and for the same ref for PRs or same SHA otherwise
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event_name }}-${{ contains(github.event_name, 'pull_request') && github.event.pull_request.head.ref || github.sha }}
+  cancel-in-progress: true
+
+defaults:
+  run:
+    shell: bash
+
+jobs:
+#  code-quality:
+#    name: code-quality
+#
+#    runs-on: ubuntu-latest
+#    timeout-minutes: 10
+#
+#    steps:
+#      - name: Check out the repository
+#        uses: actions/checkout@v3
+#        with:
+#          persist-credentials: false
+#
+#      - name: Set up Python
+#        uses: actions/setup-python@v4
+#        with:
+#          python-version: '3.8'
+#
+#      - name: Install python dependencies
+#        run: |
+#          sudo apt-get update
+#          sudo apt-get install libsasl2-dev
+#          python -m pip install --user --upgrade pip
+#          python -m pip --version
+#          python -m pip install pre-commit
+#          pre-commit --version
+#          python -m pip install mypy==0.942
+#          python -m pip install types-requests
+#          mypy --version
+#          python -m pip install -r dev-requirements.txt
+#          dbt --version
+#
+#      - name: Run pre-commit hooks
+#        run: pre-commit run --all-files --show-diff-on-failure
+
+  unit:
+    name: unit test / python ${{ matrix.python-version }}
+
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
+
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.8", "3.9", "3.10", "3.11"]
+
+    env:
+      TOXENV: "unit"
+      PYTEST_ADDOPTS: "-v --color=yes --csv unit_results.csv"
+
+    steps:
+      - name: Check out the repository
+        uses: actions/checkout@v3
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install python dependencies
+        run: |
+          sudo apt-get update
+          sudo apt-get install libsasl2-dev
+          python -m pip install --user --upgrade pip
+          python -m pip --version
+          python -m pip install tox
+          tox --version
+
+      - name: Run tox
+        run: tox
+
+      - name: Get current date
+        if: always()
+        id: date
+        run: echo "date=$(date +'%Y-%m-%dT%H_%M_%S')" >> $GITHUB_OUTPUT # no colons allowed for artifacts
+
+      - uses: actions/upload-artifact@v3
+        if: always()
+        with:
+          name: unit_results_${{ matrix.python-version }}-${{ steps.date.outputs.date }}.csv
+          path: unit_results.csv
+
+
+  build:
+    name: build packages
+
+    runs-on: ubuntu-latest
+
+    outputs:
+      is_alpha: ${{ steps.check-is-alpha.outputs.is_alpha }}
+
+    steps:
+      - name: Check out the repository
+        uses: actions/checkout@v3
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.8'
+
+      - name: Install python dependencies
+        run: |
+          python -m pip install --user --upgrade pip
+          python -m pip install --upgrade setuptools wheel twine check-wheel-contents
+          python -m pip --version
+
+      - name: Build distributions
+        run: ./scripts/build-dist.sh
+
+      - name: Show distributions
+        run: ls -lh dist/
+
+      - name: Check distribution descriptions
+        run: |
+          twine check dist/*
+      - name: Check wheel contents
+        run: |
+          check-wheel-contents dist/*.whl --ignore W007,W008
+
+      - name: Check if this is an alpha version
+        id: check-is-alpha
+        run: |
+          export is_alpha=0
+          if [[ "$(ls -lh dist/)" == *"a1"* ]]; then export is_alpha=1; fi
+          echo "is_alpha=$is_alpha" >> $GITHUB_OUTPUT
+
+      - uses: actions/upload-artifact@v3
+        with:
+          name: dist
+          path: dist/
+
+  test-build:
+    name: verify packages / python ${{ matrix.python-version }} / ${{ matrix.os }}
+
+    if: needs.build.outputs.is_alpha == 0
+
+    needs: build
+
+    runs-on: ${{ matrix.os }}
+
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest, macos-latest, windows-latest]
+        python-version: ["3.8", "3.9", "3.10", "3.11"]
+
+    steps:
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install python dependencies
+        run: |
+          python -m pip install --user --upgrade pip
+          python -m pip install --upgrade wheel
+          python -m pip --version
+      - uses: actions/download-artifact@v3
+        with:
+          name: dist
+          path: dist/
+
+      - name: Show distributions
+        run: ls -lh dist/
+
+      - name: Install wheel distributions
+        run: |
+          find ./dist/*.whl -maxdepth 1 -type f | xargs python -m pip install --force-reinstall --find-links=dist/
+      - name: Check wheel distributions
+        run: |
+          dbt --version
+      - name: Install source distributions
+        run: |
+          find ./dist/*.gz -maxdepth 1 -type f | xargs python -m pip install --force-reinstall --find-links=dist/
+      - name: Check source distributions
+        run: |
+          dbt --version
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000..b95efacd
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,63 @@
+# For more on configuring pre-commit hooks, see https://pre-commit.com/
+
+# Force all unspecified python hooks to run python3
+default_language_version:
+  python: python3
+
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.4.0
+    hooks:
+      - id: check-yaml
+        args: [--unsafe]
+      - id: check-json
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+      - id: check-case-conflict
+  - repo: https://github.com/psf/black
+    rev: 23.1.0
+    hooks:
+      - id: black
+        additional_dependencies: ['click~=8.1']
+        args:
+          - "--line-length=99"
+          - "--target-version=py38"
+      - id: black
+        alias: black-check
+        stages: [manual]
+        additional_dependencies: ['click~=8.1']
+        args:
+          - "--line-length=99"
+          - "--target-version=py38"
+          - "--check"
+          - "--diff"
+  - repo: https://github.com/pycqa/flake8
+    rev: 6.0.0
+    hooks:
+      - id: flake8
+      - id: flake8
+        alias: flake8-check
+        stages: [manual]
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v1.2.0
+    hooks:
+      - id: mypy
+        # N.B.: Mypy is... a bit fragile.
+        #
+        # By using `language: system` we run this hook in the local
+        # environment instead of a pre-commit isolated one. This is needed
+        # to ensure mypy correctly parses the project.
+
+        # It may cause trouble in that it adds environment variables out
+        # of our control to the mix. Unfortunately, there's nothing we can
+        # do about it, per pre-commit's author.
+        # See https://github.com/pre-commit/pre-commit/issues/730 for details.
+        args: [--show-error-codes, --ignore-missing-imports, --explicit-package-bases, --warn-unused-ignores, --disallow-untyped-defs]
+        files: ^dbt/adapters/.*
+        language: system
+      - id: mypy
+        alias: mypy-check
+        stages: [manual]
+        args: [--show-error-codes, --pretty, --ignore-missing-imports, --explicit-package-bases]
+        files: ^dbt/adapters
+        language: system
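Because the `black-check`, `flake8-check`, and `mypy-check` aliases are pinned to `stages: [manual]`, they are skipped on ordinary commits and have to be requested explicitly. Typical invocations (assuming pre-commit is installed, as in the commented-out code-quality job above):

    # Default hooks against the whole tree (what CI would run):
    pre-commit run --all-files --show-diff-on-failure

    # Manual-stage aliases only, e.g. black in --check/--diff mode:
    pre-commit run black-check --all-files --hook-stage manual
    pre-commit run mypy-check --all-files --hook-stage manual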
diff --git a/dbt/adapters/glue/__init__.py b/dbt/adapters/glue/__init__.py
index 5693b276..bf1a2c82 100644
--- a/dbt/adapters/glue/__init__.py
+++ b/dbt/adapters/glue/__init__.py
@@ -9,5 +9,5 @@
     adapter=GlueAdapter,
     credentials=GlueCredentials,
     include_path=glue.PACKAGE_PATH,
-    dependencies = ["spark"],
+    dependencies=["spark"],
 )
diff --git a/dev-requirements.txt b/dev-requirements.txt
index 688eaa17..18e36dac 100644
--- a/dev-requirements.txt
+++ b/dev-requirements.txt
@@ -18,6 +18,13 @@ pytest-xdist
 pytest-dotenv
 pytest-csv
 flaky
-dbt-tests-adapter==1.7.0
 mypy==1.6.1
 black==23.10.1
+
+# Adapter specific dependencies
+waiter
+boto3
+
+dbt-core==1.7.0
+dbt-tests-adapter==1.7.0
+dbt-spark==1.7.0
\ No newline at end of file
diff --git a/scripts/build-dist.sh b/scripts/build-dist.sh
new file mode 100755
index 00000000..3c380839
--- /dev/null
+++ b/scripts/build-dist.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+set -eo pipefail
+
+DBT_PATH="$( cd "$(dirname "$0")/.." ; pwd -P )"
+
+PYTHON_BIN=${PYTHON_BIN:-python}
+
+echo "$PYTHON_BIN"
+
+set -x
+
+rm -rf "$DBT_PATH"/dist
+rm -rf "$DBT_PATH"/build
+mkdir -p "$DBT_PATH"/dist
+
+cd "$DBT_PATH"
+$PYTHON_BIN setup.py sdist bdist_wheel
+
+set +x
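build-dist.sh honors an optional `PYTHON_BIN` override before falling back to `python`, so the same script the `build` job calls can be run locally against a specific interpreter; for example (interpreter version illustrative):

    # Build sdist + wheel into ./dist with an explicit interpreter:
    PYTHON_BIN=python3.10 ./scripts/build-dist.sh
    ls -lh dist/  # expect one .tar.gz and one .whl, as the workflow then checks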
diff --git a/tests/conftest.py b/tests/conftest.py
index 9eded411..238cee28 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -19,7 +19,7 @@ def dbt_profile_target():
         'worker_type': 'G.1X',
         'schema': 'dbt_functional_test_01',
         'database': 'dbt_functional_test_01',
-        'session_provisioning_timeout_in_seconds': 120,
+        'session_provisioning_timeout_in_seconds': 300,
         'location': os.getenv('DBT_S3_LOCATION'),
         'datalake_formats': 'delta',
         'conf': "spark.sql.extensions=io.delta.sql.DeltaSparkSessionExtension --conf spark.sql.catalog.spark_catalog=org.apache.spark.sql.delta.catalog.DeltaCatalog --conf spark.sql.legacy.allowNonEmptyLocationInCTAS=true",
diff --git a/tests/functional/adapter/test_basic.py b/tests/functional/adapter/test_basic.py
index 736f0276..87e40140 100644
--- a/tests/functional/adapter/test_basic.py
+++ b/tests/functional/adapter/test_basic.py
@@ -247,6 +247,32 @@ class TestGenericTestsGlue(BaseGenericTests):
     @pytest.fixture(scope="class")
     def unique_schema(request, prefix) -> str:
         return schema_name
+
+    @pytest.fixture(scope='class', autouse=True)
+    def cleanup(self):
+        cleanup_s3_location()
+        yield
+
+    def test_generic_tests(self, project):
+        # seed command
+        results = run_dbt(["seed"])
+
+        relation = relation_from_name(project.adapter, "base")
+        # run refresh table to disable the previous parquet file paths
+        project.run_sql(f"refresh table {relation}")
+
+        # test command selecting base model
+        results = run_dbt(["test", "-m", "base"])
+        assert len(results) == 1
+
+        # run command
+        results = run_dbt(["run"])
+        assert len(results) == 2
+
+        # test command, all tests
+        results = run_dbt(["test"])
+        assert len(results) == 3
+
     pass # To test
diff --git a/tests/unit/test_glue_session.py b/tests/functional_test/adapter/test_glue_session.py
similarity index 96%
rename from tests/unit/test_glue_session.py
rename to tests/functional_test/adapter/test_glue_session.py
index d9f4a7be..75932d27 100644
--- a/tests/unit/test_glue_session.py
+++ b/tests/functional_test/adapter/test_glue_session.py
@@ -1,6 +1,8 @@
 from dbt.adapters.glue.gluedbapi import GlueConnection, GlueCursor
 import boto3
 import uuid
+import string
+import random
 from tests.util import get_account_id, get_s3_location
@@ -62,7 +64,8 @@ def __test_query_with_comments(session):
 def test_create_database(session, region):
     client = boto3.client("glue", region_name=region)
     schema = "testdb111222333"
-    table_name = "test123"
+    table_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=4))
+    table_name = f"test123_{table_suffix}"
     try:
         response = client.create_database(
             DatabaseInput={
diff --git a/tests/unit/test_glue_session_demo.py b/tests/functional_test/adapter/test_glue_session_demo.py
similarity index 100%
rename from tests/unit/test_glue_session_demo.py
rename to tests/functional_test/adapter/test_glue_session_demo.py
diff --git a/tests/single_functional/adapter/single_test.py b/tests/functional_test/adapter/test_single_functional.py
similarity index 100%
rename from tests/single_functional/adapter/single_test.py
rename to tests/functional_test/adapter/test_single_functional.py
diff --git a/tests/unit/conftest.py b/tests/functional_test/conftest.py
similarity index 96%
rename from tests/unit/conftest.py
rename to tests/functional_test/conftest.py
index bfbcfc7e..1a97833e 100644
--- a/tests/unit/conftest.py
+++ b/tests/functional_test/conftest.py
@@ -47,7 +47,7 @@ def credentials():
         database=None,
         schema="airbotinigo",
         worker_type="G.1X",
-        session_provisioning_timeout_in_seconds=120,
+        session_provisioning_timeout_in_seconds=300,
         workers=3
     )
diff --git a/tests/unit/test_adapter.py b/tests/unit/test_adapter.py
new file mode 100644
index 00000000..0e9b19fd
--- /dev/null
+++ b/tests/unit/test_adapter.py
@@ -0,0 +1,60 @@
+from typing import Any, Dict, Optional
+import unittest
+from unittest import mock
+
+from dbt.config import RuntimeConfig
+
+import dbt.flags as flags
+from dbt.adapters.glue import GlueAdapter
+from tests.util import config_from_parts_or_dicts
+
+
+class TestGlueAdapter(unittest.TestCase):
+    def setUp(self):
+        flags.STRICT_MODE = False
+
+        self.project_cfg = {
+            "name": "X",
+            "version": "0.1",
+            "profile": "test",
+            "project-root": "/tmp/dbt/does-not-exist",
+            "quoting": {
+                "identifier": False,
+                "schema": False,
+            },
+            "config-version": 2,
+        }
+
+        self.profile_cfg = {
+            "outputs": {
+                "test": {
+                    "type": "glue",
+                    "role_arn": "arn:aws:iam::123456789101:role/GlueInteractiveSessionRole",
+                    "region": "us-east-1",
+                    "workers": 2,
+                    "worker_type": "G.1X",
+                    "schema": "dbt_functional_test_01",
+                    "database": "dbt_functional_test_01",
+                }
+            },
+            "target": "test",
+        }
+
+    def _get_config(self, **kwargs: Any) -> RuntimeConfig:
+        for key, val in kwargs.items():
+            self.profile_cfg["outputs"]["test"][key] = val
+
+        return config_from_parts_or_dicts(self.project_cfg, self.profile_cfg)
+
+    def test_glue_connection(self):
+        config = self._get_config()
+        adapter = GlueAdapter(config)
+
+        with mock.patch("dbt.adapters.glue.connections.open"):
+            connection = adapter.acquire_connection("dummy")
+            connection.handle  # trigger lazy-load
+
+        self.assertEqual(connection.state, "open")
+        self.assertEqual(connection.type, "glue")
+        self.assertEqual(connection.credentials.schema, "dbt_functional_test_01")
+        self.assertIsNotNone(connection.handle)
diff --git a/tests/unit/test_credentials.py b/tests/unit/test_credentials.py
new file mode 100644
index 00000000..30286582
--- /dev/null
+++ b/tests/unit/test_credentials.py
@@ -0,0 +1,18 @@
+import unittest
+
+from dbt.adapters.glue.connections import GlueCredentials
+
+
+class TestGlueCredentials(unittest.TestCase):
+    def test_credentials(self) -> None:
+        credentials = GlueCredentials(
+            database="tests",
+            schema="tests",
+            role_arn="arn:aws:iam::123456789101:role/GlueInteractiveSessionRole",
+            region="ap-northeast-1",
+            workers=4,
+            worker_type="G.2X",
+        )
+        assert credentials.schema == "tests"
+        assert credentials.database is None
+        assert credentials.glue_version == "4.0"  # default Glue version is 4.0
diff --git a/tests/unit/test_lakeformation.py b/tests/unit/test_lakeformation.py
new file mode 100644
index 00000000..c125c076
--- /dev/null
+++ b/tests/unit/test_lakeformation.py
@@ -0,0 +1,23 @@
+import unittest
+
+from dbt.adapters.glue.lakeformation import FilterConfig
+
+
+class TestLakeFormation(unittest.TestCase):
+    def test_lakeformation_filter_api_repr(self) -> None:
+        expected_filter = {
+            "TableCatalogId": "123456789101",
+            "DatabaseName": "some_database",
+            "TableName": "some_table",
+            "Name": "some_filter",
+            "RowFilter": {"FilterExpression": "product_name='Heater'"},
+            "ColumnWildcard": {"ExcludedColumnNames": []}
+        }
+        filter_config = FilterConfig(
+            row_filter="product_name='Heater'",
+            principals=[],
+            column_names=[],
+            excluded_column_names=[]
+        )
+        ret = filter_config.to_api_repr("123456789101", "some_database", "some_table", "some_filter")
+        self.assertDictEqual(ret, expected_filter)
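For context on the shape asserted above: the dict mirrors the `TableData` payload of Lake Formation's CreateDataCellsFilter API, so the test effectively pins the adapter's serialization to what the service expects. A hedged sketch of how such a payload could be submitted with the AWS CLI (not part of this change; the ids and names are the test's own placeholders):

    # Hypothetical: register the row filter this test constructs.
    aws lakeformation create-data-cells-filter --table-data '{
      "TableCatalogId": "123456789101",
      "DatabaseName": "some_database",
      "TableName": "some_table",
      "Name": "some_filter",
      "RowFilter": {"FilterExpression": "product_name='\''Heater'\''"},
      "ColumnWildcard": {"ExcludedColumnNames": []}
    }'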
"some_database", + "schema": "some_database", + "identifier": "some_table", + }, + "include_policy": { + "database": True, + "schema": True, + }, + "type": None, + } + + relation = SparkRelation.from_dict(data) + with self.assertRaises(DbtRuntimeError): + relation.render() diff --git a/tests/util.py b/tests/util.py index 6b2feb96..faef0108 100644 --- a/tests/util.py +++ b/tests/util.py @@ -1,9 +1,93 @@ import os +from dbt.config.project import PartialProject DEFAULT_REGION = "eu-west-1" +class Obj: + which = "blah" + single_threaded = False + + +def profile_from_dict(profile, profile_name, cli_vars="{}"): + from dbt.config import Profile + from dbt.config.renderer import ProfileRenderer + from dbt.config.utils import parse_cli_vars + + if not isinstance(cli_vars, dict): + cli_vars = parse_cli_vars(cli_vars) + + renderer = ProfileRenderer(cli_vars) + + # in order to call dbt's internal profile rendering, we need to set the + # flags global. This is a bit of a hack, but it's the best way to do it. + from dbt.flags import set_from_args + from argparse import Namespace + + set_from_args(Namespace(), None) + return Profile.from_raw_profile_info( + profile, + profile_name, + renderer, + ) + + +def project_from_dict(project, profile, packages=None, selectors=None, cli_vars="{}"): + from dbt.config.renderer import DbtProjectYamlRenderer + from dbt.config.utils import parse_cli_vars + + if not isinstance(cli_vars, dict): + cli_vars = parse_cli_vars(cli_vars) + + renderer = DbtProjectYamlRenderer(profile, cli_vars) + + project_root = project.pop("project-root", os.getcwd()) + + partial = PartialProject.from_dicts( + project_root=project_root, + project_dict=project, + packages_dict=packages, + selectors_dict=selectors, + ) + return partial.render(renderer) + + +def config_from_parts_or_dicts(project, profile, packages=None, selectors=None, cli_vars="{}"): + from dbt.config import Project, Profile, RuntimeConfig + from dbt.config.utils import parse_cli_vars + from copy import deepcopy + + if not isinstance(cli_vars, dict): + cli_vars = parse_cli_vars(cli_vars) + + if isinstance(project, Project): + profile_name = project.profile_name + else: + profile_name = project.get("profile") + + if not isinstance(profile, Profile): + profile = profile_from_dict( + deepcopy(profile), + profile_name, + cli_vars, + ) + + if not isinstance(project, Project): + project = project_from_dict( + deepcopy(project), + profile, + packages, + selectors, + cli_vars, + ) + + args = Obj() + args.vars = cli_vars + args.profile_dir = "/dev/null" + return RuntimeConfig.from_parts(project=project, profile=profile, args=args) + + def get_account_id(): if "DBT_AWS_ACCOUNT" in os.environ: return os.environ.get("DBT_AWS_ACCOUNT") diff --git a/tox.ini b/tox.ini new file mode 100644 index 00000000..f4c81be6 --- /dev/null +++ b/tox.ini @@ -0,0 +1,26 @@ +[tox] +skipsdist = True +envlist = unit, flake8, integration-spark-thrift + +[testenv:{unit}] +allowlist_externals = + /bin/bash +commands = /bin/bash -c '{envpython} -m pytest -v {posargs} tests/unit' +passenv = + DBT_* + PYTEST_ADDOPTS +deps = + -rdev-requirements.txt + -e. + +[testenv:{integration}] +allowlist_externals = + /bin/bash +commands = /bin/bash -c '{envpython} -m pytest -v {posargs}' +passenv = + DBT_* + PYTEST_ADDOPTS + AWS_* +deps = + -rdev-requirements.txt + -e.