Skip to content

Commit

Permalink
Add validation warning when filter is applied to conversion input measure (#350)
Browse files Browse the repository at this point in the history

Resolves SL-2862

<!---
Include the number of the issue addressed by this PR above if
applicable.
  PRs for code changes without an associated issue *will not be merged*.
  See CONTRIBUTING.md for more information.
-->

### Description
Since filtering on a conversion input measure is not fully supported, we
now emit a validation warning when a filter is applied to one, so the user is informed.
<!---
Describe the Pull Request here. Add any references and info to help
reviewers
  understand your changes. Include any tradeoffs you considered.
-->

### Checklist

- [x] I have read [the contributing
guide](https://github.com/dbt-labs/dbt-semantic-interfaces/blob/main/CONTRIBUTING.md)
and understand what's expected of me
- [x] I have signed the
[CLA](https://docs.getdbt.com/docs/contributor-license-agreements)
- [x] This PR includes tests, or tests are not required/relevant for
this PR
- [x] I have run `changie new` to [create a changelog
entry](https://github.com/dbt-labs/dbt-semantic-interfaces/blob/main/CONTRIBUTING.md#adding-a-changelog-entry)
  • Loading branch information
WilliamDee authored Sep 30, 2024
1 parent b81abec commit a462190
Show file tree
Hide file tree
Showing 3 changed files with 85 additions and 44 deletions.
6 changes: 6 additions & 0 deletions .changes/unreleased/Features-20240927-154038.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
kind: Features
body: Added validation warning when using filter on conversion input measure
time: 2024-09-27T15:40:38.968626-04:00
custom:
Author: WilliamDee
Issue: None
25 changes: 21 additions & 4 deletions dbt_semantic_interfaces/validations/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
ConversionTypeParams,
Dimension,
Metric,
MetricInputMeasure,
SemanticManifest,
SemanticManifestT,
SemanticModel,
Expand Down Expand Up @@ -494,10 +495,12 @@ def _validate_measures(
) -> List[ValidationIssue]:
issues: List[ValidationIssue] = []

def _validate_measure(measure_reference: MeasureReference, semantic_model: SemanticModel) -> None:
def _validate_measure(
input_measure: MetricInputMeasure, semantic_model: SemanticModel, is_base_measure: bool = True
) -> None:
measure = None
for model_measure in semantic_model.measures:
if model_measure.reference == measure_reference:
if model_measure.reference == input_measure.measure_reference:
measure = model_measure
break

Expand All @@ -519,17 +522,31 @@ def _validate_measure(measure_reference: MeasureReference, semantic_model: Seman
)
)

if input_measure.filter is not None and not is_base_measure:
# Filters for conversion measure input are not fully supported.
issues.append(
ValidationWarning(
context=MetricContext(
file_context=FileContext.from_metadata(metadata=metric.metadata),
metric=MetricModelReference(metric_name=metric.name),
),
message=f"Measure input {measure.name} has a filter. For conversion metrics,"
" filtering on a conversion input measure is not fully supported yet.",
)
)

conversion_type_params = metric.type_params.conversion_type_params
assert (
conversion_type_params is not None
), "For a conversion metric, type_params.conversion_type_params must exist."
_validate_measure(
measure_reference=conversion_type_params.base_measure.measure_reference,
input_measure=conversion_type_params.base_measure,
semantic_model=base_semantic_model,
)
_validate_measure(
measure_reference=conversion_type_params.conversion_measure.measure_reference,
input_measure=conversion_type_params.conversion_measure,
semantic_model=conversion_semantic_model,
is_base_measure=False,
)
return issues

Expand Down
98 changes: 58 additions & 40 deletions tests/validations/test_metrics.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
from copy import deepcopy
from typing import List, Tuple

import pytest

Expand Down Expand Up @@ -54,10 +55,21 @@
)
from dbt_semantic_interfaces.validations.validator_helpers import (
SemanticManifestValidationException,
ValidationIssue,
)
from tests.example_project_configuration import EXAMPLE_PROJECT_CONFIGURATION


def check_error_in_issues(error_substrings: List[str], issues: Tuple[ValidationIssue, ...]) -> None:
    """Assert that every expected substring appears in at least one issue's readable string.

    Args:
        error_substrings: substrings expected to occur somewhere in the issues.
        issues: validation issues produced by a manifest build/validation run.

    Raises:
        AssertionError: listing every expected substring that matched no issue.
    """
    missing_error_strings = {
        expected_str
        for expected_str in error_substrings
        if not any(expected_str in actual.as_readable_str() for actual in issues)
    }
    # NOTE: the message must be a single parenthesized expression. Previously the
    # f-string detail sat on its own line after the assert, making it a dead
    # expression statement — failures showed only the generic prefix.
    assert not missing_error_strings, (
        "Failed to match one or more expected issues: "
        f"{missing_error_strings} in {set(x.as_readable_str() for x in issues)}"
    )


def test_metric_no_time_dim_dim_only_source() -> None: # noqa:D
dim_name = "country"
dim2_name = "ename"
Expand Down Expand Up @@ -328,14 +340,7 @@ def test_derived_metric() -> None: # noqa: D
"No input metrics found for derived metric",
"No `expr` set for derived metric",
]
missing_error_strings = set()
for expected_str in expected_substrings:
if not any(actual_str.as_readable_str().find(expected_str) != -1 for actual_str in build_issues):
missing_error_strings.add(expected_str)
assert len(missing_error_strings) == 0, (
f"Failed to match one or more expected errors: {missing_error_strings} in "
f"{set([x.as_readable_str() for x in build_issues])}"
)
check_error_in_issues(error_substrings=expected_substrings, issues=build_issues)


def test_where_filter_validations_happy( # noqa: D
Expand Down Expand Up @@ -578,24 +583,43 @@ def test_conversion_metrics() -> None: # noqa: D
)
),
),
metric_with_guaranteed_meta(
name="filter_on_conversion_measure",
type=MetricType.CONVERSION,
type_params=PydanticMetricTypeParams(
conversion_type_params=PydanticConversionTypeParams(
base_measure=PydanticMetricInputMeasure(name=base_measure_name),
conversion_measure=PydanticMetricInputMeasure(
name=conversion_measure_name,
filter=PydanticWhereFilterIntersection(
where_filters=[
PydanticWhereFilter(where_sql_template="""{{ dimension('some_bool') }}""")
]
),
),
window=window,
entity=entity,
)
),
),
],
project_configuration=EXAMPLE_PROJECT_CONFIGURATION,
)
)

build_issues = result.errors
assert len(build_issues) == 5
expected_substr1 = f"{invalid_entity} not found in base semantic model"
expected_substr2 = f"{invalid_entity} not found in conversion semantic model"
expected_substr3 = "the measure must be COUNT/SUM(1)/COUNT_DISTINCT"
expected_substr4 = "The provided constant property: bad_dim, cannot be found"
expected_substr5 = "The provided constant property: bad_dim2, cannot be found"
missing_error_strings = set()
for expected_str in [expected_substr1, expected_substr2, expected_substr3, expected_substr4, expected_substr5]:
if not any(actual_str.as_readable_str().find(expected_str) != -1 for actual_str in build_issues):
missing_error_strings.add(expected_str)
assert len(missing_error_strings) == 0, "Failed to match one or more expected errors: "
f"{missing_error_strings} in {set([x.as_readable_str() for x in build_issues])}"
build_issues = result.all_issues
assert len(result.errors) == 5
assert len(result.warnings) == 1

expected_substrings = [
f"{invalid_entity} not found in base semantic model",
f"{invalid_entity} not found in conversion semantic model",
"the measure must be COUNT/SUM(1)/COUNT_DISTINCT",
"The provided constant property: bad_dim, cannot be found",
"The provided constant property: bad_dim2, cannot be found",
"filtering on a conversion input measure is not fully supported yet",
]
check_error_in_issues(error_substrings=expected_substrings, issues=build_issues)


def test_cumulative_metrics() -> None: # noqa: D
Expand Down Expand Up @@ -715,17 +739,14 @@ def test_cumulative_metrics() -> None: # noqa: D

build_issues = validation_results.all_issues
assert len(build_issues) == 4
expected_substr1 = "Both window and grain_to_date set for cumulative metric. Please set one or the other."
expected_substr2 = "Got differing values for `window`"
expected_substr3 = "Got differing values for `grain_to_date`"
expected_substr4 = "Cumulative fields `type_params.window` and `type_params.grain_to_date` have been moved"
expected_substr5 = str(sorted({"what_a_metric", "dis_bad", "woooooo", "metric1", "metric2"}))
missing_error_strings = set()
for expected_str in [expected_substr1, expected_substr2, expected_substr3, expected_substr4, expected_substr5]:
if not any(actual_str.as_readable_str().find(expected_str) != -1 for actual_str in build_issues):
missing_error_strings.add(expected_str)
assert len(missing_error_strings) == 0, "Failed to match one or more expected issues: "
f"{missing_error_strings} in {set([x.as_readable_str() for x in build_issues])}"
expected_substrings = [
"Both window and grain_to_date set for cumulative metric. Please set one or the other.",
"Got differing values for `window`",
"Got differing values for `grain_to_date`",
"Cumulative fields `type_params.window` and `type_params.grain_to_date` have been moved",
str(sorted({"what_a_metric", "dis_bad", "woooooo", "metric1", "metric2"})),
]
check_error_in_issues(error_substrings=expected_substrings, issues=build_issues)


def test_time_granularity() -> None:
Expand Down Expand Up @@ -829,11 +850,8 @@ def test_time_granularity() -> None:

build_issues = validation_results.all_issues
assert len(build_issues) == 2
expected_substr1 = "`time_granularity` for metric 'month_metric_with_invalid_time_granularity' must be >= MONTH."
expected_substr2 = "`time_granularity` for metric 'derived_metric_with_invalid_time_granularity' must be >= MONTH."
missing_error_strings = set()
for expected_str in [expected_substr1, expected_substr2]:
if not any(actual_str.as_readable_str().find(expected_str) != -1 for actual_str in build_issues):
missing_error_strings.add(expected_str)
assert len(missing_error_strings) == 0, "Failed to match one or more expected issues: "
f"{missing_error_strings} in {set([x.as_readable_str() for x in build_issues])}"
expected_substrings = [
"`time_granularity` for metric 'month_metric_with_invalid_time_granularity' must be >= MONTH.",
"`time_granularity` for metric 'derived_metric_with_invalid_time_granularity' must be >= MONTH.",
]
check_error_in_issues(error_substrings=expected_substrings, issues=build_issues)

0 comments on commit a462190

Please sign in to comment.