diff --git a/README.md b/README.md index 6ab853a..a0c5b31 100644 --- a/README.md +++ b/README.md @@ -52,22 +52,19 @@ With your schemas defined in code, you can now take advantage of the additional 1. Entity validation: Easily create custom validation rules for your Benchling entities. ```python - from liminal.validation import BenchlingValidator, BenchlingValidatorReport, BenchlingReportLevel - from liminal.orm.base_model import BaseModel - - class CookTempValidator(BenchlingValidator): - """Validates that a field value is a valid enum value for a Benchling entity""" - - def validate(self, entity: type[BaseModel]) -> BenchlingValidatorReport: - valid = True - message = None - if entity.cook_time is not None and entity.cook_temp is None: - valid = False - message = "Cook temp is required if cook time is set" - if entity.cook_time is None and entity.cook_temp is not None: - valid = False - message = "Cook time is required if cook temp is set" - return self.create_report(valid, BenchlingReportLevel.MED, entity, message) + from liminal.validation import ValidationSeverity, liminal_validator + + class Pizza(BaseModel, CustomEntityMixin): + ... + + @liminal_validator(ValidationSeverity.MED) + def cook_time_and_temp_validator(self) -> None: + if self.cook_time is not None and self.cook_temp is None: + raise ValueError("Cook temp is required if cook time is set") + if self.cook_time is None and self.cook_temp is not None: + raise ValueError("Cook time is required if cook temp is set") + + validation_reports = Pizza.validate(session) ``` 2. Strongly typed queries: Write type-safe queries using SQLAlchemy to access your Benchling entities. @@ -75,7 +72,6 @@ With your schemas defined in code, you can now take advantage of the additional ```python with BenchlingSession(benchling_connection, with_db=True) as session: pizza = session.query(Pizza).filter(Pizza.name == "Margherita").first() - print(pizza) ``` 3. CI/CD integration: Use Liminal to automatically generate and apply your revision files to your Benchling tenant(s) as part of your CI/CD pipeline. diff --git a/docs/reference/entity-schemas.md b/docs/reference/entity-schemas.md index 9b9618c..61ba9f3 100644 --- a/docs/reference/entity-schemas.md +++ b/docs/reference/entity-schemas.md @@ -193,44 +193,6 @@ multi_relationship(target_class_name: str, current_class_name: str, entity_link_ The `query()` method must be implemented for the entity schema class to define a custom query. This is useful if you want to add additional filtering or joins to the query. -## Validators: [class](https://github.com/dynotx/liminal-orm/blob/main/liminal/validation/__init__.py) - -As seen in the example above, the `get_validators` method is used to define a list of validators for the entity schema. These validators run on entities of the schema that are queried from Benchling's Postgres database. For example: - -```python -pizza_entity = Pizza.query(session).first() - -# Validate a single entity from a query -report = CookTempValidator().validate(pizza_entity) - -# Validate all entities for a schema -reports = Pizza.validate(session) -``` - -The list of validators within `get_validators` are used to run on all entities of the schema. - -The `BenchlingValidator` object is used to define the validator classes, that can be defined with custom logic to validate entities of a schema. Refer to the [Validators](./validators.md) page to learn more about how to define validators. 
-
-## Additional Functionality
-
-Below is additional functionality that is provided by the Liminal BaseModel class.
-
-```python
-connection = BenchlingConnection(...)
-benchling_service = BenchlingService(connection, use_db=True)
-
-with benchling_service as session:
-
-    # Get all entities for a schema and return a dataframe
-    df = Pizza.df(session)
-
-    # Validate all entities for a schema and return a list of ValidatorReports
-    reports = Pizza.validate(session)
-
-    # Validate all entities for a schema and return a dataframe
-    validated_df = Pizza.validate_to_df(session)
-```
-
 ## Notes
 
 - Note that the Entity Schema definition in Liminal does not cover 100% of the properties that can be set through the Benchling website. However, the goal is to have 100% parity! If you find any missing properties that are not covered in the definition or migration service, please open an issue on [Github](https://github.com/dynotx/liminal-orm/issues). In the meantime, you can manually set the properties through the Benchling website.
diff --git a/docs/reference/validation.md b/docs/reference/validation.md
new file mode 100644
index 0000000..2347dfa
--- /dev/null
+++ b/docs/reference/validation.md
@@ -0,0 +1,141 @@
+When using Benchling to store essential data, it is important to validate the data to ensure it is accurate and consistent. Liminal lets you define validators that check that entities follow key business logic. Below is an example of a Liminal validator that checks the cook time and cook temp of a pizza. Validators are defined within the entity schema and run on data queried from the warehouse.
+
+## Defining a Liminal Validator: [decorator](https://github.com/dynotx/liminal-orm/blob/main/liminal/validation/__init__.py#L61)
+
+Any function decorated with `liminal_validator` is detected as a validator for the entity schema.
+Each validator returns a `BenchlingValidatorReport` object per entity it is run on, with either `valid=True` or `valid=False`.
+
+```python
+from liminal.validation import ValidationSeverity, liminal_validator
+
+class Pizza(BaseModel, CustomEntityMixin):
+    ...
+
+    @liminal_validator(ValidationSeverity.MED)
+    def cook_time_and_temp_validator(self) -> None:
+        if self.cook_time is not None and self.cook_temp is None:
+            raise ValueError("Cook temp is required if cook time is set")
+        if self.cook_time is None and self.cook_temp is not None:
+            raise ValueError("Cook time is required if cook temp is set")
+```
+
+### Parameters
+
+**validator_level: ValidationSeverity**
+
+> The severity of the validator. Defaults to `ValidationSeverity.LOW`.
+
+**validator_name: str | None**
+
+> The name of the validator. Defaults to the pascalized name of the function.
+
+## BenchlingValidatorReport: [class](https://github.com/dynotx/liminal-orm/blob/main/liminal/validation/__init__.py#L13)
+
+### Parameters
+
+**valid : bool**
+
+> Indicates whether the validation passed or failed.
+
+**model : str**
+
+> The name of the model being validated. (eg: Pizza)
+
+**level : ValidationSeverity**
+
+> The severity level of the validation report.
+
+**validator_name : str | None**
+
+> The name of the validator that generated this report. (eg: CookTimeAndTempValidator)
+
+**entity_id : str | None**
+
+> The entity ID of the entity being validated.
+
+**registry_id : str | None**
+
+> The registry ID of the entity being validated.
+
+**entity_name : str | None**
+
+> The name of the entity being validated.
+
+**message : str | None**
+
+> A message describing the result of the validation.
+ +**creator_name : str | None** + +> The name of the creator of the entity being validated. + +**creator_email : str | None** + +> The email of the creator of the entity being validated. + +**updated_date : datetime | None** + +> The date the entity was last updated. + +## Running Validation + +To run validation using Liminal, you can call the `validate()` method on the entity schema. + +```python +with BenchlingSession(benchling_connection, with_db=True) as session: + reports = Pizza.validate(session) +``` + +### Parameters + +**session : Session** + +> The Benchling database session. + +**base_filters: BaseValidatorFilters | None** + +> Filters to apply to the query. + +**only_invalid: bool** + +> If True, only returns reports for entities that failed validation. + +### Returns + +**list[BenchlingValidatorReport]** + +> List of reports from running all validators on all entities returned from the query. + +!!! note + The `validate_to_df` method returns a pandas dataframe with all the reports. + +## BaseValidatorFilters: [class](https://github.com/dynotx/liminal-orm/blob/main/liminal/base/base_validation_filters.py) + +This class is used to pass base filters to benchling warehouse database queries. +These columns are found on all tables in the benchling warehouse database. + +### Parameters + +**created_date_start: date | None** + +> Start date for created date filter. + +**created_date_end: date | None** + +> End date for created date filter. + +**updated_date_start: date | None** + +> Start date for updated date filter. + +**updated_date_end: date | None** + +> End date for updated date filter. + +**entity_ids: list[str] | None** + +> List of entity IDs to filter by. + +**creator_full_names: list[str] | None** + +> List of creator full names to filter by. diff --git a/docs/reference/validators.md b/docs/reference/validators.md deleted file mode 100644 index ed523ce..0000000 --- a/docs/reference/validators.md +++ /dev/null @@ -1,24 +0,0 @@ -## Benchling Base Validator: [class](https://github.com/dynotx/liminal-orm/blob/main/liminal/validation/__init__.py) - -Below is an example of a Benchling Validator defined in Liminal for validating the cook temp of a pizza. - -```python -from liminal.validation import BenchlingValidator, BenchlingValidatorReport, BenchlingReportLevel -from liminal.orm.base_model import BaseModel - -class CookTempValidator(BenchlingValidator): - """Validates that a field value is a valid enum value for a Benchling entity""" - - def validate(self, entity: type[BaseModel]) -> BenchlingValidatorReport: - valid = True - message = None - if entity.cook_time is not None and entity.cook_temp is None: - valid = False - message = "Cook temp is required if cook time is set" - if entity.cook_time is None and entity.cook_temp is not None: - valid = False - message = "Cook time is required if cook temp is set" - return self.create_report(valid, BenchlingReportLevel.MED, entity, message) -``` - -A `validate(entity)` function is required to be defined in the BenchlingValidator subclass. This function should contain the logic to validate the entity. The function should return a `BenchlingValidatorReport` object, which can be easily created using the `create_report` method. 
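The new `docs/reference/validation.md` page above documents the `base_filters` and `only_invalid` options for `validate()` and `validate_to_df()`. A minimal sketch of how the two might be combined, assuming the `Pizza` schema from the examples above, an existing `benchling_connection`, an illustrative cutoff date, and that `BaseValidatorFilters` is importable from the module path added in this change (`BenchlingSession` is used as in the README example):

```python
from datetime import date

from liminal.base.base_validation_filters import BaseValidatorFilters

# Restrict validation to entities created on or after an illustrative cutoff date.
filters = BaseValidatorFilters(created_date_start=date(2024, 1, 1))

with BenchlingSession(benchling_connection, with_db=True) as session:
    # Only the reports for entities that failed validation.
    failing_reports = Pizza.validate(session, base_filters=filters, only_invalid=True)

    # The same run as a pandas dataframe, including passing reports.
    reports_df = Pizza.validate_to_df(session, base_filters=filters)
```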
diff --git a/liminal/base/base_validation_filters.py b/liminal/base/base_validation_filters.py index ab554be..fd9d1be 100644 --- a/liminal/base/base_validation_filters.py +++ b/liminal/base/base_validation_filters.py @@ -7,6 +7,21 @@ class BaseValidatorFilters(BaseModel): """ This class is used to pass base filters to benchling warehouse database queries. These columns are found on all tables in the benchling warehouse database. + + Parameters + ---------- + created_date_start: date | None + Start date for created date filter. + created_date_end: date | None + End date for created date filter. + updated_date_start: date | None + Start date for updated date filter. + updated_date_end: date | None + End date for updated date filter. + entity_ids: list[str] | None + List of entity IDs to filter by. + creator_full_names: list[str] | None + List of creator full names to filter by. """ created_date_start: date | None = None diff --git a/liminal/base/properties/base_field_properties.py b/liminal/base/properties/base_field_properties.py index e5b8b14..0dd3aee 100644 --- a/liminal/base/properties/base_field_properties.py +++ b/liminal/base/properties/base_field_properties.py @@ -65,7 +65,7 @@ def set_warehouse_name(self, wh_name: str) -> BaseFieldProperties: self.warehouse_name = wh_name return self - def validate_column(self, wh_name: str) -> bool: + def validate_column_definition(self, wh_name: str) -> bool: """If the Field Properties are meant to represent a column in Benchling, this will validate the properties and ensure that the entity_link and dropdowns are valid names that exist in our code. """ diff --git a/liminal/entity_schemas/compare.py b/liminal/entity_schemas/compare.py index fda6cb2..a97d695 100644 --- a/liminal/entity_schemas/compare.py +++ b/liminal/entity_schemas/compare.py @@ -63,7 +63,7 @@ def compare_entity_schemas( exclude_base_columns=True ) # Validate the entity_link and dropdown_link reference an entity_schema or dropdown that exists in code. - model.validate_model() + model.validate_model_definition() # if the model table_name is found in the benchling schemas, check for changes... 
if (model_wh_name := model.__schema_properties__.warehouse_name) in [ s.warehouse_name for s, _, _ in benchling_schemas diff --git a/liminal/entity_schemas/generate_files.py b/liminal/entity_schemas/generate_files.py index f510f60..efbe8ce 100644 --- a/liminal/entity_schemas/generate_files.py +++ b/liminal/entity_schemas/generate_files.py @@ -79,7 +79,6 @@ def generate_all_entity_schema_files( "from liminal.orm.base_model import BaseModel", "from liminal.orm.schema_properties import SchemaProperties", "from liminal.enums import BenchlingEntityType, BenchlingFieldType, BenchlingNamingStrategy", - "from liminal.validation import BenchlingValidator", f"from liminal.orm.mixins import {get_entity_mixin(schema_properties.entity_type)}", ] init_strings = [f"{tab}def __init__(", f"{tab}self,"] @@ -151,11 +150,7 @@ def generate_all_entity_schema_files( relationship_string = "\n".join(relationship_strings) import_string = "\n".join(list(set(import_strings))) init_string = f"\n{tab}".join(init_strings) if len(columns) > 0 else "" - functions_string = """ - def get_validators(self) -> list[BenchlingValidator]: - return []""" - - content = f"""{import_string} + full_content = f"""{import_string} class {classname}(BaseModel, {get_entity_mixin(schema_properties.entity_type)}): @@ -168,7 +163,6 @@ class {classname}(BaseModel, {get_entity_mixin(schema_properties.entity_type)}): {init_string} -{functions_string} """ write_directory_path = write_path / get_file_subdirectory( schema_properties.entity_type @@ -181,7 +175,7 @@ class {classname}(BaseModel, {get_entity_mixin(schema_properties.entity_type)}): ) write_directory_path.mkdir(exist_ok=True) with open(write_directory_path / filename, "w") as file: - file.write(content) + file.write(full_content) for subdir, names in subdirectory_map.items(): init_content = ( diff --git a/liminal/enums/__init__.py b/liminal/enums/__init__.py index 02310f9..57e333d 100644 --- a/liminal/enums/__init__.py +++ b/liminal/enums/__init__.py @@ -4,5 +4,4 @@ from liminal.enums.benchling_field_type import BenchlingFieldType from liminal.enums.benchling_folder_item_type import BenchlingFolderItemType from liminal.enums.benchling_naming_strategy import BenchlingNamingStrategy -from liminal.enums.benchling_report_level import BenchlingReportLevel from liminal.enums.benchling_sequence_type import BenchlingSequenceType diff --git a/liminal/external/__init__.py b/liminal/external/__init__.py index df39b25..d8861fb 100644 --- a/liminal/external/__init__.py +++ b/liminal/external/__init__.py @@ -40,6 +40,5 @@ BenchlingFieldType, BenchlingFolderItemType, BenchlingNamingStrategy, - BenchlingReportLevel, BenchlingSequenceType, ) diff --git a/liminal/orm/base_model.py b/liminal/orm/base_model.py index c2f62b8..b9e3d0e 100644 --- a/liminal/orm/base_model.py +++ b/liminal/orm/base_model.py @@ -1,7 +1,8 @@ from __future__ import annotations +import inspect import logging -from abc import abstractmethod +from types import FunctionType from typing import TYPE_CHECKING, Any, Generic, Type, TypeVar # noqa: UP035 import pandas as pd # type: ignore @@ -17,10 +18,7 @@ from liminal.orm.base_tables.user import User from liminal.orm.name_template import NameTemplate from liminal.orm.schema_properties import SchemaProperties -from liminal.validation import ( - BenchlingValidator, - BenchlingValidatorReport, -) +from liminal.validation import BenchlingValidatorReport if TYPE_CHECKING: from liminal.orm.column import Column @@ -170,7 +168,7 @@ def get_columns_dict( return {c.name: c for c in columns} 
@classmethod - def validate_model(cls) -> bool: + def validate_model_definition(cls) -> bool: model_columns = cls.get_columns_dict(exclude_base_columns=True) properties = {n: c.properties for n, c in model_columns.items()} errors = [] @@ -185,7 +183,7 @@ def validate_model(cls) -> bool: ) for wh_name, field in properties.items(): try: - field.validate_column(wh_name) + field.validate_column_definition(wh_name) except ValueError as e: errors.append(str(e)) if errors: @@ -273,25 +271,33 @@ def query(cls, session: Session) -> Query: """ return session.query(cls) - @abstractmethod - def get_validators(self) -> list[BenchlingValidator]: - """Abstract method that all subclasses must implement. Each subclass will have a differently defined list of - validators to validate the entity. These validators will be run on each entity returned from the query. - """ - raise NotImplementedError + @classmethod + def get_validators(cls) -> list[FunctionType]: + """Returns a list of all validators defined on the class. Validators are functions that are decorated with @validator.""" + validators = [] + for name, method in inspect.getmembers(cls, predicate=inspect.isfunction): + if hasattr(method, "_is_liminal_validator"): + validators.append(method) + return validators @classmethod def validate( - cls, session: Session, base_filters: BaseValidatorFilters | None = None + cls, + session: Session, + base_filters: BaseValidatorFilters | None = None, + only_invalid: bool = False, ) -> list[BenchlingValidatorReport]: """Runs all validators for all entities returned from the query and returns a list of reports. + This returns a report for each entity, validator pair, regardless of whether the validation passed or failed. Parameters ---------- session : Session Benchling database session. - base_filters: BenchlingBaseValidatorFilters + base_filters: BaseValidatorFilters Filters to apply to the query. + only_invalid: bool + If True, only returns reports for entities that failed validation. Returns ------- @@ -303,15 +309,21 @@ def validate( cls.query(session), base_filters=base_filters ).all() logger.info(f"Validating {len(table)} entities for {cls.__name__}...") + validator_functions = cls.get_validators() for entity in table: - for validator in entity.get_validators(): - report: BenchlingValidatorReport = validator.validate(entity) + for validator_func in validator_functions: + report: BenchlingValidatorReport = validator_func(entity) + if only_invalid and report.valid: + continue results.append(report) return results @classmethod def validate_to_df( - cls, session: Session, base_filters: BaseValidatorFilters | None = None + cls, + session: Session, + base_filters: BaseValidatorFilters | None = None, + only_invalid: bool = False, ) -> pd.DataFrame: """Runs all validators for all entities returned from the query and returns reports as a pandas dataframe. @@ -319,7 +331,7 @@ def validate_to_df( ---------- session : Session Benchling database session. - base_filters: BenchlingBaseValidatorFilters + base_filters: BaseValidatorFilters Filters to apply to the query. Returns @@ -327,5 +339,5 @@ def validate_to_df( pd.Dataframe Dataframe of reports from running all validators on all entities returned from the query. 
""" - results = cls.validate(session, base_filters) + results = cls.validate(session, base_filters, only_invalid) return pd.DataFrame([r.model_dump() for r in results]) diff --git a/liminal/tests/conftest.py b/liminal/tests/conftest.py index 5ba9aba..92a558c 100644 --- a/liminal/tests/conftest.py +++ b/liminal/tests/conftest.py @@ -429,7 +429,7 @@ def __init__( self.datetime_field = datetime_field self.list_dropdown_field = list_dropdown_field - return [MockEntity] # type: ignore[type-abstract] + return [MockEntity] @pytest.fixture @@ -446,7 +446,7 @@ class MockEntitySmall(BaseModel): name="String Field Required", type=Type.TEXT, required=True, is_multi=False ) - return [MockEntitySmall] # type: ignore[type-abstract] + return [MockEntitySmall] @pytest.fixture @@ -539,4 +539,4 @@ def __init__( self.datetime_field = datetime_field self.list_dropdown_field = list_dropdown_field - return [MockEntityOne, MockEntityTwo] # type: ignore[type-abstract] + return [MockEntityOne, MockEntityTwo] diff --git a/liminal/validation/__init__.py b/liminal/validation/__init__.py index 71556d1..f30735c 100644 --- a/liminal/validation/__init__.py +++ b/liminal/validation/__init__.py @@ -1,13 +1,15 @@ -from abc import ABC, abstractmethod +import inspect from datetime import datetime -from typing import TYPE_CHECKING, Any +from functools import wraps +from typing import TYPE_CHECKING, Callable from pydantic import BaseModel, ConfigDict -from liminal.enums import BenchlingReportLevel +from liminal.utils import pascalize +from liminal.validation.validation_severity import ValidationSeverity if TYPE_CHECKING: - from liminal.orm.base_model import BaseModel as BaseModelBenchling + from liminal.orm.base_model import BaseModel as BenchlingBaseModel class BenchlingValidatorReport(BaseModel): @@ -20,7 +22,7 @@ class BenchlingValidatorReport(BaseModel): Indicates whether the validation passed or failed. model : str The name of the model being validated. (eg: NGSSample) - level : BenchlingReportLevel + level : ValidationSeverity The severity level of the validation report. validator_name : str | None The name of the validator that generated this report. (eg: BioContextValidator) @@ -44,7 +46,7 @@ class BenchlingValidatorReport(BaseModel): valid: bool model: str - level: BenchlingReportLevel + level: ValidationSeverity validator_name: str | None = None entity_id: str | None = None registry_id: str | None = None @@ -57,89 +59,35 @@ class BenchlingValidatorReport(BaseModel): model_config = ConfigDict(extra="allow") - -class BenchlingValidator(ABC): - """Base class for benchling validators.""" - - def __str__(self) -> str: - return self.__class__.__name__ + "()" - - def _prefix(self) -> str: - """Creates a prefix for the formatted error message which includes the class name and any instance variables. - Ex: "BenchlingValidator(field_name=sample_code, field_value=123):" - """ - prefix = f"{self.__class__.__name__}" - if vars(self): - prefix += "(" - for key, val in vars(self).items(): - prefix += f"{key}={self.truncate_msg(val, max_len=50)}, " - prefix = prefix[:-2] + "):" - else: - prefix += ":" - return prefix - - @abstractmethod - def validate(self, entity: type["BaseModelBenchling"]) -> BenchlingValidatorReport: - """Abstract method that all validator subclass must implement. Each subclass will have a differently defined validation - function that runs on the given benchling entity. - - Parameters - ---------- - entity : type["BaseModelBenchling"] - The Benchling entity to validate. 
- - Returns - ------- - BenchlingValidatorReport - A report indicating whether the validation passed or failed, and any additional metadata. - """ - raise NotImplementedError - - def __getattribute__(self, name: str) -> Any: - attr = super().__getattribute__(name) - if name == "validate": - # Wrap the validate method in a try-except block to catch any unexpected errors that occur during validation. - # If an unexpected error occurs, return a BenchlingValidatorReport with the unexpected error message. - def try_except_wrapped_func( - *args: Any, **kwargs: dict - ) -> BenchlingValidatorReport: - try: - return attr(*args, **kwargs) - except Exception as e: - entity: type[BaseModelBenchling] = args[0] - return BenchlingValidatorReport( - valid=False, - model=entity.__class__.__name__, - validator_name=self.__class__.__name__, - level=BenchlingReportLevel.UNEXPECTED, - entity_id=entity.id, - registry_id=entity.file_registry_id, - entity_name=entity.name, - web_url=entity.url if entity.url else None, - creator_name=entity.creator.name if entity.creator else None, - creator_email=entity.creator.email if entity.creator else None, - updated_date=entity.modified_at, - message=f"Unexpected exception: {e}", - ) - - return try_except_wrapped_func - return attr - @classmethod - def create_report( + def create_validation_report( cls, valid: bool, - level: BenchlingReportLevel, - entity: type["BaseModelBenchling"], + level: ValidationSeverity, + entity: type["BenchlingBaseModel"], + validator_name: str, message: str | None = None, - **kwargs: Any, - ) -> BenchlingValidatorReport: - """Creates a BenchlingValidatorReport with the given parameters.""" - return BenchlingValidatorReport( + ) -> "BenchlingValidatorReport": + """Creates a BenchlingValidatorReport with the given parameters. + + Parameters + ---------- + valid: bool + Indicates whether the validation passed or failed. + level: ValidationSeverity + The severity level of the validation report. + entity: type[BenchlingBaseModel] + The entity being validated. + validator_name: str + The name of the validator that generated this report. + message: str | None + A message describing the result of the validation. + """ + return cls( valid=valid, level=level, model=entity.__class__.__name__, - validator_name=cls.__name__, + validator_name=validator_name, entity_id=entity.id, registry_id=entity.file_registry_id, entity_name=entity.name, @@ -148,31 +96,59 @@ def create_report( creator_email=entity.creator.email if entity.creator else None, updated_date=entity.modified_at, message=message, - **kwargs, ) - def format_err(self, *msgs: str | None) -> str: - """Creates a formatted error message from the given messages. The first message is prefixed with the class name and any instance variables. - Ex: "BenchlingValidator(field_name=sample_code, field_value=123): The field value is invalid | The field value is too long" - """ - ret_val = "" - for ind, msg in enumerate(msgs): - if ind == 0: - if (msg is None) or (msg == ""): - continue - elif not msg.startswith(self._prefix()): - ret_val += f"{self._prefix()} {msg}" - else: - ret_val += f"{msg}" - elif ((msgs[0] is None) or (msgs[0] == "")) and (ind == 1): - ret_val += f"{self._prefix()} {msg}" - else: - ret_val += f" | {msg}" - return ret_val - - def truncate_msg(self, msg: Any, max_len: int = 150) -> str: - """Shortens the given message to the given max length. 
If the message is longer than the max length, it is truncated and an ellipsis is added to the end."""
-        msg = str(msg)
-        if len(msg) > max_len:
-            return f"{msg[:max_len]}..."
-        return msg
+
+def liminal_validator(
+    validator_level: ValidationSeverity = ValidationSeverity.LOW,
+    validator_name: str | None = None,
+) -> Callable:
+    """A decorator that marks a method of an entity schema class as a Liminal validator. The decorated method must take only `self` and return None.
+
+    Parameters:
+        validator_level: ValidationSeverity
+            The severity level of the validator. Defaults to ValidationSeverity.LOW.
+        validator_name: str | None
+            The name of the validator. Defaults to the pascalized name of the function.
+    """
+
+    def decorator(func: Callable[[type["BenchlingBaseModel"]], None]) -> Callable:
+        """Checks the validator's signature and wraps it so that it returns a BenchlingValidatorReport."""
+        sig = inspect.signature(func)
+        params = list(sig.parameters.values())
+        if not params or params[0].name != "self" or len(params) > 1:
+            raise TypeError(
+                "Validator must be defined in a schema class, where the only argument to the validator must be 'self'."
+            )
+
+        if sig.return_annotation is not None:
+            raise TypeError("The return type must be None.")
+
+        nonlocal validator_name
+        if validator_name is None:
+            validator_name = pascalize(func.__name__)
+
+        @wraps(func)
+        def wrapper(self: type["BenchlingBaseModel"]) -> BenchlingValidatorReport:
+            """Wrapper that runs the validator function and returns a BenchlingValidatorReport."""
+            try:
+                func(self)
+            except Exception as e:
+                return BenchlingValidatorReport.create_validation_report(
+                    valid=False,
+                    level=validator_level,
+                    entity=self,
+                    validator_name=validator_name,
+                    message=str(e),
+                )
+            return BenchlingValidatorReport.create_validation_report(
+                valid=True,
+                level=validator_level,
+                entity=self,
+                validator_name=validator_name,
+            )
+
+        setattr(wrapper, "_is_liminal_validator", True)
+        return wrapper
+
+    return decorator
diff --git a/liminal/enums/benchling_report_level.py b/liminal/validation/validation_severity.py
similarity index 71%
rename from liminal/enums/benchling_report_level.py
rename to liminal/validation/validation_severity.py
index b1bae23..9329cbe 100644
--- a/liminal/enums/benchling_report_level.py
+++ b/liminal/validation/validation_severity.py
@@ -1,8 +1,8 @@
 from liminal.base.str_enum import StrEnum
 
 
-class BenchlingReportLevel(StrEnum):
-    """This enum represents the different levels of validation that can be returned by Benchling."""
+class ValidationSeverity(StrEnum):
+    """This enum represents the different severity levels that a Liminal validation report can have."""
 
     LOW = "LOW"
     MED = "MED"
diff --git a/mkdocs.yml b/mkdocs.yml
index f7923a6..ef82ca9 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -14,7 +14,7 @@ nav:
       - reference/benchling-session.md
       - reference/dropdowns.md
       - reference/entity-schemas.md
-      - reference/validators.md
+      - reference/validation.md
      - reference/revision-file.md
      - reference/operations.md
  - Case Studies:
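Taken together, these changes replace the class-based `BenchlingValidator` with the `liminal_validator` decorator and the `ValidationSeverity` enum. A minimal end-to-end sketch of the new workflow, assuming a `Pizza` custom entity schema with `cook_time` and `cook_temp` fields, an existing `benchling_connection`, and that `CustomEntityMixin` is importable from `liminal.orm.mixins` as in the generated schema files (the report-printing loop is illustrative):

```python
from liminal.orm.base_model import BaseModel
from liminal.orm.mixins import CustomEntityMixin
from liminal.validation import ValidationSeverity, liminal_validator


class Pizza(BaseModel, CustomEntityMixin):
    ...  # schema properties and columns omitted for brevity

    @liminal_validator(ValidationSeverity.MED)
    def cook_time_and_temp_validator(self) -> None:
        if self.cook_time is not None and self.cook_temp is None:
            raise ValueError("Cook temp is required if cook time is set")
        if self.cook_time is None and self.cook_temp is not None:
            raise ValueError("Cook time is required if cook temp is set")


with BenchlingSession(benchling_connection, with_db=True) as session:
    # One BenchlingValidatorReport per (entity, validator) pair.
    reports = Pizza.validate(session)
    for report in reports:
        if not report.valid:
            print(report.entity_name, report.validator_name, report.message)

    # The same reports as a pandas dataframe, keeping only failures.
    failures_df = Pizza.validate_to_df(session, only_invalid=True)
```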