From 65446a788f8556e9aae612ab3b670e58c7a5138c Mon Sep 17 00:00:00 2001
From: srh-sloan
Date: Fri, 12 Apr 2024 08:57:27 +0100
Subject: [PATCH] fs-3530 removing eoi from decision naming (#135)

* fs-3530 removing eoi from decision naming

* Update version to 2.0.45

---------

Co-authored-by: FSD Github Actions
---
 fsd_utils/__init__.py                     |   8 +-
 fsd_utils/{eoi => decision}/__init__.py   |   0
 .../evaluate_response_against_schema.py}  |  22 ++--
 pyproject.toml                            |   2 +-
 tests/{test_eoi.py => test_decisions.py}  | 110 +++++++++---------
 5 files changed, 72 insertions(+), 70 deletions(-)
 rename fsd_utils/{eoi => decision}/__init__.py (100%)
 rename fsd_utils/{eoi/evaluate_eoi_response.py => decision/evaluate_response_against_schema.py} (86%)
 rename tests/{test_eoi.py => test_decisions.py} (76%)

diff --git a/fsd_utils/__init__.py b/fsd_utils/__init__.py
index 9ee9616..12a5f1f 100644
--- a/fsd_utils/__init__.py
+++ b/fsd_utils/__init__.py
@@ -7,8 +7,8 @@
 from fsd_utils.config.commonconfig import CommonConfig  # noqa
 from fsd_utils.config.configclass import configclass  # noqa
 from fsd_utils.config.notify_constants import NotifyConstants  # noqa
-from fsd_utils.eoi.evaluate_eoi_response import Eoi_Decision
-from fsd_utils.eoi.evaluate_eoi_response import evaluate_eoi_response
+from fsd_utils.decision.evaluate_response_against_schema import Decision
+from fsd_utils.decision.evaluate_response_against_schema import evaluate_response
 from fsd_utils.locale_selector.set_lang import LanguageSelector
 from fsd_utils.mapping.application.application_utils import generate_text_of_application
 from fsd_utils.mapping.application.qa_mapping import (
@@ -34,7 +34,7 @@
     toggles,
     generate_text_of_application,
     extract_questions_and_answers,
-    evaluate_eoi_response,
-    Eoi_Decision,
+    evaluate_response,
+    Decision,
     services,
 ]
diff --git a/fsd_utils/eoi/__init__.py b/fsd_utils/decision/__init__.py
similarity index 100%
rename from fsd_utils/eoi/__init__.py
rename to fsd_utils/decision/__init__.py
diff --git a/fsd_utils/eoi/evaluate_eoi_response.py b/fsd_utils/decision/evaluate_response_against_schema.py
similarity index 86%
rename from fsd_utils/eoi/evaluate_eoi_response.py
rename to fsd_utils/decision/evaluate_response_against_schema.py
index 8235ed5..fde9803 100644
--- a/fsd_utils/eoi/evaluate_eoi_response.py
+++ b/fsd_utils/decision/evaluate_response_against_schema.py
@@ -1,7 +1,7 @@
 from enum import IntEnum
 
 
-class Eoi_Decision(IntEnum):
+class Decision(IntEnum):
     PASS = 0
     PASS_WITH_CAVEATS = 1
     FAIL = 2
@@ -12,7 +12,7 @@ class Eoi_Decision(IntEnum):
 
 def _evaluate_with_supplied_operators(
     conditions_to_evaluate: list, supplied_answer: any
-) -> tuple[Eoi_Decision, list]:
+) -> tuple[Decision, list]:
     """Evaluates an expression built from the operator in the schema, the value to compare, and the supplied answer.
     Uses the result of the evaluation to return a decision and applicable caveats.
     Casts the value to a float for comparison.
@@ -26,9 +26,9 @@ def _evaluate_with_supplied_operators(
             cannot be converted to a float
 
     Returns:
-        tuple[Eoi_Decision, list]: Tuple of the decision and the caveats (if there are any)
+        tuple[Decision, list]: Tuple of the decision and the caveats (if there are any)
     """
-    decision = Eoi_Decision.PASS
+    decision = Decision.PASS
     caveats = []
     for ec in conditions_to_evaluate:
         # validate supplied operator
@@ -54,14 +54,14 @@ def _evaluate_with_supplied_operators(
         if ec["caveat"]:
             caveats.append(ec["caveat"])
 
-    if decision == Eoi_Decision.FAIL:
+    if decision == Decision.FAIL:
         return decision, []  # don't return caveats for failure
     else:
         return decision, caveats
 
 
-def evaluate_eoi_response(schema: dict, forms: dict) -> dict:
-    """Takes in an EOI schema and a set of forms containing responses, then makes a decision on the EOI outcome
+def evaluate_response(schema: dict, forms: dict) -> dict:
+    """Takes in a decision schema and a set of forms containing responses, then makes a decision on the outcome
 
     Args:
         schema (dict): Schema defining decisions based on answers
@@ -69,11 +69,11 @@ def evaluate_eoi_response(schema: dict, forms: dict) -> dict:
 
     Returns:
         dict: Results of decision:
-            decision: value of Eoi_Decision ENUM
+            decision: value of Decision ENUM
             caveats: list of strings of caveats for answers if decision is 'Pass with caveats', otherwise empty list
     """
-    result = {"decision": Eoi_Decision.PASS, "caveats": []}
+    result = {"decision": Decision.PASS, "caveats": []}
 
     # Loop through every form, then every response in that form
     for form in forms:
@@ -112,7 +112,7 @@ def evaluate_eoi_response(schema: dict, forms: dict) -> dict:
             result["caveats"] += caveats
 
         # If we failed on this question, we don't need to evaluate any further, just return a fail
-        if result["decision"] == Eoi_Decision.FAIL:
-            return {"decision": Eoi_Decision.FAIL, "caveats": []}
+        if result["decision"] == Decision.FAIL:
+            return {"decision": Decision.FAIL, "caveats": []}
 
     return result
diff --git a/pyproject.toml b/pyproject.toml
index 4fc818b..7a045eb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "funding-service-design-utils"
-version = "2.0.44"
+version = "2.0.45"
 authors = [
   { name="DLUHC", email="FundingServiceDesignTeam@levellingup.gov.uk" },
diff --git a/tests/test_eoi.py b/tests/test_decisions.py
similarity index 76%
rename from tests/test_eoi.py
rename to tests/test_decisions.py
index f26667f..27352b2 100644
--- a/tests/test_eoi.py
+++ b/tests/test_decisions.py
@@ -1,81 +1,83 @@
 from copy import deepcopy
 
 import pytest
-from fsd_utils import Eoi_Decision
-from fsd_utils import evaluate_eoi_response
-from fsd_utils.eoi.evaluate_eoi_response import _evaluate_with_supplied_operators
+from fsd_utils import Decision
+from fsd_utils import evaluate_response
+from fsd_utils.decision.evaluate_response_against_schema import (
+    _evaluate_with_supplied_operators,
+)
 
 TEST_SCHEMA_1 = {
     "aaa111": [
-        {"answerValue": "charity", "result": Eoi_Decision.PASS, "caveat": None},
+        {"answerValue": "charity", "result": Decision.PASS, "caveat": None},
         {
             "answerValue": "parish_council",
-            "result": Eoi_Decision.PASS,
+            "result": Decision.PASS,
             "caveat": None,
         },
         {
             "answerValue": "plc",
-            "result": Eoi_Decision.FAIL,
+            "result": Decision.FAIL,
             "caveat": None,
         },
     ],
     "bbb222": [
-        {"answerValue": True, "result": Eoi_Decision.PASS, "caveat": None},
{"answerValue": False, "result": Eoi_Decision.FAIL, "caveat": None}, + {"answerValue": True, "result": Decision.PASS, "caveat": None}, + {"answerValue": False, "result": Decision.FAIL, "caveat": None}, ], "ccc333": [ - {"answerValue": "10", "result": Eoi_Decision.PASS, "caveat": None}, + {"answerValue": "10", "result": Decision.PASS, "caveat": None}, { "answerValue": "15", - "result": Eoi_Decision.PASS_WITH_CAVEATS, + "result": Decision.PASS_WITH_CAVEATS, "caveat": "Try not to cut down trees: This is a bit high", }, - {"answerValue": "20", "result": Eoi_Decision.FAIL, "caveat": None}, + {"answerValue": "20", "result": Decision.FAIL, "caveat": None}, ], "eee555": [ - {"answerValue": "a", "result": Eoi_Decision.PASS, "caveat": None}, + {"answerValue": "a", "result": Decision.PASS, "caveat": None}, { "answerValue": "b", - "result": Eoi_Decision.PASS_WITH_CAVEATS, + "result": Decision.PASS_WITH_CAVEATS, "caveat": "Caveat heading: some caveat text", }, - {"answerValue": "c", "result": Eoi_Decision.FAIL, "caveat": None}, + {"answerValue": "c", "result": Decision.FAIL, "caveat": None}, ], "fff666": [ { "operator": "<=", "compareValue": 4, - "result": Eoi_Decision.PASS, + "result": Decision.PASS, "caveat": None, }, { "operator": ">=", "compareValue": 5, - "result": Eoi_Decision.PASS_WITH_CAVEATS, + "result": Decision.PASS_WITH_CAVEATS, "caveat": "A caveat: Try and reduce this", }, { "operator": ">=", "compareValue": 10, - "result": Eoi_Decision.FAIL, + "result": Decision.FAIL, "caveat": None, }, ], # add a deliberatly weird set of conditions "ggg777": [ - {"answerValue": "10", "result": Eoi_Decision.PASS, "caveat": None}, - {"answerValue": "7", "result": Eoi_Decision.PASS, "caveat": None}, + {"answerValue": "10", "result": Decision.PASS, "caveat": None}, + {"answerValue": "7", "result": Decision.PASS, "caveat": None}, { "operator": "<", "compareValue": 10, - "result": Eoi_Decision.PASS_WITH_CAVEATS, + "result": Decision.PASS_WITH_CAVEATS, "caveat": "A caveat", }, { "operator": ">", "compareValue": 10, - "result": Eoi_Decision.FAIL, + "result": Decision.FAIL, "caveat": None, }, ], @@ -166,31 +168,31 @@ ( # All pass (no changes) {}, TEST_SCHEMA_1, - Eoi_Decision.PASS, + Decision.PASS, [], ), ( # All pass but one fail {"bbb222": False}, TEST_SCHEMA_1, - Eoi_Decision.FAIL, + Decision.FAIL, [], ), ( # One pass with caveats, one fail {"ccc333": "15", "bbb222": False}, TEST_SCHEMA_1, - Eoi_Decision.FAIL, + Decision.FAIL, [], ), ( # All pass but one pass with caveats {"ccc333": "15"}, TEST_SCHEMA_1, - Eoi_Decision.PASS_WITH_CAVEATS, + Decision.PASS_WITH_CAVEATS, ["Try not to cut down trees: This is a bit high"], ), ( # Most pass but 2 pass with caveats - one number, one string, both value based {"eee555": "b", "ccc333": "15"}, TEST_SCHEMA_1, - Eoi_Decision.PASS_WITH_CAVEATS, + Decision.PASS_WITH_CAVEATS, [ "Try not to cut down trees: This is a bit high", "Caveat heading: some caveat text", @@ -199,7 +201,7 @@ ( # Most pass but 2 pass with caveats - one number - operator based, one string {"fff666": "6", "eee555": "b"}, TEST_SCHEMA_1, - Eoi_Decision.PASS_WITH_CAVEATS, + Decision.PASS_WITH_CAVEATS, [ "Caveat heading: some caveat text", "A caveat: Try and reduce this", @@ -208,7 +210,7 @@ ( # Most pass, one pass with caveats based on operator with contradicting value condition {"ggg777": "7"}, TEST_SCHEMA_1, - Eoi_Decision.PASS_WITH_CAVEATS, + Decision.PASS_WITH_CAVEATS, [ "A caveat", ], @@ -216,7 +218,7 @@ ( # Most pass, one pass with caveats based on operator {"ggg777": "3"}, TEST_SCHEMA_1, - 
-        Eoi_Decision.PASS_WITH_CAVEATS,
+        Decision.PASS_WITH_CAVEATS,
         [
             "A caveat",
         ],
@@ -225,7 +227,7 @@
         # most pass, one fails based on operator
         {"ggg777": "12"},
         TEST_SCHEMA_1,
-        Eoi_Decision.FAIL,
+        Decision.FAIL,
         [],
     ),
 ],
 )
@@ -242,7 +244,7 @@ def test_schema_parsing(answers: dict, schema, exp_result, exp_caveats):
             x["answer"] = answer[1]
 
     # get a result
-    result = evaluate_eoi_response(schema, input_forms)
+    result = evaluate_response(schema, input_forms)
 
     # confirm result is as expected
     assert result["decision"] == exp_result
@@ -253,31 +255,31 @@ def test_schema_parsing(answers: dict, schema, exp_result, exp_caveats):
     {
         "operator": "<=",
         "compareValue": 4,
-        "result": Eoi_Decision.PASS,
+        "result": Decision.PASS,
         "caveat": None,
     },
     {
        "operator": ">=",
         "compareValue": 5,
-        "result": Eoi_Decision.PASS_WITH_CAVEATS,
+        "result": Decision.PASS_WITH_CAVEATS,
         "caveat": "A caveat: more than 5",
     },
     {
         "operator": "==",
         "compareValue": 7,
-        "result": Eoi_Decision.PASS_WITH_CAVEATS,
+        "result": Decision.PASS_WITH_CAVEATS,
         "caveat": "A caveat: equals 7",
     },
     {
         "operator": ">",
         "compareValue": 10,
-        "result": Eoi_Decision.FAIL,
+        "result": Decision.FAIL,
         "caveat": None,
     },
     {
         "operator": "<",
         "compareValue": 0,
-        "result": Eoi_Decision.PASS,
+        "result": Decision.PASS,
         "caveat": None,
     },
 ]
@@ -286,24 +288,24 @@
 @pytest.mark.parametrize(
     "answer,exp_decision,exp_caveats",
     [
-        (2, Eoi_Decision.PASS, []),
-        ("2", Eoi_Decision.PASS, []),
-        (-1, Eoi_Decision.PASS, []),
-        ("-1", Eoi_Decision.PASS, []),
-        (5, Eoi_Decision.PASS_WITH_CAVEATS, ["A caveat: more than 5"]),
-        ("5", Eoi_Decision.PASS_WITH_CAVEATS, ["A caveat: more than 5"]),
+        (2, Decision.PASS, []),
+        ("2", Decision.PASS, []),
+        (-1, Decision.PASS, []),
+        ("-1", Decision.PASS, []),
+        (5, Decision.PASS_WITH_CAVEATS, ["A caveat: more than 5"]),
+        ("5", Decision.PASS_WITH_CAVEATS, ["A caveat: more than 5"]),
         (
             7,
-            Eoi_Decision.PASS_WITH_CAVEATS,
+            Decision.PASS_WITH_CAVEATS,
             ["A caveat: more than 5", "A caveat: equals 7"],
         ),
         (
             "7",
-            Eoi_Decision.PASS_WITH_CAVEATS,
+            Decision.PASS_WITH_CAVEATS,
             ["A caveat: more than 5", "A caveat: equals 7"],
         ),
-        (12, Eoi_Decision.FAIL, []),
-        ("12", Eoi_Decision.FAIL, []),
+        (12, Decision.FAIL, []),
+        ("12", Decision.FAIL, []),
     ],
 )
 def test_evaluate_operators(answer, exp_decision, exp_caveats):
@@ -326,7 +328,7 @@ def test_operator_validation_success(operator):
     condition = {
         "operator": operator,
         "compareValue": 4,
-        "result": Eoi_Decision.PASS,
+        "result": Decision.PASS,
         "caveat": None,
     }
     _evaluate_with_supplied_operators([condition], 1)
@@ -348,7 +350,7 @@ def test_operator_validation_failures(operator):
     condition = {
         "operator": operator,
         "compareValue": 4,
-        "result": Eoi_Decision.PASS,
+        "result": Decision.PASS,
         "caveat": None,
     }
 
@@ -359,12 +361,12 @@ def test_operator_validation_failures(operator):
 
 def test_no_questions_hit_conditions():
     TEST_SCHEMA = {
         "does_not_exist": [
-            {"answerValue": True, "result": Eoi_Decision.PASS, "caveat": None},
-            {"answerValue": False, "result": Eoi_Decision.FAIL, "caveat": None},
+            {"answerValue": True, "result": Decision.PASS, "caveat": None},
+            {"answerValue": False, "result": Decision.FAIL, "caveat": None},
         ],
     }
 
-    result = evaluate_eoi_response(TEST_SCHEMA, TEST_EOI_FORMS_1)
-    assert result["decision"] == Eoi_Decision.PASS
+    result = evaluate_response(TEST_SCHEMA, TEST_EOI_FORMS_1)
+    assert result["decision"] == Decision.PASS
     assert result["caveats"] == []
@@ -386,7 +388,7 @@ def test_answer_validation_failure(supplied_answer):
     condition = {
         "operator": "<",
         "compareValue": 4,
-        "result": Eoi_Decision.PASS,
+        "result": Decision.PASS,
         "caveat": None,
     }
     with pytest.raises(ValueError):
@@ -401,7 +403,7 @@ def test_answer_validation_success(supplied_answer):
     condition = {
         "operator": "<",
         "compareValue": 4,
-        "result": Eoi_Decision.PASS,
+        "result": Decision.PASS,
         "caveat": None,
    }
     _evaluate_with_supplied_operators([condition], supplied_answer)
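
Usage sketch for the renamed API (not part of the patch). `Decision` and `evaluate_response` are the exports introduced above; the schema follows the condition format exercised by TEST_SCHEMA_1, but the forms payload shape (questions/fields carrying a "key" and an "answer") is an assumption modelled on the test fixtures, which this diff only shows in part:

    from fsd_utils import Decision, evaluate_response

    # Hypothetical schema: keys are question field keys; each condition is
    # either value-based ("answerValue") or operator-based
    # ("operator"/"compareValue"), as in the tests above.
    schema = {
        "aaa111": [
            {"answerValue": "charity", "result": Decision.PASS, "caveat": None},
            {"answerValue": "plc", "result": Decision.FAIL, "caveat": None},
        ],
        "fff666": [
            {
                "operator": ">=",
                "compareValue": 5,
                "result": Decision.PASS_WITH_CAVEATS,
                "caveat": "A caveat: Try and reduce this",
            },
        ],
    }

    # Assumed forms shape: each response has a "key" matching a schema entry
    # and the applicant's "answer".
    forms = [
        {
            "questions": [
                {
                    "fields": [
                        {"key": "aaa111", "answer": "charity"},
                        {"key": "fff666", "answer": "6"},
                    ]
                }
            ]
        }
    ]

    result = evaluate_response(schema, forms)
    # "charity" passes outright; 6 >= 5 passes with a caveat, so the overall
    # decision is PASS_WITH_CAVEATS and the caveat is surfaced.
    assert result["decision"] == Decision.PASS_WITH_CAVEATS
    assert result["caveats"] == ["A caveat: Try and reduce this"]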