This repository has been archived by the owner on Dec 18, 2023. It is now read-only.
Merge pull request #25 from credo-ai/release/0.0.8
Release/0.0.8
IanAtCredo authored Jan 19, 2023
2 parents d5adf5a + 8efa6a8 commit 8733806
Showing 7 changed files with 212 additions and 12 deletions.
35 changes: 35 additions & 0 deletions .github/workflows/merge-main-develop.yml
@@ -0,0 +1,35 @@
name: Merge main into develop

on:
  workflow_dispatch:
  push:
    branches:
      - main

env:
  SRC_BRANCH: main
  TGT_BRANCH: develop

jobs:
  main:
    name: Merge main into develop
    runs-on: ubuntu-latest
    steps:
      - name: git checkout
        uses: actions/checkout@v3
        with:
          token: ${{ secrets.CREDOAIBOT_TOKEN }}
          ref: refs/heads/main
          fetch-depth: 0

      - name: perform merge
        run: |
          git config --global user.email "${GITHUB_ACTOR}@users.noreply.github.com"
          git config --global user.name "${GITHUB_ACTOR}"
          git status
          git pull
          git checkout "$TGT_BRANCH"
          git status
          git merge "$SRC_BRANCH" --no-edit
          git push
          git status
2 changes: 1 addition & 1 deletion connect/_version.py
@@ -2,4 +2,4 @@
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module
-__version__ = "0.0.7.1"
+__version__ = "0.0.8"
5 changes: 4 additions & 1 deletion connect/adapters/adapters.py
@@ -30,11 +30,14 @@ def __init__(
        governance: Governance,
        model_name: str,
        model_tags: Optional[dict] = None,
        model_version: Optional[str] = None,
        assessment_dataset_name: str = None,
    ):

        self.governance = governance
-        self.governance.set_artifacts(model_name, model_tags, assessment_dataset_name)
+        self.governance.set_artifacts(
+            model_name, model_tags, model_version, assessment_dataset_name
+        )

    def metrics_to_governance(
        self,
2 changes: 1 addition & 1 deletion connect/evidence/evidence.py
@@ -154,7 +154,7 @@ def __init__(
        self.significant = (
            True if self.p_value <= self.significance_threshold else False
        )
-        super().__init__("statisticTest", additional_labels, **metadata)
+        super().__init__("statistical_test", additional_labels, **metadata)

    @property
    def data(self):
62 changes: 62 additions & 0 deletions connect/governance/credo_api.py
@@ -160,3 +160,65 @@ def get_assessment(self, use_case_id: str, id: str):
        """
        path = f"use_cases/{use_case_id}/assessments/{id}"
        return self._client.get(path)

    def update_use_case_model_link_tags(
        self, use_case_id: str, model_link_id: str, tags: dict
    ):
        """
        Update tags of use case model link
        Parameters
        ----------
        use_case_id : str
            use case id
        model_link_id : str
            use case model link id
        tags : dict
            model tags like {"key": "value"}
        Returns
        -------
        None
        Raises
        ------
        HTTPError
            When API request returns error
        """

        path = f"use_cases/{use_case_id}/model_links/{model_link_id}"
        data = {"tags": tags, "$type": "use_case_model_links", "id": model_link_id}
        return self._client.patch(path, data)

    def update_use_case_model_link_version(
        self, use_case_id: str, model_link_id: str, version: str
    ):
        """
        Update version of use case model link
        Parameters
        ----------
        use_case_id : str
            use case id
        model_link_id : str
            use case model link id
        version : str
            model version
        Returns
        -------
        None
        Raises
        ------
        HTTPError
            When API request returns error
        """

        path = f"use_cases/{use_case_id}/model_links/{model_link_id}"
        data = {
            "model_version": version,
            "$type": "use_case_model_links",
            "id": model_link_id,
        }
        return self._client.patch(path, data)
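
For quick reference, the sketch below reconstructs the request path and the JSON:API-style payloads that these two new methods PATCH to the platform. The identifiers, tag values, and version string are invented for illustration; the snippet is self-contained and does not use the real client.

# Illustrative only: path and payload shapes mirror the methods above; IDs and values are made up.
use_case_id = "uc-123"
model_link_id = "ml-456"
path = f"use_cases/{use_case_id}/model_links/{model_link_id}"

tags_payload = {
    "tags": {"stage": "production"},
    "$type": "use_case_model_links",
    "id": model_link_id,
}
version_payload = {
    "model_version": "0.0.8",
    "$type": "use_case_model_links",
    "id": model_link_id,
}

print(path)             # use_cases/uc-123/model_links/ml-456
print(tags_payload)     # body sent by update_use_case_model_link_tags
print(version_payload)  # body sent by update_use_case_model_link_version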
8 changes: 8 additions & 0 deletions connect/governance/credo_api_client.py
@@ -148,6 +148,14 @@ def __make_request(self, method: str, path: str, **kwargs):
            self.refresh_token()
            response = self._session.request(method, endpoint, **kwargs)

        if response.status_code >= 400:
            data = response.json()
            if data:
                for error in data.get("errors", []):
                    global_logger.error(
                        f"Error happened from [{method.upper()}] {endpoint} : Message={error['title']}, Error Detail={error['detail']}"
                    )

        response.raise_for_status()

        if response.content:
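
The new logging branch assumes a JSON:API-style error body. Here is a minimal, self-contained sketch of the shape it expects (error values invented) and how the loop reads it:

# Invented example of what response.json() is expected to return on a 4xx/5xx response
data = {
    "errors": [
        {"title": "Unprocessable Entity", "detail": "model_version must be a string"}
    ]
}

for error in data.get("errors", []):
    print(f"Message={error['title']}, Error Detail={error['detail']}")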
110 changes: 101 additions & 9 deletions connect/governance/governance.py
@@ -103,6 +103,40 @@ def add_evidence(self, evidences: Union[Evidence, List[Evidence]]):
        """
        self._evidences += wrap_list(evidences)

    def apply_model_changes(self):
        """
        Update the platform model's tags and version on Credo AI Governance if they have changed.

        This method updates the platform model linked to the assessment plan with the tags and
        version of the local model registered with this Governance instance. If no model has
        been registered on the platform, nothing is updated.
        """
        # association between keys and api calls
        api_calls = {
            "tags": self._api.update_use_case_model_link_tags,
            "model_version": self._api.update_use_case_model_link_version,
        }

        # find model_link with model name from assessment plan
        plan_model = self._find_plan_model()
        if plan_model is None:
            return

        model_info = self.get_model_info()
        plan_model_info = self._get_model_info(plan_model)
        for key in model_info.keys():
            model_value = model_info[key]
            plan_model_value = plan_model_info[key]
            if model_value != plan_model_value:
                global_logger.info(
                    "%s\n%s",
                    f"Platform model and local model {key} do not match. Platform {key}: {plan_model_value}, Local {key}: {model_value}\n",
                    f"Updated platform model {key}...",
                )
                api_call = api_calls[key]
                api_call(self._use_case_id, plan_model["id"], model_value)
                plan_model[key] = model_value

    def clear_evidence(self):
        self.set_evidence([])
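
To make the hunk above easier to follow, here is a self-contained sketch, with invented values, of the tag and version comparison that apply_model_changes performs before calling the platform API:

# Invented local vs. platform model info; mirrors the comparison loop in apply_model_changes
local_info = {"tags": {"stage": "production"}, "model_version": "0.0.8"}
platform_info = {"tags": {"stage": "staging"}, "model_version": "0.0.7.1"}

for key, local_value in local_info.items():
    platform_value = platform_info.get(key)
    if local_value != platform_value:
        print(f"{key} differs: platform={platform_value!r}, local={local_value!r} -> update the platform")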

@@ -174,7 +208,7 @@ def get_evidence_requirements(self, tags: dict = None, verbose=False):
        List[EvidenceRequirement]
        """
        if tags is None:
-            tags = self.get_model_tags()
+            tags = self.get_model_info()["tags"]

        reqs = [e for e in self._evidence_requirements if check_subset(e.tags, tags)]
        if verbose:
@@ -185,12 +219,9 @@ def get_requirement_tags(self):
        """Return the unique tags used for all evidence requirements"""
        return self._unique_tags

-    def get_model_tags(self):
-        """Get the tags for the associated model"""
-        if self._model:
-            return self._model["tags"]
-        else:
-            return {}
+    def get_model_info(self):
+        """Get the tags and version for the associated model"""
+        return self._get_model_info(self._model)

    def register(
        self,
@@ -290,6 +321,7 @@ def set_artifacts(
        self,
        model: str,
        model_tags: dict,
        model_version: str = None,
        training_dataset: str = None,
        assessment_dataset: str = None,
    ):
@@ -311,15 +343,21 @@
        """

        global_logger.info(
-            f"Adding model ({model}) to governance. Model has tags: {model_tags}"
+            f"Adding model ({model}) to governance. Model has tags: {model_tags} and version: {model_version}"
        )
-        prepared_model = {"name": model, "tags": model_tags}
+        prepared_model = {
+            "name": model,
+            "tags": model_tags,
+            "model_version": model_version,
+        }
        if training_dataset:
            prepared_model["training_dataset_name"] = training_dataset
        if assessment_dataset:
            prepared_model["assessment_dataset_name"] = assessment_dataset
        self._model = prepared_model

        self._print_model_changes_log()

    def set_evidence(self, evidences: List[Evidence]):
        """
        Update evidences
@@ -348,6 +386,9 @@ def _api_export(self):
            f"Uploading {len(self._evidences)} evidences.. for use_case_id={self._use_case_id} policy_pack_id={self._policy_pack_id}"
        )

        # update the platform model when local tags or version have changed
        self.apply_model_changes()

        assessment = self._api.create_assessment(
            self._use_case_id, self._prepare_export_data()
        )
@@ -379,6 +420,32 @@ def _api_export(self):
            error = assessment["error"]
            global_logger.error(f"Error in uploading evidences : {error}")

    def _print_model_changes_log(self):
        # find model_link with model name from assessment plan
        plan_model = self._find_plan_model()
        if plan_model is None:
            return

        model_info = self.get_model_info()
        plan_model_info = self._get_model_info(plan_model)
        match = True
        for key in model_info.keys():
            model_value = model_info[key]
            plan_model_value = plan_model_info[key]
            if model_value != plan_model_value:
                match = False
                global_logger.info(
                    f"Platform model and local model {key} do not match. Platform {key}: {plan_model_value}, Local {key}: {model_value}"
                )
        if not match:
            global_logger.info(
                """
                You can apply the changes to governance by calling:
                    gov.apply_model_changes()
                Alternatively, calling gov.export() will automatically apply the changes to governance.
                """
            )

    def _check_inclusion(self, label, evidence):
        matching_evidence = []
        for e in evidence:
@@ -408,6 +475,31 @@ def _file_export(self, filename):
        with open(filename, "w") as f:
            f.write(data)

    def _find_plan_model(self):
        """Return the model link from the assessment plan whose name matches the associated model"""
        if self.model is None or self._plan is None:
            return None

        model_name = self.model.get("name", None)
        if model_name is None:
            return None

        for link in self._plan.get("model_links", []):
            if link["model_name"] == model_name:
                return link

        return None

    def _get_model_info(self, model):
        """Get the tags and version for a model"""
        if model:
            return {
                "tags": model.get("tags", {}),
                "model_version": model.get("model_version", None),
            }
        else:
            return {"tags": {}, "model_version": None}

    def _match_requirements(self):
        missing = []
        required_labels = [e.label for e in self.get_evidence_requirements()]
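
Taken together, this release threads model_version from the adapter through Governance to the platform model link. The sketch below shows how a caller might use it; the adapter class name, import paths, and constructor details are assumptions for illustration and are not confirmed by this diff.

# Hypothetical usage: class names, import paths, and constructor arguments are assumptions;
# only the model_tags/model_version flow and gov.export() behavior come from this diff.
from connect.governance import Governance   # assumed import path
from connect.adapters import Adapter        # assumed class name and path

gov = Governance()                           # authentication/configuration omitted
adapter = Adapter(
    governance=gov,
    model_name="credit_default_model",       # illustrative values
    model_tags={"stage": "production"},
    model_version="0.0.8",                   # new parameter in this release
)
# ... run assessments and add evidence to gov ...
gov.export()  # export applies pending model tag/version changes via apply_model_changes()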
