Commit 3065072
Remove few dev deps (#493)
* Remove mlflow, evidently and prefect packages from dev deps

* Update ruff to 0.1.0, remove unused type ignores

* Fix type annotation

* Remove category-encoders dependency

---------

Co-authored-by: Franklin <[email protected]>
amrit110 and fcogidi authored Oct 20, 2023
1 parent 2300338 commit 3065072
Showing 5 changed files with 517 additions and 1,842 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -21,7 +21,7 @@ repos:
       - id: black
 
   - repo: https://github.com/charliermarsh/ruff-pre-commit
-    rev: 'v0.0.288'
+    rev: 'v0.1.0'
     hooks:
       - id: ruff
         args: [--fix, --exit-non-zero-on-fix]
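For reference, a hook bump like this is typically produced with pre-commit's own tooling rather than by hand: pre-commit autoupdate --repo https://github.com/charliermarsh/ruff-pre-commit rewrites rev to the latest tag, and pre-commit run ruff --all-files re-runs the updated hook across the repository to surface anything the newer version flags.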
4 changes: 2 additions & 2 deletions cyclops/evaluate/evaluator.py
@@ -293,8 +293,8 @@ def _prepare_models(
 ) -> Dict[str, WrappedModel]:
     """Prepare models for evaluation."""
     if isinstance(model, get_args(WrappedModel)):
-        model_name: str = model.model_.__class__.__name__  # type: ignore
-        return {model_name: model}  # type: ignore[dict-item]
+        model_name: str = model.model_.__class__.__name__
+        return {model_name: model}
     if isinstance(model, (list, tuple)):
         assert all(isinstance(m, get_args(WrappedModel)) for m in model)
         return {m.getattr("model_").__class__.__name__: m for m in model}
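The isinstance(model, get_args(WrappedModel)) check works because typing.get_args applied to a Union alias returns its member classes as a tuple, which isinstance accepts directly. A minimal runnable sketch of the pattern, with hypothetical wrapper classes standing in for cyclops's actual WrappedModel union:

    from typing import Union, get_args

    class SKModelWrapper:
        """Hypothetical stand-in for a cyclops model wrapper."""

        def __init__(self, model: object) -> None:
            self.model_ = model

    class PTModelWrapper:
        """Another hypothetical wrapper."""

        def __init__(self, model: object) -> None:
            self.model_ = model

    WrappedModel = Union[SKModelWrapper, PTModelWrapper]

    def model_name(model: object) -> str:
        # get_args(WrappedModel) evaluates to the tuple
        # (SKModelWrapper, PTModelWrapper), so isinstance can use it directly.
        if isinstance(model, get_args(WrappedModel)):
            return model.model_.__class__.__name__
        raise TypeError(f"expected a wrapped model, got {type(model).__name__}")

    class LogisticRegression:
        """Placeholder for a real estimator."""

    print(model_name(SKModelWrapper(LogisticRegression())))  # LogisticRegression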
16 changes: 8 additions & 8 deletions cyclops/evaluate/fairness/evaluator.py
@@ -234,7 +234,7 @@ def evaluate_fairness(
         # since we have base values, remove overall slice
         slice_spec._registry.pop("overall", None)
 
-    results: Dict[str, Dict[str, Any]] = {}
+    results: Dict[str, Dict[str, Dict[str, Any]]] = {}
 
     for slice_name, slice_fn in slice_spec.slices():
         sliced_dataset = dataset.remove_columns(remove_columns or []).filter(
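The widened annotation reflects the three-level structure the evaluator fills in: prediction column → slice name → metric name → value. A hypothetical illustration of that shape (the keys are invented for the sketch; "Group Size" and the "@threshold" suffix come from the code below):

    from typing import Any, Dict

    results: Dict[str, Dict[str, Dict[str, Any]]] = {
        "preds_prob": {                      # prediction column
            "age:[30 - 50)": {               # slice name
                "BinaryAccuracy@0.5": 0.87,  # most metric values are floats...
                "Group Size": 1240,          # ...but not all, hence the Any leaf
            },
        },
    }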
@@ -933,32 +933,33 @@ def _construct_base_slice_name(base_values: Dict[str, Any]) -> str:
 
 
 def _compute_parity_metrics(
-    results: Dict[str, Dict[str, Dict[str, Dict[str, float]]]],
+    results: Dict[str, Dict[str, Dict[str, Any]]],
     base_slice_name: str,
-) -> Dict[str, Dict[str, Dict[str, Dict[str, float]]]]:
+) -> Dict[str, Dict[str, Dict[str, Any]]]:
     """Compute the parity metrics for each group and threshold if specified.
 
     Parameters
     ----------
-    results : Dict[str, Dict[str, Dict[str, Dict[str, float]]]]
+    results : Dict[str, Dict[str, Dict[str, Any]]]
         A dictionary mapping the prediction column to the metrics dictionary.
     base_slice_name : str
         The name of the base slice.
 
     Returns
     -------
-    Dict[str, Dict[str, Dict[str, Dict[str, float]]]]
+    Dict[str, Dict[str, Dict[str, Any]]]
         A dictionary mapping the prediction column to the metrics dictionary.
 
     """
-    parity_results: Dict[str, Dict[str, Any]] = {}
+    parity_results: Dict[str, Dict[str, Dict[str, Any]]] = {}
 
     for key, prediction_result in results.items():
         parity_results[key] = {}
         for slice_name, slice_result in prediction_result.items():
             for metric_name, metric_value in slice_result.items():
                 if metric_name == "Group Size":
                     continue
 
                 # add 'Parity' to the metric name before @threshold, if specified
                 metric_name_parts = metric_name.split("@")
                 parity_metric_name = f"{metric_name_parts[0]} Parity"
@@ -967,14 +968,13 @@ def _compute_parity_metrics(
 
                 numerator = metric_value
                 denominator = prediction_result[base_slice_name][metric_name]
-                parity_metric_value = np.divide(  # type: ignore[call-overload]
+                parity_metric_value = np.divide(
                     numerator,
                     denominator,
                     out=np.zeros_like(numerator, dtype=np.float_),
                     where=denominator != 0,
                 )
 
-                # add the parity metric to the results
                 parity_results[key].setdefault(slice_name, {}).update(
                     {
                         parity_metric_name: _get_value_if_singleton_array(
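The np.divide call above is the standard NumPy idiom for zero-safe elementwise division: results are written into out only where the where mask is true, so any slice whose base-slice metric is zero keeps the prefilled 0.0 instead of producing a warning, inf, or nan. A standalone sketch with invented values:

    import numpy as np

    numerator = np.array([0.8, 0.6, 0.9])     # metric value on each slice
    denominator = np.array([0.8, 0.0, 0.45])  # same metric on the base slice

    parity = np.divide(
        numerator,
        denominator,
        # np.float_ was removed in NumPy 2.0; np.float64 is the equivalent
        out=np.zeros_like(numerator, dtype=np.float64),
        where=denominator != 0,
    )
    print(parity)  # [1. 0. 2.]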
(Diffs for the remaining two changed files are not shown.)
