Remove few dev deps #493

Merged · 4 commits · Oct 20, 2023
Changes from 3 commits
.pre-commit-config.yaml (1 addition, 1 deletion)

```diff
@@ -21,7 +21,7 @@ repos:
       - id: black
 
   - repo: https://github.com/charliermarsh/ruff-pre-commit
-    rev: 'v0.0.288'
+    rev: 'v0.1.0'
     hooks:
       - id: ruff
         args: [--fix, --exit-non-zero-on-fix]
```
cyclops/evaluate/evaluator.py (2 additions, 2 deletions)

```diff
@@ -293,8 +293,8 @@
 ) -> Dict[str, WrappedModel]:
     """Prepare models for evaluation."""
     if isinstance(model, get_args(WrappedModel)):
-        model_name: str = model.model_.__class__.__name__  # type: ignore
-        return {model_name: model}  # type: ignore[dict-item]
+        model_name: str = model.model_.__class__.__name__
+        return {model_name: model}
     if isinstance(model, (list, tuple)):
         assert all(isinstance(m, get_args(WrappedModel)) for m in model)
         return {m.getattr("model_").__class__.__name__: m for m in model}
```
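The `isinstance(model, get_args(WrappedModel))` check works because `typing.get_args` applied to a `Union` returns its member classes as a plain tuple, which `isinstance` accepts as its second argument. A minimal sketch of the pattern, with hypothetical wrapper classes standing in for cyclops' actual ones:

```python
from typing import Union, get_args


class SKModelWrapper:
    """Hypothetical stand-in for a scikit-learn model wrapper."""


class PTModelWrapper:
    """Hypothetical stand-in for a PyTorch model wrapper."""


# Assumed to mirror the shape of cyclops' WrappedModel union.
WrappedModel = Union[SKModelWrapper, PTModelWrapper]

# get_args() on a Union yields its members as a tuple of classes,
# so isinstance() can test against all of them at once.
assert get_args(WrappedModel) == (SKModelWrapper, PTModelWrapper)
assert isinstance(SKModelWrapper(), get_args(WrappedModel))
```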
cyclops/evaluate/fairness/evaluator.py (8 additions, 8 deletions)

```diff
@@ -234,7 +234,7 @@
     # since we have base values, remove overall slice
     slice_spec._registry.pop("overall", None)
 
-    results: Dict[str, Dict[str, Any]] = {}
+    results: Dict[str, Dict[str, Dict[str, Any]]] = {}
 
     for slice_name, slice_fn in slice_spec.slices():
         sliced_dataset = dataset.remove_columns(remove_columns or []).filter(
```
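The widened annotation matches how `results` is indexed later in the file: prediction column, then slice name, then metric name. A minimal sketch of that shape (the column, slice, and metric values below are made up for illustration; only "Group Size" appears in the source):

```python
from typing import Any, Dict

# Hypothetical example of the three-level mapping:
# prediction column -> slice name -> metric name -> metric value.
results: Dict[str, Dict[str, Dict[str, Any]]] = {
    "preds.prediction": {
        "sex:M": {"Accuracy": 0.91, "Group Size": 620},
        "sex:F": {"Accuracy": 0.88, "Group Size": 580},
    },
}

# Values are reached by indexing through all three levels.
print(results["preds.prediction"]["sex:M"]["Accuracy"])  # 0.91
```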
```diff
@@ -933,32 +933,33 @@
 
 
 def _compute_parity_metrics(
-    results: Dict[str, Dict[str, Dict[str, Dict[str, float]]]],
+    results: Dict[str, Dict[str, Dict[str, Any]]],
     base_slice_name: str,
-) -> Dict[str, Dict[str, Dict[str, Dict[str, float]]]]:
+) -> Dict[str, Dict[str, Dict[str, Any]]]:
     """Compute the parity metrics for each group and threshold if specified.
 
     Parameters
     ----------
-    results : Dict[str, Dict[str, Dict[str, Dict[str, float]]]]
+    results : Dict[str, Dict[str, Dict[str, Any]]]
         A dictionary mapping the prediction column to the metrics dictionary.
     base_slice_name : str
         The name of the base slice.
 
     Returns
     -------
-    Dict[str, Dict[str, Dict[str, Dict[str, float]]]]
+    Dict[str, Dict[str, Dict[str, Any]]]
         A dictionary mapping the prediction column to the metrics dictionary.
 
     """
-    parity_results: Dict[str, Dict[str, Any]] = {}
+    parity_results: Dict[str, Dict[str, Dict[str, Any]]] = {}
 
     for key, prediction_result in results.items():
         parity_results[key] = {}
         for slice_name, slice_result in prediction_result.items():
             for metric_name, metric_value in slice_result.items():
                 if metric_name == "Group Size":
                     continue
 
                 # add 'Parity' to the metric name before @threshold, if specified
                 metric_name_parts = metric_name.split("@")
                 parity_metric_name = f"{metric_name_parts[0]} Parity"
@@ -967,14 +968,13 @@
                 numerator = metric_value
                 denominator = prediction_result[base_slice_name][metric_name]
-                parity_metric_value = np.divide(  # type: ignore[call-overload]
+                parity_metric_value = np.divide(
                     numerator,
                     denominator,
                     out=np.zeros_like(numerator, dtype=np.float_),
                     where=denominator != 0,
                 )
 
                 # add the parity metric to the results
                 parity_results[key].setdefault(slice_name, {}).update(
                     {
                         parity_metric_name: _get_value_if_singleton_array(
```
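The `np.divide` call keeps the `out`/`where` pattern: entries where the base-slice value is zero are skipped, so the output retains the pre-filled `0.0` instead of producing `inf`, `nan`, or a runtime warning. A minimal sketch of the same pattern with made-up values (`np.float64` is used here in place of the legacy `np.float_` alias):

```python
import numpy as np

# Metric values for three slices (numerator) and the matching
# base-slice values (denominator); the second entry is zero on purpose.
slice_values = np.array([0.90, 0.75, 0.60])
base_values = np.array([0.80, 0.00, 0.60])

# Where base_values == 0, the division is skipped and the output
# keeps the pre-filled 0.0, so no divide-by-zero warning is raised.
parity = np.divide(
    slice_values,
    base_values,
    out=np.zeros_like(slice_values, dtype=np.float64),
    where=base_values != 0,
)
print(parity)  # [1.125 0.    1.   ]
```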