
Commit 550aac7
Pre-commit changes
ThomasMeissnerDS committed Oct 14, 2023
1 parent 2006906, commit 550aac7
Showing 3 changed files with 29 additions and 10 deletions.
bluecast/config/base_classes.py (1 addition & 2 deletions)

@@ -16,9 +16,8 @@ class BaseClassExperimentTracker(ABC):
     def add_results(
         self,
         experiment_id: Union[int, str, float],
-        experiment_name: Union[int, str, float],
         score_category: Literal["hyperparameter_tuning", "oof_score"],
-        training_configs: TrainingConfig,
+        training_config: TrainingConfig,
         model_parameters: Dict[
             Union[str, int, float, None], Union[str, int, float, None]
         ],
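For context, after this change the abstract method has roughly the following shape. The trailing parameters (eval_scores, metric_used, metric_higher_is_better) and their types are inferred from the test file further down, not from the shown source, so treat this as a sketch rather than the exact definition.

from abc import ABC, abstractmethod
from typing import Dict, Literal, Union

from bluecast.config.training_config import TrainingConfig


class BaseClassExperimentTracker(ABC):
    @abstractmethod
    def add_results(
        self,
        experiment_id: Union[int, str, float],
        score_category: Literal["hyperparameter_tuning", "oof_score"],
        training_config: TrainingConfig,
        model_parameters: Dict[
            Union[str, int, float, None], Union[str, int, float, None]
        ],
        eval_scores: Union[int, float],  # type assumed from the test's 0.95
        metric_used: str,  # assumed from the test's "accuracy"
        metric_higher_is_better: bool,  # assumed from the test's True
    ) -> None:
        """Record one experiment result in the tracker."""
        ...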
bluecast/config/training_config.py (1 addition & 1 deletion)

@@ -7,8 +7,8 @@
"""
from typing import Dict, Optional

from pydantic.dataclasses import dataclass
from pydantic import BaseModel
from pydantic.dataclasses import dataclass


class Config:
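This hunk only reorders imports, which is what an isort pre-commit hook enforces: same-section imports sorted alphabetically by module path ("pydantic" before "pydantic.dataclasses"). For readers wondering why a bare Config class sits next to pydantic's dataclass decorator, here is a minimal, hypothetical sketch of the pattern; the arbitrary_types_allowed setting and the fields are illustrative assumptions, not BlueCast's actual config.

from pydantic.dataclasses import dataclass


class Config:
    # Assumed setting for illustration; the real Config body is not shown in this diff.
    arbitrary_types_allowed = True


@dataclass(config=Config)
class ExampleTrainingConfig:
    # Hypothetical fields; the real TrainingConfig defines its own.
    global_random_state: int = 33
    hyperparameter_tuning_rounds: int = 100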
bluecast/tests/test_experiment_tracker.py (27 additions & 7 deletions)

@@ -1,48 +1,68 @@
 import pytest
 
 from bluecast.config.training_config import TrainingConfig
 from bluecast.experimentation.tracking import ExperimentTracker
 
 
 @pytest.fixture
 def experiment_tracker():
     return ExperimentTracker()
 
 
 def test_add_results(experiment_tracker):
     # Add some sample data
     experiment_id = 1
     score_category = "hyperparameter_tuning"
-    training_config = TrainingConfig()  # You may need to create a valid TrainingConfig instance
+    training_config = (
+        TrainingConfig()
+    )  # You may need to create a valid TrainingConfig instance
     model_parameters = {"param1": 1, "param2": "abc"}
     eval_scores = 0.95
     metric_used = "accuracy"
     metric_higher_is_better = True
 
     experiment_tracker.add_results(
-        experiment_id, score_category, training_config,
-        model_parameters, eval_scores, metric_used, metric_higher_is_better
+        experiment_id,
+        score_category,
+        training_config,
+        model_parameters,
+        eval_scores,
+        metric_used,
+        metric_higher_is_better,
     )
 
     # Check if the data was added correctly
     assert experiment_tracker.experiment_id == [1]
     assert experiment_tracker.score_category == ["hyperparameter_tuning"]
-    assert experiment_tracker.training_configs == [training_config.model_dump(mode="json")]
+    assert experiment_tracker.training_configs == [
+        training_config.model_dump(mode="json")
+    ]
     assert experiment_tracker.model_parameters == [model_parameters]
     assert experiment_tracker.eval_scores == [0.95]
     assert experiment_tracker.metric_used == ["accuracy"]
     assert experiment_tracker.metric_higher_is_better == [True]
 
 
 def test_retrieve_results_as_df(experiment_tracker):
     # Add some sample data
     experiment_id = 1
     score_category = "hyperparameter_tuning"
-    training_config = TrainingConfig()  # You may need to create a valid TrainingConfig instance
+    training_config = (
+        TrainingConfig()
+    )  # You may need to create a valid TrainingConfig instance
     model_parameters = {"param1": 1, "param2": "abc"}
     eval_scores = 0.95
     metric_used = "accuracy"
     metric_higher_is_better = True
 
     experiment_tracker.add_results(
-        experiment_id, score_category, training_config,
-        model_parameters, eval_scores, metric_used, metric_higher_is_better
+        experiment_id,
+        score_category,
+        training_config,
+        model_parameters,
+        eval_scores,
+        metric_used,
+        metric_higher_is_better,
+    )
 
     # Retrieve the results as a DataFrame
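The tail of this second test (the DataFrame assertions) is cut off in this view, which is also where the remaining four of the 27 added lines live. As a rough usage sketch: assuming retrieve_results_as_df flattens the tracker's stored lists into one row per recorded result, presumably as a pandas DataFrame (an assumption based on the test name and comment, not on shown source), the tracker could be exercised like this.

from bluecast.config.training_config import TrainingConfig
from bluecast.experimentation.tracking import ExperimentTracker

tracker = ExperimentTracker()
tracker.add_results(
    experiment_id=1,
    score_category="hyperparameter_tuning",
    training_config=TrainingConfig(),
    model_parameters={"param1": 1, "param2": "abc"},
    eval_scores=0.95,
    metric_used="accuracy",
    metric_higher_is_better=True,
)

# Assumed behavior: one tabular row per call to add_results.
results_df = tracker.retrieve_results_as_df()
print(results_df.head())  # expected: a single row holding the values above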
