type hint fixes for adaptive/tests/test_learners.py
basnijholt committed Dec 15, 2019

1 parent cc296f4 commit 4b4546c
Showing 1 changed file with 21 additions and 110 deletions.

adaptive/tests/test_learners.py (21 additions, 110 deletions)
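The whole change is one pattern applied throughout: signatures that enumerated every concrete learner class in a Union now either annotate with the shared base class or drop the annotation. A minimal sketch of the before/after, reusing names from the diff below; xfail_before and xfail_after are hypothetical names used only for side-by-side comparison (the real function is just xfail), and the imports assume adaptive and pytest are installed:

import pytest
from typing import Type, Union

from adaptive.learner import BaseLearner, Learner2D, LearnerND


# Before: each helper enumerated concrete learner classes in a Union.
def xfail_before(learner: Union[Type[Learner2D], Type[LearnerND]]):
    return pytest.mark.xfail, learner


# After: every learner derives from BaseLearner, so a single annotation
# covers all current and future learner types.
def xfail_after(learner: Type[BaseLearner]):
    return pytest.mark.xfail, learner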
@@ -20,6 +20,7 @@
 from adaptive.learner import (
     AverageLearner,
     BalancingLearner,
+    BaseLearner,
     DataSaver,
     IntegratorLearner,
     Learner1D,
@@ -92,28 +93,15 @@ def uniform(a: Union[int, float], b: int) -> Callable:
 learner_function_combos = collections.defaultdict(list)
 
 
-def learn_with(
-    learner_type: Union[
-        Type[Learner2D],
-        Type[SequenceLearner],
-        Type[AverageLearner],
-        Type[Learner1D],
-        Type[LearnerND],
-    ],
-    **init_kwargs,
-) -> Callable:
+def learn_with(learner_type: Type[BaseLearner], **init_kwargs,) -> Callable:
     def _(f):
         learner_function_combos[learner_type].append((f, init_kwargs))
         return f
 
     return _
 
 
-def xfail(
-    learner: Union[Type[Learner2D], Type[LearnerND]]
-) -> Union[
-    Tuple[MarkDecorator, Type[Learner2D]], Tuple[MarkDecorator, Type[LearnerND]]
-]:
+def xfail(learner: Type[BaseLearner]) -> Tuple[MarkDecorator, Type[BaseLearner]]:
     return pytest.mark.xfail, learner
 
 

@@ -141,14 +129,7 @@ def linear_with_peak(x: Union[int, float], d: uniform(-1, 1)) -> float:
 @learn_with(Learner2D, bounds=((-1, 1), (-1, 1)))
 @learn_with(SequenceLearner, sequence=np.random.rand(1000, 2))
 def ring_of_fire(
-    xy: Union[
-        Tuple[float, float],
-        np.ndarray,
-        Tuple[int, int],
-        Tuple[float, float],
-        Tuple[float, float],
-    ],
-    d: uniform(0.2, 1),
+    xy: Union[np.ndarray, Tuple[float, float]], d: uniform(0.2, 1),
 ) -> float:
     a = 0.2
     x, y = xy
@@ -158,8 +139,7 @@ def ring_of_fire(
 @learn_with(LearnerND, bounds=((-1, 1), (-1, 1), (-1, 1)))
 @learn_with(SequenceLearner, sequence=np.random.rand(1000, 3))
 def sphere_of_fire(
-    xyz: Union[Tuple[float, float, float], Tuple[int, int, int], np.ndarray],
-    d: uniform(0.2, 1),
+    xyz: Union[Tuple[float, float, float], np.ndarray], d: uniform(0.2, 1),
 ) -> float:
     a = 0.2
     x, y, z = xyz
@@ -177,16 +157,7 @@ def gaussian(n: int) -> float:
 
 # Create a sequence of learner parameters by adding all
 # possible loss functions to an existing parameter set.
-def add_loss_to_params(
-    learner_type: Union[
-        Type[Learner2D],
-        Type[SequenceLearner],
-        Type[AverageLearner],
-        Type[Learner1D],
-        Type[LearnerND],
-    ],
-    existing_params: Dict[str, Any],
-) -> Any:
+def add_loss_to_params(learner_type, existing_params: Dict[str, Any],) -> Any:
     if learner_type not in LOSS_FUNCTIONS:
         return [existing_params]
     loss_param, loss_functions = LOSS_FUNCTIONS[learner_type]
@@ -216,12 +187,7 @@ def ask_randomly(
     learner: Union[Learner1D, LearnerND, Learner2D],
     rounds: Tuple[int, int],
     points: Tuple[int, int],
-) -> Union[
-    Tuple[List[Union[Tuple[float, float, float], Tuple[int, int, int]]], List[float]],
-    Tuple[List[Union[Tuple[float, float], Tuple[int, int]]], List[float]],
-    Tuple[List[float], List[float]],
-    Tuple[List[Union[Tuple[int, int], Tuple[float, float]]], List[float]],
-]:
+):
     n_rounds = random.randrange(*rounds)
     n_points = [random.randrange(*points) for _ in range(n_rounds)]
 
@@ -240,7 +206,7 @@ def ask_randomly(
 
 @run_with(Learner1D)
 def test_uniform_sampling1D(
-    learner_type: Type[Learner1D],
+    learner_type,
     f: Callable,
     learner_kwargs: Dict[str, Union[Tuple[int, int], Callable]],
 ) -> None:
@@ -262,7 +228,7 @@ def test_uniform_sampling1D(
 @pytest.mark.xfail
 @run_with(Learner2D, LearnerND)
 def test_uniform_sampling2D(
-    learner_type: Union[Type[Learner2D], Type[LearnerND]],
+    learner_type,
     f: Callable,
     learner_kwargs: Dict[
         str,
@@ -304,8 +270,7 @@ def test_uniform_sampling2D(
     ],
 )
 def test_learner_accepts_lists(
-    learner_type: Union[Type[Learner2D], Type[LearnerND], Type[Learner1D]],
-    bounds: Union[Tuple[int, int], List[Tuple[int, int]]],
+    learner_type, bounds: Union[Tuple[int, int], List[Tuple[int, int]]],
 ) -> None:
     def f(x):
         return [0, 1]
@@ -316,11 +281,7 @@ def f(x):
 
 @run_with(Learner1D, Learner2D, LearnerND, SequenceLearner)
 def test_adding_existing_data_is_idempotent(
-    learner_type: Union[
-        Type[SequenceLearner], Type[LearnerND], Type[Learner1D], Type[Learner2D]
-    ],
-    f: Callable,
-    learner_kwargs: Dict[str, Any],
+    learner_type, f: Callable, learner_kwargs: Dict[str, Any],
 ) -> None:
     """Adding already existing data is an idempotent operation.
 
@@ -369,15 +330,7 @@ def test_adding_existing_data_is_idempotent(
 # but we xfail it now, as Learner2D will be deprecated anyway
 @run_with(Learner1D, xfail(Learner2D), LearnerND, AverageLearner, SequenceLearner)
 def test_adding_non_chosen_data(
-    learner_type: Union[
-        Type[Learner2D],
-        Type[SequenceLearner],
-        Type[AverageLearner],
-        Type[Learner1D],
-        Type[LearnerND],
-    ],
-    f: Callable,
-    learner_kwargs: Dict[str, Any],
+    learner_type, f: Callable, learner_kwargs: Dict[str, Any],
 ) -> None:
     """Adding data for a point that was not returned by 'ask'."""
     # XXX: learner, control and bounds are not defined
@@ -421,9 +374,7 @@ def test_adding_non_chosen_data(
 
 @run_with(Learner1D, xfail(Learner2D), xfail(LearnerND), AverageLearner)
 def test_point_adding_order_is_irrelevant(
-    learner_type: Union[
-        Type[AverageLearner], Type[LearnerND], Type[Learner1D], Type[Learner2D]
-    ],
+    learner_type,
     f: Callable,
     learner_kwargs: Dict[
         str,
@@ -478,9 +429,7 @@ def test_point_adding_order_is_irrelevant(
 # see https://github.com/python-adaptive/adaptive/issues/55
 @run_with(Learner1D, xfail(Learner2D), LearnerND, AverageLearner)
 def test_expected_loss_improvement_is_less_than_total_loss(
-    learner_type: Union[
-        Type[AverageLearner], Type[LearnerND], Type[Learner1D], Type[Learner2D]
-    ],
+    learner_type,
     f: Callable,
     learner_kwargs: Dict[
         str,
@@ -519,7 +468,7 @@ def test_expected_loss_improvement_is_less_than_total_loss(
 # but we xfail it now, as Learner2D will be deprecated anyway
 @run_with(Learner1D, xfail(Learner2D), LearnerND)
 def test_learner_performance_is_invariant_under_scaling(
-    learner_type: Union[Type[Learner2D], Type[LearnerND], Type[Learner1D]],
+    learner_type,
     f: Callable,
     learner_kwargs: Dict[
         str,
@@ -583,15 +532,7 @@ def test_learner_performance_is_invariant_under_scaling(
     with_all_loss_functions=False,
 )
 def test_balancing_learner(
-    learner_type: Union[
-        Type[Learner2D],
-        Type[SequenceLearner],
-        Type[AverageLearner],
-        Type[Learner1D],
-        Type[LearnerND],
-    ],
-    f: Callable,
-    learner_kwargs: Dict[str, Any],
+    learner_type, f: Callable, learner_kwargs: Dict[str, Any],
 ) -> None:
     """Test if the BalancingLearner works with the different types of learners."""
     learners = [
@@ -638,17 +579,7 @@ def test_balancing_learner(
     SequenceLearner,
     with_all_loss_functions=False,
 )
-def test_saving(
-    learner_type: Union[
-        Type[Learner2D],
-        Type[SequenceLearner],
-        Type[AverageLearner],
-        Type[Learner1D],
-        Type[LearnerND],
-    ],
-    f: Callable,
-    learner_kwargs: Dict[str, Any],
-) -> None:
+def test_saving(learner_type, f: Callable, learner_kwargs: Dict[str, Any],) -> None:
     f = generate_random_parametrization(f)
     learner = learner_type(f, **learner_kwargs)
     control = learner_type(f, **learner_kwargs)
@@ -680,15 +611,7 @@ def test_saving(
     with_all_loss_functions=False,
 )
 def test_saving_of_balancing_learner(
-    learner_type: Union[
-        Type[Learner2D],
-        Type[SequenceLearner],
-        Type[AverageLearner],
-        Type[Learner1D],
-        Type[LearnerND],
-    ],
-    f: Callable,
-    learner_kwargs: Dict[str, Any],
+    learner_type, f: Callable, learner_kwargs: Dict[str, Any],
 ) -> None:
     f = generate_random_parametrization(f)
     learner = BalancingLearner([learner_type(f, **learner_kwargs)])
@@ -727,9 +650,7 @@ def fname(learner):
     with_all_loss_functions=False,
 )
 def test_saving_with_datasaver(
-    learner_type: Union[
-        Type[Learner2D], Type[AverageLearner], Type[LearnerND], Type[Learner1D]
-    ],
+    learner_type,
     f: Callable,
     learner_kwargs: Dict[
         str,
@@ -770,7 +691,7 @@ def test_saving_with_datasaver(
 @pytest.mark.xfail
 @run_with(Learner1D, Learner2D, LearnerND)
 def test_convergence_for_arbitrary_ordering(
-    learner_type: Union[Type[Learner2D], Type[LearnerND], Type[Learner1D]],
+    learner_type,
     f: Callable,
     learner_kwargs: Dict[
         str,
@@ -794,17 +715,7 @@ def test_convergence_for_arbitrary_ordering(
 @pytest.mark.xfail
 @run_with(Learner1D, Learner2D, LearnerND)
 def test_learner_subdomain(
-    learner_type: Union[Type[Learner2D], Type[LearnerND], Type[Learner1D]],
-    f: Callable,
-    learner_kwargs: Dict[
-        str,
-        Union[
-            Tuple[Tuple[int, int], Tuple[int, int]],
-            Callable,
-            Tuple[int, int],
-            Tuple[Tuple[int, int], Tuple[int, int], Tuple[int, int]],
-        ],
-    ],
+    learner_type, f: Callable, learner_kwargs,
 ):
     """Learners that never receive data outside of a subdomain should
     perform 'similarly' to learners defined on that subdomain only."""
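For context on what learn_with (retyped in the first hunk) actually does: it is a small decorator factory that registers each test function together with learner init kwargs, keyed by learner class, so the suite can parametrize tests over every registered combination. A self-contained sketch of that registry pattern follows; BaseLearner here is a local stand-in class, and FakeLearner, registry, and linear are hypothetical names for illustration only (the real code lives in adaptive/tests/test_learners.py):

import collections
from typing import Callable, Dict, List, Tuple, Type


class BaseLearner:
    """Stand-in for adaptive.learner.BaseLearner (illustration only)."""


class FakeLearner(BaseLearner):
    """Hypothetical learner; the real tests use Learner1D, Learner2D, etc."""

    def __init__(self, f: Callable, **kwargs) -> None:
        self.f, self.kwargs = f, kwargs


# Per-learner registry of (test function, init kwargs) pairs, mirroring
# learner_function_combos in the diff above.
registry: Dict[Type[BaseLearner], List[Tuple[Callable, dict]]] = collections.defaultdict(list)


def learn_with(learner_type: Type[BaseLearner], **init_kwargs) -> Callable:
    # Decorator factory: registers f for learner_type, then returns f unchanged.
    def _(f: Callable) -> Callable:
        registry[learner_type].append((f, init_kwargs))
        return f

    return _


@learn_with(FakeLearner, bounds=(-1, 1))
def linear(x: float) -> float:
    return x


# The test suite iterates such combos to build one learner per registration.
for learner_type, combos in registry.items():
    for f, kwargs in combos:
        learner = learner_type(f, **kwargs)
        print(type(learner).__name__, learner.kwargs)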
