diff --git a/ax/benchmark/benchmark_problem.py b/ax/benchmark/benchmark_problem.py
index 234fe69a154..e970404e112 100644
--- a/ax/benchmark/benchmark_problem.py
+++ b/ax/benchmark/benchmark_problem.py
@@ -12,8 +12,8 @@
 import pandas as pd
 from ax.benchmark.benchmark_metric import BenchmarkMetric
-from ax.benchmark.runners.base import BenchmarkRunner
-from ax.benchmark.runners.botorch_test import BoTorchTestProblem
+from ax.benchmark.benchmark_runner import BenchmarkRunner
+from ax.benchmark.benchmark_test_functions.botorch_test import BoTorchTestFunction
 from ax.core.data import Data
 from ax.core.experiment import Experiment
 from ax.core.objective import MultiObjective, Objective
@@ -309,7 +309,7 @@ def create_problem_from_botorch(
     Create a `BenchmarkProblem` from a BoTorch `BaseTestProblem`.
 
     Uses specialized Metrics and Runners for benchmarking. The test problem's
-    result will be computed by the Runner, `BoTorchTestProblemRunner`, and
+    result will be computed by the Runner, `BenchmarkRunner`, and
     retrieved by the Metric(s), which are `BenchmarkMetric`s.
 
     Args:
@@ -378,7 +378,7 @@ def create_problem_from_botorch(
         search_space=search_space,
         optimization_config=optimization_config,
         runner=BenchmarkRunner(
-            test_problem=BoTorchTestProblem(botorch_problem=test_problem),
+            test_problem=BoTorchTestFunction(botorch_problem=test_problem),
             outcome_names=outcome_names,
             search_space_digest=extract_search_space_digest(
                 search_space=search_space,
diff --git a/ax/benchmark/runners/base.py b/ax/benchmark/benchmark_runner.py
similarity index 98%
rename from ax/benchmark/runners/base.py
rename to ax/benchmark/benchmark_runner.py
index 3471231ebf2..55e934780dd 100644
--- a/ax/benchmark/runners/base.py
+++ b/ax/benchmark/benchmark_runner.py
@@ -13,7 +13,7 @@
 import numpy.typing as npt
 import torch
-from ax.benchmark.runners.botorch_test import ParamBasedTestProblem
+from ax.benchmark.benchmark_test_function import BenchmarkTestFunction
 from ax.core.base_trial import BaseTrial, TrialStatus
 from ax.core.batch_trial import BatchTrial
 from ax.core.runner import Runner
@@ -48,7 +48,7 @@ class BenchmarkRunner(Runner):
 
     Args:
         outcome_names: The names of the outcomes returned by the problem.
-        test_problem: A ``ParamBasedTestProblem`` from which to generate
+        test_problem: A ``BenchmarkTestFunction`` from which to generate
            deterministic data before adding noise.
        noise_std: The standard deviation of the noise added to the data. Can be
            a list or dict to be per-metric.
@@ -56,7 +56,7 @@ class BenchmarkRunner(Runner):
     """
 
     outcome_names: list[str]
-    test_problem: ParamBasedTestProblem
+    test_problem: BenchmarkTestFunction
     noise_std: float | list[float] | dict[str, float] = 0.0
     # pyre-fixme[16]: Pyre doesn't understand InitVars
     search_space_digest: InitVar[SearchSpaceDigest | None] = None
diff --git a/ax/benchmark/benchmark_test_function.py b/ax/benchmark/benchmark_test_function.py
new file mode 100644
index 00000000000..f7546961156
--- /dev/null
+++ b/ax/benchmark/benchmark_test_function.py
@@ -0,0 +1,32 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-strict
+
+from abc import ABC, abstractmethod
+from collections.abc import Mapping
+from dataclasses import dataclass
+
+from ax.core.types import TParamValue
+from torch import Tensor
+
+
+@dataclass(kw_only=True)
+class BenchmarkTestFunction(ABC):
+    """
+    The basic Ax class for generating deterministic data to benchmark against.
+
+    (Noise - if desired - is added by the runner.)
+    """
+
+    @abstractmethod
+    def evaluate_true(self, params: Mapping[str, TParamValue]) -> Tensor:
+        """
+        Evaluate noiselessly.
+
+        Returns:
+            1d tensor of shape (num_outcomes,).
+        """
+        ...
diff --git a/ax/benchmark/runners/__init__.py b/ax/benchmark/benchmark_test_functions/__init__.py
similarity index 100%
rename from ax/benchmark/runners/__init__.py
rename to ax/benchmark/benchmark_test_functions/__init__.py
diff --git a/ax/benchmark/runners/botorch_test.py b/ax/benchmark/benchmark_test_functions/botorch_test.py
similarity index 82%
rename from ax/benchmark/runners/botorch_test.py
rename to ax/benchmark/benchmark_test_functions/botorch_test.py
index 321675afab3..af29f6166a1 100644
--- a/ax/benchmark/runners/botorch_test.py
+++ b/ax/benchmark/benchmark_test_functions/botorch_test.py
@@ -5,39 +5,18 @@
 
 # pyre-strict
 
-from abc import ABC, abstractmethod
 from collections.abc import Mapping
 from dataclasses import dataclass
 from itertools import islice
 
 import torch
-from ax.core.types import TParamValue
+from ax.benchmark.benchmark_test_function import BenchmarkTestFunction
 from botorch.test_functions.synthetic import BaseTestProblem, ConstrainedBaseTestProblem
 from botorch.utils.transforms import normalize, unnormalize
-from torch import Tensor
 
 
 @dataclass(kw_only=True)
-class ParamBasedTestProblem(ABC):
-    """
-    The basic Ax class for generating deterministic data to benchmark against.
-
-    (Noise - if desired - is added by the runner.)
-    """
-
-    @abstractmethod
-    def evaluate_true(self, params: Mapping[str, TParamValue]) -> Tensor:
-        """
-        Evaluate noiselessly.
-
-        Returns:
-            1d tensor of shape (num_outcomes,).
-        """
-        ...
-
-
-@dataclass(kw_only=True)
-class BoTorchTestProblem(ParamBasedTestProblem):
+class BoTorchTestFunction(BenchmarkTestFunction):
     """
     Class for generating data from a BoTorch ``BaseTestProblem``.
diff --git a/ax/benchmark/runners/surrogate.py b/ax/benchmark/benchmark_test_functions/surrogate.py
similarity index 96%
rename from ax/benchmark/runners/surrogate.py
rename to ax/benchmark/benchmark_test_functions/surrogate.py
index ac562506d5d..381b013fd86 100644
--- a/ax/benchmark/runners/surrogate.py
+++ b/ax/benchmark/benchmark_test_functions/surrogate.py
@@ -9,7 +9,7 @@
 from dataclasses import dataclass
 
 import torch
-from ax.benchmark.runners.botorch_test import ParamBasedTestProblem
+from ax.benchmark.benchmark_test_function import BenchmarkTestFunction
 from ax.core.observation import ObservationFeatures
 from ax.core.types import TParamValue
 from ax.modelbridge.torch import TorchModelBridge
@@ -21,7 +21,7 @@
 
 
 @dataclass(kw_only=True)
-class SurrogateTestFunction(ParamBasedTestProblem):
+class SurrogateTestFunction(BenchmarkTestFunction):
     """
     Data-generating function for surrogate benchmark problems.
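As an aside for readers of this diff: a minimal sketch of what a custom subclass of the new `BenchmarkTestFunction` ABC could look like under the new layout. The `SumOfSquares` class and its `x0`, `x1`, ... parameter names are hypothetical illustrations, not part of this change:

```python
from collections.abc import Mapping
from dataclasses import dataclass

import torch
from ax.benchmark.benchmark_test_function import BenchmarkTestFunction
from ax.core.types import TParamValue
from torch import Tensor


@dataclass(kw_only=True)
class SumOfSquares(BenchmarkTestFunction):
    """Hypothetical test function: sum of squares over named parameters."""

    dim: int = 2

    def evaluate_true(self, params: Mapping[str, TParamValue]) -> Tensor:
        # Deterministic (noiseless) evaluation; noise, if desired, is added
        # by BenchmarkRunner via its `noise_std` argument, not here.
        x = torch.tensor([float(params[f"x{i}"]) for i in range(self.dim)])
        # Per the ABC's contract: return a 1d tensor of shape (num_outcomes,).
        return (x**2).sum().reshape(1)
```

Keeping `evaluate_true` deterministic is what lets the runner layer per-metric noise on top without the test function needing to know about it.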
diff --git a/ax/benchmark/problems/hpo/torchvision.py b/ax/benchmark/problems/hpo/torchvision.py
index 3f7f7008537..8616dab4b47 100644
--- a/ax/benchmark/problems/hpo/torchvision.py
+++ b/ax/benchmark/problems/hpo/torchvision.py
@@ -14,8 +14,8 @@
     BenchmarkProblem,
     get_soo_config_and_outcome_names,
 )
-from ax.benchmark.runners.base import BenchmarkRunner
-from ax.benchmark.runners.botorch_test import ParamBasedTestProblem
+from ax.benchmark.benchmark_runner import BenchmarkRunner
+from ax.benchmark.benchmark_test_function import BenchmarkTestFunction
 from ax.core.parameter import ParameterType, RangeParameter
 from ax.core.search_space import SearchSpace
 from ax.exceptions.core import UserInputError
@@ -113,7 +113,7 @@ def train_and_evaluate(
 
 
 @dataclass(kw_only=True)
-class PyTorchCNNTorchvisionParamBasedProblem(ParamBasedTestProblem):
+class PyTorchCNNTorchvisionBenchmarkTestFunction(BenchmarkTestFunction):
     name: str  # The name of the dataset to load -- MNIST or FashionMNIST
     device: torch.device = field(
         default_factory=lambda: torch.device(
@@ -151,7 +151,7 @@ def __post_init__(self, train_loader: None, test_loader: None) -> None:
             transform=transforms.ToTensor(),
         )
         # pyre-fixme: Undefined attribute [16]:
-        # `PyTorchCNNTorchvisionParamBasedProblem` has no attribute
+        # `PyTorchCNNTorchvisionBenchmarkTestFunction` has no attribute
         # `train_loader`.
         self.train_loader = DataLoader(train_set, num_workers=1)
         # pyre-fixme
@@ -163,10 +163,10 @@ def evaluate_true(self, params: Mapping[str, int | float]) -> Tensor:
         frac_correct = train_and_evaluate(
             **params,
             device=self.device,
-            # pyre-fixme[16]: `PyTorchCNNTorchvisionParamBasedProblem` has no
+            # pyre-fixme[16]: `PyTorchCNNTorchvisionBenchmarkTestFunction` has no
             # attribute `train_loader`.
             train_loader=self.train_loader,
-            # pyre-fixme[16]: `PyTorchCNNTorchvisionParamBasedProblem` has no
+            # pyre-fixme[16]: `PyTorchCNNTorchvisionBenchmarkTestFunction` has no
             # attribute `test_loader`.
             test_loader=self.test_loader,
         )
@@ -215,7 +215,7 @@ def get_pytorch_cnn_torchvision_benchmark_problem(
         objective_name="accuracy",
     )
     runner = BenchmarkRunner(
-        test_problem=PyTorchCNNTorchvisionParamBasedProblem(name=name),
+        test_problem=PyTorchCNNTorchvisionBenchmarkTestFunction(name=name),
         outcome_names=outcome_names,
     )
     return BenchmarkProblem(
diff --git a/ax/benchmark/problems/synthetic/discretized/mixed_integer.py b/ax/benchmark/problems/synthetic/discretized/mixed_integer.py
index b3d66ef062b..38659846f17 100644
--- a/ax/benchmark/problems/synthetic/discretized/mixed_integer.py
+++ b/ax/benchmark/problems/synthetic/discretized/mixed_integer.py
@@ -21,8 +21,8 @@
 
 from ax.benchmark.benchmark_metric import BenchmarkMetric
 from ax.benchmark.benchmark_problem import BenchmarkProblem
-from ax.benchmark.runners.base import BenchmarkRunner
-from ax.benchmark.runners.botorch_test import BoTorchTestProblem
+from ax.benchmark.benchmark_runner import BenchmarkRunner
+from ax.benchmark.benchmark_test_functions.botorch_test import BoTorchTestFunction
 from ax.core.objective import Objective
 from ax.core.optimization_config import OptimizationConfig
 from ax.core.parameter import ParameterType, RangeParameter
@@ -47,7 +47,7 @@ def _get_problem_from_common_inputs(
 
     Args:
         bounds: The parameter bounds. These will be passed to
-            `BotorchTestProblemRunner` as `modified_bounds`, and the parameters
+            `BoTorchTestFunction` as `modified_bounds`, and the parameters
             will be renormalized from these bounds to the bounds of the original
             problem. For example, if `bounds` are [(0, 3)] and the test problem's
             original bounds are [(0, 2)], then the original problem
@@ -103,7 +103,7 @@ def _get_problem_from_common_inputs(
     else:
         test_problem = test_problem_class(dim=dim, bounds=test_problem_bounds)
     runner = BenchmarkRunner(
-        test_problem=BoTorchTestProblem(
+        test_problem=BoTorchTestFunction(
             botorch_problem=test_problem, modified_bounds=bounds
         ),
         outcome_names=[metric_name],
diff --git a/ax/benchmark/problems/synthetic/hss/jenatton.py b/ax/benchmark/problems/synthetic/hss/jenatton.py
index 69cd824759d..3e0e5fb2f53 100644
--- a/ax/benchmark/problems/synthetic/hss/jenatton.py
+++ b/ax/benchmark/problems/synthetic/hss/jenatton.py
@@ -11,8 +11,8 @@
 import torch
 from ax.benchmark.benchmark_metric import BenchmarkMetric
 from ax.benchmark.benchmark_problem import BenchmarkProblem
-from ax.benchmark.runners.base import BenchmarkRunner
-from ax.benchmark.runners.botorch_test import ParamBasedTestProblem
+from ax.benchmark.benchmark_runner import BenchmarkRunner
+from ax.benchmark.benchmark_test_function import BenchmarkTestFunction
 from ax.core.objective import Objective
 from ax.core.optimization_config import OptimizationConfig
 from ax.core.parameter import ChoiceParameter, ParameterType, RangeParameter
@@ -50,7 +50,7 @@ def jenatton_test_function(
 
 
 @dataclass(kw_only=True)
-class Jenatton(ParamBasedTestProblem):
+class Jenatton(BenchmarkTestFunction):
     """Jenatton test function for hierarchical search spaces."""
 
     # pyre-fixme[14]: Inconsistent override
diff --git a/ax/benchmark/tests/problems/test_mixed_integer_problems.py b/ax/benchmark/tests/problems/test_mixed_integer_problems.py
index c744c50f516..90676a343a0 100644
--- a/ax/benchmark/tests/problems/test_mixed_integer_problems.py
+++ b/ax/benchmark/tests/problems/test_mixed_integer_problems.py
@@ -9,13 +9,13 @@
 
 import torch
 from ax.benchmark.benchmark_problem import BenchmarkProblem
+from ax.benchmark.benchmark_test_functions.botorch_test import BoTorchTestFunction
 from ax.benchmark.problems.synthetic.discretized.mixed_integer import (
     get_discrete_ackley,
     get_discrete_hartmann,
     get_discrete_rosenbrock,
 )
-from ax.benchmark.runners.botorch_test import BoTorchTestProblem
 from ax.core.arm import Arm
 from ax.core.parameter import ParameterType
 from ax.core.trial import Trial
@@ -35,7 +35,7 @@ def test_problems(self) -> None:
             problem = constructor()
             self.assertEqual(f"Discrete {name}", problem.name)
             runner = problem.runner
-            test_problem = assert_is_instance(runner.test_problem, BoTorchTestProblem)
+            test_problem = assert_is_instance(runner.test_problem, BoTorchTestFunction)
             botorch_problem = test_problem.botorch_problem
             self.assertIsInstance(botorch_problem, problem_cls)
             self.assertEqual(len(problem.search_space.parameters), dim)
@@ -97,7 +97,7 @@ def test_problems(self) -> None:
 
         for problem, params, expected_arg in cases:
             runner = problem.runner
-            test_problem = assert_is_instance(runner.test_problem, BoTorchTestProblem)
+            test_problem = assert_is_instance(runner.test_problem, BoTorchTestFunction)
             trial = Trial(experiment=MagicMock())
             # pyre-fixme: Incompatible parameter type [6]: In call
             # `Arm.__init__`, for argument `parameters`, expected `Dict[str,
diff --git a/ax/benchmark/tests/runners/test_botorch_test_problem.py b/ax/benchmark/tests/runners/test_botorch_test_problem.py
index 67ed96f4095..2753221b9df 100644
--- a/ax/benchmark/tests/runners/test_botorch_test_problem.py
+++ b/ax/benchmark/tests/runners/test_botorch_test_problem.py
@@ -15,10 +15,10 @@
 
 import numpy as np
 import torch
+from ax.benchmark.benchmark_runner import BenchmarkRunner
+from ax.benchmark.benchmark_test_functions.botorch_test import BoTorchTestFunction
+from ax.benchmark.benchmark_test_functions.surrogate import SurrogateTestFunction
 from ax.benchmark.problems.synthetic.hss.jenatton import get_jenatton_benchmark_problem
-from ax.benchmark.runners.base import BenchmarkRunner
-from ax.benchmark.runners.botorch_test import BoTorchTestProblem
-from ax.benchmark.runners.surrogate import SurrogateTestFunction
 from ax.core.arm import Arm
 from ax.core.base_trial import TrialStatus
 from ax.core.trial import Trial
@@ -26,15 +26,15 @@
 from ax.utils.common.testutils import TestCase
 from ax.utils.common.typeutils import checked_cast
 from ax.utils.testing.benchmark_stubs import (
+    DummyTestFunction,
     get_soo_surrogate_test_function,
-    TestParamBasedTestProblem,
 )
 from botorch.test_functions.multi_objective import BraninCurrin
 from botorch.test_functions.synthetic import Ackley, ConstrainedHartmann, Hartmann
 from botorch.utils.transforms import normalize
 
 
-class TestBoTorchTestProblem(TestCase):
+class TestBoTorchTestFunction(TestCase):
     def setUp(self) -> None:
         super().setUp()
         botorch_base_test_functions = {
@@ -44,7 +44,7 @@ def setUp(self) -> None:
             "negated constrained Hartmann": ConstrainedHartmann(dim=6, negate=True),
         }
         self.botorch_test_problems = {
-            k: BoTorchTestProblem(botorch_problem=v)
+            k: BoTorchTestFunction(botorch_problem=v)
             for k, v in botorch_base_test_functions.items()
         }
 
@@ -73,9 +73,9 @@ def test_negation(self) -> None:
 
     def test_raises_for_botorch_attrs(self) -> None:
         msg = "noise should be set on the `BenchmarkRunner`, not the test function."
        with self.assertRaisesRegex(ValueError, msg):
-            BoTorchTestProblem(botorch_problem=Hartmann(dim=6, noise_std=0.1))
+            BoTorchTestFunction(botorch_problem=Hartmann(dim=6, noise_std=0.1))
         with self.assertRaisesRegex(ValueError, msg):
-            BoTorchTestProblem(
+            BoTorchTestFunction(
                 botorch_problem=ConstrainedHartmann(dim=6, constraint_noise_std=0.1)
             )
 
@@ -84,7 +84,7 @@ def test_raises_for_botorch_attrs(self) -> None:
         evaluate_true_results = {
             k: v.evaluate_true(params) for k, v in self.botorch_test_problems.items()
         }
-        evaluate_true_results["BraninCurrin"] = BoTorchTestProblem(
+        evaluate_true_results["BraninCurrin"] = BoTorchTestFunction(
             botorch_problem=BraninCurrin()
         ).evaluate_true(params)
         expected_len = {
@@ -108,7 +108,7 @@ def setUp(self) -> None:
     def test_synthetic_runner(self) -> None:
         botorch_cases = [
             (
-                BoTorchTestProblem(
+                BoTorchTestFunction(
                     botorch_problem=test_problem_class(dim=6),
                     modified_bounds=modified_bounds,
                 ),
@@ -126,7 +126,7 @@ def test_synthetic_runner(self) -> None:
         ]
         param_based_cases = [
             (
-                TestParamBasedTestProblem(dim=6, num_outcomes=num_outcomes),
+                DummyTestFunction(dim=6, num_outcomes=num_outcomes),
                 noise_std,
                 num_outcomes,
             )
@@ -141,12 +141,12 @@ def test_synthetic_runner(self) -> None:
             botorch_cases + param_based_cases + surrogate_cases
         ):
             # Set up outcome names
-            if isinstance(test_problem, BoTorchTestProblem):
+            if isinstance(test_problem, BoTorchTestFunction):
                 if isinstance(test_problem.botorch_problem, ConstrainedHartmann):
                     outcome_names = ["objective_0", "constraint"]
                 else:
                     outcome_names = ["objective_0"]
-            elif isinstance(test_problem, TestParamBasedTestProblem):
+            elif isinstance(test_problem, DummyTestFunction):
                 outcome_names = [f"objective_{i}" for i in range(num_outcomes)]
             else:  # SurrogateTestFunction
                 outcome_names = ["branin"]
@@ -177,17 +177,17 @@ def test_synthetic_runner(self) -> None:
 
             # check equality
             new_runner = replace(
-                runner, test_problem=BoTorchTestProblem(botorch_problem=Ackley())
+                runner, test_problem=BoTorchTestFunction(botorch_problem=Ackley())
             )
             self.assertNotEqual(runner, new_runner)
             self.assertEqual(runner, runner)
 
-            if isinstance(test_problem, BoTorchTestProblem):
+            if isinstance(test_problem, BoTorchTestFunction):
                 self.assertEqual(
                     test_problem.botorch_problem.bounds.dtype, torch.double
                 )
 
-            is_botorch = isinstance(test_problem, BoTorchTestProblem)
+            is_botorch = isinstance(test_problem, BoTorchTestFunction)
             with self.subTest(f"test `get_Y_true()`, {test_description}"):
                 dim = 6 if is_botorch else 9
                 X = torch.rand(1, dim, dtype=torch.double)
@@ -204,7 +204,7 @@ def test_synthetic_runner(self) -> None:
                 nullcontext()
                 if not isinstance(test_problem, SurrogateTestFunction)
                 else patch.object(
-                    # pyre-fixme: ParamBasedTestProblem` has no attribute
+                    # pyre-fixme: `BenchmarkTestFunction` has no attribute
                     # `_surrogate`.
                     runner.test_problem._surrogate,
                     "predict",
@@ -215,7 +215,7 @@ def test_synthetic_runner(self) -> None:
             oracle = runner.evaluate_oracle(parameters=params)
 
             if (
-                isinstance(test_problem, BoTorchTestProblem)
+                isinstance(test_problem, BoTorchTestFunction)
                 and test_problem.modified_bounds is not None
             ):
                 X_tf = normalize(
@@ -226,7 +226,7 @@ def test_synthetic_runner(self) -> None:
                 )
             else:
                 X_tf = X
-            if isinstance(test_problem, BoTorchTestProblem):
+            if isinstance(test_problem, BoTorchTestFunction):
                 botorch_problem = test_problem.botorch_problem
                 obj = botorch_problem.evaluate_true(X_tf)
                 if isinstance(botorch_problem, ConstrainedHartmann):
@@ -261,6 +261,8 @@ def test_synthetic_runner(self) -> None:
                 nullcontext()
                 if not isinstance(test_problem, SurrogateTestFunction)
                 else patch.object(
+                    # pyre-fixme: `BenchmarkTestFunction` has no attribute
+                    # `_surrogate`.
                     runner.test_problem._surrogate,
                     "predict",
                     return_value=({"branin": [4.2]}, None),
@@ -298,7 +300,7 @@ def test_synthetic_runner(self) -> None:
     def test_botorch_test_problem_runner_heterogeneous_noise(self) -> None:
         for noise_std in [[0.1, 0.05], {"objective": 0.1, "constraint": 0.05}]:
             runner = BenchmarkRunner(
-                test_problem=BoTorchTestProblem(
+                test_problem=BoTorchTestFunction(
                     botorch_problem=ConstrainedHartmann(dim=6)
                 ),
                 noise_std=noise_std,
diff --git a/ax/benchmark/tests/runners/test_surrogate_runner.py b/ax/benchmark/tests/runners/test_surrogate_runner.py
index 9ad256f7f90..6a8abdcb113 100644
--- a/ax/benchmark/tests/runners/test_surrogate_runner.py
+++ b/ax/benchmark/tests/runners/test_surrogate_runner.py
@@ -8,7 +8,7 @@
 from unittest.mock import MagicMock, patch
 
 import torch
-from ax.benchmark.runners.surrogate import SurrogateTestFunction
+from ax.benchmark.benchmark_test_functions.surrogate import SurrogateTestFunction
 from ax.modelbridge.torch import TorchModelBridge
 from ax.utils.common.testutils import TestCase
 from ax.utils.testing.benchmark_stubs import get_soo_surrogate_test_function
diff --git a/ax/benchmark/tests/test_benchmark_problem.py b/ax/benchmark/tests/test_benchmark_problem.py
index b8f948a1607..1b3d649d184 100644
--- a/ax/benchmark/tests/test_benchmark_problem.py
+++ b/ax/benchmark/tests/test_benchmark_problem.py
@@ -14,8 +14,8 @@
 
 from ax.benchmark.benchmark_metric import BenchmarkMetric
 from ax.benchmark.benchmark_problem import BenchmarkProblem, create_problem_from_botorch
-from ax.benchmark.runners.base import BenchmarkRunner
-from ax.benchmark.runners.botorch_test import BoTorchTestProblem
+from ax.benchmark.benchmark_runner import BenchmarkRunner
+from ax.benchmark.benchmark_test_functions.botorch_test import BoTorchTestFunction
 from ax.core.objective import MultiObjective, Objective
 from ax.core.optimization_config import (
     MultiObjectiveOptimizationConfig,
@@ -53,7 +53,7 @@ def test_inference_value_not_implemented(self) -> None:
         ]
         optimization_config = OptimizationConfig(objective=objectives[0])
         runner = BenchmarkRunner(
-            test_problem=BoTorchTestProblem(botorch_problem=Branin()),
+            test_problem=BoTorchTestFunction(botorch_problem=Branin()),
             outcome_names=["foo"],
         )
         with self.assertRaisesRegex(NotImplementedError, "Only `n_best_points=1`"):
@@ -214,7 +214,7 @@ def _test_constrained_from_botorch(
             noise_std=noise_std,
         )
         runner = ax_problem.runner
-        test_problem = assert_is_instance(runner.test_problem, BoTorchTestProblem)
+        test_problem = assert_is_instance(runner.test_problem, BoTorchTestFunction)
         botorch_problem = assert_is_instance(
             test_problem.botorch_problem, ConstrainedBaseTestProblem
         )
diff --git a/ax/utils/testing/benchmark_stubs.py b/ax/utils/testing/benchmark_stubs.py
index 0c462ac32eb..46ce4c089a7 100644
--- a/ax/utils/testing/benchmark_stubs.py
+++ b/ax/utils/testing/benchmark_stubs.py
@@ -15,9 +15,9 @@
 from ax.benchmark.benchmark_metric import BenchmarkMetric
 from ax.benchmark.benchmark_problem import BenchmarkProblem, create_problem_from_botorch
 from ax.benchmark.benchmark_result import AggregatedBenchmarkResult, BenchmarkResult
-from ax.benchmark.runners.base import BenchmarkRunner
-from ax.benchmark.runners.botorch_test import ParamBasedTestProblem
-from ax.benchmark.runners.surrogate import SurrogateTestFunction
+from ax.benchmark.benchmark_runner import BenchmarkRunner
+from ax.benchmark.benchmark_test_function import BenchmarkTestFunction
+from ax.benchmark.benchmark_test_functions.surrogate import SurrogateTestFunction
 from ax.core.experiment import Experiment
 from ax.core.objective import MultiObjective, Objective
 from ax.core.optimization_config import (
@@ -242,7 +242,7 @@ def get_aggregated_benchmark_result() -> AggregatedBenchmarkResult:
 
 
 @dataclass(kw_only=True)
-class TestParamBasedTestProblem(ParamBasedTestProblem):
+class DummyTestFunction(BenchmarkTestFunction):
     num_outcomes: int = 1
     dim: int = 6
 
diff --git a/sphinx/source/benchmark.rst b/sphinx/source/benchmark.rst
index 50214b268ca..3a92d5b3605 100644
--- a/sphinx/source/benchmark.rst
+++ b/sphinx/source/benchmark.rst
@@ -34,6 +34,13 @@ Benchmark Problem
    :undoc-members:
    :show-inheritance:
 
+Test Function
+~~~~~~~~~~~~~
+
+.. automodule:: ax.benchmark.benchmark_test_function
+   :members:
+   :undoc-members:
+   :show-inheritance:
 
 Benchmark Result
 ~~~~~~~~~~~~~~~~
@@ -51,6 +58,14 @@ Benchmark
    :undoc-members:
    :show-inheritance:
 
+Benchmark Runner
+~~~~~~~~~~~~~~~~
+
+.. automodule:: ax.benchmark.benchmark_runner
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
 Benchmark Methods Modular BoTorch
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -83,14 +98,6 @@ Benchmark Problems High Dimensional Embedding
    :undoc-members:
    :show-inheritance:
 
-Benchmark Problems Surrogate
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: ax.benchmark.problems.surrogate
-   :members:
-   :undoc-members:
-   :show-inheritance:
-
 Benchmark Problems Mixed Integer Synthetic
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -123,26 +130,19 @@ Benchmark Problems PyTorchCNN TorchVision
    :undoc-members:
    :show-inheritance:
 
-Benchmark Runners Base
-~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: ax.benchmark.runners.base
-   :members:
-   :undoc-members:
-   :show-inheritance:
+Benchmark Test Functions: BoTorch Test
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Benchmark Runners BoTorch Test
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: ax.benchmark.runners.botorch_test
+.. automodule:: ax.benchmark.benchmark_test_functions.botorch_test
    :members:
    :undoc-members:
    :show-inheritance:
 
-Benchmark Runners Surrogate
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Benchmark Test Functions: Surrogate
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. automodule:: ax.benchmark.runners.surrogate
+.. automodule:: ax.benchmark.benchmark_test_functions.surrogate
    :members:
    :undoc-members:
    :show-inheritance:
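Taken together, the moves amount to the following migration for downstream imports. This is a sketch based only on what the diff shows; `Branin` and the `"branin"` outcome name are placeholders mirroring the updated tests:

```python
# Old layout (removed by this change):
#   from ax.benchmark.runners.base import BenchmarkRunner
#   from ax.benchmark.runners.botorch_test import BoTorchTestProblem

# New layout:
from ax.benchmark.benchmark_runner import BenchmarkRunner
from ax.benchmark.benchmark_test_functions.botorch_test import BoTorchTestFunction
from botorch.test_functions.synthetic import Branin

# A BoTorch problem is wrapped in the renamed BoTorchTestFunction; noise is
# configured on the BenchmarkRunner (float, list, or per-metric dict), never
# on the test function itself.
runner = BenchmarkRunner(
    test_problem=BoTorchTestFunction(botorch_problem=Branin()),
    outcome_names=["branin"],
    noise_std=0.1,
)
```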