diff --git a/adaptive/domain.py b/adaptive/domain.py
new file mode 100644
index 000000000..c512382c9
--- /dev/null
+++ b/adaptive/domain.py
@@ -0,0 +1,741 @@
+import abc
+import functools
+import itertools
+from collections import defaultdict
+
+import numpy as np
+import scipy.linalg
+import scipy.spatial
+from sortedcontainers import SortedList
+
+from adaptive.learner.triangulation import Triangulation, circumsphere, point_in_simplex
+
+__all__ = ["Domain", "Interval", "ConvexHull"]
+
+
+class Domain(metaclass=abc.ABCMeta):
+    @abc.abstractmethod
+    def insert_points(self, subdomain, n):
+        """Insert 'n' points into 'subdomain'.
+
+        Returns
+        -------
+        points : list of points
+            The points that were inserted.
+        affected_subdomains : Iterable of subdomains
+            If some points were added on the boundary of 'subdomain'
+            then they will also have been added to the neighboring
+            subdomains.
+        """
+
+    @abc.abstractmethod
+    def insert(self, x):
+        """Insert 'x' into any subdomains to which it belongs.
+
+        Returns
+        -------
+        affected_subdomains : Iterable of subdomains
+            The subdomains to which 'x' was added.
+
+        Raises
+        ------
+        ValueError : if x is outside the domain or exists already
+        """
+
+    @abc.abstractmethod
+    def remove(self, x):
+        """Remove 'x' from any subdomains to which it belongs.
+
+        Returns
+        -------
+        affected_subdomains : Iterable of subdomains
+            The subdomains from which 'x' was removed.
+
+        Raises
+        ------
+        ValueError : if x is a subdomain vertex
+        ValueError : if x is not in any subdomain
+        """
+
+    @abc.abstractmethod
+    def split_at(self, x):
+        """Split the domain at 'x'.
+
+        Removes and adds subdomains.
+
+        Returns
+        -------
+        old_subdomains : list of subdomains
+            The subdomains that were removed when splitting at 'x'.
+        new_subdomains : list of subdomains
+            The subdomains that were added when splitting at 'x'.
+
+        Raises
+        ------
+        ValueError : if x is outside of the domain or exists already
+        """
+
+    @abc.abstractmethod
+    def which_subdomains(self, x):
+        """Return the subdomains that contain 'x'.
+
+        Returns
+        -------
+        subdomains : Iterable of subdomains
+            The subdomains to which 'x' belongs.
+
+        Raises
+        ------
+        ValueError : if x is outside of the domain
+        """
+
+    @abc.abstractmethod
+    def encloses(self, points):
+        """Return whether the domain encloses the points.
+
+        Parameters
+        ----------
+        points : a point or sequence of points
+
+        Returns
+        -------
+        A boolean (if a single point was provided) or an array of booleans
+        (if a sequence of points was provided) that is True when
+        the domain encloses the point.
+        """
+
+    @abc.abstractmethod
+    def vertices(self):
+        """Returns the vertices of the domain."""
+
+    @abc.abstractmethod
+    def neighbors(self, subdomain, n=1):
+        "Return all neighboring subdomains up to degree 'n'."
+
+    @abc.abstractmethod
+    def subdomains(self):
+        "Return all the subdomains in the domain."
+
+    @abc.abstractmethod
+    def subpoints(self, subdomain):
+        "Return all points in the interior of a subdomain."
+
+    @abc.abstractmethod
+    def clear_subdomains(self):
+        """Remove all points from the interior of subdomains.
+
+        Returns
+        -------
+        subdomains : the subdomains whose interior points were removed
+        """
+
+    @abc.abstractmethod
+    def volume(self, subdomain):
+        "Return the volume of a subdomain."
+
+    @abc.abstractmethod
+    def subvolumes(self, subdomain):
+        "Return the volumes of the sub-subdomains."
+
+
+def _choose_point_in_subinterval(a, b):
+    m = a + (b - a) / 2
+    if not a < m < b:
+        raise ValueError(f"{(a, b)} cannot be split further")
+    return m
+
+
+class Interval(Domain):
+    """A 1D domain (an interval).
+
+    Subdomains are pairs of floats (a, b).
+    """
+
+    def __init__(self, a, b):
+        if a >= b:
+            raise ValueError("'a' must be less than 'b'")
+
+        # If a sub-interval contains points in its interior, they are stored
+        # in 'sub_intervals' in a SortedList.
+        self.bounds = (a, b)
+        self.sub_intervals = dict()
+        self.points = SortedList([a, b])
+        self.ndim = 1
+
+    def insert_points(self, subdomain, n, *, _check_membership=True):
+        if n <= 0:
+            raise ValueError("n must be positive")
+        if _check_membership and not self.contains_subdomain(subdomain):
+            raise ValueError(f"{subdomain} is not present in this interval")
+        try:
+            p = self.sub_intervals[subdomain]
+        except KeyError:  # No points yet in the interior of this subdomain
+            p = SortedList(subdomain)
+            self.sub_intervals[subdomain] = p
+
+        # Choose new points in the centre of the largest subdomain
+        # of this subinterval.
+        points = []
+        subsubdomains = SortedList(zip(p, p.islice(1)), key=self.volume)
+        for _ in range(n):
+            a, b = subsubdomains.pop()
+            m = _choose_point_in_subinterval(a, b)
+            subsubdomains.update([(a, m), (m, b)])
+            points.append(m)
+        p.update(points)
+
+        return points, [subdomain]
+
+    def insert(self, x, *, _check_membership=True):
+        if _check_membership:
+            a, b = self.bounds
+            if not (a <= x <= b):
+                raise ValueError(f"{x} is outside of this interval")
+
+        p = self.points
+        i = p.bisect_left(x)
+        if p[i] == x:
+            raise ValueError(f"{x} exists in this interval already")
+        subdomain = (a, b) = p[i - 1], p[i]
+
+        try:
+            p = self.sub_intervals[subdomain]
+        except KeyError:
+            self.sub_intervals[subdomain] = SortedList([a, x, b])
+        else:
+            if x in p:
+                raise ValueError(f"{x} exists in a subinterval already")
+            p.add(x)
+
+        return [subdomain]
+
+    def remove(self, x, *, _check_membership=True):
+        if _check_membership:
+            a, b = self.bounds
+            if not (a <= x <= b):
+                raise ValueError(f"{x} is outside of this interval")
+
+        p = self.points
+        i = p.bisect_left(x)
+        if p[i] == x:
+            raise ValueError("Cannot remove subdomain vertices")
+        subdomain = (p[i - 1], p[i])
+
+        try:
+            sub_points = self.sub_intervals[subdomain]
+        except KeyError:
+            raise ValueError(f"{x} not in any subdomain")
+        else:
+            sub_points.remove(x)
+            if len(sub_points) == 2:
+                del self.sub_intervals[subdomain]
+            return [subdomain]
+
+    def split_at(self, x, *, _check_membership=True):
+        a, b = self.bounds
+        if _check_membership:
+            if not (a <= x <= b):
+                raise ValueError("Can only split at points within the interval")
+
+        p = self.points
+        i = p.bisect_left(x)
+        if p[i] == x:
+            raise ValueError("Cannot split at an existing point")
+        a, b = old_interval = p[i - 1], p[i]
+        new_intervals = [(a, x), (x, b)]
+
+        p.add(x)
+        try:
+            sub_points = self.sub_intervals.pop(old_interval)
+        except KeyError:
+            pass
+        else:
+            # Update subintervals
+            for ival in new_intervals:
+                new_sub_points = SortedList(sub_points.irange(*ival))
+                if x not in new_sub_points:
+                    # This should add 'x' to the start or the end
+                    new_sub_points.add(x)
+                if len(new_sub_points) > 2:
+                    # We don't store subintervals if they don't contain
+                    # any points in their interior.
+                    self.sub_intervals[ival] = new_sub_points
+
+        return [old_interval], new_intervals
+
+    def which_subdomains(self, x):
+        a, b = self.bounds
+        if not (a <= x <= b):
+            raise ValueError(f"{x} is outside the interval")
+        p = self.points
+        i = p.bisect_left(x)
+        if p[i] != x:
+            # general point inside a subinterval
+            return [(p[i - 1], p[i])]
+        else:
+            # boundary of a subinterval
+            neighbors = []
+            if i > 0:
+                neighbors.append((p[i - 1], p[i]))
+            if i < len(p) - 1:
+                neighbors.append((p[i], p[i + 1]))
+            return neighbors
+
+    def contains_subdomain(self, subdomain):
+        a, b = subdomain
+        try:
+            ia = self.points.index(a)
+            ib = self.points.index(b)
+        except ValueError:
+            return False
+        return ia + 1 == ib
+
+    def vertices(self):
+        return self.points
+
+    def encloses(self, points):
+        a, b = self.bounds
+        points = np.asarray(points)
+        if points.shape == ():  # single point
+            return a <= points <= b
+        else:
+            return np.logical_and(a <= points, points <= b)
+
+    def neighbors(self, subdomain, n=1):
+        a, b = subdomain
+        p = self.points
+        ia = p.index(a)
+        neighbors = []
+        for i in range(n):
+            if ia - i > 0:  # left neighbor exists
+                neighbors.append((p[ia - i - 1], p[ia - i]))
+            if ia + i < len(p) - 2:  # right neighbor exists
+                neighbors.append((p[ia + i + 1], p[ia + i + 2]))
+        return neighbors
+
+    def subdomains(self):
+        p = self.points
+        return zip(p, p.islice(1))
+
+    def subpoints(self, subdomain, *, _check_membership=True):
+        if _check_membership and not self.contains_subdomain(subdomain):
+            raise ValueError(f"{subdomain} is not present in this interval")
+        try:
+            p = self.sub_intervals[subdomain]
+        except KeyError:
+            return []
+        else:
+            # subinterval points contain the vertex points
+            return p[1:-1]
+
+    def clear_subdomains(self):
+        subdomains = list(self.sub_intervals.keys())
+        self.sub_intervals = dict()
+        return subdomains
+
+    def volume(self, subdomain):
+        a, b = subdomain
+        return b - a
+
+    def subvolumes(self, subdomain):
+        try:
+            p = self.sub_intervals[subdomain]
+        except KeyError:
+            return [self.volume(subdomain)]
+        else:
+            return [self.volume(s) for s in zip(p, p.islice(1))]
+
+
+def _choose_point_in_simplex(simplex, transform=None):
+    """Choose a good point at which to split a simplex.
+
+    Parameters
+    ----------
+    simplex : (n+1, n) array
+        The simplex vertices
+    transform : (n, n) array
+        The linear transform to apply to the simplex vertices
+        before determining which point to choose. Must be
+        invertible.
+
+    Returns
+    -------
+    point : (n,) array
+        The point that was chosen in the simplex.
+    """
+    if transform is not None:
+        simplex = np.dot(simplex, transform)
+
+    # Choose the center only if the shape of the simplex is nice,
+    # otherwise choose the middle of the longest edge.
+    center, _radius = circumsphere(simplex)
+    if point_in_simplex(center, simplex):
+        point = np.average(simplex, axis=0)
+    else:
+        distances = scipy.spatial.distance.pdist(simplex)
+        distance_matrix = scipy.spatial.distance.squareform(distances)
+        i, j = np.unravel_index(np.argmax(distance_matrix), distance_matrix.shape)
+        point = (simplex[i, :] + simplex[j, :]) / 2
+
+    if transform is not None:
+        point = np.linalg.solve(transform, point)  # undo the transform
+
+    return tuple(point)
+
+
+def _simplex_facets(ndim):
+    """Return the facets of a simplex in 'ndim' dimensions.
+
+    Parameters
+    ----------
+    ndim : positive int
+
+    Returns
+    -------
+    facets : Iterable of integer tuples
+        Contains 'ndim + 1' tuples, and each tuple contains
+        'ndim' integers.
+
+    Examples
+    --------
+    In 2D a simplex is a triangle (3 points) and the facets are the lines
+    joining these points:
+
+    >>> list(_simplex_facets(2))
+    [(0, 1), (0, 2), (1, 2)]
+    """
+    return itertools.combinations(range(ndim + 1), ndim)
+
+
+def _boundary_equations(simplex):
+    """Return the set of equations defining the boundary of a simplex.
+
+    Parameters
+    ----------
+    simplex : (N + 1, N) float array-like
+        The vertices of an N-dimensional simplex.
+
+    Returns
+    -------
+    A : (N + 1, N) float array
+        Each row is a normal vector to a facet of 'simplex'.
+        The facets are in the same order as returned by
+        '_simplex_facets(N)'.
+    b : (N + 1,) float array
+        Each element is the offset from the origin of the
+        corresponding facet of 'simplex'.
+
+    Notes
+    -----
+
+    This is slower than using scipy.spatial.ConvexHull, however the ordering
+    of the equations as returned by scipy.spatial.ConvexHull is not clear.
+
+    Care is not taken to orient the facets to point out of the simplex; the
+    equations should only be used for verifying if a point lies on a boundary,
+    rather than if it lies inside the simplex.
+
+    Examples
+    --------
+    >>> simplex = [(0, 0), (1, 0), (0, 1)]
+    >>> A, b = _boundary_equations(simplex)
+    >>> x = [0.5, 0]
+    >>> which_boundary = np.isclose(A @ x + b, 0)
+    >>> # facet #0 is the line between (0, 0) and (1, 0)
+    >>> assert which_boundary[0] == True
+    """
+    points = np.asarray(simplex)
+    ndim = points.shape[1]
+    assert points.shape == (ndim + 1, ndim)
+    A = np.empty((ndim + 1, ndim), dtype=float)
+    b = np.empty((ndim + 1), dtype=float)
+    for i, (x0, *v) in enumerate(_simplex_facets(ndim)):
+        facet_tangent_space = points[list(v)] - points[x0]
+        facet_normal = scipy.linalg.null_space(facet_tangent_space).squeeze()
+        A[i, :] = facet_normal
+        b[i] = -np.dot(points[x0], facet_normal)
+    return A, b
+
+
+def _on_which_boundary(equations, x, eps=1e-8):
+    """Return the simplex boundary on which 'x' is found.
+
+    Parameters
+    ----------
+    equations : the output of _boundary_equations
+        The equations defining a simplex in 'N' dimensions
+    x : (N,) float array-like
+    eps : float
+        The tolerance used when checking whether 'x' satisfies a
+        boundary equation.
+
+    Returns
+    -------
+    None if 'x' is not on a simplex boundary.
+    Otherwise, returns a tuple containing integers defining
+    the boundary on which 'x' is found.
+
+    Examples
+    --------
+    >>> simplex = [(0., 0.), (2., 0.), (0., 4.)]
+    >>> eq = _boundary_equations(simplex)
+    >>> x = [0.5, 0.]
+    >>> _on_which_boundary(eq, x) == (0, 1)
+    >>> x = [2., 0.]
+    >>> _on_which_boundary(eq, x) == (1,)
+    """
+    ndim = len(x)
+    A, b = equations
+    assert len(b) == ndim + 1
+    on_boundary = np.isclose(A @ x + b, 0, atol=eps)
+    if not any(on_boundary):
+        return None
+    # The point is on the boundary of all the following facets
+    facets = [facet for i, facet in enumerate(_simplex_facets(ndim)) if on_boundary[i]]
+    # If the point is on the boundary of more than 1 facet, then it is on a
+    # lower-dimensional facet.
+    boundary_facet = set.intersection(*map(set, facets))
+    return tuple(sorted(boundary_facet))
+
+
+def _make_new_subtriangulation(points):
+    points = np.asarray(points)
+    ndim = points.shape[1]
+    boundary_points = points[: ndim + 1]
+    # _check_vertices=False to speed up the initial triangulation
+    subtri = Triangulation(points, _check_vertices=False)
+    subtri.on_which_boundary = functools.partial(
+        _on_which_boundary, _boundary_equations(boundary_points)
+    )
+    return subtri
+
+
+class ConvexHull(Domain):
+    """A convex hull domain in $ℝ^N$ (N >= 2).
+
+    Subdomains are simplices represented by integer tuples of length (N + 1).
+    """
+
+    def __init__(self, points):
+        hull = scipy.spatial.ConvexHull(points)
+
+        self.bounds = hull
+        self.triangulation = Triangulation(hull.points[hull.vertices])
+        # If a subdomain has interior points, then it appears as a key
+        # in 'sub_triangulations' and maps to a 'Triangulation' of the
+        # interior of the subdomain. By definition the triangulation
+        # is over a simplex, and the first 'ndim + 1' points in the
+        # triangulation are the boundary points.
+        self.sub_triangulations = dict()
+        self.ndim = self.bounds.points.shape[1]
+
+        # As an optimization we store any points inserted with 'insert_points'
+        # and 'insert' and point to the subdomains to which they belong. This
+        # allows 'which_subdomains' and 'split_at' to work faster when given points
+        # that were previously added with 'insert' or 'insert_points'.
+        self.subpoints_to_subdomains = defaultdict(set)
+
+    @property
+    def bounding_box(self):
+        hull_points = self.bounds.points[self.bounds.vertices]
+        return tuple(zip(hull_points.min(axis=0), hull_points.max(axis=0)))
+
+    def _get_subtriangulation(self, subdomain):
+        try:
+            subtri = self.sub_triangulations[subdomain]
+        except KeyError:  # No points in the interior of this subdomain yet
+            points = [self.triangulation.vertices[x] for x in subdomain]
+            subtri = _make_new_subtriangulation(points)
+            self.sub_triangulations[subdomain] = subtri
+        return subtri
+
+    def insert_points(self, subdomain, n, *, _check_membership=True):
+        if n <= 0:
+            raise ValueError("n must be positive")
+        tri = self.triangulation
+        if _check_membership and subdomain not in tri.simplices:
+            raise ValueError(f"{subdomain} is not present in this domain")
+
+        subtri = self._get_subtriangulation(subdomain)
+
+        # Choose the largest volume sub-simplex and insert a point into it.
+        # Also insert the point into neighboring subdomains if it was chosen
+        # on the subdomain boundary.
+        points = []
+        affected_subdomains = {subdomain}
+        for _ in range(n):
+            # O(N) in the number of sub-simplices, but typically we only have a few
+            largest_simplex = max(subtri.simplices, key=subtri.volume)
+            simplex_vertices = np.array([subtri.vertices[s] for s in largest_simplex])
+            point = _choose_point_in_simplex(simplex_vertices)
+            points.append(point)
+            subtri.add_point(point, largest_simplex)
+            self.subpoints_to_subdomains[point].add(subdomain)
+            # If the point was added to a boundary of the subdomain we should
+            # add it to the neighboring subdomains. If we do not do this, then
+            # if 'insert_points' is called for the neighboring subdomains, it is
+            # possible that 'point' may be returned, which is inconsistent.
+            boundary = subtri.on_which_boundary(point)
+            if boundary is not None:
+                # Convert subtriangulation indices to triangulation indices
+                boundary = tuple(sorted(subdomain[i] for i in boundary))
+                neighbors = set(tri.containing(boundary))
+                neighbors.remove(subdomain)
+                for sd in neighbors:
+                    self._get_subtriangulation(sd).add_point(point)
+                affected_subdomains.update(neighbors)
+                self.subpoints_to_subdomains[point].update(neighbors)
+
+        return [tuple(p) for p in points], affected_subdomains
+
+    def insert(self, x, *, _check_membership=True):
+        x = tuple(x)
+        # XXX: O(N) in the number of simplices
+        affected_subdomains = self.which_subdomains(x)
+        if not affected_subdomains:
+            raise ValueError(f"{x} is not present in this domain")
+        for subdomain in affected_subdomains:
+            subtri = self._get_subtriangulation(subdomain)
+            if x in subtri.vertices:  # O(N) in the number of vertices
+                raise ValueError(f"{x} exists in a subdomain already")
+            subtri.add_point(x)
+        self.subpoints_to_subdomains[x].update(affected_subdomains)
+
+        return affected_subdomains
+
+    def remove(self, x):
+        x = tuple(x)
+        try:
+            affected_subdomains = self.subpoints_to_subdomains.pop(x)
+        except KeyError:
+            raise ValueError("Can only remove points inside subdomains")
+        for subdomain in affected_subdomains:
+            # Check that it's not a vertex of the subdomain
+            subtri = self.sub_triangulations[subdomain]
+            assert x in subtri.vertices
+            points = [v for v in subtri.vertices if v != x]
+            if len(points) == self.ndim + 1:
+                # No more points inside the subdomain
+                del self.sub_triangulations[subdomain]
+            else:
+                # Rebuild the subtriangulation from scratch
+                self.sub_triangulations[subdomain] = _make_new_subtriangulation(points)
+
+        return affected_subdomains
+
+    def split_at(self, x, *, _check_membership=True):
+        x = tuple(x)
+        tri = self.triangulation
+        try:
+            containing_subdomains = self.subpoints_to_subdomains.pop(x)
+            # Only need a single subdomain containing 'x' to make 'tri.add_point' fast.
+            subdomain = next(iter(containing_subdomains))
+        except KeyError:
+            # XXX: O(N) in the number of simplices.
+            subdomain = tri.locate_point(x)
+            if not subdomain:
+                raise ValueError("Can only split at points within the domain.")
+
+        old_subdomains, new_subdomains = tri.add_point(x, subdomain)
+
+        if _check_membership:
+            assert not any(s in self.sub_triangulations for s in new_subdomains)
+
+        # Re-assign all the interior points of 'old_subdomains' to 'new_subdomains'.
+
+        # Keep the interior points as a set, because interior points on a shared face
+        # appear in the subtriangulations of both the neighboring simplices, and we
+        # don't want those points to appear twice.
+        interior_points = set()
+        for d in old_subdomains:
+            try:
+                subtri = self.sub_triangulations.pop(d)
+            except KeyError:
+                continue
+            else:
+                # Get all points in the subtriangulation except the boundary
+                # points. Because a subtriangulation is always defined over
+                # a simplex, the first ndim + 1 points are the boundary points.
+                interior = [v for v in subtri.vertices[self.ndim + 1 :] if v != x]
+                for v in interior:
+                    s = self.subpoints_to_subdomains[v]
+                    s.remove(d)
+                    if not s:
+                        del self.subpoints_to_subdomains[v]
+                interior_points.update(interior)
+        for p in interior_points:
+            # Try to add 'p' to all the new subdomains. It may belong to more than 1
+            # if it lies on a subdomain boundary.
+            p_was_added = False
+            for subdomain in new_subdomains:
+                if tri.point_in_simplex(p, subdomain):
+                    subtri = self._get_subtriangulation(subdomain)
+                    subtri.add_point(p)
+                    self.subpoints_to_subdomains[p].add(subdomain)
+                    p_was_added = True
+            assert p_was_added, f"{p} was not in the interior of any new simplices"
+
+        return old_subdomains, new_subdomains
+
+    def which_subdomains(self, x):
+        x = tuple(x)
+        tri = self.triangulation
+        if x in self.subpoints_to_subdomains:
+            subdomains = self.subpoints_to_subdomains[x]
+        else:
+            # XXX: O(N) in the number of simplices
+            subdomains = [s for s in tri.simplices if tri.point_in_simplex(x, s)]
+        if not subdomains:
+            raise ValueError(f"{x} is not in the domain")
+        return list(subdomains)
+
+    def contains_subdomain(self, subdomain):
+        return subdomain in self.triangulation.simplices
+
+    def transform(self, x):
+        # XXX: implement this
+        raise NotImplementedError()
+
+    def neighbors(self, subdomain, n=1):
+        tri = self.triangulation
+        neighbors = {subdomain}
+        for _ in range(n):
+            for face in list(tri.faces(simplices=neighbors)):
+                neighbors.update(tri.containing(face))
+        neighbors.remove(subdomain)
+        return neighbors
+
+    def subdomains(self):
+        return self.triangulation.simplices
+
+    def encloses(self, points):
+        points = np.asarray(points).T
+        A, b = self.bounds.equations[:, :-1], self.bounds.equations[:, -1:]
+        if len(points.shape) == 1:
+            points = points[:, None]
+        return np.all(A @ points + b <= 0, axis=0)
+
+    def vertices(self):
+        return self.triangulation.vertices
+
+    def subpoints(self, subdomain, *, _check_membership=True):
+        if _check_membership and not self.contains_subdomain(subdomain):
+            raise ValueError(f"{subdomain} is not present in this domain")
+        try:
+            subtri = self.sub_triangulations[subdomain]
+        except KeyError:
+            return []
+        else:
+            # Subtriangulations are, by definition, over simplices. This means
+            # that the first ndim + 1 points are the simplex vertices, which we skip
+            return subtri.vertices[self.ndim + 1 :]
+
+    def clear_subdomains(self):
+        sub_triangulations = list(self.sub_triangulations.keys())
+        self.sub_triangulations = dict()
+        return sub_triangulations
+
+    def volume(self, subdomain):
+        return self.triangulation.volume(subdomain)
+
+    def subvolumes(self, subdomain):
+        try:
+            subtri = self.sub_triangulations[subdomain]
+        except KeyError:
+            return [self.triangulation.volume(subdomain)]
+        else:
+            return [subtri.volume(s) for s in subtri.simplices]
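
For orientation, a minimal sketch (not itself part of the patch) of how the Interval API above composes, assuming the module is importable as adaptive.domain. Printed values are illustrative; the tie between equal-volume subintervals may resolve either way.

    from adaptive.domain import Interval

    domain = Interval(0.0, 1.0)
    (subdomain,) = domain.subdomains()      # only one subdomain so far: (0.0, 1.0)

    # Insert two pending points; each bisects the largest sub-subinterval in turn.
    points, affected = domain.insert_points(subdomain, 2)
    print(points)                           # e.g. [0.5, 0.75]
    print(domain.subpoints(subdomain))      # the same points, stored as interior points

    # Promoting a pending point to a vertex splits the subdomain.
    old, new = domain.split_at(0.5)
    print(old, new)                         # [(0.0, 1.0)] [(0.0, 0.5), (0.5, 1.0)]
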
+ """ + + +class DistanceLoss(LossFunction): + @property + def n_neighbors(self): + return 0 + + def __call__(self, domain, subdomain, codomain_bounds, data): + assert isinstance(domain, Interval) + a, b = subdomain + ya, yb = data[a], data[b] + return math.sqrt((b - a) ** 2 + (yb - ya) ** 2) + + +class EmbeddedVolumeLoss(LossFunction): + @property + def n_neighbors(self): + return 0 + + def __call__(self, domain, subdomain, codomain_bounds, data): + assert isinstance(domain, ConvexHull) + xs = [tuple(domain.triangulation.vertices[x]) for x in subdomain] + ys = [data[x] for x in xs] + if isinstance(ys[0], Iterable): + pts = [(*x, *y) for x, y in zip(xs, ys)] + else: + pts = [(*x, y) for x, y in zip(xs, ys)] + return simplex_volume_in_embedding(pts) + + +class TriangleLoss(LossFunction): + @property + def n_neighbors(self): + return 1 + + def __call__(self, domain, subdomain, codomain_bounds, data): + assert isinstance(domain, ConvexHull) + neighbors = domain.neighbors(subdomain, self.n_neighbors) + if not neighbors: + return 0 + neighbor_points = set.union(*(set(n) - set(subdomain) for n in neighbors)) + + neighbor_points = [domain.triangulation.vertices[p] for p in neighbor_points] + + simplex = [domain.triangulation.vertices[p] for p in subdomain] + + z = data[simplex[0]] + if isinstance(z, Iterable): + s = [(*x, *data[x]) for x in simplex] + n = [(*x, *data[x]) for x in neighbor_points] + else: + s = [(*x, data[x]) for x in simplex] + n = [(*x, data[x]) for x in neighbor_points] + + return sum(simplex_volume_in_embedding([*s, neigh]) for neigh in n) / len( + neighbors + ) + + +class CurvatureLoss(LossFunction): + def __init__(self, exploration=0.05): + self.exploration = exploration + + @property + def n_neighbors(self): + return 1 + + def __call__(self, domain, subdomain, codomain_bounds, data): + dim = domain.ndim + + loss_input_volume = domain.volume(subdomain) + triangle_loss = TriangleLoss() + + loss_curvature = triangle_loss(domain, subdomain, codomain_bounds, data) + return ( + loss_curvature + self.exploration * loss_input_volume ** ((2 + dim) / dim) + ) ** (1 / (2 + dim)) + + +class LearnerND(BaseLearner): + """Learns a function 'f: ℝ^N → ℝ^m'. + + Parameters + --------- + f : callable + The function to learn. Must take a tuple of N real parameters and return a real + number or an arraylike of length M. + bounds : list of 2-tuples or `scipy.spatial.ConvexHull` + A list ``[(a_1, b_1), (a_2, b_2), ..., (a_N, b_N)]`` describing a bounding box + in N dimensions, or a convex hull that defines the boundary of the domain. + loss : callable, optional + An instance of a subclass of `LossFunction` that describes the loss + of a subdomain. + """ + + def __init__(self, f, bounds, loss=None): + + if len(bounds) == 1: + (a, b), = (boundary_points,) = bounds + self.domain = Interval(a, b) + self.loss_function = loss or DistanceLoss() + self.ndim = 1 + else: + if isinstance(bounds, scipy.spatial.ConvexHull): + boundary_points = bounds.points[bounds.vertices] + else: + boundary_points = sorted(tuple(p) for p in itertools.product(*bounds)) + self.domain = ConvexHull(boundary_points) + self.loss_function = loss or EmbeddedVolumeLoss() + self.ndim = len(boundary_points[0]) + + self.boundary_points = boundary_points + self.data = dict() # Contains the evaluated data only + self.pending_points = set() + self.function = f + + # The loss function may depend on the "scale" (i.e. 
the difference between + # the maximum and the minimum) of the function values, in addition to the + # function values themselves. In order to take into account this "global" + # information we recompute the losses for all subdomains when the scale + # changes by more than this factor from the last time we recomputed all + # the losses. + self._recompute_losses_factor = 1.1 + + # As an optimization we keep a map from subdomain to loss. + # This is updated in 'self.priority' whenever the loss function is evaluated + # for a new subdomain. 'self.tell_many' removes subdomains from here when + # they are split, and also removes neighboring subdomains from here (to force + # a loss function recomputation). + self.losses = dict() + + # We must wait until the boundary points have been evaluated before we can + # set these attributes. + self._initialized = False + # The dimension of the output space. + self.vdim = None + # The maximum and minimum values of 'f' seen thus far. + self.codomain_bounds = None + # The difference between the maximum and minimum of 'f' at the last + # time all the losses were recomputed. + self.codomain_scale_at_last_update = None + + # A priority queue of subdomains, which is used to determine where to add + # points. + self.queue = Queue() + for subdomain in self.domain.subdomains(): + self.queue.insert(subdomain, priority=self.priority(subdomain)) + + def _finalize_initialization(self): + assert all(x in self.data for x in self.boundary_points) + + self._initialized = True + + vals = list(self.data.values()) + codomain_min = np.min(vals, axis=0) + codomain_max = np.max(vals, axis=0) + self.codomain_bounds = (codomain_min, codomain_max) + self.codomain_scale_at_last_update = codomain_max - codomain_min + + try: + self.vdim = len(np.squeeze(self.data[self.boundary_points[0]])) + except TypeError: # Trying to take the length of a number + self.vdim = 1 + + # Generate new subdomains using any evaluated points, skipping the boundary + # points (these are already vertices in the domain) and discarding any points + # that are outside the domain. + xs = list(x for x in self.data.keys() if x not in self.boundary_points) + if xs: + xs = np.array(xs) + xs = xs[self.domain.encloses(xs)] + for x in xs: + self.domain.split_at(x) + + # Recompute all the losses from scratch + self.losses = dict() + self.queue = Queue( + (subdomain, self.priority(subdomain)) + for subdomain in self.domain.subdomains() + ) + + @property + def npoints(self): + return len(self.data) + + def priority(self, subdomain): + # Compute the loss of 'subdomain' + if self._initialized: + if subdomain in self.losses: + L_0 = self.losses[subdomain] + else: + L_0 = self.loss_function( + self.domain, subdomain, self.codomain_bounds, self.data + ) + self.losses[subdomain] = L_0 + else: + # Before we have all the boundary points we can't calculate losses because we + # do not have enough data. We just assign the subdomain volume as the loss. + L_0 = self.domain.volume(subdomain) + + # Scale the subdomain loss by the maximum relative volume of its own subdomains + # (those formed of pending points within the subdomain). If there are no pending + # points in the subdomain then the scaling is 1 and the priority is just the loss. 
+ subvolumes = self.domain.subvolumes(subdomain) + return (max(subvolumes) / sum(subvolumes)) * L_0 + + def ask(self, n, tell_pending=True): + if self._initialized: + points, point_priorities = self._ask(n, tell_pending) + else: + # Give priority to boundary points, but don't include points that + # we have data for or have already asked for. + points = [ + x + for x in self.boundary_points + if x not in self.data and x not in self.pending_points + ] + # Make sure we don't give more points than asked for + points = points[:n] + # Infinite priority so that the boundary points are prioritized + point_priorities = [math.inf] * len(points) + if tell_pending: + for x in points: + self.pending_points.add(x) + n_extra = n - len(points) + if n_extra > 0: + extra_points, extra_point_priorities = self._ask(n_extra, tell_pending) + points += tuple(extra_points) + point_priorities += tuple(extra_point_priorities) + + return points, point_priorities + + def _ask(self, n, tell_pending): + new_points = [] + point_priorities = [] + # Insert a point into the subdomain at the front of the queue, and update the + # priorities of that subdomain and any neighbors (if the point was added on + # a subdomain boundary). + for _ in range(n): + subdomain, _ = self.queue.peek() + (new_point,), affected_subdomains = self.domain.insert_points(subdomain, 1) + self.pending_points.add(new_point) + for subdomain in affected_subdomains: + self.queue.update(subdomain, priority=self.priority(subdomain)) + new_points.append(new_point) + # TODO: don't call 'priority' again here: we already called it above, we just + # need to identify 'subdomin' within 'affected_subdomains'. Maybe change + # the API of 'Domain.insert_points' to not return 'subdomain'... + point_priorities.append(self.priority(subdomain)) + + # Remove all the points we just added and update the priorities of any subdomains + # we touched. + if not tell_pending: + affected_subdomains = set() + for point in new_points: + self.pending_points.remove(point) + sd = self.domain.remove(point) + affected_subdomains.update(sd) + for subdomain in affected_subdomains: + self.queue.update(subdomain, priority=self.priority(subdomain)) + + return new_points, point_priorities + + def tell_pending(self, x): + if x in self.data: + raise ValueError("Data already present for point {}".format(x)) + + self.pending_points.add(x) + + # We cannot 'insert' a boundary point into the domain because it already + # exists as a vertex. This does not affect the queue ordering. + if not self._initialized and x in self.boundary_points: + return + + affected_subdomains = self.domain.insert(x) + for subdomain in affected_subdomains: + self.queue.update(subdomain, priority=self.priority(subdomain)) + + def tell_many(self, xs, ys): + # Filter out points that are already present + if all(x in self.data for x in xs): + return + xs, ys = zip(*((x, y) for x, y in zip(xs, ys) if x not in self.data)) + + self.data.update(zip(xs, ys)) + self.pending_points -= set(xs) + + if not self._initialized: + if all(x in self.data for x in self.boundary_points): + self._finalize_initialization() + return + + # Filter out any points that are outside the domain. These still appear in + # 'self.data', but they are not added to the domain, and so have no effect + # on the learning. 
+ are_inside = self.domain.encloses(xs) + if not np.any(are_inside): + return + xs, ys = zip(*((x, y) for x, y, inside in zip(xs, ys, are_inside) if inside)) + + need_loss_update = self._update_codomain_bounds(ys) + + to_remove = set() + to_add = set() + for x in xs: + old_subdomains, new_subdomains = map(set, self.domain.split_at(x)) + # Subdomains that were added in a prior iteration of this loop, + # but which have now been removed to make way for others. + temp_subdomains = to_add.intersection(old_subdomains) + # We no longer want to add subdomains that have now been removed, + # and we want to add the new subdomains. + to_add -= temp_subdomains + to_add.update(new_subdomains) + # We do not want to remove subdomains that were produced on a + # prior iteration of this loop, as these will not be in the queue. + to_remove.update(old_subdomains - temp_subdomains) + + for subdomain in to_remove: + self.queue.remove(subdomain) + del self.losses[subdomain] + + if need_loss_update: + self.queue = Queue( + (subdomain, self.priority(subdomain)) + for subdomain in itertools.chain(self.queue.items(), to_add) + ) + else: + # Insert the newly created subdomains into the queue. + for subdomain in to_add: + self.queue.insert(subdomain, priority=self.priority(subdomain)) + + # If the loss function depends on data in neighboring subdomains then + # we must recompute the priorities of all neighboring subdomains of + # the subdomains we just added. + if self.loss_function.n_neighbors > 0: + subdomains_to_update = set() + for subdomain in to_add: + subdomains_to_update.update( + self.domain.neighbors(subdomain, self.loss_function.n_neighbors) + ) + subdomains_to_update -= to_add + for subdomain in subdomains_to_update: + # We have more data, so we must force a loss recomputation by + # removing the subdomain from the loss cache. + del self.losses[subdomain] + self.queue.update(subdomain, priority=self.priority(subdomain)) + + def _update_codomain_bounds(self, ys): + # Update the codomain bounds: the minimum and the maximum values that the + # learner has seen thus far. + mn, mx = self.codomain_bounds + if self.vdim == 1: + mn = min(mn, *ys) + mx = max(mx, *ys) + else: + mn = np.min(np.vstack([mn, ys]), axis=0) + mx = np.max(np.vstack([mx, ys]), axis=0) + self.codomain_bounds = (mn, mx) + + scale = mx - mn + # How much has the scale of the outputs changed since the last time + # we recomputed the losses? + if np.any(self.codomain_scale_at_last_update == 0): + scale_factor = math.inf + else: + scale_factor = scale / self.codomain_scale_at_last_update + + # We need to recompute all losses if the scale has increased by more + # than a certain factor since the last time we recomputed all the losses + if self.vdim == 1: + need_loss_update = scale_factor > self._recompute_losses_factor + else: + need_loss_update = np.any(scale_factor > self._recompute_losses_factor) + + if need_loss_update: + self.codomain_scale_at_last_update = scale + return True + else: + return False + + def remove_unfinished(self): + self.pending_points = set() + cleared_subdomains = self.domain.clear_subdomains() + # Subdomains that had points removed need their priority updating + for subdomain in cleared_subdomains: + self.queue.update(subdomain, priority=self.priority(subdomain)) + + def loss(self, real=True): + if real: + if not self.losses: + return math.inf + # NOTE: O(N) in the number of subintervals, but with a low prefactor. 
+ # We have to do this because the queue is sorted in *priority* + # order, and it's possible that a subinterval with a high loss + # may have a low priority (if it has many pending points). + return max(self.losses.values()) + else: + # The 'not real' loss (which takes pending points into account) is + # just the priority in the subdomain queue. + _, priority = self.queue.peek() + return priority + + def plot(self, **kwargs): + if isinstance(self.domain, Interval): + return self._plot_1d(**kwargs) + elif isinstance(self.domain, ConvexHull): + return self._plot_nd(**kwargs) + + def _plot_nd(self, n=None, tri_alpha=0): + # XXX: Copied from LearnerND. At the moment we reach deep into internal + # datastructures of self.domain. We should see what data we need and + # add APIs to 'Domain' to support this. + hv = ensure_holoviews() + if self.vdim > 1: + raise NotImplementedError( + "holoviews currently does not support", "3D surface plots in bokeh." + ) + if self.ndim != 2: + raise NotImplementedError( + "Only 2D plots are implemented: You can " + "plot a 2D slice with 'plot_slice'." + ) + x, y = self.domain.bounding_box + lbrt = x[0], y[0], x[1], y[1] + + if len(self.data) >= 4: + if n is None: + # Calculate how many grid points are needed. + # factor from A=√3/4 * a² (equilateral triangle) + scale_factor = 1 # np.product(np.diag(self._transform)) + min_volume = min(map(self.domain.volume, self.domain.subdomains())) + a_sq = np.sqrt(scale_factor * min_volume) + n = max(10, int(0.658 / a_sq)) + + xs = ys = np.linspace(0, 1, n) + xs = xs * (x[1] - x[0]) + x[0] + ys = ys * (y[1] - y[0]) + y[0] + keys = np.array(list(self.data.keys())) + values = np.array(list(self.data.values())) + interpolator = scipy.interpolate.LinearNDInterpolator(keys, values) + z = interpolator(xs[:, None], ys[None, :]).squeeze() + + im = hv.Image(np.rot90(z), bounds=lbrt) + + if tri_alpha: + points = np.array( + [ + [self.domain.triangulation.vertices[i] for i in s] + for s in self.domain.subdomains() + ] + ) + points = np.pad( + points[:, [0, 1, 2, 0], :], + pad_width=((0, 0), (0, 1), (0, 0)), + mode="constant", + constant_values=np.nan, + ).reshape(-1, 2) + tris = hv.EdgePaths([points]) + else: + tris = hv.EdgePaths([]) + else: + im = hv.Image([], bounds=lbrt) + tris = hv.EdgePaths([]) + + im_opts = dict(cmap="viridis") + tri_opts = dict(line_width=0.5, alpha=tri_alpha) + no_hover = dict(plot=dict(inspection_policy=None, tools=[])) + + return im.opts(style=im_opts) * tris.opts(style=tri_opts, **no_hover) + + def _plot_1d(self): + assert isinstance(self.domain, Interval) + hv = ensure_holoviews() + + xs, ys = zip(*sorted(self.data.items())) if self.data else ([], []) + if self.vdim == 1: + p = hv.Path([]) * hv.Scatter((xs, ys)) + else: + p = hv.Path((xs, ys)) * hv.Scatter([]) + + # Plot with 5% empty margins such that the boundary points are visible + a, b = self.domain.bounds + margin = 0.05 * (b - a) + plot_bounds = (a - margin, b + margin) + + return p.redim(x=dict(range=plot_bounds)) + + def _get_data(self): + return self.data + + def _set_data(self, data): + if data: + self.tell_many(*zip(*data.items())) diff --git a/adaptive/learner/triangulation.py b/adaptive/learner/triangulation.py index bb2482ce6..a54eb1571 100644 --- a/adaptive/learner/triangulation.py +++ b/adaptive/learner/triangulation.py @@ -266,7 +266,7 @@ class Triangulation: or more simplices in the """ - def __init__(self, coords): + def __init__(self, coords, *, _check_vertices=True): if not is_iterable_and_sized(coords): raise TypeError("Please 
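
A sketch (not itself part of the patch) of driving the new learner synchronously, reusing adaptive.runner.simple the same way the tests in this PR do; the function and goal here are arbitrary.

    import math
    from adaptive.learner.new_learnerND import LearnerND
    from adaptive.runner import simple

    # A single (a, b) pair selects the Interval domain and DistanceLoss.
    learner = LearnerND(lambda x: math.sin(10 * x), bounds=[(0, 2)])
    simple(learner, goal=lambda l: l.npoints >= 50)
    print(learner.loss())    # the maximum of the per-subdomain losses
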
diff --git a/adaptive/learner/triangulation.py b/adaptive/learner/triangulation.py
index bb2482ce6..a54eb1571 100644
--- a/adaptive/learner/triangulation.py
+++ b/adaptive/learner/triangulation.py
@@ -266,7 +266,7 @@ class Triangulation:
         or more simplices in the
     """
 
-    def __init__(self, coords):
+    def __init__(self, coords, *, _check_vertices=True):
         if not is_iterable_and_sized(coords):
             raise TypeError("Please provide a 2-dimensional list of points")
         coords = list(coords)
@@ -287,23 +287,28 @@ def __init__(self, coords):
             raise ValueError("Please provide at least one simplex")
         coords = list(map(tuple, coords))
 
-        vectors = np.subtract(coords[1:], coords[0])
-        if np.linalg.matrix_rank(vectors) < dim:
-            raise ValueError(
-                "Initial simplex has zero volumes "
-                "(the points are linearly dependent)"
-            )
+        if _check_vertices:
+            vectors = np.subtract(coords[1:], coords[0])
+            if np.linalg.matrix_rank(vectors) < dim:
+                raise ValueError(
+                    "Initial simplex has zero volume "
+                    "(the points are linearly dependent)"
+                )
 
         self.vertices = list(coords)
         self.simplices = set()
         # initialise empty set for each vertex
        self.vertex_to_simplices = [set() for _ in coords]
 
-        # find a Delaunay triangulation to start with, then we will throw it
-        # away and continue with our own algorithm
-        initial_tri = scipy.spatial.Delaunay(coords)
-        for simplex in initial_tri.simplices:
-            self.add_simplex(simplex)
+        if len(coords) == dim + 1:
+            # There is just a single simplex
+            self.add_simplex(tuple(range(dim + 1)))
+        else:
+            # find a Delaunay triangulation to start with, then we will throw it
+            # away and continue with our own algorithm
+            initial_tri = scipy.spatial.Delaunay(coords)
+            for simplex in initial_tri.simplices:
+                self.add_simplex(simplex)
 
     def delete_simplex(self, simplex):
         simplex = tuple(sorted(simplex))
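
A sketch (not itself part of the patch) of the new fast path: with exactly ndim + 1 vertices the triangulation is the single simplex (0, 1, ..., ndim) and no scipy.spatial.Delaunay call is made; _check_vertices=False additionally skips the rank check, which is what _make_new_subtriangulation in adaptive/domain.py relies on.

    from adaptive.learner.triangulation import Triangulation

    tri = Triangulation([(0, 0), (1, 0), (0, 1)], _check_vertices=False)
    print(tri.simplices)    # {(0, 1, 2)}
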
diff --git a/adaptive/priority_queue.py b/adaptive/priority_queue.py
new file mode 100644
index 000000000..2509b3cac
--- /dev/null
+++ b/adaptive/priority_queue.py
@@ -0,0 +1,115 @@
+from sortedcontainers import SortedDict, SortedList
+
+__all__ = ["Empty", "Queue"]
+
+
+class Empty(KeyError):
+    pass
+
+
+class Queue:
+    """Priority queue supporting update and removal at arbitrary position.
+
+    Parameters
+    ----------
+    entries : iterable of (item, priority)
+        The initial data in the queue. Providing this is faster than
+        calling 'insert' repeatedly.
+    """
+
+    def __init__(self, entries=()):
+        self._queue = SortedDict(
+            ((priority, -n), item) for n, (item, priority) in enumerate(entries)
+        )
+        # 'self._queue' cannot be keyed only on priority, as there may be several
+        # items that have the same priority. To keep unique elements the key
+        # will be '(priority, self._n)', where 'self._n' is decremented whenever
+        # we add a new element. 'self._n' is negative so that elements with equal
+        # priority are sorted by insertion order.
+        self._n = -len(self._queue)
+        # To efficiently support updating and removing items if their priority
+        # is unknown we have to keep the reverse map of 'self._queue'. Because
+        # items may not be hashable we cannot use a SortedDict, so we use a
+        # SortedList storing '(item, key)'.
+        self._items = SortedList(((v, k) for k, v in self._queue.items()))
+
+    def __len__(self):
+        return len(self._queue)
+
+    def items(self):
+        "Return an iterator over the items in the queue in priority order."
+        return reversed(self._queue.values())
+
+    def peek(self):
+        """Return the item and priority at the front of the queue.
+
+        Raises
+        ------
+        Empty : if the queue is empty
+        """
+        self._check_nonempty()
+        ((priority, _), item) = self._queue.peekitem()
+        return item, priority
+
+    def pop(self):
+        """Remove and return the item and priority at the front of the queue.
+
+        Raises
+        ------
+        Empty : if the queue is empty
+        """
+        self._check_nonempty()
+        (key, item) = self._queue.popitem()
+        i = self._items.index((item, key))
+        del self._items[i]
+        priority, _ = key
+        return item, priority
+
+    def insert(self, item, priority):
+        "Insert 'item' into the queue with the given priority."
+        key = (priority, self._n)
+        self._items.add((item, key))
+        self._queue[key] = item
+        self._n -= 1
+
+    def _check_nonempty(self):
+        if not self._queue:
+            raise Empty()
+
+    def _find_first(self, item):
+        self._check_nonempty()
+        i = self._items.bisect_left((item, ()))
+        try:
+            should_be, key = self._items[i]
+        except IndexError:
+            raise KeyError("item is not in queue") from None
+        if item != should_be:
+            raise KeyError("item is not in queue")
+        return i, key
+
+    def remove(self, item):
+        """Remove the 'item' from the queue.
+
+        Raises
+        ------
+        KeyError : if 'item' is not in the queue.
+        """
+        i, key = self._find_first(item)
+        del self._queue[key]
+        del self._items[i]
+
+    def update(self, item, priority):
+        """Update 'item' in the queue to have the given priority.
+
+        Raises
+        ------
+        KeyError : if 'item' is not in the queue.
+        """
+        i, key = self._find_first(item)
+        _, n = key
+        new_key = (priority, n)
+
+        del self._queue[key]
+        del self._items[i]
+        self._queue[new_key] = item
+        self._items.add((item, new_key))
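
A sketch (not itself part of the patch) of the queue's behaviour; peek and pop return the highest-priority entry, while update and remove work at arbitrary positions.

    from adaptive.priority_queue import Queue

    q = Queue([("a", 1), ("b", 3), ("c", 2)])
    print(q.peek())     # ('b', 3): the highest priority is at the front
    q.update("a", 5)    # reprioritize an item that is not at the front
    q.remove("c")
    print(q.pop())      # ('a', 5)
    print(len(q))       # 1
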
diff --git a/adaptive/tests/domain_utils.py b/adaptive/tests/domain_utils.py
new file mode 100644
index 000000000..0e472d406
--- /dev/null
+++ b/adaptive/tests/domain_utils.py
@@ -0,0 +1,151 @@
+import itertools
+
+import numpy as np
+
+import hypothesis.strategies as st
+from adaptive.learner.new_learnerND import ConvexHull, Interval
+
+# This module contains utilities for producing domains and points inside and outside of them.
+# Because we typically do not want to test very degenerate cases (e.g. points that are almost
+# coincident, very large or very small) we prefer generating points in the interval [0, 1)
+# using numpy.random, rather than drawing from Hypothesis' "floats" strategy.
+
+
+# Return an iterator that yields matrices reflecting in the cartesian
+# coordinate axes in 'ndim' dimensions.
+def reflections(ndim):
+    return map(np.diag, itertools.product([1, -1], repeat=ndim))
+
+
+def point_inside_simplex(simplex):
+    simplex = np.asarray(simplex)
+    dim = simplex.shape[1]
+    # Generate a point in the unit simplex.
+    # https://cs.stackexchange.com/questions/3227/uniform-sampling-from-a-simplex
+    # We avoid using Hypothesis to generate the points as it typically chooses
+    # very annoying points, which we want to avoid testing for now.
+    xb = np.random.rand(dim)
+    xb = np.array(sorted(xb))
+    xb[1:] = xb[1:] - xb[:-1]
+    # Transform into the simplex we need
+    v0, vecs = simplex[0], simplex[1:] - simplex[0]
+    x = tuple(v0 + (vecs.T @ xb))
+    return x
+
+
+@st.composite
+def points_inside(draw, domain, n):
+    # Set the numpy random seed
+    draw(st.random_module())
+    if isinstance(domain, Interval):
+        a, b = domain.bounds
+        return a + (b - a) * np.random.rand(n)
+    else:
+        assert isinstance(domain, ConvexHull)
+        tri = domain.triangulation
+        simplices = list(tri.simplices)
+        simplex = st.sampled_from(simplices).map(
+            lambda simplex: [tri.vertices[s] for s in simplex]
+        )
+        # "point_inside_simplex" uses the numpy RNG, and we set the seed above.
+        # Together this means we're almost guaranteed not to get coinciding points.
+        # Note that we draw from the 'simplex' strategy on each iteration, so we
+        # distribute the points between the different simplices in the domain.
+        return [tuple(point_inside_simplex(draw(simplex))) for _ in range(n)]
+
+
+@st.composite
+def point_inside(draw, domain):
+    return draw(points_inside(domain, 1))[0]
+
+
+@st.composite
+def a_few_points_inside(draw, domain):
+    n = draw(st.integers(3, 20))
+    return draw(points_inside(domain, n))
+
+
+@st.composite
+def points_outside(draw, domain, n):
+    # Set the numpy random seed
+    draw(st.random_module())
+
+    if isinstance(domain, Interval):
+        a, b = domain.bounds
+        ndim = 1
+    else:
+        assert isinstance(domain, ConvexHull)
+        hull = domain.bounds
+        points = hull.points[hull.vertices]
+        ndim = points.shape[1]
+        a, b = points.min(axis=0)[None, :], points.max(axis=0)[None, :]
+
+    # Generate a point outside the bounding box of the domain.
+    center = (a + b) / 2
+    border = (b - a) / 2
+    r = border + 10 * border * np.random.rand(n, ndim)
+    quadrant = np.sign(np.random.rand(n, ndim) - 0.5)
+    assert not np.any(quadrant == 0)
+    return center + quadrant * r
+
+
+@st.composite
+def point_outside(draw, domain):
+    return draw(points_outside(domain, 1))[0]
+
+
+@st.composite
+def point_on_shared_face(draw, domain, dim):
+    # Return a point that is shared by at least 2 subdomains
+    assert isinstance(domain, ConvexHull)
+    assert 0 < dim < domain.ndim
+
+    # Set the numpy random seed
+    draw(st.random_module())
+
+    tri = domain.triangulation
+
+    for face in tri.faces(dim + 1):
+        containing_subdomains = tri.containing(face)
+        if len(containing_subdomains) > 1:
+            break
+
+    vertices = np.array([tri.vertices[i] for i in face])
+
+    xb = np.random.rand(dim)
+
+    x = tuple(vertices[0] + xb @ (vertices[1:] - vertices[0]))
+
+    assert all(tri.point_in_simplex(x, s) for s in containing_subdomains)
+
+    return x
+
+
+@st.composite
+def make_random_domain(draw, ndim, fill=True):
+    # Set the numpy random seed
+    draw(st.random_module())
+
+    if ndim == 1:
+        a, b = sorted(np.random.rand(2) - 0.5)
+        domain = Interval(a, b)
+    else:
+        # Generate points in a hypercube around the origin
+        points = np.random.rand(10, ndim) - 0.5
+        domain = ConvexHull(points)
+    return domain
+
+
+@st.composite
+def make_hypercube_domain(draw, ndim, fill=True):
+    # Set the numpy random seed
+    draw(st.random_module())
+    limit = np.random.rand()
+
+    if ndim == 1:
+        subdomain = Interval(-limit, limit)
+    else:
+        point = np.full(ndim, limit)
+        boundary_points = [r @ point for r in reflections(ndim)]
+        subdomain = ConvexHull(boundary_points)
+    return subdomain
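
The sampling trick used by point_inside_simplex above, in isolation (not itself part of the patch): the gaps between sorted uniforms, plus the remainder up to 1, are uniformly distributed barycentric coordinates, so the weighted sum of the vertices is uniform over the simplex.

    import numpy as np

    rng = np.random.default_rng(0)
    u = np.sort(rng.random(2))                        # dim = 2
    bary = np.array([u[0], u[1] - u[0], 1 - u[1]])    # barycentric weights, sum to 1
    simplex = np.array([(0.0, 0.0), (2.0, 0.0), (0.0, 2.0)])
    print(bary @ simplex)                             # a point inside the triangle
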
diff --git a/adaptive/tests/test_domain.py b/adaptive/tests/test_domain.py
new file mode 100644
index 000000000..a2435f36a
--- /dev/null
+++ b/adaptive/tests/test_domain.py
@@ -0,0 +1,226 @@
+import numpy as np
+
+import hypothesis.strategies as st
+import pytest
+from adaptive.tests.domain_utils import (
+    a_few_points_inside,
+    make_random_domain,
+    point_inside,
+    point_on_shared_face,
+    point_outside,
+    points_inside,
+    points_outside,
+)
+from hypothesis import given, settings
+
+
+@pytest.mark.parametrize("ndim", [1, 2, 3])
+@given(data=st.data())
+@settings(deadline=500)
+def test_getting_points_are_unique(data, ndim):
+    domain = data.draw(make_random_domain(ndim))
+    points = []
+    for subdomain in domain.subdomains():
+        p, _ = domain.insert_points(subdomain, 10)
+        assert len(p) == len(set(p))
+        points.extend(p)
+    assert len(points) == len(set(points))
+
+
+@pytest.mark.parametrize("ndim", [1, 2, 3])
+@given(data=st.data())
+def test_sum_subvolumes_equals_volume(data, ndim):
+    domain = data.draw(make_random_domain(ndim))
+    xs = data.draw(a_few_points_inside(domain))
+
+    for x in xs:
+        domain.split_at(x)
+    for subdomain in domain.subdomains():
+        assert np.isclose(domain.volume(subdomain), sum(domain.subvolumes(subdomain)))
+
+
+@pytest.mark.parametrize("ndim", [1, 2, 3])
+@given(data=st.data())
+def test_split_at_vertex_raises(data, ndim):
+    domain = data.draw(make_random_domain(ndim))
+    x = data.draw(point_inside(domain))
+    domain.split_at(x)
+    with pytest.raises(ValueError):
+        domain.split_at(x)
+
+
+@pytest.mark.parametrize("ndim", [1, 2, 3])
+@given(data=st.data())
+def test_inserting_point_twice_raises(data, ndim):
+    domain = data.draw(make_random_domain(ndim))
+    x = data.draw(point_inside(domain))
+    domain.insert(x)
+    with pytest.raises(ValueError):
+        domain.insert(x)
+
+
+@pytest.mark.parametrize("ndim", [1, 2, 3])
+@given(data=st.data())
+def test_insert_points_outside_domain_raises(data, ndim):
+    domain = data.draw(make_random_domain(ndim))
+    x = data.draw(point_outside(domain))
+    with pytest.raises(ValueError):
+        domain.insert(x)
+
+
+@pytest.mark.parametrize("ndim", [1, 2, 3])
+@given(data=st.data())
+def test_encloses(data, ndim):
+    domain = data.draw(make_random_domain(ndim))
+
+    xin = data.draw(point_inside(domain))
+    assert domain.encloses(xin)
+
+    xout = data.draw(point_outside(domain))
+    assert not domain.encloses(xout)
+
+    xins = data.draw(points_inside(domain, 20))
+    assert np.all(domain.encloses(xins))
+
+    xouts = data.draw(points_outside(domain, 20))
+    assert not np.any(domain.encloses(xouts))
+
+
+@pytest.mark.parametrize("ndim", [1, 2, 3])
+@given(data=st.data())
+def test_insert_point_outside_domain_raises(data, ndim):
+    domain = data.draw(make_random_domain(ndim))
+    x = data.draw(point_outside(domain))
+    with pytest.raises(ValueError):
+        domain.insert(x)
+
+
+@pytest.mark.parametrize("ndim", [1, 2, 3])
+@given(data=st.data())
+def test_split_at_point_outside_domain_raises(data, ndim):
+    domain = data.draw(make_random_domain(ndim))
+    x = data.draw(point_outside(domain))
+    with pytest.raises(ValueError):
+        domain.split_at(x)
+
+
+@pytest.mark.parametrize("ndim", [1, 2, 3])
+@given(data=st.data())
+def test_removing_domain_vertex_raises(data, ndim):
+    domain = data.draw(make_random_domain(ndim))
+    x = data.draw(point_inside(domain))
+    domain.split_at(x)
+    with pytest.raises(ValueError):
+        domain.remove(x)
+
+
+@pytest.mark.parametrize("ndim", [1, 2, 3])
+@given(data=st.data())
+def test_removing_nonexistant_point_raises(data, ndim):
+    domain = data.draw(make_random_domain(ndim))
+    x = data.draw(point_inside(domain))
+    with pytest.raises(ValueError):
+        domain.remove(x)
+
+
+@pytest.mark.parametrize("ndim", [1, 2, 3])
+@given(data=st.data())
+def test_splitting_at_point_adds_to_vertices(data, ndim):
+    domain = data.draw(make_random_domain(ndim))
+    xs = data.draw(a_few_points_inside(domain))
+
+    for x in xs:
+        domain.split_at(x)
+    vertices = set(domain.vertices())
+    assert all(x in vertices for x in xs)
+
+
+@pytest.mark.parametrize("ndim", [1, 2, 3])
+@given(data=st.data())
+def test_inserting_points_adds_to_subpoints(data, ndim):
+    domain = data.draw(make_random_domain(ndim))
+    xs = data.draw(a_few_points_inside(domain))
+
+    subdomains = dict()
+    for x in xs:
+        subdomains[x] = domain.insert(x)
+    for x in xs:
+        for subdomain in subdomains[x]:
+            assert x in domain.subpoints(subdomain)
+
+
+@pytest.mark.parametrize("ndim", [1, 2, 3])
+@given(data=st.data())
+def test_inserting_then_removing_points_removes_from_subpoints(data, ndim):
+    domain = data.draw(make_random_domain(ndim))
+    xs = data.draw(a_few_points_inside(domain))
+
+    for x in xs:
+        domain.insert(x)
+    for x in xs:
+        domain.remove(x)
+    assert not any(domain.subpoints(s) for s in domain.subdomains())
+
+
+@pytest.mark.parametrize("ndim", [1, 2, 3])
+@given(data=st.data())
+@settings(deadline=500)
+def test_inserting_then_splitting_at_points_removes_from_subpoints(data, ndim):
+    domain = data.draw(make_random_domain(ndim))
+    xs = data.draw(a_few_points_inside(domain))
+
+    for x in xs:
+        domain.insert(x)
+    for x in xs:
+        domain.split_at(x)
+    assert not any(domain.subpoints(s) for s in domain.subdomains())
+
+
+@pytest.mark.parametrize("ndim", [1, 2, 3])
+@given(data=st.data())
+def test_clear_subdomains_removes_all_points(data, ndim):
+    domain = data.draw(make_random_domain(ndim))
+    xs = data.draw(a_few_points_inside(domain))
+
+    for x in xs:
+        domain.insert(x)
+    assert len(xs) == sum(len(domain.subpoints(s)) for s in domain.subdomains())
+    domain.clear_subdomains()
+    assert 0 == sum(len(domain.subpoints(s)) for s in domain.subdomains())
+
+
+@pytest.mark.parametrize("ndim", [1, 2, 3])
+@given(data=st.data())
+def test_split_at_reassigns_all_internal_points(data, ndim):
+    domain = data.draw(make_random_domain(ndim))
+    x_split, *xs = data.draw(a_few_points_inside(domain))
+
+    for x in xs:
+        domain.insert(x)
+
+    # The subdomains where the points were assigned initially
+    subpoints = {s: set(domain.subpoints(s)) for s in domain.subdomains()}
+    # Sanity check; all the inserted points are in *some* subdomain
+    assert set.union(*subpoints.values()) == set(xs)
+
+    old_subdomains, new_subdomains = domain.split_at(x_split)
+
+    old_subpoints = set.union(*(subpoints[s] for s in old_subdomains))
+    new_subpoints = set.union(*(set(domain.subpoints(s)) for s in new_subdomains))
+    assert old_subpoints == new_subpoints
+
+
+### ConvexHull-specific tests
+
+
+@pytest.mark.parametrize("ndim", [2, 3])
+@given(data=st.data())
+def test_inserting_point_on_boundary_adds_to_all_subtriangulations(data, ndim):
+    domain = data.draw(make_random_domain(ndim))
+    xs = data.draw(a_few_points_inside(domain))
+
+    for x in xs:
+        domain.split_at(x)
+    x = data.draw(point_on_shared_face(domain, 1))
+    affected_subdomains = domain.insert(x)
+    assert all(x in set(domain.subpoints(s)) for s in affected_subdomains)
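
The same strategy/draw pattern outside of this file (not itself part of the patch; the test name and property are illustrative): the domain_utils strategies compose with Hypothesis' data() exactly as in the tests above.

    import hypothesis.strategies as st
    from hypothesis import given, settings
    from adaptive.tests.domain_utils import make_random_domain, point_inside

    @given(data=st.data())
    @settings(max_examples=10)
    def test_insert_is_consistent_with_subpoints(data):
        domain = data.draw(make_random_domain(2))
        x = data.draw(point_inside(domain))
        domain.insert(x)
        # every subdomain that claims to contain x must list it as a subpoint
        assert all(x in domain.subpoints(s) for s in domain.which_subdomains(x))
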
diff --git a/adaptive/tests/test_learners.py b/adaptive/tests/test_learners.py
index bf84fcc14..4d5fd6aed 100644
--- a/adaptive/tests/test_learners.py
+++ b/adaptive/tests/test_learners.py
@@ -11,6 +11,8 @@
 import shutil
 import tempfile
 
+import hypothesis.strategies as st
+import hypothesis.stateful as stateful
 import numpy as np
 import pytest
 import scipy.spatial
@@ -26,6 +28,7 @@
     LearnerND,
     SequenceLearner,
 )
+from adaptive.learner.new_learnerND import LearnerND as NewLearnerND
 from adaptive.runner import simple
 
 try:
@@ -112,11 +115,13 @@ def maybe_skip(learner):
 
 
 @learn_with(Learner1D, bounds=(-1, 1))
+@learn_with(NewLearnerND, bounds=[(-1, 1)])
 def quadratic(x, m: uniform(0, 10), b: uniform(0, 1)):
     return m * x ** 2 + b
 
 
 @learn_with(Learner1D, bounds=(-1, 1))
+@learn_with(NewLearnerND, bounds=[(-1, 1)])
 @learn_with(SequenceLearner, sequence=np.linspace(-1, 1, 201))
 def linear_with_peak(x, d: uniform(-1, 1)):
     a = 0.01
@@ -124,6 +129,7 @@ def linear_with_peak(x, d: uniform(-1, 1)):
 
 
 @learn_with(LearnerND, bounds=((-1, 1), (-1, 1)))
+@learn_with(NewLearnerND, bounds=((-1, 1), (-1, 1)))
 @learn_with(Learner2D, bounds=((-1, 1), (-1, 1)))
 @learn_with(SequenceLearner, sequence=np.random.rand(1000, 2))
 def ring_of_fire(xy, d: uniform(0.2, 1)):
@@ -133,6 +139,7 @@
 
 
 @learn_with(LearnerND, bounds=((-1, 1), (-1, 1), (-1, 1)))
+@learn_with(NewLearnerND, bounds=((-1, 1), (-1, 1), (-1, 1)))
 @learn_with(SequenceLearner, sequence=np.random.rand(1000, 3))
 def sphere_of_fire(xyz, d: uniform(0.2, 1)):
     a = 0.2
@@ -242,6 +249,7 @@ def test_uniform_sampling2D(learner_type, f, learner_kwargs):
         (Learner1D, (-1, 1)),
         (Learner2D, [(-1, 1), (-1, 1)]),
         (LearnerND, [(-1, 1), (-1, 1), (-1, 1)]),
+        (NewLearnerND, [(-1, 1), (-1, 1), (-1, 1)]),
     ],
 )
 def test_learner_accepts_lists(learner_type, bounds):
@@ -252,7 +260,7 @@ def f(x):
     simple(learner, goal=lambda l: l.npoints > 10)
 
 
-@run_with(Learner1D, Learner2D, LearnerND, SequenceLearner)
+@run_with(Learner1D, Learner2D, LearnerND, NewLearnerND, SequenceLearner)
 def test_adding_existing_data_is_idempotent(learner_type, f, learner_kwargs):
     """Adding already existing data is an idempotent operation.
@@ -299,7 +307,14 @@ def test_adding_existing_data_is_idempotent(learner_type, f, learner_kwargs):
 
 # XXX: This *should* pass (https://github.com/python-adaptive/adaptive/issues/55)
 # but we xfail it now, as Learner2D will be deprecated anyway
-@run_with(Learner1D, xfail(Learner2D), LearnerND, AverageLearner, SequenceLearner)
+@run_with(
+    Learner1D,
+    xfail(Learner2D),
+    LearnerND,
+    NewLearnerND,
+    AverageLearner,
+    SequenceLearner,
+)
 def test_adding_non_chosen_data(learner_type, f, learner_kwargs):
     """Adding data for a point that was not returned by 'ask'."""
     # XXX: learner, control and bounds are not defined
@@ -383,7 +398,7 @@ def test_point_adding_order_is_irrelevant(learner_type, f, learner_kwargs):
 
 # XXX: the Learner2D fails with ~50% chance
 # see https://github.com/python-adaptive/adaptive/issues/55
-@run_with(Learner1D, xfail(Learner2D), LearnerND, AverageLearner)
+@run_with(Learner1D, xfail(Learner2D), LearnerND, NewLearnerND, AverageLearner)
 def test_expected_loss_improvement_is_less_than_total_loss(
     learner_type, f, learner_kwargs
 ):
@@ -409,7 +424,7 @@ def test_expected_loss_improvement_is_less_than_total_loss(
 
 # XXX: This *should* pass (https://github.com/python-adaptive/adaptive/issues/55)
 # but we xfail it now, as Learner2D will be deprecated anyway
-@run_with(Learner1D, xfail(Learner2D), LearnerND)
+@run_with(Learner1D, xfail(Learner2D), LearnerND, xfail(NewLearnerND))
 def test_learner_performance_is_invariant_under_scaling(
     learner_type, f, learner_kwargs
 ):
@@ -460,6 +475,7 @@ def test_learner_performance_is_invariant_under_scaling(
     Learner1D,
     Learner2D,
     LearnerND,
+    NewLearnerND,
     AverageLearner,
     SequenceLearner,
     with_all_loss_functions=False,
@@ -504,6 +520,7 @@ def test_balancing_learner(learner_type, f, learner_kwargs):
     Learner1D,
     Learner2D,
     LearnerND,
+    NewLearnerND,
     AverageLearner,
     maybe_skip(SKOptLearner),
     IntegratorLearner,
@@ -535,6 +552,7 @@ def test_saving(learner_type, f, learner_kwargs):
     Learner1D,
     Learner2D,
     LearnerND,
+    NewLearnerND,
     AverageLearner,
     maybe_skip(SKOptLearner),
     IntegratorLearner,
@@ -573,6 +591,7 @@ def fname(learner):
     Learner1D,
     Learner2D,
     LearnerND,
+    NewLearnerND,
     AverageLearner,
     maybe_skip(SKOptLearner),
     IntegratorLearner,
@@ -605,8 +624,111 @@ def test_saving_with_datasaver(learner_type, f, learner_kwargs):
     os.remove(path)
 
 
+@run_with(Learner1D, Learner2D, LearnerND, NewLearnerND, with_all_loss_functions=False)
+def test_adding_data_outside_of_bounds(learner_type, f, learner_kwargs):
+    # Just test that this does not throw an error for now
+    f = generate_random_parametrization(f)
+    learner = learner_type(f, **learner_kwargs)
+
+    points, _ = learner.ask(20)
+    learner.tell_many(points, [learner.function(x) for x in points])
+
+    points, _ = learner.ask(10)
+    points = 1e5 * np.asarray(points)  # outside the bounds for all the test functions we have
+    if len(points.shape) > 1:
+        points = [tuple(x) for x in points]
+
+    learner.tell_many(points, [learner.function(x) for x in points])
+
+    learner.ask(10)
+
+
+# Hypothesis' RuleBasedStateMachine does not allow parametrization, so we
+# wrap it in a parametrized test.
+@run_with(
+    Learner1D,
+    Learner2D,
+    LearnerND,
+    NewLearnerND,
+    SequenceLearner,
+    AverageLearner,
+    with_all_loss_functions=False,
+)
+def test_simulate_runner(learner_type, f, learner_kwargs):
+    g = generate_random_parametrization(f)
+
+    # This simulates the algorithm currently used by the Runner: ask for
+    # 'ncores' points, then tell the results one at a time, asking for one
+    # more point after each result.
+    class Machine(stateful.RuleBasedStateMachine):
+        def __init__(self):
+            super().__init__()
+            self.learner = learner_type(g, **learner_kwargs)
+
+        pending = stateful.Bundle("pending")
+
+        # TODO: add some invariant checking here
+
+        @stateful.initialize(target=pending, ncores=st.integers(1, 10))
+        def init_learner(self, ncores):
+            points, _ = self.learner.ask(ncores)
+            data = [(x, self.learner.function(x)) for x in points]
+            return stateful.multiple(*data)
+
+        @stateful.rule(target=pending, xy=stateful.consumes(pending))
+        def ask_and_tell(self, xy):
+            x, y = xy
+            self.learner.tell(x, y)
+            (x,), _ = self.learner.ask(1)
+            return (x, self.learner.function(x))
+
+    Machine.TestCase().runTest()
+
+
+# Hypothesis' RuleBasedStateMachine does not allow parametrization, so we
+# wrap it in a parametrized test.
+@run_with(
+    Learner1D,
+    Learner2D,
+    LearnerND,
+    NewLearnerND,
+    SequenceLearner,
+    AverageLearner,
+    with_all_loss_functions=False,
+)
+def test_randomly_ask_tell(learner_type, f, learner_kwargs):
+    g = generate_random_parametrization(f)
+
+    # This simulates a strategy where we ask for a random number of points
+    # and then tell a random selection of all the points we have asked for
+    # so far.
+    class Machine(stateful.RuleBasedStateMachine):
+        def __init__(self):
+            super().__init__()
+            self.learner = learner_type(g, **learner_kwargs)
+
+        pending = stateful.Bundle("pending")
+
+        # TODO: add some invariant checking here
+
+        @stateful.rule(target=pending, n=st.integers(1, 10))
+        def ask(self, n):
+            points, _ = self.learner.ask(n)
+            return stateful.multiple(*[(x, self.learner.function(x)) for x in points])
+
+        @stateful.rule(xys=st.lists(stateful.consumes(pending), min_size=1))
+        def tell(self, xys):
+            xs, ys = zip(*xys)
+            self.learner.tell_many(xs, ys)
+
+    Machine.TestCase().runTest()
+
+
 @pytest.mark.xfail
-@run_with(Learner1D, Learner2D, LearnerND)
+@run_with(Learner1D, Learner2D, LearnerND, NewLearnerND)
 def test_convergence_for_arbitrary_ordering(learner_type, f, learner_kwargs):
     """Learners that are learning the same function should converge
     to the same result "eventually" if given the same data, regardless
@@ -618,7 +740,7 @@
 
 
 @pytest.mark.xfail
-@run_with(Learner1D, Learner2D, LearnerND)
+@run_with(Learner1D, Learner2D, LearnerND, NewLearnerND)
 def test_learner_subdomain(learner_type, f, learner_kwargs):
     """Learners that never receive data outside of a subdomain should
     perform 'similarly' to learners defined on that subdomain only."""
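The new test file below exercises adaptive.priority_queue. Reading the tests, Queue behaves as a max-priority queue of (item, priority) pairs: pop and peek return the highest-priority pair (raising Empty on an empty queue), while remove and update raise KeyError for unknown items. A usage sketch under those inferred assumptions, not part of the patch:

    # Illustrative sketch, not part of the patch; semantics inferred from the tests.
    from adaptive.priority_queue import Queue, Empty

    q = Queue([("a", 1.0), ("b", 3.0)])  # constructed from (item, priority) pairs
    q.insert("c", 2.0)
    assert q.peek() == ("b", 3.0)        # highest priority first
    assert q.pop() == ("b", 3.0)         # pop also removes it
    q.update("a", 5.0)                   # reprioritize an existing item
    q.remove("c")                        # would raise KeyError if 'c' were absent
    assert q.pop() == ("a", 5.0)
    try:
        q.pop()
    except Empty:
        pass                             # popping an empty queue raises Empty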
diff --git a/adaptive/tests/test_priority_queue.py b/adaptive/tests/test_priority_queue.py
new file mode 100644
index 000000000..60464f87f
--- /dev/null
+++ b/adaptive/tests/test_priority_queue.py
@@ -0,0 +1,80 @@
+from hypothesis import given, assume
+import hypothesis.strategies as st
+import pytest
+
+from adaptive.priority_queue import Queue, Empty
+
+
+item = st.floats(allow_nan=False)
+priority = st.floats(allow_nan=False)
+items = st.lists(st.tuples(item, priority))
+
+
+@given(items, item)
+def test_remove_nonexisting_item_raises(items, missing_item):
+    if items:
+        i, p = zip(*items)
+        assume(missing_item not in i)
+    q = Queue(items)
+    with pytest.raises(KeyError):
+        q.remove(missing_item)
+
+
+@given(items, item)
+def test_update_nonexisting_item_raises(items, missing_item):
+    if items:
+        i, p = zip(*items)
+        assume(missing_item not in i)
+    q = Queue(items)
+    with pytest.raises(KeyError):
+        q.update(missing_item, 0)
+
+
+@given(items, item)
+def test_remove_item_inserted_twice_removes_lowest_priority(items, missing_item):
+    if items:
+        i, p = zip(*items)
+        assume(missing_item not in i)
+    q = Queue(items)
+
+    q.insert(missing_item, 0)
+    q.insert(missing_item, 1)
+    q.remove(missing_item)  # should remove the priority-0 item
+    # Get 'missing_item' out of the queue
+    t = None
+    while t != missing_item:
+        t, prio = q.pop()
+    assert prio == 1
+
+
+@given(items)
+def test_all_items_in_queue(items):
+    # Items should be sorted from largest priority to smallest
+    sorted_items = [item for item, _ in sorted(items, key=lambda x: -x[1])]
+    assert sorted_items == list(Queue(items).items())
+
+
+@given(items)
+def test_pop_gives_max(items):
+    q = Queue(items)
+    if items:
+        lq = len(q)
+        should_pop = max(items, key=lambda x: x[1])
+        assert should_pop == q.pop()
+        assert len(q) == lq - 1
+    else:
+        with pytest.raises(Empty):
+            q.pop()
+
+
+@given(items)
+def test_peek_gives_max(items):
+    q = Queue(items)
+    if items:
+        lq = len(q)
+        should_peek = max(items, key=lambda x: x[1])
+        assert should_peek == q.peek()
+        assert len(q) == lq
+    else:
+        with pytest.raises(Empty):
+            q.peek()
diff --git a/proof-of-concept-learner.ipynb b/proof-of-concept-learner.ipynb
new file mode 100644
index 000000000..d6f0d00e6
--- /dev/null
+++ b/proof-of-concept-learner.ipynb
@@ -0,0 +1,364 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import random\n",
+    "import math\n",
+    "import numpy as np\n",
+    "\n",
+    "from sortedcontainers import SortedList\n",
+    "import adaptive\n",
+    "from adaptive.learner.new_learnerND import LearnerND, CurvatureLoss\n",
+    "\n",
+    "import holoviews as hv\n",
+    "\n",
+    "adaptive.notebook_extension()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "offset = random.uniform(-0.5, 0.5)\n",
+    "\n",
+    "def peak(x, offset=offset):\n",
+    "    a = 0.02\n",
+    "    return x + a**2 / (a**2 + (x - offset)**2)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### The new implementation has the same API as the old one (except bounds)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "learner = adaptive.Learner1D(peak, (-1, 1))\n",
+    "adaptive.runner.simple(learner, goal=lambda l: len(l.data) > 50)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "learner2 = LearnerND(peak, [(-1, 1)])\n",
+    "adaptive.runner.simple(learner2, goal=lambda l: len(l.data) > 50)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### The new implementation is as fast as the old one"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%time adaptive.runner.simple(learner, goal=lambda l: len(l.data) > 10000)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%time adaptive.runner.simple(learner2, goal=lambda l: len(l.data) > 10000)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Also works in parallel"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import time\n",
+    "\n",
+    "def peak(x, offset=offset):\n",
+    "    time.sleep(0.5)\n",
+    "    a = 0.02\n",
+    "    return x + a**2 / (a**2 + (x - offset)**2)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "learner2 = LearnerND(peak, [(-1, 1)])\n",
+    "runner = adaptive.Runner(learner2, goal=lambda l: len(l.data) > 1000)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "runner.live_info()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "runner.live_plot(update_interval=0.5)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Also works for ND input"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def ring_of_fire(xy, d=0.75):\n",
+    "    a = 0.2\n",
+    "    x, y = xy\n",
+    "    return x + math.exp(-(x ** 2 + y ** 2 - d ** 2) ** 2 / a ** 4)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "learner3 = adaptive.LearnerND(ring_of_fire, [(-1, 1), (-1, 1)],\n",
+    "                              loss_per_simplex=adaptive.learner.learnerND.curvature_loss_function())\n",
+    "adaptive.runner.simple(learner3, goal=lambda l: len(l.data) > 50)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "learner4 = adaptive.Learner2D(ring_of_fire, [(-1, 1), (-1, 1)])\n",
+    "adaptive.runner.simple(learner4, goal=lambda l: len(l.data) > 50)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "learner5 = LearnerND(ring_of_fire, [(-1, 1), (-1, 1)])\n",
+    "adaptive.runner.simple(learner5, goal=lambda l: len(l.data) > 50)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "learner6 = LearnerND(ring_of_fire, [(-1, 1), (-1, 1)], loss=CurvatureLoss())\n",
+    "adaptive.runner.simple(learner6, goal=lambda l: len(l.data) > 50)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that the points are not exactly the same as for the old LearnerND.\n",
+    "\n",
+    "This is because, for the moment, it is illegal to insert points onto the boundary of an existing simplex.\n",
+    "\n",
+    "We shall lift this restriction soon."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "learner3.plot(tri_alpha=0.5) + learner4.plot(tri_alpha=0.5) + learner5.plot(tri_alpha=0.5) + learner6.plot(tri_alpha=0.5)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%time adaptive.runner.simple(learner3, goal=lambda l: len(l.data) > 1000)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%time adaptive.runner.simple(learner4, goal=lambda l: len(l.data) > 1000)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%time adaptive.runner.simple(learner5, goal=lambda l: len(l.data) > 1000)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%time adaptive.runner.simple(learner6, goal=lambda l: len(l.data) > 1000)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "learner3.plot(tri_alpha=0.5) + learner4.plot(tri_alpha=0.5) + learner5.plot(tri_alpha=0.5) + learner6.plot(tri_alpha=0.5)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import time\n",
+    "\n",
+    "def ring_of_fire(xy, d=0.75):\n",
+    "    time.sleep(0.5)\n",
+    "    a = 0.2\n",
+    "    x, y = xy\n",
+    "    return x + math.exp(-(x ** 2 + y ** 2 - d ** 2) ** 2 / a ** 4)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "learner6 = LearnerND(ring_of_fire, [(-1, 1), (-1, 1)], loss=CurvatureLoss())\n",
+    "runner = adaptive.Runner(learner6, log=True, goal=lambda l: len(l.data) > 1000)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "runner.live_info()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "runner.live_plot(plotter=lambda l: l.plot(tri_alpha=0.5), update_interval=0.5)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Works for ND output"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def ring_of_fire2(xy, d=0.75):\n",
+    "    a = 0.2\n",
+    "    x, y = xy\n",
+    "    z = x + math.exp(-(x ** 2 + y ** 2 - d ** 2) ** 2 / a ** 4)\n",
+    "    return [z, z]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "learner7 = LearnerND(ring_of_fire2, [(-1, 1), (-1, 1)])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "adaptive.runner.simple(learner7, goal=lambda l: len(l.data) > 100)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "adaptive",
+   "language": "python",
+   "name": "adaptive"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/test-requirements.txt b/test-requirements.txt
index 193a0531f..fc78090fe 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -3,4 +3,5 @@ pytest
 pytest-cov
 pytest-randomly
 pytest-timeout
+hypothesis[numpy,pytest]
 pre_commit