From b9aef8f7a9d7e27bc7999ad9e76f8ed8e3c314cb Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 3 Feb 2024 23:37:49 -0500 Subject: [PATCH 001/241] Fix coordinate map logic for bounce_integral function - Clean up api for bounce_integral function --- desc/backend.py | 4 + desc/compute/_basis_vectors.py | 20 ++++ desc/compute/_core.py | 23 ++++ desc/compute/_field.py | 18 ++++ desc/compute/utils.py | 186 ++++++++++++++++++++++++++++++++- desc/equilibrium/coords.py | 3 +- 6 files changed, 251 insertions(+), 3 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index f6cb348bf5..d57539eb7c 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -599,6 +599,10 @@ def bincount(x, weights=None, minlength=None, length=None): """Same as np.bincount but with a dummy parameter to match jnp.bincount API.""" return np.bincount(x, weights, minlength) + def repeat(a, repeats, axis=None, total_repeat_length=None): + """Same as np.repeat but with a dummy parameter to match jnp.repeat API.""" + return np.repeat(a, repeats, axis) + def custom_jvp(fun, *args, **kwargs): """Dummy function for custom_jvp without JAX.""" fun.defjvp = lambda *args, **kwargs: None diff --git a/desc/compute/_basis_vectors.py b/desc/compute/_basis_vectors.py index 833bcdec1e..70c4788426 100644 --- a/desc/compute/_basis_vectors.py +++ b/desc/compute/_basis_vectors.py @@ -504,6 +504,26 @@ def _e_sup_theta(params, transforms, profiles, data, **kwargs): return data +@register_compute_fun( + name="e^theta_PEST", + label="\\mathbf{e}^{\\theta_{PEST}}", + units="m^{-1}", + units_long="inverse meters", + description="Contravariant straight field line (PEST) poloidal basis vector", + dim=3, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["e_rho", "e_phi", "sqrt(g)_PEST"], +) +def _e_sup_theta_pest(params, transforms, profiles, data, **kwargs): + data["e^theta_PEST"] = ( + cross(data["e_phi"], data["e_rho"]).T / data["sqrt(g)_PEST"] + ).T + return data + + @register_compute_fun( name="e^theta*sqrt(g)", label="\\mathbf{e}^{\\theta} \\sqrt{g}", diff --git a/desc/compute/_core.py b/desc/compute/_core.py index 1ee079be15..8e444698bb 100644 --- a/desc/compute/_core.py +++ b/desc/compute/_core.py @@ -28,6 +28,29 @@ def _0(params, transforms, profiles, data, **kwargs): return data +@register_compute_fun( + name="1", + label="1", + units="~", + units_long="None", + description="Ones", + dim=1, + params=[], + transforms={"grid": []}, + profiles=[], + coordinates="rtz", + data=[], + parameterization=[ + "desc.equilibrium.equilibrium.Equilibrium", + "desc.geometry.core.Surface", + "desc.geometry.core.Curve", + ], +) +def _1(params, transforms, profiles, data, **kwargs): + data["1"] = jnp.ones(transforms["grid"].num_nodes) + return data + + @register_compute_fun( name="R", label="R", diff --git a/desc/compute/_field.py b/desc/compute/_field.py index 9be619fcb8..bf00a5f041 100644 --- a/desc/compute/_field.py +++ b/desc/compute/_field.py @@ -86,6 +86,24 @@ def _B_sup_theta(params, transforms, profiles, data, **kwargs): return data +@register_compute_fun( + name="B^theta_PEST", + label="B^{\\theta}", + units="T \\cdot m^{-1}", + units_long="Tesla / meter", + description="Contravariant straight field line (PEST) component of magnetic field", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["B", "e^theta_PEST"], +) +def _B_sup_theta_PEST(params, transforms, profiles, data, **kwargs): + data["B^theta_PEST"] = dot(data["B"], data["e^theta_PEST"]) + return data + + @register_compute_fun( 
name="B^zeta", label="B^{\\zeta}", diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 0cb98628c2..0d80a348ca 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -5,10 +5,11 @@ import warnings import numpy as np +from numpy.polynomial.chebyshev import chebgauss from termcolor import colored from desc.backend import cond, fori_loop, jnp, put -from desc.grid import ConcentricGrid, LinearGrid +from desc.grid import ConcentricGrid, Grid, LinearGrid from .data_index import data_index @@ -1325,6 +1326,189 @@ def body(i, mins): return grid.expand(mins, surface_label) +def bounce_integral(eq, lambdas, rho=None, alpha=None, resolution=20): + """Returns a method to compute the bounce integral of any quantity. + + The bounce integral is defined as F_ℓ(λ) = ∫ f(ℓ) / √(1 − λ |B|) dℓ, where + dℓ parameterizes the distance along the field line, + λ is a constant proportional to the magnetic moment over energy, + |B| is the norm of the magnetic field, + f(ℓ) is the quantity to integrate along the field line, + and the endpoints of the integration are at the bounce points. + For a particle with fixed λ, bounce points are defined to be the location + on the field line such that the particle's velocity parallel to the + magnetic field is zero, i.e. λ |B| = 1. + + Parameters + ---------- + eq : Equilibrium + Equilibrium on which the bounce integral is defined. + lambdas : ndarray + λ values to evaluate the bounce integral at. + rho : int + Unique flux surface label coordinates. + alpha : ndarray + Unique field line label coordinates over a constant rho surface. + resolution : int + Number of quadrature points used to compute the bounce integral. + + Returns + ------- + bi : callable + This callable method computes the bounce integral F_ℓ(λ) for every + specified field line ℓ (constant rho and alpha), for every λ value in + ``lambdas``. + + Examples + -------- + .. code-block:: python + + bi = bounce_integral(eq, lambdas) + F = bi(name) + + """ + if rho is None: + rho = jnp.linspace(0, 1, 10) + if alpha is None: + alpha = jnp.linspace(0, 2 * jnp.pi, 20) + + # Use Gauss-Chebyshev quadrature as the integrand blows up at integration boundary. + x, w = chebgauss(deg=resolution) + # TODO: Write code to compute bounce points given lambda. + # Vectorize it for multiple lambdas. Then vectorize coordinate mapping logic + # for multiple lambdas. For now, let's pretend bounce points do not depend on + # lambda so that bounce_point() returns either one or two numbers for + # endpoints of all the integrals. + bp = bounce_point(eq, lambdas) + if bp.size == 1: + zeta = -2 * bp * jnp.arcsin(x) / jnp.pi + else: + zeta = (2 * jnp.arcsin(x) / jnp.pi - 1) / 2 * (bp[1] - bp[0]) + bp[1] + + r, a, z = jnp.meshgrid(rho, alpha, zeta, copy=False, indexing="ij") + r, a, z = r.ravel(), a.ravel(), z.ravel() + # Now we map these Clebsch-Type field-line coordinates to DESC coordinates. + # Note that the rotational transform can be computed apriori because it is a single + # variable function of rho, and the coordinate mapping does not change rho. Once + # this is known, it is simple to compute theta_PEST from alpha. Then we transform + # from straight field-line coordinates to DESC coordinates with the method + # compute_theta_coords. This is preferred over transforming from Clebsch-Type + # coordinates to DESC coordinates directly with the more general method + # map_coordinates. 
That method requires an initial guess to be compatible with JIT, + # and generating a reasonable initial guess requires computing the rotational + # transform to approximate theta_PEST and the poloidal stream function anyway. + # TODO: In general, Linear Grid construction is not jit compatible. + # This issue can be worked around with a specific routine for this. + lg = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym) + iota = lg.compress(eq.compute("iota", grid=lg)["iota"]) + iota = jnp.tile( + jnp.repeat(iota, zeta.size, total_repeat_length=rho.size * zeta.size), + alpha.size, + ) + sfl_coords = jnp.column_stack([r, (a + iota * z) % (2 * jnp.pi), z]) + desc_coords = eq.compute_theta_coords(sfl_coords) + grid = Grid(desc_coords, jitable=True) + data = eq.compute(names=["B^zeta", "|B|"], grid=grid, override_grid=False) + + def _bounce_integral(name): + """Compute the bounce integral of the named quantity. + + Parameters + ---------- + name : ndarray + Name of quantity in ``data_index`` to compute the bounce integral of. + + Returns + ------- + F : ndarray, shape(lambdas.size, alpha.size, rho.size) + Bounce integral evaluated at ``lambdas`` for every field line. + + """ + f = eq.compute(name, grid=grid, override_grid=False, data=data)[name] + # If lambdas.size is large, we should loop to save memory. + F = f / (data["B^zeta"] * jnp.sqrt(1 - lambdas[:, jnp.newaxis] * data["|B|"])) + F = jnp.sum(F.reshape(lambdas.size, -1, zeta.size) * w, axis=-1) + F = F.reshape(lambdas.size, alpha.size, rho.size) + if bp.size == 1: + F *= -jnp.pi / (2 * bp[0]) + else: + F *= jnp.pi / (bp[1] - bp[0]) + return F + + return _bounce_integral + + +def bounce_average(eq, lambdas, rho=None, alpha=None, resolution=20): + """Returns a method to compute the bounce average of any quantity. + + The bounce average is defined as + G_ℓ(λ) = (∫ g(ℓ) / √(1 − λ |B|) dℓ) / (∫ 1 / √(1 − λ |B|) dℓ), where + dℓ parameterizes the distance along the field line, + λ is a constant proportional to the magnetic moment over energy, + |B| is the norm of the magnetic field, + g(ℓ) is the quantity to integrate along the field line, + and the endpoints of the integration are at the bounce points. + For a particle with fixed λ, bounce points are defined to be the location + on the field line such that the particle's velocity parallel to the + magnetic field is zero, i.e. λ |B| = 1. + + Parameters + ---------- + eq : Equilibrium + Equilibrium on which the bounce integral is defined. + lambdas : ndarray + λ values to evaluate the bounce integral at. + rho : int + Unique flux surface label coordinates. + alpha : ndarray + Unique field line label coordinates over a constant rho surface. + resolution : int + Number of quadrature points used to compute the bounce integral. + + Returns + ------- + ba : callable + This callable method computes the bounce integral G_ℓ(λ) for every + specified field line ℓ (constant rho and alpha), for every λ value in + ``lambdas``. + + Examples + -------- + .. code-block:: python + + ba = bounce_average(eq, lambdas) + G = ba(name) + + """ + bi = bounce_integral(eq, lambdas, rho, alpha, resolution) + + def _bounce_average(name): + """Compute the bounce average of the named quantity. + + Parameters + ---------- + name : ndarray + Name of quantity in ``data_index`` to compute the bounce average of. + + Returns + ------- + G : ndarray, shape(lambdas.size, alpha.size, rho.size) + Bounce average evaluated at ``lambdas`` for every field line. 
+
+        """
+        den = bi("1", lambdas)
+        num = bi(name, lambdas)
+        G = jnp.reshape(num.ravel() / den.ravel(), den.shape)
+        return G
+
+    return _bounce_average
+
+
+def bounce_point(eq, lambdas):
+    """Todo."""
+    return np.array([])
+
+
 # defines the order in which objective arguments get concatenated into the state vector
 arg_order = (
     "R_lmn",
diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py
index 7c0b4e0445..e970bc5baa 100644
--- a/desc/equilibrium/coords.py
+++ b/desc/equilibrium/coords.py
@@ -40,8 +40,7 @@ def map_coordinates( # noqa: C901
     eq : Equilibrium
         Equilibrium to use
     coords : ndarray, shape(k,3)
-        2D array of input coordinates. Each row is a different
-        point in space.
+        2D array of input coordinates. Each row is a different point in space.
     inbasis, outbasis : tuple of str
         Labels for input and output coordinates, eg ("R", "phi", "Z") or
         ("rho", "alpha", "zeta") or any combination thereof. Labels should be the

From eb09288fc54c31a08a3d51630819ec80b9dca071 Mon Sep 17 00:00:00 2001
From: unalmis
Date: Sat, 3 Feb 2024 23:40:47 -0500
Subject: [PATCH 002/241] Remove ignore division by zero in desc.compute

Such divisions should now be avoided by using safediv or safenorm.
By enabling division-by-zero warnings, we can make sure that NaNs do not
propagate through autodiff.
---
 setup.cfg | 2 --
 1 file changed, 2 deletions(-)

diff --git a/setup.cfg b/setup.cfg
index 22fd610a64..033cbe2a0b 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -48,8 +48,6 @@ markers=
 filterwarnings=
     error
     ignore::pytest.PytestUnraisableExceptionWarning
-    ignore::RuntimeWarning:desc.compute
-    # Ignore division by zero warnings.
     ignore:numpy.ndarray size changed:RuntimeWarning # ignore benign Cython warnings on ndarray size
     ignore::DeprecationWarning:ml_dtypes.*

From 3cc3a33ff9d8a1ef70be9e7a1f7d55c9f51474c2 Mon Sep 17 00:00:00 2001
From: unalmis
Date: Sat, 3 Feb 2024 23:56:17 -0500
Subject: [PATCH 003/241] Simplify broadcasting

---
 desc/compute/utils.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/desc/compute/utils.py b/desc/compute/utils.py
index 0d80a348ca..4731b55dd8 100644
--- a/desc/compute/utils.py
+++ b/desc/compute/utils.py
@@ -1496,9 +1496,7 @@ def _bounce_average(name):
             Bounce average evaluated at ``lambdas`` for every field line.

         """
-        den = bi("1", lambdas)
-        num = bi(name, lambdas)
-        G = jnp.reshape(num.ravel() / den.ravel(), den.shape)
+        G = bi(name, lambdas) / bi("1", lambdas)
         return G

     return _bounce_average

From adf751834be40578dac28e923bc2ea83625e9bca Mon Sep 17 00:00:00 2001
From: unalmis
Date: Sun, 4 Feb 2024 00:59:41 -0500
Subject: [PATCH 004/241] Add axis limit quantities to relevant category to pass tests

---
 desc/compute/utils.py     | 3 +--
 tests/test_axis_limits.py | 2 ++
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/desc/compute/utils.py b/desc/compute/utils.py
index 4731b55dd8..1280de526a 100644
--- a/desc/compute/utils.py
+++ b/desc/compute/utils.py
@@ -1496,8 +1496,7 @@ def _bounce_average(name):
             Bounce average evaluated at ``lambdas`` for every field line.
""" - G = bi(name, lambdas) / bi("1", lambdas) - return G + return bi(name) / bi("1") return _bounce_average diff --git a/tests/test_axis_limits.py b/tests/test_axis_limits.py index 9a37933fec..44356475cc 100644 --- a/tests/test_axis_limits.py +++ b/tests/test_axis_limits.py @@ -42,6 +42,7 @@ "curvature_k2_zeta", "e^helical", "e^theta", + "e^theta_PEST", "e^theta_r", "e^theta_t", "e^theta_z", @@ -64,6 +65,7 @@ } not_implemented_limits = { # reliant limits will be added to this set automatically + "B^theta_PEST", "D_current", "n_rho_z", "|e_theta x e_zeta|_z", From 9609409cee504e90decd70e58599bf72b7c4097a Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 4 Feb 2024 11:10:09 -0500 Subject: [PATCH 005/241] Move meshgrid broadcasting logic into grid.py --- desc/compute/utils.py | 24 ++++++++++++---------- desc/grid.py | 46 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 10 deletions(-) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 1280de526a..31cda4aa28 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -9,7 +9,7 @@ from termcolor import colored from desc.backend import cond, fori_loop, jnp, put -from desc.grid import ConcentricGrid, Grid, LinearGrid +from desc.grid import ConcentricGrid, Grid, LinearGrid, _meshgrid_expand from .data_index import data_index @@ -1345,7 +1345,7 @@ def bounce_integral(eq, lambdas, rho=None, alpha=None, resolution=20): Equilibrium on which the bounce integral is defined. lambdas : ndarray λ values to evaluate the bounce integral at. - rho : int + rho : ndarray Unique flux surface label coordinates. alpha : ndarray Unique field line label coordinates over a constant rho surface. @@ -1400,15 +1400,19 @@ def bounce_integral(eq, lambdas, rho=None, alpha=None, resolution=20): # TODO: In general, Linear Grid construction is not jit compatible. # This issue can be worked around with a specific routine for this. lg = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym) - iota = lg.compress(eq.compute("iota", grid=lg)["iota"]) - iota = jnp.tile( - jnp.repeat(iota, zeta.size, total_repeat_length=rho.size * zeta.size), - alpha.size, - ) - sfl_coords = jnp.column_stack([r, (a + iota * z) % (2 * jnp.pi), z]) + lg_data = eq.compute("iota", grid=lg) + p = "desc.equilibrium.equilibrium.Equilibrium" + data = { + d: _meshgrid_expand(lg.compress(lg_data[d]), rho.size, alpha.size, zeta.size) + for d in get_data_deps("iota", obj=p) + if data_index[p][d]["coordinates"] == "r" + } + sfl_coords = jnp.column_stack([r, (a + data["iota"] * z) % (2 * jnp.pi), z]) desc_coords = eq.compute_theta_coords(sfl_coords) grid = Grid(desc_coords, jitable=True) - data = eq.compute(names=["B^zeta", "|B|"], grid=grid, override_grid=False) + data = eq.compute( + names=["B^zeta", "|B|"], grid=grid, data=data, override_grid=False + ) def _bounce_integral(name): """Compute the bounce integral of the named quantity. @@ -1458,7 +1462,7 @@ def bounce_average(eq, lambdas, rho=None, alpha=None, resolution=20): Equilibrium on which the bounce integral is defined. lambdas : ndarray λ values to evaluate the bounce integral at. - rho : int + rho : ndarray Unique flux surface label coordinates. alpha : ndarray Unique field line label coordinates over a constant rho surface. 
diff --git a/desc/grid.py b/desc/grid.py index 5578911a80..2ed4fe2b68 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -1478,3 +1478,49 @@ def find_least_rational_surfaces( io = find_most_distant(io_rat, n, a, b, tol=atol, **kwargs) rho = _find_rho(iota, io, tol=atol) return rho, io + + +def _meshgrid_expand(x, rho_size, theta_size, zeta_size, surface_label="rho"): + """Expand ``x`` by duplicating elements to match a meshgrid pattern. + + It is common to construct a meshgrid in the following manner. + .. code-block:: python + + # In this meshgrid, the fastest (slowest) changing coordinate is zeta (theta). + r, t, z = jnp.meshgrid(rho, theta, zeta, indexing="ij") + r, t, z = r.ravel(), t.ravel(), z.ravel() + nodes = jnp.column_stack([r, t, z]) + grid = Grid(nodes, sort=False, jitable=True) + + Since ``jitable=True`` was specified, the attribute ``grid.inverse_*_idx`` + is not computed, which is needed for the method ``grid.expand(x)``. + On such grids, this method should be used instead. + + Parameters + ---------- + x : ndarray + Stores the values of a surface function (constant over a surface) + for all unique surfaces of the specified label on the grid. + The length of ``x`` should match the number of unique surfaces of + the corresponding label in this grid. ``x`` should be sorted such + that x[i] corresponds to the value associated with surface_label[i]. + + Returns + ------- + expand_x : ndarray + ``x`` expanded to match the meshgrid pattern. + + """ + if surface_label == "rho": + return jnp.tile( + jnp.repeat(x, zeta_size, total_repeat_length=rho_size * zeta_size), + theta_size, + ) + if surface_label == "theta": + return jnp.repeat( + x, + rho_size * zeta_size, + total_repeat_length=rho_size * theta_size * zeta_size, + ) + if surface_label == "zeta": + return jnp.tile(x, rho_size * theta_size) From 46888daa7424a5bb885ddcad83805294f60313ab Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 4 Feb 2024 11:19:01 -0500 Subject: [PATCH 006/241] Add assert statement that clarifies surface_label parameter --- desc/compute/_basis_vectors.py | 2 +- desc/grid.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/desc/compute/_basis_vectors.py b/desc/compute/_basis_vectors.py index 70c4788426..6577960873 100644 --- a/desc/compute/_basis_vectors.py +++ b/desc/compute/_basis_vectors.py @@ -517,7 +517,7 @@ def _e_sup_theta(params, transforms, profiles, data, **kwargs): coordinates="rtz", data=["e_rho", "e_phi", "sqrt(g)_PEST"], ) -def _e_sup_theta_pest(params, transforms, profiles, data, **kwargs): +def _e_sup_theta_PEST(params, transforms, profiles, data, **kwargs): data["e^theta_PEST"] = ( cross(data["e_phi"], data["e_rho"]).T / data["sqrt(g)_PEST"] ).T diff --git a/desc/grid.py b/desc/grid.py index 2ed4fe2b68..760070caf6 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -1511,6 +1511,11 @@ def _meshgrid_expand(x, rho_size, theta_size, zeta_size, surface_label="rho"): ``x`` expanded to match the meshgrid pattern. """ + assert surface_label in {"rho", "theta", "zeta"}, ( + "These labels need not correspond to DESC coordinates. " + "They should correspond to the order the arrays were given to construct " + "the meshgrid as shown in the example code in the docstring." 
+ ) if surface_label == "rho": return jnp.tile( jnp.repeat(x, zeta_size, total_repeat_length=rho_size * zeta_size), From 28c152780bc2472d01db1bb805963aeb9ec227bb Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 4 Feb 2024 11:28:26 -0500 Subject: [PATCH 007/241] Add useful guards to grid broadcasting function that have saved developers a few times. --- desc/grid.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/desc/grid.py b/desc/grid.py index 760070caf6..1b60b35ccc 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -1517,15 +1517,18 @@ def _meshgrid_expand(x, rho_size, theta_size, zeta_size, surface_label="rho"): "the meshgrid as shown in the example code in the docstring." ) if surface_label == "rho": + assert len(x) == rho_size return jnp.tile( jnp.repeat(x, zeta_size, total_repeat_length=rho_size * zeta_size), theta_size, ) if surface_label == "theta": + assert len(x) == theta_size return jnp.repeat( x, rho_size * zeta_size, total_repeat_length=rho_size * theta_size * zeta_size, ) if surface_label == "zeta": + assert len(x) == zeta_size return jnp.tile(x, rho_size * theta_size) From 35880ba1acb1104661078207818b50ad6354cb33 Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 7 Feb 2024 15:32:40 -0500 Subject: [PATCH 008/241] loop over data instead of data deps --- desc/compute/utils.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 31cda4aa28..d66dd79d20 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -1375,7 +1375,7 @@ def bounce_integral(eq, lambdas, rho=None, alpha=None, resolution=20): # Use Gauss-Chebyshev quadrature as the integrand blows up at integration boundary. x, w = chebgauss(deg=resolution) # TODO: Write code to compute bounce points given lambda. - # Vectorize it for multiple lambdas. Then vectorize coordinate mapping logic + # Then vectorize coordinate mapping logic # for multiple lambdas. For now, let's pretend bounce points do not depend on # lambda so that bounce_point() returns either one or two numbers for # endpoints of all the integrals. @@ -1401,11 +1401,11 @@ def bounce_integral(eq, lambdas, rho=None, alpha=None, resolution=20): # This issue can be worked around with a specific routine for this. 
lg = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym) lg_data = eq.compute("iota", grid=lg) - p = "desc.equilibrium.equilibrium.Equilibrium" data = { d: _meshgrid_expand(lg.compress(lg_data[d]), rho.size, alpha.size, zeta.size) - for d in get_data_deps("iota", obj=p) - if data_index[p][d]["coordinates"] == "r" + for d in lg_data + if data_index["desc.equilibrium.equilibrium.Equilibrium"][d]["coordinates"] + == "r" } sfl_coords = jnp.column_stack([r, (a + data["iota"] * z) % (2 * jnp.pi), z]) desc_coords = eq.compute_theta_coords(sfl_coords) @@ -1507,6 +1507,9 @@ def _bounce_average(name): def bounce_point(eq, lambdas): """Todo.""" + # coordinate mapping with dense field line grid to get b on field lines + # bounce point idx np.nonzero(jnp.diff(jnp.sign(lambdas[:, jnp.newaxis] - B)) + # downsample to course grid and return return np.array([]) From 0ab3b4108448aac3f4d362de0373efdc5800c45a Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 13 Feb 2024 02:15:56 -0500 Subject: [PATCH 009/241] Basics of bounce point finding algorithm --- desc/compute/_basis_vectors.py | 2 +- desc/compute/_field.py | 42 +++++++++++ desc/compute/utils.py | 134 +++++++++++++++++++++------------ 3 files changed, 130 insertions(+), 48 deletions(-) diff --git a/desc/compute/_basis_vectors.py b/desc/compute/_basis_vectors.py index 6577960873..aa27c52c2f 100644 --- a/desc/compute/_basis_vectors.py +++ b/desc/compute/_basis_vectors.py @@ -2397,7 +2397,7 @@ def _e_sub_theta_over_sqrt_g(params, transforms, profiles, data, **kwargs): data=["e_theta", "theta_PEST_t"], ) def _e_sub_theta_pest(params, transforms, profiles, data, **kwargs): - # dX/dv at const r,z = dX/dt * dt/dv / dX/dt / dv/dt + # dX/dv at const r,z = dX/dt * dt/dv = dX/dt / dv/dt data["e_theta_PEST"] = (data["e_theta"].T / data["theta_PEST_t"]).T return data diff --git a/desc/compute/_field.py b/desc/compute/_field.py index bf00a5f041..8e731d6693 100644 --- a/desc/compute/_field.py +++ b/desc/compute/_field.py @@ -2313,6 +2313,48 @@ def _B_mag_z(params, transforms, profiles, data, **kwargs): return data +@register_compute_fun( + name="|B|_alpha", + label="\\partial_{\\alpha} |\\mathbf{B}|", + units="T", + units_long="Tesla", + description="Magnitude of magnetic field, derivative wrt field line angle", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["|B|_t", "alpha_t"], +) +def _B_mag_alpha(params, transforms, profiles, data, **kwargs): + # constant ρ and ζ + data["|B|_alpha"] = data["|B|_t"] / data["alpha_t"] + return data + + +@register_compute_fun( + # TODO: pick a name + name="|B|_z constant rho alpha", + label="\\(partial_{\\zeta} |\\mathbf{B}|)_{\\rho, \\alpha}", + units="T", + units_long="Tesla", + description="Magnitude of magnetic field, derivative along field line", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["|B|_z", "|B|_alpha", "alpha_z"], +) +def _B_mag_z_constant_rho_alpha(params, transforms, profiles, data, **kwargs): + # ∂|B|/∂ζ (constant ρ and α) = ∂|B|/∂ζ (constant ρ and θ) + # - ∂|B|/∂α (constant ρ and ζ) * ∂α/∂ζ (constant ρ and θ) + data["|B|_z constant rho alpha"] = ( + data["|B|_z"] - data["|B|_alpha"] * data["alpha_z"] + ) + return data + + @register_compute_fun( name="|B|_rr", label="\\partial_{\\rho\\rho} |\\mathbf{B}|", diff --git a/desc/compute/utils.py b/desc/compute/utils.py index d66dd79d20..eb92ea2c37 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -8,7 +8,7 @@ from numpy.polynomial.chebyshev import chebgauss from 
termcolor import colored -from desc.backend import cond, fori_loop, jnp, put +from desc.backend import cond, fori_loop, jnp, put, root_scalar from desc.grid import ConcentricGrid, Grid, LinearGrid, _meshgrid_expand from .data_index import data_index @@ -1370,46 +1370,16 @@ def bounce_integral(eq, lambdas, rho=None, alpha=None, resolution=20): if rho is None: rho = jnp.linspace(0, 1, 10) if alpha is None: - alpha = jnp.linspace(0, 2 * jnp.pi, 20) + alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 20) # Use Gauss-Chebyshev quadrature as the integrand blows up at integration boundary. x, w = chebgauss(deg=resolution) - # TODO: Write code to compute bounce points given lambda. - # Then vectorize coordinate mapping logic - # for multiple lambdas. For now, let's pretend bounce points do not depend on + # TODO: For now, let's pretend bounce points do not depend on # lambda so that bounce_point() returns either one or two numbers for # endpoints of all the integrals. - bp = bounce_point(eq, lambdas) - if bp.size == 1: - zeta = -2 * bp * jnp.arcsin(x) / jnp.pi - else: - zeta = (2 * jnp.arcsin(x) / jnp.pi - 1) / 2 * (bp[1] - bp[0]) + bp[1] - - r, a, z = jnp.meshgrid(rho, alpha, zeta, copy=False, indexing="ij") - r, a, z = r.ravel(), a.ravel(), z.ravel() - # Now we map these Clebsch-Type field-line coordinates to DESC coordinates. - # Note that the rotational transform can be computed apriori because it is a single - # variable function of rho, and the coordinate mapping does not change rho. Once - # this is known, it is simple to compute theta_PEST from alpha. Then we transform - # from straight field-line coordinates to DESC coordinates with the method - # compute_theta_coords. This is preferred over transforming from Clebsch-Type - # coordinates to DESC coordinates directly with the more general method - # map_coordinates. That method requires an initial guess to be compatible with JIT, - # and generating a reasonable initial guess requires computing the rotational - # transform to approximate theta_PEST and the poloidal stream function anyway. - # TODO: In general, Linear Grid construction is not jit compatible. - # This issue can be worked around with a specific routine for this. - lg = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym) - lg_data = eq.compute("iota", grid=lg) - data = { - d: _meshgrid_expand(lg.compress(lg_data[d]), rho.size, alpha.size, zeta.size) - for d in lg_data - if data_index["desc.equilibrium.equilibrium.Equilibrium"][d]["coordinates"] - == "r" - } - sfl_coords = jnp.column_stack([r, (a + data["iota"] * z) % (2 * jnp.pi), z]) - desc_coords = eq.compute_theta_coords(sfl_coords) - grid = Grid(desc_coords, jitable=True) + bp = bounce_point(eq, lambdas, rho, alpha) + zeta = (2 * jnp.arcsin(x) / jnp.pi - 1) / 2 * (bp[1] - bp[0]) + bp[1] + grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) data = eq.compute( names=["B^zeta", "|B|"], grid=grid, data=data, override_grid=False ) @@ -1430,13 +1400,14 @@ def _bounce_integral(name): """ f = eq.compute(name, grid=grid, override_grid=False, data=data)[name] # If lambdas.size is large, we should loop to save memory. 
- F = f / (data["B^zeta"] * jnp.sqrt(1 - lambdas[:, jnp.newaxis] * data["|B|"])) + F = ( + f + / (data["B^zeta"] * jnp.sqrt(1 - lambdas[:, jnp.newaxis] * data["|B|"])) + * jnp.pi + / (bp[1] - bp[0]) + ) F = jnp.sum(F.reshape(lambdas.size, -1, zeta.size) * w, axis=-1) F = F.reshape(lambdas.size, alpha.size, rho.size) - if bp.size == 1: - F *= -jnp.pi / (2 * bp[0]) - else: - F *= jnp.pi / (bp[1] - bp[0]) return F return _bounce_integral @@ -1505,12 +1476,81 @@ def _bounce_average(name): return _bounce_average -def bounce_point(eq, lambdas): - """Todo.""" - # coordinate mapping with dense field line grid to get b on field lines - # bounce point idx np.nonzero(jnp.diff(jnp.sign(lambdas[:, jnp.newaxis] - B)) - # downsample to course grid and return - return np.array([]) +def bounce_point(eq, lambdas, rho, alpha, num_roots=50, max_field_line=10 * jnp.pi): + """Find bounce points.""" + # TODO: Main algorithm here. + # 1. fix some broadcasting and vectorization. + # (trying to solve rho.size * alpha.size * lambda.size * num_roots + # root finding problems at once)... better to scan over lambdas for memory + # saving. + # 2. add boundary to root finding logic to keep root searches separate + # will probably just make another version of desc.backend.root_scalar + # to avoid separate root finding routines in residual and jac. + # 3. write docstrings and use transforms in api instead of eq + def residual(zeta): + grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) + data = eq.compute(["|B|"], grid=grid, data=data) + # change this to 1 lambda at a time to save memory + return data["|B|"] - lambdas + + def jac(zeta): + grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) + data = eq.compute(["|B|_z constant rho alpha"], grid=grid, data=data) + return data["|B|_z constant rho alpha"] + + # residual = R = |B| - lambda + # Suppose we only cared about finding one roots of R on each field line. + # Then we would solve (rho.size * alpha.size * lambdas.size) independent + # (scalar) root finding problems + # Instead we want to find all the roots R on each field line. + # First, on a dense grid, compute R, and for every field line, find the zeta + # that are roots of this linear spline of R. These are estimates for the true roots + # of R, and will serve as an initial guess for the newton iteration. + # Also, we compute the midpoints between these root estimates; these will serve + # as boundaries of the domain for the resulting root finding problems. + zeta = np.linspace(0, max_field_line, num_roots) + grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) + data = eq.compute(["|B|"], grid=grid, data=data) + guess_idx = np.nonzero(jnp.diff(jnp.sign(data["|B|"] - lambdas[:, jnp.newaxis]))) + guess = grid.nodes[guess_idx, 2] + boundary = (guess[:-1] + guess[1:]) / 2 + boundary = jnp.insert(boundary, [0, -1], [zeta[0], zeta[-1]]) + # could enforce this with fixup method + # could vmap over this, ... but probably easier to instead implement + # a modified version of what's in desc.backend. Then we won't need to do + # separate root finding routines in residual and jac. + bounce_points = root_scalar(residual, guess, jac=jac) + return bounce_points + + +def field_line_to_desc_coords(rho, alpha, zeta, eq): + """Get desc grid from unique field line coords.""" + r, a, z = jnp.meshgrid(rho, alpha, zeta, copy=False, indexing="ij") + r, a, z = r.ravel(), a.ravel(), z.ravel() + # Now we map these Clebsch-Type field-line coordinates to DESC coordinates. 
+ # Note that the rotational transform can be computed apriori because it is a single + # variable function of rho, and the coordinate mapping does not change rho. Once + # this is known, it is simple to compute theta_PEST from alpha. Then we transform + # from straight field-line coordinates to DESC coordinates with the method + # compute_theta_coords. This is preferred over transforming from Clebsch-Type + # coordinates to DESC coordinates directly with the more general method + # map_coordinates. That method requires an initial guess to be compatible with JIT, + # and generating a reasonable initial guess requires computing the rotational + # transform to approximate theta_PEST and the poloidal stream function anyway. + # TODO: In general, Linear Grid construction is not jit compatible. + # This issue can be worked around with a specific routine for this. + lg = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym) + lg_data = eq.compute("iota", grid=lg) + data = { + d: _meshgrid_expand(lg.compress(lg_data[d]), rho.size, alpha.size, zeta.size) + for d in lg_data + if data_index["desc.equilibrium.equilibrium.Equilibrium"][d]["coordinates"] + == "r" + } + sfl_coords = jnp.column_stack([r, a + data["iota"] * z, z]) + desc_coords = eq.compute_theta_coords(sfl_coords) + grid = Grid(desc_coords, jitable=True) + return grid, data # defines the order in which objective arguments get concatenated into the state vector From dc3422125ace3fa4c26c5667138407e34a0287cf Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 14 Feb 2024 15:05:40 -0500 Subject: [PATCH 010/241] Adding logic to vectorize bounce point finding algorithm --- desc/compute/utils.py | 94 ++++++++++++++++++++++++++----------------- 1 file changed, 57 insertions(+), 37 deletions(-) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index eb92ea2c37..c3616cfe53 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -1374,9 +1374,7 @@ def bounce_integral(eq, lambdas, rho=None, alpha=None, resolution=20): # Use Gauss-Chebyshev quadrature as the integrand blows up at integration boundary. x, w = chebgauss(deg=resolution) - # TODO: For now, let's pretend bounce points do not depend on - # lambda so that bounce_point() returns either one or two numbers for - # endpoints of all the integrals. + # TODO: Generalize logic now that bounce_points return a tensor bp = bounce_point(eq, lambdas, rho, alpha) zeta = (2 * jnp.arcsin(x) / jnp.pi - 1) / 2 * (bp[1] - bp[0]) + bp[1] grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) @@ -1399,7 +1397,6 @@ def _bounce_integral(name): """ f = eq.compute(name, grid=grid, override_grid=False, data=data)[name] - # If lambdas.size is large, we should loop to save memory. F = ( f / (data["B^zeta"] * jnp.sqrt(1 - lambdas[:, jnp.newaxis] * data["|B|"])) @@ -1476,50 +1473,73 @@ def _bounce_average(name): return _bounce_average -def bounce_point(eq, lambdas, rho, alpha, num_roots=50, max_field_line=10 * jnp.pi): +def bounce_point( + eq, lambdas, rho, alpha, max_bounce_points=20, max_field_line=10 * jnp.pi +): """Find bounce points.""" - # TODO: Main algorithm here. - # 1. fix some broadcasting and vectorization. - # (trying to solve rho.size * alpha.size * lambda.size * num_roots - # root finding problems at once)... better to scan over lambdas for memory - # saving. - # 2. add boundary to root finding logic to keep root searches separate - # will probably just make another version of desc.backend.root_scalar - # to avoid separate root finding routines in residual and jac. 
- # 3. write docstrings and use transforms in api instead of eq - def residual(zeta): + # TODO: + # 1. make another version of desc.backend.root_scalar + # to avoid separate root finding routines in residual and jac + # and use previous desc coords as initial guess for next iteration + # 2. write docstrings and use transforms in api instead of eq + def residual(zeta, i): grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) data = eq.compute(["|B|"], grid=grid, data=data) - # change this to 1 lambda at a time to save memory - return data["|B|"] - lambdas + return data["|B|"] - lambdas[i] def jac(zeta): grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) data = eq.compute(["|B|_z constant rho alpha"], grid=grid, data=data) return data["|B|_z constant rho alpha"] - # residual = R = |B| - lambda - # Suppose we only cared about finding one roots of R on each field line. - # Then we would solve (rho.size * alpha.size * lambdas.size) independent - # (scalar) root finding problems - # Instead we want to find all the roots R on each field line. - # First, on a dense grid, compute R, and for every field line, find the zeta - # that are roots of this linear spline of R. These are estimates for the true roots - # of R, and will serve as an initial guess for the newton iteration. - # Also, we compute the midpoints between these root estimates; these will serve - # as boundaries of the domain for the resulting root finding problems. - zeta = np.linspace(0, max_field_line, num_roots) + # Compute |B| - lambda on a dense grid. + # For every field line, find the roots of this linear spline. + # These estimates for the true roots will serve as an initial guess, and + # let us form a boundary mesh around root estimates to limit search domain + # of the root finding algorithms. + zeta = np.linspace(0, max_field_line, 3 * max_bounce_points) grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) data = eq.compute(["|B|"], grid=grid, data=data) - guess_idx = np.nonzero(jnp.diff(jnp.sign(data["|B|"] - lambdas[:, jnp.newaxis]))) - guess = grid.nodes[guess_idx, 2] - boundary = (guess[:-1] + guess[1:]) / 2 - boundary = jnp.insert(boundary, [0, -1], [zeta[0], zeta[-1]]) - # could enforce this with fixup method - # could vmap over this, ... but probably easier to instead implement - # a modified version of what's in desc.backend. Then we won't need to do - # separate root finding routines in residual and jac. 
- bounce_points = root_scalar(residual, guess, jac=jac) + B_norm = data["|B|"].reshape(alpha.size, rho.size) # constant field line chunks + + boundary_lt = jnp.zeros((lambdas.size, max_bounce_points, alpha.size, rho.size)) + boundary_rt = jnp.zeros((lambdas.size, max_bounce_points, alpha.size, rho.size)) + guess = jnp.zeros((lambdas.size, max_bounce_points, alpha.size, rho.size)) + # todo: scan over this + for i in range(lambdas.size): + for j in range(alpha.size): + for k in range(rho.size): + # indices of zeta values observed prior to sign change + idx = jnp.nonzero(jnp.diff(jnp.sign(B_norm[j, k] - lambdas[i])))[0] + guess[i, :, j, k] = grid.nodes[idx, 2] + boundary_lt[i, :, j, k] = jnp.append(zeta[0], guess[:-1]) + boundary_rt[i, :, j, k] = jnp.append(guess[1:], zeta[-1]) + guess = guess.reshape(lambdas.size, max_bounce_points, alpha.size * rho.size) + boundary_lt = boundary_lt.reshape( + lambdas.size, max_bounce_points, alpha.size * rho.size + ) + boundary_rt = boundary_rt.reshape( + lambdas.size, max_bounce_points, alpha.size * rho.size + ) + + def body_lambdas(i, out): + def body_roots(j, out_i): + def fixup(z): + return jnp.clip(z, boundary_lt[i, j], boundary_rt[i, j]) + + # todo: call vmap to vectorize or guess[i, j] so that we solve + # guess[i, j].size independent root finding problems + root = root_scalar(residual, guess[i, j], jac=jac, args=i, fixup=fixup) + out_i = put(out_i, j, root) + return out_i + + out = put(out, i, fori_loop(0, max_bounce_points, body_roots, out[i])) + return out + + bounce_points = jnp.zeros( + shape=(lambdas.size, alpha.size, rho.size, max_bounce_points) + ) + bounce_points = fori_loop(0, lambdas.size, body_lambdas, bounce_points) return bounce_points From edb5a7fcc86a76edfc03d3c17637288a29290fd1 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 18 Feb 2024 01:42:00 -0500 Subject: [PATCH 011/241] Bounce averaging with splines --- desc/backend.py | 41 +++++ desc/compute/_field.py | 23 +-- desc/compute/utils.py | 290 +++++++++++++++++++++++------------- tests/test_compute_utils.py | 143 ++++++++++++++++-- 4 files changed, 371 insertions(+), 126 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index c7280c6580..abc5d5812b 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -357,6 +357,26 @@ def tangent_solve(g, y): ) return x, (jnp.linalg.norm(res), niter) + def complex_sqrt(x): + """Compute the square root of x. + + For negative input elements, a complex value is returned + (unlike numpy.sqrt which returns NaN). + + Parameters + ---------- + x : array_like + The input value(s). + + Returns + ------- + out : ndarray + The square root of x. + + """ + out = jnp.sqrt(x.astype("complex128")) + return out + # we can't really test the numpy backend stuff in automated testing, so we ignore it # for coverage purposes @@ -750,3 +770,24 @@ def root( """ out = scipy.optimize.root(fun, x0, args, jac=jac, tol=tol) return out.x, out + + def complex_sqrt(x): + """Compute the square root of x. + + For negative input elements, a complex value is returned + (unlike numpy.sqrt which returns NaN). + + Parameters + ---------- + x : array_like + The input value(s). + + Returns + ------- + out : ndarray or scalar + The square root of x. If x was a scalar, so is out, + otherwise an array is returned. 
+ + """ + out = np.emath.sqrt(x) + return out diff --git a/desc/compute/_field.py b/desc/compute/_field.py index 8e731d6693..dcf222f858 100644 --- a/desc/compute/_field.py +++ b/desc/compute/_field.py @@ -2238,12 +2238,13 @@ def _B_mag(params, transforms, profiles, data, **kwargs): ], ) def _B_mag_r(params, transforms, profiles, data, **kwargs): - data["|B|_r"] = ( + data["|B|_r"] = safediv( data["B^theta_r"] * data["B_theta"] + data["B^theta"] * data["B_theta_r"] + data["B^zeta_r"] * data["B_zeta"] - + data["B^zeta"] * data["B_zeta_r"] - ) / (2 * data["|B|"]) + + data["B^zeta"] * data["B_zeta_r"], + 2 * data["|B|"], + ) return data @@ -2271,12 +2272,13 @@ def _B_mag_r(params, transforms, profiles, data, **kwargs): ], ) def _B_mag_t(params, transforms, profiles, data, **kwargs): - data["|B|_t"] = ( + data["|B|_t"] = safediv( data["B^theta_t"] * data["B_theta"] + data["B^theta"] * data["B_theta_t"] + data["B^zeta_t"] * data["B_zeta"] - + data["B^zeta"] * data["B_zeta_t"] - ) / (2 * data["|B|"]) + + data["B^zeta"] * data["B_zeta_t"], + 2 * data["|B|"], + ) return data @@ -2304,12 +2306,13 @@ def _B_mag_t(params, transforms, profiles, data, **kwargs): ], ) def _B_mag_z(params, transforms, profiles, data, **kwargs): - data["|B|_z"] = ( + data["|B|_z"] = safediv( data["B^theta_z"] * data["B_theta"] + data["B^theta"] * data["B_theta_z"] + data["B^zeta_z"] * data["B_zeta"] - + data["B^zeta"] * data["B_zeta_z"] - ) / (2 * data["|B|"]) + + data["B^zeta"] * data["B_zeta_z"], + 2 * data["|B|"], + ) return data @@ -2328,7 +2331,7 @@ def _B_mag_z(params, transforms, profiles, data, **kwargs): ) def _B_mag_alpha(params, transforms, profiles, data, **kwargs): # constant ρ and ζ - data["|B|_alpha"] = data["|B|_t"] / data["alpha_t"] + data["|B|_alpha"] = safediv(data["|B|_t"], data["alpha_t"]) return data diff --git a/desc/compute/utils.py b/desc/compute/utils.py index cb52431bfb..e65217b932 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -5,10 +5,10 @@ import warnings import numpy as np -from numpy.polynomial.chebyshev import chebgauss +from scipy.interpolate import CubicHermiteSpline, CubicSpline from termcolor import colored -from desc.backend import cond, fori_loop, jnp, put, root_scalar +from desc.backend import complex_sqrt, cond, fori_loop, jnp, put from desc.grid import ConcentricGrid, Grid, LinearGrid, _meshgrid_expand from .data_index import data_index @@ -1350,7 +1350,123 @@ def body(i, mins): return grid.expand(mins, surface_label) -def bounce_integral(eq, lambdas, rho=None, alpha=None, resolution=20): +# probably best to add these to interpax +def cubic_poly_roots(coef, shift=0, real=True): + """Roots of cubic polynomial. + + Parameters + ---------- + coef : ndarray + First axis should store coefficients of a polynomial. For a polynomial + given by c₁ x³ + c₂ x² + c₃ x + c₄, ``coef[i]`` should store cᵢ. + It is assumed that c₁ is nonzero. + shift : float + Specify to instead find solutions to c₁ x³ + c₂ x² + c₃ x + c₄ = ``shift``. + real : bool + Whether to return only real solutions. If true complex solutions are + returned as nan values. + + Returns + ------- + xi : ndarray + The three roots of the cubic polynomial, sorted by real part then imaginary. + + """ + # https://en.wikipedia.org/wiki/Cubic_equation#General_cubic_formula + # The common libraries use root-finding which isn't compatible with JAX. 
+ a, b, c, d = coef + d = d - shift + t_0 = b**2 - 3 * a * c + t_1 = 2 * b**3 - 9 * a * b * c + 27 * a**2 * d + C = ((t_1 + complex_sqrt(t_1**2 - 4 * t_0**3)) / 2) ** (1 / 3) + C_is_zero = jnp.isclose(C, 0) + + def roots(xi_k): + t_3 = jnp.where(C_is_zero, 0, t_0 / (xi_k * C)) + r = -(b + xi_k * C + t_3) / (3 * a) + if real: + # TODO: Do we need a sentinel besides nan to avoid it in gradient? + # can't jax condition on different types + r = jnp.where(jnp.isreal(r), jnp.real(r), jnp.nan) + return r + + xi_1 = (-1 + (-3) ** 0.5) / 2 + xi_2 = xi_1**2 + xi_3 = 1 + xi = jnp.sort(jnp.stack([roots(xi_1), roots(xi_2), roots(xi_3)], axis=-1), axis=-1) + return xi + + +def polyint(coef): + """Coefficients for the primitives of the given set of polynomials. + + Parameters + ---------- + coef : ndarray + First axis should store coefficients of a polynomial. + For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``coef.shape[0] - 1``, + coefficient cᵢ should be stored at ``coef[n - i]``. + + Returns + ------- + poly : ndarray + Coefficients of polynomial primitive, ignoring the arbitrary constant. + That is, ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁺¹, + where n is ``coef.shape[0] - 1``. + + """ + poly = (coef.T / jnp.arange(coef.shape[0], 0, -1)).T + return poly + + +def polyder(coef): + """Coefficients for the derivatives of the given set of polynomials. + + Parameters + ---------- + coef : ndarray + First axis should store coefficients of a polynomial. + For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``coef.shape[0] - 1``, + coefficient cᵢ should be stored at ``coef[n - i]``. + + Returns + ------- + poly : ndarray + Coefficients of polynomial derivative, ignoring the arbitrary constant. + That is, ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁻¹, + where n is ``coef.shape[0] - 1``. + + """ + poly = (coef[:-1].T * jnp.arange(coef.shape[0] - 1, 0, -1)).T + return poly + + +def polyeval(coef, x): + """Evaluate the set of polynomials at the points x. + + Parameters + ---------- + coef : ndarray + First axis should store coefficients of a polynomial. + For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``coef.shape[0] - 1``, + coefficient cᵢ should be stored at ``coef[n - i]``. + x : ndarray + Coordinates at which to evaluate the set of polynomials. + The first ``coef.ndim`` axes should have shape ``coef.shape[1:]``. + + Returns + ------- + f : ndarray + ``f[j, k, ...]`` is the polynomial with coefficients ``coef[:, j, k, ...]`` + evaluated at the point ``x[j, k, ...]``. + + """ + X = (x[jnp.newaxis, :].T ** jnp.arange(coef.shape[0] - 1, -1, -1)).T + f = jnp.einsum("ijk...,ijk...->jk...", coef, X) + return f + + +def bounce_integral(eq, rho=None, alpha=None, zeta_max=10 * jnp.pi, resolution=20): """Returns a method to compute the bounce integral of any quantity. The bounce integral is defined as F_ℓ(λ) = ∫ f(ℓ) / √(1 − λ |B|) dℓ, where @@ -1367,12 +1483,12 @@ def bounce_integral(eq, lambdas, rho=None, alpha=None, resolution=20): ---------- eq : Equilibrium Equilibrium on which the bounce integral is defined. - lambdas : ndarray - λ values to evaluate the bounce integral at. rho : ndarray Unique flux surface label coordinates. alpha : ndarray Unique field line label coordinates over a constant rho surface. + zeta_max : float + Max value for field line following coordinate. resolution : int Number of quadrature points used to compute the bounce integral. 
@@ -1381,14 +1497,14 @@ def bounce_integral(eq, lambdas, rho=None, alpha=None, resolution=20): bi : callable This callable method computes the bounce integral F_ℓ(λ) for every specified field line ℓ (constant rho and alpha), for every λ value in - ``lambdas``. + ``lambda_pitch``. Examples -------- .. code-block:: python - bi = bounce_integral(eq, lambdas) - F = bi(name) + bi = bounce_integral(eq) + F = bi(name, lambda_pitch) """ if rho is None: @@ -1396,45 +1512,77 @@ def bounce_integral(eq, lambdas, rho=None, alpha=None, resolution=20): if alpha is None: alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 20) - # Use Gauss-Chebyshev quadrature as the integrand blows up at integration boundary. - x, w = chebgauss(deg=resolution) - # TODO: Generalize logic now that bounce_points return a tensor - bp = bounce_point(eq, lambdas, rho, alpha) - zeta = (2 * jnp.arcsin(x) / jnp.pi - 1) / 2 * (bp[1] - bp[0]) + bp[1] + zeta = np.linspace(0, zeta_max, resolution) grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) data = eq.compute( - names=["B^zeta", "|B|"], grid=grid, data=data, override_grid=False + ["B^zeta", "|B|", "|B|_z constant rho alpha"], grid=grid, data=data ) - - def _bounce_integral(name): + # TODO: https://github.com/f0uriest/interpax/issues/19 + coef = CubicHermiteSpline( + zeta, + data["|B|"].reshape(alpha.size, rho.size, zeta.size), + data["|B|_z constant rho alpha"].reshape(alpha.size, rho.size, zeta.size), + axis=-1, + extrapolate="periodic", + ).c + der = polyder(coef) + + def _bounce_integral(name, lambda_pitch): """Compute the bounce integral of the named quantity. Parameters ---------- name : ndarray Name of quantity in ``data_index`` to compute the bounce integral of. + lambda_pitch : ndarray + λ values to evaluate the bounce integral at. Returns ------- - F : ndarray, shape(lambdas.size, alpha.size, rho.size) - Bounce integral evaluated at ``lambdas`` for every field line. + F : ndarray, shape(lambda_pitch.size, alpha.size, rho.size, 2) + Bounce integrals evaluated at ``lambda_pitch`` for every field line. """ - f = eq.compute(name, grid=grid, override_grid=False, data=data)[name] - F = ( - f - / (data["B^zeta"] * jnp.sqrt(1 - lambdas[:, jnp.newaxis] * data["|B|"])) - * jnp.pi - / (bp[1] - bp[0]) + # Gauss-Quadrature is expensive to perform because evaluating the integrand + # at the optimal quadrature points along the field line would require + # root finding to map field line coordinates to desc coordinates. + # Newton-Cotes quadrature is inaccurate as the bounce points are not + # guaranteed to be near the fixed quadrature points. Instead, we + # construct interpolating cubic splines of the integrand and integrate + # the spline exactly between the bounce points. This should give + # comparable results to a composite Simpson Newton-Cotes with quadrature + # points near bounce points. 
+ y = ( + eq.compute(name, grid=grid, override_grid=False, data=data)[name] + / data["B^zeta"] ) - F = jnp.sum(F.reshape(lambdas.size, -1, zeta.size) * w, axis=-1) - F = F.reshape(lambdas.size, alpha.size, rho.size) + + def body(i, out): + bp = cubic_poly_roots(coef, 1 / lambda_pitch[i]) + # number of splines per field line is zeta.size - 1 + # assert bp.shape == (zeta.size - 1, alpha.size, rho.size, 3) # noqa E800 + Y = jnp.reshape( + y / (jnp.sqrt(1 - lambda_pitch[i] * data["|B|"])), + (alpha.size, rho.size, zeta.size), + ) + Y = polyint(CubicSpline(zeta, Y, axis=-1, extrapolate="periodic").c) + integrals = polyeval(Y, bp) + integrals = integrals[:, :, :, 1:] - integrals[:, :, :, :-1] + # Mask the integrals that were outside the potential wells. + b_norm_z = polyeval(der, bp) + wells = (b_norm_z[:, :, :, :-1] <= 0) & (b_norm_z[:, :, :, 1:] >= 0) + out = put(out, i, wells * integrals) + return out + + # TODO: add periodic boundary condition on leftmost and rightmost bounce points + F = jnp.empty((lambda_pitch.size, alpha.size, rho.size, zeta.size, 2)) + F = fori_loop(0, lambda_pitch.size, body, F) return F return _bounce_integral -def bounce_average(eq, lambdas, rho=None, alpha=None, resolution=20): +def bounce_average(eq, rho=None, alpha=None, resolution=20): """Returns a method to compute the bounce average of any quantity. The bounce average is defined as @@ -1452,8 +1600,6 @@ def bounce_average(eq, lambdas, rho=None, alpha=None, resolution=20): ---------- eq : Equilibrium Equilibrium on which the bounce integral is defined. - lambdas : ndarray - λ values to evaluate the bounce integral at. rho : ndarray Unique flux surface label coordinates. alpha : ndarray @@ -1472,104 +1618,36 @@ def bounce_average(eq, lambdas, rho=None, alpha=None, resolution=20): -------- .. code-block:: python - ba = bounce_average(eq, lambdas) - G = ba(name) + ba = bounce_average(eq) + G = ba(name, lambda_pitch) """ - bi = bounce_integral(eq, lambdas, rho, alpha, resolution) + bi = bounce_integral(eq, rho, alpha, resolution) - def _bounce_average(name): + def _bounce_average(name, lambda_pitch): """Compute the bounce average of the named quantity. Parameters ---------- name : ndarray Name of quantity in ``data_index`` to compute the bounce average of. + lambda_pitch : ndarray + λ values to evaluate the bounce integral at. Returns ------- - G : ndarray, shape(lambdas.size, alpha.size, rho.size) + G : ndarray, shape(lambda_pitch.size, alpha.size, rho.size) Bounce average evaluated at ``lambdas`` for every field line. """ - return bi(name) / bi("1") + return bi(name, lambda_pitch) / bi("1", lambda_pitch) return _bounce_average -def bounce_point( - eq, lambdas, rho, alpha, max_bounce_points=20, max_field_line=10 * jnp.pi -): - """Find bounce points.""" - # TODO: - # 1. make another version of desc.backend.root_scalar - # to avoid separate root finding routines in residual and jac - # and use previous desc coords as initial guess for next iteration - # 2. write docstrings and use transforms in api instead of eq - def residual(zeta, i): - grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) - data = eq.compute(["|B|"], grid=grid, data=data) - return data["|B|"] - lambdas[i] - - def jac(zeta): - grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) - data = eq.compute(["|B|_z constant rho alpha"], grid=grid, data=data) - return data["|B|_z constant rho alpha"] - - # Compute |B| - lambda on a dense grid. - # For every field line, find the roots of this linear spline. 
- # These estimates for the true roots will serve as an initial guess, and - # let us form a boundary mesh around root estimates to limit search domain - # of the root finding algorithms. - zeta = np.linspace(0, max_field_line, 3 * max_bounce_points) - grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) - data = eq.compute(["|B|"], grid=grid, data=data) - B_norm = data["|B|"].reshape(alpha.size, rho.size) # constant field line chunks - - boundary_lt = jnp.zeros((lambdas.size, max_bounce_points, alpha.size, rho.size)) - boundary_rt = jnp.zeros((lambdas.size, max_bounce_points, alpha.size, rho.size)) - guess = jnp.zeros((lambdas.size, max_bounce_points, alpha.size, rho.size)) - # todo: scan over this - for i in range(lambdas.size): - for j in range(alpha.size): - for k in range(rho.size): - # indices of zeta values observed prior to sign change - idx = jnp.nonzero(jnp.diff(jnp.sign(B_norm[j, k] - lambdas[i])))[0] - guess[i, :, j, k] = grid.nodes[idx, 2] - boundary_lt[i, :, j, k] = jnp.append(zeta[0], guess[:-1]) - boundary_rt[i, :, j, k] = jnp.append(guess[1:], zeta[-1]) - guess = guess.reshape(lambdas.size, max_bounce_points, alpha.size * rho.size) - boundary_lt = boundary_lt.reshape( - lambdas.size, max_bounce_points, alpha.size * rho.size - ) - boundary_rt = boundary_rt.reshape( - lambdas.size, max_bounce_points, alpha.size * rho.size - ) - - def body_lambdas(i, out): - def body_roots(j, out_i): - def fixup(z): - return jnp.clip(z, boundary_lt[i, j], boundary_rt[i, j]) - - # todo: call vmap to vectorize or guess[i, j] so that we solve - # guess[i, j].size independent root finding problems - root = root_scalar(residual, guess[i, j], jac=jac, args=i, fixup=fixup) - out_i = put(out_i, j, root) - return out_i - - out = put(out, i, fori_loop(0, max_bounce_points, body_roots, out[i])) - return out - - bounce_points = jnp.zeros( - shape=(lambdas.size, alpha.size, rho.size, max_bounce_points) - ) - bounce_points = fori_loop(0, lambdas.size, body_lambdas, bounce_points) - return bounce_points - - def field_line_to_desc_coords(rho, alpha, zeta, eq): """Get desc grid from unique field line coords.""" - r, a, z = jnp.meshgrid(rho, alpha, zeta, copy=False, indexing="ij") + r, a, z = jnp.meshgrid(rho, alpha, zeta, indexing="ij") r, a, z = r.ravel(), a.ravel(), z.ravel() # Now we map these Clebsch-Type field-line coordinates to DESC coordinates. 
# Note that the rotational transform can be computed apriori because it is a single diff --git a/tests/test_compute_utils.py b/tests/test_compute_utils.py index 02a6947b83..e9352335d5 100644 --- a/tests/test_compute_utils.py +++ b/tests/test_compute_utils.py @@ -4,12 +4,17 @@ import numpy as np import pytest -from desc.backend import jnp +from desc.backend import fori_loop, jnp, put, root_scalar from desc.basis import FourierZernikeBasis from desc.compute.geom_utils import rotation_matrix from desc.compute.utils import ( _get_grid_surface, + cubic_poly_roots, + field_line_to_desc_coords, line_integrals, + polyder, + polyeval, + polyint, surface_averages, surface_integrals, surface_integrals_transform, @@ -572,14 +577,132 @@ def test_surface_min_max(self): np.testing.assert_allclose(Bmax_alt, grid.compress(surface_max(grid, B))) np.testing.assert_allclose(Bmin_alt, grid.compress(surface_min(grid, B))) + @pytest.mark.unit + def test_rotation_matrix(self): + """Test that rotation_matrix works with fwd & rev AD for axis=[0, 0, 0].""" + dfdx_fwd = jax.jacfwd(rotation_matrix) + dfdx_rev = jax.jacrev(rotation_matrix) + x0 = jnp.array([0.0, 0.0, 0.0]) + + np.testing.assert_allclose(rotation_matrix(x0), np.eye(3)) + np.testing.assert_allclose(dfdx_fwd(x0), np.zeros((3, 3, 3))) + np.testing.assert_allclose(dfdx_rev(x0), np.zeros((3, 3, 3))) + + @pytest.mark.unit + def test_cubic_poly_roots(self): + """Test vectorized computation of cubic polynomial exact roots.""" + cubic = 4 + poly = np.arange(-60, 60).reshape(cubic, 6, -1) + poly[0] = np.where(poly[0] == 0, np.ones_like(poly[0]), poly[0]) + poly = poly * np.e * np.pi + out = cubic_poly_roots(poly, real=False) + for j in range(poly.shape[1]): + for k in range(poly.shape[2]): + root_finds = np.sort_complex(np.roots(poly[:, j, k])) + np.testing.assert_allclose(out[j, k], root_finds) + + @pytest.mark.unit + def test_polyint(self): + """Test vectorized computation of polynomial primitive.""" + quintic = 6 + poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi + out = polyint(poly) + for j in range(poly.shape[1]): + for k in range(poly.shape[2]): + np.testing.assert_allclose(out[:, j, k], np.polyint(poly[:, j, k])[:-1]) + + @pytest.mark.unit + def test_polyder(self): + """Test vectorized computation of polynomial derivative.""" + quintic = 6 + poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi + out = polyder(poly) + for j in range(poly.shape[1]): + for k in range(poly.shape[2]): + np.testing.assert_allclose(out[:, j, k], np.polyder(poly[:, j, k])) -@pytest.mark.unit -def test_rotation_matrix(): - """Test that rotation_matrix works with fwd & rev AD for axis=[0, 0, 0].""" - dfdx_fwd = jax.jacfwd(rotation_matrix) - dfdx_rev = jax.jacrev(rotation_matrix) - x0 = jnp.array([0.0, 0.0, 0.0]) + @pytest.mark.unit + def test_polyeval(self): + """Test vectorized computation of polynomial evaluation.""" + quintic = 6 + poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi + x = np.linspace(0, 20, poly.shape[1] * poly.shape[2]).reshape( + poly.shape[1], poly.shape[2] + ) + x = np.stack([x, x * 2], axis=-1) + out = polyeval(poly, x) + for j in range(poly.shape[1]): + for k in range(poly.shape[2]): + np.testing.assert_allclose(out[j, k], np.poly1d(poly[:, j, k])(x[j, k])) + + # TODO: FIXME + def bounce_point( + self, eq, lambdas, rho, alpha, max_bounce_points=20, max_field_line=10 * np.pi + ): + """Find bounce points.""" + # TODO: + # 1. 
make another version of desc.backend.root_scalar + # to avoid separate root finding routines in residual and jac + # and use previous desc coords as initial guess for next iteration + # 2. write docstrings and use transforms in api instead of eq + def residual(zeta, i): + grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) + data = eq.compute(["|B|"], grid=grid, data=data) + return data["|B|"] - lambdas[i] + + def jac(zeta): + grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) + data = eq.compute(["|B|_z constant rho alpha"], grid=grid, data=data) + return data["|B|_z constant rho alpha"] + + # Compute |B| - lambda on a dense grid. + # For every field line, find the roots of this linear spline. + # These estimates for the true roots will serve as an initial guess, and + # let us form a boundary mesh around root estimates to limit search domain + # of the root finding algorithms. + zeta = np.linspace(0, max_field_line, 3 * max_bounce_points) + grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) + data = eq.compute(["|B|"], grid=grid, data=data) + B_norm = data["|B|"].reshape( + alpha.size, rho.size, -1 + ) # constant field line chunks + + boundary_lt = np.zeros((lambdas.size, max_bounce_points, alpha.size, rho.size)) + boundary_rt = np.zeros((lambdas.size, max_bounce_points, alpha.size, rho.size)) + guess = np.zeros((lambdas.size, max_bounce_points, alpha.size, rho.size)) + # todo: scan over this + for i in range(lambdas.size): + for j in range(alpha.size): + for k in range(rho.size): + # indices of zeta values observed prior to sign change + idx = np.nonzero(np.diff(np.sign(B_norm[j, k] - lambdas[i])))[0] + guess[i, :, j, k] = grid.nodes[idx, 2] + boundary_lt[i, :, j, k] = np.append(zeta[0], guess[:-1]) + boundary_rt[i, :, j, k] = np.append(guess[1:], zeta[-1]) + guess = guess.reshape(lambdas.size, max_bounce_points, alpha.size * rho.size) + boundary_lt = boundary_lt.reshape( + lambdas.size, max_bounce_points, alpha.size * rho.size + ) + boundary_rt = boundary_rt.reshape( + lambdas.size, max_bounce_points, alpha.size * rho.size + ) - np.testing.assert_allclose(rotation_matrix(x0), np.eye(3)) - np.testing.assert_allclose(dfdx_fwd(x0), np.zeros((3, 3, 3))) - np.testing.assert_allclose(dfdx_rev(x0), np.zeros((3, 3, 3))) + def body_lambdas(i, out): + def body_roots(j, out_i): + def fixup(z): + return np.clip(z, boundary_lt[i, j], boundary_rt[i, j]) + + # todo: call vmap to vectorize on guess[i, j] so that we solve + # guess[i, j].size independent root finding problems + root = root_scalar(residual, guess[i, j], jac=jac, args=i, fixup=fixup) + out_i = put(out_i, j, root) + return out_i + + out = put(out, i, fori_loop(0, max_bounce_points, body_roots, out[i])) + return out + + bounce_points = np.zeros( + shape=(lambdas.size, alpha.size, rho.size, max_bounce_points) + ) + bounce_points = fori_loop(0, lambdas.size, body_lambdas, bounce_points) + return bounce_points From 0a857732e63c01f201cc940641b597364d0c610b Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 18 Feb 2024 02:02:52 -0500 Subject: [PATCH 012/241] Use vmap instead of fori_loop for computing over lambda_pitch --- desc/compute/utils.py | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index e65217b932..05026c49d2 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -8,7 +8,7 @@ from scipy.interpolate import CubicHermiteSpline, CubicSpline from termcolor import colored -from desc.backend import complex_sqrt, 
cond, fori_loop, jnp, put +from desc.backend import complex_sqrt, cond, fori_loop, jnp, put, vmap from desc.grid import ConcentricGrid, Grid, LinearGrid, _meshgrid_expand from .data_index import data_index @@ -1539,7 +1539,7 @@ def _bounce_integral(name, lambda_pitch): Returns ------- - F : ndarray, shape(lambda_pitch.size, alpha.size, rho.size, 2) + F : ndarray, shape(lambda_pitch.size, alpha.size, rho.size, resolution, 2) Bounce integrals evaluated at ``lambda_pitch`` for every field line. """ @@ -1557,26 +1557,24 @@ def _bounce_integral(name, lambda_pitch): / data["B^zeta"] ) - def body(i, out): - bp = cubic_poly_roots(coef, 1 / lambda_pitch[i]) + def body(lambda_pitch_single): + bp = cubic_poly_roots(coef, 1 / lambda_pitch_single) # number of splines per field line is zeta.size - 1 # assert bp.shape == (zeta.size - 1, alpha.size, rho.size, 3) # noqa E800 + b_norm_z = polyeval(der, bp) + wells = (b_norm_z[:, :, :, :-1] <= 0) & (b_norm_z[:, :, :, 1:] >= 0) Y = jnp.reshape( - y / (jnp.sqrt(1 - lambda_pitch[i] * data["|B|"])), + y / (jnp.sqrt(1 - lambda_pitch_single * data["|B|"])), (alpha.size, rho.size, zeta.size), ) Y = polyint(CubicSpline(zeta, Y, axis=-1, extrapolate="periodic").c) integrals = polyeval(Y, bp) - integrals = integrals[:, :, :, 1:] - integrals[:, :, :, :-1] - # Mask the integrals that were outside the potential wells. - b_norm_z = polyeval(der, bp) - wells = (b_norm_z[:, :, :, :-1] <= 0) & (b_norm_z[:, :, :, 1:] >= 0) - out = put(out, i, wells * integrals) - return out + # Mask the integrations outside the potential wells. + integrals = wells * (integrals[:, :, :, 1:] - integrals[:, :, :, :-1]) + return integrals # TODO: add periodic boundary condition on leftmost and rightmost bounce points - F = jnp.empty((lambda_pitch.size, alpha.size, rho.size, zeta.size, 2)) - F = fori_loop(0, lambda_pitch.size, body, F) + F = vmap(body)(lambda_pitch) return F return _bounce_integral From a96eb0df0dff8f79980e29560a3a909a5650a95d Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 18 Feb 2024 03:22:58 -0500 Subject: [PATCH 013/241] Remove vmap, stengthen tests --- desc/compute/utils.py | 42 +++++++++++++++++-------------------- tests/test_compute_utils.py | 19 ++++++++++++++--- 2 files changed, 35 insertions(+), 26 deletions(-) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 05026c49d2..f2c4eeab15 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -8,7 +8,7 @@ from scipy.interpolate import CubicHermiteSpline, CubicSpline from termcolor import colored -from desc.backend import complex_sqrt, cond, fori_loop, jnp, put, vmap +from desc.backend import complex_sqrt, cond, fori_loop, jnp, put from desc.grid import ConcentricGrid, Grid, LinearGrid, _meshgrid_expand from .data_index import data_index @@ -1360,7 +1360,7 @@ def cubic_poly_roots(coef, shift=0, real=True): First axis should store coefficients of a polynomial. For a polynomial given by c₁ x³ + c₂ x² + c₃ x + c₄, ``coef[i]`` should store cᵢ. It is assumed that c₁ is nonzero. - shift : float + shift : ndarray, shape(shift.size, ) Specify to instead find solutions to c₁ x³ + c₂ x² + c₃ x + c₄ = ``shift``. real : bool Whether to return only real solutions. If true complex solutions are @@ -1375,7 +1375,7 @@ def cubic_poly_roots(coef, shift=0, real=True): # https://en.wikipedia.org/wiki/Cubic_equation#General_cubic_formula # The common libraries use root-finding which isn't compatible with JAX. 
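# ---------------------------------------------------------------------------
# [Editor's note] Illustrative sketch, not part of the patch. The closed-form
# (Cardano-style) cubic formula used below, checked against numpy.roots for one
# sample polynomial. numpy.roots solves a companion-matrix eigenproblem
# iteratively, which is the root finding the comment above notes is not
# compatible with JAX tracing and differentiation. The special case C == 0,
# which the patched function guards with ``C_is_zero``, is ignored here.
import numpy as np

def cubic_roots_closed_form(a, b, c, d):
    t_0 = b**2 - 3 * a * c
    t_1 = 2 * b**3 - 9 * a * b * c + 27 * a**2 * d
    C = ((t_1 + np.emath.sqrt(t_1**2 - 4 * t_0**3)) / 2) ** (1 / 3)
    xi = (-1 + np.emath.sqrt(-3)) / 2    # primitive cube root of unity
    return np.array([-(b + xi**k * C + t_0 / (xi**k * C)) / (3 * a) for k in range(3)])

coef = [1.0, -6.0, 11.0, -6.0]           # (x - 1)(x - 2)(x - 3)
np.testing.assert_allclose(
    np.sort_complex(cubic_roots_closed_form(*coef)), np.sort_complex(np.roots(coef))
)
# ---------------------------------------------------------------------------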
a, b, c, d = coef - d = d - shift + d = (d[jnp.newaxis, :].T - shift).T t_0 = b**2 - 3 * a * c t_1 = 2 * b**3 - 9 * a * b * c + 27 * a**2 * d C = ((t_1 + complex_sqrt(t_1**2 - 4 * t_0**3)) / 2) ** (1 / 3) @@ -1552,29 +1552,25 @@ def _bounce_integral(name, lambda_pitch): # the spline exactly between the bounce points. This should give # comparable results to a composite Simpson Newton-Cotes with quadrature # points near bounce points. - y = ( + bp = cubic_poly_roots(coef, 1 / lambda_pitch) + # Recall there are zeta.size - 1 cubic splines per field line. + assert bp.shape == (lambda_pitch.size, zeta.size - 1, alpha.size, rho.size, 3) + b_norm_z = polyeval(der, bp) + wells = (b_norm_z[:, :, :, :, :-1] <= 0) & (b_norm_z[:, :, :, :, 1:] >= 0) + + Y = jnp.reshape( eq.compute(name, grid=grid, override_grid=False, data=data)[name] - / data["B^zeta"] + / ( + data["B^zeta"] + * jnp.sqrt(1 - lambda_pitch[:, jnp.newaxis] * data["|B|"]) + ), + (lambda_pitch.size, alpha.size, rho.size, zeta.size), ) - - def body(lambda_pitch_single): - bp = cubic_poly_roots(coef, 1 / lambda_pitch_single) - # number of splines per field line is zeta.size - 1 - # assert bp.shape == (zeta.size - 1, alpha.size, rho.size, 3) # noqa E800 - b_norm_z = polyeval(der, bp) - wells = (b_norm_z[:, :, :, :-1] <= 0) & (b_norm_z[:, :, :, 1:] >= 0) - Y = jnp.reshape( - y / (jnp.sqrt(1 - lambda_pitch_single * data["|B|"])), - (alpha.size, rho.size, zeta.size), - ) - Y = polyint(CubicSpline(zeta, Y, axis=-1, extrapolate="periodic").c) - integrals = polyeval(Y, bp) - # Mask the integrations outside the potential wells. - integrals = wells * (integrals[:, :, :, 1:] - integrals[:, :, :, :-1]) - return integrals - + Y = polyint(CubicSpline(zeta, Y, axis=-1, extrapolate="periodic").c) # TODO: add periodic boundary condition on leftmost and rightmost bounce points - F = vmap(body)(lambda_pitch) + integrals = polyeval(Y, bp) + # Mask the integrations outside the potential wells. 
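# ---------------------------------------------------------------------------
# [Editor's note] Illustrative sketch, not part of the patch. The ``wells``
# mask above encodes the bounce-point classification: where |B| crosses the
# level 1/lambda with d|B|/dzeta <= 0 the particle enters a magnetic well
# (left bounce point), and where it crosses with d|B|/dzeta >= 0 it leaves the
# well (right bounce point); only integrals between such a pair are kept.
# A minimal numpy version on a hypothetical field line:
import numpy as np

zeta = np.linspace(0, 2 * np.pi, 200)
B = 1 + 0.2 * np.cos(zeta)                 # stand-in |B| along a field line
dB_dzeta = -0.2 * np.sin(zeta)
level = 1.1                                # 1 / lambda
crossings = np.nonzero(np.diff(np.sign(B - level)))[0]
is_left = dB_dzeta[crossings] <= 0         # entering the well
is_right = dB_dzeta[crossings] >= 0        # leaving the well
# The single well here spans the crossings near zeta = pi/3 and zeta = 5*pi/3.
assert is_left[0] and is_right[-1]
# ---------------------------------------------------------------------------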
+ F = wells * (integrals[:, :, :, :, 1:] - integrals[:, :, :, :, :-1]) return F return _bounce_integral diff --git a/tests/test_compute_utils.py b/tests/test_compute_utils.py index e9352335d5..2d112255d3 100644 --- a/tests/test_compute_utils.py +++ b/tests/test_compute_utils.py @@ -595,17 +595,24 @@ def test_cubic_poly_roots(self): poly = np.arange(-60, 60).reshape(cubic, 6, -1) poly[0] = np.where(poly[0] == 0, np.ones_like(poly[0]), poly[0]) poly = poly * np.e * np.pi - out = cubic_poly_roots(poly, real=False) + assert np.unique(poly.shape).size == poly.ndim + shift = np.arange(10) + assert np.unique(poly.shape + shift.shape).size == poly.ndim + shift.ndim + out = cubic_poly_roots(poly, shift, real=False) for j in range(poly.shape[1]): for k in range(poly.shape[2]): - root_finds = np.sort_complex(np.roots(poly[:, j, k])) - np.testing.assert_allclose(out[j, k], root_finds) + for s in range(shift.size): + a, b, c, d = poly[:, j, k] + d = d - shift[s] + root_finds = np.sort_complex(np.roots([a, b, c, d])) + np.testing.assert_allclose(out[s, j, k], root_finds) @pytest.mark.unit def test_polyint(self): """Test vectorized computation of polynomial primitive.""" quintic = 6 poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi + assert np.unique(poly.shape).size == poly.ndim out = polyint(poly) for j in range(poly.shape[1]): for k in range(poly.shape[2]): @@ -616,6 +623,7 @@ def test_polyder(self): """Test vectorized computation of polynomial derivative.""" quintic = 6 poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi + assert np.unique(poly.shape).size == poly.ndim out = polyder(poly) for j in range(poly.shape[1]): for k in range(poly.shape[2]): @@ -626,10 +634,15 @@ def test_polyeval(self): """Test vectorized computation of polynomial evaluation.""" quintic = 6 poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi + assert np.unique(poly.shape).size == poly.ndim x = np.linspace(0, 20, poly.shape[1] * poly.shape[2]).reshape( poly.shape[1], poly.shape[2] ) x = np.stack([x, x * 2], axis=-1) + x = np.stack([x, x * 2, x * 3, x * 4], axis=-1) + assert np.unique(x.shape).size == x.ndim + assert poly.shape[1:] == x.shape[:2] + assert np.unique((poly.shape[0],) + x.shape[2:]).size == x.ndim - 1 out = polyeval(poly, x) for j in range(poly.shape[1]): for k in range(poly.shape[2]): From 8d1841ba395322845040c4b0669b0e002d0cfb70 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 18 Feb 2024 07:00:00 -0500 Subject: [PATCH 014/241] Fix einsum and reshaping bugs --- desc/compute/utils.py | 37 +++++++++++++++++++++++-------------- tests/test_compute_utils.py | 4 ++-- 2 files changed, 25 insertions(+), 16 deletions(-) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index f2c4eeab15..2ca7b3c239 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -1351,7 +1351,7 @@ def body(i, mins): # probably best to add these to interpax -def cubic_poly_roots(coef, shift=0, real=True): +def cubic_poly_roots(coef, shift=jnp.array([0]), real=True): """Roots of cubic polynomial. Parameters @@ -1375,7 +1375,7 @@ def cubic_poly_roots(coef, shift=0, real=True): # https://en.wikipedia.org/wiki/Cubic_equation#General_cubic_formula # The common libraries use root-finding which isn't compatible with JAX. 
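# ---------------------------------------------------------------------------
# [Editor's note] Illustrative sketch, not part of the patch. What the
# transpose-broadcast expression just below computes: subtracting a batch of
# ``shift`` values from the constant coefficient produces one shifted cubic per
# requested level, so c1 x^3 + c2 x^2 + c3 x + c4 = shift[s] can be solved for
# every s at once. The shapes here are small hypothetical examples.
import numpy as np

d = np.arange(6.0).reshape(2, 3)           # constant coefficients, shape (2, 3)
shift = np.array([0.0, 10.0, 20.0])        # batch of right-hand sides, shape (3,)
batched = (d[np.newaxis, :].T - shift).T    # shape (3, 2, 3): one copy per shift
np.testing.assert_allclose(
    batched, d[np.newaxis] - shift[:, np.newaxis, np.newaxis]
)
# ---------------------------------------------------------------------------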
a, b, c, d = coef - d = (d[jnp.newaxis, :].T - shift).T + d = jnp.squeeze((d[jnp.newaxis, :].T - shift).T) t_0 = b**2 - 3 * a * c t_1 = 2 * b**3 - 9 * a * b * c + 27 * a**2 * d C = ((t_1 + complex_sqrt(t_1**2 - 4 * t_0**3)) / 2) ** (1 / 3) @@ -1462,7 +1462,9 @@ def polyeval(coef, x): """ X = (x[jnp.newaxis, :].T ** jnp.arange(coef.shape[0] - 1, -1, -1)).T - f = jnp.einsum("ijk...,ijk...->jk...", coef, X) + subscripts = "abcdefghijklmnopqrstuvwxyz" + sub = subscripts[: coef.ndim] + f = jnp.einsum(sub + "," + sub + "...->" + sub[1:] + "...", coef, X) return f @@ -1526,6 +1528,9 @@ def bounce_integral(eq, rho=None, alpha=None, zeta_max=10 * jnp.pi, resolution=2 extrapolate="periodic", ).c der = polyder(coef) + # There are zeta.size - 1 splines per field line. + assert coef.shape == (4, zeta.size - 1, alpha.size, rho.size) + assert der.shape == (3, zeta.size - 1, alpha.size, rho.size) def _bounce_integral(name, lambda_pitch): """Compute the bounce integral of the named quantity. @@ -1539,7 +1544,7 @@ def _bounce_integral(name, lambda_pitch): Returns ------- - F : ndarray, shape(lambda_pitch.size, alpha.size, rho.size, resolution, 2) + F : ndarray, shape(rho.size, alpha.size, resolution, 2, lambda_pitch.size) Bounce integrals evaluated at ``lambda_pitch`` for every field line. """ @@ -1552,25 +1557,29 @@ def _bounce_integral(name, lambda_pitch): # the spline exactly between the bounce points. This should give # comparable results to a composite Simpson Newton-Cotes with quadrature # points near bounce points. + lambda_pitch = jnp.atleast_1d(lambda_pitch) bp = cubic_poly_roots(coef, 1 / lambda_pitch) - # Recall there are zeta.size - 1 cubic splines per field line. - assert bp.shape == (lambda_pitch.size, zeta.size - 1, alpha.size, rho.size, 3) + bp = jnp.moveaxis(bp, 0, -(lambda_pitch.size > 1)) + assert bp.shape == (zeta.size - 1, alpha.size, rho.size, 3, lambda_pitch.size) b_norm_z = polyeval(der, bp) - wells = (b_norm_z[:, :, :, :, :-1] <= 0) & (b_norm_z[:, :, :, :, 1:] >= 0) + wells = (b_norm_z[:, :, :, :-1] <= 0) & (b_norm_z[:, :, :, 1:] >= 0) - Y = jnp.reshape( + Y = jnp.nan_to_num( eq.compute(name, grid=grid, override_grid=False, data=data)[name] / ( data["B^zeta"] * jnp.sqrt(1 - lambda_pitch[:, jnp.newaxis] * data["|B|"]) - ), - (lambda_pitch.size, alpha.size, rho.size, zeta.size), - ) + ) + ).reshape(lambda_pitch.size, alpha.size, rho.size, zeta.size) Y = polyint(CubicSpline(zeta, Y, axis=-1, extrapolate="periodic").c) + Y = jnp.moveaxis(Y, 2, -1) + assert Y.shape == (4, zeta.size - 1, alpha.size, rho.size, lambda_pitch.size) # TODO: add periodic boundary condition on leftmost and rightmost bounce points - integrals = polyeval(Y, bp) + integrals = polyeval(Y[:, :, :, :, jnp.newaxis], bp) # Mask the integrations outside the potential wells. - F = wells * (integrals[:, :, :, :, 1:] - integrals[:, :, :, :, :-1]) + F = wells * (integrals[:, :, :, 1:] - integrals[:, :, :, :-1]) + F = jnp.swapaxes(F, 0, 2) + assert F.shape == (rho.size, alpha.size, zeta.size - 1, 2, lambda_pitch.size) return F return _bounce_integral @@ -1630,7 +1639,7 @@ def _bounce_average(name, lambda_pitch): Returns ------- - G : ndarray, shape(lambda_pitch.size, alpha.size, rho.size) + G : ndarray, shape(rho.size, alpha.size, resolution, 2, lambda_pitch.size) Bounce average evaluated at ``lambdas`` for every field line. 
""" diff --git a/tests/test_compute_utils.py b/tests/test_compute_utils.py index 2d112255d3..5701ab6eae 100644 --- a/tests/test_compute_utils.py +++ b/tests/test_compute_utils.py @@ -641,8 +641,8 @@ def test_polyeval(self): x = np.stack([x, x * 2], axis=-1) x = np.stack([x, x * 2, x * 3, x * 4], axis=-1) assert np.unique(x.shape).size == x.ndim - assert poly.shape[1:] == x.shape[:2] - assert np.unique((poly.shape[0],) + x.shape[2:]).size == x.ndim - 1 + assert poly.shape[1:] == x.shape[: poly.ndim - 1] + assert np.unique((poly.shape[0],) + x.shape[poly.ndim - 1 :]).size == x.ndim - 1 out = polyeval(poly, x) for j in range(poly.shape[1]): for k in range(poly.shape[2]): From 25a1f5edc4ae3c84d9d944c380a0b4684001c85e Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 18 Feb 2024 17:31:57 -0500 Subject: [PATCH 015/241] Add periodicity, still need to sum over complex bounce points --- desc/compute/utils.py | 80 ++++++++++++++++++++++--------------- tests/test_compute_utils.py | 2 +- 2 files changed, 48 insertions(+), 34 deletions(-) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 2ca7b3c239..b23139ff0b 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -1351,7 +1351,7 @@ def body(i, mins): # probably best to add these to interpax -def cubic_poly_roots(coef, shift=jnp.array([0]), real=True): +def cubic_poly_roots(coef, shift=jnp.array([0])): """Roots of cubic polynomial. Parameters @@ -1362,13 +1362,11 @@ def cubic_poly_roots(coef, shift=jnp.array([0]), real=True): It is assumed that c₁ is nonzero. shift : ndarray, shape(shift.size, ) Specify to instead find solutions to c₁ x³ + c₂ x² + c₃ x + c₄ = ``shift``. - real : bool - Whether to return only real solutions. If true complex solutions are - returned as nan values. Returns ------- - xi : ndarray + xi : ndarray, shape(shift.size, coef.shape, 3) + If shift has one element, the first axis will be squeezed out. The three roots of the cubic polynomial, sorted by real part then imaginary. """ @@ -1384,10 +1382,6 @@ def cubic_poly_roots(coef, shift=jnp.array([0]), real=True): def roots(xi_k): t_3 = jnp.where(C_is_zero, 0, t_0 / (xi_k * C)) r = -(b + xi_k * C + t_3) / (3 * a) - if real: - # TODO: Do we need a sentinel besides nan to avoid it in gradient? - # can't jax condition on different types - r = jnp.where(jnp.isreal(r), jnp.real(r), jnp.nan) return r xi_1 = (-1 + (-3) ** 0.5) / 2 @@ -1462,9 +1456,9 @@ def polyeval(coef, x): """ X = (x[jnp.newaxis, :].T ** jnp.arange(coef.shape[0] - 1, -1, -1)).T - subscripts = "abcdefghijklmnopqrstuvwxyz" - sub = subscripts[: coef.ndim] - f = jnp.einsum(sub + "," + sub + "...->" + sub[1:] + "...", coef, X) + alphabet = "abcdefghijklmnopqrstuvwxyz" + sub = alphabet[: coef.ndim] + f = jnp.einsum(f"{sub},{sub}...->{sub[1:]}...", coef, X) return f @@ -1481,6 +1475,10 @@ def bounce_integral(eq, rho=None, alpha=None, zeta_max=10 * jnp.pi, resolution=2 on the field line such that the particle's velocity parallel to the magnetic field is zero, i.e. λ |B| = 1. + The bounce integral is defined up to a sign. + We choose the sign that corresponds the particle's guiding center trajectory + traveling in the direction of increasing field-line-following label. + Parameters ---------- eq : Equilibrium @@ -1527,10 +1525,14 @@ def bounce_integral(eq, rho=None, alpha=None, zeta_max=10 * jnp.pi, resolution=2 axis=-1, extrapolate="periodic", ).c + # Enforce a periodic boundary condition to compute bounce integrals + # of particles trapped outside this snapshot of the field lines. 
+ coef = jnp.append(coef, coef[:, 0][:, jnp.newaxis], axis=1) der = polyder(coef) - # There are zeta.size - 1 splines per field line. - assert coef.shape == (4, zeta.size - 1, alpha.size, rho.size) - assert der.shape == (3, zeta.size - 1, alpha.size, rho.size) + # There are zeta.size splines per field line. + # The last spline is a duplicate of the first. + assert coef.shape == (4, zeta.size, alpha.size, rho.size) + assert der.shape == (3, zeta.size, alpha.size, rho.size) def _bounce_integral(name, lambda_pitch): """Compute the bounce integral of the named quantity. @@ -1544,7 +1546,8 @@ def _bounce_integral(name, lambda_pitch): Returns ------- - F : ndarray, shape(rho.size, alpha.size, resolution, 2, lambda_pitch.size) + F : ndarray, shape(lambda_pitch.size, resolution, alpha.size, rho.size, 2) + Axes with size one will be squeezed out. Bounce integrals evaluated at ``lambda_pitch`` for every field line. """ @@ -1558,28 +1561,35 @@ def _bounce_integral(name, lambda_pitch): # comparable results to a composite Simpson Newton-Cotes with quadrature # points near bounce points. lambda_pitch = jnp.atleast_1d(lambda_pitch) - bp = cubic_poly_roots(coef, 1 / lambda_pitch) - bp = jnp.moveaxis(bp, 0, -(lambda_pitch.size > 1)) - assert bp.shape == (zeta.size - 1, alpha.size, rho.size, 3, lambda_pitch.size) - b_norm_z = polyeval(der, bp) - wells = (b_norm_z[:, :, :, :-1] <= 0) & (b_norm_z[:, :, :, 1:] >= 0) - - Y = jnp.nan_to_num( + bp = cubic_poly_roots(coef, 1 / lambda_pitch).reshape( + lambda_pitch.size, zeta.size, alpha.size, rho.size, 3 + ) + real_bp = jnp.real(bp) + b_norm_z = polyeval(der[:, jnp.newaxis], real_bp) + is_well = (b_norm_z[..., :-1] <= 0) & (b_norm_z[..., 1:] >= 0) + is_real = jnp.isreal(bp[..., :-1]) & jnp.isreal(bp[..., 1:]) + # Can precompute everything above if lambda_pitch given to parent function. + + # Goal: Integrate between potential wells with real bounce points. + # Strategy: 1. Integrate between real parts of all complex bounce points. + # 2. Sum integrals between real bounce points. + # 3. Keep only results with two real bounce points in a well. + y = jnp.nan_to_num( eq.compute(name, grid=grid, override_grid=False, data=data)[name] / ( data["B^zeta"] * jnp.sqrt(1 - lambda_pitch[:, jnp.newaxis] * data["|B|"]) ) ).reshape(lambda_pitch.size, alpha.size, rho.size, zeta.size) - Y = polyint(CubicSpline(zeta, Y, axis=-1, extrapolate="periodic").c) - Y = jnp.moveaxis(Y, 2, -1) - assert Y.shape == (4, zeta.size - 1, alpha.size, rho.size, lambda_pitch.size) - # TODO: add periodic boundary condition on leftmost and rightmost bounce points - integrals = polyeval(Y[:, :, :, :, jnp.newaxis], bp) - # Mask the integrations outside the potential wells. - F = wells * (integrals[:, :, :, 1:] - integrals[:, :, :, :-1]) - F = jnp.swapaxes(F, 0, 2) - assert F.shape == (rho.size, alpha.size, zeta.size - 1, 2, lambda_pitch.size) + y = CubicSpline(zeta, y, axis=-1, extrapolate="periodic").c + # Enforce a periodic boundary condition to compute bounce integrals + # of particles trapped outside this snapshot of the field lines. + y = jnp.append(y, y[:, 0][:, jnp.newaxis], axis=1) + Y = jnp.swapaxes(polyint(y), 1, 2) + Y = polyeval(Y, real_bp) + integral = Y[..., 1:] - Y[..., :-1] + # TODO: sum across real bounce points + F = jnp.squeeze((is_well & is_real) * integral) return F return _bounce_integral @@ -1599,6 +1609,10 @@ def bounce_average(eq, rho=None, alpha=None, resolution=20): on the field line such that the particle's velocity parallel to the magnetic field is zero, i.e. λ |B| = 1. 
+ The bounce integral is defined up to a sign. + We choose the sign that corresponds the particle's guiding center trajectory + traveling in the direction of increasing field-line-following label. + Parameters ---------- eq : Equilibrium @@ -1639,7 +1653,7 @@ def _bounce_average(name, lambda_pitch): Returns ------- - G : ndarray, shape(rho.size, alpha.size, resolution, 2, lambda_pitch.size) + G : ndarray, shape(lambda_pitch.size, resolution - 1, alpha.size, rho.size, 2) Bounce average evaluated at ``lambdas`` for every field line. """ diff --git a/tests/test_compute_utils.py b/tests/test_compute_utils.py index 5701ab6eae..936693a4c4 100644 --- a/tests/test_compute_utils.py +++ b/tests/test_compute_utils.py @@ -598,7 +598,7 @@ def test_cubic_poly_roots(self): assert np.unique(poly.shape).size == poly.ndim shift = np.arange(10) assert np.unique(poly.shape + shift.shape).size == poly.ndim + shift.ndim - out = cubic_poly_roots(poly, shift, real=False) + out = cubic_poly_roots(poly, shift) for j in range(poly.shape[1]): for k in range(poly.shape[2]): for s in range(shift.size): From 88e03ae247cfe678436799fae8af23b6845d4697 Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 19 Feb 2024 00:10:24 -0500 Subject: [PATCH 016/241] Fix algorithm to compute integrals along splines --- desc/compute/utils.py | 107 ++++++++++++++++++++++++------------ tests/test_compute_utils.py | 16 +++--- 2 files changed, 81 insertions(+), 42 deletions(-) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index b23139ff0b..78363a0ed7 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -1350,8 +1350,14 @@ def body(i, mins): return grid.expand(mins, surface_label) -# probably best to add these to interpax -def cubic_poly_roots(coef, shift=jnp.array([0])): +def cubic_poly_roots( + coef, + constant=jnp.array([0]), + a_min=-jnp.inf, + a_max=jnp.inf, + return_complex=False, + fill=False, +): """Roots of cubic polynomial. Parameters @@ -1360,20 +1366,35 @@ def cubic_poly_roots(coef, shift=jnp.array([0])): First axis should store coefficients of a polynomial. For a polynomial given by c₁ x³ + c₂ x² + c₃ x + c₄, ``coef[i]`` should store cᵢ. It is assumed that c₁ is nonzero. - shift : ndarray, shape(shift.size, ) - Specify to instead find solutions to c₁ x³ + c₂ x² + c₃ x + c₄ = ``shift``. + constant : ndarray, shape(constant.size, ) + Specify to instead find solutions to c₁ x³ + c₂ x² + c₃ x + c₄ = ``constant``. + a_min : ndarray + Return nan if real part of root is less than ``a_min``. + Should broadcast with arrays of shape ``coef.shape[1:]``. + a_max : ndarray + Return nan if real part of root is less than ``a_max``. + Should broadcast with arrays of shape ``coef.shape[1:]``. + return_complex : bool + If set to false, will return nan for complex roots. + fill : bool + If set to True, then the last axis of the output has size 5 instead + of 3, where the first element is ``a_min`` and the last is ``a_max``. + This option also replaces undesirable roots with by duplicating a + desirable root with smaller real part. If no such root exists, + then ``a_min`` is used. The roots will be sorted from from smallest + to largest real part. Returns ------- - xi : ndarray, shape(shift.size, coef.shape, 3) - If shift has one element, the first axis will be squeezed out. - The three roots of the cubic polynomial, sorted by real part then imaginary. + xi : ndarray, shape(constant.size, coef.shape, ?) + If constant has one element, the first axis will be squeezed out. + The roots of the cubic polynomial. 
""" # https://en.wikipedia.org/wiki/Cubic_equation#General_cubic_formula # The common libraries use root-finding which isn't compatible with JAX. a, b, c, d = coef - d = jnp.squeeze((d[jnp.newaxis, :].T - shift).T) + d = jnp.squeeze((d[jnp.newaxis].T - constant).T) t_0 = b**2 - 3 * a * c t_1 = 2 * b**3 - 9 * a * b * c + 27 * a**2 * d C = ((t_1 + complex_sqrt(t_1**2 - 4 * t_0**3)) / 2) ** (1 / 3) @@ -1382,12 +1403,21 @@ def cubic_poly_roots(coef, shift=jnp.array([0])): def roots(xi_k): t_3 = jnp.where(C_is_zero, 0, t_0 / (xi_k * C)) r = -(b + xi_k * C + t_3) / (3 * a) + r = jnp.where( + (return_complex | jnp.isreal(r)) & (a_min <= r) & (r <= a_max), r, jnp.nan + ) return r xi_1 = (-1 + (-3) ** 0.5) / 2 xi_2 = xi_1**2 xi_3 = 1 - xi = jnp.sort(jnp.stack([roots(xi_1), roots(xi_2), roots(xi_3)], axis=-1), axis=-1) + xi = jnp.stack([roots(xi_1), roots(xi_2), roots(xi_3)], axis=-1) + if fill: + xi_1, xi_2, xi_3 = jnp.sort(xi, axis=-1).T + xi_1 = jnp.where(jnp.isnan(xi_1), a_min, xi_1) + xi_2 = jnp.where(jnp.isnan(xi_2), xi_1, xi_2) + xi_3 = jnp.where(jnp.isnan(xi_3), xi_2, xi_3) + xi = jnp.stack(np.broadcast_arrays(a_min, xi_1, xi_2, xi_3, a_max), axis=-1) return xi @@ -1455,7 +1485,7 @@ def polyeval(coef, x): evaluated at the point ``x[j, k, ...]``. """ - X = (x[jnp.newaxis, :].T ** jnp.arange(coef.shape[0] - 1, -1, -1)).T + X = (x[jnp.newaxis].T ** jnp.arange(coef.shape[0] - 1, -1, -1)).T alphabet = "abcdefghijklmnopqrstuvwxyz" sub = alphabet[: coef.ndim] f = jnp.einsum(f"{sub},{sub}...->{sub[1:]}...", coef, X) @@ -1525,14 +1555,12 @@ def bounce_integral(eq, rho=None, alpha=None, zeta_max=10 * jnp.pi, resolution=2 axis=-1, extrapolate="periodic", ).c - # Enforce a periodic boundary condition to compute bounce integrals - # of particles trapped outside this snapshot of the field lines. - coef = jnp.append(coef, coef[:, 0][:, jnp.newaxis], axis=1) + coef = jnp.swapaxes(coef, 1, -1) der = polyder(coef) # There are zeta.size splines per field line. # The last spline is a duplicate of the first. - assert coef.shape == (4, zeta.size, alpha.size, rho.size) - assert der.shape == (3, zeta.size, alpha.size, rho.size) + assert coef.shape == (4, rho.size, alpha.size, zeta.size - 1) + assert der.shape == (3, rho.size, alpha.size, zeta.size - 1) def _bounce_integral(name, lambda_pitch): """Compute the bounce integral of the named quantity. @@ -1546,7 +1574,7 @@ def _bounce_integral(name, lambda_pitch): Returns ------- - F : ndarray, shape(lambda_pitch.size, resolution, alpha.size, rho.size, 2) + F : ndarray, shape(lambda_pitch.size, rho.size, alpha.size, resolution, 2) Axes with size one will be squeezed out. Bounce integrals evaluated at ``lambda_pitch`` for every field line. @@ -1561,19 +1589,23 @@ def _bounce_integral(name, lambda_pitch): # comparable results to a composite Simpson Newton-Cotes with quadrature # points near bounce points. lambda_pitch = jnp.atleast_1d(lambda_pitch) - bp = cubic_poly_roots(coef, 1 / lambda_pitch).reshape( - lambda_pitch.size, zeta.size, alpha.size, rho.size, 3 + bp = cubic_poly_roots( + coef, + constant=1 / lambda_pitch, + a_min=zeta[:-1], + a_max=zeta[1:], + fill=True, + ).reshape(lambda_pitch.size, rho.size, alpha.size, zeta.size - 1, 5) + # Use the filter bp[..., 1:-1] to compute only on potential bounce points. 
+ b_norm_z = polyeval(der[:, jnp.newaxis], bp[..., 1:-1]).reshape( + lambda_pitch.size, rho.size, alpha.size, (zeta.size - 1) * 3 ) - real_bp = jnp.real(bp) - b_norm_z = polyeval(der[:, jnp.newaxis], real_bp) - is_well = (b_norm_z[..., :-1] <= 0) & (b_norm_z[..., 1:] >= 0) - is_real = jnp.isreal(bp[..., :-1]) & jnp.isreal(bp[..., 1:]) + # Check sign of gradient to determine whether root is a valid bounce point. + # Periodic boundary to compute bounce integrals of particles + # trapped outside this snapshot of the field lines. + is_well = (b_norm_z <= 0) & (jnp.roll(b_norm_z, -1, axis=-1) >= 0) # noqa: F841 # Can precompute everything above if lambda_pitch given to parent function. - # Goal: Integrate between potential wells with real bounce points. - # Strategy: 1. Integrate between real parts of all complex bounce points. - # 2. Sum integrals between real bounce points. - # 3. Keep only results with two real bounce points in a well. y = jnp.nan_to_num( eq.compute(name, grid=grid, override_grid=False, data=data)[name] / ( @@ -1582,14 +1614,19 @@ def _bounce_integral(name, lambda_pitch): ) ).reshape(lambda_pitch.size, alpha.size, rho.size, zeta.size) y = CubicSpline(zeta, y, axis=-1, extrapolate="periodic").c - # Enforce a periodic boundary condition to compute bounce integrals - # of particles trapped outside this snapshot of the field lines. - y = jnp.append(y, y[:, 0][:, jnp.newaxis], axis=1) - Y = jnp.swapaxes(polyint(y), 1, 2) - Y = polyeval(Y, real_bp) - integral = Y[..., 1:] - Y[..., :-1] - # TODO: sum across real bounce points - F = jnp.squeeze((is_well & is_real) * integral) + y = jnp.moveaxis(y, [1, -1], [-1, 2]) + assert y.shape == (4, lambda_pitch.size, rho.size, alpha.size, zeta.size - 1) + Y = polyeval(polyint(y), bp).reshape( + lambda_pitch.size, rho.size, alpha.size, (zeta.size - 1) * 5 + ) + integrals = jnp.roll(Y, -1, axis=-1) - Y + # TODO: For each every two True values along last axis of is_well, indexed + # along the last axis by i, j, we should + # compute jnp.sum(match[..., i:j], axis=-1) where + # the variable match is integrals.reshape(..., (zeta.size - 1), 5))[..., 1:-1] + # then we are done. maybe can do this with a mask + # like F = jnp.squeeze(is_well * integrals) + F = integrals return F return _bounce_integral @@ -1653,7 +1690,7 @@ def _bounce_average(name, lambda_pitch): Returns ------- - G : ndarray, shape(lambda_pitch.size, resolution - 1, alpha.size, rho.size, 2) + G : ndarray, shape(lambda_pitch.size, rho.size, alpha.size, resolution, 2) Bounce average evaluated at ``lambdas`` for every field line. 
""" diff --git a/tests/test_compute_utils.py b/tests/test_compute_utils.py index 936693a4c4..e1ecaabafa 100644 --- a/tests/test_compute_utils.py +++ b/tests/test_compute_utils.py @@ -596,16 +596,18 @@ def test_cubic_poly_roots(self): poly[0] = np.where(poly[0] == 0, np.ones_like(poly[0]), poly[0]) poly = poly * np.e * np.pi assert np.unique(poly.shape).size == poly.ndim - shift = np.arange(10) - assert np.unique(poly.shape + shift.shape).size == poly.ndim + shift.ndim - out = cubic_poly_roots(poly, shift) + constant = np.arange(10) + assert np.unique(poly.shape + constant.shape).size == poly.ndim + constant.ndim + out = cubic_poly_roots(poly, constant, return_complex=True) for j in range(poly.shape[1]): for k in range(poly.shape[2]): - for s in range(shift.size): + for s in range(constant.size): a, b, c, d = poly[:, j, k] - d = d - shift[s] - root_finds = np.sort_complex(np.roots([a, b, c, d])) - np.testing.assert_allclose(out[s, j, k], root_finds) + d = d - constant[s] + np.testing.assert_allclose( + np.sort_complex(out[s, j, k]), + np.sort_complex(np.roots([a, b, c, d])), + ) @pytest.mark.unit def test_polyint(self): From 6d7ef6eac924c6be18e581a7f2c2cc356affa6ea Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 21 Feb 2024 15:03:40 -0500 Subject: [PATCH 017/241] Fix algorithm, just need to change broadcasting --- desc/compute/utils.py | 43 ++++++++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 78363a0ed7..424b357a79 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -1414,10 +1414,13 @@ def roots(xi_k): xi = jnp.stack([roots(xi_1), roots(xi_2), roots(xi_3)], axis=-1) if fill: xi_1, xi_2, xi_3 = jnp.sort(xi, axis=-1).T - xi_1 = jnp.where(jnp.isnan(xi_1), a_min, xi_1) + xi_1 = jnp.where( + jnp.isnan(xi_1), a_min[:, jnp.newaxis, jnp.newaxis, jnp.newaxis], xi_1 + ) xi_2 = jnp.where(jnp.isnan(xi_2), xi_1, xi_2) xi_3 = jnp.where(jnp.isnan(xi_3), xi_2, xi_3) - xi = jnp.stack(np.broadcast_arrays(a_min, xi_1, xi_2, xi_3, a_max), axis=-1) + # todo: use correct stacking method + xi = jnp.vstack([a_min, xi_1, xi_2, xi_3, a_max]) return xi @@ -1557,8 +1560,6 @@ def bounce_integral(eq, rho=None, alpha=None, zeta_max=10 * jnp.pi, resolution=2 ).c coef = jnp.swapaxes(coef, 1, -1) der = polyder(coef) - # There are zeta.size splines per field line. - # The last spline is a duplicate of the first. assert coef.shape == (4, rho.size, alpha.size, zeta.size - 1) assert der.shape == (3, rho.size, alpha.size, zeta.size - 1) @@ -1589,21 +1590,26 @@ def _bounce_integral(name, lambda_pitch): # comparable results to a composite Simpson Newton-Cotes with quadrature # points near bounce points. lambda_pitch = jnp.atleast_1d(lambda_pitch) - bp = cubic_poly_roots( + interpolation_points = cubic_poly_roots( coef, constant=1 / lambda_pitch, a_min=zeta[:-1], a_max=zeta[1:], fill=True, ).reshape(lambda_pitch.size, rho.size, alpha.size, zeta.size - 1, 5) - # Use the filter bp[..., 1:-1] to compute only on potential bounce points. - b_norm_z = polyeval(der[:, jnp.newaxis], bp[..., 1:-1]).reshape( - lambda_pitch.size, rho.size, alpha.size, (zeta.size - 1) * 3 - ) + b_norm_z = polyeval( + der[:, jnp.newaxis], interpolation_points[..., 1:-1] + ).reshape(lambda_pitch.size, rho.size, alpha.size, (zeta.size - 1) * 3) # Check sign of gradient to determine whether root is a valid bounce point. # Periodic boundary to compute bounce integrals of particles # trapped outside this snapshot of the field lines. 
- is_well = (b_norm_z <= 0) & (jnp.roll(b_norm_z, -1, axis=-1) >= 0) # noqa: F841 + is_well = (b_norm_z <= 0) & (jnp.roll(b_norm_z, -1, axis=-1) >= 0) + is_well_broadcast = jnp.zeros( + shape=(lambda_pitch.size, rho.size, alpha.size, (zeta.size - 1) * 5), + dtype=bool, + ) + idx = jnp.arange((zeta.size - 1) * 3) + is_well_broadcast[..., (idx // 3) * 5 + 1 + (idx % 3)] = is_well # Can precompute everything above if lambda_pitch given to parent function. y = jnp.nan_to_num( @@ -1616,17 +1622,16 @@ def _bounce_integral(name, lambda_pitch): y = CubicSpline(zeta, y, axis=-1, extrapolate="periodic").c y = jnp.moveaxis(y, [1, -1], [-1, 2]) assert y.shape == (4, lambda_pitch.size, rho.size, alpha.size, zeta.size - 1) - Y = polyeval(polyint(y), bp).reshape( + Y = polyeval(polyint(y), interpolation_points).reshape( lambda_pitch.size, rho.size, alpha.size, (zeta.size - 1) * 5 ) - integrals = jnp.roll(Y, -1, axis=-1) - Y - # TODO: For each every two True values along last axis of is_well, indexed - # along the last axis by i, j, we should - # compute jnp.sum(match[..., i:j], axis=-1) where - # the variable match is integrals.reshape(..., (zeta.size - 1), 5))[..., 1:-1] - # then we are done. maybe can do this with a mask - # like F = jnp.squeeze(is_well * integrals) - F = integrals + mask = jnp.append(jnp.arange(1, Y.size) % 5 != 0, True) + sums = jnp.cumsum( + (jnp.diff(Y, axis=-1, append=Y[..., 0]) - Y[..., -1]) * mask, + axis=-1, + ) + # todo: there should be a jitable way for this, then we are done + F = jnp.diff(sums[..., is_well_broadcast], axis=-1) return F return _bounce_integral From ac82269bc38922ed020bf86e37d38c3af57ceeda Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 22 Feb 2024 00:55:00 -0500 Subject: [PATCH 018/241] Use jax methods for array indexing --- desc/backend.py | 91 +++++++++++++--------------------- desc/compute/utils.py | 112 +++++++++++++++++++++++++++++++----------- 2 files changed, 116 insertions(+), 87 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index abc5d5812b..4989af69ca 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -73,6 +73,7 @@ vmap = jax.vmap scan = jax.lax.scan bincount = jnp.bincount + nonzero = jnp.nonzero from jax import custom_jvp from jax.experimental.ode import odeint from jax.scipy.linalg import block_diag, cho_factor, cho_solve, qr, solve_triangular @@ -104,6 +105,32 @@ def put(arr, inds, vals): return arr return jnp.asarray(arr).at[inds].set(vals) + # TODO: Add axis parameter. + def put_along_axis(arr, inds, vals): + """Functional interface for array "fancy indexing". + + Provides a way to do arr[..., inds] = vals in a way that works with JAX. + + Parameters + ---------- + arr : array-like + Array to populate + inds : array-like of int + Indices to populate + vals : array-like + Values to insert + + Returns + ------- + arr : array-like + Input array with vals inserted at inds. + + """ + if isinstance(arr, np.ndarray): + arr[..., inds] = vals + return arr + return jnp.asarray(arr).at[..., inds].set(vals) + def sign(x): """Sign function, but returns 1 for x==0. @@ -393,6 +420,9 @@ def complex_sqrt(x): ) from scipy.special import gammaln, logsumexp # noqa: F401 + complex_sqrt = np.emath.sqrt + put_along_axis = np.put_along_axis + def tree_stack(*args, **kwargs): """Stack pytree for numpy backend.""" raise NotImplementedError @@ -629,44 +659,6 @@ def custom_jvp(fun, *args, **kwargs): fun.defjvps = lambda *args, **kwargs: None return fun - def tanh_sinh_quadrature(N, quad_limit=3.16): - """ - tanh_sinh quadrature. 
- - This function outputs the quadrature points and weights - for a tanh-sinh quadrature. - - ∫₋₁¹ f(x) dx = Σ wₖ f(xₖ) - - Parameters - ---------- - N: int - Number of quadrature points, preferable odd - quad_limit: float - The range of quadrature points to be mapped. - Larger quad_limit implies better result but limited due to overflow in sinh - - Returns - ------- - x_k : numpy array - Quadrature points - w_k : numpy array - Quadrature weights - - """ - initial_points = np.linspace(-quad_limit, quad_limit, N) - h = np.diff(initial_points)[0] - x_k = np.tanh(0.5 * np.pi * np.sinh(initial_points)) - w_k = ( - 0.5 - * np.pi - * h - * np.cosh(initial_points) - / (np.cosh(0.5 * np.pi * np.sinh(initial_points))) ** 2 - ) - - return x_k, w_k - def root_scalar( fun, x0, @@ -771,23 +763,6 @@ def root( out = scipy.optimize.root(fun, x0, args, jac=jac, tol=tol) return out.x, out - def complex_sqrt(x): - """Compute the square root of x. - - For negative input elements, a complex value is returned - (unlike numpy.sqrt which returns NaN). - - Parameters - ---------- - x : array_like - The input value(s). - - Returns - ------- - out : ndarray or scalar - The square root of x. If x was a scalar, so is out, - otherwise an array is returned. - - """ - out = np.emath.sqrt(x) - return out + def nonzero(a, *, size=None, fill_value=None): + """Same as np.nonzero but with dummy parameters to match jnp.nonzero API.""" + return np.nonzero(a) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 424b357a79..9ee2369946 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -5,10 +5,18 @@ import warnings import numpy as np -from scipy.interpolate import CubicHermiteSpline, CubicSpline +from scipy.interpolate import Akima1DInterpolator, CubicHermiteSpline from termcolor import colored -from desc.backend import complex_sqrt, cond, fori_loop, jnp, put +from desc.backend import ( + complex_sqrt, + cond, + fori_loop, + jnp, + nonzero, + put, + put_along_axis, +) from desc.grid import ConcentricGrid, Grid, LinearGrid, _meshgrid_expand from .data_index import data_index @@ -1413,14 +1421,11 @@ def roots(xi_k): xi_3 = 1 xi = jnp.stack([roots(xi_1), roots(xi_2), roots(xi_3)], axis=-1) if fill: - xi_1, xi_2, xi_3 = jnp.sort(xi, axis=-1).T - xi_1 = jnp.where( - jnp.isnan(xi_1), a_min[:, jnp.newaxis, jnp.newaxis, jnp.newaxis], xi_1 - ) - xi_2 = jnp.where(jnp.isnan(xi_2), xi_1, xi_2) - xi_3 = jnp.where(jnp.isnan(xi_3), xi_2, xi_3) - # todo: use correct stacking method - xi = jnp.vstack([a_min, xi_1, xi_2, xi_3, a_max]) + xi = jnp.sort(xi, axis=-1) + xi_1 = jnp.where(jnp.isnan(xi[..., 0]), a_min, xi[..., 0]) + xi_2 = jnp.where(jnp.isnan(xi[..., 1]), xi[..., 0], xi[..., 1]) + xi_3 = jnp.where(jnp.isnan(xi[..., 2]), xi[..., 1], xi[..., 2]) + xi = jnp.stack(jnp.broadcast_arrays(a_min, xi_1, xi_2, xi_3, a_max), axis=-1) return xi @@ -1495,6 +1500,45 @@ def polyeval(coef, x): return f +def tanh_sinh_quadrature(N, quad_limit=3.16): + """ + tanh_sinh quadrature. + + This function outputs the quadrature points and weights + for a tanh-sinh quadrature. + + ∫₋₁¹ f(x) dx = Σ wₖ f(xₖ) + + Parameters + ---------- + N: int + Number of quadrature points, preferable odd + quad_limit: float + The range of quadrature points to be mapped. 
+ Larger quad_limit implies better result but limited due to overflow in sinh + + Returns + ------- + x_k : numpy array + Quadrature points + w_k : numpy array + Quadrature weights + + """ + initial_points = jnp.linspace(-quad_limit, quad_limit, N) + h = 2 * quad_limit / (N - 1) + x_k = jnp.tanh(0.5 * jnp.pi * jnp.sinh(initial_points)) + w_k = ( + 0.5 + * jnp.pi + * h + * jnp.cosh(initial_points) + / jnp.cosh(0.5 * jnp.pi * jnp.sinh(initial_points)) ** 2 + ) + + return x_k, w_k + + def bounce_integral(eq, rho=None, alpha=None, zeta_max=10 * jnp.pi, resolution=20): """Returns a method to compute the bounce integral of any quantity. @@ -1580,15 +1624,15 @@ def _bounce_integral(name, lambda_pitch): Bounce integrals evaluated at ``lambda_pitch`` for every field line. """ - # Gauss-Quadrature is expensive to perform because evaluating the integrand - # at the optimal quadrature points along the field line would require - # root finding to map field line coordinates to desc coordinates. - # Newton-Cotes quadrature is inaccurate as the bounce points are not - # guaranteed to be near the fixed quadrature points. Instead, we - # construct interpolating cubic splines of the integrand and integrate - # the spline exactly between the bounce points. This should give - # comparable results to a composite Simpson Newton-Cotes with quadrature - # points near bounce points. + # Newton-Cotes quadrature would be inaccurate as the bounce points are not + # guaranteed to be near the fixed quadrature points. + # Gauss-Quadrature on the exact integrand is expensive to perform because + # evaluating the integrand at the optimal quadrature points along the field + # line would require root finding to map field line coordinates to desc + # coordinates. So we approximate functions in the integrand with splines + # and perform Gauss-Quadrature. + # TODO: spline functions separately since no polynomial cannot capture the + # division accurately near the bounce points. lambda_pitch = jnp.atleast_1d(lambda_pitch) interpolation_points = cubic_poly_roots( coef, @@ -1604,12 +1648,16 @@ def _bounce_integral(name, lambda_pitch): # Periodic boundary to compute bounce integrals of particles # trapped outside this snapshot of the field lines. is_well = (b_norm_z <= 0) & (jnp.roll(b_norm_z, -1, axis=-1) >= 0) - is_well_broadcast = jnp.zeros( - shape=(lambda_pitch.size, rho.size, alpha.size, (zeta.size - 1) * 5), - dtype=bool, - ) idx = jnp.arange((zeta.size - 1) * 3) - is_well_broadcast[..., (idx // 3) * 5 + 1 + (idx % 3)] = is_well + # Make is_well broadcast with interpolation_points. + is_well = put_along_axis( + jnp.zeros( + shape=(lambda_pitch.size, rho.size, alpha.size, (zeta.size - 1) * 5), + dtype=bool, + ), + (idx // 3) * 5 + 1 + (idx % 3), + is_well, + ) # Can precompute everything above if lambda_pitch given to parent function. 
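# ---------------------------------------------------------------------------
# [Editor's note] Illustrative sketch, not part of the patch. The lines that
# follow integrate the spline of the integrand exactly between arbitrary points
# by evaluating per-interval primitives (``polyint`` + ``polyeval``) and
# chaining them across knots with a cumulative sum; the mask on knot positions
# exists because each interval's primitive carries its own integration
# constant. The same idea with scipy on a toy spline:
import numpy as np
from scipy.interpolate import CubicSpline

knots = np.linspace(0, 2 * np.pi, 9)
spline = CubicSpline(knots, np.sin(knots))
# primitive of each local cubic, in the local variable (x - knots[i])
primitives = [np.polyint(spline.c[:, i]) for i in range(knots.size - 1)]

def integral_from_first_knot(x):
    i = np.searchsorted(knots, x) - 1
    full = sum(np.polyval(primitives[k], knots[k + 1] - knots[k]) for k in range(i))
    return full + np.polyval(primitives[i], x - knots[i])

a, b = 0.7, 4.0
np.testing.assert_allclose(
    integral_from_first_knot(b) - integral_from_first_knot(a), spline.integrate(a, b)
)
# ---------------------------------------------------------------------------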
y = jnp.nan_to_num( @@ -1619,19 +1667,25 @@ def _bounce_integral(name, lambda_pitch): * jnp.sqrt(1 - lambda_pitch[:, jnp.newaxis] * data["|B|"]) ) ).reshape(lambda_pitch.size, alpha.size, rho.size, zeta.size) - y = CubicSpline(zeta, y, axis=-1, extrapolate="periodic").c + y = Akima1DInterpolator(zeta, y, axis=-1).c y = jnp.moveaxis(y, [1, -1], [-1, 2]) assert y.shape == (4, lambda_pitch.size, rho.size, alpha.size, zeta.size - 1) Y = polyeval(polyint(y), interpolation_points).reshape( lambda_pitch.size, rho.size, alpha.size, (zeta.size - 1) * 5 ) - mask = jnp.append(jnp.arange(1, Y.size) % 5 != 0, True) sums = jnp.cumsum( - (jnp.diff(Y, axis=-1, append=Y[..., 0]) - Y[..., -1]) * mask, + jnp.diff(Y, axis=-1, append=Y[..., 0, jnp.newaxis]) + # Multiply by mask that is false at knots of piecewise spline + # to avoid adding difference between primitives of splines at knots. + * jnp.append(jnp.arange(1, Y.shape[-1]) % 5 != 0, True), axis=-1, ) - # todo: there should be a jitable way for this, then we are done - F = jnp.diff(sums[..., is_well_broadcast], axis=-1) + # TODO: jnp.diff(sums[is_well], axis=-1)[::2] except + # padded with zeros at end to avoid dynamically sized output. + F = jnp.diff( + sums[nonzero(is_well, size=sums.size, fill_value=sums.size - 1)], + axis=-1, + )[::2] return F return _bounce_integral From d498a501eb80dc0baedfd48bf55ab2ef3cd18739 Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 23 Feb 2024 01:46:06 -0500 Subject: [PATCH 019/241] Add differentiable indexed difference, reduce memory in cubic_poly_roots... and reduce dimension of tensor by one throughout computation to take advantage of caching --- desc/backend.py | 69 +++++++++++------ desc/compute/utils.py | 146 +++++++++++++++++------------------- tests/test_compute_utils.py | 4 +- 3 files changed, 119 insertions(+), 100 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index 4989af69ca..4990d16660 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -73,7 +73,7 @@ vmap = jax.vmap scan = jax.lax.scan bincount = jnp.bincount - nonzero = jnp.nonzero + flatnonzero = jnp.flatnonzero from jax import custom_jvp from jax.experimental.ode import odeint from jax.scipy.linalg import block_diag, cho_factor, cho_solve, qr, solve_triangular @@ -105,31 +105,37 @@ def put(arr, inds, vals): return arr return jnp.asarray(arr).at[inds].set(vals) - # TODO: Add axis parameter. - def put_along_axis(arr, inds, vals): - """Functional interface for array "fancy indexing". + def put_along_axis(arr, indices, values, axis): + """Put values into the destination array by matching 1d index and data slices. - Provides a way to do arr[..., inds] = vals in a way that works with JAX. + This iterates over matching 1d slices oriented along the specified axis in + the index and data arrays, and uses the former to place values into the + latter. Parameters ---------- - arr : array-like - Array to populate - inds : array-like of int - Indices to populate - vals : array-like - Values to insert - - Returns - ------- - arr : array-like - Input array with vals inserted at inds. + arr : ndarray (Ni..., M, Nk...) + Destination array. + indices : ndarray (Ni..., J, Nk...) + Indices to change along each 1d slice of `arr`. This must match the + dimension of arr, but dimensions in Ni and Nj may be 1 to broadcast + against `arr`. + values : array_like (Ni..., J, Nk...) + values to insert at those indices. Its shape and dimension are + broadcast to match that of `indices`. + axis : int + The axis to take 1d slices along. 
If axis is None, the destination + array is treated as if a flattened 1d view had been created of it. """ + if axis != -1: + raise NotImplementedError( + "JAX put_along_axis currently only supports axis=-1." + ) if isinstance(arr, np.ndarray): - arr[..., inds] = vals + arr[..., indices] = values return arr - return jnp.asarray(arr).at[..., inds].set(vals) + return jnp.asarray(arr).at[..., indices].set(values) def sign(x): """Sign function, but returns 1 for x==0. @@ -580,7 +586,7 @@ def while_loop(cond_fun, body_fun, init_val): val = body_fun(val) return val - def vmap(fun, out_axes=0): + def vmap(fun, in_axes=0, out_axes=0): """A numpy implementation of jax.lax.map whose API is a subset of jax.vmap. Like Python's builtin map, @@ -591,6 +597,8 @@ def vmap(fun, out_axes=0): ---------- fun: callable Function (A -> B) + in_axes: int + Axis to map over. out_axes: int An integer indicating where the mapped axis should appear in the output. @@ -600,6 +608,10 @@ def vmap(fun, out_axes=0): Vectorized version of fun. """ + if in_axes != 0: + raise NotImplementedError( + "Backend for numpy vmap currently only supports in_axes=0." + ) def fun_vmap(fun_inputs): return np.stack([fun(fun_input) for fun_input in fun_inputs], axis=out_axes) @@ -763,6 +775,19 @@ def root( out = scipy.optimize.root(fun, x0, args, jac=jac, tol=tol) return out.x, out - def nonzero(a, *, size=None, fill_value=None): - """Same as np.nonzero but with dummy parameters to match jnp.nonzero API.""" - return np.nonzero(a) + def flatnonzero(a, *, size=None, fill_value=0): + """Numpy implementation of jnp.flatnonzero.""" + nz = np.flatnonzero(a) + if size is not None: + nz = np.append(nz, np.repeat(fill_value, max(size - nz.size, 0))) + return nz + + +def diff_mask(a, mask, n=1, axis=-1, prepend=None, append=None): + """Computes jnp.diff(a[mask], n, axis, prepend, append). + + The result is padded with zeros at the end to be jit compilable. + The shape matches the output of jnp.diff(a, n, axis, prepend, append). + """ + idx = flatnonzero(mask, size=a.size, fill_value=a.size - 1) + return jnp.diff(a[idx], n, axis, prepend, append) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 9ee2369946..87fc665d07 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -11,11 +11,12 @@ from desc.backend import ( complex_sqrt, cond, + diff_mask, fori_loop, jnp, - nonzero, put, put_along_axis, + vmap, ) from desc.grid import ConcentricGrid, Grid, LinearGrid, _meshgrid_expand @@ -1359,12 +1360,7 @@ def body(i, mins): def cubic_poly_roots( - coef, - constant=jnp.array([0]), - a_min=-jnp.inf, - a_max=jnp.inf, - return_complex=False, - fill=False, + coef, constant=jnp.array([0]), a_min=-jnp.inf, a_max=jnp.inf, fill=False ): """Roots of cubic polynomial. @@ -1382,15 +1378,11 @@ def cubic_poly_roots( a_max : ndarray Return nan if real part of root is less than ``a_max``. Should broadcast with arrays of shape ``coef.shape[1:]``. - return_complex : bool - If set to false, will return nan for complex roots. fill : bool If set to True, then the last axis of the output has size 5 instead of 3, where the first element is ``a_min`` and the last is ``a_max``. - This option also replaces undesirable roots with by duplicating a - desirable root with smaller real part. If no such root exists, - then ``a_min`` is used. The roots will be sorted from from smallest - to largest real part. + This option also replaces complex roots with ``a_min``. + The roots will be sorted from smallest to largest real part. 
Returns ------- @@ -1411,21 +1403,22 @@ def cubic_poly_roots( def roots(xi_k): t_3 = jnp.where(C_is_zero, 0, t_0 / (xi_k * C)) r = -(b + xi_k * C + t_3) / (3 * a) - r = jnp.where( - (return_complex | jnp.isreal(r)) & (a_min <= r) & (r <= a_max), r, jnp.nan - ) + return r + + def replace_roots(r): + r = jnp.where(jnp.isreal(r) & (a_min <= r) & (r <= a_max), jnp.real(r), a_min) return r xi_1 = (-1 + (-3) ** 0.5) / 2 xi_2 = xi_1**2 xi_3 = 1 - xi = jnp.stack([roots(xi_1), roots(xi_2), roots(xi_3)], axis=-1) + xi_1, xi_2, xi_3 = map(roots, (xi_1, xi_2, xi_3)) if fill: - xi = jnp.sort(xi, axis=-1) - xi_1 = jnp.where(jnp.isnan(xi[..., 0]), a_min, xi[..., 0]) - xi_2 = jnp.where(jnp.isnan(xi[..., 1]), xi[..., 0], xi[..., 1]) - xi_3 = jnp.where(jnp.isnan(xi[..., 2]), xi[..., 1], xi[..., 2]) - xi = jnp.stack(jnp.broadcast_arrays(a_min, xi_1, xi_2, xi_3, a_max), axis=-1) + xi_1, xi_2, xi_3 = map(replace_roots, (xi_1, xi_2, xi_3)) + xi = jnp.sort(jnp.stack([xi_1, xi_2, xi_3], axis=0), axis=0) + xi = jnp.stack(jnp.broadcast_arrays(a_min, xi[0], xi[1], xi[2], a_max), axis=-1) + else: + xi = jnp.stack([xi_1, xi_2, xi_3], axis=-1) return xi @@ -1527,15 +1520,11 @@ def tanh_sinh_quadrature(N, quad_limit=3.16): """ initial_points = jnp.linspace(-quad_limit, quad_limit, N) h = 2 * quad_limit / (N - 1) - x_k = jnp.tanh(0.5 * jnp.pi * jnp.sinh(initial_points)) + sinh = jnp.sinh(initial_points) + x_k = jnp.tanh(0.5 * jnp.pi * sinh) w_k = ( - 0.5 - * jnp.pi - * h - * jnp.cosh(initial_points) - / jnp.cosh(0.5 * jnp.pi * jnp.sinh(initial_points)) ** 2 + 0.5 * jnp.pi * h * jnp.cosh(initial_points) / jnp.cosh(0.5 * jnp.pi * sinh) ** 2 ) - return x_k, w_k @@ -1594,18 +1583,21 @@ def bounce_integral(eq, rho=None, alpha=None, zeta_max=10 * jnp.pi, resolution=2 data = eq.compute( ["B^zeta", "|B|", "|B|_z constant rho alpha"], grid=grid, data=data ) + ML = alpha.size * rho.size + N = zeta.size - 1 # number of splines per field line + NUM_ROOTS = 3 # max number of roots for cubic polynomial # TODO: https://github.com/f0uriest/interpax/issues/19 coef = CubicHermiteSpline( zeta, - data["|B|"].reshape(alpha.size, rho.size, zeta.size), - data["|B|_z constant rho alpha"].reshape(alpha.size, rho.size, zeta.size), + data["|B|"].reshape(ML, zeta.size), + data["|B|_z constant rho alpha"].reshape(ML, zeta.size), axis=-1, extrapolate="periodic", ).c coef = jnp.swapaxes(coef, 1, -1) der = polyder(coef) - assert coef.shape == (4, rho.size, alpha.size, zeta.size - 1) - assert der.shape == (3, rho.size, alpha.size, zeta.size - 1) + assert coef.shape == (4, ML, N) + assert der.shape == (3, ML, N) def _bounce_integral(name, lambda_pitch): """Compute the bounce integral of the named quantity. @@ -1619,8 +1611,8 @@ def _bounce_integral(name, lambda_pitch): Returns ------- - F : ndarray, shape(lambda_pitch.size, rho.size, alpha.size, resolution, 2) - Axes with size one will be squeezed out. + result : ndarray, shape(lambda_pitch.size, alpha.size, rho.size, + (resolution - 1) * 5 // 2) Bounce integrals evaluated at ``lambda_pitch`` for every field line. """ @@ -1635,62 +1627,62 @@ def _bounce_integral(name, lambda_pitch): # division accurately near the bounce points. 
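# ---------------------------------------------------------------------------
# [Editor's note] Illustrative sketch, not part of the patch. The
# tanh_sinh_quadrature helper added above targets integrands that blow up at
# the endpoints, like the 1 / sqrt(1 - lambda |B|) factor discussed in the
# comments here. Reproducing its nodes and weights in plain numpy and checking
# them on an integral with the same kind of singularity:
import numpy as np

N, quad_limit = 39, 3.0
t = np.linspace(-quad_limit, quad_limit, N)
h = 2 * quad_limit / (N - 1)
x_k = np.tanh(0.5 * np.pi * np.sinh(t))
w_k = 0.5 * np.pi * h * np.cosh(t) / np.cosh(0.5 * np.pi * np.sinh(t)) ** 2
estimate = np.sum(w_k / np.sqrt(1 - x_k**2))   # integral of 1/sqrt(1-x^2) on [-1, 1]
np.testing.assert_allclose(estimate, np.pi, rtol=1e-4)
# ---------------------------------------------------------------------------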
lambda_pitch = jnp.atleast_1d(lambda_pitch) interpolation_points = cubic_poly_roots( - coef, - constant=1 / lambda_pitch, - a_min=zeta[:-1], - a_max=zeta[1:], - fill=True, - ).reshape(lambda_pitch.size, rho.size, alpha.size, zeta.size - 1, 5) - b_norm_z = polyeval( - der[:, jnp.newaxis], interpolation_points[..., 1:-1] - ).reshape(lambda_pitch.size, rho.size, alpha.size, (zeta.size - 1) * 3) - # Check sign of gradient to determine whether root is a valid bounce point. - # Periodic boundary to compute bounce integrals of particles - # trapped outside this snapshot of the field lines. - is_well = (b_norm_z <= 0) & (jnp.roll(b_norm_z, -1, axis=-1) >= 0) - idx = jnp.arange((zeta.size - 1) * 3) - # Make is_well broadcast with interpolation_points. - is_well = put_along_axis( - jnp.zeros( - shape=(lambda_pitch.size, rho.size, alpha.size, (zeta.size - 1) * 5), - dtype=bool, - ), - (idx // 3) * 5 + 1 + (idx % 3), - is_well, - ) - # Can precompute everything above if lambda_pitch given to parent function. + coef, constant=1 / lambda_pitch, a_min=zeta[:-1], a_max=zeta[1:], fill=True + ).reshape(lambda_pitch.size, ML, N, NUM_ROOTS + 2) - y = jnp.nan_to_num( + integrand = jnp.nan_to_num( eq.compute(name, grid=grid, override_grid=False, data=data)[name] / ( data["B^zeta"] * jnp.sqrt(1 - lambda_pitch[:, jnp.newaxis] * data["|B|"]) ) - ).reshape(lambda_pitch.size, alpha.size, rho.size, zeta.size) - y = Akima1DInterpolator(zeta, y, axis=-1).c - y = jnp.moveaxis(y, [1, -1], [-1, 2]) - assert y.shape == (4, lambda_pitch.size, rho.size, alpha.size, zeta.size - 1) - Y = polyeval(polyint(y), interpolation_points).reshape( - lambda_pitch.size, rho.size, alpha.size, (zeta.size - 1) * 5 + ).reshape(lambda_pitch.size, ML, zeta.size) + integrand = Akima1DInterpolator(zeta, integrand, axis=-1).c + integrand = jnp.moveaxis(integrand, 1, -1) + assert integrand.shape == (4, lambda_pitch.size, ML, N) + primitive = polyeval(polyint(integrand), interpolation_points).reshape( + lambda_pitch.size, ML, N * (NUM_ROOTS + 2) ) sums = jnp.cumsum( - jnp.diff(Y, axis=-1, append=Y[..., 0, jnp.newaxis]) + # Periodic boundary to compute bounce integrals of particles + # trapped outside this snapshot of the field lines. + jnp.diff(primitive, axis=-1, append=primitive[..., 0, jnp.newaxis]) # Multiply by mask that is false at knots of piecewise spline # to avoid adding difference between primitives of splines at knots. - * jnp.append(jnp.arange(1, Y.shape[-1]) % 5 != 0, True), + * jnp.append(jnp.arange(1, primitive.shape[-1]) % 5 != 0, True), axis=-1, ) - # TODO: jnp.diff(sums[is_well], axis=-1)[::2] except - # padded with zeros at end to avoid dynamically sized output. - F = jnp.diff( - sums[nonzero(is_well, size=sums.size, fill_value=sums.size - 1)], + + b_norm_z = polyeval( + der[:, jnp.newaxis], interpolation_points[..., 1:-1] + ).reshape(lambda_pitch.size, ML, N * NUM_ROOTS) + # Check sign of gradient to determine whether root is a valid bounce point. + # Periodic boundary to compute bounce integrals of particles + # trapped outside this snapshot of the field lines. + is_well = (b_norm_z <= 0) & (jnp.roll(b_norm_z, -1, axis=-1) >= 0) + # Make is_well broadcast with interpolation_points. 
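For intuition, the sign test above can be reproduced on a toy field line (illustrative only, not DESC code): a root of λ|B| = 1 where |B| is decreasing opens a magnetic well, and the next root where |B| is increasing closes it.

    import numpy as np

    z = np.linspace(0, 4 * np.pi, 2000)
    B = 1 + 0.5 * np.cos(z)               # model |B| along the field line
    B_z = -0.5 * np.sin(z)                # its derivative
    pitch = 1 / 1.2                       # lambda, so the level 1 / lambda = 1.2
    crossings = np.nonzero(np.diff(np.sign(B - 1 / pitch)))[0]
    bp1 = crossings[B_z[crossings] <= 0]  # |B| falling through 1/lambda
    bp2 = crossings[B_z[crossings] >= 0]  # |B| rising through 1/lambda
    print(z[bp1], z[bp2])                 # pairs of bounce points along the line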
+ idx = jnp.arange(N * NUM_ROOTS) + is_well = put_along_axis( + arr=jnp.zeros(shape=(lambda_pitch.size, ML, N * 5), dtype=bool), + indices=(idx // NUM_ROOTS) * (NUM_ROOTS + 2) + 1 + (idx % NUM_ROOTS), + values=is_well, axis=-1, - )[::2] - return F + ).reshape(lambda_pitch.size * ML, N * 5) + args = (sums.reshape(lambda_pitch.size * ML, N * (NUM_ROOTS + 2)), is_well) + result = vmap(_diff_between_bounce_points)(args).reshape( + lambda_pitch.size, alpha.size, rho.size, N * (NUM_ROOTS + 2) // 2 + ) + return result return _bounce_integral +def _diff_between_bounce_points(args): + """Compute difference between bounce points specified in mask.""" + a, mask = args + return diff_mask(a, mask)[::2] + + def bounce_average(eq, rho=None, alpha=None, resolution=20): """Returns a method to compute the bounce average of any quantity. @@ -1749,11 +1741,13 @@ def _bounce_average(name, lambda_pitch): Returns ------- - G : ndarray, shape(lambda_pitch.size, rho.size, alpha.size, resolution, 2) + result : ndarray, shape(lambda_pitch.size, alpha.size, rho.size, + (resolution - 1) * 5 // 2) Bounce average evaluated at ``lambdas`` for every field line. """ - return bi(name, lambda_pitch) / bi("1", lambda_pitch) + result = bi(name, lambda_pitch) / bi("1", lambda_pitch) + return result return _bounce_average diff --git a/tests/test_compute_utils.py b/tests/test_compute_utils.py index e1ecaabafa..cf8e3ba04f 100644 --- a/tests/test_compute_utils.py +++ b/tests/test_compute_utils.py @@ -598,14 +598,14 @@ def test_cubic_poly_roots(self): assert np.unique(poly.shape).size == poly.ndim constant = np.arange(10) assert np.unique(poly.shape + constant.shape).size == poly.ndim + constant.ndim - out = cubic_poly_roots(poly, constant, return_complex=True) + out = np.sort(cubic_poly_roots(poly, constant), axis=-1) for j in range(poly.shape[1]): for k in range(poly.shape[2]): for s in range(constant.size): a, b, c, d = poly[:, j, k] d = d - constant[s] np.testing.assert_allclose( - np.sort_complex(out[s, j, k]), + out[s, j, k], np.sort_complex(np.roots([a, b, c, d])), ) From 8b83de0f4336cb59cad95a2f3a85d5fa18390204 Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 23 Feb 2024 02:11:04 -0500 Subject: [PATCH 020/241] Spline integral implementation finished --- desc/compute/utils.py | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 87fc665d07..f0409b48a6 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -1629,6 +1629,22 @@ def _bounce_integral(name, lambda_pitch): interpolation_points = cubic_poly_roots( coef, constant=1 / lambda_pitch, a_min=zeta[:-1], a_max=zeta[1:], fill=True ).reshape(lambda_pitch.size, ML, N, NUM_ROOTS + 2) + b_norm_z = polyeval( + der[:, jnp.newaxis], interpolation_points[..., 1:-1] + ).reshape(lambda_pitch.size, ML, N * NUM_ROOTS) + # Check sign of gradient to determine whether root is a valid bounce point. + # Periodic boundary to compute bounce integrals of particles + # trapped outside this snapshot of the field lines. + is_well = (b_norm_z <= 0) & (jnp.roll(b_norm_z, -1, axis=-1) >= 0) + # Make is_well broadcast with interpolation_points. + idx = jnp.arange(N * NUM_ROOTS) + is_well = put_along_axis( + arr=jnp.zeros(shape=(lambda_pitch.size, ML, N * 5), dtype=bool), + indices=(idx // NUM_ROOTS) * (NUM_ROOTS + 2) + 1 + (idx % NUM_ROOTS), + values=is_well, + axis=-1, + ).reshape(lambda_pitch.size * ML, N * 5) + # Can precompute everything above if lambda_pitch given to parent function. 
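A quick standalone check of the index map used in the scatter above (plain numpy): each batch of three root slots lands in the middle of a batch of five slots laid out as [left knot, root 1, root 2, root 3, right knot].

    import numpy as np

    N, NUM_ROOTS = 2, 3
    idx = np.arange(N * NUM_ROOTS)
    dest = (idx // NUM_ROOTS) * (NUM_ROOTS + 2) + 1 + (idx % NUM_ROOTS)
    print(dest)  # [1 2 3 6 7 8]; slots 0, 4, 5, 9 stay reserved for the knots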
integrand = jnp.nan_to_num( eq.compute(name, grid=grid, override_grid=False, data=data)[name] @@ -1651,25 +1667,9 @@ def _bounce_integral(name, lambda_pitch): # to avoid adding difference between primitives of splines at knots. * jnp.append(jnp.arange(1, primitive.shape[-1]) % 5 != 0, True), axis=-1, - ) + ).reshape(lambda_pitch.size * ML, N * (NUM_ROOTS + 2)) - b_norm_z = polyeval( - der[:, jnp.newaxis], interpolation_points[..., 1:-1] - ).reshape(lambda_pitch.size, ML, N * NUM_ROOTS) - # Check sign of gradient to determine whether root is a valid bounce point. - # Periodic boundary to compute bounce integrals of particles - # trapped outside this snapshot of the field lines. - is_well = (b_norm_z <= 0) & (jnp.roll(b_norm_z, -1, axis=-1) >= 0) - # Make is_well broadcast with interpolation_points. - idx = jnp.arange(N * NUM_ROOTS) - is_well = put_along_axis( - arr=jnp.zeros(shape=(lambda_pitch.size, ML, N * 5), dtype=bool), - indices=(idx // NUM_ROOTS) * (NUM_ROOTS + 2) + 1 + (idx % NUM_ROOTS), - values=is_well, - axis=-1, - ).reshape(lambda_pitch.size * ML, N * 5) - args = (sums.reshape(lambda_pitch.size * ML, N * (NUM_ROOTS + 2)), is_well) - result = vmap(_diff_between_bounce_points)(args).reshape( + result = vmap(_diff_between_bounce_points)((sums, is_well)).reshape( lambda_pitch.size, alpha.size, rho.size, N * (NUM_ROOTS + 2) // 2 ) return result From 9f76d4fed587fbd9edf99cc91775bccab4baf0f0 Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 23 Feb 2024 18:58:10 -0500 Subject: [PATCH 021/241] Cover edge case in diff_mask function, fix backend.bincount, backend.repeat... and add backend.take --- desc/backend.py | 58 ++++++++++++++++------- desc/compute/utils.py | 108 +++++++++++++++++++++++++++++++++++------- desc/grid.py | 13 +++-- 3 files changed, 139 insertions(+), 40 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index 4990d16660..f079228341 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -73,7 +73,9 @@ vmap = jax.vmap scan = jax.lax.scan bincount = jnp.bincount + repeat = jnp.repeat flatnonzero = jnp.flatnonzero + take = jnp.take from jax import custom_jvp from jax.experimental.ode import odeint from jax.scipy.linalg import block_diag, cho_factor, cho_solve, qr, solve_triangular @@ -130,7 +132,7 @@ def put_along_axis(arr, indices, values, axis): """ if axis != -1: raise NotImplementedError( - "JAX put_along_axis currently only supports axis=-1." + f"put_along_axis for axis={axis} not implemented yet." ) if isinstance(arr, np.ndarray): arr[..., indices] = values @@ -610,7 +612,7 @@ def vmap(fun, in_axes=0, out_axes=0): """ if in_axes != 0: raise NotImplementedError( - "Backend for numpy vmap currently only supports in_axes=0." + f"Backend for numpy vmap for in_axes={in_axes} not implemented yet." 
) def fun_vmap(fun_inputs): @@ -657,13 +659,21 @@ def scan(f, init, xs, length=None, reverse=False, unroll=1): ys.append(y) return carry, np.stack(ys) - def bincount(x, weights=None, minlength=None, length=None): - """Same as np.bincount but with a dummy parameter to match jnp.bincount API.""" - return np.bincount(x, weights, minlength) + def bincount(x, weights=None, minlength=0, length=None): + """Numpy implementation of jnp.bincount.""" + x = np.clip(x, 0, None) + if length is None: + length = max(minlength, x.max() + 1) + else: + minlength = max(minlength, length) + return np.bincount(x, weights, minlength)[:length] def repeat(a, repeats, axis=None, total_repeat_length=None): - """Same as np.repeat but with a dummy parameter to match jnp.repeat API.""" - return np.repeat(a, repeats, axis) + """Numpy implementation of jnp.repeat.""" + out = np.repeat(a, repeats, axis) + if total_repeat_length is not None: + out = out[:total_repeat_length] + return out def custom_jvp(fun, *args, **kwargs): """Dummy function for custom_jvp without JAX.""" @@ -775,19 +785,33 @@ def root( out = scipy.optimize.root(fun, x0, args, jac=jac, tol=tol) return out.x, out - def flatnonzero(a, *, size=None, fill_value=0): + def flatnonzero(a, size=None, fill_value=0): """Numpy implementation of jnp.flatnonzero.""" nz = np.flatnonzero(a) if size is not None: nz = np.append(nz, np.repeat(fill_value, max(size - nz.size, 0))) return nz - -def diff_mask(a, mask, n=1, axis=-1, prepend=None, append=None): - """Computes jnp.diff(a[mask], n, axis, prepend, append). - - The result is padded with zeros at the end to be jit compilable. - The shape matches the output of jnp.diff(a, n, axis, prepend, append). - """ - idx = flatnonzero(mask, size=a.size, fill_value=a.size - 1) - return jnp.diff(a[idx], n, axis, prepend, append) + def take( + a, + indices, + axis=None, + out=None, + mode="fill", + unique_indices=False, + indices_are_sorted=False, + fill_value=None, + ): + """Numpy implementation of jnp.take.""" + if mode == "fill": + if fill_value is None: + # TODO: Interpret default fill value based on dtype of a. 
+ fill_value = np.nan + out = np.where( + (-a.size <= indices) & (indices < a.size), + np.take(a, indices, axis, out, mode="wrap"), + fill_value, + ) + else: + out = np.take(a, indices, axis, out, mode) + return out diff --git a/desc/compute/utils.py b/desc/compute/utils.py index f0409b48a6..5475fbe6b6 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -11,11 +11,13 @@ from desc.backend import ( complex_sqrt, cond, - diff_mask, + flatnonzero, fori_loop, jnp, put, put_along_axis, + take, + use_jax, vmap, ) from desc.grid import ConcentricGrid, Grid, LinearGrid, _meshgrid_expand @@ -1406,7 +1408,7 @@ def roots(xi_k): return r def replace_roots(r): - r = jnp.where(jnp.isreal(r) & (a_min <= r) & (r <= a_max), jnp.real(r), a_min) + r = jnp.where(jnp.isreal(r), jnp.clip(jnp.real(r), a_min, a_max), a_min) return r xi_1 = (-1 + (-3) ** 0.5) / 2 @@ -1587,17 +1589,17 @@ def bounce_integral(eq, rho=None, alpha=None, zeta_max=10 * jnp.pi, resolution=2 N = zeta.size - 1 # number of splines per field line NUM_ROOTS = 3 # max number of roots for cubic polynomial # TODO: https://github.com/f0uriest/interpax/issues/19 - coef = CubicHermiteSpline( + poly_B_norm = CubicHermiteSpline( zeta, data["|B|"].reshape(ML, zeta.size), data["|B|_z constant rho alpha"].reshape(ML, zeta.size), axis=-1, extrapolate="periodic", ).c - coef = jnp.swapaxes(coef, 1, -1) - der = polyder(coef) - assert coef.shape == (4, ML, N) - assert der.shape == (3, ML, N) + poly_B_norm = jnp.moveaxis(poly_B_norm, 1, -1) + poly_B_norm_z = polyder(poly_B_norm) + assert poly_B_norm.shape == (4, ML, N) + assert poly_B_norm_z.shape == (3, ML, N) def _bounce_integral(name, lambda_pitch): """Compute the bounce integral of the named quantity. @@ -1623,27 +1625,31 @@ def _bounce_integral(name, lambda_pitch): # line would require root finding to map field line coordinates to desc # coordinates. So we approximate functions in the integrand with splines # and perform Gauss-Quadrature. - # TODO: spline functions separately since no polynomial cannot capture the - # division accurately near the bounce points. + # TODO: spline functions separately since no polynomial can capture the + # division in integrand accurately near the bounce points. lambda_pitch = jnp.atleast_1d(lambda_pitch) interpolation_points = cubic_poly_roots( - coef, constant=1 / lambda_pitch, a_min=zeta[:-1], a_max=zeta[1:], fill=True + poly_B_norm, + constant=1 / lambda_pitch, + a_min=zeta[:-1], + a_max=zeta[1:], + fill=True, ).reshape(lambda_pitch.size, ML, N, NUM_ROOTS + 2) - b_norm_z = polyeval( - der[:, jnp.newaxis], interpolation_points[..., 1:-1] + B_norm_z = polyeval( + poly_B_norm_z[:, jnp.newaxis], interpolation_points[..., 1:-1] ).reshape(lambda_pitch.size, ML, N * NUM_ROOTS) # Check sign of gradient to determine whether root is a valid bounce point. # Periodic boundary to compute bounce integrals of particles # trapped outside this snapshot of the field lines. - is_well = (b_norm_z <= 0) & (jnp.roll(b_norm_z, -1, axis=-1) >= 0) + is_well = (B_norm_z <= 0) & (jnp.roll(B_norm_z, -1, axis=-1) >= 0) # Make is_well broadcast with interpolation_points. 
idx = jnp.arange(N * NUM_ROOTS) is_well = put_along_axis( - arr=jnp.zeros(shape=(lambda_pitch.size, ML, N * 5), dtype=bool), + arr=jnp.zeros((lambda_pitch.size, ML, N * (NUM_ROOTS + 2)), dtype=bool), indices=(idx // NUM_ROOTS) * (NUM_ROOTS + 2) + 1 + (idx % NUM_ROOTS), values=is_well, axis=-1, - ).reshape(lambda_pitch.size * ML, N * 5) + ).reshape(lambda_pitch.size * ML, N * (NUM_ROOTS + 2)) # Can precompute everything above if lambda_pitch given to parent function. integrand = jnp.nan_to_num( @@ -1665,7 +1671,9 @@ def _bounce_integral(name, lambda_pitch): jnp.diff(primitive, axis=-1, append=primitive[..., 0, jnp.newaxis]) # Multiply by mask that is false at knots of piecewise spline # to avoid adding difference between primitives of splines at knots. - * jnp.append(jnp.arange(1, primitive.shape[-1]) % 5 != 0, True), + * jnp.append( + jnp.arange(1, N * (NUM_ROOTS + 2)) % (NUM_ROOTS + 2) != 0, True + ), axis=-1, ).reshape(lambda_pitch.size * ML, N * (NUM_ROOTS + 2)) @@ -1683,6 +1691,74 @@ def _diff_between_bounce_points(args): return diff_mask(a, mask)[::2] +def diff_mask(a, mask, n=1, axis=-1, prepend=None): + """Calculate the n-th discrete difference along the given axis of ``a[mask]``. + + The first difference is given by ``out[i] = a[i+1] - a[i]`` along + the given axis, higher differences are calculated by using `diff` + recursively. + + Parameters + ---------- + a : array_like + Input array + mask : array_like + Boolean mask to index like ``a[mask]`` prior to computing difference. + Should have same size as ``a``. + n : int, optional + The number of times values are differenced. If zero, the input + is returned as-is. + axis : int, optional + The axis along which the difference is taken, default is the + last axis. + prepend : array_like, optional + Values to prepend to `a` along axis prior to + performing the difference. Scalar values are expanded to + arrays with length 1 in the direction of axis and the shape + of the input array in along all other axes. Otherwise the + dimension and shape must match `a` except along axis. + + Returns + ------- + diff : ndarray + The n-th differences. The shape of the output is the same as `a` + except along `axis` where the dimension is smaller by `n`. The + type of the output is the same as the type of the difference + between any two elements of `a`. This is the same as the type of + `a` in most cases. A notable exception is `datetime64`, which + results in a `timedelta64` output array. + + Notes + ----- + The result is padded with zeros at the end to be jit compilable. + The current implementation removes all nan values in the output as a side effect. + + """ + if prepend is None and not use_jax: + # https://github.com/numpy/numpy/blob/ + # d35cd07ea997f033b2d89d349734c61f5de54b0d/ + # numpy/lib/function_base.py#L1324-L1454 + prepend = np._NoValue + indices = flatnonzero(mask, size=mask.size, fill_value=mask.size) + diff = jnp.nan_to_num( + jnp.diff( + take( + a, + indices, + axis=0, + mode="fill", + fill_value=jnp.nan, + unique_indices=True, + indices_are_sorted=True, + ), + n, + axis, + prepend, + ) + ) + return diff + + def bounce_average(eq, rho=None, alpha=None, resolution=20): """Returns a method to compute the bounce average of any quantity. 
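A plain-numpy sketch of what diff_mask produces (illustrative only; the function above reaches the same result with flatnonzero and a filled gather): the differences of the masked elements, front-loaded and padded with zeros so the output shape stays static under JIT.

    import numpy as np

    a = np.array([3., 7., 2., 9., 4.])
    mask = np.array([True, False, True, True, False])
    print(np.diff(a[mask]))                  # [-1.  7.]  shape depends on mask
    idx = np.flatnonzero(mask)
    idx = np.append(idx, np.full(a.size - idx.size, a.size))
    gathered = np.where(idx < a.size, a[np.minimum(idx, a.size - 1)], np.nan)
    print(np.nan_to_num(np.diff(gathered)))  # [-1.  7.  0.  0.]  static shape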
diff --git a/desc/grid.py b/desc/grid.py index 1b60b35ccc..3c29fde955 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -5,7 +5,7 @@ import numpy as np from scipy import optimize, special -from desc.backend import jnp, put +from desc.backend import jnp, put, repeat, take from desc.io import IOAble from desc.utils import Index @@ -315,11 +315,11 @@ def compress(self, x, surface_label="rho"): assert surface_label in {"rho", "theta", "zeta"} assert len(x) == self.num_nodes if surface_label == "rho": - return x[self.unique_rho_idx] + return take(x, self.unique_rho_idx, axis=0, unique_indices=True) if surface_label == "theta": - return x[self.unique_theta_idx] + return take(x, self.unique_theta_idx, axis=0, unique_indices=True) if surface_label == "zeta": - return x[self.unique_zeta_idx] + return take(x, self.unique_zeta_idx, axis=0, unique_indices=True) def expand(self, x, surface_label="rho"): """Expand ``x`` by duplicating elements to match the grid's pattern. @@ -1519,12 +1519,11 @@ def _meshgrid_expand(x, rho_size, theta_size, zeta_size, surface_label="rho"): if surface_label == "rho": assert len(x) == rho_size return jnp.tile( - jnp.repeat(x, zeta_size, total_repeat_length=rho_size * zeta_size), - theta_size, + repeat(x, zeta_size, total_repeat_length=rho_size * zeta_size), theta_size ) if surface_label == "theta": assert len(x) == theta_size - return jnp.repeat( + return repeat( x, rho_size * zeta_size, total_repeat_length=rho_size * theta_size * zeta_size, From db9185b8beaf756fc18d1769f1dbf48ae1391610 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 24 Feb 2024 02:19:27 -0500 Subject: [PATCH 022/241] Clean up bounce integrals API --- desc/compute/utils.py | 289 +++++++++++++++++++++++++++++------------- 1 file changed, 204 insertions(+), 85 deletions(-) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 5475fbe6b6..d6a5330fcd 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -1361,9 +1361,7 @@ def body(i, mins): return grid.expand(mins, surface_label) -def cubic_poly_roots( - coef, constant=jnp.array([0]), a_min=-jnp.inf, a_max=jnp.inf, fill=False -): +def cubic_poly_roots(coef, constant=jnp.array([0]), a_min=None, a_max=None, fill=False): """Roots of cubic polynomial. Parameters @@ -1374,27 +1372,34 @@ def cubic_poly_roots( It is assumed that c₁ is nonzero. constant : ndarray, shape(constant.size, ) Specify to instead find solutions to c₁ x³ + c₂ x² + c₃ x + c₄ = ``constant``. - a_min : ndarray - Return nan if real part of root is less than ``a_min``. - Should broadcast with arrays of shape ``coef.shape[1:]``. - a_max : ndarray - Return nan if real part of root is less than ``a_max``. - Should broadcast with arrays of shape ``coef.shape[1:]``. + a_min, a_max : ndarray + Minimum and maximum value to clip roots between. + Complex roots are always clipped to ``a_min``. + If None, clipping is not performed on the corresponding edge. + Both are broadcast against arrays of shape ``coef.shape[1:]``. fill : bool - If set to True, then the last axis of the output has size 5 instead - of 3, where the first element is ``a_min`` and the last is ``a_max``. - This option also replaces complex roots with ``a_min``. - The roots will be sorted from smallest to largest real part. + If true, the last axis of the output has size 5 instead of 3, + where the clipping boundaries surround the roots. + Along the last axis, the first element is ``a_min``, followed + by the three clipped roots in sorted order, then ``a_max``. 
+ If no clipping boundaries are specified, the roots are clipped + to the real line. Returns ------- - xi : ndarray, shape(constant.size, coef.shape, ?) + xi : ndarray, shape(constant.size, coef.shape, 3 + 2 * bool(fill)) If constant has one element, the first axis will be squeezed out. The roots of the cubic polynomial. """ # https://en.wikipedia.org/wiki/Cubic_equation#General_cubic_formula # The common libraries use root-finding which isn't compatible with JAX. + clip = fill or a_min is not None or a_max is not None + if a_min is None: + a_min = -jnp.inf + if a_max is None: + a_max = jnp.inf + a, b, c, d = coef d = jnp.squeeze((d[jnp.newaxis].T - constant).T) t_0 = b**2 - 3 * a * c @@ -1407,7 +1412,7 @@ def roots(xi_k): r = -(b + xi_k * C + t_3) / (3 * a) return r - def replace_roots(r): + def clip_roots(r): r = jnp.where(jnp.isreal(r), jnp.clip(jnp.real(r), a_min, a_max), a_min) return r @@ -1415,10 +1420,13 @@ def replace_roots(r): xi_2 = xi_1**2 xi_3 = 1 xi_1, xi_2, xi_3 = map(roots, (xi_1, xi_2, xi_3)) - if fill: - xi_1, xi_2, xi_3 = map(replace_roots, (xi_1, xi_2, xi_3)) - xi = jnp.sort(jnp.stack([xi_1, xi_2, xi_3], axis=0), axis=0) - xi = jnp.stack(jnp.broadcast_arrays(a_min, xi[0], xi[1], xi[2], a_max), axis=-1) + if clip: + xi_1, xi_2, xi_3 = map(clip_roots, (xi_1, xi_2, xi_3)) + if fill: + xi = jnp.sort(jnp.stack([xi_1, xi_2, xi_3], axis=0), axis=0) + xi = jnp.stack( + jnp.broadcast_arrays(a_min, xi[0], xi[1], xi[2], a_max), axis=-1 + ) else: xi = jnp.stack([xi_1, xi_2, xi_3], axis=-1) return xi @@ -1530,7 +1538,46 @@ def tanh_sinh_quadrature(N, quad_limit=3.16): return x_k, w_k -def bounce_integral(eq, rho=None, alpha=None, zeta_max=10 * jnp.pi, resolution=20): +def _interp_well(lambda_pitch, zeta, ML, poly_B_norm, poly_B_norm_z): + # Helper function for bounce_integrals(). + N = zeta.size - 1 + NUM_ROOTS = 3 + + interpolation_points = cubic_poly_roots( + poly_B_norm, + constant=1 / lambda_pitch, + a_min=zeta[:-1], + a_max=zeta[1:], + fill=True, + ).reshape(lambda_pitch.size, ML, N, NUM_ROOTS + 2) + B_norm_z = polyeval( + poly_B_norm_z[:, jnp.newaxis], interpolation_points[..., 1:-1] + ).reshape(lambda_pitch.size, ML, N * NUM_ROOTS) + # Check sign of gradient to determine whether root is a valid bounce point. + # Periodic boundary to compute bounce integrals of particles + # trapped outside this snapshot of the field lines. + is_well = (B_norm_z <= 0) & (jnp.roll(B_norm_z, -1, axis=-1) >= 0) + # Make last axis of is_well broadcast with interpolation_points. + idx = jnp.arange(N * NUM_ROOTS) + is_well = put_along_axis( + arr=jnp.zeros((lambda_pitch.size, ML, N * (NUM_ROOTS + 2)), dtype=bool), + indices=(idx // NUM_ROOTS) * (NUM_ROOTS + 2) + 1 + (idx % NUM_ROOTS), + values=is_well, + axis=-1, + ).reshape(lambda_pitch.size * ML, N * (NUM_ROOTS + 2)) + + return interpolation_points, is_well + + +def bounce_integral( + eq, + rho=None, + alpha=None, + zeta_max=10 * jnp.pi, + lambda_pitch=None, + resolution=20, + method="spline", +): """Returns a method to compute the bounce integral of any quantity. The bounce integral is defined as F_ℓ(λ) = ∫ f(ℓ) / √(1 − λ |B|) dℓ, where @@ -1557,8 +1604,25 @@ def bounce_integral(eq, rho=None, alpha=None, zeta_max=10 * jnp.pi, resolution=2 Unique field line label coordinates over a constant rho surface. zeta_max : float Max value for field line following coordinate. + lambda_pitch : ndarray + λ values to evaluate the bounce integral at. resolution : int - Number of quadrature points used to compute the bounce integral. 
+ Number of interpolation points (knots) used for splines in the quadrature. + A maximum of three bounce points can be detected in between knots. + The accuracy of the quadrature will increase as some function of + the number of knots over the number of detected bounce points. + So for well-behaved magnetic fields increasing resolution should increase + the accuracy of the quadrature. + method : str + The method to evaluate the integral. + The "spline" method exactly integrates a cubic spline of the integrand. + The "trapezoid" method performs a trapezoidal quadrature over evenly + spaced samples of the integrand. The integrand is estimated by using distinct + cubic splines for components in the integrand so that the singularity from + the division by zero near the bounce points can be captured more accurately + than can be represented by a polynomial. + The "quad" method performs a Gauss quadrature estimating the integrand + with splines as in the trapezoidal method. Returns ------- @@ -1579,14 +1643,15 @@ def bounce_integral(eq, rho=None, alpha=None, zeta_max=10 * jnp.pi, resolution=2 rho = jnp.linspace(0, 1, 10) if alpha is None: alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 20) - + rho = jnp.atleast_1d(rho) + alpha = jnp.atleast_1d(alpha) zeta = np.linspace(0, zeta_max, resolution) grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) data = eq.compute( ["B^zeta", "|B|", "|B|_z constant rho alpha"], grid=grid, data=data ) ML = alpha.size * rho.size - N = zeta.size - 1 # number of splines per field line + N = zeta.size - 1 # number of piecewise cubic polynomials per field line NUM_ROOTS = 3 # max number of roots for cubic polynomial # TODO: https://github.com/f0uriest/interpax/issues/19 poly_B_norm = CubicHermiteSpline( @@ -1601,7 +1666,14 @@ def bounce_integral(eq, rho=None, alpha=None, zeta_max=10 * jnp.pi, resolution=2 assert poly_B_norm.shape == (4, ML, N) assert poly_B_norm_z.shape == (3, ML, N) - def _bounce_integral(name, lambda_pitch): + _lambda_pitch = ( + jnp.empty(0) if lambda_pitch is None else jnp.atleast_1d(lambda_pitch) + ) + _interpolation_points, _is_well = _interp_well( + _lambda_pitch, zeta, ML, poly_B_norm, poly_B_norm_z + ) + + def _spline(name, lambda_pitch=None): """Compute the bounce integral of the named quantity. Parameters @@ -1610,47 +1682,25 @@ def _bounce_integral(name, lambda_pitch): Name of quantity in ``data_index`` to compute the bounce integral of. lambda_pitch : ndarray λ values to evaluate the bounce integral at. + If None, uses the values given to the parent function. Returns ------- result : ndarray, shape(lambda_pitch.size, alpha.size, rho.size, (resolution - 1) * 5 // 2) - Bounce integrals evaluated at ``lambda_pitch`` for every field line. + The last axis iterates through every bounce integral performed + along that field line padded by zeros. """ - # Newton-Cotes quadrature would be inaccurate as the bounce points are not - # guaranteed to be near the fixed quadrature points. - # Gauss-Quadrature on the exact integrand is expensive to perform because - # evaluating the integrand at the optimal quadrature points along the field - # line would require root finding to map field line coordinates to desc - # coordinates. So we approximate functions in the integrand with splines - # and perform Gauss-Quadrature. - # TODO: spline functions separately since no polynomial can capture the - # division in integrand accurately near the bounce points. 
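For context on the TODO above (illustrative, not DESC code): at a simple bounce point 1 − λ|B| vanishes linearly in ζ, so the integrand scales like (ζ_b − ζ)^(−1/2). That singularity is integrable, but no cubic polynomial can follow it, which is why the smooth factors are better splined separately.

    import numpy as np
    from scipy.integrate import quad

    # Classic example of an integrable inverse square root singularity:
    # the integral of x**-0.5 over [0, 1] is exactly 2 even though the
    # integrand is unbounded at x = 0.
    val, err = quad(lambda x: 1 / np.sqrt(x), 0, 1)
    print(val)  # ~2.0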
- lambda_pitch = jnp.atleast_1d(lambda_pitch) - interpolation_points = cubic_poly_roots( - poly_B_norm, - constant=1 / lambda_pitch, - a_min=zeta[:-1], - a_max=zeta[1:], - fill=True, - ).reshape(lambda_pitch.size, ML, N, NUM_ROOTS + 2) - B_norm_z = polyeval( - poly_B_norm_z[:, jnp.newaxis], interpolation_points[..., 1:-1] - ).reshape(lambda_pitch.size, ML, N * NUM_ROOTS) - # Check sign of gradient to determine whether root is a valid bounce point. - # Periodic boundary to compute bounce integrals of particles - # trapped outside this snapshot of the field lines. - is_well = (B_norm_z <= 0) & (jnp.roll(B_norm_z, -1, axis=-1) >= 0) - # Make is_well broadcast with interpolation_points. - idx = jnp.arange(N * NUM_ROOTS) - is_well = put_along_axis( - arr=jnp.zeros((lambda_pitch.size, ML, N * (NUM_ROOTS + 2)), dtype=bool), - indices=(idx // NUM_ROOTS) * (NUM_ROOTS + 2) + 1 + (idx % NUM_ROOTS), - values=is_well, - axis=-1, - ).reshape(lambda_pitch.size * ML, N * (NUM_ROOTS + 2)) - # Can precompute everything above if lambda_pitch given to parent function. + if lambda_pitch is None: + lambda_pitch = _lambda_pitch + interpolation_points = _interpolation_points + is_well = _is_well + else: + lambda_pitch = jnp.atleast_1d(lambda_pitch) + interpolation_points, is_well = _interp_well( + lambda_pitch, zeta, ML, poly_B_norm, poly_B_norm_z + ) integrand = jnp.nan_to_num( eq.compute(name, grid=grid, override_grid=False, data=data)[name] @@ -1659,9 +1709,13 @@ def _bounce_integral(name, lambda_pitch): * jnp.sqrt(1 - lambda_pitch[:, jnp.newaxis] * data["|B|"]) ) ).reshape(lambda_pitch.size, ML, zeta.size) + # TODO: https://github.com/f0uriest/interpax/issues/19 integrand = Akima1DInterpolator(zeta, integrand, axis=-1).c integrand = jnp.moveaxis(integrand, 1, -1) assert integrand.shape == (4, lambda_pitch.size, ML, N) + # Computing integral via difference of primitives is easiest. + # E.g. Simpson's rule on spline requires computing spline on 1.8x more + # knots for the same accuracy. primitive = polyeval(polyint(integrand), interpolation_points).reshape( lambda_pitch.size, ML, N * (NUM_ROOTS + 2) ) @@ -1677,16 +1731,56 @@ def _bounce_integral(name, lambda_pitch): axis=-1, ).reshape(lambda_pitch.size * ML, N * (NUM_ROOTS + 2)) - result = vmap(_diff_between_bounce_points)((sums, is_well)).reshape( + result = vmap(_diff_mask_odd_pairs)((sums, is_well)).reshape( lambda_pitch.size, alpha.size, rho.size, N * (NUM_ROOTS + 2) // 2 ) return result - return _bounce_integral + def _trapezoid(name, lambda_pitch=None): + """Compute the bounce integral of the named quantity. + Parameters + ---------- + name : ndarray + Name of quantity in ``data_index`` to compute the bounce integral of. + lambda_pitch : ndarray + λ values to evaluate the bounce integral at. + If None, uses the values given to the parent function. -def _diff_between_bounce_points(args): - """Compute difference between bounce points specified in mask.""" + Returns + ------- + result : ndarray, shape(lambda_pitch.size, alpha.size, rho.size, + (resolution - 1) * 5 // 2) + The last axis iterates through every bounce integral performed + along that field line padded by zeros. + + """ + + def _quad(name, lambda_pitch=None): + """Compute the bounce integral of the named quantity. + + Parameters + ---------- + name : ndarray + Name of quantity in ``data_index`` to compute the bounce integral of. + lambda_pitch : ndarray + λ values to evaluate the bounce integral at. + If None, uses the values given to the parent function. 
+ + Returns + ------- + result : ndarray, shape(lambda_pitch.size, alpha.size, rho.size, + (resolution - 1) * 5 // 2) + The last axis iterates through every bounce integral performed + along that field line padded by zeros. + + """ + + return {"spline": _spline, "trapezoid": _trapezoid, "quad": _quad}[method] + + +def _diff_mask_odd_pairs(args): + """Compute non-overlapping difference between indices specified in mask.""" a, mask = args return diff_mask(a, mask)[::2] @@ -1706,27 +1800,23 @@ def diff_mask(a, mask, n=1, axis=-1, prepend=None): Boolean mask to index like ``a[mask]`` prior to computing difference. Should have same size as ``a``. n : int, optional - The number of times values are differenced. If zero, the input - is returned as-is. + The number of times values are differenced. axis : int, optional The axis along which the difference is taken, default is the last axis. prepend : array_like, optional - Values to prepend to `a` along axis prior to - performing the difference. Scalar values are expanded to - arrays with length 1 in the direction of axis and the shape - of the input array in along all other axes. Otherwise the - dimension and shape must match `a` except along axis. + Values to prepend to `a` along axis prior to performing the difference. + Scalar values are expanded to arrays with length 1 in the direction of + axis and the shape of the input array in along all other axes. + Otherwise, the dimension and shape must match `a` except along axis. Returns ------- diff : ndarray - The n-th differences. The shape of the output is the same as `a` - except along `axis` where the dimension is smaller by `n`. The + The n-th differences. The shape of the output is the same as ``a`` + except along ``axis`` where the dimension is smaller by ``n``. The type of the output is the same as the type of the difference - between any two elements of `a`. This is the same as the type of - `a` in most cases. A notable exception is `datetime64`, which - results in a `timedelta64` output array. + between any two elements of ``a``. Notes ----- @@ -1759,7 +1849,15 @@ def diff_mask(a, mask, n=1, axis=-1, prepend=None): return diff -def bounce_average(eq, rho=None, alpha=None, resolution=20): +def bounce_average( + eq, + rho=None, + alpha=None, + zeta_max=10 * jnp.pi, + lambda_pitch=None, + resolution=20, + method="spline", +): """Returns a method to compute the bounce average of any quantity. The bounce average is defined as @@ -1780,18 +1878,38 @@ def bounce_average(eq, rho=None, alpha=None, resolution=20): Parameters ---------- eq : Equilibrium - Equilibrium on which the bounce integral is defined. + Equilibrium on which the bounce average is defined. rho : ndarray Unique flux surface label coordinates. alpha : ndarray Unique field line label coordinates over a constant rho surface. + zeta_max : float + Max value for field line following coordinate. + lambda_pitch : ndarray + λ values to evaluate the bounce average at. + Defaults to linearly spaced values between min and max of |B|. resolution : int - Number of quadrature points used to compute the bounce integral. + Number of interpolation points (knots) used for splines in the quadrature. + A maximum of three bounce points can be detected in between knots. + The accuracy of the quadrature will increase as some function of + the number of knots over the number of detected bounce points. + So for well-behaved magnetic fields increasing resolution should increase + the accuracy of the quadrature. 
+ method : str + The method to evaluate the integral. + The "spline" method exactly integrates a cubic spline of the integrand. + The "trapezoid" method performs a trapezoidal quadrature over evenly + spaced samples of the integrand. The integrand is estimated by using distinct + cubic splines for components in the integrand so that the singularity from + the division by zero near the bounce points can be captured more accurately + than can be represented by a polynomial. + The "quad" method performs a Gauss quadrature estimating the integrand + with splines as in the trapezoidal method. Returns ------- ba : callable - This callable method computes the bounce integral G_ℓ(λ) for every + This callable method computes the bounce average G_ℓ(λ) for every specified field line ℓ (constant rho and alpha), for every λ value in ``lambdas``. @@ -1803,9 +1921,9 @@ def bounce_average(eq, rho=None, alpha=None, resolution=20): G = ba(name, lambda_pitch) """ - bi = bounce_integral(eq, rho, alpha, resolution) + bi = bounce_integral(eq, rho, alpha, zeta_max, lambda_pitch, resolution, method) - def _bounce_average(name, lambda_pitch): + def _bounce_average(name, lambda_pitch=None): """Compute the bounce average of the named quantity. Parameters @@ -1813,16 +1931,18 @@ def _bounce_average(name, lambda_pitch): name : ndarray Name of quantity in ``data_index`` to compute the bounce average of. lambda_pitch : ndarray - λ values to evaluate the bounce integral at. + λ values to evaluate the bounce average at. + If None, uses the values given to the parent function. Returns ------- result : ndarray, shape(lambda_pitch.size, alpha.size, rho.size, (resolution - 1) * 5 // 2) - Bounce average evaluated at ``lambdas`` for every field line. + The last axis iterates through every bounce average performed + along that field line padded by zeros. """ - result = bi(name, lambda_pitch) / bi("1", lambda_pitch) + result = safediv(bi(name, lambda_pitch), bi("1", lambda_pitch)) return result return _bounce_average @@ -1842,8 +1962,7 @@ def field_line_to_desc_coords(rho, alpha, zeta, eq): # map_coordinates. That method requires an initial guess to be compatible with JIT, # and generating a reasonable initial guess requires computing the rotational # transform to approximate theta_PEST and the poloidal stream function anyway. - # TODO: In general, Linear Grid construction is not jit compatible. - # This issue can be worked around with a specific routine for this. + # TODO: map coords recently updated, so maybe just switch to that lg = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym) lg_data = eq.compute("iota", grid=lg) data = { From 69f7a0eb821a27cee48c359693d3d4cc8f593571 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 25 Feb 2024 04:02:14 -0500 Subject: [PATCH 023/241] Add tanh_sinh quadrature bounce integrals. Clean up API. --- desc/backend.py | 4 + desc/compute/utils.py | 699 ++++++++++++++++++++++-------------- tests/test_compute_utils.py | 12 +- 3 files changed, 443 insertions(+), 272 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index f079228341..9270cd6751 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -616,6 +616,10 @@ def vmap(fun, in_axes=0, out_axes=0): ) def fun_vmap(fun_inputs): + if isinstance(fun_inputs, tuple): + raise NotImplementedError( + "Backend implementation of vmap fails for multiple arguments." 
+ ) return np.stack([fun(fun_input) for fun_input in fun_inputs], axis=out_axes) return fun_vmap diff --git a/desc/compute/utils.py b/desc/compute/utils.py index d6a5330fcd..6d1a8f56c4 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -5,6 +5,7 @@ import warnings import numpy as np +from interpax import interp1d from scipy.interpolate import Akima1DInterpolator, CubicHermiteSpline from termcolor import colored @@ -1361,146 +1362,82 @@ def body(i, mins): return grid.expand(mins, surface_label) -def cubic_poly_roots(coef, constant=jnp.array([0]), a_min=None, a_max=None, fill=False): - """Roots of cubic polynomial. - - Parameters - ---------- - coef : ndarray - First axis should store coefficients of a polynomial. For a polynomial - given by c₁ x³ + c₂ x² + c₃ x + c₄, ``coef[i]`` should store cᵢ. - It is assumed that c₁ is nonzero. - constant : ndarray, shape(constant.size, ) - Specify to instead find solutions to c₁ x³ + c₂ x² + c₃ x + c₄ = ``constant``. - a_min, a_max : ndarray - Minimum and maximum value to clip roots between. - Complex roots are always clipped to ``a_min``. - If None, clipping is not performed on the corresponding edge. - Both are broadcast against arrays of shape ``coef.shape[1:]``. - fill : bool - If true, the last axis of the output has size 5 instead of 3, - where the clipping boundaries surround the roots. - Along the last axis, the first element is ``a_min``, followed - by the three clipped roots in sorted order, then ``a_max``. - If no clipping boundaries are specified, the roots are clipped - to the real line. - - Returns - ------- - xi : ndarray, shape(constant.size, coef.shape, 3 + 2 * bool(fill)) - If constant has one element, the first axis will be squeezed out. - The roots of the cubic polynomial. - - """ - # https://en.wikipedia.org/wiki/Cubic_equation#General_cubic_formula - # The common libraries use root-finding which isn't compatible with JAX. - clip = fill or a_min is not None or a_max is not None - if a_min is None: - a_min = -jnp.inf - if a_max is None: - a_max = jnp.inf - - a, b, c, d = coef - d = jnp.squeeze((d[jnp.newaxis].T - constant).T) - t_0 = b**2 - 3 * a * c - t_1 = 2 * b**3 - 9 * a * b * c + 27 * a**2 * d - C = ((t_1 + complex_sqrt(t_1**2 - 4 * t_0**3)) / 2) ** (1 / 3) - C_is_zero = jnp.isclose(C, 0) - - def roots(xi_k): - t_3 = jnp.where(C_is_zero, 0, t_0 / (xi_k * C)) - r = -(b + xi_k * C + t_3) / (3 * a) - return r - - def clip_roots(r): - r = jnp.where(jnp.isreal(r), jnp.clip(jnp.real(r), a_min, a_max), a_min) - return r - - xi_1 = (-1 + (-3) ** 0.5) / 2 - xi_2 = xi_1**2 - xi_3 = 1 - xi_1, xi_2, xi_3 = map(roots, (xi_1, xi_2, xi_3)) - if clip: - xi_1, xi_2, xi_3 = map(clip_roots, (xi_1, xi_2, xi_3)) - if fill: - xi = jnp.sort(jnp.stack([xi_1, xi_2, xi_3], axis=0), axis=0) - xi = jnp.stack( - jnp.broadcast_arrays(a_min, xi[0], xi[1], xi[2], a_max), axis=-1 - ) - else: - xi = jnp.stack([xi_1, xi_2, xi_3], axis=-1) - return xi - - -def polyint(coef): +def polyint(c): """Coefficients for the primitives of the given set of polynomials. Parameters ---------- - coef : ndarray + c : ndarray First axis should store coefficients of a polynomial. - For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``coef.shape[0] - 1``, - coefficient cᵢ should be stored at ``coef[n - i]``. + For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, + coefficient cᵢ should be stored at ``c[n - i]``. Returns ------- poly : ndarray Coefficients of polynomial primitive, ignoring the arbitrary constant. 
That is, ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁺¹, - where n is ``coef.shape[0] - 1``. + where n is ``c.shape[0] - 1``. """ - poly = (coef.T / jnp.arange(coef.shape[0], 0, -1)).T + poly = (c.T / jnp.arange(c.shape[0], 0, -1)).T return poly -def polyder(coef): +def polyder(c): """Coefficients for the derivatives of the given set of polynomials. Parameters ---------- - coef : ndarray + c : ndarray First axis should store coefficients of a polynomial. - For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``coef.shape[0] - 1``, - coefficient cᵢ should be stored at ``coef[n - i]``. + For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, + coefficient cᵢ should be stored at ``c[n - i]``. Returns ------- poly : ndarray Coefficients of polynomial derivative, ignoring the arbitrary constant. That is, ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁻¹, - where n is ``coef.shape[0] - 1``. + where n is ``c.shape[0] - 1``. """ - poly = (coef[:-1].T * jnp.arange(coef.shape[0] - 1, 0, -1)).T + poly = (c[:-1].T * jnp.arange(c.shape[0] - 1, 0, -1)).T return poly -def polyeval(coef, x): - """Evaluate the set of polynomials at the points x. +def polyval(x, c): + """Evaluate the set of polynomials c at the points x. Parameters ---------- - coef : ndarray - First axis should store coefficients of a polynomial. - For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``coef.shape[0] - 1``, - coefficient cᵢ should be stored at ``coef[n - i]``. x : ndarray Coordinates at which to evaluate the set of polynomials. - The first ``coef.ndim`` axes should have shape ``coef.shape[1:]``. + The first ``c.ndim`` axes should have shape ``c.shape[1:]``. + c : ndarray + First axis should store coefficients of a polynomial. + For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, + coefficient cᵢ should be stored at ``c[n - i]``. Returns ------- - f : ndarray - ``f[j, k, ...]`` is the polynomial with coefficients ``coef[:, j, k, ...]`` + val : ndarray + ``val[j, k, ...]`` is the polynomial with coefficients ``c[:, j, k, ...]`` evaluated at the point ``x[j, k, ...]``. + Notes + ----- + This function does not perform the same operation as + ``np.polynomial.polynomial.polyval(x, c)``. + An example usage of this function is shown in + tests/test_compute_utils.py::TestComputeUtils::test_polyval. + """ - X = (x[jnp.newaxis].T ** jnp.arange(coef.shape[0] - 1, -1, -1)).T + X = (x[jnp.newaxis].T ** jnp.arange(c.shape[0] - 1, -1, -1)).T alphabet = "abcdefghijklmnopqrstuvwxyz" - sub = alphabet[: coef.ndim] - f = jnp.einsum(f"{sub},{sub}...->{sub[1:]}...", coef, X) - return f + sub = alphabet[: c.ndim] + val = jnp.einsum(f"{sub},{sub}...->{sub[1:]}...", c, X) + return val def tanh_sinh_quadrature(N, quad_limit=3.16): @@ -1538,35 +1475,231 @@ def tanh_sinh_quadrature(N, quad_limit=3.16): return x_k, w_k -def _interp_well(lambda_pitch, zeta, ML, poly_B_norm, poly_B_norm_z): - # Helper function for bounce_integrals(). - N = zeta.size - 1 +def cubic_poly_roots(coef, constant=jnp.array([0]), a_min=None, a_max=None, sort=False): + """Roots of cubic polynomial. + + Parameters + ---------- + coef : ndarray + First axis should store coefficients of a polynomial. For a polynomial + given by c₁ x³ + c₂ x² + c₃ x + c₄, ``coef[i]`` should store cᵢ. + It is assumed that c₁ is nonzero. + constant : ndarray, shape(constant.size, ) + Specify to instead find solutions to c₁ x³ + c₂ x² + c₃ x + c₄ = ``constant``. + a_min, a_max : ndarray + Minimum and maximum value to clip roots between. 
+ Complex roots are always clipped to ``a_min``. + If None, clipping is not performed on the corresponding edge. + Both arrays are broadcast against arrays of shape ``coef.shape[1:]``. + sort : bool + Whether to sort the roots. + + Returns + ------- + roots : ndarray, shape(constant.size, coef.shape, 3) + If constant has one element, the first axis will be squeezed out. + The roots of the cubic polynomial. + + """ + # https://en.wikipedia.org/wiki/Cubic_equation#General_cubic_formula + # The common libraries use root-finding which isn't compatible with JAX. + clip = a_min is not None or a_max is not None + if a_min is None: + a_min = -jnp.inf + if a_max is None: + a_max = jnp.inf + + a, b, c, d = coef + d = jnp.squeeze((d[jnp.newaxis].T - constant).T) + t_0 = b**2 - 3 * a * c + t_1 = 2 * b**3 - 9 * a * b * c + 27 * a**2 * d + C = ((t_1 + complex_sqrt(t_1**2 - 4 * t_0**3)) / 2) ** (1 / 3) + C_is_zero = jnp.isclose(C, 0) + + def compute_roots(xi_k): + t_3 = jnp.where(C_is_zero, 0, t_0 / (xi_k * C)) + r = -(b + xi_k * C + t_3) / (3 * a) + return r + + def clip_roots(r): + r = jnp.where(jnp.isreal(r), jnp.clip(jnp.real(r), a_min, a_max), a_min) + return r + + xi_1 = (-1 + (-3) ** 0.5) / 2 + xi_2 = xi_1**2 + xi_3 = 1 + roots = tuple(map(compute_roots, (xi_1, xi_2, xi_3))) + if clip: + roots = tuple(map(clip_roots, roots)) + roots = jnp.stack(roots, axis=-1) + if sort: + roots = jnp.sort(roots, axis=-1) + return roots + + +def _get_bounce_points(pitch, zeta, poly_B_norm, poly_B_norm_z, include_knots=False): + """Get the bounce points given |B| and 1 / λ. + + Parameters + ---------- + pitch : ndarray + λ values representing the constant function 1 / λ. + zeta : ndarray + Field line-following ζ coordinates of spline knots. + poly_B_norm : ndarray + Polynomial coefficients of the cubic spline of |B|. + poly_B_norm_z : ndarray + Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. + include_knots : bool + Whether to return the knots of the spline along with the intersection points. + If False, the last axis of the returned ``intersect`` array stores the following + points for each polynomial: + [intersect_1, intersect_2, intersect_3] + If True, the last axis takes the form: + [left_knot, intersect_1, intersect_2, intersect_3, right_knot] + + Returns + ------- + intersect, is_bp, bp1, bp2 : ndarray, ndarray, ndarray, ndarray + The polynomials' intersection points with 1 / λ is given by ``intersect``. + In order to be JIT compilable, the returned array must have a shape that + accommodates the case where each cubic polynomial intersects 1 / λ thrice. + This requires that ``intersect`` have shape + (pitch.size * poly_B_norm.shape[1], poly_B_norm.shape[2], 3 + 2 * fill) + + The boolean mask ``is_bp`` encodes whether a given entry in ``intersect`` + is a valid bounce point. The boolean masks ``bp1`` and ``bp2`` encode whether + a given entry in ``intersect`` is a valid starting and ending bounce point, + respectively. These arrays have shape + (pitch.size * poly_B_norm.shape[1], poly_B_norm.shape[2] * (3 + 2 * fill)) + + """ + ML = poly_B_norm.shape[1] + N = poly_B_norm.shape[2] NUM_ROOTS = 3 - interpolation_points = cubic_poly_roots( - poly_B_norm, - constant=1 / lambda_pitch, - a_min=zeta[:-1], - a_max=zeta[1:], - fill=True, - ).reshape(lambda_pitch.size, ML, N, NUM_ROOTS + 2) - B_norm_z = polyeval( - poly_B_norm_z[:, jnp.newaxis], interpolation_points[..., 1:-1] - ).reshape(lambda_pitch.size, ML, N * NUM_ROOTS) - # Check sign of gradient to determine whether root is a valid bounce point. 
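A minimal standalone check of the coefficient convention assumed by polyval above (leading axis of ``c`` holds the coefficients, highest power first), using nothing beyond numpy:

    import numpy as np

    c = np.array([[1.0, 2.0],    # x**2 coefficients of two polynomials
                  [0.0, 1.0],    # x**1
                  [3.0, -1.0]])  # x**0
    x = np.array([2.0, 3.0])     # one evaluation point per polynomial
    # Same einsum contraction as polyval for c.ndim == 2.
    X = (x[np.newaxis].T ** np.arange(c.shape[0] - 1, -1, -1)).T
    print(np.einsum("ab,ab...->b...", c, X))              # [ 7. 20.]
    print([np.polyval(c[:, k], x[k]) for k in range(2)])  # same values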
- # Periodic boundary to compute bounce integrals of particles - # trapped outside this snapshot of the field lines. - is_well = (B_norm_z <= 0) & (jnp.roll(B_norm_z, -1, axis=-1) >= 0) - # Make last axis of is_well broadcast with interpolation_points. - idx = jnp.arange(N * NUM_ROOTS) - is_well = put_along_axis( - arr=jnp.zeros((lambda_pitch.size, ML, N * (NUM_ROOTS + 2)), dtype=bool), - indices=(idx // NUM_ROOTS) * (NUM_ROOTS + 2) + 1 + (idx % NUM_ROOTS), - values=is_well, + a_min = zeta[:-1] + a_max = zeta[1:] + intersect = cubic_poly_roots( + poly_B_norm, 1 / pitch, a_min, a_max, sort=True + ).reshape(pitch.size, ML, N, NUM_ROOTS) + + B_norm_z = polyval(intersect, poly_B_norm_z[:, jnp.newaxis]).reshape( + pitch.size * ML, N * NUM_ROOTS + ) + # Check sign of derivative to determine whether root is a valid bounce point. + bp1 = B_norm_z <= 0 + bp2 = B_norm_z >= 0 + # Periodic boundary to compute bounce integrals of particles trapped outside + # this snapshot of the field lines. + is_bp = bp1 & jnp.roll(bp2, -1, axis=-1) + bp1 = bp1 & is_bp + bp2 = bp2 & is_bp + + if include_knots: + arrays = jnp.broadcast_arrays( + a_min, intersect[..., 0], intersect[..., 1], intersect[..., 2], a_max + ) + intersect = jnp.stack(arrays, axis=-1) + R = NUM_ROOTS + 2 + is_bp = stretch_batches(is_bp, NUM_ROOTS, R, fill=False) + bp1 = stretch_batches(bp1, NUM_ROOTS, R, fill=False) + bp2 = stretch_batches(bp2, NUM_ROOTS, R, fill=False) + + return intersect, is_bp, bp1, bp2 + + +def stretch_batches(in_arr, in_batch_size, out_batch_size, fill): + """Stretch batches of ``in_arr``. + + Given that ``in_arr`` is composed of N batches of ``in_batch_size`` + along its last axis, stretch the last axis so that it is composed of + N batches of ``out_batch_size``. The ``out_batch_size - in_batch_size`` + missing elements in each batch are populated with ``fill``. + By default, these elements are populated evenly surrounding the input + batches. + + Parameters + ---------- + in_arr : ndarray, shape(..., in_batch_size * N) + Input array + in_batch_size : int + Length of batches along last axis of input array. + out_batch_size : int + Length of batches along last axis of output array. + fill : bool or int or float + Value to fill at missing indices of each batch. + + Returns + ------- + out_arr : ndarray, shape(..., out_batch_size * N) + Output array + + """ + assert out_batch_size >= in_batch_size + N = in_arr.shape[-1] // in_batch_size + out_shape = in_arr.shape[:-1] + (N * out_batch_size,) + offset = (out_batch_size - in_batch_size) // 2 + idx = jnp.arange(in_arr.shape[-1]) + out_arr = put_along_axis( + arr=jnp.full(out_shape, fill, dtype=in_arr.dtype), + indices=(idx // in_batch_size) * out_batch_size + + offset + + (idx % in_batch_size), + values=in_arr, axis=-1, - ).reshape(lambda_pitch.size * ML, N * (NUM_ROOTS + 2)) + ) + return out_arr + + +def _compute_if_new_pitch(pitch, *original, method="quad", err=True, **kwargs): + """Return the quantities needed by the ``bounce_integrals`` function. + + Parameters + ---------- + pitch : ndarray + λ values representing the constant function 1 / λ. + If None, returns the given ``original`` tuple. + original : tuple + pitch, intersect, is_bp, bp1, bp2. + err : bool + Whether to raise an error if ``pitch`` is None and ``original`` is empty. + method : str + "quad" or "spline". + kwargs + Additional keyword arguments passed to ``_get_bounce_points``. + + Returns + ------- + output : tuple + If method is "quad", returns pitch, bp1, bp2, X. 
+ If method is "spline", returns pitch intersect, is_bp. - return interpolation_points, is_well + """ + if pitch is None: + if err and not original: + raise ValueError("No pitch values were provided.") + return original + else: + pitch = jnp.atleast_1d(pitch) + if method == "quad": + fun = kwargs.pop("fun") + x = kwargs.pop("x") + intersect, _, bp1, bp2 = _get_bounce_points(pitch, **kwargs) + intersect = intersect.reshape(bp1.shape) + bp1 = fun((intersect, bp1)) + bp2 = fun((intersect, bp2)) + X = x * ((bp2 - bp1) + bp2)[..., jnp.newaxis] + output = pitch, bp1, bp2, X + else: + intersect, is_bp, _, _ = _get_bounce_points( + pitch, include_knots=True, **kwargs + ) + intersect = intersect.reshape( + (intersect.shape[0] * intersect.shape[1],) + intersect.shape[2:] + ) + output = pitch, intersect, is_bp + return output def bounce_integral( @@ -1574,9 +1707,9 @@ def bounce_integral( rho=None, alpha=None, zeta_max=10 * jnp.pi, - lambda_pitch=None, + pitch=None, resolution=20, - method="spline", + method="quad", ): """Returns a method to compute the bounce integral of any quantity. @@ -1604,7 +1737,7 @@ def bounce_integral( Unique field line label coordinates over a constant rho surface. zeta_max : float Max value for field line following coordinate. - lambda_pitch : ndarray + pitch : ndarray λ values to evaluate the bounce integral at. resolution : int Number of interpolation points (knots) used for splines in the quadrature. @@ -1616,181 +1749,235 @@ def bounce_integral( method : str The method to evaluate the integral. The "spline" method exactly integrates a cubic spline of the integrand. - The "trapezoid" method performs a trapezoidal quadrature over evenly - spaced samples of the integrand. The integrand is estimated by using distinct - cubic splines for components in the integrand so that the singularity from - the division by zero near the bounce points can be captured more accurately - than can be represented by a polynomial. - The "quad" method performs a Gauss quadrature estimating the integrand - with splines as in the trapezoidal method. + The "quad" method performs a Gauss quadrature and estimates the integrand + by using distinct cubic splines for components in the integrand so that + the singularity from the division by zero near the bounce points can be + captured more accurately than can be represented by a polynomial. Returns ------- bi : callable This callable method computes the bounce integral F_ℓ(λ) for every specified field line ℓ (constant rho and alpha), for every λ value in - ``lambda_pitch``. + ``pitch``. Examples -------- .. 
code-block:: python bi = bounce_integral(eq) - F = bi(name, lambda_pitch) + F = bi(name, pitch) """ - if rho is None: - rho = jnp.linspace(0, 1, 10) - if alpha is None: - alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 20) - rho = jnp.atleast_1d(rho) - alpha = jnp.atleast_1d(alpha) - zeta = np.linspace(0, zeta_max, resolution) - grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) - data = eq.compute( - ["B^zeta", "|B|", "|B|_z constant rho alpha"], grid=grid, data=data - ) - ML = alpha.size * rho.size - N = zeta.size - 1 # number of piecewise cubic polynomials per field line - NUM_ROOTS = 3 # max number of roots for cubic polynomial - # TODO: https://github.com/f0uriest/interpax/issues/19 - poly_B_norm = CubicHermiteSpline( - zeta, - data["|B|"].reshape(ML, zeta.size), - data["|B|_z constant rho alpha"].reshape(ML, zeta.size), - axis=-1, - extrapolate="periodic", - ).c - poly_B_norm = jnp.moveaxis(poly_B_norm, 1, -1) - poly_B_norm_z = polyder(poly_B_norm) - assert poly_B_norm.shape == (4, ML, N) - assert poly_B_norm_z.shape == (3, ML, N) - - _lambda_pitch = ( - jnp.empty(0) if lambda_pitch is None else jnp.atleast_1d(lambda_pitch) - ) - _interpolation_points, _is_well = _interp_well( - _lambda_pitch, zeta, ML, poly_B_norm, poly_B_norm_z - ) - def _spline(name, lambda_pitch=None): - """Compute the bounce integral of the named quantity. + def _spline(name, pitch=None): + """Compute the bounce integral of the named quantity using the spline method. Parameters ---------- name : ndarray Name of quantity in ``data_index`` to compute the bounce integral of. - lambda_pitch : ndarray + pitch : ndarray λ values to evaluate the bounce integral at. If None, uses the values given to the parent function. Returns ------- - result : ndarray, shape(lambda_pitch.size, alpha.size, rho.size, - (resolution - 1) * 5 // 2) + result : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 2) The last axis iterates through every bounce integral performed along that field line padded by zeros. """ - if lambda_pitch is None: - lambda_pitch = _lambda_pitch - interpolation_points = _interpolation_points - is_well = _is_well - else: - lambda_pitch = jnp.atleast_1d(lambda_pitch) - interpolation_points, is_well = _interp_well( - lambda_pitch, zeta, ML, poly_B_norm, poly_B_norm_z - ) - + pitch, intersect, is_bp = _compute_if_new_pitch( + pitch, + *original, + method="spline", + zeta=zeta, + poly_B_norm=poly_B_norm, + poly_B_norm_z=poly_B_norm_z, + ) integrand = jnp.nan_to_num( eq.compute(name, grid=grid, override_grid=False, data=data)[name] - / ( - data["B^zeta"] - * jnp.sqrt(1 - lambda_pitch[:, jnp.newaxis] * data["|B|"]) - ) - ).reshape(lambda_pitch.size, ML, zeta.size) + / (data["B^zeta"] * jnp.sqrt(1 - pitch[:, jnp.newaxis] * data["|B|"])) + ).reshape(pitch.size * M * L, resolution) + # TODO: https://github.com/f0uriest/interpax/issues/19 integrand = Akima1DInterpolator(zeta, integrand, axis=-1).c + integrand = jnp.moveaxis(integrand, 1, -1) - assert integrand.shape == (4, lambda_pitch.size, ML, N) - # Computing integral via difference of primitives is easiest. - # E.g. Simpson's rule on spline requires computing spline on 1.8x more - # knots for the same accuracy. - primitive = polyeval(polyint(integrand), interpolation_points).reshape( - lambda_pitch.size, ML, N * (NUM_ROOTS + 2) + assert integrand.shape == (4, pitch.size * M * L, N) + # For this algorithm, computing integrals via differences of primitives + # is preferable to any numerical quadrature. 
For example, even if the + # intersection points were evenly spaced, a composite Simpson's quadrature + # would require computing the spline on 1.8x more knots for the same accuracy. + R = NUM_ROOTS + 2 + primitive = polyval(intersect, polyint(integrand)).reshape( + pitch.size * M * L, N * R ) sums = jnp.cumsum( # Periodic boundary to compute bounce integrals of particles # trapped outside this snapshot of the field lines. - jnp.diff(primitive, axis=-1, append=primitive[..., 0, jnp.newaxis]) + jnp.diff(primitive, axis=-1, append=primitive[:, 0, jnp.newaxis]) # Multiply by mask that is false at knots of piecewise spline # to avoid adding difference between primitives of splines at knots. - * jnp.append( - jnp.arange(1, N * (NUM_ROOTS + 2)) % (NUM_ROOTS + 2) != 0, True - ), + * jnp.append(jnp.arange(1, N * R) % R != 0, True), axis=-1, - ).reshape(lambda_pitch.size * ML, N * (NUM_ROOTS + 2)) - - result = vmap(_diff_mask_odd_pairs)((sums, is_well)).reshape( - lambda_pitch.size, alpha.size, rho.size, N * (NUM_ROOTS + 2) // 2 ) + result = fun((sums, is_bp)).reshape(pitch.size, M, L, N * R // 2) return result - def _trapezoid(name, lambda_pitch=None): - """Compute the bounce integral of the named quantity. + def _quad(name, pitch=None): + """Compute the bounce integral of the named quantity using the quad method. Parameters ---------- name : ndarray Name of quantity in ``data_index`` to compute the bounce integral of. - lambda_pitch : ndarray + pitch : ndarray λ values to evaluate the bounce integral at. If None, uses the values given to the parent function. Returns ------- - result : ndarray, shape(lambda_pitch.size, alpha.size, rho.size, - (resolution - 1) * 5 // 2) + result : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 2) The last axis iterates through every bounce integral performed along that field line padded by zeros. """ + pitch, bp1, bp2, X = _compute_if_new_pitch( + pitch, + *original, + method="quad", + zeta=zeta, + poly_B_norm=poly_B_norm, + poly_B_norm_z=poly_B_norm_z, + x=x, + fun=fun, + ) + assert X.shape == (pitch.size * M * L, N * 2, x.size) + + def body(i, integral): + k = i % (N * 2) + j = i // (N * 2) + p = i // (M * L * N * 2) + v = (i // (N * 2)) % pitch.size + # TODO: Add Hermite spline to interpax to pass in B_z[i]. + integrand = interp1d(X[j, k], zeta, f[v]) / ( + interp1d(X[j, k], zeta, B_sup_z[v]) + * jnp.sqrt(1 - pitch[p] * interp1d(X[j, k], zeta, B[v])) + ) + integral = put(integral, i, jnp.sum(w * integrand)) + return integral - def _quad(name, lambda_pitch=None): - """Compute the bounce integral of the named quantity. + f = eq.compute(name, grid=grid, override_grid=False, data=data)[name].reshape( + M * L, resolution + ) + B_sup_z = data["B^zeta"].reshape(M * L, resolution) + result = jnp.nan_to_num( + # TODO: Vectorize interpax to do this with 1 call with einsum. + fori_loop(0, pitch.size * M * L * N * 2, body, jnp.empty(X.shape[:-1])) + * jnp.pi + / (bp2 - bp1) + ).reshape(pitch.size, M, L, N * 2) + return result - Parameters - ---------- - name : ndarray - Name of quantity in ``data_index`` to compute the bounce integral of. - lambda_pitch : ndarray - λ values to evaluate the bounce integral at. - If None, uses the values given to the parent function. 
+ if rho is None: + rho = jnp.linspace(0, 1, 10) + if alpha is None: + alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 20) + rho = jnp.atleast_1d(rho) + alpha = jnp.atleast_1d(alpha) + zeta = np.linspace(0, zeta_max, resolution) + L = rho.size + M = alpha.size + N = resolution - 1 # number of piecewise cubic polynomials per field line + NUM_ROOTS = 3 # number of roots for cubic polynomial - Returns - ------- - result : ndarray, shape(lambda_pitch.size, alpha.size, rho.size, - (resolution - 1) * 5 // 2) - The last axis iterates through every bounce integral performed - along that field line padded by zeros. + grid, data = field_line_to_desc_coords(eq, rho, alpha, zeta) + data = eq.compute( + ["B^zeta", "|B|", "|B|_z constant rho alpha"], grid=grid, data=data + ) + B = data["|B|"].reshape(M * L, resolution) - """ + # TODO: https://github.com/f0uriest/interpax/issues/19 + poly_B_norm = CubicHermiteSpline( + zeta, + B, + data["|B|_z constant rho alpha"].reshape(M * L, resolution), + axis=-1, + extrapolate="periodic", + ).c - return {"spline": _spline, "trapezoid": _trapezoid, "quad": _quad}[method] + poly_B_norm = jnp.moveaxis(poly_B_norm, 1, -1) + poly_B_norm_z = polyder(poly_B_norm) + assert poly_B_norm.shape == (4, M * L, N) + assert poly_B_norm_z.shape == (3, M * L, N) + + kwargs = {} + if method == "quad": + x, w = tanh_sinh_quadrature(resolution) + x = jnp.arcsin(x) / jnp.pi - 0.5 + fun = vmap(lambda args: mask_take(*args, size=N * 2, fill_value=0)) + bi = _quad + kwargs["fun"] = fun + kwargs["x"] = x + else: + fun = vmap(lambda args: mask_diff(*args)[::2]) + bi = _spline + original = _compute_if_new_pitch( + pitch, + method=method, + err=False, + zeta=zeta, + poly_B_norm=poly_B_norm, + poly_B_norm_z=poly_B_norm_z, + **kwargs, + ) + return bi -def _diff_mask_odd_pairs(args): - """Compute non-overlapping difference between indices specified in mask.""" - a, mask = args - return diff_mask(a, mask)[::2] +def mask_take(a, mask, size, fill_value=jnp.nan): + """JIT compilable method to return ``a[mask]`` padded by ``fill_value``. + Parameters + ---------- + a : ndarray + The source array. + mask : ndarray + Boolean mask to index into ``a``. + size : + Elements of ``a`` at the first size True indices of ``mask`` will be returned. + If there are fewer elements than size indicates, the returned array will be + padded with fill_value. + fill_value : + When there are fewer than the indicated number of elements, + the remaining elements will be filled with ``fill_value``. + + Returns + ------- + a_mask : ndarray, shape(size, ) + Output array. -def diff_mask(a, mask, n=1, axis=-1, prepend=None): + """ + idx = flatnonzero(mask, size=size, fill_value=mask.size) + a_mask = take( + a, + idx, + axis=0, + mode="fill", + fill_value=fill_value, + unique_indices=True, + indices_are_sorted=True, + ) + return a_mask + + +def mask_diff(a, mask, n=1, axis=-1, prepend=None): """Calculate the n-th discrete difference along the given axis of ``a[mask]``. The first difference is given by ``out[i] = a[i+1] - a[i]`` along the given axis, higher differences are calculated by using `diff` - recursively. + recursively. This method is JIT compatible. 
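The padded gather performed by ``mask_take`` above is what keeps these masked operations JIT compilable: the output shape is fixed at ``size`` no matter how many mask entries are true. A minimal NumPy sketch of that behavior (the emulation below is illustrative only, not the in-tree implementation):

.. code-block:: python

    import numpy as np

    def mask_take_np(a, mask, size, fill_value=np.nan):
        # Take a[mask], then pad (or truncate) to a fixed length so the
        # output shape is static, as JIT compilation requires.
        idx = np.flatnonzero(mask)[:size]
        out = np.full(size, fill_value, dtype=float)
        out[: idx.size] = a[idx]
        return out

    a = np.arange(6.0)
    mask = np.array([False, True, False, True, True, False])
    print(mask_take_np(a, mask, size=5))  # [ 1.  3.  4. nan nan]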
Parameters ---------- @@ -1829,23 +2016,7 @@ def diff_mask(a, mask, n=1, axis=-1, prepend=None): # d35cd07ea997f033b2d89d349734c61f5de54b0d/ # numpy/lib/function_base.py#L1324-L1454 prepend = np._NoValue - indices = flatnonzero(mask, size=mask.size, fill_value=mask.size) - diff = jnp.nan_to_num( - jnp.diff( - take( - a, - indices, - axis=0, - mode="fill", - fill_value=jnp.nan, - unique_indices=True, - indices_are_sorted=True, - ), - n, - axis, - prepend, - ) - ) + diff = jnp.nan_to_num(jnp.diff(mask_take(a, mask, mask.size), n, axis, prepend)) return diff @@ -1854,9 +2025,9 @@ def bounce_average( rho=None, alpha=None, zeta_max=10 * jnp.pi, - lambda_pitch=None, + pitch=None, resolution=20, - method="spline", + method="quad", ): """Returns a method to compute the bounce average of any quantity. @@ -1885,7 +2056,7 @@ def bounce_average( Unique field line label coordinates over a constant rho surface. zeta_max : float Max value for field line following coordinate. - lambda_pitch : ndarray + pitch : ndarray λ values to evaluate the bounce average at. Defaults to linearly spaced values between min and max of |B|. resolution : int @@ -1898,13 +2069,10 @@ def bounce_average( method : str The method to evaluate the integral. The "spline" method exactly integrates a cubic spline of the integrand. - The "trapezoid" method performs a trapezoidal quadrature over evenly - spaced samples of the integrand. The integrand is estimated by using distinct - cubic splines for components in the integrand so that the singularity from - the division by zero near the bounce points can be captured more accurately - than can be represented by a polynomial. - The "quad" method performs a Gauss quadrature estimating the integrand - with splines as in the trapezoidal method. + The "quad" method performs a Gauss quadrature and estimates the integrand + by using distinct cubic splines for components in the integrand so that + the singularity from the division by zero near the bounce points can be + captured more accurately than can be represented by a polynomial. Returns ------- @@ -1918,41 +2086,40 @@ def bounce_average( .. code-block:: python ba = bounce_average(eq) - G = ba(name, lambda_pitch) + G = ba(name, pitch) """ - bi = bounce_integral(eq, rho, alpha, zeta_max, lambda_pitch, resolution, method) + bi = bounce_integral(eq, rho, alpha, zeta_max, pitch, resolution, method) - def _bounce_average(name, lambda_pitch=None): - """Compute the bounce average of the named quantity. + def _bounce_average(name, pitch=None): + """Compute the bounce average of the named quantity using the spline method. Parameters ---------- name : ndarray Name of quantity in ``data_index`` to compute the bounce average of. - lambda_pitch : ndarray + pitch : ndarray λ values to evaluate the bounce average at. If None, uses the values given to the parent function. Returns ------- - result : ndarray, shape(lambda_pitch.size, alpha.size, rho.size, - (resolution - 1) * 5 // 2) + result : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 2) The last axis iterates through every bounce average performed along that field line padded by zeros. 
""" - result = safediv(bi(name, lambda_pitch), bi("1", lambda_pitch)) + result = safediv(bi(name, pitch), bi("1", pitch)) return result return _bounce_average -def field_line_to_desc_coords(rho, alpha, zeta, eq): - """Get desc grid from unique field line coords.""" +def field_line_to_desc_coords(eq, rho, alpha, zeta): + """Get DESC grid from unique field line coordinates.""" r, a, z = jnp.meshgrid(rho, alpha, zeta, indexing="ij") r, a, z = r.ravel(), a.ravel(), z.ravel() - # Now we map these Clebsch-Type field-line coordinates to DESC coordinates. + # Map these Clebsch-Type field-line coordinates to DESC coordinates. # Note that the rotational transform can be computed apriori because it is a single # variable function of rho, and the coordinate mapping does not change rho. Once # this is known, it is simple to compute theta_PEST from alpha. Then we transform diff --git a/tests/test_compute_utils.py b/tests/test_compute_utils.py index cf8e3ba04f..ad145e5f45 100644 --- a/tests/test_compute_utils.py +++ b/tests/test_compute_utils.py @@ -13,8 +13,8 @@ field_line_to_desc_coords, line_integrals, polyder, - polyeval, polyint, + polyval, surface_averages, surface_integrals, surface_integrals_transform, @@ -598,14 +598,14 @@ def test_cubic_poly_roots(self): assert np.unique(poly.shape).size == poly.ndim constant = np.arange(10) assert np.unique(poly.shape + constant.shape).size == poly.ndim + constant.ndim - out = np.sort(cubic_poly_roots(poly, constant), axis=-1) + roots = cubic_poly_roots(poly, constant, sort=True) for j in range(poly.shape[1]): for k in range(poly.shape[2]): for s in range(constant.size): a, b, c, d = poly[:, j, k] d = d - constant[s] np.testing.assert_allclose( - out[s, j, k], + roots[s, j, k], np.sort_complex(np.roots([a, b, c, d])), ) @@ -632,7 +632,7 @@ def test_polyder(self): np.testing.assert_allclose(out[:, j, k], np.polyder(poly[:, j, k])) @pytest.mark.unit - def test_polyeval(self): + def test_polyval(self): """Test vectorized computation of polynomial evaluation.""" quintic = 6 poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi @@ -645,10 +645,10 @@ def test_polyeval(self): assert np.unique(x.shape).size == x.ndim assert poly.shape[1:] == x.shape[: poly.ndim - 1] assert np.unique((poly.shape[0],) + x.shape[poly.ndim - 1 :]).size == x.ndim - 1 - out = polyeval(poly, x) + val = polyval(x, poly) for j in range(poly.shape[1]): for k in range(poly.shape[2]): - np.testing.assert_allclose(out[j, k], np.poly1d(poly[:, j, k])(x[j, k])) + np.testing.assert_allclose(val[j, k], np.poly1d(poly[:, j, k])(x[j, k])) # TODO: FIXME def bounce_point( From 9471d22ab66f3419bd15a2b0fc86fef62081419d Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 25 Feb 2024 17:18:59 -0500 Subject: [PATCH 024/241] Refactor code in bounce_integral --- desc/compute/utils.py | 182 +++++++++++++++++++----------------------- 1 file changed, 81 insertions(+), 101 deletions(-) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 6d1a8f56c4..454b93a9cd 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -1447,32 +1447,30 @@ def tanh_sinh_quadrature(N, quad_limit=3.16): This function outputs the quadrature points and weights for a tanh-sinh quadrature. - ∫₋₁¹ f(x) dx = Σ wₖ f(xₖ) + ∫₋₁¹ f(x) dx = ∑ₖ wₖ f(xₖ) Parameters ---------- N: int - Number of quadrature points, preferable odd + Number of quadrature points, preferably odd quad_limit: float The range of quadrature points to be mapped. 
Larger quad_limit implies better result but limited due to overflow in sinh Returns ------- - x_k : numpy array + x : numpy array Quadrature points - w_k : numpy array + w : numpy array Quadrature weights """ initial_points = jnp.linspace(-quad_limit, quad_limit, N) h = 2 * quad_limit / (N - 1) sinh = jnp.sinh(initial_points) - x_k = jnp.tanh(0.5 * jnp.pi * sinh) - w_k = ( - 0.5 * jnp.pi * h * jnp.cosh(initial_points) / jnp.cosh(0.5 * jnp.pi * sinh) ** 2 - ) - return x_k, w_k + x = jnp.tanh(0.5 * jnp.pi * sinh) + w = 0.5 * jnp.pi * h * jnp.cosh(initial_points) / jnp.cosh(0.5 * jnp.pi * sinh) ** 2 + return x, w def cubic_poly_roots(coef, constant=jnp.array([0]), a_min=None, a_max=None, sort=False): @@ -1488,8 +1486,8 @@ def cubic_poly_roots(coef, constant=jnp.array([0]), a_min=None, a_max=None, sort Specify to instead find solutions to c₁ x³ + c₂ x² + c₃ x + c₄ = ``constant``. a_min, a_max : ndarray Minimum and maximum value to clip roots between. - Complex roots are always clipped to ``a_min``. If None, clipping is not performed on the corresponding edge. + Otherwise, complex roots are clipped to ``a_min`` which defaults to -infinity. Both arrays are broadcast against arrays of shape ``coef.shape[1:]``. sort : bool Whether to sort the roots. @@ -1503,7 +1501,7 @@ def cubic_poly_roots(coef, constant=jnp.array([0]), a_min=None, a_max=None, sort """ # https://en.wikipedia.org/wiki/Cubic_equation#General_cubic_formula # The common libraries use root-finding which isn't compatible with JAX. - clip = a_min is not None or a_max is not None + clip = not (a_min is None and a_max is None) if a_min is None: a_min = -jnp.inf if a_max is None: @@ -1537,7 +1535,7 @@ def clip_roots(r): return roots -def _get_bounce_points(pitch, zeta, poly_B_norm, poly_B_norm_z, include_knots=False): +def _get_bounce_points(pitch, zeta, poly_B, poly_B_z, **kwargs): """Get the bounce points given |B| and 1 / λ. Parameters @@ -1546,17 +1544,10 @@ def _get_bounce_points(pitch, zeta, poly_B_norm, poly_B_norm_z, include_knots=Fa λ values representing the constant function 1 / λ. zeta : ndarray Field line-following ζ coordinates of spline knots. - poly_B_norm : ndarray + poly_B : ndarray Polynomial coefficients of the cubic spline of |B|. - poly_B_norm_z : ndarray + poly_B_z : ndarray Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. - include_knots : bool - Whether to return the knots of the spline along with the intersection points. - If False, the last axis of the returned ``intersect`` array stores the following - points for each polynomial: - [intersect_1, intersect_2, intersect_3] - If True, the last axis takes the form: - [left_knot, intersect_1, intersect_2, intersect_3, right_knot] Returns ------- @@ -1565,47 +1556,36 @@ def _get_bounce_points(pitch, zeta, poly_B_norm, poly_B_norm_z, include_knots=Fa In order to be JIT compilable, the returned array must have a shape that accommodates the case where each cubic polynomial intersects 1 / λ thrice. This requires that ``intersect`` have shape - (pitch.size * poly_B_norm.shape[1], poly_B_norm.shape[2], 3 + 2 * fill) + (pitch.size, poly_B_norm.shape[1], poly_B_norm.shape[2], 3) The boolean mask ``is_bp`` encodes whether a given entry in ``intersect`` is a valid bounce point. The boolean masks ``bp1`` and ``bp2`` encode whether a given entry in ``intersect`` is a valid starting and ending bounce point, respectively. 
These arrays have shape - (pitch.size * poly_B_norm.shape[1], poly_B_norm.shape[2] * (3 + 2 * fill)) + (pitch.size * poly_B_norm.shape[1], poly_B_norm.shape[2] * 3) """ - ML = poly_B_norm.shape[1] - N = poly_B_norm.shape[2] + ML = poly_B.shape[1] + N = poly_B.shape[2] NUM_ROOTS = 3 a_min = zeta[:-1] a_max = zeta[1:] - intersect = cubic_poly_roots( - poly_B_norm, 1 / pitch, a_min, a_max, sort=True - ).reshape(pitch.size, ML, N, NUM_ROOTS) + intersect = cubic_poly_roots(poly_B, 1 / pitch, a_min, a_max, sort=True).reshape( + pitch.size, ML, N, NUM_ROOTS + ) - B_norm_z = polyval(intersect, poly_B_norm_z[:, jnp.newaxis]).reshape( + B_z = polyval(intersect, poly_B_z[:, jnp.newaxis]).reshape( pitch.size * ML, N * NUM_ROOTS ) # Check sign of derivative to determine whether root is a valid bounce point. - bp1 = B_norm_z <= 0 - bp2 = B_norm_z >= 0 + bp1 = B_z <= 0 + bp2 = B_z >= 0 # Periodic boundary to compute bounce integrals of particles trapped outside # this snapshot of the field lines. is_bp = bp1 & jnp.roll(bp2, -1, axis=-1) bp1 = bp1 & is_bp bp2 = bp2 & is_bp - - if include_knots: - arrays = jnp.broadcast_arrays( - a_min, intersect[..., 0], intersect[..., 1], intersect[..., 2], a_max - ) - intersect = jnp.stack(arrays, axis=-1) - R = NUM_ROOTS + 2 - is_bp = stretch_batches(is_bp, NUM_ROOTS, R, fill=False) - bp1 = stretch_batches(bp1, NUM_ROOTS, R, fill=False) - bp2 = stretch_batches(bp2, NUM_ROOTS, R, fill=False) - return intersect, is_bp, bp1, bp2 @@ -1616,8 +1596,7 @@ def stretch_batches(in_arr, in_batch_size, out_batch_size, fill): along its last axis, stretch the last axis so that it is composed of N batches of ``out_batch_size``. The ``out_batch_size - in_batch_size`` missing elements in each batch are populated with ``fill``. - By default, these elements are populated evenly surrounding the input - batches. + By default, these elements are populated evenly surrounding the input batches. Parameters ---------- @@ -1652,7 +1631,7 @@ def stretch_batches(in_arr, in_batch_size, out_batch_size, fill): return out_arr -def _compute_if_new_pitch(pitch, *original, method="quad", err=True, **kwargs): +def _compute_bp_if_given_pitch(pitch, method, *original, err=False, **kwargs): """Return the quantities needed by the ``bounce_integrals`` function. Parameters @@ -1660,46 +1639,49 @@ def _compute_if_new_pitch(pitch, *original, method="quad", err=True, **kwargs): pitch : ndarray λ values representing the constant function 1 / λ. If None, returns the given ``original`` tuple. + method : str + "quad" or "spline". original : tuple pitch, intersect, is_bp, bp1, bp2. err : bool Whether to raise an error if ``pitch`` is None and ``original`` is empty. - method : str - "quad" or "spline". kwargs Additional keyword arguments passed to ``_get_bounce_points``. Returns ------- output : tuple - If method is "quad", returns pitch, bp1, bp2, X. - If method is "spline", returns pitch intersect, is_bp. + (pitch, intersect, is_bp, bp1, bp2). 
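The bounce point classification described above can be pictured with a smooth model field: a crossing of λ|B| = 1 where |B| is decreasing starts a bounce segment, and a crossing where |B| is increasing ends it. A rough NumPy sketch of that idea (the model field, pitch value, and grid below are made up for illustration):

.. code-block:: python

    import numpy as np

    zeta = np.linspace(0, 4 * np.pi, 2001)
    B = 1.0 + 0.5 * np.cos(zeta)      # model |B| along one field line
    pitch = 1 / 1.25                  # particle bounces where |B| = 1.25

    f = pitch * B - 1
    crossing = np.flatnonzero(np.sign(f[:-1]) != np.sign(f[1:]))
    B_z = np.gradient(B, zeta)
    bp1 = crossing[B_z[crossing] <= 0]  # |B| decreasing through 1 / pitch
    bp2 = crossing[B_z[crossing] >= 0]  # |B| increasing through 1 / pitch
    print(zeta[bp1], zeta[bp2])         # the pairs bracket the magnetic wells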
""" if pitch is None: if err and not original: - raise ValueError("No pitch values were provided.") + raise ValueError("No pitch values were given.") return original else: pitch = jnp.atleast_1d(pitch) - if method == "quad": - fun = kwargs.pop("fun") - x = kwargs.pop("x") - intersect, _, bp1, bp2 = _get_bounce_points(pitch, **kwargs) - intersect = intersect.reshape(bp1.shape) - bp1 = fun((intersect, bp1)) - bp2 = fun((intersect, bp2)) - X = x * ((bp2 - bp1) + bp2)[..., jnp.newaxis] - output = pitch, bp1, bp2, X - else: - intersect, is_bp, _, _ = _get_bounce_points( - pitch, include_knots=True, **kwargs - ) + intersect, is_bp, bp1, bp2 = _get_bounce_points(pitch, **kwargs) + if method == "spline": intersect = intersect.reshape( (intersect.shape[0] * intersect.shape[1],) + intersect.shape[2:] ) - output = pitch, intersect, is_bp - return output + # include knots of spline along with intersection points + intersect = jnp.stack( + jnp.broadcast_arrays( + kwargs["zeta"][:-1], + intersect[..., 0], + intersect[..., 1], + intersect[..., 2], + kwargs["zeta"][1:], + ), + axis=-1, + ) + is_bp = stretch_batches(is_bp, 3, 5, fill=False) + else: + intersect = intersect.reshape(bp1.shape) + bp1 = kwargs["fun"]((intersect, bp1)) + bp2 = kwargs["fun"]((intersect, bp2)) + return pitch, intersect, is_bp, bp1, bp2 def bounce_integral( @@ -1783,18 +1765,19 @@ def _spline(name, pitch=None): Returns ------- - result : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 2) + F : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 2) The last axis iterates through every bounce integral performed along that field line padded by zeros. """ - pitch, intersect, is_bp = _compute_if_new_pitch( + pitch, intersect, is_bp, _, _ = _compute_bp_if_given_pitch( pitch, + method, *original, - method="spline", + err=True, zeta=zeta, - poly_B_norm=poly_B_norm, - poly_B_norm_z=poly_B_norm_z, + poly_B=poly_B, + poly_B_z=poly_B_z, ) integrand = jnp.nan_to_num( eq.compute(name, grid=grid, override_grid=False, data=data)[name] @@ -1823,8 +1806,7 @@ def _spline(name, pitch=None): * jnp.append(jnp.arange(1, N * R) % R != 0, True), axis=-1, ) - result = fun((sums, is_bp)).reshape(pitch.size, M, L, N * R // 2) - return result + return fun((sums, is_bp)).reshape(pitch.size, M, L, N * R // 2) def _quad(name, pitch=None): """Compute the bounce integral of the named quantity using the quad method. @@ -1839,28 +1821,29 @@ def _quad(name, pitch=None): Returns ------- - result : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 2) + F : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 2) The last axis iterates through every bounce integral performed along that field line padded by zeros. """ - pitch, bp1, bp2, X = _compute_if_new_pitch( + pitch, _, _, bp1, bp2 = _compute_bp_if_given_pitch( pitch, + method, *original, - method="quad", + err=True, zeta=zeta, - poly_B_norm=poly_B_norm, - poly_B_norm_z=poly_B_norm_z, - x=x, + poly_B=poly_B, + poly_B_z=poly_B_z, fun=fun, ) + X = x * ((bp2 - bp1) + bp2)[..., jnp.newaxis] assert X.shape == (pitch.size * M * L, N * 2, x.size) def body(i, integral): k = i % (N * 2) j = i // (N * 2) p = i // (M * L * N * 2) - v = (i // (N * 2)) % pitch.size + v = j % pitch.size # TODO: Add Hermite spline to interpax to pass in B_z[i]. 
integrand = interp1d(X[j, k], zeta, f[v]) / ( interp1d(X[j, k], zeta, B_sup_z[v]) @@ -1873,13 +1856,12 @@ def body(i, integral): M * L, resolution ) B_sup_z = data["B^zeta"].reshape(M * L, resolution) - result = jnp.nan_to_num( + return jnp.nan_to_num( # TODO: Vectorize interpax to do this with 1 call with einsum. fori_loop(0, pitch.size * M * L * N * 2, body, jnp.empty(X.shape[:-1])) * jnp.pi / (bp2 - bp1) ).reshape(pitch.size, M, L, N * 2) - return result if rho is None: rho = jnp.linspace(0, 1, 10) @@ -1900,7 +1882,7 @@ def body(i, integral): B = data["|B|"].reshape(M * L, resolution) # TODO: https://github.com/f0uriest/interpax/issues/19 - poly_B_norm = CubicHermiteSpline( + poly_B = CubicHermiteSpline( zeta, B, data["|B|_z constant rho alpha"].reshape(M * L, resolution), @@ -1908,30 +1890,27 @@ def body(i, integral): extrapolate="periodic", ).c - poly_B_norm = jnp.moveaxis(poly_B_norm, 1, -1) - poly_B_norm_z = polyder(poly_B_norm) - assert poly_B_norm.shape == (4, M * L, N) - assert poly_B_norm_z.shape == (3, M * L, N) + poly_B = jnp.moveaxis(poly_B, 1, -1) + poly_B_z = polyder(poly_B) + assert poly_B.shape == (4, M * L, N) + assert poly_B_z.shape == (3, M * L, N) - kwargs = {} if method == "quad": + bi = _quad + fun = vmap(lambda args: mask_take(*args, size=N * 2, fill_value=0)) x, w = tanh_sinh_quadrature(resolution) x = jnp.arcsin(x) / jnp.pi - 0.5 - fun = vmap(lambda args: mask_take(*args, size=N * 2, fill_value=0)) - bi = _quad - kwargs["fun"] = fun - kwargs["x"] = x else: - fun = vmap(lambda args: mask_diff(*args)[::2]) bi = _spline - original = _compute_if_new_pitch( + fun = vmap(lambda args: mask_diff(*args)[::2]) + original = _compute_bp_if_given_pitch( pitch, - method=method, + method, err=False, zeta=zeta, - poly_B_norm=poly_B_norm, - poly_B_norm_z=poly_B_norm_z, - **kwargs, + poly_B=poly_B, + poly_B_z=poly_B_z, + fun=fun, ) return bi @@ -2016,7 +1995,9 @@ def mask_diff(a, mask, n=1, axis=-1, prepend=None): # d35cd07ea997f033b2d89d349734c61f5de54b0d/ # numpy/lib/function_base.py#L1324-L1454 prepend = np._NoValue - diff = jnp.nan_to_num(jnp.diff(mask_take(a, mask, mask.size), n, axis, prepend)) + diff = jnp.nan_to_num( + jnp.diff(mask_take(a, mask, mask.size, fill_value=jnp.nan), n, axis, prepend) + ) return diff @@ -2104,13 +2085,12 @@ def _bounce_average(name, pitch=None): Returns ------- - result : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 2) + G : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 2) The last axis iterates through every bounce average performed along that field line padded by zeros. """ - result = safediv(bi(name, pitch), bi("1", pitch)) - return result + return safediv(bi(name, pitch), bi("1", pitch)) return _bounce_average From 7512f70af698ba18589e20cba105a639d0fa28ae Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 25 Feb 2024 17:53:19 -0500 Subject: [PATCH 025/241] Fix bug where quadrature points are computed --- desc/compute/utils.py | 107 +++++++++++++++++------------------------- 1 file changed, 43 insertions(+), 64 deletions(-) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 454b93a9cd..04df3a2256 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -1631,7 +1631,7 @@ def stretch_batches(in_arr, in_batch_size, out_batch_size, fill): return out_arr -def _compute_bp_if_given_pitch(pitch, method, *original, err=False, **kwargs): +def _compute_bp_if_given_pitch(pitch, *original, err=False, **kwargs): """Return the quantities needed by the ``bounce_integrals`` function. 
Parameters @@ -1639,8 +1639,6 @@ def _compute_bp_if_given_pitch(pitch, method, *original, err=False, **kwargs): pitch : ndarray λ values representing the constant function 1 / λ. If None, returns the given ``original`` tuple. - method : str - "quad" or "spline". original : tuple pitch, intersect, is_bp, bp1, bp2. err : bool @@ -1661,26 +1659,6 @@ def _compute_bp_if_given_pitch(pitch, method, *original, err=False, **kwargs): else: pitch = jnp.atleast_1d(pitch) intersect, is_bp, bp1, bp2 = _get_bounce_points(pitch, **kwargs) - if method == "spline": - intersect = intersect.reshape( - (intersect.shape[0] * intersect.shape[1],) + intersect.shape[2:] - ) - # include knots of spline along with intersection points - intersect = jnp.stack( - jnp.broadcast_arrays( - kwargs["zeta"][:-1], - intersect[..., 0], - intersect[..., 1], - intersect[..., 2], - kwargs["zeta"][1:], - ), - axis=-1, - ) - is_bp = stretch_batches(is_bp, 3, 5, fill=False) - else: - intersect = intersect.reshape(bp1.shape) - bp1 = kwargs["fun"]((intersect, bp1)) - bp2 = kwargs["fun"]((intersect, bp2)) return pitch, intersect, is_bp, bp1, bp2 @@ -1691,7 +1669,7 @@ def bounce_integral( zeta_max=10 * jnp.pi, pitch=None, resolution=20, - method="quad", + method="tanh_sinh", ): """Returns a method to compute the bounce integral of any quantity. @@ -1730,11 +1708,10 @@ def bounce_integral( the accuracy of the quadrature. method : str The method to evaluate the integral. - The "spline" method exactly integrates a cubic spline of the integrand. - The "quad" method performs a Gauss quadrature and estimates the integrand - by using distinct cubic splines for components in the integrand so that - the singularity from the division by zero near the bounce points can be - captured more accurately than can be represented by a polynomial. + The "direct" method exactly integrates a cubic spline of the integrand. + The other methods perform a Gauss quadrature and use independent cubic splines + for components in the integrand so that the singularity near the bounce points + can be captured more accurately than can be represented by a polynomial. Returns ------- @@ -1752,8 +1729,8 @@ def bounce_integral( """ - def _spline(name, pitch=None): - """Compute the bounce integral of the named quantity using the spline method. + def _direct(name, pitch=None): + """Compute the bounce integral of the named quantity. Parameters ---------- @@ -1771,14 +1748,25 @@ def _spline(name, pitch=None): """ pitch, intersect, is_bp, _, _ = _compute_bp_if_given_pitch( - pitch, - method, - *original, - err=True, - zeta=zeta, - poly_B=poly_B, - poly_B_z=poly_B_z, + pitch, *original, err=True, zeta=zeta, poly_B=poly_B, poly_B_z=poly_B_z ) + intersect = intersect.reshape( + (intersect.shape[0] * intersect.shape[1],) + intersect.shape[2:] + ) + # include knots of spline along with intersection points + intersect = jnp.stack( + jnp.broadcast_arrays( + zeta[:-1], + intersect[..., 0], + intersect[..., 1], + intersect[..., 2], + zeta[1:], + ), + axis=-1, + ) + R = NUM_ROOTS + 2 + is_bp = stretch_batches(is_bp, NUM_ROOTS, R, fill=False) + integrand = jnp.nan_to_num( eq.compute(name, grid=grid, override_grid=False, data=data)[name] / (data["B^zeta"] * jnp.sqrt(1 - pitch[:, jnp.newaxis] * data["|B|"])) @@ -1793,7 +1781,6 @@ def _spline(name, pitch=None): # is preferable to any numerical quadrature. For example, even if the # intersection points were evenly spaced, a composite Simpson's quadrature # would require computing the spline on 1.8x more knots for the same accuracy. 
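# As a standalone sanity check of the primitive-difference idea above (a minimal
# sketch using NumPy's scalar polynomial helpers rather than the vectorized
# polyint/polyval in this file), the integral of one cubic segment over
# [x1, x2] is simply P(x2) - P(x1), where P is the antiderivative of the segment:
import numpy as np
c = np.array([2.0, -1.0, 0.5, 3.0])  # one cubic segment, highest power first
P = np.polyint(c)                    # primitive, one degree higher
x1, x2 = 0.25, 0.75
exact = np.polyval(P, x2) - np.polyval(P, x1)
# A dense midpoint Riemann sum of the original cubic agrees to high accuracy.
xs = np.linspace(x1, x2, 20_001)
mid = 0.5 * (xs[:-1] + xs[1:])
assert np.isclose(exact, np.sum(np.polyval(c, mid) * np.diff(xs)))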
- R = NUM_ROOTS + 2 primitive = polyval(intersect, polyint(integrand)).reshape( pitch.size * M * L, N * R ) @@ -1808,8 +1795,8 @@ def _spline(name, pitch=None): ) return fun((sums, is_bp)).reshape(pitch.size, M, L, N * R // 2) - def _quad(name, pitch=None): - """Compute the bounce integral of the named quantity using the quad method. + def _tanh_sinh(name, pitch=None): + """Compute the bounce integral of the named quantity. Parameters ---------- @@ -1826,17 +1813,13 @@ def _quad(name, pitch=None): along that field line padded by zeros. """ - pitch, _, _, bp1, bp2 = _compute_bp_if_given_pitch( - pitch, - method, - *original, - err=True, - zeta=zeta, - poly_B=poly_B, - poly_B_z=poly_B_z, - fun=fun, + pitch, intersect, _, bp1, bp2 = _compute_bp_if_given_pitch( + pitch, *original, err=True, zeta=zeta, poly_B=poly_B, poly_B_z=poly_B_z ) - X = x * ((bp2 - bp1) + bp2)[..., jnp.newaxis] + intersect = intersect.reshape(bp1.shape) + bp1 = fun((intersect, bp1)) + bp2 = fun((intersect, bp2)) + X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] assert X.shape == (pitch.size * M * L, N * 2, x.size) def body(i, integral): @@ -1860,7 +1843,9 @@ def body(i, integral): # TODO: Vectorize interpax to do this with 1 call with einsum. fori_loop(0, pitch.size * M * L * N * 2, body, jnp.empty(X.shape[:-1])) * jnp.pi - / (bp2 - bp1) + / (bp2 - bp1), + posinf=0, + neginf=0, ).reshape(pitch.size, M, L, N * 2) if rho is None: @@ -1895,22 +1880,16 @@ def body(i, integral): assert poly_B.shape == (4, M * L, N) assert poly_B_z.shape == (3, M * L, N) - if method == "quad": - bi = _quad + if method == "direct": + bi = _direct + fun = vmap(lambda args: mask_diff(*args)[::2]) + else: + bi = _tanh_sinh fun = vmap(lambda args: mask_take(*args, size=N * 2, fill_value=0)) x, w = tanh_sinh_quadrature(resolution) x = jnp.arcsin(x) / jnp.pi - 0.5 - else: - bi = _spline - fun = vmap(lambda args: mask_diff(*args)[::2]) original = _compute_bp_if_given_pitch( - pitch, - method, - err=False, - zeta=zeta, - poly_B=poly_B, - poly_B_z=poly_B_z, - fun=fun, + pitch, err=False, zeta=zeta, poly_B=poly_B, poly_B_z=poly_B_z ) return bi From 5afe5063de0fa3fbe123ef9fc3b5335e31ce4884 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 25 Feb 2024 21:34:59 -0500 Subject: [PATCH 026/241] Clarify some comments and reshaping --- desc/compute/utils.py | 146 +++++++++++++++++++++--------------------- 1 file changed, 73 insertions(+), 73 deletions(-) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 04df3a2256..b1f741bcda 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -1465,11 +1465,11 @@ def tanh_sinh_quadrature(N, quad_limit=3.16): Quadrature weights """ - initial_points = jnp.linspace(-quad_limit, quad_limit, N) + points = jnp.linspace(-quad_limit, quad_limit, N) h = 2 * quad_limit / (N - 1) - sinh = jnp.sinh(initial_points) + sinh = jnp.sinh(points) x = jnp.tanh(0.5 * jnp.pi * sinh) - w = 0.5 * jnp.pi * h * jnp.cosh(initial_points) / jnp.cosh(0.5 * jnp.pi * sinh) ** 2 + w = 0.5 * jnp.pi * h * jnp.cosh(points) / jnp.cosh(0.5 * jnp.pi * sinh) ** 2 return x, w @@ -1535,7 +1535,7 @@ def clip_roots(r): return roots -def _get_bounce_points(pitch, zeta, poly_B, poly_B_z, **kwargs): +def _get_bounce_points(pitch, zeta, poly_B, poly_B_z): """Get the bounce points given |B| and 1 / λ. Parameters @@ -1555,26 +1555,25 @@ def _get_bounce_points(pitch, zeta, poly_B, poly_B_z, **kwargs): The polynomials' intersection points with 1 / λ is given by ``intersect``. 
In order to be JIT compilable, the returned array must have a shape that accommodates the case where each cubic polynomial intersects 1 / λ thrice. - This requires that ``intersect`` have shape - (pitch.size, poly_B_norm.shape[1], poly_B_norm.shape[2], 3) + The returned array ``intersect`` has shape (pitch.size, M * L, N, NUM_ROOTS). The boolean mask ``is_bp`` encodes whether a given entry in ``intersect`` is a valid bounce point. The boolean masks ``bp1`` and ``bp2`` encode whether a given entry in ``intersect`` is a valid starting and ending bounce point, - respectively. These arrays have shape - (pitch.size * poly_B_norm.shape[1], poly_B_norm.shape[2] * 3) + respectively. These arrays have shape (pitch.size * M * L, N * NUM_ROOTS). """ ML = poly_B.shape[1] N = poly_B.shape[2] NUM_ROOTS = 3 - a_min = zeta[:-1] a_max = zeta[1:] intersect = cubic_poly_roots(poly_B, 1 / pitch, a_min, a_max, sort=True).reshape( pitch.size, ML, N, NUM_ROOTS ) - + # Typically, this is the shape that is needed later, (as is the case in + # bounce_integrals because jax.vmap works over one axis at a time), so we + # reshape early here to prevent unnecessary data movement. B_z = polyval(intersect, poly_B_z[:, jnp.newaxis]).reshape( pitch.size * ML, N * NUM_ROOTS ) @@ -1589,49 +1588,7 @@ def _get_bounce_points(pitch, zeta, poly_B, poly_B_z, **kwargs): return intersect, is_bp, bp1, bp2 -def stretch_batches(in_arr, in_batch_size, out_batch_size, fill): - """Stretch batches of ``in_arr``. - - Given that ``in_arr`` is composed of N batches of ``in_batch_size`` - along its last axis, stretch the last axis so that it is composed of - N batches of ``out_batch_size``. The ``out_batch_size - in_batch_size`` - missing elements in each batch are populated with ``fill``. - By default, these elements are populated evenly surrounding the input batches. - - Parameters - ---------- - in_arr : ndarray, shape(..., in_batch_size * N) - Input array - in_batch_size : int - Length of batches along last axis of input array. - out_batch_size : int - Length of batches along last axis of output array. - fill : bool or int or float - Value to fill at missing indices of each batch. - - Returns - ------- - out_arr : ndarray, shape(..., out_batch_size * N) - Output array - - """ - assert out_batch_size >= in_batch_size - N = in_arr.shape[-1] // in_batch_size - out_shape = in_arr.shape[:-1] + (N * out_batch_size,) - offset = (out_batch_size - in_batch_size) // 2 - idx = jnp.arange(in_arr.shape[-1]) - out_arr = put_along_axis( - arr=jnp.full(out_shape, fill, dtype=in_arr.dtype), - indices=(idx // in_batch_size) * out_batch_size - + offset - + (idx % in_batch_size), - values=in_arr, - axis=-1, - ) - return out_arr - - -def _compute_bp_if_given_pitch(pitch, *original, err=False, **kwargs): +def _compute_bp_if_given_pitch(pitch, zeta, poly_B, poly_B_z, *original, err=False): """Return the quantities needed by the ``bounce_integrals`` function. Parameters @@ -1639,12 +1596,16 @@ def _compute_bp_if_given_pitch(pitch, *original, err=False, **kwargs): pitch : ndarray λ values representing the constant function 1 / λ. If None, returns the given ``original`` tuple. + zeta : ndarray + Field line-following ζ coordinates of spline knots. + poly_B : ndarray + Polynomial coefficients of the cubic spline of |B|. + poly_B_z : ndarray + Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. original : tuple pitch, intersect, is_bp, bp1, bp2. err : bool Whether to raise an error if ``pitch`` is None and ``original`` is empty. 
- kwargs - Additional keyword arguments passed to ``_get_bounce_points``. Returns ------- @@ -1658,7 +1619,7 @@ def _compute_bp_if_given_pitch(pitch, *original, err=False, **kwargs): return original else: pitch = jnp.atleast_1d(pitch) - intersect, is_bp, bp1, bp2 = _get_bounce_points(pitch, **kwargs) + intersect, is_bp, bp1, bp2 = _get_bounce_points(pitch, zeta, poly_B, poly_B_z) return pitch, intersect, is_bp, bp1, bp2 @@ -1707,11 +1668,12 @@ def bounce_integral( So for well-behaved magnetic fields increasing resolution should increase the accuracy of the quadrature. method : str - The method to evaluate the integral. + The quadrature scheme used to evaluate the integral. The "direct" method exactly integrates a cubic spline of the integrand. - The other methods perform a Gauss quadrature and use independent cubic splines - for components in the integrand so that the singularity near the bounce points - can be captured more accurately than can be represented by a polynomial. + The "tanh_sinh" method performs a Tanh-sinh quadrature, where independent cubic + splines are used for components in the integrand so that the singularity near + the bounce points can be captured more accurately than can be represented by a + polynomial. Returns ------- @@ -1748,12 +1710,10 @@ def _direct(name, pitch=None): """ pitch, intersect, is_bp, _, _ = _compute_bp_if_given_pitch( - pitch, *original, err=True, zeta=zeta, poly_B=poly_B, poly_B_z=poly_B_z + pitch, zeta, poly_B, poly_B_z, *original, err=True ) - intersect = intersect.reshape( - (intersect.shape[0] * intersect.shape[1],) + intersect.shape[2:] - ) - # include knots of spline along with intersection points + intersect = intersect.reshape(pitch.size * M * L, N, NUM_ROOTS) + # Include knots of spline along with intersection points. intersect = jnp.stack( jnp.broadcast_arrays( zeta[:-1], @@ -1787,7 +1747,7 @@ def _direct(name, pitch=None): sums = jnp.cumsum( # Periodic boundary to compute bounce integrals of particles # trapped outside this snapshot of the field lines. - jnp.diff(primitive, axis=-1, append=primitive[:, 0, jnp.newaxis]) + jnp.diff(primitive, axis=-1, append=primitive[..., 0, jnp.newaxis]) # Multiply by mask that is false at knots of piecewise spline # to avoid adding difference between primitives of splines at knots. * jnp.append(jnp.arange(1, N * R) % R != 0, True), @@ -1795,7 +1755,7 @@ def _direct(name, pitch=None): ) return fun((sums, is_bp)).reshape(pitch.size, M, L, N * R // 2) - def _tanh_sinh(name, pitch=None): + def _quad_sin(name, pitch=None): """Compute the bounce integral of the named quantity. 
Parameters @@ -1814,9 +1774,9 @@ def _tanh_sinh(name, pitch=None): """ pitch, intersect, _, bp1, bp2 = _compute_bp_if_given_pitch( - pitch, *original, err=True, zeta=zeta, poly_B=poly_B, poly_B_z=poly_B_z + pitch, zeta, poly_B, poly_B_z, *original, err=True ) - intersect = intersect.reshape(bp1.shape) + intersect = intersect.reshape(pitch.size * M * L, N * NUM_ROOTS) bp1 = fun((intersect, bp1)) bp2 = fun((intersect, bp2)) X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] @@ -1880,17 +1840,15 @@ def body(i, integral): assert poly_B.shape == (4, M * L, N) assert poly_B_z.shape == (3, M * L, N) + original = _compute_bp_if_given_pitch(pitch, zeta, poly_B, poly_B_z, err=False) if method == "direct": bi = _direct fun = vmap(lambda args: mask_diff(*args)[::2]) else: - bi = _tanh_sinh + bi = _quad_sin fun = vmap(lambda args: mask_take(*args, size=N * 2, fill_value=0)) x, w = tanh_sinh_quadrature(resolution) x = jnp.arcsin(x) / jnp.pi - 0.5 - original = _compute_bp_if_given_pitch( - pitch, err=False, zeta=zeta, poly_B=poly_B, poly_B_z=poly_B_z - ) return bi @@ -1980,6 +1938,48 @@ def mask_diff(a, mask, n=1, axis=-1, prepend=None): return diff +def stretch_batches(in_arr, in_batch_size, out_batch_size, fill): + """Stretch batches of ``in_arr``. + + Given that ``in_arr`` is composed of N batches of ``in_batch_size`` + along its last axis, stretch the last axis so that it is composed of + N batches of ``out_batch_size``. The ``out_batch_size - in_batch_size`` + missing elements in each batch are populated with ``fill``. + By default, these elements are populated evenly surrounding the input batches. + + Parameters + ---------- + in_arr : ndarray, shape(..., in_batch_size * N) + Input array + in_batch_size : int + Length of batches along last axis of input array. + out_batch_size : int + Length of batches along last axis of output array. + fill : bool or int or float + Value to fill at missing indices of each batch. 
+ + Returns + ------- + out_arr : ndarray, shape(..., out_batch_size * N) + Output array + + """ + assert out_batch_size >= in_batch_size + N = in_arr.shape[-1] // in_batch_size + out_shape = in_arr.shape[:-1] + (N * out_batch_size,) + offset = (out_batch_size - in_batch_size) // 2 + idx = jnp.arange(in_arr.shape[-1]) + out_arr = put_along_axis( + arr=jnp.full(out_shape, fill, dtype=in_arr.dtype), + indices=(idx // in_batch_size) * out_batch_size + + offset + + (idx % in_batch_size), + values=in_arr, + axis=-1, + ) + return out_arr + + def bounce_average( eq, rho=None, From c214f5c00eb85c3aac1133f031cc2e8c022d3f79 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 12 Mar 2024 23:41:44 -0500 Subject: [PATCH 027/241] Fix bugs with computing bounce point and others documented below Resolve https://github.com/PlasmaControl/DESC/pull/854#discussion_r1502008862 https://github.com/PlasmaControl/DESC/pull/854#discussion_r1504980225 https://github.com/PlasmaControl/DESC/pull/854#discussion_r1504985051 --- desc/compute/bounce_integral.py | 737 ++++++++++++++++++++++++++++++++ desc/compute/utils.py | 626 +-------------------------- tests/test_bounce_integral.py | 153 +++++++ tests/test_compute_utils.py | 158 +------ 4 files changed, 904 insertions(+), 770 deletions(-) create mode 100644 desc/compute/bounce_integral.py create mode 100644 tests/test_bounce_integral.py diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py new file mode 100644 index 0000000000..5efda73626 --- /dev/null +++ b/desc/compute/bounce_integral.py @@ -0,0 +1,737 @@ +"""Methods for computing bounce integrals.""" + +from interpax import interp1d +from scipy.interpolate import Akima1DInterpolator, CubicHermiteSpline + +from desc.backend import ( + complex_sqrt, + flatnonzero, + fori_loop, + jnp, + put, + put_along_axis, + vmap, +) +from desc.compute.utils import mask_diff, mask_take, safediv +from desc.grid import Grid, LinearGrid, _meshgrid_expand + +from .data_index import data_index + + +def polyint(c, k=jnp.array([0])): + """Coefficients for the primitives of the given set of polynomials. + + Parameters + ---------- + c : ndarray + First axis should store coefficients of a polynomial. + For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, + coefficient cᵢ should be stored at ``c[n - i]``. + k : ndarray or float + Integration constants. + + Returns + ------- + poly : ndarray + Coefficients of polynomial primitive. + That is, ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁺¹, + where n is ``c.shape[0] - 1``. + + """ + poly = (c.T / jnp.arange(c.shape[0], 0, -1)).T + poly = jnp.append(poly, jnp.broadcast_to(k, c.shape[1:])[jnp.newaxis], axis=0) + return poly + + +def polyder(c): + """Coefficients for the derivatives of the given set of polynomials. + + Parameters + ---------- + c : ndarray + First axis should store coefficients of a polynomial. + For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, + coefficient cᵢ should be stored at ``c[n - i]``. + + Returns + ------- + poly : ndarray + Coefficients of polynomial derivative, ignoring the arbitrary constant. + That is, ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁻¹, + where n is ``c.shape[0] - 1``. + + """ + poly = (c[:-1].T * jnp.arange(c.shape[0] - 1, 0, -1)).T + return poly + + +def polyval(x, c): + """Evaluate the set of polynomials c at the points x. + + Parameters + ---------- + x : ndarray + Coordinates at which to evaluate the set of polynomials. 
+ The first ``c.ndim`` axes should have shape ``c.shape[1:]``. + c : ndarray + First axis should store coefficients of a polynomial. + For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, + coefficient cᵢ should be stored at ``c[n - i]``. + + Returns + ------- + val : ndarray + ``val[j, k, ...]`` is the polynomial with coefficients ``c[:, j, k, ...]`` + evaluated at the point ``x[j, k, ...]``. + + Notes + ----- + This function does not perform the same operation as + ``np.polynomial.polynomial.polyval(x, c)``. + An example usage of this function is shown in + tests/test_compute_utils.py::TestComputeUtils::test_polyval. + + """ + X = (x[jnp.newaxis].T ** jnp.arange(c.shape[0] - 1, -1, -1)).T + alphabet = "abcdefghijklmnopqrstuvwxyz" + sub = alphabet[: c.ndim] + val = jnp.einsum(f"{sub},{sub}...->{sub[1:]}...", c, X) + return val + + +def tanh_sinh_quadrature(N, quad_limit=3.16): + """ + tanh_sinh quadrature. + + This function outputs the quadrature points and weights + for a tanh-sinh quadrature. + + ∫₋₁¹ f(x) dx = ∑ₖ wₖ f(xₖ) + + Parameters + ---------- + N: int + Number of quadrature points, preferably odd + quad_limit: float + The range of quadrature points to be mapped. + Larger quad_limit implies better result but limited due to overflow in sinh + + Returns + ------- + x : numpy array + Quadrature points + w : numpy array + Quadrature weights + + """ + points = jnp.linspace(-quad_limit, quad_limit, N) + h = 2 * quad_limit / (N - 1) + sinh = jnp.sinh(points) + x = jnp.tanh(0.5 * jnp.pi * sinh) + w = 0.5 * jnp.pi * h * jnp.cosh(points) / jnp.cosh(0.5 * jnp.pi * sinh) ** 2 + return x, w + + +def cubic_poly_roots(coef, k=jnp.array([0]), a_min=None, a_max=None, sort=False): + """Roots of cubic polynomial. + + Parameters + ---------- + coef : ndarray + First axis should store coefficients of a polynomial. For a polynomial + given by c₁ x³ + c₂ x² + c₃ x + c₄, ``coef[i]`` should store cᵢ. + It is assumed that c₁ is nonzero. + k : ndarray, shape(constant.size, ) + Specify to instead find solutions to c₁ x³ + c₂ x² + c₃ x + c₄ = ``k``. + a_min, a_max : ndarray + Minimum and maximum value to return roots between. + If specified only real roots are returned. + If None, returns all complex roots. + Both arrays are broadcast against arrays of shape ``coef.shape[1:]``. + sort : bool + Whether to sort the roots. + + Returns + ------- + roots : ndarray, shape(k.size, coef.shape, 3) + If ``k`` has one element, the first axis will be squeezed out. + The roots of the cubic polynomial. + + """ + # https://en.wikipedia.org/wiki/Cubic_equation#General_cubic_formula + # The common libraries use root-finding which isn't compatible with JAX. 
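# A quick standalone check of the closed-form cubic formula referenced above
# against NumPy's root finder conventions (this sketch skips the degenerate
# C == 0 branch that the function below handles explicitly):
import numpy as np

def cubic_roots_closed_form(a, b, c, d):
    """Roots of a x**3 + b x**2 + c x + d via the general cubic formula."""
    t0 = b**2 - 3 * a * c
    t1 = 2 * b**3 - 9 * a * b * c + 27 * a**2 * d
    C = ((t1 + np.emath.sqrt(t1**2 - 4 * t0**3)) / 2) ** (1 / 3)
    xi = (-1 + np.emath.sqrt(-3)) / 2  # primitive cube root of unity
    return [-(b + s * C + t0 / (s * C)) / (3 * a) for s in (1, xi, xi**2)]

# (x - 1)(x - 2)(x - 3) = x**3 - 6 x**2 + 11 x - 6 has roots 1, 2, 3.
roots = np.sort_complex(np.array(cubic_roots_closed_form(1.0, -6.0, 11.0, -6.0)))
np.testing.assert_allclose(roots, [1, 2, 3], atol=1e-8)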
+ clip = not (a_min is None and a_max is None) + if a_min is None: + a_min = -jnp.inf + if a_max is None: + a_max = jnp.inf + + a, b, c, d = coef + d = jnp.squeeze((d[jnp.newaxis].T - k).T) + t_0 = b**2 - 3 * a * c + t_1 = 2 * b**3 - 9 * a * b * c + 27 * a**2 * d + C = ((t_1 + complex_sqrt(t_1**2 - 4 * t_0**3)) / 2) ** (1 / 3) + C_is_zero = jnp.isclose(C, 0) + + def compute_roots(xi_k): + t_3 = jnp.where(C_is_zero, 0, t_0 / (xi_k * C)) + r = -(b + xi_k * C + t_3) / (3 * a) + return r + + def clip_to_nan(r): + r = jnp.where(jnp.isreal(r) & (a_min <= r) & (r <= a_max), jnp.real(r), jnp.nan) + return r + + xi_1 = (-1 + (-3) ** 0.5) / 2 + xi_2 = xi_1**2 + xi_3 = 1 + roots = tuple(map(compute_roots, (xi_1, xi_2, xi_3))) + if clip: + roots = tuple(map(clip_to_nan, roots)) + roots = jnp.stack(roots, axis=-1) + if sort: + roots = jnp.sort(roots, axis=-1) + return roots + + +def _get_bounce_points(pitch, zeta, poly_B, poly_B_z): + """Get the bounce points given |B| and 1 / λ. + + Parameters + ---------- + pitch : ndarray + λ values representing the constant function 1 / λ. + zeta : ndarray + Field line-following ζ coordinates of spline knots. + poly_B : ndarray + Polynomial coefficients of the cubic spline of |B|. + poly_B_z : ndarray + Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. + + Returns + ------- + intersect, bp1, bp2 : ndarray, ndarray, ndarray + The polynomials' intersection points with 1 / λ is given by ``intersect``. + In order to be JIT compilable, the returned array must have a shape that + accommodates the case where each cubic polynomial intersects 1 / λ thrice. + So ``intersect`` has shape (pitch.size * M * L, N * NUM_ROOTS), + where the last axis is padded with nan at the end to be JIT compilable. + The boolean masks ``bp1`` and ``bp2`` encode whether a given entry in + ``intersect`` is a valid starting and ending bounce point, respectively. + + """ + ML = poly_B.shape[1] + N = poly_B.shape[2] + NUM_ROOTS = 3 + a_min = zeta[:-1] + a_max = zeta[1:] + + intersect = cubic_poly_roots(poly_B, 1 / pitch, a_min, a_max, sort=True).reshape( + pitch.size, ML, N, NUM_ROOTS + ) + B_z = polyval(intersect, poly_B_z[:, jnp.newaxis]).reshape( + pitch.size * ML, N * NUM_ROOTS + ) + intersect = intersect.reshape(pitch.size * ML, N * NUM_ROOTS) + is_intersect = ~jnp.isnan(intersect) + # Rearrange so that all the intersects along field line are contiguous. + contiguous = vmap( + lambda args: mask_take(*args, size=N * NUM_ROOTS, fill_value=jnp.nan) + ) + intersect = contiguous((intersect, is_intersect)) + B_z = contiguous((B_z, is_intersect)) + # Check sign of derivative to determine whether root is a valid bounce point. + bp1 = B_z <= 0 + bp2 = B_z >= 0 + + # index of last intersect + idx = ( + jnp.squeeze( + vmap(lambda a: flatnonzero(a, size=1, fill_value=a.size))(~is_intersect) + ) + - 1 + ) + assert idx.shape == (pitch.size * ML,) + # Periodic boundary to compute bounce integrals of particles trapped outside + # this snapshot of the field lines. + # Roll such that first intersect is moved to index of last intersect. + is_bp = bp1 & put_along_axis(jnp.roll(bp2, -1, axis=-1), idx, bp2[:, 0], axis=-1) + # B_z<=0 at intersect_i implies B_z>=0 at intersect_i+1 by continuity. + # I think this step is only needed to determine if the boundaries are bounce points. + bp1 = bp1 & is_bp + bp2 = bp2 & is_bp + return intersect, bp1, bp2 + + +def _get_bounce_points_include_knots(pitch, zeta, poly_B, poly_B_z): + """Get the bounce points given |B| and 1 / λ. 
+ + Like ``_get_bounce_points`` but returns additional ingredients + needed by the algorithm in the direct method in ``bounce_integral``. + + Parameters + ---------- + pitch : ndarray + λ values representing the constant function 1 / λ. + zeta : ndarray + Field line-following ζ coordinates of spline knots. + poly_B : ndarray + Polynomial coefficients of the cubic spline of |B|. + poly_B_z : ndarray + Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. + + Returns + ------- + intersect_nan_to_right_knot, contiguous, is_intersect, is_bp + The polynomials' intersection points with 1 / λ is given by + ``intersect_nan_to_right_knot``. + In order to be JIT compilable, the returned array must have a shape that + accommodates the case where each cubic polynomial intersects 1 / λ thrice. + Rather than padding the nan values to the end, ``intersect_nan_to_right_knot`` + replaces the nan values with the right knot of the splines. This array + has shape (pitch.size * M * L, N, NUM_ROOTS + 2). + The boolean mask ``is_bp`` encodes whether a given entry in + + .. code-block:: python + contiguous( + (intersect_nan_to_right_knot.reshape(pitch.size * ML, -1), is_intersect) + ) + + is a valid bounce point. + + """ + ML = poly_B.shape[1] + N = poly_B.shape[2] + NUM_ROOTS = 3 + R = NUM_ROOTS + 2 + a_min = zeta[:-1] + a_max = zeta[1:] + + roots = cubic_poly_roots(poly_B, 1 / pitch, a_min, a_max, sort=True).reshape( + pitch.size, ML, N, 3 + ) + roots = (roots[..., 0], roots[..., 1], roots[..., 2]) + nan_to_right_knot = tuple(map(lambda r: jnp.where(jnp.isnan(r), a_max, r), roots)) + a_min = jnp.broadcast_to(a_min, shape=(pitch.size, ML, N)) + a_max = jnp.broadcast_to(a_max, shape=(pitch.size, ML, N)) + # Include the knots of the splines along with the intersection points. + intersect = jnp.stack((a_min, *roots, a_max), axis=-1) + intersect_nan_to_right_knot = jnp.stack( + (a_min, *nan_to_right_knot, a_max), axis=-1 + ).reshape(pitch.size * ML, N, R) + + B_z = polyval(intersect, poly_B_z[:, jnp.newaxis]).reshape(pitch.size * ML, N * R) + is_intersect = jnp.reshape( + jnp.array([False, True, True, True, False], dtype=bool) & ~jnp.isnan(intersect), + newshape=(pitch.size * ML, N * R), + ) + # Rearrange so that all the intersects along field line are contiguous. + contiguous = vmap(lambda args: mask_take(*args, size=N * R, fill_value=jnp.nan)) + B_z = contiguous((B_z, is_intersect)) + # Check sign of derivative to determine whether root is a valid bounce point. + bp1 = B_z <= 0 + bp2 = B_z >= 0 + + # index of last intersect + idx = ( + jnp.squeeze( + vmap(lambda a: flatnonzero(a, size=1, fill_value=a.size))(~is_intersect) + ) + - 1 + ) + assert idx.shape == (pitch.size * ML,) + # Periodic boundary to compute bounce integrals of particles trapped outside + # this snapshot of the field lines. + # Roll such that first intersect is moved to index of last intersect. + is_bp = bp1 & put_along_axis(jnp.roll(bp2, -1, axis=-1), idx, bp2[:, 0], axis=-1) + return intersect_nan_to_right_knot, contiguous, is_intersect, is_bp + + +def _compute_bp_if_given_pitch( + pitch, zeta, poly_B, poly_B_z, get_bounce_points, *original, err=False +): + """Return the ingredients needed by the ``bounce_integrals`` function. + + Parameters + ---------- + pitch : ndarray + λ values representing the constant function 1 / λ. + If None, returns the given ``original`` tuple. + zeta : ndarray + Field line-following ζ coordinates of spline knots. + poly_B : ndarray + Polynomial coefficients of the cubic spline of |B|. 
+ poly_B_z : ndarray + Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. + get_bounce_points : callable + Method to return bounce points. + original : tuple + pitch, intersect, is_bp, bp1, bp2. + err : bool + Whether to raise an error if ``pitch`` is None and ``original`` is empty. + + """ + if pitch is None: + if err and not original: + raise ValueError("No pitch values were given.") + return original + else: + pitch = jnp.atleast_1d(pitch) + return pitch, *get_bounce_points(pitch, zeta, poly_B, poly_B_z) + + +def bounce_integral( + eq, + rho=None, + alpha=None, + zeta_max=10 * jnp.pi, + pitch=None, + resolution=20, + method="tanh_sinh", +): + """Returns a method to compute the bounce integral of any quantity. + + The bounce integral is defined as F_ℓ(λ) = ∫ f(ℓ) / √(1 − λ |B|) dℓ, where + dℓ parameterizes the distance along the field line, + λ is a constant proportional to the magnetic moment over energy, + |B| is the norm of the magnetic field, + f(ℓ) is the quantity to integrate along the field line, + and the endpoints of the integration are at the bounce points. + For a particle with fixed λ, bounce points are defined to be the location + on the field line such that the particle's velocity parallel to the + magnetic field is zero, i.e. λ |B| = 1. + + The bounce integral is defined up to a sign. + We choose the sign that corresponds the particle's guiding center trajectory + traveling in the direction of increasing field-line-following label. + + Parameters + ---------- + eq : Equilibrium + Equilibrium on which the bounce integral is defined. + rho : ndarray + Unique flux surface label coordinates. + alpha : ndarray + Unique field line label coordinates over a constant rho surface. + zeta_max : float + Max value for field line following coordinate. + pitch : ndarray + λ values to evaluate the bounce integral at. + resolution : int + Number of interpolation points (knots) used for splines in the quadrature. + A maximum of three bounce points can be detected in between knots. + The accuracy of the quadrature will increase as some function of + the number of knots over the number of detected bounce points. + So for well-behaved magnetic fields increasing resolution should increase + the accuracy of the quadrature. + method : str + The quadrature scheme used to evaluate the integral. + The "direct" method exactly integrates a cubic spline of the integrand. + The "tanh_sinh" method performs a Tanh-sinh quadrature, where independent cubic + splines are used for components in the integrand so that the singularity near + the bounce points can be captured more accurately than can be represented by a + polynomial. + + Returns + ------- + bi : callable + This callable method computes the bounce integral F_ℓ(λ) for every + specified field line ℓ (constant rho and alpha), for every λ value in + ``pitch``. + + Examples + -------- + .. 
code-block:: python + + bi = bounce_integral(eq) + F = bi(name, pitch) + + """ + if rho is None: + rho = jnp.linspace(0, 1, 10) + if alpha is None: + alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 20) + rho = jnp.atleast_1d(rho) + alpha = jnp.atleast_1d(alpha) + zeta = jnp.linspace(0, zeta_max, resolution) + L = rho.size + M = alpha.size + N = resolution - 1 # number of piecewise cubic polynomials per field line + NUM_ROOTS = 3 # number of roots for cubic polynomial + + grid, data = field_line_to_desc_coords(eq, rho, alpha, zeta) + data = eq.compute( + ["B^zeta", "|B|", "|B|_z constant rho alpha"], grid=grid, data=data + ) + B = data["|B|"].reshape(M * L, resolution) + + # TODO: https://github.com/f0uriest/interpax/issues/19 + poly_B = CubicHermiteSpline( + zeta, + B, + data["|B|_z constant rho alpha"].reshape(M * L, resolution), + axis=-1, + extrapolate="periodic", + ).c + + poly_B = jnp.moveaxis(poly_B, 1, -1) + poly_B_z = polyder(poly_B) + assert poly_B.shape == (4, M * L, N) + assert poly_B_z.shape == (3, M * L, N) + + def _direct(name, pitch=None): + """Compute the bounce integral of the named quantity. + + Parameters + ---------- + name : ndarray + Name of quantity in ``data_index`` to compute the bounce integral of. + pitch : ndarray + λ values to evaluate the bounce integral at. + If None, uses the values given to the parent function. + + Returns + ------- + F : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 2) + The last axis iterates through every bounce integral performed + along that field line padded by zeros. + + """ + ( + pitch, + intersect_nan_to_right_knot, + contiguous, + is_intersect, + is_bp, + ) = _compute_bp_if_given_pitch( + pitch, + zeta, + poly_B, + poly_B_z, + _get_bounce_points_include_knots, + *original, + err=True, + ) + + integrand = jnp.nan_to_num( + eq.compute(name, grid=grid, override_grid=False, data=data)[name] + / (data["B^zeta"] * jnp.sqrt(1 - pitch[:, jnp.newaxis] * data["|B|"])) + ).reshape(pitch.size * M * L, resolution) + + # TODO: https://github.com/f0uriest/interpax/issues/19 + integrand = Akima1DInterpolator(zeta, integrand, axis=-1).c + + integrand = jnp.moveaxis(integrand, 1, -1) + assert integrand.shape == (4, pitch.size * M * L, N) + # For this algorithm, computing integrals via differences of primitives + # is preferable to any numerical quadrature. For example, even if the + # intersection points were evenly spaced, a composite Simpson's quadrature + # would require computing the spline on 1.8x more knots for the same accuracy. + R = NUM_ROOTS + 2 + primitive = polyval(intersect_nan_to_right_knot, polyint(integrand)).reshape( + pitch.size * M * L, N * R + ) + sums = jnp.cumsum( + # Periodic boundary to compute bounce integrals of particles + # trapped outside this snapshot of the field lines. + jnp.diff(primitive, axis=-1, append=primitive[..., 0, jnp.newaxis]) + # Multiply by mask that is false at shared knots of piecewise spline + # to avoid adding difference between primitives of splines at knots. + * jnp.append(jnp.arange(1, N * R) % R != 0, True), + axis=-1, + ) + F = jnp.nan_to_num( + fun((contiguous((sums, is_intersect)), is_bp)), posinf=0, neginf=0 + ) + return F.reshape(pitch.size, M, L, N * R // 2) + + def _quad_sin(name, pitch=None): + """Compute the bounce integral of the named quantity. + + Parameters + ---------- + name : ndarray + Name of quantity in ``data_index`` to compute the bounce integral of. + pitch : ndarray + λ values to evaluate the bounce integral at. + If None, uses the values given to the parent function. 
+ + Returns + ------- + F : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 2) + The last axis iterates through every bounce integral performed + along that field line padded by zeros. + + """ + pitch, intersect, bp1, bp2 = _compute_bp_if_given_pitch( + pitch, zeta, poly_B, poly_B_z, _get_bounce_points, *original, err=True + ) + bp1 = fun((intersect, bp1)) + bp2 = fun((intersect, bp2)) + X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] + assert X.shape == (pitch.size * M * L, N * 2, x.size) + + def body(i, integral): + k = i % (N * 2) + j = i // (N * 2) + p = i // (M * L * N * 2) + v = j % pitch.size + # TODO: Add Hermite spline to interpax to pass in B_z[i]. + integrand = interp1d(X[j, k], zeta, f[v]) / ( + interp1d(X[j, k], zeta, B_sup_z[v]) + * jnp.sqrt(1 - pitch[p] * interp1d(X[j, k], zeta, B[v])) + ) + integral = put(integral, i, jnp.sum(w * integrand)) + return integral + + f = eq.compute(name, grid=grid, override_grid=False, data=data)[name].reshape( + M * L, resolution + ) + B_sup_z = data["B^zeta"].reshape(M * L, resolution) + F = jnp.nan_to_num( + # TODO: Vectorize interpax to do this with 1 call with einsum. + fori_loop(0, pitch.size * M * L * N * 2, body, jnp.zeros(X.shape[:-1])) + * jnp.pi + / (bp2 - bp1), + posinf=0, + neginf=0, + ) + return F.reshape(pitch.size, M, L, N * 2) + + if method == "direct": + bi = _direct + fun = vmap(lambda args: mask_diff(*args)[::2]) + get_bounce_points = _get_bounce_points_include_knots + else: + bi = _quad_sin + fun = vmap(lambda args: mask_take(*args, size=N * 2, fill_value=jnp.nan)) + get_bounce_points = _get_bounce_points + x, w = tanh_sinh_quadrature(resolution) + x = jnp.arcsin(x) / jnp.pi - 0.5 + original = _compute_bp_if_given_pitch( + pitch, zeta, poly_B, poly_B_z, get_bounce_points, err=False + ) + return bi + + +def bounce_average( + eq, + rho=None, + alpha=None, + zeta_max=10 * jnp.pi, + pitch=None, + resolution=20, + method="quad", +): + """Returns a method to compute the bounce average of any quantity. + + The bounce average is defined as + G_ℓ(λ) = (∫ g(ℓ) / √(1 − λ |B|) dℓ) / (∫ 1 / √(1 − λ |B|) dℓ), where + dℓ parameterizes the distance along the field line, + λ is a constant proportional to the magnetic moment over energy, + |B| is the norm of the magnetic field, + g(ℓ) is the quantity to integrate along the field line, + and the endpoints of the integration are at the bounce points. + For a particle with fixed λ, bounce points are defined to be the location + on the field line such that the particle's velocity parallel to the + magnetic field is zero, i.e. λ |B| = 1. + + The bounce integral is defined up to a sign. + We choose the sign that corresponds the particle's guiding center trajectory + traveling in the direction of increasing field-line-following label. + + Parameters + ---------- + eq : Equilibrium + Equilibrium on which the bounce average is defined. + rho : ndarray + Unique flux surface label coordinates. + alpha : ndarray + Unique field line label coordinates over a constant rho surface. + zeta_max : float + Max value for field line following coordinate. + pitch : ndarray + λ values to evaluate the bounce average at. + Defaults to linearly spaced values between min and max of |B|. + resolution : int + Number of interpolation points (knots) used for splines in the quadrature. + A maximum of three bounce points can be detected in between knots. + The accuracy of the quadrature will increase as some function of + the number of knots over the number of detected bounce points. 
+ So for well-behaved magnetic fields increasing resolution should increase + the accuracy of the quadrature. + method : str + The method to evaluate the integral. + The "spline" method exactly integrates a cubic spline of the integrand. + The "quad" method performs a Gauss quadrature and estimates the integrand + by using distinct cubic splines for components in the integrand so that + the singularity from the division by zero near the bounce points can be + captured more accurately than can be represented by a polynomial. + + Returns + ------- + ba : callable + This callable method computes the bounce average G_ℓ(λ) for every + specified field line ℓ (constant rho and alpha), for every λ value in + ``lambdas``. + + Examples + -------- + .. code-block:: python + + ba = bounce_average(eq) + G = ba(name, pitch) + + """ + bi = bounce_integral(eq, rho, alpha, zeta_max, pitch, resolution, method) + + def _bounce_average(name, pitch=None): + """Compute the bounce average of the named quantity using the spline method. + + Parameters + ---------- + name : ndarray + Name of quantity in ``data_index`` to compute the bounce average of. + pitch : ndarray + λ values to evaluate the bounce average at. + If None, uses the values given to the parent function. + + Returns + ------- + G : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 2) + The last axis iterates through every bounce average performed + along that field line padded by zeros. + + """ + return safediv(bi(name, pitch), bi("1", pitch)) + + return _bounce_average + + +def field_line_to_desc_coords(eq, rho, alpha, zeta): + """Get DESC grid from unique field line coordinates.""" + r, a, z = jnp.meshgrid(rho, alpha, zeta, indexing="ij") + r, a, z = r.ravel(), a.ravel(), z.ravel() + # Map these Clebsch-Type field-line coordinates to DESC coordinates. + # Note that the rotational transform can be computed apriori because it is a single + # variable function of rho, and the coordinate mapping does not change rho. Once + # this is known, it is simple to compute theta_PEST from alpha. Then we transform + # from straight field-line coordinates to DESC coordinates with the method + # compute_theta_coords. This is preferred over transforming from Clebsch-Type + # coordinates to DESC coordinates directly with the more general method + # map_coordinates. That method requires an initial guess to be compatible with JIT, + # and generating a reasonable initial guess requires computing the rotational + # transform to approximate theta_PEST and the poloidal stream function anyway. 
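+    # As a sketch of the steps below (illustration only, not an extra code path):
+    # with iota = iota(rho) computed once on a LinearGrid and expanded to the
+    # field line nodes, the straight field-line poloidal angle is
+    #     theta_PEST = alpha + iota * zeta,
+    # and compute_theta_coords inverts theta_PEST = theta + lambda (DESC's poloidal
+    # stream function) to recover the DESC poloidal angle theta.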
+ # TODO: map coords recently updated, so maybe just switch to that + lg = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym) + lg_data = eq.compute("iota", grid=lg) + data = { + d: _meshgrid_expand(lg.compress(lg_data[d]), rho.size, alpha.size, zeta.size) + for d in lg_data + if data_index["desc.equilibrium.equilibrium.Equilibrium"][d]["coordinates"] + == "r" + } + sfl_coords = jnp.column_stack([r, a + data["iota"] * z, z]) + desc_coords = eq.compute_theta_coords(sfl_coords) + grid = Grid(desc_coords, jitable=True) + return grid, data diff --git a/desc/compute/utils.py b/desc/compute/utils.py index f0b6e642af..3d3c298f0e 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -5,12 +5,9 @@ import warnings import numpy as np -from interpax import interp1d -from scipy.interpolate import Akima1DInterpolator, CubicHermiteSpline from termcolor import colored from desc.backend import ( - complex_sqrt, cond, flatnonzero, fori_loop, @@ -19,9 +16,8 @@ put_along_axis, take, use_jax, - vmap, ) -from desc.grid import ConcentricGrid, Grid, LinearGrid, _meshgrid_expand +from desc.grid import ConcentricGrid, LinearGrid from .data_index import data_index @@ -1380,496 +1376,6 @@ def body(i, mins): return grid.expand(mins, surface_label) -def polyint(c): - """Coefficients for the primitives of the given set of polynomials. - - Parameters - ---------- - c : ndarray - First axis should store coefficients of a polynomial. - For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, - coefficient cᵢ should be stored at ``c[n - i]``. - - Returns - ------- - poly : ndarray - Coefficients of polynomial primitive, ignoring the arbitrary constant. - That is, ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁺¹, - where n is ``c.shape[0] - 1``. - - """ - poly = (c.T / jnp.arange(c.shape[0], 0, -1)).T - return poly - - -def polyder(c): - """Coefficients for the derivatives of the given set of polynomials. - - Parameters - ---------- - c : ndarray - First axis should store coefficients of a polynomial. - For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, - coefficient cᵢ should be stored at ``c[n - i]``. - - Returns - ------- - poly : ndarray - Coefficients of polynomial derivative, ignoring the arbitrary constant. - That is, ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁻¹, - where n is ``c.shape[0] - 1``. - - """ - poly = (c[:-1].T * jnp.arange(c.shape[0] - 1, 0, -1)).T - return poly - - -def polyval(x, c): - """Evaluate the set of polynomials c at the points x. - - Parameters - ---------- - x : ndarray - Coordinates at which to evaluate the set of polynomials. - The first ``c.ndim`` axes should have shape ``c.shape[1:]``. - c : ndarray - First axis should store coefficients of a polynomial. - For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, - coefficient cᵢ should be stored at ``c[n - i]``. - - Returns - ------- - val : ndarray - ``val[j, k, ...]`` is the polynomial with coefficients ``c[:, j, k, ...]`` - evaluated at the point ``x[j, k, ...]``. - - Notes - ----- - This function does not perform the same operation as - ``np.polynomial.polynomial.polyval(x, c)``. - An example usage of this function is shown in - tests/test_compute_utils.py::TestComputeUtils::test_polyval. 
- - """ - X = (x[jnp.newaxis].T ** jnp.arange(c.shape[0] - 1, -1, -1)).T - alphabet = "abcdefghijklmnopqrstuvwxyz" - sub = alphabet[: c.ndim] - val = jnp.einsum(f"{sub},{sub}...->{sub[1:]}...", c, X) - return val - - -def tanh_sinh_quadrature(N, quad_limit=3.16): - """ - tanh_sinh quadrature. - - This function outputs the quadrature points and weights - for a tanh-sinh quadrature. - - ∫₋₁¹ f(x) dx = ∑ₖ wₖ f(xₖ) - - Parameters - ---------- - N: int - Number of quadrature points, preferably odd - quad_limit: float - The range of quadrature points to be mapped. - Larger quad_limit implies better result but limited due to overflow in sinh - - Returns - ------- - x : numpy array - Quadrature points - w : numpy array - Quadrature weights - - """ - points = jnp.linspace(-quad_limit, quad_limit, N) - h = 2 * quad_limit / (N - 1) - sinh = jnp.sinh(points) - x = jnp.tanh(0.5 * jnp.pi * sinh) - w = 0.5 * jnp.pi * h * jnp.cosh(points) / jnp.cosh(0.5 * jnp.pi * sinh) ** 2 - return x, w - - -def cubic_poly_roots(coef, constant=jnp.array([0]), a_min=None, a_max=None, sort=False): - """Roots of cubic polynomial. - - Parameters - ---------- - coef : ndarray - First axis should store coefficients of a polynomial. For a polynomial - given by c₁ x³ + c₂ x² + c₃ x + c₄, ``coef[i]`` should store cᵢ. - It is assumed that c₁ is nonzero. - constant : ndarray, shape(constant.size, ) - Specify to instead find solutions to c₁ x³ + c₂ x² + c₃ x + c₄ = ``constant``. - a_min, a_max : ndarray - Minimum and maximum value to clip roots between. - If None, clipping is not performed on the corresponding edge. - Otherwise, complex roots are clipped to ``a_min`` which defaults to -infinity. - Both arrays are broadcast against arrays of shape ``coef.shape[1:]``. - sort : bool - Whether to sort the roots. - - Returns - ------- - roots : ndarray, shape(constant.size, coef.shape, 3) - If constant has one element, the first axis will be squeezed out. - The roots of the cubic polynomial. - - """ - # https://en.wikipedia.org/wiki/Cubic_equation#General_cubic_formula - # The common libraries use root-finding which isn't compatible with JAX. - clip = not (a_min is None and a_max is None) - if a_min is None: - a_min = -jnp.inf - if a_max is None: - a_max = jnp.inf - - a, b, c, d = coef - d = jnp.squeeze((d[jnp.newaxis].T - constant).T) - t_0 = b**2 - 3 * a * c - t_1 = 2 * b**3 - 9 * a * b * c + 27 * a**2 * d - C = ((t_1 + complex_sqrt(t_1**2 - 4 * t_0**3)) / 2) ** (1 / 3) - C_is_zero = jnp.isclose(C, 0) - - def compute_roots(xi_k): - t_3 = jnp.where(C_is_zero, 0, t_0 / (xi_k * C)) - r = -(b + xi_k * C + t_3) / (3 * a) - return r - - def clip_roots(r): - r = jnp.where(jnp.isreal(r), jnp.clip(jnp.real(r), a_min, a_max), a_min) - return r - - xi_1 = (-1 + (-3) ** 0.5) / 2 - xi_2 = xi_1**2 - xi_3 = 1 - roots = tuple(map(compute_roots, (xi_1, xi_2, xi_3))) - if clip: - roots = tuple(map(clip_roots, roots)) - roots = jnp.stack(roots, axis=-1) - if sort: - roots = jnp.sort(roots, axis=-1) - return roots - - -def _get_bounce_points(pitch, zeta, poly_B, poly_B_z): - """Get the bounce points given |B| and 1 / λ. - - Parameters - ---------- - pitch : ndarray - λ values representing the constant function 1 / λ. - zeta : ndarray - Field line-following ζ coordinates of spline knots. - poly_B : ndarray - Polynomial coefficients of the cubic spline of |B|. - poly_B_z : ndarray - Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. 
- - Returns - ------- - intersect, is_bp, bp1, bp2 : ndarray, ndarray, ndarray, ndarray - The polynomials' intersection points with 1 / λ is given by ``intersect``. - In order to be JIT compilable, the returned array must have a shape that - accommodates the case where each cubic polynomial intersects 1 / λ thrice. - The returned array ``intersect`` has shape (pitch.size, M * L, N, NUM_ROOTS). - - The boolean mask ``is_bp`` encodes whether a given entry in ``intersect`` - is a valid bounce point. The boolean masks ``bp1`` and ``bp2`` encode whether - a given entry in ``intersect`` is a valid starting and ending bounce point, - respectively. These arrays have shape (pitch.size * M * L, N * NUM_ROOTS). - - """ - ML = poly_B.shape[1] - N = poly_B.shape[2] - NUM_ROOTS = 3 - a_min = zeta[:-1] - a_max = zeta[1:] - intersect = cubic_poly_roots(poly_B, 1 / pitch, a_min, a_max, sort=True).reshape( - pitch.size, ML, N, NUM_ROOTS - ) - # Typically, this is the shape that is needed later, (as is the case in - # bounce_integrals because jax.vmap works over one axis at a time), so we - # reshape early here to prevent unnecessary data movement. - B_z = polyval(intersect, poly_B_z[:, jnp.newaxis]).reshape( - pitch.size * ML, N * NUM_ROOTS - ) - # Check sign of derivative to determine whether root is a valid bounce point. - bp1 = B_z <= 0 - bp2 = B_z >= 0 - # Periodic boundary to compute bounce integrals of particles trapped outside - # this snapshot of the field lines. - is_bp = bp1 & jnp.roll(bp2, -1, axis=-1) - bp1 = bp1 & is_bp - bp2 = bp2 & is_bp - return intersect, is_bp, bp1, bp2 - - -def _compute_bp_if_given_pitch(pitch, zeta, poly_B, poly_B_z, *original, err=False): - """Return the quantities needed by the ``bounce_integrals`` function. - - Parameters - ---------- - pitch : ndarray - λ values representing the constant function 1 / λ. - If None, returns the given ``original`` tuple. - zeta : ndarray - Field line-following ζ coordinates of spline knots. - poly_B : ndarray - Polynomial coefficients of the cubic spline of |B|. - poly_B_z : ndarray - Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. - original : tuple - pitch, intersect, is_bp, bp1, bp2. - err : bool - Whether to raise an error if ``pitch`` is None and ``original`` is empty. - - Returns - ------- - output : tuple - (pitch, intersect, is_bp, bp1, bp2). - - """ - if pitch is None: - if err and not original: - raise ValueError("No pitch values were given.") - return original - else: - pitch = jnp.atleast_1d(pitch) - intersect, is_bp, bp1, bp2 = _get_bounce_points(pitch, zeta, poly_B, poly_B_z) - return pitch, intersect, is_bp, bp1, bp2 - - -def bounce_integral( - eq, - rho=None, - alpha=None, - zeta_max=10 * jnp.pi, - pitch=None, - resolution=20, - method="tanh_sinh", -): - """Returns a method to compute the bounce integral of any quantity. - - The bounce integral is defined as F_ℓ(λ) = ∫ f(ℓ) / √(1 − λ |B|) dℓ, where - dℓ parameterizes the distance along the field line, - λ is a constant proportional to the magnetic moment over energy, - |B| is the norm of the magnetic field, - f(ℓ) is the quantity to integrate along the field line, - and the endpoints of the integration are at the bounce points. - For a particle with fixed λ, bounce points are defined to be the location - on the field line such that the particle's velocity parallel to the - magnetic field is zero, i.e. λ |B| = 1. - - The bounce integral is defined up to a sign. 
- We choose the sign that corresponds the particle's guiding center trajectory - traveling in the direction of increasing field-line-following label. - - Parameters - ---------- - eq : Equilibrium - Equilibrium on which the bounce integral is defined. - rho : ndarray - Unique flux surface label coordinates. - alpha : ndarray - Unique field line label coordinates over a constant rho surface. - zeta_max : float - Max value for field line following coordinate. - pitch : ndarray - λ values to evaluate the bounce integral at. - resolution : int - Number of interpolation points (knots) used for splines in the quadrature. - A maximum of three bounce points can be detected in between knots. - The accuracy of the quadrature will increase as some function of - the number of knots over the number of detected bounce points. - So for well-behaved magnetic fields increasing resolution should increase - the accuracy of the quadrature. - method : str - The quadrature scheme used to evaluate the integral. - The "direct" method exactly integrates a cubic spline of the integrand. - The "tanh_sinh" method performs a Tanh-sinh quadrature, where independent cubic - splines are used for components in the integrand so that the singularity near - the bounce points can be captured more accurately than can be represented by a - polynomial. - - Returns - ------- - bi : callable - This callable method computes the bounce integral F_ℓ(λ) for every - specified field line ℓ (constant rho and alpha), for every λ value in - ``pitch``. - - Examples - -------- - .. code-block:: python - - bi = bounce_integral(eq) - F = bi(name, pitch) - - """ - - def _direct(name, pitch=None): - """Compute the bounce integral of the named quantity. - - Parameters - ---------- - name : ndarray - Name of quantity in ``data_index`` to compute the bounce integral of. - pitch : ndarray - λ values to evaluate the bounce integral at. - If None, uses the values given to the parent function. - - Returns - ------- - F : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 2) - The last axis iterates through every bounce integral performed - along that field line padded by zeros. - - """ - pitch, intersect, is_bp, _, _ = _compute_bp_if_given_pitch( - pitch, zeta, poly_B, poly_B_z, *original, err=True - ) - intersect = intersect.reshape(pitch.size * M * L, N, NUM_ROOTS) - # Include knots of spline along with intersection points. - intersect = jnp.stack( - jnp.broadcast_arrays( - zeta[:-1], - intersect[..., 0], - intersect[..., 1], - intersect[..., 2], - zeta[1:], - ), - axis=-1, - ) - R = NUM_ROOTS + 2 - is_bp = stretch_batches(is_bp, NUM_ROOTS, R, fill=False) - - integrand = jnp.nan_to_num( - eq.compute(name, grid=grid, override_grid=False, data=data)[name] - / (data["B^zeta"] * jnp.sqrt(1 - pitch[:, jnp.newaxis] * data["|B|"])) - ).reshape(pitch.size * M * L, resolution) - - # TODO: https://github.com/f0uriest/interpax/issues/19 - integrand = Akima1DInterpolator(zeta, integrand, axis=-1).c - - integrand = jnp.moveaxis(integrand, 1, -1) - assert integrand.shape == (4, pitch.size * M * L, N) - # For this algorithm, computing integrals via differences of primitives - # is preferable to any numerical quadrature. For example, even if the - # intersection points were evenly spaced, a composite Simpson's quadrature - # would require computing the spline on 1.8x more knots for the same accuracy. 
- primitive = polyval(intersect, polyint(integrand)).reshape( - pitch.size * M * L, N * R - ) - sums = jnp.cumsum( - # Periodic boundary to compute bounce integrals of particles - # trapped outside this snapshot of the field lines. - jnp.diff(primitive, axis=-1, append=primitive[..., 0, jnp.newaxis]) - # Multiply by mask that is false at knots of piecewise spline - # to avoid adding difference between primitives of splines at knots. - * jnp.append(jnp.arange(1, N * R) % R != 0, True), - axis=-1, - ) - return fun((sums, is_bp)).reshape(pitch.size, M, L, N * R // 2) - - def _quad_sin(name, pitch=None): - """Compute the bounce integral of the named quantity. - - Parameters - ---------- - name : ndarray - Name of quantity in ``data_index`` to compute the bounce integral of. - pitch : ndarray - λ values to evaluate the bounce integral at. - If None, uses the values given to the parent function. - - Returns - ------- - F : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 2) - The last axis iterates through every bounce integral performed - along that field line padded by zeros. - - """ - pitch, intersect, _, bp1, bp2 = _compute_bp_if_given_pitch( - pitch, zeta, poly_B, poly_B_z, *original, err=True - ) - intersect = intersect.reshape(pitch.size * M * L, N * NUM_ROOTS) - bp1 = fun((intersect, bp1)) - bp2 = fun((intersect, bp2)) - X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] - assert X.shape == (pitch.size * M * L, N * 2, x.size) - - def body(i, integral): - k = i % (N * 2) - j = i // (N * 2) - p = i // (M * L * N * 2) - v = j % pitch.size - # TODO: Add Hermite spline to interpax to pass in B_z[i]. - integrand = interp1d(X[j, k], zeta, f[v]) / ( - interp1d(X[j, k], zeta, B_sup_z[v]) - * jnp.sqrt(1 - pitch[p] * interp1d(X[j, k], zeta, B[v])) - ) - integral = put(integral, i, jnp.sum(w * integrand)) - return integral - - f = eq.compute(name, grid=grid, override_grid=False, data=data)[name].reshape( - M * L, resolution - ) - B_sup_z = data["B^zeta"].reshape(M * L, resolution) - return jnp.nan_to_num( - # TODO: Vectorize interpax to do this with 1 call with einsum. 
- fori_loop(0, pitch.size * M * L * N * 2, body, jnp.empty(X.shape[:-1])) - * jnp.pi - / (bp2 - bp1), - posinf=0, - neginf=0, - ).reshape(pitch.size, M, L, N * 2) - - if rho is None: - rho = jnp.linspace(0, 1, 10) - if alpha is None: - alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 20) - rho = jnp.atleast_1d(rho) - alpha = jnp.atleast_1d(alpha) - zeta = np.linspace(0, zeta_max, resolution) - L = rho.size - M = alpha.size - N = resolution - 1 # number of piecewise cubic polynomials per field line - NUM_ROOTS = 3 # number of roots for cubic polynomial - - grid, data = field_line_to_desc_coords(eq, rho, alpha, zeta) - data = eq.compute( - ["B^zeta", "|B|", "|B|_z constant rho alpha"], grid=grid, data=data - ) - B = data["|B|"].reshape(M * L, resolution) - - # TODO: https://github.com/f0uriest/interpax/issues/19 - poly_B = CubicHermiteSpline( - zeta, - B, - data["|B|_z constant rho alpha"].reshape(M * L, resolution), - axis=-1, - extrapolate="periodic", - ).c - - poly_B = jnp.moveaxis(poly_B, 1, -1) - poly_B_z = polyder(poly_B) - assert poly_B.shape == (4, M * L, N) - assert poly_B_z.shape == (3, M * L, N) - - original = _compute_bp_if_given_pitch(pitch, zeta, poly_B, poly_B_z, err=False) - if method == "direct": - bi = _direct - fun = vmap(lambda args: mask_diff(*args)[::2]) - else: - bi = _quad_sin - fun = vmap(lambda args: mask_take(*args, size=N * 2, fill_value=0)) - x, w = tanh_sinh_quadrature(resolution) - x = jnp.arcsin(x) / jnp.pi - 0.5 - return bi - - def mask_take(a, mask, size, fill_value=jnp.nan): """JIT compilable method to return ``a[mask]`` padded by ``fill_value``. @@ -1941,8 +1447,7 @@ def mask_diff(a, mask, n=1, axis=-1, prepend=None): Notes ----- - The result is padded with zeros at the end to be jit compilable. - The current implementation removes all nan values in the output as a side effect. + The result is padded with nan at the end to be jit compilable. """ if prepend is None and not use_jax: @@ -1950,8 +1455,8 @@ def mask_diff(a, mask, n=1, axis=-1, prepend=None): # d35cd07ea997f033b2d89d349734c61f5de54b0d/ # numpy/lib/function_base.py#L1324-L1454 prepend = np._NoValue - diff = jnp.nan_to_num( - jnp.diff(mask_take(a, mask, mask.size, fill_value=jnp.nan), n, axis, prepend) + diff = jnp.diff( + mask_take(a, mask, size=mask.size, fill_value=jnp.nan), n, axis, prepend ) return diff @@ -1998,129 +1503,6 @@ def stretch_batches(in_arr, in_batch_size, out_batch_size, fill): return out_arr -def bounce_average( - eq, - rho=None, - alpha=None, - zeta_max=10 * jnp.pi, - pitch=None, - resolution=20, - method="quad", -): - """Returns a method to compute the bounce average of any quantity. - - The bounce average is defined as - G_ℓ(λ) = (∫ g(ℓ) / √(1 − λ |B|) dℓ) / (∫ 1 / √(1 − λ |B|) dℓ), where - dℓ parameterizes the distance along the field line, - λ is a constant proportional to the magnetic moment over energy, - |B| is the norm of the magnetic field, - g(ℓ) is the quantity to integrate along the field line, - and the endpoints of the integration are at the bounce points. - For a particle with fixed λ, bounce points are defined to be the location - on the field line such that the particle's velocity parallel to the - magnetic field is zero, i.e. λ |B| = 1. - - The bounce integral is defined up to a sign. - We choose the sign that corresponds the particle's guiding center trajectory - traveling in the direction of increasing field-line-following label. - - Parameters - ---------- - eq : Equilibrium - Equilibrium on which the bounce average is defined. 
- rho : ndarray - Unique flux surface label coordinates. - alpha : ndarray - Unique field line label coordinates over a constant rho surface. - zeta_max : float - Max value for field line following coordinate. - pitch : ndarray - λ values to evaluate the bounce average at. - Defaults to linearly spaced values between min and max of |B|. - resolution : int - Number of interpolation points (knots) used for splines in the quadrature. - A maximum of three bounce points can be detected in between knots. - The accuracy of the quadrature will increase as some function of - the number of knots over the number of detected bounce points. - So for well-behaved magnetic fields increasing resolution should increase - the accuracy of the quadrature. - method : str - The method to evaluate the integral. - The "spline" method exactly integrates a cubic spline of the integrand. - The "quad" method performs a Gauss quadrature and estimates the integrand - by using distinct cubic splines for components in the integrand so that - the singularity from the division by zero near the bounce points can be - captured more accurately than can be represented by a polynomial. - - Returns - ------- - ba : callable - This callable method computes the bounce average G_ℓ(λ) for every - specified field line ℓ (constant rho and alpha), for every λ value in - ``lambdas``. - - Examples - -------- - .. code-block:: python - - ba = bounce_average(eq) - G = ba(name, pitch) - - """ - bi = bounce_integral(eq, rho, alpha, zeta_max, pitch, resolution, method) - - def _bounce_average(name, pitch=None): - """Compute the bounce average of the named quantity using the spline method. - - Parameters - ---------- - name : ndarray - Name of quantity in ``data_index`` to compute the bounce average of. - pitch : ndarray - λ values to evaluate the bounce average at. - If None, uses the values given to the parent function. - - Returns - ------- - G : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 2) - The last axis iterates through every bounce average performed - along that field line padded by zeros. - - """ - return safediv(bi(name, pitch), bi("1", pitch)) - - return _bounce_average - - -def field_line_to_desc_coords(eq, rho, alpha, zeta): - """Get DESC grid from unique field line coordinates.""" - r, a, z = jnp.meshgrid(rho, alpha, zeta, indexing="ij") - r, a, z = r.ravel(), a.ravel(), z.ravel() - # Map these Clebsch-Type field-line coordinates to DESC coordinates. - # Note that the rotational transform can be computed apriori because it is a single - # variable function of rho, and the coordinate mapping does not change rho. Once - # this is known, it is simple to compute theta_PEST from alpha. Then we transform - # from straight field-line coordinates to DESC coordinates with the method - # compute_theta_coords. This is preferred over transforming from Clebsch-Type - # coordinates to DESC coordinates directly with the more general method - # map_coordinates. That method requires an initial guess to be compatible with JIT, - # and generating a reasonable initial guess requires computing the rotational - # transform to approximate theta_PEST and the poloidal stream function anyway. 
- # TODO: map coords recently updated, so maybe just switch to that - lg = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym) - lg_data = eq.compute("iota", grid=lg) - data = { - d: _meshgrid_expand(lg.compress(lg_data[d]), rho.size, alpha.size, zeta.size) - for d in lg_data - if data_index["desc.equilibrium.equilibrium.Equilibrium"][d]["coordinates"] - == "r" - } - sfl_coords = jnp.column_stack([r, a + data["iota"] * z, z]) - desc_coords = eq.compute_theta_coords(sfl_coords) - grid = Grid(desc_coords, jitable=True) - return grid, data - - # defines the order in which objective arguments get concatenated into the state vector arg_order = ( "R_lmn", diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py new file mode 100644 index 0000000000..d04863409b --- /dev/null +++ b/tests/test_bounce_integral.py @@ -0,0 +1,153 @@ +"""Tests bounce integral.""" + +import numpy as np +import pytest + +from desc.backend import fori_loop, put, root_scalar +from desc.compute.bounce_integral import ( + cubic_poly_roots, + field_line_to_desc_coords, + polyder, + polyint, + polyval, +) + + +@pytest.mark.unit +def test_cubic_poly_roots(): + """Test vectorized computation of cubic polynomial exact roots.""" + cubic = 4 + poly = np.arange(-60, 60).reshape(cubic, 6, -1) + poly[0] = np.where(poly[0] == 0, np.ones_like(poly[0]), poly[0]) + poly = poly * np.e * np.pi + assert np.unique(poly.shape).size == poly.ndim + constant = np.arange(10) + assert np.unique(poly.shape + constant.shape).size == poly.ndim + constant.ndim + roots = cubic_poly_roots(poly, constant, sort=True) + for j in range(poly.shape[1]): + for k in range(poly.shape[2]): + for s in range(constant.size): + a, b, c, d = poly[:, j, k] + d = d - constant[s] + np.testing.assert_allclose( + roots[s, j, k], + np.sort_complex(np.roots([a, b, c, d])), + ) + + +@pytest.mark.unit +def test_polyint(): + """Test vectorized computation of polynomial primitive.""" + quintic = 6 + poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi + assert np.unique(poly.shape).size == poly.ndim + constant = np.pi + out = polyint(poly, k=constant) + for j in range(poly.shape[1]): + for k in range(poly.shape[2]): + np.testing.assert_allclose( + out[:, j, k], np.polyint(poly[:, j, k], k=constant) + ) + + +@pytest.mark.unit +def test_polyder(): + """Test vectorized computation of polynomial derivative.""" + quintic = 6 + poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi + assert np.unique(poly.shape).size == poly.ndim + out = polyder(poly) + for j in range(poly.shape[1]): + for k in range(poly.shape[2]): + np.testing.assert_allclose(out[:, j, k], np.polyder(poly[:, j, k])) + + +@pytest.mark.unit +def test_polyval(): + """Test vectorized computation of polynomial evaluation.""" + quintic = 6 + poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi + assert np.unique(poly.shape).size == poly.ndim + x = np.linspace(0, 20, poly.shape[1] * poly.shape[2]).reshape( + poly.shape[1], poly.shape[2] + ) + x = np.stack([x, x * 2], axis=-1) + x = np.stack([x, x * 2, x * 3, x * 4], axis=-1) + assert np.unique(x.shape).size == x.ndim + assert poly.shape[1:] == x.shape[: poly.ndim - 1] + assert np.unique((poly.shape[0],) + x.shape[poly.ndim - 1 :]).size == x.ndim - 1 + val = polyval(x, poly) + for j in range(poly.shape[1]): + for k in range(poly.shape[2]): + np.testing.assert_allclose(val[j, k], np.poly1d(poly[:, j, k])(x[j, k])) + + +# TODO: finish up details if deemed useful +def bounce_point( + self, eq, lambdas, rho, 
alpha, max_bounce_points=20, max_field_line=10 * np.pi +): + """Find bounce points.""" + # TODO: + # 1. make another version of desc.backend.root_scalar + # to avoid separate root finding routines in residual and jac + # and use previous desc coords as initial guess for next iteration + # 2. write docstrings and use transforms in api instead of eq + def residual(zeta, i): + grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) + data = eq.compute(["|B|"], grid=grid, data=data) + return data["|B|"] - lambdas[i] + + def jac(zeta): + grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) + data = eq.compute(["|B|_z constant rho alpha"], grid=grid, data=data) + return data["|B|_z constant rho alpha"] + + # Compute |B| - lambda on a dense grid. + # For every field line, find the roots of this linear spline. + # These estimates for the true roots will serve as an initial guess, and + # let us form a boundary mesh around root estimates to limit search domain + # of the root finding algorithms. + zeta = np.linspace(0, max_field_line, 3 * max_bounce_points) + grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) + data = eq.compute(["|B|"], grid=grid, data=data) + B_norm = data["|B|"].reshape(alpha.size, rho.size, -1) # constant field line chunks + + boundary_lt = np.zeros((lambdas.size, max_bounce_points, alpha.size, rho.size)) + boundary_rt = np.zeros((lambdas.size, max_bounce_points, alpha.size, rho.size)) + guess = np.zeros((lambdas.size, max_bounce_points, alpha.size, rho.size)) + # todo: scan over this + for i in range(lambdas.size): + for j in range(alpha.size): + for k in range(rho.size): + # indices of zeta values observed prior to sign change + idx = np.nonzero(np.diff(np.sign(B_norm[j, k] - lambdas[i])))[0] + guess[i, :, j, k] = grid.nodes[idx, 2] + boundary_lt[i, :, j, k] = np.append(zeta[0], guess[:-1]) + boundary_rt[i, :, j, k] = np.append(guess[1:], zeta[-1]) + guess = guess.reshape(lambdas.size, max_bounce_points, alpha.size * rho.size) + boundary_lt = boundary_lt.reshape( + lambdas.size, max_bounce_points, alpha.size * rho.size + ) + boundary_rt = boundary_rt.reshape( + lambdas.size, max_bounce_points, alpha.size * rho.size + ) + + def body_lambdas(i, out): + def body_roots(j, out_i): + def fixup(z): + return np.clip(z, boundary_lt[i, j], boundary_rt[i, j]) + + # todo: call vmap to vectorize on guess[i, j] so that we solve + # guess[i, j].size independent root finding problems + root = root_scalar(residual, guess[i, j], jac=jac, args=i, fixup=fixup) + out_i = put(out_i, j, root) + return out_i + + out = put(out, i, fori_loop(0, max_bounce_points, body_roots, out[i])) + return out + + bounce_points = np.zeros( + shape=(lambdas.size, alpha.size, rho.size, max_bounce_points) + ) + bounce_points = fori_loop(0, lambdas.size, body_lambdas, bounce_points) + return bounce_points diff --git a/tests/test_compute_utils.py b/tests/test_compute_utils.py index ad145e5f45..02a6947b83 100644 --- a/tests/test_compute_utils.py +++ b/tests/test_compute_utils.py @@ -4,17 +4,12 @@ import numpy as np import pytest -from desc.backend import fori_loop, jnp, put, root_scalar +from desc.backend import jnp from desc.basis import FourierZernikeBasis from desc.compute.geom_utils import rotation_matrix from desc.compute.utils import ( _get_grid_surface, - cubic_poly_roots, - field_line_to_desc_coords, line_integrals, - polyder, - polyint, - polyval, surface_averages, surface_integrals, surface_integrals_transform, @@ -577,147 +572,14 @@ def test_surface_min_max(self): 
np.testing.assert_allclose(Bmax_alt, grid.compress(surface_max(grid, B))) np.testing.assert_allclose(Bmin_alt, grid.compress(surface_min(grid, B))) - @pytest.mark.unit - def test_rotation_matrix(self): - """Test that rotation_matrix works with fwd & rev AD for axis=[0, 0, 0].""" - dfdx_fwd = jax.jacfwd(rotation_matrix) - dfdx_rev = jax.jacrev(rotation_matrix) - x0 = jnp.array([0.0, 0.0, 0.0]) - np.testing.assert_allclose(rotation_matrix(x0), np.eye(3)) - np.testing.assert_allclose(dfdx_fwd(x0), np.zeros((3, 3, 3))) - np.testing.assert_allclose(dfdx_rev(x0), np.zeros((3, 3, 3))) +@pytest.mark.unit +def test_rotation_matrix(): + """Test that rotation_matrix works with fwd & rev AD for axis=[0, 0, 0].""" + dfdx_fwd = jax.jacfwd(rotation_matrix) + dfdx_rev = jax.jacrev(rotation_matrix) + x0 = jnp.array([0.0, 0.0, 0.0]) - @pytest.mark.unit - def test_cubic_poly_roots(self): - """Test vectorized computation of cubic polynomial exact roots.""" - cubic = 4 - poly = np.arange(-60, 60).reshape(cubic, 6, -1) - poly[0] = np.where(poly[0] == 0, np.ones_like(poly[0]), poly[0]) - poly = poly * np.e * np.pi - assert np.unique(poly.shape).size == poly.ndim - constant = np.arange(10) - assert np.unique(poly.shape + constant.shape).size == poly.ndim + constant.ndim - roots = cubic_poly_roots(poly, constant, sort=True) - for j in range(poly.shape[1]): - for k in range(poly.shape[2]): - for s in range(constant.size): - a, b, c, d = poly[:, j, k] - d = d - constant[s] - np.testing.assert_allclose( - roots[s, j, k], - np.sort_complex(np.roots([a, b, c, d])), - ) - - @pytest.mark.unit - def test_polyint(self): - """Test vectorized computation of polynomial primitive.""" - quintic = 6 - poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi - assert np.unique(poly.shape).size == poly.ndim - out = polyint(poly) - for j in range(poly.shape[1]): - for k in range(poly.shape[2]): - np.testing.assert_allclose(out[:, j, k], np.polyint(poly[:, j, k])[:-1]) - - @pytest.mark.unit - def test_polyder(self): - """Test vectorized computation of polynomial derivative.""" - quintic = 6 - poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi - assert np.unique(poly.shape).size == poly.ndim - out = polyder(poly) - for j in range(poly.shape[1]): - for k in range(poly.shape[2]): - np.testing.assert_allclose(out[:, j, k], np.polyder(poly[:, j, k])) - - @pytest.mark.unit - def test_polyval(self): - """Test vectorized computation of polynomial evaluation.""" - quintic = 6 - poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi - assert np.unique(poly.shape).size == poly.ndim - x = np.linspace(0, 20, poly.shape[1] * poly.shape[2]).reshape( - poly.shape[1], poly.shape[2] - ) - x = np.stack([x, x * 2], axis=-1) - x = np.stack([x, x * 2, x * 3, x * 4], axis=-1) - assert np.unique(x.shape).size == x.ndim - assert poly.shape[1:] == x.shape[: poly.ndim - 1] - assert np.unique((poly.shape[0],) + x.shape[poly.ndim - 1 :]).size == x.ndim - 1 - val = polyval(x, poly) - for j in range(poly.shape[1]): - for k in range(poly.shape[2]): - np.testing.assert_allclose(val[j, k], np.poly1d(poly[:, j, k])(x[j, k])) - - # TODO: FIXME - def bounce_point( - self, eq, lambdas, rho, alpha, max_bounce_points=20, max_field_line=10 * np.pi - ): - """Find bounce points.""" - # TODO: - # 1. make another version of desc.backend.root_scalar - # to avoid separate root finding routines in residual and jac - # and use previous desc coords as initial guess for next iteration - # 2. 
write docstrings and use transforms in api instead of eq - def residual(zeta, i): - grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) - data = eq.compute(["|B|"], grid=grid, data=data) - return data["|B|"] - lambdas[i] - - def jac(zeta): - grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) - data = eq.compute(["|B|_z constant rho alpha"], grid=grid, data=data) - return data["|B|_z constant rho alpha"] - - # Compute |B| - lambda on a dense grid. - # For every field line, find the roots of this linear spline. - # These estimates for the true roots will serve as an initial guess, and - # let us form a boundary mesh around root estimates to limit search domain - # of the root finding algorithms. - zeta = np.linspace(0, max_field_line, 3 * max_bounce_points) - grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) - data = eq.compute(["|B|"], grid=grid, data=data) - B_norm = data["|B|"].reshape( - alpha.size, rho.size, -1 - ) # constant field line chunks - - boundary_lt = np.zeros((lambdas.size, max_bounce_points, alpha.size, rho.size)) - boundary_rt = np.zeros((lambdas.size, max_bounce_points, alpha.size, rho.size)) - guess = np.zeros((lambdas.size, max_bounce_points, alpha.size, rho.size)) - # todo: scan over this - for i in range(lambdas.size): - for j in range(alpha.size): - for k in range(rho.size): - # indices of zeta values observed prior to sign change - idx = np.nonzero(np.diff(np.sign(B_norm[j, k] - lambdas[i])))[0] - guess[i, :, j, k] = grid.nodes[idx, 2] - boundary_lt[i, :, j, k] = np.append(zeta[0], guess[:-1]) - boundary_rt[i, :, j, k] = np.append(guess[1:], zeta[-1]) - guess = guess.reshape(lambdas.size, max_bounce_points, alpha.size * rho.size) - boundary_lt = boundary_lt.reshape( - lambdas.size, max_bounce_points, alpha.size * rho.size - ) - boundary_rt = boundary_rt.reshape( - lambdas.size, max_bounce_points, alpha.size * rho.size - ) - - def body_lambdas(i, out): - def body_roots(j, out_i): - def fixup(z): - return np.clip(z, boundary_lt[i, j], boundary_rt[i, j]) - - # todo: call vmap to vectorize on guess[i, j] so that we solve - # guess[i, j].size independent root finding problems - root = root_scalar(residual, guess[i, j], jac=jac, args=i, fixup=fixup) - out_i = put(out_i, j, root) - return out_i - - out = put(out, i, fori_loop(0, max_bounce_points, body_roots, out[i])) - return out - - bounce_points = np.zeros( - shape=(lambdas.size, alpha.size, rho.size, max_bounce_points) - ) - bounce_points = fori_loop(0, lambdas.size, body_lambdas, bounce_points) - return bounce_points + np.testing.assert_allclose(rotation_matrix(x0), np.eye(3)) + np.testing.assert_allclose(dfdx_fwd(x0), np.zeros((3, 3, 3))) + np.testing.assert_allclose(dfdx_rev(x0), np.zeros((3, 3, 3))) From ffabc47c49e2674d1f99e60ce925b517891d8149 Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 14 Mar 2024 01:49:45 -0500 Subject: [PATCH 028/241] Switch to interpax, vectorize loop, fix some bugs --- desc/backend.py | 11 +- desc/compute/_field.py | 7 +- desc/compute/bounce_integral.py | 478 ++++++++++++++++++-------------- tests/test_bounce_integral.py | 15 +- 4 files changed, 294 insertions(+), 217 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index ecdb7acb7b..93e7381f25 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -600,6 +600,7 @@ def while_loop(cond_fun, body_fun, init_val): val = body_fun(val) return val + # TODO: generalize this, maybe use np.vectorize def vmap(fun, in_axes=0, out_axes=0): """A numpy implementation of jax.lax.map whose API is a subset of 
jax.vmap. @@ -630,7 +631,7 @@ def vmap(fun, in_axes=0, out_axes=0): def fun_vmap(fun_inputs): if isinstance(fun_inputs, tuple): raise NotImplementedError( - "Backend implementation of vmap fails for multiple arguments." + "Backend implementation of vmap fails for multiple args in tuple." ) return np.stack([fun(fun_input) for fun_input in fun_inputs], axis=out_axes) @@ -676,7 +677,7 @@ def scan(f, init, xs, length=None, reverse=False, unroll=1): return carry, np.stack(ys) def bincount(x, weights=None, minlength=0, length=None): - """Numpy implementation of jnp.bincount.""" + """A numpy implementation of jnp.bincount.""" x = np.clip(x, 0, None) if length is None: length = max(minlength, x.max() + 1) @@ -685,7 +686,7 @@ def bincount(x, weights=None, minlength=0, length=None): return np.bincount(x, weights, minlength)[:length] def repeat(a, repeats, axis=None, total_repeat_length=None): - """Numpy implementation of jnp.repeat.""" + """A numpy implementation of jnp.repeat.""" out = np.repeat(a, repeats, axis) if total_repeat_length is not None: out = out[:total_repeat_length] @@ -802,7 +803,7 @@ def root( return out.x, out def flatnonzero(a, size=None, fill_value=0): - """Numpy implementation of jnp.flatnonzero.""" + """A numpy implementation of jnp.flatnonzero.""" nz = np.flatnonzero(a) if size is not None: nz = np.append(nz, np.repeat(fill_value, max(size - nz.size, 0))) @@ -818,7 +819,7 @@ def take( indices_are_sorted=False, fill_value=None, ): - """Numpy implementation of jnp.take.""" + """A numpy implementation of jnp.take.""" if mode == "fill": if fill_value is None: # TODO: Interpret default fill value based on dtype of a. diff --git a/desc/compute/_field.py b/desc/compute/_field.py index 9c63d6cc70..1948832432 100644 --- a/desc/compute/_field.py +++ b/desc/compute/_field.py @@ -2336,8 +2336,7 @@ def _B_mag_alpha(params, transforms, profiles, data, **kwargs): @register_compute_fun( - # TODO: pick a name - name="|B|_z constant rho alpha", + name="|B|_z|r,a", label="\\(partial_{\\zeta} |\\mathbf{B}|)_{\\rho, \\alpha}", units="T", units_long="Tesla", @@ -2352,9 +2351,7 @@ def _B_mag_alpha(params, transforms, profiles, data, **kwargs): def _B_mag_z_constant_rho_alpha(params, transforms, profiles, data, **kwargs): # ∂|B|/∂ζ (constant ρ and α) = ∂|B|/∂ζ (constant ρ and θ) # - ∂|B|/∂α (constant ρ and ζ) * ∂α/∂ζ (constant ρ and θ) - data["|B|_z constant rho alpha"] = ( - data["|B|_z"] - data["|B|_alpha"] * data["alpha_z"] - ) + data["|B|_z|r,a"] = data["|B|_z"] - data["|B|_alpha"] * data["alpha_z"] return data diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 5efda73626..1552a6793c 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -1,22 +1,110 @@ """Methods for computing bounce integrals.""" -from interpax import interp1d -from scipy.interpolate import Akima1DInterpolator, CubicHermiteSpline - -from desc.backend import ( - complex_sqrt, - flatnonzero, - fori_loop, - jnp, - put, - put_along_axis, - vmap, -) +from interpax import Akima1DInterpolator, CubicHermiteSpline + +from desc.backend import complex_sqrt, flatnonzero, jnp, put_along_axis, vmap from desc.compute.utils import mask_diff, mask_take, safediv from desc.grid import Grid, LinearGrid, _meshgrid_expand from .data_index import data_index +NUM_ROOTS = 3 # max number of roots of a cubic polynomial +# returns index of first nonzero element in a +v_first_flatnonzero = vmap(lambda a: flatnonzero(a, size=1, fill_value=a.size)) +v_mask_diff = vmap(lambda a, mask: 
mask_diff(a, mask)) +v_mask_take = vmap(lambda a, mask: mask_take(a, mask, size=a.size, fill_value=jnp.nan)) + + +# TODO: fix up some logic with the periodic boundary bounce integral thing + + +def _inmost(pitch, w, X, f, B_sup_z, B): + """Compute a single bounce integral. + + Parameters + ---------- + pitch : float + λ values. + w : ndarray, shape(w.size, ) + Quadrature weights. + X : ndarray, shape(w.size, ) + Quadrature points. + f : Akima1DInterpolator + Spline of function to compute bounce integral of. + B_sup_z : Akima1DInterpolator + Contravariant field-line following toroidal component of magnetic field. + B : CubicHermiteSpline + Norm of magnetic field. + + """ + assert jnp.size(pitch) == 1 + assert w.shape == X.shape + return jnp.sum(w * f(X) / (B_sup_z(X) * jnp.sqrt(1 - pitch * B(X)))) + + +def _inner(pitch, w, X, f, B_sup_z, B): + """Compute bounce integrals along a particular field line and pitch. + + Parameters + ---------- + pitch : float + λ values. + w : ndarray, shape(w.size, ) + Quadrature weights. + X : ndarray, shape(:, w.size, ) + Quadrature points. + f : Akima1DInterpolator + Spline of function to compute bounce integral of. + B_sup_z : Akima1DInterpolator + Contravariant field-line following toroidal component of magnetic field. + B : CubicHermiteSpline + Norm of magnetic field. + + """ + assert jnp.size(pitch) == 1 + assert w.shape == X.shape[1:] + return vmap(_inmost, in_axes=(None, None, 0, None, None, None))( + pitch, w, X, f, B_sup_z, B + ) + + +def _outer(knots, pitch, w, X, f, B_sup_z, B, B_z_ra): + """Compute bounce integrals for every pitch along a single field line. + + Parameters + ---------- + knots : ndarray, shape(knots.size, ) + Field line-following ζ coordinates of spline knots. + pitch : ndarray, shape(pitch.size, ) + λ values. + w : ndarray, shape(w.size, ) + Quadrature weights. + X : ndarray, shape(pitch.size, :, w.size) + Quadrature points. + f : ndarray, shape(knots.size, ) + Spline of function to compute bounce integral of. + B_sup_z : ndarray, shape(knots.size, ) + Contravariant field-line following toroidal component of magnetic field. + B : ndarray, shape(knots.size, ) + Norm of magnetic field. + B_z_ra : ndarray, shape(knots.size, ) + Norm of magnetic field derivative with respect to field-line following label. + + """ + assert pitch.size == X.shape[0] + # FIXME: https://github.com/f0uriest/interpax/issues/26 + f = Akima1DInterpolator(knots, f, check=False) + B_sup_z = Akima1DInterpolator(knots, B_sup_z, check=False) + B = CubicHermiteSpline(knots, B, B_z_ra, check=False) + return vmap(_inner, in_axes=(0, None, 0, None, None, None))( + pitch, w, X, f, B_sup_z, B + ) + + +# ...maybe there's a better way to do this with jnp.vectorize(..., excluded={...})? +"""Compute bounce integrals for every pitch along every field line.""" +_compute_quad = vmap(_outer, in_axes=(None, None, None, 1, 0, 0, 0, 0)) + def polyint(c, k=jnp.array([0])): """Coefficients for the primitives of the given set of polynomials. @@ -27,7 +115,7 @@ def polyint(c, k=jnp.array([0])): First axis should store coefficients of a polynomial. For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, coefficient cᵢ should be stored at ``c[n - i]``. - k : ndarray or float + k : ndarray Integration constants. Returns @@ -92,14 +180,14 @@ def polyval(x, c): tests/test_compute_utils.py::TestComputeUtils::test_polyval. 
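+
+    A quick check of the broadcasting convention (an illustrative sketch, not part
+    of the test suite):
+
+    .. code-block:: python
+
+        # 4 coefficients per cubic, on a 2x3 grid of cubic polynomials
+        c = jnp.arange(24.0).reshape(4, 2, 3)
+        # one evaluation point per polynomial
+        x = jnp.ones(c.shape[1:])
+        val = polyval(x, c)  # val[j, k] == c[:, j, k].sum() since x == 1
+        assert val.shape == (2, 3)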
""" - X = (x[jnp.newaxis].T ** jnp.arange(c.shape[0] - 1, -1, -1)).T + X = x[..., jnp.newaxis] ** jnp.arange(c.shape[0] - 1, -1, -1) alphabet = "abcdefghijklmnopqrstuvwxyz" - sub = alphabet[: c.ndim] - val = jnp.einsum(f"{sub},{sub}...->{sub[1:]}...", c, X) + s = alphabet[: c.ndim] + val = jnp.einsum(f"{s},{s[1:]}...{s[0]}->{s[1:]}...", c, X) return val -def tanh_sinh_quadrature(N, quad_limit=3.16): +def tanh_sinh_quadrature(N): """ tanh_sinh quadrature. @@ -112,9 +200,6 @@ def tanh_sinh_quadrature(N, quad_limit=3.16): ---------- N: int Number of quadrature points, preferably odd - quad_limit: float - The range of quadrature points to be mapped. - Larger quad_limit implies better result but limited due to overflow in sinh Returns ------- @@ -124,8 +209,19 @@ def tanh_sinh_quadrature(N, quad_limit=3.16): Quadrature weights """ - points = jnp.linspace(-quad_limit, quad_limit, N) - h = 2 * quad_limit / (N - 1) + # Copied from quadax. + # https://github.com/f0uriest/quadax/blob/main/quadax/utils.py#L166 + def get_tmax(xmax): + """Inverse of tanh-sinh transform.""" + tanhinv = lambda x: 1 / 2 * jnp.log((1 + x) / (1 - x)) + sinhinv = lambda x: jnp.log(x + jnp.sqrt(x**2 + 1)) + return sinhinv(2 / jnp.pi * tanhinv(xmax)) + + # inverse of tanh-sinh transformation for x = 1-eps with small buffer + tmax = get_tmax(jnp.array(1.0) - 10 * jnp.finfo(jnp.array(1.0)).eps) + + points = jnp.linspace(-tmax, tmax, N) + h = 2 * tmax / (N - 1) sinh = jnp.sinh(points) x = jnp.tanh(0.5 * jnp.pi * sinh) w = 0.5 * jnp.pi * h * jnp.cosh(points) / jnp.cosh(0.5 * jnp.pi * sinh) ** 2 @@ -141,7 +237,7 @@ def cubic_poly_roots(coef, k=jnp.array([0]), a_min=None, a_max=None, sort=False) First axis should store coefficients of a polynomial. For a polynomial given by c₁ x³ + c₂ x² + c₃ x + c₄, ``coef[i]`` should store cᵢ. It is assumed that c₁ is nonzero. - k : ndarray, shape(constant.size, ) + k : ndarray, shape(k.size, ) Specify to instead find solutions to c₁ x³ + c₂ x² + c₃ x + c₄ = ``k``. a_min, a_max : ndarray Minimum and maximum value to return roots between. @@ -167,7 +263,7 @@ def cubic_poly_roots(coef, k=jnp.array([0]), a_min=None, a_max=None, sort=False) a_max = jnp.inf a, b, c, d = coef - d = jnp.squeeze((d[jnp.newaxis].T - k).T) + d = jnp.squeeze(jnp.moveaxis(d[..., jnp.newaxis] - k, -1, 0)) t_0 = b**2 - 3 * a * c t_1 = 2 * b**3 - 9 * a * b * c + 27 * a**2 * d C = ((t_1 + complex_sqrt(t_1**2 - 4 * t_0**3)) / 2) ** (1 / 3) @@ -194,14 +290,14 @@ def clip_to_nan(r): return roots -def _get_bounce_points(pitch, zeta, poly_B, poly_B_z): - """Get the bounce points given |B| and 1 / λ. +def _compute_bp(pitch, knots, poly_B, poly_B_z): + """Compute the bounce points given |B| and pitch λ. Parameters ---------- pitch : ndarray - λ values representing the constant function 1 / λ. - zeta : ndarray + λ values. + knots : ndarray Field line-following ζ coordinates of spline knots. poly_B : ndarray Polynomial coefficients of the cubic spline of |B|. @@ -210,70 +306,71 @@ def _get_bounce_points(pitch, zeta, poly_B, poly_B_z): Returns ------- - intersect, bp1, bp2 : ndarray, ndarray, ndarray - The polynomials' intersection points with 1 / λ is given by ``intersect``. - In order to be JIT compilable, the returned array must have a shape that - accommodates the case where each cubic polynomial intersects 1 / λ thrice. - So ``intersect`` has shape (pitch.size * M * L, N * NUM_ROOTS), - where the last axis is padded with nan at the end to be JIT compilable. 
- The boolean masks ``bp1`` and ``bp2`` encode whether a given entry in - ``intersect`` is a valid starting and ending bounce point, respectively. + bp1, bp2 : ndarray, ndarray + Field line-following ζ coordinates of bounce points for a given pitch + along a field line. Has shape (pitch.size * M * L, N * NUM_ROOTS). + If there were less than N * NUM_ROOTS bounce points along a field line, + then the last axis is padded with nan. """ - ML = poly_B.shape[1] - N = poly_B.shape[2] - NUM_ROOTS = 3 - a_min = zeta[:-1] - a_max = zeta[1:] - - intersect = cubic_poly_roots(poly_B, 1 / pitch, a_min, a_max, sort=True).reshape( - pitch.size, ML, N, NUM_ROOTS - ) + ML = jnp.array(poly_B.shape[1:-1]).prod() + N = poly_B.shape[-1] + + # The polynomials' intersection points with 1 / λ is given by ``intersect``. + # In order to be JIT compilable, this must have a shape that accommodates the + # case where each cubic polynomial intersects 1 / λ thrice. + # nan values in ``intersect`` denote a polynomial has less than three intersects. + intersect = cubic_poly_roots( + coef=poly_B, k=1 / pitch, a_min=knots[:-1], a_max=knots[1:], sort=True + ).reshape(pitch.size, *poly_B.shape[1:], NUM_ROOTS) + # Reshape to unsqueeze pitch axis. + + # Reshape so that last axis enumerates intersects of a pitch along a field line. + # Condense the first and second axes to vmap over them. B_z = polyval(intersect, poly_B_z[:, jnp.newaxis]).reshape( pitch.size * ML, N * NUM_ROOTS ) intersect = intersect.reshape(pitch.size * ML, N * NUM_ROOTS) + # Only consider intersect if it is within knots that bound that polynomial. is_intersect = ~jnp.isnan(intersect) - # Rearrange so that all the intersects along field line are contiguous. - contiguous = vmap( - lambda args: mask_take(*args, size=N * NUM_ROOTS, fill_value=jnp.nan) - ) - intersect = contiguous((intersect, is_intersect)) - B_z = contiguous((B_z, is_intersect)) - # Check sign of derivative to determine whether root is a valid bounce point. + + # Rearrange so that all intersects along a field line are contiguous. + intersect = v_mask_take(intersect, is_intersect) + B_z = v_mask_take(B_z, is_intersect) + # The boolean masks ``bp1`` and ``bp2`` will encode whether a given entry in + # ``intersect`` is a valid starting and ending bounce point, respectively. + # Sign of derivative determines whether an intersect is a valid bounce point. bp1 = B_z <= 0 bp2 = B_z >= 0 - - # index of last intersect - idx = ( - jnp.squeeze( - vmap(lambda a: flatnonzero(a, size=1, fill_value=a.size))(~is_intersect) - ) - - 1 - ) - assert idx.shape == (pitch.size * ML,) - # Periodic boundary to compute bounce integrals of particles trapped outside - # this snapshot of the field lines. + # B_z <= 0 at intersect i implies B_z >= 0 at intersect i+1 by continuity. + + # # TODO: fix the boundary logic, + # extend bp1 and bp2 by single element and then test + # # index of last intersect along a field line + # idx = jnp.squeeze(v_first_flatnonzero(~is_intersect)) - 1 # noqa: E800 + # assert idx.shape == (pitch.size * ML,) # noqa: E800 + # Consider the boundary to be periodic to compute bounce integrals of + # particles trapped outside this snapshot of the field lines. # Roll such that first intersect is moved to index of last intersect. - is_bp = bp1 & put_along_axis(jnp.roll(bp2, -1, axis=-1), idx, bp2[:, 0], axis=-1) - # B_z<=0 at intersect_i implies B_z>=0 at intersect_i+1 by continuity. - # I think this step is only needed to determine if the boundaries are bounce points. 
- bp1 = bp1 & is_bp - bp2 = bp2 & is_bp - return intersect, bp1, bp2 + + # Get ζ values of bounce points from the masks. + bp1 = v_mask_take(intersect, bp1) + bp2 = v_mask_take(intersect, bp2) + assert bp1.shape == bp2.shape == (pitch.size * ML, N * NUM_ROOTS) + return bp1, bp2 -def _get_bounce_points_include_knots(pitch, zeta, poly_B, poly_B_z): - """Get the bounce points given |B| and 1 / λ. +def _compute_bp_include_knots(pitch, knots, poly_B, poly_B_z): + """Compute the bounce points given |B| and pitch λ. - Like ``_get_bounce_points`` but returns additional ingredients - needed by the algorithm in the direct method in ``bounce_integral``. + Like ``_compute_bp`` but returns ingredients needed by the + algorithm in the direct method in ``bounce_integral``. Parameters ---------- pitch : ndarray - λ values representing the constant function 1 / λ. - zeta : ndarray + λ values. + knots : ndarray Field line-following ζ coordinates of spline knots. poly_B : ndarray Polynomial coefficients of the cubic spline of |B|. @@ -282,91 +379,90 @@ def _get_bounce_points_include_knots(pitch, zeta, poly_B, poly_B_z): Returns ------- - intersect_nan_to_right_knot, contiguous, is_intersect, is_bp - The polynomials' intersection points with 1 / λ is given by - ``intersect_nan_to_right_knot``. - In order to be JIT compilable, the returned array must have a shape that - accommodates the case where each cubic polynomial intersects 1 / λ thrice. - Rather than padding the nan values to the end, ``intersect_nan_to_right_knot`` - replaces the nan values with the right knot of the splines. This array - has shape (pitch.size * M * L, N, NUM_ROOTS + 2). - The boolean mask ``is_bp`` encodes whether a given entry in - - .. code-block:: python - contiguous( - (intersect_nan_to_right_knot.reshape(pitch.size * ML, -1), is_intersect) - ) - - is a valid bounce point. + intersect_nan_to_right_knot, is_intersect, is_bp + The boolean mask ``is_bp`` encodes whether a given pair of intersects + are the endpoints of a bounce integral. """ - ML = poly_B.shape[1] - N = poly_B.shape[2] - NUM_ROOTS = 3 - R = NUM_ROOTS + 2 - a_min = zeta[:-1] - a_max = zeta[1:] - - roots = cubic_poly_roots(poly_B, 1 / pitch, a_min, a_max, sort=True).reshape( - pitch.size, ML, N, 3 - ) + ML = jnp.array(poly_B.shape[1:-1]).prod() + N = poly_B.shape[-1] + a_min = knots[:-1] + a_max = knots[1:] + + # The polynomials' intersection points with 1 / λ is given by ``roots``. + # In order to be JIT compilable, this must have a shape that accommodates the + # case where each cubic polynomial intersects 1 / λ thrice. + # nan values in ``roots`` denote a polynomial has less than three intersects. + roots = cubic_poly_roots( + coef=poly_B, k=1 / pitch, a_min=a_min, a_max=a_max, sort=True + ).reshape(pitch.size, *poly_B.shape[1:], NUM_ROOTS) + # Reshape to unsqueeze pitch axis. + + # Include the knots of the splines along with the intersection points. + # This preprocessing makes the ``direct`` algorithm in ``bounce_integral`` simpler. roots = (roots[..., 0], roots[..., 1], roots[..., 2]) - nan_to_right_knot = tuple(map(lambda r: jnp.where(jnp.isnan(r), a_max, r), roots)) a_min = jnp.broadcast_to(a_min, shape=(pitch.size, ML, N)) a_max = jnp.broadcast_to(a_max, shape=(pitch.size, ML, N)) - # Include the knots of the splines along with the intersection points. 
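Carrying the knots along with the intersections is what lets the ``direct`` branch evaluate each bounce integral as a telescoping sum of antiderivative differences over the sub-intervals between a pair of bounce points, even though each piece's primitive has its own arbitrary constant. A small SciPy check of that identity (illustrative only; the DESC version is vectorized over pitch and field lines):

.. code-block:: python

    import numpy as np
    from scipy.interpolate import CubicSpline

    knots = np.linspace(0, 2 * np.pi, 10)
    spline = CubicSpline(knots, np.sin(knots))

    bp1, bp2 = 1.3, 5.1  # integration endpoints, generally not knots
    total = 0.0
    for i in range(knots.size - 1):
        # Clip this piece's interval to [bp1, bp2]; skip pieces fully outside.
        lo, hi = max(knots[i], bp1), min(knots[i + 1], bp2)
        if lo >= hi:
            continue
        # Antiderivative of the i-th cubic piece in its local variable x - knots[i].
        # Its integration constant cancels because only a difference is taken.
        primitive = np.polyint(spline.c[:, i])
        total += np.polyval(primitive, hi - knots[i]) - np.polyval(primitive, lo - knots[i])

    # Matches the spline's own definite integral (and ~ cos(1.3) - cos(5.1)).
    np.testing.assert_allclose(total, spline.integrate(bp1, bp2))
    print(total)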
intersect = jnp.stack((a_min, *roots, a_max), axis=-1) - intersect_nan_to_right_knot = jnp.stack( - (a_min, *nan_to_right_knot, a_max), axis=-1 - ).reshape(pitch.size * ML, N, R) - B_z = polyval(intersect, poly_B_z[:, jnp.newaxis]).reshape(pitch.size * ML, N * R) + # Reshape so that last axis enumerates intersects of a pitch along a field line. + # Condense the first and second axes to vmap over them. + B_z = polyval(intersect, poly_B_z[:, jnp.newaxis]).reshape( + pitch.size * ML, N * (NUM_ROOTS + 2) + ) + # Only consider intersect if it is within knots that bound that polynomial. is_intersect = jnp.reshape( jnp.array([False, True, True, True, False], dtype=bool) & ~jnp.isnan(intersect), - newshape=(pitch.size * ML, N * R), + newshape=(pitch.size * ML, N * (NUM_ROOTS + 2)), ) + # Rearrange so that all the intersects along field line are contiguous. - contiguous = vmap(lambda args: mask_take(*args, size=N * R, fill_value=jnp.nan)) - B_z = contiguous((B_z, is_intersect)) - # Check sign of derivative to determine whether root is a valid bounce point. + B_z = v_mask_take(B_z, is_intersect) + # The boolean masks ``bp1`` and ``bp2`` will encode whether a given entry in + # ``intersect`` is a valid starting and ending bounce point, respectively. + # Sign of derivative determines whether an intersect is a valid bounce point. bp1 = B_z <= 0 bp2 = B_z >= 0 + # B_z <= 0 at intersect i implies B_z >= 0 at intersect i+1 by continuity. # index of last intersect - idx = ( - jnp.squeeze( - vmap(lambda a: flatnonzero(a, size=1, fill_value=a.size))(~is_intersect) - ) - - 1 - ) + idx = jnp.squeeze(v_first_flatnonzero(~is_intersect)) - 1 assert idx.shape == (pitch.size * ML,) - # Periodic boundary to compute bounce integrals of particles trapped outside - # this snapshot of the field lines. + # Consider the boundary to be periodic to compute bounce integrals of + # particles trapped outside this snapshot of the field lines. # Roll such that first intersect is moved to index of last intersect. - is_bp = bp1 & put_along_axis(jnp.roll(bp2, -1, axis=-1), idx, bp2[:, 0], axis=-1) - return intersect_nan_to_right_knot, contiguous, is_intersect, is_bp + is_bp = bp1 & put_along_axis(jnp.roll(bp2, -1, axis=-1), idx, bp2[..., 0], axis=-1) + + # Returning this makes the ``direct`` algorithm in ``bounce_integral`` simpler. + # Replace nan values with right knots of the spline. + intersect_nan_to_right_knot = jnp.stack( + (a_min, *tuple(map(lambda r: jnp.where(jnp.isnan(r), a_max, r), roots)), a_max), + axis=-1, + ).reshape(pitch.size * ML, N, (NUM_ROOTS + 2)) + + return intersect_nan_to_right_knot, is_intersect, is_bp def _compute_bp_if_given_pitch( - pitch, zeta, poly_B, poly_B_z, get_bounce_points, *original, err=False + pitch, knots, poly_B, poly_B_z, compute_bp, *original, err=False ): - """Return the ingredients needed by the ``bounce_integrals`` function. + """Return the ingredients needed by the ``bounce_integral`` function. Parameters ---------- pitch : ndarray λ values representing the constant function 1 / λ. If None, returns the given ``original`` tuple. - zeta : ndarray + knots : ndarray Field line-following ζ coordinates of spline knots. poly_B : ndarray Polynomial coefficients of the cubic spline of |B|. poly_B_z : ndarray Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. - get_bounce_points : callable - Method to return bounce points. + compute_bp : callable + Method to compute bounce points. original : tuple - pitch, intersect, is_bp, bp1, bp2. + Whatever this method returned earlier. 
err : bool Whether to raise an error if ``pitch`` is None and ``original`` is empty. @@ -377,7 +473,7 @@ def _compute_bp_if_given_pitch( return original else: pitch = jnp.atleast_1d(pitch) - return pitch, *get_bounce_points(pitch, zeta, poly_B, poly_B_z) + return pitch, *compute_bp(pitch, knots, poly_B, poly_B_z) def bounce_integral( @@ -453,33 +549,22 @@ def bounce_integral( alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 20) rho = jnp.atleast_1d(rho) alpha = jnp.atleast_1d(alpha) - zeta = jnp.linspace(0, zeta_max, resolution) + knots = jnp.linspace(0, zeta_max, resolution) L = rho.size M = alpha.size N = resolution - 1 # number of piecewise cubic polynomials per field line - NUM_ROOTS = 3 # number of roots for cubic polynomial - grid, data = field_line_to_desc_coords(eq, rho, alpha, zeta) - data = eq.compute( - ["B^zeta", "|B|", "|B|_z constant rho alpha"], grid=grid, data=data - ) + grid, data = field_line_to_desc_coords(eq, rho, alpha, knots) + data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid, data=data) B = data["|B|"].reshape(M * L, resolution) - - # TODO: https://github.com/f0uriest/interpax/issues/19 - poly_B = CubicHermiteSpline( - zeta, - B, - data["|B|_z constant rho alpha"].reshape(M * L, resolution), - axis=-1, - extrapolate="periodic", - ).c - + B_z_ra = data["|B|_z|r,a"].reshape(M * L, resolution) + poly_B = CubicHermiteSpline(knots, B, B_z_ra, axis=-1, check=False).c poly_B = jnp.moveaxis(poly_B, 1, -1) poly_B_z = polyder(poly_B) assert poly_B.shape == (4, M * L, N) assert poly_B_z.shape == (3, M * L, N) - def _direct(name, pitch=None): + def direct(name, pitch=None): """Compute the bounce integral of the named quantity. Parameters @@ -492,60 +577,61 @@ def _direct(name, pitch=None): Returns ------- - F : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 2) + F : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 3) The last axis iterates through every bounce integral performed - along that field line padded by zeros. + along that field line padded by nan. """ ( pitch, intersect_nan_to_right_knot, - contiguous, is_intersect, is_bp, ) = _compute_bp_if_given_pitch( pitch, - zeta, + knots, poly_B, poly_B_z, - _get_bounce_points_include_knots, + _compute_bp_include_knots, *original, err=True, ) integrand = jnp.nan_to_num( - eq.compute(name, grid=grid, override_grid=False, data=data)[name] + eq.compute(name, grid=grid, data=data)[name] / (data["B^zeta"] * jnp.sqrt(1 - pitch[:, jnp.newaxis] * data["|B|"])) ).reshape(pitch.size * M * L, resolution) - - # TODO: https://github.com/f0uriest/interpax/issues/19 - integrand = Akima1DInterpolator(zeta, integrand, axis=-1).c - + integrand = Akima1DInterpolator(knots, integrand, axis=-1, check=False).c integrand = jnp.moveaxis(integrand, 1, -1) assert integrand.shape == (4, pitch.size * M * L, N) # For this algorithm, computing integrals via differences of primitives # is preferable to any numerical quadrature. For example, even if the # intersection points were evenly spaced, a composite Simpson's quadrature # would require computing the spline on 1.8x more knots for the same accuracy. - R = NUM_ROOTS + 2 primitive = polyval(intersect_nan_to_right_knot, polyint(integrand)).reshape( - pitch.size * M * L, N * R + pitch.size * M * L, N * (NUM_ROOTS + 2) ) sums = jnp.cumsum( # Periodic boundary to compute bounce integrals of particles # trapped outside this snapshot of the field lines. 
jnp.diff(primitive, axis=-1, append=primitive[..., 0, jnp.newaxis]) - # Multiply by mask that is false at shared knots of piecewise spline + # We didn't enforce continuity of the piecewise primitives, so + # multiply by mask that is false at shared knots of piecewise spline # to avoid adding difference between primitives of splines at knots. - * jnp.append(jnp.arange(1, N * R) % R != 0, True), + * jnp.append( + jnp.arange(1, N * (NUM_ROOTS + 2)) % (NUM_ROOTS + 2) != 0, True + ), axis=-1, ) - F = jnp.nan_to_num( - fun((contiguous((sums, is_intersect)), is_bp)), posinf=0, neginf=0 + F = jnp.reshape( + # Compute difference of ``sums`` between bounce points. + v_mask_diff(v_mask_take(sums, is_intersect), is_bp)[..., : N * NUM_ROOTS], + # Guaranteed to have at most N * NUM_ROOTS entries where is_bp is true. + newshape=(pitch.size, M, L, N * NUM_ROOTS), ) - return F.reshape(pitch.size, M, L, N * R // 2) + return F - def _quad_sin(name, pitch=None): + def quad(name, pitch=None): """Compute the bounce integral of the named quantity. Parameters @@ -558,60 +644,42 @@ def _quad_sin(name, pitch=None): Returns ------- - F : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 2) + F : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 3) The last axis iterates through every bounce integral performed - along that field line padded by zeros. + along that field line padded by nan. """ - pitch, intersect, bp1, bp2 = _compute_bp_if_given_pitch( - pitch, zeta, poly_B, poly_B_z, _get_bounce_points, *original, err=True + pitch, bp1, bp2 = _compute_bp_if_given_pitch( + pitch, knots, poly_B, poly_B_z, _compute_bp, *original, err=True ) - bp1 = fun((intersect, bp1)) - bp2 = fun((intersect, bp2)) + bp1 = bp1.reshape(pitch.size, M * L, N * NUM_ROOTS) + bp2 = bp2.reshape(pitch.size, M * L, N * NUM_ROOTS) + # The last axis enumerates the quadrature points. X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] - assert X.shape == (pitch.size * M * L, N * 2, x.size) - - def body(i, integral): - k = i % (N * 2) - j = i // (N * 2) - p = i // (M * L * N * 2) - v = j % pitch.size - # TODO: Add Hermite spline to interpax to pass in B_z[i]. - integrand = interp1d(X[j, k], zeta, f[v]) / ( - interp1d(X[j, k], zeta, B_sup_z[v]) - * jnp.sqrt(1 - pitch[p] * interp1d(X[j, k], zeta, B[v])) - ) - integral = put(integral, i, jnp.sum(w * integrand)) - return integral - - f = eq.compute(name, grid=grid, override_grid=False, data=data)[name].reshape( - M * L, resolution - ) + assert X.shape == (pitch.size, M * L, N * NUM_ROOTS, x.size) + + f = eq.compute(name, grid=grid, data=data)[name].reshape(M * L, resolution) B_sup_z = data["B^zeta"].reshape(M * L, resolution) - F = jnp.nan_to_num( - # TODO: Vectorize interpax to do this with 1 call with einsum. 
- fori_loop(0, pitch.size * M * L * N * 2, body, jnp.zeros(X.shape[:-1])) + F = jnp.reshape( + _compute_quad(knots, pitch, w, X, f, B_sup_z, B, B_z_ra) * jnp.pi / (bp2 - bp1), - posinf=0, - neginf=0, + newshape=(pitch.size, M, L, N * NUM_ROOTS), ) - return F.reshape(pitch.size, M, L, N * 2) + return F if method == "direct": - bi = _direct - fun = vmap(lambda args: mask_diff(*args)[::2]) - get_bounce_points = _get_bounce_points_include_knots + original = _compute_bp_if_given_pitch( + pitch, knots, poly_B, poly_B_z, _compute_bp_include_knots, err=False + ) + return direct else: - bi = _quad_sin - fun = vmap(lambda args: mask_take(*args, size=N * 2, fill_value=jnp.nan)) - get_bounce_points = _get_bounce_points + original = _compute_bp_if_given_pitch( + pitch, knots, poly_B, poly_B_z, _compute_bp, err=False + ) x, w = tanh_sinh_quadrature(resolution) x = jnp.arcsin(x) / jnp.pi - 0.5 - original = _compute_bp_if_given_pitch( - pitch, zeta, poly_B, poly_B_z, get_bounce_points, err=False - ) - return bi + return quad def bounce_average( @@ -698,9 +766,9 @@ def _bounce_average(name, pitch=None): Returns ------- - G : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 2) + G : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 3) The last axis iterates through every bounce average performed - along that field line padded by zeros. + along that field line padded by nan. """ return safediv(bi(name, pitch), bi("1", pitch)) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index d04863409b..807c530a60 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -2,6 +2,7 @@ import numpy as np import pytest +from interpax import Akima1DInterpolator from desc.backend import fori_loop, put, root_scalar from desc.compute.bounce_integral import ( @@ -81,6 +82,16 @@ def test_polyval(): for k in range(poly.shape[2]): np.testing.assert_allclose(val[j, k], np.poly1d(poly[:, j, k])(x[j, k])) + y = np.arange(1, 6) + y = np.arange(y.prod()).reshape(*y) + x = np.arange(y.shape[-1]) + a1d = Akima1DInterpolator(x, y, axis=-1) + primitive = polyint(a1d.c) + d = np.diff(x) + k = polyval(d.reshape(d.size, *np.ones(primitive.ndim - 2, dtype=int)), primitive) + primitive = primitive.at[-1, 1:].add(np.cumsum(k, axis=-1)[:-1]) + np.testing.assert_allclose(primitive, a1d.antiderivative().c) + # TODO: finish up details if deemed useful def bounce_point( @@ -99,8 +110,8 @@ def residual(zeta, i): def jac(zeta): grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) - data = eq.compute(["|B|_z constant rho alpha"], grid=grid, data=data) - return data["|B|_z constant rho alpha"] + data = eq.compute(["|B|_z|r,a"], grid=grid, data=data) + return data["|B|_z|r,a"] # Compute |B| - lambda on a dense grid. # For every field line, find the roots of this linear spline. 
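The strategy sketched in the helper above (scan |B| − 1/λ on a dense grid, bracket each sign change, then polish the root inside its bracket) can be prototyped with SciPy before worrying about JIT compatibility. The field model and names below are placeholders, and ``brentq`` simply stands in for the solver used in the test:

.. code-block:: python

    import numpy as np
    from scipy.optimize import brentq

    def B_norm(zeta):
        return 1.0 + 0.5 * np.cos(zeta)  # stand-in for |B| along one field line

    pitch = 1.2
    zeta = np.linspace(0, 10 * np.pi, 400)         # dense scan of the field line
    f = B_norm(zeta) - 1 / pitch
    brackets = np.nonzero(np.diff(np.sign(f)))[0]  # sign change => root inside

    # Polish every bracketed crossing; [zeta[i], zeta[i + 1]] contains one root.
    bounce_points = np.array(
        [brentq(lambda z: B_norm(z) - 1 / pitch, zeta[i], zeta[i + 1]) for i in brackets]
    )
    np.testing.assert_allclose(B_norm(bounce_points), 1 / pitch)
    print(bounce_points[:4])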
From a501bca27f3e813cc8ffead0f63db1a1f7290473 Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 14 Mar 2024 16:14:30 -0500 Subject: [PATCH 029/241] Remove vmap and fix reshaping bug --- desc/compute/bounce_integral.py | 256 ++++++++++++-------------------- 1 file changed, 99 insertions(+), 157 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 1552a6793c..131c631a5e 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -11,65 +11,12 @@ NUM_ROOTS = 3 # max number of roots of a cubic polynomial # returns index of first nonzero element in a v_first_flatnonzero = vmap(lambda a: flatnonzero(a, size=1, fill_value=a.size)) -v_mask_diff = vmap(lambda a, mask: mask_diff(a, mask)) +v_mask_diff = vmap(mask_diff) v_mask_take = vmap(lambda a, mask: mask_take(a, mask, size=a.size, fill_value=jnp.nan)) -# TODO: fix up some logic with the periodic boundary bounce integral thing - - -def _inmost(pitch, w, X, f, B_sup_z, B): - """Compute a single bounce integral. - - Parameters - ---------- - pitch : float - λ values. - w : ndarray, shape(w.size, ) - Quadrature weights. - X : ndarray, shape(w.size, ) - Quadrature points. - f : Akima1DInterpolator - Spline of function to compute bounce integral of. - B_sup_z : Akima1DInterpolator - Contravariant field-line following toroidal component of magnetic field. - B : CubicHermiteSpline - Norm of magnetic field. - - """ - assert jnp.size(pitch) == 1 - assert w.shape == X.shape - return jnp.sum(w * f(X) / (B_sup_z(X) * jnp.sqrt(1 - pitch * B(X)))) - - -def _inner(pitch, w, X, f, B_sup_z, B): - """Compute bounce integrals along a particular field line and pitch. - - Parameters - ---------- - pitch : float - λ values. - w : ndarray, shape(w.size, ) - Quadrature weights. - X : ndarray, shape(:, w.size, ) - Quadrature points. - f : Akima1DInterpolator - Spline of function to compute bounce integral of. - B_sup_z : Akima1DInterpolator - Contravariant field-line following toroidal component of magnetic field. - B : CubicHermiteSpline - Norm of magnetic field. - - """ - assert jnp.size(pitch) == 1 - assert w.shape == X.shape[1:] - return vmap(_inmost, in_axes=(None, None, 0, None, None, None))( - pitch, w, X, f, B_sup_z, B - ) - - -def _outer(knots, pitch, w, X, f, B_sup_z, B, B_z_ra): - """Compute bounce integrals for every pitch along a single field line. +def _inner_product(knots, pitch, w, X, f, B_sup_z, B, B_z_ra): + """Compute bounce integrals for every pitch along a particular field line. Parameters ---------- @@ -79,7 +26,7 @@ def _outer(knots, pitch, w, X, f, B_sup_z, B, B_z_ra): λ values. w : ndarray, shape(w.size, ) Quadrature weights. - X : ndarray, shape(pitch.size, :, w.size) + X : ndarray, shape(pitch.size, X.shape[1], w.size) Quadrature points. f : ndarray, shape(knots.size, ) Spline of function to compute bounce integral of. @@ -90,20 +37,61 @@ def _outer(knots, pitch, w, X, f, B_sup_z, B, B_z_ra): B_z_ra : ndarray, shape(knots.size, ) Norm of magnetic field derivative with respect to field-line following label. + Returns + ------- + inner_product : ndarray, shape(pitch.size, X.shape[1]) + Bounce integrals for every pitch along a particular field line. 
+ """ - assert pitch.size == X.shape[0] + assert X.shape == (pitch.size, X.shape[1], w.size) # FIXME: https://github.com/f0uriest/interpax/issues/26 f = Akima1DInterpolator(knots, f, check=False) B_sup_z = Akima1DInterpolator(knots, B_sup_z, check=False) B = CubicHermiteSpline(knots, B, B_z_ra, check=False) - return vmap(_inner, in_axes=(0, None, 0, None, None, None))( - pitch, w, X, f, B_sup_z, B - ) + pitch = pitch[:, jnp.newaxis, jnp.newaxis] + return jnp.dot(f(X) / (B_sup_z(X) * jnp.sqrt(1 - pitch * B(X))), w) -# ...maybe there's a better way to do this with jnp.vectorize(..., excluded={...})? """Compute bounce integrals for every pitch along every field line.""" -_compute_quad = vmap(_outer, in_axes=(None, None, None, 1, 0, 0, 0, 0)) +_compute_quad = vmap(_inner_product, in_axes=(None, None, None, 1, 0, 0, 0, 0)) + + +def tanh_sinh_quadrature(resolution): + """ + tanh_sinh quadrature. + + This function outputs the quadrature points and weights + for a tanh-sinh quadrature. + + ∫₋₁¹ f(x) dx = ∑ₖ wₖ f(xₖ) + + Parameters + ---------- + resolution: int + Number of quadrature points, preferably odd + + Returns + ------- + x : numpy array + Quadrature points + w : numpy array + Quadrature weights + + """ + # https://github.com/f0uriest/quadax/blob/main/quadax/utils.py#L166 + # x = 1 - eps with some buffer + x_max = jnp.array(1.0) - 10 * jnp.finfo(jnp.array(1.0)).eps + tanhinv = lambda x: 1 / 2 * jnp.log((1 + x) / (1 - x)) + sinhinv = lambda x: jnp.log(x + jnp.sqrt(x**2 + 1)) + # inverse of tanh-sinh transformation for x_max + t_max = sinhinv(2 / jnp.pi * tanhinv(x_max)) + + points = jnp.linspace(-t_max, t_max, resolution) + h = 2 * t_max / (resolution - 1) + sinh_points = jnp.sinh(points) + x = jnp.tanh(0.5 * jnp.pi * sinh_points) + w = 0.5 * jnp.pi * h * jnp.cosh(points) / jnp.cosh(0.5 * jnp.pi * sinh_points) ** 2 + return x, w def polyint(c, k=jnp.array([0])): @@ -187,47 +175,6 @@ def polyval(x, c): return val -def tanh_sinh_quadrature(N): - """ - tanh_sinh quadrature. - - This function outputs the quadrature points and weights - for a tanh-sinh quadrature. - - ∫₋₁¹ f(x) dx = ∑ₖ wₖ f(xₖ) - - Parameters - ---------- - N: int - Number of quadrature points, preferably odd - - Returns - ------- - x : numpy array - Quadrature points - w : numpy array - Quadrature weights - - """ - # Copied from quadax. - # https://github.com/f0uriest/quadax/blob/main/quadax/utils.py#L166 - def get_tmax(xmax): - """Inverse of tanh-sinh transform.""" - tanhinv = lambda x: 1 / 2 * jnp.log((1 + x) / (1 - x)) - sinhinv = lambda x: jnp.log(x + jnp.sqrt(x**2 + 1)) - return sinhinv(2 / jnp.pi * tanhinv(xmax)) - - # inverse of tanh-sinh transformation for x = 1-eps with small buffer - tmax = get_tmax(jnp.array(1.0) - 10 * jnp.finfo(jnp.array(1.0)).eps) - - points = jnp.linspace(-tmax, tmax, N) - h = 2 * tmax / (N - 1) - sinh = jnp.sinh(points) - x = jnp.tanh(0.5 * jnp.pi * sinh) - w = 0.5 * jnp.pi * h * jnp.cosh(points) / jnp.cosh(0.5 * jnp.pi * sinh) ** 2 - return x, w - - def cubic_poly_roots(coef, k=jnp.array([0]), a_min=None, a_max=None, sort=False): """Roots of cubic polynomial. 
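The point of the tanh-sinh rule defined above is that the change of variables pushes the interval endpoints out to infinity, so the trapezoidal weights decay double-exponentially and integrands that blow up at ±1, like the bounce integrand near its bounce points, are still integrated accurately. A plain-NumPy transcription of the same construction, checked against ∫₋₁¹ dx / √(1 − x²) = π:

.. code-block:: python

    import numpy as np

    def tanh_sinh(n):
        # Same truncation choice as above: invert x = tanh(pi/2 sinh t) just
        # inside x = 1 to pick t_max, then take trapezoidal nodes in t.
        x_max = 1.0 - 10 * np.finfo(np.float64).eps
        t_max = np.arcsinh(2 / np.pi * np.arctanh(x_max))
        t = np.linspace(-t_max, t_max, n)
        h = 2 * t_max / (n - 1)
        x = np.tanh(0.5 * np.pi * np.sinh(t))
        w = 0.5 * np.pi * h * np.cosh(t) / np.cosh(0.5 * np.pi * np.sinh(t)) ** 2
        return x, w

    x, w = tanh_sinh(41)
    # The integrand diverges at both endpoints, yet 41 points already give
    # several digits of pi.
    approx = np.sum(w / np.sqrt(1 - x**2))
    np.testing.assert_allclose(approx, np.pi, rtol=1e-4)
    print(approx)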
@@ -269,19 +216,20 @@ def cubic_poly_roots(coef, k=jnp.array([0]), a_min=None, a_max=None, sort=False) C = ((t_1 + complex_sqrt(t_1**2 - 4 * t_0**3)) / 2) ** (1 / 3) C_is_zero = jnp.isclose(C, 0) - def compute_roots(xi_k): - t_3 = jnp.where(C_is_zero, 0, t_0 / (xi_k * C)) - r = -(b + xi_k * C + t_3) / (3 * a) - return r + def compute_root(xi): + return -(b + xi * C + jnp.where(C_is_zero, 0, t_0 / (xi * C))) / (3 * a) - def clip_to_nan(r): - r = jnp.where(jnp.isreal(r) & (a_min <= r) & (r <= a_max), jnp.real(r), jnp.nan) - return r + def clip_to_nan(root): + return jnp.where( + jnp.isreal(root) & (a_min <= root) & (root <= a_max), + jnp.real(root), + jnp.nan, + ) xi_1 = (-1 + (-3) ** 0.5) / 2 xi_2 = xi_1**2 xi_3 = 1 - roots = tuple(map(compute_roots, (xi_1, xi_2, xi_3))) + roots = tuple(map(compute_root, (xi_1, xi_2, xi_3))) if clip: roots = tuple(map(clip_to_nan, roots)) roots = jnp.stack(roots, axis=-1) @@ -290,6 +238,7 @@ def clip_to_nan(r): return roots +# TODO: fix up some logic with the periodic boundary bounce integral thing def _compute_bp(pitch, knots, poly_B, poly_B_z): """Compute the bounce points given |B| and pitch λ. @@ -308,7 +257,7 @@ def _compute_bp(pitch, knots, poly_B, poly_B_z): ------- bp1, bp2 : ndarray, ndarray Field line-following ζ coordinates of bounce points for a given pitch - along a field line. Has shape (pitch.size * M * L, N * NUM_ROOTS). + along a field line. Has shape (pitch.size, M * L, N * NUM_ROOTS). If there were less than N * NUM_ROOTS bounce points along a field line, then the last axis is padded with nan. @@ -322,7 +271,7 @@ def _compute_bp(pitch, knots, poly_B, poly_B_z): # nan values in ``intersect`` denote a polynomial has less than three intersects. intersect = cubic_poly_roots( coef=poly_B, k=1 / pitch, a_min=knots[:-1], a_max=knots[1:], sort=True - ).reshape(pitch.size, *poly_B.shape[1:], NUM_ROOTS) + ).reshape(pitch.size, ML, N, NUM_ROOTS) # Reshape to unsqueeze pitch axis. # Reshape so that last axis enumerates intersects of a pitch along a field line. @@ -344,7 +293,6 @@ def _compute_bp(pitch, knots, poly_B, poly_B_z): bp2 = B_z >= 0 # B_z <= 0 at intersect i implies B_z >= 0 at intersect i+1 by continuity. - # # TODO: fix the boundary logic, # extend bp1 and bp2 by single element and then test # # index of last intersect along a field line # idx = jnp.squeeze(v_first_flatnonzero(~is_intersect)) - 1 # noqa: E800 @@ -354,9 +302,8 @@ def _compute_bp(pitch, knots, poly_B, poly_B_z): # Roll such that first intersect is moved to index of last intersect. # Get ζ values of bounce points from the masks. - bp1 = v_mask_take(intersect, bp1) - bp2 = v_mask_take(intersect, bp2) - assert bp1.shape == bp2.shape == (pitch.size * ML, N * NUM_ROOTS) + bp1 = v_mask_take(intersect, bp1).reshape(pitch.size, ML, N * NUM_ROOTS) + bp2 = v_mask_take(intersect, bp2).reshape(pitch.size, ML, N * NUM_ROOTS) return bp1, bp2 @@ -395,7 +342,7 @@ def _compute_bp_include_knots(pitch, knots, poly_B, poly_B_z): # nan values in ``roots`` denote a polynomial has less than three intersects. roots = cubic_poly_roots( coef=poly_B, k=1 / pitch, a_min=a_min, a_max=a_max, sort=True - ).reshape(pitch.size, *poly_B.shape[1:], NUM_ROOTS) + ).reshape(pitch.size, ML, N, NUM_ROOTS) # Reshape to unsqueeze pitch axis. # Include the knots of the splines along with the intersection points. @@ -451,7 +398,7 @@ def _compute_bp_if_given_pitch( Parameters ---------- pitch : ndarray - λ values representing the constant function 1 / λ. + λ values. If None, returns the given ``original`` tuple. 
knots : ndarray Field line-following ζ coordinates of spline knots. @@ -556,6 +503,7 @@ def bounce_integral( grid, data = field_line_to_desc_coords(eq, rho, alpha, knots) data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid, data=data) + B_sup_z = data["B^zeta"].reshape(M * L, resolution) B = data["|B|"].reshape(M * L, resolution) B_z_ra = data["|B|_z|r,a"].reshape(M * L, resolution) poly_B = CubicHermiteSpline(knots, B, B_z_ra, axis=-1, check=False).c @@ -564,6 +512,37 @@ def bounce_integral( assert poly_B.shape == (4, M * L, N) assert poly_B_z.shape == (3, M * L, N) + def quad(name, pitch=None): + """Compute the bounce integral of the named quantity. + + Parameters + ---------- + name : ndarray + Name of quantity in ``data_index`` to compute the bounce integral of. + pitch : ndarray + λ values to evaluate the bounce integral at. + If None, uses the values given to the parent function. + + Returns + ------- + F : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 3) + The last axis iterates through every bounce integral performed + along that field line padded by nan. + + """ + pitch, bp1, bp2 = _compute_bp_if_given_pitch( + pitch, knots, poly_B, poly_B_z, _compute_bp, *original, err=True + ) + X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] + f = eq.compute(name, grid=grid, data=data)[name].reshape(M * L, resolution) + F = jnp.reshape( + _compute_quad(knots, pitch, w, X, f, B_sup_z, B, B_z_ra) + * jnp.pi + / (bp2 - bp1), + newshape=(pitch.size, M, L, N * NUM_ROOTS), + ) + return F + def direct(name, pitch=None): """Compute the bounce integral of the named quantity. @@ -631,43 +610,6 @@ def direct(name, pitch=None): ) return F - def quad(name, pitch=None): - """Compute the bounce integral of the named quantity. - - Parameters - ---------- - name : ndarray - Name of quantity in ``data_index`` to compute the bounce integral of. - pitch : ndarray - λ values to evaluate the bounce integral at. - If None, uses the values given to the parent function. - - Returns - ------- - F : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 3) - The last axis iterates through every bounce integral performed - along that field line padded by nan. - - """ - pitch, bp1, bp2 = _compute_bp_if_given_pitch( - pitch, knots, poly_B, poly_B_z, _compute_bp, *original, err=True - ) - bp1 = bp1.reshape(pitch.size, M * L, N * NUM_ROOTS) - bp2 = bp2.reshape(pitch.size, M * L, N * NUM_ROOTS) - # The last axis enumerates the quadrature points. - X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] - assert X.shape == (pitch.size, M * L, N * NUM_ROOTS, x.size) - - f = eq.compute(name, grid=grid, data=data)[name].reshape(M * L, resolution) - B_sup_z = data["B^zeta"].reshape(M * L, resolution) - F = jnp.reshape( - _compute_quad(knots, pitch, w, X, f, B_sup_z, B, B_z_ra) - * jnp.pi - / (bp2 - bp1), - newshape=(pitch.size, M, L, N * NUM_ROOTS), - ) - return F - if method == "direct": original = _compute_bp_if_given_pitch( pitch, knots, poly_B, poly_B_z, _compute_bp_include_knots, err=False From b577c621ac39bbc210a1156c5167a777b8b9f8b6 Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 14 Mar 2024 17:26:17 -0500 Subject: [PATCH 030/241] Simplify surface integral function Took ~5min to do this since it bothered me. Benchmarks showed no difference in memory or speed. 
https://github.com/PlasmaControl/DESC/pull/501#issuecomment-1546307972 --- desc/compute/utils.py | 97 ++++++------------------------------------- 1 file changed, 13 insertions(+), 84 deletions(-) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 3d3c298f0e..23ce397438 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -737,18 +737,9 @@ def line_integrals( q : ndarray Quantity to integrate. The first dimension of the array should have size ``grid.num_nodes``. - - When ``q`` is 1-dimensional, the intention is to integrate, - over the domain parameterized by rho, theta, and zeta, - a scalar function over the previously mentioned domain. - - When ``q`` is 2-dimensional, the intention is to integrate, - over the domain parameterized by rho, theta, and zeta, - a vector-valued function over the previously mentioned domain. - - When ``q`` is 3-dimensional, the intention is to integrate, + When ``q`` is n-dimensional, the intention is to integrate, over the domain parameterized by rho, theta, and zeta, - a matrix-valued function over the previously mentioned domain. + a n-dimensional function over the previously mentioned domain. line_label : str The coordinate curve to compute the integration over. To clarify, a theta (poloidal) curve is the intersection of a @@ -813,18 +804,9 @@ def surface_integrals(grid, q=jnp.array([1.0]), surface_label="rho", expand_out= q : ndarray Quantity to integrate. The first dimension of the array should have size ``grid.num_nodes``. - - When ``q`` is 1-dimensional, the intention is to integrate, - over the domain parameterized by rho, theta, and zeta, - a scalar function over the previously mentioned domain. - - When ``q`` is 2-dimensional, the intention is to integrate, + When ``q`` is n-dimensional, the intention is to integrate, over the domain parameterized by rho, theta, and zeta, - a vector-valued function over the previously mentioned domain. - - When ``q`` is 3-dimensional, the intention is to integrate, - over the domain parameterized by rho, theta, and zeta, - a matrix-valued function over the previously mentioned domain. + a n-dimensional function over the previously mentioned domain. surface_label : str The surface label of rho, theta, or zeta to compute the integration over. expand_out : bool @@ -876,6 +858,7 @@ def surface_integrals_map(grid, surface_label="rho", expand_out=True): unique_size, inverse_idx, spacing, has_endpoint_dupe = _get_grid_surface( grid, surface_label ) + spacing = jnp.prod(spacing, axis=1) # Todo: Define masks as a sparse matrix once sparse matrices are no longer # experimental in jax. @@ -907,7 +890,6 @@ def surface_integrals_map(grid, surface_label="rho", expand_out=True): lambda _: masks, operand=None, ) - spacing = jnp.prod(spacing, axis=1) def _surface_integrals(q=jnp.array([1.0])): """Compute a surface integral for each surface in the grid. @@ -923,18 +905,9 @@ def _surface_integrals(q=jnp.array([1.0])): q : ndarray Quantity to integrate. The first dimension of the array should have size ``grid.num_nodes``. - - When ``q`` is 1-dimensional, the intention is to integrate, - over the domain parameterized by rho, theta, and zeta, - a scalar function over the previously mentioned domain. - - When ``q`` is 2-dimensional, the intention is to integrate, + When ``q`` is n-dimensional, the intention is to integrate, over the domain parameterized by rho, theta, and zeta, - a vector-valued function over the previously mentioned domain. 
- - When ``q`` is 3-dimensional, the intention is to integrate, - over the domain parameterized by rho, theta, and zeta, - a matrix-valued function over the previously mentioned domain. + a n-dimensional function over the previously mentioned domain. Returns ------- @@ -942,7 +915,6 @@ def _surface_integrals(q=jnp.array([1.0])): Surface integral of the input over each surface in the grid. """ - axis_to_move = (jnp.ndim(q) == 3) * 2 integrands = (spacing * jnp.nan_to_num(q).T).T # `integrands` may have shape (g.size, f.size, v.size), where # g is the grid function depending on the integration variables @@ -953,30 +925,9 @@ def _surface_integrals(q=jnp.array([1.0])): # function-valued (with image size of f.size) # function over the grid (with domain size of g.size = grid.num_nodes) # over each surface in the grid. - # The distinction between f and v is semantic. - # We may alternatively consider an `integrands` of shape (g.size, f.size) to - # represent a vector-valued (with f.size components) function over the grid. - # Likewise, we may alternatively consider an `integrands` of shape - # (g.size, v.size) to represent a function-valued (with image size v.size) - # function over the grid. When `integrands` has dimension one, it is a - # scalar function over the grid. That is, a - # vector-valued (with 1 component), - # function-valued (with image size of 1) - # function over the grid (with domain size of g.size = grid.num_nodes) - - # The integration is performed by applying `masks`, the surface - # integral operator, to `integrands`. This operator hits the matrix formed - # by the last two dimensions of `integrands`, for every element along the - # previous dimension of `integrands`. Therefore, when `integrands` has three - # dimensions, the second must hold g. We may choose which of the first and - # third dimensions hold f and v. The choice below transposes `integrands` to - # shape (v.size, g.size, f.size). As we expect f.size >> v.size, the - # integration is in theory faster since numpy optimizes large matrix - # products. However, timing results showed no difference. - integrals = jnp.moveaxis( - masks @ jnp.moveaxis(integrands, axis_to_move, 0), 0, axis_to_move - ) + integrals = jnp.tensordot(masks, integrands, axes=([1], [0])) + # uses less memory than jnp.einsum("ug,g...->u...", masks, integrands) return grid.expand(integrals, surface_label) if expand_out else integrals return _surface_integrals @@ -1004,18 +955,9 @@ def surface_averages( q : ndarray Quantity to average. The first dimension of the array should have size ``grid.num_nodes``. - - When ``q`` is 1-dimensional, the intention is to average, - over the domain parameterized by rho, theta, and zeta, - a scalar function over the previously mentioned domain. - - When ``q`` is 2-dimensional, the intention is to average, + When ``q`` is n-dimensional, the intention is to average, over the domain parameterized by rho, theta, and zeta, - a vector-valued function over the previously mentioned domain. - - When ``q`` is 3-dimensional, the intention is to average, - over the domain parameterized by rho, theta, and zeta, - a matrix-valued function over the previously mentioned domain. + a n-dimensional function over the previously mentioned domain. sqrt_g : ndarray Coordinate system Jacobian determinant; see ``data_index["sqrt(g)"]``. surface_label : str @@ -1081,18 +1023,9 @@ def _surface_averages(q, sqrt_g=jnp.array([1.0]), denominator=None): q : ndarray Quantity to average. 
The first dimension of the array should have size ``grid.num_nodes``. - - When ``q`` is 1-dimensional, the intention is to average, - over the domain parameterized by rho, theta, and zeta, - a scalar function over the previously mentioned domain. - - When ``q`` is 2-dimensional, the intention is to average, - over the domain parameterized by rho, theta, and zeta, - a vector-valued function over the previously mentioned domain. - - When ``q`` is 3-dimensional, the intention is to average, + When ``q`` is n-dimensional, the intention is to average, over the domain parameterized by rho, theta, and zeta, - a matrix-valued function over the previously mentioned domain. + a n-dimensional function over the previously mentioned domain. sqrt_g : ndarray Coordinate system Jacobian determinant; see ``data_index["sqrt(g)"]``. denominator : ndarray @@ -1213,10 +1146,6 @@ def surface_integrals_transform(grid, surface_label="rho"): (grid.num_surface_label, f.size, v.size). """ - # Although this method seems to duplicate surface_integrals(), the - # intentions of these methods may be to implement different algorithms. - # We can rely on surface_integrals() for the computation because its current - # implementation is flexible enough to implement both algorithms. # Expansion should not occur here. The typical use case of this method is to # transform into the computational domain, so the second dimension that # discretizes f over the codomain will typically have size grid.num_nodes From 69e06b6fa5923fd92af4a13d88f999c328bb35df Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 15 Mar 2024 01:00:52 -0500 Subject: [PATCH 031/241] Add an outline for testing quadrature in bounce integral --- desc/compute/bounce_integral.py | 70 +++++++-------- tests/test_bounce_integral.py | 146 +++++++++++++++++++++++++------- 2 files changed, 151 insertions(+), 65 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 131c631a5e..7d8306b2c2 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -15,7 +15,7 @@ v_mask_take = vmap(lambda a, mask: mask_take(a, mask, size=a.size, fill_value=jnp.nan)) -def _inner_product(knots, pitch, w, X, f, B_sup_z, B, B_z_ra): +def _inner_product_quad(knots, pitch, w, X, f, B_sup_z, B, B_z_ra): """Compute bounce integrals for every pitch along a particular field line. Parameters @@ -29,7 +29,7 @@ def _inner_product(knots, pitch, w, X, f, B_sup_z, B, B_z_ra): X : ndarray, shape(pitch.size, X.shape[1], w.size) Quadrature points. f : ndarray, shape(knots.size, ) - Spline of function to compute bounce integral of. + Function to compute bounce integral of, evaluated at knots. B_sup_z : ndarray, shape(knots.size, ) Contravariant field-line following toroidal component of magnetic field. B : ndarray, shape(knots.size, ) @@ -43,17 +43,19 @@ def _inner_product(knots, pitch, w, X, f, B_sup_z, B, B_z_ra): Bounce integrals for every pitch along a particular field line. 
""" + assert pitch.ndim == 1 assert X.shape == (pitch.size, X.shape[1], w.size) # FIXME: https://github.com/f0uriest/interpax/issues/26 f = Akima1DInterpolator(knots, f, check=False) B_sup_z = Akima1DInterpolator(knots, B_sup_z, check=False) B = CubicHermiteSpline(knots, B, B_z_ra, check=False) pitch = pitch[:, jnp.newaxis, jnp.newaxis] - return jnp.dot(f(X) / (B_sup_z(X) * jnp.sqrt(1 - pitch * B(X))), w) + inner_product = jnp.dot(f(X) / (B_sup_z(X) * jnp.sqrt(1 - pitch * B(X))), w) + return inner_product """Compute bounce integrals for every pitch along every field line.""" -_compute_quad = vmap(_inner_product, in_axes=(None, None, None, 1, 0, 0, 0, 0)) +_compute_quad = vmap(_inner_product_quad, in_axes=(None, None, None, 1, 0, 0, 0, 0)) def tanh_sinh_quadrature(resolution): @@ -79,7 +81,7 @@ def tanh_sinh_quadrature(resolution): """ # https://github.com/f0uriest/quadax/blob/main/quadax/utils.py#L166 - # x = 1 - eps with some buffer + # x_max = 1 - eps with some buffer x_max = jnp.array(1.0) - 10 * jnp.finfo(jnp.array(1.0)).eps tanhinv = lambda x: 1 / 2 * jnp.log((1 + x) / (1 - x)) sinhinv = lambda x: jnp.log(x + jnp.sqrt(x**2 + 1)) @@ -157,15 +159,15 @@ def polyval(x, c): Returns ------- val : ndarray - ``val[j, k, ...]`` is the polynomial with coefficients ``c[:, j, k, ...]`` - evaluated at the point ``x[j, k, ...]``. + ``val[j, k, ....]`` is the polynomial with coefficients ``c[:, j, k, ....]`` + evaluated at the point ``x[j, k, ....]``. Notes ----- This function does not perform the same operation as ``np.polynomial.polynomial.polyval(x, c)``. An example usage of this function is shown in - tests/test_compute_utils.py::TestComputeUtils::test_polyval. + tests/test_bounce_integral.py::test_polyval. """ X = x[..., jnp.newaxis] ** jnp.arange(c.shape[0] - 1, -1, -1) @@ -239,7 +241,7 @@ def clip_to_nan(root): # TODO: fix up some logic with the periodic boundary bounce integral thing -def _compute_bp(pitch, knots, poly_B, poly_B_z): +def _compute_bounce_points(pitch, knots, poly_B, poly_B_z): """Compute the bounce points given |B| and pitch λ. Parameters @@ -307,11 +309,11 @@ def _compute_bp(pitch, knots, poly_B, poly_B_z): return bp1, bp2 -def _compute_bp_include_knots(pitch, knots, poly_B, poly_B_z): +def _compute_bounce_points_with_knots(pitch, knots, poly_B, poly_B_z): """Compute the bounce points given |B| and pitch λ. - Like ``_compute_bp`` but returns ingredients needed by the - algorithm in the direct method in ``bounce_integral``. + Like ``_compute_bounce_points`` but returns ingredients needed by the + algorithm in the ``direct`` method in ``bounce_integral``. Parameters ---------- @@ -391,7 +393,7 @@ def _compute_bp_include_knots(pitch, knots, poly_B, poly_B_z): def _compute_bp_if_given_pitch( - pitch, knots, poly_B, poly_B_z, compute_bp, *original, err=False + pitch, knots, poly_B, poly_B_z, compute_bounce_points, *original, err=False ): """Return the ingredients needed by the ``bounce_integral`` function. @@ -406,7 +408,7 @@ def _compute_bp_if_given_pitch( Polynomial coefficients of the cubic spline of |B|. poly_B_z : ndarray Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. - compute_bp : callable + compute_bounce_points : callable Method to compute bounce points. original : tuple Whatever this method returned earlier. 
@@ -420,7 +422,7 @@ def _compute_bp_if_given_pitch( return original else: pitch = jnp.atleast_1d(pitch) - return pitch, *compute_bp(pitch, knots, poly_B, poly_B_z) + return pitch, *compute_bounce_points(pitch, knots, poly_B, poly_B_z) def bounce_integral( @@ -487,7 +489,7 @@ def bounce_integral( .. code-block:: python bi = bounce_integral(eq) - F = bi(name, pitch) + result = bi(name, pitch) """ if rho is None: @@ -496,17 +498,17 @@ def bounce_integral( alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 20) rho = jnp.atleast_1d(rho) alpha = jnp.atleast_1d(alpha) - knots = jnp.linspace(0, zeta_max, resolution) + zeta = jnp.linspace(0, zeta_max, resolution) L = rho.size M = alpha.size N = resolution - 1 # number of piecewise cubic polynomials per field line - grid, data = field_line_to_desc_coords(eq, rho, alpha, knots) + grid, data = field_line_to_desc_coords(eq, rho, alpha, zeta) data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid, data=data) B_sup_z = data["B^zeta"].reshape(M * L, resolution) B = data["|B|"].reshape(M * L, resolution) B_z_ra = data["|B|_z|r,a"].reshape(M * L, resolution) - poly_B = CubicHermiteSpline(knots, B, B_z_ra, axis=-1, check=False).c + poly_B = CubicHermiteSpline(zeta, B, B_z_ra, axis=-1, check=False).c poly_B = jnp.moveaxis(poly_B, 1, -1) poly_B_z = polyder(poly_B) assert poly_B.shape == (4, M * L, N) @@ -525,23 +527,23 @@ def quad(name, pitch=None): Returns ------- - F : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 3) + result : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 3) The last axis iterates through every bounce integral performed along that field line padded by nan. """ pitch, bp1, bp2 = _compute_bp_if_given_pitch( - pitch, knots, poly_B, poly_B_z, _compute_bp, *original, err=True + pitch, zeta, poly_B, poly_B_z, _compute_bounce_points, *original, err=True ) X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] f = eq.compute(name, grid=grid, data=data)[name].reshape(M * L, resolution) - F = jnp.reshape( - _compute_quad(knots, pitch, w, X, f, B_sup_z, B, B_z_ra) + result = jnp.reshape( + _compute_quad(zeta, pitch, w, X, f, B_sup_z, B, B_z_ra) * jnp.pi / (bp2 - bp1), newshape=(pitch.size, M, L, N * NUM_ROOTS), ) - return F + return result def direct(name, pitch=None): """Compute the bounce integral of the named quantity. @@ -556,7 +558,7 @@ def direct(name, pitch=None): Returns ------- - F : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 3) + result : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 3) The last axis iterates through every bounce integral performed along that field line padded by nan. @@ -568,10 +570,10 @@ def direct(name, pitch=None): is_bp, ) = _compute_bp_if_given_pitch( pitch, - knots, + zeta, poly_B, poly_B_z, - _compute_bp_include_knots, + _compute_bounce_points_with_knots, *original, err=True, ) @@ -580,7 +582,7 @@ def direct(name, pitch=None): eq.compute(name, grid=grid, data=data)[name] / (data["B^zeta"] * jnp.sqrt(1 - pitch[:, jnp.newaxis] * data["|B|"])) ).reshape(pitch.size * M * L, resolution) - integrand = Akima1DInterpolator(knots, integrand, axis=-1, check=False).c + integrand = Akima1DInterpolator(zeta, integrand, axis=-1, check=False).c integrand = jnp.moveaxis(integrand, 1, -1) assert integrand.shape == (4, pitch.size * M * L, N) # For this algorithm, computing integrals via differences of primitives @@ -602,22 +604,22 @@ def direct(name, pitch=None): ), axis=-1, ) - F = jnp.reshape( + result = jnp.reshape( # Compute difference of ``sums`` between bounce points. 
v_mask_diff(v_mask_take(sums, is_intersect), is_bp)[..., : N * NUM_ROOTS], # Guaranteed to have at most N * NUM_ROOTS entries where is_bp is true. newshape=(pitch.size, M, L, N * NUM_ROOTS), ) - return F + return result if method == "direct": original = _compute_bp_if_given_pitch( - pitch, knots, poly_B, poly_B_z, _compute_bp_include_knots, err=False + pitch, zeta, poly_B, poly_B_z, _compute_bounce_points_with_knots, err=False ) return direct else: original = _compute_bp_if_given_pitch( - pitch, knots, poly_B, poly_B_z, _compute_bp, err=False + pitch, zeta, poly_B, poly_B_z, _compute_bounce_points, err=False ) x, w = tanh_sinh_quadrature(resolution) x = jnp.arcsin(x) / jnp.pi - 0.5 @@ -690,7 +692,7 @@ def bounce_average( .. code-block:: python ba = bounce_average(eq) - G = ba(name, pitch) + result = ba(name, pitch) """ bi = bounce_integral(eq, rho, alpha, zeta_max, pitch, resolution, method) @@ -708,7 +710,7 @@ def _bounce_average(name, pitch=None): Returns ------- - G : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 3) + result : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 3) The last axis iterates through every bounce average performed along that field line padded by nan. diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 807c530a60..a9bdd79120 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -6,12 +6,25 @@ from desc.backend import fori_loop, put, root_scalar from desc.compute.bounce_integral import ( + bounce_integral, cubic_poly_roots, field_line_to_desc_coords, polyder, polyint, polyval, ) +from desc.continuation import solve_continuation_automatic +from desc.equilibrium import Equilibrium +from desc.geometry import FourierRZToroidalSurface +from desc.grid import LinearGrid +from desc.objectives import ( + ObjectiveFromUser, + ObjectiveFunction, + get_equilibrium_objective, + get_fixed_boundary_constraints, +) +from desc.optimize import Optimizer +from desc.profiles import PowerSeriesProfile @pytest.mark.unit @@ -82,68 +95,141 @@ def test_polyval(): for k in range(poly.shape[2]): np.testing.assert_allclose(val[j, k], np.poly1d(poly[:, j, k])(x[j, k])) + # integrate piecewise polynomial and set constants to preserve continuity y = np.arange(1, 6) y = np.arange(y.prod()).reshape(*y) x = np.arange(y.shape[-1]) a1d = Akima1DInterpolator(x, y, axis=-1) primitive = polyint(a1d.c) + # choose evaluation points at d just to match choice made in a1d.antiderivative() d = np.diff(x) - k = polyval(d.reshape(d.size, *np.ones(primitive.ndim - 2, dtype=int)), primitive) - primitive = primitive.at[-1, 1:].add(np.cumsum(k, axis=-1)[:-1]) + d = d.reshape(d.size, *np.ones(primitive.ndim - 2, dtype=int)) + k = polyval(d, primitive) + # don't want to use jax.ndarray.at[].add() in case jax is not installed + primitive = np.array(primitive) + primitive[-1, 1:] += np.cumsum(k, axis=-1)[:-1] np.testing.assert_allclose(primitive, a1d.antiderivative().c) -# TODO: finish up details if deemed useful -def bounce_point( - self, eq, lambdas, rho, alpha, max_bounce_points=20, max_field_line=10 * np.pi +@pytest.mark.unit +def test_elliptic_integral_limit(): + """Test bounce integral matches elliptic integrals. + + In the limit of a low beta, large aspect ratio tokamak the bounce integral + should converge to the elliptic integrals of the first kind. + todo: would be nice to understand physics for why these are supposed + to be proportional to bounce integral. Is this discussed in any book? 
+ Also, looking at + https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.ellipk.html + Are we saying that in this limit, we expect that |B| ~ sin(t)^2, with m as the + pitch angle? I assume that we want to add g_zz to the integrand in the + definition of the function in the scipy documentation above, + and after a change of variables the bounce points will be the integration. + So this test will test whether the quadrature is accurate + (and not whether the bounce points were accurate). + + """ + L, M, N, NFP, sym = 6, 6, 6, 1, True + surface = FourierRZToroidalSurface( + R_lmn=[1.0, 0.1], + Z_lmn=[0.0, -0.1], + modes_R=np.array([[0, 0], [1, 0]]), + modes_Z=np.array([[0, 0], [-1, 0]]), + sym=sym, + NFP=NFP, + ) + eq = Equilibrium( + L=L, + M=M, + N=N, + NFP=NFP, + surface=surface, + pressure=PowerSeriesProfile([1e2, 0, -1e2]), + iota=PowerSeriesProfile([1, 0, 2]), + Psi=1.0, + ) + eq = solve_continuation_automatic(eq)[-1] + + def beta(grid, data): + return data["_vol"] + + low_beta = 0.01 + # todo: error that objective function has no linear attribute? + objective = ObjectiveFunction( + (ObjectiveFromUser(fun=beta, eq=eq, target=low_beta),) + ) + constraints = (*get_fixed_boundary_constraints(eq), get_equilibrium_objective(eq)) + opt = Optimizer("proximal-lsq-exact") + eq, result = eq.optimize( + objective=objective, constraints=constraints, optimizer=opt + ) + print(result) + + rho = 0.5 + alpha = np.linspace(0, (2 - eq.sym) * np.pi, 20) + zeta = np.linspace(0, 10 * np.pi, 20) + grid = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym) + B = eq.compute(names="|B|", grid=grid)["|B|"] + pitch = np.linspace(1 / B.max(), 1 / B.min(), 10) + bi = bounce_integral( + eq, pitch=pitch, rho=rho, alpha=alpha, zeta_max=zeta[-1], resolution=zeta.size + ) + name = "g_zz" + result = bi(name) + print(result.shape) + print(result) + assert np.isfinite(result).any() + # todo: look into GitHub pull request #934 and see if zero B-field issue resolved + grid, data = field_line_to_desc_coords(eq, rho, alpha, zeta) + g_zz = eq.compute(name, grid=grid, data=data)[name].reshape(alpha.size, zeta.size) + print(g_zz) + # todo: get bounce points from _compute_bp and compute elliptic integral + + +# TODO: if deemed useful finish details using methods in desc.compute.bounce_integral +def _compute_bounce_points_with_root_finding( + eq, pitch, rho, alpha, resolution=20, zeta_max=10 * np.pi ): """Find bounce points.""" - # TODO: - # 1. make another version of desc.backend.root_scalar - # to avoid separate root finding routines in residual and jac - # and use previous desc coords as initial guess for next iteration - # 2. write docstrings and use transforms in api instead of eq + # TODO: avoid separate root finding routines in residual and jac + # and use previous desc coords as initial guess for next iteration def residual(zeta, i): grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) data = eq.compute(["|B|"], grid=grid, data=data) - return data["|B|"] - lambdas[i] + return data["|B|"] - pitch[i] def jac(zeta): grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) data = eq.compute(["|B|_z|r,a"], grid=grid, data=data) return data["|B|_z|r,a"] - # Compute |B| - lambda on a dense grid. + # Compute |B| - 1/pitch on a dense grid. # For every field line, find the roots of this linear spline. # These estimates for the true roots will serve as an initial guess, and # let us form a boundary mesh around root estimates to limit search domain # of the root finding algorithms. 
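One piece of the proposed check that can be pinned down exactly: if, as suggested in the docstring above, |B| enters as sin²θ with the pitch playing the role of the parameter m, then the bounce integrand 1/√(1 − λ|B|) integrates to the complete elliptic integral of the first kind, K(m) = ∫₀^{π/2} dθ / √(1 − m sin²θ). A throwaway SciPy comparison of that identity, independent of any equilibrium:

.. code-block:: python

    import numpy as np
    from scipy.integrate import quad
    from scipy.special import ellipk

    # scipy's ellipk takes the parameter m (= k**2) and is defined by exactly
    # the integral above, so direct quadrature must reproduce it.
    for m in (0.1, 0.5, 0.9, 0.99):
        numeric, _ = quad(lambda t: 1 / np.sqrt(1 - m * np.sin(t) ** 2), 0, np.pi / 2)
        np.testing.assert_allclose(numeric, ellipk(m), rtol=1e-7)
        print(m, numeric, ellipk(m))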
- zeta = np.linspace(0, max_field_line, 3 * max_bounce_points) + zeta = np.linspace(0, zeta_max, 3 * resolution) grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) data = eq.compute(["|B|"], grid=grid, data=data) B_norm = data["|B|"].reshape(alpha.size, rho.size, -1) # constant field line chunks - boundary_lt = np.zeros((lambdas.size, max_bounce_points, alpha.size, rho.size)) - boundary_rt = np.zeros((lambdas.size, max_bounce_points, alpha.size, rho.size)) - guess = np.zeros((lambdas.size, max_bounce_points, alpha.size, rho.size)) + boundary_lt = np.zeros((pitch.size, resolution, alpha.size, rho.size)) + boundary_rt = np.zeros((pitch.size, resolution, alpha.size, rho.size)) + guess = np.zeros((pitch.size, resolution, alpha.size, rho.size)) # todo: scan over this - for i in range(lambdas.size): + for i in range(pitch.size): for j in range(alpha.size): for k in range(rho.size): # indices of zeta values observed prior to sign change - idx = np.nonzero(np.diff(np.sign(B_norm[j, k] - lambdas[i])))[0] + idx = np.nonzero(np.diff(np.sign(B_norm[j, k] - pitch[i])))[0] guess[i, :, j, k] = grid.nodes[idx, 2] boundary_lt[i, :, j, k] = np.append(zeta[0], guess[:-1]) boundary_rt[i, :, j, k] = np.append(guess[1:], zeta[-1]) - guess = guess.reshape(lambdas.size, max_bounce_points, alpha.size * rho.size) - boundary_lt = boundary_lt.reshape( - lambdas.size, max_bounce_points, alpha.size * rho.size - ) - boundary_rt = boundary_rt.reshape( - lambdas.size, max_bounce_points, alpha.size * rho.size - ) + guess = guess.reshape(pitch.size, resolution, alpha.size * rho.size) + boundary_lt = boundary_lt.reshape(pitch.size, resolution, alpha.size * rho.size) + boundary_rt = boundary_rt.reshape(pitch.size, resolution, alpha.size * rho.size) - def body_lambdas(i, out): + def body_pitch(i, out): def body_roots(j, out_i): def fixup(z): return np.clip(z, boundary_lt[i, j], boundary_rt[i, j]) @@ -154,11 +240,9 @@ def fixup(z): out_i = put(out_i, j, root) return out_i - out = put(out, i, fori_loop(0, max_bounce_points, body_roots, out[i])) + out = put(out, i, fori_loop(0, resolution, body_roots, out[i])) return out - bounce_points = np.zeros( - shape=(lambdas.size, alpha.size, rho.size, max_bounce_points) - ) - bounce_points = fori_loop(0, lambdas.size, body_lambdas, bounce_points) + bounce_points = np.zeros(shape=(pitch.size, alpha.size, rho.size, resolution)) + bounce_points = fori_loop(0, pitch.size, body_pitch, bounce_points) return bounce_points From 83b382aa4626b334f58ea1304347d27971801943 Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 15 Mar 2024 01:50:58 -0500 Subject: [PATCH 032/241] Remove outdated comments --- desc/compute/bounce_integral.py | 9 ++---- desc/compute/utils.py | 55 +++++++++++---------------------- 2 files changed, 21 insertions(+), 43 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 7d8306b2c2..ecd467c2fa 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -204,7 +204,7 @@ def cubic_poly_roots(coef, k=jnp.array([0]), a_min=None, a_max=None, sort=False) """ # https://en.wikipedia.org/wiki/Cubic_equation#General_cubic_formula - # The common libraries use root-finding which isn't compatible with JAX. + # The common libraries use root-finding which isn't JIT compilable. 
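That comment is the reason the module leans on fixed-size, nan-padded outputs throughout: under ``jax.jit``, operations whose output shape depends on runtime values have to be given a static ``size`` up front, which is why the closed-form cubic formula is preferred over an iterative or eigenvalue-based root finder. A small illustration of the pattern, mirroring the ``flatnonzero(a, size=1, fill_value=a.size)`` helper used earlier:

.. code-block:: python

    import jax
    import jax.numpy as jnp

    a = jnp.array([0.0, 2.0, 0.0, 3.0])

    # Declaring the output size keeps the shape static, so this traces fine;
    # missing entries are padded with the fill value (here a.size, as in the
    # helpers above).
    idx = jax.jit(lambda x: jnp.flatnonzero(x, size=3, fill_value=x.size))(a)
    print(idx)  # [1 3 4]

    # Omitting ``size`` would make the output shape depend on the values in
    # ``a`` and raise an error inside jit.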
clip = not (a_min is None and a_max is None) if a_min is None: a_min = -jnp.inf @@ -274,7 +274,6 @@ def _compute_bounce_points(pitch, knots, poly_B, poly_B_z): intersect = cubic_poly_roots( coef=poly_B, k=1 / pitch, a_min=knots[:-1], a_max=knots[1:], sort=True ).reshape(pitch.size, ML, N, NUM_ROOTS) - # Reshape to unsqueeze pitch axis. # Reshape so that last axis enumerates intersects of a pitch along a field line. # Condense the first and second axes to vmap over them. @@ -345,7 +344,6 @@ def _compute_bounce_points_with_knots(pitch, knots, poly_B, poly_B_z): roots = cubic_poly_roots( coef=poly_B, k=1 / pitch, a_min=a_min, a_max=a_max, sort=True ).reshape(pitch.size, ML, N, NUM_ROOTS) - # Reshape to unsqueeze pitch axis. # Include the knots of the splines along with the intersection points. # This preprocessing makes the ``direct`` algorithm in ``bounce_integral`` simpler. @@ -596,9 +594,8 @@ def direct(name, pitch=None): # Periodic boundary to compute bounce integrals of particles # trapped outside this snapshot of the field lines. jnp.diff(primitive, axis=-1, append=primitive[..., 0, jnp.newaxis]) - # We didn't enforce continuity of the piecewise primitives, so - # multiply by mask that is false at shared knots of piecewise spline - # to avoid adding difference between primitives of splines at knots. + # Didn't enforce continuity in the piecewise primitives when + # integrating, so mask the discontinuity to avoid summing it. * jnp.append( jnp.arange(1, N * (NUM_ROOTS + 2)) % (NUM_ROOTS + 2) != 0, True ), diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 23ce397438..97afc08ae4 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -739,7 +739,7 @@ def line_integrals( The first dimension of the array should have size ``grid.num_nodes``. When ``q`` is n-dimensional, the intention is to integrate, over the domain parameterized by rho, theta, and zeta, - a n-dimensional function over the previously mentioned domain. + an n-dimensional function over the previously mentioned domain. line_label : str The coordinate curve to compute the integration over. To clarify, a theta (poloidal) curve is the intersection of a @@ -806,7 +806,7 @@ def surface_integrals(grid, q=jnp.array([1.0]), surface_label="rho", expand_out= The first dimension of the array should have size ``grid.num_nodes``. When ``q`` is n-dimensional, the intention is to integrate, over the domain parameterized by rho, theta, and zeta, - a n-dimensional function over the previously mentioned domain. + an n-dimensional function over the previously mentioned domain. surface_label : str The surface label of rho, theta, or zeta to compute the integration over. expand_out : bool @@ -907,7 +907,7 @@ def _surface_integrals(q=jnp.array([1.0])): The first dimension of the array should have size ``grid.num_nodes``. When ``q`` is n-dimensional, the intention is to integrate, over the domain parameterized by rho, theta, and zeta, - a n-dimensional function over the previously mentioned domain. + an n-dimensional function over the previously mentioned domain. Returns ------- @@ -916,16 +916,12 @@ def _surface_integrals(q=jnp.array([1.0])): """ integrands = (spacing * jnp.nan_to_num(q).T).T - # `integrands` may have shape (g.size, f.size, v.size), where + # `integrands` may have shape (g.size, *f.shape), where # g is the grid function depending on the integration variables # f is a function which may be independent of the integration variables - # v is the vector of components of f (or g). 
- # The intention is to integrate `integrands` which is a - # vector-valued (with v.size components) - # function-valued (with image size of f.size) + # The intention is to integrate `integrands` which is a function-valued # function over the grid (with domain size of g.size = grid.num_nodes) # over each surface in the grid. - # The distinction between f and v is semantic. integrals = jnp.tensordot(masks, integrands, axes=([1], [0])) # uses less memory than jnp.einsum("ug,g...->u...", masks, integrands) return grid.expand(integrals, surface_label) if expand_out else integrals @@ -957,7 +953,7 @@ def surface_averages( The first dimension of the array should have size ``grid.num_nodes``. When ``q`` is n-dimensional, the intention is to average, over the domain parameterized by rho, theta, and zeta, - a n-dimensional function over the previously mentioned domain. + an n-dimensional function over the previously mentioned domain. sqrt_g : ndarray Coordinate system Jacobian determinant; see ``data_index["sqrt(g)"]``. surface_label : str @@ -1008,7 +1004,7 @@ def surface_averages_map(grid, surface_label="rho", expand_out=True): ``function(q, sqrt_g)``. """ - compute_surface_integrals = surface_integrals_map(grid, surface_label, False) + integrate = surface_integrals_map(grid, surface_label, False) def _surface_averages(q, sqrt_g=jnp.array([1.0]), denominator=None): """Compute a surface average for each surface in the grid. @@ -1025,7 +1021,7 @@ def _surface_averages(q, sqrt_g=jnp.array([1.0]), denominator=None): The first dimension of the array should have size ``grid.num_nodes``. When ``q`` is n-dimensional, the intention is to average, over the domain parameterized by rho, theta, and zeta, - a n-dimensional function over the previously mentioned domain. + an n-dimensional function over the previously mentioned domain. sqrt_g : ndarray Coordinate system Jacobian determinant; see ``data_index["sqrt(g)"]``. denominator : ndarray @@ -1044,14 +1040,14 @@ def _surface_averages(q, sqrt_g=jnp.array([1.0]), denominator=None): """ q = jnp.atleast_1d(q) sqrt_g = jnp.atleast_1d(sqrt_g) - numerator = compute_surface_integrals((sqrt_g * q.T).T) + numerator = integrate((sqrt_g * q.T).T) # memory optimization to call expand() at most once if denominator is None: # skip integration if constant denominator = ( (4 * jnp.pi**2 if surface_label == "rho" else 2 * jnp.pi) * sqrt_g if sqrt_g.size == 1 - else compute_surface_integrals(sqrt_g) + else integrate(sqrt_g) ) averages = (numerator.T / denominator).T if expand_out: @@ -1108,28 +1104,16 @@ def surface_integrals_transform(grid, surface_label="rho"): The second dimension may discretize some function, f, over the codomain, and therefore, have size that matches the desired number of points at which the output is evaluated. - If the integrand is vector-valued then the third dimension may - hold the components of size v.size. - This method can also be used to compute the output one point at a time. - In this case, ``q`` will be at most two-dimensional, and the second - dimension may hold the vector components. - - There is technically no difference between the labels f and v, so their - roles may be swapped if this is more convenient. + This method can also be used to compute the output one point at a time, + in which case ``q`` can have shape (``grid.num_nodes``, ). Input ----- - If ``q`` is one-dimensional, then it should have shape + If ``q`` has one-dimension, then it should have shape (``grid.num_nodes``, ). 
- If ``q`` is two-dimensional, then either - 1) g and f are scalar functions, so the input should have shape - (``grid.num_nodes``, f.size). - 2) g (or f) is a vector-valued function, and f has been evaluated at - only one point, so the input should have shape - (``grid.num_nodes``, v.size). - If ``q`` is three-dimensional, then it should have shape - (``grid.num_nodes``, f.size, v.size). + If ``q`` has multiple dimensions, then it should have shape + (``grid.num_nodes``, *f.shape). Output ------ @@ -1137,13 +1121,10 @@ def surface_integrals_transform(grid, surface_label="rho"): Tᵤ₁ for a particular surface of constant u₁ in the given grid. The order is sorted in increasing order of the values which specify u₁. - If ``q`` is one-dimensional, the returned array has shape + If ``q`` has one dimension, the returned array has shape (grid.num_surface_label, ). - If ``q`` is two-dimensional, the returned array has shape - (grid.num_surface_label, (f or v).size), depending on whether f or v is - the relevant label. - If ``q`` is three-dimensional, the returned array has shape - (grid.num_surface_label, f.size, v.size). + If ``q`` has multiple dimensions, the returned array has shape + (grid.num_surface_label, *f.shape). """ # Expansion should not occur here. The typical use case of this method is to From fae39326564fd1ec20933e8e595b220463b7ddac Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 15 Mar 2024 03:01:09 -0500 Subject: [PATCH 033/241] Switch to interp1d. Tanh_sinh bounce integral looks like it works --- desc/compute/bounce_integral.py | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index ecd467c2fa..303578e448 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -1,6 +1,6 @@ """Methods for computing bounce integrals.""" -from interpax import Akima1DInterpolator, CubicHermiteSpline +from interpax import Akima1DInterpolator, CubicHermiteSpline, interp1d from desc.backend import complex_sqrt, flatnonzero, jnp, put_along_axis, vmap from desc.compute.utils import mask_diff, mask_take, safediv @@ -15,19 +15,19 @@ v_mask_take = vmap(lambda a, mask: mask_take(a, mask, size=a.size, fill_value=jnp.nan)) -def _inner_product_quad(knots, pitch, w, X, f, B_sup_z, B, B_z_ra): +def _inner_product_quad(pitch, w, X, knots, f, B_sup_z, B, B_z_ra): """Compute bounce integrals for every pitch along a particular field line. Parameters ---------- - knots : ndarray, shape(knots.size, ) - Field line-following ζ coordinates of spline knots. pitch : ndarray, shape(pitch.size, ) λ values. w : ndarray, shape(w.size, ) Quadrature weights. X : ndarray, shape(pitch.size, X.shape[1], w.size) Quadrature points. + knots : ndarray, shape(knots.size, ) + Field line-following ζ coordinates of spline knots. f : ndarray, shape(knots.size, ) Function to compute bounce integral of, evaluated at knots. 
B_sup_z : ndarray, shape(knots.size, ) @@ -45,17 +45,23 @@ def _inner_product_quad(knots, pitch, w, X, f, B_sup_z, B, B_z_ra): """ assert pitch.ndim == 1 assert X.shape == (pitch.size, X.shape[1], w.size) - # FIXME: https://github.com/f0uriest/interpax/issues/26 - f = Akima1DInterpolator(knots, f, check=False) - B_sup_z = Akima1DInterpolator(knots, B_sup_z, check=False) - B = CubicHermiteSpline(knots, B, B_z_ra, check=False) + assert knots.shape == f.shape == B_sup_z.shape == B.shape == B_z_ra.shape + shape = X.shape + X = X.ravel() + f = interp1d(X, knots, f, method="akima").reshape(shape) + B_sup_z = interp1d(X, knots, B_sup_z, method="akima").reshape(shape) + # Specify derivative at knots with fx=B_z_ra for ≈ cubic hermite interpolation. + B = interp1d(X, knots, B, fx=B_z_ra, method="cubic").reshape(shape) pitch = pitch[:, jnp.newaxis, jnp.newaxis] - inner_product = jnp.dot(f(X) / (B_sup_z(X) * jnp.sqrt(1 - pitch * B(X))), w) + inner_product = jnp.dot(f / (B_sup_z * jnp.sqrt(1 - pitch * B)), w) + # p, N * NUM_ROOTS return inner_product """Compute bounce integrals for every pitch along every field line.""" -_compute_quad = vmap(_inner_product_quad, in_axes=(None, None, None, 1, 0, 0, 0, 0)) +_compute_quad = vmap( + _inner_product_quad, in_axes=(None, None, 1, None, 0, 0, 0, 0), out_axes=1 +) def tanh_sinh_quadrature(resolution): @@ -536,7 +542,7 @@ def quad(name, pitch=None): X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] f = eq.compute(name, grid=grid, data=data)[name].reshape(M * L, resolution) result = jnp.reshape( - _compute_quad(zeta, pitch, w, X, f, B_sup_z, B, B_z_ra) + _compute_quad(pitch, w, X, zeta, f, B_sup_z, B, B_z_ra) * jnp.pi / (bp2 - bp1), newshape=(pitch.size, M, L, N * NUM_ROOTS), @@ -712,6 +718,7 @@ def _bounce_average(name, pitch=None): along that field line padded by nan. """ + # Should be fine to fit akima spline to constant function "1". return safediv(bi(name, pitch), bi("1", pitch)) return _bounce_average From 408be03de314c2fb838d957dda5a76516eecb7ec Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 15 Mar 2024 21:08:38 -0500 Subject: [PATCH 034/241] Fix bugs, add tests, add example code, improve api for useablity Fix some subtle bugs with polynomial evalutation. Add tests and example code for methods. bounce integral api now returns grid and data to more easily compute pitch Add some info on design choices --- desc/compute/bounce_integral.py | 235 +++++++++++++++++++------------- desc/compute/utils.py | 13 +- tests/test_bounce_integral.py | 106 +++++++++----- 3 files changed, 214 insertions(+), 140 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 303578e448..8ef4f24329 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -1,5 +1,4 @@ """Methods for computing bounce integrals.""" - from interpax import Akima1DInterpolator, CubicHermiteSpline, interp1d from desc.backend import complex_sqrt, flatnonzero, jnp, put_along_axis, vmap @@ -45,21 +44,27 @@ def _inner_product_quad(pitch, w, X, knots, f, B_sup_z, B, B_z_ra): """ assert pitch.ndim == 1 assert X.shape == (pitch.size, X.shape[1], w.size) + pitch = pitch[:, jnp.newaxis, jnp.newaxis] + # TODO: Simple to generalize pitch so that it can be specified per field line. + # Just add one new axis here and multiple to d in cubic_poly_roots. + # Useful since typically pitch is linearly spaced from min to max of + # 1/|B| along field field line, so specifying per field line would + # reduce the sparsity of the X matrix. 
Not important right now; marking + # for the future developer. assert knots.shape == f.shape == B_sup_z.shape == B.shape == B_z_ra.shape shape = X.shape X = X.ravel() + # Use akima to suppress oscillation. f = interp1d(X, knots, f, method="akima").reshape(shape) B_sup_z = interp1d(X, knots, B_sup_z, method="akima").reshape(shape) # Specify derivative at knots with fx=B_z_ra for ≈ cubic hermite interpolation. B = interp1d(X, knots, B, fx=B_z_ra, method="cubic").reshape(shape) - pitch = pitch[:, jnp.newaxis, jnp.newaxis] inner_product = jnp.dot(f / (B_sup_z * jnp.sqrt(1 - pitch * B)), w) - # p, N * NUM_ROOTS return inner_product """Compute bounce integrals for every pitch along every field line.""" -_compute_quad = vmap( +inner_product_quad = vmap( _inner_product_quad, in_axes=(None, None, 1, None, 0, 0, 0, 0), out_axes=1 ) @@ -152,11 +157,13 @@ def polyder(c): def polyval(x, c): """Evaluate the set of polynomials c at the points x. + Note that this function does not perform the same operation as + ``np.polynomial.polynomial.polyval(x, c)``. + Parameters ---------- x : ndarray Coordinates at which to evaluate the set of polynomials. - The first ``c.ndim`` axes should have shape ``c.shape[1:]``. c : ndarray First axis should store coefficients of a polynomial. For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, @@ -164,22 +171,27 @@ def polyval(x, c): Returns ------- - val : ndarray - ``val[j, k, ....]`` is the polynomial with coefficients ``c[:, j, k, ....]`` - evaluated at the point ``x[j, k, ....]``. + val : ndarray, shape(*x.shape) + Polynomial with given coefficients evaluated at given points. - Notes - ----- - This function does not perform the same operation as - ``np.polynomial.polynomial.polyval(x, c)``. - An example usage of this function is shown in - tests/test_bounce_integral.py::test_polyval. + Examples + -------- + .. code-block:: python + + val = polyval(x, c) + assert val.ndim == max(x.ndim, c.ndim - 1) + for index in np.ndindex(c.shape[1:]): + np.testing.assert_allclose( + actual=val[..., *index], + desired=np.poly1d(c[:, *index])(x[..., *index]), + ) """ + # Should be fine to do this instead of Horner's method + # because we expect to only integrate up to quartic polynomials. X = x[..., jnp.newaxis] ** jnp.arange(c.shape[0] - 1, -1, -1) - alphabet = "abcdefghijklmnopqrstuvwxyz" - s = alphabet[: c.ndim] - val = jnp.einsum(f"{s},{s[1:]}...{s[0]}->{s[1:]}...", c, X) + val = jnp.einsum("...i,i...->...", X, c) + assert val.ndim == max(x.ndim, c.ndim - 1) return val @@ -246,8 +258,9 @@ def clip_to_nan(root): return roots -# TODO: fix up some logic with the periodic boundary bounce integral thing -def _compute_bounce_points(pitch, knots, poly_B, poly_B_z): +# TODO: Consider the boundary to be periodic to compute bounce integrals of +# particles trapped outside this snapshot of the field lines. +def compute_bounce_points(pitch, knots, poly_B, poly_B_z): """Compute the bounce points given |B| and pitch λ. Parameters @@ -258,8 +271,14 @@ def _compute_bounce_points(pitch, knots, poly_B, poly_B_z): Field line-following ζ coordinates of spline knots. poly_B : ndarray Polynomial coefficients of the cubic spline of |B|. + First axis should iterate through coefficients of power series, + and the last axis should iterate through the piecewise + polynomials of a particular spline of |B| on along field line. poly_B_z : ndarray Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. 
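The power-matrix contraction that ``polyval`` performs can be checked in isolation against ``np.poly1d``; the snippet below is a small standalone NumPy version of the same trick, with arbitrary coefficients and one evaluation point per polynomial chosen purely for illustration.

.. code-block:: python

    import numpy as np

    c = np.arange(24.0).reshape(4, 2, 3)      # cubic coefficients, highest power first
    x = np.linspace(-1, 1, 6).reshape(2, 3)   # one evaluation point per polynomial

    # Build x**[3, 2, 1, 0] on a trailing axis and contract it with the leading axis of c.
    X = x[..., np.newaxis] ** np.arange(c.shape[0] - 1, -1, -1)
    val = np.einsum("...i,i...->...", X, c)

    for j, k in np.ndindex(c.shape[1:]):
        np.testing.assert_allclose(val[j, k], np.poly1d(c[:, j, k])(x[j, k]))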
+ First axis should iterate through coefficients of power series, + and the last axis should iterate through the piecewise + polynomials of a particular spline of |B| on along field line. Returns ------- @@ -283,11 +302,11 @@ def _compute_bounce_points(pitch, knots, poly_B, poly_B_z): # Reshape so that last axis enumerates intersects of a pitch along a field line. # Condense the first and second axes to vmap over them. - B_z = polyval(intersect, poly_B_z[:, jnp.newaxis]).reshape( + B_z = polyval(intersect, poly_B_z[..., jnp.newaxis]).reshape( pitch.size * ML, N * NUM_ROOTS ) intersect = intersect.reshape(pitch.size * ML, N * NUM_ROOTS) - # Only consider intersect if it is within knots that bound that polynomial. + # Only consider intersect if it is within knots that bound that polynomial.pytes is_intersect = ~jnp.isnan(intersect) # Rearrange so that all intersects along a field line are contiguous. @@ -300,12 +319,10 @@ def _compute_bounce_points(pitch, knots, poly_B, poly_B_z): bp2 = B_z >= 0 # B_z <= 0 at intersect i implies B_z >= 0 at intersect i+1 by continuity. - # extend bp1 and bp2 by single element and then test - # # index of last intersect along a field line + # extend bp1 and bp2 by single element and then test + # index of last intersect along a field line # idx = jnp.squeeze(v_first_flatnonzero(~is_intersect)) - 1 # noqa: E800 # assert idx.shape == (pitch.size * ML,) # noqa: E800 - # Consider the boundary to be periodic to compute bounce integrals of - # particles trapped outside this snapshot of the field lines. # Roll such that first intersect is moved to index of last intersect. # Get ζ values of bounce points from the masks. @@ -314,10 +331,12 @@ def _compute_bounce_points(pitch, knots, poly_B, poly_B_z): return bp1, bp2 +# TODO: Consider the boundary to be periodic to compute bounce integrals of +# particles trapped outside this snapshot of the field lines. def _compute_bounce_points_with_knots(pitch, knots, poly_B, poly_B_z): """Compute the bounce points given |B| and pitch λ. - Like ``_compute_bounce_points`` but returns ingredients needed by the + Like ``compute_bounce_points`` but returns ingredients needed by the algorithm in the ``direct`` method in ``bounce_integral``. Parameters @@ -328,8 +347,14 @@ def _compute_bounce_points_with_knots(pitch, knots, poly_B, poly_B_z): Field line-following ζ coordinates of spline knots. poly_B : ndarray Polynomial coefficients of the cubic spline of |B|. + First axis should iterate through coefficients of power series, + and the last axis should iterate through the piecewise + polynomials of a particular spline of |B| on along field line. poly_B_z : ndarray Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. + First axis should iterate through coefficients of power series, + and the last axis should iterate through the piecewise + polynomials of a particular spline of |B| on along field line. Returns ------- @@ -360,7 +385,7 @@ def _compute_bounce_points_with_knots(pitch, knots, poly_B, poly_B_z): # Reshape so that last axis enumerates intersects of a pitch along a field line. # Condense the first and second axes to vmap over them. - B_z = polyval(intersect, poly_B_z[:, jnp.newaxis]).reshape( + B_z = polyval(intersect, poly_B_z[..., jnp.newaxis]).reshape( pitch.size * ML, N * (NUM_ROOTS + 2) ) # Only consider intersect if it is within knots that bound that polynomial. 
@@ -397,7 +422,7 @@ def _compute_bounce_points_with_knots(pitch, knots, poly_B, poly_B_z): def _compute_bp_if_given_pitch( - pitch, knots, poly_B, poly_B_z, compute_bounce_points, *original, err=False + pitch, knots, poly_B, poly_B_z, compute_bp, *original, err=False ): """Return the ingredients needed by the ``bounce_integral`` function. @@ -410,9 +435,15 @@ def _compute_bp_if_given_pitch( Field line-following ζ coordinates of spline knots. poly_B : ndarray Polynomial coefficients of the cubic spline of |B|. + First axis should iterate through coefficients of power series, + and the last axis should iterate through the piecewise + polynomials of a particular spline of |B| on along field line. poly_B_z : ndarray Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. - compute_bounce_points : callable + First axis should iterate through coefficients of power series, + and the last axis should iterate through the piecewise + polynomials of a particular spline of |B| on along field line. + compute_bp : callable Method to compute bounce points. original : tuple Whatever this method returned earlier. @@ -426,15 +457,15 @@ def _compute_bp_if_given_pitch( return original else: pitch = jnp.atleast_1d(pitch) - return pitch, *compute_bounce_points(pitch, knots, poly_B, poly_B_z) + return pitch, *compute_bp(pitch, knots, poly_B, poly_B_z) def bounce_integral( eq, + pitch=None, rho=None, alpha=None, zeta_max=10 * jnp.pi, - pitch=None, resolution=20, method="tanh_sinh", ): @@ -458,14 +489,14 @@ def bounce_integral( ---------- eq : Equilibrium Equilibrium on which the bounce integral is defined. - rho : ndarray + pitch : ndarray + λ values to evaluate the bounce integral at. + rho : ndarray or float Unique flux surface label coordinates. - alpha : ndarray + alpha : ndarray or float Unique field line label coordinates over a constant rho surface. zeta_max : float Max value for field line following coordinate. - pitch : ndarray - λ values to evaluate the bounce integral at. resolution : int Number of interpolation points (knots) used for splines in the quadrature. A maximum of three bounce points can be detected in between knots. @@ -485,15 +516,21 @@ def bounce_integral( ------- bi : callable This callable method computes the bounce integral F_ℓ(λ) for every - specified field line ℓ (constant rho and alpha), for every λ value in - ``pitch``. + specified field line ℓ (constant rho and alpha), for every λ value in ``pitch``. + grid : Grid + DESC coordinate grid for the given field line coordinates. + data : dict + Dictionary of ndarrays of stuff evaluated on ``grid``. Examples -------- .. code-block:: python - bi = bounce_integral(eq) - result = bi(name, pitch) + bi, grid, data = bounce_integral(eq) + pitch = jnp.linspace(1 / data["B"].max(), 1 / data["B"].min(), 30) + name = "g_zz" + f = eq.compute(name, grid=grid, data=data)[name] + result = bi(f, pitch) """ if rho is None: @@ -518,13 +555,13 @@ def bounce_integral( assert poly_B.shape == (4, M * L, N) assert poly_B_z.shape == (3, M * L, N) - def quad(name, pitch=None): + def tanh_sinh(f, pitch=None): """Compute the bounce integral of the named quantity. Parameters ---------- - name : ndarray - Name of quantity in ``data_index`` to compute the bounce integral of. + f : ndarray + Quantity to compute the bounce integral of. pitch : ndarray λ values to evaluate the bounce integral at. If None, uses the values given to the parent function. 
@@ -537,25 +574,25 @@ def quad(name, pitch=None): """ pitch, bp1, bp2 = _compute_bp_if_given_pitch( - pitch, zeta, poly_B, poly_B_z, _compute_bounce_points, *original, err=True + pitch, zeta, poly_B, poly_B_z, compute_bp, *original, err=True ) X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] - f = eq.compute(name, grid=grid, data=data)[name].reshape(M * L, resolution) + f = f.reshape(M * L, resolution) result = jnp.reshape( - _compute_quad(pitch, w, X, zeta, f, B_sup_z, B, B_z_ra) + inner_product_quad(pitch, w, X, zeta, f, B_sup_z, B, B_z_ra) * jnp.pi / (bp2 - bp1), newshape=(pitch.size, M, L, N * NUM_ROOTS), ) return result - def direct(name, pitch=None): + def direct(f, pitch=None): """Compute the bounce integral of the named quantity. Parameters ---------- - name : ndarray - Name of quantity in ``data_index`` to compute the bounce integral of. + f : ndarray + Quantity to compute the bounce integral of. pitch : ndarray λ values to evaluate the bounce integral at. If None, uses the values given to the parent function. @@ -573,18 +610,11 @@ def direct(name, pitch=None): is_intersect, is_bp, ) = _compute_bp_if_given_pitch( - pitch, - zeta, - poly_B, - poly_B_z, - _compute_bounce_points_with_knots, - *original, - err=True, + pitch, zeta, poly_B, poly_B_z, compute_bp, *original, err=True ) integrand = jnp.nan_to_num( - eq.compute(name, grid=grid, data=data)[name] - / (data["B^zeta"] * jnp.sqrt(1 - pitch[:, jnp.newaxis] * data["|B|"])) + f / (data["B^zeta"] * jnp.sqrt(1 - pitch[:, jnp.newaxis] * data["|B|"])) ).reshape(pitch.size * M * L, resolution) integrand = Akima1DInterpolator(zeta, integrand, axis=-1, check=False).c integrand = jnp.moveaxis(integrand, 1, -1) @@ -593,9 +623,9 @@ def direct(name, pitch=None): # is preferable to any numerical quadrature. For example, even if the # intersection points were evenly spaced, a composite Simpson's quadrature # would require computing the spline on 1.8x more knots for the same accuracy. - primitive = polyval(intersect_nan_to_right_knot, polyint(integrand)).reshape( - pitch.size * M * L, N * (NUM_ROOTS + 2) - ) + primitive = polyval( + intersect_nan_to_right_knot, polyint(integrand)[..., jnp.newaxis] + ).reshape(pitch.size * M * L, N * (NUM_ROOTS + 2)) sums = jnp.cumsum( # Periodic boundary to compute bounce integrals of particles # trapped outside this snapshot of the field lines. @@ -610,42 +640,43 @@ def direct(name, pitch=None): result = jnp.reshape( # Compute difference of ``sums`` between bounce points. v_mask_diff(v_mask_take(sums, is_intersect), is_bp)[..., : N * NUM_ROOTS], - # Guaranteed to have at most N * NUM_ROOTS entries where is_bp is true. 
newshape=(pitch.size, M, L, N * NUM_ROOTS), ) return result - if method == "direct": - original = _compute_bp_if_given_pitch( - pitch, zeta, poly_B, poly_B_z, _compute_bounce_points_with_knots, err=False - ) - return direct - else: - original = _compute_bp_if_given_pitch( - pitch, zeta, poly_B, poly_B_z, _compute_bounce_points, err=False - ) + if method == "tanh_sinh": x, w = tanh_sinh_quadrature(resolution) x = jnp.arcsin(x) / jnp.pi - 0.5 - return quad + compute_bp = compute_bounce_points + bi = tanh_sinh + elif method == "direct": + compute_bp = _compute_bounce_points_with_knots + bi = direct + else: + raise ValueError(f"Got unknown method: {method}.") + original = _compute_bp_if_given_pitch( + pitch, zeta, poly_B, poly_B_z, compute_bp, err=False + ) + return bi, grid, data def bounce_average( eq, + pitch=None, rho=None, alpha=None, zeta_max=10 * jnp.pi, - pitch=None, resolution=20, - method="quad", + method="tanh_sinh", ): """Returns a method to compute the bounce average of any quantity. The bounce average is defined as - G_ℓ(λ) = (∫ g(ℓ) / √(1 − λ |B|) dℓ) / (∫ 1 / √(1 − λ |B|) dℓ), where + F_ℓ(λ) = (∫ f(ℓ) / √(1 − λ |B|) dℓ) / (∫ 1 / √(1 − λ |B|) dℓ), where dℓ parameterizes the distance along the field line, λ is a constant proportional to the magnetic moment over energy, |B| is the norm of the magnetic field, - g(ℓ) is the quantity to integrate along the field line, + f(ℓ) is the quantity to integrate along the field line, and the endpoints of the integration are at the bounce points. For a particle with fixed λ, bounce points are defined to be the location on the field line such that the particle's velocity parallel to the @@ -659,15 +690,14 @@ def bounce_average( ---------- eq : Equilibrium Equilibrium on which the bounce average is defined. - rho : ndarray + pitch : ndarray + λ values to evaluate the bounce average at. + rho : ndarray or float Unique flux surface label coordinates. - alpha : ndarray + alpha : ndarray or float Unique field line label coordinates over a constant rho surface. zeta_max : float Max value for field line following coordinate. - pitch : ndarray - λ values to evaluate the bounce average at. - Defaults to linearly spaced values between min and max of |B|. resolution : int Number of interpolation points (knots) used for splines in the quadrature. A maximum of three bounce points can be detected in between knots. @@ -676,37 +706,45 @@ def bounce_average( So for well-behaved magnetic fields increasing resolution should increase the accuracy of the quadrature. method : str - The method to evaluate the integral. - The "spline" method exactly integrates a cubic spline of the integrand. - The "quad" method performs a Gauss quadrature and estimates the integrand - by using distinct cubic splines for components in the integrand so that - the singularity from the division by zero near the bounce points can be - captured more accurately than can be represented by a polynomial. + The quadrature scheme used to evaluate the integral. + The "direct" method exactly integrates a cubic spline of the integrand. + The "tanh_sinh" method performs a Tanh-sinh quadrature, where independent cubic + splines are used for components in the integrand so that the singularity near + the bounce points can be captured more accurately than can be represented by a + polynomial. Returns ------- ba : callable - This callable method computes the bounce average G_ℓ(λ) for every - specified field line ℓ (constant rho and alpha), for every λ value in - ``lambdas``. 
+ This callable method computes the bounce average F_ℓ(λ) for every + specified field line ℓ (constant rho and alpha), for every λ value in ``pitch``. + grid : Grid + DESC coordinate grid for the given field line coordinates. + data : dict + Dictionary of ndarrays of stuff evaluated on ``grid``. Examples -------- .. code-block:: python - ba = bounce_average(eq) - result = ba(name, pitch) + ba, grid, data = bounce_integral(eq) + pitch = jnp.linspace(1 / data["B"].max(), 1 / data["B"].min(), 30) + name = "g_zz" + f = eq.compute(name, grid=grid, data=data)[name] + result = ba(f, pitch) """ - bi = bounce_integral(eq, rho, alpha, zeta_max, pitch, resolution, method) + bi, grid, data = bounce_integral( + eq, pitch, rho, alpha, zeta_max, resolution, method + ) - def _bounce_average(name, pitch=None): + def _bounce_average(f, pitch=None): """Compute the bounce average of the named quantity using the spline method. Parameters ---------- - name : ndarray - Name of quantity in ``data_index`` to compute the bounce average of. + f : ndarray + Quantity to compute the bounce average of. pitch : ndarray λ values to evaluate the bounce average at. If None, uses the values given to the parent function. @@ -718,10 +756,11 @@ def _bounce_average(name, pitch=None): along that field line padded by nan. """ - # Should be fine to fit akima spline to constant function "1". - return safediv(bi(name, pitch), bi("1", pitch)) + # Should be fine to fit akima spline to constant function "1" since + # akima suppresses oscillation of the fit. + return safediv(bi(f, pitch), bi(jnp.ones_like(f), pitch)) - return _bounce_average + return _bounce_average, grid, data def field_line_to_desc_coords(eq, rho, alpha, zeta): diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 97afc08ae4..ef8f9da4f6 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -864,7 +864,7 @@ def surface_integrals_map(grid, surface_label="rho", expand_out=True): # experimental in jax. # The ith row of masks is True only at the indices which correspond to the # ith surface. The integral over the ith surface is the dot product of the - # ith row vector and the vector of integrands of all surfaces. + # ith row vector and the integrand defined over all the surfaces. masks = inverse_idx == jnp.arange(unique_size)[:, jnp.newaxis] # Imagine a torus cross-section at zeta=π. # A grid with a duplicate zeta=π node has 2 of those cross-sections. @@ -917,13 +917,10 @@ def _surface_integrals(q=jnp.array([1.0])): """ integrands = (spacing * jnp.nan_to_num(q).T).T # `integrands` may have shape (g.size, *f.shape), where - # g is the grid function depending on the integration variables - # f is a function which may be independent of the integration variables - # The intention is to integrate `integrands` which is a function-valued - # function over the grid (with domain size of g.size = grid.num_nodes) - # over each surface in the grid. - integrals = jnp.tensordot(masks, integrands, axes=([1], [0])) - # uses less memory than jnp.einsum("ug,g...->u...", masks, integrands) + # g.size is grid.num_nodes and iterating along this axis varies the object, + # e.g. some function f, held in the remaining axes over the nodes of the grid. + # Uses less memory than jnp.einsum("ug,g...->u...", masks, integrands). 
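The mask contraction referred to in these comments can be exercised on a toy flat grid; the sketch below uses a hypothetical node ordering and omits the quadrature spacing factor, but shows that contracting a boolean surface mask against the integrand sums exactly the nodes belonging to each surface, and that ``tensordot`` and ``einsum`` agree.

.. code-block:: python

    import numpy as np

    rng = np.random.default_rng(0)
    surface_of_node = rng.permutation(np.repeat(np.arange(3), 4))  # 3 surfaces x 4 nodes
    q = rng.normal(size=(surface_of_node.size, 5))                 # 5 components per node

    masks = (surface_of_node == np.arange(3)[:, np.newaxis]).astype(float)
    integrals = np.tensordot(masks, q, axes=1)                     # shape (3, 5)

    np.testing.assert_allclose(integrals, np.einsum("ug,g...->u...", masks, q))
    np.testing.assert_allclose(integrals[0], q[surface_of_node == 0].sum(axis=0))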
+ integrals = jnp.tensordot(masks, integrands, axes=1) return grid.expand(integrals, surface_label) if expand_out else integrals return _surface_integrals diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index a9bdd79120..056530aa75 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -1,12 +1,14 @@ -"""Tests bounce integral.""" +"""Test bounce integral methods.""" import numpy as np import pytest -from interpax import Akima1DInterpolator +from interpax import Akima1DInterpolator, CubicHermiteSpline from desc.backend import fori_loop, put, root_scalar from desc.compute.bounce_integral import ( + bounce_average, bounce_integral, + compute_bounce_points, cubic_poly_roots, field_line_to_desc_coords, polyder, @@ -15,8 +17,8 @@ ) from desc.continuation import solve_continuation_automatic from desc.equilibrium import Equilibrium +from desc.examples import get from desc.geometry import FourierRZToroidalSurface -from desc.grid import LinearGrid from desc.objectives import ( ObjectiveFromUser, ObjectiveFunction, @@ -80,30 +82,41 @@ def test_polyder(): def test_polyval(): """Test vectorized computation of polynomial evaluation.""" quintic = 6 - poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi - assert np.unique(poly.shape).size == poly.ndim - x = np.linspace(0, 20, poly.shape[1] * poly.shape[2]).reshape( - poly.shape[1], poly.shape[2] - ) - x = np.stack([x, x * 2], axis=-1) - x = np.stack([x, x * 2, x * 3, x * 4], axis=-1) + c = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi + # make sure broadcasting won't hide error in implementation + assert np.unique(c.shape).size == c.ndim + x = np.linspace(0, 20, c.shape[1] * c.shape[2]).reshape(c.shape[1], c.shape[2]) + val = polyval(x, c) + for index in np.ndindex(c.shape[1:]): + np.testing.assert_allclose( + actual=val[..., *index], + desired=np.poly1d(c[:, *index])(x[..., *index]), + err_msg=f"Failed with shapes {x.shape} and {c.shape}.", + ) + + x = np.stack([x, x * 2], axis=0) + x = np.stack([x, x * 2, x * 3, x * 4], axis=0) + # make sure broadcasting won't hide error in implementation assert np.unique(x.shape).size == x.ndim - assert poly.shape[1:] == x.shape[: poly.ndim - 1] - assert np.unique((poly.shape[0],) + x.shape[poly.ndim - 1 :]).size == x.ndim - 1 - val = polyval(x, poly) - for j in range(poly.shape[1]): - for k in range(poly.shape[2]): - np.testing.assert_allclose(val[j, k], np.poly1d(poly[:, j, k])(x[j, k])) + assert c.shape[1:] == x.shape[x.ndim - (c.ndim - 1) :] + assert np.unique((c.shape[0],) + x.shape[c.ndim - 1 :]).size == x.ndim - 1 + val = polyval(x, c) + for index in np.ndindex(c.shape[1:]): + np.testing.assert_allclose( + actual=val[..., *index], + desired=np.poly1d(c[:, *index])(x[..., *index]), + err_msg=f"Failed with shapes {x.shape} and {c.shape}.", + ) # integrate piecewise polynomial and set constants to preserve continuity - y = np.arange(1, 6) + y = np.arange(2, 8) y = np.arange(y.prod()).reshape(*y) x = np.arange(y.shape[-1]) a1d = Akima1DInterpolator(x, y, axis=-1) primitive = polyint(a1d.c) # choose evaluation points at d just to match choice made in a1d.antiderivative() d = np.diff(x) - d = d.reshape(d.size, *np.ones(primitive.ndim - 2, dtype=int)) + # evaluate every spline at d k = polyval(d, primitive) # don't want to use jax.ndarray.at[].add() in case jax is not installed primitive = np.array(primitive) @@ -111,6 +124,22 @@ def test_polyval(): np.testing.assert_allclose(primitive, a1d.antiderivative().c) +@pytest.mark.unit +def 
test_temporary(): + """Test that things are returned without errors.""" + eq = get("HELIOTRON") + ba, grid, data = bounce_average(eq, method="tanh_sinh") + pitch = np.linspace(1 / data["B"].max(), 1 / data["B"].min(), 30) + name = "g_zz" + f = eq.compute(name, grid=grid, data=data)[name] + result = ba(f, pitch) + assert np.isfinite(result).any(), "tanh_sinh quadrature failed." + + ba, _, _ = bounce_average(eq, method="direct") + result = ba(f, pitch) + print(np.isfinite(result).any()) + + @pytest.mark.unit def test_elliptic_integral_limit(): """Test bounce integral matches elliptic integrals. @@ -158,6 +187,7 @@ def beta(grid, data): objective = ObjectiveFunction( (ObjectiveFromUser(fun=beta, eq=eq, target=low_beta),) ) + constraints = (*get_fixed_boundary_constraints(eq), get_equilibrium_objective(eq)) opt = Optimizer("proximal-lsq-exact") eq, result = eq.optimize( @@ -165,25 +195,33 @@ def beta(grid, data): ) print(result) - rho = 0.5 + rho = np.array([0.5]) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 20) - zeta = np.linspace(0, 10 * np.pi, 20) - grid = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym) - B = eq.compute(names="|B|", grid=grid)["|B|"] - pitch = np.linspace(1 / B.max(), 1 / B.min(), 10) - bi = bounce_integral( - eq, pitch=pitch, rho=rho, alpha=alpha, zeta_max=zeta[-1], resolution=zeta.size + zeta_max = 10 * np.pi + resolution = 30 + bi, grid, data = bounce_integral( + eq, + rho=rho, + alpha=alpha, + zeta_max=zeta_max, + resolution=resolution, + method="tanh_sinh", ) + pitch = np.linspace(1 / data["B"].max(), 1 / data["B"].min(), resolution) name = "g_zz" - result = bi(name) - print(result.shape) - print(result) - assert np.isfinite(result).any() - # todo: look into GitHub pull request #934 and see if zero B-field issue resolved - grid, data = field_line_to_desc_coords(eq, rho, alpha, zeta) - g_zz = eq.compute(name, grid=grid, data=data)[name].reshape(alpha.size, zeta.size) - print(g_zz) - # todo: get bounce points from _compute_bp and compute elliptic integral + f = eq.compute(name, grid=grid, data=data)[name] + result = bi(f, pitch) + assert np.isfinite(result).any(), "tanh_sinh quadrature failed." 
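For the comparison this test is building toward, the large aspect ratio limit admits a closed form that could serve as the reference: with the model field B = B₀(1 − ε cos θ) and a trapped pitch λ, the bounce integral of f = 1 reduces to a complete elliptic integral of the first kind, ∫ dθ / √(1 − λB) = 4 K(k²) / √(2 λ B₀ ε) with k² = (1 − λB₀(1 − ε)) / (2 λ B₀ ε). The sketch below only checks that identity numerically; the model parameters are assumptions, not values taken from the equilibrium constructed in this test.

.. code-block:: python

    import numpy as np
    from scipy.integrate import quad
    from scipy.special import ellipk

    B0, eps = 1.0, 0.1
    lam = 1 / (B0 * (1 + 0.5 * eps))            # pitch between 1 / max|B| and 1 / min|B|
    k2 = (1 - lam * B0 * (1 - eps)) / (2 * lam * B0 * eps)
    theta_b = 2 * np.arcsin(np.sqrt(k2))        # bounce angle, where 1 - lam * B = 0

    numeric, _ = quad(
        lambda t: 1 / np.sqrt(1 - lam * B0 * (1 - eps * np.cos(t))),
        -theta_b,
        theta_b,
        limit=100,
    )
    analytic = 4 * ellipk(k2) / np.sqrt(2 * lam * B0 * eps)
    np.testing.assert_allclose(numeric, analytic, rtol=1e-6)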
+ + # routine copied from bounce_integrals functions + zeta = np.linspace(0, zeta_max, resolution) + B = data["|B|"].reshape(alpha.size * rho.size, resolution) + B_z_ra = data["|B|_z|r,a"].reshape(alpha.size * rho.size, resolution) + poly_B = CubicHermiteSpline(zeta, B, B_z_ra, axis=-1).c + poly_B = np.moveaxis(poly_B, 1, -1) + poly_B_z = polyder(poly_B) + bp1, bp2 = compute_bounce_points(pitch, zeta, poly_B, poly_B_z) + # TODO now compare result to elliptic integral # TODO: if deemed useful finish details using methods in desc.compute.bounce_integral From 1b514219ef83174de618557278163daed8ee4a68 Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 15 Mar 2024 22:34:06 -0500 Subject: [PATCH 035/241] Replace starred unpack since it only works on newer numpy versions --- tests/test_bounce_integral.py | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 056530aa75..567e042f62 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -36,8 +36,10 @@ def test_cubic_poly_roots(): poly = np.arange(-60, 60).reshape(cubic, 6, -1) poly[0] = np.where(poly[0] == 0, np.ones_like(poly[0]), poly[0]) poly = poly * np.e * np.pi + # make sure broadcasting won't hide error in implementation assert np.unique(poly.shape).size == poly.ndim constant = np.arange(10) + # make sure broadcasting won't hide error in implementation assert np.unique(poly.shape + constant.shape).size == poly.ndim + constant.ndim roots = cubic_poly_roots(poly, constant, sort=True) for j in range(poly.shape[1]): @@ -46,8 +48,8 @@ def test_cubic_poly_roots(): a, b, c, d = poly[:, j, k] d = d - constant[s] np.testing.assert_allclose( - roots[s, j, k], - np.sort_complex(np.roots([a, b, c, d])), + actual=roots[s, j, k], + desired=np.sort_complex(np.roots([a, b, c, d])), ) @@ -56,13 +58,15 @@ def test_polyint(): """Test vectorized computation of polynomial primitive.""" quintic = 6 poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi + # make sure broadcasting won't hide error in implementation assert np.unique(poly.shape).size == poly.ndim constant = np.pi - out = polyint(poly, k=constant) + primitive = polyint(poly, k=constant) for j in range(poly.shape[1]): for k in range(poly.shape[2]): np.testing.assert_allclose( - out[:, j, k], np.polyint(poly[:, j, k], k=constant) + actual=primitive[:, j, k], + desired=np.polyint(poly[:, j, k], k=constant), ) @@ -71,11 +75,14 @@ def test_polyder(): """Test vectorized computation of polynomial derivative.""" quintic = 6 poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi + # make sure broadcasting won't hide error in implementation assert np.unique(poly.shape).size == poly.ndim - out = polyder(poly) + derivative = polyder(poly) for j in range(poly.shape[1]): for k in range(poly.shape[2]): - np.testing.assert_allclose(out[:, j, k], np.polyder(poly[:, j, k])) + np.testing.assert_allclose( + actual=derivative[:, j, k], desired=np.polyder(poly[:, j, k]) + ) @pytest.mark.unit @@ -88,9 +95,10 @@ def test_polyval(): x = np.linspace(0, 20, c.shape[1] * c.shape[2]).reshape(c.shape[1], c.shape[2]) val = polyval(x, c) for index in np.ndindex(c.shape[1:]): + idx = (..., *index) np.testing.assert_allclose( - actual=val[..., *index], - desired=np.poly1d(c[:, *index])(x[..., *index]), + actual=val[idx], + desired=np.poly1d(c[:, *index])(x[idx]), err_msg=f"Failed with shapes {x.shape} and {c.shape}.", ) @@ -102,9 +110,10 @@ def test_polyval(): assert 
np.unique((c.shape[0],) + x.shape[c.ndim - 1 :]).size == x.ndim - 1 val = polyval(x, c) for index in np.ndindex(c.shape[1:]): + idx = (..., *index) np.testing.assert_allclose( - actual=val[..., *index], - desired=np.poly1d(c[:, *index])(x[..., *index]), + actual=val[idx], + desired=np.poly1d(c[:, *index])(x[idx]), err_msg=f"Failed with shapes {x.shape} and {c.shape}.", ) @@ -140,7 +149,7 @@ def test_temporary(): print(np.isfinite(result).any()) -@pytest.mark.unit +# @pytest.mark.unit def test_elliptic_integral_limit(): """Test bounce integral matches elliptic integrals. From b8a41153ed0c45b17a036d7611eb04f20dc4e1c6 Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 15 Mar 2024 22:38:33 -0500 Subject: [PATCH 036/241] Same as 1b514219ef83174de618557278163daed8ee4a68 --- tests/test_bounce_integral.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 567e042f62..cd913ee294 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -98,7 +98,7 @@ def test_polyval(): idx = (..., *index) np.testing.assert_allclose( actual=val[idx], - desired=np.poly1d(c[:, *index])(x[idx]), + desired=np.poly1d(c[idx])(x[idx]), err_msg=f"Failed with shapes {x.shape} and {c.shape}.", ) @@ -113,7 +113,7 @@ def test_polyval(): idx = (..., *index) np.testing.assert_allclose( actual=val[idx], - desired=np.poly1d(c[:, *index])(x[idx]), + desired=np.poly1d(c[idx])(x[idx]), err_msg=f"Failed with shapes {x.shape} and {c.shape}.", ) @@ -149,7 +149,7 @@ def test_temporary(): print(np.isfinite(result).any()) -# @pytest.mark.unit +@pytest.mark.unit def test_elliptic_integral_limit(): """Test bounce integral matches elliptic integrals. From b408b46b53a82c0a541f97d49286b2c3221c88f9 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 16 Mar 2024 15:23:46 -0500 Subject: [PATCH 037/241] Genearlize to specify pitch per field line to avoid sparse result --- desc/compute/bounce_integral.py | 313 ++++++++++++++++++++------------ tests/test_bounce_integral.py | 54 +++--- 2 files changed, 232 insertions(+), 135 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 8ef4f24329..960739c095 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -1,4 +1,6 @@ """Methods for computing bounce integrals.""" +from functools import partial + from interpax import Akima1DInterpolator, CubicHermiteSpline, interp1d from desc.backend import complex_sqrt, flatnonzero, jnp, put_along_axis, vmap @@ -14,17 +16,19 @@ v_mask_take = vmap(lambda a, mask: mask_take(a, mask, size=a.size, fill_value=jnp.nan)) -def _inner_product_quad(pitch, w, X, knots, f, B_sup_z, B, B_z_ra): - """Compute bounce integrals for every pitch along a particular field line. +# vmap to compute a bounce integral for every pitch along every field line. +@partial(vmap, in_axes=(1, 1, None, None, 0, 0, 0, 0), out_axes=1) +def bounce_quadrature(pitch, X, w, knots, f, B_sup_z, B, B_z_ra): + """Compute a bounce integral for every pitch along a particular field line. Parameters ---------- - pitch : ndarray, shape(pitch.size, ) + pitch : ndarray, shape(P, ) λ values. + X : ndarray, shape(P, (knots.size - 1) * NUM_ROOTS, w.size) + Quadrature points. w : ndarray, shape(w.size, ) Quadrature weights. - X : ndarray, shape(pitch.size, X.shape[1], w.size) - Quadrature points. knots : ndarray, shape(knots.size, ) Field line-following ζ coordinates of spline knots. 
f : ndarray, shape(knots.size, ) @@ -38,23 +42,17 @@ def _inner_product_quad(pitch, w, X, knots, f, B_sup_z, B, B_z_ra): Returns ------- - inner_product : ndarray, shape(pitch.size, X.shape[1]) + inner_product : ndarray, shape(P, (knots.size - 1) * NUM_ROOTS) Bounce integrals for every pitch along a particular field line. """ assert pitch.ndim == 1 - assert X.shape == (pitch.size, X.shape[1], w.size) - pitch = pitch[:, jnp.newaxis, jnp.newaxis] - # TODO: Simple to generalize pitch so that it can be specified per field line. - # Just add one new axis here and multiple to d in cubic_poly_roots. - # Useful since typically pitch is linearly spaced from min to max of - # 1/|B| along field field line, so specifying per field line would - # reduce the sparsity of the X matrix. Not important right now; marking - # for the future developer. + assert X.shape == (pitch.size, (knots.size - 1) * NUM_ROOTS, w.size) assert knots.shape == f.shape == B_sup_z.shape == B.shape == B_z_ra.shape + pitch = pitch[:, jnp.newaxis, jnp.newaxis] shape = X.shape X = X.ravel() - # Use akima to suppress oscillation. + # Use akima spline to suppress oscillation. f = interp1d(X, knots, f, method="akima").reshape(shape) B_sup_z = interp1d(X, knots, B_sup_z, method="akima").reshape(shape) # Specify derivative at knots with fx=B_z_ra for ≈ cubic hermite interpolation. @@ -63,12 +61,6 @@ def _inner_product_quad(pitch, w, X, knots, f, B_sup_z, B, B_z_ra): return inner_product -"""Compute bounce integrals for every pitch along every field line.""" -inner_product_quad = vmap( - _inner_product_quad, in_axes=(None, None, 1, None, 0, 0, 0, 0), out_axes=1 -) - - def tanh_sinh_quadrature(resolution): """ tanh_sinh quadrature. @@ -107,7 +99,7 @@ def tanh_sinh_quadrature(resolution): return x, w -def polyint(c, k=jnp.array([0])): +def polyint(c, k=None): """Coefficients for the primitives of the given set of polynomials. Parameters @@ -118,6 +110,7 @@ def polyint(c, k=jnp.array([0])): coefficient cᵢ should be stored at ``c[n - i]``. k : ndarray Integration constants. + Should broadcast with arrays of shape(*coef.shape[1:]). Returns ------- @@ -127,8 +120,10 @@ def polyint(c, k=jnp.array([0])): where n is ``c.shape[0] - 1``. """ + if k is None: + k = jnp.broadcast_to(0.0, c.shape[1:]) poly = (c.T / jnp.arange(c.shape[0], 0, -1)).T - poly = jnp.append(poly, jnp.broadcast_to(k, c.shape[1:])[jnp.newaxis], axis=0) + poly = jnp.append(poly, k[jnp.newaxis], axis=0) return poly @@ -171,7 +166,7 @@ def polyval(x, c): Returns ------- - val : ndarray, shape(*x.shape) + val : ndarray Polynomial with given coefficients evaluated at given points. Examples @@ -179,24 +174,27 @@ def polyval(x, c): .. code-block:: python val = polyval(x, c) - assert val.ndim == max(x.ndim, c.ndim - 1) + if val.ndim != max(x.ndim, c.ndim - 1): + raise ValueError(f"Incompatible shapes {x.shape} and {c.shape}.") for index in np.ndindex(c.shape[1:]): + idx = (..., *index) np.testing.assert_allclose( - actual=val[..., *index], - desired=np.poly1d(c[:, *index])(x[..., *index]), + actual=val[idx], + desired=np.poly1d(c[idx])(x[idx]), + err_msg=f"Failed with shapes {x.shape} and {c.shape}.", ) """ # Should be fine to do this instead of Horner's method - # because we expect to only integrate up to quartic polynomials. + # because we expect to usually integrate up to quartic polynomials. 
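The tanh-sinh rule referenced throughout these changes is what tames the 1 / √(1 − λ|B|) blow-up at the bounce points: the change of variables pushes the endpoint singularity into a double-exponentially decaying summand. A generic version of such a rule is sketched below; the node count and truncation range are illustrative assumptions, not those used by ``tanh_sinh_quadrature``.

.. code-block:: python

    import numpy as np

    t = np.linspace(-3, 3, 201)
    h = t[1] - t[0]
    x = np.tanh(0.5 * np.pi * np.sinh(t))                      # nodes cluster at +-1
    w = h * 0.5 * np.pi * np.cosh(t) / np.cosh(0.5 * np.pi * np.sinh(t)) ** 2

    # Integral of 1 / sqrt(1 - x**2) over (-1, 1) is pi despite the endpoint singularity.
    np.testing.assert_allclose(np.sum(w / np.sqrt(1 - x**2)), np.pi, rtol=1e-5)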
X = x[..., jnp.newaxis] ** jnp.arange(c.shape[0] - 1, -1, -1) val = jnp.einsum("...i,i...->...", X, c) assert val.ndim == max(x.ndim, c.ndim - 1) return val -def cubic_poly_roots(coef, k=jnp.array([0]), a_min=None, a_max=None, sort=False): - """Roots of cubic polynomial. +def cubic_poly_roots(coef, k=None, a_min=None, a_max=None, sort=False): + """Roots of cubic polynomial with given coefficients. Parameters ---------- @@ -204,21 +202,22 @@ def cubic_poly_roots(coef, k=jnp.array([0]), a_min=None, a_max=None, sort=False) First axis should store coefficients of a polynomial. For a polynomial given by c₁ x³ + c₂ x² + c₃ x + c₄, ``coef[i]`` should store cᵢ. It is assumed that c₁ is nonzero. - k : ndarray, shape(k.size, ) + k : ndarray Specify to instead find solutions to c₁ x³ + c₂ x² + c₃ x + c₄ = ``k``. + Should broadcast with arrays of shape(*coef.shape[1:]). a_min, a_max : ndarray Minimum and maximum value to return roots between. If specified only real roots are returned. If None, returns all complex roots. - Both arrays are broadcast against arrays of shape ``coef.shape[1:]``. + Should broadcast with arrays of shape(*coef.shape[1:]). sort : bool Whether to sort the roots. Returns ------- - roots : ndarray, shape(k.size, coef.shape, 3) - If ``k`` has one element, the first axis will be squeezed out. + roots : ndarray The roots of the cubic polynomial. + The three roots are iterated over the last axis. """ # https://en.wikipedia.org/wiki/Cubic_equation#General_cubic_formula @@ -230,14 +229,16 @@ def cubic_poly_roots(coef, k=jnp.array([0]), a_min=None, a_max=None, sort=False) a_max = jnp.inf a, b, c, d = coef - d = jnp.squeeze(jnp.moveaxis(d[..., jnp.newaxis] - k, -1, 0)) + if k is not None: + d = d - k t_0 = b**2 - 3 * a * c t_1 = 2 * b**3 - 9 * a * b * c + 27 * a**2 * d C = ((t_1 + complex_sqrt(t_1**2 - 4 * t_0**3)) / 2) ** (1 / 3) - C_is_zero = jnp.isclose(C, 0) + is_zero = jnp.isclose(C, 0) def compute_root(xi): - return -(b + xi * C + jnp.where(C_is_zero, 0, t_0 / (xi * C))) / (3 * a) + t_2 = jnp.where(is_zero, 0, t_0 / (xi * C)) + return -(b + xi * C + t_2) / (3 * a) def clip_to_nan(root): return jnp.where( @@ -265,47 +266,59 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): Parameters ---------- - pitch : ndarray + pitch : ndarray, shape(P, A * R) λ values. - knots : ndarray + Last two axes should specify the λ value for a particular field line + parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[:, α, ρ]`` + where in the latter the labels are interpreted as indices that correspond + to that field line. + If an additional axis exists on the left, it is the batch axis as usual. + knots : ndarray, shape(knots.size, ) Field line-following ζ coordinates of spline knots. - poly_B : ndarray + poly_B : ndarray, shape(4, A * R, knots.size - 1) Polynomial coefficients of the cubic spline of |B|. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise - polynomials of a particular spline of |B| on along field line. - poly_B_z : ndarray + polynomials of a particular spline of |B| along field line. + poly_B_z : ndarray, shape(3, A * R, knots.size - 1) Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise - polynomials of a particular spline of |B| on along field line. + polynomials of a particular spline of |B| along field line. 
Returns ------- bp1, bp2 : ndarray, ndarray Field line-following ζ coordinates of bounce points for a given pitch - along a field line. Has shape (pitch.size, M * L, N * NUM_ROOTS). - If there were less than N * NUM_ROOTS bounce points along a field line, - then the last axis is padded with nan. + along a field line. Has shape (P, A * R, (knots.size - 1) * NUM_ROOTS). + If there were less than (knots.size - 1) * NUM_ROOTS bounce points along a + field line, then the last axis is padded with nan. """ - ML = jnp.array(poly_B.shape[1:-1]).prod() - N = poly_B.shape[-1] + P = pitch.shape[0] # batch size + AR = poly_B.shape[1] # alpha.size * rho.size + N = knots.size - 1 # number of piecewise cubic polynomials per field line + assert poly_B.shape[-1] == poly_B_z.shape[-1] == N # The polynomials' intersection points with 1 / λ is given by ``intersect``. # In order to be JIT compilable, this must have a shape that accommodates the # case where each cubic polynomial intersects 1 / λ thrice. # nan values in ``intersect`` denote a polynomial has less than three intersects. intersect = cubic_poly_roots( - coef=poly_B, k=1 / pitch, a_min=knots[:-1], a_max=knots[1:], sort=True - ).reshape(pitch.size, ML, N, NUM_ROOTS) + coef=poly_B, + k=jnp.expand_dims(1 / pitch, axis=-1), + a_min=knots[:-1], + a_max=knots[1:], + sort=True, + ) + assert intersect.shape == (P, AR, N, NUM_ROOTS) # Reshape so that last axis enumerates intersects of a pitch along a field line. - # Condense the first and second axes to vmap over them. - B_z = polyval(intersect, poly_B_z[..., jnp.newaxis]).reshape( - pitch.size * ML, N * NUM_ROOTS + # Condense remaining axes to vmap over them. + B_z = polyval(x=intersect, c=poly_B_z[..., jnp.newaxis]).reshape( + P * AR, N * NUM_ROOTS ) - intersect = intersect.reshape(pitch.size * ML, N * NUM_ROOTS) + intersect = intersect.reshape(P * AR, N * NUM_ROOTS) # Only consider intersect if it is within knots that bound that polynomial.pytes is_intersect = ~jnp.isnan(intersect) @@ -322,12 +335,12 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): # extend bp1 and bp2 by single element and then test # index of last intersect along a field line # idx = jnp.squeeze(v_first_flatnonzero(~is_intersect)) - 1 # noqa: E800 - # assert idx.shape == (pitch.size * ML,) # noqa: E800 + # assert idx.shape == (P * AR,) # noqa: E800 # Roll such that first intersect is moved to index of last intersect. # Get ζ values of bounce points from the masks. - bp1 = v_mask_take(intersect, bp1).reshape(pitch.size, ML, N * NUM_ROOTS) - bp2 = v_mask_take(intersect, bp2).reshape(pitch.size, ML, N * NUM_ROOTS) + bp1 = v_mask_take(intersect, bp1).reshape(P, AR, N * NUM_ROOTS) + bp2 = v_mask_take(intersect, bp2).reshape(P, AR, N * NUM_ROOTS) return bp1, bp2 @@ -341,20 +354,25 @@ def _compute_bounce_points_with_knots(pitch, knots, poly_B, poly_B_z): Parameters ---------- - pitch : ndarray + pitch : ndarray, shape(P, A * R) λ values. - knots : ndarray + Last two axes should specify the λ value for a particular field line + parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[:, α, ρ]`` + where in the latter the labels are interpreted as indices that correspond + to that field line. + If an additional axis exists on the left, it is the batch axis as usual. + knots : ndarray, shape(knots.size, ) Field line-following ζ coordinates of spline knots. - poly_B : ndarray + poly_B : ndarray, shape(4, A * R, knots.size - 1) Polynomial coefficients of the cubic spline of |B|. 
First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise - polynomials of a particular spline of |B| on along field line. - poly_B_z : ndarray + polynomials of a particular spline of |B| along field line. + poly_B_z : ndarray, shape(3, A * R, knots.size - 1) Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise - polynomials of a particular spline of |B| on along field line. + polynomials of a particular spline of |B| along field line. Returns ------- @@ -363,8 +381,10 @@ def _compute_bounce_points_with_knots(pitch, knots, poly_B, poly_B_z): are the endpoints of a bounce integral. """ - ML = jnp.array(poly_B.shape[1:-1]).prod() - N = poly_B.shape[-1] + P = pitch.shape[0] # batch size + AR = poly_B.shape[1] # alpha.size * rho.size + N = knots.size - 1 # number of piecewise cubic polynomials per field line + assert poly_B.shape[-1] == poly_B_z.shape[-1] == N a_min = knots[:-1] a_max = knots[1:] @@ -373,25 +393,30 @@ def _compute_bounce_points_with_knots(pitch, knots, poly_B, poly_B_z): # case where each cubic polynomial intersects 1 / λ thrice. # nan values in ``roots`` denote a polynomial has less than three intersects. roots = cubic_poly_roots( - coef=poly_B, k=1 / pitch, a_min=a_min, a_max=a_max, sort=True - ).reshape(pitch.size, ML, N, NUM_ROOTS) + coef=poly_B, + k=jnp.expand_dims(1 / pitch, axis=-1), + a_min=knots[:-1], + a_max=knots[1:], + sort=True, + ) + assert roots.shape == (P, AR, N, NUM_ROOTS) # Include the knots of the splines along with the intersection points. # This preprocessing makes the ``direct`` algorithm in ``bounce_integral`` simpler. roots = (roots[..., 0], roots[..., 1], roots[..., 2]) - a_min = jnp.broadcast_to(a_min, shape=(pitch.size, ML, N)) - a_max = jnp.broadcast_to(a_max, shape=(pitch.size, ML, N)) + a_min = jnp.broadcast_to(a_min, shape=(P, AR, N)) + a_max = jnp.broadcast_to(a_max, shape=(P, AR, N)) intersect = jnp.stack((a_min, *roots, a_max), axis=-1) # Reshape so that last axis enumerates intersects of a pitch along a field line. - # Condense the first and second axes to vmap over them. - B_z = polyval(intersect, poly_B_z[..., jnp.newaxis]).reshape( - pitch.size * ML, N * (NUM_ROOTS + 2) + # Condense remaining axes to vmap over them. + B_z = polyval(x=intersect, c=poly_B_z[..., jnp.newaxis]).reshape( + P * AR, N * (NUM_ROOTS + 2) ) # Only consider intersect if it is within knots that bound that polynomial. is_intersect = jnp.reshape( jnp.array([False, True, True, True, False], dtype=bool) & ~jnp.isnan(intersect), - newshape=(pitch.size * ML, N * (NUM_ROOTS + 2)), + newshape=(P * AR, N * (NUM_ROOTS + 2)), ) # Rearrange so that all the intersects along field line are contiguous. @@ -405,7 +430,7 @@ def _compute_bounce_points_with_knots(pitch, knots, poly_B, poly_B_z): # index of last intersect idx = jnp.squeeze(v_first_flatnonzero(~is_intersect)) - 1 - assert idx.shape == (pitch.size * ML,) + assert idx.shape == (P * AR,) # Consider the boundary to be periodic to compute bounce integrals of # particles trapped outside this snapshot of the field lines. # Roll such that first intersect is moved to index of last intersect. @@ -414,9 +439,13 @@ def _compute_bounce_points_with_knots(pitch, knots, poly_B, poly_B_z): # Returning this makes the ``direct`` algorithm in ``bounce_integral`` simpler. # Replace nan values with right knots of the spline. 
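A shape-only illustration of the generalization in this patch may help: with pitch supplied per field line, 1 / λ gains a trailing axis so it broadcasts against every polynomial piece of its own field line's spline. The sizes below are arbitrary placeholders.

.. code-block:: python

    import numpy as np

    P, A, R, N = 3, 2, 5, 7                     # pitch values, alpha, rho, spline pieces
    pitch = np.linspace(0.5, 1.5, P * A * R).reshape(P, A * R)
    poly_B = np.ones((4, A * R, N))             # cubic coefficients per field line piece

    k = np.expand_dims(1 / pitch, axis=-1)      # (P, A*R, 1)
    shifted_constant = poly_B[3] - k            # (P, A*R, N): one shift per pitch and piece
    assert shifted_constant.shape == (P, A * R, N)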
intersect_nan_to_right_knot = jnp.stack( - (a_min, *tuple(map(lambda r: jnp.where(jnp.isnan(r), a_max, r), roots)), a_max), + ( + a_min, + *tuple(map(lambda r: jnp.where(jnp.isnan(r), knots[1:], r), roots)), + a_max, + ), axis=-1, - ).reshape(pitch.size * ML, N, (NUM_ROOTS + 2)) + ).reshape(P * AR, N, (NUM_ROOTS + 2)) return intersect_nan_to_right_knot, is_intersect, is_bp @@ -428,21 +457,26 @@ def _compute_bp_if_given_pitch( Parameters ---------- - pitch : ndarray + pitch : ndarray, shape(P, A, R) λ values. If None, returns the given ``original`` tuple. - knots : ndarray + Last two axes should specify the λ value for a particular field line + parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[:, α, ρ]`` + where in the latter the labels are interpreted as indices that correspond + to that field line. + If an additional axis exists on the left, it is the batch axis as usual. + knots : ndarray, shape(knots.size, ) Field line-following ζ coordinates of spline knots. - poly_B : ndarray + poly_B : ndarray, shape(4, A * R, knots.size - 1) Polynomial coefficients of the cubic spline of |B|. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise - polynomials of a particular spline of |B| on along field line. - poly_B_z : ndarray + polynomials of a particular spline of |B| along field line. + poly_B_z : ndarray, shape(3, A * R, knots.size - 1) Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise - polynomials of a particular spline of |B| on along field line. + polynomials of a particular spline of |B| along field line. compute_bp : callable Method to compute bounce points. original : tuple @@ -456,7 +490,17 @@ def _compute_bp_if_given_pitch( raise ValueError("No pitch values were given.") return original else: + # ensure pitch has shape (batch.size, alpha.size, rho.size) + # can't use jnp.atleast_3d due to https://github.com/numpy/numpy/issues/25805 pitch = jnp.atleast_1d(pitch) + if pitch.ndim == 2: + pitch = pitch[jnp.newaxis] + if pitch.ndim == 1: + pitch = pitch[jnp.newaxis, jnp.newaxis] + err_msg = "Supplied invalid shape for pitch angles." + assert pitch.ndim == 3, err_msg + pitch = pitch.reshape(pitch.shape[0], -1) + assert pitch.shape[-1] == 1 or pitch.shape[-1] == poly_B.shape[1], err_msg return pitch, *compute_bp(pitch, knots, poly_B, poly_B_z) @@ -490,7 +534,13 @@ def bounce_integral( eq : Equilibrium Equilibrium on which the bounce integral is defined. pitch : ndarray - λ values to evaluate the bounce integral at. + λ values to evaluate the bounce integral at each field line. + May be specified later. + Last two axes should specify the λ value for a particular field line + parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[:, α, ρ]`` + where in the latter the labels are interpreted as indices that correspond + to that field line. + If an additional axis exists on the left, it is the batch axis as usual. rho : ndarray or float Unique flux surface label coordinates. alpha : ndarray or float @@ -528,6 +578,9 @@ def bounce_integral( bi, grid, data = bounce_integral(eq) pitch = jnp.linspace(1 / data["B"].max(), 1 / data["B"].min(), 30) + # same pitch for every field line, may give sparse result + # See tests/test_bounce_integral.py::test_pitch_input for an alternative. 
+ pitch = pitch[:, jnp.newaxis, jnp.newaxis] name = "g_zz" f = eq.compute(name, grid=grid, data=data)[name] result = bi(f, pitch) @@ -540,20 +593,19 @@ def bounce_integral( rho = jnp.atleast_1d(rho) alpha = jnp.atleast_1d(alpha) zeta = jnp.linspace(0, zeta_max, resolution) - L = rho.size - M = alpha.size - N = resolution - 1 # number of piecewise cubic polynomials per field line + R = rho.size + A = alpha.size grid, data = field_line_to_desc_coords(eq, rho, alpha, zeta) data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid, data=data) - B_sup_z = data["B^zeta"].reshape(M * L, resolution) - B = data["|B|"].reshape(M * L, resolution) - B_z_ra = data["|B|_z|r,a"].reshape(M * L, resolution) + B_sup_z = data["B^zeta"].reshape(A * R, resolution) + B = data["|B|"].reshape(A * R, resolution) + B_z_ra = data["|B|_z|r,a"].reshape(A * R, resolution) poly_B = CubicHermiteSpline(zeta, B, B_z_ra, axis=-1, check=False).c poly_B = jnp.moveaxis(poly_B, 1, -1) poly_B_z = polyder(poly_B) - assert poly_B.shape == (4, M * L, N) - assert poly_B_z.shape == (3, M * L, N) + assert poly_B.shape == (4, A * R, resolution - 1) + assert poly_B_z.shape == (3, A * R, resolution - 1) def tanh_sinh(f, pitch=None): """Compute the bounce integral of the named quantity. @@ -563,12 +615,17 @@ def tanh_sinh(f, pitch=None): f : ndarray Quantity to compute the bounce integral of. pitch : ndarray - λ values to evaluate the bounce integral at. + λ values to evaluate the bounce integral at each field line. If None, uses the values given to the parent function. + Last two axes should specify the λ value for a particular field line + parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[:, α, ρ]`` + where in the latter the labels are interpreted as indices that correspond + to that field line. + If an additional axis exists on the left, it is the batch axis as usual. Returns ------- - result : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 3) + result : ndarray, shape(P, alpha.size, rho.size, (resolution - 1) * 3) The last axis iterates through every bounce integral performed along that field line padded by nan. @@ -576,13 +633,14 @@ def tanh_sinh(f, pitch=None): pitch, bp1, bp2 = _compute_bp_if_given_pitch( pitch, zeta, poly_B, poly_B_z, compute_bp, *original, err=True ) + P = pitch.shape[0] + pitch = jnp.broadcast_to(pitch, shape=(P, A * R)) X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] - f = f.reshape(M * L, resolution) + f = f.reshape(A * R, resolution) + quad = bounce_quadrature(pitch, X, w, zeta, f, B_sup_z, B, B_z_ra) result = jnp.reshape( - inner_product_quad(pitch, w, X, zeta, f, B_sup_z, B, B_z_ra) - * jnp.pi - / (bp2 - bp1), - newshape=(pitch.size, M, L, N * NUM_ROOTS), + quad / (bp2 - bp1) * jnp.pi, # complete the change of variable + newshape=(P, A, R, (resolution - 1) * NUM_ROOTS), ) return result @@ -594,12 +652,17 @@ def direct(f, pitch=None): f : ndarray Quantity to compute the bounce integral of. pitch : ndarray - λ values to evaluate the bounce integral at. + λ values to evaluate the bounce integral at each field line. If None, uses the values given to the parent function. + Last two axes should specify the λ value for a particular field line + parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[:, α, ρ]`` + where in the latter the labels are interpreted as indices that correspond + to that field line. + If an additional axis exists on the left, it is the batch axis as usual. 
Returns ------- - result : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 3) + result : ndarray, shape(P, alpha.size, rho.size, (resolution - 1) * 3) The last axis iterates through every bounce integral performed along that field line padded by nan. @@ -612,20 +675,24 @@ def direct(f, pitch=None): ) = _compute_bp_if_given_pitch( pitch, zeta, poly_B, poly_B_z, compute_bp, *original, err=True ) + P = pitch.shape[0] integrand = jnp.nan_to_num( - f / (data["B^zeta"] * jnp.sqrt(1 - pitch[:, jnp.newaxis] * data["|B|"])) - ).reshape(pitch.size * M * L, resolution) + f.reshape(A * R, resolution) + / (B_sup_z * jnp.sqrt(1 - pitch[..., jnp.newaxis] * B)) + ).reshape(P * A * R, resolution) integrand = Akima1DInterpolator(zeta, integrand, axis=-1, check=False).c integrand = jnp.moveaxis(integrand, 1, -1) - assert integrand.shape == (4, pitch.size * M * L, N) + assert integrand.shape == (4, P * A * R, resolution - 1) + # For this algorithm, computing integrals via differences of primitives # is preferable to any numerical quadrature. For example, even if the # intersection points were evenly spaced, a composite Simpson's quadrature # would require computing the spline on 1.8x more knots for the same accuracy. primitive = polyval( - intersect_nan_to_right_knot, polyint(integrand)[..., jnp.newaxis] - ).reshape(pitch.size * M * L, N * (NUM_ROOTS + 2)) + x=intersect_nan_to_right_knot, c=polyint(integrand)[..., jnp.newaxis] + ).reshape(P * A * R, (resolution - 1) * (NUM_ROOTS + 2)) + sums = jnp.cumsum( # Periodic boundary to compute bounce integrals of particles # trapped outside this snapshot of the field lines. @@ -633,14 +700,18 @@ def direct(f, pitch=None): # Didn't enforce continuity in the piecewise primitives when # integrating, so mask the discontinuity to avoid summing it. * jnp.append( - jnp.arange(1, N * (NUM_ROOTS + 2)) % (NUM_ROOTS + 2) != 0, True + jnp.arange(1, (resolution - 1) * (NUM_ROOTS + 2)) % (NUM_ROOTS + 2) + != 0, + True, ), axis=-1, ) result = jnp.reshape( # Compute difference of ``sums`` between bounce points. - v_mask_diff(v_mask_take(sums, is_intersect), is_bp)[..., : N * NUM_ROOTS], - newshape=(pitch.size, M, L, N * NUM_ROOTS), + v_mask_diff(v_mask_take(sums, is_intersect), is_bp)[ + ..., : (resolution - 1) * NUM_ROOTS + ], + newshape=(P, A, R, (resolution - 1) * NUM_ROOTS), ) return result @@ -691,7 +762,13 @@ def bounce_average( eq : Equilibrium Equilibrium on which the bounce average is defined. pitch : ndarray - λ values to evaluate the bounce average at. + λ values to evaluate the bounce average at each field line. + May be specified later. + Last two axes should specify the λ value for a particular field line + parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[..., α, ρ]`` + where in the latter the labels are interpreted as indices into the returned + that correspond to that field line. + If additional axes exist, they are the batch axes as usual. rho : ndarray or float Unique flux surface label coordinates. alpha : ndarray or float @@ -729,6 +806,9 @@ def bounce_average( ba, grid, data = bounce_integral(eq) pitch = jnp.linspace(1 / data["B"].max(), 1 / data["B"].min(), 30) + # same pitch for every field line, may give to sparse result + # See tests/test_bounce_integral.py::test_pitch_input for an alternative. 
+ pitch = pitch[:, jnp.newaxis, jnp.newaxis] name = "g_zz" f = eq.compute(name, grid=grid, data=data)[name] result = ba(f, pitch) @@ -739,25 +819,30 @@ def bounce_average( ) def _bounce_average(f, pitch=None): - """Compute the bounce average of the named quantity using the spline method. + """Compute the bounce average of the named quantity. Parameters ---------- f : ndarray Quantity to compute the bounce average of. pitch : ndarray - λ values to evaluate the bounce average at. + λ values to evaluate the bounce average at each field line. If None, uses the values given to the parent function. + Last two axes should specify the λ value for a particular field line + parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[:, α, ρ]`` + where in the latter the labels are interpreted as indices that correspond + to that field line. + If an additional axis exists on the left, it is the batch axis as usual. Returns ------- - result : ndarray, shape(pitch, alpha, rho, (resolution - 1) * 3) + result : ndarray, shape(P, alpha.size, rho.size, (resolution - 1) * 3) The last axis iterates through every bounce average performed along that field line padded by nan. """ - # Should be fine to fit akima spline to constant function "1" since - # akima suppresses oscillation of the fit. + # Should be fine to fit akima spline to constant function 1 since + # akima suppresses oscillation of the spline. return safediv(bi(f, pitch), bi(jnp.ones_like(f), pitch)) return _bounce_average, grid, data diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index cd913ee294..0564da5a8d 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -38,19 +38,15 @@ def test_cubic_poly_roots(): poly = poly * np.e * np.pi # make sure broadcasting won't hide error in implementation assert np.unique(poly.shape).size == poly.ndim - constant = np.arange(10) - # make sure broadcasting won't hide error in implementation - assert np.unique(poly.shape + constant.shape).size == poly.ndim + constant.ndim + constant = np.broadcast_to(np.arange(poly.shape[-1]), poly.shape[1:]) roots = cubic_poly_roots(poly, constant, sort=True) for j in range(poly.shape[1]): for k in range(poly.shape[2]): - for s in range(constant.size): - a, b, c, d = poly[:, j, k] - d = d - constant[s] - np.testing.assert_allclose( - actual=roots[s, j, k], - desired=np.sort_complex(np.roots([a, b, c, d])), - ) + a, b, c, d = poly[:, j, k] + np.testing.assert_allclose( + actual=roots[j, k], + desired=np.sort_complex(np.roots([a, b, c, d - constant[j, k]])), + ) @pytest.mark.unit @@ -60,14 +56,15 @@ def test_polyint(): poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi # make sure broadcasting won't hide error in implementation assert np.unique(poly.shape).size == poly.ndim - constant = np.pi + constant = np.broadcast_to(np.arange(poly.shape[-1]), poly.shape[1:]) primitive = polyint(poly, k=constant) for j in range(poly.shape[1]): for k in range(poly.shape[2]): np.testing.assert_allclose( actual=primitive[:, j, k], - desired=np.polyint(poly[:, j, k], k=constant), + desired=np.polyint(poly[:, j, k], k=constant[j, k]), ) + assert polyint(poly).shape == primitive.shape, "Failed broadcasting default k." 
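The tests above exercise a specific layout for stacks of polynomials: the leading axis holds power-series coefficients in descending order, and the integration constant ``k`` may itself be an array that broadcasts over the trailing polynomial axes. Below is a standalone numpy sketch of that convention; it rebuilds the antiderivative by hand purely for illustration and is not the ``polyint``/``polyval`` implementation from this patch.

.. code-block:: python

    import numpy as np

    # Stack of cubics: leading axis holds descending power-series coefficients,
    # trailing axes index the individual polynomials.
    c = np.arange(-12, 12, dtype=float).reshape(4, 2, 3)
    # One integration constant per polynomial, broadcast over the trailing axes.
    k = np.arange(6, dtype=float).reshape(2, 3)

    # Antiderivative coefficients: divide by the new exponents and append k.
    primitive = np.concatenate(
        [c / np.arange(c.shape[0], 0, -1).reshape(-1, 1, 1), k[np.newaxis]], axis=0
    )

    # Spot-check one polynomial against numpy's reference implementation.
    j, m = 1, 2
    np.testing.assert_allclose(primitive[:, j, m], np.polyint(c[:, j, m], k=k[j, m]))

    # Evaluation with the same convention matches np.polyval.
    x = np.linspace(0, 2, 6).reshape(2, 3)
    val = np.sum(primitive[:, j, m] * x[j, m] ** np.arange(4, -1, -1))
    np.testing.assert_allclose(val, np.polyval(primitive[:, j, m], x[j, m]))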
@pytest.mark.unit @@ -93,7 +90,7 @@ def test_polyval(): # make sure broadcasting won't hide error in implementation assert np.unique(c.shape).size == c.ndim x = np.linspace(0, 20, c.shape[1] * c.shape[2]).reshape(c.shape[1], c.shape[2]) - val = polyval(x, c) + val = polyval(x=x, c=c) for index in np.ndindex(c.shape[1:]): idx = (..., *index) np.testing.assert_allclose( @@ -108,7 +105,7 @@ def test_polyval(): assert np.unique(x.shape).size == x.ndim assert c.shape[1:] == x.shape[x.ndim - (c.ndim - 1) :] assert np.unique((c.shape[0],) + x.shape[c.ndim - 1 :]).size == x.ndim - 1 - val = polyval(x, c) + val = polyval(x=x, c=c) for index in np.ndindex(c.shape[1:]): idx = (..., *index) np.testing.assert_allclose( @@ -126,7 +123,7 @@ def test_polyval(): # choose evaluation points at d just to match choice made in a1d.antiderivative() d = np.diff(x) # evaluate every spline at d - k = polyval(d, primitive) + k = polyval(x=d, c=primitive) # don't want to use jax.ndarray.at[].add() in case jax is not installed primitive = np.array(primitive) primitive[-1, 1:] += np.cumsum(k, axis=-1)[:-1] @@ -134,22 +131,37 @@ def test_polyval(): @pytest.mark.unit -def test_temporary(): - """Test that things are returned without errors.""" +def test_pitch_input(): + """Test different ways of specifying pitch.""" eq = get("HELIOTRON") - ba, grid, data = bounce_average(eq, method="tanh_sinh") - pitch = np.linspace(1 / data["B"].max(), 1 / data["B"].min(), 30) + rho = np.linspace(0, 1, 6) + alpha = np.linspace(0, (2 - eq.sym) * np.pi, 2) + ba, grid, data = bounce_average(eq, rho=rho, alpha=alpha, method="tanh_sinh") + pitch_resolution = 30 name = "g_zz" f = eq.compute(name, grid=grid, data=data)[name] + # same pitch for every field line, may lead to sparse result + pitch = np.linspace(1 / data["B"].max(), 1 / data["B"].min(), pitch_resolution) + pitch = pitch[:, np.newaxis, np.newaxis] + result = ba(f, pitch) + assert np.isfinite(result).any(), "tanh_sinh quadrature failed." + # specify pitch per field line + B = data["B"].reshape(alpha.size * rho.size, -1) + eps = 1e-5 # FIXME: vanishing B-field bug. + pitch = np.linspace( + 1 / (B.max(axis=-1) + eps), + 1 / (B.min(axis=-1) + eps), + pitch_resolution, + ).reshape(pitch_resolution, alpha.size, rho.size) result = ba(f, pitch) assert np.isfinite(result).any(), "tanh_sinh quadrature failed." - ba, _, _ = bounce_average(eq, method="direct") + ba, _, _ = bounce_average(eq, rho=rho, alpha=alpha, method="direct") result = ba(f, pitch) print(np.isfinite(result).any()) -@pytest.mark.unit +# @pytest.mark.unit def test_elliptic_integral_limit(): """Test bounce integral matches elliptic integrals. 
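For readers following the bounce point logic in these patches, here is a self-contained sketch of the idea on hypothetical data: intersections of |B|(ζ) with 1/λ are roots of a cubic on each spline interval, and the sign of ∂|B|/∂ζ at an intersection decides whether it opens (bp1) or closes (bp2) a bounce integral. The field strength below is a toy 1 + 0.5 cos ζ rather than DESC output, and the root finding uses scipy and ``np.roots`` instead of the closed-form ``cubic_poly_roots`` in the patch.

.. code-block:: python

    import numpy as np
    from scipy.interpolate import CubicSpline

    # Toy field strength along a field line (hypothetical data, not from DESC).
    knots = np.linspace(0, 4 * np.pi, 25)
    B = 1.0 + 0.5 * np.cos(knots)
    spline = CubicSpline(knots, B)
    B_z = spline.derivative()

    pitch = 1.0 / 1.3  # λ such that 1/λ = 1.3 lies inside the range of |B|
    bp1, bp2 = [], []
    for i in range(knots.size - 1):
        # spline.c[:, i] holds descending coefficients in the local variable ζ - knots[i].
        c = spline.c[:, i].copy()
        c[-1] -= 1.0 / pitch
        for r in np.roots(c):
            if abs(r.imag) > 1e-12:
                continue
            z = r.real + knots[i]
            if not (knots[i] <= z <= knots[i + 1]):
                continue
            # Decreasing |B| opens a bounce integral; increasing |B| closes it.
            (bp1 if B_z(z) <= 0 else bp2).append(z)

    bp1, bp2 = np.sort(bp1), np.sort(bp2)
    # Here every well is closed inside the window, so the pairs line up directly;
    # the patch additionally handles field lines whose first intersection closes
    # rather than opens an integral.
    assert np.all(spline((bp1 + bp2) / 2) <= 1.0 / pitch + 1e-8)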
From b782c0c05f6d017b40b519059697baf90b980c11 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 16 Mar 2024 16:16:26 -0500 Subject: [PATCH 038/241] clean up array parsing logic --- desc/compute/bounce_integral.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 960739c095..64314ea8da 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -490,13 +490,11 @@ def _compute_bp_if_given_pitch( raise ValueError("No pitch values were given.") return original else: - # ensure pitch has shape (batch.size, alpha.size, rho.size) - # can't use jnp.atleast_3d due to https://github.com/numpy/numpy/issues/25805 - pitch = jnp.atleast_1d(pitch) + # ensure pitch has shape (batch size, alpha.size, rho.size) + pitch = jnp.atleast_2d(pitch) if pitch.ndim == 2: + # Can't use atleast_3d; see https://github.com/numpy/numpy/issues/25805. pitch = pitch[jnp.newaxis] - if pitch.ndim == 1: - pitch = pitch[jnp.newaxis, jnp.newaxis] err_msg = "Supplied invalid shape for pitch angles." assert pitch.ndim == 3, err_msg pitch = pitch.reshape(pitch.shape[0], -1) From c0512453d129e0bc30c5ed3775bcd18a6b058207 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 16 Mar 2024 19:30:29 -0500 Subject: [PATCH 039/241] Separate spline resolution and quadrature resolution --- desc/compute/bounce_integral.py | 156 +++++++++++++++----------------- tests/test_bounce_integral.py | 26 ++---- 2 files changed, 85 insertions(+), 97 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 64314ea8da..97e56610a8 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -23,9 +23,9 @@ def bounce_quadrature(pitch, X, w, knots, f, B_sup_z, B, B_z_ra): Parameters ---------- - pitch : ndarray, shape(P, ) + pitch : ndarray, shape(pitch.size, ) λ values. - X : ndarray, shape(P, (knots.size - 1) * NUM_ROOTS, w.size) + X : ndarray, shape(pitch.size, (knots.size - 1) * NUM_ROOTS, w.size) Quadrature points. w : ndarray, shape(w.size, ) Quadrature weights. @@ -84,6 +84,7 @@ def tanh_sinh_quadrature(resolution): """ # https://github.com/f0uriest/quadax/blob/main/quadax/utils.py#L166 + # Compute boundary of quadrature. # x_max = 1 - eps with some buffer x_max = jnp.array(1.0) - 10 * jnp.finfo(jnp.array(1.0)).eps tanhinv = lambda x: 1 / 2 * jnp.log((1 + x) / (1 - x)) @@ -269,7 +270,7 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): pitch : ndarray, shape(P, A * R) λ values. Last two axes should specify the λ value for a particular field line - parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[:, α, ρ]`` + parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[..., α, ρ]`` where in the latter the labels are interpreted as indices that correspond to that field line. If an additional axis exists on the left, it is the batch axis as usual. @@ -315,10 +316,8 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): # Reshape so that last axis enumerates intersects of a pitch along a field line. # Condense remaining axes to vmap over them. 
- B_z = polyval(x=intersect, c=poly_B_z[..., jnp.newaxis]).reshape( - P * AR, N * NUM_ROOTS - ) - intersect = intersect.reshape(P * AR, N * NUM_ROOTS) + B_z = polyval(x=intersect, c=poly_B_z[..., jnp.newaxis]).reshape(P * AR, -1) + intersect = intersect.reshape(P * AR, -1) # Only consider intersect if it is within knots that bound that polynomial.pytes is_intersect = ~jnp.isnan(intersect) @@ -339,8 +338,8 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): # Roll such that first intersect is moved to index of last intersect. # Get ζ values of bounce points from the masks. - bp1 = v_mask_take(intersect, bp1).reshape(P, AR, N * NUM_ROOTS) - bp2 = v_mask_take(intersect, bp2).reshape(P, AR, N * NUM_ROOTS) + bp1 = v_mask_take(intersect, bp1).reshape(P, AR, -1) + bp2 = v_mask_take(intersect, bp2).reshape(P, AR, -1) return bp1, bp2 @@ -357,7 +356,7 @@ def _compute_bounce_points_with_knots(pitch, knots, poly_B, poly_B_z): pitch : ndarray, shape(P, A * R) λ values. Last two axes should specify the λ value for a particular field line - parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[:, α, ρ]`` + parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[..., α, ρ]`` where in the latter the labels are interpreted as indices that correspond to that field line. If an additional axis exists on the left, it is the batch axis as usual. @@ -410,13 +409,11 @@ def _compute_bounce_points_with_knots(pitch, knots, poly_B, poly_B_z): # Reshape so that last axis enumerates intersects of a pitch along a field line. # Condense remaining axes to vmap over them. - B_z = polyval(x=intersect, c=poly_B_z[..., jnp.newaxis]).reshape( - P * AR, N * (NUM_ROOTS + 2) - ) + B_z = polyval(x=intersect, c=poly_B_z[..., jnp.newaxis]).reshape(P * AR, -1) # Only consider intersect if it is within knots that bound that polynomial. is_intersect = jnp.reshape( jnp.array([False, True, True, True, False], dtype=bool) & ~jnp.isnan(intersect), - newshape=(P * AR, N * (NUM_ROOTS + 2)), + newshape=(P * AR, -1), ) # Rearrange so that all the intersects along field line are contiguous. @@ -445,7 +442,7 @@ def _compute_bounce_points_with_knots(pitch, knots, poly_B, poly_B_z): a_max, ), axis=-1, - ).reshape(P * AR, N, (NUM_ROOTS + 2)) + ).reshape(P * AR, N, -1) return intersect_nan_to_right_knot, is_intersect, is_bp @@ -459,9 +456,8 @@ def _compute_bp_if_given_pitch( ---------- pitch : ndarray, shape(P, A, R) λ values. - If None, returns the given ``original`` tuple. Last two axes should specify the λ value for a particular field line - parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[:, α, ρ]`` + parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[..., α, ρ]`` where in the latter the labels are interpreted as indices that correspond to that field line. If an additional axis exists on the left, it is the batch axis as usual. @@ -507,8 +503,8 @@ def bounce_integral( pitch=None, rho=None, alpha=None, - zeta_max=10 * jnp.pi, - resolution=20, + zeta=20, + resolution=11, method="tanh_sinh", ): """Returns a method to compute the bounce integral of any quantity. @@ -535,7 +531,7 @@ def bounce_integral( λ values to evaluate the bounce integral at each field line. May be specified later. Last two axes should specify the λ value for a particular field line - parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[:, α, ρ]`` + parameterized by α, ρ. 
That is, λ(α, ρ) is specified by ``pitch[..., α, ρ]`` where in the latter the labels are interpreted as indices that correspond to that field line. If an additional axis exists on the left, it is the batch axis as usual. @@ -543,22 +539,23 @@ def bounce_integral( Unique flux surface label coordinates. alpha : ndarray or float Unique field line label coordinates over a constant rho surface. - zeta_max : float - Max value for field line following coordinate. + zeta : ndarray or int + A cubic spline of the integrand is computed at these values of the field + line following coordinate, for every field line in the meshgrid formed from + rho and alpha specified above. + The number of knots specifies the grid resolution as increasing the + number of knots increases the accuracy of representing the integrand + and the accuracy of the locations of the bounce points. + If an integer is given, that many knots are linearly spaced from 0 to 10 pi. resolution : int - Number of interpolation points (knots) used for splines in the quadrature. - A maximum of three bounce points can be detected in between knots. - The accuracy of the quadrature will increase as some function of - the number of knots over the number of detected bounce points. - So for well-behaved magnetic fields increasing resolution should increase - the accuracy of the quadrature. + Number of quadrature points. method : str The quadrature scheme used to evaluate the integral. The "direct" method exactly integrates a cubic spline of the integrand. - The "tanh_sinh" method performs a Tanh-sinh quadrature, where independent cubic - splines are used for components in the integrand so that the singularity near - the bounce points can be captured more accurately than can be represented by a - polynomial. + The "tanh_sinh" method performs a tanh-sinh quadrature, where cubic + splines are used to represent each function in the integrand + so that the singularity near the bounce points can be captured more + accurately than can be represented by a polynomial. Returns ------- @@ -576,7 +573,7 @@ def bounce_integral( bi, grid, data = bounce_integral(eq) pitch = jnp.linspace(1 / data["B"].max(), 1 / data["B"].min(), 30) - # same pitch for every field line, may give sparse result + # Same pitch for every field line may give sparse result. # See tests/test_bounce_integral.py::test_pitch_input for an alternative. 
pitch = pitch[:, jnp.newaxis, jnp.newaxis] name = "g_zz" @@ -587,23 +584,25 @@ def bounce_integral( if rho is None: rho = jnp.linspace(0, 1, 10) if alpha is None: - alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 20) + alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 10) rho = jnp.atleast_1d(rho) alpha = jnp.atleast_1d(alpha) - zeta = jnp.linspace(0, zeta_max, resolution) + zeta = jnp.atleast_1d(zeta) + if zeta.size == 1: + zeta = jnp.linspace(0, 10 * jnp.pi, zeta.item()) R = rho.size A = alpha.size grid, data = field_line_to_desc_coords(eq, rho, alpha, zeta) data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid, data=data) - B_sup_z = data["B^zeta"].reshape(A * R, resolution) - B = data["|B|"].reshape(A * R, resolution) - B_z_ra = data["|B|_z|r,a"].reshape(A * R, resolution) + B_sup_z = data["B^zeta"].reshape(A * R, -1) + B = data["|B|"].reshape(A * R, -1) + B_z_ra = data["|B|_z|r,a"].reshape(A * R, -1) poly_B = CubicHermiteSpline(zeta, B, B_z_ra, axis=-1, check=False).c poly_B = jnp.moveaxis(poly_B, 1, -1) poly_B_z = polyder(poly_B) - assert poly_B.shape == (4, A * R, resolution - 1) - assert poly_B_z.shape == (3, A * R, resolution - 1) + assert poly_B.shape == (4, A * R, zeta.size - 1) + assert poly_B_z.shape == (3, A * R, zeta.size - 1) def tanh_sinh(f, pitch=None): """Compute the bounce integral of the named quantity. @@ -616,14 +615,14 @@ def tanh_sinh(f, pitch=None): λ values to evaluate the bounce integral at each field line. If None, uses the values given to the parent function. Last two axes should specify the λ value for a particular field line - parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[:, α, ρ]`` + parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[..., α, ρ]`` where in the latter the labels are interpreted as indices that correspond to that field line. If an additional axis exists on the left, it is the batch axis as usual. Returns ------- - result : ndarray, shape(P, alpha.size, rho.size, (resolution - 1) * 3) + result : ndarray, shape(P, alpha.size, rho.size, (zeta.size - 1) * 3) The last axis iterates through every bounce integral performed along that field line padded by nan. @@ -634,12 +633,10 @@ def tanh_sinh(f, pitch=None): P = pitch.shape[0] pitch = jnp.broadcast_to(pitch, shape=(P, A * R)) X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] - f = f.reshape(A * R, resolution) + f = f.reshape(A * R, -1) quad = bounce_quadrature(pitch, X, w, zeta, f, B_sup_z, B, B_z_ra) - result = jnp.reshape( - quad / (bp2 - bp1) * jnp.pi, # complete the change of variable - newshape=(P, A, R, (resolution - 1) * NUM_ROOTS), - ) + # complete the change of variable + result = jnp.reshape(quad / (bp2 - bp1) * jnp.pi, newshape=(P, A, R, -1)) return result def direct(f, pitch=None): @@ -653,14 +650,14 @@ def direct(f, pitch=None): λ values to evaluate the bounce integral at each field line. If None, uses the values given to the parent function. Last two axes should specify the λ value for a particular field line - parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[:, α, ρ]`` + parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[..., α, ρ]`` where in the latter the labels are interpreted as indices that correspond to that field line. If an additional axis exists on the left, it is the batch axis as usual. 
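A minimal numpy sketch of the pitch shape convention documented here, mirroring the parsing introduced in patch 038. The helper name ``parse_pitch`` and the sizes ``A``, ``R``, ``P`` are illustrative only, not part of the DESC API.

.. code-block:: python

    import numpy as np

    A, R, P = 3, 2, 5  # hypothetical alpha.size, rho.size, number of pitch values

    def parse_pitch(pitch, A, R):
        """Sketch of the convention: canonicalize pitch to shape (batch, A * R)."""
        pitch = np.atleast_2d(pitch)
        if pitch.ndim == 2:  # one λ per field line, no batch axis
            pitch = pitch[np.newaxis]
        assert pitch.ndim == 3, "Supplied invalid shape for pitch angles."
        pitch = pitch.reshape(pitch.shape[0], -1)
        assert pitch.shape[-1] in (1, A * R)
        return np.broadcast_to(pitch, (pitch.shape[0], A * R))

    # Same λ values for every field line: add trailing singleton axes first.
    same_everywhere = np.linspace(0.5, 1.5, P)[:, np.newaxis, np.newaxis]
    assert parse_pitch(same_everywhere, A, R).shape == (P, A * R)

    # One λ per field line, no batch axis.
    per_field_line = np.random.default_rng(0).uniform(0.5, 1.5, (A, R))
    assert parse_pitch(per_field_line, A, R).shape == (1, A * R)

    # Full specification: a batch of λ values for every field line.
    full = np.random.default_rng(1).uniform(0.5, 1.5, (P, A, R))
    assert parse_pitch(full, A, R).shape == (P, A * R)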
Returns ------- - result : ndarray, shape(P, alpha.size, rho.size, (resolution - 1) * 3) + result : ndarray, shape(P, alpha.size, rho.size, (zeta.size - 1) * 3) The last axis iterates through every bounce integral performed along that field line padded by nan. @@ -676,12 +673,11 @@ def direct(f, pitch=None): P = pitch.shape[0] integrand = jnp.nan_to_num( - f.reshape(A * R, resolution) - / (B_sup_z * jnp.sqrt(1 - pitch[..., jnp.newaxis] * B)) - ).reshape(P * A * R, resolution) + f.reshape(A * R, -1) / (B_sup_z * jnp.sqrt(1 - pitch[..., jnp.newaxis] * B)) + ).reshape(P * A * R, -1) integrand = Akima1DInterpolator(zeta, integrand, axis=-1, check=False).c integrand = jnp.moveaxis(integrand, 1, -1) - assert integrand.shape == (4, P * A * R, resolution - 1) + assert integrand.shape == (4, P * A * R, zeta.size - 1) # For this algorithm, computing integrals via differences of primitives # is preferable to any numerical quadrature. For example, even if the @@ -689,7 +685,7 @@ def direct(f, pitch=None): # would require computing the spline on 1.8x more knots for the same accuracy. primitive = polyval( x=intersect_nan_to_right_knot, c=polyint(integrand)[..., jnp.newaxis] - ).reshape(P * A * R, (resolution - 1) * (NUM_ROOTS + 2)) + ).reshape(P * A * R, -1) sums = jnp.cumsum( # Periodic boundary to compute bounce integrals of particles @@ -698,8 +694,7 @@ def direct(f, pitch=None): # Didn't enforce continuity in the piecewise primitives when # integrating, so mask the discontinuity to avoid summing it. * jnp.append( - jnp.arange(1, (resolution - 1) * (NUM_ROOTS + 2)) % (NUM_ROOTS + 2) - != 0, + jnp.arange(1, (zeta.size - 1) * (NUM_ROOTS + 2)) % (NUM_ROOTS + 2) != 0, True, ), axis=-1, @@ -707,9 +702,9 @@ def direct(f, pitch=None): result = jnp.reshape( # Compute difference of ``sums`` between bounce points. v_mask_diff(v_mask_take(sums, is_intersect), is_bp)[ - ..., : (resolution - 1) * NUM_ROOTS + ..., : (zeta.size - 1) * NUM_ROOTS ], - newshape=(P, A, R, (resolution - 1) * NUM_ROOTS), + newshape=(P, A, R, -1), ) return result @@ -734,8 +729,8 @@ def bounce_average( pitch=None, rho=None, alpha=None, - zeta_max=10 * jnp.pi, - resolution=20, + zeta=20, + resolution=11, method="tanh_sinh", ): """Returns a method to compute the bounce average of any quantity. @@ -764,29 +759,30 @@ def bounce_average( May be specified later. Last two axes should specify the λ value for a particular field line parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[..., α, ρ]`` - where in the latter the labels are interpreted as indices into the returned - that correspond to that field line. - If additional axes exist, they are the batch axes as usual. + where in the latter the labels are interpreted as indices that correspond + to that field line. + If an additional axis exists on the left, it is the batch axis as usual. rho : ndarray or float Unique flux surface label coordinates. alpha : ndarray or float Unique field line label coordinates over a constant rho surface. - zeta_max : float - Max value for field line following coordinate. + zeta : ndarray or int + A cubic spline of the integrand is computed at these values of the field + line following coordinate, for every field line in the meshgrid formed from + rho and alpha specified above. + The number of knots specifies the grid resolution as increasing the + number of knots increases the accuracy of representing the integrand + and the accuracy of the locations of the bounce points. + If an integer is given, that many knots are linearly spaced from 0 to 10 pi. 
resolution : int - Number of interpolation points (knots) used for splines in the quadrature. - A maximum of three bounce points can be detected in between knots. - The accuracy of the quadrature will increase as some function of - the number of knots over the number of detected bounce points. - So for well-behaved magnetic fields increasing resolution should increase - the accuracy of the quadrature. + Number of quadrature points. method : str The quadrature scheme used to evaluate the integral. The "direct" method exactly integrates a cubic spline of the integrand. - The "tanh_sinh" method performs a Tanh-sinh quadrature, where independent cubic - splines are used for components in the integrand so that the singularity near - the bounce points can be captured more accurately than can be represented by a - polynomial. + The "tanh_sinh" method performs a tanh-sinh quadrature, where cubic + splines are used to represent each function in the integrand + so that the singularity near the bounce points can be captured more + accurately than can be represented by a polynomial. Returns ------- @@ -804,7 +800,7 @@ def bounce_average( ba, grid, data = bounce_integral(eq) pitch = jnp.linspace(1 / data["B"].max(), 1 / data["B"].min(), 30) - # same pitch for every field line, may give to sparse result + # Same pitch for every field line may give sparse result. # See tests/test_bounce_integral.py::test_pitch_input for an alternative. pitch = pitch[:, jnp.newaxis, jnp.newaxis] name = "g_zz" @@ -812,9 +808,7 @@ def bounce_average( result = ba(f, pitch) """ - bi, grid, data = bounce_integral( - eq, pitch, rho, alpha, zeta_max, resolution, method - ) + bi, grid, data = bounce_integral(eq, pitch, rho, alpha, zeta, resolution, method) def _bounce_average(f, pitch=None): """Compute the bounce average of the named quantity. @@ -827,14 +821,14 @@ def _bounce_average(f, pitch=None): λ values to evaluate the bounce average at each field line. If None, uses the values given to the parent function. Last two axes should specify the λ value for a particular field line - parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[:, α, ρ]`` + parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[..., α, ρ]`` where in the latter the labels are interpreted as indices that correspond to that field line. If an additional axis exists on the left, it is the batch axis as usual. Returns ------- - result : ndarray, shape(P, alpha.size, rho.size, (resolution - 1) * 3) + result : ndarray, shape(P, alpha.size, rho.size, (zeta.size - 1) * 3) The last axis iterates through every bounce average performed along that field line padded by nan. diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 0564da5a8d..e7e5e3a09c 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -137,10 +137,10 @@ def test_pitch_input(): rho = np.linspace(0, 1, 6) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 2) ba, grid, data = bounce_average(eq, rho=rho, alpha=alpha, method="tanh_sinh") - pitch_resolution = 30 + pitch_resolution = 15 name = "g_zz" f = eq.compute(name, grid=grid, data=data)[name] - # same pitch for every field line, may lead to sparse result + # Same pitch for every field line may give sparse result. 
pitch = np.linspace(1 / data["B"].max(), 1 / data["B"].min(), pitch_resolution) pitch = pitch[:, np.newaxis, np.newaxis] result = ba(f, pitch) @@ -161,7 +161,7 @@ def test_pitch_input(): print(np.isfinite(result).any()) -# @pytest.mark.unit +@pytest.mark.unit def test_elliptic_integral_limit(): """Test bounce integral matches elliptic integrals. @@ -217,27 +217,21 @@ def beta(grid, data): print(result) rho = np.array([0.5]) - alpha = np.linspace(0, (2 - eq.sym) * np.pi, 20) - zeta_max = 10 * np.pi - resolution = 30 + alpha = np.linspace(0, (2 - eq.sym) * np.pi, 10) + zeta = np.linspace(0, 10 * np.pi, 20) bi, grid, data = bounce_integral( - eq, - rho=rho, - alpha=alpha, - zeta_max=zeta_max, - resolution=resolution, - method="tanh_sinh", + eq, rho=rho, alpha=alpha, zeta=zeta, method="tanh_sinh" ) - pitch = np.linspace(1 / data["B"].max(), 1 / data["B"].min(), resolution) + pitch_resolution = 15 + pitch = np.linspace(1 / data["B"].max(), 1 / data["B"].min(), pitch_resolution) name = "g_zz" f = eq.compute(name, grid=grid, data=data)[name] result = bi(f, pitch) assert np.isfinite(result).any(), "tanh_sinh quadrature failed." # routine copied from bounce_integrals functions - zeta = np.linspace(0, zeta_max, resolution) - B = data["|B|"].reshape(alpha.size * rho.size, resolution) - B_z_ra = data["|B|_z|r,a"].reshape(alpha.size * rho.size, resolution) + B = data["|B|"].reshape(alpha.size * rho.size, -1) + B_z_ra = data["|B|_z|r,a"].reshape(alpha.size * rho.size, -1) poly_B = CubicHermiteSpline(zeta, B, B_z_ra, axis=-1).c poly_B = np.moveaxis(poly_B, 1, -1) poly_B_z = polyder(poly_B) From a9fe8a7126960ca2f50fcd7c991be3c075e5a4ed Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 16 Mar 2024 22:11:13 -0500 Subject: [PATCH 040/241] Allow passing in method for quadrature --- desc/compute/bounce_integral.py | 44 ++++++++++++++++++--------------- tests/test_bounce_integral.py | 6 ++--- 2 files changed, 26 insertions(+), 24 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 97e56610a8..0856264235 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -505,7 +505,7 @@ def bounce_integral( alpha=None, zeta=20, resolution=11, - method="tanh_sinh", + method=tanh_sinh_quadrature, ): """Returns a method to compute the bounce integral of any quantity. @@ -549,13 +549,15 @@ def bounce_integral( If an integer is given, that many knots are linearly spaced from 0 to 10 pi. resolution : int Number of quadrature points. - method : str + method : callable The quadrature scheme used to evaluate the integral. + Should return quadrature points within the domain [-1, 1] + and quadrature weights with the call ``method(resolution)``. + Defaults to a tanh-sinh quadrature. + Cubic splines are used to represent each function in the integrand so that + the singularity near the bounce points can be captured more accurately than + can be represented by a polynomial. The "direct" method exactly integrates a cubic spline of the integrand. - The "tanh_sinh" method performs a tanh-sinh quadrature, where cubic - splines are used to represent each function in the integrand - so that the singularity near the bounce points can be captured more - accurately than can be represented by a polynomial. 
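As a sketch of why tanh-sinh quadrature is the default, the rule below maps equally spaced nodes t through x = tanh(π/2 sinh t); the weights then decay fast enough toward the endpoints to tame integrable singularities like the 1/√(1 − λ|B|) factor in the bounce integrand. This is the generic textbook construction, not the repository's ``tanh_sinh_quadrature`` (which follows quadax); ``resolution`` and ``t_max`` here are illustrative choices.

.. code-block:: python

    import numpy as np

    def tanh_sinh(resolution=49, t_max=3.0):
        """Generic tanh-sinh nodes and weights on (-1, 1), for illustration only."""
        t = np.linspace(-t_max, t_max, resolution)
        h = t[1] - t[0]
        x = np.tanh(0.5 * np.pi * np.sinh(t))
        w = h * 0.5 * np.pi * np.cosh(t) / np.cosh(0.5 * np.pi * np.sinh(t)) ** 2
        return x, w

    x, w = tanh_sinh()
    # ∫_{-1}^{1} dx / sqrt(1 - x^2) = π despite the endpoint singularities.
    np.testing.assert_allclose(np.sum(w / np.sqrt(1.0 - x**2)), np.pi, rtol=1e-5)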
Returns ------- @@ -604,7 +606,7 @@ def bounce_integral( assert poly_B.shape == (4, A * R, zeta.size - 1) assert poly_B_z.shape == (3, A * R, zeta.size - 1) - def tanh_sinh(f, pitch=None): + def quadrature(f, pitch=None): """Compute the bounce integral of the named quantity. Parameters @@ -634,9 +636,9 @@ def tanh_sinh(f, pitch=None): pitch = jnp.broadcast_to(pitch, shape=(P, A * R)) X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] f = f.reshape(A * R, -1) - quad = bounce_quadrature(pitch, X, w, zeta, f, B_sup_z, B, B_z_ra) + result = bounce_quadrature(pitch, X, w, zeta, f, B_sup_z, B, B_z_ra) # complete the change of variable - result = jnp.reshape(quad / (bp2 - bp1) * jnp.pi, newshape=(P, A, R, -1)) + result = jnp.reshape(result / (bp2 - bp1) * jnp.pi, newshape=(P, A, R, -1)) return result def direct(f, pitch=None): @@ -687,7 +689,7 @@ def direct(f, pitch=None): x=intersect_nan_to_right_knot, c=polyint(integrand)[..., jnp.newaxis] ).reshape(P * A * R, -1) - sums = jnp.cumsum( + result = jnp.cumsum( # Periodic boundary to compute bounce integrals of particles # trapped outside this snapshot of the field lines. jnp.diff(primitive, axis=-1, append=primitive[..., 0, jnp.newaxis]) @@ -701,18 +703,18 @@ def direct(f, pitch=None): ) result = jnp.reshape( # Compute difference of ``sums`` between bounce points. - v_mask_diff(v_mask_take(sums, is_intersect), is_bp)[ + v_mask_diff(v_mask_take(result, is_intersect), is_bp)[ ..., : (zeta.size - 1) * NUM_ROOTS ], newshape=(P, A, R, -1), ) return result - if method == "tanh_sinh": - x, w = tanh_sinh_quadrature(resolution) + if callable(method): + x, w = method(resolution) x = jnp.arcsin(x) / jnp.pi - 0.5 compute_bp = compute_bounce_points - bi = tanh_sinh + bi = quadrature elif method == "direct": compute_bp = _compute_bounce_points_with_knots bi = direct @@ -731,7 +733,7 @@ def bounce_average( alpha=None, zeta=20, resolution=11, - method="tanh_sinh", + method=tanh_sinh_quadrature, ): """Returns a method to compute the bounce average of any quantity. @@ -776,13 +778,15 @@ def bounce_average( If an integer is given, that many knots are linearly spaced from 0 to 10 pi. resolution : int Number of quadrature points. - method : str + method : callable The quadrature scheme used to evaluate the integral. + Should return quadrature points within the domain [-1, 1] + and quadrature weights with the call ``method(resolution)``. + Defaults to a tanh-sinh quadrature. + Cubic splines are used to represent each function in the integrand so that + the singularity near the bounce points can be captured more accurately than + can be represented by a polynomial. The "direct" method exactly integrates a cubic spline of the integrand. - The "tanh_sinh" method performs a tanh-sinh quadrature, where cubic - splines are used to represent each function in the integrand - so that the singularity near the bounce points can be captured more - accurately than can be represented by a polynomial. 
Returns ------- diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index e7e5e3a09c..16a213e11b 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -136,7 +136,7 @@ def test_pitch_input(): eq = get("HELIOTRON") rho = np.linspace(0, 1, 6) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 2) - ba, grid, data = bounce_average(eq, rho=rho, alpha=alpha, method="tanh_sinh") + ba, grid, data = bounce_average(eq, rho=rho, alpha=alpha) pitch_resolution = 15 name = "g_zz" f = eq.compute(name, grid=grid, data=data)[name] @@ -219,9 +219,7 @@ def beta(grid, data): rho = np.array([0.5]) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 10) zeta = np.linspace(0, 10 * np.pi, 20) - bi, grid, data = bounce_integral( - eq, rho=rho, alpha=alpha, zeta=zeta, method="tanh_sinh" - ) + bi, grid, data = bounce_integral(eq, rho=rho, alpha=alpha, zeta=zeta) pitch_resolution = 15 pitch = np.linspace(1 / data["B"].max(), 1 / data["B"].min(), pitch_resolution) name = "g_zz" From df4b33c6979c7936857c4ca1343113f8e6c01664 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 17 Mar 2024 23:14:02 -0400 Subject: [PATCH 041/241] Implement one option for ensuring that left bounce points are less... than right bounce points. Implement one option for handling particles that are not trapped in the given snapshot of the field line. --- desc/compute/bounce_integral.py | 470 ++++++++++++++------------------ desc/compute/utils.py | 138 +--------- tests/test_bounce_integral.py | 14 +- 3 files changed, 217 insertions(+), 405 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 0856264235..2369cd31ad 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -1,20 +1,13 @@ """Methods for computing bounce integrals.""" from functools import partial -from interpax import Akima1DInterpolator, CubicHermiteSpline, interp1d +from interpax import CubicHermiteSpline, interp1d -from desc.backend import complex_sqrt, flatnonzero, jnp, put_along_axis, vmap -from desc.compute.utils import mask_diff, mask_take, safediv +from desc.backend import complex_sqrt, flatnonzero, jnp, put, put_along_axis, take, vmap from desc.grid import Grid, LinearGrid, _meshgrid_expand from .data_index import data_index -NUM_ROOTS = 3 # max number of roots of a cubic polynomial -# returns index of first nonzero element in a -v_first_flatnonzero = vmap(lambda a: flatnonzero(a, size=1, fill_value=a.size)) -v_mask_diff = vmap(mask_diff) -v_mask_take = vmap(lambda a, mask: mask_take(a, mask, size=a.size, fill_value=jnp.nan)) - # vmap to compute a bounce integral for every pitch along every field line. @partial(vmap, in_axes=(1, 1, None, None, 0, 0, 0, 0), out_axes=1) @@ -47,9 +40,12 @@ def bounce_quadrature(pitch, X, w, knots, f, B_sup_z, B, B_z_ra): """ assert pitch.ndim == 1 - assert X.shape == (pitch.size, (knots.size - 1) * NUM_ROOTS, w.size) + assert X.shape == (pitch.size, (knots.size - 1) * 3, w.size) assert knots.shape == f.shape == B_sup_z.shape == B.shape == B_z_ra.shape - pitch = pitch[:, jnp.newaxis, jnp.newaxis] + # Cubic spline the integrand so that we can evaluate it at quadrature points + # without expensive coordinate mappings and root finding. + # Spline each function separately so that the singularity near the bounce + # points can be captured more accurately than can be by any polynomial. shape = X.shape X = X.ravel() # Use akima spline to suppress oscillation. 
@@ -57,11 +53,12 @@ def bounce_quadrature(pitch, X, w, knots, f, B_sup_z, B, B_z_ra): B_sup_z = interp1d(X, knots, B_sup_z, method="akima").reshape(shape) # Specify derivative at knots with fx=B_z_ra for ≈ cubic hermite interpolation. B = interp1d(X, knots, B, fx=B_z_ra, method="cubic").reshape(shape) + pitch = pitch[:, jnp.newaxis, jnp.newaxis] inner_product = jnp.dot(f / (B_sup_z * jnp.sqrt(1 - pitch * B)), w) return inner_product -def tanh_sinh_quadrature(resolution): +def tanh_sinh_quadrature(resolution=7): """ tanh_sinh quadrature. @@ -73,7 +70,7 @@ def tanh_sinh_quadrature(resolution): Parameters ---------- resolution: int - Number of quadrature points, preferably odd + Number of quadrature points, preferably odd. Returns ------- @@ -100,6 +97,109 @@ def tanh_sinh_quadrature(resolution): return x, w +@vmap +def take_mask(a, mask, size=None, fill_value=jnp.nan): + """JIT compilable method to return ``a[mask][:size]`` padded by ``fill_value``. + + Parameters + ---------- + a : ndarray + The source array. + mask : ndarray + Boolean mask to index into ``a``. + Should have same size as ``a``. + size : + Elements of ``a`` at the first size True indices of ``mask`` will be returned. + If there are fewer elements than size indicates, the returned array will be + padded with fill_value. + Defaults to ``a.size``. + fill_value : + When there are fewer than the indicated number of elements, + the remaining elements will be filled with ``fill_value``. + + Returns + ------- + a_mask : ndarray, shape(size, ) + Output array. + + """ + if size is None: + size = a.size + idx = flatnonzero(mask, size=size, fill_value=mask.size) + a_mask = take( + a, + idx, + axis=0, + mode="fill", + fill_value=fill_value, + unique_indices=True, + indices_are_sorted=True, + ) + return a_mask + + +def diff_mask(a, mask, n=1, axis=-1, prepend=None): + """Calculate the n-th discrete difference along the given axis of ``a[mask]``. + + The first difference is given by ``out[i] = a[i+1] - a[i]`` along + the given axis, higher differences are calculated by using `diff` + recursively. This method is JIT compatible. + + Parameters + ---------- + a : array_like + Input array + mask : array_like + Boolean mask to index like ``a[mask]`` prior to computing difference. + Should have same size as ``a``. + n : int, optional + The number of times values are differenced. + axis : int, optional + The axis along which the difference is taken, default is the + last axis. + prepend : array_like, optional + Values to prepend to `a` along axis prior to performing the difference. + Scalar values are expanded to arrays with length 1 in the direction of + axis and the shape of the input array in along all other axes. + Otherwise, the dimension and shape must match `a` except along axis. + + Returns + ------- + diff : ndarray + The n-th differences. The shape of the output is the same as ``a`` + except along ``axis`` where the dimension is smaller by ``n``. The + type of the output is the same as the type of the difference + between any two elements of ``a``. + + Notes + ----- + The result is padded with nan at the end to be jit compilable. 
+ + """ + prepend = () if prepend is None else (prepend,) + return jnp.diff(take_mask(a, mask, fill_value=jnp.nan), n, axis, *prepend) + + +@vmap +def _first_element(a, mask): + """Return first value of ``a`` where ``mask`` is nonzero.""" + assert a.ndim == mask.ndim == 1 + assert a.shape == mask.shape + idx = flatnonzero(mask, size=1, fill_value=a.size) + return a[idx] + + +@vmap +def _roll_and_replace(a, shift, replacement): + assert a.ndim == 1 + assert shift.size == 1 and shift.dtype == bool + assert replacement.size == 1 + # maybe jax will prefer this to an if statement + replacement = replacement * shift + a[0] * (~shift) + a = put(jnp.roll(a, shift), jnp.array([0]), replacement) + return a + + def polyint(c, k=None): """Coefficients for the primitives of the given set of polynomials. @@ -260,8 +360,6 @@ def clip_to_nan(root): return roots -# TODO: Consider the boundary to be periodic to compute bounce integrals of -# particles trapped outside this snapshot of the field lines. def compute_bounce_points(pitch, knots, poly_B, poly_B_z): """Compute the bounce points given |B| and pitch λ. @@ -312,144 +410,47 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): a_max=knots[1:], sort=True, ) - assert intersect.shape == (P, AR, N, NUM_ROOTS) + assert intersect.shape == (P, AR, N, 3) # Reshape so that last axis enumerates intersects of a pitch along a field line. # Condense remaining axes to vmap over them. B_z = polyval(x=intersect, c=poly_B_z[..., jnp.newaxis]).reshape(P * AR, -1) intersect = intersect.reshape(P * AR, -1) - # Only consider intersect if it is within knots that bound that polynomial.pytes + # Only consider intersect if it is within knots that bound that polynomial. is_intersect = ~jnp.isnan(intersect) # Rearrange so that all intersects along a field line are contiguous. - intersect = v_mask_take(intersect, is_intersect) - B_z = v_mask_take(B_z, is_intersect) - # The boolean masks ``bp1`` and ``bp2`` will encode whether a given entry in - # ``intersect`` is a valid starting and ending bounce point, respectively. + intersect = take_mask(intersect, is_intersect) + B_z = take_mask(B_z, is_intersect) + assert intersect.shape == B_z.shape == is_intersect.shape == (P * AR, N * 3) + # The boolean masks is_bp1 and is_bp2 will encode whether a given entry in + # intersect is a valid starting and ending bounce point, respectively. # Sign of derivative determines whether an intersect is a valid bounce point. - bp1 = B_z <= 0 - bp2 = B_z >= 0 - # B_z <= 0 at intersect i implies B_z >= 0 at intersect i+1 by continuity. - - # extend bp1 and bp2 by single element and then test - # index of last intersect along a field line - # idx = jnp.squeeze(v_first_flatnonzero(~is_intersect)) - 1 # noqa: E800 - # assert idx.shape == (P * AR,) # noqa: E800 - # Roll such that first intersect is moved to index of last intersect. - + is_bp1 = B_z < 0 + is_bp2 = B_z >= 0 # Get ζ values of bounce points from the masks. - bp1 = v_mask_take(intersect, bp1).reshape(P, AR, -1) - bp2 = v_mask_take(intersect, bp2).reshape(P, AR, -1) + bp1 = take_mask(intersect, is_bp1) + bp2 = take_mask(intersect, is_bp2) + # For correctness, it is necessary that the first intersect satisfies B_z <= 0. + # That is, the pairs bp1[:, i] and bp2[:, i] are the boundaries of an + # integral only if bp1[:, i] <= bp2[:, i]. + # Now, because B_z[:, i] <= 0 implies B_z[:, i + 1] >= 0 by continuity, + # there can be at most one inversion, and if it exists, the inversion must be + # at the first pair. 
To correct the inversion, it suffices to roll forward bp1 + # Then the pairs bp1[:, i + 1] and bp2[:, i] form integration boundaries. + # Moreover, if the first intersect satisfies B_z >= 0, that particle may be + # trapped in a well outside this snapshot of the field line. + # If the last intersect also satisfies B_z < 0, then we compute a bounce + # integral between these points. The above logic handles this assuming the + # field line is approximately periodic so that ζ = knots[-1] is ζ = 0. + last_one = jnp.squeeze(_first_element(intersect, ~is_intersect)) - 1 + bp1 = _roll_and_replace(bp1, bp1[:, 0] > bp2[:, 0], last_one - knots[-1]) + bp1 = bp1.reshape(P, AR, -1) + bp2 = bp2.reshape(P, AR, -1) return bp1, bp2 -# TODO: Consider the boundary to be periodic to compute bounce integrals of -# particles trapped outside this snapshot of the field lines. -def _compute_bounce_points_with_knots(pitch, knots, poly_B, poly_B_z): - """Compute the bounce points given |B| and pitch λ. - - Like ``compute_bounce_points`` but returns ingredients needed by the - algorithm in the ``direct`` method in ``bounce_integral``. - - Parameters - ---------- - pitch : ndarray, shape(P, A * R) - λ values. - Last two axes should specify the λ value for a particular field line - parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[..., α, ρ]`` - where in the latter the labels are interpreted as indices that correspond - to that field line. - If an additional axis exists on the left, it is the batch axis as usual. - knots : ndarray, shape(knots.size, ) - Field line-following ζ coordinates of spline knots. - poly_B : ndarray, shape(4, A * R, knots.size - 1) - Polynomial coefficients of the cubic spline of |B|. - First axis should iterate through coefficients of power series, - and the last axis should iterate through the piecewise - polynomials of a particular spline of |B| along field line. - poly_B_z : ndarray, shape(3, A * R, knots.size - 1) - Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. - First axis should iterate through coefficients of power series, - and the last axis should iterate through the piecewise - polynomials of a particular spline of |B| along field line. - - Returns - ------- - intersect_nan_to_right_knot, is_intersect, is_bp - The boolean mask ``is_bp`` encodes whether a given pair of intersects - are the endpoints of a bounce integral. - - """ - P = pitch.shape[0] # batch size - AR = poly_B.shape[1] # alpha.size * rho.size - N = knots.size - 1 # number of piecewise cubic polynomials per field line - assert poly_B.shape[-1] == poly_B_z.shape[-1] == N - a_min = knots[:-1] - a_max = knots[1:] - - # The polynomials' intersection points with 1 / λ is given by ``roots``. - # In order to be JIT compilable, this must have a shape that accommodates the - # case where each cubic polynomial intersects 1 / λ thrice. - # nan values in ``roots`` denote a polynomial has less than three intersects. - roots = cubic_poly_roots( - coef=poly_B, - k=jnp.expand_dims(1 / pitch, axis=-1), - a_min=knots[:-1], - a_max=knots[1:], - sort=True, - ) - assert roots.shape == (P, AR, N, NUM_ROOTS) - - # Include the knots of the splines along with the intersection points. - # This preprocessing makes the ``direct`` algorithm in ``bounce_integral`` simpler. 
- roots = (roots[..., 0], roots[..., 1], roots[..., 2]) - a_min = jnp.broadcast_to(a_min, shape=(P, AR, N)) - a_max = jnp.broadcast_to(a_max, shape=(P, AR, N)) - intersect = jnp.stack((a_min, *roots, a_max), axis=-1) - - # Reshape so that last axis enumerates intersects of a pitch along a field line. - # Condense remaining axes to vmap over them. - B_z = polyval(x=intersect, c=poly_B_z[..., jnp.newaxis]).reshape(P * AR, -1) - # Only consider intersect if it is within knots that bound that polynomial. - is_intersect = jnp.reshape( - jnp.array([False, True, True, True, False], dtype=bool) & ~jnp.isnan(intersect), - newshape=(P * AR, -1), - ) - - # Rearrange so that all the intersects along field line are contiguous. - B_z = v_mask_take(B_z, is_intersect) - # The boolean masks ``bp1`` and ``bp2`` will encode whether a given entry in - # ``intersect`` is a valid starting and ending bounce point, respectively. - # Sign of derivative determines whether an intersect is a valid bounce point. - bp1 = B_z <= 0 - bp2 = B_z >= 0 - # B_z <= 0 at intersect i implies B_z >= 0 at intersect i+1 by continuity. - - # index of last intersect - idx = jnp.squeeze(v_first_flatnonzero(~is_intersect)) - 1 - assert idx.shape == (P * AR,) - # Consider the boundary to be periodic to compute bounce integrals of - # particles trapped outside this snapshot of the field lines. - # Roll such that first intersect is moved to index of last intersect. - is_bp = bp1 & put_along_axis(jnp.roll(bp2, -1, axis=-1), idx, bp2[..., 0], axis=-1) - - # Returning this makes the ``direct`` algorithm in ``bounce_integral`` simpler. - # Replace nan values with right knots of the spline. - intersect_nan_to_right_knot = jnp.stack( - ( - a_min, - *tuple(map(lambda r: jnp.where(jnp.isnan(r), knots[1:], r), roots)), - a_max, - ), - axis=-1, - ).reshape(P * AR, N, -1) - - return intersect_nan_to_right_knot, is_intersect, is_bp - - -def _compute_bp_if_given_pitch( - pitch, knots, poly_B, poly_B_z, compute_bp, *original, err=False -): +def _compute_bp_if_given_pitch(pitch, knots, poly_B, poly_B_z, *original, err=False): """Return the ingredients needed by the ``bounce_integral`` function. Parameters @@ -473,8 +474,6 @@ def _compute_bp_if_given_pitch( First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. - compute_bp : callable - Method to compute bounce points. original : tuple Whatever this method returned earlier. err : bool @@ -495,7 +494,8 @@ def _compute_bp_if_given_pitch( assert pitch.ndim == 3, err_msg pitch = pitch.reshape(pitch.shape[0], -1) assert pitch.shape[-1] == 1 or pitch.shape[-1] == poly_B.shape[1], err_msg - return pitch, *compute_bp(pitch, knots, poly_B, poly_B_z) + pitch = jnp.broadcast_to(pitch, shape=(pitch.shape[0], poly_B.shape[1])) + return pitch, *compute_bounce_points(pitch, knots, poly_B, poly_B_z) def bounce_integral( @@ -504,8 +504,8 @@ def bounce_integral( rho=None, alpha=None, zeta=20, - resolution=11, - method=tanh_sinh_quadrature, + quadrature=tanh_sinh_quadrature, + **kwargs, ): """Returns a method to compute the bounce integral of any quantity. @@ -547,17 +547,11 @@ def bounce_integral( number of knots increases the accuracy of representing the integrand and the accuracy of the locations of the bounce points. If an integer is given, that many knots are linearly spaced from 0 to 10 pi. - resolution : int - Number of quadrature points. 
- method : callable + quadrature : callable The quadrature scheme used to evaluate the integral. - Should return quadrature points within the domain [-1, 1] - and quadrature weights with the call ``method(resolution)``. - Defaults to a tanh-sinh quadrature. - Cubic splines are used to represent each function in the integrand so that - the singularity near the bounce points can be captured more accurately than - can be represented by a polynomial. - The "direct" method exactly integrates a cubic spline of the integrand. + Should return quadrature points and weights when called. + The returned points should be within the domain [-1, 1]. + Can specify arguments to this callable with kwargs if convenient. Returns ------- @@ -606,8 +600,13 @@ def bounce_integral( assert poly_B.shape == (4, A * R, zeta.size - 1) assert poly_B_z.shape == (3, A * R, zeta.size - 1) - def quadrature(f, pitch=None): - """Compute the bounce integral of the named quantity. + x, w = quadrature(**kwargs) + # change of variable, x = sin[0.5 + π (ζ − ζ_b₂)/(ζ_b₂−ζ_b₁)] + x = jnp.arcsin(x) / jnp.pi - 0.5 + original = _compute_bp_if_given_pitch(pitch, zeta, poly_B, poly_B_z, err=False) + + def _bounce_integral(f, pitch=None): + """Compute the bounce integral of ``f``. Parameters ---------- @@ -630,100 +629,17 @@ def quadrature(f, pitch=None): """ pitch, bp1, bp2 = _compute_bp_if_given_pitch( - pitch, zeta, poly_B, poly_B_z, compute_bp, *original, err=True + pitch, zeta, poly_B, poly_B_z, *original, err=True ) - P = pitch.shape[0] - pitch = jnp.broadcast_to(pitch, shape=(P, A * R)) X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] f = f.reshape(A * R, -1) result = bounce_quadrature(pitch, X, w, zeta, f, B_sup_z, B, B_z_ra) # complete the change of variable - result = jnp.reshape(result / (bp2 - bp1) * jnp.pi, newshape=(P, A, R, -1)) + result = result / (bp2 - bp1) * jnp.pi + result = result.reshape(pitch.shape[0], A, R, -1) return result - def direct(f, pitch=None): - """Compute the bounce integral of the named quantity. - - Parameters - ---------- - f : ndarray - Quantity to compute the bounce integral of. - pitch : ndarray - λ values to evaluate the bounce integral at each field line. - If None, uses the values given to the parent function. - Last two axes should specify the λ value for a particular field line - parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[..., α, ρ]`` - where in the latter the labels are interpreted as indices that correspond - to that field line. - If an additional axis exists on the left, it is the batch axis as usual. - - Returns - ------- - result : ndarray, shape(P, alpha.size, rho.size, (zeta.size - 1) * 3) - The last axis iterates through every bounce integral performed - along that field line padded by nan. - - """ - ( - pitch, - intersect_nan_to_right_knot, - is_intersect, - is_bp, - ) = _compute_bp_if_given_pitch( - pitch, zeta, poly_B, poly_B_z, compute_bp, *original, err=True - ) - P = pitch.shape[0] - - integrand = jnp.nan_to_num( - f.reshape(A * R, -1) / (B_sup_z * jnp.sqrt(1 - pitch[..., jnp.newaxis] * B)) - ).reshape(P * A * R, -1) - integrand = Akima1DInterpolator(zeta, integrand, axis=-1, check=False).c - integrand = jnp.moveaxis(integrand, 1, -1) - assert integrand.shape == (4, P * A * R, zeta.size - 1) - - # For this algorithm, computing integrals via differences of primitives - # is preferable to any numerical quadrature. 
For example, even if the - # intersection points were evenly spaced, a composite Simpson's quadrature - # would require computing the spline on 1.8x more knots for the same accuracy. - primitive = polyval( - x=intersect_nan_to_right_knot, c=polyint(integrand)[..., jnp.newaxis] - ).reshape(P * A * R, -1) - - result = jnp.cumsum( - # Periodic boundary to compute bounce integrals of particles - # trapped outside this snapshot of the field lines. - jnp.diff(primitive, axis=-1, append=primitive[..., 0, jnp.newaxis]) - # Didn't enforce continuity in the piecewise primitives when - # integrating, so mask the discontinuity to avoid summing it. - * jnp.append( - jnp.arange(1, (zeta.size - 1) * (NUM_ROOTS + 2)) % (NUM_ROOTS + 2) != 0, - True, - ), - axis=-1, - ) - result = jnp.reshape( - # Compute difference of ``sums`` between bounce points. - v_mask_diff(v_mask_take(result, is_intersect), is_bp)[ - ..., : (zeta.size - 1) * NUM_ROOTS - ], - newshape=(P, A, R, -1), - ) - return result - - if callable(method): - x, w = method(resolution) - x = jnp.arcsin(x) / jnp.pi - 0.5 - compute_bp = compute_bounce_points - bi = quadrature - elif method == "direct": - compute_bp = _compute_bounce_points_with_knots - bi = direct - else: - raise ValueError(f"Got unknown method: {method}.") - original = _compute_bp_if_given_pitch( - pitch, zeta, poly_B, poly_B_z, compute_bp, err=False - ) - return bi, grid, data + return _bounce_integral, grid, data def bounce_average( @@ -732,8 +648,8 @@ def bounce_average( rho=None, alpha=None, zeta=20, - resolution=11, - method=tanh_sinh_quadrature, + quadrature=tanh_sinh_quadrature, + **kwargs, ): """Returns a method to compute the bounce average of any quantity. @@ -776,17 +692,11 @@ def bounce_average( number of knots increases the accuracy of representing the integrand and the accuracy of the locations of the bounce points. If an integer is given, that many knots are linearly spaced from 0 to 10 pi. - resolution : int - Number of quadrature points. - method : callable + quadrature : callable The quadrature scheme used to evaluate the integral. - Should return quadrature points within the domain [-1, 1] - and quadrature weights with the call ``method(resolution)``. - Defaults to a tanh-sinh quadrature. - Cubic splines are used to represent each function in the integrand so that - the singularity near the bounce points can be captured more accurately than - can be represented by a polynomial. - The "direct" method exactly integrates a cubic spline of the integrand. + Should return quadrature points and weights when called. + The returned points should be within the domain [-1, 1]. + Can specify arguments to this callable with kwargs if convenient. Returns ------- @@ -812,10 +722,10 @@ def bounce_average( result = ba(f, pitch) """ - bi, grid, data = bounce_integral(eq, pitch, rho, alpha, zeta, resolution, method) + bi, grid, data = bounce_integral(eq, pitch, rho, alpha, zeta, quadrature, **kwargs) def _bounce_average(f, pitch=None): - """Compute the bounce average of the named quantity. + """Compute the bounce average of ``f``. Parameters ---------- @@ -839,11 +749,53 @@ def _bounce_average(f, pitch=None): """ # Should be fine to fit akima spline to constant function 1 since # akima suppresses oscillation of the spline. - return safediv(bi(f, pitch), bi(jnp.ones_like(f), pitch)) + return bi(f, pitch) / bi(jnp.ones_like(f), pitch) return _bounce_average, grid, data +def stretch_batches(in_arr, in_batch_size, out_batch_size, fill): + """Stretch batches of ``in_arr``. 
+ + Given that ``in_arr`` is composed of N batches of ``in_batch_size`` + along its last axis, stretch the last axis so that it is composed of + N batches of ``out_batch_size``. The ``out_batch_size - in_batch_size`` + missing elements in each batch are populated with ``fill``. + By default, these elements are populated evenly surrounding the input batches. + + Parameters + ---------- + in_arr : ndarray, shape(..., in_batch_size * N) + Input array + in_batch_size : int + Length of batches along last axis of input array. + out_batch_size : int + Length of batches along last axis of output array. + fill : bool or int or float + Value to fill at missing indices of each batch. + + Returns + ------- + out_arr : ndarray, shape(..., out_batch_size * N) + Output array + + """ + assert out_batch_size >= in_batch_size + N = in_arr.shape[-1] // in_batch_size + out_shape = in_arr.shape[:-1] + (N * out_batch_size,) + offset = (out_batch_size - in_batch_size) // 2 + idx = jnp.arange(in_arr.shape[-1]) + out_arr = put_along_axis( + arr=jnp.full(out_shape, fill, dtype=in_arr.dtype), + indices=(idx // in_batch_size) * out_batch_size + + offset + + (idx % in_batch_size), + values=in_arr, + axis=-1, + ) + return out_arr + + def field_line_to_desc_coords(eq, rho, alpha, zeta): """Get DESC grid from unique field line coordinates.""" r, a, z = jnp.meshgrid(rho, alpha, zeta, indexing="ij") diff --git a/desc/compute/utils.py b/desc/compute/utils.py index ef8f9da4f6..72bfa543dd 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -7,16 +7,7 @@ import numpy as np from termcolor import colored -from desc.backend import ( - cond, - flatnonzero, - fori_loop, - jnp, - put, - put_along_axis, - take, - use_jax, -) +from desc.backend import cond, fori_loop, jnp, put from desc.grid import ConcentricGrid, LinearGrid from .data_index import data_index @@ -1283,133 +1274,6 @@ def body(i, mins): return grid.expand(mins, surface_label) -def mask_take(a, mask, size, fill_value=jnp.nan): - """JIT compilable method to return ``a[mask]`` padded by ``fill_value``. - - Parameters - ---------- - a : ndarray - The source array. - mask : ndarray - Boolean mask to index into ``a``. - size : - Elements of ``a`` at the first size True indices of ``mask`` will be returned. - If there are fewer elements than size indicates, the returned array will be - padded with fill_value. - fill_value : - When there are fewer than the indicated number of elements, - the remaining elements will be filled with ``fill_value``. - - Returns - ------- - a_mask : ndarray, shape(size, ) - Output array. - - """ - idx = flatnonzero(mask, size=size, fill_value=mask.size) - a_mask = take( - a, - idx, - axis=0, - mode="fill", - fill_value=fill_value, - unique_indices=True, - indices_are_sorted=True, - ) - return a_mask - - -def mask_diff(a, mask, n=1, axis=-1, prepend=None): - """Calculate the n-th discrete difference along the given axis of ``a[mask]``. - - The first difference is given by ``out[i] = a[i+1] - a[i]`` along - the given axis, higher differences are calculated by using `diff` - recursively. This method is JIT compatible. - - Parameters - ---------- - a : array_like - Input array - mask : array_like - Boolean mask to index like ``a[mask]`` prior to computing difference. - Should have same size as ``a``. - n : int, optional - The number of times values are differenced. - axis : int, optional - The axis along which the difference is taken, default is the - last axis. 
- prepend : array_like, optional - Values to prepend to `a` along axis prior to performing the difference. - Scalar values are expanded to arrays with length 1 in the direction of - axis and the shape of the input array in along all other axes. - Otherwise, the dimension and shape must match `a` except along axis. - - Returns - ------- - diff : ndarray - The n-th differences. The shape of the output is the same as ``a`` - except along ``axis`` where the dimension is smaller by ``n``. The - type of the output is the same as the type of the difference - between any two elements of ``a``. - - Notes - ----- - The result is padded with nan at the end to be jit compilable. - - """ - if prepend is None and not use_jax: - # https://github.com/numpy/numpy/blob/ - # d35cd07ea997f033b2d89d349734c61f5de54b0d/ - # numpy/lib/function_base.py#L1324-L1454 - prepend = np._NoValue - diff = jnp.diff( - mask_take(a, mask, size=mask.size, fill_value=jnp.nan), n, axis, prepend - ) - return diff - - -def stretch_batches(in_arr, in_batch_size, out_batch_size, fill): - """Stretch batches of ``in_arr``. - - Given that ``in_arr`` is composed of N batches of ``in_batch_size`` - along its last axis, stretch the last axis so that it is composed of - N batches of ``out_batch_size``. The ``out_batch_size - in_batch_size`` - missing elements in each batch are populated with ``fill``. - By default, these elements are populated evenly surrounding the input batches. - - Parameters - ---------- - in_arr : ndarray, shape(..., in_batch_size * N) - Input array - in_batch_size : int - Length of batches along last axis of input array. - out_batch_size : int - Length of batches along last axis of output array. - fill : bool or int or float - Value to fill at missing indices of each batch. - - Returns - ------- - out_arr : ndarray, shape(..., out_batch_size * N) - Output array - - """ - assert out_batch_size >= in_batch_size - N = in_arr.shape[-1] // in_batch_size - out_shape = in_arr.shape[:-1] + (N * out_batch_size,) - offset = (out_batch_size - in_batch_size) // 2 - idx = jnp.arange(in_arr.shape[-1]) - out_arr = put_along_axis( - arr=jnp.full(out_shape, fill, dtype=in_arr.dtype), - indices=(idx // in_batch_size) * out_batch_size - + offset - + (idx % in_batch_size), - values=in_arr, - axis=-1, - ) - return out_arr - - # defines the order in which objective arguments get concatenated into the state vector arg_order = ( "R_lmn", diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 16a213e11b..9bacdef4a7 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -135,16 +135,16 @@ def test_pitch_input(): """Test different ways of specifying pitch.""" eq = get("HELIOTRON") rho = np.linspace(0, 1, 6) - alpha = np.linspace(0, (2 - eq.sym) * np.pi, 2) + alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) ba, grid, data = bounce_average(eq, rho=rho, alpha=alpha) - pitch_resolution = 15 + pitch_resolution = 30 name = "g_zz" f = eq.compute(name, grid=grid, data=data)[name] # Same pitch for every field line may give sparse result. pitch = np.linspace(1 / data["B"].max(), 1 / data["B"].min(), pitch_resolution) pitch = pitch[:, np.newaxis, np.newaxis] result = ba(f, pitch) - assert np.isfinite(result).any(), "tanh_sinh quadrature failed." + assert np.isfinite(result).any() # specify pitch per field line B = data["B"].reshape(alpha.size * rho.size, -1) eps = 1e-5 # FIXME: vanishing B-field bug. 
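The padding behaviour documented for ``stretch_batches`` above can be seen with a pure-numpy sketch. This is a hypothetical standalone version; the helper in this patch presumably relies on a jit-compatible ``put_along_axis``.

.. code-block:: python

    import numpy as np

    def stretch_batches_np(in_arr, in_batch_size, out_batch_size, fill):
        # Place each input batch in the middle of a longer output batch,
        # filling the remaining slots with ``fill``.
        n = in_arr.shape[-1] // in_batch_size
        out = np.full(
            in_arr.shape[:-1] + (n * out_batch_size,), fill, dtype=in_arr.dtype
        )
        offset = (out_batch_size - in_batch_size) // 2
        idx = np.arange(in_arr.shape[-1])
        positions = (idx // in_batch_size) * out_batch_size + offset + (idx % in_batch_size)
        out[..., positions] = in_arr
        return out

    # Two batches of size 2 stretched to batches of size 4, padded evenly with zeros.
    print(stretch_batches_np(np.array([1, 2, 3, 4]), 2, 4, 0))
    # [0 1 2 0 0 3 4 0]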
@@ -154,14 +154,10 @@ def test_pitch_input(): pitch_resolution, ).reshape(pitch_resolution, alpha.size, rho.size) result = ba(f, pitch) - assert np.isfinite(result).any(), "tanh_sinh quadrature failed." - - ba, _, _ = bounce_average(eq, rho=rho, alpha=alpha, method="direct") - result = ba(f, pitch) - print(np.isfinite(result).any()) + assert np.isfinite(result).any() -@pytest.mark.unit +# @pytest.mark.unit def test_elliptic_integral_limit(): """Test bounce integral matches elliptic integrals. From 65777a1066168635cb0be74c8ee1b39b0245dc6a Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 17 Mar 2024 23:34:39 -0400 Subject: [PATCH 042/241] Delay explicit broadcast until necessary for vmap --- desc/compute/bounce_integral.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 2369cd31ad..678775b689 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -494,7 +494,6 @@ def _compute_bp_if_given_pitch(pitch, knots, poly_B, poly_B_z, *original, err=Fa assert pitch.ndim == 3, err_msg pitch = pitch.reshape(pitch.shape[0], -1) assert pitch.shape[-1] == 1 or pitch.shape[-1] == poly_B.shape[1], err_msg - pitch = jnp.broadcast_to(pitch, shape=(pitch.shape[0], poly_B.shape[1])) return pitch, *compute_bounce_points(pitch, knots, poly_B, poly_B_z) @@ -631,12 +630,16 @@ def _bounce_integral(f, pitch=None): pitch, bp1, bp2 = _compute_bp_if_given_pitch( pitch, zeta, poly_B, poly_B_z, *original, err=True ) + P = pitch.shape[0] + pitch = jnp.broadcast_to(pitch, shape=(P, poly_B.shape[1])) X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] f = f.reshape(A * R, -1) - result = bounce_quadrature(pitch, X, w, zeta, f, B_sup_z, B, B_z_ra) - # complete the change of variable - result = result / (bp2 - bp1) * jnp.pi - result = result.reshape(pitch.shape[0], A, R, -1) + result = jnp.reshape( + bounce_quadrature(pitch, X, w, zeta, f, B_sup_z, B, B_z_ra) + # complete the change of variable + / (bp2 - bp1) * jnp.pi, + newshape=(P, A, R, -1), + ) return result return _bounce_integral, grid, data From ffb918efdfce294af287b60185f91532a607e7c8 Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 18 Mar 2024 01:12:05 -0400 Subject: [PATCH 043/241] Physical justification for approximating field line as periodic --- desc/compute/bounce_integral.py | 53 +++++++++++++++++++++------------ 1 file changed, 34 insertions(+), 19 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 678775b689..93f12b07de 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -18,7 +18,7 @@ def bounce_quadrature(pitch, X, w, knots, f, B_sup_z, B, B_z_ra): ---------- pitch : ndarray, shape(pitch.size, ) λ values. - X : ndarray, shape(pitch.size, (knots.size - 1) * NUM_ROOTS, w.size) + X : ndarray, shape(pitch.size, (knots.size - 1) * 3, w.size) Quadrature points. w : ndarray, shape(w.size, ) Quadrature weights. @@ -35,7 +35,7 @@ def bounce_quadrature(pitch, X, w, knots, f, B_sup_z, B, B_z_ra): Returns ------- - inner_product : ndarray, shape(P, (knots.size - 1) * NUM_ROOTS) + inner_product : ndarray, shape(P, (knots.size - 1) * 3) Bounce integrals for every pitch along a particular field line. """ @@ -389,8 +389,8 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): ------- bp1, bp2 : ndarray, ndarray Field line-following ζ coordinates of bounce points for a given pitch - along a field line. 
Has shape (P, A * R, (knots.size - 1) * NUM_ROOTS). - If there were less than (knots.size - 1) * NUM_ROOTS bounce points along a + along a field line. Has shape (P, A * R, (knots.size - 1) * 3). + If there were less than (knots.size - 1) * 3 bounce points along a field line, then the last axis is padded with nan. """ @@ -436,15 +436,28 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): # integral only if bp1[:, i] <= bp2[:, i]. # Now, because B_z[:, i] <= 0 implies B_z[:, i + 1] >= 0 by continuity, # there can be at most one inversion, and if it exists, the inversion must be - # at the first pair. To correct the inversion, it suffices to roll forward bp1 - # Then the pairs bp1[:, i + 1] and bp2[:, i] form integration boundaries. + # at the first pair. To correct the inversion, it suffices to roll forward bp1. + # Then the pairs bp1[:, i] and bp2[:, i] for i > 0 form integration boundaries. # Moreover, if the first intersect satisfies B_z >= 0, that particle may be # trapped in a well outside this snapshot of the field line. - # If the last intersect also satisfies B_z < 0, then we compute a bounce - # integral between these points. The above logic handles this assuming the - # field line is approximately periodic so that ζ = knots[-1] is ζ = 0. - last_one = jnp.squeeze(_first_element(intersect, ~is_intersect)) - 1 - bp1 = _roll_and_replace(bp1, bp1[:, 0] > bp2[:, 0], last_one - knots[-1]) + # If, in addition, the last intersect satisfies B_z < 0, then we have the + # required information to compute a bounce integral between these points. + # The below logic handles both tasks. + last_intersect = jnp.squeeze(_first_element(intersect, ~is_intersect)) - 1 + bp1 = _roll_and_replace(bp1, bp1[:, 0] > bp2[:, 0], last_intersect - knots[-1]) + # Notice that for the latter, an "approximation" is made that the field line is + # periodic such that ζ = knots[-1] can be interpreted as ζ = 0 so that the + # distance between these bounce points is well-defined. This may worry the + # reader if they recall that it is not desirable to have field lines close + # on themselves. However, for any irrational value for the rotational + # transform, there exists an arbitrarily close rational value (I'm just saying + # the basic result that rational numbers are dense in the real numbers). + # After such a rational amount of transits, the points corresponding to this + # distance along the field line and the start of the field line will be + # physically close. By continuity, the value of |B| at ζ = 0 is then close + # to the value of |B| of at ζ = knots[-1]. In general, continuity implies + # |B|(knots[-1] < ζ < knots[-1] + knots[0]) will approximately equal + # |B|(0 < ζ < knots[0]) as long as ζ = knots[-1] is large enough. bp1 = bp1.reshape(P, AR, -1) bp2 = bp2.reshape(P, AR, -1) return bp1, bp2 @@ -514,9 +527,10 @@ def bounce_integral( |B| is the norm of the magnetic field, f(ℓ) is the quantity to integrate along the field line, and the endpoints of the integration are at the bounce points. - For a particle with fixed λ, bounce points are defined to be the location - on the field line such that the particle's velocity parallel to the - magnetic field is zero, i.e. λ |B| = 1. + Physically, the pitch angle λ is the magnetic moment over the energy + of particle. For a particle with fixed λ, bounce points are defined to be + the location on the field line such that the particle's velocity parallel + to the magnetic field is zero, i.e. λ |B| = 1. The bounce integral is defined up to a sign. 
We choose the sign that corresponds the particle's guiding center trajectory @@ -600,7 +614,7 @@ def bounce_integral( assert poly_B_z.shape == (3, A * R, zeta.size - 1) x, w = quadrature(**kwargs) - # change of variable, x = sin[0.5 + π (ζ − ζ_b₂)/(ζ_b₂−ζ_b₁)] + # change of variable, x = sin([0.5 + (ζ − ζ_b₂)/(ζ_b₂−ζ_b₁)] π) x = jnp.arcsin(x) / jnp.pi - 0.5 original = _compute_bp_if_given_pitch(pitch, zeta, poly_B, poly_B_z, err=False) @@ -631,7 +645,7 @@ def _bounce_integral(f, pitch=None): pitch, zeta, poly_B, poly_B_z, *original, err=True ) P = pitch.shape[0] - pitch = jnp.broadcast_to(pitch, shape=(P, poly_B.shape[1])) + pitch = jnp.broadcast_to(pitch, shape=(P, A * R)) X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] f = f.reshape(A * R, -1) result = jnp.reshape( @@ -663,9 +677,10 @@ def bounce_average( |B| is the norm of the magnetic field, f(ℓ) is the quantity to integrate along the field line, and the endpoints of the integration are at the bounce points. - For a particle with fixed λ, bounce points are defined to be the location - on the field line such that the particle's velocity parallel to the - magnetic field is zero, i.e. λ |B| = 1. + Physically, the pitch angle λ is the magnetic moment over the energy + of particle. For a particle with fixed λ, bounce points are defined to be + the location on the field line such that the particle's velocity parallel + to the magnetic field is zero, i.e. λ |B| = 1. The bounce integral is defined up to a sign. We choose the sign that corresponds the particle's guiding center trajectory From ff6a318ba7af823c9a57b34c1ae1cbcacfcd8802 Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 18 Mar 2024 01:24:24 -0400 Subject: [PATCH 044/241] Fix bug in last intersect value --- desc/compute/bounce_integral.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 93f12b07de..2e07da6768 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -181,11 +181,12 @@ def diff_mask(a, mask, n=1, axis=-1, prepend=None): @vmap -def _first_element(a, mask): - """Return first value of ``a`` where ``mask`` is nonzero.""" +def _last_element(a, mask): + """Return last element of ``a`` where ``mask`` is nonzero.""" assert a.ndim == mask.ndim == 1 assert a.shape == mask.shape - idx = flatnonzero(mask, size=1, fill_value=a.size) + assert mask.dtype == bool + idx = flatnonzero(~mask, size=1, fill_value=a.size) - 1 return a[idx] @@ -443,7 +444,7 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): # If, in addition, the last intersect satisfies B_z < 0, then we have the # required information to compute a bounce integral between these points. # The below logic handles both tasks. 
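The pairing rule described in the comments above (an intersect with B_z <= 0 opens a well and the next intersect with B_z >= 0 closes it) can be illustrated with a standalone toy model. The field and numbers below are made up, and the wrap-around case handled by the roll in the code is not treated here.

.. code-block:: python

    import numpy as np

    # Model |B| along a field line and a pitch such that 1/pitch lies between
    # min |B| and max |B|, so the level lambda * |B| = 1 is crossed several times.
    zeta = np.linspace(0, 4 * np.pi, 2000)
    B = 1.0 + 0.5 * np.cos(zeta)
    pitch = 1.0 / 1.2

    # Crossings of lambda * |B| = 1 show up as sign changes of f = 1 - pitch * B.
    f = 1.0 - pitch * B
    i = np.nonzero(np.diff(np.sign(f)))[0]
    # Linear interpolation for the crossing location within each cell.
    crossings = zeta[i] - f[i] * (zeta[i + 1] - zeta[i]) / (f[i + 1] - f[i])

    # Classify by the slope of |B| at each crossing: a well starts where |B|
    # decreases through 1/pitch and ends where it increases back through it.
    B_z = -0.5 * np.sin(crossings)
    bp1 = crossings[B_z <= 0]
    bp2 = crossings[B_z >= 0]
    # Consecutive (bp1, bp2) pairs bound each bounce integral.
    print(list(zip(bp1, bp2)))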
- last_intersect = jnp.squeeze(_first_element(intersect, ~is_intersect)) - 1 + last_intersect = jnp.squeeze(_last_element(intersect, is_intersect)) bp1 = _roll_and_replace(bp1, bp1[:, 0] > bp2[:, 0], last_intersect - knots[-1]) # Notice that for the latter, an "approximation" is made that the field line is # periodic such that ζ = knots[-1] can be interpreted as ζ = 0 so that the From 1cb9116ff79e64387e749cb8f677e58cbc846b79 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 24 Mar 2024 16:41:10 -0400 Subject: [PATCH 045/241] Return more useful things in bounce_integral --- desc/compute/bounce_integral.py | 89 +++++++++++----- desc/compute/utils.py | 181 +++++++++++++++++++++----------- tests/test_bounce_integral.py | 40 ++++--- 3 files changed, 205 insertions(+), 105 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 2e07da6768..6842a4d0c7 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -295,7 +295,7 @@ def polyval(x, c): return val -def cubic_poly_roots(coef, k=None, a_min=None, a_max=None, sort=False): +def cubic_poly_roots(coef, k=0, a_min=None, a_max=None, sort=False): """Roots of cubic polynomial with given coefficients. Parameters @@ -305,7 +305,7 @@ def cubic_poly_roots(coef, k=None, a_min=None, a_max=None, sort=False): given by c₁ x³ + c₂ x² + c₃ x + c₄, ``coef[i]`` should store cᵢ. It is assumed that c₁ is nonzero. k : ndarray - Specify to instead find solutions to c₁ x³ + c₂ x² + c₃ x + c₄ = ``k``. + Specify to find solutions to c₁ x³ + c₂ x² + c₃ x + c₄ = ``k``. Should broadcast with arrays of shape(*coef.shape[1:]). a_min, a_max : ndarray Minimum and maximum value to return roots between. @@ -331,8 +331,7 @@ def cubic_poly_roots(coef, k=None, a_min=None, a_max=None, sort=False): a_max = jnp.inf a, b, c, d = coef - if k is not None: - d = d - k + d = d - k t_0 = b**2 - 3 * a * c t_1 = 2 * b**3 - 9 * a * b * c + 27 * a**2 * d C = ((t_1 + complex_sqrt(t_1**2 - 4 * t_0**3)) / 2) ** (1 / 3) @@ -565,29 +564,45 @@ def bounce_integral( The quadrature scheme used to evaluate the integral. Should return quadrature points and weights when called. The returned points should be within the domain [-1, 1]. - Can specify arguments to this callable with kwargs if convenient. + kwargs : dict + Can specify arguments to the quadrature function with kwargs if convenient. + Can also specify whether to return items with ``return_items=True``. Returns ------- bi : callable This callable method computes the bounce integral F_ℓ(λ) for every specified field line ℓ (constant rho and alpha), for every λ value in ``pitch``. - grid : Grid - DESC coordinate grid for the given field line coordinates. - data : dict - Dictionary of ndarrays of stuff evaluated on ``grid``. + items : dict + Dictionary of useful intermediate quantities. + grid : Grid + DESC coordinate grid for the given field line coordinates. + data : dict + Dictionary of ndarrays of stuff evaluated on ``grid``. + poly_B : ndarray, shape(4, A * R, zeta.size - 1) + Polynomial coefficients of the cubic spline of |B|. + First axis should iterate through coefficients of power series, + and the last axis should iterate through the piecewise + polynomials of a particular spline of |B| along field line. + poly_B_z : ndarray, shape(3, A * R, zeta.size - 1) + Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. 
+ First axis should iterate through coefficients of power series, + and the last axis should iterate through the piecewise + polynomials of a particular spline of |B| along field line. Examples -------- .. code-block:: python - bi, grid, data = bounce_integral(eq) - pitch = jnp.linspace(1 / data["B"].max(), 1 / data["B"].min(), 30) + bi, items = bounce_integral(eq, return_items=True) + name = "g_zz" + f = eq.compute(name, grid=items["grid"], data=items["data"])[name] # Same pitch for every field line may give sparse result. # See tests/test_bounce_integral.py::test_pitch_input for an alternative. + pitch_res = 30 + B = items["data"]["B"] + pitch = jnp.linspace(1 / B.max(), 1 / B.min(), pitch_res) pitch = pitch[:, jnp.newaxis, jnp.newaxis] - name = "g_zz" - f = eq.compute(name, grid=grid, data=data)[name] result = bi(f, pitch) """ @@ -614,6 +629,7 @@ def bounce_integral( assert poly_B.shape == (4, A * R, zeta.size - 1) assert poly_B_z.shape == (3, A * R, zeta.size - 1) + return_items = kwargs.pop("return_items", False) x, w = quadrature(**kwargs) # change of variable, x = sin([0.5 + (ζ − ζ_b₂)/(ζ_b₂−ζ_b₁)] π) x = jnp.arcsin(x) / jnp.pi - 0.5 @@ -657,7 +673,11 @@ def _bounce_integral(f, pitch=None): ) return result - return _bounce_integral, grid, data + if return_items: + items = {"grid": grid, "data": data, "poly_B": poly_B, "poly_B_z": poly_B_z} + return _bounce_integral, items + else: + return _bounce_integral def bounce_average( @@ -716,32 +736,48 @@ def bounce_average( Should return quadrature points and weights when called. The returned points should be within the domain [-1, 1]. Can specify arguments to this callable with kwargs if convenient. + kwargs : dict + Can specify arguments to the quadrature function with kwargs if convenient. + Can also specify whether to return items with ``return_items=True``. Returns ------- ba : callable This callable method computes the bounce average F_ℓ(λ) for every specified field line ℓ (constant rho and alpha), for every λ value in ``pitch``. - grid : Grid - DESC coordinate grid for the given field line coordinates. - data : dict - Dictionary of ndarrays of stuff evaluated on ``grid``. + items : dict + Dictionary of useful intermediate quantities. + grid : Grid + DESC coordinate grid for the given field line coordinates. + data : dict + Dictionary of ndarrays of stuff evaluated on ``grid``. + poly_B : ndarray, shape(4, A * R, zeta.size - 1) + Polynomial coefficients of the cubic spline of |B|. + First axis should iterate through coefficients of power series, + and the last axis should iterate through the piecewise + polynomials of a particular spline of |B| along field line. + poly_B_z : ndarray, shape(3, A * R, zeta.size - 1) + Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. + First axis should iterate through coefficients of power series, + and the last axis should iterate through the piecewise + polynomials of a particular spline of |B| along field line. Examples -------- .. code-block:: python - ba, grid, data = bounce_integral(eq) - pitch = jnp.linspace(1 / data["B"].max(), 1 / data["B"].min(), 30) + ba, items = bounce_average(eq, return_items=True) + name = "g_zz" + f = eq.compute(name, grid=items["grid"], data=items["data"])[name] # Same pitch for every field line may give sparse result. # See tests/test_bounce_integral.py::test_pitch_input for an alternative. 
+ pitch_res = 30 + B = items["data"]["B"] + pitch = jnp.linspace(1 / B.max(), 1 / B.min(), pitch_res) pitch = pitch[:, jnp.newaxis, jnp.newaxis] - name = "g_zz" - f = eq.compute(name, grid=grid, data=data)[name] result = ba(f, pitch) """ - bi, grid, data = bounce_integral(eq, pitch, rho, alpha, zeta, quadrature, **kwargs) def _bounce_average(f, pitch=None): """Compute the bounce average of ``f``. @@ -770,7 +806,12 @@ def _bounce_average(f, pitch=None): # akima suppresses oscillation of the spline. return bi(f, pitch) / bi(jnp.ones_like(f), pitch) - return _bounce_average, grid, data + bi = bounce_integral(eq, pitch, rho, alpha, zeta, quadrature, **kwargs) + if kwargs.get("return_items"): + bi, items = bi + return _bounce_average, items + else: + return _bounce_average def stretch_batches(in_arr, in_batch_size, out_batch_size, fill): diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 2926a0adac..3ef93e2cc0 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -749,9 +749,18 @@ def line_integrals( q : ndarray Quantity to integrate. The first dimension of the array should have size ``grid.num_nodes``. - When ``q`` is n-dimensional, the intention is to integrate, + + When ``q`` is 1-dimensional, the intention is to integrate, + over the domain parameterized by rho, theta, and zeta, + a scalar function over the previously mentioned domain. + + When ``q`` is 2-dimensional, the intention is to integrate, over the domain parameterized by rho, theta, and zeta, - an n-dimensional function over the previously mentioned domain. + a vector-valued function over the previously mentioned domain. + + When ``q`` is 3-dimensional, the intention is to integrate, + over the domain parameterized by rho, theta, and zeta, + a matrix-valued function over the previously mentioned domain. line_label : str The coordinate curve to compute the integration over. To clarify, a theta (poloidal) curve is the intersection of a @@ -816,9 +825,18 @@ def surface_integrals(grid, q=jnp.array([1.0]), surface_label="rho", expand_out= q : ndarray Quantity to integrate. The first dimension of the array should have size ``grid.num_nodes``. - When ``q`` is n-dimensional, the intention is to integrate, + + When ``q`` is 1-dimensional, the intention is to integrate, over the domain parameterized by rho, theta, and zeta, - an n-dimensional function over the previously mentioned domain. + a scalar function over the previously mentioned domain. + + When ``q`` is 2-dimensional, the intention is to integrate, + over the domain parameterized by rho, theta, and zeta, + a vector-valued function over the previously mentioned domain. + + When ``q`` is 3-dimensional, the intention is to integrate, + over the domain parameterized by rho, theta, and zeta, + a matrix-valued function over the previously mentioned domain. surface_label : str The surface label of rho, theta, or zeta to compute the integration over. expand_out : bool @@ -870,13 +888,12 @@ def surface_integrals_map(grid, surface_label="rho", expand_out=True): unique_size, inverse_idx, spacing, has_endpoint_dupe = _get_grid_surface( grid, surface_label ) - spacing = jnp.prod(spacing, axis=1) # Todo: Define masks as a sparse matrix once sparse matrices are no longer # experimental in jax. # The ith row of masks is True only at the indices which correspond to the # ith surface. The integral over the ith surface is the dot product of the - # ith row vector and the integrand defined over all the surfaces. 
+ # ith row vector and the vector of integrands of all surfaces. masks = inverse_idx == jnp.arange(unique_size)[:, jnp.newaxis] # Imagine a torus cross-section at zeta=π. # A grid with a duplicate zeta=π node has 2 of those cross-sections. @@ -902,6 +919,7 @@ def surface_integrals_map(grid, surface_label="rho", expand_out=True): lambda _: masks, operand=None, ) + spacing = jnp.prod(spacing, axis=1) def _surface_integrals(q=jnp.array([1.0])): """Compute a surface integral for each surface in the grid. @@ -917,9 +935,18 @@ def _surface_integrals(q=jnp.array([1.0])): q : ndarray Quantity to integrate. The first dimension of the array should have size ``grid.num_nodes``. - When ``q`` is n-dimensional, the intention is to integrate, + + When ``q`` is 1-dimensional, the intention is to integrate, + over the domain parameterized by rho, theta, and zeta, + a scalar function over the previously mentioned domain. + + When ``q`` is 2-dimensional, the intention is to integrate, + over the domain parameterized by rho, theta, and zeta, + a vector-valued function over the previously mentioned domain. + + When ``q`` is 3-dimensional, the intention is to integrate, over the domain parameterized by rho, theta, and zeta, - an n-dimensional function over the previously mentioned domain. + a matrix-valued function over the previously mentioned domain. Returns ------- @@ -927,12 +954,41 @@ def _surface_integrals(q=jnp.array([1.0])): Surface integral of the input over each surface in the grid. """ + axis_to_move = (jnp.ndim(q) == 3) * 2 integrands = (spacing * jnp.nan_to_num(q).T).T - # `integrands` may have shape (g.size, *f.shape), where - # g.size is grid.num_nodes and iterating along this axis varies the object, - # e.g. some function f, held in the remaining axes over the nodes of the grid. - # Uses less memory than jnp.einsum("ug,g...->u...", masks, integrands). - integrals = jnp.tensordot(masks, integrands, axes=1) + # `integrands` may have shape (g.size, f.size, v.size), where + # g is the grid function depending on the integration variables + # f is a function which may be independent of the integration variables + # v is the vector of components of f (or g). + # The intention is to integrate `integrands` which is a + # vector-valued (with v.size components) + # function-valued (with image size of f.size) + # function over the grid (with domain size of g.size = grid.num_nodes) + # over each surface in the grid. + + # The distinction between f and v is semantic. + # We may alternatively consider an `integrands` of shape (g.size, f.size) to + # represent a vector-valued (with f.size components) function over the grid. + # Likewise, we may alternatively consider an `integrands` of shape + # (g.size, v.size) to represent a function-valued (with image size v.size) + # function over the grid. When `integrands` has dimension one, it is a + # scalar function over the grid. That is, a + # vector-valued (with 1 component), + # function-valued (with image size of 1) + # function over the grid (with domain size of g.size = grid.num_nodes) + + # The integration is performed by applying `masks`, the surface + # integral operator, to `integrands`. This operator hits the matrix formed + # by the last two dimensions of `integrands`, for every element along the + # previous dimension of `integrands`. Therefore, when `integrands` has three + # dimensions, the second must hold g. We may choose which of the first and + # third dimensions hold f and v. 
The choice below transposes `integrands` to + # shape (v.size, g.size, f.size). As we expect f.size >> v.size, the + # integration is in theory faster since numpy optimizes large matrix + # products. However, timing results showed no difference. + integrals = jnp.moveaxis( + masks @ jnp.moveaxis(integrands, axis_to_move, 0), 0, axis_to_move + ) return grid.expand(integrals, surface_label) if expand_out else integrals return _surface_integrals @@ -960,9 +1016,18 @@ def surface_averages( q : ndarray Quantity to average. The first dimension of the array should have size ``grid.num_nodes``. - When ``q`` is n-dimensional, the intention is to average, + + When ``q`` is 1-dimensional, the intention is to average, + over the domain parameterized by rho, theta, and zeta, + a scalar function over the previously mentioned domain. + + When ``q`` is 2-dimensional, the intention is to average, over the domain parameterized by rho, theta, and zeta, - an n-dimensional function over the previously mentioned domain. + a vector-valued function over the previously mentioned domain. + + When ``q`` is 3-dimensional, the intention is to average, + over the domain parameterized by rho, theta, and zeta, + a matrix-valued function over the previously mentioned domain. sqrt_g : ndarray Coordinate system Jacobian determinant; see ``data_index["sqrt(g)"]``. surface_label : str @@ -1013,7 +1078,7 @@ def surface_averages_map(grid, surface_label="rho", expand_out=True): ``function(q, sqrt_g)``. """ - integrate = surface_integrals_map(grid, surface_label, False) + compute_surface_integrals = surface_integrals_map(grid, surface_label, False) def _surface_averages(q, sqrt_g=jnp.array([1.0]), denominator=None): """Compute a surface average for each surface in the grid. @@ -1028,9 +1093,18 @@ def _surface_averages(q, sqrt_g=jnp.array([1.0]), denominator=None): q : ndarray Quantity to average. The first dimension of the array should have size ``grid.num_nodes``. - When ``q`` is n-dimensional, the intention is to average, + + When ``q`` is 1-dimensional, the intention is to average, + over the domain parameterized by rho, theta, and zeta, + a scalar function over the previously mentioned domain. + + When ``q`` is 2-dimensional, the intention is to average, over the domain parameterized by rho, theta, and zeta, - an n-dimensional function over the previously mentioned domain. + a vector-valued function over the previously mentioned domain. + + When ``q`` is 3-dimensional, the intention is to average, + over the domain parameterized by rho, theta, and zeta, + a matrix-valued function over the previously mentioned domain. sqrt_g : ndarray Coordinate system Jacobian determinant; see ``data_index["sqrt(g)"]``. 
denominator : ndarray @@ -1049,14 +1123,14 @@ def _surface_averages(q, sqrt_g=jnp.array([1.0]), denominator=None): """ q = jnp.atleast_1d(q) sqrt_g = jnp.atleast_1d(sqrt_g) - numerator = integrate((sqrt_g * q.T).T) + numerator = compute_surface_integrals((sqrt_g * q.T).T) # memory optimization to call expand() at most once if denominator is None: # skip integration if constant denominator = ( (4 * jnp.pi**2 if surface_label == "rho" else 2 * jnp.pi) * sqrt_g if sqrt_g.size == 1 - else integrate(sqrt_g) + else compute_surface_integrals(sqrt_g) ) averages = (numerator.T / denominator).T if expand_out: @@ -1113,16 +1187,28 @@ def surface_integrals_transform(grid, surface_label="rho"): The second dimension may discretize some function, f, over the codomain, and therefore, have size that matches the desired number of points at which the output is evaluated. + If the integrand is vector-valued then the third dimension may + hold the components of size v.size. + + This method can also be used to compute the output one point at a time. + In this case, ``q`` will be at most two-dimensional, and the second + dimension may hold the vector components. - This method can also be used to compute the output one point at a time, - in which case ``q`` can have shape (``grid.num_nodes``, ). + There is technically no difference between the labels f and v, so their + roles may be swapped if this is more convenient. Input ----- - If ``q`` has one-dimension, then it should have shape + If ``q`` is one-dimensional, then it should have shape (``grid.num_nodes``, ). - If ``q`` has multiple dimensions, then it should have shape - (``grid.num_nodes``, *f.shape). + If ``q`` is two-dimensional, then either + 1) g and f are scalar functions, so the input should have shape + (``grid.num_nodes``, f.size). + 2) g (or f) is a vector-valued function, and f has been evaluated at + only one point, so the input should have shape + (``grid.num_nodes``, v.size). + If ``q`` is three-dimensional, then it should have shape + (``grid.num_nodes``, f.size, v.size). Output ------ @@ -1130,12 +1216,19 @@ def surface_integrals_transform(grid, surface_label="rho"): Tᵤ₁ for a particular surface of constant u₁ in the given grid. The order is sorted in increasing order of the values which specify u₁. - If ``q`` has one dimension, the returned array has shape + If ``q`` is one-dimensional, the returned array has shape (grid.num_surface_label, ). - If ``q`` has multiple dimensions, the returned array has shape - (grid.num_surface_label, *f.shape). + If ``q`` is two-dimensional, the returned array has shape + (grid.num_surface_label, (f or v).size), depending on whether f or v is + the relevant label. + If ``q`` is three-dimensional, the returned array has shape + (grid.num_surface_label, f.size, v.size). """ + # Although this method seems to duplicate surface_integrals(), the + # intentions of these methods may be to implement different algorithms. + # We can rely on surface_integrals() for the computation because its current + # implementation is flexible enough to implement both algorithms. # Expansion should not occur here. The typical use case of this method is to # transform into the computational domain, so the second dimension that # discretizes f over the codomain will typically have size grid.num_nodes @@ -1293,35 +1386,3 @@ def body(i, mins): # The above implementation was benchmarked to be more efficient than # alternatives without explicit loops in GitHub pull request #501. 
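The batched matrix product used in ``surface_integrals_map`` above (a moveaxis followed by ``masks @ ...`` and a moveaxis back) is equivalent to a plain einsum contraction over the grid axis. A toy-shape check of that equivalence, with made-up sizes:

.. code-block:: python

    import numpy as np

    # Toy sizes: u surfaces, g grid nodes, f points in the codomain, v vector components.
    rng = np.random.default_rng(seed=0)
    masks = rng.random((4, 12))           # (u, g) surface integral operator
    integrands = rng.random((12, 5, 3))   # (g, f, v)
    axis_to_move = 2

    via_matmul = np.moveaxis(
        masks @ np.moveaxis(integrands, axis_to_move, 0), 0, axis_to_move
    )
    via_einsum = np.einsum("ug,gfv->ufv", masks, integrands)
    np.testing.assert_allclose(via_matmul, via_einsum)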
return grid.expand(mins, surface_label) - - -# defines the order in which objective arguments get concatenated into the state vector -arg_order = ( - "R_lmn", - "Z_lmn", - "L_lmn", - "p_l", - "i_l", - "c_l", - "Psi", - "Te_l", - "ne_l", - "Ti_l", - "Zeff_l", - "a_lmn", - "Ra_n", - "Za_n", - "Rb_lmn", - "Zb_lmn", -) -# map from profile name to equilibrium parameter name -profile_names = { - "pressure": "p_l", - "iota": "i_l", - "current": "c_l", - "electron_temperature": "Te_l", - "electron_density": "ne_l", - "ion_temperature": "Ti_l", - "atomic_number": "Zeff_l", - "anisotropy": "a_lmn", -} diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 9bacdef4a7..05f6c11a6b 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -2,7 +2,7 @@ import numpy as np import pytest -from interpax import Akima1DInterpolator, CubicHermiteSpline +from interpax import Akima1DInterpolator from desc.backend import fori_loop, put, root_scalar from desc.compute.bounce_integral import ( @@ -136,23 +136,25 @@ def test_pitch_input(): eq = get("HELIOTRON") rho = np.linspace(0, 1, 6) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) - ba, grid, data = bounce_average(eq, rho=rho, alpha=alpha) - pitch_resolution = 30 + ba, items = bounce_average(eq, rho=rho, alpha=alpha, return_items=True) name = "g_zz" - f = eq.compute(name, grid=grid, data=data)[name] + f = eq.compute(name, grid=items["grid"], data=items["data"])[name] + # Same pitch for every field line may give sparse result. - pitch = np.linspace(1 / data["B"].max(), 1 / data["B"].min(), pitch_resolution) - pitch = pitch[:, np.newaxis, np.newaxis] + pitch_res = 30 + B = items["data"]["B"] + pitch = np.linspace(1 / B.max(), 1 / B.min(), pitch_res)[:, np.newaxis, np.newaxis] result = ba(f, pitch) assert np.isfinite(result).any() + # specify pitch per field line - B = data["B"].reshape(alpha.size * rho.size, -1) + B = B.reshape(alpha.size * rho.size, -1) eps = 1e-5 # FIXME: vanishing B-field bug. pitch = np.linspace( 1 / (B.max(axis=-1) + eps), 1 / (B.min(axis=-1) + eps), - pitch_resolution, - ).reshape(pitch_resolution, alpha.size, rho.size) + pitch_res, + ).reshape(pitch_res, alpha.size, rho.size) result = ba(f, pitch) assert np.isfinite(result).any() @@ -170,7 +172,8 @@ def test_elliptic_integral_limit(): Are we saying that in this limit, we expect that |B| ~ sin(t)^2, with m as the pitch angle? I assume that we want to add g_zz to the integrand in the definition of the function in the scipy documentation above, - and after a change of variables the bounce points will be the integration. + and after a change of variables the bounce points will be the endpoints of + the integration. So this test will test whether the quadrature is accurate (and not whether the bounce points were accurate). @@ -215,22 +218,17 @@ def beta(grid, data): rho = np.array([0.5]) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 10) zeta = np.linspace(0, 10 * np.pi, 20) - bi, grid, data = bounce_integral(eq, rho=rho, alpha=alpha, zeta=zeta) - pitch_resolution = 15 - pitch = np.linspace(1 / data["B"].max(), 1 / data["B"].min(), pitch_resolution) + bi, items = bounce_integral(eq, rho=rho, alpha=alpha, zeta=zeta, return_items=True) + B = items["data"]["B"] + pitch_res = 15 + pitch = np.linspace(1 / B.max(), 1 / B.min(), pitch_res) name = "g_zz" - f = eq.compute(name, grid=grid, data=data)[name] + f = eq.compute(name, grid=items["grid"], data=items["data"])[name] result = bi(f, pitch) assert np.isfinite(result).any(), "tanh_sinh quadrature failed." 
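The docstring of this test anticipates that bounce integrals with non-degenerate bounce points reduce to complete elliptic integrals. A standalone check of that reduction for one simple model field, |B| = B0 (1 + eps cos zeta), with illustrative numbers; this is independent of DESC and not necessarily the normalization the finished test will use.

.. code-block:: python

    import numpy as np
    from scipy.integrate import quad
    from scipy.special import ellipk

    # Model field and pitch (illustrative); 1/pitch lies between min |B| and max |B|.
    B0, eps, pitch = 1.0, 0.3, 1.0 / 1.1
    c = (1.0 / (pitch * B0) - 1.0) / eps              # cos(zeta) at the bounce points
    z1, z2 = np.arccos(c), 2 * np.pi - np.arccos(c)   # magnetic well centered at zeta = pi

    numerical, _ = quad(
        lambda z: 1.0 / np.sqrt(1.0 - pitch * B0 * (1.0 + eps * np.cos(z))), z1, z2
    )
    # Analytic result: 4 K(m) / sqrt(2 * pitch * B0 * eps), where scipy's ellipk
    # takes the parameter m = (1 - pitch * B0 * (1 - eps)) / (2 * pitch * B0 * eps).
    m = (1.0 - pitch * B0 * (1.0 - eps)) / (2.0 * pitch * B0 * eps)
    analytic = 4.0 * ellipk(m) / np.sqrt(2.0 * pitch * B0 * eps)
    np.testing.assert_allclose(numerical, analytic, rtol=1e-5)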
- # routine copied from bounce_integrals functions - B = data["|B|"].reshape(alpha.size * rho.size, -1) - B_z_ra = data["|B|_z|r,a"].reshape(alpha.size * rho.size, -1) - poly_B = CubicHermiteSpline(zeta, B, B_z_ra, axis=-1).c - poly_B = np.moveaxis(poly_B, 1, -1) - poly_B_z = polyder(poly_B) - bp1, bp2 = compute_bounce_points(pitch, zeta, poly_B, poly_B_z) # TODO now compare result to elliptic integral + bp1, bp2 = compute_bounce_points(pitch, zeta, items["poly_B"], items["poly_B_z"]) # TODO: if deemed useful finish details using methods in desc.compute.bounce_integral From 0ae53d9d05b317d8b837ceefbaad0e8eb9db028b Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 29 Mar 2024 00:31:34 -0400 Subject: [PATCH 046/241] Add test for hairy ball theorem --- desc/compute/bounce_integral.py | 6 +++--- tests/test_bounce_integral.py | 34 +++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 3 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 14d7fd6cd6..ff062ec19d 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -857,7 +857,7 @@ def stretch_batches(in_arr, in_batch_size, out_batch_size, fill): return out_arr -def field_line_to_desc_coords(eq, rho, alpha, zeta): +def field_line_to_desc_coords(eq, rho, alpha, zeta, jitable=True): """Get DESC grid from unique field line coordinates.""" r, a, z = jnp.meshgrid(rho, alpha, zeta, indexing="ij") r, a, z = r.ravel(), a.ravel(), z.ravel() @@ -873,7 +873,7 @@ def field_line_to_desc_coords(eq, rho, alpha, zeta): # transform to approximate theta_PEST and the poloidal stream function anyway. # TODO: map coords recently updated, so maybe just switch to that lg = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym) - lg_data = eq.compute("iota", grid=lg) + lg_data = eq.compute(["iota", "iota_r", "iota_rr"], grid=lg) data = { d: _meshgrid_expand(lg.compress(lg_data[d]), rho.size, alpha.size, zeta.size) for d in lg_data @@ -882,5 +882,5 @@ def field_line_to_desc_coords(eq, rho, alpha, zeta): } sfl_coords = jnp.column_stack([r, a + data["iota"] * z, z]) desc_coords = eq.compute_theta_coords(sfl_coords) - grid = Grid(desc_coords, jitable=True) + grid = Grid(desc_coords, jitable=jitable) return grid, data diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index f5434b67b8..19c3da5cad 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -159,6 +159,40 @@ def test_pitch_input(): assert np.isfinite(result).any() +@pytest.mark.unit +def test_hairy_ball_theorem(): + """Ensure |B| does not vanish after coordinate mapping.""" + eq = get("W7-X") + rho = np.linspace(0, 1, 5) + alpha = np.linspace(0, 2 * np.pi, 2) + zeta = np.linspace(0, 4 * np.pi, 10) + + grid_jit, data_jit = field_line_to_desc_coords(eq, rho, alpha, zeta, jitable=True) + data_jit = eq.compute("|B|", grid=grid_jit, data=data_jit) + grid_no_jit, data_no_jit = field_line_to_desc_coords( + eq, rho, alpha, zeta, jitable=False + ) + data_no_jit = eq.compute("|B|", grid=grid_no_jit, data=data_no_jit) + print(data_jit["|B|"]) + print() + print(data_no_jit["|B|"]) + # I think there are two separate bugs. + # Bug 1: + # Regardless of any logic issues with the coordinate mapping etc., I think + # these two arrays should be approximately equal. However, the jitted |B| + # has varies between 0 and 10^11, while the no jit |B| varies from 0 to 3. + np.testing.assert_allclose( + data_jit["|B|"], + data_no_jit["|B|"], + err_msg="jitting issue? 
grid weights issue?" + " mixing grids with different weights to compute things?", + ) + # Bug 2: vanishing |B|. + assert np.all( + ~np.isclose(data_no_jit["|B|"], 0) + ), "logic issue in field_line_to_desc_coords?" + + # @pytest.mark.unit def test_elliptic_integral_limit(): """Test bounce integral matches elliptic integrals. From 7e7deb767eb7c22710954b0330321dd010ca41e3 Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 29 Mar 2024 20:51:36 -0400 Subject: [PATCH 047/241] Fix meshgrid expand, and remove axis by default from... bounce integral for reasons scified in https://github.com/PlasmaControl/DESC/pull/968#issuecomment-2027755779 --- desc/compute/bounce_integral.py | 30 +++++++-------- desc/grid.py | 65 ++++++++++++--------------------- tests/test_bounce_integral.py | 50 ++++--------------------- tests/test_grid.py | 19 ++++++++++ 4 files changed, 64 insertions(+), 100 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index ff062ec19d..99fc5a72be 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -595,20 +595,20 @@ def bounce_integral( -------- .. code-block:: python - bi, items = bounce_integral(eq, return_items=True) + rho = np.linspace(1e-12, 1, 6) + alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) + bi, items = bounce_integral(eq, rho=rho, alpha=alpha, return_items=True) name = "g_zz" f = eq.compute(name, grid=items["grid"], data=items["data"])[name] - # Same pitch for every field line may give sparse result. - # See tests/test_bounce_integral.py::test_pitch_input for an alternative. - pitch_res = 30 - B = items["data"]["B"] - pitch = jnp.linspace(1 / B.max(), 1 / B.min(), pitch_res) - pitch = pitch[:, jnp.newaxis, jnp.newaxis] + B = items["data"]["B"].reshape(alpha.size * rho.size, -1) + pitch = np.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), 30).reshape( + pitch_res, alpha.size, rho.size + ) result = bi(f, pitch) """ if rho is None: - rho = jnp.linspace(0, 1, 10) + rho = jnp.linspace(1e-12, 1, 10) if alpha is None: alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 10) rho = jnp.atleast_1d(rho) @@ -767,15 +767,15 @@ def bounce_average( -------- .. code-block:: python - ba, items = bounce_average(eq, return_items=True) + rho = np.linspace(1e-12, 1, 6) + alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) + ba, items = bounce_average(eq, rho=rho, alpha=alpha, return_items=True) name = "g_zz" f = eq.compute(name, grid=items["grid"], data=items["data"])[name] - # Same pitch for every field line may give sparse result. - # See tests/test_bounce_integral.py::test_pitch_input for an alternative. 
- pitch_res = 30 - B = items["data"]["B"] - pitch = jnp.linspace(1 / B.max(), 1 / B.min(), pitch_res) - pitch = pitch[:, jnp.newaxis, jnp.newaxis] + B = items["data"]["B"].reshape(alpha.size * rho.size, -1) + pitch = np.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), 30).reshape( + pitch_res, alpha.size, rho.size + ) result = ba(f, pitch) """ diff --git a/desc/grid.py b/desc/grid.py index 84e4044b02..c2c955d730 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -377,28 +377,18 @@ def expand(self, x, surface_label="rho"): """ assert surface_label in {"rho", "theta", "zeta"} + errorif( + not hasattr(self, f"_inverse_{surface_label}_idx"), + AttributeError, + "expand operation undefined for jit compatible grids", + ) if surface_label == "rho": - errorif( - not hasattr(self, "_inverse_rho_idx"), - AttributeError, - "expand operation undefined for jit compatible grids", - ) assert len(x) == self.num_rho return x[self.inverse_rho_idx] if surface_label == "theta": - errorif( - not hasattr(self, "_inverse_theta_idx"), - AttributeError, - "expand operation undefined for jit compatible grids", - ) assert len(x) == self.num_theta return x[self.inverse_theta_idx] if surface_label == "zeta": - errorif( - not hasattr(self, "_inverse_zeta_idx"), - AttributeError, - "expand operation undefined for jit compatible grids", - ) assert len(x) == self.num_zeta return x[self.inverse_zeta_idx] @@ -1580,16 +1570,14 @@ def find_least_rational_surfaces( return rho, io -def _meshgrid_expand(x, rho_size, theta_size, zeta_size, surface_label="rho"): +def _meshgrid_expand(x, a_size, b_size, c_size, order=0): """Expand ``x`` by duplicating elements to match a meshgrid pattern. It is common to construct a meshgrid in the following manner. .. code-block:: python - # In this meshgrid, the fastest (slowest) changing coordinate is zeta (theta). - r, t, z = jnp.meshgrid(rho, theta, zeta, indexing="ij") - r, t, z = r.ravel(), t.ravel(), z.ravel() - nodes = jnp.column_stack([r, t, z]) + A, B, C = jnp.meshgrid(a, b, c, indexing="ij") + nodes = jnp.column_stack(tuple(map(np.ravel, (A, B, C)))) grid = Grid(nodes, sort=False, jitable=True) Since ``jitable=True`` was specified, the attribute ``grid.inverse_*_idx`` @@ -1602,8 +1590,10 @@ def _meshgrid_expand(x, rho_size, theta_size, zeta_size, surface_label="rho"): Stores the values of a surface function (constant over a surface) for all unique surfaces of the specified label on the grid. The length of ``x`` should match the number of unique surfaces of - the corresponding label in this grid. ``x`` should be sorted such - that x[i] corresponds to the value associated with surface_label[i]. + the corresponding label in this grid. + order : int + 0, 1, or 2. Corresponds to whether ``x`` is a surface function + of a, b, or c in the example code in the docstring. Returns ------- @@ -1611,23 +1601,14 @@ def _meshgrid_expand(x, rho_size, theta_size, zeta_size, surface_label="rho"): ``x`` expanded to match the meshgrid pattern. """ - assert surface_label in {"rho", "theta", "zeta"}, ( - "These labels need not correspond to DESC coordinates. " - "They should correspond to the order the arrays were given to construct " - "the meshgrid as shown in the example code in the docstring." 
- ) - if surface_label == "rho": - assert len(x) == rho_size - return jnp.tile( - repeat(x, zeta_size, total_repeat_length=rho_size * zeta_size), theta_size - ) - if surface_label == "theta": - assert len(x) == theta_size - return repeat( - x, - rho_size * zeta_size, - total_repeat_length=rho_size * theta_size * zeta_size, - ) - if surface_label == "zeta": - assert len(x) == zeta_size - return jnp.tile(x, rho_size * theta_size) + order = int(order) + assert 0 <= order <= 2 + if order == 0: + assert len(x) == a_size + return repeat(x, b_size * c_size, total_repeat_length=a_size * b_size * c_size) + if order == 1: + assert len(x) == b_size + return jnp.tile(repeat(x, c_size, total_repeat_length=b_size * c_size), a_size) + if order == 2: + assert len(x) == c_size + return jnp.tile(x, a_size * b_size) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 19c3da5cad..be4654a8ec 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -131,10 +131,10 @@ def test_polyval(): @pytest.mark.unit -def test_pitch_input(): - """Test different ways of specifying pitch.""" +def test_pitch_and_hairy_ball_theorem(): + """Test different ways of specifying pitch and ensure B does not vanish.""" eq = get("HELIOTRON") - rho = np.linspace(0, 1, 6) + rho = np.linspace(1e-12, 1, 6) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) ba, items = bounce_average(eq, rho=rho, alpha=alpha, return_items=True) name = "g_zz" @@ -143,56 +143,20 @@ def test_pitch_input(): # Same pitch for every field line may give sparse result. pitch_res = 30 B = items["data"]["B"] + assert not np.isclose(B, 0, atol=1e-19).any(), "B should never vanish." pitch = np.linspace(1 / B.max(), 1 / B.min(), pitch_res)[:, np.newaxis, np.newaxis] result = ba(f, pitch) assert np.isfinite(result).any() # specify pitch per field line B = B.reshape(alpha.size * rho.size, -1) - eps = 1e-5 # FIXME: vanishing B-field bug. - pitch = np.linspace( - 1 / (B.max(axis=-1) + eps), - 1 / (B.min(axis=-1) + eps), - pitch_res, - ).reshape(pitch_res, alpha.size, rho.size) + pitch = np.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), pitch_res).reshape( + pitch_res, alpha.size, rho.size + ) result = ba(f, pitch) assert np.isfinite(result).any() -@pytest.mark.unit -def test_hairy_ball_theorem(): - """Ensure |B| does not vanish after coordinate mapping.""" - eq = get("W7-X") - rho = np.linspace(0, 1, 5) - alpha = np.linspace(0, 2 * np.pi, 2) - zeta = np.linspace(0, 4 * np.pi, 10) - - grid_jit, data_jit = field_line_to_desc_coords(eq, rho, alpha, zeta, jitable=True) - data_jit = eq.compute("|B|", grid=grid_jit, data=data_jit) - grid_no_jit, data_no_jit = field_line_to_desc_coords( - eq, rho, alpha, zeta, jitable=False - ) - data_no_jit = eq.compute("|B|", grid=grid_no_jit, data=data_no_jit) - print(data_jit["|B|"]) - print() - print(data_no_jit["|B|"]) - # I think there are two separate bugs. - # Bug 1: - # Regardless of any logic issues with the coordinate mapping etc., I think - # these two arrays should be approximately equal. However, the jitted |B| - # has varies between 0 and 10^11, while the no jit |B| varies from 0 to 3. - np.testing.assert_allclose( - data_jit["|B|"], - data_no_jit["|B|"], - err_msg="jitting issue? grid weights issue?" - " mixing grids with different weights to compute things?", - ) - # Bug 2: vanishing |B|. - assert np.all( - ~np.isclose(data_no_jit["|B|"], 0) - ), "logic issue in field_line_to_desc_coords?" 
- - # @pytest.mark.unit def test_elliptic_integral_limit(): """Test bounce integral matches elliptic integrals. diff --git a/tests/test_grid.py b/tests/test_grid.py index 62309419ac..e0aa8aa8a5 100644 --- a/tests/test_grid.py +++ b/tests/test_grid.py @@ -11,6 +11,7 @@ Grid, LinearGrid, QuadratureGrid, + _meshgrid_expand, dec_to_cf, find_least_rational_surfaces, find_most_rational_surfaces, @@ -754,6 +755,24 @@ def test(surface_label, grid): test("theta", cg_sym) test("zeta", cg_sym) + @pytest.mark.unit + def test_meshgrid_expand(self): + """Ensure alternative expansion works for grids made from meshgrid.""" + rho = np.linspace(0, 1, 4) + alpha = np.linspace(0, 2 * np.pi, 2) + zeta = np.linspace(0, 10 * np.pi, 3) + r, a, z = np.meshgrid(rho, alpha, zeta, indexing="ij") + r, a, z = r.ravel(), a.ravel(), z.ravel() + np.testing.assert_allclose( + r, _meshgrid_expand(rho, rho.size, alpha.size, zeta.size, order=0) + ) + np.testing.assert_allclose( + a, _meshgrid_expand(alpha, rho.size, alpha.size, zeta.size, order=1) + ) + np.testing.assert_allclose( + z, _meshgrid_expand(zeta, rho.size, alpha.size, zeta.size, order=2) + ) + @pytest.mark.unit def test_find_most_rational_surfaces(): From 165dfa981245097bdbf3d083b306ce9cc314f616 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 30 Mar 2024 00:57:36 -0400 Subject: [PATCH 048/241] Add tests for bounce integralities --- desc/backend.py | 13 +++ desc/compute/_core.py | 23 ---- desc/compute/_field.py | 21 ++-- desc/compute/bounce_integral.py | 179 ++++++++++++++++---------------- tests/test_bounce_integral.py | 24 +++++ 5 files changed, 135 insertions(+), 125 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index 252af51a86..a7c98ac6ab 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -843,6 +843,19 @@ def take( if fill_value is None: # TODO: Interpret default fill value based on dtype of a. 
fill_value = np.nan + if fill_value is None: + # copy jax logic + # https://jax.readthedocs.io/en/latest/_modules/jax/_src/lax/slicing.html#gather + if np.issubdtype(a.dtype, np.inexact): + fill_value = np.nan + elif np.issubdtype(a.dtype, np.signedinteger): + fill_value = np.iinfo(a.dtype).min + elif np.issubdtype(a.dtype, np.unsignedinteger): + fill_value = np.iinfo(a.dtype).max + elif a.dtype == np.bool_: + fill_value = True + else: + raise ValueError(f"Unsupported dtype {a.dtype}.") out = np.where( (-a.size <= indices) & (indices < a.size), np.take(a, indices, axis, out, mode="wrap"), diff --git a/desc/compute/_core.py b/desc/compute/_core.py index 9a39a22c5f..e16f2df059 100644 --- a/desc/compute/_core.py +++ b/desc/compute/_core.py @@ -28,29 +28,6 @@ def _0(params, transforms, profiles, data, **kwargs): return data -@register_compute_fun( - name="1", - label="1", - units="~", - units_long="None", - description="Ones", - dim=1, - params=[], - transforms={"grid": []}, - profiles=[], - coordinates="rtz", - data=[], - parameterization=[ - "desc.equilibrium.equilibrium.Equilibrium", - "desc.geometry.core.Surface", - "desc.geometry.core.Curve", - ], -) -def _1(params, transforms, profiles, data, **kwargs): - data["1"] = jnp.ones(transforms["grid"].num_nodes) - return data - - @register_compute_fun( name="R", label="R", diff --git a/desc/compute/_field.py b/desc/compute/_field.py index f2998a41b8..3e4e001c05 100644 --- a/desc/compute/_field.py +++ b/desc/compute/_field.py @@ -2238,13 +2238,12 @@ def _B_mag(params, transforms, profiles, data, **kwargs): ], ) def _B_mag_r(params, transforms, profiles, data, **kwargs): - data["|B|_r"] = safediv( + data["|B|_r"] = ( data["B^theta_r"] * data["B_theta"] + data["B^theta"] * data["B_theta_r"] + data["B^zeta_r"] * data["B_zeta"] - + data["B^zeta"] * data["B_zeta_r"], - 2 * data["|B|"], - ) + + data["B^zeta"] * data["B_zeta_r"] + ) / (2 * data["|B|"]) return data @@ -2272,13 +2271,12 @@ def _B_mag_r(params, transforms, profiles, data, **kwargs): ], ) def _B_mag_t(params, transforms, profiles, data, **kwargs): - data["|B|_t"] = safediv( + data["|B|_t"] = ( data["B^theta_t"] * data["B_theta"] + data["B^theta"] * data["B_theta_t"] + data["B^zeta_t"] * data["B_zeta"] - + data["B^zeta"] * data["B_zeta_t"], - 2 * data["|B|"], - ) + + data["B^zeta"] * data["B_zeta_t"] + ) / (2 * data["|B|"]) return data @@ -2306,13 +2304,12 @@ def _B_mag_t(params, transforms, profiles, data, **kwargs): ], ) def _B_mag_z(params, transforms, profiles, data, **kwargs): - data["|B|_z"] = safediv( + data["|B|_z"] = ( data["B^theta_z"] * data["B_theta"] + data["B^theta"] * data["B_theta_z"] + data["B^zeta_z"] * data["B_zeta"] - + data["B^zeta"] * data["B_zeta_z"], - 2 * data["|B|"], - ) + + data["B^zeta"] * data["B_zeta_z"] + ) / (2 * data["|B|"]) return data diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 99fc5a72be..2898e0a387 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -107,13 +107,11 @@ def take_mask(a, mask, size=None, fill_value=jnp.nan): a : ndarray The source array. mask : ndarray - Boolean mask to index into ``a``. - Should have same size as ``a``. + Boolean mask to index into ``a``. Should have same size as ``a``. size : Elements of ``a`` at the first size True indices of ``mask`` will be returned. If there are fewer elements than size indicates, the returned array will be - padded with fill_value. - Defaults to ``a.size``. + padded with fill_value. Defaults to ``a.size``. 
fill_value : When there are fewer than the indicated number of elements, the remaining elements will be filled with ``fill_value``. @@ -139,55 +137,12 @@ def take_mask(a, mask, size=None, fill_value=jnp.nan): return a_mask -def diff_mask(a, mask, n=1, axis=-1, prepend=None): - """Calculate the n-th discrete difference along the given axis of ``a[mask]``. - - The first difference is given by ``out[i] = a[i+1] - a[i]`` along - the given axis, higher differences are calculated by using `diff` - recursively. This method is JIT compatible. - - Parameters - ---------- - a : array_like - Input array - mask : array_like - Boolean mask to index like ``a[mask]`` prior to computing difference. - Should have same size as ``a``. - n : int, optional - The number of times values are differenced. - axis : int, optional - The axis along which the difference is taken, default is the - last axis. - prepend : array_like, optional - Values to prepend to `a` along axis prior to performing the difference. - Scalar values are expanded to arrays with length 1 in the direction of - axis and the shape of the input array in along all other axes. - Otherwise, the dimension and shape must match `a` except along axis. - - Returns - ------- - diff : ndarray - The n-th differences. The shape of the output is the same as ``a`` - except along ``axis`` where the dimension is smaller by ``n``. The - type of the output is the same as the type of the difference - between any two elements of ``a``. - - Notes - ----- - The result is padded with nan at the end to be jit compilable. - - """ - prepend = () if prepend is None else (prepend,) - return jnp.diff(take_mask(a, mask, fill_value=jnp.nan), n, axis, *prepend) - - @vmap -def _last_element(a, mask): - """Return last element of ``a`` where ``mask`` is nonzero.""" - assert a.ndim == mask.ndim == 1 - assert a.shape == mask.shape - assert mask.dtype == bool - idx = flatnonzero(~mask, size=1, fill_value=a.size) - 1 +def _last_value(a): + """Assuming a is padded with nan at the right, return the last non-nan value.""" + assert a.ndim == 1 + a = a[::-1] + idx = flatnonzero(~jnp.isnan(a), size=1, fill_value=a.size) return a[idx] @@ -444,7 +399,7 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): # If, in addition, the last intersect satisfies B_z < 0, then we have the # required information to compute a bounce integral between these points. # The below logic handles both tasks. - last_intersect = jnp.squeeze(_last_element(intersect, is_intersect)) + last_intersect = jnp.squeeze(_last_value(intersect)) bp1 = _roll_and_replace(bp1, bp1[:, 0] > bp2[:, 0], last_intersect - knots[-1]) # Notice that for the latter, an "approximation" is made that the field line is # periodic such that ζ = knots[-1] can be interpreted as ζ = 0 so that the @@ -595,14 +550,14 @@ def bounce_integral( -------- .. code-block:: python - rho = np.linspace(1e-12, 1, 6) - alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) + rho = jnp.linspace(1e-12, 1, 6) + alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 5) bi, items = bounce_integral(eq, rho=rho, alpha=alpha, return_items=True) name = "g_zz" f = eq.compute(name, grid=items["grid"], data=items["data"])[name] B = items["data"]["B"].reshape(alpha.size * rho.size, -1) - pitch = np.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), 30).reshape( - pitch_res, alpha.size, rho.size + pitch = jnp.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), 30).reshape( + -1, alpha.size, rho.size ) result = bi(f, pitch) @@ -767,14 +722,14 @@ def bounce_average( -------- .. 
code-block:: python - rho = np.linspace(1e-12, 1, 6) - alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) + rho = jnp.linspace(1e-12, 1, 6) + alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 5) ba, items = bounce_average(eq, rho=rho, alpha=alpha, return_items=True) name = "g_zz" f = eq.compute(name, grid=items["grid"], data=items["data"])[name] B = items["data"]["B"].reshape(alpha.size * rho.size, -1) - pitch = np.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), 30).reshape( - pitch_res, alpha.size, rho.size + pitch = jnp.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), 30).reshape( + -1, alpha.size, rho.size ) result = ba(f, pitch) @@ -815,6 +770,79 @@ def _bounce_average(f, pitch=None): return _bounce_average +def field_line_to_desc_coords(eq, rho, alpha, zeta, jitable=True): + """Get DESC grid from unique field line coordinates.""" + r, a, z = jnp.meshgrid(rho, alpha, zeta, indexing="ij") + r, a, z = r.ravel(), a.ravel(), z.ravel() + # Map these Clebsch-Type field-line coordinates to DESC coordinates. + # Note that the rotational transform can be computed apriori because it is a single + # variable function of rho, and the coordinate mapping does not change rho. Once + # this is known, it is simple to compute theta_PEST from alpha. Then we transform + # from straight field-line coordinates to DESC coordinates with the method + # compute_theta_coords. This is preferred over transforming from Clebsch-Type + # coordinates to DESC coordinates directly with the more general method + # map_coordinates. That method requires an initial guess to be compatible with JIT, + # and generating a reasonable initial guess requires computing the rotational + # transform to approximate theta_PEST and the poloidal stream function anyway. + # TODO: map coords recently updated, so maybe just switch to that + lg = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym) + lg_data = eq.compute(["iota", "iota_r", "iota_rr"], grid=lg) + data = { + d: _meshgrid_expand(lg.compress(lg_data[d]), rho.size, alpha.size, zeta.size) + for d in lg_data + if data_index["desc.equilibrium.equilibrium.Equilibrium"][d]["coordinates"] + == "r" + } + sfl_coords = jnp.column_stack([r, a + data["iota"] * z, z]) + desc_coords = eq.compute_theta_coords(sfl_coords) + grid = Grid(desc_coords, jitable=jitable) + return grid, data + + +# Current algorithm used for bounce integrals no longer requires these +# two functions. TODO: Delete before merge. +def diff_mask(a, mask, n=1, axis=-1, prepend=None): + """Calculate the n-th discrete difference along the given axis of ``a[mask]``. + + The first difference is given by ``out[i] = a[i+1] - a[i]`` along + the given axis, higher differences are calculated by using `diff` + recursively. This method is JIT compatible. + + Parameters + ---------- + a : array_like + Input array + mask : array_like + Boolean mask to index like ``a[mask]`` prior to computing difference. + Should have same size as ``a``. + n : int, optional + The number of times values are differenced. + axis : int, optional + The axis along which the difference is taken, default is the + last axis. + prepend : array_like, optional + Values to prepend to `a` along axis prior to performing the difference. + Scalar values are expanded to arrays with length 1 in the direction of + axis and the shape of the input array in along all other axes. + Otherwise, the dimension and shape must match `a` except along axis. + + Returns + ------- + diff : ndarray + The n-th differences. 
The shape of the output is the same as ``a`` + except along ``axis`` where the dimension is smaller by ``n``. The + type of the output is the same as the type of the difference + between any two elements of ``a``. + + Notes + ----- + The result is padded with nan at the end to be jit compilable. + + """ + prepend = () if prepend is None else (prepend,) + return jnp.diff(take_mask(a, mask, fill_value=jnp.nan), n, axis, *prepend) + + def stretch_batches(in_arr, in_batch_size, out_batch_size, fill): """Stretch batches of ``in_arr``. @@ -855,32 +883,3 @@ def stretch_batches(in_arr, in_batch_size, out_batch_size, fill): axis=-1, ) return out_arr - - -def field_line_to_desc_coords(eq, rho, alpha, zeta, jitable=True): - """Get DESC grid from unique field line coordinates.""" - r, a, z = jnp.meshgrid(rho, alpha, zeta, indexing="ij") - r, a, z = r.ravel(), a.ravel(), z.ravel() - # Map these Clebsch-Type field-line coordinates to DESC coordinates. - # Note that the rotational transform can be computed apriori because it is a single - # variable function of rho, and the coordinate mapping does not change rho. Once - # this is known, it is simple to compute theta_PEST from alpha. Then we transform - # from straight field-line coordinates to DESC coordinates with the method - # compute_theta_coords. This is preferred over transforming from Clebsch-Type - # coordinates to DESC coordinates directly with the more general method - # map_coordinates. That method requires an initial guess to be compatible with JIT, - # and generating a reasonable initial guess requires computing the rotational - # transform to approximate theta_PEST and the poloidal stream function anyway. - # TODO: map coords recently updated, so maybe just switch to that - lg = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym) - lg_data = eq.compute(["iota", "iota_r", "iota_rr"], grid=lg) - data = { - d: _meshgrid_expand(lg.compress(lg_data[d]), rho.size, alpha.size, zeta.size) - for d in lg_data - if data_index["desc.equilibrium.equilibrium.Equilibrium"][d]["coordinates"] - == "r" - } - sfl_coords = jnp.column_stack([r, a + data["iota"] * z, z]) - desc_coords = eq.compute_theta_coords(sfl_coords) - grid = Grid(desc_coords, jitable=jitable) - return grid, data diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index be4654a8ec..37a21a75dd 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -6,6 +6,7 @@ from desc.backend import fori_loop, put, root_scalar from desc.compute.bounce_integral import ( + _last_value, bounce_average, bounce_integral, compute_bounce_points, @@ -14,6 +15,7 @@ polyder, polyint, polyval, + take_mask, ) from desc.continuation import solve_continuation_automatic from desc.equilibrium import Equilibrium @@ -29,6 +31,28 @@ from desc.profiles import PowerSeriesProfile +@pytest.mark.unit +def test_mask_operation(): + """Test custom masked array operation.""" + rows = 5 + cols = 7 + a = np.random.rand(rows, cols) + nan_idx = np.random.choice(rows * cols, size=(rows * cols) // 2, replace=False) + a.ravel()[nan_idx] = np.nan + taken = take_mask(a, ~np.isnan(a)) + last = _last_value(taken) + for i in range(rows): + desired = a[i, ~np.isnan(a[i])] + np.testing.assert_allclose( + actual=taken[i], + desired=np.pad(desired, (0, cols - desired.size), constant_values=np.nan), + err_msg="take_mask", + ) + np.testing.assert_allclose( + actual=last[i], desired=desired[-1], err_msg="_last_value" + ) + + @pytest.mark.unit def test_cubic_poly_roots(): """Test 
vectorized computation of cubic polynomial exact roots.""" From c94c6fa95e45778a17983720ff08b246a83c8fe3 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 30 Mar 2024 01:00:29 -0400 Subject: [PATCH 049/241] Fix extraneous optional parameter guard in desc.backend.take --- desc/backend.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index a7c98ac6ab..88358933f0 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -840,9 +840,6 @@ def take( ): """A numpy implementation of jnp.take.""" if mode == "fill": - if fill_value is None: - # TODO: Interpret default fill value based on dtype of a. - fill_value = np.nan if fill_value is None: # copy jax logic # https://jax.readthedocs.io/en/latest/_modules/jax/_src/lax/slicing.html#gather From 91a539fc91a93f03018236066521c5dc58cabc64 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 30 Mar 2024 01:04:20 -0400 Subject: [PATCH 050/241] Set fill value to 0 in def _last_value in case all values are nan. --- desc/compute/bounce_integral.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 2898e0a387..63fda29936 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -142,7 +142,7 @@ def _last_value(a): """Assuming a is padded with nan at the right, return the last non-nan value.""" assert a.ndim == 1 a = a[::-1] - idx = flatnonzero(~jnp.isnan(a), size=1, fill_value=a.size) + idx = flatnonzero(~jnp.isnan(a), size=1, fill_value=0) return a[idx] From 7e5fbd0027ceccbcaea4f998e232f9b9ac3c7d5a Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 30 Mar 2024 20:16:20 -0400 Subject: [PATCH 051/241] Add jitable desc_grid_from_field_line_coords method --- desc/backend.py | 2 +- desc/compute/bounce_integral.py | 66 ++++++++------------------- desc/compute/utils.py | 2 +- desc/equilibrium/coords.py | 81 ++++++++++++++++++++++++++++++++- tests/test_bounce_integral.py | 10 ++-- 5 files changed, 105 insertions(+), 56 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index 88358933f0..db48b2ad4a 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -825,7 +825,7 @@ def flatnonzero(a, size=None, fill_value=0): """A numpy implementation of jnp.flatnonzero.""" nz = np.flatnonzero(a) if size is not None: - nz = np.append(nz, np.repeat(fill_value, max(size - nz.size, 0))) + nz = np.pad(nz, (0, max(size - nz.size, 0)), constant_values=fill_value) return nz def take( diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 63fda29936..a7d7bffe1c 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -5,9 +5,7 @@ from interpax import CubicHermiteSpline, interp1d from desc.backend import complex_sqrt, flatnonzero, jnp, put, put_along_axis, take, vmap -from desc.grid import Grid, LinearGrid, _meshgrid_expand - -from .data_index import data_index +from desc.equilibrium.coords import desc_grid_from_field_line_coords # vmap to compute a bounce integral for every pitch along every field line. @@ -99,7 +97,7 @@ def tanh_sinh_quadrature(resolution=7): @vmap -def take_mask(a, mask, size=None, fill_value=jnp.nan): +def take_mask(a, mask, size=None, fill_value=None): """JIT compilable method to return ``a[mask][:size]`` padded by ``fill_value``. Parameters @@ -108,13 +106,17 @@ def take_mask(a, mask, size=None, fill_value=jnp.nan): The source array. mask : ndarray Boolean mask to index into ``a``. Should have same size as ``a``. 
- size : + size : int Elements of ``a`` at the first size True indices of ``mask`` will be returned. If there are fewer elements than size indicates, the returned array will be padded with fill_value. Defaults to ``a.size``. fill_value : When there are fewer than the indicated number of elements, the remaining elements will be filled with ``fill_value``. + Defaults to NaN for inexact types, + the largest negative value for signed types, + the largest positive value for unsigned types, + and True for booleans. Returns ------- @@ -139,7 +141,7 @@ def take_mask(a, mask, size=None, fill_value=jnp.nan): @vmap def _last_value(a): - """Assuming a is padded with nan at the right, return the last non-nan value.""" + """Return the last non-nan value in a.""" assert a.ndim == 1 a = a[::-1] idx = flatnonzero(~jnp.isnan(a), size=1, fill_value=0) @@ -471,7 +473,7 @@ def bounce_integral( pitch=None, rho=None, alpha=None, - zeta=20, + zeta=jnp.linspace(0, 10 * jnp.pi, 20), quadrature=tanh_sinh_quadrature, **kwargs, ): @@ -504,18 +506,17 @@ def bounce_integral( where in the latter the labels are interpreted as indices that correspond to that field line. If an additional axis exists on the left, it is the batch axis as usual. - rho : ndarray or float + rho : ndarray Unique flux surface label coordinates. - alpha : ndarray or float + alpha : ndarray Unique field line label coordinates over a constant rho surface. - zeta : ndarray or int + zeta : ndarray A cubic spline of the integrand is computed at these values of the field line following coordinate, for every field line in the meshgrid formed from rho and alpha specified above. The number of knots specifies the grid resolution as increasing the number of knots increases the accuracy of representing the integrand and the accuracy of the locations of the bounce points. - If an integer is given, that many knots are linearly spaced from 0 to 10 pi. quadrature : callable The quadrature scheme used to evaluate the integral. Should return quadrature points and weights when called. @@ -569,12 +570,10 @@ def bounce_integral( rho = jnp.atleast_1d(rho) alpha = jnp.atleast_1d(alpha) zeta = jnp.atleast_1d(zeta) - if zeta.size == 1: - zeta = jnp.linspace(0, 10 * jnp.pi, zeta.item()) R = rho.size A = alpha.size - grid, data = field_line_to_desc_coords(eq, rho, alpha, zeta) + grid, data = desc_grid_from_field_line_coords(eq, rho, alpha, zeta) data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid, data=data) B_sup_z = data["B^zeta"].reshape(A * R, -1) B = data["|B|"].reshape(A * R, -1) @@ -641,7 +640,7 @@ def bounce_average( pitch=None, rho=None, alpha=None, - zeta=20, + zeta=jnp.linspace(0, 10 * jnp.pi, 20), quadrature=tanh_sinh_quadrature, **kwargs, ): @@ -675,11 +674,11 @@ def bounce_average( where in the latter the labels are interpreted as indices that correspond to that field line. If an additional axis exists on the left, it is the batch axis as usual. - rho : ndarray or float + rho : ndarray Unique flux surface label coordinates. - alpha : ndarray or float + alpha : ndarray Unique field line label coordinates over a constant rho surface. - zeta : ndarray or int + zeta : ndarray A cubic spline of the integrand is computed at these values of the field line following coordinate, for every field line in the meshgrid formed from rho and alpha specified above. 
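Note on the ``quadrature`` argument of ``bounce_integral`` and ``bounce_average`` above: it is only required to be a callable that returns quadrature points and weights, and extra keyword arguments are forwarded to it. A minimal sketch of a drop-in alternative, assuming (as with the tanh-sinh default) that the nodes are expected on the open interval (-1, 1); the name ``legendre_quadrature`` is illustrative and not part of this patch:

.. code-block:: python

    import numpy as np

    def legendre_quadrature(resolution=7):
        """Return Gauss-Legendre quadrature points and weights on (-1, 1).

        Any callable with this signature satisfies the documented interface;
        endpoint-clustered rules such as tanh-sinh are generally better suited
        to integrands that are singular at the bounce points.
        """
        x, w = np.polynomial.legendre.leggauss(resolution)
        return x, w

    # e.g. bounce_integral(eq, quadrature=legendre_quadrature, resolution=15)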
@@ -770,35 +769,6 @@ def _bounce_average(f, pitch=None): return _bounce_average -def field_line_to_desc_coords(eq, rho, alpha, zeta, jitable=True): - """Get DESC grid from unique field line coordinates.""" - r, a, z = jnp.meshgrid(rho, alpha, zeta, indexing="ij") - r, a, z = r.ravel(), a.ravel(), z.ravel() - # Map these Clebsch-Type field-line coordinates to DESC coordinates. - # Note that the rotational transform can be computed apriori because it is a single - # variable function of rho, and the coordinate mapping does not change rho. Once - # this is known, it is simple to compute theta_PEST from alpha. Then we transform - # from straight field-line coordinates to DESC coordinates with the method - # compute_theta_coords. This is preferred over transforming from Clebsch-Type - # coordinates to DESC coordinates directly with the more general method - # map_coordinates. That method requires an initial guess to be compatible with JIT, - # and generating a reasonable initial guess requires computing the rotational - # transform to approximate theta_PEST and the poloidal stream function anyway. - # TODO: map coords recently updated, so maybe just switch to that - lg = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym) - lg_data = eq.compute(["iota", "iota_r", "iota_rr"], grid=lg) - data = { - d: _meshgrid_expand(lg.compress(lg_data[d]), rho.size, alpha.size, zeta.size) - for d in lg_data - if data_index["desc.equilibrium.equilibrium.Equilibrium"][d]["coordinates"] - == "r" - } - sfl_coords = jnp.column_stack([r, a + data["iota"] * z, z]) - desc_coords = eq.compute_theta_coords(sfl_coords) - grid = Grid(desc_coords, jitable=jitable) - return grid, data - - # Current algorithm used for bounce integrals no longer requires these # two functions. TODO: Delete before merge. def diff_mask(a, mask, n=1, axis=-1, prepend=None): @@ -840,7 +810,7 @@ def diff_mask(a, mask, n=1, axis=-1, prepend=None): """ prepend = () if prepend is None else (prepend,) - return jnp.diff(take_mask(a, mask, fill_value=jnp.nan), n, axis, *prepend) + return jnp.diff(take_mask(a, mask), n, axis, *prepend) def stretch_batches(in_arr, in_batch_size, out_batch_size, fill): diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 677b29e669..3a6aeac076 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -1158,7 +1158,7 @@ def surface_integrals_transform(grid, surface_label="rho"): Input ----- - If ``q`` has one-dimension, then it should have shape + If ``q`` has one dimension, then it should have shape (``grid.num_nodes``, ). If ``q`` has multiple dimensions, then it should have shape (``grid.num_nodes``, *f.shape). diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index ec823faaef..5eb439d60b 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -9,7 +9,7 @@ from desc.backend import fori_loop, jit, jnp, put, root, root_scalar, vmap from desc.compute import compute as compute_fun from desc.compute import data_index, get_profiles, get_transforms -from desc.grid import ConcentricGrid, Grid, LinearGrid, QuadratureGrid +from desc.grid import ConcentricGrid, Grid, LinearGrid, QuadratureGrid, _meshgrid_expand from desc.transform import Transform from desc.utils import setdefault @@ -262,6 +262,85 @@ def fixup(x, *args): return out +def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): + """Return DESC coordinate grid from given Clebsch-Type field-line coordinates. 
+ + Create a meshgrid from the given field line coordinates, + and transform this to a meshgrid in DESC coordinates. + + Parameters + ---------- + eq : Equilibrium + Equilibrium on which to perform coordinate mapping. + rho : ndarray + Unique flux surface label coordinates. + alpha : ndarray + Unique field line label coordinates over a constant rho surface. + zeta : ndarray + Unique Field line-following ζ coordinates. + + Returns + ------- + grid_desc : Grid + DESC coordinate grid. + data_desc : dict + Some flux surface quantities that may be more accurate than what + can be computed on the returned grid. + + """ + # The rotational transform can be computed apriori to the coordinate + # transformation because it is a single variable function of the flux surface + # label rho, and the coordinate mapping does not change rho. Once it is known, + # we can compute the straight field-line poloidal angle theta_PEST from the + # field-line label alpha. + # Then we transform from straight field-line coordinates to DESC coordinates + # with the root-finding method ``compute_theta_coords``. + # This is preferable to transforming from field-line coordinates to DESC + # coordinates directly with the more general root-finding method + # ``map_coordinates``. That method requires an initial guess, and generating + # a reasonable initial guess requires computing the rotational transform to + # approximate theta_PEST and the poloidal stream function anyway. + + # Choose nodes such that even spacing will yield correct flux surface integrals. + t = jnp.linspace(0, 2 * jnp.pi, 2 * eq.M_grid + 1, endpoint=False) + z = jnp.linspace(0, 2 * jnp.pi / eq.NFP, 2 * eq.N_grid + 1, endpoint=False) + grid = Grid( + nodes=jnp.column_stack( + tuple(map(jnp.ravel, jnp.meshgrid(rho, t, z, indexing="ij"))) + ), + spacing=jnp.array([1 / rho.size, 2 * jnp.pi / t.size, 2 * jnp.pi / z.size])[ + jnp.newaxis + ], + sort=False, + jitable=True, + _unique_rho_idx=jnp.arange(rho.size) * t.size * z.size, + ) + # We only need to compute the rotational transform to transform to straight + # field-line coordinates. However, it is a good idea to compute other flux + # surface quantities on this grid because the DESC coordinates corresponding + # to the given field line coordinates may not be uniformly distributed over + # flux surfaces. This would make quadratures performed over flux surfaces + # on the returned DESC grid inaccurate. + data = eq.compute(names=["iota", "iota_r"], grid=grid) + data_desc = { + d: _meshgrid_expand(grid.compress(data[d]), rho.size, alpha.size, zeta.size) + for d in data + if data_index["desc.equilibrium.equilibrium.Equilibrium"][d]["coordinates"] + == "r" + } + # meshgrid of Clebsch-Type field-line coordinates + r, a, z_fl = map(jnp.ravel, jnp.meshgrid(rho, alpha, zeta, indexing="ij")) + coords_sfl = jnp.column_stack([r, a + data_desc["iota"] * z_fl, z_fl]) + coords_desc = eq.compute_theta_coords(coords_sfl) + grid_desc = Grid( + nodes=coords_desc, + sort=False, + jitable=True, + _unique_rho_idx=jnp.arange(rho.size) * alpha.size * zeta.size, + ) + return grid_desc, data_desc + + def is_nested(eq, grid=None, R_lmn=None, Z_lmn=None, L_lmn=None, msg=None): """Check that an equilibrium has properly nested flux surfaces in a plane. 
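For orientation, the mapping implemented by ``desc_grid_from_field_line_coords`` above boils down to three steps: compute the rotational transform once per flux surface, form the straight field-line angle theta_PEST = alpha + iota * zeta on the field-line meshgrid, and root-solve for the DESC poloidal angle with ``compute_theta_coords``. A condensed sketch of those steps, where the equilibrium name and array sizes are arbitrary examples rather than part of the patch:

.. code-block:: python

    import numpy as np
    from desc.examples import get
    from desc.grid import LinearGrid

    eq = get("HELIOTRON")
    rho = np.array([0.3, 0.7])
    alpha = np.array([0.0, np.pi])
    zeta = np.linspace(0, 2 * np.pi, 5)

    # iota is a flux function, so compute it once on a flux-surface grid.
    lg = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym)
    iota = lg.compress(eq.compute("iota", grid=lg)["iota"])  # shape (rho.size,)

    # theta_PEST = alpha + iota * zeta on the (rho, alpha, zeta) meshgrid.
    r, a, z = map(np.ravel, np.meshgrid(rho, alpha, zeta, indexing="ij"))
    theta_pest = a + np.repeat(iota, alpha.size * zeta.size) * z

    # Root-solve theta_PEST = theta + lambda(rho, theta, zeta) for theta.
    desc_nodes = eq.compute_theta_coords(np.column_stack([r, theta_pest, z]))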
diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 37a21a75dd..f788645e71 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -11,7 +11,6 @@ bounce_integral, compute_bounce_points, cubic_poly_roots, - field_line_to_desc_coords, polyder, polyint, polyval, @@ -19,6 +18,7 @@ ) from desc.continuation import solve_continuation_automatic from desc.equilibrium import Equilibrium +from desc.equilibrium.coords import desc_grid_from_field_line_coords from desc.examples import get from desc.geometry import FourierRZToroidalSurface from desc.objectives import ( @@ -155,7 +155,7 @@ def test_polyval(): @pytest.mark.unit -def test_pitch_and_hairy_ball_theorem(): +def test_pitch_and_hairy_ball(): """Test different ways of specifying pitch and ensure B does not vanish.""" eq = get("HELIOTRON") rho = np.linspace(1e-12, 1, 6) @@ -260,12 +260,12 @@ def _compute_bounce_points_with_root_finding( # TODO: avoid separate root finding routines in residual and jac # and use previous desc coords as initial guess for next iteration def residual(zeta, i): - grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) + grid, data = desc_grid_from_field_line_coords(rho, alpha, zeta, eq) data = eq.compute(["|B|"], grid=grid, data=data) return data["|B|"] - pitch[i] def jac(zeta): - grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) + grid, data = desc_grid_from_field_line_coords(rho, alpha, zeta, eq) data = eq.compute(["|B|_z|r,a"], grid=grid, data=data) return data["|B|_z|r,a"] @@ -275,7 +275,7 @@ def jac(zeta): # let us form a boundary mesh around root estimates to limit search domain # of the root finding algorithms. zeta = np.linspace(0, zeta_max, 3 * resolution) - grid, data = field_line_to_desc_coords(rho, alpha, zeta, eq) + grid, data = desc_grid_from_field_line_coords(rho, alpha, zeta, eq) data = eq.compute(["|B|"], grid=grid, data=data) B_norm = data["|B|"].reshape(alpha.size, rho.size, -1) # constant field line chunks From 038e507a7069d55ebd5759cb376d21557ecff120 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 30 Mar 2024 21:33:18 -0400 Subject: [PATCH 052/241] Add inverse and unique idx compute functions for meshgrid --- desc/compute/utils.py | 4 +- desc/equilibrium/coords.py | 35 ++++++++++-- desc/grid.py | 110 +++++++++++++++++++++++++++++++++++-- tests/test_grid.py | 32 +++++++---- 4 files changed, 158 insertions(+), 23 deletions(-) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 3a6aeac076..8adc6482ce 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -893,7 +893,9 @@ def surface_integrals_map(grid, surface_label="rho", expand_out=True, tol=1e-14) # Todo: Define masks as a sparse matrix once sparse matrices are no longer # experimental in jax. - if hasattr(grid, f"num_{surface_label}"): + if hasattr(grid, f"num_{surface_label}") and hasattr( + grid, f"_inverse_{surface_label}_idx" + ): # The ith row of masks is True only at the indices which correspond to the # ith surface. The integral over the ith surface is the dot product of the # ith row vector and the integrand defined over all the surfaces. 
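The ``surface_integrals_map`` guard above relies on the mask construction described in its comments: row i of ``masks`` is True exactly at the nodes that lie on surface i, so each surface integral is a single dot product. A toy NumPy illustration of that idea (the arrays here are made up for the example and play the roles of ``grid.inverse_rho_idx`` and the weighted integrand):

.. code-block:: python

    import numpy as np

    inverse_idx = np.array([0, 0, 1, 1, 2, 2])  # node -> surface index
    num_surfaces = 3
    q = np.arange(6.0)                          # integrand * dS at each node

    # masks[i, j] is True iff node j lies on surface i.
    masks = inverse_idx == np.arange(num_surfaces)[:, np.newaxis]
    integrals = masks @ q                       # one dot product per surface
    np.testing.assert_allclose(integrals, [1.0, 5.0, 9.0])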
diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index 5eb439d60b..43448033f8 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -9,7 +9,15 @@ from desc.backend import fori_loop, jit, jnp, put, root, root_scalar, vmap from desc.compute import compute as compute_fun from desc.compute import data_index, get_profiles, get_transforms -from desc.grid import ConcentricGrid, Grid, LinearGrid, QuadratureGrid, _meshgrid_expand +from desc.grid import ( + ConcentricGrid, + Grid, + LinearGrid, + QuadratureGrid, + _meshgrid_expand, + _meshgrid_inverse_idx, + _meshgrid_unique_idx, +) from desc.transform import Transform from desc.utils import setdefault @@ -304,6 +312,15 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): # Choose nodes such that even spacing will yield correct flux surface integrals. t = jnp.linspace(0, 2 * jnp.pi, 2 * eq.M_grid + 1, endpoint=False) z = jnp.linspace(0, 2 * jnp.pi / eq.NFP, 2 * eq.N_grid + 1, endpoint=False) + label = ["rho", "theta", "zeta"] + unique_idx = { + f"_unique_{label[i]}_idx": idx + for i, idx in enumerate(_meshgrid_unique_idx(rho.size, t.size, z.size)) + } + inverse_idx = { + f"_inverse_{label[i]}_idx": idx + for i, idx in enumerate(_meshgrid_inverse_idx(rho.size, t.size, z.size)) + } grid = Grid( nodes=jnp.column_stack( tuple(map(jnp.ravel, jnp.meshgrid(rho, t, z, indexing="ij"))) @@ -313,7 +330,8 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): ], sort=False, jitable=True, - _unique_rho_idx=jnp.arange(rho.size) * t.size * z.size, + **unique_idx, + **inverse_idx, ) # We only need to compute the rotational transform to transform to straight # field-line coordinates. However, it is a good idea to compute other flux @@ -332,11 +350,16 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): r, a, z_fl = map(jnp.ravel, jnp.meshgrid(rho, alpha, zeta, indexing="ij")) coords_sfl = jnp.column_stack([r, a + data_desc["iota"] * z_fl, z_fl]) coords_desc = eq.compute_theta_coords(coords_sfl) + unique_idx = { + f"_unique_{label[i]}_idx": idx + for i, idx in enumerate(_meshgrid_unique_idx(rho.size, alpha.size, zeta.size)) + } + inverse_idx = { + f"_inverse_{label[i]}_idx": idx + for i, idx in enumerate(_meshgrid_inverse_idx(rho.size, alpha.size, zeta.size)) + } grid_desc = Grid( - nodes=coords_desc, - sort=False, - jitable=True, - _unique_rho_idx=jnp.arange(rho.size) * alpha.size * zeta.size, + nodes=coords_desc, sort=False, jitable=True, **unique_idx, **inverse_idx ) return grid_desc, data_desc diff --git a/desc/grid.py b/desc/grid.py index c2c955d730..40cc3fdaf6 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -254,6 +254,14 @@ def unique_zeta_idx(self): ) return self._unique_zeta_idx + def _inverse_idx_from_unique_idx(self, surface_label): + axis = {"rho": 0, "theta": 1, "zeta": 2}[surface_label] + nodes = jnp.asarray(self.nodes[:, axis]) + unique_idx = getattr(self, f"_unique_{surface_label}_idx") + inverse_idx = jnp.zeros(self.num_nodes, dtype=int) + for i, u in enumerate(unique_idx): + inverse_idx = jnp.where(nodes == nodes[u], i, inverse_idx) + @property def inverse_rho_idx(self): """ndarray: Indices of unique_rho_idx that recover the rho coordinates.""" @@ -420,14 +428,14 @@ def copy_data_from_other(self, x, other_grid, surface_label="rho", tol=1e-14): xc = other_grid.compress(x, surface_label) y = self.expand(xc, surface_label) except AttributeError: - self_nodes = jnp.asarray(self.nodes) - other_nodes = jnp.asarray(other_grid.nodes) axis = {"rho": 0, "theta": 1, "zeta": 2}[surface_label] 
+ self_nodes = jnp.asarray(self.nodes[:, axis]) + other_nodes = jnp.asarray(other_grid.nodes[:, axis]) y = jnp.zeros((self.num_nodes, *x.shape[1:])) def body(i, y): y = jnp.where( - jnp.abs(self_nodes[:, axis] - other_nodes[i, axis]) <= tol, + jnp.abs(self_nodes - other_nodes[i]) <= tol, x[i], y, ) @@ -481,13 +489,15 @@ class Grid(_Grid): Node coordinates, in (rho,theta,zeta) sort : bool Whether to sort the nodes for use with FFT method. + spacing : ndarray of shape(num_nodes, 3) + May be provided to ensure even spacing for surface averages etc. jitable : bool Whether to skip certain checks and conditionals that don't work under jit. Allows grid to be created on the fly with custom nodes, but weights, symmetry etc may be wrong if grid contains duplicate nodes. """ - def __init__(self, nodes, sort=False, jitable=False, **kwargs): + def __init__(self, nodes, sort=False, jitable=False, spacing=None, **kwargs): # Python 3.3 (PEP 412) introduced key-sharing dictionaries. # This change measurably reduces memory usage of objects that # define all attributes in their __init__ method. @@ -533,6 +543,8 @@ def __init__(self, nodes, sort=False, jitable=False, **kwargs): self._L = self.num_nodes self._M = self.num_nodes self._N = self.num_nodes + if spacing is not None: + self._spacing = spacing errorif(len(kwargs), ValueError, f"Got unexpected kwargs {kwargs.keys()}") def _create_nodes(self, nodes): @@ -1576,8 +1588,9 @@ def _meshgrid_expand(x, a_size, b_size, c_size, order=0): It is common to construct a meshgrid in the following manner. .. code-block:: python - A, B, C = jnp.meshgrid(a, b, c, indexing="ij") - nodes = jnp.column_stack(tuple(map(np.ravel, (A, B, C)))) + a, b, c = jnp.meshgrid(a, b, c, indexing="ij") + a, b, c = map(jnp.ravel, (a, b, c)) + nodes = jnp.column_stack([a, b, c]) grid = Grid(nodes, sort=False, jitable=True) Since ``jitable=True`` was specified, the attribute ``grid.inverse_*_idx`` @@ -1591,6 +1604,12 @@ def _meshgrid_expand(x, a_size, b_size, c_size, order=0): for all unique surfaces of the specified label on the grid. The length of ``x`` should match the number of unique surfaces of the corresponding label in this grid. + a_size : int + Size of the first array. + b_size : int + Size of the second array. + c_size : int + Size of the third array. order : int 0, 1, or 2. Corresponds to whether ``x`` is a surface function of a, b, or c in the example code in the docstring. @@ -1612,3 +1631,82 @@ def _meshgrid_expand(x, a_size, b_size, c_size, order=0): if order == 2: assert len(x) == c_size return jnp.tile(x, a_size * b_size) + + +def _meshgrid_inverse_idx(a_size, b_size, c_size): + """Return inverse indices for meshgrid pattern. + + It is common to construct a meshgrid in the following manner. + .. code-block:: python + + a, b, c = jnp.meshgrid(a, b, c, indexing="ij") + a, b, c = map(jnp.ravel, (a, b, c)) + nodes = jnp.column_stack([a, b, c]) + grid = Grid(nodes, sort=False, jitable=True) + + Since ``jitable=True`` was specified, the attribute ``grid.inverse_*_idx`` + This method computes these indices. + + Parameters + ---------- + a_size : int + Size of the first array. + b_size : int + Size of the second array. + c_size : int + Size of the third array. + order : int + 0, 1, or 2. Whether to retrieve unique indices into a, b, or c. + + Returns + ------- + inverse_idx : ndarray, ndarray, ndarray + The inverse indices. 
+ + """ + a = jnp.arange(a_size) + inverse_a_idx = repeat( + a, b_size * c_size, total_repeat_length=a_size * b_size * c_size + ) + b = jnp.arange(b_size) + inverse_b_idx = jnp.tile( + repeat(b, c_size, total_repeat_length=b_size * c_size), a_size + ) + c = jnp.arange(c_size) + inverse_c_idx = jnp.tile(c, a_size * b_size) + return inverse_a_idx, inverse_b_idx, inverse_c_idx + + +def _meshgrid_unique_idx(a_size, b_size, c_size): + """Return unique indices for meshgrid pattern. + + It is common to construct a meshgrid in the following manner. + .. code-block:: python + + a, b, c = jnp.meshgrid(a, b, c, indexing="ij") + a, b, c = map(jnp.ravel, (a, b, c)) + nodes = jnp.column_stack([a, b, c]) + grid = Grid(nodes, sort=False, jitable=True) + + Since ``jitable=True`` was specified, the attribute ``grid.unique_*_idx`` + This method computes these indices. + + Parameters + ---------- + a_size : int + Size of the first array. + b_size : int + Size of the second array. + c_size : int + Size of the third array. + + Returns + ------- + unique_idx : ndarray, ndarray, ndarray + The unique indices. + + """ + unique_a_idx = jnp.arange(a_size) * b_size * c_size + unique_b_idx = jnp.arange(b_size) * c_size + unique_c_idx = jnp.arange(c_size) + return unique_a_idx, unique_b_idx, unique_c_idx diff --git a/tests/test_grid.py b/tests/test_grid.py index e0aa8aa8a5..1b2cccd43d 100644 --- a/tests/test_grid.py +++ b/tests/test_grid.py @@ -12,6 +12,8 @@ LinearGrid, QuadratureGrid, _meshgrid_expand, + _meshgrid_inverse_idx, + _meshgrid_unique_idx, dec_to_cf, find_least_rational_surfaces, find_most_rational_surfaces, @@ -756,22 +758,32 @@ def test(surface_label, grid): test("zeta", cg_sym) @pytest.mark.unit - def test_meshgrid_expand(self): - """Ensure alternative expansion works for grids made from meshgrid.""" - rho = np.linspace(0, 1, 4) - alpha = np.linspace(0, 2 * np.pi, 2) - zeta = np.linspace(0, 10 * np.pi, 3) - r, a, z = np.meshgrid(rho, alpha, zeta, indexing="ij") - r, a, z = r.ravel(), a.ravel(), z.ravel() + def test_meshgrid_idx(self): + """Test unique, inverse idx computing logic from meshgrid.""" + R = np.linspace(0, 1, 4) + T = np.linspace(0, 2 * np.pi, 2) + Z = np.linspace(0, 10 * np.pi, 3) + r, t, z = map(np.ravel, np.meshgrid(R, T, Z, indexing="ij")) np.testing.assert_allclose( - r, _meshgrid_expand(rho, rho.size, alpha.size, zeta.size, order=0) + r, _meshgrid_expand(R, R.size, T.size, Z.size, order=0) ) np.testing.assert_allclose( - a, _meshgrid_expand(alpha, rho.size, alpha.size, zeta.size, order=1) + t, _meshgrid_expand(T, R.size, T.size, Z.size, order=1) ) np.testing.assert_allclose( - z, _meshgrid_expand(zeta, rho.size, alpha.size, zeta.size, order=2) + z, _meshgrid_expand(Z, R.size, T.size, Z.size, order=2) ) + uR, uT, uZ = _meshgrid_unique_idx(R.size, T.size, Z.size) + iR, iT, iZ = _meshgrid_inverse_idx(R.size, T.size, Z.size) + _, unique, inverse = np.unique(r, return_index=True, return_inverse=True) + np.testing.assert_allclose(uR, unique) + np.testing.assert_allclose(iR, inverse) + _, unique, inverse = np.unique(t, return_index=True, return_inverse=True) + np.testing.assert_allclose(uT, unique) + np.testing.assert_allclose(iT, inverse) + _, unique, inverse = np.unique(z, return_index=True, return_inverse=True) + np.testing.assert_allclose(uZ, unique) + np.testing.assert_allclose(iZ, inverse) @pytest.mark.unit From f39dd90d59c2bb8520ea1d2a4b229277f43de7ac Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 30 Mar 2024 21:34:59 -0400 Subject: [PATCH 053/241] Remove incomplete code introduced 
when editing --- desc/grid.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/desc/grid.py b/desc/grid.py index 40cc3fdaf6..4c61379a08 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -254,14 +254,6 @@ def unique_zeta_idx(self): ) return self._unique_zeta_idx - def _inverse_idx_from_unique_idx(self, surface_label): - axis = {"rho": 0, "theta": 1, "zeta": 2}[surface_label] - nodes = jnp.asarray(self.nodes[:, axis]) - unique_idx = getattr(self, f"_unique_{surface_label}_idx") - inverse_idx = jnp.zeros(self.num_nodes, dtype=int) - for i, u in enumerate(unique_idx): - inverse_idx = jnp.where(nodes == nodes[u], i, inverse_idx) - @property def inverse_rho_idx(self): """ndarray: Indices of unique_rho_idx that recover the rho coordinates.""" From 302bdaa41449ec6a32406c75f50b35170bf736c5 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 30 Mar 2024 21:54:19 -0400 Subject: [PATCH 054/241] Make meshgrid methods public so people can use them --- desc/equilibrium/coords.py | 16 ++++++++-------- desc/grid.py | 30 ++++++++++++++++-------------- tests/test_grid.py | 16 ++++++++-------- 3 files changed, 32 insertions(+), 30 deletions(-) diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index 43448033f8..b6d50bec3d 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -14,9 +14,9 @@ Grid, LinearGrid, QuadratureGrid, - _meshgrid_expand, - _meshgrid_inverse_idx, - _meshgrid_unique_idx, + meshgrid_expand, + meshgrid_inverse_idx, + meshgrid_unique_idx, ) from desc.transform import Transform from desc.utils import setdefault @@ -315,11 +315,11 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): label = ["rho", "theta", "zeta"] unique_idx = { f"_unique_{label[i]}_idx": idx - for i, idx in enumerate(_meshgrid_unique_idx(rho.size, t.size, z.size)) + for i, idx in enumerate(meshgrid_unique_idx(rho.size, t.size, z.size)) } inverse_idx = { f"_inverse_{label[i]}_idx": idx - for i, idx in enumerate(_meshgrid_inverse_idx(rho.size, t.size, z.size)) + for i, idx in enumerate(meshgrid_inverse_idx(rho.size, t.size, z.size)) } grid = Grid( nodes=jnp.column_stack( @@ -341,7 +341,7 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): # on the returned DESC grid inaccurate. 
data = eq.compute(names=["iota", "iota_r"], grid=grid) data_desc = { - d: _meshgrid_expand(grid.compress(data[d]), rho.size, alpha.size, zeta.size) + d: meshgrid_expand(grid.compress(data[d]), rho.size, alpha.size, zeta.size) for d in data if data_index["desc.equilibrium.equilibrium.Equilibrium"][d]["coordinates"] == "r" @@ -352,11 +352,11 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): coords_desc = eq.compute_theta_coords(coords_sfl) unique_idx = { f"_unique_{label[i]}_idx": idx - for i, idx in enumerate(_meshgrid_unique_idx(rho.size, alpha.size, zeta.size)) + for i, idx in enumerate(meshgrid_unique_idx(rho.size, alpha.size, zeta.size)) } inverse_idx = { f"_inverse_{label[i]}_idx": idx - for i, idx in enumerate(_meshgrid_inverse_idx(rho.size, alpha.size, zeta.size)) + for i, idx in enumerate(meshgrid_inverse_idx(rho.size, alpha.size, zeta.size)) } grid_desc = Grid( nodes=coords_desc, sort=False, jitable=True, **unique_idx, **inverse_idx diff --git a/desc/grid.py b/desc/grid.py index 4c61379a08..b4274c3aae 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -1574,7 +1574,7 @@ def find_least_rational_surfaces( return rho, io -def _meshgrid_expand(x, a_size, b_size, c_size, order=0): +def meshgrid_expand(x, a_size, b_size, c_size, order=0): """Expand ``x`` by duplicating elements to match a meshgrid pattern. It is common to construct a meshgrid in the following manner. @@ -1597,11 +1597,11 @@ def _meshgrid_expand(x, a_size, b_size, c_size, order=0): The length of ``x`` should match the number of unique surfaces of the corresponding label in this grid. a_size : int - Size of the first array. + Size of the first argument to meshgrid. b_size : int - Size of the second array. + Size of the second argument to meshgrid. c_size : int - Size of the third array. + Size of the third argument to meshgrid. order : int 0, 1, or 2. Corresponds to whether ``x`` is a surface function of a, b, or c in the example code in the docstring. @@ -1625,7 +1625,7 @@ def _meshgrid_expand(x, a_size, b_size, c_size, order=0): return jnp.tile(x, a_size * b_size) -def _meshgrid_inverse_idx(a_size, b_size, c_size): +def meshgrid_inverse_idx(a_size, b_size, c_size): """Return inverse indices for meshgrid pattern. It is common to construct a meshgrid in the following manner. @@ -1637,16 +1637,17 @@ def _meshgrid_inverse_idx(a_size, b_size, c_size): grid = Grid(nodes, sort=False, jitable=True) Since ``jitable=True`` was specified, the attribute ``grid.inverse_*_idx`` - This method computes these indices. + can not be automatically computed. This method computes these indices. + One can then pass them in as keyword arguments to the Grid constructor. Parameters ---------- a_size : int - Size of the first array. + Size of the first argument to meshgrid. b_size : int - Size of the second array. + Size of the second argument to meshgrid. c_size : int - Size of the third array. + Size of the third argument to meshgrid. order : int 0, 1, or 2. Whether to retrieve unique indices into a, b, or c. @@ -1669,7 +1670,7 @@ def _meshgrid_inverse_idx(a_size, b_size, c_size): return inverse_a_idx, inverse_b_idx, inverse_c_idx -def _meshgrid_unique_idx(a_size, b_size, c_size): +def meshgrid_unique_idx(a_size, b_size, c_size): """Return unique indices for meshgrid pattern. It is common to construct a meshgrid in the following manner. 
@@ -1681,16 +1682,17 @@ def _meshgrid_unique_idx(a_size, b_size, c_size): grid = Grid(nodes, sort=False, jitable=True) Since ``jitable=True`` was specified, the attribute ``grid.unique_*_idx`` - This method computes these indices. + can not be automatically computed. This method computes these indices. + One can then pass them in as keyword arguments to the Grid constructor. Parameters ---------- a_size : int - Size of the first array. + Size of the first argument to meshgrid. b_size : int - Size of the second array. + Size of the second argument to meshgrid. c_size : int - Size of the third array. + Size of the third argument to meshgrid. Returns ------- diff --git a/tests/test_grid.py b/tests/test_grid.py index 1b2cccd43d..ea67177bbf 100644 --- a/tests/test_grid.py +++ b/tests/test_grid.py @@ -11,12 +11,12 @@ Grid, LinearGrid, QuadratureGrid, - _meshgrid_expand, - _meshgrid_inverse_idx, - _meshgrid_unique_idx, dec_to_cf, find_least_rational_surfaces, find_most_rational_surfaces, + meshgrid_expand, + meshgrid_inverse_idx, + meshgrid_unique_idx, ) from desc.profiles import PowerSeriesProfile @@ -765,16 +765,16 @@ def test_meshgrid_idx(self): Z = np.linspace(0, 10 * np.pi, 3) r, t, z = map(np.ravel, np.meshgrid(R, T, Z, indexing="ij")) np.testing.assert_allclose( - r, _meshgrid_expand(R, R.size, T.size, Z.size, order=0) + r, meshgrid_expand(R, R.size, T.size, Z.size, order=0) ) np.testing.assert_allclose( - t, _meshgrid_expand(T, R.size, T.size, Z.size, order=1) + t, meshgrid_expand(T, R.size, T.size, Z.size, order=1) ) np.testing.assert_allclose( - z, _meshgrid_expand(Z, R.size, T.size, Z.size, order=2) + z, meshgrid_expand(Z, R.size, T.size, Z.size, order=2) ) - uR, uT, uZ = _meshgrid_unique_idx(R.size, T.size, Z.size) - iR, iT, iZ = _meshgrid_inverse_idx(R.size, T.size, Z.size) + uR, uT, uZ = meshgrid_unique_idx(R.size, T.size, Z.size) + iR, iT, iZ = meshgrid_inverse_idx(R.size, T.size, Z.size) _, unique, inverse = np.unique(r, return_index=True, return_inverse=True) np.testing.assert_allclose(uR, unique) np.testing.assert_allclose(iR, inverse) From ec019657f679edcc5cc625f55237db8c25de4923 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 30 Mar 2024 23:04:22 -0400 Subject: [PATCH 055/241] Don't pass inunique, inverse idx after compute theta coords --- desc/equilibrium/coords.py | 42 +++++++++++++++----------------------- desc/grid.py | 6 +++--- 2 files changed, 20 insertions(+), 28 deletions(-) diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index b6d50bec3d..86010bf6e5 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -312,26 +312,23 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): # Choose nodes such that even spacing will yield correct flux surface integrals. 
t = jnp.linspace(0, 2 * jnp.pi, 2 * eq.M_grid + 1, endpoint=False) z = jnp.linspace(0, 2 * jnp.pi / eq.NFP, 2 * eq.N_grid + 1, endpoint=False) - label = ["rho", "theta", "zeta"] + nodes = jnp.column_stack( + tuple(map(jnp.ravel, jnp.meshgrid(rho, t, z, indexing="ij"))) + ) + spacing = jnp.array([1 / rho.size, 2 * jnp.pi / t.size, 2 * jnp.pi / z.size])[ + jnp.newaxis + ] + labels = ["rho", "theta", "zeta"] unique_idx = { - f"_unique_{label[i]}_idx": idx - for i, idx in enumerate(meshgrid_unique_idx(rho.size, t.size, z.size)) + f"_unique_{label}_idx": idx + for label, idx in zip(labels, meshgrid_unique_idx(rho.size, t.size, z.size)) } inverse_idx = { - f"_inverse_{label[i]}_idx": idx - for i, idx in enumerate(meshgrid_inverse_idx(rho.size, t.size, z.size)) + f"_inverse_{label}_idx": idx + for label, idx in zip(labels, meshgrid_inverse_idx(rho.size, t.size, z.size)) } grid = Grid( - nodes=jnp.column_stack( - tuple(map(jnp.ravel, jnp.meshgrid(rho, t, z, indexing="ij"))) - ), - spacing=jnp.array([1 / rho.size, 2 * jnp.pi / t.size, 2 * jnp.pi / z.size])[ - jnp.newaxis - ], - sort=False, - jitable=True, - **unique_idx, - **inverse_idx, + nodes, spacing=spacing, sort=False, jitable=True, **unique_idx, **inverse_idx ) # We only need to compute the rotational transform to transform to straight # field-line coordinates. However, it is a good idea to compute other flux @@ -346,20 +343,15 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): if data_index["desc.equilibrium.equilibrium.Equilibrium"][d]["coordinates"] == "r" } - # meshgrid of Clebsch-Type field-line coordinates r, a, z_fl = map(jnp.ravel, jnp.meshgrid(rho, alpha, zeta, indexing="ij")) coords_sfl = jnp.column_stack([r, a + data_desc["iota"] * z_fl, z_fl]) coords_desc = eq.compute_theta_coords(coords_sfl) - unique_idx = { - f"_unique_{label[i]}_idx": idx - for i, idx in enumerate(meshgrid_unique_idx(rho.size, alpha.size, zeta.size)) - } - inverse_idx = { - f"_inverse_{label[i]}_idx": idx - for i, idx in enumerate(meshgrid_inverse_idx(rho.size, alpha.size, zeta.size)) - } grid_desc = Grid( - nodes=coords_desc, sort=False, jitable=True, **unique_idx, **inverse_idx + nodes=coords_desc, + sort=False, + jitable=True, + _unique_rho_idx=meshgrid_unique_idx(rho.size, alpha.size, zeta.size)[0], + _inverse_rho_idx=meshgrid_inverse_idx(rho.size, alpha.size, zeta.size)[0], ) return grid_desc, data_desc diff --git a/desc/grid.py b/desc/grid.py index b4274c3aae..17c7a2cfd2 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -497,6 +497,8 @@ def __init__(self, nodes, sort=False, jitable=False, spacing=None, **kwargs): self._sym = False self._node_pattern = "custom" self._nodes, self._spacing = self._create_nodes(nodes) + if spacing is not None: + self._spacing = spacing if sort: self._sort_nodes() if jitable: @@ -535,8 +537,6 @@ def __init__(self, nodes, sort=False, jitable=False, spacing=None, **kwargs): self._L = self.num_nodes self._M = self.num_nodes self._N = self.num_nodes - if spacing is not None: - self._spacing = spacing errorif(len(kwargs), ValueError, f"Got unexpected kwargs {kwargs.keys()}") def _create_nodes(self, nodes): @@ -1649,7 +1649,7 @@ def meshgrid_inverse_idx(a_size, b_size, c_size): c_size : int Size of the third argument to meshgrid. order : int - 0, 1, or 2. Whether to retrieve unique indices into a, b, or c. + 0, 1, or 2. Whether to retrieve inverse indices for label a, b, or c. 
Returns ------- From c8e38c90ecd1b1cc16ec16d9472c5039024435d8 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 31 Mar 2024 02:00:40 -0400 Subject: [PATCH 056/241] Fix reshaping bug, recode no longer used in algorithm --- desc/compute/bounce_integral.py | 170 ++++++++------------------------ tests/test_bounce_integral.py | 58 +++++++++-- 2 files changed, 89 insertions(+), 139 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index a7d7bffe1c..525c941986 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -4,7 +4,7 @@ from interpax import CubicHermiteSpline, interp1d -from desc.backend import complex_sqrt, flatnonzero, jnp, put, put_along_axis, take, vmap +from desc.backend import complex_sqrt, flatnonzero, jnp, put, take, vmap from desc.equilibrium.coords import desc_grid_from_field_line_coords @@ -323,21 +323,21 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): Parameters ---------- - pitch : ndarray, shape(P, A * R) + pitch : ndarray, shape(P, R * A) λ values. Last two axes should specify the λ value for a particular field line - parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[..., α, ρ]`` + parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., ρ, α]`` where in the latter the labels are interpreted as indices that correspond to that field line. If an additional axis exists on the left, it is the batch axis as usual. knots : ndarray, shape(knots.size, ) Field line-following ζ coordinates of spline knots. - poly_B : ndarray, shape(4, A * R, knots.size - 1) + poly_B : ndarray, shape(4, R * A, knots.size - 1) Polynomial coefficients of the cubic spline of |B|. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. - poly_B_z : ndarray, shape(3, A * R, knots.size - 1) + poly_B_z : ndarray, shape(3, R * A, knots.size - 1) Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise @@ -347,13 +347,13 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): ------- bp1, bp2 : ndarray, ndarray Field line-following ζ coordinates of bounce points for a given pitch - along a field line. Has shape (P, A * R, (knots.size - 1) * 3). + along a field line. Has shape (P, R * A, (knots.size - 1) * 3). If there were less than (knots.size - 1) * 3 bounce points along a field line, then the last axis is padded with nan. """ P = pitch.shape[0] # batch size - AR = poly_B.shape[1] # alpha.size * rho.size + RA = poly_B.shape[1] # rho.size * alpha.size N = knots.size - 1 # number of piecewise cubic polynomials per field line assert poly_B.shape[-1] == poly_B_z.shape[-1] == N @@ -368,19 +368,19 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): a_max=knots[1:], sort=True, ) - assert intersect.shape == (P, AR, N, 3) + assert intersect.shape == (P, RA, N, 3) # Reshape so that last axis enumerates intersects of a pitch along a field line. # Condense remaining axes to vmap over them. - B_z = polyval(x=intersect, c=poly_B_z[..., jnp.newaxis]).reshape(P * AR, -1) - intersect = intersect.reshape(P * AR, -1) + B_z = polyval(x=intersect, c=poly_B_z[..., jnp.newaxis]).reshape(P * RA, -1) + intersect = intersect.reshape(P * RA, -1) # Only consider intersect if it is within knots that bound that polynomial. 
is_intersect = ~jnp.isnan(intersect) # Rearrange so that all intersects along a field line are contiguous. intersect = take_mask(intersect, is_intersect) B_z = take_mask(B_z, is_intersect) - assert intersect.shape == B_z.shape == is_intersect.shape == (P * AR, N * 3) + assert intersect.shape == B_z.shape == is_intersect.shape == (P * RA, N * 3) # The boolean masks is_bp1 and is_bp2 will encode whether a given entry in # intersect is a valid starting and ending bounce point, respectively. # Sign of derivative determines whether an intersect is a valid bounce point. @@ -416,8 +416,8 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): # to the value of |B| of at ζ = knots[-1]. In general, continuity implies # |B|(knots[-1] < ζ < knots[-1] + knots[0]) will approximately equal # |B|(0 < ζ < knots[0]) as long as ζ = knots[-1] is large enough. - bp1 = bp1.reshape(P, AR, -1) - bp2 = bp2.reshape(P, AR, -1) + bp1 = bp1.reshape(P, RA, -1) + bp2 = bp2.reshape(P, RA, -1) return bp1, bp2 @@ -429,18 +429,18 @@ def _compute_bp_if_given_pitch(pitch, knots, poly_B, poly_B_z, *original, err=Fa pitch : ndarray, shape(P, A, R) λ values. Last two axes should specify the λ value for a particular field line - parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[..., α, ρ]`` + parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., ρ, α]`` where in the latter the labels are interpreted as indices that correspond to that field line. If an additional axis exists on the left, it is the batch axis as usual. knots : ndarray, shape(knots.size, ) Field line-following ζ coordinates of spline knots. - poly_B : ndarray, shape(4, A * R, knots.size - 1) + poly_B : ndarray, shape(4, R * A, knots.size - 1) Polynomial coefficients of the cubic spline of |B|. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. - poly_B_z : ndarray, shape(3, A * R, knots.size - 1) + poly_B_z : ndarray, shape(3, R * A, knots.size - 1) Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise @@ -456,7 +456,7 @@ def _compute_bp_if_given_pitch(pitch, knots, poly_B, poly_B_z, *original, err=Fa raise ValueError("No pitch values were given.") return original else: - # ensure pitch has shape (batch size, alpha.size, rho.size) + # ensure pitch has shape (batch size, rho.size, alpha.size) pitch = jnp.atleast_2d(pitch) if pitch.ndim == 2: # Can't use atleast_3d; see https://github.com/numpy/numpy/issues/25805. @@ -471,7 +471,7 @@ def _compute_bp_if_given_pitch(pitch, knots, poly_B, poly_B_z, *original, err=Fa def bounce_integral( eq, pitch=None, - rho=None, + rho=jnp.linspace(1e-12, 1, 10), alpha=None, zeta=jnp.linspace(0, 10 * jnp.pi, 20), quadrature=tanh_sinh_quadrature, @@ -502,7 +502,7 @@ def bounce_integral( λ values to evaluate the bounce integral at each field line. May be specified later. Last two axes should specify the λ value for a particular field line - parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[..., α, ρ]`` + parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., ρ, α]`` where in the latter the labels are interpreted as indices that correspond to that field line. If an additional axis exists on the left, it is the batch axis as usual. @@ -536,12 +536,12 @@ def bounce_integral( DESC coordinate grid for the given field line coordinates. 
data : dict Dictionary of ndarrays of stuff evaluated on ``grid``. - poly_B : ndarray, shape(4, A * R, zeta.size - 1) + poly_B : ndarray, shape(4, R * A, zeta.size - 1) Polynomial coefficients of the cubic spline of |B|. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. - poly_B_z : ndarray, shape(3, A * R, zeta.size - 1) + poly_B_z : ndarray, shape(3, R * A, zeta.size - 1) Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise @@ -556,15 +556,13 @@ def bounce_integral( bi, items = bounce_integral(eq, rho=rho, alpha=alpha, return_items=True) name = "g_zz" f = eq.compute(name, grid=items["grid"], data=items["data"])[name] - B = items["data"]["B"].reshape(alpha.size * rho.size, -1) + B = items["data"]["B"].reshape(rho.size * alpha.size, -1) pitch = jnp.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), 30).reshape( - -1, alpha.size, rho.size + -1, rho.size, alpha.size ) result = bi(f, pitch) """ - if rho is None: - rho = jnp.linspace(1e-12, 1, 10) if alpha is None: alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 10) rho = jnp.atleast_1d(rho) @@ -575,14 +573,14 @@ def bounce_integral( grid, data = desc_grid_from_field_line_coords(eq, rho, alpha, zeta) data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid, data=data) - B_sup_z = data["B^zeta"].reshape(A * R, -1) - B = data["|B|"].reshape(A * R, -1) - B_z_ra = data["|B|_z|r,a"].reshape(A * R, -1) + B_sup_z = data["B^zeta"].reshape(R * A, -1) + B = data["|B|"].reshape(R * A, -1) + B_z_ra = data["|B|_z|r,a"].reshape(R * A, -1) poly_B = CubicHermiteSpline(zeta, B, B_z_ra, axis=-1, check=False).c poly_B = jnp.moveaxis(poly_B, 1, -1) poly_B_z = polyder(poly_B) - assert poly_B.shape == (4, A * R, zeta.size - 1) - assert poly_B_z.shape == (3, A * R, zeta.size - 1) + assert poly_B.shape == (4, R * A, zeta.size - 1) + assert poly_B_z.shape == (3, R * A, zeta.size - 1) return_items = kwargs.pop("return_items", False) x, w = quadrature(**kwargs) @@ -601,14 +599,14 @@ def _bounce_integral(f, pitch=None): λ values to evaluate the bounce integral at each field line. If None, uses the values given to the parent function. Last two axes should specify the λ value for a particular field line - parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[..., α, ρ]`` + parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., ρ, α]`` where in the latter the labels are interpreted as indices that correspond to that field line. If an additional axis exists on the left, it is the batch axis as usual. Returns ------- - result : ndarray, shape(P, alpha.size, rho.size, (zeta.size - 1) * 3) + result : ndarray, shape(P, rho.size, alpha.size, (zeta.size - 1) * 3) The last axis iterates through every bounce integral performed along that field line padded by nan. 
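As a point of reference for the (ρ, α) ordering these hunks switch to, here is a minimal sketch that is not part of the patch and uses placeholder arrays: meshing with ``indexing="ij"`` and raveling keeps each field line contiguous, so a reshape to ``(rho.size * alpha.size, -1)`` groups data by field line and a per-field-line pitch takes the shape ``(-1, rho.size, alpha.size)`` used in the docstring example above.

.. code-block:: python

    # Placeholder data; only the ordering convention matters here.
    import numpy as np

    rho = np.linspace(1e-12, 1, 3)
    alpha = np.linspace(0, 2 * np.pi, 4)
    zeta = np.linspace(0, 10 * np.pi, 5)
    r, a, z = map(np.ravel, np.meshgrid(rho, alpha, zeta, indexing="ij"))
    # Rows of B are field lines: rho varies slowest, then alpha, then zeta.
    B = (np.abs(np.cos(z)) + 2 + r).reshape(rho.size * alpha.size, zeta.size)
    # One pitch profile per field line, batch axis first.
    pitch = np.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), 30).reshape(
        -1, rho.size, alpha.size
    )
    assert pitch.shape == (30, rho.size, alpha.size)

Reshaping a result of shape (P, rho.size * alpha.size, ...) to (P, rho.size, alpha.size, ...) then keeps each field line's bounce integrals together, which is what ``test_reshape_convention`` exercises.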
@@ -617,14 +615,14 @@ def _bounce_integral(f, pitch=None): pitch, zeta, poly_B, poly_B_z, *original, err=True ) P = pitch.shape[0] - pitch = jnp.broadcast_to(pitch, shape=(P, A * R)) + pitch = jnp.broadcast_to(pitch, shape=(P, R * A)) X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] - f = f.reshape(A * R, -1) + f = f.reshape(R * A, zeta.size) result = jnp.reshape( bounce_quadrature(pitch, X, w, zeta, f, B_sup_z, B, B_z_ra) # complete the change of variable / (bp2 - bp1) * jnp.pi, - newshape=(P, A, R, -1), + newshape=(P, R, A, -1), ) return result @@ -638,7 +636,7 @@ def _bounce_integral(f, pitch=None): def bounce_average( eq, pitch=None, - rho=None, + rho=jnp.linspace(1e-12, 1, 10), alpha=None, zeta=jnp.linspace(0, 10 * jnp.pi, 20), quadrature=tanh_sinh_quadrature, @@ -670,7 +668,7 @@ def bounce_average( λ values to evaluate the bounce average at each field line. May be specified later. Last two axes should specify the λ value for a particular field line - parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[..., α, ρ]`` + parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., ρ, α]`` where in the latter the labels are interpreted as indices that correspond to that field line. If an additional axis exists on the left, it is the batch axis as usual. @@ -706,12 +704,12 @@ def bounce_average( DESC coordinate grid for the given field line coordinates. data : dict Dictionary of ndarrays of stuff evaluated on ``grid``. - poly_B : ndarray, shape(4, A * R, zeta.size - 1) + poly_B : ndarray, shape(4, R * A, zeta.size - 1) Polynomial coefficients of the cubic spline of |B|. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. - poly_B_z : ndarray, shape(3, A * R, zeta.size - 1) + poly_B_z : ndarray, shape(3, R * A, zeta.size - 1) Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise @@ -726,9 +724,9 @@ def bounce_average( ba, items = bounce_average(eq, rho=rho, alpha=alpha, return_items=True) name = "g_zz" f = eq.compute(name, grid=items["grid"], data=items["data"])[name] - B = items["data"]["B"].reshape(alpha.size * rho.size, -1) + B = items["data"]["B"].reshape(rho.size * alpha.size, -1) pitch = jnp.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), 30).reshape( - -1, alpha.size, rho.size + -1, rho.size, alpha.size ) result = ba(f, pitch) @@ -745,14 +743,14 @@ def _bounce_average(f, pitch=None): λ values to evaluate the bounce average at each field line. If None, uses the values given to the parent function. Last two axes should specify the λ value for a particular field line - parameterized by α, ρ. That is, λ(α, ρ) is specified by ``pitch[..., α, ρ]`` + parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., ρ, α]`` where in the latter the labels are interpreted as indices that correspond to that field line. If an additional axis exists on the left, it is the batch axis as usual. Returns ------- - result : ndarray, shape(P, alpha.size, rho.size, (zeta.size - 1) * 3) + result : ndarray, shape(P, rho.size, alpha.size, (zeta.size - 1) * 3) The last axis iterates through every bounce average performed along that field line padded by nan. 
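The default ``quadrature=tanh_sinh_quadrature`` targets integrands that behave like 1/√(1 − λ|B|) near the bounce points. As background, a generic tanh-sinh rule on (−1, 1) can be sketched as below; the helper name and parameters are made up for the sketch, and the ``tanh_sinh_quadrature`` used by the patch may choose its nodes, truncation, and signature differently.

.. code-block:: python

    # Generic tanh-sinh rule on (-1, 1); an illustrative sketch, not DESC's implementation.
    import numpy as np

    def tanh_sinh_sketch(resolution=20, t_max=3.0):
        t = np.linspace(-t_max, t_max, resolution)
        dt = t[1] - t[0]
        arg = 0.5 * np.pi * np.sinh(t)
        x = np.tanh(arg)  # nodes cluster double-exponentially toward the endpoints
        w = 0.5 * np.pi * np.cosh(t) / np.cosh(arg) ** 2 * dt
        return x, w

    x, w = tanh_sinh_sketch()
    # integral of 1 / sqrt(1 - x**2) over (-1, 1) is pi, despite endpoint singularities
    print(np.sum(w / np.sqrt(1 - x**2)))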
@@ -767,89 +765,3 @@ def _bounce_average(f, pitch=None): return _bounce_average, items else: return _bounce_average - - -# Current algorithm used for bounce integrals no longer requires these -# two functions. TODO: Delete before merge. -def diff_mask(a, mask, n=1, axis=-1, prepend=None): - """Calculate the n-th discrete difference along the given axis of ``a[mask]``. - - The first difference is given by ``out[i] = a[i+1] - a[i]`` along - the given axis, higher differences are calculated by using `diff` - recursively. This method is JIT compatible. - - Parameters - ---------- - a : array_like - Input array - mask : array_like - Boolean mask to index like ``a[mask]`` prior to computing difference. - Should have same size as ``a``. - n : int, optional - The number of times values are differenced. - axis : int, optional - The axis along which the difference is taken, default is the - last axis. - prepend : array_like, optional - Values to prepend to `a` along axis prior to performing the difference. - Scalar values are expanded to arrays with length 1 in the direction of - axis and the shape of the input array in along all other axes. - Otherwise, the dimension and shape must match `a` except along axis. - - Returns - ------- - diff : ndarray - The n-th differences. The shape of the output is the same as ``a`` - except along ``axis`` where the dimension is smaller by ``n``. The - type of the output is the same as the type of the difference - between any two elements of ``a``. - - Notes - ----- - The result is padded with nan at the end to be jit compilable. - - """ - prepend = () if prepend is None else (prepend,) - return jnp.diff(take_mask(a, mask), n, axis, *prepend) - - -def stretch_batches(in_arr, in_batch_size, out_batch_size, fill): - """Stretch batches of ``in_arr``. - - Given that ``in_arr`` is composed of N batches of ``in_batch_size`` - along its last axis, stretch the last axis so that it is composed of - N batches of ``out_batch_size``. The ``out_batch_size - in_batch_size`` - missing elements in each batch are populated with ``fill``. - By default, these elements are populated evenly surrounding the input batches. - - Parameters - ---------- - in_arr : ndarray, shape(..., in_batch_size * N) - Input array - in_batch_size : int - Length of batches along last axis of input array. - out_batch_size : int - Length of batches along last axis of output array. - fill : bool or int or float - Value to fill at missing indices of each batch. 
- - Returns - ------- - out_arr : ndarray, shape(..., out_batch_size * N) - Output array - - """ - assert out_batch_size >= in_batch_size - N = in_arr.shape[-1] // in_batch_size - out_shape = in_arr.shape[:-1] + (N * out_batch_size,) - offset = (out_batch_size - in_batch_size) // 2 - idx = jnp.arange(in_arr.shape[-1]) - out_arr = put_along_axis( - arr=jnp.full(out_shape, fill, dtype=in_arr.dtype), - indices=(idx // in_batch_size) * out_batch_size - + offset - + (idx % in_batch_size), - values=in_arr, - axis=-1, - ) - return out_arr diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index f788645e71..25f74de94a 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -1,5 +1,7 @@ """Test bounce integral methods.""" +import inspect + import numpy as np import pytest from interpax import Akima1DInterpolator @@ -53,11 +55,47 @@ def test_mask_operation(): ) +@pytest.mark.unit +def test_reshape_convention(): + """Test the reshaping convention separates data across field lines.""" + rho = np.linspace(0, 1, 3) + alpha = np.linspace(0, 2 * np.pi, 4) + zeta = np.linspace(0, 10 * np.pi, 5) + r, a, z = map(np.ravel, np.meshgrid(rho, alpha, zeta, indexing="ij")) + # functions of zeta should separate along first two axes + # since those are contiguous, this should work + f = z.reshape(-1, zeta.size) + for i in range(1, f.shape[0]): + np.testing.assert_allclose(f[i - 1], f[i]) + # likewise for rho + f = r.reshape(rho.size, -1) + for i in range(1, f.shape[-1]): + np.testing.assert_allclose(f[:, i - 1], f[:, i]) + # test final reshape of bounce integral result won't mix data + f = (a**2 + z).reshape(rho.size, alpha.size, zeta.size) + for i in range(1, f.shape[0]): + np.testing.assert_allclose(f[i - 1], f[i]) + f = (r**2 + z).reshape(rho.size, alpha.size, zeta.size) + for i in range(1, f.shape[1]): + np.testing.assert_allclose(f[:, i - 1], f[:, i]) + f = (r**2 + a).reshape(rho.size, alpha.size, zeta.size) + for i in range(1, f.shape[-1]): + np.testing.assert_allclose(f[..., i - 1], f[..., i]) + + err_msg = "The ordering conventions are required for correctness." 
+ src = inspect.getsource(bounce_integral) + assert "R, A" in src and "A, R" not in src, err_msg + assert "A, zeta.size" in src, err_msg + src = inspect.getsource(desc_grid_from_field_line_coords) + assert 'indexing="ij"' in src, err_msg + assert 'meshgrid(rho, alpha, zeta, indexing="ij")' in src, err_msg + + @pytest.mark.unit def test_cubic_poly_roots(): """Test vectorized computation of cubic polynomial exact roots.""" cubic = 4 - poly = np.arange(-60, 60).reshape(cubic, 6, -1) + poly = np.arange(-24, 24).reshape(cubic, 6, -1) poly[0] = np.where(poly[0] == 0, np.ones_like(poly[0]), poly[0]) poly = poly * np.e * np.pi # make sure broadcasting won't hide error in implementation @@ -77,7 +115,7 @@ def test_cubic_poly_roots(): def test_polyint(): """Test vectorized computation of polynomial primitive.""" quintic = 6 - poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi + poly = np.arange(-18, 18).reshape(quintic, 3, -1) * np.e * np.pi # make sure broadcasting won't hide error in implementation assert np.unique(poly.shape).size == poly.ndim constant = np.broadcast_to(np.arange(poly.shape[-1]), poly.shape[1:]) @@ -95,7 +133,7 @@ def test_polyint(): def test_polyder(): """Test vectorized computation of polynomial derivative.""" quintic = 6 - poly = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi + poly = np.arange(-18, 18).reshape(quintic, 3, -1) * np.e * np.pi # make sure broadcasting won't hide error in implementation assert np.unique(poly.shape).size == poly.ndim derivative = polyder(poly) @@ -109,8 +147,8 @@ def test_polyder(): @pytest.mark.unit def test_polyval(): """Test vectorized computation of polynomial evaluation.""" - quintic = 6 - c = np.arange(-90, 90).reshape(quintic, 3, -1) * np.e * np.pi + quartic = 5 + c = np.arange(-60, 60).reshape(quartic, 3, -1) * np.e * np.pi # make sure broadcasting won't hide error in implementation assert np.unique(c.shape).size == c.ndim x = np.linspace(0, 20, c.shape[1] * c.shape[2]).reshape(c.shape[1], c.shape[2]) @@ -161,21 +199,21 @@ def test_pitch_and_hairy_ball(): rho = np.linspace(1e-12, 1, 6) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) ba, items = bounce_average(eq, rho=rho, alpha=alpha, return_items=True) + B = items["data"]["B"] + assert not np.isclose(B, 0, atol=1e-19).any(), "B should never vanish." + name = "g_zz" f = eq.compute(name, grid=items["grid"], data=items["data"])[name] - # Same pitch for every field line may give sparse result. pitch_res = 30 - B = items["data"]["B"] - assert not np.isclose(B, 0, atol=1e-19).any(), "B should never vanish." pitch = np.linspace(1 / B.max(), 1 / B.min(), pitch_res)[:, np.newaxis, np.newaxis] result = ba(f, pitch) assert np.isfinite(result).any() # specify pitch per field line - B = B.reshape(alpha.size * rho.size, -1) + B = B.reshape(rho.size * alpha.size, -1) pitch = np.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), pitch_res).reshape( - pitch_res, alpha.size, rho.size + -1, rho.size, alpha.size ) result = ba(f, pitch) assert np.isfinite(result).any() From 2fb2b6b234a6d50fd484a49b26b0adf9f6abc82c Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 31 Mar 2024 12:17:49 -0400 Subject: [PATCH 057/241] Make sure some method in compute.utils work custom grid... 
Needed for desc_grid_from_field_line_coords function to work correctly --- desc/compute/bounce_integral.py | 2 +- desc/compute/utils.py | 58 +++++++++++++++------------------ desc/grid.py | 48 +++++++++++---------------- tests/test_compute_utils.py | 12 ++++++- 4 files changed, 58 insertions(+), 62 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 525c941986..c449e5b0c3 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -426,7 +426,7 @@ def _compute_bp_if_given_pitch(pitch, knots, poly_B, poly_B_z, *original, err=Fa Parameters ---------- - pitch : ndarray, shape(P, A, R) + pitch : ndarray, shape(P, R, A) λ values. Last two axes should specify the λ value for a particular field line parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., ρ, α]`` diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 8adc6482ce..8b092ea40a 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -691,36 +691,26 @@ def _get_grid_surface(grid, surface_label): has_endpoint_dupe : bool Whether this surface label's nodes have a duplicate at the endpoint of a periodic domain. (e.g. a node at 0 and 2π). + has_idx : bool + Whether the grid knows the number of unique nodes and inverse idx. """ assert surface_label in {"rho", "theta", "zeta"} if surface_label == "rho": spacing = grid.spacing[:, 1:] has_endpoint_dupe = False - unique_size = getattr(grid, "num_rho", -1) - inverse_idx = getattr(grid, "_inverse_rho_idx", jnp.array([])) elif surface_label == "theta": spacing = grid.spacing[:, [0, 2]] - unique_size = getattr(grid, "num_theta", -1) - inverse_idx = getattr(grid, "_inverse_theta_idx", jnp.array([])) - has_endpoint_dupe = ( - isinstance(grid, LinearGrid) - and hasattr(grid, "_unique_theta_idx") - and (grid.nodes[grid.unique_theta_idx[0], 1] == 0) - & (grid.nodes[grid.unique_theta_idx[-1], 1] == 2 * np.pi) - ) + has_endpoint_dupe = isinstance(grid, LinearGrid) and grid._theta_endpoint else: spacing = grid.spacing[:, :2] - unique_size = getattr(grid, "num_zeta", -1) - inverse_idx = getattr(grid, "_inverse_zeta_idx", jnp.array([])) - has_endpoint_dupe = ( - isinstance(grid, LinearGrid) - and hasattr(grid, "_unique_zeta_idx") - and (grid.nodes[grid.unique_zeta_idx[0], 2] == 0) - & (grid.nodes[grid.unique_zeta_idx[-1], 2] == 2 * np.pi / grid.NFP) - ) - - return unique_size, inverse_idx, spacing, has_endpoint_dupe + has_endpoint_dupe = isinstance(grid, LinearGrid) and grid._zeta_endpoint + has_idx = hasattr(grid, f"num_{surface_label}") & hasattr( + grid, f"_inverse_{surface_label}_idx" + ) + unique_size = getattr(grid, f"num_{surface_label}", -1) + inverse_idx = getattr(grid, f"_inverse_{surface_label}_idx", jnp.array([])) + return unique_size, inverse_idx, spacing, has_endpoint_dupe, has_idx def line_integrals( @@ -886,16 +876,14 @@ def surface_integrals_map(grid, surface_label="rho", expand_out=True, tol=1e-14) "yellow", ) ) - unique_size, inverse_idx, spacing, has_endpoint_dupe = _get_grid_surface( + unique_size, inverse_idx, spacing, has_endpoint_dupe, has_idx = _get_grid_surface( grid, surface_label ) spacing = jnp.prod(spacing, axis=1) # Todo: Define masks as a sparse matrix once sparse matrices are no longer # experimental in jax. - if hasattr(grid, f"num_{surface_label}") and hasattr( - grid, f"_inverse_{surface_label}_idx" - ): + if has_idx: # The ith row of masks is True only at the indices which correspond to the # ith surface. 
The integral over the ith surface is the dot product of the # ith row vector and the integrand defined over all the surfaces. @@ -1052,8 +1040,12 @@ def surface_averages_map(grid, surface_label="rho", expand_out=True, tol=1e-14): ``function(q, sqrt_g)``. """ - if not hasattr(grid, f"num_{surface_label}"): - expand_out = False # don't try to expand already expanded output + expand_out = ( + expand_out + # don't try to expand already expanded output + & hasattr(grid, f"num_{surface_label}") + & hasattr(grid, f"_inverse_{surface_label}_idx") + ) integrate = surface_integrals_map(grid, surface_label, expand_out=False, tol=tol) def _surface_averages(q, sqrt_g=jnp.array([1.0]), denominator=None): @@ -1181,7 +1173,9 @@ def surface_integrals_transform(grid, surface_label="rho"): # transform into the computational domain, so the second dimension that # discretizes f over the codomain will typically have size grid.num_nodes # to broadcast with quantities in data_index. - assert hasattr(grid, f"num_{surface_label}") + assert hasattr(grid, f"num_{surface_label}") & hasattr( + grid, f"_inverse_{surface_label}_idx" + ) return surface_integrals_map(grid, surface_label, expand_out=False) @@ -1265,7 +1259,7 @@ def surface_variance( By default, the returned array has the same shape as the input. """ - _, _, spacing, _ = _get_grid_surface(grid, surface_label) + _, _, spacing, _, has_idx = _get_grid_surface(grid, surface_label) integrate = surface_integrals_map(grid, surface_label, expand_out=False, tol=tol) v1 = integrate(weights) @@ -1278,10 +1272,10 @@ def surface_variance( q = jnp.atleast_1d(q) # compute variance in two passes to avoid catastrophic round off error mean = (integrate((weights * q.T).T).T / v1).T - if hasattr(grid, f"num_{surface_label}"): + if has_idx: mean = grid.expand(mean, surface_label) variance = (correction * integrate((weights * ((q - mean) ** 2).T).T).T / v1).T - if hasattr(grid, f"num_{surface_label}") and expand_out: + if has_idx & expand_out: return grid.expand(variance, surface_label) else: return variance @@ -1330,7 +1324,9 @@ def surface_min(grid, x, surface_label="rho"): The returned array has the same shape as the input. 
""" - unique_size, inverse_idx, _, _ = _get_grid_surface(grid, surface_label) + unique_size, inverse_idx, _, _, has_idx = _get_grid_surface(grid, surface_label) + if not has_idx: + raise NotImplementedError("Grid should have unique and inverse idx.") inverse_idx = jnp.asarray(inverse_idx) x = jnp.asarray(x) mins = jnp.full(unique_size, jnp.inf) diff --git a/desc/grid.py b/desc/grid.py index 17c7a2cfd2..41ede50dbd 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -626,6 +626,8 @@ def __init__( self._NFP = NFP self._sym = sym self._endpoint = bool(endpoint) + self._theta_endpoint = False + self._zeta_endpoint = False self._node_pattern = "linear" self._nodes, self._spacing = self._create_nodes( L=L, @@ -861,38 +863,26 @@ def _create_nodes( # noqa: C901 else: dz = np.array([ZETA_ENDPOINT]) - self._endpoint = ( + self._theta_endpoint = ( t.size > 0 - and z.size > 0 - and ( - ( - np.isclose(t[0], 0, atol=1e-12) - and np.isclose(t[-1], THETA_ENDPOINT, atol=1e-12) - ) - or (t.size == 1 and z.size > 1) - ) - and ( - ( - np.isclose(z[0], 0, atol=1e-12) - and np.isclose(z[-1], ZETA_ENDPOINT, atol=1e-12) - ) - or (z.size == 1 and t.size > 1) - ) - ) # if only one theta or one zeta point, can have endpoint=True + and np.isclose(t[0], 0, atol=1e-12) + and np.isclose(t[-1], THETA_ENDPOINT, atol=1e-12) + ) + self._zeta_endpoint = ( + z.size > 0 + and np.isclose(z[0], 0, atol=1e-12) + and np.isclose(z[-1], ZETA_ENDPOINT, atol=1e-12) + ) + # if only one theta or one zeta point, can have endpoint=True # if the other one is a full array + self._endpoint = (self._theta_endpoint or (t.size == 1 and z.size > 1)) and ( + self._zeta_endpoint or (z.size == 1 and t.size > 1) + ) - r, t, z = np.meshgrid(r, t, z, indexing="ij") - r = r.flatten() - t = t.flatten() - z = z.flatten() - - dr, dt, dz = np.meshgrid(dr, dt, dz, indexing="ij") - dr = dr.flatten() - dt = dt.flatten() - dz = dz.flatten() - - nodes = np.stack([r, t, z]).T - spacing = np.stack([dr, dt, dz]).T + r, t, z = map(np.ravel, np.meshgrid(r, t, z, indexing="ij")) + dr, dt, dz = map(np.ravel, np.meshgrid(dr, dt, dz, indexing="ij")) + nodes = np.column_stack([r, t, z]) + spacing = np.column_stack([dr, dt, dz]) return nodes, spacing diff --git a/tests/test_compute_utils.py b/tests/test_compute_utils.py index b8a69c20fa..7ce417424e 100644 --- a/tests/test_compute_utils.py +++ b/tests/test_compute_utils.py @@ -69,7 +69,17 @@ def surface_integrals(grid, q=np.array([1.0]), surface_label="rho"): Surface integral of the input over each surface in the grid. 
""" - _, _, spacing, has_endpoint_dupe = _get_grid_surface(grid, surface_label) + _, _, spacing, _, _ = _get_grid_surface(grid, surface_label) + if surface_label == "rho": + has_endpoint_dupe = False + elif surface_label == "theta": + has_endpoint_dupe = (grid.nodes[grid.unique_theta_idx[0], 1] == 0) & ( + grid.nodes[grid.unique_theta_idx[-1], 1] == 2 * np.pi + ) + else: + has_endpoint_dupe = (grid.nodes[grid.unique_zeta_idx[0], 2] == 0) & ( + grid.nodes[grid.unique_zeta_idx[-1], 2] == 2 * np.pi / grid.NFP + ) weights = (spacing.prod(axis=1) * np.nan_to_num(q).T).T surfaces = {} From acfc7fc7fbb78532e0bfc880108b8bee076f478e Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 1 Apr 2024 13:07:21 -0400 Subject: [PATCH 058/241] Modify roll_and_replace method because I think it isn't differentiable --- desc/compute/bounce_integral.py | 49 ++++++++++++++++++++++----------- tests/test_bounce_integral.py | 23 ++++++++++++++-- 2 files changed, 53 insertions(+), 19 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index c449e5b0c3..1fa1b9c05b 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -4,7 +4,7 @@ from interpax import CubicHermiteSpline, interp1d -from desc.backend import complex_sqrt, flatnonzero, jnp, put, take, vmap +from desc.backend import complex_sqrt, cond, flatnonzero, jnp, put, take, vmap from desc.equilibrium.coords import desc_grid_from_field_line_coords @@ -38,7 +38,7 @@ def bounce_quadrature(pitch, X, w, knots, f, B_sup_z, B, B_z_ra): Bounce integrals for every pitch along a particular field line. """ - assert pitch.ndim == 1 + assert pitch.ndim == 1 == w.ndim assert X.shape == (pitch.size, (knots.size - 1) * 3, w.size) assert knots.shape == f.shape == B_sup_z.shape == B.shape == B_z_ra.shape # Cubic spline the integrand so that we can evaluate it at quadrature points @@ -141,22 +141,38 @@ def take_mask(a, mask, size=None, fill_value=None): @vmap def _last_value(a): - """Return the last non-nan value in a.""" - assert a.ndim == 1 - a = a[::-1] - idx = flatnonzero(~jnp.isnan(a), size=1, fill_value=0) + """Return the last non-nan value in ``a``.""" + a = jnp.ravel(a)[::-1] + idx = jnp.squeeze(flatnonzero(~jnp.isnan(a), size=1, fill_value=0)) return a[idx] @vmap -def _roll_and_replace(a, shift, replacement): - assert a.ndim == 1 - assert shift.size == 1 and shift.dtype == bool - assert replacement.size == 1 - # maybe jax will prefer this to an if statement - replacement = replacement * shift + a[0] * (~shift) - a = put(jnp.roll(a, shift), jnp.array([0]), replacement) - return a +def _roll_and_replace_if_shift(a, shift, replacement): + """If shift is true, roll right and put replacement value at index 0. + + Parameters + ---------- + a : ndarray + Array to roll. + shift : ndarray, shape(1, ) + Whether to roll array. + replacement : ndarray, shape(1, ) + Value to place at index zero. + + Returns + ------- + result : ndarray + The (possibly) rolled array. + + """ + return cond( + shift, + lambda x, r: put(jnp.roll(x, shift=1), jnp.array([0]), r), + lambda x, r: x, + a, + replacement, + ) def polyint(c, k=None): @@ -401,8 +417,9 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): # If, in addition, the last intersect satisfies B_z < 0, then we have the # required information to compute a bounce integral between these points. # The below logic handles both tasks. 
- last_intersect = jnp.squeeze(_last_value(intersect)) - bp1 = _roll_and_replace(bp1, bp1[:, 0] > bp2[:, 0], last_intersect - knots[-1]) + bp1 = _roll_and_replace_if_shift( + bp1, bp1[:, 0] > bp2[:, 0], _last_value(intersect) - knots[-1] + ) # Notice that for the latter, an "approximation" is made that the field line is # periodic such that ζ = knots[-1] can be interpreted as ζ = 0 so that the # distance between these bounce points is well-defined. This may worry the diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 25f74de94a..8f771cd331 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -6,9 +6,10 @@ import pytest from interpax import Akima1DInterpolator -from desc.backend import fori_loop, put, root_scalar +from desc.backend import fori_loop, jnp, put, put_along_axis, root_scalar, vmap from desc.compute.bounce_integral import ( _last_value, + _roll_and_replace_if_shift, bounce_average, bounce_integral, compute_bounce_points, @@ -48,12 +49,28 @@ def test_mask_operation(): np.testing.assert_allclose( actual=taken[i], desired=np.pad(desired, (0, cols - desired.size), constant_values=np.nan), - err_msg="take_mask", + err_msg="take_mask() has bugs.", ) np.testing.assert_allclose( - actual=last[i], desired=desired[-1], err_msg="_last_value" + actual=last[i], desired=desired[-1], err_msg="_last_value() has bugs." ) + shift = np.random.choice([True, False], size=rows) + replacement = last * shift + a[:, 0] * (~shift) + # This might be a better way to perform this computation, without + # the jax.cond, which will get transformed to jax.select under vmap + # which performs both branches of the computation. + # But perhaps computing replacement as above, while fine for jit, + # will make the computation non-differentiable... + desired = put_along_axis( + vmap(jnp.roll)(a, shift), jnp.array([0]), replacement[:, np.newaxis], axis=-1 + ) + np.testing.assert_allclose( + actual=_roll_and_replace_if_shift(a, shift, replacement), + desired=desired, + err_msg="_roll_and_replace_if_shift() has bugs.", + ) + @pytest.mark.unit def test_reshape_convention(): From c9489a63b76a05f4929ca7c277ba428a7a4eb7da Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 1 Apr 2024 13:24:54 -0400 Subject: [PATCH 059/241] Fix test for roll_and_replace function from previous commit --- tests/test_bounce_integral.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 8f771cd331..b816586d80 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -56,17 +56,19 @@ def test_mask_operation(): ) shift = np.random.choice([True, False], size=rows) - replacement = last * shift + a[:, 0] * (~shift) # This might be a better way to perform this computation, without # the jax.cond, which will get transformed to jax.select under vmap # which performs both branches of the computation. # But perhaps computing replacement as above, while fine for jit, - # will make the computation non-differentiable... + # will make the computation non-differentiable. 
desired = put_along_axis( - vmap(jnp.roll)(a, shift), jnp.array([0]), replacement[:, np.newaxis], axis=-1 + vmap(jnp.roll)(taken, shift), + np.array([0]), + np.expand_dims(last * shift + taken[:, 0] * (~shift), axis=-1), + axis=-1, ) np.testing.assert_allclose( - actual=_roll_and_replace_if_shift(a, shift, replacement), + actual=_roll_and_replace_if_shift(taken, shift, last), desired=desired, err_msg="_roll_and_replace_if_shift() has bugs.", ) From 777b1f8f724f694cdb2df82bbe5a979c08064a60 Mon Sep 17 00:00:00 2001 From: Rahul Date: Tue, 2 Apr 2024 19:12:21 -0400 Subject: [PATCH 060/241] adding bounce-averaged drift test --- tests/test_bounce_integral.py | 130 ++++++++++++++++++++++++++++++++++ 1 file changed, 130 insertions(+) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index b816586d80..f692a6e8dc 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -5,6 +5,7 @@ import numpy as np import pytest from interpax import Akima1DInterpolator +from scipy.special import ellipe, ellipk from desc.backend import fori_loop, jnp, put, put_along_axis, root_scalar, vmap from desc.compute.bounce_integral import ( @@ -19,11 +20,13 @@ polyval, take_mask, ) +from desc.compute.utils import dot from desc.continuation import solve_continuation_automatic from desc.equilibrium import Equilibrium from desc.equilibrium.coords import desc_grid_from_field_line_coords from desc.examples import get from desc.geometry import FourierRZToroidalSurface +from desc.grid import Grid from desc.objectives import ( ObjectiveFromUser, ObjectiveFunction, @@ -310,6 +313,133 @@ def beta(grid, data): bp1, bp2 = compute_bounce_points(pitch, zeta, items["poly_B"], items["poly_B_z"]) +@pytest.mark.unit +def test_bounce_averaged_drifts(): + """Test bounce-averaged drift with analytical expressions. 
+
+    Calculate bounce-averaged drifts using the bounce-average routine and
+    compare it with the analytical expression.
+    # Note 1: This test can be merged with the elliptic integral test as
+    # we do calculate elliptic integrals here
+    # Note 2: Remove tests/test_equilibrium :: test_shifted_circle_geometry
+    # once all the epsilons and Gammas have been implemented and tested
+    """
+    eq = Equilibrium.load(".//tests//inputs//low-beta-shifted-circle.h5")
+
+    eq_keys = ["iota", "iota_r", "a", "rho", "psi"]
+
+    psi = 0.25  # rho^2 (or normalized psi)
+    alpha = 0
+
+    eq_keys = ["iota", "iota_r", "a", "rho", "psi"]
+
+    data_eq = eq.compute(eq_keys)
+
+    iotas = np.interp(np.sqrt(psi), data_eq["rho"], data_eq["iota"])
+    shears = np.interp(np.sqrt(psi), data_eq["rho"], data_eq["iota_r"])
+
+    N = int((2 * eq.M_grid) * 4 + 1)
+
+    zeta = np.linspace(-1.0 * np.pi / iotas, 1.0 * np.pi / iotas, N)
+    theta_PEST = alpha * np.ones(N, dtype=int) + iotas * zeta
+
+    coords1 = np.zeros((N, 3))
+    coords1[:, 0] = np.sqrt(psi) * np.ones(N, dtype=int)
+    coords1[:, 1] = theta_PEST
+    coords1[:, 2] = zeta
+
+    # Creating a grid along a field line
+    c1 = eq.compute_theta_coords(coords1)
+    grid = Grid(c1, sort=False)
+
+    # The bounce integral operator should be able to take a grid
+    bi, items = bounce_integral(eq, grid=grid, return_items=True)
+
+    data_keys = [
+        "|grad(psi)|^2",
+        "grad(psi)",
+        "B",
+        "iota",
+        "|B|",
+        "B^zeta",
+        "cvdrift0",
+        "cvdrift",
+        "gbdrift",
+    ]
+
+    data = eq.compute(data_keys, grid=grid, override_grid=False)
+
+    psib = data_eq["psi"][-1]
+
+    # signs
+    sign_psi = psib / np.abs(psib)
+    sign_iota = iotas / np.abs(iotas)
+
+    # normalizations
+    Lref = data_eq["a"]
+    Bref = 2 * np.abs(psib) / Lref**2
+
+    modB = data["|B|"]
+    bmag = modB / Bref
+
+    x = Lref * np.sqrt(psi)
+    s_hat = -x / iotas * shears / Lref
+
+    iota = data["iota"]
+    gradpar = Lref * data["B^zeta"] / modB
+
+    ## Comparing coefficient calculation here with coefficients from compute/_metric
+    cvdrift = -2 * sign_psi * Bref * Lref**2 * np.sqrt(psi) * data["cvdrift"]
+    gbdrift = -2 * sign_psi * Bref * Lref**2 * np.sqrt(psi) * data["gbdrift"]
+
+    a0_over_R0 = Lref * np.sqrt(psi)
+
+    bmag_an = np.mean(bmag) * (1 - a0_over_R0 * np.cos(theta_PEST))
+    np.testing.assert_allclose(bmag, bmag_an, atol=5e-3, rtol=5e-3)
+
+    gradpar_an = 2 * Lref * iota * (1 - a0_over_R0 * np.cos(theta_PEST))
+    np.testing.assert_allclose(gradpar, gradpar_an, atol=9e-3, rtol=5e-3)
+
+    dPdrho = np.mean(-0.5 * (cvdrift - gbdrift) * modB**2)
+    alpha_MHD = -dPdrho * 1 / iota**2 * 0.5
+
+    grad_psi = data["grad(psi)"]
+    grad_alpha = data["grad(alpha)"]
+
+    gds21 = -sign_iota * np.array(dot(grad_psi, grad_alpha)) * s_hat / Bref
+
+    fudge_factor2 = 0.19
+    gbdrift_an = fudge_factor2 * (
+        -1 * s_hat + (np.cos(theta_PEST) - 1.0 * gds21 / s_hat * np.sin(theta_PEST))
+    )
+
+    fudge_factor3 = 0.07
+    cvdrift_an = gbdrift_an + fudge_factor3 * alpha_MHD / bmag**2
+
+    # Comparing coefficients with their analytical expressions
+    np.testing.assert_allclose(gbdrift, gbdrift_an, atol=1.5e-2, rtol=5e-3)
+    np.testing.assert_allclose(cvdrift, cvdrift_an, atol=9e-3, rtol=5e-3)
+
+    # Values of pitch angle for which to evaluate the bounce averages
+    lambdas = np.linspace(1 / np.min(bmag), 1 / np.max(bmag), 11)
+
+    bavg_drift_an = (
+        0.5 * cvdrift_an * ellipe(lambdas)
+        + gbdrift_an * ellipk(lambdas)
+        + dPdrho / bmag**2 * ellipe(lambdas)
+    )
+
+    # The quantities are already calculated along a field line
+    bavg_drift_num = bi(
+        np.sqrt(1 - lambdas * bmag) * 0.5 * cvdrift
+        + gbdrift * 1 /
np.sqrt(1 - lambdas * bmag) + + dPdrho / bmag**2 * np.sqrt(1 - lambdas * bmag), + lambdas, + ) + + np.testing.assert_allclose(bavg_drift_num, bavg_drift_an, atol=2e-2, rtol=1e-2) + + # TODO: if deemed useful finish details using methods in desc.compute.bounce_integral def _compute_bounce_points_with_root_finding( eq, pitch, rho, alpha, resolution=20, zeta_max=10 * np.pi From b10070e82eb059cf59fbc6621bb4636e16285df9 Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 3 Apr 2024 15:50:52 -0400 Subject: [PATCH 061/241] Add back modulo by 2pi for initial guess since zeta field line is same as zeta desc --- desc/equilibrium/coords.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index 86010bf6e5..42a28d8004 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -344,7 +344,9 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): == "r" } r, a, z_fl = map(jnp.ravel, jnp.meshgrid(rho, alpha, zeta, indexing="ij")) - coords_sfl = jnp.column_stack([r, a + data_desc["iota"] * z_fl, z_fl]) + coords_sfl = jnp.column_stack( + [r, (a + data_desc["iota"] * z_fl) % (2 * jnp.pi), z_fl] + ) coords_desc = eq.compute_theta_coords(coords_sfl) grid_desc = Grid( nodes=coords_desc, From 74c9c741760cdf26702c4c14b5011fc8d81702ff Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 5 Apr 2024 14:26:03 -0400 Subject: [PATCH 062/241] Undo modding of zeta by 2pi, and ensure WFB integrals are computed --- desc/compute/bounce_integral.py | 41 ++++++++++++++++----------------- desc/equilibrium/coords.py | 5 ++-- 2 files changed, 22 insertions(+), 24 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 1fa1b9c05b..3e1a610613 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -400,7 +400,9 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): # The boolean masks is_bp1 and is_bp2 will encode whether a given entry in # intersect is a valid starting and ending bounce point, respectively. # Sign of derivative determines whether an intersect is a valid bounce point. - is_bp1 = B_z < 0 + # Need to include zero derivative intersects to compute the WFB + # (world's fattest banana) orbit bounce integrals. + is_bp1 = B_z <= 0 is_bp2 = B_z >= 0 # Get ζ values of bounce points from the masks. bp1 = take_mask(intersect, is_bp1) @@ -412,27 +414,24 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): # there can be at most one inversion, and if it exists, the inversion must be # at the first pair. To correct the inversion, it suffices to roll forward bp1. # Then the pairs bp1[:, i] and bp2[:, i] for i > 0 form integration boundaries. - # Moreover, if the first intersect satisfies B_z >= 0, that particle may be - # trapped in a well outside this snapshot of the field line. - # If, in addition, the last intersect satisfies B_z < 0, then we have the - # required information to compute a bounce integral between these points. - # The below logic handles both tasks. bp1 = _roll_and_replace_if_shift( bp1, bp1[:, 0] > bp2[:, 0], _last_value(intersect) - knots[-1] ) - # Notice that for the latter, an "approximation" is made that the field line is - # periodic such that ζ = knots[-1] can be interpreted as ζ = 0 so that the - # distance between these bounce points is well-defined. This may worry the - # reader if they recall that it is not desirable to have field lines close - # on themselves. 
However, for any irrational value for the rotational
-    # transform, there exists an arbitrarily close rational value (I'm just saying
-    # the basic result that rational numbers are dense in the real numbers).
-    # After such a rational amount of transits, the points corresponding to this
-    # distance along the field line and the start of the field line will be
-    # physically close. By continuity, the value of |B| at ζ = 0 is then close
-    # to the value of |B| of at ζ = knots[-1]. In general, continuity implies
-    # |B|(knots[-1] < ζ < knots[-1] + knots[0]) will approximately equal
-    # |B|(0 < ζ < knots[0]) as long as ζ = knots[-1] is large enough.
+    # Moreover, if the first intersect satisfies B_z >= 0, that particle may be
+    # trapped in a well outside this snapshot of the field line.
+    # If, in addition, the last intersect satisfies B_z <= 0, then we have the
+    # required information to compute a bounce integral between these points.
+    # This single bounce integral is somewhat undefined since the field typically
+    # does not close on itself, but in some cases it can make sense to include it.
+    # (To make this integral well-defined, an approximation is made that the field
+    # line is periodic such that ζ = knots[-1] can be interpreted as ζ = 0 so
+    # that the distance between these bounce points is well-defined. This is fine
+    # as long as after a transit the field line begins physically close to where
+    # it began on the previous transit, for then continuity of |B| implies
+    # |B|(knots[-1] < ζ < knots[-1] + knots[0]) is close to |B|(0 < ζ < knots[0])).
+    # The above rolling logic handles both tasks.
+    # We don't need to check the conditions for the latter, because if they are
+    # not satisfied, the quadrature will evaluate √(1 − λ |B|) as nan, as desired.
     bp1 = bp1.reshape(P, RA, -1)
     bp2 = bp2.reshape(P, RA, -1)
     return bp1, bp2
@@ -490,7 +489,7 @@ def bounce_integral(
     pitch=None,
     rho=jnp.linspace(1e-12, 1, 10),
     alpha=None,
-    zeta=jnp.linspace(0, 10 * jnp.pi, 20),
+    zeta=jnp.linspace(0, 6 * jnp.pi, 20),
     quadrature=tanh_sinh_quadrature,
     **kwargs,
 ):
@@ -655,7 +654,7 @@ def bounce_average(
     pitch=None,
     rho=jnp.linspace(1e-12, 1, 10),
     alpha=None,
-    zeta=jnp.linspace(0, 10 * jnp.pi, 20),
+    zeta=jnp.linspace(0, 6 * jnp.pi, 20),
     quadrature=tanh_sinh_quadrature,
     **kwargs,
 ):
diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py
index 4689a04a44..290a0971fe 100644
--- a/desc/equilibrium/coords.py
+++ b/desc/equilibrium/coords.py
@@ -383,9 +383,8 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta):
         == "r"
     }
     r, a, z_fl = map(jnp.ravel, jnp.meshgrid(rho, alpha, zeta, indexing="ij"))
-    coords_sfl = jnp.column_stack(
-        [r, (a + data_desc["iota"] * z_fl) % (2 * jnp.pi), z_fl]
-    )
+    # don't modulo field line zeta by 2pi
+    coords_sfl = jnp.column_stack([r, a + data_desc["iota"] * z_fl, z_fl])
     coords_desc = eq.compute_theta_coords(coords_sfl)
     grid_desc = Grid(
         nodes=coords_desc,

From acb1f549cdd48bc25b5249e0cc316813b61fbcb9 Mon Sep 17 00:00:00 2001
From: unalmis
Date: Sat, 6 Apr 2024 05:32:53 -0400
Subject: [PATCH 063/241] Fix bugs in compute_bounce_points concerning...

The polynomial coefficients being for the local power basis expansion.
Numerical errors with roots with small imaginary components not being found.
Generalize and simplify logic to correct inversion of bounce points to all
edge cases. Add unit tests for computing the bounce points.
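For readers following the commit message, a short sketch of the local power basis it refers to may help; it uses SciPy's ``CubicHermiteSpline`` purely for illustration (interpax exposes the same ``.c`` layout in this patch, but treat that as an assumption here). Each spline piece is expanded about its left knot, so a root found from the per-interval coefficients lives in the shifted variable ζ − knots[j] and must be translated back, consistent with the ``intersect = intersect + knots[:-1, jnp.newaxis]`` line added later in this patch.

.. code-block:: python

    # Illustration only: piecewise polynomials use the local power basis
    # S(x) = sum_i c[i, j] * (x - knots[j])**(3 - i) on piece j.
    import numpy as np
    from scipy.interpolate import CubicHermiteSpline

    knots = np.linspace(0, 2 * np.pi, 5)
    spline = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots))
    j = 2
    x = 0.5 * (knots[j] + knots[j + 1])
    local = sum(spline.c[i, j] * (x - knots[j]) ** (3 - i) for i in range(4))
    np.testing.assert_allclose(local, spline(x))
    # A root r of the local polynomial therefore corresponds to the point
    # knots[j] + r on the field line, hence the shift by knots[:-1] in the patch.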
--- desc/backend.py | 2 +- desc/compute/bounce_integral.py | 107 ++++++++++++++++++-------------- tests/test_bounce_integral.py | 41 +++++++++--- 3 files changed, 93 insertions(+), 57 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index db48b2ad4a..29875fcc82 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -137,7 +137,7 @@ def put_along_axis(arr, indices, values, axis): array is treated as if a flattened 1d view had been created of it. """ - if axis != -1: + if not (axis == -1 or axis == arr.ndim - 1): raise NotImplementedError( f"put_along_axis for axis={axis} not implemented yet." ) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 3e1a610613..eed1ce980e 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -4,7 +4,16 @@ from interpax import CubicHermiteSpline, interp1d -from desc.backend import complex_sqrt, cond, flatnonzero, jnp, put, take, vmap +from desc.backend import ( + complex_sqrt, + cond, + flatnonzero, + jnp, + put, + put_along_axis, + take, + vmap, +) from desc.equilibrium.coords import desc_grid_from_field_line_coords @@ -124,8 +133,9 @@ def take_mask(a, mask, size=None, fill_value=None): Output array. """ + assert a.size == mask.size if size is None: - size = a.size + size = mask.size idx = flatnonzero(mask, size=size, fill_value=mask.size) a_mask = take( a, @@ -148,15 +158,15 @@ def _last_value(a): @vmap -def _roll_and_replace_if_shift(a, shift, replacement): - """If shift is true, roll right and put replacement value at index 0. +def _maybe_roll_and_replace(maybe, a, replacement): + """If maybe is true, roll a right and put replacement value at a[0]. Parameters ---------- + maybe : ndarray, shape(1, ) + Whether to roll array. a : ndarray Array to roll. - shift : ndarray, shape(1, ) - Whether to roll array. replacement : ndarray, shape(1, ) Value to place at index zero. @@ -167,7 +177,7 @@ def _roll_and_replace_if_shift(a, shift, replacement): """ return cond( - shift, + maybe, lambda x, r: put(jnp.roll(x, shift=1), jnp.array([0]), r), lambda x, r: x, a, @@ -277,7 +287,7 @@ def cubic_poly_roots(coef, k=0, a_min=None, a_max=None, sort=False): coef : ndarray First axis should store coefficients of a polynomial. For a polynomial given by c₁ x³ + c₂ x² + c₃ x + c₄, ``coef[i]`` should store cᵢ. - It is assumed that c₁ is nonzero. + In writing the above polynomial, it is assumed that c₁ is nonzero. k : ndarray Specify to find solutions to c₁ x³ + c₂ x² + c₃ x + c₄ = ``k``. Should broadcast with arrays of shape(*coef.shape[1:]). @@ -317,7 +327,7 @@ def compute_root(xi): def clip_to_nan(root): return jnp.where( - jnp.isreal(root) & (a_min <= root) & (root <= a_max), + jnp.isclose(jnp.imag(root), 0) & (a_min <= root) & (root <= a_max), jnp.real(root), jnp.nan, ) @@ -331,6 +341,7 @@ def clip_to_nan(root): roots = jnp.stack(roots, axis=-1) if sort: roots = jnp.sort(roots, axis=-1) + # TODO: make sure that double roots, triple roots, are filtered into single roots return roots @@ -349,12 +360,12 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): knots : ndarray, shape(knots.size, ) Field line-following ζ coordinates of spline knots. poly_B : ndarray, shape(4, R * A, knots.size - 1) - Polynomial coefficients of the cubic spline of |B|. + Polynomial coefficients of the cubic spline of |B| in local power basis. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. 
poly_B_z : ndarray, shape(3, R * A, knots.size - 1) - Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. + Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ in local power basis. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. @@ -366,6 +377,8 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): along a field line. Has shape (P, R * A, (knots.size - 1) * 3). If there were less than (knots.size - 1) * 3 bounce points along a field line, then the last axis is padded with nan. + The pairs bp1[..., i] and bp2[..., i] form integration boundaries + for bounce integrals. """ P = pitch.shape[0] # batch size @@ -378,10 +391,11 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): # case where each cubic polynomial intersects 1 / λ thrice. # nan values in ``intersect`` denote a polynomial has less than three intersects. intersect = cubic_poly_roots( + # coefficients are in local power basis expansion coef=poly_B, k=jnp.expand_dims(1 / pitch, axis=-1), - a_min=knots[:-1], - a_max=knots[1:], + a_min=jnp.array([0]), + a_max=jnp.diff(knots), sort=True, ) assert intersect.shape == (P, RA, N, 3) @@ -389,6 +403,8 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): # Reshape so that last axis enumerates intersects of a pitch along a field line. # Condense remaining axes to vmap over them. B_z = polyval(x=intersect, c=poly_B_z[..., jnp.newaxis]).reshape(P * RA, -1) + # Transform from local power basis expansion to real space. + intersect = intersect + knots[:-1, jnp.newaxis] intersect = intersect.reshape(P * RA, -1) # Only consider intersect if it is within knots that bound that polynomial. is_intersect = ~jnp.isnan(intersect) @@ -396,7 +412,7 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): # Rearrange so that all intersects along a field line are contiguous. intersect = take_mask(intersect, is_intersect) B_z = take_mask(B_z, is_intersect) - assert intersect.shape == B_z.shape == is_intersect.shape == (P * RA, N * 3) + assert intersect.shape == is_intersect.shape == B_z.shape == (P * RA, N * 3) # The boolean masks is_bp1 and is_bp2 will encode whether a given entry in # intersect is a valid starting and ending bounce point, respectively. # Sign of derivative determines whether an intersect is a valid bounce point. @@ -404,37 +420,34 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): # (world's fattest banana) orbit bounce integrals. is_bp1 = B_z <= 0 is_bp2 = B_z >= 0 - # Get ζ values of bounce points from the masks. - bp1 = take_mask(intersect, is_bp1) - bp2 = take_mask(intersect, is_bp2) # For correctness, it is necessary that the first intersect satisfies B_z <= 0. # That is, the pairs bp1[:, i] and bp2[:, i] are the boundaries of an # integral only if bp1[:, i] <= bp2[:, i]. # Now, because B_z[:, i] <= 0 implies B_z[:, i + 1] >= 0 by continuity, # there can be at most one inversion, and if it exists, the inversion must be - # at the first pair. To correct the inversion, it suffices to roll forward bp1. - # Then the pairs bp1[:, i] and bp2[:, i] for i > 0 form integration boundaries. - bp1 = _roll_and_replace_if_shift( - bp1, bp1[:, 0] > bp2[:, 0], _last_value(intersect) - knots[-1] - ) - # Moreover, if the first intersect satisfies B_z >= 0, that particle may be - # trapped in a well outside this snapshot of the field line. 
- # If, in addition, the last intersect satisfies B_z <= 0, then we have the - # required information to compute a bounce integral between these points. - # This single bounce integral is somewhat undefined since the field typically - # does not close on itself, but in some cases it can make sense to include it. - # (To make this integral well-defined, an approximation is made that the field - # line is periodic such that ζ = knots[-1] can be interpreted as ζ = 0 so - # that the distance between these bounce points is well-defined. This is fine - # as long as after a transit the field line begins physically close to where - # it began on the previous transit, for then continuity of |B| implies - # |B|(knots[-1] < ζ < knots[-1] + knots[0]) is close to |B|(0 < ζ < knots[0])). - # The above rolling logic handles both tasks. - # We don't need to check the conditions for the latter, because if they are - # not satisfied, the quadrature will evaluate √(1 − λ |B|) as nan, as desired. - bp1 = bp1.reshape(P, RA, -1) - bp2 = bp2.reshape(P, RA, -1) + # at the first pair. To correct the inversion, it suffices to disqualify + # the first intersect as a right bounce point. + edge_case = (B_z[:, 0] == 0) & (B_z[:, 1] < 0) + is_bp2 = put_along_axis(is_bp2, jnp.array([0]), edge_case[:, jnp.newaxis], axis=-1) + # Get ζ values of bounce points from the masks. + bp1 = take_mask(intersect, is_bp1).reshape(P, RA, -1) + bp2 = take_mask(intersect, is_bp2).reshape(P, RA, -1) return bp1, bp2 + # This is no longer implemented at the moment, but can be simply. + # If the first intersect satisfies B_z >= 0, that particle may be + # trapped in a well outside this snapshot of the field line. + # If, in addition, the last intersect satisfies B_z <= 0, then we have the + # required information to compute a bounce integral between these points. + # This single bounce integral is somewhat undefined since the field typically + # does not close on itself, but in some cases it can make sense to include it. + # (To make this integral well-defined, an approximation is made that the field + # line is periodic such that ζ = knots[-1] can be interpreted as ζ = 0 so + # that the distance between these bounce points is well-defined. This is fine + # as long as after a transit the field line begins physically close to where + # it began on the previous transit, for then continuity of |B| implies + # |B|(knots[-1] < ζ < knots[-1] + knots[0]) is close to |B|(0 < ζ < knots[0])). + # We don't need to check the conditions for the latter, because if they are + # not satisfied, the quadrature will evaluate √(1 − λ |B|) as nan, as desired. def _compute_bp_if_given_pitch(pitch, knots, poly_B, poly_B_z, *original, err=False): @@ -452,12 +465,12 @@ def _compute_bp_if_given_pitch(pitch, knots, poly_B, poly_B_z, *original, err=Fa knots : ndarray, shape(knots.size, ) Field line-following ζ coordinates of spline knots. poly_B : ndarray, shape(4, R * A, knots.size - 1) - Polynomial coefficients of the cubic spline of |B|. + Polynomial coefficients of the cubic spline of |B| in local power basis. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. poly_B_z : ndarray, shape(3, R * A, knots.size - 1) - Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. + Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ in local power basis. 
First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. @@ -553,13 +566,13 @@ def bounce_integral( data : dict Dictionary of ndarrays of stuff evaluated on ``grid``. poly_B : ndarray, shape(4, R * A, zeta.size - 1) - Polynomial coefficients of the cubic spline of |B|. + Polynomial coefficients of the cubic spline of |B| in local power basis. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. poly_B_z : ndarray, shape(3, R * A, zeta.size - 1) - Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. - First axis should iterate through coefficients of power series, + Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ in local power + basis. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. @@ -721,13 +734,13 @@ def bounce_average( data : dict Dictionary of ndarrays of stuff evaluated on ``grid``. poly_B : ndarray, shape(4, R * A, zeta.size - 1) - Polynomial coefficients of the cubic spline of |B|. + Polynomial coefficients of the cubic spline of |B| in local power basis. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. poly_B_z : ndarray, shape(3, R * A, zeta.size - 1) - Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ. - First axis should iterate through coefficients of power series, + Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ in local power + basis. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index f692a6e8dc..e5fba7607a 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -4,13 +4,14 @@ import numpy as np import pytest -from interpax import Akima1DInterpolator +from interpax import Akima1DInterpolator, CubicHermiteSpline +from matplotlib import pyplot as plt from scipy.special import ellipe, ellipk from desc.backend import fori_loop, jnp, put, put_along_axis, root_scalar, vmap from desc.compute.bounce_integral import ( _last_value, - _roll_and_replace_if_shift, + _maybe_roll_and_replace, bounce_average, bounce_integral, compute_bounce_points, @@ -58,20 +59,20 @@ def test_mask_operation(): actual=last[i], desired=desired[-1], err_msg="_last_value() has bugs." ) - shift = np.random.choice([True, False], size=rows) + maybe = np.random.choice([True, False], size=rows) # This might be a better way to perform this computation, without # the jax.cond, which will get transformed to jax.select under vmap # which performs both branches of the computation. # But perhaps computing replacement as above, while fine for jit, # will make the computation non-differentiable. 
desired = put_along_axis( - vmap(jnp.roll)(taken, shift), + vmap(jnp.roll)(taken, maybe), np.array([0]), - np.expand_dims(last * shift + taken[:, 0] * (~shift), axis=-1), + np.expand_dims(last * maybe + taken[:, 0] * (~maybe), axis=-1), axis=-1, ) np.testing.assert_allclose( - actual=_roll_and_replace_if_shift(taken, shift, last), + actual=_maybe_roll_and_replace(maybe, taken, last), desired=desired, err_msg="_roll_and_replace_if_shift() has bugs.", ) @@ -214,6 +215,30 @@ def test_polyval(): np.testing.assert_allclose(primitive, a1d.antiderivative().c) +@pytest.mark.unit +def test_compute_bounce_points(): + """Test that the bounce points are computed correctly.""" + pitch = np.atleast_2d([2]) + start = np.pi / 3 + end = 6 * np.pi + knots = np.linspace(start, end, 5) + B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots)) + bp1, bp2 = compute_bounce_points( + pitch, knots, B.c[:, np.newaxis], B.derivative().c[:, np.newaxis] + ) + bp1, bp2 = map(np.ravel, (bp1, bp2)) + bp1, bp2 = map(lambda bp: bp[~np.isnan(bp)], (bp1, bp2)) + np.testing.assert_allclose(bp1, np.array([1.04719755, 7.13120418])) + np.testing.assert_allclose(bp2, np.array([5.19226163, 17.57830469])) + # TODO: add all the edge cases I parameterized, and use root finding + # as baseline test instead of hardcoding. + plt.axvline(x=knots, color="red", linestyle="--") + x = np.linspace(start, end, 50) + plt.plot(x, B(x)) + plt.plot(x, np.ones(x.size) / pitch.ravel()) + plt.show() + + @pytest.mark.unit def test_pitch_and_hairy_ball(): """Test different ways of specifying pitch and ensure B does not vanish.""" @@ -322,12 +347,10 @@ def test_bounce_averaged_drifts(): # Note 1: This test can be merged with the elliptic integral test as we do calculate elliptic integrals here # Note 2: Remove tests/test_equilibrium :: test_shifted_circle_geometry - # once all the epsilons and Gammas have been implmented and tested + # once all the epsilons and Gammas have been implemented and tested """ eq = Equilibrium.load(".//tests//inputs//low-beta-shifted-circle.h5") - eq_keys = ["iota", "iota_r", "a", "rho", "psi"] - psi = 0.25 # rho^2 (or normalized psi) alpha = 0 From 6ad2e59da051d48b8779073973c4aa11e21b9152 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 7 Apr 2024 03:20:07 -0400 Subject: [PATCH 064/241] Genearlize root finding to other degree polynomial to be able to compute pitch at fatter banana orbits. Continue adding tests --- desc/compute/bounce_integral.py | 275 +++++++++++++++++++------------- tests/test_bounce_integral.py | 176 ++++++++++++++------ 2 files changed, 295 insertions(+), 156 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index eed1ce980e..3ee2fecb7b 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -4,18 +4,12 @@ from interpax import CubicHermiteSpline, interp1d -from desc.backend import ( - complex_sqrt, - cond, - flatnonzero, - jnp, - put, - put_along_axis, - take, - vmap, -) +from desc.backend import complex_sqrt, flatnonzero, jnp, put_along_axis, take, vmap +from desc.compute.utils import safediv from desc.equilibrium.coords import desc_grid_from_field_line_coords +roots = jnp.vectorize(partial(jnp.roots, strip_zeros=False), signature="(n)->(m)") + # vmap to compute a bounce integral for every pitch along every field line. @partial(vmap, in_axes=(1, 1, None, None, 0, 0, 0, 0), out_axes=1) @@ -26,7 +20,7 @@ def bounce_quadrature(pitch, X, w, knots, f, B_sup_z, B, B_z_ra): ---------- pitch : ndarray, shape(pitch.size, ) λ values. 
- X : ndarray, shape(pitch.size, (knots.size - 1) * 3, w.size) + X : ndarray, shape(pitch.size, (knots.size - 1) * degree, w.size) Quadrature points. w : ndarray, shape(w.size, ) Quadrature weights. @@ -43,14 +37,14 @@ def bounce_quadrature(pitch, X, w, knots, f, B_sup_z, B, B_z_ra): Returns ------- - inner_product : ndarray, shape(P, (knots.size - 1) * 3) + inner_product : ndarray, shape(P, (knots.size - 1) * degree) Bounce integrals for every pitch along a particular field line. """ assert pitch.ndim == 1 == w.ndim assert X.shape == (pitch.size, (knots.size - 1) * 3, w.size) assert knots.shape == f.shape == B_sup_z.shape == B.shape == B_z_ra.shape - # Cubic spline the integrand so that we can evaluate it at quadrature points + # Spline the integrand so that we can evaluate it at quadrature points # without expensive coordinate mappings and root finding. # Spline each function separately so that the singularity near the bounce # points can be captured more accurately than can be by any polynomial. @@ -149,42 +143,6 @@ def take_mask(a, mask, size=None, fill_value=None): return a_mask -@vmap -def _last_value(a): - """Return the last non-nan value in ``a``.""" - a = jnp.ravel(a)[::-1] - idx = jnp.squeeze(flatnonzero(~jnp.isnan(a), size=1, fill_value=0)) - return a[idx] - - -@vmap -def _maybe_roll_and_replace(maybe, a, replacement): - """If maybe is true, roll a right and put replacement value at a[0]. - - Parameters - ---------- - maybe : ndarray, shape(1, ) - Whether to roll array. - a : ndarray - Array to roll. - replacement : ndarray, shape(1, ) - Value to place at index zero. - - Returns - ------- - result : ndarray - The (possibly) rolled array. - - """ - return cond( - maybe, - lambda x, r: put(jnp.roll(x, shift=1), jnp.array([0]), r), - lambda x, r: x, - a, - replacement, - ) - - def polyint(c, k=None): """Coefficients for the primitives of the given set of polynomials. @@ -279,70 +237,168 @@ def polyval(x, c): return val -def cubic_poly_roots(coef, k=0, a_min=None, a_max=None, sort=False): - """Roots of cubic polynomial with given coefficients. +def _complex_to_nan(root, a_min=-jnp.inf, a_max=jnp.inf): + """Set complex-valued roots and real roots outside [a_min, a_max] to nan. Parameters ---------- - coef : ndarray - First axis should store coefficients of a polynomial. For a polynomial - given by c₁ x³ + c₂ x² + c₃ x + c₄, ``coef[i]`` should store cᵢ. - In writing the above polynomial, it is assumed that c₁ is nonzero. - k : ndarray - Specify to find solutions to c₁ x³ + c₂ x² + c₃ x + c₄ = ``k``. - Should broadcast with arrays of shape(*coef.shape[1:]). + root : ndarray + Complex-valued roots. a_min, a_max : ndarray Minimum and maximum value to return roots between. - If specified only real roots are returned. - If None, returns all complex roots. - Should broadcast with arrays of shape(*coef.shape[1:]). - sort : bool - Whether to sort the roots. + Should broadcast with ``root`` array. Returns ------- roots : ndarray - The roots of the cubic polynomial. - The three roots are iterated over the last axis. + The real roots in [a_min, a_max], others transformed to nan. """ - # https://en.wikipedia.org/wiki/Cubic_equation#General_cubic_formula - # The common libraries use root-finding which isn't JIT compilable. 
- clip = not (a_min is None and a_max is None) if a_min is None: a_min = -jnp.inf if a_max is None: a_max = jnp.inf + return jnp.where( + jnp.isclose(jnp.imag(root), 0) & (a_min <= root) & (root <= a_max), + jnp.real(root), + jnp.nan, + ) + + +def _root_linear(a, b): + sentinel = -1 # 0 is minimum value for valid root in local power basis + return safediv(-b, a, fill=sentinel) + - a, b, c, d = coef - d = d - k +def _root_quadratic(a, b, c): + t = complex_sqrt(b**2 - 4 * a * c) + root = lambda xi: safediv(-b + xi * t, 2 * a) + is_linear = jnp.isclose(a, 0) + r1 = jnp.where(is_linear, _root_linear(b, c), root(-1)) + r2 = jnp.where(is_linear, jnp.nan, root(1)) + return r1, r2 + + +def _root_cubic(a, b, c, d): + # https://en.wikipedia.org/wiki/Cubic_equation#General_cubic_formula t_0 = b**2 - 3 * a * c t_1 = 2 * b**3 - 9 * a * b * c + 27 * a**2 * d C = ((t_1 + complex_sqrt(t_1**2 - 4 * t_0**3)) / 2) ** (1 / 3) - is_zero = jnp.isclose(C, 0) + C_is_zero = jnp.isclose(C, 0) - def compute_root(xi): - t_2 = jnp.where(is_zero, 0, t_0 / (xi * C)) - return -(b + xi * C + t_2) / (3 * a) + def root(xi): + return safediv(b + xi * C + jnp.where(C_is_zero, 0, t_0 / (xi * C)), -3 * a) - def clip_to_nan(root): - return jnp.where( - jnp.isclose(jnp.imag(root), 0) & (a_min <= root) & (root <= a_max), - jnp.real(root), - jnp.nan, - ) + xi1 = (-1 + (-3) ** 0.5) / 2 + xi2 = xi1**2 + xi3 = 1 + is_quadratic = jnp.isclose(a, 0) + q1, q2 = _root_quadratic(b, c, d) + r1 = jnp.where(is_quadratic, q1, root(xi1)) + r2 = jnp.where(is_quadratic, q2, root(xi2)) + r3 = jnp.where(is_quadratic, jnp.nan, root(xi3)) + return r1, r2, r3 + + +def poly_roots(coef, k=0, a_min=None, a_max=None, sort=False): + """Roots of polynomial with given real coefficients. + + Parameters + ---------- + coef : ndarray + First axis should store coefficients of a polynomial. + For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, + coefficient cᵢ should be stored at ``c[n - i]``. + k : ndarray + Specify to find solutions to ∑ᵢⁿ cᵢ xⁱ = ``k``. + Should broadcast with arrays of shape(*coef.shape[1:]). + a_min, a_max : ndarray + Minimum and maximum value to return roots between. + If specified only real roots are returned. + If None, returns all complex roots. + Should broadcast with arrays of shape(*coef.shape[1:]). + sort : bool + Whether to sort the roots. - xi_1 = (-1 + (-3) ** 0.5) / 2 - xi_2 = xi_1**2 - xi_3 = 1 - roots = tuple(map(compute_root, (xi_1, xi_2, xi_3))) - if clip: - roots = tuple(map(clip_to_nan, roots)) - roots = jnp.stack(roots, axis=-1) + Returns + ------- + r : ndarray + The roots of the polynomial, iterated over the last axis. 
+ + """ + # TODO: need to add option to filter double/triple roots into single roots + if 2 <= coef.shape[0] <= 4: + # compute from analytic formula + func = {4: _root_cubic, 3: _root_quadratic, 2: _root_linear}[coef.shape[0]] + r = func(*coef[:-1], coef[-1] - k) + if not (a_min is None and a_max is None): + r = tuple(map(partial(_complex_to_nan, a_min=a_min, a_max=a_max), r)) + r = jnp.stack(r, axis=-1) + else: + # compute from eigenvalues of polynomial companion matrix + d = coef[-1] - k + c = [jnp.broadcast_to(c, d.shape) for c in coef[:-1]] + c.append(d) + coef = jnp.stack(c) + r = roots(coef.reshape(coef.shape[0], -1).T).reshape(*coef.shape[1:], -1) + if not (a_min is None and a_max is None): + if a_min is not None: + a_min = a_min[..., jnp.newaxis] + if a_max is not None: + a_max = a_max[..., jnp.newaxis] + r = _complex_to_nan(r, a_min, a_max) if sort: - roots = jnp.sort(roots, axis=-1) - # TODO: make sure that double roots, triple roots, are filtered into single roots - return roots + r = jnp.sort(r, axis=-1) + return r + + +def pitch_extrema(knots, poly_B, poly_B_z): + """Returns pitch that will capture fat banana orbits. + + These pitch values are 1/|B|(ζ*) where |B|(ζ*) is a local maximum. + The local minimum are returned as well. + + Parameters + ---------- + knots : ndarray, shape(knots.size, ) + Field line-following ζ coordinates of spline knots. + poly_B : ndarray, shape(poly_B.shape[0], R * A, knots.size - 1) + Polynomial coefficients of the spline of |B| in local power basis. + First axis should iterate through coefficients of power series, + and the last axis should iterate through the piecewise + polynomials of a particular spline of |B| along field line. + poly_B_z : ndarray, shape(poly_B_z.shape[0], R * A, knots.size - 1) + Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. + First axis should iterate through coefficients of power series, + and the last axis should iterate through the piecewise + polynomials of a particular spline of |B| along field line. + + Returns + ------- + pitch : ndarray, shape((knots.size - 1) * (poly_B_z.shape[0] - 1), R * A) + Returns at most pitch.shape[0] many pitch values for every field line. + If less extrema were found, then the array has nan padded on the right. + You will likely need to reshape the output as follows: + pitch = pitch.reshape(pitch.shape[0], rho.size, alpha.size). + + """ + RA = poly_B.shape[1] # rho.size * alpha.size + N = knots.size - 1 # number of piecewise cubic polynomials per field line + assert poly_B.shape[1:] == poly_B_z.shape[1:] + assert poly_B.shape[-1] == N + degree = poly_B_z.shape[0] - 1 + extrema = poly_roots( + coef=poly_B_z, + a_min=jnp.array([0]), + a_max=jnp.diff(knots), + sort=False, # don't need to sort + ) + assert extrema.shape == (RA, N, degree) + B_extrema = polyval(x=extrema, c=poly_B[..., jnp.newaxis]).reshape(RA, -1) + B_extrema = take_mask(B_extrema, ~jnp.isnan(B_extrema)) + pitch = 1 / B_extrema.T + assert pitch.shape == (N * degree, RA) + return pitch def compute_bounce_points(pitch, knots, poly_B, poly_B_z): @@ -359,13 +415,13 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): If an additional axis exists on the left, it is the batch axis as usual. knots : ndarray, shape(knots.size, ) Field line-following ζ coordinates of spline knots. - poly_B : ndarray, shape(4, R * A, knots.size - 1) - Polynomial coefficients of the cubic spline of |B| in local power basis. 
+ poly_B : ndarray, shape(poly_B.shape[0], R * A, knots.size - 1) + Polynomial coefficients of the spline of |B| in local power basis. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. - poly_B_z : ndarray, shape(3, R * A, knots.size - 1) - Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ in local power basis. + poly_B_z : ndarray, shape(poly_B_z.shape[0], R * A, knots.size - 1) + Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. @@ -384,21 +440,22 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): P = pitch.shape[0] # batch size RA = poly_B.shape[1] # rho.size * alpha.size N = knots.size - 1 # number of piecewise cubic polynomials per field line - assert poly_B.shape[-1] == poly_B_z.shape[-1] == N + assert poly_B.shape[1:] == poly_B_z.shape[1:] + assert poly_B.shape[-1] == N + degree = poly_B.shape[0] - 1 # The polynomials' intersection points with 1 / λ is given by ``intersect``. # In order to be JIT compilable, this must have a shape that accommodates the # case where each cubic polynomial intersects 1 / λ thrice. # nan values in ``intersect`` denote a polynomial has less than three intersects. - intersect = cubic_poly_roots( - # coefficients are in local power basis expansion + intersect = poly_roots( coef=poly_B, k=jnp.expand_dims(1 / pitch, axis=-1), a_min=jnp.array([0]), a_max=jnp.diff(knots), sort=True, ) - assert intersect.shape == (P, RA, N, 3) + assert intersect.shape == (P, RA, N, degree) # Reshape so that last axis enumerates intersects of a pitch along a field line. # Condense remaining axes to vmap over them. @@ -412,7 +469,7 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): # Rearrange so that all intersects along a field line are contiguous. intersect = take_mask(intersect, is_intersect) B_z = take_mask(B_z, is_intersect) - assert intersect.shape == is_intersect.shape == B_z.shape == (P * RA, N * 3) + assert intersect.shape == is_intersect.shape == B_z.shape == (P * RA, N * degree) # The boolean masks is_bp1 and is_bp2 will encode whether a given entry in # intersect is a valid starting and ending bounce point, respectively. # Sign of derivative determines whether an intersect is a valid bounce point. @@ -464,13 +521,13 @@ def _compute_bp_if_given_pitch(pitch, knots, poly_B, poly_B_z, *original, err=Fa If an additional axis exists on the left, it is the batch axis as usual. knots : ndarray, shape(knots.size, ) Field line-following ζ coordinates of spline knots. - poly_B : ndarray, shape(4, R * A, knots.size - 1) - Polynomial coefficients of the cubic spline of |B| in local power basis. + poly_B : ndarray, shape(poly_B.shape[0], R * A, knots.size - 1) + Polynomial coefficients of the spline of |B| in local power basis. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. - poly_B_z : ndarray, shape(3, R * A, knots.size - 1) - Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ in local power basis. + poly_B_z : ndarray, shape(poly_B_z.shape[0], R * A, knots.size - 1) + Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. 
First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. @@ -540,7 +597,7 @@ def bounce_integral( alpha : ndarray Unique field line label coordinates over a constant rho surface. zeta : ndarray - A cubic spline of the integrand is computed at these values of the field + A spline of the integrand is computed at these values of the field line following coordinate, for every field line in the meshgrid formed from rho and alpha specified above. The number of knots specifies the grid resolution as increasing the @@ -566,12 +623,12 @@ def bounce_integral( data : dict Dictionary of ndarrays of stuff evaluated on ``grid``. poly_B : ndarray, shape(4, R * A, zeta.size - 1) - Polynomial coefficients of the cubic spline of |B| in local power basis. + Polynomial coefficients of the spline of |B| in local power basis. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. poly_B_z : ndarray, shape(3, R * A, zeta.size - 1) - Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ in local power + Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. @@ -706,7 +763,7 @@ def bounce_average( alpha : ndarray Unique field line label coordinates over a constant rho surface. zeta : ndarray - A cubic spline of the integrand is computed at these values of the field + A spline of the integrand is computed at these values of the field line following coordinate, for every field line in the meshgrid formed from rho and alpha specified above. The number of knots specifies the grid resolution as increasing the @@ -734,12 +791,12 @@ def bounce_average( data : dict Dictionary of ndarrays of stuff evaluated on ``grid``. poly_B : ndarray, shape(4, R * A, zeta.size - 1) - Polynomial coefficients of the cubic spline of |B| in local power basis. + Polynomial coefficients of the spline of |B| in local power basis. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. poly_B_z : ndarray, shape(3, R * A, zeta.size - 1) - Polynomial coefficients of the cubic spline of ∂|B|/∂_ζ in local power + Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. First axis should iterate through coefficients of power series, and the last axis should iterate through the piecewise polynomials of a particular spline of |B| along field line. 
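The data layout described in the docstrings above can be seen in isolation with a short standalone sketch. The code below is plain NumPy/SciPy, not DESC code; the |B| profile, knot count, and pitch value are toy assumptions chosen only for illustration. It builds the cubic Hermite spline of |B| along one field line, forms the local power-basis coefficients with the documented shapes, and finds where |B| crosses 1/λ on each knot interval, which is the root-finding step that ``compute_bounce_points`` performs in a vectorized, JIT-compatible way over many pitch values and field lines.

    import numpy as np
    from scipy.interpolate import CubicHermiteSpline

    # Toy field line: |B|(zeta) and its zeta derivative at the spline knots.
    zeta = np.linspace(0, 6 * np.pi, 20)
    B = 1.5 + 0.5 * np.cos(zeta)
    B_z = -0.5 * np.sin(zeta)

    # Local power basis coefficients, highest degree first, one column per knot interval.
    poly_B = CubicHermiteSpline(zeta, B, B_z).c                      # shape (4, zeta.size - 1)
    poly_B_z = np.stack([3 * poly_B[0], 2 * poly_B[1], poly_B[2]])   # shape (3, zeta.size - 1)

    pitch = 0.6  # toy lambda value chosen so that 1/pitch lies inside the range of |B|
    intersects = []
    for i in range(zeta.size - 1):
        # Roots of |B|(zeta) - 1/pitch on the i-th interval, found in the shifted
        # variable zeta - zeta[i], then mapped back to the field-line coordinate.
        c = poly_B[:, i].copy()
        c[-1] -= 1 / pitch
        r = np.roots(c)
        r = r[np.isclose(r.imag, 0)].real
        r = r[(0 <= r) & (r <= zeta[i + 1] - zeta[i])]
        intersects.extend(zeta[i] + np.sort(r))
    # The sign of d|B|/dzeta at each intersect decides whether it starts or ends a
    # bounce integral, which is what the is_bp1/is_bp2 masks encode in the real code.

Working in the local power basis, with each interval shifted so that its left knot sits at zero, is what allows the roots to be clipped to [0, Δζᵢ] before being translated back to field-line coordinates, exactly as ``intersect + knots[:-1]`` does in the hunks above.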
diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index e5fba7607a..94f18ed008 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -8,14 +8,23 @@ from matplotlib import pyplot as plt from scipy.special import ellipe, ellipk -from desc.backend import fori_loop, jnp, put, put_along_axis, root_scalar, vmap +from desc.backend import ( + cond, + flatnonzero, + fori_loop, + jnp, + put, + put_along_axis, + root_scalar, + vmap, +) from desc.compute.bounce_integral import ( - _last_value, - _maybe_roll_and_replace, + _compute_bp_if_given_pitch, bounce_average, bounce_integral, compute_bounce_points, - cubic_poly_roots, + pitch_extrema, + poly_roots, polyder, polyint, polyval, @@ -38,8 +47,44 @@ from desc.profiles import PowerSeriesProfile +@vmap +def _last_value(a): + """Return the last non-nan value in ``a``.""" + a = jnp.ravel(a)[::-1] + idx = jnp.squeeze(flatnonzero(~jnp.isnan(a), size=1, fill_value=0)) + return a[idx] + + +@vmap +def _maybe_roll_and_replace(maybe, a, replacement): + """If maybe is true, roll a right and put replacement value at a[0]. + + Parameters + ---------- + maybe : ndarray, shape(1, ) + Whether to roll array. + a : ndarray + Array to roll. + replacement : ndarray, shape(1, ) + Value to place at index zero. + + Returns + ------- + result : ndarray + The (possibly) rolled array. + + """ + return cond( + maybe, + lambda x, r: put(jnp.roll(x, shift=1), jnp.array([0]), r), + lambda x, r: x, + a, + replacement, + ) + + @pytest.mark.unit -def test_mask_operation(): +def test_mask_operations(): """Test custom masked array operation.""" rows = 5 cols = 7 @@ -47,6 +92,7 @@ def test_mask_operation(): nan_idx = np.random.choice(rows * cols, size=(rows * cols) // 2, replace=False) a.ravel()[nan_idx] = np.nan taken = take_mask(a, ~np.isnan(a)) + assert np.all(np.diff(np.isnan(a), axis=-1) >= 0), "nan not padded on correctly" last = _last_value(taken) for i in range(rows): desired = a[i, ~np.isnan(a[i])] @@ -115,30 +161,31 @@ def test_reshape_convention(): @pytest.mark.unit -def test_cubic_poly_roots(): +def test_poly_roots(): """Test vectorized computation of cubic polynomial exact roots.""" cubic = 4 - poly = np.arange(-24, 24).reshape(cubic, 6, -1) - poly[0] = np.where(poly[0] == 0, np.ones_like(poly[0]), poly[0]) - poly = poly * np.e * np.pi + poly = np.arange(-24, 24).reshape(cubic, 6, -1) * np.pi # make sure broadcasting won't hide error in implementation assert np.unique(poly.shape).size == poly.ndim constant = np.broadcast_to(np.arange(poly.shape[-1]), poly.shape[1:]) - roots = cubic_poly_roots(poly, constant, sort=True) - for j in range(poly.shape[1]): - for k in range(poly.shape[2]): - a, b, c, d = poly[:, j, k] - np.testing.assert_allclose( - actual=roots[j, k], - desired=np.sort_complex(np.roots([a, b, c, d - constant[j, k]])), - ) + constant = np.stack([constant, constant]) + actual = poly_roots(poly, constant, sort=True) + + for i in range(constant.shape[0]): + for j in range(poly.shape[1]): + for k in range(poly.shape[2]): + d = poly[-1, j, k] - constant[i, j, k] + np.testing.assert_allclose( + actual=actual[i, j, k], + desired=np.sort(np.roots([*poly[:-1, j, k], d])), + ) @pytest.mark.unit def test_polyint(): """Test vectorized computation of polynomial primitive.""" quintic = 6 - poly = np.arange(-18, 18).reshape(quintic, 3, -1) * np.e * np.pi + poly = np.arange(-18, 18).reshape(quintic, 3, -1) * np.pi # make sure broadcasting won't hide error in implementation assert np.unique(poly.shape).size == poly.ndim constant 
= np.broadcast_to(np.arange(poly.shape[-1]), poly.shape[1:]) @@ -156,7 +203,7 @@ def test_polyint(): def test_polyder(): """Test vectorized computation of polynomial derivative.""" quintic = 6 - poly = np.arange(-18, 18).reshape(quintic, 3, -1) * np.e * np.pi + poly = np.arange(-18, 18).reshape(quintic, 3, -1) * np.pi # make sure broadcasting won't hide error in implementation assert np.unique(poly.shape).size == poly.ndim derivative = polyder(poly) @@ -171,7 +218,7 @@ def test_polyder(): def test_polyval(): """Test vectorized computation of polynomial evaluation.""" quartic = 5 - c = np.arange(-60, 60).reshape(quartic, 3, -1) * np.e * np.pi + c = np.arange(-60, 60).reshape(quartic, 3, -1) * np.pi # make sure broadcasting won't hide error in implementation assert np.unique(c.shape).size == c.ndim x = np.linspace(0, 20, c.shape[1] * c.shape[2]).reshape(c.shape[1], c.shape[2]) @@ -218,25 +265,59 @@ def test_polyval(): @pytest.mark.unit def test_compute_bounce_points(): """Test that the bounce points are computed correctly.""" - pitch = np.atleast_2d([2]) - start = np.pi / 3 - end = 6 * np.pi - knots = np.linspace(start, end, 5) - B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots)) - bp1, bp2 = compute_bounce_points( - pitch, knots, B.c[:, np.newaxis], B.derivative().c[:, np.newaxis] - ) - bp1, bp2 = map(np.ravel, (bp1, bp2)) - bp1, bp2 = map(lambda bp: bp[~np.isnan(bp)], (bp1, bp2)) - np.testing.assert_allclose(bp1, np.array([1.04719755, 7.13120418])) - np.testing.assert_allclose(bp2, np.array([5.19226163, 17.57830469])) - # TODO: add all the edge cases I parameterized, and use root finding - # as baseline test instead of hardcoding. - plt.axvline(x=knots, color="red", linestyle="--") - x = np.linspace(start, end, 50) - plt.plot(x, B(x)) - plt.plot(x, np.ones(x.size) / pitch.ravel()) - plt.show() + + def filter_nan(bp): + is_nan = np.isnan(bp) + assert np.all(np.diff(is_nan, axis=-1) >= 0), "nan not padded on correctly" + return bp[~is_nan] + + def plot_field_line(B, pitch, start, end): + fig, ax = plt.subplots() + for knot in B.x: + ax.axvline(x=knot, color="red", linestyle="--") + z = np.linspace(start, end, 50) + ax.plot(z, B(z)) + ax.plot(z, np.full(z.size, 1 / pitch)) + plt.show() + + def assert_case_1(plot=False): + # 1/pitch does not intersect extrema + pitch = 2 + start = np.pi / 3 + end = 6 * np.pi + knots = np.linspace(start, end, 5) + B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots)) + # Can observe correctness of bounce points through this plot. + if plot: + plot_field_line(B, pitch, start, end) + _, bp1, bp2 = _compute_bp_if_given_pitch( + pitch, knots, B.c[:, np.newaxis], B.derivative().c[:, np.newaxis] + ) + bp1, bp2 = map(filter_nan, (bp1, bp2)) + # Hardcode desired because CubicHermiteSpline.solve not yet implemented. + np.testing.assert_allclose(bp1, desired=np.array([1.04719755, 7.13120418])) + np.testing.assert_allclose(bp2, desired=np.array([5.19226163, 17.57830469])) + + def assert_case_2(plot=False): + # 1/pitch intersects extrema + pitch = 1 + start = np.pi / 3 + end = 6 * np.pi + knots = np.linspace(start, end, 5) + B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots)) + # Can observe correctness of bounce points through this plot. + if plot: + plot_field_line(B, pitch, start, end) + _, bp1, bp2 = _compute_bp_if_given_pitch( + pitch, knots, B.c[:, np.newaxis], B.derivative().c[:, np.newaxis] + ) + bp1, bp2 = map(filter_nan, (bp1, bp2)) + # Hardcode desired because CubicHermiteSpline.solve not yet implemented. 
+ np.testing.assert_allclose(bp1, desired=np.array([1.04719755, 7.13120418])) + np.testing.assert_allclose(bp2, desired=np.array([5.19226163, 17.57830469])) + + # TODO: add all the edge cases I parameterized + assert_case_1() @pytest.mark.unit @@ -245,25 +326,26 @@ def test_pitch_and_hairy_ball(): eq = get("HELIOTRON") rho = np.linspace(1e-12, 1, 6) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) - ba, items = bounce_average(eq, rho=rho, alpha=alpha, return_items=True) + zeta = jnp.linspace(0, 6 * jnp.pi, 20) + ba, items = bounce_average(eq, rho=rho, alpha=alpha, zeta=zeta, return_items=True) B = items["data"]["B"] assert not np.isclose(B, 0, atol=1e-19).any(), "B should never vanish." name = "g_zz" f = eq.compute(name, grid=items["grid"], data=items["data"])[name] - # Same pitch for every field line may give sparse result. - pitch_res = 30 - pitch = np.linspace(1 / B.max(), 1 / B.min(), pitch_res)[:, np.newaxis, np.newaxis] - result = ba(f, pitch) - assert np.isfinite(result).any() - # specify pitch per field line + pitch_res = 30 B = B.reshape(rho.size * alpha.size, -1) pitch = np.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), pitch_res).reshape( - -1, rho.size, alpha.size + pitch_res, rho.size, alpha.size ) result = ba(f, pitch) assert np.isfinite(result).any() + # specify pitch from extrema of |B| + pitch = pitch_extrema(zeta, items["poly_B"], items["poly_B_z"]) + pitch = pitch.reshape(pitch.shape[0], rho.size, alpha.size) + result = ba(f, pitch) + assert np.isfinite(result).any() # @pytest.mark.unit From 4690b8c32dc3e0d59df035b4c4941c247826f859 Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 8 Apr 2024 02:22:38 -0400 Subject: [PATCH 065/241] Ensure multiplicity roots > 1 don't fail algorithm... Add checks. Do all preparation to allow for custom grid. --- desc/compute/bounce_integral.py | 541 +++++++++++++++++--------------- tests/test_bounce_integral.py | 138 ++++---- 2 files changed, 356 insertions(+), 323 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 3ee2fecb7b..3bd087b427 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -13,36 +13,36 @@ # vmap to compute a bounce integral for every pitch along every field line. @partial(vmap, in_axes=(1, 1, None, None, 0, 0, 0, 0), out_axes=1) -def bounce_quadrature(pitch, X, w, knots, f, B_sup_z, B, B_z_ra): +def bounce_quad(pitch, X, w, knots, f, B_sup_z, B, B_z_ra): """Compute a bounce integral for every pitch along a particular field line. Parameters ---------- - pitch : ndarray, shape(pitch.size, ) + pitch : Array, shape(pitch.size, ) λ values. - X : ndarray, shape(pitch.size, (knots.size - 1) * degree, w.size) + X : Array, shape(pitch.size, X.shape[1], w.size) Quadrature points. - w : ndarray, shape(w.size, ) + w : Array, shape(w.size, ) Quadrature weights. - knots : ndarray, shape(knots.size, ) + knots : Array, shape(knots.size, ) Field line-following ζ coordinates of spline knots. - f : ndarray, shape(knots.size, ) + f : Array, shape(knots.size, ) Function to compute bounce integral of, evaluated at knots. - B_sup_z : ndarray, shape(knots.size, ) + B_sup_z : Array, shape(knots.size, ) Contravariant field-line following toroidal component of magnetic field. - B : ndarray, shape(knots.size, ) + B : Array, shape(knots.size, ) Norm of magnetic field. - B_z_ra : ndarray, shape(knots.size, ) + B_z_ra : Array, shape(knots.size, ) Norm of magnetic field derivative with respect to field-line following label. 
Returns ------- - inner_product : ndarray, shape(P, (knots.size - 1) * degree) + inner_product : Array, shape(pitch.size, X.shape[1]) Bounce integrals for every pitch along a particular field line. """ assert pitch.ndim == 1 == w.ndim - assert X.shape == (pitch.size, (knots.size - 1) * 3, w.size) + assert X.shape == (pitch.size, X.shape[1], w.size) assert knots.shape == f.shape == B_sup_z.shape == B.shape == B_z_ra.shape # Spline the integrand so that we can evaluate it at quadrature points # without expensive coordinate mappings and root finding. @@ -60,7 +60,7 @@ def bounce_quadrature(pitch, X, w, knots, f, B_sup_z, B, B_z_ra): return inner_product -def tanh_sinh_quadrature(resolution=7): +def tanh_sinh_quad(resolution=7): """ tanh_sinh quadrature. @@ -86,10 +86,10 @@ def tanh_sinh_quadrature(resolution=7): # Compute boundary of quadrature. # x_max = 1 - eps with some buffer x_max = jnp.array(1.0) - 10 * jnp.finfo(jnp.array(1.0)).eps - tanhinv = lambda x: 1 / 2 * jnp.log((1 + x) / (1 - x)) - sinhinv = lambda x: jnp.log(x + jnp.sqrt(x**2 + 1)) + tanh_inv = lambda x: jnp.log((1 + x) / (1 - x)) / 2 + sinh_inv = lambda x: jnp.log(x + jnp.sqrt(x**2 + 1)) # inverse of tanh-sinh transformation for x_max - t_max = sinhinv(2 / jnp.pi * tanhinv(x_max)) + t_max = sinh_inv(2 / jnp.pi * tanh_inv(x_max)) points = jnp.linspace(-t_max, t_max, resolution) h = 2 * t_max / (resolution - 1) @@ -99,20 +99,20 @@ def tanh_sinh_quadrature(resolution=7): return x, w -@vmap +@partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={2, 3}) def take_mask(a, mask, size=None, fill_value=None): """JIT compilable method to return ``a[mask][:size]`` padded by ``fill_value``. Parameters ---------- - a : ndarray + a : Array The source array. - mask : ndarray - Boolean mask to index into ``a``. Should have same size as ``a``. + mask : Array + Boolean mask to index into ``a``. Should have same shape as ``a``. size : int Elements of ``a`` at the first size True indices of ``mask`` will be returned. If there are fewer elements than size indicates, the returned array will be - padded with fill_value. Defaults to ``a.size``. + padded with fill_value. Defaults to ``mask.size``. fill_value : When there are fewer than the indicated number of elements, the remaining elements will be filled with ``fill_value``. @@ -123,42 +123,40 @@ def take_mask(a, mask, size=None, fill_value=None): Returns ------- - a_mask : ndarray, shape(size, ) + a[mask][:size] : Array, shape(size, ) Output array. """ - assert a.size == mask.size - if size is None: - size = mask.size - idx = flatnonzero(mask, size=size, fill_value=mask.size) - a_mask = take( + assert a.shape == mask.shape + idx = flatnonzero( + mask, size=mask.size if size is None else size, fill_value=mask.size + ) + return take( a, idx, - axis=0, mode="fill", fill_value=fill_value, unique_indices=True, indices_are_sorted=True, ) - return a_mask -def polyint(c, k=None): +def poly_int(c, k=None): """Coefficients for the primitives of the given set of polynomials. Parameters ---------- - c : ndarray + c : Array First axis should store coefficients of a polynomial. For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, coefficient cᵢ should be stored at ``c[n - i]``. - k : ndarray + k : Array Integration constants. Should broadcast with arrays of shape(*coef.shape[1:]). Returns ------- - poly : ndarray + poly : Array Coefficients of polynomial primitive. That is, ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁺¹, where n is ``c.shape[0] - 1``. 
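The coefficient convention documented here is the standard highest-degree-first layout, applied along the first axis of a stacked coefficient array. As a quick standalone reference (plain NumPy, not the DESC helpers; the example polynomial is made up), the single-polynomial case matches ``numpy.polyval``, ``numpy.polyder``, and ``numpy.polyint``, which appears to be the behaviour these helpers generalize in a vectorized, JIT-compatible form:

    import numpy as np

    # One cubic in the documented layout: the coefficient of x**i is stored at
    # c[n - i], i.e. highest degree first. Here c encodes 2x^3 - 3x^2 + 0.5x + 7.
    c = np.array([2.0, -3.0, 0.5, 7.0])
    x = np.linspace(0.0, 1.0, 5)

    val = np.polyval(c, x)        # evaluation at the points x
    der = np.polyder(c)           # derivative coefficients, one degree lower
    prim = np.polyint(c, k=1.5)   # primitive coefficients with integration constant 1.5

    assert der.shape == (3,) and prim.shape == (5,)

The vectorized helpers apply this same rule to every polynomial stacked along the trailing axes, so a coefficient array of shape (4, S, N) describes the S * N cubics of all splines at once.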
@@ -171,19 +169,19 @@ def polyint(c, k=None): return poly -def polyder(c): +def poly_der(c): """Coefficients for the derivatives of the given set of polynomials. Parameters ---------- - c : ndarray + c : Array First axis should store coefficients of a polynomial. For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, coefficient cᵢ should be stored at ``c[n - i]``. Returns ------- - poly : ndarray + poly : Array Coefficients of polynomial derivative, ignoring the arbitrary constant. That is, ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁻¹, where n is ``c.shape[0] - 1``. @@ -193,7 +191,7 @@ def polyder(c): return poly -def polyval(x, c): +def poly_val(x, c): """Evaluate the set of polynomials c at the points x. Note that this function does not perform the same operation as @@ -201,16 +199,16 @@ def polyval(x, c): Parameters ---------- - x : ndarray + x : Array Coordinates at which to evaluate the set of polynomials. - c : ndarray + c : Array First axis should store coefficients of a polynomial. For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, coefficient cᵢ should be stored at ``c[n - i]``. Returns ------- - val : ndarray + val : Array Polynomial with given coefficients evaluated at given points. Examples @@ -242,16 +240,16 @@ def _complex_to_nan(root, a_min=-jnp.inf, a_max=jnp.inf): Parameters ---------- - root : ndarray + root : Array Complex-valued roots. - a_min, a_max : ndarray + a_min, a_max : Array, Array Minimum and maximum value to return roots between. Should broadcast with ``root`` array. Returns ------- - roots : ndarray - The real roots in [a_min, a_max], others transformed to nan. + roots : Array + The real roots in [a_min, a_max]; others transformed to nan. """ if a_min is None: @@ -266,79 +264,95 @@ def _complex_to_nan(root, a_min=-jnp.inf, a_max=jnp.inf): def _root_linear(a, b): - sentinel = -1 # 0 is minimum value for valid root in local power basis - return safediv(-b, a, fill=sentinel) + """Return r such that a * r + b = 0.""" + return safediv(-b, a, fill=jnp.where(jnp.isclose(b, 0), 0, jnp.nan)) + +def _root_quadratic(a, b, c, distinct=False): + """Return r such that a * r**2 + b * r + c = 0.""" + discriminant = b**2 - 4 * a * c + C = complex_sqrt(discriminant) + + def root(xi): + return safediv(-b + xi * C, 2 * a) -def _root_quadratic(a, b, c): - t = complex_sqrt(b**2 - 4 * a * c) - root = lambda xi: safediv(-b + xi * t, 2 * a) is_linear = jnp.isclose(a, 0) + suppress_root = distinct & jnp.isclose(discriminant, 0) r1 = jnp.where(is_linear, _root_linear(b, c), root(-1)) - r2 = jnp.where(is_linear, jnp.nan, root(1)) + r2 = jnp.where(is_linear | suppress_root, jnp.nan, root(1)) return r1, r2 -def _root_cubic(a, b, c, d): +def _root_cubic(a, b, c, d, distinct=False): + """Return r such that a * r**3 + b * r**2 + c * r + d = 0.""" # https://en.wikipedia.org/wiki/Cubic_equation#General_cubic_formula t_0 = b**2 - 3 * a * c t_1 = 2 * b**3 - 9 * a * b * c + 27 * a**2 * d - C = ((t_1 + complex_sqrt(t_1**2 - 4 * t_0**3)) / 2) ** (1 / 3) - C_is_zero = jnp.isclose(C, 0) + discriminant = t_1**2 - 4 * t_0**3 + C = ((t_1 + complex_sqrt(discriminant)) / 2) ** (1 / 3) + C_is_zero = jnp.isclose(t_0, 0) & jnp.isclose(t_1, 0) def root(xi): return safediv(b + xi * C + jnp.where(C_is_zero, 0, t_0 / (xi * C)), -3 * a) + xi0 = 1 xi1 = (-1 + (-3) ** 0.5) / 2 xi2 = xi1**2 - xi3 = 1 is_quadratic = jnp.isclose(a, 0) - q1, q2 = _root_quadratic(b, c, d) - r1 = jnp.where(is_quadratic, q1, root(xi1)) - r2 = jnp.where(is_quadratic, q2, root(xi2)) - r3 = 
jnp.where(is_quadratic, jnp.nan, root(xi3)) + # C = 0 is equivalent to existence of triple root. + # Assuming the coefficients are real, it is also equivalent to + # existence of any real roots with multiplicity > 1. + suppress_root = distinct & C_is_zero + q1, q2 = _root_quadratic(b, c, d, distinct) + r1 = jnp.where(is_quadratic, q1, root(xi0)) + r2 = jnp.where(is_quadratic, q2, jnp.where(suppress_root, jnp.nan, root(xi1))) + r3 = jnp.where(is_quadratic | suppress_root, jnp.nan, root(xi2)) return r1, r2, r3 -def poly_roots(coef, k=0, a_min=None, a_max=None, sort=False): - """Roots of polynomial with given real coefficients. +def poly_root(coef, k=0, a_min=None, a_max=None, sort=False, distinct=False): + """Roots of polynomial with given coefficients. Parameters ---------- - coef : ndarray + coef : Array First axis should store coefficients of a polynomial. For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, coefficient cᵢ should be stored at ``c[n - i]``. - k : ndarray + k : Array Specify to find solutions to ∑ᵢⁿ cᵢ xⁱ = ``k``. Should broadcast with arrays of shape(*coef.shape[1:]). - a_min, a_max : ndarray + a_min, a_max : Array, Array Minimum and maximum value to return roots between. If specified only real roots are returned. If None, returns all complex roots. Should broadcast with arrays of shape(*coef.shape[1:]). sort : bool Whether to sort the roots. + distinct : bool + Whether to only return the distinct roots. If true, when the + multiplicity is greater than one, the repeated roots are set to nan. Returns ------- - r : ndarray + r : Array, shape(..., coef.shape[1:], coef.shape[0] - 1) The roots of the polynomial, iterated over the last axis. """ - # TODO: need to add option to filter double/triple roots into single roots - if 2 <= coef.shape[0] <= 4: + func = {3: _root_quadratic, 4: _root_cubic} + if coef.shape[0] in func: # compute from analytic formula - func = {4: _root_cubic, 3: _root_quadratic, 2: _root_linear}[coef.shape[0]] - r = func(*coef[:-1], coef[-1] - k) + r = func[coef.shape[0]](*coef[:-1], coef[-1] - k, distinct) if not (a_min is None and a_max is None): r = tuple(map(partial(_complex_to_nan, a_min=a_min, a_max=a_max), r)) r = jnp.stack(r, axis=-1) + if sort: + r = jnp.sort(r, axis=-1) else: # compute from eigenvalues of polynomial companion matrix - d = coef[-1] - k - c = [jnp.broadcast_to(c, d.shape) for c in coef[:-1]] - c.append(d) + c_n = coef[-1] - k + c = [jnp.broadcast_to(c_i, c_n.shape) for c_i in coef[:-1]] + c.append(c_n) coef = jnp.stack(c) r = roots(coef.reshape(coef.shape[0], -1).T).reshape(*coef.shape[1:], -1) if not (a_min is None and a_max is None): @@ -347,129 +361,158 @@ def poly_roots(coef, k=0, a_min=None, a_max=None, sort=False): if a_max is not None: a_max = a_max[..., jnp.newaxis] r = _complex_to_nan(r, a_min, a_max) - if sort: - r = jnp.sort(r, axis=-1) + if sort or distinct: + r = jnp.sort(r, axis=-1) + if distinct: + mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=jnp.nan), 0) + r = jnp.where(mask, jnp.nan, r) return r -def pitch_extrema(knots, poly_B, poly_B_z): - """Returns pitch that will capture fat banana orbits. +def pitch_of_extrema(knots, poly_B, poly_B_z): + """Return pitch values that will capture fat banana orbits. These pitch values are 1/|B|(ζ*) where |B|(ζ*) is a local maximum. The local minimum are returned as well. Parameters ---------- - knots : ndarray, shape(knots.size, ) + knots : Array, shape(knots.size, ) Field line-following ζ coordinates of spline knots. 
- poly_B : ndarray, shape(poly_B.shape[0], R * A, knots.size - 1) + poly_B : Array, shape(poly_B.shape[0], S, knots.size - 1) Polynomial coefficients of the spline of |B| in local power basis. - First axis should iterate through coefficients of power series, - and the last axis should iterate through the piecewise - polynomials of a particular spline of |B| along field line. - poly_B_z : ndarray, shape(poly_B_z.shape[0], R * A, knots.size - 1) + First axis enumerates the coefficients of power series. + Second axis enumerates the splines along the field lines. + Last axis enumerates the polynomials of the spline along a particular + field line. + poly_B_z : Array, shape(poly_B.shape[0] - 1, *poly_B.shape[1:]) Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. - First axis should iterate through coefficients of power series, - and the last axis should iterate through the piecewise - polynomials of a particular spline of |B| along field line. + First axis enumerates the coefficients of power series. + Second axis enumerates the splines along the field lines. + Last axis enumerates the polynomials of the spline along a particular + field line. Returns ------- - pitch : ndarray, shape((knots.size - 1) * (poly_B_z.shape[0] - 1), R * A) - Returns at most pitch.shape[0] many pitch values for every field line. - If less extrema were found, then the array has nan padded on the right. - You will likely need to reshape the output as follows: - pitch = pitch.reshape(pitch.shape[0], rho.size, alpha.size). + pitch : Array, shape(N * (degree - 1), S) + For the shaping notation, the ``degree`` of the spline of |B| matches + ``poly_B.shape[0] - 1``, the number of polynomials per spline + ``N`` matches ``knots.size - 1``, and the number of field lines is + denoted by ``S``. + + If there were less than ``N * (degree - 1)`` extrema detected along a + field line, then the first axis, which enumerates the pitch values for + a particular field line, is padded with nan. """ - RA = poly_B.shape[1] # rho.size * alpha.size - N = knots.size - 1 # number of piecewise cubic polynomials per field line - assert poly_B.shape[1:] == poly_B_z.shape[1:] - assert poly_B.shape[-1] == N - degree = poly_B_z.shape[0] - 1 - extrema = poly_roots( + S = poly_B.shape[1] + N = knots.size - 1 + degree = poly_B.shape[0] - 1 + assert degree == poly_B_z.shape[0] and poly_B.shape[1:] == poly_B_z.shape[1:] + assert N == poly_B.shape[-1], "Last axis fails to enumerate spline polynomials." + + extrema = poly_root( coef=poly_B_z, a_min=jnp.array([0]), a_max=jnp.diff(knots), sort=False, # don't need to sort + distinct=True, ) - assert extrema.shape == (RA, N, degree) - B_extrema = polyval(x=extrema, c=poly_B[..., jnp.newaxis]).reshape(RA, -1) + # Can detect at most degree of |B|_z spline extrema between each knot. + assert extrema.shape == (S, N, degree - 1) + # Reshape so that last axis enumerates (unsorted) extrema along a field line. + B_extrema = poly_val(x=extrema, c=poly_B[..., jnp.newaxis]).reshape(S, -1) + # Might be useful to pad all the nan at the end rather than interspersed. B_extrema = take_mask(B_extrema, ~jnp.isnan(B_extrema)) pitch = 1 / B_extrema.T - assert pitch.shape == (N * degree, RA) + assert pitch.shape == (N * (degree - 1), S) return pitch -def compute_bounce_points(pitch, knots, poly_B, poly_B_z): - """Compute the bounce points given |B| and pitch λ. +def bounce_points(pitch, knots, poly_B, poly_B_z, check=False): + """Compute the bounce points given spline of |B| and pitch λ. 
Parameters ---------- - pitch : ndarray, shape(P, R * A) + pitch : Array, shape(P, S) λ values. - Last two axes should specify the λ value for a particular field line - parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., ρ, α]`` - where in the latter the labels are interpreted as indices that correspond - to that field line. - If an additional axis exists on the left, it is the batch axis as usual. - knots : ndarray, shape(knots.size, ) + Last axis enumerates the λ value for a particular field line + parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` + where in the latter the labels (ρ, α) are interpreted as index into the + last axis that corresponds to that field line. + If two-dimensional, the first axis is the batch axis as usual. + knots : Array, shape(knots.size, ) Field line-following ζ coordinates of spline knots. - poly_B : ndarray, shape(poly_B.shape[0], R * A, knots.size - 1) + poly_B : Array, shape(poly_B.shape[0], S, knots.size - 1) Polynomial coefficients of the spline of |B| in local power basis. - First axis should iterate through coefficients of power series, - and the last axis should iterate through the piecewise - polynomials of a particular spline of |B| along field line. - poly_B_z : ndarray, shape(poly_B_z.shape[0], R * A, knots.size - 1) + First axis enumerates the coefficients of power series. + Second axis enumerates the splines along the field lines. + Last axis enumerates the polynomials of the spline along a particular + field line. + poly_B_z : Array, shape(poly_B.shape[0] - 1, *poly_B.shape[1:]) Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. - First axis should iterate through coefficients of power series, - and the last axis should iterate through the piecewise - polynomials of a particular spline of |B| along field line. + First axis enumerates the coefficients of power series. + Second axis enumerates the splines along the field lines. + Last axis enumerates the polynomials of the spline along a particular + field line. + check : bool + Flag for debugging. Returns ------- - bp1, bp2 : ndarray, ndarray - Field line-following ζ coordinates of bounce points for a given pitch - along a field line. Has shape (P, R * A, (knots.size - 1) * 3). - If there were less than (knots.size - 1) * 3 bounce points along a - field line, then the last axis is padded with nan. - The pairs bp1[..., i] and bp2[..., i] form integration boundaries - for bounce integrals. + bp1, bp2 : Array, Array, shape(P, S, N * degree) + For the shaping notation, the ``degree`` of the spline of |B| matches + ``poly_B.shape[0] - 1``, the number of polynomials per spline + ``N`` matches ``knots.size - 1``, and the number of field lines is + denoted by ``S``. + + The returned arrays are the field line-following ζ coordinates of bounce + points for a given pitch along a field line. The pairs bp1[..., i] and + bp2[..., i] form left and right integration boundaries, respectively, + for the bounce integrals. If there were less than ``N * degree`` bounce + points detected along a field line, then the last axis, which enumerates + the bounce points for a particular field line, is padded with nan. """ - P = pitch.shape[0] # batch size - RA = poly_B.shape[1] # rho.size * alpha.size - N = knots.size - 1 # number of piecewise cubic polynomials per field line - assert poly_B.shape[1:] == poly_B_z.shape[1:] - assert poly_B.shape[-1] == N + pitch = jnp.atleast_2d(pitch) + err_msg = "Supplied invalid shape for pitch angles." 
+ assert pitch.ndim == 2, err_msg + assert pitch.shape[-1] == 1 or pitch.shape[-1] == poly_B.shape[1], err_msg + P = pitch.shape[0] + S = poly_B.shape[1] + N = knots.size - 1 degree = poly_B.shape[0] - 1 + assert degree == poly_B_z.shape[0] and poly_B.shape[1:] == poly_B_z.shape[1:] + assert N == poly_B.shape[-1], "Last axis fails to enumerate spline polynomials." # The polynomials' intersection points with 1 / λ is given by ``intersect``. # In order to be JIT compilable, this must have a shape that accommodates the - # case where each cubic polynomial intersects 1 / λ thrice. - # nan values in ``intersect`` denote a polynomial has less than three intersects. - intersect = poly_roots( + # case where each cubic polynomial intersects 1 / λ degree times. + # nan values in ``intersect`` denote a polynomial has less than degree intersects. + intersect = poly_root( coef=poly_B, + # Expand to use same pitches across polynomials of a particular spline. k=jnp.expand_dims(1 / pitch, axis=-1), a_min=jnp.array([0]), a_max=jnp.diff(knots), sort=True, + distinct=True, ) - assert intersect.shape == (P, RA, N, degree) + assert intersect.shape == (P, S, N, degree) # Reshape so that last axis enumerates intersects of a pitch along a field line. # Condense remaining axes to vmap over them. - B_z = polyval(x=intersect, c=poly_B_z[..., jnp.newaxis]).reshape(P * RA, -1) + B_z = poly_val(x=intersect, c=poly_B_z[..., jnp.newaxis]).reshape(P * S, -1) # Transform from local power basis expansion to real space. intersect = intersect + knots[:-1, jnp.newaxis] - intersect = intersect.reshape(P * RA, -1) + intersect = intersect.reshape(P * S, -1) # Only consider intersect if it is within knots that bound that polynomial. is_intersect = ~jnp.isnan(intersect) # Rearrange so that all intersects along a field line are contiguous. intersect = take_mask(intersect, is_intersect) B_z = take_mask(B_z, is_intersect) - assert intersect.shape == is_intersect.shape == B_z.shape == (P * RA, N * degree) + assert intersect.shape == B_z.shape == (P * S, N * degree) # The boolean masks is_bp1 and is_bp2 will encode whether a given entry in # intersect is a valid starting and ending bounce point, respectively. # Sign of derivative determines whether an intersect is a valid bounce point. @@ -487,10 +530,12 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): edge_case = (B_z[:, 0] == 0) & (B_z[:, 1] < 0) is_bp2 = put_along_axis(is_bp2, jnp.array([0]), edge_case[:, jnp.newaxis], axis=-1) # Get ζ values of bounce points from the masks. - bp1 = take_mask(intersect, is_bp1).reshape(P, RA, -1) - bp2 = take_mask(intersect, is_bp2).reshape(P, RA, -1) + bp1 = take_mask(intersect, is_bp1).reshape(P, S, -1) + bp2 = take_mask(intersect, is_bp2).reshape(P, S, -1) + if check: + assert jnp.all((bp2 >= bp1) | jnp.isnan(bp1) | jnp.isnan(bp2)) return bp1, bp2 - # This is no longer implemented at the moment, but can be simply. + # This is no longer implemented at the moment. # If the first intersect satisfies B_z >= 0, that particle may be # trapped in a well outside this snapshot of the field line. # If, in addition, the last intersect satisfies B_z <= 0, then we have the @@ -504,33 +549,16 @@ def compute_bounce_points(pitch, knots, poly_B, poly_B_z): # it began on the previous transit, for then continuity of |B| implies # |B|(knots[-1] < ζ < knots[-1] + knots[0]) is close to |B|(0 < ζ < knots[0])). 
# We don't need to check the conditions for the latter, because if they are - # not satisfied, the quadrature will evaluate √(1 − λ |B|) as nan, as desired. + # not satisfied, the quadrature will evaluate √(1 − λ |B|) as nan. -def _compute_bp_if_given_pitch(pitch, knots, poly_B, poly_B_z, *original, err=False): - """Return the ingredients needed by the ``bounce_integral`` function. +def _compute_bp_if_given_pitch( + pitch, knots, poly_B, poly_B_z, *original, err=False, check=False +): + """Conditionally return the ingredients needed to compute bounce integrals. Parameters ---------- - pitch : ndarray, shape(P, R, A) - λ values. - Last two axes should specify the λ value for a particular field line - parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., ρ, α]`` - where in the latter the labels are interpreted as indices that correspond - to that field line. - If an additional axis exists on the left, it is the batch axis as usual. - knots : ndarray, shape(knots.size, ) - Field line-following ζ coordinates of spline knots. - poly_B : ndarray, shape(poly_B.shape[0], R * A, knots.size - 1) - Polynomial coefficients of the spline of |B| in local power basis. - First axis should iterate through coefficients of power series, - and the last axis should iterate through the piecewise - polynomials of a particular spline of |B| along field line. - poly_B_z : ndarray, shape(poly_B_z.shape[0], R * A, knots.size - 1) - Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. - First axis should iterate through coefficients of power series, - and the last axis should iterate through the piecewise - polynomials of a particular spline of |B| along field line. original : tuple Whatever this method returned earlier. err : bool @@ -542,16 +570,8 @@ def _compute_bp_if_given_pitch(pitch, knots, poly_B, poly_B_z, *original, err=Fa raise ValueError("No pitch values were given.") return original else: - # ensure pitch has shape (batch size, rho.size, alpha.size) pitch = jnp.atleast_2d(pitch) - if pitch.ndim == 2: - # Can't use atleast_3d; see https://github.com/numpy/numpy/issues/25805. - pitch = pitch[jnp.newaxis] - err_msg = "Supplied invalid shape for pitch angles." - assert pitch.ndim == 3, err_msg - pitch = pitch.reshape(pitch.shape[0], -1) - assert pitch.shape[-1] == 1 or pitch.shape[-1] == poly_B.shape[1], err_msg - return pitch, *compute_bounce_points(pitch, knots, poly_B, poly_B_z) + return pitch, *bounce_points(pitch, knots, poly_B, poly_B_z, check) def bounce_integral( @@ -560,7 +580,7 @@ def bounce_integral( rho=jnp.linspace(1e-12, 1, 10), alpha=None, zeta=jnp.linspace(0, 6 * jnp.pi, 20), - quadrature=tanh_sinh_quadrature, + quadrature=tanh_sinh_quad, **kwargs, ): """Returns a method to compute the bounce integral of any quantity. @@ -583,20 +603,20 @@ def bounce_integral( Parameters ---------- eq : Equilibrium - Equilibrium on which the bounce integral is defined. - pitch : ndarray + Equilibrium on which the bounce integral is computed. + pitch : Array, shape(P, S) λ values to evaluate the bounce integral at each field line. May be specified later. - Last two axes should specify the λ value for a particular field line - parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., ρ, α]`` - where in the latter the labels are interpreted as indices that correspond - to that field line. - If an additional axis exists on the left, it is the batch axis as usual. - rho : ndarray + Last axis enumerates the λ value for a particular field line parameterized + by ρ, α. 
That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` + where in the latter the labels (ρ, α) are interpreted as index into the + last axis that corresponds to that field line. + If two-dimensional, the first axis is the batch axis as usual. + rho : Array Unique flux surface label coordinates. - alpha : ndarray + alpha : Array Unique field line label coordinates over a constant rho surface. - zeta : ndarray + zeta : Array A spline of the integrand is computed at these values of the field line following coordinate, for every field line in the meshgrid formed from rho and alpha specified above. @@ -608,7 +628,7 @@ def bounce_integral( Should return quadrature points and weights when called. The returned points should be within the domain [-1, 1]. kwargs : dict - Can specify arguments to the quadrature function with kwargs if convenient. + Can specify additional arguments to the quadrature function with kwargs. Can also specify whether to return items with ``return_items=True``. Returns @@ -621,17 +641,19 @@ def bounce_integral( grid : Grid DESC coordinate grid for the given field line coordinates. data : dict - Dictionary of ndarrays of stuff evaluated on ``grid``. - poly_B : ndarray, shape(4, R * A, zeta.size - 1) + Dictionary of Arrays of stuff evaluated on ``grid``. + poly_B : Array, shape(4, S, zeta.size - 1) Polynomial coefficients of the spline of |B| in local power basis. - First axis should iterate through coefficients of power series, - and the last axis should iterate through the piecewise - polynomials of a particular spline of |B| along field line. - poly_B_z : ndarray, shape(3, R * A, zeta.size - 1) - Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power - basis. First axis should iterate through coefficients of power series, - and the last axis should iterate through the piecewise - polynomials of a particular spline of |B| along field line. + First axis enumerates the coefficients of power series. + Second axis enumerates the splines along the field lines. + Last axis enumerates the polynomials of the spline along a particular + field line. + poly_B_z : Array, shape(3, S, zeta.size - 1) + Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. + First axis enumerates the coefficients of power series. + Second axis enumerates the splines along the field lines. + Last axis enumerates the polynomials of the spline along a particular + field line. 
Examples -------- @@ -643,10 +665,9 @@ def bounce_integral( name = "g_zz" f = eq.compute(name, grid=items["grid"], data=items["data"])[name] B = items["data"]["B"].reshape(rho.size * alpha.size, -1) - pitch = jnp.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), 30).reshape( - -1, rho.size, alpha.size - ) - result = bi(f, pitch) + pitch_res = 30 + pitch = jnp.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), pitch_res) + result = bi(f, pitch).reshape(pitch_res, rho.size, alpha.size, -1) """ if alpha is None: @@ -654,63 +675,61 @@ def bounce_integral( rho = jnp.atleast_1d(rho) alpha = jnp.atleast_1d(alpha) zeta = jnp.atleast_1d(zeta) - R = rho.size - A = alpha.size + S = rho.size * alpha.size grid, data = desc_grid_from_field_line_coords(eq, rho, alpha, zeta) data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid, data=data) - B_sup_z = data["B^zeta"].reshape(R * A, -1) - B = data["|B|"].reshape(R * A, -1) - B_z_ra = data["|B|_z|r,a"].reshape(R * A, -1) + B_sup_z = data["B^zeta"].reshape(S, -1) + B = data["|B|"].reshape(S, -1) + B_z_ra = data["|B|_z|r,a"].reshape(S, -1) poly_B = CubicHermiteSpline(zeta, B, B_z_ra, axis=-1, check=False).c poly_B = jnp.moveaxis(poly_B, 1, -1) - poly_B_z = polyder(poly_B) - assert poly_B.shape == (4, R * A, zeta.size - 1) - assert poly_B_z.shape == (3, R * A, zeta.size - 1) + poly_B_z = poly_der(poly_B) + assert poly_B.shape == (4, S, zeta.size - 1) + assert poly_B_z.shape == (3, S, zeta.size - 1) + check = kwargs.pop("check", False) return_items = kwargs.pop("return_items", False) x, w = quadrature(**kwargs) # change of variable, x = sin([0.5 + (ζ − ζ_b₂)/(ζ_b₂−ζ_b₁)] π) x = jnp.arcsin(x) / jnp.pi - 0.5 - original = _compute_bp_if_given_pitch(pitch, zeta, poly_B, poly_B_z, err=False) + original = _compute_bp_if_given_pitch( + pitch, zeta, poly_B, poly_B_z, err=False, check=check + ) def _bounce_integral(f, pitch=None): """Compute the bounce integral of ``f``. Parameters ---------- - f : ndarray + f : Array, shape(items["grid"].num_nodes, ) Quantity to compute the bounce integral of. - pitch : ndarray + pitch : Array, shape(P, S) λ values to evaluate the bounce integral at each field line. If None, uses the values given to the parent function. - Last two axes should specify the λ value for a particular field line - parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., ρ, α]`` - where in the latter the labels are interpreted as indices that correspond - to that field line. - If an additional axis exists on the left, it is the batch axis as usual. + Last axis enumerates the λ value for a particular field line parameterized + by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` + where in the latter the labels (ρ, α) are interpreted as index into the + last axis that corresponds to that field line. + If two-dimensional, the first axis is the batch axis as usual. Returns ------- - result : ndarray, shape(P, rho.size, alpha.size, (zeta.size - 1) * 3) - The last axis iterates through every bounce integral performed - along that field line padded by nan. + result : Array, shape(P, S, (zeta.size - 1) * 3) + First axis enumerates pitch values. + Second axis enumerates the field lines. + Last axis enumerates the bounce integrals. 
""" pitch, bp1, bp2 = _compute_bp_if_given_pitch( - pitch, zeta, poly_B, poly_B_z, *original, err=True + pitch, zeta, poly_B, poly_B_z, *original, err=True, check=check ) - P = pitch.shape[0] - pitch = jnp.broadcast_to(pitch, shape=(P, R * A)) X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] - f = f.reshape(R * A, zeta.size) - result = jnp.reshape( - bounce_quadrature(pitch, X, w, zeta, f, B_sup_z, B, B_z_ra) - # complete the change of variable - / (bp2 - bp1) * jnp.pi, - newshape=(P, R, A, -1), + f = f.reshape(S, zeta.size) + pitch = jnp.broadcast_to(pitch, shape=(pitch.shape[0], S)) + return ( + bounce_quad(pitch, X, w, zeta, f, B_sup_z, B, B_z_ra) / (bp2 - bp1) * jnp.pi ) - return result if return_items: items = {"grid": grid, "data": data, "poly_B": poly_B, "poly_B_z": poly_B_z} @@ -725,7 +744,7 @@ def bounce_average( rho=jnp.linspace(1e-12, 1, 10), alpha=None, zeta=jnp.linspace(0, 6 * jnp.pi, 20), - quadrature=tanh_sinh_quadrature, + quadrature=tanh_sinh_quad, **kwargs, ): """Returns a method to compute the bounce average of any quantity. @@ -749,34 +768,32 @@ def bounce_average( Parameters ---------- eq : Equilibrium - Equilibrium on which the bounce average is defined. - pitch : ndarray - λ values to evaluate the bounce average at each field line. + Equilibrium on which the bounce average is computed. + pitch : Array, shape(P, S) + λ values to evaluate the bounce integral at each field line. May be specified later. - Last two axes should specify the λ value for a particular field line - parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., ρ, α]`` - where in the latter the labels are interpreted as indices that correspond - to that field line. - If an additional axis exists on the left, it is the batch axis as usual. - rho : ndarray + Last axis enumerates the λ value for a particular field line parameterized + by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` + where in the latter the labels (ρ, α) are interpreted as index into the + last axis that corresponds to that field line. + If two-dimensional, the first axis is the batch axis as usual. + rho : Array Unique flux surface label coordinates. - alpha : ndarray + alpha : Array Unique field line label coordinates over a constant rho surface. - zeta : ndarray + zeta : Array A spline of the integrand is computed at these values of the field line following coordinate, for every field line in the meshgrid formed from rho and alpha specified above. The number of knots specifies the grid resolution as increasing the number of knots increases the accuracy of representing the integrand and the accuracy of the locations of the bounce points. - If an integer is given, that many knots are linearly spaced from 0 to 10 pi. quadrature : callable The quadrature scheme used to evaluate the integral. Should return quadrature points and weights when called. The returned points should be within the domain [-1, 1]. - Can specify arguments to this callable with kwargs if convenient. kwargs : dict - Can specify arguments to the quadrature function with kwargs if convenient. + Can specify additional arguments to the quadrature function with kwargs. Can also specify whether to return items with ``return_items=True``. Returns @@ -789,17 +806,19 @@ def bounce_average( grid : Grid DESC coordinate grid for the given field line coordinates. data : dict - Dictionary of ndarrays of stuff evaluated on ``grid``. - poly_B : ndarray, shape(4, R * A, zeta.size - 1) + Dictionary of Arrays of stuff evaluated on ``grid``. 
+ poly_B : Array, shape(4, S, zeta.size - 1) Polynomial coefficients of the spline of |B| in local power basis. - First axis should iterate through coefficients of power series, - and the last axis should iterate through the piecewise - polynomials of a particular spline of |B| along field line. - poly_B_z : ndarray, shape(3, R * A, zeta.size - 1) - Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power - basis. First axis should iterate through coefficients of power series, - and the last axis should iterate through the piecewise - polynomials of a particular spline of |B| along field line. + First axis enumerates the coefficients of power series. + Second axis enumerates the splines along the field lines. + Last axis enumerates the polynomials of the spline along a particular + field line. + poly_B_z : Array, shape(3, S, zeta.size - 1) + Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. + First axis enumerates the coefficients of power series. + Second axis enumerates the splines along the field lines. + Last axis enumerates the polynomials of the spline along a particular + field line. Examples -------- @@ -811,10 +830,9 @@ def bounce_average( name = "g_zz" f = eq.compute(name, grid=items["grid"], data=items["data"])[name] B = items["data"]["B"].reshape(rho.size * alpha.size, -1) - pitch = jnp.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), 30).reshape( - -1, rho.size, alpha.size - ) - result = ba(f, pitch) + pitch_res = 30 + pitch = jnp.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), pitch_res) + result = ba(f, pitch).reshape(pitch_res, rho.size, alpha.size, -1) """ @@ -823,22 +841,23 @@ def _bounce_average(f, pitch=None): Parameters ---------- - f : ndarray + f : Array, shape(items["grid"].num_nodes, ) Quantity to compute the bounce average of. - pitch : ndarray + pitch : Array, shape(P, S) λ values to evaluate the bounce average at each field line. If None, uses the values given to the parent function. - Last two axes should specify the λ value for a particular field line - parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., ρ, α]`` - where in the latter the labels are interpreted as indices that correspond - to that field line. - If an additional axis exists on the left, it is the batch axis as usual. + Last axis enumerates the λ value for a particular field line parameterized + by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` + where in the latter the labels (ρ, α) are interpreted as index into the + last axis that corresponds to that field line. + If two-dimensional, the first axis is the batch axis as usual. Returns ------- - result : ndarray, shape(P, rho.size, alpha.size, (zeta.size - 1) * 3) - The last axis iterates through every bounce average performed - along that field line padded by nan. + result : Array, shape(P, S, (zeta.size - 1) * 3) + First axis enumerates pitch values. + Second axis enumerates the field lines. + Last axis enumerates the bounce integrals. 
""" # Should be fine to fit akima spline to constant function 1 since diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 94f18ed008..07d28fba1a 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -16,18 +16,16 @@ put, put_along_axis, root_scalar, - vmap, ) from desc.compute.bounce_integral import ( - _compute_bp_if_given_pitch, bounce_average, bounce_integral, - compute_bounce_points, - pitch_extrema, - poly_roots, - polyder, - polyint, - polyval, + bounce_points, + pitch_of_extrema, + poly_der, + poly_int, + poly_root, + poly_val, take_mask, ) from desc.compute.utils import dot @@ -47,7 +45,8 @@ from desc.profiles import PowerSeriesProfile -@vmap +# TODO: delete if end up not needing +@np.vectorize(signature="(m)->()") def _last_value(a): """Return the last non-nan value in ``a``.""" a = jnp.ravel(a)[::-1] @@ -55,7 +54,7 @@ def _last_value(a): return a[idx] -@vmap +@np.vectorize(signature="(),(m),()->(m)") def _maybe_roll_and_replace(maybe, a, replacement): """If maybe is true, roll a right and put replacement value at a[0]. @@ -83,6 +82,15 @@ def _maybe_roll_and_replace(maybe, a, replacement): ) +def filter_nan(a): + """Filter out nan while making sure they have correct padding.""" + is_nan = np.isnan(a) + assert np.array_equal( + is_nan, np.sort(is_nan, axis=-1) + ), "nan not padded on correctly" + return a[~is_nan] + + @pytest.mark.unit def test_mask_operations(): """Test custom masked array operation.""" @@ -92,18 +100,15 @@ def test_mask_operations(): nan_idx = np.random.choice(rows * cols, size=(rows * cols) // 2, replace=False) a.ravel()[nan_idx] = np.nan taken = take_mask(a, ~np.isnan(a)) - assert np.all(np.diff(np.isnan(a), axis=-1) >= 0), "nan not padded on correctly" last = _last_value(taken) for i in range(rows): desired = a[i, ~np.isnan(a[i])] - np.testing.assert_allclose( - actual=taken[i], - desired=np.pad(desired, (0, cols - desired.size), constant_values=np.nan), - err_msg="take_mask() has bugs.", - ) - np.testing.assert_allclose( - actual=last[i], desired=desired[-1], err_msg="_last_value() has bugs." - ) + assert np.array_equal( + taken[i], + np.pad(desired, (0, cols - desired.size), constant_values=np.nan), + equal_nan=True, + ), "take_mask() has bugs." + assert np.array_equal(last[i], desired[-1]), "_last_value() has bugs." maybe = np.random.choice([True, False], size=rows) # This might be a better way to perform this computation, without @@ -111,17 +116,18 @@ def test_mask_operations(): # which performs both branches of the computation. # But perhaps computing replacement as above, while fine for jit, # will make the computation non-differentiable. + roll = np.vectorize(np.roll, signature="(m),()->(m)") desired = put_along_axis( - vmap(jnp.roll)(taken, maybe), + roll(taken, maybe), np.array([0]), np.expand_dims(last * maybe + taken[:, 0] * (~maybe), axis=-1), axis=-1, ) - np.testing.assert_allclose( - actual=_maybe_roll_and_replace(maybe, taken, last), - desired=desired, - err_msg="_roll_and_replace_if_shift() has bugs.", - ) + assert np.array_equal( + _maybe_roll_and_replace(maybe, taken, last), + desired, + equal_nan=True, + ), "_roll_and_replace_if_shift() has bugs." @pytest.mark.unit @@ -152,16 +158,15 @@ def test_reshape_convention(): np.testing.assert_allclose(f[..., i - 1], f[..., i]) err_msg = "The ordering conventions are required for correctness." 
- src = inspect.getsource(bounce_integral) - assert "R, A" in src and "A, R" not in src, err_msg - assert "A, zeta.size" in src, err_msg + assert "P, S, N" in inspect.getsource(bounce_points), err_msg + assert "S, zeta.size" in inspect.getsource(bounce_integral), err_msg src = inspect.getsource(desc_grid_from_field_line_coords) assert 'indexing="ij"' in src, err_msg assert 'meshgrid(rho, alpha, zeta, indexing="ij")' in src, err_msg @pytest.mark.unit -def test_poly_roots(): +def test_poly_root(): """Test vectorized computation of cubic polynomial exact roots.""" cubic = 4 poly = np.arange(-24, 24).reshape(cubic, 6, -1) * np.pi @@ -169,44 +174,51 @@ def test_poly_roots(): assert np.unique(poly.shape).size == poly.ndim constant = np.broadcast_to(np.arange(poly.shape[-1]), poly.shape[1:]) constant = np.stack([constant, constant]) - actual = poly_roots(poly, constant, sort=True) + actual = poly_root(poly, constant, sort=True) for i in range(constant.shape[0]): for j in range(poly.shape[1]): for k in range(poly.shape[2]): d = poly[-1, j, k] - constant[i, j, k] - np.testing.assert_allclose( - actual=actual[i, j, k], - desired=np.sort(np.roots([*poly[:-1, j, k], d])), - ) + desired = np.sort(np.roots([*poly[:-1, j, k], d])) + np.testing.assert_allclose(actual[i, j, k], desired) + + poly = np.array( + [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [1, -1, -8, 12], [1, -6, 11, -6]] + ) + actual = poly_root(poly.T, sort=True, distinct=True) + for j in range(poly.shape[0]): + distinct = actual[j][~np.isnan(actual[j])] + desired = np.unique(np.roots(poly[j])) + np.testing.assert_allclose(distinct, desired, err_msg=str(j)) @pytest.mark.unit -def test_polyint(): +def test_poly_int(): """Test vectorized computation of polynomial primitive.""" quintic = 6 poly = np.arange(-18, 18).reshape(quintic, 3, -1) * np.pi # make sure broadcasting won't hide error in implementation assert np.unique(poly.shape).size == poly.ndim constant = np.broadcast_to(np.arange(poly.shape[-1]), poly.shape[1:]) - primitive = polyint(poly, k=constant) + primitive = poly_int(poly, k=constant) for j in range(poly.shape[1]): for k in range(poly.shape[2]): np.testing.assert_allclose( actual=primitive[:, j, k], desired=np.polyint(poly[:, j, k], k=constant[j, k]), ) - assert polyint(poly).shape == primitive.shape, "Failed broadcasting default k." + assert poly_int(poly).shape == primitive.shape, "Failed broadcasting default k." 
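For reference, the ``k`` argument exercised above shifts the constant term of the polynomial: ``poly_root(c, k)`` solves ∑ᵢ cᵢ xⁱ = k with coefficient cᵢ stored at ``c[n - i]``, exactly the equivalence the loop above checks against ``np.roots``. A NumPy-only sketch of that equivalence (illustrative, not part of the patch):

.. code-block:: python

    import numpy as np

    c = np.array([1.0, -3.0, 2.0])             # x**2 - 3 x + 2
    k = 2.0
    # Solving sum_i c[n - i] * x**i = k is the same as finding the roots of
    # the polynomial whose constant term is shifted to c[-1] - k.
    r = np.sort(np.roots([*c[:-1], c[-1] - k]))
    np.testing.assert_allclose(r, [0.0, 3.0])  # x**2 - 3 x = 0
    np.testing.assert_allclose(np.polyval(c, r), k)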
@pytest.mark.unit -def test_polyder(): +def test_poly_der(): """Test vectorized computation of polynomial derivative.""" quintic = 6 poly = np.arange(-18, 18).reshape(quintic, 3, -1) * np.pi # make sure broadcasting won't hide error in implementation assert np.unique(poly.shape).size == poly.ndim - derivative = polyder(poly) + derivative = poly_der(poly) for j in range(poly.shape[1]): for k in range(poly.shape[2]): np.testing.assert_allclose( @@ -215,14 +227,14 @@ def test_polyder(): @pytest.mark.unit -def test_polyval(): +def test_poly_val(): """Test vectorized computation of polynomial evaluation.""" quartic = 5 c = np.arange(-60, 60).reshape(quartic, 3, -1) * np.pi # make sure broadcasting won't hide error in implementation assert np.unique(c.shape).size == c.ndim x = np.linspace(0, 20, c.shape[1] * c.shape[2]).reshape(c.shape[1], c.shape[2]) - val = polyval(x=x, c=c) + val = poly_val(x=x, c=c) for index in np.ndindex(c.shape[1:]): idx = (..., *index) np.testing.assert_allclose( @@ -237,7 +249,7 @@ def test_polyval(): assert np.unique(x.shape).size == x.ndim assert c.shape[1:] == x.shape[x.ndim - (c.ndim - 1) :] assert np.unique((c.shape[0],) + x.shape[c.ndim - 1 :]).size == x.ndim - 1 - val = polyval(x=x, c=c) + val = poly_val(x=x, c=c) for index in np.ndindex(c.shape[1:]): idx = (..., *index) np.testing.assert_allclose( @@ -251,11 +263,11 @@ def test_polyval(): y = np.arange(y.prod()).reshape(*y) x = np.arange(y.shape[-1]) a1d = Akima1DInterpolator(x, y, axis=-1) - primitive = polyint(a1d.c) + primitive = poly_int(a1d.c) # choose evaluation points at d just to match choice made in a1d.antiderivative() d = np.diff(x) # evaluate every spline at d - k = polyval(x=d, c=primitive) + k = poly_val(x=d, c=primitive) # don't want to use jax.ndarray.at[].add() in case jax is not installed primitive = np.array(primitive) primitive[-1, 1:] += np.cumsum(k, axis=-1)[:-1] @@ -263,14 +275,9 @@ def test_polyval(): @pytest.mark.unit -def test_compute_bounce_points(): +def test_bounce_points(): """Test that the bounce points are computed correctly.""" - def filter_nan(bp): - is_nan = np.isnan(bp) - assert np.all(np.diff(is_nan, axis=-1) >= 0), "nan not padded on correctly" - return bp[~is_nan] - def plot_field_line(B, pitch, start, end): fig, ax = plt.subplots() for knot in B.x: @@ -290,8 +297,12 @@ def assert_case_1(plot=False): # Can observe correctness of bounce points through this plot. if plot: plot_field_line(B, pitch, start, end) - _, bp1, bp2 = _compute_bp_if_given_pitch( - pitch, knots, B.c[:, np.newaxis], B.derivative().c[:, np.newaxis] + bp1, bp2 = bounce_points( + pitch, + knots, + B.c[:, np.newaxis], + B.derivative().c[:, np.newaxis], + check=True, ) bp1, bp2 = map(filter_nan, (bp1, bp2)) # Hardcode desired because CubicHermiteSpline.solve not yet implemented. @@ -308,8 +319,12 @@ def assert_case_2(plot=False): # Can observe correctness of bounce points through this plot. if plot: plot_field_line(B, pitch, start, end) - _, bp1, bp2 = _compute_bp_if_given_pitch( - pitch, knots, B.c[:, np.newaxis], B.derivative().c[:, np.newaxis] + bp1, bp2 = bounce_points( + pitch, + knots, + B.c[:, np.newaxis], + B.derivative().c[:, np.newaxis], + check=True, ) bp1, bp2 = map(filter_nan, (bp1, bp2)) # Hardcode desired because CubicHermiteSpline.solve not yet implemented. 
@@ -336,14 +351,11 @@ def test_pitch_and_hairy_ball(): # specify pitch per field line pitch_res = 30 B = B.reshape(rho.size * alpha.size, -1) - pitch = np.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), pitch_res).reshape( - pitch_res, rho.size, alpha.size - ) + pitch = np.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), pitch_res) result = ba(f, pitch) assert np.isfinite(result).any() # specify pitch from extrema of |B| - pitch = pitch_extrema(zeta, items["poly_B"], items["poly_B_z"]) - pitch = pitch.reshape(pitch.shape[0], rho.size, alpha.size) + pitch = pitch_of_extrema(zeta, items["poly_B"], items["poly_B_z"]) result = ba(f, pitch) assert np.isfinite(result).any() @@ -407,7 +419,9 @@ def beta(grid, data): rho = np.array([0.5]) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 10) zeta = np.linspace(0, 10 * np.pi, 20) - bi, items = bounce_integral(eq, rho=rho, alpha=alpha, zeta=zeta, return_items=True) + bi, items = bounce_integral( + eq, rho=rho, alpha=alpha, zeta=zeta, return_items=True, check=True + ) B = items["data"]["B"] pitch_res = 15 pitch = np.linspace(1 / B.max(), 1 / B.min(), pitch_res) @@ -417,7 +431,7 @@ def beta(grid, data): assert np.isfinite(result).any(), "tanh_sinh quadrature failed." # TODO now compare result to elliptic integral - bp1, bp2 = compute_bounce_points(pitch, zeta, items["poly_B"], items["poly_B_z"]) + bp1, bp2 = bounce_points(pitch, zeta, items["poly_B"], items["poly_B_z"]) @pytest.mark.unit @@ -458,7 +472,7 @@ def test_bounce_averaged_drifts(): grid = Grid(c1, sort=False) # The bounce integral operator should be able to take a grid - bi, items = bounce_integral(eq, grid=grid, return_items=True) + bi, items = bounce_integral(eq, grid=grid, return_items=True, check=True) data_keys = [ "|grad(psi)|^2", From 1fd4c6a76212f27464f148e259bf13e83bbf79b8 Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 8 Apr 2024 14:39:45 -0400 Subject: [PATCH 066/241] Clean up code and remove no longer used functions --- desc/compute/bounce_integral.py | 456 ++++++++++++++++---------------- desc/compute/utils.py | 2 +- tests/test_bounce_integral.py | 119 +++------ 3 files changed, 269 insertions(+), 308 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 3bd087b427..0463fc6126 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -11,94 +11,6 @@ roots = jnp.vectorize(partial(jnp.roots, strip_zeros=False), signature="(n)->(m)") -# vmap to compute a bounce integral for every pitch along every field line. -@partial(vmap, in_axes=(1, 1, None, None, 0, 0, 0, 0), out_axes=1) -def bounce_quad(pitch, X, w, knots, f, B_sup_z, B, B_z_ra): - """Compute a bounce integral for every pitch along a particular field line. - - Parameters - ---------- - pitch : Array, shape(pitch.size, ) - λ values. - X : Array, shape(pitch.size, X.shape[1], w.size) - Quadrature points. - w : Array, shape(w.size, ) - Quadrature weights. - knots : Array, shape(knots.size, ) - Field line-following ζ coordinates of spline knots. - f : Array, shape(knots.size, ) - Function to compute bounce integral of, evaluated at knots. - B_sup_z : Array, shape(knots.size, ) - Contravariant field-line following toroidal component of magnetic field. - B : Array, shape(knots.size, ) - Norm of magnetic field. - B_z_ra : Array, shape(knots.size, ) - Norm of magnetic field derivative with respect to field-line following label. 
- - Returns - ------- - inner_product : Array, shape(pitch.size, X.shape[1]) - Bounce integrals for every pitch along a particular field line. - - """ - assert pitch.ndim == 1 == w.ndim - assert X.shape == (pitch.size, X.shape[1], w.size) - assert knots.shape == f.shape == B_sup_z.shape == B.shape == B_z_ra.shape - # Spline the integrand so that we can evaluate it at quadrature points - # without expensive coordinate mappings and root finding. - # Spline each function separately so that the singularity near the bounce - # points can be captured more accurately than can be by any polynomial. - shape = X.shape - X = X.ravel() - # Use akima spline to suppress oscillation. - f = interp1d(X, knots, f, method="akima").reshape(shape) - B_sup_z = interp1d(X, knots, B_sup_z, method="akima").reshape(shape) - # Specify derivative at knots with fx=B_z_ra for ≈ cubic hermite interpolation. - B = interp1d(X, knots, B, fx=B_z_ra, method="cubic").reshape(shape) - pitch = pitch[:, jnp.newaxis, jnp.newaxis] - inner_product = jnp.dot(f / (B_sup_z * jnp.sqrt(1 - pitch * B)), w) - return inner_product - - -def tanh_sinh_quad(resolution=7): - """ - tanh_sinh quadrature. - - This function outputs the quadrature points and weights - for a tanh-sinh quadrature. - - ∫₋₁¹ f(x) dx = ∑ₖ wₖ f(xₖ) - - Parameters - ---------- - resolution: int - Number of quadrature points, preferably odd. - - Returns - ------- - x : numpy array - Quadrature points - w : numpy array - Quadrature weights - - """ - # https://github.com/f0uriest/quadax/blob/main/quadax/utils.py#L166 - # Compute boundary of quadrature. - # x_max = 1 - eps with some buffer - x_max = jnp.array(1.0) - 10 * jnp.finfo(jnp.array(1.0)).eps - tanh_inv = lambda x: jnp.log((1 + x) / (1 - x)) / 2 - sinh_inv = lambda x: jnp.log(x + jnp.sqrt(x**2 + 1)) - # inverse of tanh-sinh transformation for x_max - t_max = sinh_inv(2 / jnp.pi * tanh_inv(x_max)) - - points = jnp.linspace(-t_max, t_max, resolution) - h = 2 * t_max / (resolution - 1) - sinh_points = jnp.sinh(points) - x = jnp.tanh(0.5 * jnp.pi * sinh_points) - w = 0.5 * jnp.pi * h * jnp.cosh(points) / jnp.cosh(0.5 * jnp.pi * sinh_points) ** 2 - return x, w - - @partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={2, 3}) def take_mask(a, mask, size=None, fill_value=None): """JIT compilable method to return ``a[mask][:size]`` padded by ``fill_value``. @@ -141,115 +53,22 @@ def take_mask(a, mask, size=None, fill_value=None): ) -def poly_int(c, k=None): - """Coefficients for the primitives of the given set of polynomials. +def _filter_real(a, a_min=-jnp.inf, a_max=jnp.inf): + """Keep real values inside [a_min, a_max] and set others to nan. Parameters ---------- - c : Array - First axis should store coefficients of a polynomial. - For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, - coefficient cᵢ should be stored at ``c[n - i]``. - k : Array - Integration constants. - Should broadcast with arrays of shape(*coef.shape[1:]). - - Returns - ------- - poly : Array - Coefficients of polynomial primitive. - That is, ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁺¹, - where n is ``c.shape[0] - 1``. - - """ - if k is None: - k = jnp.broadcast_to(0.0, c.shape[1:]) - poly = (c.T / jnp.arange(c.shape[0], 0, -1)).T - poly = jnp.append(poly, k[jnp.newaxis], axis=0) - return poly - - -def poly_der(c): - """Coefficients for the derivatives of the given set of polynomials. - - Parameters - ---------- - c : Array - First axis should store coefficients of a polynomial. 
- For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, - coefficient cᵢ should be stored at ``c[n - i]``. - - Returns - ------- - poly : Array - Coefficients of polynomial derivative, ignoring the arbitrary constant. - That is, ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁻¹, - where n is ``c.shape[0] - 1``. - - """ - poly = (c[:-1].T * jnp.arange(c.shape[0] - 1, 0, -1)).T - return poly - - -def poly_val(x, c): - """Evaluate the set of polynomials c at the points x. - - Note that this function does not perform the same operation as - ``np.polynomial.polynomial.polyval(x, c)``. - - Parameters - ---------- - x : Array - Coordinates at which to evaluate the set of polynomials. - c : Array - First axis should store coefficients of a polynomial. - For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, - coefficient cᵢ should be stored at ``c[n - i]``. - - Returns - ------- - val : Array - Polynomial with given coefficients evaluated at given points. - - Examples - -------- - .. code-block:: python - - val = polyval(x, c) - if val.ndim != max(x.ndim, c.ndim - 1): - raise ValueError(f"Incompatible shapes {x.shape} and {c.shape}.") - for index in np.ndindex(c.shape[1:]): - idx = (..., *index) - np.testing.assert_allclose( - actual=val[idx], - desired=np.poly1d(c[idx])(x[idx]), - err_msg=f"Failed with shapes {x.shape} and {c.shape}.", - ) - - """ - # Should be fine to do this instead of Horner's method - # because we expect to usually integrate up to quartic polynomials. - X = x[..., jnp.newaxis] ** jnp.arange(c.shape[0] - 1, -1, -1) - val = jnp.einsum("...i,i...->...", X, c) - assert val.ndim == max(x.ndim, c.ndim - 1) - return val - - -def _complex_to_nan(root, a_min=-jnp.inf, a_max=jnp.inf): - """Set complex-valued roots and real roots outside [a_min, a_max] to nan. - - Parameters - ---------- - root : Array - Complex-valued roots. + a : Array + Complex-valued array. a_min, a_max : Array, Array - Minimum and maximum value to return roots between. - Should broadcast with ``root`` array. + Minimum and maximum value to keep real values between. + Should broadcast with ``a``. Returns ------- roots : Array - The real roots in [a_min, a_max]; others transformed to nan. + The real values of ``a`` in [``a_min``, ``a_max``]; others set to nan. + The returned array preserves the order of ``a``. 
""" if a_min is None: @@ -257,13 +76,13 @@ def _complex_to_nan(root, a_min=-jnp.inf, a_max=jnp.inf): if a_max is None: a_max = jnp.inf return jnp.where( - jnp.isclose(jnp.imag(root), 0) & (a_min <= root) & (root <= a_max), - jnp.real(root), + jnp.isclose(jnp.imag(a), 0) & (a_min <= a) & (a <= a_max), + jnp.real(a), jnp.nan, ) -def _root_linear(a, b): +def _root_linear(a, b, *args): """Return r such that a * r + b = 0.""" return safediv(-b, a, fill=jnp.where(jnp.isclose(b, 0), 0, jnp.nan)) @@ -274,7 +93,7 @@ def _root_quadratic(a, b, c, distinct=False): C = complex_sqrt(discriminant) def root(xi): - return safediv(-b + xi * C, 2 * a) + return (-b + xi * C) / (2 * a) is_linear = jnp.isclose(a, 0) suppress_root = distinct & jnp.isclose(discriminant, 0) @@ -293,7 +112,7 @@ def _root_cubic(a, b, c, d, distinct=False): C_is_zero = jnp.isclose(t_0, 0) & jnp.isclose(t_1, 0) def root(xi): - return safediv(b + xi * C + jnp.where(C_is_zero, 0, t_0 / (xi * C)), -3 * a) + return (b + xi * C + jnp.where(C_is_zero, 0, t_0 / (xi * C))) / (-3 * a) xi0 = 1 xi1 = (-1 + (-3) ** 0.5) / 2 @@ -339,12 +158,13 @@ def poly_root(coef, k=0, a_min=None, a_max=None, sort=False, distinct=False): The roots of the polynomial, iterated over the last axis. """ - func = {3: _root_quadratic, 4: _root_cubic} + keep_only_real = not (a_min is None and a_max is None) + func = {2: _root_linear, 3: _root_quadratic, 4: _root_cubic} if coef.shape[0] in func: # compute from analytic formula r = func[coef.shape[0]](*coef[:-1], coef[-1] - k, distinct) - if not (a_min is None and a_max is None): - r = tuple(map(partial(_complex_to_nan, a_min=a_min, a_max=a_max), r)) + if keep_only_real: + r = tuple(map(partial(_filter_real, a_min=a_min, a_max=a_max), r)) r = jnp.stack(r, axis=-1) if sort: r = jnp.sort(r, axis=-1) @@ -355,12 +175,12 @@ def poly_root(coef, k=0, a_min=None, a_max=None, sort=False, distinct=False): c.append(c_n) coef = jnp.stack(c) r = roots(coef.reshape(coef.shape[0], -1).T).reshape(*coef.shape[1:], -1) - if not (a_min is None and a_max is None): + if keep_only_real: if a_min is not None: a_min = a_min[..., jnp.newaxis] if a_max is not None: a_max = a_max[..., jnp.newaxis] - r = _complex_to_nan(r, a_min, a_max) + r = _filter_real(r, a_min, a_max) if sort or distinct: r = jnp.sort(r, axis=-1) if distinct: @@ -369,11 +189,105 @@ def poly_root(coef, k=0, a_min=None, a_max=None, sort=False, distinct=False): return r +def poly_int(c, k=None): + """Coefficients for the primitives of the given set of polynomials. + + Parameters + ---------- + c : Array + First axis should store coefficients of a polynomial. + For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, + coefficient cᵢ should be stored at ``c[n - i]``. + k : Array + Integration constants. + Should broadcast with arrays of shape(*coef.shape[1:]). + + Returns + ------- + poly : Array + Coefficients of polynomial primitive. + That is, ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁺¹, + where n is ``c.shape[0] - 1``. + + """ + if k is None: + k = jnp.broadcast_to(0.0, c.shape[1:]) + poly = (c.T / jnp.arange(c.shape[0], 0, -1)).T + poly = jnp.append(poly, k[jnp.newaxis], axis=0) + return poly + + +def poly_der(c): + """Coefficients for the derivatives of the given set of polynomials. + + Parameters + ---------- + c : Array + First axis should store coefficients of a polynomial. + For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, + coefficient cᵢ should be stored at ``c[n - i]``. 
+ + Returns + ------- + poly : Array + Coefficients of polynomial derivative, ignoring the arbitrary constant. + That is, ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁻¹, + where n is ``c.shape[0] - 1``. + + """ + poly = (c[:-1].T * jnp.arange(c.shape[0] - 1, 0, -1)).T + return poly + + +def poly_val(x, c): + """Evaluate the set of polynomials c at the points x. + + Note that this function does not perform the same operation as + ``np.polynomial.polynomial.polyval(x, c)``. + + Parameters + ---------- + x : Array + Coordinates at which to evaluate the set of polynomials. + c : Array + First axis should store coefficients of a polynomial. + For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, + coefficient cᵢ should be stored at ``c[n - i]``. + + Returns + ------- + val : Array + Polynomial with given coefficients evaluated at given points. + + Examples + -------- + .. code-block:: python + + val = polyval(x, c) + if val.ndim != max(x.ndim, c.ndim - 1): + raise ValueError(f"Incompatible shapes {x.shape} and {c.shape}.") + for index in np.ndindex(c.shape[1:]): + idx = (..., *index) + np.testing.assert_allclose( + actual=val[idx], + desired=np.poly1d(c[idx])(x[idx]), + err_msg=f"Failed with shapes {x.shape} and {c.shape}.", + ) + + """ + # Should be fine to do this instead of Horner's method + # because we expect to usually integrate up to quartic polynomials. + X = x[..., jnp.newaxis] ** jnp.arange(c.shape[0] - 1, -1, -1) + val = jnp.einsum("...i,i...->...", X, c) + assert val.ndim == max(x.ndim, c.ndim - 1) + return val + + def pitch_of_extrema(knots, poly_B, poly_B_z): """Return pitch values that will capture fat banana orbits. - These pitch values are 1/|B|(ζ*) where |B|(ζ*) is a local maximum. - The local minimum are returned as well. + These pitch values are 1/|B|(ζ*) where |B|(ζ*) are local maxima. + The local minima are returned as well. Parameters ---------- @@ -416,6 +330,7 @@ def pitch_of_extrema(knots, poly_B, poly_B_z): a_min=jnp.array([0]), a_max=jnp.diff(knots), sort=False, # don't need to sort + # False will double weight orbits with B_z = B_zz = 0 at bounce points. distinct=True, ) # Can detect at most degree of |B|_z spline extrema between each knot. @@ -487,7 +402,7 @@ def bounce_points(pitch, knots, poly_B, poly_B_z, check=False): # The polynomials' intersection points with 1 / λ is given by ``intersect``. # In order to be JIT compilable, this must have a shape that accommodates the - # case where each cubic polynomial intersects 1 / λ degree times. + # case where each polynomial intersects 1 / λ degree times. # nan values in ``intersect`` denote a polynomial has less than degree intersects. intersect = poly_root( coef=poly_B, @@ -496,44 +411,43 @@ def bounce_points(pitch, knots, poly_B, poly_B_z, check=False): a_min=jnp.array([0]), a_max=jnp.diff(knots), sort=True, - distinct=True, + distinct=True, # Required for correctness of ``edge_case``. ) assert intersect.shape == (P, S, N, degree) # Reshape so that last axis enumerates intersects of a pitch along a field line. - # Condense remaining axes to vmap over them. + # Condense remaining axes to vectorize over them. B_z = poly_val(x=intersect, c=poly_B_z[..., jnp.newaxis]).reshape(P * S, -1) - # Transform from local power basis expansion to real space. + # Transform out of local power basis expansion. intersect = intersect + knots[:-1, jnp.newaxis] intersect = intersect.reshape(P * S, -1) # Only consider intersect if it is within knots that bound that polynomial. 
is_intersect = ~jnp.isnan(intersect) - # Rearrange so that all intersects along a field line are contiguous. + # Reorder so that all intersects along a field line are contiguous. intersect = take_mask(intersect, is_intersect) B_z = take_mask(B_z, is_intersect) assert intersect.shape == B_z.shape == (P * S, N * degree) - # The boolean masks is_bp1 and is_bp2 will encode whether a given entry in - # intersect is a valid starting and ending bounce point, respectively. # Sign of derivative determines whether an intersect is a valid bounce point. # Need to include zero derivative intersects to compute the WFB # (world's fattest banana) orbit bounce integrals. is_bp1 = B_z <= 0 is_bp2 = B_z >= 0 - # For correctness, it is necessary that the first intersect satisfies B_z <= 0. - # That is, the pairs bp1[:, i] and bp2[:, i] are the boundaries of an - # integral only if bp1[:, i] <= bp2[:, i]. - # Now, because B_z[:, i] <= 0 implies B_z[:, i + 1] >= 0 by continuity, - # there can be at most one inversion, and if it exists, the inversion must be - # at the first pair. To correct the inversion, it suffices to disqualify - # the first intersect as a right bounce point. + # The pairs bp1[i, j] and bp2[i, j] are the boundaries of an integral only if + # bp1[i, j] <= bp2[i, j]. For correctness of the algorithm, it is necessary + # that the first intersect satisfies B_z <= 0. Now, because B_z[i, j] <= 0 + # implies B_z[i, j + 1] >= 0 by continuity, there can be at most one + # inversion, and if it exists, the inversion must be at the first pair. To + # correct the inversion, it suffices to disqualify the first intersect as an + # ending bounce point. edge_case = (B_z[:, 0] == 0) & (B_z[:, 1] < 0) - is_bp2 = put_along_axis(is_bp2, jnp.array([0]), edge_case[:, jnp.newaxis], axis=-1) + is_bp2 = put_along_axis(is_bp2, jnp.array(0), edge_case, axis=-1) # Get ζ values of bounce points from the masks. bp1 = take_mask(intersect, is_bp1).reshape(P, S, -1) bp2 = take_mask(intersect, is_bp2).reshape(P, S, -1) if check: - assert jnp.all((bp2 >= bp1) | jnp.isnan(bp1) | jnp.isnan(bp2)) + if not jnp.all((bp2 >= bp1) | jnp.isnan(bp1) | jnp.isnan(bp2)): + raise AssertionError("Bounce points have an inversion.") return bp1, bp2 # This is no longer implemented at the moment. # If the first intersect satisfies B_z >= 0, that particle may be @@ -574,13 +488,96 @@ def _compute_bp_if_given_pitch( return pitch, *bounce_points(pitch, knots, poly_B, poly_B_z, check) +def tanh_sinh_quad(resolution=7): + """Tanh-Sinh quadrature. + + This function outputs the quadrature points xₖ and weights wₖ + for a tanh-sinh quadrature. + + ∫₋₁¹ f(x) dx = ∑ₖ wₖ f(xₖ) + + Parameters + ---------- + resolution: int + Number of quadrature points, preferably odd. + + Returns + ------- + x : numpy array + Quadrature points + w : numpy array + Quadrature weights + + """ + # boundary of integral + x_max = jnp.array(1.0) + # subtract machine epsilon with buffer for floating point error + x_max = x_max - 10 * jnp.finfo(x_max).eps + # inverse of tanh-sinh transformation + t_max = jnp.arcsinh(2 * jnp.arctanh(x_max) / jnp.pi) + kh = jnp.linspace(-t_max, t_max, resolution) + h = 2 * t_max / (resolution - 1) + x = jnp.tanh(0.5 * jnp.pi * jnp.sinh(kh)) + w = 0.5 * jnp.pi * h * jnp.cosh(kh) / jnp.cosh(0.5 * jnp.pi * jnp.sinh(kh)) ** 2 + return x, w + + +# Vectorize to compute a bounce integral for every pitch along every field line. 
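For reference, this quadrature is aimed at the 1/√(1 − λ|B|) factor in the bounce integrand, which vanishes at the bounce points. A quick check of the returned nodes and weights on a model integrand with the same endpoint behavior (illustrative, not part of the patch; assumes ``tanh_sinh_quad`` is importable from ``desc.compute.bounce_integral`` as defined in this file and that the backend runs in double precision):

.. code-block:: python

    import numpy as np
    from desc.compute.bounce_integral import tanh_sinh_quad

    x, w = map(np.asarray, tanh_sinh_quad(resolution=99))
    # ∫₋₁¹ dx / √(1 − x²) = π, with integrable singularities at both endpoints.
    np.testing.assert_allclose(
        np.dot(w, 1 / np.sqrt(1 - x**2)), np.pi, rtol=1e-4
    )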
+@partial(vmap, in_axes=(1, 1, None, None, 0, 0, 0, 0), out_axes=1) +def _bounce_quad(pitch, X, w, knots, f, B_sup_z, B, B_z_ra): + """Compute a bounce integral for every pitch along a particular field line. + + Parameters + ---------- + pitch : Array, shape(pitch.size, ) + λ values. + X : Array, shape(pitch.size, X.shape[1], w.size) + Quadrature points. + w : Array, shape(w.size, ) + Quadrature weights. + knots : Array, shape(knots.size, ) + Field line-following ζ coordinates of spline knots. + f : Array, shape(knots.size, ) + Function to compute bounce integral of, evaluated at knots. + B_sup_z : Array, shape(knots.size, ) + Contravariant field-line following toroidal component of magnetic field. + B : Array, shape(knots.size, ) + Norm of magnetic field. + B_z_ra : Array, shape(knots.size, ) + Norm of magnetic field derivative with respect to field-line following label. + + Returns + ------- + inner_product : Array, shape(pitch.size, X.shape[1]) + Bounce integrals for every pitch along a particular field line. + + """ + assert pitch.ndim == 1 == w.ndim + assert X.shape == (pitch.size, X.shape[1], w.size) + assert knots.shape == f.shape == B_sup_z.shape == B.shape == B_z_ra.shape + # Spline the integrand so that we can evaluate it at quadrature points + # without expensive coordinate mappings and root finding. + # Spline each function separately so that the singularity near the bounce + # points can be captured more accurately than can be by any polynomial. + shape = X.shape + X = X.ravel() + # Use akima spline to suppress oscillation. + f = interp1d(X, knots, f, method="akima").reshape(shape) + B_sup_z = interp1d(X, knots, B_sup_z, method="akima").reshape(shape) + # Specify derivative at knots with fx=B_z_ra for ≈ cubic hermite interpolation. + B = interp1d(X, knots, B, fx=B_z_ra, method="cubic").reshape(shape) + pitch = pitch[:, jnp.newaxis, jnp.newaxis] + inner_product = jnp.dot(f / (B_sup_z * jnp.sqrt(1 - pitch * B)), w) + return inner_product + + def bounce_integral( eq, pitch=None, rho=jnp.linspace(1e-12, 1, 10), alpha=None, zeta=jnp.linspace(0, 6 * jnp.pi, 20), - quadrature=tanh_sinh_quad, + quad=tanh_sinh_quad, **kwargs, ): """Returns a method to compute the bounce integral of any quantity. @@ -623,7 +620,7 @@ def bounce_integral( The number of knots specifies the grid resolution as increasing the number of knots increases the accuracy of representing the integrand and the accuracy of the locations of the bounce points. - quadrature : callable + quad : callable The quadrature scheme used to evaluate the integral. Should return quadrature points and weights when called. The returned points should be within the domain [-1, 1]. 
@@ -670,6 +667,9 @@ def bounce_integral( result = bi(f, pitch).reshape(pitch_res, rho.size, alpha.size, -1) """ + check = kwargs.pop("check", False) + return_items = kwargs.pop("return_items", False) + if alpha is None: alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 10) rho = jnp.atleast_1d(rho) @@ -682,15 +682,13 @@ def bounce_integral( B_sup_z = data["B^zeta"].reshape(S, -1) B = data["|B|"].reshape(S, -1) B_z_ra = data["|B|_z|r,a"].reshape(S, -1) - poly_B = CubicHermiteSpline(zeta, B, B_z_ra, axis=-1, check=False).c + poly_B = CubicHermiteSpline(zeta, B, B_z_ra, axis=-1, check=check).c poly_B = jnp.moveaxis(poly_B, 1, -1) poly_B_z = poly_der(poly_B) assert poly_B.shape == (4, S, zeta.size - 1) assert poly_B_z.shape == (3, S, zeta.size - 1) - check = kwargs.pop("check", False) - return_items = kwargs.pop("return_items", False) - x, w = quadrature(**kwargs) + x, w = quad(**kwargs) # change of variable, x = sin([0.5 + (ζ − ζ_b₂)/(ζ_b₂−ζ_b₁)] π) x = jnp.arcsin(x) / jnp.pi - 0.5 original = _compute_bp_if_given_pitch( @@ -725,10 +723,12 @@ def _bounce_integral(f, pitch=None): pitch, zeta, poly_B, poly_B_z, *original, err=True, check=check ) X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] - f = f.reshape(S, zeta.size) + f = f.reshape(S, -1) pitch = jnp.broadcast_to(pitch, shape=(pitch.shape[0], S)) return ( - bounce_quad(pitch, X, w, zeta, f, B_sup_z, B, B_z_ra) / (bp2 - bp1) * jnp.pi + _bounce_quad(pitch, X, w, zeta, f, B_sup_z, B, B_z_ra) + / (bp2 - bp1) + * jnp.pi ) if return_items: @@ -744,7 +744,7 @@ def bounce_average( rho=jnp.linspace(1e-12, 1, 10), alpha=None, zeta=jnp.linspace(0, 6 * jnp.pi, 20), - quadrature=tanh_sinh_quad, + quad=tanh_sinh_quad, **kwargs, ): """Returns a method to compute the bounce average of any quantity. @@ -788,7 +788,7 @@ def bounce_average( The number of knots specifies the grid resolution as increasing the number of knots increases the accuracy of representing the integrand and the accuracy of the locations of the bounce points. - quadrature : callable + quad : callable The quadrature scheme used to evaluate the integral. Should return quadrature points and weights when called. The returned points should be within the domain [-1, 1]. @@ -864,7 +864,7 @@ def _bounce_average(f, pitch=None): # akima suppresses oscillation of the spline. return bi(f, pitch) / bi(jnp.ones_like(f), pitch) - bi = bounce_integral(eq, pitch, rho, alpha, zeta, quadrature, **kwargs) + bi = bounce_integral(eq, pitch, rho, alpha, zeta, quad, **kwargs) if kwargs.get("return_items"): bi, items = bi return _bounce_average, items diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 8b092ea40a..4bb7411a6d 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -881,7 +881,7 @@ def surface_integrals_map(grid, surface_label="rho", expand_out=True, tol=1e-14) ) spacing = jnp.prod(spacing, axis=1) - # Todo: Define masks as a sparse matrix once sparse matrices are no longer + # Todo: Define mask as a sparse matrix once sparse matrices are no longer # experimental in jax. 
if has_idx: # The ith row of masks is True only at the indices which correspond to the diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 07d28fba1a..e6447ee8b9 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -8,15 +8,7 @@ from matplotlib import pyplot as plt from scipy.special import ellipe, ellipk -from desc.backend import ( - cond, - flatnonzero, - fori_loop, - jnp, - put, - put_along_axis, - root_scalar, -) +from desc.backend import flatnonzero, fori_loop, jnp, put, root_scalar from desc.compute.bounce_integral import ( bounce_average, bounce_integral, @@ -45,7 +37,6 @@ from desc.profiles import PowerSeriesProfile -# TODO: delete if end up not needing @np.vectorize(signature="(m)->()") def _last_value(a): """Return the last non-nan value in ``a``.""" @@ -54,40 +45,10 @@ def _last_value(a): return a[idx] -@np.vectorize(signature="(),(m),()->(m)") -def _maybe_roll_and_replace(maybe, a, replacement): - """If maybe is true, roll a right and put replacement value at a[0]. - - Parameters - ---------- - maybe : ndarray, shape(1, ) - Whether to roll array. - a : ndarray - Array to roll. - replacement : ndarray, shape(1, ) - Value to place at index zero. - - Returns - ------- - result : ndarray - The (possibly) rolled array. - - """ - return cond( - maybe, - lambda x, r: put(jnp.roll(x, shift=1), jnp.array([0]), r), - lambda x, r: x, - a, - replacement, - ) - - -def filter_nan(a): +def _filter_not_nan(a): """Filter out nan while making sure they have correct padding.""" is_nan = np.isnan(a) - assert np.array_equal( - is_nan, np.sort(is_nan, axis=-1) - ), "nan not padded on correctly" + assert np.array_equal(is_nan, np.sort(is_nan, axis=-1)) return a[~is_nan] @@ -108,26 +69,7 @@ def test_mask_operations(): np.pad(desired, (0, cols - desired.size), constant_values=np.nan), equal_nan=True, ), "take_mask() has bugs." - assert np.array_equal(last[i], desired[-1]), "_last_value() has bugs." - - maybe = np.random.choice([True, False], size=rows) - # This might be a better way to perform this computation, without - # the jax.cond, which will get transformed to jax.select under vmap - # which performs both branches of the computation. - # But perhaps computing replacement as above, while fine for jit, - # will make the computation non-differentiable. - roll = np.vectorize(np.roll, signature="(m),()->(m)") - desired = put_along_axis( - roll(taken, maybe), - np.array([0]), - np.expand_dims(last * maybe + taken[:, 0] * (~maybe), axis=-1), - axis=-1, - ) - assert np.array_equal( - _maybe_roll_and_replace(maybe, taken, last), - desired, - equal_nan=True, - ), "_roll_and_replace_if_shift() has bugs." + assert np.array_equal(last[i], desired[-1]), "flatnonzero() has bugs." @pytest.mark.unit @@ -159,7 +101,9 @@ def test_reshape_convention(): err_msg = "The ordering conventions are required for correctness." 
assert "P, S, N" in inspect.getsource(bounce_points), err_msg - assert "S, zeta.size" in inspect.getsource(bounce_integral), err_msg + src = inspect.getsource(bounce_integral) + assert "S, zeta.size" in src, err_msg + assert "pitch_res, rho.size, alpha.size" in src, err_msg src = inspect.getsource(desc_grid_from_field_line_coords) assert 'indexing="ij"' in src, err_msg assert 'meshgrid(rho, alpha, zeta, indexing="ij")' in src, err_msg @@ -174,23 +118,40 @@ def test_poly_root(): assert np.unique(poly.shape).size == poly.ndim constant = np.broadcast_to(np.arange(poly.shape[-1]), poly.shape[1:]) constant = np.stack([constant, constant]) - actual = poly_root(poly, constant, sort=True) + root = poly_root(poly, constant, sort=True) for i in range(constant.shape[0]): for j in range(poly.shape[1]): for k in range(poly.shape[2]): d = poly[-1, j, k] - constant[i, j, k] - desired = np.sort(np.roots([*poly[:-1, j, k], d])) - np.testing.assert_allclose(actual[i, j, k], desired) + np.testing.assert_allclose( + actual=root[i, j, k], + desired=np.sort(np.roots([*poly[:-1, j, k], d])), + ) poly = np.array( - [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [1, -1, -8, 12], [1, -6, 11, -6]] + [ + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [1, -1, -8, 12], + [1, -6, 11, -6], + [0, -6, 11, -2], + ] ) - actual = poly_root(poly.T, sort=True, distinct=True) + root = poly_root(poly.T, sort=True, distinct=True) for j in range(poly.shape[0]): - distinct = actual[j][~np.isnan(actual[j])] - desired = np.unique(np.roots(poly[j])) - np.testing.assert_allclose(distinct, desired, err_msg=str(j)) + np.testing.assert_allclose( + actual=_filter_not_nan(root[j]), + desired=np.unique(np.roots(poly[j])), + err_msg=str(j), + ) + poly = np.array([0, 1, -1, -8, 12]) + np.testing.assert_allclose( + actual=_filter_not_nan(poly_root(poly, sort=True, distinct=True)), + desired=np.unique(np.roots(poly)), + ) @pytest.mark.unit @@ -304,10 +265,10 @@ def assert_case_1(plot=False): B.derivative().c[:, np.newaxis], check=True, ) - bp1, bp2 = map(filter_nan, (bp1, bp2)) - # Hardcode desired because CubicHermiteSpline.solve not yet implemented. - np.testing.assert_allclose(bp1, desired=np.array([1.04719755, 7.13120418])) - np.testing.assert_allclose(bp2, desired=np.array([5.19226163, 17.57830469])) + bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + # Hardcode because CubicHermiteSpline.solve not yet implemented. + np.testing.assert_allclose(bp1, np.array([1.04719755, 7.13120418])) + np.testing.assert_allclose(bp2, np.array([5.19226163, 17.57830469])) def assert_case_2(plot=False): # 1/pitch intersects extrema @@ -326,10 +287,10 @@ def assert_case_2(plot=False): B.derivative().c[:, np.newaxis], check=True, ) - bp1, bp2 = map(filter_nan, (bp1, bp2)) - # Hardcode desired because CubicHermiteSpline.solve not yet implemented. - np.testing.assert_allclose(bp1, desired=np.array([1.04719755, 7.13120418])) - np.testing.assert_allclose(bp2, desired=np.array([5.19226163, 17.57830469])) + bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + # Hardcode because CubicHermiteSpline.solve not yet implemented. 
+ np.testing.assert_allclose(bp1, np.array([1.04719755, 7.13120418])) + np.testing.assert_allclose(bp2, np.array([5.19226163, 17.57830469])) # TODO: add all the edge cases I parameterized assert_case_1() From de53ac115e29691889264db4f28f7d66892f646a Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 8 Apr 2024 21:22:22 -0400 Subject: [PATCH 067/241] Finish tests for bounce_points() and use special method for constant spline --- desc/compute/bounce_integral.py | 203 +++++++++++++++++--------------- tests/test_bounce_integral.py | 185 +++++++++++++++++++++-------- 2 files changed, 248 insertions(+), 140 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 0463fc6126..afa9bf3073 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -82,7 +82,7 @@ def _filter_real(a, a_min=-jnp.inf, a_max=jnp.inf): ) -def _root_linear(a, b, *args): +def _root_linear(a, b, distinct=False): """Return r such that a * r + b = 0.""" return safediv(-b, a, fill=jnp.where(jnp.isclose(b, 0), 0, jnp.nan)) @@ -129,23 +129,23 @@ def root(xi): return r1, r2, r3 -def poly_root(coef, k=0, a_min=None, a_max=None, sort=False, distinct=False): +def poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): """Roots of polynomial with given coefficients. Parameters ---------- - coef : Array + c : Array First axis should store coefficients of a polynomial. For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, coefficient cᵢ should be stored at ``c[n - i]``. k : Array Specify to find solutions to ∑ᵢⁿ cᵢ xⁱ = ``k``. - Should broadcast with arrays of shape(*coef.shape[1:]). + Should broadcast with arrays of shape(*c.shape[1:]). a_min, a_max : Array, Array Minimum and maximum value to return roots between. If specified only real roots are returned. If None, returns all complex roots. - Should broadcast with arrays of shape(*coef.shape[1:]). + Should broadcast with arrays of shape(*c.shape[1:]). sort : bool Whether to sort the roots. distinct : bool @@ -154,15 +154,15 @@ def poly_root(coef, k=0, a_min=None, a_max=None, sort=False, distinct=False): Returns ------- - r : Array, shape(..., coef.shape[1:], coef.shape[0] - 1) + r : Array, shape(..., c.shape[1:], c.shape[0] - 1) The roots of the polynomial, iterated over the last axis. 
""" keep_only_real = not (a_min is None and a_max is None) func = {2: _root_linear, 3: _root_quadratic, 4: _root_cubic} - if coef.shape[0] in func: + if c.shape[0] in func: # compute from analytic formula - r = func[coef.shape[0]](*coef[:-1], coef[-1] - k, distinct) + r = func[c.shape[0]](*c[:-1], c[-1] - k, distinct) if keep_only_real: r = tuple(map(partial(_filter_real, a_min=a_min, a_max=a_max), r)) r = jnp.stack(r, axis=-1) @@ -170,11 +170,11 @@ def poly_root(coef, k=0, a_min=None, a_max=None, sort=False, distinct=False): r = jnp.sort(r, axis=-1) else: # compute from eigenvalues of polynomial companion matrix - c_n = coef[-1] - k - c = [jnp.broadcast_to(c_i, c_n.shape) for c_i in coef[:-1]] + c_n = c[-1] - k + c = [jnp.broadcast_to(c_i, c_n.shape) for c_i in c[:-1]] c.append(c_n) - coef = jnp.stack(c) - r = roots(coef.reshape(coef.shape[0], -1).T).reshape(*coef.shape[1:], -1) + c = jnp.stack(c) + r = roots(c.reshape(c.shape[0], -1).T).reshape(*c.shape[1:], -1) if keep_only_real: if a_min is not None: a_min = a_min[..., jnp.newaxis] @@ -283,7 +283,31 @@ def poly_val(x, c): return val -def pitch_of_extrema(knots, poly_B, poly_B_z): +def _check_shape(knots, B, B_z_ra, pitch=None): + """Ensure spline polynomial coefficients and pitch have correct shape.""" + if B.ndim == 2 and B_z_ra.ndim == 2: + # Add axis which enumerates field lines. + B = B[:, jnp.newaxis] + B_z_ra = B_z_ra[:, jnp.newaxis] + err_msg = "Supplied invalid shape for splines." + assert B.ndim == B_z_ra.ndim == 3, err_msg + S = B.shape[1] + N = knots.size - 1 + degree = B.shape[0] - 1 + assert degree == B_z_ra.shape[0] and B.shape[1:] == B_z_ra.shape[1:], err_msg + assert N == B.shape[-1], "Last axis fails to enumerate spline polynomials." + + if pitch is None: + return S, N, degree + pitch = jnp.atleast_2d(pitch) + err_msg = "Supplied invalid shape for pitch angles." + assert pitch.ndim == 2, err_msg + assert pitch.shape[-1] == 1 or pitch.shape[-1] == B.shape[1], err_msg + P = pitch.shape[0] + return P, S, N, degree + + +def pitch_of_extrema(knots, B, B_z_ra): """Return pitch values that will capture fat banana orbits. These pitch values are 1/|B|(ζ*) where |B|(ζ*) are local maxima. @@ -293,13 +317,13 @@ def pitch_of_extrema(knots, poly_B, poly_B_z): ---------- knots : Array, shape(knots.size, ) Field line-following ζ coordinates of spline knots. - poly_B : Array, shape(poly_B.shape[0], S, knots.size - 1) + B : Array, shape(B.shape[0], S, knots.size - 1) Polynomial coefficients of the spline of |B| in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials of the spline along a particular field line. - poly_B_z : Array, shape(poly_B.shape[0] - 1, *poly_B.shape[1:]) + B_z_ra : Array, shape(B.shape[0] - 1, *B.shape[1:]) Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. @@ -310,23 +334,17 @@ def pitch_of_extrema(knots, poly_B, poly_B_z): ------- pitch : Array, shape(N * (degree - 1), S) For the shaping notation, the ``degree`` of the spline of |B| matches - ``poly_B.shape[0] - 1``, the number of polynomials per spline - ``N`` matches ``knots.size - 1``, and the number of field lines is - denoted by ``S``. + ``B.shape[0] - 1``, the number of polynomials per spline ``N`` matches + ``knots.size - 1``, and the number of field lines is denoted by ``S``. 
If there were less than ``N * (degree - 1)`` extrema detected along a field line, then the first axis, which enumerates the pitch values for a particular field line, is padded with nan. """ - S = poly_B.shape[1] - N = knots.size - 1 - degree = poly_B.shape[0] - 1 - assert degree == poly_B_z.shape[0] and poly_B.shape[1:] == poly_B_z.shape[1:] - assert N == poly_B.shape[-1], "Last axis fails to enumerate spline polynomials." - + S, N, degree = _check_shape(knots, B, B_z_ra) extrema = poly_root( - coef=poly_B_z, + c=B_z_ra, a_min=jnp.array([0]), a_max=jnp.diff(knots), sort=False, # don't need to sort @@ -336,7 +354,7 @@ def pitch_of_extrema(knots, poly_B, poly_B_z): # Can detect at most degree of |B|_z spline extrema between each knot. assert extrema.shape == (S, N, degree - 1) # Reshape so that last axis enumerates (unsorted) extrema along a field line. - B_extrema = poly_val(x=extrema, c=poly_B[..., jnp.newaxis]).reshape(S, -1) + B_extrema = poly_val(x=extrema, c=B[..., jnp.newaxis]).reshape(S, -1) # Might be useful to pad all the nan at the end rather than interspersed. B_extrema = take_mask(B_extrema, ~jnp.isnan(B_extrema)) pitch = 1 / B_extrema.T @@ -344,32 +362,32 @@ def pitch_of_extrema(knots, poly_B, poly_B_z): return pitch -def bounce_points(pitch, knots, poly_B, poly_B_z, check=False): +def bounce_points(knots, B, B_z_ra, pitch, check=False): """Compute the bounce points given spline of |B| and pitch λ. Parameters ---------- - pitch : Array, shape(P, S) - λ values. - Last axis enumerates the λ value for a particular field line - parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` - where in the latter the labels (ρ, α) are interpreted as index into the - last axis that corresponds to that field line. - If two-dimensional, the first axis is the batch axis as usual. knots : Array, shape(knots.size, ) Field line-following ζ coordinates of spline knots. - poly_B : Array, shape(poly_B.shape[0], S, knots.size - 1) + B : Array, shape(B.shape[0], S, knots.size - 1) Polynomial coefficients of the spline of |B| in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials of the spline along a particular field line. - poly_B_z : Array, shape(poly_B.shape[0] - 1, *poly_B.shape[1:]) + B_z_ra : Array, shape(B.shape[0] - 1, *B.shape[1:]) Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials of the spline along a particular field line. + pitch : Array, shape(P, S) + λ values. + Last axis enumerates the λ value for a particular field line + parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` + where in the latter the labels (ρ, α) are interpreted as index into the + last axis that corresponds to that field line. + If two-dimensional, the first axis is the batch axis as usual. check : bool Flag for debugging. @@ -377,35 +395,24 @@ def bounce_points(pitch, knots, poly_B, poly_B_z, check=False): ------- bp1, bp2 : Array, Array, shape(P, S, N * degree) For the shaping notation, the ``degree`` of the spline of |B| matches - ``poly_B.shape[0] - 1``, the number of polynomials per spline - ``N`` matches ``knots.size - 1``, and the number of field lines is - denoted by ``S``. 
+ ``B.shape[0] - 1``, the number of polynomials per spline ``N`` matches + ``knots.size - 1``, and the number of field lines is denoted by ``S``. The returned arrays are the field line-following ζ coordinates of bounce - points for a given pitch along a field line. The pairs bp1[..., i] and - bp2[..., i] form left and right integration boundaries, respectively, + points for a given pitch along a field line. The pairs bp1[i, j] and + bp2[i, j] form left and right integration boundaries, respectively, for the bounce integrals. If there were less than ``N * degree`` bounce points detected along a field line, then the last axis, which enumerates the bounce points for a particular field line, is padded with nan. """ - pitch = jnp.atleast_2d(pitch) - err_msg = "Supplied invalid shape for pitch angles." - assert pitch.ndim == 2, err_msg - assert pitch.shape[-1] == 1 or pitch.shape[-1] == poly_B.shape[1], err_msg - P = pitch.shape[0] - S = poly_B.shape[1] - N = knots.size - 1 - degree = poly_B.shape[0] - 1 - assert degree == poly_B_z.shape[0] and poly_B.shape[1:] == poly_B_z.shape[1:] - assert N == poly_B.shape[-1], "Last axis fails to enumerate spline polynomials." - + P, S, N, degree = _check_shape(knots, B, B_z_ra, pitch) # The polynomials' intersection points with 1 / λ is given by ``intersect``. # In order to be JIT compilable, this must have a shape that accommodates the # case where each polynomial intersects 1 / λ degree times. # nan values in ``intersect`` denote a polynomial has less than degree intersects. intersect = poly_root( - coef=poly_B, + c=B, # Expand to use same pitches across polynomials of a particular spline. k=jnp.expand_dims(1 / pitch, axis=-1), a_min=jnp.array([0]), @@ -413,11 +420,10 @@ def bounce_points(pitch, knots, poly_B, poly_B_z, check=False): sort=True, distinct=True, # Required for correctness of ``edge_case``. ) - assert intersect.shape == (P, S, N, degree) # Reshape so that last axis enumerates intersects of a pitch along a field line. # Condense remaining axes to vectorize over them. - B_z = poly_val(x=intersect, c=poly_B_z[..., jnp.newaxis]).reshape(P * S, -1) + B_z_ra = poly_val(x=intersect, c=B_z_ra[..., jnp.newaxis]).reshape(P * S, -1) # Transform out of local power basis expansion. intersect = intersect + knots[:-1, jnp.newaxis] intersect = intersect.reshape(P * S, -1) @@ -426,48 +432,50 @@ def bounce_points(pitch, knots, poly_B, poly_B_z, check=False): # Reorder so that all intersects along a field line are contiguous. intersect = take_mask(intersect, is_intersect) - B_z = take_mask(B_z, is_intersect) - assert intersect.shape == B_z.shape == (P * S, N * degree) + B_z_ra = take_mask(B_z_ra, is_intersect) + assert intersect.shape == B_z_ra.shape == (P * S, N * degree) # Sign of derivative determines whether an intersect is a valid bounce point. # Need to include zero derivative intersects to compute the WFB # (world's fattest banana) orbit bounce integrals. - is_bp1 = B_z <= 0 - is_bp2 = B_z >= 0 - # The pairs bp1[i, j] and bp2[i, j] are the boundaries of an integral only if + is_bp1 = B_z_ra <= 0 + is_bp2 = B_z_ra >= 0 + # The pairs bp1[i, j] and bp2[i, j] are boundaries of an integral only if # bp1[i, j] <= bp2[i, j]. For correctness of the algorithm, it is necessary - # that the first intersect satisfies B_z <= 0. Now, because B_z[i, j] <= 0 - # implies B_z[i, j + 1] >= 0 by continuity, there can be at most one - # inversion, and if it exists, the inversion must be at the first pair. 
To - # correct the inversion, it suffices to disqualify the first intersect as an - # ending bounce point. - edge_case = (B_z[:, 0] == 0) & (B_z[:, 1] < 0) + # that the first intersect satisfies non-positive derivative. Now, because + # B_z_ra[i, j] <= 0 implies B_z_ra[i, j + 1] >= 0 by continuity, there can + # be at most one inversion, and if it exists, the inversion must be at the + # first pair. To correct the inversion, it suffices to disqualify the first + # intersect as an ending bounce point. + edge_case = (B_z_ra[:, 0] == 0) & (B_z_ra[:, 1] < 0) is_bp2 = put_along_axis(is_bp2, jnp.array(0), edge_case, axis=-1) # Get ζ values of bounce points from the masks. bp1 = take_mask(intersect, is_bp1).reshape(P, S, -1) bp2 = take_mask(intersect, is_bp2).reshape(P, S, -1) + if check: if not jnp.all((bp2 >= bp1) | jnp.isnan(bp1) | jnp.isnan(bp2)): raise AssertionError("Bounce points have an inversion.") + return bp1, bp2 # This is no longer implemented at the moment. - # If the first intersect satisfies B_z >= 0, that particle may be - # trapped in a well outside this snapshot of the field line. - # If, in addition, the last intersect satisfies B_z <= 0, then we have the - # required information to compute a bounce integral between these points. + # If the first intersect is at a non-negative derivative, that particle + # may be trapped in a well outside this snapshot of the field line. If, in + # addition, the last intersect is at a non-positive derivative, then we + # have information to compute a bounce integral between these points. # This single bounce integral is somewhat undefined since the field typically # does not close on itself, but in some cases it can make sense to include it. - # (To make this integral well-defined, an approximation is made that the field + # To make this integral well-defined, an approximation is made that the field # line is periodic such that ζ = knots[-1] can be interpreted as ζ = 0 so # that the distance between these bounce points is well-defined. This is fine # as long as after a transit the field line begins physically close to where # it began on the previous transit, for then continuity of |B| implies - # |B|(knots[-1] < ζ < knots[-1] + knots[0]) is close to |B|(0 < ζ < knots[0])). - # We don't need to check the conditions for the latter, because if they are - # not satisfied, the quadrature will evaluate √(1 − λ |B|) as nan. + # |B|(knots[-1] < ζ < knots[-1] + knots[0]) is close to |B|(0 < ζ < knots[0]). + # We don't need to check conditions for the latter, because if they are not + # satisfied, the quadrature will evaluate √(1 − λ |B|) as nan automatically. def _compute_bp_if_given_pitch( - pitch, knots, poly_B, poly_B_z, *original, err=False, check=False + knots, B, B_z_ra, pitch, *original, err=False, check=False ): """Conditionally return the ingredients needed to compute bounce integrals. @@ -485,7 +493,7 @@ def _compute_bp_if_given_pitch( return original else: pitch = jnp.atleast_2d(pitch) - return pitch, *bounce_points(pitch, knots, poly_B, poly_B_z, check) + return *bounce_points(knots, B, B_z_ra, pitch, check), pitch def tanh_sinh_quad(resolution=7): @@ -523,8 +531,8 @@ def tanh_sinh_quad(resolution=7): # Vectorize to compute a bounce integral for every pitch along every field line. 
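For reference, the pairing rule documented above (an intersection of |B| with 1/λ where ∂|B|/∂ζ ≤ 0 is a left bounce point, one where ∂|B|/∂ζ ≥ 0 is a right bounce point) can be seen on a model field line. A NumPy-only schematic, not the vectorized spline-based implementation in this patch:

.. code-block:: python

    import numpy as np

    zeta = np.linspace(0, 2 * np.pi, 1001)
    B = 2 + np.cos(zeta)             # model |B| along a field line
    pitch = 1 / 2.5                  # λ such that 1/λ = 2.5 sits inside the well
    # Indices just before |B| crosses 1/λ.
    crossing = np.nonzero(np.diff(np.sign(B - 1 / pitch)))[0]
    B_z = -np.sin(zeta[crossing])    # d|B|/dζ at the crossings
    bp1 = zeta[crossing[B_z <= 0]]   # |B| decreasing through 1/λ: well opens
    bp2 = zeta[crossing[B_z >= 0]]   # |B| increasing through 1/λ: well closes
    np.testing.assert_allclose(bp1, np.pi / 3, rtol=1e-2)
    np.testing.assert_allclose(bp2, 5 * np.pi / 3, rtol=1e-2)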
-@partial(vmap, in_axes=(1, 1, None, None, 0, 0, 0, 0), out_axes=1) -def _bounce_quad(pitch, X, w, knots, f, B_sup_z, B, B_z_ra): +@partial(vmap, in_axes=(1, 1, None, None, 0, 0, 0, 0, None), out_axes=1) +def _bounce_quad(pitch, X, w, knots, f, B_sup_z, B, B_z_ra, f_method): """Compute a bounce integral for every pitch along a particular field line. Parameters @@ -545,6 +553,8 @@ def _bounce_quad(pitch, X, w, knots, f, B_sup_z, B, B_z_ra): Norm of magnetic field. B_z_ra : Array, shape(knots.size, ) Norm of magnetic field derivative with respect to field-line following label. + f_method : str + Method of interpolation for f. Returns ------- @@ -561,8 +571,11 @@ def _bounce_quad(pitch, X, w, knots, f, B_sup_z, B, B_z_ra): # points can be captured more accurately than can be by any polynomial. shape = X.shape X = X.ravel() + if f_method == "constant": + f = f[0] + else: + f = interp1d(X, knots, f, method=f_method).reshape(shape) # Use akima spline to suppress oscillation. - f = interp1d(X, knots, f, method="akima").reshape(shape) B_sup_z = interp1d(X, knots, B_sup_z, method="akima").reshape(shape) # Specify derivative at knots with fx=B_z_ra for ≈ cubic hermite interpolation. B = interp1d(X, knots, B, fx=B_z_ra, method="cubic").reshape(shape) @@ -682,8 +695,9 @@ def bounce_integral( B_sup_z = data["B^zeta"].reshape(S, -1) B = data["|B|"].reshape(S, -1) B_z_ra = data["|B|_z|r,a"].reshape(S, -1) - poly_B = CubicHermiteSpline(zeta, B, B_z_ra, axis=-1, check=check).c - poly_B = jnp.moveaxis(poly_B, 1, -1) + poly_B = jnp.moveaxis( + CubicHermiteSpline(zeta, B, B_z_ra, axis=-1, check=check).c, 1, -1 + ) poly_B_z = poly_der(poly_B) assert poly_B.shape == (4, S, zeta.size - 1) assert poly_B_z.shape == (3, S, zeta.size - 1) @@ -692,10 +706,10 @@ def bounce_integral( # change of variable, x = sin([0.5 + (ζ − ζ_b₂)/(ζ_b₂−ζ_b₁)] π) x = jnp.arcsin(x) / jnp.pi - 0.5 original = _compute_bp_if_given_pitch( - pitch, zeta, poly_B, poly_B_z, err=False, check=check + zeta, poly_B, poly_B_z, pitch, err=False, check=check ) - def _bounce_integral(f, pitch=None): + def _bounce_integral(f, pitch=None, f_method="akima"): """Compute the bounce integral of ``f``. Parameters @@ -710,6 +724,8 @@ def _bounce_integral(f, pitch=None): where in the latter the labels (ρ, α) are interpreted as index into the last axis that corresponds to that field line. If two-dimensional, the first axis is the batch axis as usual. + f_method : str, optional + Method of interpolation for f. Returns ------- @@ -719,17 +735,19 @@ def _bounce_integral(f, pitch=None): Last axis enumerates the bounce integrals. """ - pitch, bp1, bp2 = _compute_bp_if_given_pitch( - pitch, zeta, poly_B, poly_B_z, *original, err=True, check=check + bp1, bp2, pitch = _compute_bp_if_given_pitch( + zeta, poly_B, poly_B_z, pitch, *original, err=True, check=check ) X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] - f = f.reshape(S, -1) pitch = jnp.broadcast_to(pitch, shape=(pitch.shape[0], S)) - return ( - _bounce_quad(pitch, X, w, zeta, f, B_sup_z, B, B_z_ra) + f = f.reshape(S, -1) + result = ( + _bounce_quad(pitch, X, w, zeta, f, B_sup_z, B, B_z_ra, f_method) / (bp2 - bp1) * jnp.pi ) + assert result.shape == (pitch.shape[0], S, (zeta.size - 1) * 3) + return result if return_items: items = {"grid": grid, "data": data, "poly_B": poly_B, "poly_B_z": poly_B_z} @@ -836,7 +854,7 @@ def bounce_average( """ - def _bounce_average(f, pitch=None): + def _bounce_average(f, pitch=None, f_method="akima"): """Compute the bounce average of ``f``. 
Parameters @@ -851,6 +869,9 @@ def _bounce_average(f, pitch=None): where in the latter the labels (ρ, α) are interpreted as index into the last axis that corresponds to that field line. If two-dimensional, the first axis is the batch axis as usual. + f_method : str, optional + Method of interpolation for f. + Returns ------- @@ -860,9 +881,7 @@ def _bounce_average(f, pitch=None): Last axis enumerates the bounce integrals. """ - # Should be fine to fit akima spline to constant function 1 since - # akima suppresses oscillation of the spline. - return bi(f, pitch) / bi(jnp.ones_like(f), pitch) + return bi(f, pitch, f_method) / bi(jnp.ones_like(f), pitch, "constant") bi = bounce_integral(eq, pitch, rho, alpha, zeta, quad, **kwargs) if kwargs.get("return_items"): diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index e6447ee8b9..8567a4cd4e 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -4,11 +4,14 @@ import numpy as np import pytest -from interpax import Akima1DInterpolator, CubicHermiteSpline +from interpax import Akima1DInterpolator from matplotlib import pyplot as plt + +# TODO: can use the one from interpax once .solve() is implemented +from scipy.interpolate import CubicHermiteSpline from scipy.special import ellipe, ellipk -from desc.backend import flatnonzero, fori_loop, jnp, put, root_scalar +from desc.backend import flatnonzero, fori_loop, put, root_scalar from desc.compute.bounce_integral import ( bounce_average, bounce_integral, @@ -40,13 +43,13 @@ @np.vectorize(signature="(m)->()") def _last_value(a): """Return the last non-nan value in ``a``.""" - a = jnp.ravel(a)[::-1] - idx = jnp.squeeze(flatnonzero(~jnp.isnan(a), size=1, fill_value=0)) + a = np.ravel(a)[::-1] + idx = np.squeeze(flatnonzero(~np.isnan(a), size=1, fill_value=0)) return a[idx] def _filter_not_nan(a): - """Filter out nan while making sure they have correct padding.""" + """Filter out nan from ``a`` while asserting nan is padded at right.""" is_nan = np.isnan(a) assert np.array_equal(is_nan, np.sort(is_nan, axis=-1)) return a[~is_nan] @@ -68,8 +71,8 @@ def test_mask_operations(): taken[i], np.pad(desired, (0, cols - desired.size), constant_values=np.nan), equal_nan=True, - ), "take_mask() has bugs." - assert np.array_equal(last[i], desired[-1]), "flatnonzero() has bugs." + ), "take_mask has bugs." + assert np.array_equal(last[i], desired[-1]), "flatnonzero has bugs." @pytest.mark.unit @@ -77,7 +80,7 @@ def test_reshape_convention(): """Test the reshaping convention separates data across field lines.""" rho = np.linspace(0, 1, 3) alpha = np.linspace(0, 2 * np.pi, 4) - zeta = np.linspace(0, 10 * np.pi, 5) + zeta = np.linspace(0, 6 * np.pi, 5) r, a, z = map(np.ravel, np.meshgrid(rho, alpha, zeta, indexing="ij")) # functions of zeta should separate along first two axes # since those are contiguous, this should work @@ -237,63 +240,149 @@ def test_poly_val(): @pytest.mark.unit def test_bounce_points(): - """Test that the bounce points are computed correctly.""" + """Test that bounce points are computed correctly.""" def plot_field_line(B, pitch, start, end): + # Can observe correctness of bounce points through this plot. 
fig, ax = plt.subplots() for knot in B.x: ax.axvline(x=knot, color="red", linestyle="--") - z = np.linspace(start, end, 50) - ax.plot(z, B(z)) - ax.plot(z, np.full(z.size, 1 / pitch)) + z = np.linspace(start, end, 100) + ax.plot(z, B(z), label=r"$\vert B \vert (\zeta)$") + ax.plot(z, np.full(z.size, 1 / pitch), label=r"$1 / \lambda$") + ax.set_xlabel(r"Field line $\zeta$") + ax.set_ylabel("Tesla") + ax.legend() plt.show() + plt.close() - def assert_case_1(plot=False): - # 1/pitch does not intersect extrema - pitch = 2 + def test_bp1_first(plot=False): start = np.pi / 3 end = 6 * np.pi knots = np.linspace(start, end, 5) B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots)) - # Can observe correctness of bounce points through this plot. + pitch = 2 if plot: plot_field_line(B, pitch, start, end) - bp1, bp2 = bounce_points( - pitch, - knots, - B.c[:, np.newaxis], - B.derivative().c[:, np.newaxis], - check=True, - ) + bp1, bp2 = bounce_points(knots, B.c, B.derivative().c, pitch, check=True) + bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + intersect = B.solve(1 / pitch, extrapolate=False) + np.testing.assert_allclose(bp1, intersect[0::2]) + np.testing.assert_allclose(bp2, intersect[1::2]) + + def test_bp2_first(plot=False): + start = -3 * np.pi + end = -start + k = np.linspace(start, end, 5) + B = CubicHermiteSpline(k, np.cos(k), -np.sin(k)) + pitch = 2 + if plot: + plot_field_line(B, pitch, start, end) + bp1, bp2 = bounce_points(k, B.c, B.derivative().c, pitch, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) - # Hardcode because CubicHermiteSpline.solve not yet implemented. - np.testing.assert_allclose(bp1, np.array([1.04719755, 7.13120418])) - np.testing.assert_allclose(bp2, np.array([5.19226163, 17.57830469])) + intersect = B.solve(1 / pitch, extrapolate=False) + np.testing.assert_allclose(bp1, intersect[1::2]) + np.testing.assert_allclose(bp2, intersect[0::2][1:]) + + def test_bp1_before_extrema(plot=False): + start = -np.pi + end = -2 * start + k = np.linspace(start, end, 5) + B = CubicHermiteSpline( + k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) + ) + B_z_ra = B.derivative() + pitch = 1 / B(B_z_ra.roots(extrapolate=False))[3] + if plot: + plot_field_line(B, pitch, start, end) - def assert_case_2(plot=False): - # 1/pitch intersects extrema - pitch = 1 - start = np.pi / 3 - end = 6 * np.pi - knots = np.linspace(start, end, 5) - B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots)) - # Can observe correctness of bounce points through this plot. + bp1, bp2 = bounce_points(k, B.c, B_z_ra.c, pitch, check=True) + bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + intersect = B.solve(1 / pitch, extrapolate=False) + # Our routine correctly detects intersection, while scipy fails. 
+ np.testing.assert_allclose(bp1[1], 1.9827671337414938) + intersect = np.insert(intersect, np.searchsorted(intersect, bp1[1]), bp1[1]) + np.testing.assert_allclose(bp1, intersect[[1, 2]]) + np.testing.assert_allclose(bp2, intersect[[2, 3]]) + + def test_bp2_before_extrema(plot=False): + start = -1.2 * np.pi + end = -2 * start + k = np.linspace(start, end, 7) + B = CubicHermiteSpline( + k, + np.cos(k) + 2 * np.sin(-2 * k) + k / 4, + -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 4, + ) + B_z_ra = B.derivative() + pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] if plot: plot_field_line(B, pitch, start, end) - bp1, bp2 = bounce_points( - pitch, - knots, - B.c[:, np.newaxis], - B.derivative().c[:, np.newaxis], - check=True, + + bp1, bp2 = bounce_points(k, B.c, B_z_ra.c, pitch, check=True) + bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + intersect = B.solve(1 / pitch, extrapolate=False) + np.testing.assert_allclose(bp1, intersect[[0, -2]]) + np.testing.assert_allclose(bp2, intersect[[1, -1]]) + + def test_extrema_first_and_before_bp1(plot=False): + start = -1.2 * np.pi + end = -2 * start + k = np.linspace(start, end, 7) + B = CubicHermiteSpline( + k, + np.cos(k) + 2 * np.sin(-2 * k) + k / 20, + -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 20, ) + B_z_ra = B.derivative() + pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] + if plot: + plot_field_line(B, pitch, k[2], end) + + bp1, bp2 = bounce_points(k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], pitch, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) - # Hardcode because CubicHermiteSpline.solve not yet implemented. - np.testing.assert_allclose(bp1, np.array([1.04719755, 7.13120418])) - np.testing.assert_allclose(bp2, np.array([5.19226163, 17.57830469])) + intersect = B.solve(1 / pitch, extrapolate=False) + # Our routine correctly detects intersection, while scipy fails. + np.testing.assert_allclose(bp1[0], 0.8353192766102349) + intersect = np.insert(intersect, np.searchsorted(intersect, bp1[0]), bp1[0]) + intersect = intersect[intersect >= k[2]] + np.testing.assert_allclose(bp1, intersect[[0, 1, 3]]) + np.testing.assert_allclose(bp2, intersect[[0, 2, 4]]) + + def test_extrema_first_and_before_bp2(plot=False): + start = -1.2 * np.pi + end = -2 * start + 1 + k = np.linspace(start, end, 7) + B = CubicHermiteSpline( + k, + np.cos(k) + 2 * np.sin(-2 * k) + k / 10, + -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 10, + ) + B_z_ra = B.derivative() + pitch = 1 / B(B_z_ra.roots(extrapolate=False))[1] + if plot: + plot_field_line(B, pitch, start, end) - # TODO: add all the edge cases I parameterized - assert_case_1() + bp1, bp2 = bounce_points(k, B.c, B_z_ra.c, pitch, check=True) + bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + intersect = B.solve(1 / pitch, extrapolate=False) + # Our routine correctly detects intersection, while scipy fails. + np.testing.assert_allclose(bp1[0], -0.6719044147510538) + intersect = np.insert(intersect, np.searchsorted(intersect, bp1[0]), bp1[0]) + np.testing.assert_allclose(bp1, intersect[0::2]) + np.testing.assert_allclose(bp2, intersect[1::2]) + + # These are all the unique cases, if all tests pass then the bounce_points + # should work correctly for all inputs. Pass in True to see plots. + test_bp1_first() + test_bp2_first() + test_bp1_before_extrema() + test_bp2_before_extrema() + # In theory, this test should only pass if distinct=True when + # computing the intersections in bounce points. However, in this case, due + # to floating point errors, it also passes when distinct=False. 
+ test_extrema_first_and_before_bp1() + test_extrema_first_and_before_bp2() @pytest.mark.unit @@ -302,7 +391,7 @@ def test_pitch_and_hairy_ball(): eq = get("HELIOTRON") rho = np.linspace(1e-12, 1, 6) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) - zeta = jnp.linspace(0, 6 * jnp.pi, 20) + zeta = np.linspace(0, 6 * np.pi, 20) ba, items = bounce_average(eq, rho=rho, alpha=alpha, zeta=zeta, return_items=True) B = items["data"]["B"] assert not np.isclose(B, 0, atol=1e-19).any(), "B should never vanish." @@ -379,7 +468,7 @@ def beta(grid, data): rho = np.array([0.5]) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 10) - zeta = np.linspace(0, 10 * np.pi, 20) + zeta = np.linspace(0, 6 * np.pi, 20) bi, items = bounce_integral( eq, rho=rho, alpha=alpha, zeta=zeta, return_items=True, check=True ) @@ -522,7 +611,7 @@ def test_bounce_averaged_drifts(): # TODO: if deemed useful finish details using methods in desc.compute.bounce_integral def _compute_bounce_points_with_root_finding( - eq, pitch, rho, alpha, resolution=20, zeta_max=10 * np.pi + eq, pitch, rho, alpha, resolution=20, zeta_max=6 * np.pi ): # TODO: avoid separate root finding routines in residual and jac # and use previous desc coords as initial guess for next iteration From 8839e75a2cf8e71c60baeae7898929dd58bf2498 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 9 Apr 2024 00:01:26 -0400 Subject: [PATCH 068/241] Fix bounce_average drift test and document weakness of jnp.roots --- desc/compute/bounce_integral.py | 20 +++++++++++++------- tests/test_bounce_integral.py | 27 ++++++++++++++++++++------- 2 files changed, 33 insertions(+), 14 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index afa9bf3073..ddeb1424ad 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -8,7 +8,7 @@ from desc.compute.utils import safediv from desc.equilibrium.coords import desc_grid_from_field_line_coords -roots = jnp.vectorize(partial(jnp.roots, strip_zeros=False), signature="(n)->(m)") +roots = jnp.vectorize(partial(jnp.roots, strip_zeros=False), signature="(m)->(n)") @partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={2, 3}) @@ -161,7 +161,7 @@ def poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): keep_only_real = not (a_min is None and a_max is None) func = {2: _root_linear, 3: _root_quadratic, 4: _root_cubic} if c.shape[0] in func: - # compute from analytic formula + # Compute from analytic formula. r = func[c.shape[0]](*c[:-1], c[-1] - k, distinct) if keep_only_real: r = tuple(map(partial(_filter_real, a_min=a_min, a_max=a_max), r)) @@ -169,7 +169,9 @@ def poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): if sort: r = jnp.sort(r, axis=-1) else: - # compute from eigenvalues of polynomial companion matrix + # Compute from eigenvalues of polynomial companion matrix. + # This method can fail to detect roots near extrema, which is often + # where we want to detect roots for bounce integrals. c_n = c[-1] - k c = [jnp.broadcast_to(c_i, c_n.shape) for c_i in c[:-1]] c.append(c_n) @@ -284,7 +286,7 @@ def poly_val(x, c): def _check_shape(knots, B, B_z_ra, pitch=None): - """Ensure spline polynomial coefficients and pitch have correct shape.""" + """Ensure inputs have correct shape and return labels for those shapes.""" if B.ndim == 2 and B_z_ra.ndim == 2: # Add axis which enumerates field lines. B = B[:, jnp.newaxis] @@ -427,9 +429,9 @@ def bounce_points(knots, B, B_z_ra, pitch, check=False): # Transform out of local power basis expansion. 
intersect = intersect + knots[:-1, jnp.newaxis] intersect = intersect.reshape(P * S, -1) + # Only consider intersect if it is within knots that bound that polynomial. is_intersect = ~jnp.isnan(intersect) - # Reorder so that all intersects along a field line are contiguous. intersect = take_mask(intersect, is_intersect) B_z_ra = take_mask(B_z_ra, is_intersect) @@ -445,7 +447,7 @@ def bounce_points(knots, B, B_z_ra, pitch, check=False): # B_z_ra[i, j] <= 0 implies B_z_ra[i, j + 1] >= 0 by continuity, there can # be at most one inversion, and if it exists, the inversion must be at the # first pair. To correct the inversion, it suffices to disqualify the first - # intersect as an ending bounce point. + # intersect as an ending bounce point, except under the following edge case. edge_case = (B_z_ra[:, 0] == 0) & (B_z_ra[:, 1] < 0) is_bp2 = put_along_axis(is_bp2, jnp.array(0), edge_case, axis=-1) # Get ζ values of bounce points from the masks. @@ -453,8 +455,12 @@ def bounce_points(knots, B, B_z_ra, pitch, check=False): bp2 = take_mask(intersect, is_bp2).reshape(P, S, -1) if check: - if not jnp.all((bp2 >= bp1) | jnp.isnan(bp1) | jnp.isnan(bp2)): + if jnp.any(bp1 > bp2): raise AssertionError("Bounce points have an inversion.") + if jnp.any(bp1[:, 1:] < bp2[:, :-1]): + raise AssertionError( + "Discontinuity detected. Is B_z_ra the derivative of the spline of B?" + ) return bp1, bp2 # This is no longer implemented at the moment. diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 8567a4cd4e..3654f5bd09 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -378,9 +378,9 @@ def test_extrema_first_and_before_bp2(plot=False): test_bp2_first() test_bp1_before_extrema() test_bp2_before_extrema() - # In theory, this test should only pass if distinct=True when - # computing the intersections in bounce points. However, in this case, due - # to floating point errors, it also passes when distinct=False. + # In theory, this test should only pass if distinct=True when computing the + # intersections in bounce points. However, we can get lucky due to floating + # point errors, and it may also pass when distinct=False. test_extrema_first_and_before_bp1() test_extrema_first_and_before_bp2() @@ -429,6 +429,7 @@ def test_elliptic_integral_limit(): (and not whether the bounce points were accurate). """ + assert False L, M, N, NFP, sym = 6, 6, 6, 1, True surface = FourierRZToroidalSurface( R_lmn=[1.0, 0.1], @@ -521,8 +522,20 @@ def test_bounce_averaged_drifts(): c1 = eq.compute_theta_coords(coords1) grid = Grid(c1, sort=False) - # The bounce integral operator should be able to take a grid - bi, items = bounce_integral(eq, grid=grid, return_items=True, check=True) + # TODO: Request: The bounce integral operator should be able to take a grid. + # Response: Currently the API is such that the method does all the + # above preprocessing for you. Let's test it for correctness + # first then do this later. 
+ bi, items = bounce_integral( + eq, + rho=np.unique(coords1[:, 0]), + alpha=alpha, + zeta=zeta, + return_items=True, + check=True, + ) + grid = items["grid"] + grid._unique_zeta_idx = np.unique(grid.nodes[:, 2], return_index=True)[1] data_keys = [ "|grad(psi)|^2", @@ -536,7 +549,7 @@ def test_bounce_averaged_drifts(): "gbdrift", ] - data = eq.compute(data_keys, grid=grid, override_grid=False) + data = eq.compute(data_keys, grid=grid, data=items["data"]) psib = data_eq["psi"][-1] @@ -590,7 +603,7 @@ def test_bounce_averaged_drifts(): np.testing.assert_allclose(cvdrift, cvdrift_an, atol=9e-3, rtol=5e-3) # Values of pitch angle for which to evaluate the bounce averages - lambdas = np.linspace(1 / np.min(bmag), 1 / np.max(bmag), 11) + lambdas = np.linspace(1 / np.max(bmag), 1 / np.min(bmag), 11) bavg_drift_an = ( 0.5 * cvdrift_an * ellipe(lambdas) From cb7851e7bcee94c324a323a2f6a04163a6609d46 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 9 Apr 2024 02:46:41 -0400 Subject: [PATCH 069/241] Allow bounce integral integrand f to be function of pitch for... as required by test_bounce_averaged_drifts. Add tests for pitch_of_extrema. --- desc/compute/bounce_integral.py | 44 +++++++++++++++++++++++---------- tests/test_bounce_integral.py | 41 +++++++++++++++++++++--------- 2 files changed, 61 insertions(+), 24 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index ddeb1424ad..088834feb1 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -353,13 +353,12 @@ def pitch_of_extrema(knots, B, B_z_ra): # False will double weight orbits with B_z = B_zz = 0 at bounce points. distinct=True, ) - # Can detect at most degree of |B|_z spline extrema between each knot. - assert extrema.shape == (S, N, degree - 1) # Reshape so that last axis enumerates (unsorted) extrema along a field line. B_extrema = poly_val(x=extrema, c=B[..., jnp.newaxis]).reshape(S, -1) # Might be useful to pad all the nan at the end rather than interspersed. B_extrema = take_mask(B_extrema, ~jnp.isnan(B_extrema)) pitch = 1 / B_extrema.T + # Can detect at most degree of |B|_z spline extrema between each knot. assert pitch.shape == (N * (degree - 1), S) return pitch @@ -536,8 +535,13 @@ def tanh_sinh_quad(resolution=7): return x, w +interp1d_vec = jnp.vectorize( + interp1d, signature="(m),(n),(n)->(m)", excluded={"fx", "method"} +) + + # Vectorize to compute a bounce integral for every pitch along every field line. -@partial(vmap, in_axes=(1, 1, None, None, 0, 0, 0, 0, None), out_axes=1) +@partial(vmap, in_axes=(1, 1, None, None, 1, 0, 0, 0, None), out_axes=1) def _bounce_quad(pitch, X, w, knots, f, B_sup_z, B, B_z_ra, f_method): """Compute a bounce integral for every pitch along a particular field line. @@ -551,7 +555,7 @@ def _bounce_quad(pitch, X, w, knots, f, B_sup_z, B, B_z_ra, f_method): Quadrature weights. knots : Array, shape(knots.size, ) Field line-following ζ coordinates of spline knots. - f : Array, shape(knots.size, ) + f : Array, shape(..., knots.size, ) Function to compute bounce integral of, evaluated at knots. B_sup_z : Array, shape(knots.size, ) Contravariant field-line following toroidal component of magnetic field. @@ -568,9 +572,10 @@ def _bounce_quad(pitch, X, w, knots, f, B_sup_z, B, B_z_ra, f_method): Bounce integrals for every pitch along a particular field line. 
""" - assert pitch.ndim == 1 == w.ndim + assert pitch.ndim == w.ndim == knots.ndim == 1 assert X.shape == (pitch.size, X.shape[1], w.size) - assert knots.shape == f.shape == B_sup_z.shape == B.shape == B_z_ra.shape + assert f.ndim <= 2 and f.shape[-1] == knots.size + assert knots.shape == B_sup_z.shape == B.shape == B_z_ra.shape # Spline the integrand so that we can evaluate it at quadrature points # without expensive coordinate mappings and root finding. # Spline each function separately so that the singularity near the bounce @@ -578,14 +583,20 @@ def _bounce_quad(pitch, X, w, knots, f, B_sup_z, B, B_z_ra, f_method): shape = X.shape X = X.ravel() if f_method == "constant": - f = f[0] + f = f[..., 0].reshape(-1, 1, 1) + elif f.ndim == 1 or f.shape[0] == 1: + # Transpose because interp1d broadcasts opposite of numpy standard. + f = interp1d(X, knots, f.T, method=f_method).reshape(shape) else: - f = interp1d(X, knots, f, method=f_method).reshape(shape) + # First axis of f is a function of pitch; last axis is a function of knots. + f = interp1d_vec(X.reshape(pitch.size, -1), knots, f, method=f_method).reshape( + shape + ) # Use akima spline to suppress oscillation. B_sup_z = interp1d(X, knots, B_sup_z, method="akima").reshape(shape) # Specify derivative at knots with fx=B_z_ra for ≈ cubic hermite interpolation. B = interp1d(X, knots, B, fx=B_z_ra, method="cubic").reshape(shape) - pitch = pitch[:, jnp.newaxis, jnp.newaxis] + pitch = pitch.reshape(-1, 1, 1) inner_product = jnp.dot(f / (B_sup_z * jnp.sqrt(1 - pitch * B)), w) return inner_product @@ -720,8 +731,11 @@ def _bounce_integral(f, pitch=None, f_method="akima"): Parameters ---------- - f : Array, shape(items["grid"].num_nodes, ) + f : Array, shape(P, items["grid"].num_nodes, ) Quantity to compute the bounce integral of. + If two-dimensional, the first axis is interpreted as the batch axis, + which enumerates the evaluation of some function at particular pitch + values. pitch : Array, shape(P, S) λ values to evaluate the bounce integral at each field line. If None, uses the values given to the parent function. @@ -744,9 +758,10 @@ def _bounce_integral(f, pitch=None, f_method="akima"): bp1, bp2, pitch = _compute_bp_if_given_pitch( zeta, poly_B, poly_B_z, pitch, *original, err=True, check=check ) + f = f.reshape(-1, S, zeta.size) X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] + # Need explicit broadcast to vectorize over the S axis. pitch = jnp.broadcast_to(pitch, shape=(pitch.shape[0], S)) - f = f.reshape(S, -1) result = ( _bounce_quad(pitch, X, w, zeta, f, B_sup_z, B, B_z_ra, f_method) / (bp2 - bp1) @@ -865,8 +880,11 @@ def _bounce_average(f, pitch=None, f_method="akima"): Parameters ---------- - f : Array, shape(items["grid"].num_nodes, ) + f : Array, shape(P, items["grid"].num_nodes, ) Quantity to compute the bounce average of. + If two-dimensional, the first axis is interpreted as the batch axis, + which enumerates the evaluation of some function at particular pitch + values. pitch : Array, shape(P, S) λ values to evaluate the bounce average at each field line. If None, uses the values given to the parent function. @@ -887,7 +905,7 @@ def _bounce_average(f, pitch=None, f_method="akima"): Last axis enumerates the bounce integrals. 
""" - return bi(f, pitch, f_method) / bi(jnp.ones_like(f), pitch, "constant") + return bi(f, pitch, f_method) / bi(jnp.ones(f.shape[-1]), pitch, "constant") bi = bounce_integral(eq, pitch, rho, alpha, zeta, quad, **kwargs) if kwargs.get("return_items"): diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 3654f5bd09..cf6459a508 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -238,6 +238,21 @@ def test_poly_val(): np.testing.assert_allclose(primitive, a1d.antiderivative().c) +@pytest.mark.unit +def test_pitch_of_extrema(): + """Test that these pitch intersect extrema of |B|.""" + start = -np.pi + end = -2 * start + k = np.linspace(start, end, 5) + B = CubicHermiteSpline( + k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) + ) + B_z_ra = B.derivative() + pitch_scipy = 1 / B(B_z_ra.roots(extrapolate=False)) + pitch = _filter_not_nan(pitch_of_extrema(k, B.c, B_z_ra.c)) + np.testing.assert_allclose(pitch, pitch_scipy) + + @pytest.mark.unit def test_bounce_points(): """Test that bounce points are computed correctly.""" @@ -298,8 +313,8 @@ def test_bp1_before_extrema(plot=False): bp1, bp2 = bounce_points(k, B.c, B_z_ra.c, pitch, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + # Our routine correctly detects intersection, while scipy, jnp.root fails. intersect = B.solve(1 / pitch, extrapolate=False) - # Our routine correctly detects intersection, while scipy fails. np.testing.assert_allclose(bp1[1], 1.9827671337414938) intersect = np.insert(intersect, np.searchsorted(intersect, bp1[1]), bp1[1]) np.testing.assert_allclose(bp1, intersect[[1, 2]]) @@ -341,8 +356,8 @@ def test_extrema_first_and_before_bp1(plot=False): bp1, bp2 = bounce_points(k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], pitch, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + # Our routine correctly detects intersection, while scipy, jnp.root fails. intersect = B.solve(1 / pitch, extrapolate=False) - # Our routine correctly detects intersection, while scipy fails. np.testing.assert_allclose(bp1[0], 0.8353192766102349) intersect = np.insert(intersect, np.searchsorted(intersect, bp1[0]), bp1[0]) intersect = intersect[intersect >= k[2]] @@ -365,8 +380,8 @@ def test_extrema_first_and_before_bp2(plot=False): bp1, bp2 = bounce_points(k, B.c, B_z_ra.c, pitch, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + # Our routine correctly detects intersection, while scipy, jnp.root fails. intersect = B.solve(1 / pitch, extrapolate=False) - # Our routine correctly detects intersection, while scipy fails. 
np.testing.assert_allclose(bp1[0], -0.6719044147510538) intersect = np.insert(intersect, np.searchsorted(intersect, bp1[0]), bp1[0]) np.testing.assert_allclose(bp1, intersect[0::2]) @@ -603,21 +618,25 @@ def test_bounce_averaged_drifts(): np.testing.assert_allclose(cvdrift, cvdrift_an, atol=9e-3, rtol=5e-3) # Values of pitch angle for which to evaluate the bounce averages - lambdas = np.linspace(1 / np.max(bmag), 1 / np.min(bmag), 11) + pitch_res = 11 + pitch = np.linspace(1 / np.max(bmag), 1 / np.min(bmag), pitch_res).reshape( + pitch_res, -1 + ) bavg_drift_an = ( - 0.5 * cvdrift_an * ellipe(lambdas) - + gbdrift_an * ellipk(lambdas) - + dPdrho / bmag**2 * ellipe(lambdas) + 0.5 * cvdrift_an * ellipe(pitch) + + gbdrift_an * ellipk(pitch) + + dPdrho / bmag**2 * ellipe(pitch) ) # The quantities are already calculated along a field line bavg_drift_num = bi( - np.sqrt(1 - lambdas * bmag) * 0.5 * cvdrift - + gbdrift * 1 / np.sqrt(1 - lambdas * bmag) - + dPdrho / bmag**2 * np.sqrt(1 - lambdas * bmag), - lambdas, + np.sqrt(1 - pitch * bmag) * 0.5 * cvdrift + + gbdrift * 1 / np.sqrt(1 - pitch * bmag) + + dPdrho / bmag**2 * np.sqrt(1 - pitch * bmag), + pitch, ) + # might need to use _filter_not_nan function from top. np.testing.assert_allclose(bavg_drift_num, bavg_drift_an, atol=2e-2, rtol=1e-2) From aad8956d6eb51a4711a0a2740dab3a43e5f80e2b Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 9 Apr 2024 04:47:37 -0400 Subject: [PATCH 070/241] Use vectorize instead of vmap to make broadcasting easier --- desc/compute/bounce_integral.py | 138 ++++++++++++++++++++------------ tests/test_bounce_integral.py | 4 +- 2 files changed, 88 insertions(+), 54 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 088834feb1..166e969ed8 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -4,12 +4,10 @@ from interpax import CubicHermiteSpline, interp1d -from desc.backend import complex_sqrt, flatnonzero, jnp, put_along_axis, take, vmap +from desc.backend import complex_sqrt, flatnonzero, jnp, put_along_axis, take from desc.compute.utils import safediv from desc.equilibrium.coords import desc_grid_from_field_line_coords -roots = jnp.vectorize(partial(jnp.roots, strip_zeros=False), signature="(m)->(n)") - @partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={2, 3}) def take_mask(a, mask, size=None, fill_value=None): @@ -129,6 +127,9 @@ def root(xi): return r1, r2, r3 +_roots = jnp.vectorize(partial(jnp.roots, strip_zeros=False), signature="(m)->(n)") + + def poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): """Roots of polynomial with given coefficients. 
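For orientation, a minimal standalone sketch of how the vectorized ``_roots`` helper added above behaves; the example cubics are made up, and coefficients follow the ``numpy.roots`` convention of highest degree first:

.. code-block:: python

    from functools import partial

    import jax.numpy as jnp

    # Mirrors the helper above: apply jnp.roots along the last axis of a batch.
    _roots = jnp.vectorize(
        partial(jnp.roots, strip_zeros=False), signature="(m)->(n)"
    )

    c = jnp.array(
        [
            [1.0, 0.0, -1.0, 0.0],  # x**3 - x, roots 0, 1, -1
            [1.0, -6.0, 11.0, -6.0],  # (x-1)(x-2)(x-3), roots 1, 2, 3
        ]
    )
    r = _roots(c)  # complex array of shape (2, 3), one row of roots per cubic

This batched form is what lets ``poly_root`` evaluate the companion-matrix fallback for every spline polynomial at once.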
@@ -176,7 +177,7 @@ def poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): c = [jnp.broadcast_to(c_i, c_n.shape) for c_i in c[:-1]] c.append(c_n) c = jnp.stack(c) - r = roots(c.reshape(c.shape[0], -1).T).reshape(*c.shape[1:], -1) + r = _roots(c.reshape(c.shape[0], -1).T).reshape(*c.shape[1:], -1) if keep_only_real: if a_min is not None: a_min = a_min[..., jnp.newaxis] @@ -535,69 +536,94 @@ def tanh_sinh_quad(resolution=7): return x, w -interp1d_vec = jnp.vectorize( - interp1d, signature="(m),(n),(n)->(m)", excluded={"fx", "method"} +@partial( + jnp.vectorize, + signature="(m),(n),(n),(n)->(m)", + excluded={"method", "derivative", "extrap", "period"}, +) +def _interp1d_vec_fx( + xq, + x, + f, + fx, + method="cubic", + derivative=0, + extrap=False, + period=None, +): + return interp1d(xq, x, f, method, derivative, extrap, period, fx=fx) + + +@partial( + jnp.vectorize, + signature="(m),(n),(n)->(m)", + excluded={"method", "derivative", "extrap", "period"}, ) +def _interp1d_vec( + xq, + x, + f, + method="cubic", + derivative=0, + extrap=False, + period=None, +): + return interp1d(xq, x, f, method, derivative, extrap, period) -# Vectorize to compute a bounce integral for every pitch along every field line. -@partial(vmap, in_axes=(1, 1, None, None, 1, 0, 0, 0, None), out_axes=1) -def _bounce_quad(pitch, X, w, knots, f, B_sup_z, B, B_z_ra, f_method): - """Compute a bounce integral for every pitch along a particular field line. +def _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, pitch, f, f_method): + """Compute bounce integrals for every pitch along every field line. Parameters ---------- - pitch : Array, shape(pitch.size, ) - λ values. - X : Array, shape(pitch.size, X.shape[1], w.size) + X : Array, shape(P, S, X.shape[2], w.size) Quadrature points. w : Array, shape(w.size, ) Quadrature weights. knots : Array, shape(knots.size, ) Field line-following ζ coordinates of spline knots. - f : Array, shape(..., knots.size, ) - Function to compute bounce integral of, evaluated at knots. - B_sup_z : Array, shape(knots.size, ) + B_sup_z : Array, shape(S, knots.size, ) Contravariant field-line following toroidal component of magnetic field. - B : Array, shape(knots.size, ) + B : Array, shape(S, knots.size, ) Norm of magnetic field. - B_z_ra : Array, shape(knots.size, ) + B_z_ra : Array, shape(S, knots.size, ) Norm of magnetic field derivative with respect to field-line following label. + pitch : Array, shape(P, S) + λ values. + f : Array, shape(P, S, knots.size, ) + Function to compute bounce integral of, evaluated at knots. f_method : str Method of interpolation for f. Returns ------- - inner_product : Array, shape(pitch.size, X.shape[1]) + inner_product : Array, shape(X.shape[:-1]) Bounce integrals for every pitch along a particular field line. """ - assert pitch.ndim == w.ndim == knots.ndim == 1 - assert X.shape == (pitch.size, X.shape[1], w.size) - assert f.ndim <= 2 and f.shape[-1] == knots.size - assert knots.shape == B_sup_z.shape == B.shape == B_z_ra.shape + assert pitch.ndim == 2 + assert w.ndim == knots.ndim == 1 + assert X.shape == (pitch.shape[0], B.shape[0], X.shape[2], w.size) + assert knots.size == B.shape[-1] + assert f.ndim == 3 and f.shape[0] == 1 or f.shape[0] == pitch.shape[0] + assert f.shape[1:] == B_sup_z.shape == B.shape == B_z_ra.shape # Spline the integrand so that we can evaluate it at quadrature points # without expensive coordinate mappings and root finding. 
# Spline each function separately so that the singularity near the bounce # points can be captured more accurately than can be by any polynomial. shape = X.shape - X = X.ravel() + X = X.reshape(X.shape[0], X.shape[1], -1) if f_method == "constant": - f = f[..., 0].reshape(-1, 1, 1) - elif f.ndim == 1 or f.shape[0] == 1: - # Transpose because interp1d broadcasts opposite of numpy standard. - f = interp1d(X, knots, f.T, method=f_method).reshape(shape) + f = f[..., 0, jnp.newaxis, jnp.newaxis] else: - # First axis of f is a function of pitch; last axis is a function of knots. - f = interp1d_vec(X.reshape(pitch.size, -1), knots, f, method=f_method).reshape( - shape - ) + f = _interp1d_vec(X, knots, f, method=f_method).reshape(shape) # Use akima spline to suppress oscillation. - B_sup_z = interp1d(X, knots, B_sup_z, method="akima").reshape(shape) - # Specify derivative at knots with fx=B_z_ra for ≈ cubic hermite interpolation. - B = interp1d(X, knots, B, fx=B_z_ra, method="cubic").reshape(shape) - pitch = pitch.reshape(-1, 1, 1) + B_sup_z = _interp1d_vec(X, knots, B_sup_z, method="akima").reshape(shape) + # Specify derivative at knots with B_z_ra for ≈ cubic hermite interpolation. + B = _interp1d_vec_fx(X, knots, B, B_z_ra, method="cubic").reshape(shape) + pitch = pitch[..., jnp.newaxis, jnp.newaxis] inner_product = jnp.dot(f / (B_sup_z * jnp.sqrt(1 - pitch * B)), w) + assert inner_product.shape == shape[:-1] return inner_product @@ -675,7 +701,7 @@ def bounce_integral( Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials of the spline along a particular field line. - poly_B_z : Array, shape(3, S, zeta.size - 1) + poly_B_z_ra : Array, shape(3, S, zeta.size - 1) Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. @@ -715,15 +741,15 @@ def bounce_integral( poly_B = jnp.moveaxis( CubicHermiteSpline(zeta, B, B_z_ra, axis=-1, check=check).c, 1, -1 ) - poly_B_z = poly_der(poly_B) + poly_B_z_ra = poly_der(poly_B) assert poly_B.shape == (4, S, zeta.size - 1) - assert poly_B_z.shape == (3, S, zeta.size - 1) + assert poly_B_z_ra.shape == (3, S, zeta.size - 1) x, w = quad(**kwargs) # change of variable, x = sin([0.5 + (ζ − ζ_b₂)/(ζ_b₂−ζ_b₁)] π) x = jnp.arcsin(x) / jnp.pi - 0.5 original = _compute_bp_if_given_pitch( - zeta, poly_B, poly_B_z, pitch, err=False, check=check + zeta, poly_B, poly_B_z_ra, pitch, err=False, check=check ) def _bounce_integral(f, pitch=None, f_method="akima"): @@ -756,14 +782,21 @@ def _bounce_integral(f, pitch=None, f_method="akima"): """ bp1, bp2, pitch = _compute_bp_if_given_pitch( - zeta, poly_B, poly_B_z, pitch, *original, err=True, check=check + zeta, poly_B, poly_B_z_ra, pitch, *original, err=True, check=check ) - f = f.reshape(-1, S, zeta.size) X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] - # Need explicit broadcast to vectorize over the S axis. 
- pitch = jnp.broadcast_to(pitch, shape=(pitch.shape[0], S)) result = ( - _bounce_quad(pitch, X, w, zeta, f, B_sup_z, B, B_z_ra, f_method) + _bounce_quad( + X=X, + w=w, + knots=zeta, + B_sup_z=B_sup_z, + B=B, + B_z_ra=B_z_ra, + pitch=pitch, + f=f.reshape(-1, S, zeta.size), + f_method=f_method, + ) / (bp2 - bp1) * jnp.pi ) @@ -771,7 +804,12 @@ def _bounce_integral(f, pitch=None, f_method="akima"): return result if return_items: - items = {"grid": grid, "data": data, "poly_B": poly_B, "poly_B_z": poly_B_z} + items = { + "grid": grid, + "data": data, + "poly_B": poly_B, + "poly_B_z_ra": poly_B_z_ra, + } return _bounce_integral, items else: return _bounce_integral @@ -800,10 +838,6 @@ def bounce_average( the location on the field line such that the particle's velocity parallel to the magnetic field is zero, i.e. λ |B| = 1. - The bounce integral is defined up to a sign. - We choose the sign that corresponds the particle's guiding center trajectory - traveling in the direction of increasing field-line-following label. - Parameters ---------- eq : Equilibrium @@ -852,7 +886,7 @@ def bounce_average( Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials of the spline along a particular field line. - poly_B_z : Array, shape(3, S, zeta.size - 1) + poly_B_z_ra : Array, shape(3, S, zeta.size - 1) Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. @@ -905,7 +939,7 @@ def _bounce_average(f, pitch=None, f_method="akima"): Last axis enumerates the bounce integrals. """ - return bi(f, pitch, f_method) / bi(jnp.ones(f.shape[-1]), pitch, "constant") + return bi(f, pitch, f_method) / bi(jnp.ones_like(f), pitch, "constant") bi = bounce_integral(eq, pitch, rho, alpha, zeta, quad, **kwargs) if kwargs.get("return_items"): diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index cf6459a508..4dd6b181bc 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -420,7 +420,7 @@ def test_pitch_and_hairy_ball(): result = ba(f, pitch) assert np.isfinite(result).any() # specify pitch from extrema of |B| - pitch = pitch_of_extrema(zeta, items["poly_B"], items["poly_B_z"]) + pitch = pitch_of_extrema(zeta, items["poly_B"], items["poly_B_z_ra"]) result = ba(f, pitch) assert np.isfinite(result).any() @@ -497,7 +497,7 @@ def beta(grid, data): assert np.isfinite(result).any(), "tanh_sinh quadrature failed." 
# TODO now compare result to elliptic integral - bp1, bp2 = bounce_points(pitch, zeta, items["poly_B"], items["poly_B_z"]) + bp1, bp2 = bounce_points(pitch, zeta, items["poly_B"], items["poly_B_z_ra"]) @pytest.mark.unit From fb1d81ec17bf55937fdbaa5cc164bc31a3e7f923 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 9 Apr 2024 11:21:15 -0400 Subject: [PATCH 071/241] Return arrays from check_shape, and fix 'check' test in bounce_points --- desc/compute/bounce_integral.py | 99 ++++++++++++++++++++++----------- tests/test_bounce_integral.py | 6 +- 2 files changed, 70 insertions(+), 35 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 166e969ed8..32f6b22960 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -167,8 +167,8 @@ def poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): if keep_only_real: r = tuple(map(partial(_filter_real, a_min=a_min, a_max=a_max), r)) r = jnp.stack(r, axis=-1) - if sort: - r = jnp.sort(r, axis=-1) + # We had ignored the case of double complex roots. + distinct = distinct and c.shape[0] > 3 and not keep_only_real else: # Compute from eigenvalues of polynomial companion matrix. # This method can fail to detect roots near extrema, which is often @@ -184,11 +184,11 @@ def poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): if a_max is not None: a_max = a_max[..., jnp.newaxis] r = _filter_real(r, a_min, a_max) - if sort or distinct: - r = jnp.sort(r, axis=-1) - if distinct: - mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=jnp.nan), 0) - r = jnp.where(mask, jnp.nan, r) + if sort or distinct: + r = jnp.sort(r, axis=-1) + if distinct: + mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=jnp.nan), 0) + r = jnp.where(mask, jnp.nan, r) return r @@ -287,27 +287,54 @@ def poly_val(x, c): def _check_shape(knots, B, B_z_ra, pitch=None): - """Ensure inputs have correct shape and return labels for those shapes.""" + """Ensure inputs have compatible shape, and return them with full dimension. + + Parameters + ---------- + knots : Array, shape(knots.size, ) + Field line-following ζ coordinates of spline knots. + + Returns + ------- + B : Array, shape(B.shape[0], S, knots.size - 1) + Polynomial coefficients of the spline of |B| in local power basis. + First axis enumerates the coefficients of power series. + Second axis enumerates the splines along the field lines. + Last axis enumerates the polynomials of the spline along a particular + field line. + B_z_ra : Array, shape(B.shape[0] - 1, *B.shape[1:]) + Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. + First axis enumerates the coefficients of power series. + Second axis enumerates the splines along the field lines. + Last axis enumerates the polynomials of the spline along a particular + field line. + pitch : Array, shape(P, S) + λ values. + Last axis enumerates the λ value for a particular field line + parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` + where in the latter the labels (ρ, α) are interpreted as index into the + last axis that corresponds to that field line. + If two-dimensional, the first axis is the batch axis as usual. + + """ if B.ndim == 2 and B_z_ra.ndim == 2: # Add axis which enumerates field lines. B = B[:, jnp.newaxis] B_z_ra = B_z_ra[:, jnp.newaxis] err_msg = "Supplied invalid shape for splines." 
assert B.ndim == B_z_ra.ndim == 3, err_msg - S = B.shape[1] - N = knots.size - 1 - degree = B.shape[0] - 1 - assert degree == B_z_ra.shape[0] and B.shape[1:] == B_z_ra.shape[1:], err_msg - assert N == B.shape[-1], "Last axis fails to enumerate spline polynomials." - - if pitch is None: - return S, N, degree - pitch = jnp.atleast_2d(pitch) - err_msg = "Supplied invalid shape for pitch angles." - assert pitch.ndim == 2, err_msg - assert pitch.shape[-1] == 1 or pitch.shape[-1] == B.shape[1], err_msg - P = pitch.shape[0] - return P, S, N, degree + assert ( + B.shape[0] - 1 == B_z_ra.shape[0] and B.shape[1:] == B_z_ra.shape[1:] + ), err_msg + assert ( + B.shape[-1] == knots.size - 1 + ), "Last axis fails to enumerate spline polynomials." + if pitch is not None: + pitch = jnp.atleast_2d(pitch) + err_msg = "Supplied invalid shape for pitch angles." + assert pitch.ndim == 2, err_msg + assert pitch.shape[-1] == 1 or pitch.shape[-1] == B.shape[1], err_msg + return B, B_z_ra, pitch def pitch_of_extrema(knots, B, B_z_ra): @@ -345,7 +372,8 @@ def pitch_of_extrema(knots, B, B_z_ra): a particular field line, is padded with nan. """ - S, N, degree = _check_shape(knots, B, B_z_ra) + B, B_z_ra, _ = _check_shape(knots, B, B_z_ra) + S, N, degree = B.shape[1], knots.size - 1, B.shape[0] - 1 extrema = poly_root( c=B_z_ra, a_min=jnp.array([0]), @@ -354,12 +382,13 @@ def pitch_of_extrema(knots, B, B_z_ra): # False will double weight orbits with B_z = B_zz = 0 at bounce points. distinct=True, ) + # Can detect at most degree of |B|_z spline extrema between each knot. + assert extrema.shape == (S, N, degree - 1) # Reshape so that last axis enumerates (unsorted) extrema along a field line. B_extrema = poly_val(x=extrema, c=B[..., jnp.newaxis]).reshape(S, -1) # Might be useful to pad all the nan at the end rather than interspersed. B_extrema = take_mask(B_extrema, ~jnp.isnan(B_extrema)) pitch = 1 / B_extrema.T - # Can detect at most degree of |B|_z spline extrema between each knot. assert pitch.shape == (N * (degree - 1), S) return pitch @@ -408,7 +437,8 @@ def bounce_points(knots, B, B_z_ra, pitch, check=False): the bounce points for a particular field line, is padded with nan. """ - P, S, N, degree = _check_shape(knots, B, B_z_ra, pitch) + B, B_z_ra, pitch = _check_shape(knots, B, B_z_ra, pitch) + P, S, N, degree = pitch.shape[0], B.shape[1], knots.size - 1, B.shape[0] - 1 # The polynomials' intersection points with 1 / λ is given by ``intersect``. # In order to be JIT compilable, this must have a shape that accommodates the # case where each polynomial intersects 1 / λ degree times. @@ -422,6 +452,7 @@ def bounce_points(knots, B, B_z_ra, pitch, check=False): sort=True, distinct=True, # Required for correctness of ``edge_case``. ) + assert intersect.shape == (P, S, N, degree) # Reshape so that last axis enumerates intersects of a pitch along a field line. # Condense remaining axes to vectorize over them. @@ -457,7 +488,7 @@ def bounce_points(knots, B, B_z_ra, pitch, check=False): if check: if jnp.any(bp1 > bp2): raise AssertionError("Bounce points have an inversion.") - if jnp.any(bp1[:, 1:] < bp2[:, :-1]): + if jnp.any(bp1[..., 1:] < bp2[..., :-1]): raise AssertionError( "Discontinuity detected. Is B_z_ra the derivative of the spline of B?" 
) @@ -538,37 +569,37 @@ def tanh_sinh_quad(resolution=7): @partial( jnp.vectorize, - signature="(m),(n),(n),(n)->(m)", + signature="(m),(n),(n)->(m)", excluded={"method", "derivative", "extrap", "period"}, ) -def _interp1d_vec_fx( +def _interp1d_vec( xq, x, f, - fx, method="cubic", derivative=0, extrap=False, period=None, ): - return interp1d(xq, x, f, method, derivative, extrap, period, fx=fx) + return interp1d(xq, x, f, method, derivative, extrap, period) @partial( jnp.vectorize, - signature="(m),(n),(n)->(m)", + signature="(m),(n),(n),(n)->(m)", excluded={"method", "derivative", "extrap", "period"}, ) -def _interp1d_vec( +def _interp1d_vec_fx( xq, x, f, + fx, method="cubic", derivative=0, extrap=False, period=None, ): - return interp1d(xq, x, f, method, derivative, extrap, period) + return interp1d(xq, x, f, method, derivative, extrap, period, fx=fx) def _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, pitch, f, f_method): @@ -598,7 +629,7 @@ def _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, pitch, f, f_method): Returns ------- inner_product : Array, shape(X.shape[:-1]) - Bounce integrals for every pitch along a particular field line. + Bounce integrals for every pitch along every field line. """ assert pitch.ndim == 2 diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 4dd6b181bc..b9f5a16dc2 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -145,9 +145,13 @@ def test_poly_root(): ) root = poly_root(poly.T, sort=True, distinct=True) for j in range(poly.shape[0]): + unique_roots = np.unique(np.roots(poly[j])) + if j == 4: + # There are only two distinct roots. + unique_roots = unique_roots[[0, 1]] np.testing.assert_allclose( actual=_filter_not_nan(root[j]), - desired=np.unique(np.roots(poly[j])), + desired=unique_roots, err_msg=str(j), ) poly = np.array([0, 1, -1, -8, 12]) From a7ce53ecc5eaead6315995d408471115aaa53877 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 9 Apr 2024 11:42:27 -0400 Subject: [PATCH 072/241] Remove uneeded reshape, taking advantage of jnp.vectorize --- desc/compute/bounce_integral.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 32f6b22960..1186aab8ee 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -455,18 +455,17 @@ def bounce_points(knots, B, B_z_ra, pitch, check=False): assert intersect.shape == (P, S, N, degree) # Reshape so that last axis enumerates intersects of a pitch along a field line. - # Condense remaining axes to vectorize over them. - B_z_ra = poly_val(x=intersect, c=B_z_ra[..., jnp.newaxis]).reshape(P * S, -1) + B_z_ra = poly_val(x=intersect, c=B_z_ra[..., jnp.newaxis]).reshape(P, S, -1) # Transform out of local power basis expansion. intersect = intersect + knots[:-1, jnp.newaxis] - intersect = intersect.reshape(P * S, -1) + intersect = intersect.reshape(P, S, -1) # Only consider intersect if it is within knots that bound that polynomial. is_intersect = ~jnp.isnan(intersect) # Reorder so that all intersects along a field line are contiguous. intersect = take_mask(intersect, is_intersect) B_z_ra = take_mask(B_z_ra, is_intersect) - assert intersect.shape == B_z_ra.shape == (P * S, N * degree) + assert intersect.shape == B_z_ra.shape == (P, S, N * degree) # Sign of derivative determines whether an intersect is a valid bounce point. # Need to include zero derivative intersects to compute the WFB # (world's fattest banana) orbit bounce integrals. 
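To make the sign-of-derivative pairing described above concrete, a toy NumPy sketch with made-up numbers, ignoring the nan padding from ``take_mask`` and the zero-derivative ``edge_case``:

.. code-block:: python

    import numpy as np

    # ζ values where |B| crosses 1/λ, sorted along the field line, and the
    # sign of ∂|B|/∂ζ at each crossing.
    intersect = np.array([0.8, 2.1, 3.9, 5.4])
    B_z_ra = np.array([-1.0, 0.7, -0.5, 0.9])
    bp1 = intersect[B_z_ra <= 0]  # |B| decreasing: left boundaries
    bp2 = intersect[B_z_ra >= 0]  # |B| increasing: right boundaries
    # Each bounce integral runs over a pair (bp1[k], bp2[k]):
    # here (0.8, 2.1) and (3.9, 5.4).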
@@ -479,11 +478,11 @@ def bounce_points(knots, B, B_z_ra, pitch, check=False): # be at most one inversion, and if it exists, the inversion must be at the # first pair. To correct the inversion, it suffices to disqualify the first # intersect as an ending bounce point, except under the following edge case. - edge_case = (B_z_ra[:, 0] == 0) & (B_z_ra[:, 1] < 0) + edge_case = (B_z_ra[..., 0] == 0) & (B_z_ra[..., 1] < 0) is_bp2 = put_along_axis(is_bp2, jnp.array(0), edge_case, axis=-1) # Get ζ values of bounce points from the masks. - bp1 = take_mask(intersect, is_bp1).reshape(P, S, -1) - bp2 = take_mask(intersect, is_bp2).reshape(P, S, -1) + bp1 = take_mask(intersect, is_bp1) + bp2 = take_mask(intersect, is_bp2) if check: if jnp.any(bp1 > bp2): From 4d0e688353c3cff4067c7fd28f5b163f8ff73977 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 9 Apr 2024 14:25:07 -0400 Subject: [PATCH 073/241] Use better names for returned quantities from bounce integral --- desc/compute/bounce_integral.py | 128 ++++++++++++++------------------ tests/test_bounce_integral.py | 91 +++++++++++------------ 2 files changed, 99 insertions(+), 120 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 1186aab8ee..d9d172d60e 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -286,7 +286,7 @@ def poly_val(x, c): return val -def _check_shape(knots, B, B_z_ra, pitch=None): +def _check_shape(knots, B_c, B_z_ra_c, pitch=None): """Ensure inputs have compatible shape, and return them with full dimension. Parameters @@ -296,13 +296,13 @@ def _check_shape(knots, B, B_z_ra, pitch=None): Returns ------- - B : Array, shape(B.shape[0], S, knots.size - 1) + B_c : Array, shape(B_c.shape[0], S, knots.size - 1) Polynomial coefficients of the spline of |B| in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials of the spline along a particular field line. - B_z_ra : Array, shape(B.shape[0] - 1, *B.shape[1:]) + B_z_ra_c : Array, shape(B_c.shape[0] - 1, *B_c.shape[1:]) Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. @@ -317,27 +317,27 @@ def _check_shape(knots, B, B_z_ra, pitch=None): If two-dimensional, the first axis is the batch axis as usual. """ - if B.ndim == 2 and B_z_ra.ndim == 2: + if B_c.ndim == 2 and B_z_ra_c.ndim == 2: # Add axis which enumerates field lines. - B = B[:, jnp.newaxis] - B_z_ra = B_z_ra[:, jnp.newaxis] + B_c = B_c[:, jnp.newaxis] + B_z_ra_c = B_z_ra_c[:, jnp.newaxis] err_msg = "Supplied invalid shape for splines." - assert B.ndim == B_z_ra.ndim == 3, err_msg + assert B_c.ndim == B_z_ra_c.ndim == 3, err_msg assert ( - B.shape[0] - 1 == B_z_ra.shape[0] and B.shape[1:] == B_z_ra.shape[1:] + B_c.shape[0] - 1 == B_z_ra_c.shape[0] and B_c.shape[1:] == B_z_ra_c.shape[1:] ), err_msg assert ( - B.shape[-1] == knots.size - 1 + B_c.shape[-1] == knots.size - 1 ), "Last axis fails to enumerate spline polynomials." if pitch is not None: pitch = jnp.atleast_2d(pitch) err_msg = "Supplied invalid shape for pitch angles." 
assert pitch.ndim == 2, err_msg - assert pitch.shape[-1] == 1 or pitch.shape[-1] == B.shape[1], err_msg - return B, B_z_ra, pitch + assert pitch.shape[-1] == 1 or pitch.shape[-1] == B_c.shape[1], err_msg + return B_c, B_z_ra_c, pitch -def pitch_of_extrema(knots, B, B_z_ra): +def pitch_of_extrema(knots, B_c, B_z_ra_c): """Return pitch values that will capture fat banana orbits. These pitch values are 1/|B|(ζ*) where |B|(ζ*) are local maxima. @@ -347,13 +347,13 @@ def pitch_of_extrema(knots, B, B_z_ra): ---------- knots : Array, shape(knots.size, ) Field line-following ζ coordinates of spline knots. - B : Array, shape(B.shape[0], S, knots.size - 1) + B_c : Array, shape(B_c.shape[0], S, knots.size - 1) Polynomial coefficients of the spline of |B| in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials of the spline along a particular field line. - B_z_ra : Array, shape(B.shape[0] - 1, *B.shape[1:]) + B_z_ra_c : Array, shape(B_c.shape[0] - 1, *B_c.shape[1:]) Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. @@ -364,7 +364,7 @@ def pitch_of_extrema(knots, B, B_z_ra): ------- pitch : Array, shape(N * (degree - 1), S) For the shaping notation, the ``degree`` of the spline of |B| matches - ``B.shape[0] - 1``, the number of polynomials per spline ``N`` matches + ``B_c.shape[0] - 1``, the number of polynomials per spline ``N`` matches ``knots.size - 1``, and the number of field lines is denoted by ``S``. If there were less than ``N * (degree - 1)`` extrema detected along a @@ -372,10 +372,10 @@ def pitch_of_extrema(knots, B, B_z_ra): a particular field line, is padded with nan. """ - B, B_z_ra, _ = _check_shape(knots, B, B_z_ra) - S, N, degree = B.shape[1], knots.size - 1, B.shape[0] - 1 + B_c, B_z_ra_c, _ = _check_shape(knots, B_c, B_z_ra_c) + S, N, degree = B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 extrema = poly_root( - c=B_z_ra, + c=B_z_ra_c, a_min=jnp.array([0]), a_max=jnp.diff(knots), sort=False, # don't need to sort @@ -385,7 +385,7 @@ def pitch_of_extrema(knots, B, B_z_ra): # Can detect at most degree of |B|_z spline extrema between each knot. assert extrema.shape == (S, N, degree - 1) # Reshape so that last axis enumerates (unsorted) extrema along a field line. - B_extrema = poly_val(x=extrema, c=B[..., jnp.newaxis]).reshape(S, -1) + B_extrema = poly_val(x=extrema, c=B_c[..., jnp.newaxis]).reshape(S, -1) # Might be useful to pad all the nan at the end rather than interspersed. B_extrema = take_mask(B_extrema, ~jnp.isnan(B_extrema)) pitch = 1 / B_extrema.T @@ -393,20 +393,20 @@ def pitch_of_extrema(knots, B, B_z_ra): return pitch -def bounce_points(knots, B, B_z_ra, pitch, check=False): +def bounce_points(knots, B_c, B_z_ra_c, pitch, check=False): """Compute the bounce points given spline of |B| and pitch λ. Parameters ---------- knots : Array, shape(knots.size, ) Field line-following ζ coordinates of spline knots. - B : Array, shape(B.shape[0], S, knots.size - 1) + B_c : Array, shape(B_c.shape[0], S, knots.size - 1) Polynomial coefficients of the spline of |B| in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials of the spline along a particular field line. 
- B_z_ra : Array, shape(B.shape[0] - 1, *B.shape[1:]) + B_z_ra_c : Array, shape(B_c.shape[0] - 1, *B_c.shape[1:]) Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. @@ -426,25 +426,25 @@ def bounce_points(knots, B, B_z_ra, pitch, check=False): ------- bp1, bp2 : Array, Array, shape(P, S, N * degree) For the shaping notation, the ``degree`` of the spline of |B| matches - ``B.shape[0] - 1``, the number of polynomials per spline ``N`` matches + ``B_c.shape[0] - 1``, the number of polynomials per spline ``N`` matches ``knots.size - 1``, and the number of field lines is denoted by ``S``. The returned arrays are the field line-following ζ coordinates of bounce - points for a given pitch along a field line. The pairs bp1[i, j] and - bp2[i, j] form left and right integration boundaries, respectively, + points for a given pitch along a field line. The pairs bp1[i, j, k] and + bp2[i, j, k] form left and right integration boundaries, respectively, for the bounce integrals. If there were less than ``N * degree`` bounce points detected along a field line, then the last axis, which enumerates the bounce points for a particular field line, is padded with nan. """ - B, B_z_ra, pitch = _check_shape(knots, B, B_z_ra, pitch) - P, S, N, degree = pitch.shape[0], B.shape[1], knots.size - 1, B.shape[0] - 1 + B_c, B_z_ra_c, pitch = _check_shape(knots, B_c, B_z_ra_c, pitch) + P, S, N, degree = pitch.shape[0], B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 # The polynomials' intersection points with 1 / λ is given by ``intersect``. # In order to be JIT compilable, this must have a shape that accommodates the # case where each polynomial intersects 1 / λ degree times. # nan values in ``intersect`` denote a polynomial has less than degree intersects. intersect = poly_root( - c=B, + c=B_c, # Expand to use same pitches across polynomials of a particular spline. k=jnp.expand_dims(1 / pitch, axis=-1), a_min=jnp.array([0]), @@ -455,7 +455,7 @@ def bounce_points(knots, B, B_z_ra, pitch, check=False): assert intersect.shape == (P, S, N, degree) # Reshape so that last axis enumerates intersects of a pitch along a field line. - B_z_ra = poly_val(x=intersect, c=B_z_ra[..., jnp.newaxis]).reshape(P, S, -1) + B_z_ra = poly_val(x=intersect, c=B_z_ra_c[..., jnp.newaxis]).reshape(P, S, -1) # Transform out of local power basis expansion. intersect = intersect + knots[:-1, jnp.newaxis] intersect = intersect.reshape(P, S, -1) @@ -471,13 +471,13 @@ def bounce_points(knots, B, B_z_ra, pitch, check=False): # (world's fattest banana) orbit bounce integrals. is_bp1 = B_z_ra <= 0 is_bp2 = B_z_ra >= 0 - # The pairs bp1[i, j] and bp2[i, j] are boundaries of an integral only if - # bp1[i, j] <= bp2[i, j]. For correctness of the algorithm, it is necessary + # The pairs bp1[i, j, k] and bp2[i, j, k] are boundaries of an integral only + # if bp1[i, j] <= bp2[i, j]. For correctness of the algorithm, it is required # that the first intersect satisfies non-positive derivative. Now, because - # B_z_ra[i, j] <= 0 implies B_z_ra[i, j + 1] >= 0 by continuity, there can - # be at most one inversion, and if it exists, the inversion must be at the - # first pair. To correct the inversion, it suffices to disqualify the first - # intersect as an ending bounce point, except under the following edge case. 
+ # B_z_ra[i, j, k] <= 0 implies B_z_ra[i, j, k + 1] >= 0 by continuity, there + # can be at most one inversion, and if it exists, the inversion must be at + # the first pair. To correct the inversion, it suffices to disqualify the + # first intersect as a right boundary, except under the following edge case. edge_case = (B_z_ra[..., 0] == 0) & (B_z_ra[..., 1] < 0) is_bp2 = put_along_axis(is_bp2, jnp.array(0), edge_case, axis=-1) # Get ζ values of bounce points from the masks. @@ -511,7 +511,7 @@ def bounce_points(knots, B, B_z_ra, pitch, check=False): def _compute_bp_if_given_pitch( - knots, B, B_z_ra, pitch, *original, err=False, check=False + knots, B_c, B_z_ra_c, pitch, check, *original, err=False ): """Conditionally return the ingredients needed to compute bounce integrals. @@ -529,7 +529,7 @@ def _compute_bp_if_given_pitch( return original else: pitch = jnp.atleast_2d(pitch) - return *bounce_points(knots, B, B_z_ra, pitch, check), pitch + return *bounce_points(knots, B_c, B_z_ra_c, pitch, check), pitch def tanh_sinh_quad(resolution=7): @@ -566,21 +566,11 @@ def tanh_sinh_quad(resolution=7): return x, w -@partial( - jnp.vectorize, +_interp1d_vec = jnp.vectorize( + interp1d, signature="(m),(n),(n)->(m)", excluded={"method", "derivative", "extrap", "period"}, ) -def _interp1d_vec( - xq, - x, - f, - method="cubic", - derivative=0, - extrap=False, - period=None, -): - return interp1d(xq, x, f, method, derivative, extrap, period) @partial( @@ -588,7 +578,7 @@ def _interp1d_vec( signature="(m),(n),(n),(n)->(m)", excluded={"method", "derivative", "extrap", "period"}, ) -def _interp1d_vec_fx( +def _interp1d_vec_with_df( xq, x, f, @@ -602,7 +592,7 @@ def _interp1d_vec_fx( def _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, pitch, f, f_method): - """Compute bounce integrals for every pitch along every field line. + """Compute bounce quadrature for every pitch along every field line. Parameters ---------- @@ -628,7 +618,7 @@ def _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, pitch, f, f_method): Returns ------- inner_product : Array, shape(X.shape[:-1]) - Bounce integrals for every pitch along every field line. + Bounce quadrature for every pitch along every field line. """ assert pitch.ndim == 2 @@ -649,11 +639,10 @@ def _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, pitch, f, f_method): f = _interp1d_vec(X, knots, f, method=f_method).reshape(shape) # Use akima spline to suppress oscillation. B_sup_z = _interp1d_vec(X, knots, B_sup_z, method="akima").reshape(shape) - # Specify derivative at knots with B_z_ra for ≈ cubic hermite interpolation. - B = _interp1d_vec_fx(X, knots, B, B_z_ra, method="cubic").reshape(shape) + # Specify derivative at knots for ≈ cubic hermite interpolation. + B = _interp1d_vec_with_df(X, knots, B, B_z_ra, method="cubic").reshape(shape) pitch = pitch[..., jnp.newaxis, jnp.newaxis] inner_product = jnp.dot(f / (B_sup_z * jnp.sqrt(1 - pitch * B)), w) - assert inner_product.shape == shape[:-1] return inner_product @@ -725,13 +714,13 @@ def bounce_integral( DESC coordinate grid for the given field line coordinates. data : dict Dictionary of Arrays of stuff evaluated on ``grid``. - poly_B : Array, shape(4, S, zeta.size - 1) + B.c : Array, shape(4, S, zeta.size - 1) Polynomial coefficients of the spline of |B| in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials of the spline along a particular field line. 
- poly_B_z_ra : Array, shape(3, S, zeta.size - 1) + B_z_ra.c : Array, shape(3, S, zeta.size - 1) Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. @@ -768,19 +757,17 @@ def bounce_integral( B_sup_z = data["B^zeta"].reshape(S, -1) B = data["|B|"].reshape(S, -1) B_z_ra = data["|B|_z|r,a"].reshape(S, -1) - poly_B = jnp.moveaxis( + B_c = jnp.moveaxis( CubicHermiteSpline(zeta, B, B_z_ra, axis=-1, check=check).c, 1, -1 ) - poly_B_z_ra = poly_der(poly_B) - assert poly_B.shape == (4, S, zeta.size - 1) - assert poly_B_z_ra.shape == (3, S, zeta.size - 1) + B_z_ra_c = poly_der(B_c) + assert B_c.shape == (4, S, zeta.size - 1) + assert B_z_ra_c.shape == (3, S, zeta.size - 1) x, w = quad(**kwargs) # change of variable, x = sin([0.5 + (ζ − ζ_b₂)/(ζ_b₂−ζ_b₁)] π) x = jnp.arcsin(x) / jnp.pi - 0.5 - original = _compute_bp_if_given_pitch( - zeta, poly_B, poly_B_z_ra, pitch, err=False, check=check - ) + original = _compute_bp_if_given_pitch(zeta, B_c, B_z_ra_c, pitch, check, err=False) def _bounce_integral(f, pitch=None, f_method="akima"): """Compute the bounce integral of ``f``. @@ -812,7 +799,7 @@ def _bounce_integral(f, pitch=None, f_method="akima"): """ bp1, bp2, pitch = _compute_bp_if_given_pitch( - zeta, poly_B, poly_B_z_ra, pitch, *original, err=True, check=check + zeta, B_c, B_z_ra_c, pitch, check, *original, err=True ) X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] result = ( @@ -834,12 +821,7 @@ def _bounce_integral(f, pitch=None, f_method="akima"): return result if return_items: - items = { - "grid": grid, - "data": data, - "poly_B": poly_B, - "poly_B_z_ra": poly_B_z_ra, - } + items = {"grid": grid, "data": data, "B.c": B_c, "B_z_ra.c": B_z_ra_c} return _bounce_integral, items else: return _bounce_integral @@ -910,13 +892,13 @@ def bounce_average( DESC coordinate grid for the given field line coordinates. data : dict Dictionary of Arrays of stuff evaluated on ``grid``. - poly_B : Array, shape(4, S, zeta.size - 1) + B.c : Array, shape(4, S, zeta.size - 1) Polynomial coefficients of the spline of |B| in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials of the spline along a particular field line. - poly_B_z_ra : Array, shape(3, S, zeta.size - 1) + B_z_ra.c : Array, shape(3, S, zeta.size - 1) Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. 
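The local power basis convention documented for ``B.c`` and ``B_z_ra.c`` above can be summarized with a small stand-in example. The shapes and coefficients below are arbitrary and only show how a value of |B| would be recovered from such coefficients; they are not produced by this module.

.. code-block:: python

    import numpy as np

    degree, S, N = 3, 2, 4                 # cubic spline, 2 field lines, 4 intervals
    knots = np.linspace(0, 2 * np.pi, N + 1)
    c = np.random.rand(degree + 1, S, N)   # stand-in for B.c with shape (4, S, N)
    s, j = 0, 1                            # pick a field line and a knot interval
    zeta = 0.5 * (knots[j] + knots[j + 1])
    # First axis orders coefficients from highest to lowest power, so evaluation
    # on interval j uses the local variable (zeta - knots[j]).
    B_here = np.polyval(c[:, s, j], zeta - knots[j])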
diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index b9f5a16dc2..ad5b442d1b 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -116,23 +116,23 @@ def test_reshape_convention(): def test_poly_root(): """Test vectorized computation of cubic polynomial exact roots.""" cubic = 4 - poly = np.arange(-24, 24).reshape(cubic, 6, -1) * np.pi + c = np.arange(-24, 24).reshape(cubic, 6, -1) * np.pi # make sure broadcasting won't hide error in implementation - assert np.unique(poly.shape).size == poly.ndim - constant = np.broadcast_to(np.arange(poly.shape[-1]), poly.shape[1:]) + assert np.unique(c.shape).size == c.ndim + constant = np.broadcast_to(np.arange(c.shape[-1]), c.shape[1:]) constant = np.stack([constant, constant]) - root = poly_root(poly, constant, sort=True) + root = poly_root(c, constant, sort=True) for i in range(constant.shape[0]): - for j in range(poly.shape[1]): - for k in range(poly.shape[2]): - d = poly[-1, j, k] - constant[i, j, k] + for j in range(c.shape[1]): + for k in range(c.shape[2]): + d = c[-1, j, k] - constant[i, j, k] np.testing.assert_allclose( actual=root[i, j, k], - desired=np.sort(np.roots([*poly[:-1, j, k], d])), + desired=np.sort(np.roots([*c[:-1, j, k], d])), ) - poly = np.array( + c = np.array( [ [1, 0, 0, 0], [0, 1, 0, 0], @@ -143,9 +143,9 @@ def test_poly_root(): [0, -6, 11, -2], ] ) - root = poly_root(poly.T, sort=True, distinct=True) - for j in range(poly.shape[0]): - unique_roots = np.unique(np.roots(poly[j])) + root = poly_root(c.T, sort=True, distinct=True) + for j in range(c.shape[0]): + unique_roots = np.unique(np.roots(c[j])) if j == 4: # There are only two distinct roots. unique_roots = unique_roots[[0, 1]] @@ -154,10 +154,10 @@ def test_poly_root(): desired=unique_roots, err_msg=str(j), ) - poly = np.array([0, 1, -1, -8, 12]) + c = np.array([0, 1, -1, -8, 12]) np.testing.assert_allclose( - actual=_filter_not_nan(poly_root(poly, sort=True, distinct=True)), - desired=np.unique(np.roots(poly)), + actual=_filter_not_nan(poly_root(c, sort=True, distinct=True)), + desired=np.unique(np.roots(c)), ) @@ -165,51 +165,55 @@ def test_poly_root(): def test_poly_int(): """Test vectorized computation of polynomial primitive.""" quintic = 6 - poly = np.arange(-18, 18).reshape(quintic, 3, -1) * np.pi + c = np.arange(-18, 18).reshape(quintic, 3, -1) * np.pi # make sure broadcasting won't hide error in implementation - assert np.unique(poly.shape).size == poly.ndim - constant = np.broadcast_to(np.arange(poly.shape[-1]), poly.shape[1:]) - primitive = poly_int(poly, k=constant) - for j in range(poly.shape[1]): - for k in range(poly.shape[2]): + assert np.unique(c.shape).size == c.ndim + constant = np.broadcast_to(np.arange(c.shape[-1]), c.shape[1:]) + primitive = poly_int(c, k=constant) + for j in range(c.shape[1]): + for k in range(c.shape[2]): np.testing.assert_allclose( actual=primitive[:, j, k], - desired=np.polyint(poly[:, j, k], k=constant[j, k]), + desired=np.polyint(c[:, j, k], k=constant[j, k]), ) - assert poly_int(poly).shape == primitive.shape, "Failed broadcasting default k." + assert poly_int(c).shape == primitive.shape, "Failed broadcasting default k." 
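The comparisons above lean on NumPy's highest-degree-first coefficient convention; a short self-contained reminder of that convention, with values chosen arbitrarily:

.. code-block:: python

    import numpy as np

    c = np.array([2.0, -3.0, 1.0])                    # 2 x**2 - 3 x + 1
    assert np.allclose(np.sort(np.roots(c)), [0.5, 1.0])
    assert np.isclose(np.polyval(c, 0.5), 0.0)
    assert np.allclose(np.polyder(c), [4.0, -3.0])
    assert np.isclose(np.polyint(c, k=4.0)[-1], 4.0)  # integration constant comes last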
@pytest.mark.unit def test_poly_der(): """Test vectorized computation of polynomial derivative.""" quintic = 6 - poly = np.arange(-18, 18).reshape(quintic, 3, -1) * np.pi + c = np.arange(-18, 18).reshape(quintic, 3, -1) * np.pi # make sure broadcasting won't hide error in implementation - assert np.unique(poly.shape).size == poly.ndim - derivative = poly_der(poly) - for j in range(poly.shape[1]): - for k in range(poly.shape[2]): + assert np.unique(c.shape).size == c.ndim + derivative = poly_der(c) + for j in range(c.shape[1]): + for k in range(c.shape[2]): np.testing.assert_allclose( - actual=derivative[:, j, k], desired=np.polyder(poly[:, j, k]) + actual=derivative[:, j, k], desired=np.polyder(c[:, j, k]) ) @pytest.mark.unit def test_poly_val(): """Test vectorized computation of polynomial evaluation.""" + + def test(x, c): + val = poly_val(x=x, c=c) + for index in np.ndindex(c.shape[1:]): + idx = (..., *index) + np.testing.assert_allclose( + actual=val[idx], + desired=np.poly1d(c[idx])(x[idx]), + err_msg=f"Failed with shapes {x.shape} and {c.shape}.", + ) + quartic = 5 c = np.arange(-60, 60).reshape(quartic, 3, -1) * np.pi # make sure broadcasting won't hide error in implementation assert np.unique(c.shape).size == c.ndim x = np.linspace(0, 20, c.shape[1] * c.shape[2]).reshape(c.shape[1], c.shape[2]) - val = poly_val(x=x, c=c) - for index in np.ndindex(c.shape[1:]): - idx = (..., *index) - np.testing.assert_allclose( - actual=val[idx], - desired=np.poly1d(c[idx])(x[idx]), - err_msg=f"Failed with shapes {x.shape} and {c.shape}.", - ) + test(x, c) x = np.stack([x, x * 2], axis=0) x = np.stack([x, x * 2, x * 3, x * 4], axis=0) @@ -217,14 +221,7 @@ def test_poly_val(): assert np.unique(x.shape).size == x.ndim assert c.shape[1:] == x.shape[x.ndim - (c.ndim - 1) :] assert np.unique((c.shape[0],) + x.shape[c.ndim - 1 :]).size == x.ndim - 1 - val = poly_val(x=x, c=c) - for index in np.ndindex(c.shape[1:]): - idx = (..., *index) - np.testing.assert_allclose( - actual=val[idx], - desired=np.poly1d(c[idx])(x[idx]), - err_msg=f"Failed with shapes {x.shape} and {c.shape}.", - ) + test(x, c) # integrate piecewise polynomial and set constants to preserve continuity y = np.arange(2, 8) @@ -424,7 +421,7 @@ def test_pitch_and_hairy_ball(): result = ba(f, pitch) assert np.isfinite(result).any() # specify pitch from extrema of |B| - pitch = pitch_of_extrema(zeta, items["poly_B"], items["poly_B_z_ra"]) + pitch = pitch_of_extrema(zeta, items["B.c"], items["B_z_ra.c"]) result = ba(f, pitch) assert np.isfinite(result).any() @@ -501,7 +498,7 @@ def beta(grid, data): assert np.isfinite(result).any(), "tanh_sinh quadrature failed." 
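The tanh-sinh quadrature exercised by the assertion above is designed for integrands with integrable endpoint singularities, like the bounce integrand near the bounce points. A rough standalone sketch of such a rule follows; the node spacing, truncation, and resolution chosen here are for illustration only and need not match ``tanh_sinh_quad``.

.. code-block:: python

    import numpy as np

    def tanh_sinh(resolution=20, t_max=3.5):
        # Double exponential change of variable x = tanh(pi/2 sinh t) on [-1, 1].
        t = np.linspace(-t_max, t_max, 2 * resolution + 1)
        h = t[1] - t[0]
        x = np.tanh(0.5 * np.pi * np.sinh(t))
        w = h * 0.5 * np.pi * np.cosh(t) / np.cosh(0.5 * np.pi * np.sinh(t)) ** 2
        return x, w

    x, w = tanh_sinh()
    # Integrand with inverse square root singularities at both endpoints.
    assert np.isclose(np.sum(w / np.sqrt(1 - x**2)), np.pi, rtol=1e-6)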
# TODO now compare result to elliptic integral - bp1, bp2 = bounce_points(pitch, zeta, items["poly_B"], items["poly_B_z_ra"]) + bp1, bp2 = bounce_points(pitch, zeta, items["B.c"], items["B_z_ra.c"]) @pytest.mark.unit From ccd81c38e79f1b1d7ad47963ad2d7b438859faac Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 9 Apr 2024 18:44:02 -0400 Subject: [PATCH 074/241] Change comment and simplify transpose in poly_root --- desc/compute/bounce_integral.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index d9d172d60e..82f0e3c9d5 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -176,8 +176,8 @@ def poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): c_n = c[-1] - k c = [jnp.broadcast_to(c_i, c_n.shape) for c_i in c[:-1]] c.append(c_n) - c = jnp.stack(c) - r = _roots(c.reshape(c.shape[0], -1).T).reshape(*c.shape[1:], -1) + c = jnp.stack(c, axis=-1) + r = _roots(c) if keep_only_real: if a_min is not None: a_min = a_min[..., jnp.newaxis] @@ -472,11 +472,11 @@ def bounce_points(knots, B_c, B_z_ra_c, pitch, check=False): is_bp1 = B_z_ra <= 0 is_bp2 = B_z_ra >= 0 # The pairs bp1[i, j, k] and bp2[i, j, k] are boundaries of an integral only - # if bp1[i, j] <= bp2[i, j]. For correctness of the algorithm, it is required - # that the first intersect satisfies non-positive derivative. Now, because - # B_z_ra[i, j, k] <= 0 implies B_z_ra[i, j, k + 1] >= 0 by continuity, there - # can be at most one inversion, and if it exists, the inversion must be at - # the first pair. To correct the inversion, it suffices to disqualify the + # if bp1[i, j, k] <= bp2[i, j, k]. For correctness of the algorithm, it is + # required that the first intersect satisfies non-positive derivative. Now, + # because B_z_ra[i, j, k] <= 0 implies B_z_ra[i, j, k + 1] >= 0 by continuity, + # there can be at most one inversion, and if it exists, the inversion must be + # at the first pair. To correct the inversion, it suffices to disqualify the # first intersect as a right boundary, except under the following edge case. 
edge_case = (B_z_ra[..., 0] == 0) & (B_z_ra[..., 1] < 0) is_bp2 = put_along_axis(is_bp2, jnp.array(0), edge_case, axis=-1) From 512ded110dfb8d8b71ec776234ba3972ab5cfd0e Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 9 Apr 2024 19:33:34 -0400 Subject: [PATCH 075/241] Try debugging bounce average drift tby ssinging grids --- desc/equilibrium/coords.py | 6 +++--- tests/test_bounce_integral.py | 27 ++++++++++++++++++++++++--- 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index 290a0971fe..94b93bf6f3 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -354,9 +354,9 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): nodes = jnp.column_stack( tuple(map(jnp.ravel, jnp.meshgrid(rho, t, z, indexing="ij"))) ) - spacing = jnp.array([1 / rho.size, 2 * jnp.pi / t.size, 2 * jnp.pi / z.size])[ - jnp.newaxis - ] + spacing = jnp.ones(rho.size * t.size * z.size)[:, jnp.newaxis] * jnp.array( + [1 / rho.size, 2 * jnp.pi / t.size, 2 * jnp.pi / z.size] + ) labels = ["rho", "theta", "zeta"] unique_idx = { f"_unique_{label}_idx": idx diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index ad5b442d1b..10100bc3ed 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -550,8 +550,6 @@ def test_bounce_averaged_drifts(): return_items=True, check=True, ) - grid = items["grid"] - grid._unique_zeta_idx = np.unique(grid.nodes[:, 2], return_index=True)[1] data_keys = [ "|grad(psi)|^2", @@ -565,7 +563,30 @@ def test_bounce_averaged_drifts(): "gbdrift", ] - data = eq.compute(data_keys, grid=grid, data=items["data"]) + def make_sure_attributes_are_assigned(grid): + _, unique_rho_idx, inverse_rho_idx = np.unique( + grid.nodes[:, 0], return_index=True, return_inverse=True + ) + _, unique_theta_idx, inverse_theta_idx = np.unique( + grid.nodes[:, 1], return_index=True, return_inverse=True + ) + _, unique_zeta_idx, inverse_zeta_idx = np.unique( + grid.nodes[:, 2], return_index=True, return_inverse=True + ) + grid._unique_rho_idx = unique_rho_idx + grid._inverse_rho_idx = inverse_rho_idx + grid._unique_theta_idx = unique_theta_idx + grid._inverse_theta_idx = inverse_theta_idx + grid._unique_zeta_idx = unique_zeta_idx + grid._inverse_zeta_idx = inverse_zeta_idx + return grid + + # In the interest of debugging, let's not + # pass in already computed data as a seed, e.g. 
data=items["data"] + # so that all data is recomputed on the correct grids according to + # the logic in eq.compute + grid = make_sure_attributes_are_assigned(items["grid"]) + data = eq.compute(data_keys, grid=grid) psib = data_eq["psi"][-1] From 9df1306f53f258b5e4e0bf286b74709b7ffc88ff Mon Sep 17 00:00:00 2001 From: Rahul Date: Tue, 9 Apr 2024 20:40:18 -0400 Subject: [PATCH 076/241] fixing parts of test_bounce_averaged drifts before bi --- tests/test_bounce_integral.py | 42 ++++++++++------------------------- 1 file changed, 12 insertions(+), 30 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 10100bc3ed..e1d9f2e79f 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -550,6 +550,8 @@ def test_bounce_averaged_drifts(): return_items=True, check=True, ) + grid = items["grid"] + grid._unique_zeta_idx = np.unique(grid.nodes[:, 2], return_index=True)[1] data_keys = [ "|grad(psi)|^2", @@ -563,29 +565,6 @@ def test_bounce_averaged_drifts(): "gbdrift", ] - def make_sure_attributes_are_assigned(grid): - _, unique_rho_idx, inverse_rho_idx = np.unique( - grid.nodes[:, 0], return_index=True, return_inverse=True - ) - _, unique_theta_idx, inverse_theta_idx = np.unique( - grid.nodes[:, 1], return_index=True, return_inverse=True - ) - _, unique_zeta_idx, inverse_zeta_idx = np.unique( - grid.nodes[:, 2], return_index=True, return_inverse=True - ) - grid._unique_rho_idx = unique_rho_idx - grid._inverse_rho_idx = inverse_rho_idx - grid._unique_theta_idx = unique_theta_idx - grid._inverse_theta_idx = inverse_theta_idx - grid._unique_zeta_idx = unique_zeta_idx - grid._inverse_zeta_idx = inverse_zeta_idx - return grid - - # In the interest of debugging, let's not - # pass in already computed data as a seed, e.g. 
data=items["data"] - # so that all data is recomputed on the correct grids according to - # the logic in eq.compute - grid = make_sure_attributes_are_assigned(items["grid"]) data = eq.compute(data_keys, grid=grid) psib = data_eq["psi"][-1] @@ -611,12 +590,13 @@ def make_sure_attributes_are_assigned(grid): cvdrift = -2 * sign_psi * Bref * Lref**2 * np.sqrt(psi) * data["cvdrift"] gbdrift = -2 * sign_psi * Bref * Lref**2 * np.sqrt(psi) * data["gbdrift"] - a0_over_R0 = Lref * np.sqrt(psi) + epsilon = Lref * np.sqrt(psi) + B0 = np.mean(bmag) - bmag_an = np.mean(bmag) * (1 - a0_over_R0 * np.cos(theta_PEST)) + bmag_an = B0 * (1 - epsilon * np.cos(theta_PEST)) np.testing.assert_allclose(bmag, bmag_an, atol=5e-3, rtol=5e-3) - gradpar_an = 2 * Lref * iota * (1 - a0_over_R0 * np.cos(theta_PEST)) + gradpar_an = 2 * Lref * iota * (1 - epsilon * np.cos(theta_PEST)) np.testing.assert_allclose(gradpar, gradpar_an, atol=9e-3, rtol=5e-3) dPdrho = np.mean(-0.5 * (cvdrift - gbdrift) * modB**2) @@ -637,7 +617,7 @@ def make_sure_attributes_are_assigned(grid): # Comparing coefficients with their analytical expressions np.testing.assert_allclose(gbdrift, gbdrift_an, atol=1.5e-2, rtol=5e-3) - np.testing.assert_allclose(cvdrift, cvdrift_an, atol=9e-3, rtol=5e-3) + np.testing.assert_allclose(cvdrift, cvdrift_an, atol=1.8e-2, rtol=1e1) # Values of pitch angle for which to evaluate the bounce averages pitch_res = 11 @@ -645,10 +625,12 @@ def make_sure_attributes_are_assigned(grid): pitch_res, -1 ) + k2 = 0.5 * ((1 - pitch * B0) / epsilon + 1) + bavg_drift_an = ( - 0.5 * cvdrift_an * ellipe(pitch) - + gbdrift_an * ellipk(pitch) - + dPdrho / bmag**2 * ellipe(pitch) + 0.5 * cvdrift_an * ellipe(k2) + + gbdrift_an * ellipk(k2) + + dPdrho / bmag**2 * ellipe(k2) ) # The quantities are already calculated along a field line From b3ee158003991c33976bb1ac8cb47e00a0460ea2 Mon Sep 17 00:00:00 2001 From: Rahul Date: Wed, 10 Apr 2024 14:50:57 -0400 Subject: [PATCH 077/241] increasing rtol because quantities that cross zero can have a huge rtol --- tests/test_bounce_integral.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index e1d9f2e79f..1dd454883a 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -616,7 +616,7 @@ def test_bounce_averaged_drifts(): cvdrift_an = gbdrift_an + fudge_factor3 * alpha_MHD / bmag**2 # Comparing coefficients with their analytical expressions - np.testing.assert_allclose(gbdrift, gbdrift_an, atol=1.5e-2, rtol=5e-3) + np.testing.assert_allclose(gbdrift, gbdrift_an, atol=1.5e-2, rtol=1e1) np.testing.assert_allclose(cvdrift, cvdrift_an, atol=1.8e-2, rtol=1e1) # Values of pitch angle for which to evaluate the bounce averages From 53142c9704b3df28da3e78daac203d18f6153d4d Mon Sep 17 00:00:00 2001 From: Rahul Date: Wed, 10 Apr 2024 15:14:01 -0400 Subject: [PATCH 078/241] adding pytest.warning --- tests/test_bounce_integral.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 1dd454883a..562476f3f2 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -633,14 +633,15 @@ def test_bounce_averaged_drifts(): + dPdrho / bmag**2 * ellipe(k2) ) - # The quantities are already calculated along a field line - bavg_drift_num = bi( - np.sqrt(1 - pitch * bmag) * 0.5 * cvdrift - + gbdrift * 1 / np.sqrt(1 - pitch * bmag) - + dPdrho / bmag**2 * np.sqrt(1 - pitch * bmag), - pitch, - ) - # 
might need to use _filter_not_nan function from top. + with pytest.warns(RuntimeWarning): + # The quantities are already calculated along a field line + bavg_drift_num = bi( + np.sqrt(1 - pitch * bmag) * 0.5 * cvdrift + + gbdrift * 1 / np.sqrt(1 - pitch * bmag) + + dPdrho / bmag**2 * np.sqrt(1 - pitch * bmag), + pitch, + ) + # might need to use _filter_not_nan function from top. np.testing.assert_allclose(bavg_drift_num, bavg_drift_an, atol=2e-2, rtol=1e-2) From f04523ddd0b0702253cdcf58f3ef77c989606a50 Mon Sep 17 00:00:00 2001 From: Rahul Date: Wed, 10 Apr 2024 19:18:23 -0400 Subject: [PATCH 079/241] override_grid=False fixes coefficient jumps. Should add this to the rest of the code. Improved agreement between coefficients too... --- tests/inputs/low-beta-shifted-circle.h5 | Bin 105905 -> 87041 bytes tests/test_bounce_integral.py | 6 +++--- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/inputs/low-beta-shifted-circle.h5 b/tests/inputs/low-beta-shifted-circle.h5 index 31f4fab80bf97a42c188109dd3432dbf4783a8f0..dd75392a097cd0fba6577aa0c0409c858826c039 100644 GIT binary patch delta 5677 zcma)Adq9-M7XRj3#N7pvMHCQ4miG!0h$y7(2S_{=1k@A|R7BHFP{=2#0ZJu_Z9#(f|t$wBpnG@srMQb zByn>j^Ep3Xe?Q*|-hqJ=eV4xOKQSmcJt#2Ucd_Qp)j1s8D9um}WFodT80JguptCG~ zV5W;?12;VsYGroNPrh(qrjN*D1|0PWF={`H0Y<&gV)ejc@UaXZ*u)~s#RHpYIV59X z6F(dhKH&6yV270*w6+>x`WvkVnAKIQNBo7s`VQ~L!N5_kuuqicR=pDvP(QL;FzYPZ zZyQ%FEWDag+nl_{$l0CUvcPhZQ2StcQ@vG$PrrtAfyQ|`l zd5;#jyq)d&9-czT3`ZZCVT3Td%gJ)x4I89C7umV>pqg`zjhOLXw>tkpi&NweMniW& z&G4nS?x?#*h<4O;V{u%F-sf|d>uCP0_f*Nb_tf0f$mOnqJ?ij(Kd|ZYXjgN2=dnQp zTQIC$`Gv{pq|v*)t-R%%xyG#Z8}26B#t1vwKds8XoG63@ysJNT>Sd#QW>om+h6O^N z#K!h7snWPmy18SxDaN?JChT^>gD~OJrdbOA6l)=5SJdT^OQr}D<*&BG+_yuG+}p9h z^5Jjlyk%Fc|4MZcxcHiFcTPQ3b9euBUhuaz+C&WxldrvCP_-Ti%9JULoaJ7V#`B5c z@XZfemyMGPEKsgkv}54b@A~#>+^+`icXxy8Z*Ka&(r|5c+iP#GxMV!N`@kpRv%eIqbCbhzkQ_0n6h$CWmZI;K{%A1lE87sdb=^FzW#5p zasM{EZn?uQKcf@k&{|4}9E5};-0Ip29N|1S{O#+Gzkx?k2{4y;3wuiOr&`eV>~NsJ zpEF=98d&A=0a_&S{Mbrmo~g=p?H0%#0G+**BQlH zY$SYtq(ZK7CgaWkj^1|^o(&MyvO**nc{6!5f#FhkJJAdB?S_VE2mpA1Jr{FC=j_HT~HW^Q4mF?6ER}O$|q@i46t{4 zGN0qBe_Uk_K%qV*b|?T^B1(xU(-*}j0^Sr1e@plhl$uL3&hlz7;u3Z2;-musuACoUufsm*dn}q6zCUVu^;XV~OUH(Al({Lj7{+ z!ONAkL?3zvXK1bbfx?SKLZyAOWVeaaJ=qCrNr-Zi$H7TAFlgo*A3)RLW?MKkh z%p3}b<$T%OI8l2VWN#iqB~mkUjAfhg&{jI6Fk<9Kk;4{J6lWVvF;RuEkprat6uU_K z{Z8~9cQLfka)h-C9>6=(m-63xp#H1p>vr@v-lD}Qh#KrD!HAjff%NbJH}sFH1_S<2 z%E3QWN<0ZS7M^f>{8gwhY~bs5>5C2X06)xA-(^JoIMtK%<1A0qkM(@gcI<#rQ1Jmq ztPj9Wj96`CpJK%7+q<7}AUi~2WkFV-A{Z=g=iUgApU4^du%hqngRzqb?tM!qci#=wxzsF#U({ zA|--L>Lx$3(1Y|AfDVhS`?gs2_aar*-h`UAapDFWNc54NhVuR)2PC7sra(`1oXpV9WL5W`V6qV0HM6#i?B zy&pzX(+tB7voZKh(TyGqPQ3U%tXsg`vKeF0Z%6pIr%LB!4a{}bL&4-B{Vlp_dmQ00 zSE|`dp{)7ZcjS z&#&-DqmGrC3XnWV%*`L?)`%VZ8p&R#r9@IZbq9#F`V=tvYB(be%_ z8gJuOIYa^qcpK9tvND7SF6R!}6WKp&d*+MY${NMCSN?>F_M_AMuVBR5KJ+R^%tXR9 z*7g;Rr0we($!0fE*+OMoBi{VF7CW#(qP)&k#@T~pz;`slGS?A$P#Y%a?=+G#;1LP} zodMMsu_M<0iZ+b;I-wmS*8Z9Ba-QXveWfLN;0sPf}uEYKQc2o~AVsQlj%{Uv?L^$(l zBAiX2ate`>@FuRZ-5>nO^D#|?n-t^=-Pb+Ho{Uk`3p*y*mw>^2>_qoscZ?l}M|d)0 zM+TK@G&vWR?|lP|)cFK~2Yf~oJ|1)qOMo+!06tGLuGabKAErRIg z?3jjOKrLmVb0hh|w(hfwpXjsbKwX?@<~?P{EFy(HUA zoWL?RnPV8h%2alcEM!6SJKV(qFKV}KuxN$Vvr~%TPSH&PrT4=oyxVls!vMLUKWidO z{lp8pcY)Go2yqfZO(wViy$^oc4M(<8b=7WQ9%zEUFKXb2KIcoO5Fv}$!A6A4ifR~w zr^QHa(w6uiZG}g)70jBs%A>vjJ=xHYC$qsE!1oTXoHGaclh`8FiMe2|oS)cC+9aYG zezdNEw-miF);g_3$o5dMtq2ooCZS3U`})KmmU-hoAzcybNxP!W9d`veEao)BSF{bh zhq#V8DbywG24nh4lXixfXQk=WC+~$F&*6OJtLfwN#Pbubc4!C#m*-&Yfwz2w8R70z?=>9B`cz5G2)UY$hA_ zVl&yW?Nt6oq~wpj;#5zf#kt>C9D5Tfj+aU7=>%WV3nQ-FPlT*Gy@4WR)d5pQ$aZm0 
zD1wBaAZj&=%0y&6`lL=M0dh%8G+vc_nI>X-VW0GS^oTPkqnzr6`?%i=QJj}Zs~Nlr zoh&ZLsr8l@{{8yw&pMq1g3J6e8sK^&C0k9Xk#~acB>W*0VYSLcSgob9*@Vxf{{sdF BEfN3# delta 6074 zcma)Adt6l2_CNa^5N3ek88Vi_C<8)ijDRqx48FzGKr2BMd4`}Om?_7MmX6hQ%&z)7%oiLlJeh!#>l^a&pRpz3jB-9Gc- zkH@tQ^b~6`mogp)cb0#?Td};>===g+9`XotJo~5=bq;D;#fh&bZ20zA_STf&JFSOc3$3@`_&fHEt~me zX=!IQN56e_g+=2e&ED1}U#(r~Yl{2v#?@Cdr)wPZCJVjOPAD~|q5G}YA6uchGku%7 z`g5(OD(KD0R@!};5doQM2&UH_9~;L7D}t@3Q$>3pLuwHxnxq%QS!ed)ofF9~dhiY7gCtoWp6x zOi5oD&M+or*iF|{Cw^0#%4|EN^4&hBFSdSQ7zVqRoh50E=5TQ0vqm%mCw{g&`gK#H z-}}nIpp5AF7)EQ`DZIb_^qg!Ty_8`pZF=z6MxHN zJ2bM&b6-5d)Y(utM-RnF4_P}cV;C0(P=7H}Pq#O0nT)C2wlU_*z4Au1Yg`6sxx)EC zt8TiER{`MNMkS;o}k0oH& zjNp6YzXP)&HBui@Ts{Y(KoNPp4UvSJQu8-QnfPSi1?=V#g3XRW0O*BtezpMYC&M8! z91$J`L;yB&F8`kTFenq)phzj}?Jlf!kpVWyo&QJ1cOVpbK{-WF_ge_fAb<%ChX_RQ zDKVbXFmX+{P}i|r-z@OA7VJ!S zeVaA-4kRKMKK#i9&%RLQOVC;fFHQ8^2|mM7E^xN1)0{V(W!O>7#5)h)Hs#B292ws}iAF66K43#e(<>oMP z?bD%l=!Bx_gIKm@BM!|VknM)!7>55qV0t}{bf}VFuBv7W^N>H9+|qRj(c~Jn2!#+e zrgpuqqY{TbN=d?=pd?{W7021)_^wiD3!4wv!oT6T*+oj>;JgSadqGK3d_$?<>H!1K zLFQ@T`MC(uz%Oq=C_;37a|c2+*A3hI2-rgesGRys;cAu#E46PS>#Dmba=UbMau?KD zkdaroEZ~A2fPT&_P`(p{p`buJ|0wr6)>w=@g@}G@_da!|eF&`+_d0GvG$uwwm&IGB zc@E9|R+^{#^g^{0re)404!p%$i|@gNl1dGKppF7e|w%wXL-_+K7( zvI-B93@>q1dGNgf=lP|Z#rD(v8L?>v2TL+VzqFBo!KiDfJ^FXh%&NcrWPc- z6m$2#(D~GAHuMB?KshS;2B8_E^NtFHX!^UqN2sraYH9g*Um)W2kfpy85=xp736jjW zNYI6R73*pxPb5g41o+l@}CwiZ1g#`3kR7s3*#et%MUxGBBlHB6^41Xw}gnsOIWNHBf^g3WJ1*j z6e!-HaT_?KCha@@U0p{iZ@h#^$6rT-Cw%Wuh*uODA6~B}*A!I;vCqMQD#Iy#yLt5u z7fF>TkzoX>zKdK=KJk|Eu@^)e>HjAClIPn1j_92{M~#TKB;=jU^mgOZ8)mZ2o# zM%Ga-A(4iNVzo*j6i+v2xhfYJ;37=&9Yk_9mu`hZwxTttwZ@PZq0R$Z=T+8?!^CEqlGF-XNd}1+TNJa@b>jv_aMzc6%|quYR)b& z-wA$)mp?aNdp(;=mjmI+^#y=4!UH|2c}1|-Pm*%gwm`^ru>ssgxSBfk3@5R~fzsC@ zR*GR?T@1#Mz7R%t3~5o8j)1TL)xRTn<7whE$BM+4`a+pSJnGPk+ode{1HMwUC)?jc z?-!*!wi_Xu4@EmfR%tLa^i%~PPx=-ah+@&fRcE-!(fBM>KKV@ysj*BS3`4Ls!{nI>e@=TLmxmlX>YjKTL_gOGhquX2hKc(EvJ--8Fh-GJj! zpzuN({?rNc=VD0XES!fSwNv^Igru4d!T@kJ32=gosGLsD&`BLEv|LB<9$+))mtArK zQGojqF8Miw`BLryl)@EeZjTtRX6GPR5=wY7uQPMCqV;8b7oQ_SjHfCNLt0;kSO^P9 zHB~JUt0^)GRZ}!-UEC|!#Tm|!g!QNoNeY5}Ne#bC%u`kee5ddRO{#n;wnCHYwG4U3 zew6lNO#aZ}e`i&_{Qr^FsrJ-AIgOar6`ebMT diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 562476f3f2..9de8372995 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -565,7 +565,7 @@ def test_bounce_averaged_drifts(): "gbdrift", ] - data = eq.compute(data_keys, grid=grid) + data = eq.compute(data_keys, grid=grid, override_grid=False) psib = data_eq["psi"][-1] @@ -616,8 +616,8 @@ def test_bounce_averaged_drifts(): cvdrift_an = gbdrift_an + fudge_factor3 * alpha_MHD / bmag**2 # Comparing coefficients with their analytical expressions - np.testing.assert_allclose(gbdrift, gbdrift_an, atol=1.5e-2, rtol=1e1) - np.testing.assert_allclose(cvdrift, cvdrift_an, atol=1.8e-2, rtol=1e1) + np.testing.assert_allclose(gbdrift, gbdrift_an, atol=1.5e-2, rtol=5e-3) + np.testing.assert_allclose(cvdrift, cvdrift_an, atol=1.8e-2, rtol=5e-3) # Values of pitch angle for which to evaluate the bounce averages pitch_res = 11 From 530bcbb318d5f03e703040c4d51f11604e100bad Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 11 Apr 2024 02:18:33 -0400 Subject: [PATCH 080/241] Change API for bounce integral to support integrating... locally defined functions. 
---
 desc/compute/bounce_integral.py | 372 ++++++++++++++------------------
 desc/equilibrium/coords.py      |  71 +++---
 desc/grid.py                    |  64 +-----
 tests/test_bounce_integral.py   | 151 ++++++++-----
 4 files changed, 298 insertions(+), 360 deletions(-)

diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py
index 82f0e3c9d5..c2bfd5e1a3 100644
--- a/desc/compute/bounce_integral.py
+++ b/desc/compute/bounce_integral.py
@@ -378,11 +378,10 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c):
         c=B_z_ra_c,
         a_min=jnp.array([0]),
         a_max=jnp.diff(knots),
-        sort=False,  # don't need to sort
-        # False will double weight orbits with B_z = B_zz = 0 at bounce points.
+        # False to double weight orbits with |B|_z_ra = |B|_zz_ra = 0 at bounce points.
         distinct=True,
     )
-    # Can detect at most degree of |B|_z spline extrema between each knot.
+    # Can detect at most degree of |B|_z_ra spline extrema between each knot.
     assert extrema.shape == (S, N, degree - 1)
     # Reshape so that last axis enumerates (unsorted) extrema along a field line.
     B_extrema = poly_val(x=extrema, c=B_c[..., jnp.newaxis]).reshape(S, -1)
@@ -591,7 +590,7 @@ def _interp1d_vec_with_df(
     return interp1d(xq, x, f, method, derivative, extrap, period, fx=fx)
 
 
-def _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, pitch, f, f_method):
+def _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method):
     """Compute bounce quadrature for every pitch along every field line.
 
     Parameters
@@ -608,12 +607,26 @@ def _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, pitch, f, f_method):
         Norm of magnetic field.
     B_z_ra : Array, shape(S, knots.size, )
        Norm of magnetic field derivative with respect to field-line following label.
+    integrand : callable
+        This callable is the composition operator on the set of functions in ``f``
+        that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ.
+        It should accept the items in ``f`` as arguments as well as two additional
+        keyword arguments: ``B``, and ``pitch``. A quadrature will be performed to
+        approximate the bounce integral of ``integrand(*f, B=B, pitch=pitch)``.
+        Note that any arrays baked into the callable method should broadcast
+        with arrays of shape(X.shape).
+    f : list of Array, shape(P, S, knots.size, )
+        Arguments to the callable ``integrand``.
+        These should be the functions in the integrand of the bounce integral
+        evaluated at the knots. The values will be interpolated to the quadrature
+        points. All items in the list should be two-dimensional. The first axis of
+        that item is interpreted as the batch axis, which enumerates the
+        evaluation of the function at particular pitch values.
     pitch : Array, shape(P, S)
         λ values.
-    f : Array, shape(P, S, knots.size, )
-        Function to compute bounce integral of, evaluated at knots.
-    f_method : str
-        Method of interpolation for f.
+    method : str
+        Method of interpolation for functions contained in ``f``.
+        See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html.

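For concreteness, an integrand obeying the contract described above could be written as in the sketch below; the function name and the choice of ``f`` here are illustrative only and are not part of this module.

.. code-block:: python

    from desc.backend import jnp

    def integrand_sketch(g_zz, B, pitch):
        # f(l) = g_zz / sqrt(1 - pitch * |B|); g_zz, B, and pitch broadcast together
        # at the quadrature points, and the result has the same shape as B.
        return g_zz / jnp.sqrt(1 - pitch * B)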
Returns ------- @@ -625,48 +638,47 @@ def _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, pitch, f, f_method): assert w.ndim == knots.ndim == 1 assert X.shape == (pitch.shape[0], B.shape[0], X.shape[2], w.size) assert knots.size == B.shape[-1] - assert f.ndim == 3 and f.shape[0] == 1 or f.shape[0] == pitch.shape[0] - assert f.shape[1:] == B_sup_z.shape == B.shape == B_z_ra.shape + assert B_sup_z.shape == B.shape == B_z_ra.shape + for ff in f: + assert ff.ndim == 3 and ff.shape[0] == 1 or ff.shape[0] == pitch.shape[0] + assert ff.shape[1:] == B.shape # Spline the integrand so that we can evaluate it at quadrature points # without expensive coordinate mappings and root finding. # Spline each function separately so that the singularity near the bounce # points can be captured more accurately than can be by any polynomial. shape = X.shape X = X.reshape(X.shape[0], X.shape[1], -1) - if f_method == "constant": - f = f[..., 0, jnp.newaxis, jnp.newaxis] - else: - f = _interp1d_vec(X, knots, f, method=f_method).reshape(shape) - # Use akima spline to suppress oscillation. - B_sup_z = _interp1d_vec(X, knots, B_sup_z, method="akima").reshape(shape) + f = [_interp1d_vec(X, knots, ff, method=method).reshape(shape) for ff in f] + B_sup_z = _interp1d_vec(X, knots, B_sup_z, method=method).reshape(shape) # Specify derivative at knots for ≈ cubic hermite interpolation. B = _interp1d_vec_with_df(X, knots, B, B_z_ra, method="cubic").reshape(shape) pitch = pitch[..., jnp.newaxis, jnp.newaxis] - inner_product = jnp.dot(f / (B_sup_z * jnp.sqrt(1 - pitch * B)), w) + inner_product = jnp.dot(integrand(*f, B=B, pitch=pitch) / B_sup_z, w) return inner_product -def bounce_integral( +def bounce_integral_map( eq, - pitch=None, rho=jnp.linspace(1e-12, 1, 10), alpha=None, - zeta=jnp.linspace(0, 6 * jnp.pi, 20), + knots=jnp.linspace(0, 6 * jnp.pi, 20), quad=tanh_sinh_quad, + pitch=None, **kwargs, ): """Returns a method to compute the bounce integral of any quantity. - The bounce integral is defined as F_ℓ(λ) = ∫ f(ℓ) / √(1 − λ |B|) dℓ, where + The bounce integral is defined as ∫ f(ℓ) dℓ, where dℓ parameterizes the distance along the field line, λ is a constant proportional to the magnetic moment over energy, |B| is the norm of the magnetic field, f(ℓ) is the quantity to integrate along the field line, - and the endpoints of the integration are at the bounce points. + and the boundaries of the integral are bounce points, ζ₁, ζ₂, such that + (λ |B|)(ζᵢ) = 1. Physically, the pitch angle λ is the magnetic moment over the energy of particle. For a particle with fixed λ, bounce points are defined to be the location on the field line such that the particle's velocity parallel - to the magnetic field is zero, i.e. λ |B| = 1. + to the magnetic field is zero. The bounce integral is defined up to a sign. We choose the sign that corresponds the particle's guiding center trajectory @@ -676,41 +688,43 @@ def bounce_integral( ---------- eq : Equilibrium Equilibrium on which the bounce integral is computed. - pitch : Array, shape(P, S) - λ values to evaluate the bounce integral at each field line. - May be specified later. - Last axis enumerates the λ value for a particular field line parameterized - by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` - where in the latter the labels (ρ, α) are interpreted as index into the - last axis that corresponds to that field line. - If two-dimensional, the first axis is the batch axis as usual. rho : Array Unique flux surface label coordinates. 
alpha : Array Unique field line label coordinates over a constant rho surface. - zeta : Array - A spline of the integrand is computed at these values of the field - line following coordinate, for every field line in the meshgrid formed from + knots : Array + Field line following coordinate values at which to compute a spline + of the integrand, for every field line in the meshgrid formed from rho and alpha specified above. - The number of knots specifies the grid resolution as increasing the + The number of knots specifies a grid resolution as increasing the number of knots increases the accuracy of representing the integrand and the accuracy of the locations of the bounce points. quad : callable The quadrature scheme used to evaluate the integral. Should return quadrature points and weights when called. The returned points should be within the domain [-1, 1]. + pitch : Array, shape(P, S) + λ values to evaluate the bounce integral at each field line. + May be specified later. + Last axis enumerates the λ value for a particular field line parameterized + by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` + where in the latter the labels (ρ, α) are interpreted as index into the + last axis that corresponds to that field line. + If two-dimensional, the first axis is the batch axis as usual. kwargs : dict Can specify additional arguments to the quadrature function with kwargs. - Can also specify whether to return items with ``return_items=True``. + Can also specify whether to not return items with ``return_items=False``. Returns ------- - bi : callable - This callable method computes the bounce integral F_ℓ(λ) for every + bounce_integral : callable + This callable method computes the bounce integral ∫ f(ℓ) dℓ for every specified field line ℓ (constant rho and alpha), for every λ value in ``pitch``. items : dict Dictionary of useful intermediate quantities. - grid : Grid + grid_fl : Grid + Clebsch-Type field-line coordinates grid. + grid_desc : Grid DESC coordinate grid for the given field line coordinates. data : dict Dictionary of Arrays of stuff evaluated on ``grid``. @@ -729,56 +743,115 @@ def bounce_integral( Examples -------- + Suppose we want to compute a bounce average of the function + f(ℓ) = (1 − λ |B|) * g_zz, where g_zz is the squared norm of the + toroidal basis vector on some set of field lines specified by (ρ, α) + coordinates. This is defined as + [∫ f(ℓ) / √(1 − λ |B|) dℓ] / [∫ 1 / √(1 − λ |B|) dℓ] + + .. code-block:: python + def integrand_num(g_zz, B, pitch): + # Integrand in integral in numerator of bounce average. + f = (1 - pitch * B) * g_zz # something arbitrary + g = jnp.sqrt(1 - pitch * B) # typical to have this in denominator + return safediv(f, g, fill=jnp.nan) + + def integrand_den(B, pitch): + # Integrand in integral in denominator of bounce average. 
+            g = jnp.sqrt(1 - pitch * B)  # typical to have this in denominator
+            return safediv(1, g, fill=jnp.nan)
+
+        eq = get("HELIOTRON")
         rho = jnp.linspace(1e-12, 1, 6)
         alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 5)
-        bi, items = bounce_integral(eq, rho=rho, alpha=alpha, return_items=True)
-        name = "g_zz"
-        f = eq.compute(name, grid=items["grid"], data=items["data"])[name]
-        B = items["data"]["B"].reshape(rho.size * alpha.size, -1)
-        pitch_res = 30
-        pitch = jnp.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), pitch_res)
-        result = bi(f, pitch).reshape(pitch_res, rho.size, alpha.size, -1)
+        knots = jnp.linspace(0, 6 * jnp.pi, 20)
+
+        bounce_integral, items = bounce_integral_map(eq, rho, alpha, knots)
+
+        g_zz = eq.compute("g_zz", grid=items["grid_desc"], data=items["data"])["g_zz"]
+        pitch = pitch_of_extrema(knots, items["B.c"], items["B_z_ra.c"])
+        num = bounce_integral(integrand_num, g_zz, pitch)
+        den = bounce_integral(integrand_den, [], pitch)
+        average = num / den
+        assert jnp.isfinite(average).any()
+
+        # Now we can group the data by field line.
+        average = average.reshape(pitch.shape[0], rho.size, alpha.size, -1)
+        # The bounce averages stored at index i, j
+        i, j = 0, 0
+        print(average[:, i, j])
+        # are the bounce averages along the field line with nodes
+        nodes = items["grid_fl"].nodes.reshape(rho.size, alpha.size, -1, 3)
+        print(nodes[i, j])
+        # for the pitch values stored in
+        pitch = pitch.reshape(pitch.shape[0], rho.size, alpha.size)
+        print(pitch[:, i, j])
+        # Some of these bounce averages will evaluate as nan.
+        # You should filter out these nan values when computing stuff.
+        average_sum_per_field_lines = jnp.nansum(average, axis=-1)
+        print(average_sum_per_field_lines)
+        assert not jnp.allclose(average_sum_per_field_lines, 0)
 
     """
     check = kwargs.pop("check", False)
-    return_items = kwargs.pop("return_items", False)
+    return_items = kwargs.pop("return_items", True)
     if alpha is None:
         alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 10)
     rho = jnp.atleast_1d(rho)
     alpha = jnp.atleast_1d(alpha)
-    zeta = jnp.atleast_1d(zeta)
+    knots = jnp.atleast_1d(knots)
+    # number of field lines or splines
     S = rho.size * alpha.size
-    grid, data = desc_grid_from_field_line_coords(eq, rho, alpha, zeta)
-    data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid, data=data)
-    B_sup_z = data["B^zeta"].reshape(S, -1)
-    B = data["|B|"].reshape(S, -1)
-    B_z_ra = data["|B|_z|r,a"].reshape(S, -1)
+    grid_fl, grid_desc, data = desc_grid_from_field_line_coords(eq, rho, alpha, knots)
+    data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid_desc, data=data)
+    B_sup_z = data["B^zeta"].reshape(S, knots.size)
+    B = data["|B|"].reshape(S, knots.size)
+    B_z_ra = data["|B|_z|r,a"].reshape(S, knots.size)
     B_c = jnp.moveaxis(
-        CubicHermiteSpline(zeta, B, B_z_ra, axis=-1, check=check).c, 1, -1
+        CubicHermiteSpline(knots, B, B_z_ra, axis=-1, check=check).c,
+        source=1,
+        destination=-1,
     )
+    assert B_c.shape == (4, S, knots.size - 1)
     B_z_ra_c = poly_der(B_c)
-    assert B_c.shape == (4, S, zeta.size - 1)
-    assert B_z_ra_c.shape == (3, S, zeta.size - 1)
+    assert B_z_ra_c.shape == (3, S, knots.size - 1)
     x, w = quad(**kwargs)
     # change of variable, x = sin([0.5 + (ζ − ζ_b₂)/(ζ_b₂−ζ_b₁)] π)
     x = jnp.arcsin(x) / jnp.pi - 0.5
-    original = _compute_bp_if_given_pitch(zeta, B_c, B_z_ra_c, pitch, check, err=False)
+    original = _compute_bp_if_given_pitch(knots, B_c, B_z_ra_c, pitch, check, err=False)
 
-    def _bounce_integral(f, pitch=None, f_method="akima"):
-        """Compute the bounce integral of ``f``.
+ def _group_grid_data_by_field_line(f): + assert f.ndim <= 2, "See the docstring below." + return f.reshape(-1, S, knots.size) + + def bounce_integral(integrand, f, pitch=None, method="akima"): + """Bounce integrate ∫ f(ℓ) dℓ. Parameters ---------- - f : Array, shape(P, items["grid"].num_nodes, ) - Quantity to compute the bounce integral of. - If two-dimensional, the first axis is interpreted as the batch axis, - which enumerates the evaluation of some function at particular pitch - values. + integrand : callable + This callable is the composition operator on the set of functions in ``f`` + that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. + It should accept the items in ``f`` as arguments as well as two additional + keyword arguments: ``B``, and ``pitch``. A quadrature will be performed to + approximate the bounce integral of ``integrand(*f, B=B, pitch=pitch)``. + Note that any arrays backed into the callabe method should broadcast + with arrays of shape(P, S, (knots.size - 1) * 3, w.size) where + P is the batch axis size of pitch, + S is the number of field lines given by rho.size * alpha.size, + and w.size is the number of quadrature points (by default 7). + f : list of Array, shape(P, items["grid"].num_nodes, ) + Arguments to the callable ``integrand``. + These should be the functions in the integrand of the bounce integral + evaluated at the knots. The values will be interpolated to the quadrature + points. If an item in the list is two-dimensional, the first axis of + that item is interpreted as the batch axis, which enumerates the + evaluation of the function at particular pitch values. pitch : Array, shape(P, S) λ values to evaluate the bounce integral at each field line. If None, uses the values given to the parent function. @@ -787,8 +860,10 @@ def _bounce_integral(f, pitch=None, f_method="akima"): where in the latter the labels (ρ, α) are interpreted as index into the last axis that corresponds to that field line. If two-dimensional, the first axis is the batch axis as usual. - f_method : str, optional - Method of interpolation for f. + method : str + Method of interpolation for functions contained in ``f``. + Defaults to akima spline to suppress oscillation. + See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. Returns ------- @@ -799,163 +874,28 @@ def _bounce_integral(f, pitch=None, f_method="akima"): """ bp1, bp2, pitch = _compute_bp_if_given_pitch( - zeta, B_c, B_z_ra_c, pitch, check, *original, err=True + knots, B_c, B_z_ra_c, pitch, check, *original, err=True ) X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] + if not isinstance(f, (list, tuple)): + f = [f] + f = tuple(map(_group_grid_data_by_field_line, f)) result = ( - _bounce_quad( - X=X, - w=w, - knots=zeta, - B_sup_z=B_sup_z, - B=B, - B_z_ra=B_z_ra, - pitch=pitch, - f=f.reshape(-1, S, zeta.size), - f_method=f_method, - ) + _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method) / (bp2 - bp1) * jnp.pi ) - assert result.shape == (pitch.shape[0], S, (zeta.size - 1) * 3) + assert result.shape == (pitch.shape[0], S, (knots.size - 1) * 3) return result if return_items: - items = {"grid": grid, "data": data, "B.c": B_c, "B_z_ra.c": B_z_ra_c} - return _bounce_integral, items - else: - return _bounce_integral - - -def bounce_average( - eq, - pitch=None, - rho=jnp.linspace(1e-12, 1, 10), - alpha=None, - zeta=jnp.linspace(0, 6 * jnp.pi, 20), - quad=tanh_sinh_quad, - **kwargs, -): - """Returns a method to compute the bounce average of any quantity. 
- - The bounce average is defined as - F_ℓ(λ) = (∫ f(ℓ) / √(1 − λ |B|) dℓ) / (∫ 1 / √(1 − λ |B|) dℓ), where - dℓ parameterizes the distance along the field line, - λ is a constant proportional to the magnetic moment over energy, - |B| is the norm of the magnetic field, - f(ℓ) is the quantity to integrate along the field line, - and the endpoints of the integration are at the bounce points. - Physically, the pitch angle λ is the magnetic moment over the energy - of particle. For a particle with fixed λ, bounce points are defined to be - the location on the field line such that the particle's velocity parallel - to the magnetic field is zero, i.e. λ |B| = 1. - - Parameters - ---------- - eq : Equilibrium - Equilibrium on which the bounce average is computed. - pitch : Array, shape(P, S) - λ values to evaluate the bounce integral at each field line. - May be specified later. - Last axis enumerates the λ value for a particular field line parameterized - by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` - where in the latter the labels (ρ, α) are interpreted as index into the - last axis that corresponds to that field line. - If two-dimensional, the first axis is the batch axis as usual. - rho : Array - Unique flux surface label coordinates. - alpha : Array - Unique field line label coordinates over a constant rho surface. - zeta : Array - A spline of the integrand is computed at these values of the field - line following coordinate, for every field line in the meshgrid formed from - rho and alpha specified above. - The number of knots specifies the grid resolution as increasing the - number of knots increases the accuracy of representing the integrand - and the accuracy of the locations of the bounce points. - quad : callable - The quadrature scheme used to evaluate the integral. - Should return quadrature points and weights when called. - The returned points should be within the domain [-1, 1]. - kwargs : dict - Can specify additional arguments to the quadrature function with kwargs. - Can also specify whether to return items with ``return_items=True``. - - Returns - ------- - ba : callable - This callable method computes the bounce average F_ℓ(λ) for every - specified field line ℓ (constant rho and alpha), for every λ value in ``pitch``. - items : dict - Dictionary of useful intermediate quantities. - grid : Grid - DESC coordinate grid for the given field line coordinates. - data : dict - Dictionary of Arrays of stuff evaluated on ``grid``. - B.c : Array, shape(4, S, zeta.size - 1) - Polynomial coefficients of the spline of |B| in local power basis. - First axis enumerates the coefficients of power series. - Second axis enumerates the splines along the field lines. - Last axis enumerates the polynomials of the spline along a particular - field line. - B_z_ra.c : Array, shape(3, S, zeta.size - 1) - Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. - First axis enumerates the coefficients of power series. - Second axis enumerates the splines along the field lines. - Last axis enumerates the polynomials of the spline along a particular - field line. - - Examples - -------- - .. 
code-block:: python - - rho = jnp.linspace(1e-12, 1, 6) - alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 5) - ba, items = bounce_average(eq, rho=rho, alpha=alpha, return_items=True) - name = "g_zz" - f = eq.compute(name, grid=items["grid"], data=items["data"])[name] - B = items["data"]["B"].reshape(rho.size * alpha.size, -1) - pitch_res = 30 - pitch = jnp.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), pitch_res) - result = ba(f, pitch).reshape(pitch_res, rho.size, alpha.size, -1) - - """ - - def _bounce_average(f, pitch=None, f_method="akima"): - """Compute the bounce average of ``f``. - - Parameters - ---------- - f : Array, shape(P, items["grid"].num_nodes, ) - Quantity to compute the bounce average of. - If two-dimensional, the first axis is interpreted as the batch axis, - which enumerates the evaluation of some function at particular pitch - values. - pitch : Array, shape(P, S) - λ values to evaluate the bounce average at each field line. - If None, uses the values given to the parent function. - Last axis enumerates the λ value for a particular field line parameterized - by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` - where in the latter the labels (ρ, α) are interpreted as index into the - last axis that corresponds to that field line. - If two-dimensional, the first axis is the batch axis as usual. - f_method : str, optional - Method of interpolation for f. - - - Returns - ------- - result : Array, shape(P, S, (zeta.size - 1) * 3) - First axis enumerates pitch values. - Second axis enumerates the field lines. - Last axis enumerates the bounce integrals. - - """ - return bi(f, pitch, f_method) / bi(jnp.ones_like(f), pitch, "constant") - - bi = bounce_integral(eq, pitch, rho, alpha, zeta, quad, **kwargs) - if kwargs.get("return_items"): - bi, items = bi - return _bounce_average, items + items = { + "grid_fl": grid_fl, + "grid_desc": grid_desc, + "data": data, + "B.c": B_c, + "B_z_ra.c": B_z_ra_c, + } + return bounce_integral, items else: - return _bounce_average + return bounce_integral diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index 94b93bf6f3..b5b798fd64 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -14,7 +14,6 @@ Grid, LinearGrid, QuadratureGrid, - meshgrid_expand, meshgrid_inverse_idx, meshgrid_unique_idx, ) @@ -328,25 +327,44 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): Returns ------- + grid_fl : Grid + Clebsch-Type field-line coordinates grid. grid_desc : Grid - DESC coordinate grid. - data_desc : dict + DESC coordinate grid for the given field line coordinates. + data : dict Some flux surface quantities that may be more accurate than what can be computed on the returned grid. 
""" + + def unique_idx(a_size, b_size, c_size): + labels = ["rho", "theta", "zeta"] + return { + f"_unique_{label}_idx": idx + for label, idx in zip(labels, meshgrid_unique_idx(a_size, b_size, c_size)) + } + + def inverse_idx(a_size, b_size, c_size): + labels = ["rho", "theta", "zeta"] + return { + f"_inverse_{label}_idx": idx + for label, idx in zip(labels, meshgrid_inverse_idx(a_size, b_size, c_size)) + } + + r, a, z_fl = map(jnp.ravel, jnp.meshgrid(rho, alpha, zeta, indexing="ij")) + grid_fl = Grid( + nodes=jnp.column_stack([r, a, z_fl]), + sort=False, + jitable=True, + **unique_idx(rho.size, alpha.size, zeta.size), + **inverse_idx(rho.size, alpha.size, zeta.size), + ) # The rotational transform can be computed apriori to the coordinate # transformation because it is a single variable function of the flux surface # label rho, and the coordinate mapping does not change rho. Once it is known, # we can compute the straight field-line poloidal angle theta_PEST from the - # field-line label alpha. - # Then we transform from straight field-line coordinates to DESC coordinates - # with the root-finding method ``compute_theta_coords``. - # This is preferable to transforming from field-line coordinates to DESC - # coordinates directly with the more general root-finding method - # ``map_coordinates``. That method requires an initial guess, and generating - # a reasonable initial guess requires computing the rotational transform to - # approximate theta_PEST and the poloidal stream function anyway. + # field-line label alpha. Then we transform from straight field-line coordinates + # to DESC coordinates with the root-finding method ``compute_theta_coords``. # Choose nodes such that even spacing will yield correct flux surface integrals. t = jnp.linspace(0, 2 * jnp.pi, 2 * eq.M_grid + 1, endpoint=False) @@ -357,17 +375,13 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): spacing = jnp.ones(rho.size * t.size * z.size)[:, jnp.newaxis] * jnp.array( [1 / rho.size, 2 * jnp.pi / t.size, 2 * jnp.pi / z.size] ) - labels = ["rho", "theta", "zeta"] - unique_idx = { - f"_unique_{label}_idx": idx - for label, idx in zip(labels, meshgrid_unique_idx(rho.size, t.size, z.size)) - } - inverse_idx = { - f"_inverse_{label}_idx": idx - for label, idx in zip(labels, meshgrid_inverse_idx(rho.size, t.size, z.size)) - } - grid = Grid( - nodes, spacing=spacing, sort=False, jitable=True, **unique_idx, **inverse_idx + grid_iota = Grid( + nodes, + spacing=spacing, + sort=False, + jitable=True, + **unique_idx(rho.size, t.size, z.size), + **inverse_idx(rho.size, t.size, z.size), ) # We only need to compute the rotational transform to transform to straight # field-line coordinates. However, it is a good idea to compute other flux @@ -375,16 +389,15 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): # to the given field line coordinates may not be uniformly distributed over # flux surfaces. This would make quadratures performed over flux surfaces # on the returned DESC grid inaccurate. 
- data = eq.compute(names=["iota", "iota_r"], grid=grid) - data_desc = { - d: meshgrid_expand(grid.compress(data[d]), rho.size, alpha.size, zeta.size) - for d in data + data_iota = eq.compute(names=["iota", "iota_r"], grid=grid_iota) + data = { + d: grid_fl.expand(grid_iota.compress(data_iota[d])) + for d in data_iota if data_index["desc.equilibrium.equilibrium.Equilibrium"][d]["coordinates"] == "r" } - r, a, z_fl = map(jnp.ravel, jnp.meshgrid(rho, alpha, zeta, indexing="ij")) # don't modulo field line zeta by 2pi - coords_sfl = jnp.column_stack([r, a + data_desc["iota"] * z_fl, z_fl]) + coords_sfl = jnp.column_stack([r, a + data["iota"] * z_fl, z_fl]) coords_desc = eq.compute_theta_coords(coords_sfl) grid_desc = Grid( nodes=coords_desc, @@ -393,7 +406,7 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): _unique_rho_idx=meshgrid_unique_idx(rho.size, alpha.size, zeta.size)[0], _inverse_rho_idx=meshgrid_inverse_idx(rho.size, alpha.size, zeta.size)[0], ) - return grid_desc, data_desc + return grid_fl, grid_desc, data def is_nested(eq, grid=None, R_lmn=None, Z_lmn=None, L_lmn=None, msg=None): diff --git a/desc/grid.py b/desc/grid.py index 08b763be61..3129f258a7 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -1572,57 +1572,6 @@ def find_least_rational_surfaces( return rho, io -def meshgrid_expand(x, a_size, b_size, c_size, order=0): - """Expand ``x`` by duplicating elements to match a meshgrid pattern. - - It is common to construct a meshgrid in the following manner. - .. code-block:: python - - a, b, c = jnp.meshgrid(a, b, c, indexing="ij") - a, b, c = map(jnp.ravel, (a, b, c)) - nodes = jnp.column_stack([a, b, c]) - grid = Grid(nodes, sort=False, jitable=True) - - Since ``jitable=True`` was specified, the attribute ``grid.inverse_*_idx`` - is not computed, which is needed for the method ``grid.expand(x)``. - On such grids, this method should be used instead. - - Parameters - ---------- - x : ndarray - Stores the values of a surface function (constant over a surface) - for all unique surfaces of the specified label on the grid. - The length of ``x`` should match the number of unique surfaces of - the corresponding label in this grid. - a_size : int - Size of the first argument to meshgrid. - b_size : int - Size of the second argument to meshgrid. - c_size : int - Size of the third argument to meshgrid. - order : int - 0, 1, or 2. Corresponds to whether ``x`` is a surface function - of a, b, or c in the example code in the docstring. - - Returns - ------- - expand_x : ndarray - ``x`` expanded to match the meshgrid pattern. - - """ - order = int(order) - assert 0 <= order <= 2 - if order == 0: - assert len(x) == a_size - return repeat(x, b_size * c_size, total_repeat_length=a_size * b_size * c_size) - if order == 1: - assert len(x) == b_size - return jnp.tile(repeat(x, c_size, total_repeat_length=b_size * c_size), a_size) - if order == 2: - assert len(x) == c_size - return jnp.tile(x, a_size * b_size) - - def meshgrid_inverse_idx(a_size, b_size, c_size): """Return inverse indices for meshgrid pattern. @@ -1646,8 +1595,6 @@ def meshgrid_inverse_idx(a_size, b_size, c_size): Size of the second argument to meshgrid. c_size : int Size of the third argument to meshgrid. - order : int - 0, 1, or 2. Whether to retrieve inverse indices for label a, b, or c. Returns ------- @@ -1655,16 +1602,15 @@ def meshgrid_inverse_idx(a_size, b_size, c_size): The inverse indices. 
""" - a = jnp.arange(a_size) inverse_a_idx = repeat( - a, b_size * c_size, total_repeat_length=a_size * b_size * c_size + jnp.arange(a_size), + b_size * c_size, + total_repeat_length=a_size * b_size * c_size, ) - b = jnp.arange(b_size) inverse_b_idx = jnp.tile( - repeat(b, c_size, total_repeat_length=b_size * c_size), a_size + repeat(jnp.arange(b_size), c_size, total_repeat_length=b_size * c_size), a_size ) - c = jnp.arange(c_size) - inverse_c_idx = jnp.tile(c, a_size * b_size) + inverse_c_idx = jnp.tile(jnp.arange(c_size), a_size * b_size) return inverse_a_idx, inverse_b_idx, inverse_c_idx diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 9de8372995..c9c32b9f46 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -1,6 +1,7 @@ """Test bounce integral methods.""" import inspect +from functools import partial import numpy as np import pytest @@ -11,10 +12,9 @@ from scipy.interpolate import CubicHermiteSpline from scipy.special import ellipe, ellipk -from desc.backend import flatnonzero, fori_loop, put, root_scalar +from desc.backend import complex_sqrt, flatnonzero, fori_loop, put, root_scalar from desc.compute.bounce_integral import ( - bounce_average, - bounce_integral, + bounce_integral_map, bounce_points, pitch_of_extrema, poly_der, @@ -23,7 +23,7 @@ poly_val, take_mask, ) -from desc.compute.utils import dot +from desc.compute.utils import dot, safediv from desc.continuation import solve_continuation_automatic from desc.equilibrium import Equilibrium from desc.equilibrium.coords import desc_grid_from_field_line_coords @@ -40,7 +40,7 @@ from desc.profiles import PowerSeriesProfile -@np.vectorize(signature="(m)->()") +@partial(np.vectorize, signature="(m)->()") def _last_value(a): """Return the last non-nan value in ``a``.""" a = np.ravel(a)[::-1] @@ -104,9 +104,9 @@ def test_reshape_convention(): err_msg = "The ordering conventions are required for correctness." assert "P, S, N" in inspect.getsource(bounce_points), err_msg - src = inspect.getsource(bounce_integral) + src = inspect.getsource(bounce_integral_map) assert "S, zeta.size" in src, err_msg - assert "pitch_res, rho.size, alpha.size" in src, err_msg + assert "pitch.shape[0], rho.size, alpha.size" in src, err_msg src = inspect.getsource(desc_grid_from_field_line_coords) assert 'indexing="ij"' in src, err_msg assert 'meshgrid(rho, alpha, zeta, indexing="ij")' in src, err_msg @@ -402,28 +402,63 @@ def test_extrema_first_and_before_bp2(plot=False): @pytest.mark.unit -def test_pitch_and_hairy_ball(): - """Test different ways of specifying pitch and ensure B does not vanish.""" +def test_example_code_and_hairy_ball(): + """Test example code in bounce_integral docstring and ensure B does not vanish.""" + + def integrand_num(g_zz, B, pitch): + """Integrand in integral in numerator of bounce average.""" + f = (1 - pitch * B) * g_zz # something arbitrary + # When 1 - pitch * B is negative, we want g to evaluate as nan. + # jnp.sqrt() will do this as desired, but np.sqrt() will give a runtime error. + g = complex_sqrt(1 - pitch * B) # typical to have this in denominator + g = np.where(np.isclose(np.imag(g), 0), np.real(g), np.nan) + return safediv(f, g, fill=np.nan) + + def integrand_den(B, pitch): + """Integrand in integral in denominator of bounce average.""" + # When 1 - pitch * B is negative, we want g to evaluate as nan. + # jnp.sqrt() will do this as desired, but np.sqrt() will give a runtime error. 
+ g = complex_sqrt(1 - pitch * B) # typical to have this in denominator + g = np.where(np.isclose(np.imag(g), 0), np.real(g), np.nan) + return safediv(1, g, fill=np.nan) + eq = get("HELIOTRON") rho = np.linspace(1e-12, 1, 6) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) - zeta = np.linspace(0, 6 * np.pi, 20) - ba, items = bounce_average(eq, rho=rho, alpha=alpha, zeta=zeta, return_items=True) + knots = np.linspace(0, 6 * np.pi, 20) + + bounce_integral, items = bounce_integral_map(eq, rho, alpha, knots) + + # start hairy ball test + B = eq.compute("B", grid=items["grid_desc"], data=items["data"])["B"] + assert not np.isclose(B, 0, atol=1e-19).any(), "B should never vanish." B = items["data"]["B"] assert not np.isclose(B, 0, atol=1e-19).any(), "B should never vanish." - - name = "g_zz" - f = eq.compute(name, grid=items["grid"], data=items["data"])[name] - # specify pitch per field line - pitch_res = 30 - B = B.reshape(rho.size * alpha.size, -1) - pitch = np.linspace(1 / B.max(axis=-1), 1 / B.min(axis=-1), pitch_res) - result = ba(f, pitch) - assert np.isfinite(result).any() - # specify pitch from extrema of |B| - pitch = pitch_of_extrema(zeta, items["B.c"], items["B_z_ra.c"]) - result = ba(f, pitch) - assert np.isfinite(result).any() + # end hairy ball test + + g_zz = eq.compute("g_zz", grid=items["grid_desc"], data=items["data"])["g_zz"] + pitch = pitch_of_extrema(knots, items["B.c"], items["B_z_ra.c"]) + num = bounce_integral(integrand_num, g_zz, pitch) + den = bounce_integral(integrand_den, [], pitch) + average = num / den + assert np.isfinite(average).any() + + # Now we can group the data by field line. + average = average.reshape(pitch.shape[0], rho.size, alpha.size, -1) + # The bounce averages stored at index i, j + i, j = 0, 0 + print(average[:, i, j]) + # are the bounce averages along the field line with nodes + nodes = items["grid_fl"].nodes.reshape(rho.size, alpha.size, -1, 3) + print(nodes[i, j]) + # for the pitch values stored in + pitch = pitch.reshape(pitch.shape[0], rho.size, alpha.size) + print(pitch[:, i, j]) + # Some of these bounce averages will evaluate as nan. + # You should filter out these nan values when computing stuff. + average_sum_per_field_lines = np.nansum(average, axis=-1) + print(average_sum_per_field_lines) + assert not np.allclose(average_sum_per_field_lines, 0) # @pytest.mark.unit @@ -445,7 +480,7 @@ def test_elliptic_integral_limit(): (and not whether the bounce points were accurate). """ - assert False + assert False, "Test not finished yet." L, M, N, NFP, sym = 6, 6, 6, 1, True surface = FourierRZToroidalSurface( R_lmn=[1.0, 0.1], @@ -485,20 +520,11 @@ def beta(grid, data): rho = np.array([0.5]) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 10) - zeta = np.linspace(0, 6 * np.pi, 20) - bi, items = bounce_integral( - eq, rho=rho, alpha=alpha, zeta=zeta, return_items=True, check=True - ) - B = items["data"]["B"] - pitch_res = 15 - pitch = np.linspace(1 / B.max(), 1 / B.min(), pitch_res) - name = "g_zz" - f = eq.compute(name, grid=items["grid"], data=items["data"])[name] - result = bi(f, pitch) - assert np.isfinite(result).any(), "tanh_sinh quadrature failed." 
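The ``complex_sqrt`` plus ``np.where`` pattern repeated in these integrands makes evaluations outside the bounce region, where 1 - pitch * |B| < 0, come out as NaN instead of tripping the runtime warning a plain ``np.sqrt`` would emit; the test suite later factors this into a ``_sqrt`` helper. A minimal standalone sketch of the idiom, using ``np.emath.sqrt`` as a stand-in for ``desc.backend.complex_sqrt``:

.. code-block:: python

    import numpy as np

    def nan_sqrt(x):
        # square root that evaluates as NaN where x < 0
        y = np.emath.sqrt(x)  # complex where x < 0, like complex_sqrt in the tests
        return np.where(np.isclose(np.imag(y), 0), np.real(y), np.nan)

    pitch = 1.2
    B = np.array([0.5, 0.9, 1.0])
    print(nan_sqrt(1 - pitch * B))  # finite inside the bounce region, NaN outside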
- + knots = np.linspace(0, 6 * np.pi, 20) # TODO now compare result to elliptic integral - bp1, bp2 = bounce_points(pitch, zeta, items["B.c"], items["B_z_ra.c"]) + bounce_integral, items = bounce_integral_map(eq, rho, alpha, knots, check=True) + pitch = pitch_of_extrema(knots, items["B.c"], items["B_z_ra.c"]) + bp1, bp2 = bounce_points(knots, items["B.c"], items["B_z_ra.c"], pitch) @pytest.mark.unit @@ -542,16 +568,11 @@ def test_bounce_averaged_drifts(): # Response: Currently the API is such that the method does all the # above preprocessing for you. Let's test it for correctness # first then do this later. - bi, items = bounce_integral( - eq, - rho=np.unique(coords1[:, 0]), - alpha=alpha, - zeta=zeta, - return_items=True, - check=True, + bounce_integral, items = bounce_integral_map( + eq, rho=np.unique(coords1[:, 0]), alpha=alpha, knots=zeta, check=True ) - grid = items["grid"] - grid._unique_zeta_idx = np.unique(grid.nodes[:, 2], return_index=True)[1] + grid = items["grid_desc"] + # grid._unique_zeta_idx = np.unique(grid.nodes[:, 2], return_index=True)[1] # noqa: E800, E501 data_keys = [ "|grad(psi)|^2", @@ -565,6 +586,7 @@ def test_bounce_averaged_drifts(): "gbdrift", ] + # override_grid is required for test to pass data = eq.compute(data_keys, grid=grid, override_grid=False) psib = data_eq["psi"][-1] @@ -633,15 +655,32 @@ def test_bounce_averaged_drifts(): + dPdrho / bmag**2 * ellipe(k2) ) - with pytest.warns(RuntimeWarning): - # The quantities are already calculated along a field line - bavg_drift_num = bi( - np.sqrt(1 - pitch * bmag) * 0.5 * cvdrift - + gbdrift * 1 / np.sqrt(1 - pitch * bmag) - + dPdrho / bmag**2 * np.sqrt(1 - pitch * bmag), - pitch, - ) - # might need to use _filter_not_nan function from top. + def integrand(B, pitch): + # The quantities cvdrift, gbdrift, and dPdrho are already calculated + # along a field line. These are constants baked into this function + # and will not change value. + # The arguments to this function, B and pitch will be interpolated + # onto the quadrature points before these quantities are evaluated. + + # When 1 - pitch * B is negative, we want g to evaluate as nan. + # jnp.sqrt() will do this as desired, but np.sqrt() will give a runtime error. + g = complex_sqrt(1 - pitch * B) + g = np.where(np.isclose(np.imag(g), 0), np.real(g), np.nan) + # just need to fix brodcasting of these to items["grid_desc"], + # maybe will use grid.copy_from_other method. Or recalculate + # these quantities along field line, see test_example_code_and_hairy_ball. + return g * 0.5 * cvdrift + gbdrift / g + dPdrho / B**2 * g + + # the integrand doesn't have any additional arguments besides B and pitch + # since gbdrift etc. are baked into the integrand function, so we pass + # an empty list. 
+ additional_things_to_interpolate_onto_quadrature_points_besides_B_and_pitch = [] + bavg_drift_num = bounce_integral( + integrand, + additional_things_to_interpolate_onto_quadrature_points_besides_B_and_pitch, + pitch, + ) + print(bavg_drift_num) np.testing.assert_allclose(bavg_drift_num, bavg_drift_an, atol=2e-2, rtol=1e-2) From f2a6107ad41c7d750332d6d606ec739d17e68010 Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 11 Apr 2024 02:27:38 -0400 Subject: [PATCH 081/241] Simplify shape broadc in docstring --- desc/compute/bounce_integral.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index c2bfd5e1a3..54e7707edf 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -613,8 +613,8 @@ def _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method): It should accept the items in ``f`` as arguments as well as two additional keyword arguments: ``B``, and ``pitch``. A quadrature will be performed to approximate the bounce integral of ``integrand(*f, B=B, pitch=pitch)``. - Note that any arrays backed into the callabe method should broadcast - with arrays of shape(X.shape). + Note that any arrays baked into the callable method should broadcast + with arrays of shape(P, S, 1, 1). f : list of Array, shape(P, S, knots.size, ) Arguments to the callable ``integrand``. These should be the functions in the integrand of the bounce integral @@ -840,11 +840,10 @@ def bounce_integral(integrand, f, pitch=None, method="akima"): It should accept the items in ``f`` as arguments as well as two additional keyword arguments: ``B``, and ``pitch``. A quadrature will be performed to approximate the bounce integral of ``integrand(*f, B=B, pitch=pitch)``. - Note that any arrays backed into the callabe method should broadcast - with arrays of shape(P, S, (knots.size - 1) * 3, w.size) where + Note that any arrays baked into the callable method should broadcast + with arrays of shape(P, S, 1, 1) where P is the batch axis size of pitch, - S is the number of field lines given by rho.size * alpha.size, - and w.size is the number of quadrature points (by default 7). + S is the number of field lines given by rho.size * alpha.size. f : list of Array, shape(P, items["grid"].num_nodes, ) Arguments to the callable ``integrand``. These should be the functions in the integrand of the bounce integral From 88efe51c0787e2bc23de84fecf38fd3d716ed37b Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 11 Apr 2024 13:05:06 -0400 Subject: [PATCH 082/241] Make sure tests_bounce_average_drifts test runs with new API change --- desc/compute/bounce_integral.py | 18 ++++--- tests/test_bounce_integral.py | 92 +++++++++++++++------------------ 2 files changed, 51 insertions(+), 59 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 54e7707edf..5341492cc1 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -618,7 +618,8 @@ def _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method): f : list of Array, shape(P, S, knots.size, ) Arguments to the callable ``integrand``. These should be the functions in the integrand of the bounce integral - evaluated at the knots. The values will be interpolated to the quadrature + evaluated at the knots. They should be computed on the returned desc + coordinate grid. The values will be interpolated to the quadrature points. All items in the list should be two-dimensional. 
The first axis of that item is interpreted as the batch axis, which enumerates the evaluation of the function at particular pitch values. @@ -711,7 +712,7 @@ def bounce_integral_map( where in the latter the labels (ρ, α) are interpreted as index into the last axis that corresponds to that field line. If two-dimensional, the first axis is the batch axis as usual. - kwargs : dict + kwargs Can specify additional arguments to the quadrature function with kwargs. Can also specify whether to not return items with ``return_items=False``. @@ -760,7 +761,7 @@ def integrand_num(g_zz, B, pitch): def integrand_den(B, pitch): # Integrand in integral in denominator of bounce average. - g = jnp.nan(1 - pitch * B) # typical to have this in denominator + g = jnp.sqrt(1 - pitch * B) # typical to have this in denominator return safediv(1, g, fill=jnp.nan) eq = get("HELIOTRON") @@ -790,9 +791,9 @@ def integrand_den(B, pitch): print(pitch[:, i, j]) # Some of these bounce averages will evaluate as nan. # You should filter out these nan values when computing stuff. - average_sum_per_field_lines = jnp.nansum(average, axis=-1) - print(average_sum_per_field_lines) - assert not jnp.allclose(average_sum_per_field_lines, 0) + average_sum_over_field_line = jnp.nansum(average, axis=-1) + print(average_sum_over_field_line) + assert not jnp.allclose(average_sum_over_field_line, 0) """ check = kwargs.pop("check", False) @@ -844,10 +845,11 @@ def bounce_integral(integrand, f, pitch=None, method="akima"): with arrays of shape(P, S, 1, 1) where P is the batch axis size of pitch, S is the number of field lines given by rho.size * alpha.size. - f : list of Array, shape(P, items["grid"].num_nodes, ) + f : list of Array, shape(P, items["grid_desc"].num_nodes, ) Arguments to the callable ``integrand``. These should be the functions in the integrand of the bounce integral - evaluated at the knots. The values will be interpolated to the quadrature + evaluated at the knots. They should be computed on the returned desc + coordinate grid. The values will be interpolated to the quadrature points. If an item in the list is two-dimensional, the first axis of that item is interpreted as the batch axis, which enumerates the evaluation of the function at particular pitch values. diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index c9c32b9f46..9caacef918 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -29,7 +29,6 @@ from desc.equilibrium.coords import desc_grid_from_field_line_coords from desc.examples import get from desc.geometry import FourierRZToroidalSurface -from desc.grid import Grid from desc.objectives import ( ObjectiveFromUser, ObjectiveFunction, @@ -456,9 +455,9 @@ def integrand_den(B, pitch): print(pitch[:, i, j]) # Some of these bounce averages will evaluate as nan. # You should filter out these nan values when computing stuff. 
- average_sum_per_field_lines = np.nansum(average, axis=-1) - print(average_sum_per_field_lines) - assert not np.allclose(average_sum_per_field_lines, 0) + average_sum_over_field_line = np.nansum(average, axis=-1) + print(average_sum_over_field_line) + assert not np.allclose(average_sum_over_field_line, 0) # @pytest.mark.unit @@ -540,40 +539,32 @@ def test_bounce_averaged_drifts(): """ eq = Equilibrium.load(".//tests//inputs//low-beta-shifted-circle.h5") - psi = 0.25 # rho^2 (or normalized psi) - alpha = 0 + psi = 0.25 # normalized psi + rho = np.sqrt(psi) + alpha = np.array([0]) + data_eq = eq.compute(["iota", "iota_r", "a", "rho", "psi"]) - eq_keys = ["iota", "iota_r", "a", "rho", "psi"] - - data_eq = eq.compute(eq_keys) - - iotas = np.interp(np.sqrt(psi), data_eq["rho"], data_eq["iota"]) - shears = np.interp(np.sqrt(psi), data_eq["rho"], data_eq["iota_r"]) + iotas = np.interp(rho, data_eq["rho"], data_eq["iota"]) + shears = np.interp(rho, data_eq["rho"], data_eq["iota_r"]) N = int((2 * eq.M_grid) * 4 + 1) - zeta = np.linspace(-1.0 * np.pi / iotas, 1.0 * np.pi / iotas, N) theta_PEST = alpha * np.ones(N, dtype=int) + iotas * zeta + # Creating a grid along a field line coords1 = np.zeros((N, 3)) coords1[:, 0] = np.sqrt(psi) * np.ones(N, dtype=int) coords1[:, 1] = theta_PEST coords1[:, 2] = zeta - - # Creating a grid along a field line - c1 = eq.compute_theta_coords(coords1) - grid = Grid(c1, sort=False) + # c1 = eq.compute_theta_coords(coords1) # noqa: E800 + # grid = Grid(c1, sort=False) # noqa: E800 # TODO: Request: The bounce integral operator should be able to take a grid. # Response: Currently the API is such that the method does all the # above preprocessing for you. Let's test it for correctness # first then do this later. - bounce_integral, items = bounce_integral_map( - eq, rho=np.unique(coords1[:, 0]), alpha=alpha, knots=zeta, check=True - ) + bounce_integral, items = bounce_integral_map(eq, rho, alpha, knots=zeta, check=True) grid = items["grid_desc"] - # grid._unique_zeta_idx = np.unique(grid.nodes[:, 2], return_index=True)[1] # noqa: E800, E501 - data_keys = [ "|grad(psi)|^2", "grad(psi)", @@ -585,10 +576,12 @@ def test_bounce_averaged_drifts(): "cvdrift", "gbdrift", ] - - # override_grid is required for test to pass + # FIXME (outside scope of the bounce integral pull request): + # override_grid should not be required for the test to pass. data = eq.compute(data_keys, grid=grid, override_grid=False) - + # If this is the toroidal flux at the boundary, the [-1] retrieval + # assumes that the grid which computed the data_eq["psi"] happens to have + # its last node at the last closed flux surface. psib = data_eq["psi"][-1] # signs @@ -642,10 +635,8 @@ def test_bounce_averaged_drifts(): np.testing.assert_allclose(cvdrift, cvdrift_an, atol=1.8e-2, rtol=5e-3) # Values of pitch angle for which to evaluate the bounce averages - pitch_res = 11 - pitch = np.linspace(1 / np.max(bmag), 1 / np.min(bmag), pitch_res).reshape( - pitch_res, -1 - ) + pitch = np.linspace(1 / np.max(bmag), 1 / np.min(bmag), 11) + pitch = pitch.reshape(pitch.shape[0], -1) k2 = 0.5 * ((1 - pitch * B0) / epsilon + 1) @@ -655,34 +646,33 @@ def test_bounce_averaged_drifts(): + dPdrho / bmag**2 * ellipe(k2) ) - def integrand(B, pitch): - # The quantities cvdrift, gbdrift, and dPdrho are already calculated - # along a field line. These are constants baked into this function - # and will not change value. 
- # The arguments to this function, B and pitch will be interpolated + def integrand(cvdrift, gbdrift, B, pitch): + # The arguments to this function will be interpolated # onto the quadrature points before these quantities are evaluated. - + bmag = B / Bref # When 1 - pitch * B is negative, we want g to evaluate as nan. # jnp.sqrt() will do this as desired, but np.sqrt() will give a runtime error. - g = complex_sqrt(1 - pitch * B) + g = complex_sqrt(1 - pitch * bmag) g = np.where(np.isclose(np.imag(g), 0), np.real(g), np.nan) - # just need to fix brodcasting of these to items["grid_desc"], - # maybe will use grid.copy_from_other method. Or recalculate - # these quantities along field line, see test_example_code_and_hairy_ball. - return g * 0.5 * cvdrift + gbdrift / g + dPdrho / B**2 * g - - # the integrand doesn't have any additional arguments besides B and pitch - # since gbdrift etc. are baked into the integrand function, so we pass - # an empty list. - additional_things_to_interpolate_onto_quadrature_points_besides_B_and_pitch = [] + return (g * 0.5 * cvdrift) + (gbdrift / g) + (dPdrho / bmag**2 * g) + bavg_drift_num = bounce_integral( - integrand, - additional_things_to_interpolate_onto_quadrature_points_besides_B_and_pitch, - pitch, + integrand=integrand, + # additional things to interpolate onto quadrature points besides B and pitch + f=[cvdrift, gbdrift], + pitch=pitch, ) - print(bavg_drift_num) - - np.testing.assert_allclose(bavg_drift_num, bavg_drift_an, atol=2e-2, rtol=1e-2) + assert np.isfinite(bavg_drift_num).any(), "Quadrature failed." + # there's only one field line on the grid, so squeeze out that axis + bavg_drift_num = np.squeeze(bavg_drift_num, axis=1) + for i in range(pitch.shape[0]): + np.testing.assert_allclose( + _filter_not_nan(bavg_drift_num[i]), + bavg_drift_an[i], + atol=2e-2, + rtol=1e-2, + err_msg=f"Failed on index {i} for pitch {pitch[i]}", + ) # TODO: if deemed useful finish details using methods in desc.compute.bounce_integral From e8b360a0441d9056ea750fa81c489c9ca4bd0e32 Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 11 Apr 2024 19:01:07 -0400 Subject: [PATCH 083/241] Fix bug in test where bounce points should be computed from normalized B values --- desc/compute/bounce_integral.py | 22 +++--- tests/test_bounce_integral.py | 117 ++++++++++++++++---------------- 2 files changed, 70 insertions(+), 69 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 5341492cc1..b6fe83d4ce 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -165,7 +165,7 @@ def poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): # Compute from analytic formula. r = func[c.shape[0]](*c[:-1], c[-1] - k, distinct) if keep_only_real: - r = tuple(map(partial(_filter_real, a_min=a_min, a_max=a_max), r)) + r = [_filter_real(rr, a_min, a_max) for rr in r] r = jnp.stack(r, axis=-1) # We had ignored the case of double complex roots. distinct = distinct and c.shape[0] > 3 and not keep_only_real @@ -615,7 +615,7 @@ def _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method): approximate the bounce integral of ``integrand(*f, B=B, pitch=pitch)``. Note that any arrays baked into the callable method should broadcast with arrays of shape(P, S, 1, 1). - f : list of Array, shape(P, S, knots.size, ) + f : iterable of Array, shape(P, S, knots.size, ) Arguments to the callable ``integrand``. These should be the functions in the integrand of the bounce integral evaluated at the knots. 
They should be computed on the returned desc @@ -640,9 +640,6 @@ def _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method): assert X.shape == (pitch.shape[0], B.shape[0], X.shape[2], w.size) assert knots.size == B.shape[-1] assert B_sup_z.shape == B.shape == B_z_ra.shape - for ff in f: - assert ff.ndim == 3 and ff.shape[0] == 1 or ff.shape[0] == pitch.shape[0] - assert ff.shape[1:] == B.shape # Spline the integrand so that we can evaluate it at quadrature points # without expensive coordinate mappings and root finding. # Spline each function separately so that the singularity near the bounce @@ -798,6 +795,10 @@ def integrand_den(B, pitch): """ check = kwargs.pop("check", False) return_items = kwargs.pop("return_items", True) + normalize = kwargs.pop("normalize", 1) + x, w = quad(**kwargs) + # change of variable, x = sin([0.5 + (ζ − ζ_b₂)/(ζ_b₂−ζ_b₁)] π) + x = jnp.arcsin(x) / jnp.pi - 0.5 if alpha is None: alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 10) @@ -810,8 +811,8 @@ def integrand_den(B, pitch): grid_fl, grid_desc, data = desc_grid_from_field_line_coords(eq, rho, alpha, knots) data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid_desc, data=data) B_sup_z = data["B^zeta"].reshape(S, knots.size) - B = data["|B|"].reshape(S, knots.size) - B_z_ra = data["|B|_z|r,a"].reshape(S, knots.size) + B = data["|B|"].reshape(S, knots.size) / normalize + B_z_ra = data["|B|_z|r,a"].reshape(S, knots.size) / normalize B_c = jnp.moveaxis( CubicHermiteSpline(knots, B, B_z_ra, axis=-1, check=check).c, source=1, @@ -820,10 +821,6 @@ def integrand_den(B, pitch): assert B_c.shape == (4, S, knots.size - 1) B_z_ra_c = poly_der(B_c) assert B_z_ra_c.shape == (3, S, knots.size - 1) - - x, w = quad(**kwargs) - # change of variable, x = sin([0.5 + (ζ − ζ_b₂)/(ζ_b₂−ζ_b₁)] π) - x = jnp.arcsin(x) / jnp.pi - 0.5 original = _compute_bp_if_given_pitch(knots, B_c, B_z_ra_c, pitch, check, err=False) def _group_grid_data_by_field_line(f): @@ -880,7 +877,7 @@ def bounce_integral(integrand, f, pitch=None, method="akima"): X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] if not isinstance(f, (list, tuple)): f = [f] - f = tuple(map(_group_grid_data_by_field_line, f)) + f = map(_group_grid_data_by_field_line, f) result = ( _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method) / (bp2 - bp1) @@ -894,6 +891,7 @@ def bounce_integral(integrand, f, pitch=None, method="akima"): "grid_fl": grid_fl, "grid_desc": grid_desc, "data": data, + "knots": knots, "B.c": B_c, "B_z_ra.c": B_z_ra_c, } diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 9caacef918..69898c3100 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -538,33 +538,46 @@ def test_bounce_averaged_drifts(): # once all the epsilons and Gammas have been implemented and tested """ eq = Equilibrium.load(".//tests//inputs//low-beta-shifted-circle.h5") - psi = 0.25 # normalized psi rho = np.sqrt(psi) - alpha = np.array([0]) - data_eq = eq.compute(["iota", "iota_r", "a", "rho", "psi"]) - - iotas = np.interp(rho, data_eq["rho"], data_eq["iota"]) - shears = np.interp(rho, data_eq["rho"], data_eq["iota_r"]) + data = eq.compute(["iota", "iota_r", "a", "rho", "psi"]) - N = int((2 * eq.M_grid) * 4 + 1) - zeta = np.linspace(-1.0 * np.pi / iotas, 1.0 * np.pi / iotas, N) - theta_PEST = alpha * np.ones(N, dtype=int) + iotas * zeta + # normalization + Lref = data["a"] + epsilon = Lref * rho + psi_boundary = np.max(np.abs(data["psi"])) + Bref = 2 * np.abs(psi_boundary) / 
Lref**2 # Creating a grid along a field line + iota = np.interp(rho, data["rho"], data["iota"]) + shear = np.interp(rho, data["rho"], data["iota_r"]) + N = (2 * eq.M_grid) * 4 + 1 + zeta = np.linspace(-np.pi / iota, np.pi / iota, N) + alpha = 0 + theta_PEST = alpha + iota * zeta coords1 = np.zeros((N, 3)) - coords1[:, 0] = np.sqrt(psi) * np.ones(N, dtype=int) + coords1[:, 0] = np.broadcast_to(rho, N) coords1[:, 1] = theta_PEST coords1[:, 2] = zeta # c1 = eq.compute_theta_coords(coords1) # noqa: E800 # grid = Grid(c1, sort=False) # noqa: E800 - # TODO: Request: The bounce integral operator should be able to take a grid. # Response: Currently the API is such that the method does all the # above preprocessing for you. Let's test it for correctness # first then do this later. - bounce_integral, items = bounce_integral_map(eq, rho, alpha, knots=zeta, check=True) - grid = items["grid_desc"] + bounce_integral, items = bounce_integral_map( + # FIXME: Question + # add normalize to compute matching bounce points for the test + # below, but should everything related to B be normalized? + # or just things relavant for computing bounce points? + # e.g. should I normalize B dot e^zeta = B^zeta by Bref as well? + eq, + rho, + alpha, + knots=zeta, + check=True, + normalize=Bref, + ) data_keys = [ "|grad(psi)|^2", "grad(psi)", @@ -576,70 +589,57 @@ def test_bounce_averaged_drifts(): "cvdrift", "gbdrift", ] - # FIXME (outside scope of the bounce integral pull request): + # FIXME (outside scope of the bounce branch): # override_grid should not be required for the test to pass. - data = eq.compute(data_keys, grid=grid, override_grid=False) - # If this is the toroidal flux at the boundary, the [-1] retrieval - # assumes that the grid which computed the data_eq["psi"] happens to have - # its last node at the last closed flux surface. - psib = data_eq["psi"][-1] - - # signs - sign_psi = psib / np.abs(psib) - sign_iota = iotas / np.abs(iotas) + # and anytime override_grid is true we should print a blue warning. 
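On the normalization question raised above: the bounce points solve 1 - pitch * |B| = 0, and rescaling |B| by a constant Bref only rescales pitch by the same constant, so the bounce points are unchanged as long as pitch is supplied in the matching normalized units. That is what the test does below by building ``pitch`` from ``|B| / Bref``; whether other |B|-dependent factors such as B^zeta should also be normalized is left open here. A small numeric check of the invariance on a toy field, with all names local to the sketch:

.. code-block:: python

    import numpy as np

    zeta = np.linspace(0, 2 * np.pi, 200)
    B = 1.5 + 0.5 * np.cos(zeta)  # toy |B| along a field line
    Bref = 2.0
    pitch = 1 / 1.7

    def crossings(f):
        # indices where the sign of f changes, i.e. where the bounce points lie
        return np.nonzero(np.diff(np.sign(f)))[0]

    f1 = 1 - pitch * B
    f2 = 1 - (pitch * Bref) * (B / Bref)
    np.testing.assert_allclose(f1, f2)
    assert np.array_equal(crossings(f1), crossings(f2))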
+ data_bounce = eq.compute(data_keys, grid=items["grid_desc"], override_grid=False) # normalizations - Lref = data_eq["a"] - Bref = 2 * np.abs(psib) / Lref**2 - - modB = data["|B|"] - bmag = modB / Bref - - x = Lref * np.sqrt(psi) - s_hat = -x / iotas * shears / Lref - - iota = data["iota"] - gradpar = Lref * data["B^zeta"] / modB - - ## Comparing coefficient calculation here with coefficients from compute/_mtric - cvdrift = -2 * sign_psi * Bref * Lref**2 * np.sqrt(psi) * data["cvdrift"] - gbdrift = -2 * sign_psi * Bref * Lref**2 * np.sqrt(psi) * data["gbdrift"] - - epsilon = Lref * np.sqrt(psi) + bmag = data_bounce["|B|"] / Bref B0 = np.mean(bmag) - bmag_an = B0 * (1 - epsilon * np.cos(theta_PEST)) np.testing.assert_allclose(bmag, bmag_an, atol=5e-3, rtol=5e-3) - gradpar_an = 2 * Lref * iota * (1 - epsilon * np.cos(theta_PEST)) + x = Lref * rho + s_hat = -x / iota * shear / Lref + gradpar = Lref * data_bounce["B^zeta"] / data_bounce["|B|"] + gradpar_an = 2 * Lref * data_bounce["iota"] * (1 - epsilon * np.cos(theta_PEST)) np.testing.assert_allclose(gradpar, gradpar_an, atol=9e-3, rtol=5e-3) - dPdrho = np.mean(-0.5 * (cvdrift - gbdrift) * modB**2) - alpha_MHD = -dPdrho * 1 / iota**2 * 0.5 - - grad_psi = data["grad(psi)"] - grad_alpha = data["grad(alpha)"] - - gds21 = -sign_iota * np.array(dot(grad_psi, grad_alpha)) * s_hat / Bref - + # Comparing coefficient calculation here with coefficients from compute/_metric + cvdrift = -2 * np.sign(psi_boundary) * Bref * Lref**2 * rho * data_bounce["cvdrift"] + gbdrift = -2 * np.sign(psi_boundary) * Bref * Lref**2 * rho * data_bounce["gbdrift"] + dPdrho = np.mean(-0.5 * (cvdrift - gbdrift) * data_bounce["|B|"] ** 2) + alpha_MHD = -dPdrho * 1 / data_bounce["iota"] ** 2 * 0.5 + + gds21 = ( + -np.sign(iota) + * dot(data_bounce["grad(psi)"], data_bounce["grad(alpha)"]) + * s_hat + / Bref + ) fudge_factor2 = 0.19 gbdrift_an = fudge_factor2 * ( - -1 * s_hat + (np.cos(theta_PEST) - 1.0 * gds21 / s_hat * np.sin(theta_PEST)) + -s_hat + (np.cos(theta_PEST) - gds21 / s_hat * np.sin(theta_PEST)) ) fudge_factor3 = 0.07 cvdrift_an = gbdrift_an + fudge_factor3 * alpha_MHD / bmag**2 - # Comparing coefficients with their analytical expressions np.testing.assert_allclose(gbdrift, gbdrift_an, atol=1.5e-2, rtol=5e-3) np.testing.assert_allclose(cvdrift, cvdrift_an, atol=1.8e-2, rtol=5e-3) - # Values of pitch angle for which to evaluate the bounce averages + # Values of pitch angle lambda for which to evaluate the bounce averages. pitch = np.linspace(1 / np.max(bmag), 1 / np.min(bmag), 11) pitch = pitch.reshape(pitch.shape[0], -1) k2 = 0.5 * ((1 - pitch * B0) / epsilon + 1) - + # Fixme: What exactly is this a function of? + # cvdrift, gbdrift is a grid quantity, so grid.num_nodes length + # on a single field line grid -> so it has length number of zeta points + # So bavg_drift_an has shape shape (number of pitch, number of zeta points). + # For a fixed pitch at index i, what is difference bavg_drift_an[i, j] + # and bavg_drift_an[i, j+1]? bavg_drift_an = ( 0.5 * cvdrift_an * ellipe(k2) + gbdrift_an * ellipk(k2) @@ -649,12 +649,11 @@ def test_bounce_averaged_drifts(): def integrand(cvdrift, gbdrift, B, pitch): # The arguments to this function will be interpolated # onto the quadrature points before these quantities are evaluated. - bmag = B / Bref # When 1 - pitch * B is negative, we want g to evaluate as nan. # jnp.sqrt() will do this as desired, but np.sqrt() will give a runtime error. 
- g = complex_sqrt(1 - pitch * bmag) + g = complex_sqrt(1 - pitch * B) g = np.where(np.isclose(np.imag(g), 0), np.real(g), np.nan) - return (g * 0.5 * cvdrift) + (gbdrift / g) + (dPdrho / bmag**2 * g) + return (g * 0.5 * cvdrift) + (gbdrift / g) + (dPdrho / B**2 * g) bavg_drift_num = bounce_integral( integrand=integrand, @@ -667,7 +666,11 @@ def integrand(cvdrift, gbdrift, B, pitch): bavg_drift_num = np.squeeze(bavg_drift_num, axis=1) for i in range(pitch.shape[0]): np.testing.assert_allclose( + # this will have size equal to the number of bounce integrals + # found along the field line (there's only one field line in the grid) _filter_not_nan(bavg_drift_num[i]), + # this will have size equal to the number of nodes used to discretize + # that field line, so this test will always fail. bavg_drift_an[i], atol=2e-2, rtol=1e-2, From d08b360244c912f92a09c90da5ed969532a1aed1 Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 12 Apr 2024 02:31:42 -0400 Subject: [PATCH 084/241] Add quadrature test --- desc/compute/bounce_integral.py | 60 ++++++++++++++++----------------- desc/equilibrium/coords.py | 2 +- tests/test_bounce_integral.py | 59 ++++++++++++++++++++++---------- 3 files changed, 72 insertions(+), 49 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index b6fe83d4ce..5fa42cc2dd 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -618,9 +618,9 @@ def _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method): f : iterable of Array, shape(P, S, knots.size, ) Arguments to the callable ``integrand``. These should be the functions in the integrand of the bounce integral - evaluated at the knots. They should be computed on the returned desc - coordinate grid. The values will be interpolated to the quadrature - points. All items in the list should be two-dimensional. The first axis of + evaluated (or interpolated to) the nodes of the returned desc + coordinate grid. + All items in the list should be two-dimensional. The first axis of that item is interpreted as the batch axis, which enumerates the evaluation of the function at particular pitch values. pitch : Array, shape(P, S) @@ -719,25 +719,24 @@ def bounce_integral_map( This callable method computes the bounce integral ∫ f(ℓ) dℓ for every specified field line ℓ (constant rho and alpha), for every λ value in ``pitch``. items : dict - Dictionary of useful intermediate quantities. - grid_fl : Grid - Clebsch-Type field-line coordinates grid. - grid_desc : Grid - DESC coordinate grid for the given field line coordinates. - data : dict - Dictionary of Arrays of stuff evaluated on ``grid``. - B.c : Array, shape(4, S, zeta.size - 1) - Polynomial coefficients of the spline of |B| in local power basis. - First axis enumerates the coefficients of power series. - Second axis enumerates the splines along the field lines. - Last axis enumerates the polynomials of the spline along a particular - field line. - B_z_ra.c : Array, shape(3, S, zeta.size - 1) - Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. - First axis enumerates the coefficients of power series. - Second axis enumerates the splines along the field lines. - Last axis enumerates the polynomials of the spline along a particular - field line. + grid_fl : Grid + Clebsch-Type field-line coordinate grid. + grid_desc : Grid + DESC coordinate grid for the given field line coordinates. + data : dict + Dictionary of Arrays of stuff evaluated on ``grid``. 
+ B.c : Array, shape(4, S, zeta.size - 1) + Polynomial coefficients of the spline of |B| in local power basis. + First axis enumerates the coefficients of power series. + Second axis enumerates the splines along the field lines. + Last axis enumerates the polynomials of the spline along a particular + field line. + B_z_ra.c : Array, shape(3, S, zeta.size - 1) + Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. + First axis enumerates the coefficients of power series. + Second axis enumerates the splines along the field lines. + Last axis enumerates the polynomials of the spline along a particular + field line. Examples -------- @@ -752,14 +751,12 @@ def bounce_integral_map( def integrand_num(g_zz, B, pitch): # Integrand in integral in numerator of bounce average. - f = (1 - pitch * B) * g_zz # something arbitrary - g = jnp.sqrt(1 - pitch * B) # typical to have this in denominator - return safediv(f, g, fill=jnp.nan) + f = (1 - pitch * B) * g_zz + return f / jnp.sqrt(1 - pitch * B) def integrand_den(B, pitch): # Integrand in integral in denominator of bounce average. - g = jnp.sqrt(1 - pitch * B) # typical to have this in denominator - return safediv(1, g, fill=jnp.nan) + return 1 / jnp.sqrt(1 - pitch * B) eq = get("HELIOTRON") rho = jnp.linspace(1e-12, 1, 6) @@ -781,6 +778,7 @@ def integrand_den(B, pitch): i, j = 0, 0 print(average[:, i, j]) # are the bounce averages along the field line with nodes + # given in Clebsch-Type field-line coordinates ρ, α, ζ nodes = items["grid_fl"].nodes.reshape(rho.size, alpha.size, -1, 3) print(nodes[i, j]) # for the pitch values stored in @@ -845,10 +843,10 @@ def bounce_integral(integrand, f, pitch=None, method="akima"): f : list of Array, shape(P, items["grid_desc"].num_nodes, ) Arguments to the callable ``integrand``. These should be the functions in the integrand of the bounce integral - evaluated at the knots. They should be computed on the returned desc - coordinate grid. The values will be interpolated to the quadrature - points. If an item in the list is two-dimensional, the first axis of - that item is interpreted as the batch axis, which enumerates the + evaluated (or interpolated to) the nodes of the returned desc + coordinate grid. + If an item in the list is two-dimensional, the first axis of that + item is interpreted as the batch axis, which enumerates the evaluation of the function at particular pitch values. pitch : Array, shape(P, S) λ values to evaluate the bounce integral at each field line. diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index b5b798fd64..d435792746 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -328,7 +328,7 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): Returns ------- grid_fl : Grid - Clebsch-Type field-line coordinates grid. + Clebsch-Type field-line coordinate grid. grid_desc : Grid DESC coordinate grid for the given field line coordinates. 
data : dict diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 69898c3100..784806e4bc 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -14,6 +14,7 @@ from desc.backend import complex_sqrt, flatnonzero, fori_loop, put, root_scalar from desc.compute.bounce_integral import ( + _bounce_quad, bounce_integral_map, bounce_points, pitch_of_extrema, @@ -54,6 +55,13 @@ def _filter_not_nan(a): return a[~is_nan] +def _sqrt(x): + """Reproduces jnp.sqrt with np.sqrt.""" + x = complex_sqrt(x) + x = np.where(np.isclose(np.imag(x), 0), np.real(x), np.nan) + return x + + @pytest.mark.unit def test_mask_operations(): """Test custom masked array operation.""" @@ -400,6 +408,35 @@ def test_extrema_first_and_before_bp2(plot=False): test_extrema_first_and_before_bp2() +@pytest.mark.unit +def test_bounce_quad(): + """Test quadrature reduces to elliptic integrals.""" + knots = np.linspace(-np.pi / 2, np.pi / 2, 10) + B = np.sin(knots).reshape(1, -1) + epsilon = 1e-2 + bp1, bp2 = knots[0] + epsilon, knots[-1] - epsilon + x, w = np.polynomial.chebyshev.chebgauss(65) + # change of variable, x = sin([0.5 + (ζ − ζ_b₂)/(ζ_b₂−ζ_b₁)] π) + x = (np.arcsin(x) / np.pi - 0.5) * (bp2 - bp1) + bp2 + + def integrand(B, pitch): + return 1 / _sqrt(1 - pitch * B**2) + + bounce_quad = _bounce_quad( + X=x.reshape(1, 1, 1, -1), + w=w, + knots=knots, + B_sup_z=np.ones((1, knots.size)), + B=B, + B_z_ra=np.cos(knots).reshape(1, -1), + integrand=integrand, + f=[], + pitch=np.ones((1, 1)), + method="akima", + ).squeeze() + np.testing.assert_allclose(bounce_quad, 10.5966, atol=0.75) + + @pytest.mark.unit def test_example_code_and_hairy_ball(): """Test example code in bounce_integral docstring and ensure B does not vanish.""" @@ -407,19 +444,11 @@ def test_example_code_and_hairy_ball(): def integrand_num(g_zz, B, pitch): """Integrand in integral in numerator of bounce average.""" f = (1 - pitch * B) * g_zz # something arbitrary - # When 1 - pitch * B is negative, we want g to evaluate as nan. - # jnp.sqrt() will do this as desired, but np.sqrt() will give a runtime error. - g = complex_sqrt(1 - pitch * B) # typical to have this in denominator - g = np.where(np.isclose(np.imag(g), 0), np.real(g), np.nan) - return safediv(f, g, fill=np.nan) + return safediv(f, _sqrt(1 - pitch * B), fill=np.nan) def integrand_den(B, pitch): """Integrand in integral in denominator of bounce average.""" - # When 1 - pitch * B is negative, we want g to evaluate as nan. - # jnp.sqrt() will do this as desired, but np.sqrt() will give a runtime error. - g = complex_sqrt(1 - pitch * B) # typical to have this in denominator - g = np.where(np.isclose(np.imag(g), 0), np.real(g), np.nan) - return safediv(1, g, fill=np.nan) + return safediv(1, _sqrt(1 - pitch * B), fill=np.nan) eq = get("HELIOTRON") rho = np.linspace(1e-12, 1, 6) @@ -429,8 +458,6 @@ def integrand_den(B, pitch): bounce_integral, items = bounce_integral_map(eq, rho, alpha, knots) # start hairy ball test - B = eq.compute("B", grid=items["grid_desc"], data=items["data"])["B"] - assert not np.isclose(B, 0, atol=1e-19).any(), "B should never vanish." B = items["data"]["B"] assert not np.isclose(B, 0, atol=1e-19).any(), "B should never vanish." 
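The reference value ``10.5966`` in ``test_bounce_quad`` above has a closed form: with ``pitch = 1`` and |B| = sin(zeta), the integrand is 1 / sqrt(1 - sin^2(zeta)) = 1 / |cos(zeta)|, and the integral of sec(zeta) over [-pi/2 + eps, pi/2 - eps] is 2 * artanh(cos(eps)). A short check, with SciPy used only as an independent quadrature:

.. code-block:: python

    import numpy as np
    from scipy.integrate import quad

    eps = 1e-2
    analytic = 2 * np.arctanh(np.cos(eps))
    numeric, _ = quad(lambda z: 1 / np.cos(z), -np.pi / 2 + eps, np.pi / 2 - eps)
    print(analytic, numeric)  # both approximately 10.5966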
# end hairy ball test @@ -448,6 +475,7 @@ def integrand_den(B, pitch): i, j = 0, 0 print(average[:, i, j]) # are the bounce averages along the field line with nodes + # given in Clebsch-Type field-line coordinates ρ, α, ζ nodes = items["grid_fl"].nodes.reshape(rho.size, alpha.size, -1, 3) print(nodes[i, j]) # for the pitch values stored in @@ -649,11 +677,8 @@ def test_bounce_averaged_drifts(): def integrand(cvdrift, gbdrift, B, pitch): # The arguments to this function will be interpolated # onto the quadrature points before these quantities are evaluated. - # When 1 - pitch * B is negative, we want g to evaluate as nan. - # jnp.sqrt() will do this as desired, but np.sqrt() will give a runtime error. - g = complex_sqrt(1 - pitch * B) - g = np.where(np.isclose(np.imag(g), 0), np.real(g), np.nan) - return (g * 0.5 * cvdrift) + (gbdrift / g) + (dPdrho / B**2 * g) + g = _sqrt(1 - pitch * B) + return (0.5 * cvdrift * g) + (gbdrift / g) + (dPdrho / B**2 * g) bavg_drift_num = bounce_integral( integrand=integrand, From cc20d0b99011bac134e73ca704d1cf7fb51d87e9 Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 12 Apr 2024 02:37:12 -0400 Subject: [PATCH 085/241] Add unit change of variables for clari --- tests/test_bounce_integral.py | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 784806e4bc..ca6f522c6f 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -422,18 +422,22 @@ def test_bounce_quad(): def integrand(B, pitch): return 1 / _sqrt(1 - pitch * B**2) - bounce_quad = _bounce_quad( - X=x.reshape(1, 1, 1, -1), - w=w, - knots=knots, - B_sup_z=np.ones((1, knots.size)), - B=B, - B_z_ra=np.cos(knots).reshape(1, -1), - integrand=integrand, - f=[], - pitch=np.ones((1, 1)), - method="akima", - ).squeeze() + bounce_quad = ( + _bounce_quad( + X=x.reshape(1, 1, 1, -1), + w=w, + knots=knots, + B_sup_z=np.ones((1, knots.size)), + B=B, + B_z_ra=np.cos(knots).reshape(1, -1), + integrand=integrand, + f=[], + pitch=np.ones((1, 1)), + method="akima", + ) + / (bp2 - bp1) + * np.pi + ) np.testing.assert_allclose(bounce_quad, 10.5966, atol=0.75) From a5b1793f1377249b51edc46cd0f6c3cd4067c28c Mon Sep 17 00:00:00 2001 From: Rahul Date: Fri, 12 Apr 2024 11:30:39 -0400 Subject: [PATCH 086/241] fixing the bounce averaged drifts test --- tests/test_bounce_integral.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index ca6f522c6f..2e30a9701f 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -673,9 +673,11 @@ def test_bounce_averaged_drifts(): # For a fixed pitch at index i, what is difference bavg_drift_an[i, j] # and bavg_drift_an[i, j+1]? 
bavg_drift_an = ( - 0.5 * cvdrift_an * ellipe(k2) - + gbdrift_an * ellipk(k2) - + dPdrho / bmag**2 * ellipe(k2) + ellipe(k2) + - 0.5 * ellipk(k2) + + 2 * s_hat * (ellipe(k2) + (k2 - 1) * ellipk(k2)) + - dPdrho / B0 * ellipk(k2) + - dPdrho / B0 * 2 / 3 * (ellipe(k2) * (2 * k2 - 1) + ellipk(k2) * (1 - k2)) ) def integrand(cvdrift, gbdrift, B, pitch): From fad449cd5e332c155cd34c7f94eead7bfc42ff44 Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 12 Apr 2024 12:55:05 -0400 Subject: [PATCH 087/241] Remove test frgrid on meshgrid --- tests/test_bounce_integral.py | 3 +-- tests/test_grid.py | 10 ---------- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 2e30a9701f..03eb5d7cd1 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -412,7 +412,6 @@ def test_extrema_first_and_before_bp2(plot=False): def test_bounce_quad(): """Test quadrature reduces to elliptic integrals.""" knots = np.linspace(-np.pi / 2, np.pi / 2, 10) - B = np.sin(knots).reshape(1, -1) epsilon = 1e-2 bp1, bp2 = knots[0] + epsilon, knots[-1] - epsilon x, w = np.polynomial.chebyshev.chebgauss(65) @@ -428,7 +427,7 @@ def integrand(B, pitch): w=w, knots=knots, B_sup_z=np.ones((1, knots.size)), - B=B, + B=np.sin(knots).reshape(1, -1), B_z_ra=np.cos(knots).reshape(1, -1), integrand=integrand, f=[], diff --git a/tests/test_grid.py b/tests/test_grid.py index ea67177bbf..0c93df228a 100644 --- a/tests/test_grid.py +++ b/tests/test_grid.py @@ -14,7 +14,6 @@ dec_to_cf, find_least_rational_surfaces, find_most_rational_surfaces, - meshgrid_expand, meshgrid_inverse_idx, meshgrid_unique_idx, ) @@ -764,15 +763,6 @@ def test_meshgrid_idx(self): T = np.linspace(0, 2 * np.pi, 2) Z = np.linspace(0, 10 * np.pi, 3) r, t, z = map(np.ravel, np.meshgrid(R, T, Z, indexing="ij")) - np.testing.assert_allclose( - r, meshgrid_expand(R, R.size, T.size, Z.size, order=0) - ) - np.testing.assert_allclose( - t, meshgrid_expand(T, R.size, T.size, Z.size, order=1) - ) - np.testing.assert_allclose( - z, meshgrid_expand(Z, R.size, T.size, Z.size, order=2) - ) uR, uT, uZ = meshgrid_unique_idx(R.size, T.size, Z.size) iR, iT, iZ = meshgrid_inverse_idx(R.size, T.size, Z.size) _, unique, inverse = np.unique(r, return_index=True, return_inverse=True) From 7d001e097106e2a592c7a3fffb451fb825aea66f Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 12 Apr 2024 14:58:42 -0400 Subject: [PATCH 088/241] Use modified tanh sinh quadrature --- desc/compute/bounce_integral.py | 16 ++++++++++++---- tests/test_bounce_integral.py | 5 +++-- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 5fa42cc2dd..fee8530272 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -531,14 +531,19 @@ def _compute_bp_if_given_pitch( return *bounce_points(knots, B_c, B_z_ra_c, pitch, check), pitch -def tanh_sinh_quad(resolution=7): - """Tanh-Sinh quadrature. +def tanh_sinh_cheby_quad(resolution=7): + """Modified Tanh-Sinh quadrature. This function outputs the quadrature points xₖ and weights wₖ - for a tanh-sinh quadrature. + for a modified tanh-sinh quadrature. + The generic tanh-sinh quadrature is ∫₋₁¹ f(x) dx = ∑ₖ wₖ f(xₖ) + The weights returned by this function, ωₖ, mimic the Chebyshev–Gauss quadrature. 
+ + ∫₋₁¹ f(x) / √(1 − x²) dx = ∑ₖ wₖ f(xₖ) / √(1 − xₖ²) = ∑ₖ ωₖ f(xₖ) + Parameters ---------- resolution: int @@ -562,6 +567,9 @@ def tanh_sinh_quad(resolution=7): h = 2 * t_max / (resolution - 1) x = jnp.tanh(0.5 * jnp.pi * jnp.sinh(kh)) w = 0.5 * jnp.pi * h * jnp.cosh(kh) / jnp.cosh(0.5 * jnp.pi * jnp.sinh(kh)) ** 2 + # generic tanh-sinh integrates ∫₋₁¹ f(x) dx = ∑ₖ wₖ f(xₖ) + # we want to integrate ∫₋₁¹ f(x) / √(1 − x²) dx = ∑ₖ wₖ f(xₖ) / √(1 − xₖ²) + w = w / jnp.sqrt(1 - x**2) return x, w @@ -660,7 +668,7 @@ def bounce_integral_map( rho=jnp.linspace(1e-12, 1, 10), alpha=None, knots=jnp.linspace(0, 6 * jnp.pi, 20), - quad=tanh_sinh_quad, + quad=tanh_sinh_cheby_quad, pitch=None, **kwargs, ): diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 03eb5d7cd1..ee56754041 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -23,6 +23,7 @@ poly_root, poly_val, take_mask, + tanh_sinh_cheby_quad, ) from desc.compute.utils import dot, safediv from desc.continuation import solve_continuation_automatic @@ -414,7 +415,7 @@ def test_bounce_quad(): knots = np.linspace(-np.pi / 2, np.pi / 2, 10) epsilon = 1e-2 bp1, bp2 = knots[0] + epsilon, knots[-1] - epsilon - x, w = np.polynomial.chebyshev.chebgauss(65) + x, w = tanh_sinh_cheby_quad(10) # change of variable, x = sin([0.5 + (ζ − ζ_b₂)/(ζ_b₂−ζ_b₁)] π) x = (np.arcsin(x) / np.pi - 0.5) * (bp2 - bp1) + bp2 @@ -437,7 +438,7 @@ def integrand(B, pitch): / (bp2 - bp1) * np.pi ) - np.testing.assert_allclose(bounce_quad, 10.5966, atol=0.75) + np.testing.assert_allclose(bounce_quad, 10.5966, atol=0.15) @pytest.mark.unit From 77ddaa4b3fe08dbe05317130ef1212210bed5d03 Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 12 Apr 2024 17:26:39 -0400 Subject: [PATCH 089/241] Remove old root finding bounce point code --- desc/compute/bounce_integral.py | 19 ++++------ tests/test_bounce_integral.py | 63 +-------------------------------- 2 files changed, 8 insertions(+), 74 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index fee8530272..aae2678af9 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -534,15 +534,10 @@ def _compute_bp_if_given_pitch( def tanh_sinh_cheby_quad(resolution=7): """Modified Tanh-Sinh quadrature. - This function outputs the quadrature points xₖ and weights wₖ - for a modified tanh-sinh quadrature. + Outputs the quadrature points xₖ and weights wₖ + to transform an integral following form to the weighted sum. - The generic tanh-sinh quadrature is - ∫₋₁¹ f(x) dx = ∑ₖ wₖ f(xₖ) - - The weights returned by this function, ωₖ, mimic the Chebyshev–Gauss quadrature. - - ∫₋₁¹ f(x) / √(1 − x²) dx = ∑ₖ wₖ f(xₖ) / √(1 − xₖ²) = ∑ₖ ωₖ f(xₖ) + ∫₋₁¹ f(x) / √(1 − x²) dx = ∑ₖ ωₖ f(xₖ) / √(1 − xₖ²) = ∑ₖ wₖ f(xₖ) Parameters ---------- @@ -566,9 +561,9 @@ def tanh_sinh_cheby_quad(resolution=7): kh = jnp.linspace(-t_max, t_max, resolution) h = 2 * t_max / (resolution - 1) x = jnp.tanh(0.5 * jnp.pi * jnp.sinh(kh)) + # weights for ∫₋₁¹ f(x) dx = ∑ₖ ωₖ f(xₖ) w = 0.5 * jnp.pi * h * jnp.cosh(kh) / jnp.cosh(0.5 * jnp.pi * jnp.sinh(kh)) ** 2 - # generic tanh-sinh integrates ∫₋₁¹ f(x) dx = ∑ₖ wₖ f(xₖ) - # we want to integrate ∫₋₁¹ f(x) / √(1 − x²) dx = ∑ₖ wₖ f(xₖ) / √(1 − xₖ²) + # weights for ∫₋₁¹ f(x) / √(1 − x²) dx = ∑ₖ wₖ f(xₖ) w = w / jnp.sqrt(1 - x**2) return x, w @@ -707,8 +702,8 @@ def bounce_integral_map( and the accuracy of the locations of the bounce points. quad : callable The quadrature scheme used to evaluate the integral. 
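A quick sanity check of the weighted rule above: after dividing the tanh-sinh weights by sqrt(1 - x_k^2), summing them against f = 1 should reproduce the integral of 1 / sqrt(1 - x^2) over [-1, 1], which is pi. The sketch below rebuilds the construction in NumPy; the truncation point ``x_max`` is an assumption, since its exact value is not reproduced here.

.. code-block:: python

    import numpy as np

    resolution = 99
    x_max = 1 - 1e-7  # assumed truncation of the domain near the endpoints
    t_max = np.arcsinh(2 * np.arctanh(x_max) / np.pi)
    kh = np.linspace(-t_max, t_max, resolution)
    h = 2 * t_max / (resolution - 1)
    x = np.tanh(0.5 * np.pi * np.sinh(kh))
    W = 0.5 * np.pi * h * np.cosh(kh) / np.cosh(0.5 * np.pi * np.sinh(kh)) ** 2
    W = W / np.sqrt(1 - x**2)  # fold in the 1 / sqrt(1 - x^2) weight
    np.testing.assert_allclose(np.sum(W), np.pi, rtol=5e-3)  # f(x) = 1
    np.testing.assert_allclose(np.sum(W * x**2), np.pi / 2, rtol=5e-3)  # f(x) = x**2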
- Should return quadrature points and weights when called. - The returned points should be within the domain [-1, 1]. + The returned quadrature points xₖ and weights wₖ + should approximate ∫₋₁¹ f(x) / √(1 − x²) dx = ∑ₖ wₖ f(xₖ). pitch : Array, shape(P, S) λ values to evaluate the bounce integral at each field line. May be specified later. diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index ee56754041..015b47dca3 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -12,7 +12,7 @@ from scipy.interpolate import CubicHermiteSpline from scipy.special import ellipe, ellipk -from desc.backend import complex_sqrt, flatnonzero, fori_loop, put, root_scalar +from desc.backend import complex_sqrt, flatnonzero from desc.compute.bounce_integral import ( _bounce_quad, bounce_integral_map, @@ -707,64 +707,3 @@ def integrand(cvdrift, gbdrift, B, pitch): rtol=1e-2, err_msg=f"Failed on index {i} for pitch {pitch[i]}", ) - - -# TODO: if deemed useful finish details using methods in desc.compute.bounce_integral -def _compute_bounce_points_with_root_finding( - eq, pitch, rho, alpha, resolution=20, zeta_max=6 * np.pi -): - # TODO: avoid separate root finding routines in residual and jac - # and use previous desc coords as initial guess for next iteration - def residual(zeta, i): - grid, data = desc_grid_from_field_line_coords(rho, alpha, zeta, eq) - data = eq.compute(["|B|"], grid=grid, data=data) - return data["|B|"] - pitch[i] - - def jac(zeta): - grid, data = desc_grid_from_field_line_coords(rho, alpha, zeta, eq) - data = eq.compute(["|B|_z|r,a"], grid=grid, data=data) - return data["|B|_z|r,a"] - - # Compute |B| - 1/pitch on a dense grid. - # For every field line, find the roots of this linear spline. - # These estimates for the true roots will serve as an initial guess, and - # let us form a boundary mesh around root estimates to limit search domain - # of the root finding algorithms. 
- zeta = np.linspace(0, zeta_max, 3 * resolution) - grid, data = desc_grid_from_field_line_coords(rho, alpha, zeta, eq) - data = eq.compute(["|B|"], grid=grid, data=data) - B_norm = data["|B|"].reshape(alpha.size, rho.size, -1) # constant field line chunks - - boundary_lt = np.zeros((pitch.size, resolution, alpha.size, rho.size)) - boundary_rt = np.zeros((pitch.size, resolution, alpha.size, rho.size)) - guess = np.zeros((pitch.size, resolution, alpha.size, rho.size)) - # todo: scan over this - for i in range(pitch.size): - for j in range(alpha.size): - for k in range(rho.size): - # indices of zeta values observed prior to sign change - idx = np.nonzero(np.diff(np.sign(B_norm[j, k] - pitch[i])))[0] - guess[i, :, j, k] = grid.nodes[idx, 2] - boundary_lt[i, :, j, k] = np.append(zeta[0], guess[:-1]) - boundary_rt[i, :, j, k] = np.append(guess[1:], zeta[-1]) - guess = guess.reshape(pitch.size, resolution, alpha.size * rho.size) - boundary_lt = boundary_lt.reshape(pitch.size, resolution, alpha.size * rho.size) - boundary_rt = boundary_rt.reshape(pitch.size, resolution, alpha.size * rho.size) - - def body_pitch(i, out): - def body_roots(j, out_i): - def fixup(z): - return np.clip(z, boundary_lt[i, j], boundary_rt[i, j]) - - # todo: call vmap to vectorize on guess[i, j] so that we solve - # guess[i, j].size independent root finding problems - root = root_scalar(residual, guess[i, j], jac=jac, args=i, fixup=fixup) - out_i = put(out_i, j, root) - return out_i - - out = put(out, i, fori_loop(0, resolution, body_roots, out[i])) - return out - - bounce_points = np.zeros(shape=(pitch.size, alpha.size, rho.size, resolution)) - bounce_points = fori_loop(0, pitch.size, body_pitch, bounce_points) - return bounce_points From 12ee415d59ee39545ffea26be4276f9267385a28 Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 12 Apr 2024 17:28:46 -0400 Subject: [PATCH 090/241] Fix typo in comment --- desc/compute/bounce_integral.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index aae2678af9..c7c7aa0f5e 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -535,7 +535,7 @@ def tanh_sinh_cheby_quad(resolution=7): """Modified Tanh-Sinh quadrature. Outputs the quadrature points xₖ and weights wₖ - to transform an integral following form to the weighted sum. + to transform an integral of the following form to the weighted sum. ∫₋₁¹ f(x) / √(1 − x²) dx = ∑ₖ ωₖ f(xₖ) / √(1 − xₖ²) = ∑ₖ wₖ f(xₖ) From fa7a6172bd322ec273c4c8203ffab457ff3c25b7 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 13 Apr 2024 17:57:53 -0400 Subject: [PATCH 091/241] Use general automorphism for quadrature and fix change of variable bug --- desc/compute/bounce_integral.py | 192 +++++++++++++++++++++++++------- tests/test_bounce_integral.py | 82 +++++++++----- 2 files changed, 207 insertions(+), 67 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index c7c7aa0f5e..d7ff81cd22 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -531,25 +531,25 @@ def _compute_bp_if_given_pitch( return *bounce_points(knots, B_c, B_z_ra_c, pitch, check), pitch -def tanh_sinh_cheby_quad(resolution=7): - """Modified Tanh-Sinh quadrature. +def tanh_sinh_quad(resolution, w=lambda x: 1): + """Tanh-Sinh quadrature. - Outputs the quadrature points xₖ and weights wₖ - to transform an integral of the following form to the weighted sum. 
- - ∫₋₁¹ f(x) / √(1 − x²) dx = ∑ₖ ωₖ f(xₖ) / √(1 − xₖ²) = ∑ₖ wₖ f(xₖ) + Returns quadrature points xₖ and weights Wₖ for the approximate evaluation + of the integral ∫₋₁¹ w(x) f(x) dx ≈ ∑ₖ Wₖ f(xₖ). Parameters ---------- resolution: int - Number of quadrature points, preferably odd. + Number of quadrature points. + w : callable + Weight function defined, positive, and continuous on (-1, 1). Returns ------- - x : numpy array - Quadrature points - w : numpy array - Quadrature weights + x : Array + Quadrature points. + W : Array + Quadrature weights. """ # boundary of integral @@ -560,12 +560,12 @@ def tanh_sinh_cheby_quad(resolution=7): t_max = jnp.arcsinh(2 * jnp.arctanh(x_max) / jnp.pi) kh = jnp.linspace(-t_max, t_max, resolution) h = 2 * t_max / (resolution - 1) - x = jnp.tanh(0.5 * jnp.pi * jnp.sinh(kh)) - # weights for ∫₋₁¹ f(x) dx = ∑ₖ ωₖ f(xₖ) - w = 0.5 * jnp.pi * h * jnp.cosh(kh) / jnp.cosh(0.5 * jnp.pi * jnp.sinh(kh)) ** 2 - # weights for ∫₋₁¹ f(x) / √(1 − x²) dx = ∑ₖ wₖ f(xₖ) - w = w / jnp.sqrt(1 - x**2) - return x, w + arg = 0.5 * jnp.pi * jnp.sinh(kh) + x = jnp.tanh(arg) + # weights for Tanh-Sinh quadrature ∫₋₁¹ f(x) dx ≈ ∑ₖ ωₖ f(xₖ) + W = 0.5 * jnp.pi * h * jnp.cosh(kh) / jnp.cosh(arg) ** 2 + W = W * w(x) + return x, W _interp1d_vec = jnp.vectorize( @@ -593,13 +593,13 @@ def _interp1d_vec_with_df( return interp1d(xq, x, f, method, derivative, extrap, period, fx=fx) -def _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method): +def _bounce_quad(Z, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method): """Compute bounce quadrature for every pitch along every field line. Parameters ---------- - X : Array, shape(P, S, X.shape[2], w.size) - Quadrature points. + Z : Array, shape(P, S, Z.shape[2], w.size) + Quadrature points at field line-following ζ coordinates. w : Array, shape(w.size, ) Quadrature weights. knots : Array, shape(knots.size, ) @@ -634,42 +634,139 @@ def _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method): Returns ------- - inner_product : Array, shape(X.shape[:-1]) + inner_product : Array, shape(Z.shape[:-1]) Bounce quadrature for every pitch along every field line. """ assert pitch.ndim == 2 assert w.ndim == knots.ndim == 1 - assert X.shape == (pitch.shape[0], B.shape[0], X.shape[2], w.size) + assert Z.shape == (pitch.shape[0], B.shape[0], Z.shape[2], w.size) assert knots.size == B.shape[-1] assert B_sup_z.shape == B.shape == B_z_ra.shape # Spline the integrand so that we can evaluate it at quadrature points # without expensive coordinate mappings and root finding. # Spline each function separately so that the singularity near the bounce # points can be captured more accurately than can be by any polynomial. - shape = X.shape - X = X.reshape(X.shape[0], X.shape[1], -1) - f = [_interp1d_vec(X, knots, ff, method=method).reshape(shape) for ff in f] - B_sup_z = _interp1d_vec(X, knots, B_sup_z, method=method).reshape(shape) + shape = Z.shape + Z = Z.reshape(Z.shape[0], Z.shape[1], -1) + f = [_interp1d_vec(Z, knots, ff, method=method).reshape(shape) for ff in f] + B_sup_z = _interp1d_vec(Z, knots, B_sup_z, method=method).reshape(shape) # Specify derivative at knots for ≈ cubic hermite interpolation. 
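As a sanity check of the rule defined above, the same Tanh-Sinh formulas can be transcribed into plain NumPy and applied to a weighted integral with a known value. The clipping of the boundary point ``x_max`` is an assumption here, since that line is not shown in this hunk:

.. code-block:: python

    import numpy as np

    def tanh_sinh(resolution, w=lambda x: 1.0, x_max=1.0 - 1e-12):
        # x_max < 1 truncates the doubly exponential transform just inside the boundary.
        t_max = np.arcsinh(2.0 * np.arctanh(x_max) / np.pi)
        kh = np.linspace(-t_max, t_max, resolution)
        h = 2.0 * t_max / (resolution - 1)
        arg = 0.5 * np.pi * np.sinh(kh)
        x = np.tanh(arg)  # quadrature points in (-1, 1)
        W = 0.5 * np.pi * h * np.cosh(kh) / np.cosh(arg) ** 2
        return x, W * w(x)  # absorb the weight function into the weights

    # ∫₋₁¹ 1/√(1 − x²) dx = π, with the singular factor supplied as the weight w(x).
    x, W = tanh_sinh(19, w=lambda x: 1.0 / np.sqrt(1.0 - x**2))
    np.testing.assert_allclose(np.sum(W), np.pi, rtol=1e-4)

Since the singular factor rides along in the weights, only the smooth part of the integrand is ever evaluated by the rule.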
- B = _interp1d_vec_with_df(X, knots, B, B_z_ra, method="cubic").reshape(shape) + B = _interp1d_vec_with_df(Z, knots, B, B_z_ra, method="cubic").reshape(shape) pitch = pitch[..., jnp.newaxis, jnp.newaxis] inner_product = jnp.dot(integrand(*f, B=B, pitch=pitch) / B_sup_z, w) return inner_product +def _affine_bijection_forward(x, a, b): + """[a, b] ∋ x ↦ y ∈ [−1, 1].""" + y = 2 * (x - a) / (b - a) - 1 + return y + + +def _affine_bijection_reverse(x, a, b): + """[−1, 1] ∋ x ↦ y ∈ [a, b].""" + y = (x + 1) / 2 * (b - a) + a + return y + + +def _grad_affine_bijection_reverse(a, b): + """Gradient of reverse affine bijection.""" + dy_dx = (b - a) / 2 + return dy_dx + + +def automorphism_arcsin(x): + """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. + + The arcsin automorphism is an expansion, so it pushes the evaluation points + of the bounce integrand toward the singular region, which may induce + floating point error. + + The gradient of the arcsin automorphism introduces a singularity that augments + the singularity in the bounce integral. + """ + y = 2 * jnp.arcsin(x) / jnp.pi + return y + + +def grad_automorphism_arcsin(x): + """Gradient of arcsin automorphism. + + The arcsin automorphism is an expansion, so it pushes the evaluation points + of the bounce integrand toward the singular region, which may induce + floating point error. + + The gradient of the arcsin automorphism introduces a singularity that augments + the singularity in the bounce integral. + """ + dy_dx = 2 / (jnp.sqrt(1 - x**2) * jnp.pi) + return dy_dx + + +def automorphism_sin(x): + """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. + + The sin automorphism is a contraction, so it pulls the evaluation points + of the bounce integrand away from the singular region, inducing less + floating point error. + + The derivative of the sin automorphism is Lipschitz. + When this automorphism is used as the change of variable map for the bounce + integral, the Lipschitzness prevents generation of new singularities. + Furthermore, its derivative vanishes like the integrand of the elliptic + integral the second kind E(φ | 1), competing with the singularity in the + bounce integrand. Therefore, this automorphism pulls the mass of the bounce + integral away from the singularities, which should improve convergence of the + quadrature to the principal value of the true integral, so long as the + quadrature performs better on less singular integrands. + """ + y = jnp.sin(jnp.pi * x / 2) + return y + + +def grad_automorphism_sin(x): + """Gradient of sin automorphism. + + The sin automorphism is a contraction, so it will pull the evaluation points + away from the singular region, inducing less floating point error. + + The sin automorphism is a contraction, so it pulls the evaluation points + of the bounce integrand away from the singular region, inducing less + floating point error. + + The derivative of the sin automorphism is Lipschitz. + When this automorphism is used as the change of variable map for the bounce + integral, the Lipschitzness prevents generation of new singularities. + Furthermore, its derivative vanishes like the integrand of the elliptic + integral the second kind E(φ | 1), competing with the singularity in the + bounce integrand. Therefore, this automorphism pulls the mass of the bounce + integral away from the singularities, which should improve convergence of the + quadrature to the principal value of the true integral, so long as the + quadrature performs better on less singular integrands. 
+ """ + dy_dx = jnp.pi * jnp.cos(jnp.pi * x / 2) / 2 + return dy_dx + + def bounce_integral_map( eq, rho=jnp.linspace(1e-12, 1, 10), alpha=None, knots=jnp.linspace(0, 6 * jnp.pi, 20), - quad=tanh_sinh_cheby_quad, + quad=tanh_sinh_quad, + # In theory, the sin automorphism should perform better, but in + # test_bounce_quad, it appears tanh_sinh performs better the more + # singular an integral is. + automorphism=automorphism_arcsin, + grad_automorphism=grad_automorphism_arcsin, pitch=None, + return_items=True, **kwargs, ): """Returns a method to compute the bounce integral of any quantity. - The bounce integral is defined as ∫ f(ℓ) dℓ, where + The bounce integral is defined as the principal value of ∫ f(ℓ) dℓ, where dℓ parameterizes the distance along the field line, λ is a constant proportional to the magnetic moment over energy, |B| is the norm of the magnetic field, @@ -703,7 +800,16 @@ def bounce_integral_map( quad : callable The quadrature scheme used to evaluate the integral. The returned quadrature points xₖ and weights wₖ - should approximate ∫₋₁¹ f(x) / √(1 − x²) dx = ∑ₖ wₖ f(xₖ). + should approximate ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). + automorphism : callable + The reverse automorphism of the real interval [-1, 1] defined below. + The forward automorphism is composed with the affine bijection + that maps the bounce points to [-1, 1]. The resulting forward map defines + a change of variable for the bounce integral. + grad_automorphism : callable + Derivative of the reverse automorphism, i.e. the derivative of the map + ``automorphism``. (Or 1 / derivative of the forward automorphism). + May be useful to use automatic differentiation. pitch : Array, shape(P, S) λ values to evaluate the bounce integral at each field line. May be specified later. @@ -712,9 +818,10 @@ def bounce_integral_map( where in the latter the labels (ρ, α) are interpreted as index into the last axis that corresponds to that field line. If two-dimensional, the first axis is the batch axis as usual. + return_items : bool + Whether to return ``items`` as described below. kwargs Can specify additional arguments to the quadrature function with kwargs. - Can also specify whether to not return items with ``return_items=False``. Returns ------- @@ -795,11 +902,16 @@ def integrand_den(B, pitch): """ check = kwargs.pop("check", False) - return_items = kwargs.pop("return_items", True) normalize = kwargs.pop("normalize", 1) + if quad == tanh_sinh_quad: + kwargs.setdefault("resolution", 13) x, w = quad(**kwargs) - # change of variable, x = sin([0.5 + (ζ − ζ_b₂)/(ζ_b₂−ζ_b₁)] π) - x = jnp.arcsin(x) / jnp.pi - 0.5 + # The gradient of the reverse transformation is the weight function w(x) of + # the quadrature. Apply weight function for the automorphism. + w = w * grad_automorphism(x) + # Apply reverse automorphism change of variable to quadrature points. + # Recall x = forward(_affine_bijection_forward(ζ, ζ_b₁, ζ_b₂)). + x = automorphism(x) if alpha is None: alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 10) @@ -875,15 +987,15 @@ def bounce_integral(integrand, f, pitch=None, method="akima"): bp1, bp2, pitch = _compute_bp_if_given_pitch( knots, B_c, B_z_ra_c, pitch, check, *original, err=True ) - X = x * (bp2 - bp1)[..., jnp.newaxis] + bp2[..., jnp.newaxis] + # # Apply affine change of variable to quadrature points. 
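To see how these pieces compose, here is a self-contained NumPy sketch of the same change of variable: the sin automorphism followed by the affine map onto the integration interval, with the chain-rule factors folded into Gauss-Legendre weights. The endpoints below are arbitrary stand-ins for the bounce points ζ_b₁, ζ_b₂, and the helpers simply mirror the ones added in this patch:

.. code-block:: python

    import numpy as np

    def affine_reverse(x, a, b):  # [-1, 1] -> [a, b]
        return (x + 1) / 2 * (b - a) + a

    def grad_affine_reverse(a, b):
        return (b - a) / 2

    def automorphism_sin(x):
        return np.sin(np.pi * x / 2)

    def grad_automorphism_sin(x):
        return np.pi * np.cos(np.pi * x / 2) / 2

    a, b = 0.0, 3.0  # stand-ins for the bounce points
    f = lambda z: 1 / np.sqrt((z - a) * (b - z))  # integrable endpoint singularities

    x, w = np.polynomial.legendre.leggauss(16)  # ∫₋₁¹ g(x) dx ≈ ∑ₖ wₖ g(xₖ)
    # Chain rule: dζ = (dζ/dy)(dy/dx) dx is absorbed into the quadrature weights.
    w = w * grad_affine_reverse(a, b) * grad_automorphism_sin(x)
    z = affine_reverse(automorphism_sin(x), a, b)
    np.testing.assert_allclose(np.dot(f(z), w), np.pi)  # ∫ₐᵇ dζ/√((ζ−a)(b−ζ)) = π

For this model integrand the transformed integrand is exactly constant, the extreme case of the suppression the sin automorphism is designed for; for the true bounce integrand the cancellation is only approximate.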
+ Z = _affine_bijection_reverse(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) if not isinstance(f, (list, tuple)): f = [f] f = map(_group_grid_data_by_field_line, f) - result = ( - _bounce_quad(X, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method) - / (bp2 - bp1) - * jnp.pi - ) + # Integrate and complete the change of variable. + result = _bounce_quad( + Z, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method + ) * _grad_affine_bijection_reverse(bp1, bp2) assert result.shape == (pitch.shape[0], S, (knots.size - 1) * 3) return result diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 015b47dca3..64ef9de6d4 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -10,20 +10,26 @@ # TODO: can use the one from interpax once .solve() is implemented from scipy.interpolate import CubicHermiteSpline -from scipy.special import ellipe, ellipk +from scipy.special import ellipe, ellipk, ellipkm1 from desc.backend import complex_sqrt, flatnonzero from desc.compute.bounce_integral import ( + _affine_bijection_forward, + _affine_bijection_reverse, _bounce_quad, + _grad_affine_bijection_reverse, + automorphism_arcsin, + automorphism_sin, bounce_integral_map, bounce_points, + grad_automorphism_arcsin, pitch_of_extrema, poly_der, poly_int, poly_root, poly_val, take_mask, - tanh_sinh_cheby_quad, + tanh_sinh_quad, ) from desc.compute.utils import dot, safediv from desc.continuation import solve_continuation_automatic @@ -409,36 +415,58 @@ def test_extrema_first_and_before_bp2(plot=False): test_extrema_first_and_before_bp2() +@pytest.mark.unit +def test_automorphism(): + """Test automorphisms.""" + a, b = -312, 786 + x = np.linspace(a, b, 10) + y = _affine_bijection_forward(x, a, b) + np.testing.assert_allclose( + _affine_bijection_reverse(_affine_bijection_forward(x, a, b), a, b), x + ) + np.testing.assert_allclose( + _affine_bijection_forward(_affine_bijection_reverse(y, a, b), a, b), y + ) + np.testing.assert_allclose( + _affine_bijection_reverse(_affine_bijection_forward(y, a, b), a, b), y + ) + np.testing.assert_allclose(automorphism_arcsin(automorphism_sin(y)), y) + np.testing.assert_allclose(automorphism_sin(automorphism_arcsin(y)), y) + + @pytest.mark.unit def test_bounce_quad(): - """Test quadrature reduces to elliptic integrals.""" + """Test principal value of bounce integral matches elliptic integral.""" + + def reverse(x, bp1, bp2): + return _affine_bijection_reverse(automorphism_arcsin(x), bp1, bp2) + + def grad_reverse(x, bp1, bp2): + return _grad_affine_bijection_reverse(bp1, bp2) * grad_automorphism_arcsin(x) + knots = np.linspace(-np.pi / 2, np.pi / 2, 10) - epsilon = 1e-2 - bp1, bp2 = knots[0] + epsilon, knots[-1] - epsilon - x, w = tanh_sinh_cheby_quad(10) - # change of variable, x = sin([0.5 + (ζ − ζ_b₂)/(ζ_b₂−ζ_b₁)] π) - x = (np.arcsin(x) / np.pi - 0.5) * (bp2 - bp1) + bp2 + bp1, bp2 = knots[0], knots[-1] + x, w = tanh_sinh_quad(18, lambda x: grad_reverse(x, bp1, bp2)) + z = reverse(x, bp1, bp2) + p = 1e-3 + m = 1 - p def integrand(B, pitch): - return 1 / _sqrt(1 - pitch * B**2) - - bounce_quad = ( - _bounce_quad( - X=x.reshape(1, 1, 1, -1), - w=w, - knots=knots, - B_sup_z=np.ones((1, knots.size)), - B=np.sin(knots).reshape(1, -1), - B_z_ra=np.cos(knots).reshape(1, -1), - integrand=integrand, - f=[], - pitch=np.ones((1, 1)), - method="akima", - ) - / (bp2 - bp1) - * np.pi + return 1 / _sqrt(1 - pitch * m * B**2) + + bounce_quad = _bounce_quad( + Z=z.reshape(1, 1, 1, -1), + w=w, + knots=knots, + B_sup_z=np.ones((1, 
knots.size)), + B=np.sin(knots).reshape(1, -1), + B_z_ra=np.cos(knots).reshape(1, -1), + integrand=integrand, + f=[], + pitch=np.ones((1, 1)), + method="akima", ) - np.testing.assert_allclose(bounce_quad, 10.5966, atol=0.15) + np.testing.assert_allclose(bounce_quad, 2 * ellipkm1(p), rtol=1e-3) @pytest.mark.unit @@ -601,7 +629,7 @@ def test_bounce_averaged_drifts(): # FIXME: Question # add normalize to compute matching bounce points for the test # below, but should everything related to B be normalized? - # or just things relavant for computing bounce points? + # or just things relevant for computing bounce points? # e.g. should I normalize B dot e^zeta = B^zeta by Bref as well? eq, rho, From 990c96a3aa9792573ed2fc568c6e2a5976de0bf8 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 13 Apr 2024 19:12:55 -0400 Subject: [PATCH 092/241] Switch to sin automorphism to suppress singularity, add lebgauss test --- desc/compute/bounce_integral.py | 86 +++++++++++++++++++++++++-------- tests/test_bounce_integral.py | 70 ++++++++++++++++++++------- 2 files changed, 119 insertions(+), 37 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index d7ff81cd22..0d6b4f9545 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -684,7 +684,17 @@ def automorphism_arcsin(x): floating point error. The gradient of the arcsin automorphism introduces a singularity that augments - the singularity in the bounce integral. + the singularity in the bounce integral. Therefore, the quadrature scheme + used to evaluate the integral must work well on hypersingular integrals. + + Parameters + ---------- + x : Array + + Returns + ------- + y : Array + """ y = 2 * jnp.arcsin(x) / jnp.pi return y @@ -698,7 +708,17 @@ def grad_automorphism_arcsin(x): floating point error. The gradient of the arcsin automorphism introduces a singularity that augments - the singularity in the bounce integral. + the singularity in the bounce integral. Therefore, the quadrature scheme + used to evaluate the integral must work well on hypersingular integrals. + + Parameters + ---------- + x : Array + + Returns + ------- + dy_dx : Array + """ dy_dx = 2 / (jnp.sqrt(1 - x**2) * jnp.pi) return dy_dx @@ -715,11 +735,24 @@ def automorphism_sin(x): When this automorphism is used as the change of variable map for the bounce integral, the Lipschitzness prevents generation of new singularities. Furthermore, its derivative vanishes like the integrand of the elliptic - integral the second kind E(φ | 1), competing with the singularity in the - bounce integrand. Therefore, this automorphism pulls the mass of the bounce - integral away from the singularities, which should improve convergence of the - quadrature to the principal value of the true integral, so long as the - quadrature performs better on less singular integrands. + integral the second kind E(φ | 1), suppressing the singularity in the + bounce integrand. + + Therefore, this automorphism pulls the mass of the bounce integral away + from the singularities, which should improve convergence of the quadrature + to the principal value of the true integral, so long as the quadrature + performs better on less singular integrands. If the integral was + hypersingular to begin with, Tanh-Sinh quadrature will still work well. + Otherwise, Gauss-Legendre quadrature can outperform Tanh-Sinh. 
+ + Parameters + ---------- + x : Array + + Returns + ------- + y : Array + """ y = jnp.sin(jnp.pi * x / 2) return y @@ -739,11 +772,24 @@ def grad_automorphism_sin(x): When this automorphism is used as the change of variable map for the bounce integral, the Lipschitzness prevents generation of new singularities. Furthermore, its derivative vanishes like the integrand of the elliptic - integral the second kind E(φ | 1), competing with the singularity in the - bounce integrand. Therefore, this automorphism pulls the mass of the bounce - integral away from the singularities, which should improve convergence of the - quadrature to the principal value of the true integral, so long as the - quadrature performs better on less singular integrands. + integral the second kind E(φ | 1), suppressing the singularity in the + bounce integrand. + + Therefore, this automorphism pulls the mass of the bounce integral away + from the singularities, which should improve convergence of the quadrature + to the principal value of the true integral, so long as the quadrature + performs better on less singular integrands. If the integral was + hypersingular to begin with, Tanh-Sinh quadrature will still work well. + Otherwise, Gauss-Legendre quadrature can outperform Tanh-Sinh. + + Parameters + ---------- + x : Array + + Returns + ------- + dy_dx : Array + """ dy_dx = jnp.pi * jnp.cos(jnp.pi * x / 2) / 2 return dy_dx @@ -755,11 +801,8 @@ def bounce_integral_map( alpha=None, knots=jnp.linspace(0, 6 * jnp.pi, 20), quad=tanh_sinh_quad, - # In theory, the sin automorphism should perform better, but in - # test_bounce_quad, it appears tanh_sinh performs better the more - # singular an integral is. - automorphism=automorphism_arcsin, - grad_automorphism=grad_automorphism_arcsin, + automorphism=automorphism_sin, + grad_automorphism=grad_automorphism_sin, pitch=None, return_items=True, **kwargs, @@ -801,11 +844,16 @@ def bounce_integral_map( The quadrature scheme used to evaluate the integral. The returned quadrature points xₖ and weights wₖ should approximate ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). + For the default choice of the automorphism below, + Tanh-Sinh quadrature works well if the integrand is hypersingular. + Otherwise, Gauss-Legendre quadrature can be more competitive. automorphism : callable The reverse automorphism of the real interval [-1, 1] defined below. The forward automorphism is composed with the affine bijection that maps the bounce points to [-1, 1]. The resulting forward map defines - a change of variable for the bounce integral. + a change of variable for the bounce integral. The choice made for + the automorphism can augment of suppress singularities. + Keep this in mind when choosing the quadrature method. grad_automorphism : callable Derivative of the reverse automorphism, i.e. the derivative of the map ``automorphism``. (Or 1 / derivative of the forward automorphism). @@ -904,7 +952,7 @@ def integrand_den(B, pitch): check = kwargs.pop("check", False) normalize = kwargs.pop("normalize", 1) if quad == tanh_sinh_quad: - kwargs.setdefault("resolution", 13) + kwargs.setdefault("resolution", 19) x, w = quad(**kwargs) # The gradient of the reverse transformation is the weight function w(x) of # the quadrature. Apply weight function for the automorphism. 
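The ``quad`` argument only has to satisfy the contract stated in the docstring, ∫₋₁¹ g(x) dx ≈ ∑ₖ wₖ g(xₖ), so a fixed-order Gauss-Legendre rule can be dropped in once the default sin automorphism has suppressed the singularity. A minimal sketch of such a plug-in; the wrapper name and its default are illustrative only:

.. code-block:: python

    import numpy as np

    def leggauss_quad(resolution=16):
        """Gauss-Legendre points and weights on [-1, 1]."""
        return np.polynomial.legendre.leggauss(resolution)

    x, w = leggauss_quad(16)
    np.testing.assert_allclose(np.sum(w), 2.0)          # ∫₋₁¹ 1 dx
    np.testing.assert_allclose(np.dot(x**2, w), 2 / 3)  # ∫₋₁¹ x² dx

With the sin automorphism made the default in this commit, this is the pairing the updated docstring suggests can be competitive when the transformed integrand is no longer hypersingular.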
diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 64ef9de6d4..acb2044060 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -23,6 +23,7 @@ bounce_integral_map, bounce_points, grad_automorphism_arcsin, + grad_automorphism_sin, pitch_of_extrema, poly_der, poly_int, @@ -437,36 +438,69 @@ def test_automorphism(): @pytest.mark.unit def test_bounce_quad(): """Test principal value of bounce integral matches elliptic integral.""" + p = 1e-3 + m = 1 - p + truth = 2 * ellipkm1(p) + rtol = 1e-3 - def reverse(x, bp1, bp2): + def reverse_arcsin(x, bp1, bp2): return _affine_bijection_reverse(automorphism_arcsin(x), bp1, bp2) - def grad_reverse(x, bp1, bp2): + def grad_reverse_arcsin(x, bp1, bp2): return _grad_affine_bijection_reverse(bp1, bp2) * grad_automorphism_arcsin(x) + def reverse_sin(x, bp1, bp2): + return _affine_bijection_reverse(automorphism_sin(x), bp1, bp2) + + def grad_reverse_sin(x, bp1, bp2): + return _grad_affine_bijection_reverse(bp1, bp2) * grad_automorphism_sin(x) + knots = np.linspace(-np.pi / 2, np.pi / 2, 10) bp1, bp2 = knots[0], knots[-1] - x, w = tanh_sinh_quad(18, lambda x: grad_reverse(x, bp1, bp2)) - z = reverse(x, bp1, bp2) - p = 1e-3 - m = 1 - p + B_sup_z = np.ones((1, knots.size)) + B = np.sin(knots).reshape(1, -1) + B_z_ra = np.cos(knots).reshape(1, -1) + pitch = np.ones((1, 1)) + method = "akima" + + # tanh sinh arcsin + x_t, w_t = tanh_sinh_quad(18) + w_t = w_t * grad_reverse_arcsin(x_t, bp1, bp2) + z_t = reverse_arcsin(x_t, bp1, bp2) + # gauss-legendre sin + x_g, w_g = np.polynomial.legendre.leggauss(16) + w_g = w_g * grad_reverse_sin(x_g, bp1, bp2) + z_g = reverse_sin(x_g, bp1, bp2) def integrand(B, pitch): return 1 / _sqrt(1 - pitch * m * B**2) - bounce_quad = _bounce_quad( - Z=z.reshape(1, 1, 1, -1), - w=w, - knots=knots, - B_sup_z=np.ones((1, knots.size)), - B=np.sin(knots).reshape(1, -1), - B_z_ra=np.cos(knots).reshape(1, -1), - integrand=integrand, - f=[], - pitch=np.ones((1, 1)), - method="akima", + tanh_sinh_arcsin = _bounce_quad( + z_t.reshape(1, 1, 1, -1), + w_t, + knots, + B_sup_z, + B, + B_z_ra, + integrand, + [], + pitch, + method, + ) + leg_gauss_sin = _bounce_quad( + z_g.reshape(1, 1, 1, -1), + w_g, + knots, + B_sup_z, + B, + B_z_ra, + integrand, + [], + pitch, + method, ) - np.testing.assert_allclose(bounce_quad, 2 * ellipkm1(p), rtol=1e-3) + np.testing.assert_allclose(tanh_sinh_arcsin, truth, rtol=rtol) + np.testing.assert_allclose(leg_gauss_sin, truth, rtol=rtol) @pytest.mark.unit From ff328c60613cdd47783eb84bb826f1515f814667 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 13 Apr 2024 19:23:50 -0400 Subject: [PATCH 093/241] Fix typos --- desc/compute/bounce_integral.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 0d6b4f9545..179652cf57 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -735,7 +735,7 @@ def automorphism_sin(x): When this automorphism is used as the change of variable map for the bounce integral, the Lipschitzness prevents generation of new singularities. Furthermore, its derivative vanishes like the integrand of the elliptic - integral the second kind E(φ | 1), suppressing the singularity in the + integral of the second kind E(φ | 1), suppressing the singularity in the bounce integrand. 
Therefore, this automorphism pulls the mass of the bounce integral away @@ -772,7 +772,7 @@ def grad_automorphism_sin(x): When this automorphism is used as the change of variable map for the bounce integral, the Lipschitzness prevents generation of new singularities. Furthermore, its derivative vanishes like the integrand of the elliptic - integral the second kind E(φ | 1), suppressing the singularity in the + integral of the second kind E(φ | 1), suppressing the singularity in the bounce integrand. Therefore, this automorphism pulls the mass of the bounce integral away @@ -852,7 +852,7 @@ def bounce_integral_map( The forward automorphism is composed with the affine bijection that maps the bounce points to [-1, 1]. The resulting forward map defines a change of variable for the bounce integral. The choice made for - the automorphism can augment of suppress singularities. + the automorphism can augment or suppress singularities. Keep this in mind when choosing the quadrature method. grad_automorphism : callable Derivative of the reverse automorphism, i.e. the derivative of the map From 28b2b19d1eca66cfa28ae7e06e9f6c2647deaf8b Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 14 Apr 2024 01:16:21 -0400 Subject: [PATCH 094/241] Improve automorphism test, remove unused code, update requirements --- desc/compute/bounce_integral.py | 142 +++++++--------------------- devtools/dev-requirements_conda.yml | 4 +- requirements.txt | 2 +- requirements_conda.yml | 4 +- tests/test_bounce_integral.py | 138 +++++++++------------------ 5 files changed, 88 insertions(+), 202 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 179652cf57..b4c0a5d70c 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -130,7 +130,7 @@ def root(xi): _roots = jnp.vectorize(partial(jnp.roots, strip_zeros=False), signature="(m)->(n)") -def poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): +def _poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): """Roots of polynomial with given coefficients. Parameters @@ -192,35 +192,7 @@ def poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): return r -def poly_int(c, k=None): - """Coefficients for the primitives of the given set of polynomials. - - Parameters - ---------- - c : Array - First axis should store coefficients of a polynomial. - For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, - coefficient cᵢ should be stored at ``c[n - i]``. - k : Array - Integration constants. - Should broadcast with arrays of shape(*coef.shape[1:]). - - Returns - ------- - poly : Array - Coefficients of polynomial primitive. - That is, ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁺¹, - where n is ``c.shape[0] - 1``. - - """ - if k is None: - k = jnp.broadcast_to(0.0, c.shape[1:]) - poly = (c.T / jnp.arange(c.shape[0], 0, -1)).T - poly = jnp.append(poly, k[jnp.newaxis], axis=0) - return poly - - -def poly_der(c): +def _poly_der(c): """Coefficients for the derivatives of the given set of polynomials. Parameters @@ -242,7 +214,7 @@ def poly_der(c): return poly -def poly_val(x, c): +def _poly_val(x, c): """Evaluate the set of polynomials c at the points x. 
Note that this function does not perform the same operation as @@ -374,7 +346,7 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c): """ B_c, B_z_ra_c, _ = _check_shape(knots, B_c, B_z_ra_c) S, N, degree = B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 - extrema = poly_root( + extrema = _poly_root( c=B_z_ra_c, a_min=jnp.array([0]), a_max=jnp.diff(knots), @@ -384,7 +356,7 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c): # Can detect at most degree of |B|_z_ra spline extrema between each knot. assert extrema.shape == (S, N, degree - 1) # Reshape so that last axis enumerates (unsorted) extrema along a field line. - B_extrema = poly_val(x=extrema, c=B_c[..., jnp.newaxis]).reshape(S, -1) + B_extrema = _poly_val(x=extrema, c=B_c[..., jnp.newaxis]).reshape(S, -1) # Might be useful to pad all the nan at the end rather than interspersed. B_extrema = take_mask(B_extrema, ~jnp.isnan(B_extrema)) pitch = 1 / B_extrema.T @@ -442,7 +414,7 @@ def bounce_points(knots, B_c, B_z_ra_c, pitch, check=False): # In order to be JIT compilable, this must have a shape that accommodates the # case where each polynomial intersects 1 / λ degree times. # nan values in ``intersect`` denote a polynomial has less than degree intersects. - intersect = poly_root( + intersect = _poly_root( c=B_c, # Expand to use same pitches across polynomials of a particular spline. k=jnp.expand_dims(1 / pitch, axis=-1), @@ -454,7 +426,7 @@ def bounce_points(knots, B_c, B_z_ra_c, pitch, check=False): assert intersect.shape == (P, S, N, degree) # Reshape so that last axis enumerates intersects of a pitch along a field line. - B_z_ra = poly_val(x=intersect, c=B_z_ra_c[..., jnp.newaxis]).reshape(P, S, -1) + B_z_ra = _poly_val(x=intersect, c=B_z_ra_c[..., jnp.newaxis]).reshape(P, S, -1) # Transform out of local power basis expansion. intersect = intersect + knots[:-1, jnp.newaxis] intersect = intersect.reshape(P, S, -1) @@ -593,7 +565,7 @@ def _interp1d_vec_with_df( return interp1d(xq, x, f, method, derivative, extrap, period, fx=fx) -def _bounce_quad(Z, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method): +def _bounce_quad(Z, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method="akima"): """Compute bounce quadrature for every pitch along every field line. Parameters @@ -701,29 +673,14 @@ def automorphism_arcsin(x): def grad_automorphism_arcsin(x): - """Gradient of arcsin automorphism. - - The arcsin automorphism is an expansion, so it pushes the evaluation points - of the bounce integrand toward the singular region, which may induce - floating point error. - - The gradient of the arcsin automorphism introduces a singularity that augments - the singularity in the bounce integral. Therefore, the quadrature scheme - used to evaluate the integral must work well on hypersingular integrals. - - Parameters - ---------- - x : Array - - Returns - ------- - dy_dx : Array - - """ + """Gradient of arcsin automorphism.""" dy_dx = 2 / (jnp.sqrt(1 - x**2) * jnp.pi) return dy_dx +grad_automorphism_arcsin.__doc__ += "\n" + automorphism_arcsin.__doc__ + + def automorphism_sin(x): """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. @@ -759,50 +716,21 @@ def automorphism_sin(x): def grad_automorphism_sin(x): - """Gradient of sin automorphism. - - The sin automorphism is a contraction, so it will pull the evaluation points - away from the singular region, inducing less floating point error. - - The sin automorphism is a contraction, so it pulls the evaluation points - of the bounce integrand away from the singular region, inducing less - floating point error. 
- - The derivative of the sin automorphism is Lipschitz. - When this automorphism is used as the change of variable map for the bounce - integral, the Lipschitzness prevents generation of new singularities. - Furthermore, its derivative vanishes like the integrand of the elliptic - integral of the second kind E(φ | 1), suppressing the singularity in the - bounce integrand. - - Therefore, this automorphism pulls the mass of the bounce integral away - from the singularities, which should improve convergence of the quadrature - to the principal value of the true integral, so long as the quadrature - performs better on less singular integrands. If the integral was - hypersingular to begin with, Tanh-Sinh quadrature will still work well. - Otherwise, Gauss-Legendre quadrature can outperform Tanh-Sinh. - - Parameters - ---------- - x : Array - - Returns - ------- - dy_dx : Array - - """ + """Gradient of sin automorphism.""" dy_dx = jnp.pi * jnp.cos(jnp.pi * x / 2) / 2 return dy_dx +grad_automorphism_sin.__doc__ += "\n" + automorphism_sin.__doc__ + + def bounce_integral_map( eq, rho=jnp.linspace(1e-12, 1, 10), alpha=None, knots=jnp.linspace(0, 6 * jnp.pi, 20), quad=tanh_sinh_quad, - automorphism=automorphism_sin, - grad_automorphism=grad_automorphism_sin, + automorphism=(automorphism_sin, grad_automorphism_sin), pitch=None, return_items=True, **kwargs, @@ -847,17 +775,16 @@ def bounce_integral_map( For the default choice of the automorphism below, Tanh-Sinh quadrature works well if the integrand is hypersingular. Otherwise, Gauss-Legendre quadrature can be more competitive. - automorphism : callable - The reverse automorphism of the real interval [-1, 1] defined below. - The forward automorphism is composed with the affine bijection - that maps the bounce points to [-1, 1]. The resulting forward map defines - a change of variable for the bounce integral. The choice made for - the automorphism can augment or suppress singularities. + automorphism : callable, callable + The first index should store the automorphism of the real interval + [-1, 1] defined below. The second index should store the derivative + of the map stored in the first index. + + The inverse of the supplied automorphism is composed with the affine + bijection hat maps the bounce points to [-1, 1]. The resulting map + defines a change of variable for the bounce integral. The choice made + for the automorphism can augment or suppress singularities. Keep this in mind when choosing the quadrature method. - grad_automorphism : callable - Derivative of the reverse automorphism, i.e. the derivative of the map - ``automorphism``. (Or 1 / derivative of the forward automorphism). - May be useful to use automatic differentiation. pitch : Array, shape(P, S) λ values to evaluate the bounce integral at each field line. May be specified later. @@ -869,7 +796,7 @@ def bounce_integral_map( return_items : bool Whether to return ``items`` as described below. kwargs - Can specify additional arguments to the quadrature function with kwargs. + Can specify additional arguments to the ``quad`` method with kwargs. Returns ------- @@ -883,13 +810,15 @@ def bounce_integral_map( DESC coordinate grid for the given field line coordinates. data : dict Dictionary of Arrays of stuff evaluated on ``grid``. - B.c : Array, shape(4, S, zeta.size - 1) + knots : Array, + Field line-following ζ coordinates of spline knots. + B.c : Array, shape(4, S, knots.size - 1) Polynomial coefficients of the spline of |B| in local power basis. 
First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials of the spline along a particular field line. - B_z_ra.c : Array, shape(3, S, zeta.size - 1) + B_z_ra.c : Array, shape(3, S, knots.size - 1) Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. @@ -956,10 +885,11 @@ def integrand_den(B, pitch): x, w = quad(**kwargs) # The gradient of the reverse transformation is the weight function w(x) of # the quadrature. Apply weight function for the automorphism. - w = w * grad_automorphism(x) + auto, grad_auto = automorphism + w = w * grad_auto(x) # Apply reverse automorphism change of variable to quadrature points. # Recall x = forward(_affine_bijection_forward(ζ, ζ_b₁, ζ_b₂)). - x = automorphism(x) + x = auto(x) if alpha is None: alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 10) @@ -980,7 +910,7 @@ def integrand_den(B, pitch): destination=-1, ) assert B_c.shape == (4, S, knots.size - 1) - B_z_ra_c = poly_der(B_c) + B_z_ra_c = _poly_der(B_c) assert B_z_ra_c.shape == (3, S, knots.size - 1) original = _compute_bp_if_given_pitch(knots, B_c, B_z_ra_c, pitch, check, err=False) @@ -1035,7 +965,7 @@ def bounce_integral(integrand, f, pitch=None, method="akima"): bp1, bp2, pitch = _compute_bp_if_given_pitch( knots, B_c, B_z_ra_c, pitch, check, *original, err=True ) - # # Apply affine change of variable to quadrature points. + # Apply affine change of variable to quadrature points. Z = _affine_bijection_reverse(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) if not isinstance(f, (list, tuple)): f = [f] diff --git a/devtools/dev-requirements_conda.yml b/devtools/dev-requirements_conda.yml index 331ee79904..3a2bbf89c7 100644 --- a/devtools/dev-requirements_conda.yml +++ b/devtools/dev-requirements_conda.yml @@ -13,7 +13,7 @@ dependencies: - termcolor - pip - pip: - - interpax + - interpax >= 0.3.1 - jax[cpu] >= 0.3.2, < 0.5.0 - nvgpu - plotly >= 5.16, < 6.0 @@ -23,7 +23,7 @@ dependencies: - qicna @ git+https://github.com/rogeriojorge/pyQIC/ # building the docs - - nbsphinx > 0.8.5 + - nbsphinx == 0.8.12 - pandoc - sphinx > 3.0.0 - sphinx-argparse diff --git a/requirements.txt b/requirements.txt index a667a2a2db..5ba308811b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ colorama h5py >= 3.0.0, < 4.0 -interpax +interpax >= 0.3.1 jax[cpu] >= 0.3.2, < 0.5.0 matplotlib >= 3.5.0, < 4.0.0 mpmath >= 1.0.0, < 2.0 diff --git a/requirements_conda.yml b/requirements_conda.yml index e458f03c9f..b1d90fa592 100644 --- a/requirements_conda.yml +++ b/requirements_conda.yml @@ -2,7 +2,7 @@ name: desc-env dependencies: # standard install - colorama - - h5py >= 3.0.0 + - h5py >= 3.0.0, < 4.0 - matplotlib >= 3.5.0, < 4.0.0 - mpmath >= 1.0.0, < 2.0 - netcdf4 >= 1.5.4, < 2.0 @@ -12,7 +12,7 @@ dependencies: - termcolor - pip - pip: - - interpax + - interpax >= 0.3.1 - jax[cpu] >= 0.3.2, < 0.5.0 - nvgpu - plotly >= 5.16, < 6.0 diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index acb2044060..e5e4df7c0c 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -5,7 +5,6 @@ import numpy as np import pytest -from interpax import Akima1DInterpolator from matplotlib import pyplot as plt # TODO: can use the one from interpax once .solve() is implemented @@ -18,6 +17,9 @@ _affine_bijection_reverse, _bounce_quad, 
_grad_affine_bijection_reverse, + _poly_der, + _poly_root, + _poly_val, automorphism_arcsin, automorphism_sin, bounce_integral_map, @@ -25,10 +27,6 @@ grad_automorphism_arcsin, grad_automorphism_sin, pitch_of_extrema, - poly_der, - poly_int, - poly_root, - poly_val, take_mask, tanh_sinh_quad, ) @@ -136,7 +134,7 @@ def test_poly_root(): assert np.unique(c.shape).size == c.ndim constant = np.broadcast_to(np.arange(c.shape[-1]), c.shape[1:]) constant = np.stack([constant, constant]) - root = poly_root(c, constant, sort=True) + root = _poly_root(c, constant, sort=True) for i in range(constant.shape[0]): for j in range(c.shape[1]): @@ -158,7 +156,7 @@ def test_poly_root(): [0, -6, 11, -2], ] ) - root = poly_root(c.T, sort=True, distinct=True) + root = _poly_root(c.T, sort=True, distinct=True) for j in range(c.shape[0]): unique_roots = np.unique(np.roots(c[j])) if j == 4: @@ -171,29 +169,11 @@ def test_poly_root(): ) c = np.array([0, 1, -1, -8, 12]) np.testing.assert_allclose( - actual=_filter_not_nan(poly_root(c, sort=True, distinct=True)), + actual=_filter_not_nan(_poly_root(c, sort=True, distinct=True)), desired=np.unique(np.roots(c)), ) -@pytest.mark.unit -def test_poly_int(): - """Test vectorized computation of polynomial primitive.""" - quintic = 6 - c = np.arange(-18, 18).reshape(quintic, 3, -1) * np.pi - # make sure broadcasting won't hide error in implementation - assert np.unique(c.shape).size == c.ndim - constant = np.broadcast_to(np.arange(c.shape[-1]), c.shape[1:]) - primitive = poly_int(c, k=constant) - for j in range(c.shape[1]): - for k in range(c.shape[2]): - np.testing.assert_allclose( - actual=primitive[:, j, k], - desired=np.polyint(c[:, j, k], k=constant[j, k]), - ) - assert poly_int(c).shape == primitive.shape, "Failed broadcasting default k." 
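Only the primitive helper and its test are dropped here; the derivative helper and its coefficient convention, where ``c[n - i]`` stores the coefficient of xⁱ as in ``np.polyval``, remain. A small NumPy check of that convention against ``np.polyder``; the standalone function name is illustrative rather than DESC's:

.. code-block:: python

    import numpy as np

    def poly_der_stacked(c):
        # c[i] holds the coefficient of x**(n - i), n = c.shape[0] - 1 (np.polyval order).
        n = c.shape[0] - 1
        return (c[:-1].T * np.arange(n, 0, -1)).T

    c = np.arange(-18, 18, dtype=float).reshape(6, 3, 2)  # a stack of quintics
    d = poly_der_stacked(c)
    for j in range(3):
        for k in range(2):
            np.testing.assert_allclose(d[:, j, k], np.polyder(c[:, j, k]))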
- - @pytest.mark.unit def test_poly_der(): """Test vectorized computation of polynomial derivative.""" @@ -201,7 +181,7 @@ def test_poly_der(): c = np.arange(-18, 18).reshape(quintic, 3, -1) * np.pi # make sure broadcasting won't hide error in implementation assert np.unique(c.shape).size == c.ndim - derivative = poly_der(c) + derivative = _poly_der(c) for j in range(c.shape[1]): for k in range(c.shape[2]): np.testing.assert_allclose( @@ -214,7 +194,7 @@ def test_poly_val(): """Test vectorized computation of polynomial evaluation.""" def test(x, c): - val = poly_val(x=x, c=c) + val = _poly_val(x=x, c=c) for index in np.ndindex(c.shape[1:]): idx = (..., *index) np.testing.assert_allclose( @@ -238,21 +218,6 @@ def test(x, c): assert np.unique((c.shape[0],) + x.shape[c.ndim - 1 :]).size == x.ndim - 1 test(x, c) - # integrate piecewise polynomial and set constants to preserve continuity - y = np.arange(2, 8) - y = np.arange(y.prod()).reshape(*y) - x = np.arange(y.shape[-1]) - a1d = Akima1DInterpolator(x, y, axis=-1) - primitive = poly_int(a1d.c) - # choose evaluation points at d just to match choice made in a1d.antiderivative() - d = np.diff(x) - # evaluate every spline at d - k = poly_val(x=d, c=primitive) - # don't want to use jax.ndarray.at[].add() in case jax is not installed - primitive = np.array(primitive) - primitive[-1, 1:] += np.cumsum(k, axis=-1)[:-1] - np.testing.assert_allclose(primitive, a1d.antiderivative().c) - @pytest.mark.unit def test_pitch_of_extrema(): @@ -428,12 +393,24 @@ def test_automorphism(): np.testing.assert_allclose( _affine_bijection_forward(_affine_bijection_reverse(y, a, b), a, b), y ) - np.testing.assert_allclose( - _affine_bijection_reverse(_affine_bijection_forward(y, a, b), a, b), y - ) np.testing.assert_allclose(automorphism_arcsin(automorphism_sin(y)), y) np.testing.assert_allclose(automorphism_sin(automorphism_arcsin(y)), y) + np.testing.assert_allclose( + _grad_affine_bijection_reverse(a, b), + 1 / (2 / (b - a)), + ) + np.testing.assert_allclose( + grad_automorphism_sin(y), + 1 / grad_automorphism_arcsin(automorphism_sin(y)), + atol=1e-14, + ) + np.testing.assert_allclose( + 1 / grad_automorphism_arcsin(y), + grad_automorphism_sin(automorphism_arcsin(y)), + atol=1e-14, + ) + @pytest.mark.unit def test_bounce_quad(): @@ -443,63 +420,42 @@ def test_bounce_quad(): truth = 2 * ellipkm1(p) rtol = 1e-3 - def reverse_arcsin(x, bp1, bp2): + bp1 = -np.pi / 2 + bp2 = -bp1 + knots = np.linspace(bp1, bp2, 15) + B_sup_z = np.ones((1, knots.size)) + B = np.reshape(np.sin(knots) ** 2, (1, -1)) + B_z_ra = np.sin(2 * knots).reshape(1, -1) + pitch = np.ones((1, 1)) + + def integrand(B, pitch): + return 1 / _sqrt(1 - pitch * m * B) + + def reverse_arcsin(x): return _affine_bijection_reverse(automorphism_arcsin(x), bp1, bp2) - def grad_reverse_arcsin(x, bp1, bp2): + def grad_reverse_arcsin(x): return _grad_affine_bijection_reverse(bp1, bp2) * grad_automorphism_arcsin(x) - def reverse_sin(x, bp1, bp2): + def reverse_sin(x): return _affine_bijection_reverse(automorphism_sin(x), bp1, bp2) - def grad_reverse_sin(x, bp1, bp2): + def grad_reverse_sin(x): return _grad_affine_bijection_reverse(bp1, bp2) * grad_automorphism_sin(x) - knots = np.linspace(-np.pi / 2, np.pi / 2, 10) - bp1, bp2 = knots[0], knots[-1] - B_sup_z = np.ones((1, knots.size)) - B = np.sin(knots).reshape(1, -1) - B_z_ra = np.cos(knots).reshape(1, -1) - pitch = np.ones((1, 1)) - method = "akima" - - # tanh sinh arcsin - x_t, w_t = tanh_sinh_quad(18) - w_t = w_t * grad_reverse_arcsin(x_t, bp1, bp2) - z_t = 
reverse_arcsin(x_t, bp1, bp2) - # gauss-legendre sin - x_g, w_g = np.polynomial.legendre.leggauss(16) - w_g = w_g * grad_reverse_sin(x_g, bp1, bp2) - z_g = reverse_sin(x_g, bp1, bp2) - - def integrand(B, pitch): - return 1 / _sqrt(1 - pitch * m * B**2) - + x_t, w_t = tanh_sinh_quad(18, grad_reverse_arcsin) + z_t = reverse_arcsin(x_t).reshape(1, 1, 1, -1) tanh_sinh_arcsin = _bounce_quad( - z_t.reshape(1, 1, 1, -1), - w_t, - knots, - B_sup_z, - B, - B_z_ra, - integrand, - [], - pitch, - method, + z_t, w_t, knots, B_sup_z, B, B_z_ra, integrand, [], pitch ) + np.testing.assert_allclose(tanh_sinh_arcsin, truth, rtol=rtol) + + x_g, w_g = np.polynomial.legendre.leggauss(16) + w_g = w_g * grad_reverse_sin(x_g) + z_g = reverse_sin(x_g).reshape(1, 1, 1, -1) leg_gauss_sin = _bounce_quad( - z_g.reshape(1, 1, 1, -1), - w_g, - knots, - B_sup_z, - B, - B_z_ra, - integrand, - [], - pitch, - method, + z_g, w_g, knots, B_sup_z, B, B_z_ra, integrand, [], pitch ) - np.testing.assert_allclose(tanh_sinh_arcsin, truth, rtol=rtol) np.testing.assert_allclose(leg_gauss_sin, truth, rtol=rtol) From 5a1bdc4bad37939062fcceb8cdbc5aa2094aeafe Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 14 Apr 2024 01:19:45 -0400 Subject: [PATCH 095/241] Fix test failure due to comment change --- desc/compute/bounce_integral.py | 2 +- tests/test_bounce_integral.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index b4c0a5d70c..ce35f150e0 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -888,7 +888,7 @@ def integrand_den(B, pitch): auto, grad_auto = automorphism w = w * grad_auto(x) # Apply reverse automorphism change of variable to quadrature points. - # Recall x = forward(_affine_bijection_forward(ζ, ζ_b₁, ζ_b₂)). + # Recall x = auto_forward(_affine_bijection_forward(ζ, ζ_b₁, ζ_b₂)). x = auto(x) if alpha is None: diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index e5e4df7c0c..f146f59267 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -118,7 +118,7 @@ def test_reshape_convention(): err_msg = "The ordering conventions are required for correctness." assert "P, S, N" in inspect.getsource(bounce_points), err_msg src = inspect.getsource(bounce_integral_map) - assert "S, zeta.size" in src, err_msg + assert "S, knots.size" in src, err_msg assert "pitch.shape[0], rho.size, alpha.size" in src, err_msg src = inspect.getsource(desc_grid_from_field_line_coords) assert 'indexing="ij"' in src, err_msg From 69c5134255166de42835c7d623e6fb048b1fc42b Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 14 Apr 2024 12:30:52 -0400 Subject: [PATCH 096/241] Choose better default knots for bounce integral --- desc/compute/bounce_integral.py | 15 +++++++-------- tests/test_bounce_integral.py | 14 ++++++++------ 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index ce35f150e0..dbc228de4e 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -254,7 +254,6 @@ def _poly_val(x, c): # because we expect to usually integrate up to quartic polynomials. 
X = x[..., jnp.newaxis] ** jnp.arange(c.shape[0] - 1, -1, -1) val = jnp.einsum("...i,i...->...", X, c) - assert val.ndim == max(x.ndim, c.ndim - 1) return val @@ -728,7 +727,7 @@ def bounce_integral_map( eq, rho=jnp.linspace(1e-12, 1, 10), alpha=None, - knots=jnp.linspace(0, 6 * jnp.pi, 20), + knots=jnp.linspace(-3 * jnp.pi, 3 * jnp.pi, 25), quad=tanh_sinh_quad, automorphism=(automorphism_sin, grad_automorphism_sin), pitch=None, @@ -781,7 +780,7 @@ def bounce_integral_map( of the map stored in the first index. The inverse of the supplied automorphism is composed with the affine - bijection hat maps the bounce points to [-1, 1]. The resulting map + bijection that maps the bounce points to [-1, 1]. The resulting map defines a change of variable for the bounce integral. The choice made for the automorphism can augment or suppress singularities. Keep this in mind when choosing the quadrature method. @@ -809,7 +808,7 @@ def bounce_integral_map( grid_desc : Grid DESC coordinate grid for the given field line coordinates. data : dict - Dictionary of Arrays of stuff evaluated on ``grid``. + Dictionary of Arrays of stuff evaluated on ``grid_desc``. knots : Array, Field line-following ζ coordinates of spline knots. B.c : Array, shape(4, S, knots.size - 1) @@ -884,11 +883,11 @@ def integrand_den(B, pitch): kwargs.setdefault("resolution", 19) x, w = quad(**kwargs) # The gradient of the reverse transformation is the weight function w(x) of - # the quadrature. Apply weight function for the automorphism. + # the quadrature. auto, grad_auto = automorphism w = w * grad_auto(x) - # Apply reverse automorphism change of variable to quadrature points. # Recall x = auto_forward(_affine_bijection_forward(ζ, ζ_b₁, ζ_b₂)). + # Apply reverse automorphism to quadrature points. x = auto(x) if alpha is None: @@ -932,7 +931,7 @@ def bounce_integral(integrand, f, pitch=None, method="akima"): Note that any arrays baked into the callable method should broadcast with arrays of shape(P, S, 1, 1) where P is the batch axis size of pitch, - S is the number of field lines given by rho.size * alpha.size. + S is the number of field lines given by ``rho.size * alpha.size``. f : list of Array, shape(P, items["grid_desc"].num_nodes, ) Arguments to the callable ``integrand``. These should be the functions in the integrand of the bounce integral @@ -965,7 +964,7 @@ def bounce_integral(integrand, f, pitch=None, method="akima"): bp1, bp2, pitch = _compute_bp_if_given_pitch( knots, B_c, B_z_ra_c, pitch, check, *original, err=True ) - # Apply affine change of variable to quadrature points. + # Apply affine transformation to quadrature points. 
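The ``_poly_val`` evaluation shown earlier in this diff collapses a whole stack of polynomials into a single einsum. A minimal NumPy reproduction of that evaluation and its broadcasting convention, checked against ``np.polyval`` (the standalone name is illustrative):

.. code-block:: python

    import numpy as np

    def poly_val_stacked(x, c):
        # c[i] holds the coefficient of x**(n - i); leading axes of x broadcast
        # against the trailing axes of c.
        X = x[..., np.newaxis] ** np.arange(c.shape[0] - 1, -1, -1)
        return np.einsum("...i,i...->...", X, c)

    c = np.arange(-24, 24, dtype=float).reshape(4, 3, 4)  # cubics, shape (4, S, N)
    x = np.linspace(0, 2, 12).reshape(3, 4)               # one point per polynomial
    val = poly_val_stacked(x, c)
    for j in range(3):
        for k in range(4):
            np.testing.assert_allclose(val[j, k], np.polyval(c[:, j, k], x[j, k]))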
Z = _affine_bijection_reverse(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) if not isinstance(f, (list, tuple)): f = [f] diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index f146f59267..69f39aaa0b 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -195,6 +195,8 @@ def test_poly_val(): def test(x, c): val = _poly_val(x=x, c=c) + if val.ndim != max(x.ndim, c.ndim - 1): + raise ValueError(f"Incompatible shapes {x.shape} and {c.shape}.") for index in np.ndindex(c.shape[1:]): idx = (..., *index) np.testing.assert_allclose( @@ -387,12 +389,9 @@ def test_automorphism(): a, b = -312, 786 x = np.linspace(a, b, 10) y = _affine_bijection_forward(x, a, b) - np.testing.assert_allclose( - _affine_bijection_reverse(_affine_bijection_forward(x, a, b), a, b), x - ) - np.testing.assert_allclose( - _affine_bijection_forward(_affine_bijection_reverse(y, a, b), a, b), y - ) + x_1 = _affine_bijection_reverse(y, a, b) + np.testing.assert_allclose(x_1, x) + np.testing.assert_allclose(_affine_bijection_forward(x_1, a, b), y) np.testing.assert_allclose(automorphism_arcsin(automorphism_sin(y)), y) np.testing.assert_allclose(automorphism_sin(automorphism_arcsin(y)), y) @@ -443,6 +442,7 @@ def reverse_sin(x): def grad_reverse_sin(x): return _grad_affine_bijection_reverse(bp1, bp2) * grad_automorphism_sin(x) + # augment the singularity x_t, w_t = tanh_sinh_quad(18, grad_reverse_arcsin) z_t = reverse_arcsin(x_t).reshape(1, 1, 1, -1) tanh_sinh_arcsin = _bounce_quad( @@ -450,6 +450,7 @@ def grad_reverse_sin(x): ) np.testing.assert_allclose(tanh_sinh_arcsin, truth, rtol=rtol) + # suppress the singularity x_g, w_g = np.polynomial.legendre.leggauss(16) w_g = w_g * grad_reverse_sin(x_g) z_g = reverse_sin(x_g).reshape(1, 1, 1, -1) @@ -650,6 +651,7 @@ def test_bounce_averaged_drifts(): bmag_an = B0 * (1 - epsilon * np.cos(theta_PEST)) np.testing.assert_allclose(bmag, bmag_an, atol=5e-3, rtol=5e-3) + # FIXME should x be same as epsilon? x = Lref * rho s_hat = -x / iota * shear / Lref gradpar = Lref * data_bounce["B^zeta"] / data_bounce["|B|"] From c39a0732a3c43d0bde737bcbbb1b51d68bcebdff Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 14 Apr 2024 17:35:19 -0400 Subject: [PATCH 097/241] Make bounce integration more modular so that custom grid can be used --- desc/compute/bounce_integral.py | 409 +++++++++++++++++--------------- tests/test_bounce_integral.py | 35 +-- 2 files changed, 231 insertions(+), 213 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index dbc228de4e..e572aa75fc 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -7,6 +7,7 @@ from desc.backend import complex_sqrt, flatnonzero, jnp, put_along_axis, take from desc.compute.utils import safediv from desc.equilibrium.coords import desc_grid_from_field_line_coords +from desc.utils import errorif @partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={2, 3}) @@ -288,23 +289,24 @@ def _check_shape(knots, B_c, B_z_ra_c, pitch=None): If two-dimensional, the first axis is the batch axis as usual. """ + errorif(knots.ndim != 1) if B_c.ndim == 2 and B_z_ra_c.ndim == 2: # Add axis which enumerates field lines. B_c = B_c[:, jnp.newaxis] B_z_ra_c = B_z_ra_c[:, jnp.newaxis] - err_msg = "Supplied invalid shape for splines." 
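The shape checks rewritten in this hunk guard the convention that ``B_c`` holds cubic coefficients in the local power basis with shape (degree + 1, S, knots.size - 1). Per field line, that is the layout SciPy's piecewise polynomials expose, as this small check of a Hermite spline of a toy |B| profile shows (the profile itself is made up):

.. code-block:: python

    import numpy as np
    from scipy.interpolate import CubicHermiteSpline

    knots = np.linspace(0, 2 * np.pi, 8)
    B = 1 + 0.5 * np.cos(knots)   # toy |B| along a field line
    B_z = -0.5 * np.sin(knots)    # its derivative at the knots
    spline = CubicHermiteSpline(knots, B, B_z)
    c = spline.c                  # shape (4, knots.size - 1), local power basis
    j = 3                         # pick one interval
    z = 0.5 * (knots[j] + knots[j + 1])
    # Evaluate in the local basis: sum_i c[i, j] * (z - knots[j])**(3 - i).
    np.testing.assert_allclose(np.polyval(c[:, j], z - knots[j]), spline(z))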
- assert B_c.ndim == B_z_ra_c.ndim == 3, err_msg - assert ( - B_c.shape[0] - 1 == B_z_ra_c.shape[0] and B_c.shape[1:] == B_z_ra_c.shape[1:] - ), err_msg - assert ( - B_c.shape[-1] == knots.size - 1 - ), "Last axis fails to enumerate spline polynomials." + msg = "Supplied invalid shape for splines." + errorif(not (B_c.ndim == B_z_ra_c.ndim == 3), msg=msg) + errorif(B_c.shape[0] - 1 != B_z_ra_c.shape[0], msg=msg) + errorif(B_c.shape[1:] != B_z_ra_c.shape[1:], msg=msg) + errorif( + B_c.shape[-1] != knots.size - 1, + msg="Last axis fails to enumerate spline polynomials.", + ) if pitch is not None: pitch = jnp.atleast_2d(pitch) - err_msg = "Supplied invalid shape for pitch angles." - assert pitch.ndim == 2, err_msg - assert pitch.shape[-1] == 1 or pitch.shape[-1] == B_c.shape[1], err_msg + msg = "Supplied invalid shape for pitch angles." + errorif(pitch.ndim != 2, msg=msg) + errorif(pitch.shape[-1] != 1 and pitch.shape[-1] != B_c.shape[1], msg=msg) return B_c, B_z_ra_c, pitch @@ -455,13 +457,16 @@ def bounce_points(knots, B_c, B_z_ra_c, pitch, check=False): bp2 = take_mask(intersect, is_bp2) if check: - if jnp.any(bp1 > bp2): - raise AssertionError("Bounce points have an inversion.") - if jnp.any(bp1[..., 1:] < bp2[..., :-1]): - raise AssertionError( - "Discontinuity detected. Is B_z_ra the derivative of the spline of B?" - ) - + errorif( + jnp.any(bp1 > bp2), + AssertionError, + "Bounce points have an inversion. Maybe create an issue on GitHub.", + ) + errorif( + jnp.any(bp1[..., 1:] < bp2[..., :-1]), + AssertionError, + "Discontinuity detected. Is B_z_ra the derivative of the spline of B?", + ) return bp1, bp2 # This is no longer implemented at the moment. # If the first intersect is at a non-negative derivative, that particle @@ -480,26 +485,98 @@ def bounce_points(knots, B_c, B_z_ra_c, pitch, check=False): # satisfied, the quadrature will evaluate √(1 − λ |B|) as nan automatically. -def _compute_bp_if_given_pitch( - knots, B_c, B_z_ra_c, pitch, check, *original, err=False -): - """Conditionally return the ingredients needed to compute bounce integrals. +def _affine_bijection_forward(x, a, b): + """[a, b] ∋ x ↦ y ∈ [−1, 1].""" + y = 2 * (x - a) / (b - a) - 1 + return y + + +def _affine_bijection_reverse(x, a, b): + """[−1, 1] ∋ x ↦ y ∈ [a, b].""" + y = (x + 1) / 2 * (b - a) + a + return y + + +def _grad_affine_bijection_reverse(a, b): + """Gradient of reverse affine bijection.""" + dy_dx = (b - a) / 2 + return dy_dx + + +def automorphism_arcsin(x): + """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. + + The arcsin automorphism is an expansion, so it pushes the evaluation points + of the bounce integrand toward the singular region, which may induce + floating point error. + + The gradient of the arcsin automorphism introduces a singularity that augments + the singularity in the bounce integral. Therefore, the quadrature scheme + used to evaluate the integral must work well on hypersingular integrals. Parameters ---------- - original : tuple - Whatever this method returned earlier. - err : bool - Whether to raise an error if ``pitch`` is None and ``original`` is empty. 
+ x : Array + + Returns + ------- + y : Array """ - if pitch is None: - if err and not original: - raise ValueError("No pitch values were given.") - return original - else: - pitch = jnp.atleast_2d(pitch) - return *bounce_points(knots, B_c, B_z_ra_c, pitch, check), pitch + y = 2 * jnp.arcsin(x) / jnp.pi + return y + + +def grad_automorphism_arcsin(x): + """Gradient of arcsin automorphism.""" + dy_dx = 2 / (jnp.sqrt(1 - x**2) * jnp.pi) + return dy_dx + + +grad_automorphism_arcsin.__doc__ += "\n" + automorphism_arcsin.__doc__ + + +def automorphism_sin(x): + """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. + + The sin automorphism is a contraction, so it pulls the evaluation points + of the bounce integrand away from the singular region, inducing less + floating point error. + + The derivative of the sin automorphism is Lipschitz. + When this automorphism is used as the change of variable map for the bounce + integral, the Lipschitzness prevents generation of new singularities. + Furthermore, its derivative vanishes like the integrand of the elliptic + integral of the second kind E(φ | 1), suppressing the singularity in the + bounce integrand. + + Therefore, this automorphism pulls the mass of the bounce integral away + from the singularities, which should improve convergence of the quadrature + to the principal value of the true integral, so long as the quadrature + performs better on less singular integrands. If the integral was + hypersingular to begin with, Tanh-Sinh quadrature will still work well. + Otherwise, Gauss-Legendre quadrature can outperform Tanh-Sinh. + + Parameters + ---------- + x : Array + + Returns + ------- + y : Array + + """ + y = jnp.sin(jnp.pi * x / 2) + return y + + +def grad_automorphism_sin(x): + """Gradient of sin automorphism.""" + dy_dx = jnp.pi * jnp.cos(jnp.pi * x / 2) / 2 + return dy_dx + + +grad_automorphism_sin.__doc__ += "\n" + automorphism_sin.__doc__ def tanh_sinh_quad(resolution, w=lambda x: 1): @@ -539,6 +616,46 @@ def tanh_sinh_quad(resolution, w=lambda x: 1): return x, W +bounce_docstring = """w : Array, shape(w.size, ) + Quadrature weights. + knots : Array, shape(knots.size, ) + Field line-following ζ coordinates of spline knots. + B_sup_z : Array, shape(S, knots.size, ) + Contravariant field-line following toroidal component of magnetic field. + B : Array, shape(S, knots.size, ) + Norm of magnetic field. + B_z_ra : Array, shape(S, knots.size, ) + Norm of magnetic field derivative with respect to field-line following label. + integrand : callable + This callable is the composition operator on the set of functions in ``f`` + that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. + It should accept the items in ``f`` as arguments as well as two additional + keyword arguments: ``B``, and ``pitch``. A quadrature will be performed to + approximate the bounce integral of ``integrand(*f, B=B, pitch=pitch)``. + Note that any arrays baked into the callable method should broadcast + with arrays of shape(P, S, 1, 1) where + P is the batch axis size of pitch. + S is the number of field lines. + f : iterable of Array, shape(P, S, knots.size, ) + Arguments to the callable ``integrand``. + These should be the functions in the integrand of the bounce integral + evaluated (or interpolated to) the nodes of the returned desc + coordinate grid. + pitch : Array, shape(P, S) + λ values to evaluate the bounce integral at each field line. + Last axis enumerates the λ value for a particular field line parameterized + by ρ, α. 
That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` + where in the latter the labels (ρ, α) are interpreted as index into the + last axis that corresponds to that field line. + The first axis is the batch axis as usual. + method : str + Method of interpolation for functions contained in ``f``. + See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. + + """ +delimiter = "Returns" + + _interp1d_vec = jnp.vectorize( interp1d, signature="(m),(n),(n)->(m)", @@ -564,44 +681,15 @@ def _interp1d_vec_with_df( return interp1d(xq, x, f, method, derivative, extrap, period, fx=fx) -def _bounce_quad(Z, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method="akima"): - """Compute bounce quadrature for every pitch along every field line. +def _interpolating_quadrature( + Z, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method +): + """Interpolate given functions to points Z and perform quadrature. Parameters ---------- Z : Array, shape(P, S, Z.shape[2], w.size) Quadrature points at field line-following ζ coordinates. - w : Array, shape(w.size, ) - Quadrature weights. - knots : Array, shape(knots.size, ) - Field line-following ζ coordinates of spline knots. - B_sup_z : Array, shape(S, knots.size, ) - Contravariant field-line following toroidal component of magnetic field. - B : Array, shape(S, knots.size, ) - Norm of magnetic field. - B_z_ra : Array, shape(S, knots.size, ) - Norm of magnetic field derivative with respect to field-line following label. - integrand : callable - This callable is the composition operator on the set of functions in ``f`` - that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. - It should accept the items in ``f`` as arguments as well as two additional - keyword arguments: ``B``, and ``pitch``. A quadrature will be performed to - approximate the bounce integral of ``integrand(*f, B=B, pitch=pitch)``. - Note that any arrays baked into the callable method should broadcast - with arrays of shape(P, S, 1, 1). - f : iterable of Array, shape(P, S, knots.size, ) - Arguments to the callable ``integrand``. - These should be the functions in the integrand of the bounce integral - evaluated (or interpolated to) the nodes of the returned desc - coordinate grid. - All items in the list should be two-dimensional. The first axis of - that item is interpreted as the batch axis, which enumerates the - evaluation of the function at particular pitch values. - pitch : Array, shape(P, S) - λ values. - method : str - Method of interpolation for functions contained in ``f``. - See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. Returns ------- @@ -629,98 +717,65 @@ def _bounce_quad(Z, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method="a return inner_product -def _affine_bijection_forward(x, a, b): - """[a, b] ∋ x ↦ y ∈ [−1, 1].""" - y = 2 * (x - a) / (b - a) - 1 - return y - - -def _affine_bijection_reverse(x, a, b): - """[−1, 1] ∋ x ↦ y ∈ [a, b].""" - y = (x + 1) / 2 * (b - a) + a - return y - - -def _grad_affine_bijection_reverse(a, b): - """Gradient of reverse affine bijection.""" - dy_dx = (b - a) / 2 - return dy_dx - - -def automorphism_arcsin(x): - """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. - - The arcsin automorphism is an expansion, so it pushes the evaluation points - of the bounce integrand toward the singular region, which may induce - floating point error. - - The gradient of the arcsin automorphism introduces a singularity that augments - the singularity in the bounce integral. 
Therefore, the quadrature scheme - used to evaluate the integral must work well on hypersingular integrals. - - Parameters - ---------- - x : Array - - Returns - ------- - y : Array - - """ - y = 2 * jnp.arcsin(x) / jnp.pi - return y - - -def grad_automorphism_arcsin(x): - """Gradient of arcsin automorphism.""" - dy_dx = 2 / (jnp.sqrt(1 - x**2) * jnp.pi) - return dy_dx - - -grad_automorphism_arcsin.__doc__ += "\n" + automorphism_arcsin.__doc__ - +_interpolating_quadrature.__doc__ = _interpolating_quadrature.__doc__.replace( + delimiter, bounce_docstring + delimiter, 1 +) -def automorphism_sin(x): - """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. - The sin automorphism is a contraction, so it pulls the evaluation points - of the bounce integrand away from the singular region, inducing less - floating point error. - - The derivative of the sin automorphism is Lipschitz. - When this automorphism is used as the change of variable map for the bounce - integral, the Lipschitzness prevents generation of new singularities. - Furthermore, its derivative vanishes like the integrand of the elliptic - integral of the second kind E(φ | 1), suppressing the singularity in the - bounce integrand. - - Therefore, this automorphism pulls the mass of the bounce integral away - from the singularities, which should improve convergence of the quadrature - to the principal value of the true integral, so long as the quadrature - performs better on less singular integrands. If the integral was - hypersingular to begin with, Tanh-Sinh quadrature will still work well. - Otherwise, Gauss-Legendre quadrature can outperform Tanh-Sinh. +def _bounce_quadrature( + bp1, bp2, x, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch=None, method="akima" +): + """Bounce integrate ∫ f(ℓ) dℓ. Parameters ---------- - x : Array + bp1, bp2 : Array, Array, shape(P, S, -1) + The field line-following ζ coordinates of bounce points for a given pitch + along a field line. The pairs bp1[i, j, k] and bp2[i, j, k] form left + and right integration boundaries, respectively, for the bounce integrals. + x : Array, shape(w.size, ) + Quadrature points in [-1, 1]. Returns ------- - y : Array + result : Array, shape(P, S, -1) + First axis enumerates pitch values. Second axis enumerates the field + lines. Last axis enumerates the bounce integrals. """ - y = jnp.sin(jnp.pi * x / 2) - return y + errorif(x.ndim != 1 or x.shape != w.shape) + errorif(bp1.ndim != 3 or bp1.shape != bp2.shape) + pitch = jnp.atleast_2d(pitch) + + S = B.shape[0] + if not isinstance(f, (list, tuple)): + f = [f] + + def _group_grid_data_by_field_line(g): + errorif( + g.ndim > 2, + ValueError, + "Should have at most two dimensions, in which case the first axis" + " is interpreted as the batch axis, which enumerates the evaluation" + " of the function at particular pitch values.", + ) + return g.reshape(-1, S, knots.size) + f = map(_group_grid_data_by_field_line, f) -def grad_automorphism_sin(x): - """Gradient of sin automorphism.""" - dy_dx = jnp.pi * jnp.cos(jnp.pi * x / 2) / 2 - return dy_dx + # Apply affine transformation to quadrature points. + Z = _affine_bijection_reverse(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) + # Integrate and complete the change of variable. 
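+    # The caller is expected to have already folded the gradient of the chosen
+    # automorphism into the quadrature weights w (bounce_integral_map does this
+    # via w = w * grad_auto(x)), so only the Jacobian of the affine map,
+    # (ζ_b₂ − ζ_b₁) / 2, is applied below through _grad_affine_bijection_reverse.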
+ result = _interpolating_quadrature( + Z, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method + ) * _grad_affine_bijection_reverse(bp1, bp2) + assert result.shape == (pitch.shape[0], S, bp1.shape[-1]) + return result -grad_automorphism_sin.__doc__ += "\n" + automorphism_sin.__doc__ +_bounce_quadrature.__doc__ = _bounce_quadrature.__doc__.replace( + delimiter, bounce_docstring + delimiter, 1 +) def bounce_integral_map( @@ -730,7 +785,6 @@ def bounce_integral_map( knots=jnp.linspace(-3 * jnp.pi, 3 * jnp.pi, 25), quad=tanh_sinh_quad, automorphism=(automorphism_sin, grad_automorphism_sin), - pitch=None, return_items=True, **kwargs, ): @@ -775,23 +829,13 @@ def bounce_integral_map( Tanh-Sinh quadrature works well if the integrand is hypersingular. Otherwise, Gauss-Legendre quadrature can be more competitive. automorphism : callable, callable - The first index should store the automorphism of the real interval - [-1, 1] defined below. The second index should store the derivative - of the map stored in the first index. - + The first callable should be an automorphism of the real interval [-1, 1]. + The second callable should be the derivative of the first. The inverse of the supplied automorphism is composed with the affine bijection that maps the bounce points to [-1, 1]. The resulting map defines a change of variable for the bounce integral. The choice made for the automorphism can augment or suppress singularities. Keep this in mind when choosing the quadrature method. - pitch : Array, shape(P, S) - λ values to evaluate the bounce integral at each field line. - May be specified later. - Last axis enumerates the λ value for a particular field line parameterized - by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` - where in the latter the labels (ρ, α) are interpreted as index into the - last axis that corresponds to that field line. - If two-dimensional, the first axis is the batch axis as usual. return_items : bool Whether to return ``items`` as described below. kwargs @@ -801,7 +845,7 @@ def bounce_integral_map( ------- bounce_integral : callable This callable method computes the bounce integral ∫ f(ℓ) dℓ for every - specified field line ℓ (constant rho and alpha), for every λ value in ``pitch``. + specified field line ℓ (constant rho, alpha), for every λ value in ``pitch``. items : dict grid_fl : Grid Clebsch-Type field-line coordinate grid. @@ -874,7 +918,6 @@ def integrand_den(B, pitch): # You should filter out these nan values when computing stuff. average_sum_over_field_line = jnp.nansum(average, axis=-1) print(average_sum_over_field_line) - assert not jnp.allclose(average_sum_over_field_line, 0) """ check = kwargs.pop("check", False) @@ -882,8 +925,7 @@ def integrand_den(B, pitch): if quad == tanh_sinh_quad: kwargs.setdefault("resolution", 19) x, w = quad(**kwargs) - # The gradient of the reverse transformation is the weight function w(x) of - # the quadrature. + # The gradient of the transformation is the weight function w(x) of the integral. auto, grad_auto = automorphism w = w * grad_auto(x) # Recall x = auto_forward(_affine_bijection_forward(ζ, ζ_b₁, ζ_b₂)). @@ -898,11 +940,13 @@ def integrand_den(B, pitch): # number of field lines or splines S = rho.size * alpha.size + # Compute |B| and group data along field lines. 
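+    # The field-line coordinate grid orders its nodes as (ρ, α, ζ) with ζ varying
+    # fastest, so reshaping the flattened data to (S, knots.size) groups the
+    # samples by field line, where S = rho.size * alpha.size as defined above.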
grid_fl, grid_desc, data = desc_grid_from_field_line_coords(eq, rho, alpha, knots) data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid_desc, data=data) B_sup_z = data["B^zeta"].reshape(S, knots.size) B = data["|B|"].reshape(S, knots.size) / normalize B_z_ra = data["|B|_z|r,a"].reshape(S, knots.size) / normalize + # Compute spline of |B| along field lines. B_c = jnp.moveaxis( CubicHermiteSpline(knots, B, B_z_ra, axis=-1, check=check).c, source=1, @@ -911,13 +955,8 @@ def integrand_den(B, pitch): assert B_c.shape == (4, S, knots.size - 1) B_z_ra_c = _poly_der(B_c) assert B_z_ra_c.shape == (3, S, knots.size - 1) - original = _compute_bp_if_given_pitch(knots, B_c, B_z_ra_c, pitch, check, err=False) - - def _group_grid_data_by_field_line(f): - assert f.ndim <= 2, "See the docstring below." - return f.reshape(-1, S, knots.size) - def bounce_integral(integrand, f, pitch=None, method="akima"): + def bounce_integral(integrand, f, pitch, method="akima"): """Bounce integrate ∫ f(ℓ) dℓ. Parameters @@ -930,19 +969,18 @@ def bounce_integral(integrand, f, pitch=None, method="akima"): approximate the bounce integral of ``integrand(*f, B=B, pitch=pitch)``. Note that any arrays baked into the callable method should broadcast with arrays of shape(P, S, 1, 1) where - P is the batch axis size of pitch, - S is the number of field lines given by ``rho.size * alpha.size``. - f : list of Array, shape(P, items["grid_desc"].num_nodes, ) + P is the batch axis size of pitch. + S is the number of field lines. + f : iterable of Array, shape(P, items["grid_desc"].num_nodes, ) Arguments to the callable ``integrand``. These should be the functions in the integrand of the bounce integral evaluated (or interpolated to) the nodes of the returned desc coordinate grid. - If an item in the list is two-dimensional, the first axis of that - item is interpreted as the batch axis, which enumerates the - evaluation of the function at particular pitch values. + Should have at most two dimensions, in which case the first axis + is interpreted as the batch axis, which enumerates the evaluation + of the function at particular pitch values. pitch : Array, shape(P, S) λ values to evaluate the bounce integral at each field line. - If None, uses the values given to the parent function. Last axis enumerates the λ value for a particular field line parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` where in the latter the labels (ρ, α) are interpreted as index into the @@ -955,25 +993,16 @@ def bounce_integral(integrand, f, pitch=None, method="akima"): Returns ------- - result : Array, shape(P, S, (zeta.size - 1) * 3) - First axis enumerates pitch values. - Second axis enumerates the field lines. - Last axis enumerates the bounce integrals. + result : Array, shape(P, S, (knots.size - 1) * 3) + First axis enumerates pitch values. Second axis enumerates the field + lines. Last axis enumerates the bounce integrals. """ - bp1, bp2, pitch = _compute_bp_if_given_pitch( - knots, B_c, B_z_ra_c, pitch, check, *original, err=True + bp1, bp2 = bounce_points(knots, B_c, B_z_ra_c, pitch, check) + result = _bounce_quadrature( + bp1, bp2, x, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method ) - # Apply affine transformation to quadrature points. - Z = _affine_bijection_reverse(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) - if not isinstance(f, (list, tuple)): - f = [f] - f = map(_group_grid_data_by_field_line, f) - # Integrate and complete the change of variable. 
- result = _bounce_quad( - Z, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method - ) * _grad_affine_bijection_reverse(bp1, bp2) - assert result.shape == (pitch.shape[0], S, (knots.size - 1) * 3) + assert result.shape[-1] == (knots.size - 1) * 3 return result if return_items: diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 69f39aaa0b..72c301fc4c 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -15,7 +15,7 @@ from desc.compute.bounce_integral import ( _affine_bijection_forward, _affine_bijection_reverse, - _bounce_quad, + _bounce_quadrature, _grad_affine_bijection_reverse, _poly_der, _poly_root, @@ -412,7 +412,7 @@ def test_automorphism(): @pytest.mark.unit -def test_bounce_quad(): +def test_bounce_quadrature(): """Test principal value of bounce integral matches elliptic integral.""" p = 1e-3 m = 1 - p @@ -422,6 +422,8 @@ def test_bounce_quad(): bp1 = -np.pi / 2 bp2 = -bp1 knots = np.linspace(bp1, bp2, 15) + bp1 = np.atleast_3d(bp1) + bp2 = np.atleast_3d(bp2) B_sup_z = np.ones((1, knots.size)) B = np.reshape(np.sin(knots) ** 2, (1, -1)) B_z_ra = np.sin(2 * knots).reshape(1, -1) @@ -430,32 +432,20 @@ def test_bounce_quad(): def integrand(B, pitch): return 1 / _sqrt(1 - pitch * m * B) - def reverse_arcsin(x): - return _affine_bijection_reverse(automorphism_arcsin(x), bp1, bp2) - - def grad_reverse_arcsin(x): - return _grad_affine_bijection_reverse(bp1, bp2) * grad_automorphism_arcsin(x) - - def reverse_sin(x): - return _affine_bijection_reverse(automorphism_sin(x), bp1, bp2) - - def grad_reverse_sin(x): - return _grad_affine_bijection_reverse(bp1, bp2) * grad_automorphism_sin(x) - # augment the singularity - x_t, w_t = tanh_sinh_quad(18, grad_reverse_arcsin) - z_t = reverse_arcsin(x_t).reshape(1, 1, 1, -1) - tanh_sinh_arcsin = _bounce_quad( - z_t, w_t, knots, B_sup_z, B, B_z_ra, integrand, [], pitch + x_t, w_t = tanh_sinh_quad(18, grad_automorphism_arcsin) + z_t = automorphism_arcsin(x_t) + tanh_sinh_arcsin = _bounce_quadrature( + bp1, bp2, z_t, w_t, knots, B_sup_z, B, B_z_ra, integrand, [], pitch ) np.testing.assert_allclose(tanh_sinh_arcsin, truth, rtol=rtol) # suppress the singularity x_g, w_g = np.polynomial.legendre.leggauss(16) - w_g = w_g * grad_reverse_sin(x_g) - z_g = reverse_sin(x_g).reshape(1, 1, 1, -1) - leg_gauss_sin = _bounce_quad( - z_g, w_g, knots, B_sup_z, B, B_z_ra, integrand, [], pitch + w_g = w_g * grad_automorphism_sin(x_g) + z_g = automorphism_sin(x_g) + leg_gauss_sin = _bounce_quadrature( + bp1, bp2, z_g, w_g, knots, B_sup_z, B, B_z_ra, integrand, [], pitch ) np.testing.assert_allclose(leg_gauss_sin, truth, rtol=rtol) @@ -508,7 +498,6 @@ def integrand_den(B, pitch): # You should filter out these nan values when computing stuff. 
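     # np.nansum does exactly that: nan entries mark bounce integrals that do not
     # exist for a given pitch and field line, and they are ignored in the sum.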
average_sum_over_field_line = np.nansum(average, axis=-1) print(average_sum_over_field_line) - assert not np.allclose(average_sum_over_field_line, 0) # @pytest.mark.unit From e681c22914027304b1c82cf5f278cf454f57f267 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 14 Apr 2024 17:53:11 -0400 Subject: [PATCH 098/241] Make variables in bounce_integral.py private --- desc/compute/bounce_integral.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index e572aa75fc..4606bb955c 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -616,7 +616,7 @@ def tanh_sinh_quad(resolution, w=lambda x: 1): return x, W -bounce_docstring = """w : Array, shape(w.size, ) +_repeated_docstring = """w : Array, shape(w.size, ) Quadrature weights. knots : Array, shape(knots.size, ) Field line-following ζ coordinates of spline knots. @@ -653,7 +653,7 @@ def tanh_sinh_quad(resolution, w=lambda x: 1): See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. """ -delimiter = "Returns" +_delimiter = "Returns" _interp1d_vec = jnp.vectorize( @@ -694,7 +694,7 @@ def _interpolating_quadrature( Returns ------- inner_product : Array, shape(Z.shape[:-1]) - Bounce quadrature for every pitch along every field line. + Quadrature for every pitch along every field line. """ assert pitch.ndim == 2 @@ -718,7 +718,7 @@ def _interpolating_quadrature( _interpolating_quadrature.__doc__ = _interpolating_quadrature.__doc__.replace( - delimiter, bounce_docstring + delimiter, 1 + _delimiter, _repeated_docstring + _delimiter, 1 ) @@ -774,7 +774,7 @@ def _group_grid_data_by_field_line(g): _bounce_quadrature.__doc__ = _bounce_quadrature.__doc__.replace( - delimiter, bounce_docstring + delimiter, 1 + _delimiter, _repeated_docstring + _delimiter, 1 ) From df418692ce1fe1f4bdbb916c126a7647f3cdf121 Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 15 Apr 2024 23:31:48 -0400 Subject: [PATCH 099/241] Use map_coords in desc_grid_from_field_line_coords now that it uses the better initial guess that desc_grid_from_field_line_coords used. Fix bug in compute_theta_coords where all axis are squeezed out from theta. Make bounce_quadrature test more robust. --- desc/compute/bounce_integral.py | 21 +++----- desc/equilibrium/coords.py | 89 ++++++++------------------------- tests/test_bounce_integral.py | 25 +++++---- 3 files changed, 44 insertions(+), 91 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 4606bb955c..8451d3f59c 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -762,8 +762,6 @@ def _group_grid_data_by_field_line(g): return g.reshape(-1, S, knots.size) f = map(_group_grid_data_by_field_line, f) - - # Apply affine transformation to quadrature points. Z = _affine_bijection_reverse(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) # Integrate and complete the change of variable. result = _interpolating_quadrature( @@ -780,7 +778,7 @@ def _group_grid_data_by_field_line(g): def bounce_integral_map( eq, - rho=jnp.linspace(1e-12, 1, 10), + rho=jnp.linspace(1e-12, 1, 5), alpha=None, knots=jnp.linspace(-3 * jnp.pi, 3 * jnp.pi, 25), quad=tanh_sinh_quad, @@ -847,12 +845,10 @@ def bounce_integral_map( This callable method computes the bounce integral ∫ f(ℓ) dℓ for every specified field line ℓ (constant rho, alpha), for every λ value in ``pitch``. items : dict - grid_fl : Grid - Clebsch-Type field-line coordinate grid. 
grid_desc : Grid DESC coordinate grid for the given field line coordinates. - data : dict - Dictionary of Arrays of stuff evaluated on ``grid_desc``. + grid_fl : Grid + Clebsch-Type field-line coordinate grid. knots : Array, Field line-following ζ coordinates of spline knots. B.c : Array, shape(4, S, knots.size - 1) @@ -895,7 +891,7 @@ def integrand_den(B, pitch): bounce_integral, items = bounce_integral_map(eq, rho, alpha, knots) - g_zz = eq.compute("g_zz", grid=items["grid_desc"], data=items["data"])["g_zz"] + g_zz = eq.compute("g_zz", grid=items["grid_desc"])["g_zz"] pitch = pitch_of_extrema(knots, items["B.c"], items["B_z_ra.c"]) num = bounce_integral(integrand_num, g_zz, pitch) den = bounce_integral(integrand_den, [], pitch) @@ -941,8 +937,8 @@ def integrand_den(B, pitch): S = rho.size * alpha.size # Compute |B| and group data along field lines. - grid_fl, grid_desc, data = desc_grid_from_field_line_coords(eq, rho, alpha, knots) - data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid_desc, data=data) + grid_desc, grid_fl = desc_grid_from_field_line_coords(eq, rho, alpha, knots) + data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid_desc) B_sup_z = data["B^zeta"].reshape(S, knots.size) B = data["|B|"].reshape(S, knots.size) / normalize B_z_ra = data["|B|_z|r,a"].reshape(S, knots.size) / normalize @@ -971,7 +967,7 @@ def bounce_integral(integrand, f, pitch, method="akima"): with arrays of shape(P, S, 1, 1) where P is the batch axis size of pitch. S is the number of field lines. - f : iterable of Array, shape(P, items["grid_desc"].num_nodes, ) + f : list of Array, shape(P, items["grid_desc"].num_nodes, ) Arguments to the callable ``integrand``. These should be the functions in the integrand of the bounce integral evaluated (or interpolated to) the nodes of the returned desc @@ -1007,9 +1003,8 @@ def bounce_integral(integrand, f, pitch, method="akima"): if return_items: items = { - "grid_fl": grid_fl, "grid_desc": grid_desc, - "data": data, + "grid_fl": grid_fl, "knots": knots, "B.c": B_c, "B_z_ra.c": B_z_ra_c, diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index d435792746..5aacb5bf57 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -199,6 +199,7 @@ def _initial_guess_heuristic(yk, coords, inbasis, eq, profiles): elif poloidal == "alpha": alpha = coords[:, inbasis.index("alpha")] iota = profiles["iota"](rho) + # why not alpha % (2 * jnp.pi) + iota * zeta % (user supplied period)? theta = (alpha + iota * zeta) % (2 * jnp.pi) yk = jnp.array([rho, theta, zeta]).T @@ -300,7 +301,7 @@ def fixup(x, *args): ) theta_DESC, (res, niter) = vecroot(theta_star, theta_star, rho, zeta) - nodes = jnp.array([rho, theta_DESC.squeeze(), zeta]).T + nodes = jnp.array([rho, jnp.atleast_1d(theta_DESC.squeeze()), zeta]).T out = nodes if full_output: @@ -312,7 +313,7 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): """Return DESC coordinate grid from given Clebsch-Type field-line coordinates. Create a meshgrid from the given field line coordinates, - and transform this to a meshgrid in DESC coordinates. + and return the equivalent DESC coordinate grid. Parameters ---------- @@ -323,90 +324,42 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): alpha : ndarray Unique field line label coordinates over a constant rho surface. zeta : ndarray - Unique Field line-following ζ coordinates. + Unique field line-following ζ coordinates. Returns ------- - grid_fl : Grid - Clebsch-Type field-line coordinate grid. 
grid_desc : Grid DESC coordinate grid for the given field line coordinates. - data : dict - Some flux surface quantities that may be more accurate than what - can be computed on the returned grid. + grid_fl : Grid + Clebsch-Type field-line coordinate grid. """ - - def unique_idx(a_size, b_size, c_size): - labels = ["rho", "theta", "zeta"] - return { - f"_unique_{label}_idx": idx - for label, idx in zip(labels, meshgrid_unique_idx(a_size, b_size, c_size)) - } - - def inverse_idx(a_size, b_size, c_size): - labels = ["rho", "theta", "zeta"] - return { - f"_inverse_{label}_idx": idx - for label, idx in zip(labels, meshgrid_inverse_idx(a_size, b_size, c_size)) - } - r, a, z_fl = map(jnp.ravel, jnp.meshgrid(rho, alpha, zeta, indexing="ij")) + coords_fl = jnp.column_stack([r, a, z_fl]) + _unique_rho_idx = meshgrid_unique_idx(rho.size, alpha.size, zeta.size)[0] + _inverse_rho_idx = meshgrid_inverse_idx(rho.size, alpha.size, zeta.size)[0] grid_fl = Grid( - nodes=jnp.column_stack([r, a, z_fl]), + nodes=coords_fl, sort=False, jitable=True, - **unique_idx(rho.size, alpha.size, zeta.size), - **inverse_idx(rho.size, alpha.size, zeta.size), - ) - # The rotational transform can be computed apriori to the coordinate - # transformation because it is a single variable function of the flux surface - # label rho, and the coordinate mapping does not change rho. Once it is known, - # we can compute the straight field-line poloidal angle theta_PEST from the - # field-line label alpha. Then we transform from straight field-line coordinates - # to DESC coordinates with the root-finding method ``compute_theta_coords``. - - # Choose nodes such that even spacing will yield correct flux surface integrals. - t = jnp.linspace(0, 2 * jnp.pi, 2 * eq.M_grid + 1, endpoint=False) - z = jnp.linspace(0, 2 * jnp.pi / eq.NFP, 2 * eq.N_grid + 1, endpoint=False) - nodes = jnp.column_stack( - tuple(map(jnp.ravel, jnp.meshgrid(rho, t, z, indexing="ij"))) - ) - spacing = jnp.ones(rho.size * t.size * z.size)[:, jnp.newaxis] * jnp.array( - [1 / rho.size, 2 * jnp.pi / t.size, 2 * jnp.pi / z.size] + _unique_rho_idx=_unique_rho_idx, + _inverse_rho_idx=_inverse_rho_idx, ) - grid_iota = Grid( - nodes, - spacing=spacing, - sort=False, - jitable=True, - **unique_idx(rho.size, t.size, z.size), - **inverse_idx(rho.size, t.size, z.size), + coords_desc = map_coordinates( + eq, + coords_fl, + inbasis=("rho", "alpha", "zeta"), + outbasis=("rho", "theta", "zeta"), + period=(np.inf, 2 * np.pi, np.inf), ) - # We only need to compute the rotational transform to transform to straight - # field-line coordinates. However, it is a good idea to compute other flux - # surface quantities on this grid because the DESC coordinates corresponding - # to the given field line coordinates may not be uniformly distributed over - # flux surfaces. This would make quadratures performed over flux surfaces - # on the returned DESC grid inaccurate. 
- data_iota = eq.compute(names=["iota", "iota_r"], grid=grid_iota) - data = { - d: grid_fl.expand(grid_iota.compress(data_iota[d])) - for d in data_iota - if data_index["desc.equilibrium.equilibrium.Equilibrium"][d]["coordinates"] - == "r" - } - # don't modulo field line zeta by 2pi - coords_sfl = jnp.column_stack([r, a + data["iota"] * z_fl, z_fl]) - coords_desc = eq.compute_theta_coords(coords_sfl) grid_desc = Grid( nodes=coords_desc, sort=False, jitable=True, - _unique_rho_idx=meshgrid_unique_idx(rho.size, alpha.size, zeta.size)[0], - _inverse_rho_idx=meshgrid_inverse_idx(rho.size, alpha.size, zeta.size)[0], + _unique_rho_idx=_unique_rho_idx, + _inverse_rho_idx=_inverse_rho_idx, ) - return grid_fl, grid_desc, data + return grid_desc, grid_fl def is_nested(eq, grid=None, R_lmn=None, Z_lmn=None, L_lmn=None, msg=None): diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 72c301fc4c..ac4f870c0b 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -416,17 +416,22 @@ def test_bounce_quadrature(): """Test principal value of bounce integral matches elliptic integral.""" p = 1e-3 m = 1 - p - truth = 2 * ellipkm1(p) + # Some prime number that doesn't appear anywhere in calculation. + # Ensures no lucky cancellation occurs from this test case since otherwise + # (bp2 - bp1) / pi = pi / (bp2 - bp1) which could mask errors since pi + # appears often in transformations. + v = 7 + truth = v * 2 * ellipkm1(p) rtol = 1e-3 - bp1 = -np.pi / 2 + bp1 = -np.pi / 2 * v bp2 = -bp1 knots = np.linspace(bp1, bp2, 15) bp1 = np.atleast_3d(bp1) bp2 = np.atleast_3d(bp2) B_sup_z = np.ones((1, knots.size)) - B = np.reshape(np.sin(knots) ** 2, (1, -1)) - B_z_ra = np.sin(2 * knots).reshape(1, -1) + B = (np.sin(knots / v) ** 2).reshape(1, -1) + B_z_ra = (np.sin(2 * knots / v) / v).reshape(1, -1) pitch = np.ones((1, 1)) def integrand(B, pitch): @@ -434,18 +439,18 @@ def integrand(B, pitch): # augment the singularity x_t, w_t = tanh_sinh_quad(18, grad_automorphism_arcsin) - z_t = automorphism_arcsin(x_t) + x_t = automorphism_arcsin(x_t) tanh_sinh_arcsin = _bounce_quadrature( - bp1, bp2, z_t, w_t, knots, B_sup_z, B, B_z_ra, integrand, [], pitch + bp1, bp2, x_t, w_t, knots, B_sup_z, B, B_z_ra, integrand, [], pitch ) np.testing.assert_allclose(tanh_sinh_arcsin, truth, rtol=rtol) # suppress the singularity x_g, w_g = np.polynomial.legendre.leggauss(16) w_g = w_g * grad_automorphism_sin(x_g) - z_g = automorphism_sin(x_g) + x_g = automorphism_sin(x_g) leg_gauss_sin = _bounce_quadrature( - bp1, bp2, z_g, w_g, knots, B_sup_z, B, B_z_ra, integrand, [], pitch + bp1, bp2, x_g, w_g, knots, B_sup_z, B, B_z_ra, integrand, [], pitch ) np.testing.assert_allclose(leg_gauss_sin, truth, rtol=rtol) @@ -471,11 +476,11 @@ def integrand_den(B, pitch): bounce_integral, items = bounce_integral_map(eq, rho, alpha, knots) # start hairy ball test - B = items["data"]["B"] + B = eq.compute("|B|", grid=items["grid_desc"])["|B|"] assert not np.isclose(B, 0, atol=1e-19).any(), "B should never vanish." 
# end hairy ball test - g_zz = eq.compute("g_zz", grid=items["grid_desc"], data=items["data"])["g_zz"] + g_zz = eq.compute("g_zz", grid=items["grid_desc"])["g_zz"] pitch = pitch_of_extrema(knots, items["B.c"], items["B_z_ra.c"]) num = bounce_integral(integrand_num, g_zz, pitch) den = bounce_integral(integrand_den, [], pitch) From df5198bd86152e8d50475e8b290a1396d9d8cb47 Mon Sep 17 00:00:00 2001 From: Rahul Date: Thu, 18 Apr 2024 12:36:27 -0400 Subject: [PATCH 100/241] bounce average test modified, scipy's incomplete elliptic integrals seem to be failing! --- tests/test_bounce_integral.py | 64 ++++++++++++++++++++++++++--------- 1 file changed, 48 insertions(+), 16 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index ac4f870c0b..b8a77e9104 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -9,7 +9,7 @@ # TODO: can use the one from interpax once .solve() is implemented from scipy.interpolate import CubicHermiteSpline -from scipy.special import ellipe, ellipk, ellipkm1 +from scipy.special import ellipeinc, ellipkinc, ellipkm1 from desc.backend import complex_sqrt, flatnonzero from desc.compute.bounce_integral import ( @@ -664,41 +664,73 @@ def test_bounce_averaged_drifts(): * s_hat / Bref ) + + gds21_an = ( + -1 * s_hat * (s_hat * theta_PEST - alpha_MHD / bmag**4 * np.sin(theta_PEST)) + ) + np.testing.assert_allclose(gds21, gds21_an, atol=1.7e-2, rtol=5e-4) + fudge_factor2 = 0.19 gbdrift_an = fudge_factor2 * ( - -s_hat + (np.cos(theta_PEST) - gds21 / s_hat * np.sin(theta_PEST)) + -s_hat + (np.cos(theta_PEST) - gds21_an / s_hat * np.sin(theta_PEST)) ) fudge_factor3 = 0.07 cvdrift_an = gbdrift_an + fudge_factor3 * alpha_MHD / bmag**2 # Comparing coefficients with their analytical expressions - np.testing.assert_allclose(gbdrift, gbdrift_an, atol=1.5e-2, rtol=5e-3) + np.testing.assert_allclose(gbdrift, gbdrift_an, atol=1.2e-2, rtol=5e-3) np.testing.assert_allclose(cvdrift, cvdrift_an, atol=1.8e-2, rtol=5e-3) # Values of pitch angle lambda for which to evaluate the bounce averages. pitch = np.linspace(1 / np.max(bmag), 1 / np.min(bmag), 11) pitch = pitch.reshape(pitch.shape[0], -1) - k2 = 0.5 * ((1 - pitch * B0) / epsilon + 1) + k2 = 0.5 * ((1 - pitch * B0) / (pitch * B0 * epsilon) + 1) + k = np.sqrt(k2) # Fixme: What exactly is this a function of? - # cvdrift, gbdrift is a grid quantity, so grid.num_nodes length - # on a single field line grid -> so it has length number of zeta points - # So bavg_drift_an has shape shape (number of pitch, number of zeta points). - # For a fixed pitch at index i, what is difference bavg_drift_an[i, j] - # and bavg_drift_an[i, j+1]? - bavg_drift_an = ( - ellipe(k2) - - 0.5 * ellipk(k2) - + 2 * s_hat * (ellipe(k2) + (k2 - 1) * ellipk(k2)) - - dPdrho / B0 * ellipk(k2) - - dPdrho / B0 * 2 / 3 * (ellipe(k2) * (2 * k2 - 1) + ellipk(k2) * (1 - k2)) + # cvdrift, gbdrift is a grid quantity, so grid.num_nodes length + # on a single field line grid -> so it has length number of zeta points + # So bavg_drift_an has shape shape (number of pitch, number of zeta points). + # For a fixed pitch at index i, what is difference bavg_drift_an[i, j] + # and bavg_drift_an[i, j+1]? 
+ # RG : Here are the notes that explain these integrals + # https://github.com/PlasmaControl/DESC/files/15010927/bavg.pdf + integral_0 = 4 / k * ellipkinc(np.arcsin(k), 1 / k2) # ∫ dx sqrt(k2-sin(x/2)^2) + integral_1 = 4 * k * ellipeinc(np.arcsin(k), 1 / k2) # ∫ dx/sqrt(k2-sin(x/2)^2) + integral_2 = 16 * k * integral_0 + + integral_3 = ( + 4 / 9 * (8 * k * (-1 + 2 * k2) * integral_1 - 4 * k * (-1 + k2) * integral_0) + ) + + integral_4 = ( + 2 + * np.sqrt(2) + / 3 + * (4 * np.sqrt(2) * k * (-1 + 2 * k2) * integral_0 - 2 * (-1 + k2) * integral_1) + ) + integral_5 = ( + 2 + / 30 + * ( + 32 * k * (1 - k2 + k2**2) * integral_0 + - 16 * k * (1 - 3 * k2 + 2 * k2**2) * integral_1 + ) + ) + integral_6 = 2 / 3 * (k * (-2 + 4 * k2) * integral_0 - 4 * (-1 + k2) * integral_1) + integral_7 = 4 / k * (2 * k2 * integral_0 + (1 - 2 * k2) * integral_1) + + bavg_drift_an = fudge_factor3 * dPdrho / B0**2 - 0.5 * fudge_factor2 * ( + s_hat * (integral_0 + integral_1 + integral_2 + integral_3) + + alpha_MHD / B0**4 * (integral_4 + integral_5) + + (integral_6 + integral_7) ) def integrand(cvdrift, gbdrift, B, pitch): # The arguments to this function will be interpolated # onto the quadrature points before these quantities are evaluated. g = _sqrt(1 - pitch * B) - return (0.5 * cvdrift * g) + (gbdrift / g) + (dPdrho / B**2 * g) + return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) bavg_drift_num = bounce_integral( integrand=integrand, From b6a0286d132d94af3679c3d6a86f0f05002df5da Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 19 Apr 2024 01:44:01 -0400 Subject: [PATCH 101/241] Do quadrature instead of using brokescipy.special functions --- desc/compute/bounce_integral.py | 114 ++++++++++++++++---------------- tests/test_bounce_integral.py | 96 ++++++++++++++++++++------- 2 files changed, 127 insertions(+), 83 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 8451d3f59c..0209fe43d7 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -365,11 +365,18 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c): return pitch -def bounce_points(knots, B_c, B_z_ra_c, pitch, check=False): +def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False): """Compute the bounce points given spline of |B| and pitch λ. Parameters ---------- + pitch : Array, shape(P, S) + λ values. + Last axis enumerates the λ value for a particular field line + parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` + where in the latter the labels (ρ, α) are interpreted as index into the + last axis that corresponds to that field line. + If two-dimensional, the first axis is the batch axis as usual. knots : Array, shape(knots.size, ) Field line-following ζ coordinates of spline knots. B_c : Array, shape(B_c.shape[0], S, knots.size - 1) @@ -384,13 +391,6 @@ def bounce_points(knots, B_c, B_z_ra_c, pitch, check=False): Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials of the spline along a particular field line. - pitch : Array, shape(P, S) - λ values. - Last axis enumerates the λ value for a particular field line - parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` - where in the latter the labels (ρ, α) are interpreted as index into the - last axis that corresponds to that field line. - If two-dimensional, the first axis is the batch axis as usual. check : bool Flag for debugging. 
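For reference, a minimal sketch of calling ``bounce_points`` with the reordered
signature, mirroring the spline setup used in ``tests/test_bounce_integral.py``.
The field strength model |B| = 1 + 0.3 cos ζ and the pitch value are arbitrary
placeholders rather than DESC output, and the import path assumes the module
layout introduced by these patches.

.. code-block:: python

    import numpy as np
    from scipy.interpolate import CubicSpline

    from desc.compute.bounce_integral import bounce_points

    knots = np.linspace(-3 * np.pi, 3 * np.pi, 50)
    # Spline an illustrative |B| along the field line; the derivative spline
    # supplies the coefficients of d|B|/dζ used to tell left (bp1) from right
    # (bp2) bounce points.
    B_spline = CubicSpline(knots, 1 + 0.3 * np.cos(knots))
    # Choose λ so that λ|B| = 1 is crossed somewhere along the field line.
    pitch = 1 / 1.1
    bp1, bp2 = bounce_points(
        pitch, knots, B_spline.c, B_spline.derivative().c, check=True
    )
    # bp1[i, j, k] and bp2[i, j, k] bound the k-th bounce integral for pitch i
    # on field line j; slots without a bounce integral are padded with nan.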
@@ -552,10 +552,10 @@ def automorphism_sin(x): Therefore, this automorphism pulls the mass of the bounce integral away from the singularities, which should improve convergence of the quadrature - to the principal value of the true integral, so long as the quadrature - performs better on less singular integrands. If the integral was - hypersingular to begin with, Tanh-Sinh quadrature will still work well. - Otherwise, Gauss-Legendre quadrature can outperform Tanh-Sinh. + to the true integral, so long as the quadrature performs better on less + singular integrands. If the integral was hypersingular to begin with, + Tanh-Sinh quadrature will still work well. Otherwise, Gauss-Legendre + quadrature can outperform Tanh-Sinh. Parameters ---------- @@ -618,29 +618,26 @@ def tanh_sinh_quad(resolution, w=lambda x: 1): _repeated_docstring = """w : Array, shape(w.size, ) Quadrature weights. - knots : Array, shape(knots.size, ) - Field line-following ζ coordinates of spline knots. - B_sup_z : Array, shape(S, knots.size, ) - Contravariant field-line following toroidal component of magnetic field. - B : Array, shape(S, knots.size, ) - Norm of magnetic field. - B_z_ra : Array, shape(S, knots.size, ) - Norm of magnetic field derivative with respect to field-line following label. integrand : callable This callable is the composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. - It should accept the items in ``f`` as arguments as well as two additional - keyword arguments: ``B``, and ``pitch``. A quadrature will be performed to - approximate the bounce integral of ``integrand(*f, B=B, pitch=pitch)``. + It should accept the items in ``f`` as arguments as well as the additional + keyword arguments: ``B``, ``pitch``, and ``Z``, where ``Z`` is the set of + quadrature points. A quadrature will be performed to approximate the + bounce integral of ``integrand(*f, B=B, pitch=pitch, Z=Z)``. Note that any arrays baked into the callable method should broadcast - with arrays of shape(P, S, 1, 1) where - P is the batch axis size of pitch. - S is the number of field lines. - f : iterable of Array, shape(P, S, knots.size, ) + with ``Z``. + f : list or tuple of Array, shape(P, S, knots.size, ) Arguments to the callable ``integrand``. These should be the functions in the integrand of the bounce integral evaluated (or interpolated to) the nodes of the returned desc coordinate grid. + B_sup_z : Array, shape(S, knots.size, ) + Contravariant field-line following toroidal component of magnetic field. + B : Array, shape(S, knots.size, ) + Norm of magnetic field. + B_z_ra : Array, shape(S, knots.size, ) + Norm of magnetic field derivative with respect to field-line following label. pitch : Array, shape(P, S) λ values to evaluate the bounce integral at each field line. Last axis enumerates the λ value for a particular field line parameterized @@ -648,6 +645,8 @@ def tanh_sinh_quad(resolution, w=lambda x: 1): where in the latter the labels (ρ, α) are interpreted as index into the last axis that corresponds to that field line. The first axis is the batch axis as usual. + knots : Array, shape(knots.size, ) + Field line-following ζ coordinates of spline knots. method : str Method of interpolation for functions contained in ``f``. See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. 
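As a concrete illustration of the integrand contract documented above, the
sketch below defines two callables that accept the keyword arguments ``B``,
``pitch``, and ``Z``. Here ``g_zz`` stands in for a hypothetical entry of ``f``
already interpolated to the quadrature points, and ``safediv`` is assumed to be
importable from ``desc.compute.utils``.

.. code-block:: python

    from desc.backend import jnp
    from desc.compute.utils import safediv

    def integrand_J(B, pitch, Z):
        # With ``f`` empty, only the keywords are received. This is the integrand
        # of the (normalized) parallel adiabatic invariant J ∝ ∫ √(1 − λ|B|) dℓ.
        return jnp.sqrt(1 - pitch * B)

    def integrand_num(g_zz, B, pitch, Z):
        # Entries of ``f`` arrive positionally; every factor must broadcast with Z.
        return safediv((1 - pitch * B) * g_zz, jnp.sqrt(1 - pitch * B))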
@@ -681,8 +680,8 @@ def _interp1d_vec_with_df( return interp1d(xq, x, f, method, derivative, extrap, period, fx=fx) -def _interpolating_quadrature( - Z, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method +def _interpolatory_quadrature( + Z, w, integrand, f, B_sup_z, B, B_z_ra, pitch, knots, method ): """Interpolate given functions to points Z and perform quadrature. @@ -707,29 +706,30 @@ def _interpolating_quadrature( # Spline each function separately so that the singularity near the bounce # points can be captured more accurately than can be by any polynomial. shape = Z.shape - Z = Z.reshape(Z.shape[0], Z.shape[1], -1) - f = [_interp1d_vec(Z, knots, ff, method=method).reshape(shape) for ff in f] - B_sup_z = _interp1d_vec(Z, knots, B_sup_z, method=method).reshape(shape) + Z_ps = Z.reshape(Z.shape[0], Z.shape[1], -1) + f = [_interp1d_vec(Z_ps, knots, ff, method=method).reshape(shape) for ff in f] + B_sup_z = _interp1d_vec(Z_ps, knots, B_sup_z, method=method).reshape(shape) # Specify derivative at knots for ≈ cubic hermite interpolation. - B = _interp1d_vec_with_df(Z, knots, B, B_z_ra, method="cubic").reshape(shape) + B = _interp1d_vec_with_df(Z_ps, knots, B, B_z_ra, method="cubic").reshape(shape) pitch = pitch[..., jnp.newaxis, jnp.newaxis] - inner_product = jnp.dot(integrand(*f, B=B, pitch=pitch) / B_sup_z, w) + inner_product = jnp.dot(integrand(*f, B=B, pitch=pitch, Z=Z) / B_sup_z, w) return inner_product -_interpolating_quadrature.__doc__ = _interpolating_quadrature.__doc__.replace( +_interpolatory_quadrature.__doc__ = _interpolatory_quadrature.__doc__.replace( _delimiter, _repeated_docstring + _delimiter, 1 ) def _bounce_quadrature( - bp1, bp2, x, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch=None, method="akima" + bp1, bp2, x, w, integrand, f, B_sup_z, B, B_z_ra, pitch, knots, method="akima" ): """Bounce integrate ∫ f(ℓ) dℓ. Parameters ---------- - bp1, bp2 : Array, Array, shape(P, S, -1) + bp1, bp2 : Array, Array + Each should have shape(P, S, bp1.shape[-1]). The field line-following ζ coordinates of bounce points for a given pitch along a field line. The pairs bp1[i, j, k] and bp2[i, j, k] form left and right integration boundaries, respectively, for the bounce integrals. @@ -738,7 +738,7 @@ def _bounce_quadrature( Returns ------- - result : Array, shape(P, S, -1) + result : Array, shape(P, S, bp1.shape[-1]) First axis enumerates pitch values. Second axis enumerates the field lines. Last axis enumerates the bounce integrals. @@ -752,20 +752,19 @@ def _bounce_quadrature( f = [f] def _group_grid_data_by_field_line(g): - errorif( - g.ndim > 2, - ValueError, - "Should have at most two dimensions, in which case the first axis" - " is interpreted as the batch axis, which enumerates the evaluation" - " of the function at particular pitch values.", + msg = ( + "Should have at most two dimensions, in which case the first axis " + "is interpreted as the batch axis, which enumerates the evaluation " + "of the function at particular pitch values." ) + errorif(g.ndim > 2, ValueError, msg) return g.reshape(-1, S, knots.size) f = map(_group_grid_data_by_field_line, f) Z = _affine_bijection_reverse(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) # Integrate and complete the change of variable. 
- result = _interpolating_quadrature( - Z, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method + result = _interpolatory_quadrature( + Z, w, integrand, f, B_sup_z, B, B_z_ra, pitch, knots, method ) * _grad_affine_bijection_reverse(bp1, bp2) assert result.shape == (pitch.shape[0], S, bp1.shape[-1]) return result @@ -788,7 +787,7 @@ def bounce_integral_map( ): """Returns a method to compute the bounce integral of any quantity. - The bounce integral is defined as the principal value of ∫ f(ℓ) dℓ, where + The bounce integral is defined as ∫ f(ℓ) dℓ, where dℓ parameterizes the distance along the field line, λ is a constant proportional to the magnetic moment over energy, |B| is the norm of the magnetic field, @@ -875,14 +874,14 @@ def bounce_integral_map( .. code-block:: python - def integrand_num(g_zz, B, pitch): + def integrand_num(g_zz, B, pitch, Z): # Integrand in integral in numerator of bounce average. f = (1 - pitch * B) * g_zz - return f / jnp.sqrt(1 - pitch * B) + return safediv(f, jnp.sqrt(1 - pitch * B)) - def integrand_den(B, pitch): + def integrand_den(B, pitch, Z): # Integrand in integral in denominator of bounce average. - return 1 / jnp.sqrt(1 - pitch * B) + return safediv(1, jnp.sqrt(1 - pitch * B)) eq = get("HELIOTRON") rho = jnp.linspace(1e-12, 1, 6) @@ -961,12 +960,11 @@ def bounce_integral(integrand, f, pitch, method="akima"): This callable is the composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the items in ``f`` as arguments as well as two additional - keyword arguments: ``B``, and ``pitch``. A quadrature will be performed to - approximate the bounce integral of ``integrand(*f, B=B, pitch=pitch)``. + keyword arguments: ``B``, ``pitch``, and ``Z``, where ``Z`` is the set of + quadrature points. A quadrature will be performed to approximate the + bounce integral of ``integrand(*f, B=B, pitch=pitch, Z=Z)``. Note that any arrays baked into the callable method should broadcast - with arrays of shape(P, S, 1, 1) where - P is the batch axis size of pitch. - S is the number of field lines. + with ``Z``. f : list of Array, shape(P, items["grid_desc"].num_nodes, ) Arguments to the callable ``integrand``. These should be the functions in the integrand of the bounce integral @@ -994,9 +992,9 @@ def bounce_integral(integrand, f, pitch, method="akima"): lines. Last axis enumerates the bounce integrals. 
""" - bp1, bp2 = bounce_points(knots, B_c, B_z_ra_c, pitch, check) + bp1, bp2 = bounce_points(pitch, knots, B_c, B_z_ra_c, check) result = _bounce_quadrature( - bp1, bp2, x, w, knots, B_sup_z, B, B_z_ra, integrand, f, pitch, method + bp1, bp2, x, w, integrand, f, B_sup_z, B, B_z_ra, pitch, knots, method ) assert result.shape[-1] == (knots.size - 1) * 3 return result diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index b8a77e9104..4661585554 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -6,10 +6,11 @@ import numpy as np import pytest from matplotlib import pyplot as plt +from scipy import integrate # TODO: can use the one from interpax once .solve() is implemented from scipy.interpolate import CubicHermiteSpline -from scipy.special import ellipeinc, ellipkinc, ellipkm1 +from scipy.special import ellipkm1 from desc.backend import complex_sqrt, flatnonzero from desc.compute.bounce_integral import ( @@ -85,7 +86,9 @@ def test_mask_operations(): np.pad(desired, (0, cols - desired.size), constant_values=np.nan), equal_nan=True, ), "take_mask has bugs." - assert np.array_equal(last[i], desired[-1]), "flatnonzero has bugs." + assert np.array_equal( + last[i], desired[-1] if desired.size else np.nan + ), "flatnonzero has bugs." @pytest.mark.unit @@ -262,7 +265,7 @@ def test_bp1_first(plot=False): pitch = 2 if plot: plot_field_line(B, pitch, start, end) - bp1, bp2 = bounce_points(knots, B.c, B.derivative().c, pitch, check=True) + bp1, bp2 = bounce_points(pitch, knots, B.c, B.derivative().c, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1, intersect[0::2]) @@ -276,7 +279,7 @@ def test_bp2_first(plot=False): pitch = 2 if plot: plot_field_line(B, pitch, start, end) - bp1, bp2 = bounce_points(k, B.c, B.derivative().c, pitch, check=True) + bp1, bp2 = bounce_points(pitch, k, B.c, B.derivative().c, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1, intersect[1::2]) @@ -294,7 +297,7 @@ def test_bp1_before_extrema(plot=False): if plot: plot_field_line(B, pitch, start, end) - bp1, bp2 = bounce_points(k, B.c, B_z_ra.c, pitch, check=True) + bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) # Our routine correctly detects intersection, while scipy, jnp.root fails. intersect = B.solve(1 / pitch, extrapolate=False) @@ -317,7 +320,7 @@ def test_bp2_before_extrema(plot=False): if plot: plot_field_line(B, pitch, start, end) - bp1, bp2 = bounce_points(k, B.c, B_z_ra.c, pitch, check=True) + bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1, intersect[[0, -2]]) @@ -337,7 +340,7 @@ def test_extrema_first_and_before_bp1(plot=False): if plot: plot_field_line(B, pitch, k[2], end) - bp1, bp2 = bounce_points(k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], pitch, check=True) + bp1, bp2 = bounce_points(pitch, k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) # Our routine correctly detects intersection, while scipy, jnp.root fails. 
intersect = B.solve(1 / pitch, extrapolate=False) @@ -361,7 +364,7 @@ def test_extrema_first_and_before_bp2(plot=False): if plot: plot_field_line(B, pitch, start, end) - bp1, bp2 = bounce_points(k, B.c, B_z_ra.c, pitch, check=True) + bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) # Our routine correctly detects intersection, while scipy, jnp.root fails. intersect = B.solve(1 / pitch, extrapolate=False) @@ -413,8 +416,8 @@ def test_automorphism(): @pytest.mark.unit def test_bounce_quadrature(): - """Test principal value of bounce integral matches elliptic integral.""" - p = 1e-3 + """Test bounce integral matches elliptic integral.""" + p = 1e-4 m = 1 - p # Some prime number that doesn't appear anywhere in calculation. # Ensures no lucky cancellation occurs from this test case since otherwise @@ -434,14 +437,14 @@ def test_bounce_quadrature(): B_z_ra = (np.sin(2 * knots / v) / v).reshape(1, -1) pitch = np.ones((1, 1)) - def integrand(B, pitch): - return 1 / _sqrt(1 - pitch * m * B) + def integrand(B, pitch, Z): + return 1 / np.sqrt(1 - pitch * m * B) # augment the singularity x_t, w_t = tanh_sinh_quad(18, grad_automorphism_arcsin) x_t = automorphism_arcsin(x_t) tanh_sinh_arcsin = _bounce_quadrature( - bp1, bp2, x_t, w_t, knots, B_sup_z, B, B_z_ra, integrand, [], pitch + bp1, bp2, x_t, w_t, integrand, [], B_sup_z, B, B_z_ra, pitch, knots ) np.testing.assert_allclose(tanh_sinh_arcsin, truth, rtol=rtol) @@ -450,7 +453,7 @@ def integrand(B, pitch): w_g = w_g * grad_automorphism_sin(x_g) x_g = automorphism_sin(x_g) leg_gauss_sin = _bounce_quadrature( - bp1, bp2, x_g, w_g, knots, B_sup_z, B, B_z_ra, integrand, [], pitch + bp1, bp2, x_g, w_g, integrand, [], B_sup_z, B, B_z_ra, pitch, knots ) np.testing.assert_allclose(leg_gauss_sin, truth, rtol=rtol) @@ -459,14 +462,14 @@ def integrand(B, pitch): def test_example_code_and_hairy_ball(): """Test example code in bounce_integral docstring and ensure B does not vanish.""" - def integrand_num(g_zz, B, pitch): + def integrand_num(g_zz, B, pitch, Z): """Integrand in integral in numerator of bounce average.""" f = (1 - pitch * B) * g_zz # something arbitrary - return safediv(f, _sqrt(1 - pitch * B), fill=np.nan) + return safediv(f, _sqrt(1 - pitch * B)) - def integrand_den(B, pitch): + def integrand_den(B, pitch, Z): """Integrand in integral in denominator of bounce average.""" - return safediv(1, _sqrt(1 - pitch * B), fill=np.nan) + return safediv(1, _sqrt(1 - pitch * B)) eq = get("HELIOTRON") rho = np.linspace(1e-12, 1, 6) @@ -476,7 +479,7 @@ def integrand_den(B, pitch): bounce_integral, items = bounce_integral_map(eq, rho, alpha, knots) # start hairy ball test - B = eq.compute("|B|", grid=items["grid_desc"])["|B|"] + B = eq.compute("|B|", grid=items["grid_desc"], override_grid=False)["|B|"] assert not np.isclose(B, 0, atol=1e-19).any(), "B should never vanish." 
# end hairy ball test @@ -568,7 +571,51 @@ def beta(grid, data): # TODO now compare result to elliptic integral bounce_integral, items = bounce_integral_map(eq, rho, alpha, knots, check=True) pitch = pitch_of_extrema(knots, items["B.c"], items["B_z_ra.c"]) - bp1, bp2 = bounce_points(knots, items["B.c"], items["B_z_ra.c"], pitch) + bp1, bp2 = bounce_points(pitch, knots, items["B.c"], items["B_z_ra.c"]) + + +@pytest.mark.unit +def test_integral_0(k=0.9, resolution=10): + """4 / k * ellipkinc(np.arcsin(k), 1 / k**2).""" + k = np.atleast_1d(k) + bp1 = np.zeros_like(k) + bp2 = np.arcsin(k) + x, w = tanh_sinh_quad(resolution, grad_automorphism_arcsin) + Z = _affine_bijection_reverse( + automorphism_arcsin(x), bp1[..., np.newaxis], bp2[..., np.newaxis] + ) + k = k[..., np.newaxis] + + def integrand(Z, k): + return safediv(4 / k, np.sqrt(1 - 1 / k**2 * np.sin(Z) ** 2)) + + quad = np.dot(integrand(Z, k), w) * _grad_affine_bijection_reverse(bp1, bp2) + if k.size == 1: + q = integrate.quad(integrand, bp1.item(), bp2.item(), args=(k.item(),))[0] + np.testing.assert_allclose(quad, q, rtol=1e-5) + return quad + + +@pytest.mark.unit +def test_integral_1(k=0.9, resolution=10): + """4 * k * ellipeinc(np.arcsin(k), 1 / k**2).""" + k = np.atleast_1d(k) + bp1 = np.zeros_like(k) + bp2 = np.arcsin(k) + x, w = tanh_sinh_quad(resolution, grad_automorphism_arcsin) + Z = _affine_bijection_reverse( + automorphism_arcsin(x), bp1[..., np.newaxis], bp2[..., np.newaxis] + ) + k = k[..., np.newaxis] + + def integrand(Z, k): + return 4 * k * np.sqrt(1 - 1 / k**2 * np.sin(Z) ** 2) + + quad = np.dot(integrand(Z, k), w) * _grad_affine_bijection_reverse(bp1, bp2) + if k.size == 1: + q = integrate.quad(integrand, bp1.item(), bp2.item(), args=(k.item(),))[0] + np.testing.assert_allclose(quad, q, rtol=1e-4) + return quad @pytest.mark.unit @@ -645,7 +692,6 @@ def test_bounce_averaged_drifts(): bmag_an = B0 * (1 - epsilon * np.cos(theta_PEST)) np.testing.assert_allclose(bmag, bmag_an, atol=5e-3, rtol=5e-3) - # FIXME should x be same as epsilon? x = Lref * rho s_hat = -x / iota * shear / Lref gradpar = Lref * data_bounce["B^zeta"] / data_bounce["|B|"] @@ -695,10 +741,10 @@ def test_bounce_averaged_drifts(): # and bavg_drift_an[i, j+1]? # RG : Here are the notes that explain these integrals # https://github.com/PlasmaControl/DESC/files/15010927/bavg.pdf - integral_0 = 4 / k * ellipkinc(np.arcsin(k), 1 / k2) # ∫ dx sqrt(k2-sin(x/2)^2) - integral_1 = 4 * k * ellipeinc(np.arcsin(k), 1 / k2) # ∫ dx/sqrt(k2-sin(x/2)^2) - integral_2 = 16 * k * integral_0 + integral_0 = test_integral_0(k) + integral_1 = test_integral_1(k) + integral_2 = 16 * k * integral_0 integral_3 = ( 4 / 9 * (8 * k * (-1 + 2 * k2) * integral_1 - 4 * k * (-1 + k2) * integral_0) ) @@ -726,7 +772,7 @@ def test_bounce_averaged_drifts(): + (integral_6 + integral_7) ) - def integrand(cvdrift, gbdrift, B, pitch): + def integrand(cvdrift, gbdrift, B, pitch, Z): # The arguments to this function will be interpolated # onto the quadrature points before these quantities are evaluated. 
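         # g = √(1 − λ|B|) vanishes at the bounce points, where λ|B| = 1, so the
         # gbdrift / g term is the singular factor the quadrature must resolve.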
g = _sqrt(1 - pitch * B) From 0d74884b2f636fe1af4bcb34725889299002b773 Mon Sep 17 00:00:00 2001 From: Rahul Date: Fri, 19 Apr 2024 11:34:31 -0400 Subject: [PATCH 102/241] small changes to the bounce average test --- tests/test_bounce_integral.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 4661585554..8218565163 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -766,10 +766,15 @@ def test_bounce_averaged_drifts(): integral_6 = 2 / 3 * (k * (-2 + 4 * k2) * integral_0 - 4 * (-1 + k2) * integral_1) integral_7 = 4 / k * (2 * k2 * integral_0 + (1 - 2 * k2) * integral_1) - bavg_drift_an = fudge_factor3 * dPdrho / B0**2 - 0.5 * fudge_factor2 * ( - s_hat * (integral_0 + integral_1 + integral_2 + integral_3) - + alpha_MHD / B0**4 * (integral_4 + integral_5) - + (integral_6 + integral_7) + bavg_drift_an = ( + fudge_factor3 * dPdrho / B0**2 * integral_1 + - 0.5 + * fudge_factor2 + * ( + s_hat * (integral_0 + integral_1 + integral_2 + integral_3) + + alpha_MHD / B0**4 * (integral_4 + integral_5) + + (integral_6 + integral_7) + )[:, 0] ) def integrand(cvdrift, gbdrift, B, pitch, Z): From ae0a6f8627c318b7a397b78e5d7b5634b7444fd5 Mon Sep 17 00:00:00 2001 From: Rahul Date: Fri, 19 Apr 2024 11:45:09 -0400 Subject: [PATCH 103/241] more changes to the bounce average test; most of the numerical values are Nans for some reason --- tests/test_bounce_integral.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 8218565163..837eb91f7e 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -773,9 +773,9 @@ def test_bounce_averaged_drifts(): * ( s_hat * (integral_0 + integral_1 + integral_2 + integral_3) + alpha_MHD / B0**4 * (integral_4 + integral_5) - + (integral_6 + integral_7) - )[:, 0] - ) + - (integral_6 + integral_7) + ) + )[:, 0] def integrand(cvdrift, gbdrift, B, pitch, Z): # The arguments to this function will be interpolated From d05482c70bc8196255f4197087d9b2be197f8da3 Mon Sep 17 00:00:00 2001 From: Rahul Date: Fri, 19 Apr 2024 11:56:02 -0400 Subject: [PATCH 104/241] adding responses to questions --- tests/test_bounce_integral.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 837eb91f7e..74fe10d222 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -663,6 +663,11 @@ def test_bounce_averaged_drifts(): # below, but should everything related to B be normalized? # or just things relevant for computing bounce points? # e.g. should I normalize B dot e^zeta = B^zeta by Bref as well? + # Response (R.G.): Yes, it would be better to normalize everything + # All the quantities can be normalized using combinations of Lref + # and Bref. To see what normalizations I use see below. + # For B^zeta the normalization should be Lref/Bref. 
Since we only + # use b dot grad zeta, we need B^zeta/|B| * Lref eq, rho, alpha, From b4604f6dbfac6e7757753d82ebbfa6d6d781d5c7 Mon Sep 17 00:00:00 2001 From: Rahul Date: Fri, 19 Apr 2024 12:21:12 -0400 Subject: [PATCH 105/241] bavg_drift_num has the same dimension as pitch now :) --- tests/test_bounce_integral.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 74fe10d222..290aa493f9 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -707,7 +707,7 @@ def test_bounce_averaged_drifts(): cvdrift = -2 * np.sign(psi_boundary) * Bref * Lref**2 * rho * data_bounce["cvdrift"] gbdrift = -2 * np.sign(psi_boundary) * Bref * Lref**2 * rho * data_bounce["gbdrift"] dPdrho = np.mean(-0.5 * (cvdrift - gbdrift) * data_bounce["|B|"] ** 2) - alpha_MHD = -dPdrho * 1 / data_bounce["iota"] ** 2 * 0.5 + alpha_MHD = -np.mean(dPdrho * 1 / data_bounce["iota"] ** 2 * 0.5) gds21 = ( -np.sign(iota) @@ -780,7 +780,7 @@ def test_bounce_averaged_drifts(): + alpha_MHD / B0**4 * (integral_4 + integral_5) - (integral_6 + integral_7) ) - )[:, 0] + ) def integrand(cvdrift, gbdrift, B, pitch, Z): # The arguments to this function will be interpolated From 86666485cb6f53d99c8a01f8fbe621ce430f3733 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 21 Apr 2024 01:09:00 -0400 Subject: [PATCH 106/241] Add helper function to compute epsilon effective --- desc/compute/bounce_integral.py | 111 +++++++++++++++++++++++--------- tests/test_bounce_integral.py | 39 +++++++---- 2 files changed, 107 insertions(+), 43 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 0209fe43d7..ddefea678f 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -282,8 +282,7 @@ def _check_shape(knots, B_c, B_z_ra_c, pitch=None): field line. pitch : Array, shape(P, S) λ values. - Last axis enumerates the λ value for a particular field line - parameterized by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` + λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` where in the latter the labels (ρ, α) are interpreted as index into the last axis that corresponds to that field line. If two-dimensional, the first axis is the batch axis as usual. @@ -310,11 +309,24 @@ def _check_shape(knots, B_c, B_z_ra_c, pitch=None): return B_c, B_z_ra_c, pitch -def pitch_of_extrema(knots, B_c, B_z_ra_c): +def pitch_of_extrema(knots, B_c, B_z_ra_c, sort=False): """Return pitch values that will capture fat banana orbits. - These pitch values are 1/|B|(ζ*) where |B|(ζ*) are local maxima. - The local minima are returned as well. + Particles with λ = 1 / |B|(ζ*) where |B|(ζ*) are local maxima + have fat banana orbits increasing neoclassical transport. + + When computing ε ∼ ∫ db ∑ⱼ Hⱼ² / Iⱼ in equation 29 of + V. V. Nemov, S. V. Kasilov, W. Kernbichler, M. F. Heyn. + Evaluation of 1/ν neoclassical transport in stellarators. + Phys. Plasmas 1 December 1999; 6 (12): 4622–4632. + https://doi.org/10.1063/1.873749 + the contribution of ∑ⱼ Hⱼ² / Iⱼ to ε is largest in the intervals such that + b ∈ [|B|(ζ*) - db, |B|(ζ*)]. + To see this, observe that Iⱼ ∼ √(1 − λ B), hence Hⱼ² / Iⱼ ∼ Hⱼ² / √(1 − λ B). + For λ = 1 / |B|(ζ*), near |B|(ζ*), the quantity 1 / √(1 − λ B) is singular. + The slower |B| tends to |B|(ζ*) the less integrable this singularity becomes. + Therefore, a quadrature for ε ∼ ∫ db ∑ⱼ Hⱼ² / Iⱼ would do well to evaluate the + integrand near b = 1 / λ = |B|(ζ*). 
Parameters ---------- @@ -332,6 +344,8 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c): Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials of the spline along a particular field line. + sort : bool + Whether to sort pitch values in order of increasing ζ* along field line. Returns ------- @@ -342,21 +356,25 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c): If there were less than ``N * (degree - 1)`` extrema detected along a field line, then the first axis, which enumerates the pitch values for - a particular field line, is padded with nan. + a particular field line, is padded with nan. The first axis is sorted + in order of decreasing pitch values. """ B_c, B_z_ra_c, _ = _check_shape(knots, B_c, B_z_ra_c) S, N, degree = B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 + # The local minima are returned as well, which has no negative effect + # other than perhaps not being an optimal quadrature point. extrema = _poly_root( c=B_z_ra_c, a_min=jnp.array([0]), a_max=jnp.diff(knots), + sort=sort, # False to double weight orbits with |B|_z_ra = |B|_zz_ra = 0 at bounce points. distinct=True, ) # Can detect at most degree of |B|_z_ra spline extrema between each knot. assert extrema.shape == (S, N, degree - 1) - # Reshape so that last axis enumerates (unsorted) extrema along a field line. + # Reshape so that last axis enumerates extrema along a field line. B_extrema = _poly_val(x=extrema, c=B_c[..., jnp.newaxis]).reshape(S, -1) # Might be useful to pad all the nan at the end rather than interspersed. B_extrema = take_mask(B_extrema, ~jnp.isnan(B_extrema)) @@ -365,6 +383,40 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c): return pitch +# TODO: Any reason to not define pitch as b from the start? +# Would be simpler to use 1/lambda in formulas and redefine lambda = b. +def pitch_trapz(pitch_knot, resolution): + """Returns quadrature points for trapezoidal integration in 1/pitch between knots. + + Parameters + ---------- + pitch_knot : Array, shape(P, S) + λ values that should be included as quadrature points. + λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` + where in the latter the labels (ρ, α) are interpreted as index into the + last axis that corresponds to that field line. + The first axis is the batch axis as usual. + resolution : int + Number of quadrature points. + + Returns + ------- + pitch : Array, shape((P - 1) * resolution + 1, S) + Quadrature points in pitch space. + + """ + pitch_knot = jnp.atleast_2d(pitch_knot) + errorif(pitch_knot.ndim != 2) + # It is tedious to do a composite Gauss Quadrature with points at b_knot. + # So let's just do trapezoidal for now. + b_knot = jnp.sort(1 / pitch_knot, axis=0) + b = jnp.linspace(b_knot[:-1, ...], b_knot[1:, ...], resolution, endpoint=False) + b = jnp.moveaxis(b, source=0, destination=1).reshape(-1, pitch_knot.shape[-1]) + b = jnp.append(b, b_knot[jnp.newaxis, -1, ...], axis=0) + assert b.shape == ((pitch_knot.shape[0] - 1) * resolution + 1, pitch_knot.shape[-1]) + return 1 / b + + def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False): """Compute the bounce points given spline of |B| and pitch λ. @@ -372,8 +424,7 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False): ---------- pitch : Array, shape(P, S) λ values. - Last axis enumerates the λ value for a particular field line - parameterized by ρ, α. 
That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` + λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` where in the latter the labels (ρ, α) are interpreted as index into the last axis that corresponds to that field line. If two-dimensional, the first axis is the batch axis as usual. @@ -468,21 +519,19 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False): "Discontinuity detected. Is B_z_ra the derivative of the spline of B?", ) return bp1, bp2 - # This is no longer implemented at the moment. - # If the first intersect is at a non-negative derivative, that particle - # may be trapped in a well outside this snapshot of the field line. If, in - # addition, the last intersect is at a non-positive derivative, then we - # have information to compute a bounce integral between these points. - # This single bounce integral is somewhat undefined since the field typically - # does not close on itself, but in some cases it can make sense to include it. - # To make this integral well-defined, an approximation is made that the field - # line is periodic such that ζ = knots[-1] can be interpreted as ζ = 0 so - # that the distance between these bounce points is well-defined. This is fine - # as long as after a transit the field line begins physically close to where - # it began on the previous transit, for then continuity of |B| implies - # |B|(knots[-1] < ζ < knots[-1] + knots[0]) is close to |B|(0 < ζ < knots[0]). - # We don't need to check conditions for the latter, because if they are not - # satisfied, the quadrature will evaluate √(1 − λ |B|) as nan automatically. + # Consistent with (in particular the discussion on page 3 and 5 of) + # V. V. Nemov, S. V. Kasilov, W. Kernbichler, M. F. Heyn. + # Evaluation of 1/ν neoclassical transport in stellarators. + # Phys. Plasmas 1 December 1999; 6 (12): 4622–4632. + # https://doi.org/10.1063/1.873749. + # we ignore the bounce points of particles assigned to a class that + # are trapped outside this snapshot of the field line. The caveat + # is that the field line discussed in the paper above specifies the + # flux surface completely as its length tends to infinity, whereas + # the field line snapshot here is for a particular alpha coordinate. + # Don't think it's necessary to stitch together the field lines using + # rotational transform to potentially capture the bounce point outside + # this snapshot of the field line. def _affine_bijection_forward(x, a, b): @@ -491,13 +540,13 @@ def _affine_bijection_forward(x, a, b): return y -def _affine_bijection_reverse(x, a, b): +def affine_bijection_reverse(x, a, b): """[−1, 1] ∋ x ↦ y ∈ [a, b].""" y = (x + 1) / 2 * (b - a) + a return y -def _grad_affine_bijection_reverse(a, b): +def grad_affine_bijection_reverse(a, b): """Gradient of reverse affine bijection.""" dy_dx = (b - a) / 2 return dy_dx @@ -640,8 +689,7 @@ def tanh_sinh_quad(resolution, w=lambda x: 1): Norm of magnetic field derivative with respect to field-line following label. pitch : Array, shape(P, S) λ values to evaluate the bounce integral at each field line. - Last axis enumerates the λ value for a particular field line parameterized - by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` + λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` where in the latter the labels (ρ, α) are interpreted as index into the last axis that corresponds to that field line. The first axis is the batch axis as usual. 
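For concreteness, the (P, S) pitch layout described above can be built with plain numpy by broadcasting over the S field lines; the |B| bounds below are made-up placeholders rather than values from any equilibrium, and this only sketches the convention, not any DESC helper.

.. code-block:: python

    import numpy as np

    # Hypothetical per-field-line bounds of |B|; shape (S,) with S = rho.size * alpha.size.
    B_min = np.array([0.8, 0.9, 1.1])
    B_max = np.array([1.6, 2.0, 2.4])
    P = 5
    # np.linspace broadcasts over the trailing axis, giving shape (P, S):
    # pitch[:, s] holds the P pitch values assigned to field line s.
    pitch = np.linspace(1 / B_max, 1 / B_min, P)
    assert pitch.shape == (P, B_min.size)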
@@ -761,11 +809,11 @@ def _group_grid_data_by_field_line(g): return g.reshape(-1, S, knots.size) f = map(_group_grid_data_by_field_line, f) - Z = _affine_bijection_reverse(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) + Z = affine_bijection_reverse(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) # Integrate and complete the change of variable. result = _interpolatory_quadrature( Z, w, integrand, f, B_sup_z, B, B_z_ra, pitch, knots, method - ) * _grad_affine_bijection_reverse(bp1, bp2) + ) * grad_affine_bijection_reverse(bp1, bp2) assert result.shape == (pitch.shape[0], S, bp1.shape[-1]) return result @@ -975,8 +1023,7 @@ def bounce_integral(integrand, f, pitch, method="akima"): of the function at particular pitch values. pitch : Array, shape(P, S) λ values to evaluate the bounce integral at each field line. - Last axis enumerates the λ value for a particular field line parameterized - by ρ, α. That is, λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` + λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` where in the latter the labels (ρ, α) are interpreted as index into the last axis that corresponds to that field line. If two-dimensional, the first axis is the batch axis as usual. diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 290aa493f9..dcee67b0c9 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -15,19 +15,20 @@ from desc.backend import complex_sqrt, flatnonzero from desc.compute.bounce_integral import ( _affine_bijection_forward, - _affine_bijection_reverse, _bounce_quadrature, - _grad_affine_bijection_reverse, _poly_der, _poly_root, _poly_val, + affine_bijection_reverse, automorphism_arcsin, automorphism_sin, bounce_integral_map, bounce_points, + grad_affine_bijection_reverse, grad_automorphism_arcsin, grad_automorphism_sin, pitch_of_extrema, + pitch_trapz, take_mask, tanh_sinh_quad, ) @@ -45,6 +46,7 @@ ) from desc.optimize import Optimizer from desc.profiles import PowerSeriesProfile +from desc.utils import only1 @partial(np.vectorize, signature="(m)->()") @@ -87,7 +89,9 @@ def test_mask_operations(): equal_nan=True, ), "take_mask has bugs." assert np.array_equal( - last[i], desired[-1] if desired.size else np.nan + last[i], + desired[-1] if desired.size else np.nan, + equal_nan=True, ), "flatnonzero has bugs." 
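For reference, the padding convention these mask checks rely on can be emulated in a few lines of plain numpy; this is only a sketch of the intended semantics, not the jit-compatible ``take_mask`` implementation.

.. code-block:: python

    import numpy as np

    a = np.array([0.3, np.nan, 1.7, np.nan, 2.2])
    mask = ~np.isnan(a)
    # Move the selected entries to the front and pad the remainder with nan,
    # so downstream code may assume all nan are gathered at the right.
    taken = np.full(a.size, np.nan)
    taken[: mask.sum()] = a[mask]
    np.testing.assert_allclose(
        taken, [0.3, 1.7, 2.2, np.nan, np.nan], equal_nan=True
    )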
@@ -235,8 +239,21 @@ def test_pitch_of_extrema(): ) B_z_ra = B.derivative() pitch_scipy = 1 / B(B_z_ra.roots(extrapolate=False)) - pitch = _filter_not_nan(pitch_of_extrema(k, B.c, B_z_ra.c)) - np.testing.assert_allclose(pitch, pitch_scipy) + pitch = pitch_of_extrema(k, B.c, B_z_ra.c) + np.testing.assert_allclose(_filter_not_nan(pitch), pitch_scipy) + + +@pytest.mark.unit +def test_pitch_trapz(): + """Test this utility function.""" + B_min_tz = np.array([0.1, 0.2]) + B_max_tz = np.array([1, 3]) + pitch_knot = np.linspace(1 / B_min_tz, 1 / B_max_tz, num=5) + pitch = pitch_trapz(pitch_knot, resolution=3) + assert np.array_equal(1 / pitch, np.sort(1 / pitch, axis=-1)) + for i in range(pitch_knot.shape[0]): + for j in range(pitch_knot.shape[1]): + assert only1(np.isclose(pitch_knot[i, j], pitch[:, j]).tolist()) @pytest.mark.unit @@ -392,14 +409,14 @@ def test_automorphism(): a, b = -312, 786 x = np.linspace(a, b, 10) y = _affine_bijection_forward(x, a, b) - x_1 = _affine_bijection_reverse(y, a, b) + x_1 = affine_bijection_reverse(y, a, b) np.testing.assert_allclose(x_1, x) np.testing.assert_allclose(_affine_bijection_forward(x_1, a, b), y) np.testing.assert_allclose(automorphism_arcsin(automorphism_sin(y)), y) np.testing.assert_allclose(automorphism_sin(automorphism_arcsin(y)), y) np.testing.assert_allclose( - _grad_affine_bijection_reverse(a, b), + grad_affine_bijection_reverse(a, b), 1 / (2 / (b - a)), ) np.testing.assert_allclose( @@ -581,7 +598,7 @@ def test_integral_0(k=0.9, resolution=10): bp1 = np.zeros_like(k) bp2 = np.arcsin(k) x, w = tanh_sinh_quad(resolution, grad_automorphism_arcsin) - Z = _affine_bijection_reverse( + Z = affine_bijection_reverse( automorphism_arcsin(x), bp1[..., np.newaxis], bp2[..., np.newaxis] ) k = k[..., np.newaxis] @@ -589,7 +606,7 @@ def test_integral_0(k=0.9, resolution=10): def integrand(Z, k): return safediv(4 / k, np.sqrt(1 - 1 / k**2 * np.sin(Z) ** 2)) - quad = np.dot(integrand(Z, k), w) * _grad_affine_bijection_reverse(bp1, bp2) + quad = np.dot(integrand(Z, k), w) * grad_affine_bijection_reverse(bp1, bp2) if k.size == 1: q = integrate.quad(integrand, bp1.item(), bp2.item(), args=(k.item(),))[0] np.testing.assert_allclose(quad, q, rtol=1e-5) @@ -603,7 +620,7 @@ def test_integral_1(k=0.9, resolution=10): bp1 = np.zeros_like(k) bp2 = np.arcsin(k) x, w = tanh_sinh_quad(resolution, grad_automorphism_arcsin) - Z = _affine_bijection_reverse( + Z = affine_bijection_reverse( automorphism_arcsin(x), bp1[..., np.newaxis], bp2[..., np.newaxis] ) k = k[..., np.newaxis] @@ -611,7 +628,7 @@ def test_integral_1(k=0.9, resolution=10): def integrand(Z, k): return 4 * k * np.sqrt(1 - 1 / k**2 * np.sin(Z) ** 2) - quad = np.dot(integrand(Z, k), w) * _grad_affine_bijection_reverse(bp1, bp2) + quad = np.dot(integrand(Z, k), w) * grad_affine_bijection_reverse(bp1, bp2) if k.size == 1: q = integrate.quad(integrand, bp1.item(), bp2.item(), args=(k.item(),))[0] np.testing.assert_allclose(quad, q, rtol=1e-4) From 77c88bf557a9278545394920ca710ea4e8d4253b Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 21 Apr 2024 11:41:10 -0400 Subject: [PATCH 107/241] Make sure nan won't appear in gradient and improve composite linspace for pitch integration --- desc/compute/bounce_integral.py | 50 +++++++++++++++++---------------- tests/test_bounce_integral.py | 31 ++++++++++++++------ 2 files changed, 49 insertions(+), 32 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index ddefea678f..11e13eef75 100644 --- a/desc/compute/bounce_integral.py +++ 
b/desc/compute/bounce_integral.py @@ -92,7 +92,7 @@ def _root_quadratic(a, b, c, distinct=False): C = complex_sqrt(discriminant) def root(xi): - return (-b + xi * C) / (2 * a) + return safediv(-b + xi * C, 2 * a) is_linear = jnp.isclose(a, 0) suppress_root = distinct & jnp.isclose(discriminant, 0) @@ -108,10 +108,10 @@ def _root_cubic(a, b, c, d, distinct=False): t_1 = 2 * b**3 - 9 * a * b * c + 27 * a**2 * d discriminant = t_1**2 - 4 * t_0**3 C = ((t_1 + complex_sqrt(discriminant)) / 2) ** (1 / 3) - C_is_zero = jnp.isclose(t_0, 0) & jnp.isclose(t_1, 0) + C_is_zero = jnp.isclose(C, 0) def root(xi): - return (b + xi * C + jnp.where(C_is_zero, 0, t_0 / (xi * C))) / (-3 * a) + return safediv(b + xi * C + jnp.where(C_is_zero, 0, t_0 / (xi * C)), -3 * a) xi0 = 1 xi1 = (-1 + (-3) ** 0.5) / 2 @@ -383,38 +383,40 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c, sort=False): return pitch -# TODO: Any reason to not define pitch as b from the start? -# Would be simpler to use 1/lambda in formulas and redefine lambda = b. -def pitch_trapz(pitch_knot, resolution): - """Returns quadrature points for trapezoidal integration in 1/pitch between knots. +def composite_linspace(knots, resolution, invert=False): + """Returns linearly spaced points between ``knots``. Parameters ---------- - pitch_knot : Array, shape(P, S) - λ values that should be included as quadrature points. - λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` - where in the latter the labels (ρ, α) are interpreted as index into the - last axis that corresponds to that field line. - The first axis is the batch axis as usual. + knots : Array, shape(P, ...) + First axis has values to return linearly spaced values between. + The remaining axis are batch axes. resolution : int - Number of quadrature points. + Number of points between each knot. + invert : bool + Whether the spacing is uniform in ``1 / knots`` or ``knots``. Returns ------- - pitch : Array, shape((P - 1) * resolution + 1, S) - Quadrature points in pitch space. + result : Array, shape((P - 1) * resolution + 1, *knots.shape[1:]) + Sorted in increasing order of ``1 / knots`` or ``knots`` + depending on whether ``invert`` is true or false, respectively. """ - pitch_knot = jnp.atleast_2d(pitch_knot) - errorif(pitch_knot.ndim != 2) - # It is tedious to do a composite Gauss Quadrature with points at b_knot. - # So let's just do trapezoidal for now. 
- b_knot = jnp.sort(1 / pitch_knot, axis=0) + knots = jnp.atleast_1d(knots) + P = knots.shape[0] + S = knots.shape[1:] + + def inverse_if_invert(f): + return 1 / f if invert else f + + b_knot = jnp.sort(inverse_if_invert(knots), axis=0) b = jnp.linspace(b_knot[:-1, ...], b_knot[1:, ...], resolution, endpoint=False) - b = jnp.moveaxis(b, source=0, destination=1).reshape(-1, pitch_knot.shape[-1]) + b = jnp.moveaxis(b, source=0, destination=1).reshape(-1, *S) b = jnp.append(b, b_knot[jnp.newaxis, -1, ...], axis=0) - assert b.shape == ((pitch_knot.shape[0] - 1) * resolution + 1, pitch_knot.shape[-1]) - return 1 / b + assert b.shape == ((P - 1) * resolution + 1, *S) + result = inverse_if_invert(b) + return result def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False): diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index dcee67b0c9..720388f25b 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -24,11 +24,11 @@ automorphism_sin, bounce_integral_map, bounce_points, + composite_linspace, grad_affine_bijection_reverse, grad_automorphism_arcsin, grad_automorphism_sin, pitch_of_extrema, - pitch_trapz, take_mask, tanh_sinh_quad, ) @@ -244,16 +244,31 @@ def test_pitch_of_extrema(): @pytest.mark.unit -def test_pitch_trapz(): - """Test this utility function.""" +def test_composite_linspace(): + """Test this utility function useful for integration over pitch variable.""" B_min_tz = np.array([0.1, 0.2]) B_max_tz = np.array([1, 3]) pitch_knot = np.linspace(1 / B_min_tz, 1 / B_max_tz, num=5) - pitch = pitch_trapz(pitch_knot, resolution=3) - assert np.array_equal(1 / pitch, np.sort(1 / pitch, axis=-1)) - for i in range(pitch_knot.shape[0]): - for j in range(pitch_knot.shape[1]): - assert only1(np.isclose(pitch_knot[i, j], pitch[:, j]).tolist()) + + def inverse_if_invert(f, invert): + return 1 / f if invert else f + + def test(invert): + print() + print(inverse_if_invert(pitch_knot, invert)) + pitch = composite_linspace(pitch_knot, resolution=3, invert=invert) + b = inverse_if_invert(pitch, invert) + print() + print(b) + np.testing.assert_allclose( + b, np.sort(b, axis=0), atol=0, rtol=0, err_msg=invert + ) + for i in range(pitch_knot.shape[0]): + for j in range(pitch_knot.shape[1]): + assert only1(np.isclose(pitch_knot[i, j], pitch[:, j]).tolist()) + + test(False) + test(True) @pytest.mark.unit From bf6cb733015c2dce4d672678e2850c3a098c5146 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 21 Apr 2024 16:31:08 -0400 Subject: [PATCH 108/241] Simplify composite_linspace function --- desc/compute/bounce_integral.py | 67 ++++++++++++++------------------- tests/test_bounce_integral.py | 32 ++++++---------- 2 files changed, 40 insertions(+), 59 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 11e13eef75..3d007570b1 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -258,6 +258,34 @@ def _poly_val(x, c): return val +def composite_linspace(knots, resolution): + """Returns linearly spaced points between ``knots``. + + Parameters + ---------- + knots : Array + First axis has values to return linearly spaced values between. + The remaining axis are batch axes. + resolution : int + Number of points between each knot. + + Returns + ------- + result : Array, shape((knots.shape[0] - 1) * resolution + 1, *knots.shape[1:]) + Sorted linearly spaced points between ``knots``. 
+ + """ + knots = jnp.atleast_1d(knots) + P = knots.shape[0] + S = knots.shape[1:] + knots = jnp.sort(knots, axis=0) + result = jnp.linspace(knots[:-1, ...], knots[1:, ...], resolution, endpoint=False) + result = jnp.moveaxis(result, source=0, destination=1).reshape(-1, *S) + result = jnp.append(result, knots[jnp.newaxis, -1, ...], axis=0) + assert result.shape == ((P - 1) * resolution + 1, *S) + return result + + def _check_shape(knots, B_c, B_z_ra_c, pitch=None): """Ensure inputs have compatible shape, and return them with full dimension. @@ -369,7 +397,6 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c, sort=False): a_min=jnp.array([0]), a_max=jnp.diff(knots), sort=sort, - # False to double weight orbits with |B|_z_ra = |B|_zz_ra = 0 at bounce points. distinct=True, ) # Can detect at most degree of |B|_z_ra spline extrema between each knot. @@ -383,42 +410,6 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c, sort=False): return pitch -def composite_linspace(knots, resolution, invert=False): - """Returns linearly spaced points between ``knots``. - - Parameters - ---------- - knots : Array, shape(P, ...) - First axis has values to return linearly spaced values between. - The remaining axis are batch axes. - resolution : int - Number of points between each knot. - invert : bool - Whether the spacing is uniform in ``1 / knots`` or ``knots``. - - Returns - ------- - result : Array, shape((P - 1) * resolution + 1, *knots.shape[1:]) - Sorted in increasing order of ``1 / knots`` or ``knots`` - depending on whether ``invert`` is true or false, respectively. - - """ - knots = jnp.atleast_1d(knots) - P = knots.shape[0] - S = knots.shape[1:] - - def inverse_if_invert(f): - return 1 / f if invert else f - - b_knot = jnp.sort(inverse_if_invert(knots), axis=0) - b = jnp.linspace(b_knot[:-1, ...], b_knot[1:, ...], resolution, endpoint=False) - b = jnp.moveaxis(b, source=0, destination=1).reshape(-1, *S) - b = jnp.append(b, b_knot[jnp.newaxis, -1, ...], axis=0) - assert b.shape == ((P - 1) * resolution + 1, *S) - result = inverse_if_invert(b) - return result - - def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False): """Compute the bounce points given spline of |B| and pitch λ. @@ -639,7 +630,7 @@ def tanh_sinh_quad(resolution, w=lambda x: 1): Parameters ---------- resolution: int - Number of quadrature points. + Number of quadrature points, preferably odd. w : callable Weight function defined, positive, and continuous on (-1, 1). 
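As a standalone sanity check of the tanh-sinh rule itself (a sketch with a hand-picked uniform step in the double-exponential variable, independent of the ``tanh_sinh_quad`` helper above), the substitution x = tanh(π/2 sinh t) integrates the endpoint-singular weight 1/√(1 − x²), whose exact integral over [−1, 1] is π.

.. code-block:: python

    import numpy as np

    # Nodes and weights from x = tanh(pi/2 * sinh(t)) on a uniform t grid.
    t = np.linspace(-3, 3, 41)
    h = t[1] - t[0]
    x = np.tanh(0.5 * np.pi * np.sinh(t))
    w = h * 0.5 * np.pi * np.cosh(t) / np.cosh(0.5 * np.pi * np.sinh(t)) ** 2
    # Endpoint-singular integrand; the nodes cluster doubly exponentially
    # toward x = -1 and x = 1, so the singularity is resolved.
    quad = np.sum(w / np.sqrt(1 - x**2))
    np.testing.assert_allclose(quad, np.pi, rtol=1e-6)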
diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 720388f25b..9eb3913fee 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -245,30 +245,20 @@ def test_pitch_of_extrema(): @pytest.mark.unit def test_composite_linspace(): - """Test this utility function useful for integration over pitch variable.""" + """Test this utility function useful for Newton-Cotes integration over pitch.""" B_min_tz = np.array([0.1, 0.2]) B_max_tz = np.array([1, 3]) pitch_knot = np.linspace(1 / B_min_tz, 1 / B_max_tz, num=5) - - def inverse_if_invert(f, invert): - return 1 / f if invert else f - - def test(invert): - print() - print(inverse_if_invert(pitch_knot, invert)) - pitch = composite_linspace(pitch_knot, resolution=3, invert=invert) - b = inverse_if_invert(pitch, invert) - print() - print(b) - np.testing.assert_allclose( - b, np.sort(b, axis=0), atol=0, rtol=0, err_msg=invert - ) - for i in range(pitch_knot.shape[0]): - for j in range(pitch_knot.shape[1]): - assert only1(np.isclose(pitch_knot[i, j], pitch[:, j]).tolist()) - - test(False) - test(True) + b_knot = 1 / pitch_knot + print() + print(b_knot) + b = composite_linspace(b_knot, resolution=3) + print() + print(b) + np.testing.assert_allclose(b, np.sort(b, axis=0), atol=0, rtol=0) + for i in range(pitch_knot.shape[0]): + for j in range(pitch_knot.shape[1]): + assert only1(np.isclose(b_knot[i, j], b[:, j]).tolist()) @pytest.mark.unit From a460ca13169c87c38d830110a6bd4b097435e326 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 21 Apr 2024 18:40:17 -0400 Subject: [PATCH 109/241] A bug has been caught, but is not yet squashed! --- desc/compute/bounce_integral.py | 109 +++++++++++++++++++++++--------- tests/test_bounce_integral.py | 85 +++++++++---------------- 2 files changed, 109 insertions(+), 85 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 3d007570b1..99019ea40a 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -499,19 +499,6 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False): # Get ζ values of bounce points from the masks. bp1 = take_mask(intersect, is_bp1) bp2 = take_mask(intersect, is_bp2) - - if check: - errorif( - jnp.any(bp1 > bp2), - AssertionError, - "Bounce points have an inversion. Maybe create an issue on GitHub.", - ) - errorif( - jnp.any(bp1[..., 1:] < bp2[..., :-1]), - AssertionError, - "Discontinuity detected. Is B_z_ra the derivative of the spline of B?", - ) - return bp1, bp2 # Consistent with (in particular the discussion on page 3 and 5 of) # V. V. Nemov, S. V. Kasilov, W. Kernbichler, M. F. Heyn. # Evaluation of 1/ν neoclassical transport in stellarators. @@ -526,6 +513,14 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False): # rotational transform to potentially capture the bounce point outside # this snapshot of the field line. + if check: + msg = "Bounce points have an inversion. Maybe create an issue on GitHub." + assert not jnp.any(bp1 > bp2), msg + msg = "Discontinuity detected. Is B_z_ra the derivative of the spline of B?" + assert not jnp.any(bp1[..., 1:] < bp2[..., :-1]), msg + + return bp1, bp2 + def _affine_bijection_forward(x, a, b): """[a, b] ∋ x ↦ y ∈ [−1, 1].""" @@ -722,7 +717,7 @@ def _interp1d_vec_with_df( def _interpolatory_quadrature( - Z, w, integrand, f, B_sup_z, B, B_z_ra, pitch, knots, method + Z, w, integrand, f, B_sup_z, B, B_z_ra, pitch, knots, method, check=False ): """Interpolate given functions to points Z and perform quadrature. 
@@ -754,6 +749,8 @@ def _interpolatory_quadrature( B = _interp1d_vec_with_df(Z_ps, knots, B, B_z_ra, method="cubic").reshape(shape) pitch = pitch[..., jnp.newaxis, jnp.newaxis] inner_product = jnp.dot(integrand(*f, B=B, pitch=pitch, Z=Z) / B_sup_z, w) + if check: + _assert_finite_and_hairy(Z, B_sup_z, B, f, B_z_ra, inner_product) return inner_product @@ -762,8 +759,45 @@ def _interpolatory_quadrature( ) +def _assert_finite_and_hairy(Z, B_sup_z, B, f, B_z_ra, inner_product): + """Check that no integrals were lost and the hairy ball theorem is upheld.""" + is_not_quad_point = jnp.isnan(Z) + # We want quantities to evaluate as finite only at quadrature points + # for the integrals with boundaries at valid bounce points. + msg = "Interpolation failed." + assert jnp.all(jnp.isfinite(B_sup_z) ^ is_not_quad_point), msg + assert jnp.all(jnp.isfinite(B) ^ is_not_quad_point), msg + assert jnp.all(jnp.isfinite(B_z_ra)), msg + for ff in f: + assert jnp.all(jnp.isfinite(ff) ^ is_not_quad_point), msg + + msg = "|B| has vanished." + assert not jnp.isclose(B, 0).any(), msg + assert not jnp.isclose(B_sup_z, 0).any(), msg + + quad_resolution = Z.shape[-1] + # Number of integrals that we should be computing. + goal = jnp.sum(1 - is_not_quad_point) // quad_resolution + # Number of integrals that were actually computed. + actual = jnp.isfinite(inner_product).sum() + assert goal == actual, f"Lost {goal - actual} integrals." + assert jnp.all(jnp.isfinite(inner_product) ^ is_not_quad_point[..., 0]) + + def _bounce_quadrature( - bp1, bp2, x, w, integrand, f, B_sup_z, B, B_z_ra, pitch, knots, method="akima" + bp1, + bp2, + x, + w, + integrand, + f, + B_sup_z, + B, + B_z_ra, + pitch, + knots, + method="akima", + check=False, ): """Bounce integrate ∫ f(ℓ) dℓ. @@ -798,14 +832,14 @@ def _group_grid_data_by_field_line(g): "is interpreted as the batch axis, which enumerates the evaluation " "of the function at particular pitch values." ) - errorif(g.ndim > 2, ValueError, msg) + errorif(g.ndim > 2, msg=msg) return g.reshape(-1, S, knots.size) f = map(_group_grid_data_by_field_line, f) Z = affine_bijection_reverse(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) # Integrate and complete the change of variable. result = _interpolatory_quadrature( - Z, w, integrand, f, B_sup_z, B, B_z_ra, pitch, knots, method + Z, w, integrand, f, B_sup_z, B, B_z_ra, pitch, knots, method, check ) * grad_affine_bijection_reverse(bp1, bp2) assert result.shape == (pitch.shape[0], S, bp1.shape[-1]) return result @@ -816,7 +850,7 @@ def _group_grid_data_by_field_line(g): ) -def bounce_integral_map( +def bounce_integral( eq, rho=jnp.linspace(1e-12, 1, 5), alpha=None, @@ -881,7 +915,7 @@ def bounce_integral_map( Returns ------- - bounce_integral : callable + bounce_integrate : callable This callable method computes the bounce integral ∫ f(ℓ) dℓ for every specified field line ℓ (constant rho, alpha), for every λ value in ``pitch``. 
items : dict @@ -927,14 +961,12 @@ def integrand_den(B, pitch, Z): eq = get("HELIOTRON") rho = jnp.linspace(1e-12, 1, 6) alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 5) - knots = jnp.linspace(0, 6 * jnp.pi, 20) - - bounce_integral, items = bounce_integral_map(eq, rho, alpha, knots) + bounce_integrate, items = bounce_integral(eq, rho, alpha) g_zz = eq.compute("g_zz", grid=items["grid_desc"])["g_zz"] - pitch = pitch_of_extrema(knots, items["B.c"], items["B_z_ra.c"]) - num = bounce_integral(integrand_num, g_zz, pitch) - den = bounce_integral(integrand_den, [], pitch) + pitch = pitch_of_extrema(items["knots"], items["B.c"], items["B_z_ra.c"]) + num = bounce_integrate(integrand_num, g_zz, pitch) + den = bounce_integrate(integrand_den, [], pitch) average = num / den assert jnp.isfinite(average).any() @@ -978,7 +1010,12 @@ def integrand_den(B, pitch, Z): # Compute |B| and group data along field lines. grid_desc, grid_fl = desc_grid_from_field_line_coords(eq, rho, alpha, knots) - data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid_desc) + data = eq.compute( + ["B^zeta", "|B|", "|B|_z|r,a"], + grid=grid_desc, + # TODO: look into override grid in different PR + override_grid=False, + ) B_sup_z = data["B^zeta"].reshape(S, knots.size) B = data["|B|"].reshape(S, knots.size) / normalize B_z_ra = data["|B|_z|r,a"].reshape(S, knots.size) / normalize @@ -992,7 +1029,7 @@ def integrand_den(B, pitch, Z): B_z_ra_c = _poly_der(B_c) assert B_z_ra_c.shape == (3, S, knots.size - 1) - def bounce_integral(integrand, f, pitch, method="akima"): + def bounce_integrate(integrand, f, pitch, method="akima"): """Bounce integrate ∫ f(ℓ) dℓ. Parameters @@ -1034,7 +1071,19 @@ def bounce_integral(integrand, f, pitch, method="akima"): """ bp1, bp2 = bounce_points(pitch, knots, B_c, B_z_ra_c, check) result = _bounce_quadrature( - bp1, bp2, x, w, integrand, f, B_sup_z, B, B_z_ra, pitch, knots, method + bp1, + bp2, + x, + w, + integrand, + f, + B_sup_z, + B, + B_z_ra, + pitch, + knots, + method, + check, ) assert result.shape[-1] == (knots.size - 1) * 3 return result @@ -1047,6 +1096,6 @@ def bounce_integral(integrand, f, pitch, method="akima"): "B.c": B_c, "B_z_ra.c": B_z_ra_c, } - return bounce_integral, items + return bounce_integrate, items else: - return bounce_integral + return bounce_integrate diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 9eb3913fee..d85b868f70 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -22,7 +22,7 @@ affine_bijection_reverse, automorphism_arcsin, automorphism_sin, - bounce_integral_map, + bounce_integral, bounce_points, composite_linspace, grad_affine_bijection_reverse, @@ -124,7 +124,7 @@ def test_reshape_convention(): err_msg = "The ordering conventions are required for correctness." 
assert "P, S, N" in inspect.getsource(bounce_points), err_msg - src = inspect.getsource(bounce_integral_map) + src = inspect.getsource(bounce_integral) assert "S, knots.size" in src, err_msg assert "pitch.shape[0], rho.size, alpha.size" in src, err_msg src = inspect.getsource(desc_grid_from_field_line_coords) @@ -481,12 +481,12 @@ def integrand(B, pitch, Z): @pytest.mark.unit -def test_example_code_and_hairy_ball(): - """Test example code in bounce_integral docstring and ensure B does not vanish.""" +def test_example_code(): + """Test example code in bounce_integral docstring.""" def integrand_num(g_zz, B, pitch, Z): """Integrand in integral in numerator of bounce average.""" - f = (1 - pitch * B) * g_zz # something arbitrary + f = (1 - pitch * B) * g_zz return safediv(f, _sqrt(1 - pitch * B)) def integrand_den(B, pitch, Z): @@ -496,19 +496,12 @@ def integrand_den(B, pitch, Z): eq = get("HELIOTRON") rho = np.linspace(1e-12, 1, 6) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) - knots = np.linspace(0, 6 * np.pi, 20) - - bounce_integral, items = bounce_integral_map(eq, rho, alpha, knots) - - # start hairy ball test - B = eq.compute("|B|", grid=items["grid_desc"], override_grid=False)["|B|"] - assert not np.isclose(B, 0, atol=1e-19).any(), "B should never vanish." - # end hairy ball test + bounce_integrate, items = bounce_integral(eq, rho, alpha) g_zz = eq.compute("g_zz", grid=items["grid_desc"])["g_zz"] - pitch = pitch_of_extrema(knots, items["B.c"], items["B_z_ra.c"]) - num = bounce_integral(integrand_num, g_zz, pitch) - den = bounce_integral(integrand_den, [], pitch) + pitch = pitch_of_extrema(items["knots"], items["B.c"], items["B_z_ra.c"]) + num = bounce_integrate(integrand_num, g_zz, pitch) + den = bounce_integrate(integrand_den, [], pitch) average = num / den assert np.isfinite(average).any() @@ -591,7 +584,7 @@ def beta(grid, data): alpha = np.linspace(0, (2 - eq.sym) * np.pi, 10) knots = np.linspace(0, 6 * np.pi, 20) # TODO now compare result to elliptic integral - bounce_integral, items = bounce_integral_map(eq, rho, alpha, knots, check=True) + bounce_integrate, items = bounce_integral(eq, rho, alpha, knots, check=True) pitch = pitch_of_extrema(knots, items["B.c"], items["B_z_ra.c"]) bp1, bp2 = bounce_points(pitch, knots, items["B.c"], items["B_z_ra.c"]) @@ -659,7 +652,9 @@ def test_bounce_averaged_drifts(): # normalization Lref = data["a"] epsilon = Lref * rho - psi_boundary = np.max(np.abs(data["psi"])) + psi_boundary = np.max( + np.abs(data["psi"]) + ) # data["psi"][np.argmax(np.abs(data["psi"]))] Bref = 2 * np.abs(psi_boundary) / Lref**2 # Creating a grid along a field line @@ -679,7 +674,7 @@ def test_bounce_averaged_drifts(): # Response: Currently the API is such that the method does all the # above preprocessing for you. Let's test it for correctness # first then do this later. - bounce_integral, items = bounce_integral_map( + bounce_integrate, items = bounce_integral( # FIXME: Question # add normalize to compute matching bounce points for the test # below, but should everything related to B be normalized? @@ -760,48 +755,28 @@ def test_bounce_averaged_drifts(): k2 = 0.5 * ((1 - pitch * B0) / (pitch * B0 * epsilon) + 1) k = np.sqrt(k2) - # Fixme: What exactly is this a function of? - # cvdrift, gbdrift is a grid quantity, so grid.num_nodes length - # on a single field line grid -> so it has length number of zeta points - # So bavg_drift_an has shape shape (number of pitch, number of zeta points). 
- # For a fixed pitch at index i, what is difference bavg_drift_an[i, j] - # and bavg_drift_an[i, j+1]? - # RG : Here are the notes that explain these integrals - # https://github.com/PlasmaControl/DESC/files/15010927/bavg.pdf - - integral_0 = test_integral_0(k) - integral_1 = test_integral_1(k) - integral_2 = 16 * k * integral_0 - integral_3 = ( - 4 / 9 * (8 * k * (-1 + 2 * k2) * integral_1 - 4 * k * (-1 + k2) * integral_0) - ) - - integral_4 = ( + # Here are the notes that explain these integrals. + # https://github.com/PlasmaControl/DESC/files/15010927/bavg.pdf. + I_0 = test_integral_0(k) + I_1 = test_integral_1(k) + I_2 = 16 * k * I_0 + I_3 = 4 / 9 * (8 * k * (-1 + 2 * k2) * I_1 - 4 * k * (-1 + k2) * I_0) + I_4 = ( 2 * np.sqrt(2) / 3 - * (4 * np.sqrt(2) * k * (-1 + 2 * k2) * integral_0 - 2 * (-1 + k2) * integral_1) + * (4 * np.sqrt(2) * k * (-1 + 2 * k2) * I_0 - 2 * (-1 + k2) * I_1) ) - integral_5 = ( + I_5 = ( 2 / 30 - * ( - 32 * k * (1 - k2 + k2**2) * integral_0 - - 16 * k * (1 - 3 * k2 + 2 * k2**2) * integral_1 - ) + * (32 * k * (1 - k2 + k2**2) * I_0 - 16 * k * (1 - 3 * k2 + 2 * k2**2) * I_1) ) - integral_6 = 2 / 3 * (k * (-2 + 4 * k2) * integral_0 - 4 * (-1 + k2) * integral_1) - integral_7 = 4 / k * (2 * k2 * integral_0 + (1 - 2 * k2) * integral_1) - - bavg_drift_an = ( - fudge_factor3 * dPdrho / B0**2 * integral_1 - - 0.5 - * fudge_factor2 - * ( - s_hat * (integral_0 + integral_1 + integral_2 + integral_3) - + alpha_MHD / B0**4 * (integral_4 + integral_5) - - (integral_6 + integral_7) - ) + I_6 = 2 / 3 * (k * (-2 + 4 * k2) * I_0 - 4 * (-1 + k2) * I_1) + I_7 = 4 / k * (2 * k2 * I_0 + (1 - 2 * k2) * I_1) + + bavg_drift_an = fudge_factor3 * dPdrho / B0**2 * I_1 - 0.5 * fudge_factor2 * ( + s_hat * (I_0 + I_1 + I_2 + I_3) + alpha_MHD / B0**4 * (I_4 + I_5) - (I_6 + I_7) ) def integrand(cvdrift, gbdrift, B, pitch, Z): @@ -810,7 +785,7 @@ def integrand(cvdrift, gbdrift, B, pitch, Z): g = _sqrt(1 - pitch * B) return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) - bavg_drift_num = bounce_integral( + bavg_drift_num = bounce_integrate( integrand=integrand, # additional things to interpolate onto quadrature points besides B and pitch f=[cvdrift, gbdrift], From df16d7ddd42676244ac3aaa9c947faf248ca9e91 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 21 Apr 2024 21:01:55 -0400 Subject: [PATCH 110/241] Fix interpolation error induced nan propogation --- desc/compute/bounce_integral.py | 92 ++++++++++++++++++++++----------- tests/test_bounce_integral.py | 36 ++++++++++--- 2 files changed, 90 insertions(+), 38 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 99019ea40a..36752e2bff 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -653,6 +653,61 @@ def tanh_sinh_quad(resolution, w=lambda x: 1): return x, W +def _suppress_bad_nan(V): + """Zero out nan values induced by error. + + Assuming that V is a well-behaved function of some interpolation points Z, + then V(Z) should evaluate as NaN only if Z is NaN. This condition needs to + be enforced explicitly due to floating point and interpolation error. + + In the context of bounce integrals, the √(1 − λ |B|) terms necessitate this. + For interpolation error in |B| may yield λ |B| > 1 at quadrature points + between bounce points, which is inconsistent with our knowledge of the |B| + spline on which the bounce points were computed. 
This inconsistency will + be more prevalent in the limit the number of quadrature points per bounce + integration is much greater than the number of knots. + + Parameters + ---------- + V : Array + Interpolation values. + + Returns + ------- + V : Array + The interpolation values with the bad NaN values set to zero. + + """ + # This simple logic is encapsulated here to make explicit the bug it resolves. + V = jnp.nan_to_num(V) + return V + + +def _assert_finite_and_hairy(Z, B_sup_z, B, f, B_z_ra, inner_product): + """Check that no integrals were lost and the hairy ball theorem is upheld.""" + is_not_quad_point = jnp.isnan(Z) + # We want quantities to evaluate as finite only at quadrature points + # for the integrals with boundaries at valid bounce points. + msg = "Interpolation failed." + assert jnp.all(jnp.isfinite(B_sup_z) ^ is_not_quad_point), msg + assert jnp.all(jnp.isfinite(B) ^ is_not_quad_point), msg + assert jnp.all(jnp.isfinite(B_z_ra)), msg + for ff in f: + assert jnp.all(jnp.isfinite(ff) ^ is_not_quad_point), msg + + msg = "|B| has vanished." + assert not jnp.isclose(B, 0).any(), msg + assert not jnp.isclose(B_sup_z, 0).any(), msg + + quad_resolution = Z.shape[-1] + # Number of integrals that we should be computing. + goal = jnp.sum(1 - is_not_quad_point) // quad_resolution + # Number of integrals that were actually computed. + actual = jnp.isfinite(inner_product).sum() + assert goal == actual, f"Lost {goal - actual} integrals." + assert jnp.all(jnp.isfinite(inner_product) ^ is_not_quad_point[..., 0]) + + _repeated_docstring = """w : Array, shape(w.size, ) Quadrature weights. integrand : callable @@ -748,7 +803,10 @@ def _interpolatory_quadrature( # Specify derivative at knots for ≈ cubic hermite interpolation. B = _interp1d_vec_with_df(Z_ps, knots, B, B_z_ra, method="cubic").reshape(shape) pitch = pitch[..., jnp.newaxis, jnp.newaxis] - inner_product = jnp.dot(integrand(*f, B=B, pitch=pitch, Z=Z) / B_sup_z, w) + inner_product = jnp.dot( + _suppress_bad_nan(integrand(*f, B=B, pitch=pitch, Z=Z)) / B_sup_z, + w, + ) if check: _assert_finite_and_hairy(Z, B_sup_z, B, f, B_z_ra, inner_product) return inner_product @@ -759,31 +817,6 @@ def _interpolatory_quadrature( ) -def _assert_finite_and_hairy(Z, B_sup_z, B, f, B_z_ra, inner_product): - """Check that no integrals were lost and the hairy ball theorem is upheld.""" - is_not_quad_point = jnp.isnan(Z) - # We want quantities to evaluate as finite only at quadrature points - # for the integrals with boundaries at valid bounce points. - msg = "Interpolation failed." - assert jnp.all(jnp.isfinite(B_sup_z) ^ is_not_quad_point), msg - assert jnp.all(jnp.isfinite(B) ^ is_not_quad_point), msg - assert jnp.all(jnp.isfinite(B_z_ra)), msg - for ff in f: - assert jnp.all(jnp.isfinite(ff) ^ is_not_quad_point), msg - - msg = "|B| has vanished." - assert not jnp.isclose(B, 0).any(), msg - assert not jnp.isclose(B_sup_z, 0).any(), msg - - quad_resolution = Z.shape[-1] - # Number of integrals that we should be computing. - goal = jnp.sum(1 - is_not_quad_point) // quad_resolution - # Number of integrals that were actually computed. - actual = jnp.isfinite(inner_product).sum() - assert goal == actual, f"Lost {goal - actual} integrals." - assert jnp.all(jnp.isfinite(inner_product) ^ is_not_quad_point[..., 0]) - - def _bounce_quadrature( bp1, bp2, @@ -944,7 +977,7 @@ def bounce_integral( f(ℓ) = (1 − λ |B|) * g_zz, where g_zz is the squared norm of the toroidal basis vector on some set of field lines specified by (ρ, α) coordinates. 
This is defined as - [∫ f(ℓ) / √(1 − λ |B|) dℓ] / [∫ 1 / √(1 − λ |B|) dℓ] + (∫ f(ℓ) / √(1 − λ |B|) dℓ) / (∫ 1 / √(1 − λ |B|) dℓ) .. code-block:: python @@ -961,7 +994,7 @@ def integrand_den(B, pitch, Z): eq = get("HELIOTRON") rho = jnp.linspace(1e-12, 1, 6) alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 5) - bounce_integrate, items = bounce_integral(eq, rho, alpha) + bounce_integrate, items = bounce_integral(eq, rho, alpha, check=True) g_zz = eq.compute("g_zz", grid=items["grid_desc"])["g_zz"] pitch = pitch_of_extrema(items["knots"], items["B.c"], items["B_z_ra.c"]) @@ -984,8 +1017,7 @@ def integrand_den(B, pitch, Z): print(pitch[:, i, j]) # Some of these bounce averages will evaluate as nan. # You should filter out these nan values when computing stuff. - average_sum_over_field_line = jnp.nansum(average, axis=-1) - print(average_sum_over_field_line) + print(jnp.nansum(average, axis=-1)) """ check = kwargs.pop("check", False) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index d85b868f70..b188631e83 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -466,16 +466,37 @@ def integrand(B, pitch, Z): x_t, w_t = tanh_sinh_quad(18, grad_automorphism_arcsin) x_t = automorphism_arcsin(x_t) tanh_sinh_arcsin = _bounce_quadrature( - bp1, bp2, x_t, w_t, integrand, [], B_sup_z, B, B_z_ra, pitch, knots + bp1, + bp2, + x_t, + w_t, + integrand, + [], + B_sup_z, + B, + B_z_ra, + pitch, + knots, + check=True, ) np.testing.assert_allclose(tanh_sinh_arcsin, truth, rtol=rtol) - - # suppress the singularity x_g, w_g = np.polynomial.legendre.leggauss(16) + # suppress the singularity w_g = w_g * grad_automorphism_sin(x_g) x_g = automorphism_sin(x_g) leg_gauss_sin = _bounce_quadrature( - bp1, bp2, x_g, w_g, integrand, [], B_sup_z, B, B_z_ra, pitch, knots + bp1, + bp2, + x_g, + w_g, + integrand, + [], + B_sup_z, + B, + B_z_ra, + pitch, + knots, + check=True, ) np.testing.assert_allclose(leg_gauss_sin, truth, rtol=rtol) @@ -497,7 +518,7 @@ def integrand_den(B, pitch, Z): rho = np.linspace(1e-12, 1, 6) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) - bounce_integrate, items = bounce_integral(eq, rho, alpha) + bounce_integrate, items = bounce_integral(eq, rho, alpha, check=True) g_zz = eq.compute("g_zz", grid=items["grid_desc"])["g_zz"] pitch = pitch_of_extrema(items["knots"], items["B.c"], items["B_z_ra.c"]) num = bounce_integrate(integrand_num, g_zz, pitch) @@ -519,8 +540,7 @@ def integrand_den(B, pitch, Z): print(pitch[:, i, j]) # Some of these bounce averages will evaluate as nan. # You should filter out these nan values when computing stuff. 
- average_sum_over_field_line = np.nansum(average, axis=-1) - print(average_sum_over_field_line) + print(np.nansum(average, axis=-1)) # @pytest.mark.unit @@ -586,7 +606,7 @@ def beta(grid, data): # TODO now compare result to elliptic integral bounce_integrate, items = bounce_integral(eq, rho, alpha, knots, check=True) pitch = pitch_of_extrema(knots, items["B.c"], items["B_z_ra.c"]) - bp1, bp2 = bounce_points(pitch, knots, items["B.c"], items["B_z_ra.c"]) + bp1, bp2 = bounce_points(pitch, knots, items["B.c"], items["B_z_ra.c"], check=True) @pytest.mark.unit From ed8dc4f8a77872da00aa0a78a47a09f4ceb9af30 Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 22 Apr 2024 00:41:52 -0400 Subject: [PATCH 111/241] Add methods for debugging bounce point inversion --- desc/compute/bounce_integral.py | 120 +++++++++++++++++++++++++++++--- tests/test_bounce_integral.py | 78 +++++++-------------- 2 files changed, 137 insertions(+), 61 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 36752e2bff..149a9f98d4 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -2,7 +2,8 @@ from functools import partial -from interpax import CubicHermiteSpline, interp1d +from interpax import CubicHermiteSpline, PPoly, interp1d +from matplotlib import pyplot as plt from desc.backend import complex_sqrt, flatnonzero, jnp, put_along_axis, take from desc.compute.utils import safediv @@ -52,6 +53,14 @@ def take_mask(a, mask, size=None, fill_value=None): ) +# only use for debugging +def _filter_not_nan(a): + """Filter out nan from ``a`` while asserting nan is padded at right.""" + is_nan = jnp.isnan(a) + assert jnp.array_equal(is_nan, jnp.sort(is_nan, axis=-1)), "take_mask() has a bug." + return a[~is_nan] + + def _filter_real(a, a_min=-jnp.inf, a_max=jnp.inf): """Keep real values inside [a_min, a_max] and set others to nan. @@ -512,16 +521,106 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False): # Don't think it's necessary to stitch together the field lines using # rotational transform to potentially capture the bounce point outside # this snapshot of the field line. - if check: - msg = "Bounce points have an inversion. Maybe create an issue on GitHub." - assert not jnp.any(bp1 > bp2), msg - msg = "Discontinuity detected. Is B_z_ra the derivative of the spline of B?" - assert not jnp.any(bp1[..., 1:] < bp2[..., :-1]), msg - + _check_bounce_points(bp1, bp2, pitch, knots, B_c) return bp1, bp2 +def plot_field_line( + B, + pitch, + bp1=jnp.array([]), + bp2=jnp.array([]), + start=None, + stop=None, + show=True, +): + """Plot the field line given spline of |B|. + + Parameters + ---------- + B : PPoly + Spline of |B| over given field line. + pitch : float + λ value. + bp1 : Array + Bounce points with B_z_ra <= 0. + bp2 : Array + Bounce points with B_z_ra >= 0. + start : float + Minimum zeta on plot. + stop : float + Maximum zeta of plot. + show : bool + Whether to show the plot. + + Returns + ------- + fig, ax : matplotlib figure and axes. 
+ + """ + fig, ax = plt.subplots() + for knot in B.x: + ax.axvline(x=knot, color="red", linestyle="--") + ax.axhline(y=1 / pitch, color="purple", label=r"$1 / \lambda$") + z = jnp.linspace( + start=B.x[0] if start is None else start, + stop=B.x[-1] if stop is None else stop, + num=100, + ) + ax.plot(z, B(z), label=r"$\vert B \vert (\zeta)$") + ax.plot(bp1, jnp.full_like(bp1, 1 / pitch), "v", markersize=8, label="bp1") + ax.plot(bp2, jnp.full_like(bp2, 1 / pitch), "^", markersize=8, label="bp2") + ax.set_xlabel(r"Field line $\zeta$") + ax.set_ylabel("Tesla") + ax.legend() + if show: + plt.tight_layout() + plt.show() + plt.close() + return fig, ax + + +def _check_bounce_points(bp1, bp2, pitch, knots, B_c): + """Check that bounce points are computed correctly. + + Parameters + ---------- + bp1, bp2 : Array, Array + Output of ``bounce_points``. + pitch : Array + Input to ``bounce_points``. + knots : Array + Input to ``bounce_points``. + B_c : Array + Input to ``bounce_points``. + + """ + msg_1 = "Bounce points have an inversion. Maybe create an issue on GitHub." + err_1 = jnp.any(bp1 > bp2) + msg_2 = "Discontinuity detected. Is B_z_ra the derivative of the spline of B?" + err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1]) + if err_1 or err_2: + P, S = bp1.shape[:-1] + for p in range(P): + for s in range(S): + err_1_ps = jnp.any(bp1[p, s] > bp2[p, s]) + err_2_ps = jnp.any(bp1[p, s, 1:] < bp2[p, s, :-1]) + if err_1_ps or err_2_ps: + print(f"Error at index {p},{s} out of {P},{S}") + bp1_ps = _filter_not_nan(bp1[p, s]) + bp2_ps = _filter_not_nan(bp2[p, s]) + print(bp1_ps) + print(bp2_ps) + plot_field_line( + PPoly(B_c[:, s], knots, check=True), pitch[p, s], bp1_ps, bp2_ps + ) + assert not err_1_ps, msg_1 + assert not err_2_ps, msg_2 + assert not err_1, msg_1 + assert not err_2, msg_2 + + def _affine_bijection_forward(x, a, b): """[a, b] ∋ x ↦ y ∈ [−1, 1].""" y = 2 * (x - a) / (b - a) - 1 @@ -889,7 +988,7 @@ def bounce_integral( alpha=None, knots=jnp.linspace(-3 * jnp.pi, 3 * jnp.pi, 25), quad=tanh_sinh_quad, - automorphism=(automorphism_sin, grad_automorphism_sin), + automorphism=(automorphism_arcsin, grad_automorphism_arcsin), return_items=True, **kwargs, ): @@ -931,8 +1030,9 @@ def bounce_integral( The returned quadrature points xₖ and weights wₖ should approximate ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). For the default choice of the automorphism below, - Tanh-Sinh quadrature works well if the integrand is hypersingular. - Otherwise, Gauss-Legendre quadrature can be more competitive. + Tanh-Sinh quadrature works well if the integrand is singular. + Otherwise, Gauss-Legendre quadrature with the sin automorphism + can be more competitive. automorphism : callable, callable The first callable should be an automorphism of the real interval [-1, 1]. The second callable should be the derivative of the first. 
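To see why the sin automorphism pairs well with Gauss-Legendre for these integrands, assume the automorphism is the map x ↦ sin(πx/2), as its name suggests; its Jacobian then cancels an inverse square-root endpoint singularity exactly. Below is a standalone numpy sketch with the formulas written out explicitly rather than calling the automorphism helpers above.

.. code-block:: python

    import numpy as np

    x, w = np.polynomial.legendre.leggauss(16)
    # Change of variable y = sin(pi/2 * x); its Jacobian (pi/2) cos(pi/2 * x)
    # exactly cancels the inverse square-root endpoint singularity below.
    w = w * 0.5 * np.pi * np.cos(0.5 * np.pi * x)
    y = np.sin(0.5 * np.pi * x)
    # After the change of variable the integrand is constant, so the
    # Gauss-Legendre rule recovers the exact value pi to machine precision.
    quad = np.sum(w / np.sqrt(1 - y**2))
    np.testing.assert_allclose(quad, np.pi, rtol=1e-12)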
diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index b188631e83..56355b336b 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -5,7 +5,6 @@ import numpy as np import pytest -from matplotlib import pyplot as plt from scipy import integrate # TODO: can use the one from interpax once .solve() is implemented @@ -16,6 +15,7 @@ from desc.compute.bounce_integral import ( _affine_bijection_forward, _bounce_quadrature, + _filter_not_nan, _poly_der, _poly_root, _poly_val, @@ -29,6 +29,7 @@ grad_automorphism_arcsin, grad_automorphism_sin, pitch_of_extrema, + plot_field_line, take_mask, tanh_sinh_quad, ) @@ -57,13 +58,6 @@ def _last_value(a): return a[idx] -def _filter_not_nan(a): - """Filter out nan from ``a`` while asserting nan is padded at right.""" - is_nan = np.isnan(a) - assert np.array_equal(is_nan, np.sort(is_nan, axis=-1)) - return a[~is_nan] - - def _sqrt(x): """Reproduces jnp.sqrt with np.sqrt.""" x = complex_sqrt(x) @@ -265,49 +259,35 @@ def test_composite_linspace(): def test_bounce_points(): """Test that bounce points are computed correctly.""" - def plot_field_line(B, pitch, start, end): - # Can observe correctness of bounce points through this plot. - fig, ax = plt.subplots() - for knot in B.x: - ax.axvline(x=knot, color="red", linestyle="--") - z = np.linspace(start, end, 100) - ax.plot(z, B(z), label=r"$\vert B \vert (\zeta)$") - ax.plot(z, np.full(z.size, 1 / pitch), label=r"$1 / \lambda$") - ax.set_xlabel(r"Field line $\zeta$") - ax.set_ylabel("Tesla") - ax.legend() - plt.show() - plt.close() - - def test_bp1_first(plot=False): + def test_bp1_first(plot): start = np.pi / 3 end = 6 * np.pi knots = np.linspace(start, end, 5) B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots)) pitch = 2 - if plot: - plot_field_line(B, pitch, start, end) bp1, bp2 = bounce_points(pitch, knots, B.c, B.derivative().c, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + if plot: + plot_field_line(B, pitch, bp1, bp2) intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1, intersect[0::2]) np.testing.assert_allclose(bp2, intersect[1::2]) - def test_bp2_first(plot=False): + def test_bp2_first(plot): start = -3 * np.pi end = -start k = np.linspace(start, end, 5) B = CubicHermiteSpline(k, np.cos(k), -np.sin(k)) pitch = 2 - if plot: - plot_field_line(B, pitch, start, end) bp1, bp2 = bounce_points(pitch, k, B.c, B.derivative().c, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + if plot: + plot_field_line(B, pitch, bp1, bp2) intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1, intersect[1::2]) np.testing.assert_allclose(bp2, intersect[0::2][1:]) - def test_bp1_before_extrema(plot=False): + def test_bp1_before_extrema(plot): start = -np.pi end = -2 * start k = np.linspace(start, end, 5) @@ -316,11 +296,10 @@ def test_bp1_before_extrema(plot=False): ) B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[3] - if plot: - plot_field_line(B, pitch, start, end) - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + if plot: + plot_field_line(B, pitch, bp1, bp2) # Our routine correctly detects intersection, while scipy, jnp.root fails. 
intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1[1], 1.9827671337414938) @@ -328,7 +307,7 @@ def test_bp1_before_extrema(plot=False): np.testing.assert_allclose(bp1, intersect[[1, 2]]) np.testing.assert_allclose(bp2, intersect[[2, 3]]) - def test_bp2_before_extrema(plot=False): + def test_bp2_before_extrema(plot): start = -1.2 * np.pi end = -2 * start k = np.linspace(start, end, 7) @@ -339,16 +318,15 @@ def test_bp2_before_extrema(plot=False): ) B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] - if plot: - plot_field_line(B, pitch, start, end) - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + if plot: + plot_field_line(B, pitch, bp1, bp2) intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1, intersect[[0, -2]]) np.testing.assert_allclose(bp2, intersect[[1, -1]]) - def test_extrema_first_and_before_bp1(plot=False): + def test_extrema_first_and_before_bp1(plot): start = -1.2 * np.pi end = -2 * start k = np.linspace(start, end, 7) @@ -359,11 +337,10 @@ def test_extrema_first_and_before_bp1(plot=False): ) B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] - if plot: - plot_field_line(B, pitch, k[2], end) - bp1, bp2 = bounce_points(pitch, k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + if plot: + plot_field_line(B, pitch, bp1, bp2, start=k[2]) # Our routine correctly detects intersection, while scipy, jnp.root fails. intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1[0], 0.8353192766102349) @@ -372,7 +349,7 @@ def test_extrema_first_and_before_bp1(plot=False): np.testing.assert_allclose(bp1, intersect[[0, 1, 3]]) np.testing.assert_allclose(bp2, intersect[[0, 2, 4]]) - def test_extrema_first_and_before_bp2(plot=False): + def test_extrema_first_and_before_bp2(plot): start = -1.2 * np.pi end = -2 * start + 1 k = np.linspace(start, end, 7) @@ -383,11 +360,10 @@ def test_extrema_first_and_before_bp2(plot=False): ) B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[1] - if plot: - plot_field_line(B, pitch, start, end) - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + if plot: + plot_field_line(B, pitch, bp1, bp2) # Our routine correctly detects intersection, while scipy, jnp.root fails. intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1[0], -0.6719044147510538) @@ -396,16 +372,16 @@ def test_extrema_first_and_before_bp2(plot=False): np.testing.assert_allclose(bp2, intersect[1::2]) # These are all the unique cases, if all tests pass then the bounce_points - # should work correctly for all inputs. Pass in True to see plots. - test_bp1_first() - test_bp2_first() - test_bp1_before_extrema() - test_bp2_before_extrema() + # should work correctly for all inputs. + test_bp1_first(True) + test_bp2_first(True) + test_bp1_before_extrema(True) + test_bp2_before_extrema(True) # In theory, this test should only pass if distinct=True when computing the # intersections in bounce points. However, we can get lucky due to floating # point errors, and it may also pass when distinct=False. 
- test_extrema_first_and_before_bp1() - test_extrema_first_and_before_bp2() + test_extrema_first_and_before_bp1(True) + test_extrema_first_and_before_bp2(True) @pytest.mark.unit From 37f2dd03179454d88b5d9d3d616671df1637dc32 Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 22 Apr 2024 03:29:40 -0400 Subject: [PATCH 112/241] Think the splines are failing. In particular, the spline approaches the singularity slower the " true integrand, which makes the integral of the spline much les integrable than the true integral. --- desc/compute/bounce_integral.py | 178 +++++++++++++++++--------------- tests/test_bounce_integral.py | 54 +++------- 2 files changed, 113 insertions(+), 119 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 149a9f98d4..016c528982 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -274,7 +274,7 @@ def composite_linspace(knots, resolution): ---------- knots : Array First axis has values to return linearly spaced values between. - The remaining axis are batch axes. + The remaining axes are batch axes. resolution : int Number of points between each knot. @@ -334,10 +334,8 @@ def _check_shape(knots, B_c, B_z_ra_c, pitch=None): errorif(not (B_c.ndim == B_z_ra_c.ndim == 3), msg=msg) errorif(B_c.shape[0] - 1 != B_z_ra_c.shape[0], msg=msg) errorif(B_c.shape[1:] != B_z_ra_c.shape[1:], msg=msg) - errorif( - B_c.shape[-1] != knots.size - 1, - msg="Last axis fails to enumerate spline polynomials.", - ) + msg = "Last axis fails to enumerate spline polynomials." + errorif(B_c.shape[-1] != knots.size - 1, msg=msg) if pitch is not None: pitch = jnp.atleast_2d(pitch) msg = "Supplied invalid shape for pitch angles." @@ -353,17 +351,19 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c, sort=False): have fat banana orbits increasing neoclassical transport. When computing ε ∼ ∫ db ∑ⱼ Hⱼ² / Iⱼ in equation 29 of + V. V. Nemov, S. V. Kasilov, W. Kernbichler, M. F. Heyn. Evaluation of 1/ν neoclassical transport in stellarators. Phys. Plasmas 1 December 1999; 6 (12): 4622–4632. https://doi.org/10.1063/1.873749 + the contribution of ∑ⱼ Hⱼ² / Iⱼ to ε is largest in the intervals such that - b ∈ [|B|(ζ*) - db, |B|(ζ*)]. - To see this, observe that Iⱼ ∼ √(1 − λ B), hence Hⱼ² / Iⱼ ∼ Hⱼ² / √(1 − λ B). - For λ = 1 / |B|(ζ*), near |B|(ζ*), the quantity 1 / √(1 − λ B) is singular. - The slower |B| tends to |B|(ζ*) the less integrable this singularity becomes. - Therefore, a quadrature for ε ∼ ∫ db ∑ⱼ Hⱼ² / Iⱼ would do well to evaluate the - integrand near b = 1 / λ = |B|(ζ*). + b ∈ [|B|(ζ*) - db, |B|(ζ*)]. To see this, observe that Iⱼ ∼ √(1 − λ B), + hence Hⱼ² / Iⱼ ∼ Hⱼ² / √(1 − λ B). For λ = 1 / |B|(ζ*), near |B|(ζ*), the + quantity 1 / √(1 − λ B) is singular. The slower |B| tends to |B|(ζ*) the + less integrable this singularity becomes. Therefore, a quadrature for + ε ∼ ∫ db ∑ⱼ Hⱼ² / Iⱼ would do well to evaluate the integrand near + b = 1 / λ = |B|(ζ*). Parameters ---------- @@ -526,13 +526,53 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False): return bp1, bp2 +def _check_bounce_points(bp1, bp2, pitch, knots, B_c): + """Check that bounce points are computed correctly. + + Parameters + ---------- + bp1, bp2 : Array, Array + Output of ``bounce_points``. + pitch : Array + Input to ``bounce_points``. + knots : Array + Input to ``bounce_points``. + B_c : Array + Input to ``bounce_points``. + + """ + msg_1 = "Bounce points have an inversion. Maybe create an issue on GitHub." 
+ err_1 = jnp.any(bp1 > bp2) + msg_2 = "Discontinuity detected. Is B_z_ra the derivative of the spline of B?" + err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1]) + if err_1 or err_2: + P, S = bp1.shape[:-1] + for p in range(P): + for s in range(S): + err_1_ps = jnp.any(bp1[p, s] > bp2[p, s]) + err_2_ps = jnp.any(bp1[p, s, 1:] < bp2[p, s, :-1]) + if err_1_ps or err_2_ps: + print(f"Error at index {p},{s} out of {P},{S}") + bp1_ps, bp2_ps = map(_filter_not_nan, (bp1[p, s], bp2[p, s])) + print(bp1_ps) + print(bp2_ps) + plot_field_line( + PPoly(B_c[:, s], knots, check=True), pitch[p, s], bp1_ps, bp2_ps + ) + assert not err_1_ps, msg_1 + assert not err_2_ps, msg_2 + assert not err_1, msg_1 + assert not err_2, msg_2 + + def plot_field_line( B, - pitch, + pitch=None, bp1=jnp.array([]), bp2=jnp.array([]), start=None, stop=None, + num=200, show=True, ): """Plot the field line given spline of |B|. @@ -548,9 +588,11 @@ def plot_field_line( bp2 : Array Bounce points with B_z_ra >= 0. start : float - Minimum zeta on plot. + Minimum ζ on plot. stop : float - Maximum zeta of plot. + Maximum ζ of plot. + num : int + Number of ζ points to plot. show : bool Whether to show the plot. @@ -562,15 +604,17 @@ def plot_field_line( fig, ax = plt.subplots() for knot in B.x: ax.axvline(x=knot, color="red", linestyle="--") - ax.axhline(y=1 / pitch, color="purple", label=r"$1 / \lambda$") z = jnp.linspace( start=B.x[0] if start is None else start, stop=B.x[-1] if stop is None else stop, - num=100, + num=num, ) ax.plot(z, B(z), label=r"$\vert B \vert (\zeta)$") - ax.plot(bp1, jnp.full_like(bp1, 1 / pitch), "v", markersize=8, label="bp1") - ax.plot(bp2, jnp.full_like(bp2, 1 / pitch), "^", markersize=8, label="bp2") + if pitch is not None: + b = 1 / pitch + ax.axhline(y=b, color="purple", label=r"$1 / \lambda$") + ax.plot(bp1, jnp.full_like(bp1, b), "v", markersize=8, label="bp1") + ax.plot(bp2, jnp.full_like(bp2, b), "^", markersize=8, label="bp2") ax.set_xlabel(r"Field line $\zeta$") ax.set_ylabel("Tesla") ax.legend() @@ -581,46 +625,6 @@ def plot_field_line( return fig, ax -def _check_bounce_points(bp1, bp2, pitch, knots, B_c): - """Check that bounce points are computed correctly. - - Parameters - ---------- - bp1, bp2 : Array, Array - Output of ``bounce_points``. - pitch : Array - Input to ``bounce_points``. - knots : Array - Input to ``bounce_points``. - B_c : Array - Input to ``bounce_points``. - - """ - msg_1 = "Bounce points have an inversion. Maybe create an issue on GitHub." - err_1 = jnp.any(bp1 > bp2) - msg_2 = "Discontinuity detected. Is B_z_ra the derivative of the spline of B?" - err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1]) - if err_1 or err_2: - P, S = bp1.shape[:-1] - for p in range(P): - for s in range(S): - err_1_ps = jnp.any(bp1[p, s] > bp2[p, s]) - err_2_ps = jnp.any(bp1[p, s, 1:] < bp2[p, s, :-1]) - if err_1_ps or err_2_ps: - print(f"Error at index {p},{s} out of {P},{S}") - bp1_ps = _filter_not_nan(bp1[p, s]) - bp2_ps = _filter_not_nan(bp2[p, s]) - print(bp1_ps) - print(bp2_ps) - plot_field_line( - PPoly(B_c[:, s], knots, check=True), pitch[p, s], bp1_ps, bp2_ps - ) - assert not err_1_ps, msg_1 - assert not err_2_ps, msg_2 - assert not err_1, msg_1 - assert not err_2, msg_2 - - def _affine_bijection_forward(x, a, b): """[a, b] ∋ x ↦ y ∈ [−1, 1].""" y = 2 * (x - a) / (b - a) - 1 @@ -778,7 +782,7 @@ def _suppress_bad_nan(V): """ # This simple logic is encapsulated here to make explicit the bug it resolves. 
- V = jnp.nan_to_num(V) + V = jnp.nan_to_num(V, posinf=jnp.inf, neginf=-jnp.inf) return V @@ -803,8 +807,16 @@ def _assert_finite_and_hairy(Z, B_sup_z, B, f, B_z_ra, inner_product): goal = jnp.sum(1 - is_not_quad_point) // quad_resolution # Number of integrals that were actually computed. actual = jnp.isfinite(inner_product).sum() - assert goal == actual, f"Lost {goal - actual} integrals." - assert jnp.all(jnp.isfinite(inner_product) ^ is_not_quad_point[..., 0]) + err_msg = ( + f"Lost {goal - actual} integrals.\n" + "If the integrand is not singular, consider making a GitHub issue.\n" + "Otherwise, this spline method has failed.\n" + "In particular, the spline approaches the singularity slower than the " + "true integrand, which makes the integral of the spline much less " + "integrable than the true integral." + ) + assert goal == actual, err_msg + assert jnp.all(jnp.isfinite(inner_product) ^ is_not_quad_point[..., 0]), err_msg _repeated_docstring = """w : Array, shape(w.size, ) @@ -840,6 +852,8 @@ def _assert_finite_and_hairy(Z, B_sup_z, B, f, B_z_ra, inner_product): method : str Method of interpolation for functions contained in ``f``. See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. + check : bool + Flag for debugging. """ _delimiter = "Returns" @@ -901,6 +915,7 @@ def _interpolatory_quadrature( B_sup_z = _interp1d_vec(Z_ps, knots, B_sup_z, method=method).reshape(shape) # Specify derivative at knots for ≈ cubic hermite interpolation. B = _interp1d_vec_with_df(Z_ps, knots, B, B_z_ra, method="cubic").reshape(shape) + pitch = pitch[..., jnp.newaxis, jnp.newaxis] inner_product = jnp.dot( _suppress_bad_nan(integrand(*f, B=B, pitch=pitch, Z=Z)) / B_sup_z, @@ -984,12 +999,12 @@ def _group_grid_data_by_field_line(g): def bounce_integral( eq, - rho=jnp.linspace(1e-12, 1, 5), + rho=jnp.linspace(1e-7, 1, 5), alpha=None, knots=jnp.linspace(-3 * jnp.pi, 3 * jnp.pi, 25), quad=tanh_sinh_quad, automorphism=(automorphism_arcsin, grad_automorphism_arcsin), - return_items=True, + check=False, **kwargs, ): """Returns a method to compute the bounce integral of any quantity. @@ -1041,16 +1056,18 @@ def bounce_integral( defines a change of variable for the bounce integral. The choice made for the automorphism can augment or suppress singularities. Keep this in mind when choosing the quadrature method. - return_items : bool - Whether to return ``items`` as described below. + check : bool + Flag for debugging. kwargs Can specify additional arguments to the ``quad`` method with kwargs. + Can also specify reference magnetic field strength and length scale + for normalization. Returns ------- bounce_integrate : callable This callable method computes the bounce integral ∫ f(ℓ) dℓ for every - specified field line ℓ (constant rho, alpha), for every λ value in ``pitch``. + specified field line ℓ for every λ value in ``pitch``. items : dict grid_desc : Grid DESC coordinate grid for the given field line coordinates. 
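The `_suppress_bad_nan` change above keeps ±inf while zeroing out nan. A minimal standalone NumPy sketch of why (illustrative values, not the library code): interpolation error can push λ|B| slightly above 1 at quadrature points between valid bounce points, so √(1 − λ|B|) yields nan even though the integral itself is fine; those points should simply drop out of the quadrature sum, while a genuinely non-finite result should still be caught by the finiteness check.

.. code-block:: python

    import numpy as np

    lam = 0.5
    B = np.array([1.9, 2.0 + 1e-12, 2.0])  # λ|B| = 0.95, just above 1, exactly 1
    v = 1 / np.sqrt(1 - lam * B)            # [finite, nan, inf]
    # nan (spurious, from interpolation error) becomes 0 and contributes nothing;
    # inf is preserved so a truly failed integral remains detectable downstream.
    v = np.nan_to_num(v, posinf=np.inf, neginf=-np.inf)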
@@ -1120,8 +1137,8 @@ def integrand_den(B, pitch, Z): print(jnp.nansum(average, axis=-1)) """ - check = kwargs.pop("check", False) - normalize = kwargs.pop("normalize", 1) + B_ref = kwargs.pop("B_ref", 1) + L_ref = kwargs.pop("L_ref", 1) if quad == tanh_sinh_quad: kwargs.setdefault("resolution", 19) x, w = quad(**kwargs) @@ -1148,9 +1165,9 @@ def integrand_den(B, pitch, Z): # TODO: look into override grid in different PR override_grid=False, ) - B_sup_z = data["B^zeta"].reshape(S, knots.size) - B = data["|B|"].reshape(S, knots.size) / normalize - B_z_ra = data["|B|_z|r,a"].reshape(S, knots.size) / normalize + B_sup_z = data["B^zeta"].reshape(S, knots.size) * L_ref / B_ref + B = data["|B|"].reshape(S, knots.size) / B_ref + B_z_ra = data["|B|_z|r,a"].reshape(S, knots.size) / B_ref # Compute spline of |B| along field lines. B_c = jnp.moveaxis( CubicHermiteSpline(knots, B, B_z_ra, axis=-1, check=check).c, @@ -1160,6 +1177,13 @@ def integrand_den(B, pitch, Z): assert B_c.shape == (4, S, knots.size - 1) B_z_ra_c = _poly_der(B_c) assert B_z_ra_c.shape == (3, S, knots.size - 1) + items = { + "grid_desc": grid_desc, + "grid_fl": grid_fl, + "knots": knots, + "B.c": B_c, + "B_z_ra.c": B_z_ra_c, + } def bounce_integrate(integrand, f, pitch, method="akima"): """Bounce integrate ∫ f(ℓ) dℓ. @@ -1220,14 +1244,4 @@ def bounce_integrate(integrand, f, pitch, method="akima"): assert result.shape[-1] == (knots.size - 1) * 3 return result - if return_items: - items = { - "grid_desc": grid_desc, - "grid_fl": grid_fl, - "knots": knots, - "B.c": B_c, - "B_z_ra.c": B_z_ra_c, - } - return bounce_integrate, items - else: - return bounce_integrate + return bounce_integrate, items diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 56355b336b..1904ba1ccc 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -646,12 +646,10 @@ def test_bounce_averaged_drifts(): data = eq.compute(["iota", "iota_r", "a", "rho", "psi"]) # normalization - Lref = data["a"] - epsilon = Lref * rho - psi_boundary = np.max( - np.abs(data["psi"]) - ) # data["psi"][np.argmax(np.abs(data["psi"]))] - Bref = 2 * np.abs(psi_boundary) / Lref**2 + L_ref = data["a"] + epsilon = L_ref * rho + psi_boundary = data["psi"][np.argmax(np.abs(data["psi"]))] + B_ref = 2 * np.abs(psi_boundary) / L_ref**2 # Creating a grid along a field line iota = np.interp(rho, data["rho"], data["iota"]) @@ -671,22 +669,7 @@ def test_bounce_averaged_drifts(): # above preprocessing for you. Let's test it for correctness # first then do this later. bounce_integrate, items = bounce_integral( - # FIXME: Question - # add normalize to compute matching bounce points for the test - # below, but should everything related to B be normalized? - # or just things relevant for computing bounce points? - # e.g. should I normalize B dot e^zeta = B^zeta by Bref as well? - # Response (R.G.): Yes, it would be better to normalize everything - # All the quantities can be normalized using combinations of Lref - # and Bref. To see what normalizations I use see below. - # For B^zeta the normalization should be Lref/Bref. 
Since we only - # use b dot grad zeta, we need B^zeta/|B| * Lref - eq, - rho, - alpha, - knots=zeta, - check=True, - normalize=Bref, + eq, rho, alpha, knots=zeta, check=True, B_ref=B_ref, L_ref=L_ref ) data_keys = [ "|grad(psi)|^2", @@ -705,20 +688,24 @@ def test_bounce_averaged_drifts(): data_bounce = eq.compute(data_keys, grid=items["grid_desc"], override_grid=False) # normalizations - bmag = data_bounce["|B|"] / Bref + bmag = data_bounce["|B|"] / B_ref B0 = np.mean(bmag) bmag_an = B0 * (1 - epsilon * np.cos(theta_PEST)) np.testing.assert_allclose(bmag, bmag_an, atol=5e-3, rtol=5e-3) - x = Lref * rho - s_hat = -x / iota * shear / Lref - gradpar = Lref * data_bounce["B^zeta"] / data_bounce["|B|"] - gradpar_an = 2 * Lref * data_bounce["iota"] * (1 - epsilon * np.cos(theta_PEST)) + x = L_ref * rho # same as epsilon + s_hat = -x / iota * shear / L_ref + gradpar = L_ref * data_bounce["B^zeta"] / data_bounce["|B|"] + gradpar_an = 2 * L_ref * data_bounce["iota"] * (1 - epsilon * np.cos(theta_PEST)) np.testing.assert_allclose(gradpar, gradpar_an, atol=9e-3, rtol=5e-3) # Comparing coefficient calculation here with coefficients from compute/_metric - cvdrift = -2 * np.sign(psi_boundary) * Bref * Lref**2 * rho * data_bounce["cvdrift"] - gbdrift = -2 * np.sign(psi_boundary) * Bref * Lref**2 * rho * data_bounce["gbdrift"] + cvdrift = ( + -2 * np.sign(psi_boundary) * B_ref * L_ref**2 * rho * data_bounce["cvdrift"] + ) + gbdrift = ( + -2 * np.sign(psi_boundary) * B_ref * L_ref**2 * rho * data_bounce["gbdrift"] + ) dPdrho = np.mean(-0.5 * (cvdrift - gbdrift) * data_bounce["|B|"] ** 2) alpha_MHD = -np.mean(dPdrho * 1 / data_bounce["iota"] ** 2 * 0.5) @@ -726,9 +713,8 @@ def test_bounce_averaged_drifts(): -np.sign(iota) * dot(data_bounce["grad(psi)"], data_bounce["grad(alpha)"]) * s_hat - / Bref + / B_ref ) - gds21_an = ( -1 * s_hat * (s_hat * theta_PEST - alpha_MHD / bmag**4 * np.sin(theta_PEST)) ) @@ -738,7 +724,6 @@ def test_bounce_averaged_drifts(): gbdrift_an = fudge_factor2 * ( -s_hat + (np.cos(theta_PEST) - gds21_an / s_hat * np.sin(theta_PEST)) ) - fudge_factor3 = 0.07 cvdrift_an = gbdrift_an + fudge_factor3 * alpha_MHD / bmag**2 # Comparing coefficients with their analytical expressions @@ -748,7 +733,6 @@ def test_bounce_averaged_drifts(): # Values of pitch angle lambda for which to evaluate the bounce averages. pitch = np.linspace(1 / np.max(bmag), 1 / np.min(bmag), 11) pitch = pitch.reshape(pitch.shape[0], -1) - k2 = 0.5 * ((1 - pitch * B0) / (pitch * B0 * epsilon) + 1) k = np.sqrt(k2) # Here are the notes that explain these integrals. @@ -792,11 +776,7 @@ def integrand(cvdrift, gbdrift, B, pitch, Z): bavg_drift_num = np.squeeze(bavg_drift_num, axis=1) for i in range(pitch.shape[0]): np.testing.assert_allclose( - # this will have size equal to the number of bounce integrals - # found along the field line (there's only one field line in the grid) _filter_not_nan(bavg_drift_num[i]), - # this will have size equal to the number of nodes used to discretize - # that field line, so this test will always fail. 
bavg_drift_an[i], atol=2e-2, rtol=1e-2, From 130f816221c22e22d6421c3d29bb91e82c925a52 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 23 Apr 2024 03:16:28 -0400 Subject: [PATCH 113/241] Squash floating point bugs that cause bounce points to not be detected (part 2) --- desc/compute/bounce_integral.py | 281 +++++++++++++++++++++----------- tests/test_bounce_integral.py | 99 +++++------ 2 files changed, 238 insertions(+), 142 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 016c528982..bf479b1dd1 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -2,7 +2,7 @@ from functools import partial -from interpax import CubicHermiteSpline, PPoly, interp1d +from interpax import CubicHermiteSpline, PchipInterpolator, PPoly, interp1d from matplotlib import pyplot as plt from desc.backend import complex_sqrt, flatnonzero, jnp, put_along_axis, take @@ -53,6 +53,14 @@ def take_mask(a, mask, size=None, fill_value=None): ) +@partial(jnp.vectorize, signature="(m)->()") +def _last_value(a): + """Return the last non-nan value in ``a``.""" + a = a[::-1] + idx = jnp.squeeze(flatnonzero(~jnp.isnan(a), size=1, fill_value=0)) + return a[idx] + + # only use for debugging def _filter_not_nan(a): """Filter out nan from ``a`` while asserting nan is padded at right.""" @@ -177,8 +185,11 @@ def _poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): if keep_only_real: r = [_filter_real(rr, a_min, a_max) for rr in r] r = jnp.stack(r, axis=-1) - # We had ignored the case of double complex roots. - distinct = distinct and c.shape[0] > 3 and not keep_only_real + # We didn't handle the case of removing the double complex roots when + # distinct is True, so we still need to remove double roots. + # This is necessary even when returning only real roots because + # floating point math can cast complex roots with small imaginary + # part into real roots. else: # Compute from eigenvalues of polynomial companion matrix. # This method can fail to detect roots near extrema, which is often @@ -197,7 +208,11 @@ def _poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): if sort or distinct: r = jnp.sort(r, axis=-1) if distinct: - mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=jnp.nan), 0) + # Atol needs to be low enough that distinct roots which are close do not + # get removed, otherwise algorithms that rely on continuity of the spline + # such as bounce_points() will fail. The current atol was chosen so that + # test_bounce_points() passes. + mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=jnp.nan), 0, atol=1e-15) r = jnp.where(mask, jnp.nan, r) return r @@ -260,8 +275,7 @@ def _poly_val(x, c): ) """ - # Should be fine to do this instead of Horner's method - # because we expect to usually integrate up to quartic polynomials. + # Fine instead of Horner's method as we expect to evaluate cubic polynomials. X = x[..., jnp.newaxis] ** jnp.arange(c.shape[0] - 1, -1, -1) val = jnp.einsum("...i,i...->...", X, c) return val @@ -274,6 +288,7 @@ def composite_linspace(knots, resolution): ---------- knots : Array First axis has values to return linearly spaced values between. + It is assumed these values are sorted. The remaining axes are batch axes. resolution : int Number of points between each knot. 
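Regarding the atol choice in the distinct-root filter above: after sorting, a root is discarded only when it coincides with its left neighbor to within atol, and prepending nan protects the first entry because nan never compares close to zero. A rough standalone NumPy sketch of that masking step (made-up root values, not the library code):

.. code-block:: python

    import numpy as np

    r = np.sort(np.array([0.5, 0.5 + 5e-16, 1.2, 2.0]))  # contains a near-double root
    mask = np.isclose(np.diff(r, prepend=np.nan), 0, atol=1e-15)
    r = np.where(mask, np.nan, r)  # second copy of the double root becomes nan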
@@ -287,7 +302,6 @@ def composite_linspace(knots, resolution): knots = jnp.atleast_1d(knots) P = knots.shape[0] S = knots.shape[1:] - knots = jnp.sort(knots, axis=0) result = jnp.linspace(knots[:-1, ...], knots[1:, ...], resolution, endpoint=False) result = jnp.moveaxis(result, source=0, destination=1).reshape(-1, *S) result = jnp.append(result, knots[jnp.newaxis, -1, ...], axis=0) @@ -344,7 +358,7 @@ def _check_shape(knots, B_c, B_z_ra_c, pitch=None): return B_c, B_z_ra_c, pitch -def pitch_of_extrema(knots, B_c, B_z_ra_c, sort=False): +def pitch_of_extrema(knots, B_c, B_z_ra_c): """Return pitch values that will capture fat banana orbits. Particles with λ = 1 / |B|(ζ*) where |B|(ζ*) are local maxima @@ -381,8 +395,6 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c, sort=False): Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials of the spline along a particular field line. - sort : bool - Whether to sort pitch values in order of increasing ζ* along field line. Returns ------- @@ -399,27 +411,30 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c, sort=False): """ B_c, B_z_ra_c, _ = _check_shape(knots, B_c, B_z_ra_c) S, N, degree = B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 - # The local minima are returned as well, which has no negative effect - # other than perhaps not being an optimal quadrature point. - extrema = _poly_root( - c=B_z_ra_c, - a_min=jnp.array([0]), - a_max=jnp.diff(knots), - sort=sort, - distinct=True, + ext = _poly_root( + c=B_z_ra_c, a_min=jnp.array([0]), a_max=jnp.diff(knots), distinct=True ) # Can detect at most degree of |B|_z_ra spline extrema between each knot. - assert extrema.shape == (S, N, degree - 1) + assert ext.shape == (S, N, degree - 1) # Reshape so that last axis enumerates extrema along a field line. - B_extrema = _poly_val(x=extrema, c=B_c[..., jnp.newaxis]).reshape(S, -1) - # Might be useful to pad all the nan at the end rather than interspersed. - B_extrema = take_mask(B_extrema, ~jnp.isnan(B_extrema)) - pitch = 1 / B_extrema.T + B_ext = _poly_val(x=ext, c=B_c[..., jnp.newaxis]).reshape(S, -1) + B_ext = jnp.sort(B_ext, axis=-1) + + # Not possible to detect all bounce points on extrema due to floating point errors. + # TODO: Sift them up and down by epsilon. + # _poly_val(x=ext, c=_poly_der(B_z_ra_c)[..., jnp.newaxis]) # noqa: E800 + eps = 0 + B_ext = jnp.clip( + B_ext, + B_ext[:, 0, jnp.newaxis] + eps, + _last_value(B_ext)[:, jnp.newaxis] - eps, + ) + pitch = 1 / B_ext.T assert pitch.shape == (N * (degree - 1), S) return pitch -def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False): +def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True): """Compute the bounce points given spline of |B| and pitch λ. Parameters @@ -446,6 +461,8 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False): field line. check : bool Flag for debugging. + plot : bool + Whether to plot even if error was not detected during the check. Returns ------- @@ -470,12 +487,12 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False): # nan values in ``intersect`` denote a polynomial has less than degree intersects. intersect = _poly_root( c=B_c, - # Expand to use same pitches across polynomials of a particular spline. - k=jnp.expand_dims(1 / pitch, axis=-1), + # New axis to use same pitches across polynomials of a particular spline. + k=(1 / pitch)[..., jnp.newaxis], a_min=jnp.array([0]), a_max=jnp.diff(knots), sort=True, - distinct=True, # Required for correctness of ``edge_case``. 
+ distinct=True, ) assert intersect.shape == (P, S, N, degree) @@ -504,7 +521,13 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False): # at the first pair. To correct the inversion, it suffices to disqualify the # first intersect as a right boundary, except under the following edge case. edge_case = (B_z_ra[..., 0] == 0) & (B_z_ra[..., 1] < 0) + # In theory, we need to keep propagating this edge case, + # e.g (B_z_ra[..., 1] < 0) | ((B_z_ra[..., 1] == 0) & (B_z_ra[..., 2] < 0)...). + # At each step, the likelihood that an intersection has already been lost + # due to floating point errors grows, so the real solution is to pick a less + # degenerate pitch value - one that does not ride the global extrema of |B|. is_bp2 = put_along_axis(is_bp2, jnp.array(0), edge_case, axis=-1) + # Get ζ values of bounce points from the masks. bp1 = take_mask(intersect, is_bp1) bp2 = take_mask(intersect, is_bp2) @@ -522,11 +545,11 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False): # rotational transform to potentially capture the bounce point outside # this snapshot of the field line. if check: - _check_bounce_points(bp1, bp2, pitch, knots, B_c) + _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot) return bp1, bp2 -def _check_bounce_points(bp1, bp2, pitch, knots, B_c): +def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=False): """Check that bounce points are computed correctly. Parameters @@ -539,49 +562,64 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B_c): Input to ``bounce_points``. B_c : Array Input to ``bounce_points``. + plot : bool + Whether to plot even if error was not detected. """ - msg_1 = "Bounce points have an inversion. Maybe create an issue on GitHub." - err_1 = jnp.any(bp1 > bp2) - msg_2 = "Discontinuity detected. Is B_z_ra the derivative of the spline of B?" - err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1]) - if err_1 or err_2: - P, S = bp1.shape[:-1] + eps = 10 * jnp.finfo(jnp.array(1.0)).eps + P, S = bp1.shape[:-1] + + msg_1 = "Bounce points have an inversion." + err_1 = jnp.any(bp1 > bp2, axis=(0, -1)) + msg_2 = "Discontinuity detected." + err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1], axis=(0, -1)) + + for s in jnp.nonzero(err_1 | err_2)[0]: + B = PPoly(B_c[:, s], knots) for p in range(P): - for s in range(S): - err_1_ps = jnp.any(bp1[p, s] > bp2[p, s]) - err_2_ps = jnp.any(bp1[p, s, 1:] < bp2[p, s, :-1]) - if err_1_ps or err_2_ps: - print(f"Error at index {p},{s} out of {P},{S}") - bp1_ps, bp2_ps = map(_filter_not_nan, (bp1[p, s], bp2[p, s])) - print(bp1_ps) - print(bp2_ps) - plot_field_line( - PPoly(B_c[:, s], knots, check=True), pitch[p, s], bp1_ps, bp2_ps - ) - assert not err_1_ps, msg_1 - assert not err_2_ps, msg_2 - assert not err_1, msg_1 - assert not err_2, msg_2 - - -def plot_field_line( + B_mid = B((bp1[p, s] + bp2[p, s]) / 2) + err_1_ps = jnp.any(bp1[p, s] > bp2[p, s]) + err_2_ps = jnp.any(bp1[p, s, 1:] < bp2[p, s, :-1]) + err_3_ps = jnp.any(B_mid > 1 / pitch[p, s] + eps) + if err_1_ps or err_2_ps or err_3_ps: + print(f"Error at index p={p}, s={s} out of {P},{S}.") + bp1_ps, bp2_ps, B_mid = map( + _filter_not_nan, (bp1[p, s], bp2[p, s], B_mid) + ) + print("bp1: ", bp1_ps) + print("bp2: ", bp2_ps) + print("B - 1/pitch:", B(bp1_ps) - 1 / pitch[p, s]) + plot_field_line_with_ripple( + B, pitch[p, s], bp1_ps, bp2_ps, id=f"{p},{s}" + ) + assert not err_1_ps, msg_1 + assert not err_2_ps, msg_2 + msg_3 = f"B midpoint = {B_mid} > {1 / pitch[p, s] + eps} = 1/pitch." 
+ assert not err_3_ps, msg_3 + if plot: + for s in range(S): + B = PPoly(B_c[:, s], knots) + plot_field_line_with_ripple(B, pitch[:, s], bp1[:, s], bp2[:, s], id=str(s)) + + +def plot_field_line_with_ripple( B, pitch=None, bp1=jnp.array([]), bp2=jnp.array([]), start=None, stop=None, - num=200, + num=300, show=True, + id=None, ): - """Plot the field line given spline of |B|. + """Plot the field line given spline of |B| and bounce points etc. Parameters ---------- B : PPoly Spline of |B| over given field line. - pitch : float + pitch : Array λ value. bp1 : Array Bounce points with B_z_ra <= 0. @@ -593,31 +631,72 @@ def plot_field_line( Maximum ζ of plot. num : int Number of ζ points to plot. + Should be dense to see oscillations. show : bool Whether to show the plot. + id : str + String to prepend to plot title. Returns ------- fig, ax : matplotlib figure and axes. """ + legend = {} + + def add(lines): + if not hasattr(lines, "__iter__"): + lines = [lines] + for line in lines: + label = line.get_label() + if label not in legend: + legend[label] = line + fig, ax = plt.subplots() for knot in B.x: - ax.axvline(x=knot, color="red", linestyle="--") + add(ax.axvline(x=knot, color="tab:blue", alpha=0.25, label="knot")) z = jnp.linspace( start=B.x[0] if start is None else start, stop=B.x[-1] if stop is None else stop, num=num, ) - ax.plot(z, B(z), label=r"$\vert B \vert (\zeta)$") + add(ax.plot(z, B(z), label=r"$\vert B \vert (\zeta)$")) + if pitch is not None: - b = 1 / pitch - ax.axhline(y=b, color="purple", label=r"$1 / \lambda$") - ax.plot(bp1, jnp.full_like(bp1, b), "v", markersize=8, label="bp1") - ax.plot(bp2, jnp.full_like(bp2, b), "^", markersize=8, label="bp2") + pitch = jnp.atleast_1d(pitch) + bp1, bp2 = map(jnp.atleast_2d, (bp1, bp2)) + for p in range(pitch.shape[0]): + b = 1 / pitch[p] + add(ax.axhline(b, color="tab:purple", label=r"$1 / \lambda$")) + bp1_p, bp2_p = map(_filter_not_nan, (bp1[p], bp2[p])) + add( + ax.scatter( + bp1_p, + jnp.full_like(bp1_p, b), + marker="v", + s=75, + color="tab:red", + label="bp1", + ) + ) + add( + ax.scatter( + bp2_p, + jnp.full_like(bp2_p, b), + marker="^", + s=75, + color="tab:green", + label="bp2", + ) + ) + ax.set_xlabel(r"Field line $\zeta$") ax.set_ylabel("Tesla") - ax.legend() + ax.legend(legend.values(), legend.keys()) + title = r"Computed bounce points for $\vert B \vert$ and pitch $\lambda$" + if id is not None: + title = f"{id}. {title}" + ax.set_title(title) if show: plt.tight_layout() plt.show() @@ -652,7 +731,7 @@ def automorphism_arcsin(x): The gradient of the arcsin automorphism introduces a singularity that augments the singularity in the bounce integral. Therefore, the quadrature scheme - used to evaluate the integral must work well on hypersingular integrals. + used to evaluate the integral must work well on singular integrals. Parameters ---------- @@ -693,7 +772,7 @@ def automorphism_sin(x): Therefore, this automorphism pulls the mass of the bounce integral away from the singularities, which should improve convergence of the quadrature to the true integral, so long as the quadrature performs better on less - singular integrands. If the integral was hypersingular to begin with, + singular integrands. If the integral was singular to begin with, Tanh-Sinh quadrature will still work well. Otherwise, Gauss-Legendre quadrature can outperform Tanh-Sinh. 
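To make the change of variable concrete, here is a rough standalone sketch (NumPy, with made-up endpoints rather than real bounce points) of composing a quadrature rule on [−1, 1] with the sin automorphism and an affine map to [bp1, bp2]. The derivative of the sin map vanishes at the endpoints, which is what pulls the mass of the transformed integrand away from the endpoint singularities:

.. code-block:: python

    import numpy as np

    def affine_to_interval(x, a, b):
        """[-1, 1] -> [a, b]."""
        return 0.5 * (b - a) * x + 0.5 * (a + b)

    def automorphism_sin_sketch(x):
        """[-1, 1] -> [-1, 1] with derivative -> 0 at the endpoints."""
        return np.sin(0.5 * np.pi * x)

    def grad_automorphism_sin_sketch(x):
        return 0.5 * np.pi * np.cos(0.5 * np.pi * x)

    x, w = np.polynomial.legendre.leggauss(32)  # Gauss-Legendre on [-1, 1]
    a, b = 0.0, np.pi                           # stand-ins for one (bp1, bp2) pair
    zeta = affine_to_interval(automorphism_sin_sketch(x), a, b)
    f = lambda z: 1 / np.sqrt(np.sin(z))        # integrable endpoint singularities
    # Chain rule: dzeta/dx = (b - a)/2 * d(sin automorphism)/dx.
    estimate = np.sum(w * f(zeta) * 0.5 * (b - a) * grad_automorphism_sin_sketch(x))
    # Converges toward the exact value Γ(1/4)Γ(1/2)/Γ(3/4) ≈ 5.244 as nodes increase.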
@@ -719,7 +798,7 @@ def grad_automorphism_sin(x): grad_automorphism_sin.__doc__ += "\n" + automorphism_sin.__doc__ -def tanh_sinh_quad(resolution, w=lambda x: 1): +def tanh_sinh_quad(resolution, w=lambda x: 1, t_max=None): """Tanh-Sinh quadrature. Returns quadrature points xₖ and weights Wₖ for the approximate evaluation @@ -731,6 +810,11 @@ def tanh_sinh_quad(resolution, w=lambda x: 1): Number of quadrature points, preferably odd. w : callable Weight function defined, positive, and continuous on (-1, 1). + t_max : float + The positive limit of quadrature points to be mapped. + Larger limit implies better results, but limited due to overflow in sinh. + A typical value is 3.14. + Computed automatically if not supplied. Returns ------- @@ -740,12 +824,13 @@ def tanh_sinh_quad(resolution, w=lambda x: 1): Quadrature weights. """ - # boundary of integral - x_max = jnp.array(1.0) - # subtract machine epsilon with buffer for floating point error - x_max = x_max - 10 * jnp.finfo(x_max).eps - # inverse of tanh-sinh transformation - t_max = jnp.arcsinh(2 * jnp.arctanh(x_max) / jnp.pi) + if t_max is None: + # boundary of integral + x_max = jnp.array(1.0) + # subtract machine epsilon with buffer for floating point error + x_max = x_max - 10 * jnp.finfo(x_max).eps + # inverse of tanh-sinh transformation + t_max = jnp.arcsinh(2 * jnp.arctanh(x_max) / jnp.pi) kh = jnp.linspace(-t_max, t_max, resolution) h = 2 * t_max / (resolution - 1) arg = 0.5 * jnp.pi * jnp.sinh(kh) @@ -766,7 +851,7 @@ def _suppress_bad_nan(V): In the context of bounce integrals, the √(1 − λ |B|) terms necessitate this. For interpolation error in |B| may yield λ |B| > 1 at quadrature points between bounce points, which is inconsistent with our knowledge of the |B| - spline on which the bounce points were computed. This inconsistency will + spline on which the bounce points were computed. This inconsistency can be more prevalent in the limit the number of quadrature points per bounce integration is much greater than the number of knots. @@ -807,14 +892,7 @@ def _assert_finite_and_hairy(Z, B_sup_z, B, f, B_z_ra, inner_product): goal = jnp.sum(1 - is_not_quad_point) // quad_resolution # Number of integrals that were actually computed. actual = jnp.isfinite(inner_product).sum() - err_msg = ( - f"Lost {goal - actual} integrals.\n" - "If the integrand is not singular, consider making a GitHub issue.\n" - "Otherwise, this spline method has failed.\n" - "In particular, the spline approaches the singularity slower than the " - "true integrand, which makes the integral of the spline much less " - "integrable than the true integral." - ) + err_msg = f"Lost {goal - actual} integrals.\n" assert goal == actual, err_msg assert jnp.all(jnp.isfinite(inner_product) ^ is_not_quad_point[..., 0]), err_msg @@ -885,7 +963,7 @@ def _interp1d_vec_with_df( def _interpolatory_quadrature( - Z, w, integrand, f, B_sup_z, B, B_z_ra, pitch, knots, method, check=False + Z, w, integrand, f, B_sup_z, B, B_z_ra, pitch, knots, method, method_B, check=False ): """Interpolate given functions to points Z and perform quadrature. @@ -914,7 +992,7 @@ def _interpolatory_quadrature( f = [_interp1d_vec(Z_ps, knots, ff, method=method).reshape(shape) for ff in f] B_sup_z = _interp1d_vec(Z_ps, knots, B_sup_z, method=method).reshape(shape) # Specify derivative at knots for ≈ cubic hermite interpolation. 
- B = _interp1d_vec_with_df(Z_ps, knots, B, B_z_ra, method="cubic").reshape(shape) + B = _interp1d_vec_with_df(Z_ps, knots, B, B_z_ra, method=method_B).reshape(shape) pitch = pitch[..., jnp.newaxis, jnp.newaxis] inner_product = jnp.dot( @@ -944,6 +1022,7 @@ def _bounce_quadrature( pitch, knots, method="akima", + method_B="cubic", check=False, ): """Bounce integrate ∫ f(ℓ) dℓ. @@ -986,7 +1065,7 @@ def _group_grid_data_by_field_line(g): Z = affine_bijection_reverse(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) # Integrate and complete the change of variable. result = _interpolatory_quadrature( - Z, w, integrand, f, B_sup_z, B, B_z_ra, pitch, knots, method, check + Z, w, integrand, f, B_sup_z, B, B_z_ra, pitch, knots, method, method_B, check ) * grad_affine_bijection_reverse(bp1, bp2) assert result.shape == (pitch.shape[0], S, bp1.shape[-1]) return result @@ -1001,10 +1080,13 @@ def bounce_integral( eq, rho=jnp.linspace(1e-7, 1, 5), alpha=None, - knots=jnp.linspace(-3 * jnp.pi, 3 * jnp.pi, 25), + knots=jnp.linspace(-3 * jnp.pi, 3 * jnp.pi, 40), quad=tanh_sinh_quad, automorphism=(automorphism_arcsin, grad_automorphism_arcsin), + B_ref=1, + L_ref=1, check=False, + plot=True, **kwargs, ): """Returns a method to compute the bounce integral of any quantity. @@ -1056,12 +1138,18 @@ def bounce_integral( defines a change of variable for the bounce integral. The choice made for the automorphism can augment or suppress singularities. Keep this in mind when choosing the quadrature method. + B_ref : float + Reference magnetic field strength for normalization. + L_ref : float + Reference length scale for normalization. check : bool Flag for debugging. + plot : bool + Whether to plot even if error was not detected during the check. kwargs Can specify additional arguments to the ``quad`` method with kwargs. - Can also specify reference magnetic field strength and length scale - for normalization. + Can also specify to use a monotonic interpolation for |B| rather + than a cubic Hermite spline with ``monotonic=True``. Returns ------- @@ -1137,8 +1225,7 @@ def integrand_den(B, pitch, Z): print(jnp.nansum(average, axis=-1)) """ - B_ref = kwargs.pop("B_ref", 1) - L_ref = kwargs.pop("L_ref", 1) + monotonic = kwargs.pop("monotonic", False) if quad == tanh_sinh_quad: kwargs.setdefault("resolution", 19) x, w = quad(**kwargs) @@ -1169,13 +1256,14 @@ def integrand_den(B, pitch, Z): B = data["|B|"].reshape(S, knots.size) / B_ref B_z_ra = data["|B|_z|r,a"].reshape(S, knots.size) / B_ref # Compute spline of |B| along field lines. - B_c = jnp.moveaxis( - CubicHermiteSpline(knots, B, B_z_ra, axis=-1, check=check).c, - source=1, - destination=-1, + B_c = ( + PchipInterpolator(knots, B, axis=-1, check=check).c + if monotonic + else CubicHermiteSpline(knots, B, B_z_ra, axis=-1, check=check).c ) - assert B_c.shape == (4, S, knots.size - 1) + B_c = jnp.moveaxis(B_c, source=1, destination=-1) B_z_ra_c = _poly_der(B_c) + assert B_c.shape == (4, S, knots.size - 1) assert B_z_ra_c.shape == (3, S, knots.size - 1) items = { "grid_desc": grid_desc, @@ -1225,7 +1313,7 @@ def bounce_integrate(integrand, f, pitch, method="akima"): lines. Last axis enumerates the bounce integrals. 
""" - bp1, bp2 = bounce_points(pitch, knots, B_c, B_z_ra_c, check) + bp1, bp2 = bounce_points(pitch, knots, B_c, B_z_ra_c, check, plot) result = _bounce_quadrature( bp1, bp2, @@ -1239,7 +1327,8 @@ def bounce_integrate(integrand, f, pitch, method="akima"): pitch, knots, method, - check, + method_B="monotonic" if monotonic else "cubic", + check=check, ) assert result.shape[-1] == (knots.size - 1) * 3 return result diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 1904ba1ccc..92c8adfa45 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -1,7 +1,6 @@ """Test bounce integral methods.""" import inspect -from functools import partial import numpy as np import pytest @@ -11,11 +10,12 @@ from scipy.interpolate import CubicHermiteSpline from scipy.special import ellipkm1 -from desc.backend import complex_sqrt, flatnonzero +from desc.backend import complex_sqrt from desc.compute.bounce_integral import ( _affine_bijection_forward, _bounce_quadrature, _filter_not_nan, + _last_value, _poly_der, _poly_root, _poly_val, @@ -29,7 +29,7 @@ grad_automorphism_arcsin, grad_automorphism_sin, pitch_of_extrema, - plot_field_line, + plot_field_line_with_ripple, take_mask, tanh_sinh_quad, ) @@ -50,14 +50,6 @@ from desc.utils import only1 -@partial(np.vectorize, signature="(m)->()") -def _last_value(a): - """Return the last non-nan value in ``a``.""" - a = np.ravel(a)[::-1] - idx = np.squeeze(flatnonzero(~np.isnan(a), size=1, fill_value=0)) - return a[idx] - - def _sqrt(x): """Reproduces jnp.sqrt with np.sqrt.""" x = complex_sqrt(x) @@ -160,9 +152,6 @@ def test_poly_root(): root = _poly_root(c.T, sort=True, distinct=True) for j in range(c.shape[0]): unique_roots = np.unique(np.roots(c[j])) - if j == 4: - # There are only two distinct roots. 
- unique_roots = unique_roots[[0, 1]] np.testing.assert_allclose( actual=_filter_not_nan(root[j]), desired=unique_roots, @@ -232,7 +221,7 @@ def test_pitch_of_extrema(): k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) ) B_z_ra = B.derivative() - pitch_scipy = 1 / B(B_z_ra.roots(extrapolate=False)) + pitch_scipy = 1 / np.sort(B(B_z_ra.roots(extrapolate=False))) pitch = pitch_of_extrema(k, B.c, B_z_ra.c) np.testing.assert_allclose(_filter_not_nan(pitch), pitch_scipy) @@ -243,7 +232,7 @@ def test_composite_linspace(): B_min_tz = np.array([0.1, 0.2]) B_max_tz = np.array([1, 3]) pitch_knot = np.linspace(1 / B_min_tz, 1 / B_max_tz, num=5) - b_knot = 1 / pitch_knot + b_knot = np.sort(1 / pitch_knot, axis=0) print() print(b_knot) b = composite_linspace(b_knot, resolution=3) @@ -259,7 +248,7 @@ def test_composite_linspace(): def test_bounce_points(): """Test that bounce points are computed correctly.""" - def test_bp1_first(plot): + def test_bp1_first(): start = np.pi / 3 end = 6 * np.pi knots = np.linspace(start, end, 5) @@ -267,27 +256,29 @@ def test_bp1_first(plot): pitch = 2 bp1, bp2 = bounce_points(pitch, knots, B.c, B.derivative().c, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) - if plot: - plot_field_line(B, pitch, bp1, bp2) intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1, intersect[0::2]) np.testing.assert_allclose(bp2, intersect[1::2]) - def test_bp2_first(plot): + def test_bp2_first(): start = -3 * np.pi end = -start k = np.linspace(start, end, 5) B = CubicHermiteSpline(k, np.cos(k), -np.sin(k)) pitch = 2 - bp1, bp2 = bounce_points(pitch, k, B.c, B.derivative().c, check=True) + bp1, bp2 = bounce_points( + pitch, + k, + B.c, + B.derivative().c, + check=True, + ) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) - if plot: - plot_field_line(B, pitch, bp1, bp2) intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1, intersect[1::2]) np.testing.assert_allclose(bp2, intersect[0::2][1:]) - def test_bp1_before_extrema(plot): + def test_bp1_before_extrema(): start = -np.pi end = -2 * start k = np.linspace(start, end, 5) @@ -298,8 +289,6 @@ def test_bp1_before_extrema(plot): pitch = 1 / B(B_z_ra.roots(extrapolate=False))[3] bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) - if plot: - plot_field_line(B, pitch, bp1, bp2) # Our routine correctly detects intersection, while scipy, jnp.root fails. 
intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1[1], 1.9827671337414938) @@ -307,7 +296,7 @@ def test_bp1_before_extrema(plot): np.testing.assert_allclose(bp1, intersect[[1, 2]]) np.testing.assert_allclose(bp2, intersect[[2, 3]]) - def test_bp2_before_extrema(plot): + def test_bp2_before_extrema(): start = -1.2 * np.pi end = -2 * start k = np.linspace(start, end, 7) @@ -320,13 +309,11 @@ def test_bp2_before_extrema(plot): pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) - if plot: - plot_field_line(B, pitch, bp1, bp2) intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1, intersect[[0, -2]]) np.testing.assert_allclose(bp2, intersect[[1, -1]]) - def test_extrema_first_and_before_bp1(plot): + def test_extrema_first_and_before_bp1(): start = -1.2 * np.pi end = -2 * start k = np.linspace(start, end, 7) @@ -337,10 +324,11 @@ def test_extrema_first_and_before_bp1(plot): ) B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] - bp1, bp2 = bounce_points(pitch, k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], check=True) + bp1, bp2 = bounce_points( + pitch, k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], check=True, plot=False + ) + plot_field_line_with_ripple(B, pitch, bp1, bp2, start=k[2]) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) - if plot: - plot_field_line(B, pitch, bp1, bp2, start=k[2]) # Our routine correctly detects intersection, while scipy, jnp.root fails. intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1[0], 0.8353192766102349) @@ -349,7 +337,7 @@ def test_extrema_first_and_before_bp1(plot): np.testing.assert_allclose(bp1, intersect[[0, 1, 3]]) np.testing.assert_allclose(bp2, intersect[[0, 2, 4]]) - def test_extrema_first_and_before_bp2(plot): + def test_extrema_first_and_before_bp2(): start = -1.2 * np.pi end = -2 * start + 1 k = np.linspace(start, end, 7) @@ -360,10 +348,21 @@ def test_extrema_first_and_before_bp2(plot): ) B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[1] + # This note may not make sense to the reader now, but if a regression + # fails this test, it will save many hours of debugging. + # If the filter in place to return only the distinct roots is too coarse, + # in particular atol < 1e-15, then this test will error. In the resulting + # plot that the error will produce the red bounce point on the first hump + # disappears. The true sequence is green, double red, green, red, green. + # The first green was close to the double red and hence the first of the + # double red root pair was erased as it was falsely detected as a duplicate. + # The second of the double red root pair is correctly erased. All that is + # left is the green. Now the bounce_points method assumes the intermediate + # value theorem holds for the continuous spline, so when fed these sequence + # of roots, the correct action is to ignore the first green root since + # otherwise the interior of the bounce points would be hills and not valleys. bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) - if plot: - plot_field_line(B, pitch, bp1, bp2) # Our routine correctly detects intersection, while scipy, jnp.root fails. 
intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1[0], -0.6719044147510538) @@ -371,17 +370,15 @@ def test_extrema_first_and_before_bp2(plot): np.testing.assert_allclose(bp1, intersect[0::2]) np.testing.assert_allclose(bp2, intersect[1::2]) - # These are all the unique cases, if all tests pass then the bounce_points - # should work correctly for all inputs. - test_bp1_first(True) - test_bp2_first(True) - test_bp1_before_extrema(True) - test_bp2_before_extrema(True) + test_bp1_first() + test_bp2_first() + test_bp1_before_extrema() + test_bp2_before_extrema() # In theory, this test should only pass if distinct=True when computing the # intersections in bounce points. However, we can get lucky due to floating # point errors, and it may also pass when distinct=False. - test_extrema_first_and_before_bp1(True) - test_extrema_first_and_before_bp2(True) + test_extrema_first_and_before_bp1() + test_extrema_first_and_before_bp2() @pytest.mark.unit @@ -478,8 +475,11 @@ def integrand(B, pitch, Z): @pytest.mark.unit -def test_example_code(): +def test_example_bounce_integral(): """Test example code in bounce_integral docstring.""" + # This test also smoke tests the bounce_points routine because + # the |B| spline that is generated from this combination of knots + # equilibrium etc. has many edge cases for bounce point computations. def integrand_num(g_zz, B, pitch, Z): """Integrand in integral in numerator of bounce average.""" @@ -669,7 +669,14 @@ def test_bounce_averaged_drifts(): # above preprocessing for you. Let's test it for correctness # first then do this later. bounce_integrate, items = bounce_integral( - eq, rho, alpha, knots=zeta, check=True, B_ref=B_ref, L_ref=L_ref + eq, + rho, + alpha, + knots=zeta, + B_ref=B_ref, + L_ref=L_ref, + check=True, + monotonic=True, ) data_keys = [ "|grad(psi)|^2", From 64e8b4666bc1679e8104bce8e583a574cf5332f4 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 23 Apr 2024 14:47:41 -0400 Subject: [PATCH 114/241] Fix all floating point error induced issues for detecting bounce points! --- desc/compute/bounce_integral.py | 82 ++++++++++++++++++--------------- tests/test_bounce_integral.py | 21 +++++---- 2 files changed, 58 insertions(+), 45 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index bf479b1dd1..48763d892a 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -99,12 +99,12 @@ def _filter_real(a, a_min=-jnp.inf, a_max=jnp.inf): def _root_linear(a, b, distinct=False): - """Return r such that a * r + b = 0.""" + """Return r such that a r + b = 0.""" return safediv(-b, a, fill=jnp.where(jnp.isclose(b, 0), 0, jnp.nan)) def _root_quadratic(a, b, c, distinct=False): - """Return r such that a * r**2 + b * r + c = 0.""" + """Return r such that a r² + b r + c = 0.""" discriminant = b**2 - 4 * a * c C = complex_sqrt(discriminant) @@ -119,7 +119,7 @@ def root(xi): def _root_cubic(a, b, c, d, distinct=False): - """Return r such that a * r**3 + b * r**2 + c * r + d = 0.""" + """Return r such that a r³ + b r² + c r + d = 0.""" # https://en.wikipedia.org/wiki/Cubic_equation#General_cubic_formula t_0 = b**2 - 3 * a * c t_1 = 2 * b**3 - 9 * a * b * c + 27 * a**2 * d @@ -186,7 +186,7 @@ def _poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): r = [_filter_real(rr, a_min, a_max) for rr in r] r = jnp.stack(r, axis=-1) # We didn't handle the case of removing the double complex roots when - # distinct is True, so we still need to remove double roots. 
+ # ``distinct`` is true, so we still need to remove double roots. # This is necessary even when returning only real roots because # floating point math can cast complex roots with small imaginary # part into real roots. @@ -281,17 +281,18 @@ def _poly_val(x, c): return val -def composite_linspace(knots, resolution): +def composite_linspace(knots, resolution, is_sorted=False): """Returns linearly spaced points between ``knots``. Parameters ---------- knots : Array First axis has values to return linearly spaced values between. - It is assumed these values are sorted. The remaining axes are batch axes. resolution : int Number of points between each knot. + is_sorted : bool + Whether the knots are already sorted along the first axis. Returns ------- @@ -302,6 +303,8 @@ def composite_linspace(knots, resolution): knots = jnp.atleast_1d(knots) P = knots.shape[0] S = knots.shape[1:] + if not is_sorted: + knots = jnp.sort(knots, axis=0) result = jnp.linspace(knots[:-1, ...], knots[1:, ...], resolution, endpoint=False) result = jnp.moveaxis(result, source=0, destination=1).reshape(-1, *S) result = jnp.append(result, knots[jnp.newaxis, -1, ...], axis=0) @@ -358,7 +361,7 @@ def _check_shape(knots, B_c, B_z_ra_c, pitch=None): return B_c, B_z_ra_c, pitch -def pitch_of_extrema(knots, B_c, B_z_ra_c): +def pitch_of_extrema(knots, B_c, B_z_ra_c, epsilon_shift=1e-6): """Return pitch values that will capture fat banana orbits. Particles with λ = 1 / |B|(ζ*) where |B|(ζ*) are local maxima @@ -395,6 +398,9 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c): Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials of the spline along a particular field line. + epsilon_shift : float + Small amount to shift maxima down and minima up to avoid floating point + errors in downstream routines. Returns ------- @@ -405,31 +411,35 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c): If there were less than ``N * (degree - 1)`` extrema detected along a field line, then the first axis, which enumerates the pitch values for - a particular field line, is padded with nan. The first axis is sorted - in order of decreasing pitch values. + a particular field line, is padded with nan. """ B_c, B_z_ra_c, _ = _check_shape(knots, B_c, B_z_ra_c) S, N, degree = B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 - ext = _poly_root( + extrema = _poly_root( c=B_z_ra_c, a_min=jnp.array([0]), a_max=jnp.diff(knots), distinct=True ) # Can detect at most degree of |B|_z_ra spline extrema between each knot. - assert ext.shape == (S, N, degree - 1) + assert extrema.shape == (S, N, degree - 1) # Reshape so that last axis enumerates extrema along a field line. - B_ext = _poly_val(x=ext, c=B_c[..., jnp.newaxis]).reshape(S, -1) - B_ext = jnp.sort(B_ext, axis=-1) - - # Not possible to detect all bounce points on extrema due to floating point errors. - # TODO: Sift them up and down by epsilon. - # _poly_val(x=ext, c=_poly_der(B_z_ra_c)[..., jnp.newaxis]) # noqa: E800 - eps = 0 - B_ext = jnp.clip( - B_ext, - B_ext[:, 0, jnp.newaxis] + eps, - _last_value(B_ext)[:, jnp.newaxis] - eps, + B_extrema = _poly_val(x=extrema, c=B_c[..., jnp.newaxis]).reshape(S, -1) + B_extrema_z_ra = _poly_val( + x=extrema, c=_poly_der(B_z_ra_c)[..., jnp.newaxis] + ).reshape(S, -1) + + # Floating point error impedes consistent detection of bounce points riding + # extrema. Shift pitch values slightly to resolve this issue. + # Higher priority to shift down maxima than shift up minima, so identify near + # equality with zero as maxima. 
+ is_maxima = B_extrema_z_ra <= 0 + B_extrema = jnp.where( + is_maxima, + (1 - epsilon_shift) * B_extrema, + (1 + epsilon_shift) * B_extrema, ) - pitch = 1 / B_ext.T + # Pad all the nan at the end rather than interspersed to be consistent. + B_extrema = take_mask(B_extrema, ~jnp.isnan(B_extrema)) + pitch = 1 / B_extrema.T assert pitch.shape == (N * (degree - 1), S) return pitch @@ -663,28 +673,26 @@ def add(lines): add(ax.plot(z, B(z), label=r"$\vert B \vert (\zeta)$")) if pitch is not None: - pitch = jnp.atleast_1d(pitch) + b = jnp.atleast_1d(1 / pitch) bp1, bp2 = map(jnp.atleast_2d, (bp1, bp2)) - for p in range(pitch.shape[0]): - b = 1 / pitch[p] - add(ax.axhline(b, color="tab:purple", label=r"$1 / \lambda$")) - bp1_p, bp2_p = map(_filter_not_nan, (bp1[p], bp2[p])) + for bb in jnp.unique(b): + add(ax.axhline(bb, color="tab:purple", alpha=0.25, label=r"$1 / \lambda$")) + for i in range(b.shape[0]): + bp1_i, bp2_i = map(_filter_not_nan, (bp1[i], bp2[i])) add( ax.scatter( - bp1_p, - jnp.full_like(bp1_p, b), + bp1_i, + jnp.full_like(bp1_i, b[i]), marker="v", - s=75, color="tab:red", label="bp1", ) ) add( ax.scatter( - bp2_p, - jnp.full_like(bp2_p, b), + bp2_i, + jnp.full_like(bp2_i, b[i]), marker="^", - s=75, color="tab:green", label="bp2", ) @@ -695,7 +703,7 @@ def add(lines): ax.legend(legend.values(), legend.keys()) title = r"Computed bounce points for $\vert B \vert$ and pitch $\lambda$" if id is not None: - title = f"{id}. {title}" + title = f"{title}. id = {id}." ax.set_title(title) if show: plt.tight_layout() @@ -892,7 +900,7 @@ def _assert_finite_and_hairy(Z, B_sup_z, B, f, B_z_ra, inner_product): goal = jnp.sum(1 - is_not_quad_point) // quad_resolution # Number of integrals that were actually computed. actual = jnp.isfinite(inner_product).sum() - err_msg = f"Lost {goal - actual} integrals.\n" + err_msg = f"Lost {goal - actual} integrals. Likely due to floating point error." 
assert goal == actual, err_msg assert jnp.all(jnp.isfinite(inner_product) ^ is_not_quad_point[..., 0]), err_msg @@ -1199,7 +1207,7 @@ def integrand_den(B, pitch, Z): eq = get("HELIOTRON") rho = jnp.linspace(1e-12, 1, 6) alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 5) - bounce_integrate, items = bounce_integral(eq, rho, alpha, check=True) + bounce_integrate, items = bounce_integral(eq, rho, alpha) g_zz = eq.compute("g_zz", grid=items["grid_desc"])["g_zz"] pitch = pitch_of_extrema(items["knots"], items["B.c"], items["B_z_ra.c"]) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 92c8adfa45..7c41bfb0f1 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -221,9 +221,11 @@ def test_pitch_of_extrema(): k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) ) B_z_ra = B.derivative() - pitch_scipy = 1 / np.sort(B(B_z_ra.roots(extrapolate=False))) - pitch = pitch_of_extrema(k, B.c, B_z_ra.c) - np.testing.assert_allclose(_filter_not_nan(pitch), pitch_scipy) + pitch_scipy = 1 / B(B_z_ra.roots(extrapolate=False)) + rtol = 1e-7 + pitch = pitch_of_extrema(k, B.c, B_z_ra.c, epsilon_shift=rtol) + eps = 100 * np.finfo(float).eps + np.testing.assert_allclose(_filter_not_nan(pitch), pitch_scipy, rtol=rtol + eps) @pytest.mark.unit @@ -232,7 +234,7 @@ def test_composite_linspace(): B_min_tz = np.array([0.1, 0.2]) B_max_tz = np.array([1, 3]) pitch_knot = np.linspace(1 / B_min_tz, 1 / B_max_tz, num=5) - b_knot = np.sort(1 / pitch_knot, axis=0) + b_knot = 1 / pitch_knot print() print(b_knot) b = composite_linspace(b_knot, resolution=3) @@ -477,7 +479,7 @@ def integrand(B, pitch, Z): @pytest.mark.unit def test_example_bounce_integral(): """Test example code in bounce_integral docstring.""" - # This test also smoke tests the bounce_points routine because + # This test also stress tests the bounce_points routine because # the |B| spline that is generated from this combination of knots # equilibrium etc. has many edge cases for bounce point computations. @@ -494,7 +496,7 @@ def integrand_den(B, pitch, Z): rho = np.linspace(1e-12, 1, 6) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) - bounce_integrate, items = bounce_integral(eq, rho, alpha, check=True) + bounce_integrate, items = bounce_integral(eq, rho, alpha, check=True, plot=False) g_zz = eq.compute("g_zz", grid=items["grid_desc"])["g_zz"] pitch = pitch_of_extrema(items["knots"], items["B.c"], items["B_z_ra.c"]) num = bounce_integrate(integrand_num, g_zz, pitch) @@ -738,8 +740,11 @@ def test_bounce_averaged_drifts(): np.testing.assert_allclose(cvdrift, cvdrift_an, atol=1.8e-2, rtol=5e-3) # Values of pitch angle lambda for which to evaluate the bounce averages. - pitch = np.linspace(1 / np.max(bmag), 1 / np.min(bmag), 11) - pitch = pitch.reshape(pitch.shape[0], -1) + delta_shift = 1e-6 + pitch_resolution = 11 + pitch = np.linspace( + 1 / np.max(bmag) + delta_shift, 1 / np.min(bmag) - delta_shift, pitch_resolution + ).reshape(pitch_resolution, -1) k2 = 0.5 * ((1 - pitch * B0) / (pitch * B0 * epsilon) + 1) k = np.sqrt(k2) # Here are the notes that explain these integrals. 
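A small standalone sketch (model field strength, not equilibrium data) of the pitch grid constructed just above: shifting both endpoints by a small amount keeps every 1/λ strictly between min|B| and max|B|, so no pitch value rides an extremum of |B|, which this patch identifies as the degenerate case for bounce point detection.

.. code-block:: python

    import numpy as np

    B = 1.0 + 0.3 * np.cos(np.linspace(0, 2 * np.pi, 100))  # model |B| on a field line
    shift = 1e-6
    pitch = np.linspace(1 / np.max(B) + shift, 1 / np.min(B) - shift, 11)
    # Every 1/λ lies strictly inside (min|B|, max|B|), away from the extrema.
    assert np.all((1 / pitch > np.min(B)) & (1 / pitch < np.max(B)))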
From 9d53735c14b8a979ae830a359844bde25872e2f4 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 23 Apr 2024 17:24:50 -0400 Subject: [PATCH 115/241] Improve test_bounce_averaged_drifts --- desc/compute/bounce_integral.py | 44 +++++++++++-------------------- tests/test_bounce_integral.py | 46 +++++++++++++++++++++++++-------- 2 files changed, 50 insertions(+), 40 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 48763d892a..83a6b1d8e3 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -53,14 +53,6 @@ def take_mask(a, mask, size=None, fill_value=None): ) -@partial(jnp.vectorize, signature="(m)->()") -def _last_value(a): - """Return the last non-nan value in ``a``.""" - a = a[::-1] - idx = jnp.squeeze(flatnonzero(~jnp.isnan(a), size=1, fill_value=0)) - return a[idx] - - # only use for debugging def _filter_not_nan(a): """Filter out nan from ``a`` while asserting nan is padded at right.""" @@ -185,11 +177,8 @@ def _poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): if keep_only_real: r = [_filter_real(rr, a_min, a_max) for rr in r] r = jnp.stack(r, axis=-1) - # We didn't handle the case of removing the double complex roots when - # ``distinct`` is true, so we still need to remove double roots. - # This is necessary even when returning only real roots because - # floating point math can cast complex roots with small imaginary - # part into real roots. + # We had ignored the case of double complex roots. + distinct = distinct and c.shape[0] > 3 and not keep_only_real else: # Compute from eigenvalues of polynomial companion matrix. # This method can fail to detect roots near extrema, which is often @@ -211,7 +200,7 @@ def _poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): # Atol needs to be low enough that distinct roots which are close do not # get removed, otherwise algorithms that rely on continuity of the spline # such as bounce_points() will fail. The current atol was chosen so that - # test_bounce_points() passes. + # test_bounce_points() passes when this block is forced to run. mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=jnp.nan), 0, atol=1e-15) r = jnp.where(mask, jnp.nan, r) return r @@ -361,7 +350,7 @@ def _check_shape(knots, B_c, B_z_ra_c, pitch=None): return B_c, B_z_ra_c, pitch -def pitch_of_extrema(knots, B_c, B_z_ra_c, epsilon_shift=1e-6): +def pitch_of_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): """Return pitch values that will capture fat banana orbits. Particles with λ = 1 / |B|(ζ*) where |B|(ζ*) are local maxima @@ -398,8 +387,8 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c, epsilon_shift=1e-6): Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials of the spline along a particular field line. - epsilon_shift : float - Small amount to shift maxima down and minima up to avoid floating point + relative_shift : float + Relative amount to shift maxima down and minima up to avoid floating point errors in downstream routines. Returns @@ -421,12 +410,8 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c, epsilon_shift=1e-6): ) # Can detect at most degree of |B|_z_ra spline extrema between each knot. assert extrema.shape == (S, N, degree - 1) - # Reshape so that last axis enumerates extrema along a field line. 
- B_extrema = _poly_val(x=extrema, c=B_c[..., jnp.newaxis]).reshape(S, -1) - B_extrema_z_ra = _poly_val( - x=extrema, c=_poly_der(B_z_ra_c)[..., jnp.newaxis] - ).reshape(S, -1) - + B_extrema = _poly_val(x=extrema, c=B_c[..., jnp.newaxis]) + B_extrema_z_ra = _poly_val(x=extrema, c=_poly_der(B_z_ra_c)[..., jnp.newaxis]) # Floating point error impedes consistent detection of bounce points riding # extrema. Shift pitch values slightly to resolve this issue. # Higher priority to shift down maxima than shift up minima, so identify near @@ -434,9 +419,10 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c, epsilon_shift=1e-6): is_maxima = B_extrema_z_ra <= 0 B_extrema = jnp.where( is_maxima, - (1 - epsilon_shift) * B_extrema, - (1 + epsilon_shift) * B_extrema, - ) + (1 - relative_shift) * B_extrema, + (1 + relative_shift) * B_extrema, + ).reshape(S, -1) + # Reshape so that last axis enumerates extrema along a field line. # Pad all the nan at the end rather than interspersed to be consistent. B_extrema = take_mask(B_extrema, ~jnp.isnan(B_extrema)) pitch = 1 / B_extrema.T @@ -674,10 +660,10 @@ def add(lines): if pitch is not None: b = jnp.atleast_1d(1 / pitch) + for val in jnp.unique(b): + add(ax.axhline(val, color="tab:purple", alpha=0.75, label=r"$1 / \lambda$")) bp1, bp2 = map(jnp.atleast_2d, (bp1, bp2)) - for bb in jnp.unique(b): - add(ax.axhline(bb, color="tab:purple", alpha=0.25, label=r"$1 / \lambda$")) - for i in range(b.shape[0]): + for i in range(bp1.shape[0]): bp1_i, bp2_i = map(_filter_not_nan, (bp1[i], bp2[i])) add( ax.scatter( diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 7c41bfb0f1..b94ce7beaf 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -1,21 +1,20 @@ """Test bounce integral methods.""" import inspect +from functools import partial import numpy as np import pytest +from matplotlib import pyplot as plt from scipy import integrate - -# TODO: can use the one from interpax once .solve() is implemented from scipy.interpolate import CubicHermiteSpline from scipy.special import ellipkm1 -from desc.backend import complex_sqrt +from desc.backend import complex_sqrt, flatnonzero from desc.compute.bounce_integral import ( _affine_bijection_forward, _bounce_quadrature, _filter_not_nan, - _last_value, _poly_der, _poly_root, _poly_val, @@ -57,6 +56,14 @@ def _sqrt(x): return x +@partial(np.vectorize, signature="(m)->()") +def _last_value(a): + """Return the last non-nan value in ``a``.""" + a = a[::-1] + idx = np.squeeze(flatnonzero(~np.isnan(a), size=1, fill_value=0)) + return a[idx] + + @pytest.mark.unit def test_mask_operations(): """Test custom masked array operation.""" @@ -223,7 +230,7 @@ def test_pitch_of_extrema(): B_z_ra = B.derivative() pitch_scipy = 1 / B(B_z_ra.roots(extrapolate=False)) rtol = 1e-7 - pitch = pitch_of_extrema(k, B.c, B_z_ra.c, epsilon_shift=rtol) + pitch = pitch_of_extrema(k, B.c, B_z_ra.c, relative_shift=rtol) eps = 100 * np.finfo(float).eps np.testing.assert_allclose(_filter_not_nan(pitch), pitch_scipy, rtol=rtol + eps) @@ -678,7 +685,9 @@ def test_bounce_averaged_drifts(): B_ref=B_ref, L_ref=L_ref, check=True, + plot=True, monotonic=True, + resolution=50, ) data_keys = [ "|grad(psi)|^2", @@ -767,8 +776,15 @@ def test_bounce_averaged_drifts(): I_6 = 2 / 3 * (k * (-2 + 4 * k2) * I_0 - 4 * (-1 + k2) * I_1) I_7 = 4 / k * (2 * k2 * I_0 + (1 - 2 * k2) * I_1) - bavg_drift_an = fudge_factor3 * dPdrho / B0**2 * I_1 - 0.5 * fudge_factor2 * ( - s_hat * (I_0 + I_1 + I_2 + I_3) + alpha_MHD / B0**4 * (I_4 + I_5) - 
(I_6 + I_7) + bavg_drift_an = np.squeeze( + fudge_factor3 * dPdrho / B0**2 * I_1 + - 0.5 + * fudge_factor2 + * ( + s_hat * (I_0 + I_1 + I_2 + I_3) + + alpha_MHD / B0**4 * (I_4 + I_5) + - (I_6 + I_7) + ) ) def integrand(cvdrift, gbdrift, B, pitch, Z): @@ -784,13 +800,21 @@ def integrand(cvdrift, gbdrift, B, pitch, Z): pitch=pitch, ) assert np.isfinite(bavg_drift_num).any(), "Quadrature failed." - # there's only one field line on the grid, so squeeze out that axis - bavg_drift_num = np.squeeze(bavg_drift_num, axis=1) + # There should be only one bounce integral done per pitch in this example, + # so we can pull out the not nan values. + bavg_drift_num = np.squeeze(_filter_not_nan(bavg_drift_num)) + assert bavg_drift_an.shape == bavg_drift_num.shape + x = np.arange(bavg_drift_an.size) + plt.scatter(x, bavg_drift_an, label="analytic") + plt.scatter(x, bavg_drift_num, label="numerical") + plt.title("Comparison of analytical to numerical.") + plt.legend() + plt.show() for i in range(pitch.shape[0]): np.testing.assert_allclose( - _filter_not_nan(bavg_drift_num[i]), + bavg_drift_num[i], bavg_drift_an[i], atol=2e-2, rtol=1e-2, - err_msg=f"Failed on index {i} for pitch {pitch[i]}", + err_msg=f"Failed on index {i} for 1/pitch {1 / pitch[i]}", ) From 33c124934474c6bbece638d26a4c817618a76e71 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 23 Apr 2024 19:32:06 -0400 Subject: [PATCH 116/241] Clean up bounce_average_drift test --- desc/compute/bounce_integral.py | 2 +- tests/test_bounce_integral.py | 79 +++++++++++++++------------------ 2 files changed, 36 insertions(+), 45 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 83a6b1d8e3..1147b2209a 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -685,7 +685,7 @@ def add(lines): ) ax.set_xlabel(r"Field line $\zeta$") - ax.set_ylabel("Tesla") + ax.set_ylabel(r"$\vert B \vert \sim 1 / \lambda$") ax.legend(legend.values(), legend.keys()) title = r"Computed bounce points for $\vert B \vert$ and pitch $\lambda$" if id is not None: diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index b94ce7beaf..13b4f51d90 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -671,23 +671,21 @@ def test_bounce_averaged_drifts(): coords1[:, 0] = np.broadcast_to(rho, N) coords1[:, 1] = theta_PEST coords1[:, 2] = zeta - # c1 = eq.compute_theta_coords(coords1) # noqa: E800 - # grid = Grid(c1, sort=False) # noqa: E800 # TODO: Request: The bounce integral operator should be able to take a grid. # Response: Currently the API is such that the method does all the # above preprocessing for you. Let's test it for correctness # first then do this later. + resolution = 50 bounce_integrate, items = bounce_integral( - eq, - rho, - alpha, + eq=eq, + rho=rho, + alpha=alpha, knots=zeta, B_ref=B_ref, L_ref=L_ref, check=True, plot=True, - monotonic=True, - resolution=50, + resolution=resolution, ) data_keys = [ "|grad(psi)|^2", @@ -708,14 +706,16 @@ def test_bounce_averaged_drifts(): # normalizations bmag = data_bounce["|B|"] / B_ref B0 = np.mean(bmag) - bmag_an = B0 * (1 - epsilon * np.cos(theta_PEST)) - np.testing.assert_allclose(bmag, bmag_an, atol=5e-3, rtol=5e-3) + bmag_analytic = B0 * (1 - epsilon * np.cos(theta_PEST)) + np.testing.assert_allclose(bmag, bmag_analytic, atol=5e-3, rtol=5e-3) - x = L_ref * rho # same as epsilon + x = L_ref * rho # same as epsilon? 
s_hat = -x / iota * shear / L_ref gradpar = L_ref * data_bounce["B^zeta"] / data_bounce["|B|"] - gradpar_an = 2 * L_ref * data_bounce["iota"] * (1 - epsilon * np.cos(theta_PEST)) - np.testing.assert_allclose(gradpar, gradpar_an, atol=9e-3, rtol=5e-3) + gradpar_analytic = ( + 2 * L_ref * data_bounce["iota"] * (1 - epsilon * np.cos(theta_PEST)) + ) + np.testing.assert_allclose(gradpar, gradpar_analytic, atol=9e-3, rtol=5e-3) # Comparing coefficient calculation here with coefficients from compute/_metric cvdrift = ( @@ -733,27 +733,27 @@ def test_bounce_averaged_drifts(): * s_hat / B_ref ) - gds21_an = ( + gds21_analytic = ( -1 * s_hat * (s_hat * theta_PEST - alpha_MHD / bmag**4 * np.sin(theta_PEST)) ) - np.testing.assert_allclose(gds21, gds21_an, atol=1.7e-2, rtol=5e-4) + np.testing.assert_allclose(gds21, gds21_analytic, atol=1.7e-2, rtol=5e-4) fudge_factor2 = 0.19 - gbdrift_an = fudge_factor2 * ( - -s_hat + (np.cos(theta_PEST) - gds21_an / s_hat * np.sin(theta_PEST)) + gbdrift_analytic = fudge_factor2 * ( + -s_hat + (np.cos(theta_PEST) - gds21_analytic / s_hat * np.sin(theta_PEST)) ) fudge_factor3 = 0.07 - cvdrift_an = gbdrift_an + fudge_factor3 * alpha_MHD / bmag**2 + cvdrift_analytic = gbdrift_analytic + fudge_factor3 * alpha_MHD / bmag**2 # Comparing coefficients with their analytical expressions - np.testing.assert_allclose(gbdrift, gbdrift_an, atol=1.2e-2, rtol=5e-3) - np.testing.assert_allclose(cvdrift, cvdrift_an, atol=1.8e-2, rtol=5e-3) + np.testing.assert_allclose(gbdrift, gbdrift_analytic, atol=1.2e-2, rtol=5e-3) + np.testing.assert_allclose(cvdrift, cvdrift_analytic, atol=1.8e-2, rtol=5e-3) # Values of pitch angle lambda for which to evaluate the bounce averages. delta_shift = 1e-6 - pitch_resolution = 11 + pitch_resolution = 50 pitch = np.linspace( 1 / np.max(bmag) + delta_shift, 1 / np.min(bmag) - delta_shift, pitch_resolution - ).reshape(pitch_resolution, -1) + ) k2 = 0.5 * ((1 - pitch * B0) / (pitch * B0 * epsilon) + 1) k = np.sqrt(k2) # Here are the notes that explain these integrals. @@ -776,7 +776,7 @@ def test_bounce_averaged_drifts(): I_6 = 2 / 3 * (k * (-2 + 4 * k2) * I_0 - 4 * (-1 + k2) * I_1) I_7 = 4 / k * (2 * k2 * I_0 + (1 - 2 * k2) * I_1) - bavg_drift_an = np.squeeze( + bouce_drift_analytic = ( fudge_factor3 * dPdrho / B0**2 * I_1 - 0.5 * fudge_factor2 @@ -788,33 +788,24 @@ def test_bounce_averaged_drifts(): ) def integrand(cvdrift, gbdrift, B, pitch, Z): - # The arguments to this function will be interpolated - # onto the quadrature points before these quantities are evaluated. g = _sqrt(1 - pitch * B) return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) - bavg_drift_num = bounce_integrate( + bounce_drift = bounce_integrate( integrand=integrand, - # additional things to interpolate onto quadrature points besides B and pitch f=[cvdrift, gbdrift], - pitch=pitch, + pitch=pitch.reshape(pitch_resolution, -1), ) - assert np.isfinite(bavg_drift_num).any(), "Quadrature failed." - # There should be only one bounce integral done per pitch in this example, - # so we can pull out the not nan values. - bavg_drift_num = np.squeeze(_filter_not_nan(bavg_drift_num)) - assert bavg_drift_an.shape == bavg_drift_num.shape - x = np.arange(bavg_drift_an.size) - plt.scatter(x, bavg_drift_an, label="analytic") - plt.scatter(x, bavg_drift_num, label="numerical") - plt.title("Comparison of analytical to numerical.") + # There is only one bounce integral per pitch in this example. 
+ bounce_drift = np.squeeze(_filter_not_nan(bounce_drift)) + assert bouce_drift_analytic.shape == bounce_drift.shape + + plt.plot(1 / pitch, bouce_drift_analytic, marker="o", label="analytic") + plt.plot(1 / pitch, bounce_drift, marker="x", label="numerical") + plt.xlabel(r"$1 / \lambda$") + plt.ylabel("Bounce averaged drift") + plt.title(f"Quadrature resolution = {resolution}. Delta shift = {delta_shift}.") plt.legend() + plt.tight_layout() plt.show() - for i in range(pitch.shape[0]): - np.testing.assert_allclose( - bavg_drift_num[i], - bavg_drift_an[i], - atol=2e-2, - rtol=1e-2, - err_msg=f"Failed on index {i} for 1/pitch {1 / pitch[i]}", - ) + np.testing.assert_allclose(bounce_drift, bouce_drift_analytic, atol=2e-2, rtol=1e-2) From 81aa30abbf76720ecec72e37852d27abd31fc194 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 23 Apr 2024 20:10:35 -0400 Subject: [PATCH 117/241] Make it simpler to change spline method in test_bounce_averaged_drifts --- tests/test_bounce_integral.py | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 13b4f51d90..08ae0e5fff 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -676,6 +676,8 @@ def test_bounce_averaged_drifts(): # above preprocessing for you. Let's test it for correctness # first then do this later. resolution = 50 + # Whether to use monotonic or Hermite splines to interpolate |B|. + monotonic = False bounce_integrate, items = bounce_integral( eq=eq, rho=rho, @@ -686,6 +688,7 @@ def test_bounce_averaged_drifts(): check=True, plot=True, resolution=resolution, + monotonic=monotonic, ) data_keys = [ "|grad(psi)|^2", @@ -744,7 +747,6 @@ def test_bounce_averaged_drifts(): ) fudge_factor3 = 0.07 cvdrift_analytic = gbdrift_analytic + fudge_factor3 * alpha_MHD / bmag**2 - # Comparing coefficients with their analytical expressions np.testing.assert_allclose(gbdrift, gbdrift_analytic, atol=1.2e-2, rtol=5e-3) np.testing.assert_allclose(cvdrift, cvdrift_analytic, atol=1.8e-2, rtol=5e-3) @@ -776,7 +778,7 @@ def test_bounce_averaged_drifts(): I_6 = 2 / 3 * (k * (-2 + 4 * k2) * I_0 - 4 * (-1 + k2) * I_1) I_7 = 4 / k * (2 * k2 * I_0 + (1 - 2 * k2) * I_1) - bouce_drift_analytic = ( + bounce_drift_analytic = ( fudge_factor3 * dPdrho / B0**2 * I_1 - 0.5 * fudge_factor2 @@ -791,21 +793,33 @@ def integrand(cvdrift, gbdrift, B, pitch, Z): g = _sqrt(1 - pitch * B) return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) + # Can choose method of interpolation for all quantities besides |B| from + # interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html#interpax.interp1d. + method = "akima" bounce_drift = bounce_integrate( integrand=integrand, f=[cvdrift, gbdrift], pitch=pitch.reshape(pitch_resolution, -1), + method=method, ) # There is only one bounce integral per pitch in this example. bounce_drift = np.squeeze(_filter_not_nan(bounce_drift)) - assert bouce_drift_analytic.shape == bounce_drift.shape + assert bounce_drift_analytic.shape == bounce_drift.shape - plt.plot(1 / pitch, bouce_drift_analytic, marker="o", label="analytic") + plt.plot(1 / pitch, bounce_drift_analytic, marker="o", label="analytic") plt.plot(1 / pitch, bounce_drift, marker="x", label="numerical") plt.xlabel(r"$1 / \lambda$") plt.ylabel("Bounce averaged drift") - plt.title(f"Quadrature resolution = {resolution}. 
Delta shift = {delta_shift}.") plt.legend() plt.tight_layout() plt.show() - np.testing.assert_allclose(bounce_drift, bouce_drift_analytic, atol=2e-2, rtol=1e-2) + msg = ( + "Maybe tune these parameters?" + f"Quadrature resolution is {resolution}.\n" + f"Delta shift is {delta_shift}.\n" + f"Spline method for integrand quantities is {method}.\n" + f"Spline method for |B| is monotonic? (as opposed to Hermite): {monotonic}." + ) + np.testing.assert_allclose( + bounce_drift, bounce_drift_analytic, atol=2e-2, rtol=1e-2, err_msg=msg + ) From 26a97276af9b3844741bfa6cbba76e2aaae27288 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 23 Apr 2024 21:32:00 -0400 Subject: [PATCH 118/241] Move function definition to stop circular import --- desc/compute/bounce_integral.py | 54 +++++++++++++++++++++++++++- desc/equilibrium/coords.py | 62 +-------------------------------- tests/test_bounce_integral.py | 2 +- 3 files changed, 55 insertions(+), 63 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 1147b2209a..be0b9fee93 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -7,7 +7,7 @@ from desc.backend import complex_sqrt, flatnonzero, jnp, put_along_axis, take from desc.compute.utils import safediv -from desc.equilibrium.coords import desc_grid_from_field_line_coords +from desc.grid import Grid, meshgrid_inverse_idx, meshgrid_unique_idx from desc.utils import errorif @@ -1328,3 +1328,55 @@ def bounce_integrate(integrand, f, pitch, method="akima"): return result return bounce_integrate, items + + +def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): + """Return DESC coordinate grid from given Clebsch-Type field-line coordinates. + + Create a meshgrid from the given field line coordinates, + and return the equivalent DESC coordinate grid. + + Parameters + ---------- + eq : Equilibrium + Equilibrium on which to perform coordinate mapping. + rho : ndarray + Unique flux surface label coordinates. + alpha : ndarray + Unique field line label coordinates over a constant rho surface. + zeta : ndarray + Unique field line-following ζ coordinates. + + Returns + ------- + grid_desc : Grid + DESC coordinate grid for the given field line coordinates. + grid_fl : Grid + Clebsch-Type field-line coordinate grid. 
+ + """ + r, a, z_fl = map(jnp.ravel, jnp.meshgrid(rho, alpha, zeta, indexing="ij")) + coords_fl = jnp.column_stack([r, a, z_fl]) + _unique_rho_idx = meshgrid_unique_idx(rho.size, alpha.size, zeta.size)[0] + _inverse_rho_idx = meshgrid_inverse_idx(rho.size, alpha.size, zeta.size)[0] + grid_fl = Grid( + nodes=coords_fl, + sort=False, + jitable=True, + _unique_rho_idx=_unique_rho_idx, + _inverse_rho_idx=_inverse_rho_idx, + ) + coords_desc = eq.map_coordinates( + coords_fl, + inbasis=("rho", "alpha", "zeta"), + outbasis=("rho", "theta", "zeta"), + period=(jnp.inf, 2 * jnp.pi, jnp.inf), + ) + grid_desc = Grid( + nodes=coords_desc, + sort=False, + jitable=True, + _unique_rho_idx=_unique_rho_idx, + _inverse_rho_idx=_inverse_rho_idx, + ) + return grid_desc, grid_fl diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index 5aacb5bf57..695fe7d31f 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -9,14 +9,7 @@ from desc.backend import fori_loop, jit, jnp, put, root, root_scalar, vmap from desc.compute import compute as compute_fun from desc.compute import data_index, get_profiles, get_transforms -from desc.grid import ( - ConcentricGrid, - Grid, - LinearGrid, - QuadratureGrid, - meshgrid_inverse_idx, - meshgrid_unique_idx, -) +from desc.grid import ConcentricGrid, Grid, LinearGrid, QuadratureGrid from desc.transform import Transform from desc.utils import setdefault @@ -309,59 +302,6 @@ def fixup(x, *args): return out -def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): - """Return DESC coordinate grid from given Clebsch-Type field-line coordinates. - - Create a meshgrid from the given field line coordinates, - and return the equivalent DESC coordinate grid. - - Parameters - ---------- - eq : Equilibrium - Equilibrium on which to perform coordinate mapping. - rho : ndarray - Unique flux surface label coordinates. - alpha : ndarray - Unique field line label coordinates over a constant rho surface. - zeta : ndarray - Unique field line-following ζ coordinates. - - Returns - ------- - grid_desc : Grid - DESC coordinate grid for the given field line coordinates. - grid_fl : Grid - Clebsch-Type field-line coordinate grid. - - """ - r, a, z_fl = map(jnp.ravel, jnp.meshgrid(rho, alpha, zeta, indexing="ij")) - coords_fl = jnp.column_stack([r, a, z_fl]) - _unique_rho_idx = meshgrid_unique_idx(rho.size, alpha.size, zeta.size)[0] - _inverse_rho_idx = meshgrid_inverse_idx(rho.size, alpha.size, zeta.size)[0] - grid_fl = Grid( - nodes=coords_fl, - sort=False, - jitable=True, - _unique_rho_idx=_unique_rho_idx, - _inverse_rho_idx=_inverse_rho_idx, - ) - coords_desc = map_coordinates( - eq, - coords_fl, - inbasis=("rho", "alpha", "zeta"), - outbasis=("rho", "theta", "zeta"), - period=(np.inf, 2 * np.pi, np.inf), - ) - grid_desc = Grid( - nodes=coords_desc, - sort=False, - jitable=True, - _unique_rho_idx=_unique_rho_idx, - _inverse_rho_idx=_inverse_rho_idx, - ) - return grid_desc, grid_fl - - def is_nested(eq, grid=None, R_lmn=None, Z_lmn=None, L_lmn=None, msg=None): """Check that an equilibrium has properly nested flux surfaces in a plane. 
diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 08ae0e5fff..caf76c3ced 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -24,6 +24,7 @@ bounce_integral, bounce_points, composite_linspace, + desc_grid_from_field_line_coords, grad_affine_bijection_reverse, grad_automorphism_arcsin, grad_automorphism_sin, @@ -35,7 +36,6 @@ from desc.compute.utils import dot, safediv from desc.continuation import solve_continuation_automatic from desc.equilibrium import Equilibrium -from desc.equilibrium.coords import desc_grid_from_field_line_coords from desc.examples import get from desc.geometry import FourierRZToroidalSurface from desc.objectives import ( From 599eff6f620e20dff03c6f3d85ac27a0e7f2add0 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 23 Apr 2024 22:32:22 -0400 Subject: [PATCH 119/241] Rename some variables to avoid confusion --- desc/compute/bounce_integral.py | 48 +++++++++++++++------------------ 1 file changed, 22 insertions(+), 26 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index be0b9fee93..8bac593344 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -270,35 +270,33 @@ def _poly_val(x, c): return val -def composite_linspace(knots, resolution, is_sorted=False): - """Returns linearly spaced points between ``knots``. +def composite_linspace(breaks, resolution, is_sorted=False): + """Returns linearly spaced points between breakpoints. Parameters ---------- - knots : Array + breaks : Array First axis has values to return linearly spaced values between. The remaining axes are batch axes. resolution : int - Number of points between each knot. + Number of points between each break. is_sorted : bool Whether the knots are already sorted along the first axis. Returns ------- - result : Array, shape((knots.shape[0] - 1) * resolution + 1, *knots.shape[1:]) - Sorted linearly spaced points between ``knots``. + pts : Array, shape((breaks.shape[0] - 1) * resolution + 1, *breaks.shape[1:]) + Sorted linearly spaced points between ``breaks``. """ - knots = jnp.atleast_1d(knots) - P = knots.shape[0] - S = knots.shape[1:] + breaks = jnp.atleast_1d(breaks) if not is_sorted: - knots = jnp.sort(knots, axis=0) - result = jnp.linspace(knots[:-1, ...], knots[1:, ...], resolution, endpoint=False) - result = jnp.moveaxis(result, source=0, destination=1).reshape(-1, *S) - result = jnp.append(result, knots[jnp.newaxis, -1, ...], axis=0) - assert result.shape == ((P - 1) * resolution + 1, *S) - return result + breaks = jnp.sort(breaks, axis=0) + pts = jnp.linspace(breaks[:-1, ...], breaks[1:, ...], resolution, endpoint=False) + pts = jnp.moveaxis(pts, source=0, destination=1).reshape(-1, *breaks.shape[1:]) + pts = jnp.append(pts, breaks[jnp.newaxis, -1, ...], axis=0) + assert pts.shape == ((breaks.shape[0] - 1) * resolution + 1, *breaks.shape[1:]) + return pts def _check_shape(knots, B_c, B_z_ra_c, pitch=None): @@ -411,12 +409,12 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): # Can detect at most degree of |B|_z_ra spline extrema between each knot. assert extrema.shape == (S, N, degree - 1) B_extrema = _poly_val(x=extrema, c=B_c[..., jnp.newaxis]) - B_extrema_z_ra = _poly_val(x=extrema, c=_poly_der(B_z_ra_c)[..., jnp.newaxis]) + B_zz_ra_extrema = _poly_val(x=extrema, c=_poly_der(B_z_ra_c)[..., jnp.newaxis]) # Floating point error impedes consistent detection of bounce points riding # extrema. Shift pitch values slightly to resolve this issue. 
# Higher priority to shift down maxima than shift up minima, so identify near # equality with zero as maxima. - is_maxima = B_extrema_z_ra <= 0 + is_maxima = B_zz_ra_extrema <= 0 B_extrema = jnp.where( is_maxima, (1 - relative_shift) * B_extrema, @@ -572,25 +570,23 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=False): for s in jnp.nonzero(err_1 | err_2)[0]: B = PPoly(B_c[:, s], knots) + B_mid = B((bp1[:, s] + bp2[:, s]) / 2) for p in range(P): - B_mid = B((bp1[p, s] + bp2[p, s]) / 2) err_1_ps = jnp.any(bp1[p, s] > bp2[p, s]) err_2_ps = jnp.any(bp1[p, s, 1:] < bp2[p, s, :-1]) - err_3_ps = jnp.any(B_mid > 1 / pitch[p, s] + eps) + err_3_ps = jnp.any(B_mid[p] > 1 / pitch[p, s] + eps) if err_1_ps or err_2_ps or err_3_ps: - print(f"Error at index p={p}, s={s} out of {P},{S}.") - bp1_ps, bp2_ps, B_mid = map( - _filter_not_nan, (bp1[p, s], bp2[p, s], B_mid) + bp1_ps, bp2_ps, B_mid_ps = map( + _filter_not_nan, (bp1[p, s], bp2[p, s], B_mid[p]) ) - print("bp1: ", bp1_ps) - print("bp2: ", bp2_ps) - print("B - 1/pitch:", B(bp1_ps) - 1 / pitch[p, s]) plot_field_line_with_ripple( B, pitch[p, s], bp1_ps, bp2_ps, id=f"{p},{s}" ) + print("bp1:", bp1_ps) + print("bp2:", bp2_ps) assert not err_1_ps, msg_1 assert not err_2_ps, msg_2 - msg_3 = f"B midpoint = {B_mid} > {1 / pitch[p, s] + eps} = 1/pitch." + msg_3 = f"B midpoint = {B_mid_ps} > {1 / pitch[p, s] + eps} = 1/pitch." assert not err_3_ps, msg_3 if plot: for s in range(S): From 43cad500488d2ca3642865e0bf5a0dfec41cb3d0 Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 24 Apr 2024 11:22:51 -0400 Subject: [PATCH 120/241] Remove override_grid from eq.compute in bounce_integral. Doesn't seem to have an effect there. Also document some code better and refactor. --- desc/compute/bounce_integral.py | 114 +++++++++++++++++--------------- desc/compute/utils.py | 26 ++++---- desc/grid.py | 2 + tests/test_bounce_integral.py | 47 ++++++------- 4 files changed, 95 insertions(+), 94 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 8bac593344..a63020463f 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -281,7 +281,7 @@ def composite_linspace(breaks, resolution, is_sorted=False): resolution : int Number of points between each break. is_sorted : bool - Whether the knots are already sorted along the first axis. + Whether the breaks are already sorted along the first axis. Returns ------- @@ -406,7 +406,6 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): extrema = _poly_root( c=B_z_ra_c, a_min=jnp.array([0]), a_max=jnp.diff(knots), distinct=True ) - # Can detect at most degree of |B|_z_ra spline extrema between each knot. assert extrema.shape == (S, N, degree - 1) B_extrema = _poly_val(x=extrema, c=B_c[..., jnp.newaxis]) B_zz_ra_extrema = _poly_val(x=extrema, c=_poly_der(B_z_ra_c)[..., jnp.newaxis]) @@ -415,13 +414,12 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): # Higher priority to shift down maxima than shift up minima, so identify near # equality with zero as maxima. is_maxima = B_zz_ra_extrema <= 0 + # Reshape so that last axis enumerates extrema along a field line. B_extrema = jnp.where( is_maxima, (1 - relative_shift) * B_extrema, (1 + relative_shift) * B_extrema, ).reshape(S, -1) - # Reshape so that last axis enumerates extrema along a field line. - # Pad all the nan at the end rather than interspersed to be consistent. 
B_extrema = take_mask(B_extrema, ~jnp.isnan(B_extrema)) pitch = 1 / B_extrema.T assert pitch.shape == (N * (degree - 1), S) @@ -564,33 +562,27 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=False): P, S = bp1.shape[:-1] msg_1 = "Bounce points have an inversion." - err_1 = jnp.any(bp1 > bp2, axis=(0, -1)) + err_1 = jnp.any(bp1 > bp2, axis=-1) msg_2 = "Discontinuity detected." - err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1], axis=(0, -1)) + err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1], axis=-1) - for s in jnp.nonzero(err_1 | err_2)[0]: + for s in range(S): B = PPoly(B_c[:, s], knots) - B_mid = B((bp1[:, s] + bp2[:, s]) / 2) for p in range(P): - err_1_ps = jnp.any(bp1[p, s] > bp2[p, s]) - err_2_ps = jnp.any(bp1[p, s, 1:] < bp2[p, s, :-1]) - err_3_ps = jnp.any(B_mid[p] > 1 / pitch[p, s] + eps) - if err_1_ps or err_2_ps or err_3_ps: - bp1_ps, bp2_ps, B_mid_ps = map( - _filter_not_nan, (bp1[p, s], bp2[p, s], B_mid[p]) - ) - plot_field_line_with_ripple( - B, pitch[p, s], bp1_ps, bp2_ps, id=f"{p},{s}" + B_mid = B((bp1[p, s] + bp2[p, s]) / 2) + err_3 = jnp.any(B_mid > 1 / pitch[p, s] + eps) + if err_1[p, s] or err_2[p, s] or err_3: + bp1_p, bp2_p, B_mid = map( + _filter_not_nan, (bp1[p, s], bp2[p, s], B_mid) ) - print("bp1:", bp1_ps) - print("bp2:", bp2_ps) - assert not err_1_ps, msg_1 - assert not err_2_ps, msg_2 - msg_3 = f"B midpoint = {B_mid_ps} > {1 / pitch[p, s] + eps} = 1/pitch." - assert not err_3_ps, msg_3 - if plot: - for s in range(S): - B = PPoly(B_c[:, s], knots) + plot_field_line_with_ripple(B, pitch[p, s], bp1_p, bp2_p, id=f"{p},{s}") + print("bp1:", bp1_p) + print("bp2:", bp2_p) + assert not err_1[p, s], msg_1 + assert not err_2[p, s], msg_2 + msg_3 = f"B midpoint = {B_mid} > {1 / pitch[p, s] + eps} = 1/pitch." + assert not err_3, msg_3 + if plot: plot_field_line_with_ripple(B, pitch[:, s], bp1[:, s], bp2[:, s], id=str(s)) @@ -857,12 +849,30 @@ def _suppress_bad_nan(V): """ # This simple logic is encapsulated here to make explicit the bug it resolves. + # Don't suppress inf as that indicates catastrophic floating point error. V = jnp.nan_to_num(V, posinf=jnp.inf, neginf=-jnp.inf) return V def _assert_finite_and_hairy(Z, B_sup_z, B, f, B_z_ra, inner_product): - """Check that no integrals were lost and the hairy ball theorem is upheld.""" + """Check for floating point errors. + + Parameters + ---------- + Z : Array + Quadrature points at field line-following ζ coordinates. + B_sup_z : Array, shape(Z.shape) + Contravariant field-line following toroidal component of magnetic field. + Interpolated to Z. + B : Array, shape(Z.shape) + Norm of magnetic field. Interpolated to Z. + B_z_ra : Array, shape(Z.shape) + Norm of magnetic field derivative with respect to field-line following label. + Interpolated to Z. + inner_product : Array + Output of ``_interpolatory_quadrature``. + + """ is_not_quad_point = jnp.isnan(Z) # We want quantities to evaluate as finite only at quadrature points # for the integrals with boundaries at valid bounce points. @@ -873,7 +883,7 @@ def _assert_finite_and_hairy(Z, B_sup_z, B, f, B_z_ra, inner_product): for ff in f: assert jnp.all(jnp.isfinite(ff) ^ is_not_quad_point), msg - msg = "|B| has vanished." + msg = "|B| has vanished, violating the hairy ball theorem." 
assert not jnp.isclose(B, 0).any(), msg assert not jnp.isclose(B_sup_z, 0).any(), msg @@ -1215,37 +1225,20 @@ def integrand_den(B, pitch, Z): print(jnp.nansum(average, axis=-1)) """ - monotonic = kwargs.pop("monotonic", False) - if quad == tanh_sinh_quad: - kwargs.setdefault("resolution", 19) - x, w = quad(**kwargs) - # The gradient of the transformation is the weight function w(x) of the integral. - auto, grad_auto = automorphism - w = w * grad_auto(x) - # Recall x = auto_forward(_affine_bijection_forward(ζ, ζ_b₁, ζ_b₂)). - # Apply reverse automorphism to quadrature points. - x = auto(x) - if alpha is None: alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 10) rho = jnp.atleast_1d(rho) alpha = jnp.atleast_1d(alpha) knots = jnp.atleast_1d(knots) - # number of field lines or splines - S = rho.size * alpha.size - # Compute |B| and group data along field lines. grid_desc, grid_fl = desc_grid_from_field_line_coords(eq, rho, alpha, knots) - data = eq.compute( - ["B^zeta", "|B|", "|B|_z|r,a"], - grid=grid_desc, - # TODO: look into override grid in different PR - override_grid=False, - ) - B_sup_z = data["B^zeta"].reshape(S, knots.size) * L_ref / B_ref - B = data["|B|"].reshape(S, knots.size) / B_ref - B_z_ra = data["|B|_z|r,a"].reshape(S, knots.size) / B_ref + data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid_desc) + B_sup_z = data["B^zeta"].reshape(-1, knots.size) * L_ref / B_ref + B = data["|B|"].reshape(-1, knots.size) / B_ref + B_z_ra = data["|B|_z|r,a"].reshape(-1, knots.size) / B_ref + # Compute spline of |B| along field lines. + monotonic = kwargs.pop("monotonic", False) B_c = ( PchipInterpolator(knots, B, axis=-1, check=check).c if monotonic @@ -1253,8 +1246,9 @@ def integrand_den(B, pitch, Z): ) B_c = jnp.moveaxis(B_c, source=1, destination=-1) B_z_ra_c = _poly_der(B_c) - assert B_c.shape == (4, S, knots.size - 1) - assert B_z_ra_c.shape == (3, S, knots.size - 1) + degree = 3 + assert B_c.shape == (degree + 1, rho.size * alpha.size, knots.size - 1) + assert B_z_ra_c.shape == (degree, rho.size * alpha.size, knots.size - 1) items = { "grid_desc": grid_desc, "grid_fl": grid_fl, @@ -1263,6 +1257,16 @@ def integrand_den(B, pitch, Z): "B_z_ra.c": B_z_ra_c, } + if quad == tanh_sinh_quad: + kwargs.setdefault("resolution", 19) + x, w = quad(**kwargs) + # The gradient of the transformation is the weight function w(x) of the integral. + auto, grad_auto = automorphism + w = w * grad_auto(x) + # Recall x = auto_forward(_affine_bijection_forward(ζ, ζ_b₁, ζ_b₂)). + # Apply reverse automorphism to quadrature points. + x = auto(x) + def bounce_integrate(integrand, f, pitch, method="akima"): """Bounce integrate ∫ f(ℓ) dℓ. @@ -1298,7 +1302,7 @@ def bounce_integrate(integrand, f, pitch, method="akima"): Returns ------- - result : Array, shape(P, S, (knots.size - 1) * 3) + result : Array, shape(P, S, (knots.size - 1) * degree) First axis enumerates pitch values. Second axis enumerates the field lines. Last axis enumerates the bounce integrals. 
@@ -1320,7 +1324,7 @@ def bounce_integrate(integrand, f, pitch, method="akima"): method_B="monotonic" if monotonic else "cubic", check=check, ) - assert result.shape[-1] == (knots.size - 1) * 3 + assert result.shape[-1] == (knots.size - 1) * degree return result return bounce_integrate, items diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 4ed75e4a04..05572029db 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -10,6 +10,7 @@ from desc.backend import cond, fori_loop, jnp, put from desc.grid import ConcentricGrid, Grid, LinearGrid +from ..utils import errorif, warnif from .data_index import allowed_kwargs, data_index # map from profile name to equilibrium parameter name @@ -862,14 +863,11 @@ def surface_integrals_map(grid, surface_label="rho", expand_out=True, tol=1e-14) surface in the grid with code: ``function(q)``. """ - if surface_label == "theta" and isinstance(grid, ConcentricGrid): - warnings.warn( - colored( - "Integrals over constant theta surfaces are poorly defined for " - + "ConcentricGrid.", - "yellow", - ) - ) + msg = colored( + "Integrals over constant theta surfaces are poorly defined for ConcentricGrid.", + "yellow", + ) + warnif(surface_label == "theta" and isinstance(grid, ConcentricGrid), msg=msg) unique_size, inverse_idx, spacing, has_endpoint_dupe, has_idx = _get_grid_surface( grid, surface_label ) @@ -1037,8 +1035,8 @@ def surface_averages_map(grid, surface_label="rho", expand_out=True, tol=1e-14): expand_out = ( expand_out # don't try to expand already expanded output - & hasattr(grid, f"num_{surface_label}") - & hasattr(grid, f"_inverse_{surface_label}_idx") + and hasattr(grid, f"num_{surface_label}") + and hasattr(grid, f"_inverse_{surface_label}_idx") ) integrate = surface_integrals_map(grid, surface_label, expand_out=False, tol=tol) @@ -1167,7 +1165,7 @@ def surface_integrals_transform(grid, surface_label="rho"): # transform into the computational domain, so the second dimension that # discretizes f over the codomain will typically have size grid.num_nodes # to broadcast with quantities in data_index. - assert hasattr(grid, f"num_{surface_label}") & hasattr( + assert hasattr(grid, f"num_{surface_label}") and hasattr( grid, f"_inverse_{surface_label}_idx" ) return surface_integrals_map(grid, surface_label, expand_out=False) @@ -1269,7 +1267,7 @@ def surface_variance( if has_idx: mean = grid.expand(mean, surface_label) variance = (correction * integrate((weights * ((q - mean) ** 2).T).T).T / v1).T - if has_idx & expand_out: + if has_idx and expand_out: return grid.expand(variance, surface_label) else: return variance @@ -1319,8 +1317,8 @@ def surface_min(grid, x, surface_label="rho"): """ unique_size, inverse_idx, _, _, has_idx = _get_grid_surface(grid, surface_label) - if not has_idx: - raise NotImplementedError("Grid should have unique and inverse idx.") + msg = "Grid should have unique and inverse idx." 
+ errorif(not has_idx, NotImplementedError, msg=msg) inverse_idx = jnp.asarray(inverse_idx) x = jnp.asarray(x) mins = jnp.full(unique_size, jnp.inf) diff --git a/desc/grid.py b/desc/grid.py index 3129f258a7..8f6ef2fc13 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -16,6 +16,8 @@ "ConcentricGrid", "find_least_rational_surfaces", "find_most_rational_surfaces", + "meshgrid_unique_idx", + "meshgrid_inverse_idx", ] diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index caf76c3ced..6c6ff38843 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -242,10 +242,8 @@ def test_composite_linspace(): B_max_tz = np.array([1, 3]) pitch_knot = np.linspace(1 / B_min_tz, 1 / B_max_tz, num=5) b_knot = 1 / pitch_knot - print() - print(b_knot) b = composite_linspace(b_knot, resolution=3) - print() + print(b_knot) print(b) np.testing.assert_allclose(b, np.sort(b, axis=0), atol=0, rtol=0) for i in range(pitch_knot.shape[0]): @@ -263,7 +261,9 @@ def test_bp1_first(): knots = np.linspace(start, end, 5) B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots)) pitch = 2 - bp1, bp2 = bounce_points(pitch, knots, B.c, B.derivative().c, check=True) + bp1, bp2 = bounce_points( + pitch, knots, B.c, B.derivative().c, check=True, plot=False + ) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1, intersect[0::2]) @@ -276,11 +276,7 @@ def test_bp2_first(): B = CubicHermiteSpline(k, np.cos(k), -np.sin(k)) pitch = 2 bp1, bp2 = bounce_points( - pitch, - k, - B.c, - B.derivative().c, - check=True, + pitch, k, B.c, B.derivative().c, check=True, plot=False ) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) intersect = B.solve(1 / pitch, extrapolate=False) @@ -296,7 +292,7 @@ def test_bp1_before_extrema(): ) B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[3] - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) + bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True, plot=False) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) # Our routine correctly detects intersection, while scipy, jnp.root fails. intersect = B.solve(1 / pitch, extrapolate=False) @@ -316,13 +312,13 @@ def test_bp2_before_extrema(): ) B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) + bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True, plot=False) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1, intersect[[0, -2]]) np.testing.assert_allclose(bp2, intersect[[1, -1]]) - def test_extrema_first_and_before_bp1(): + def test_extrema_first_and_before_bp1(plot=False): start = -1.2 * np.pi end = -2 * start k = np.linspace(start, end, 7) @@ -336,7 +332,8 @@ def test_extrema_first_and_before_bp1(): bp1, bp2 = bounce_points( pitch, k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], check=True, plot=False ) - plot_field_line_with_ripple(B, pitch, bp1, bp2, start=k[2]) + if plot: + plot_field_line_with_ripple(B, pitch, bp1, bp2, start=k[2]) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) # Our routine correctly detects intersection, while scipy, jnp.root fails. 
intersect = B.solve(1 / pitch, extrapolate=False) @@ -357,8 +354,7 @@ def test_extrema_first_and_before_bp2(): ) B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[1] - # This note may not make sense to the reader now, but if a regression - # fails this test, it will save many hours of debugging. + # If a regression fails this test, this note will save many hours of debugging. # If the filter in place to return only the distinct roots is too coarse, # in particular atol < 1e-15, then this test will error. In the resulting # plot that the error will produce the red bounce point on the first hump @@ -370,7 +366,7 @@ def test_extrema_first_and_before_bp2(): # value theorem holds for the continuous spline, so when fed these sequence # of roots, the correct action is to ignore the first green root since # otherwise the interior of the bounce points would be hills and not valleys. - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) + bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True, plot=False) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) # Our routine correctly detects intersection, while scipy, jnp.root fails. intersect = B.solve(1 / pitch, extrapolate=False) @@ -741,12 +737,12 @@ def test_bounce_averaged_drifts(): ) np.testing.assert_allclose(gds21, gds21_analytic, atol=1.7e-2, rtol=5e-4) - fudge_factor2 = 0.19 - gbdrift_analytic = fudge_factor2 * ( + fudge_factor_gbdrift = 0.19 + gbdrift_analytic = fudge_factor_gbdrift * ( -s_hat + (np.cos(theta_PEST) - gds21_analytic / s_hat * np.sin(theta_PEST)) ) - fudge_factor3 = 0.07 - cvdrift_analytic = gbdrift_analytic + fudge_factor3 * alpha_MHD / bmag**2 + fudge_factor_cvdrift = 0.07 + cvdrift_analytic = gbdrift_analytic + fudge_factor_cvdrift * alpha_MHD / bmag**2 np.testing.assert_allclose(gbdrift, gbdrift_analytic, atol=1.2e-2, rtol=5e-3) np.testing.assert_allclose(cvdrift, cvdrift_analytic, atol=1.8e-2, rtol=5e-3) @@ -779,9 +775,9 @@ def test_bounce_averaged_drifts(): I_7 = 4 / k * (2 * k2 * I_0 + (1 - 2 * k2) * I_1) bounce_drift_analytic = ( - fudge_factor3 * dPdrho / B0**2 * I_1 + fudge_factor_cvdrift * dPdrho / B0**2 * I_1 - 0.5 - * fudge_factor2 + * fudge_factor_gbdrift * ( s_hat * (I_0 + I_1 + I_2 + I_3) + alpha_MHD / B0**4 * (I_4 + I_5) @@ -804,7 +800,7 @@ def integrand(cvdrift, gbdrift, B, pitch, Z): ) # There is only one bounce integral per pitch in this example. bounce_drift = np.squeeze(_filter_not_nan(bounce_drift)) - assert bounce_drift_analytic.shape == bounce_drift.shape + assert bounce_drift.shape == bounce_drift_analytic.shape plt.plot(1 / pitch, bounce_drift_analytic, marker="o", label="analytic") plt.plot(1 / pitch, bounce_drift, marker="x", label="numerical") @@ -814,11 +810,12 @@ def integrand(cvdrift, gbdrift, B, pitch, Z): plt.tight_layout() plt.show() msg = ( - "Maybe tune these parameters?" + "Maybe tune these parameters?\n" f"Quadrature resolution is {resolution}.\n" f"Delta shift is {delta_shift}.\n" f"Spline method for integrand quantities is {method}.\n" - f"Spline method for |B| is monotonic? (as opposed to Hermite): {monotonic}." + f"Spline method for |B| is monotonic? 
(as opposed to Hermite): {monotonic}.\n" + f"Fudge factors: {fudge_factor_gbdrift}, {fudge_factor_cvdrift}.\n" ) np.testing.assert_allclose( bounce_drift, bounce_drift_analytic, atol=2e-2, rtol=1e-2, err_msg=msg From ab9ba6bd5a01037069065fe0e8d7d2ce260f1c0a Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 24 Apr 2024 11:36:14 -0400 Subject: [PATCH 121/241] Reorder arguments into guard function for consistency --- desc/compute/bounce_integral.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index a63020463f..06dc1dc33f 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -854,21 +854,23 @@ def _suppress_bad_nan(V): return V -def _assert_finite_and_hairy(Z, B_sup_z, B, f, B_z_ra, inner_product): +def _assert_finite_and_hairy(Z, f, B_sup_z, B, B_z_ra, inner_product): """Check for floating point errors. Parameters ---------- Z : Array Quadrature points at field line-following ζ coordinates. + f : iterable of Array, shape(Z.shape) + Arguments to the integrand interpolated to Z. B_sup_z : Array, shape(Z.shape) - Contravariant field-line following toroidal component of magnetic field. - Interpolated to Z. + Contravariant field-line following toroidal component of magnetic field, + interpolated to Z. B : Array, shape(Z.shape) - Norm of magnetic field. Interpolated to Z. + Norm of magnetic field, interpolated to Z. B_z_ra : Array, shape(Z.shape) - Norm of magnetic field derivative with respect to field-line following label. - Interpolated to Z. + Norm of magnetic field derivative with respect to field-line following label, + interpolated to Z. inner_product : Array Output of ``_interpolatory_quadrature``. @@ -993,14 +995,13 @@ def _interpolatory_quadrature( B_sup_z = _interp1d_vec(Z_ps, knots, B_sup_z, method=method).reshape(shape) # Specify derivative at knots for ≈ cubic hermite interpolation. B = _interp1d_vec_with_df(Z_ps, knots, B, B_z_ra, method=method_B).reshape(shape) - pitch = pitch[..., jnp.newaxis, jnp.newaxis] inner_product = jnp.dot( _suppress_bad_nan(integrand(*f, B=B, pitch=pitch, Z=Z)) / B_sup_z, w, ) if check: - _assert_finite_and_hairy(Z, B_sup_z, B, f, B_z_ra, inner_product) + _assert_finite_and_hairy(Z, f, B_sup_z, B, B_z_ra, inner_product) return inner_product From 22fee1293733a5022daf47fe80a64914af66eed8 Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 24 Apr 2024 13:07:48 -0400 Subject: [PATCH 122/241] Use same resolution in quadrature in bounce average drift test and also refactor. No change in test results. --- desc/compute/bounce_integral.py | 19 +---- tests/test_bounce_integral.py | 132 ++++++++++++++------------------ 2 files changed, 62 insertions(+), 89 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 06dc1dc33f..1d68fbffe4 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -473,10 +473,10 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True): """ B_c, B_z_ra_c, pitch = _check_shape(knots, B_c, B_z_ra_c, pitch) P, S, N, degree = pitch.shape[0], B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 - # The polynomials' intersection points with 1 / λ is given by ``intersect``. + # The polynomials' intersection points with 1 / λ is given by intersect. # In order to be JIT compilable, this must have a shape that accommodates the # case where each polynomial intersects 1 / λ degree times. 
- # nan values in ``intersect`` denote a polynomial has less than degree intersects. + # nan values in intersect denote a polynomial has less than degree intersects. intersect = _poly_root( c=B_c, # New axis to use same pitches across polynomials of a particular spline. @@ -491,8 +491,7 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True): # Reshape so that last axis enumerates intersects of a pitch along a field line. B_z_ra = _poly_val(x=intersect, c=B_z_ra_c[..., jnp.newaxis]).reshape(P, S, -1) # Transform out of local power basis expansion. - intersect = intersect + knots[:-1, jnp.newaxis] - intersect = intersect.reshape(P, S, -1) + intersect = (intersect + knots[:-1, jnp.newaxis]).reshape(P, S, -1) # Only consider intersect if it is within knots that bound that polynomial. is_intersect = ~jnp.isnan(intersect) @@ -952,14 +951,7 @@ def _assert_finite_and_hairy(Z, f, B_sup_z, B, B_z_ra, inner_product): excluded={"method", "derivative", "extrap", "period"}, ) def _interp1d_vec_with_df( - xq, - x, - f, - fx, - method="cubic", - derivative=0, - extrap=False, - period=None, + xq, x, f, fx, method="cubic", derivative=0, extrap=False, period=None ): return interp1d(xq, x, f, method, derivative, extrap, period, fx=fx) @@ -1048,7 +1040,6 @@ def _bounce_quadrature( errorif(x.ndim != 1 or x.shape != w.shape) errorif(bp1.ndim != 3 or bp1.shape != bp2.shape) pitch = jnp.atleast_2d(pitch) - S = B.shape[0] if not isinstance(f, (list, tuple)): f = [f] @@ -1231,14 +1222,12 @@ def integrand_den(B, pitch, Z): rho = jnp.atleast_1d(rho) alpha = jnp.atleast_1d(alpha) knots = jnp.atleast_1d(knots) - # Compute |B| and group data along field lines. grid_desc, grid_fl = desc_grid_from_field_line_coords(eq, rho, alpha, knots) data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid_desc) B_sup_z = data["B^zeta"].reshape(-1, knots.size) * L_ref / B_ref B = data["|B|"].reshape(-1, knots.size) / B_ref B_z_ra = data["|B|_z|r,a"].reshape(-1, knots.size) / B_ref - # Compute spline of |B| along field lines. monotonic = kwargs.pop("monotonic", False) B_c = ( PchipInterpolator(knots, B, axis=-1, check=check).c diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 6c6ff38843..e112c5dd65 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -524,72 +524,6 @@ def integrand_den(B, pitch, Z): print(np.nansum(average, axis=-1)) -# @pytest.mark.unit -def test_elliptic_integral_limit(): - """Test bounce integral matches elliptic integrals. - - In the limit of a low beta, large aspect ratio tokamak the bounce integral - should converge to the elliptic integrals of the first kind. - todo: would be nice to understand physics for why these are supposed - to be proportional to bounce integral. Is this discussed in any book? - Also, looking at - https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.ellipk.html - Are we saying that in this limit, we expect that |B| ~ sin(t)^2, with m as the - pitch angle? I assume that we want to add g_zz to the integrand in the - definition of the function in the scipy documentation above, - and after a change of variables the bounce points will be the endpoints of - the integration. - So this test will test whether the quadrature is accurate - (and not whether the bounce points were accurate). - - """ - assert False, "Test not finished yet." 
- L, M, N, NFP, sym = 6, 6, 6, 1, True - surface = FourierRZToroidalSurface( - R_lmn=[1.0, 0.1], - Z_lmn=[0.0, -0.1], - modes_R=np.array([[0, 0], [1, 0]]), - modes_Z=np.array([[0, 0], [-1, 0]]), - sym=sym, - NFP=NFP, - ) - eq = Equilibrium( - L=L, - M=M, - N=N, - NFP=NFP, - surface=surface, - pressure=PowerSeriesProfile([1e2, 0, -1e2]), - iota=PowerSeriesProfile([1, 0, 2]), - Psi=1.0, - ) - eq = solve_continuation_automatic(eq)[-1] - - def beta(grid, data): - return data["_vol"] - - low_beta = 0.01 - # todo: error that objective function has no linear attribute? - objective = ObjectiveFunction( - (ObjectiveFromUser(fun=beta, eq=eq, target=low_beta),) - ) - - constraints = (*get_fixed_boundary_constraints(eq), get_equilibrium_objective(eq)) - opt = Optimizer("proximal-lsq-exact") - eq, result = eq.optimize( - objective=objective, constraints=constraints, optimizer=opt - ) - print(result) - - rho = np.array([0.5]) - alpha = np.linspace(0, (2 - eq.sym) * np.pi, 10) - knots = np.linspace(0, 6 * np.pi, 20) - # TODO now compare result to elliptic integral - bounce_integrate, items = bounce_integral(eq, rho, alpha, knots, check=True) - pitch = pitch_of_extrema(knots, items["B.c"], items["B_z_ra.c"]) - bp1, bp2 = bounce_points(pitch, knots, items["B.c"], items["B_z_ra.c"], check=True) - - @pytest.mark.unit def test_integral_0(k=0.9, resolution=10): """4 / k * ellipkinc(np.arcsin(k), 1 / k**2).""" @@ -640,8 +574,7 @@ def test_bounce_averaged_drifts(): Calculate bounce-averaged drifts using the bounce-average routine and compare it with the analytical expression - # Note 1: This test can be merged with the elliptic integral test as - we do calculate elliptic integrals here + # Note 1: This test can be merged with the low beta test # Note 2: Remove tests/test_equilibrium :: test_shifted_circle_geometry # once all the epsilons and Gammas have been implemented and tested """ @@ -663,14 +596,11 @@ def test_bounce_averaged_drifts(): zeta = np.linspace(-np.pi / iota, np.pi / iota, N) alpha = 0 theta_PEST = alpha + iota * zeta - coords1 = np.zeros((N, 3)) - coords1[:, 0] = np.broadcast_to(rho, N) - coords1[:, 1] = theta_PEST - coords1[:, 2] = zeta # TODO: Request: The bounce integral operator should be able to take a grid. # Response: Currently the API is such that the method does all the # above preprocessing for you. Let's test it for correctness # first then do this later. + resolution = 50 # Whether to use monotonic or Hermite splines to interpolate |B|. monotonic = False @@ -756,8 +686,8 @@ def test_bounce_averaged_drifts(): k = np.sqrt(k2) # Here are the notes that explain these integrals. # https://github.com/PlasmaControl/DESC/files/15010927/bavg.pdf. - I_0 = test_integral_0(k) - I_1 = test_integral_1(k) + I_0 = test_integral_0(k, resolution) + I_1 = test_integral_1(k, resolution) I_2 = 16 * k * I_0 I_3 = 4 / 9 * (8 * k * (-1 + 2 * k2) * I_1 - 4 * k * (-1 + k2) * I_0) I_4 = ( @@ -820,3 +750,57 @@ def integrand(cvdrift, gbdrift, B, pitch, Z): np.testing.assert_allclose( bounce_drift, bounce_drift_analytic, atol=2e-2, rtol=1e-2, err_msg=msg ) + + +@pytest.mark.regression +def test_bounce_averaged_drifts_low_beta(): + """Test bounce integrals in low beta limit.""" + assert False, "Test not finished yet." 
+ L, M, N, NFP, sym = 6, 6, 6, 1, True + surface = FourierRZToroidalSurface( + R_lmn=[1.0, 0.1], + Z_lmn=[0.0, -0.1], + modes_R=np.array([[0, 0], [1, 0]]), + modes_Z=np.array([[0, 0], [-1, 0]]), + sym=sym, + NFP=NFP, + ) + eq = Equilibrium( + L=L, + M=M, + N=N, + NFP=NFP, + surface=surface, + pressure=PowerSeriesProfile([1e2, 0, -1e2]), + iota=PowerSeriesProfile([1, 0, 2]), + Psi=1.0, + ) + eq = solve_continuation_automatic(eq)[-1] + + def beta(grid, data): + return data["_vol"] + + low_beta = 0.01 + # todo: error that objective function has no linear attribute? + objective = ObjectiveFunction( + (ObjectiveFromUser(fun=beta, eq=eq, target=low_beta),) + ) + + constraints = (*get_fixed_boundary_constraints(eq), get_equilibrium_objective(eq)) + opt = Optimizer("proximal-lsq-exact") + eq, result = eq.optimize( + objective=objective, constraints=constraints, optimizer=opt + ) + print(result) + + rho = np.array([0.5]) + alpha = np.linspace(0, (2 - eq.sym) * np.pi, 10) + knots = np.linspace(0, 6 * np.pi, 20) + # TODO now compare result to elliptic integral + bounce_integrate, items = bounce_integral( + eq, rho, alpha, knots, check=True, plot=False + ) + pitch = pitch_of_extrema(knots, items["B.c"], items["B_z_ra.c"]) + bp1, bp2 = bounce_points( + pitch, knots, items["B.c"], items["B_z_ra.c"], check=True, plot=False + ) From cb9c55cbaf4323a3021e2becf00f67d54a24d25e Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 24 Apr 2024 23:50:03 -0400 Subject: [PATCH 123/241] Fix floating point error in automorphism sin --- desc/compute/bounce_integral.py | 33 +++++++++++++++++---------------- tests/test_bounce_integral.py | 16 ++++++++++++---- 2 files changed, 29 insertions(+), 20 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 1d68fbffe4..22e52cf801 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -557,7 +557,7 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=False): Whether to plot even if error was not detected. """ - eps = 10 * jnp.finfo(jnp.array(1.0)).eps + eps = 10 * jnp.finfo(jnp.array(1.0).dtype).eps P, S = bp1.shape[:-1] msg_1 = "Bounce points have an inversion." @@ -706,10 +706,6 @@ def grad_affine_bijection_reverse(a, b): def automorphism_arcsin(x): """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. - The arcsin automorphism is an expansion, so it pushes the evaluation points - of the bounce integrand toward the singular region, which may induce - floating point error. - The gradient of the arcsin automorphism introduces a singularity that augments the singularity in the bounce integral. Therefore, the quadrature scheme used to evaluate the integral must work well on singular integrals. @@ -717,10 +713,12 @@ def automorphism_arcsin(x): Parameters ---------- x : Array + Points to tranform. Returns ------- y : Array + Transformed points. """ y = 2 * jnp.arcsin(x) / jnp.pi @@ -736,13 +734,9 @@ def grad_automorphism_arcsin(x): grad_automorphism_arcsin.__doc__ += "\n" + automorphism_arcsin.__doc__ -def automorphism_sin(x): +def automorphism_sin(x, eps=5e-7): """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. - The sin automorphism is a contraction, so it pulls the evaluation points - of the bounce integrand away from the singular region, inducing less - floating point error. - The derivative of the sin automorphism is Lipschitz. When this automorphism is used as the change of variable map for the bounce integral, the Lipschitzness prevents generation of new singularities. 
@@ -760,18 +754,26 @@ def automorphism_sin(x): Parameters ---------- x : Array + Points to transform. + eps : float + Buffer for floating point error. Returns ------- y : Array + Transformed points. """ + x = jnp.where(x > +eps, x - eps, x) + x = jnp.where(x < -eps, x + eps, x) y = jnp.sin(jnp.pi * x / 2) return y -def grad_automorphism_sin(x): +def grad_automorphism_sin(x, eps=5e-7): """Gradient of sin automorphism.""" + x = jnp.where(x > +eps, x - eps, x) + x = jnp.where(x < -eps, x + eps, x) dy_dx = jnp.pi * jnp.cos(jnp.pi * x / 2) / 2 return dy_dx @@ -808,8 +810,8 @@ def tanh_sinh_quad(resolution, w=lambda x: 1, t_max=None): if t_max is None: # boundary of integral x_max = jnp.array(1.0) - # subtract machine epsilon with buffer for floating point error - x_max = x_max - 10 * jnp.finfo(x_max).eps + # buffer for floating point error + x_max = x_max - 10 * jnp.finfo(x_max.dtype).eps # inverse of tanh-sinh transformation t_max = jnp.arcsinh(2 * jnp.arctanh(x_max) / jnp.pi) kh = jnp.linspace(-t_max, t_max, resolution) @@ -893,9 +895,8 @@ def _assert_finite_and_hairy(Z, f, B_sup_z, B, B_z_ra, inner_product): goal = jnp.sum(1 - is_not_quad_point) // quad_resolution # Number of integrals that were actually computed. actual = jnp.isfinite(inner_product).sum() - err_msg = f"Lost {goal - actual} integrals. Likely due to floating point error." + err_msg = f"Lost {goal - actual} integrals from floating point error." assert goal == actual, err_msg - assert jnp.all(jnp.isfinite(inner_product) ^ is_not_quad_point[..., 0]), err_msg _repeated_docstring = """w : Array, shape(w.size, ) @@ -1074,7 +1075,7 @@ def bounce_integral( alpha=None, knots=jnp.linspace(-3 * jnp.pi, 3 * jnp.pi, 40), quad=tanh_sinh_quad, - automorphism=(automorphism_arcsin, grad_automorphism_arcsin), + automorphism=(automorphism_sin, grad_automorphism_sin), B_ref=1, L_ref=1, check=False, diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index e112c5dd65..f6512a25ac 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -395,8 +395,8 @@ def test_automorphism(): x_1 = affine_bijection_reverse(y, a, b) np.testing.assert_allclose(x_1, x) np.testing.assert_allclose(_affine_bijection_forward(x_1, a, b), y) - np.testing.assert_allclose(automorphism_arcsin(automorphism_sin(y)), y) - np.testing.assert_allclose(automorphism_sin(automorphism_arcsin(y)), y) + np.testing.assert_allclose(automorphism_arcsin(automorphism_sin(y)), y, atol=1e-6) + np.testing.assert_allclose(automorphism_sin(automorphism_arcsin(y)), y, atol=1e-6) np.testing.assert_allclose( grad_affine_bijection_reverse(a, b), @@ -405,14 +405,22 @@ def test_automorphism(): np.testing.assert_allclose( grad_automorphism_sin(y), 1 / grad_automorphism_arcsin(automorphism_sin(y)), - atol=1e-14, + atol=1e-6, ) np.testing.assert_allclose( 1 / grad_automorphism_arcsin(y), grad_automorphism_sin(automorphism_arcsin(y)), - atol=1e-14, + atol=2e-6, ) + # test that floating point error is acceptable + x, w = tanh_sinh_quad(19) + assert np.all(np.abs(x) < 1) + y = 1 / (1 - np.abs(automorphism_sin(x))) + assert np.isfinite(y).all() + y = 1 / (1 - np.abs(automorphism_arcsin(x))) + assert np.isfinite(y).all() + @pytest.mark.unit def test_bounce_quadrature(): From 9069695d960403b3655664f47c251970feb06d88 Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 25 Apr 2024 01:26:13 -0400 Subject: [PATCH 124/241] floating point error resistant quadratic root --- desc/compute/bounce_integral.py | 15 +++++++-------- tests/test_bounce_integral.py | 4 +++- 2 files 
changed, 10 insertions(+), 9 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 22e52cf801..f1b4ae9d15 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -97,16 +97,15 @@ def _root_linear(a, b, distinct=False): def _root_quadratic(a, b, c, distinct=False): """Return r such that a r² + b r + c = 0.""" + # numerical.recipes/book.html, page 227 discriminant = b**2 - 4 * a * c C = complex_sqrt(discriminant) - - def root(xi): - return safediv(-b + xi * C, 2 * a) - + sgn = jnp.sign(jnp.real(jnp.conj(b) * C)) + q = -0.5 * (b + sgn * C) is_linear = jnp.isclose(a, 0) suppress_root = distinct & jnp.isclose(discriminant, 0) - r1 = jnp.where(is_linear, _root_linear(b, c), root(-1)) - r2 = jnp.where(is_linear | suppress_root, jnp.nan, root(1)) + r1 = jnp.where(is_linear, _root_linear(b, c), safediv(q, a)) + r2 = jnp.where(is_linear | suppress_root, jnp.nan, safediv(c, q)) return r1, r2 @@ -1071,11 +1070,11 @@ def _group_grid_data_by_field_line(g): def bounce_integral( eq, - rho=jnp.linspace(1e-7, 1, 5), + rho=jnp.linspace(1e-7, 1, 10), alpha=None, knots=jnp.linspace(-3 * jnp.pi, 3 * jnp.pi, 40), quad=tanh_sinh_quad, - automorphism=(automorphism_sin, grad_automorphism_sin), + automorphism=(automorphism_arcsin, grad_automorphism_arcsin), B_ref=1, L_ref=1, check=False, diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index f6512a25ac..dddb89b53a 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -232,7 +232,9 @@ def test_pitch_of_extrema(): rtol = 1e-7 pitch = pitch_of_extrema(k, B.c, B_z_ra.c, relative_shift=rtol) eps = 100 * np.finfo(float).eps - np.testing.assert_allclose(_filter_not_nan(pitch), pitch_scipy, rtol=rtol + eps) + np.testing.assert_allclose( + np.sort(_filter_not_nan(pitch)), np.sort(pitch_scipy), rtol=rtol + eps + ) @pytest.mark.unit From 57e2d32ee34ccfe44dadcadf724834ed8a997d62 Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 25 Apr 2024 02:30:24 -0400 Subject: [PATCH 125/241] Use clip instead of shifting in arcsin automorph --- desc/compute/bounce_integral.py | 30 ++++++++++++------------------ tests/test_bounce_integral.py | 23 +++++++++++++---------- 2 files changed, 25 insertions(+), 28 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index f1b4ae9d15..887395d752 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -269,7 +269,7 @@ def _poly_val(x, c): return val -def composite_linspace(breaks, resolution, is_sorted=False): +def composite_linspace(breaks, resolution): """Returns linearly spaced points between breakpoints. Parameters @@ -277,20 +277,17 @@ def composite_linspace(breaks, resolution, is_sorted=False): breaks : Array First axis has values to return linearly spaced values between. The remaining axes are batch axes. + Assumes input is sorted. resolution : int Number of points between each break. - is_sorted : bool - Whether the breaks are already sorted along the first axis. Returns ------- pts : Array, shape((breaks.shape[0] - 1) * resolution + 1, *breaks.shape[1:]) - Sorted linearly spaced points between ``breaks``. + Linearly spaced points between ``breaks``. 
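
The `_root_quadratic` change above follows the standard stable recipe: form q = -1/2 (b + sgn * sqrt(b^2 - 4ac)) so that b and the square root add rather than cancel, then take the roots q/a and c/q. A minimal real-coefficient NumPy sketch of the same idea (the helper name and test values below are illustrative only; the DESC implementation additionally handles the linear and repeated-root cases and complex discriminants):

.. code-block:: python

    import numpy as np

    def stable_quadratic_roots(a, b, c):
        """Roots of a*x**2 + b*x + c = 0 for real a != 0, avoiding cancellation."""
        discriminant = b**2 - 4 * a * c
        sgn = np.where(b >= 0, 1.0, -1.0)
        q = -0.5 * (b + sgn * np.sqrt(discriminant))
        return q / a, c / q

    # With b**2 >> 4ac the textbook formula (-b + sqrt(disc)) / (2a) cancels badly:
    # in double precision it returns roughly -7.5e-9 for the small root below,
    # while the stable form recovers the correct value near -1e-8.
    r_big, r_small = stable_quadratic_roots(1.0, 1e8, 1.0)
    np.testing.assert_allclose(r_small, -1e-8, rtol=1e-12)
    np.testing.assert_allclose(r_big, -1e8)
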
""" breaks = jnp.atleast_1d(breaks) - if not is_sorted: - breaks = jnp.sort(breaks, axis=0) pts = jnp.linspace(breaks[:-1, ...], breaks[1:, ...], resolution, endpoint=False) pts = jnp.moveaxis(pts, source=0, destination=1).reshape(-1, *breaks.shape[1:]) pts = jnp.append(pts, breaks[jnp.newaxis, -1, ...], axis=0) @@ -410,15 +407,14 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): B_zz_ra_extrema = _poly_val(x=extrema, c=_poly_der(B_z_ra_c)[..., jnp.newaxis]) # Floating point error impedes consistent detection of bounce points riding # extrema. Shift pitch values slightly to resolve this issue. - # Higher priority to shift down maxima than shift up minima, so identify near - # equality with zero as maxima. - is_maxima = B_zz_ra_extrema <= 0 - # Reshape so that last axis enumerates extrema along a field line. B_extrema = jnp.where( - is_maxima, + # Higher priority to shift down maxima than shift up minima, so identify + # near equality with zero as maxima. + B_zz_ra_extrema <= 0, (1 - relative_shift) * B_extrema, (1 + relative_shift) * B_extrema, ).reshape(S, -1) + # Reshape so that last axis enumerates extrema along a field line. B_extrema = take_mask(B_extrema, ~jnp.isnan(B_extrema)) pitch = 1 / B_extrema.T assert pitch.shape == (N * (degree - 1), S) @@ -733,7 +729,7 @@ def grad_automorphism_arcsin(x): grad_automorphism_arcsin.__doc__ += "\n" + automorphism_arcsin.__doc__ -def automorphism_sin(x, eps=5e-7): +def automorphism_sin(x, eps=None): """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. The derivative of the sin automorphism is Lipschitz. @@ -763,16 +759,14 @@ def automorphism_sin(x, eps=5e-7): Transformed points. """ - x = jnp.where(x > +eps, x - eps, x) - x = jnp.where(x < -eps, x + eps, x) y = jnp.sin(jnp.pi * x / 2) - return y + if eps is None: + eps = 1e3 * jnp.finfo(jnp.array(1.0).dtype).eps + return jnp.clip(y, -1 + eps, 1 - eps) -def grad_automorphism_sin(x, eps=5e-7): +def grad_automorphism_sin(x): """Gradient of sin automorphism.""" - x = jnp.where(x > +eps, x - eps, x) - x = jnp.where(x < -eps, x + eps, x) dy_dx = jnp.pi * jnp.cos(jnp.pi * x / 2) / 2 return dy_dx diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index dddb89b53a..9464f16f3a 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -242,15 +242,16 @@ def test_composite_linspace(): """Test this utility function useful for Newton-Cotes integration over pitch.""" B_min_tz = np.array([0.1, 0.2]) B_max_tz = np.array([1, 3]) - pitch_knot = np.linspace(1 / B_min_tz, 1 / B_max_tz, num=5) - b_knot = 1 / pitch_knot - b = composite_linspace(b_knot, resolution=3) - print(b_knot) + pitch = np.linspace(1 / B_min_tz, 1 / B_max_tz, num=5) + breaks = 1 / pitch + breaks = np.sort(breaks, axis=0) + b = composite_linspace(breaks, resolution=3) + print(breaks) print(b) np.testing.assert_allclose(b, np.sort(b, axis=0), atol=0, rtol=0) - for i in range(pitch_knot.shape[0]): - for j in range(pitch_knot.shape[1]): - assert only1(np.isclose(b_knot[i, j], b[:, j]).tolist()) + for i in range(pitch.shape[0]): + for j in range(pitch.shape[1]): + assert only1(np.isclose(breaks[i, j], b[:, j]).tolist()) @pytest.mark.unit @@ -397,8 +398,8 @@ def test_automorphism(): x_1 = affine_bijection_reverse(y, a, b) np.testing.assert_allclose(x_1, x) np.testing.assert_allclose(_affine_bijection_forward(x_1, a, b), y) - np.testing.assert_allclose(automorphism_arcsin(automorphism_sin(y)), y, atol=1e-6) - np.testing.assert_allclose(automorphism_sin(automorphism_arcsin(y)), y, atol=1e-6) + 
np.testing.assert_allclose(automorphism_arcsin(automorphism_sin(y)), y, atol=5e-7) + np.testing.assert_allclose(automorphism_sin(automorphism_arcsin(y)), y, atol=5e-7) np.testing.assert_allclose( grad_affine_bijection_reverse(a, b), @@ -407,7 +408,7 @@ def test_automorphism(): np.testing.assert_allclose( grad_automorphism_sin(y), 1 / grad_automorphism_arcsin(automorphism_sin(y)), - atol=1e-6, + atol=2e-6, ) np.testing.assert_allclose( 1 / grad_automorphism_arcsin(y), @@ -418,6 +419,8 @@ def test_automorphism(): # test that floating point error is acceptable x, w = tanh_sinh_quad(19) assert np.all(np.abs(x) < 1) + y = 1 / (1 - np.abs(x)) + assert np.isfinite(y).all() y = 1 / (1 - np.abs(automorphism_sin(x))) assert np.isfinite(y).all() y = 1 / (1 - np.abs(automorphism_arcsin(x))) From 49e87d5f5d47b71318b0b1e881e8059d021f1a25 Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 25 Apr 2024 07:40:15 -0400 Subject: [PATCH 126/241] API changes to be able to compute effective ripple in compute_funs --- desc/compute/bounce_integral.py | 182 ++++++++++++++------------- desc/grid.py | 135 ++++++++------------ tests/test_bounce_integral.py | 213 +++++++++++++++----------------- tests/test_grid.py | 23 ++-- 4 files changed, 256 insertions(+), 297 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 887395d752..69c4bb5702 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -7,7 +7,7 @@ from desc.backend import complex_sqrt, flatnonzero, jnp, put_along_axis, take from desc.compute.utils import safediv -from desc.grid import Grid, meshgrid_inverse_idx, meshgrid_unique_idx +from desc.grid import Grid from desc.utils import errorif @@ -193,6 +193,7 @@ def _poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): if a_max is not None: a_max = a_max[..., jnp.newaxis] r = _filter_real(r, a_min, a_max) + if sort or distinct: r = jnp.sort(r, axis=-1) if distinct: @@ -320,8 +321,8 @@ def _check_shape(knots, B_c, B_z_ra_c, pitch=None): pitch : Array, shape(P, S) λ values. λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` - where in the latter the labels (ρ, α) are interpreted as index into the - last axis that corresponds to that field line. + where in the latter the labels (ρ, α) are interpreted as the index into + the last axis that corresponds to that field line. If two-dimensional, the first axis is the batch axis as usual. """ @@ -429,8 +430,8 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True): pitch : Array, shape(P, S) λ values. λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` - where in the latter the labels (ρ, α) are interpreted as index into the - last axis that corresponds to that field line. + where in the latter the labels (ρ, α) are interpreted as the index into + the last axis that corresponds to that field line. If two-dimensional, the first axis is the batch axis as usual. knots : Array, shape(knots.size, ) Field line-following ζ coordinates of spline knots. @@ -574,7 +575,10 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=False): print("bp2:", bp2_p) assert not err_1[p, s], msg_1 assert not err_2[p, s], msg_2 - msg_3 = f"B midpoint = {B_mid} > {1 / pitch[p, s] + eps} = 1/pitch." + msg_3 = ( + f"B midpoint = {B_mid} > {1 / pitch[p, s] + eps} = 1/pitch." + "Should use a monotonic spline." 
+ ) assert not err_3, msg_3 if plot: plot_field_line_with_ripple(B, pitch[:, s], bp1[:, s], bp2[:, s], id=str(s)) @@ -903,7 +907,7 @@ def _assert_finite_and_hairy(Z, f, B_sup_z, B, B_z_ra, inner_product): bounce integral of ``integrand(*f, B=B, pitch=pitch, Z=Z)``. Note that any arrays baked into the callable method should broadcast with ``Z``. - f : list or tuple of Array, shape(P, S, knots.size, ) + f : list of Array, shape(P, S, knots.size, ) Arguments to the callable ``integrand``. These should be the functions in the integrand of the bounce integral evaluated (or interpolated to) the nodes of the returned desc @@ -917,7 +921,7 @@ def _assert_finite_and_hairy(Z, f, B_sup_z, B, B_z_ra, inner_product): pitch : Array, shape(P, S) λ values to evaluate the bounce integral at each field line. λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` - where in the latter the labels (ρ, α) are interpreted as index into the + where in the latter the labels (ρ, α) are interpreted as the index into the last axis that corresponds to that field line. The first axis is the batch axis as usual. knots : Array, shape(knots.size, ) @@ -1031,23 +1035,23 @@ def _bounce_quadrature( lines. Last axis enumerates the bounce integrals. """ - errorif(x.ndim != 1 or x.shape != w.shape) errorif(bp1.ndim != 3 or bp1.shape != bp2.shape) + errorif(x.ndim != 1 or x.shape != w.shape) pitch = jnp.atleast_2d(pitch) S = B.shape[0] if not isinstance(f, (list, tuple)): f = [f] - def _group_grid_data_by_field_line(g): + def group_data_by_field_line_and_pitch(g): msg = ( - "Should have at most two dimensions, in which case the first axis " + "Should have at most three dimensions, in which case the first axis " "is interpreted as the batch axis, which enumerates the evaluation " "of the function at particular pitch values." ) - errorif(g.ndim > 2, msg=msg) + errorif(g.ndim > 3, msg=msg) return g.reshape(-1, S, knots.size) - f = map(_group_grid_data_by_field_line, f) + f = map(group_data_by_field_line_and_pitch, f) Z = affine_bijection_reverse(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) # Integrate and complete the change of variable. result = _interpolatory_quadrature( @@ -1063,10 +1067,10 @@ def _group_grid_data_by_field_line(g): def bounce_integral( - eq, - rho=jnp.linspace(1e-7, 1, 10), - alpha=None, - knots=jnp.linspace(-3 * jnp.pi, 3 * jnp.pi, 40), + B_sup_z, + B, + B_z_ra, + knots, quad=tanh_sinh_quad, automorphism=(automorphism_arcsin, grad_automorphism_arcsin), B_ref=1, @@ -1095,16 +1099,25 @@ def bounce_integral( Parameters ---------- - eq : Equilibrium - Equilibrium on which the bounce integral is computed. - rho : Array - Unique flux surface label coordinates. - alpha : Array - Unique field line label coordinates over a constant rho surface. - knots : Array - Field line following coordinate values at which to compute a spline - of the integrand, for every field line in the meshgrid formed from - rho and alpha specified above. + B_sup_z : Array, shape(S, knots.size, ) + Contravariant field-line following toroidal component of magnetic field. + B^ζ(ρ, α, ζ) is specified by ``B_sup_z[(ρ, α), ζ]``, where in the latter + the labels (ρ, α) are interpreted as the index into the first axis that + corresponds to that field line. + B : Array, shape(S, knots.size, ) + Norm of magnetic field. + |B|(ρ, α, ζ) is specified by ``B[(ρ, α), ζ]``, where in the latter + the labels (ρ, α) are interpreted as the index into the first axis that + corresponds to that field line. 
+ B_z_ra : Array, shape(S, knots.size, ) + Norm of magnetic field derivative with respect to field-line following label. + ∂|B|/∂_ζ(ρ, α, ζ) is specified by ``B_z_ra[(ρ, α), ζ]``, where in the latter + the labels (ρ, α) are interpreted as the index into the first axis that + corresponds to that field line. + knots : Array, shape(knots.size, ) + Field line following coordinate values at which ``B_sup_z``, + ``B``, and ``B_z_ra`` were evaluated. + These knots are used to compute a spline of the integrand. The number of knots specifies a grid resolution as increasing the number of knots increases the accuracy of representing the integrand and the accuracy of the locations of the bounce points. @@ -1116,7 +1129,7 @@ def bounce_integral( Tanh-Sinh quadrature works well if the integrand is singular. Otherwise, Gauss-Legendre quadrature with the sin automorphism can be more competitive. - automorphism : callable, callable + automorphism : (callable, callable) The first callable should be an automorphism of the real interval [-1, 1]. The second callable should be the derivative of the first. The inverse of the supplied automorphism is composed with the affine @@ -1142,12 +1155,8 @@ def bounce_integral( bounce_integrate : callable This callable method computes the bounce integral ∫ f(ℓ) dℓ for every specified field line ℓ for every λ value in ``pitch``. - items : dict - grid_desc : Grid - DESC coordinate grid for the given field line coordinates. - grid_fl : Grid - Clebsch-Type field-line coordinate grid. - knots : Array, + spline : dict + knots : Array, shape(knots.size, ) Field line-following ζ coordinates of spline knots. B.c : Array, shape(4, S, knots.size - 1) Polynomial coefficients of the spline of |B| in local power basis. @@ -1173,6 +1182,21 @@ def bounce_integral( .. code-block:: python + eq = get("HELIOTRON") + rho = np.linspace(1e-12, 1, 6) + alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) + knots = np.linspace(-3 * np.pi, 3 * np.pi, 40) + grid_desc, grid_fl = desc_grid_from_field_line_coords(eq, rho, alpha, knots) + data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid_desc) + bounce_integrate, spline = bounce_integral( + data["B^zeta"], + data["|B|"], + data["|B|_z|r,a"], + knots, + check=True, + plot=False, + ) + def integrand_num(g_zz, B, pitch, Z): # Integrand in integral in numerator of bounce average. f = (1 - pitch * B) * g_zz @@ -1182,17 +1206,12 @@ def integrand_den(B, pitch, Z): # Integrand in integral in denominator of bounce average. return safediv(1, jnp.sqrt(1 - pitch * B)) - eq = get("HELIOTRON") - rho = jnp.linspace(1e-12, 1, 6) - alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 5) - bounce_integrate, items = bounce_integral(eq, rho, alpha) - - g_zz = eq.compute("g_zz", grid=items["grid_desc"])["g_zz"] - pitch = pitch_of_extrema(items["knots"], items["B.c"], items["B_z_ra.c"]) + g_zz = eq.compute("g_zz", grid=grid_desc, data=data)["g_zz"] + pitch = pitch_of_extrema(knots, spline["B.c"], spline["B_z_ra.c"]) num = bounce_integrate(integrand_num, g_zz, pitch) den = bounce_integrate(integrand_den, [], pitch) average = num / den - assert jnp.isfinite(average).any() + assert np.isfinite(average).any() # Now we can group the data by field line. 
average = average.reshape(pitch.shape[0], rho.size, alpha.size, -1) @@ -1201,27 +1220,28 @@ def integrand_den(B, pitch, Z): print(average[:, i, j]) # are the bounce averages along the field line with nodes # given in Clebsch-Type field-line coordinates ρ, α, ζ - nodes = items["grid_fl"].nodes.reshape(rho.size, alpha.size, -1, 3) + nodes = grid_fl.nodes.reshape(rho.size, alpha.size, -1, 3) print(nodes[i, j]) # for the pitch values stored in pitch = pitch.reshape(pitch.shape[0], rho.size, alpha.size) print(pitch[:, i, j]) # Some of these bounce averages will evaluate as nan. # You should filter out these nan values when computing stuff. - print(jnp.nansum(average, axis=-1)) + print(np.nansum(average, axis=-1)) """ - if alpha is None: - alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 10) - rho = jnp.atleast_1d(rho) - alpha = jnp.atleast_1d(alpha) - knots = jnp.atleast_1d(knots) - grid_desc, grid_fl = desc_grid_from_field_line_coords(eq, rho, alpha, knots) - data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid_desc) - B_sup_z = data["B^zeta"].reshape(-1, knots.size) * L_ref / B_ref - B = data["|B|"].reshape(-1, knots.size) / B_ref - B_z_ra = data["|B|_z|r,a"].reshape(-1, knots.size) / B_ref + def group_data_by_field_line(g): + errorif(g.ndim > 2) + return g.reshape(-1, knots.size) + + B_sup_z = B_sup_z * L_ref / B_ref + B = B / B_ref + B_z_ra = B_z_ra / B_ref + B_sup_z, B, B_z_ra = map(group_data_by_field_line, (B_sup_z, B, B_z_ra)) + errorif(not (B_sup_z.shape == B.shape == B_z_ra.shape)) + + # Compute splines. monotonic = kwargs.pop("monotonic", False) B_c = ( PchipInterpolator(knots, B, axis=-1, check=check).c @@ -1231,15 +1251,10 @@ def integrand_den(B, pitch, Z): B_c = jnp.moveaxis(B_c, source=1, destination=-1) B_z_ra_c = _poly_der(B_c) degree = 3 - assert B_c.shape == (degree + 1, rho.size * alpha.size, knots.size - 1) - assert B_z_ra_c.shape == (degree, rho.size * alpha.size, knots.size - 1) - items = { - "grid_desc": grid_desc, - "grid_fl": grid_fl, - "knots": knots, - "B.c": B_c, - "B_z_ra.c": B_z_ra_c, - } + assert B_c.shape[0] == degree + 1 + assert B_z_ra_c.shape[0] == degree + assert B_c.shape[-1] == B_z_ra_c.shape[-1] == knots.size - 1 + spline = {"knots": knots, "B.c": B_c, "B_z_ra.c": B_z_ra_c} if quad == tanh_sinh_quad: kwargs.setdefault("resolution", 19) @@ -1265,19 +1280,18 @@ def bounce_integrate(integrand, f, pitch, method="akima"): bounce integral of ``integrand(*f, B=B, pitch=pitch, Z=Z)``. Note that any arrays baked into the callable method should broadcast with ``Z``. - f : list of Array, shape(P, items["grid_desc"].num_nodes, ) + f : list of Array, shape(..., S, knots.size) Arguments to the callable ``integrand``. These should be the functions in the integrand of the bounce integral - evaluated (or interpolated to) the nodes of the returned desc - coordinate grid. - Should have at most two dimensions, in which case the first axis + evaluated (or interpolated to) DESC grid. + Should have at most three dimensions, in which case the first axis is interpreted as the batch axis, which enumerates the evaluation of the function at particular pitch values. pitch : Array, shape(P, S) λ values to evaluate the bounce integral at each field line. λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` - where in the latter the labels (ρ, α) are interpreted as index into the - last axis that corresponds to that field line. + where in the latter the labels (ρ, α) are interpreted as the index into + the last axis that corresponds to that field line. 
If two-dimensional, the first axis is the batch axis as usual. method : str Method of interpolation for functions contained in ``f``. @@ -1311,10 +1325,15 @@ def bounce_integrate(integrand, f, pitch, method="akima"): assert result.shape[-1] == (knots.size - 1) * degree return result - return bounce_integrate, items + return bounce_integrate, spline -def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): +def desc_grid_from_field_line_coords( + eq, + rho=jnp.linspace(1e-7, 1, 10), + alpha=None, + zeta=jnp.linspace(-3 * jnp.pi, 3 * jnp.pi, 40), +): """Return DESC coordinate grid from given Clebsch-Type field-line coordinates. Create a meshgrid from the given field line coordinates, @@ -1328,6 +1347,7 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): Unique flux surface label coordinates. alpha : ndarray Unique field line label coordinates over a constant rho surface. + Defaults to 20 linearly spaced nodes. zeta : ndarray Unique field line-following ζ coordinates. @@ -1339,19 +1359,11 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): Clebsch-Type field-line coordinate grid. """ - r, a, z_fl = map(jnp.ravel, jnp.meshgrid(rho, alpha, zeta, indexing="ij")) - coords_fl = jnp.column_stack([r, a, z_fl]) - _unique_rho_idx = meshgrid_unique_idx(rho.size, alpha.size, zeta.size)[0] - _inverse_rho_idx = meshgrid_inverse_idx(rho.size, alpha.size, zeta.size)[0] - grid_fl = Grid( - nodes=coords_fl, - sort=False, - jitable=True, - _unique_rho_idx=_unique_rho_idx, - _inverse_rho_idx=_inverse_rho_idx, - ) + if alpha is None: + alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 20) + grid_fl = Grid.create_meshgrid(rho, alpha, zeta) coords_desc = eq.map_coordinates( - coords_fl, + grid_fl.nodes, inbasis=("rho", "alpha", "zeta"), outbasis=("rho", "theta", "zeta"), period=(jnp.inf, 2 * jnp.pi, jnp.inf), @@ -1360,7 +1372,7 @@ def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): nodes=coords_desc, sort=False, jitable=True, - _unique_rho_idx=_unique_rho_idx, - _inverse_rho_idx=_inverse_rho_idx, + _unique_rho_idx=grid_fl.unique_rho_idx, + _inverse_rho_idx=grid_fl.inverse_rho_idx, ) return grid_desc, grid_fl diff --git a/desc/grid.py b/desc/grid.py index 8f6ef2fc13..3620017aee 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -16,8 +16,6 @@ "ConcentricGrid", "find_least_rational_surfaces", "find_most_rational_surfaces", - "meshgrid_unique_idx", - "meshgrid_inverse_idx", ] @@ -496,9 +494,58 @@ class Grid(_Grid): jitable : bool Whether to skip certain checks and conditionals that don't work under jit. Allows grid to be created on the fly with custom nodes, but weights, symmetry - etc may be wrong if grid contains duplicate nodes. + etc. may be wrong if grid contains duplicate nodes. """ + @classmethod + def create_meshgrid(cls, a, b, c): + """Create a meshgrid from the given coordinates. + + Parameters + ---------- + a, b, c : Array, Array, Array + Unique values of each coordinate. + + Returns + ------- + grid : Grid + Meshgrid with indices assigned. 
+ + """ + a, b, c = map(jnp.atleast_1d, (a, b, c)) + aa, bb, cc = map(jnp.ravel, jnp.meshgrid(a, b, c, indexing="ij")) + nodes = jnp.column_stack([aa, bb, cc]) + + ds = jnp.array([1 / a.size, 2 * jnp.pi / b.size, 2 * jnp.pi / c.size]) + num_nodes = a.size * b.size * c.size + spacing = jnp.ones(num_nodes)[:, jnp.newaxis] * ds + + unique_a_idx = jnp.arange(a.size) * b.size * c.size + unique_b_idx = jnp.arange(b.size) * c.size + unique_c_idx = jnp.arange(c.size) + inverse_a_idx = repeat( + unique_a_idx // (b.size * c.size), + b.size * c.size, + total_repeat_length=num_nodes, + ) + inverse_b_idx = jnp.tile( + repeat(unique_b_idx // c.size, c.size, total_repeat_length=b.size * c.size), + a.size, + ) + inverse_c_idx = jnp.tile(unique_c_idx, a.size * b.size) + return cls( + nodes=nodes, + spacing=spacing, + sort=False, + jitable=True, + _unique_rho_idx=unique_a_idx, + _unique_theta_idx=unique_b_idx, + _unique_zeta_idx=unique_c_idx, + _inverse_rho_idx=inverse_a_idx, + _inverse_theta_idx=inverse_b_idx, + _inverse_zeta_idx=inverse_c_idx, + ) + def __init__(self, nodes, sort=False, jitable=False, spacing=None, **kwargs): # Python 3.3 (PEP 412) introduced key-sharing dictionaries. # This change measurably reduces memory usage of objects that @@ -512,8 +559,8 @@ def __init__(self, nodes, sort=False, jitable=False, spacing=None, **kwargs): if sort: self._sort_nodes() if jitable: - # dont do anything with symmetry since that changes # of nodes - # avoid point at the axis, for now. FIXME: make axis boolean mask? + # Don't do anything with symmetry since that changes # of nodes + # avoid point at the axis, for now. r, t, z = self._nodes.T r = jnp.where(r == 0, 1e-12, r) self._nodes = jnp.array([r, t, z]).T @@ -1572,81 +1619,3 @@ def find_least_rational_surfaces( io = find_most_distant(io_rat, n, a, b, tol=atol, **kwargs) rho = _find_rho(iota, io, tol=atol) return rho, io - - -def meshgrid_inverse_idx(a_size, b_size, c_size): - """Return inverse indices for meshgrid pattern. - - It is common to construct a meshgrid in the following manner. - .. code-block:: python - - a, b, c = jnp.meshgrid(a, b, c, indexing="ij") - a, b, c = map(jnp.ravel, (a, b, c)) - nodes = jnp.column_stack([a, b, c]) - grid = Grid(nodes, sort=False, jitable=True) - - Since ``jitable=True`` was specified, the attribute ``grid.inverse_*_idx`` - can not be automatically computed. This method computes these indices. - One can then pass them in as keyword arguments to the Grid constructor. - - Parameters - ---------- - a_size : int - Size of the first argument to meshgrid. - b_size : int - Size of the second argument to meshgrid. - c_size : int - Size of the third argument to meshgrid. - - Returns - ------- - inverse_idx : ndarray, ndarray, ndarray - The inverse indices. - - """ - inverse_a_idx = repeat( - jnp.arange(a_size), - b_size * c_size, - total_repeat_length=a_size * b_size * c_size, - ) - inverse_b_idx = jnp.tile( - repeat(jnp.arange(b_size), c_size, total_repeat_length=b_size * c_size), a_size - ) - inverse_c_idx = jnp.tile(jnp.arange(c_size), a_size * b_size) - return inverse_a_idx, inverse_b_idx, inverse_c_idx - - -def meshgrid_unique_idx(a_size, b_size, c_size): - """Return unique indices for meshgrid pattern. - - It is common to construct a meshgrid in the following manner. - .. 
code-block:: python - - a, b, c = jnp.meshgrid(a, b, c, indexing="ij") - a, b, c = map(jnp.ravel, (a, b, c)) - nodes = jnp.column_stack([a, b, c]) - grid = Grid(nodes, sort=False, jitable=True) - - Since ``jitable=True`` was specified, the attribute ``grid.unique_*_idx`` - can not be automatically computed. This method computes these indices. - One can then pass them in as keyword arguments to the Grid constructor. - - Parameters - ---------- - a_size : int - Size of the first argument to meshgrid. - b_size : int - Size of the second argument to meshgrid. - c_size : int - Size of the third argument to meshgrid. - - Returns - ------- - unique_idx : ndarray, ndarray, ndarray - The unique indices. - - """ - unique_a_idx = jnp.arange(a_size) * b_size * c_size - unique_b_idx = jnp.arange(b_size) * c_size - unique_c_idx = jnp.arange(c_size) - return unique_a_idx, unique_b_idx, unique_c_idx diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 9464f16f3a..cf22adcf1f 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -13,7 +13,6 @@ from desc.backend import complex_sqrt, flatnonzero from desc.compute.bounce_integral import ( _affine_bijection_forward, - _bounce_quadrature, _filter_not_nan, _poly_der, _poly_root, @@ -38,6 +37,7 @@ from desc.equilibrium import Equilibrium from desc.examples import get from desc.geometry import FourierRZToroidalSurface +from desc.grid import Grid from desc.objectives import ( ObjectiveFromUser, ObjectiveFunction, @@ -94,7 +94,8 @@ def test_reshape_convention(): rho = np.linspace(0, 1, 3) alpha = np.linspace(0, 2 * np.pi, 4) zeta = np.linspace(0, 6 * np.pi, 5) - r, a, z = map(np.ravel, np.meshgrid(rho, alpha, zeta, indexing="ij")) + grid = Grid.create_meshgrid(rho, alpha, zeta) + r, a, z = grid.nodes.T # functions of zeta should separate along first two axes # since those are contiguous, this should work f = z.reshape(-1, zeta.size) @@ -120,9 +121,6 @@ def test_reshape_convention(): src = inspect.getsource(bounce_integral) assert "S, knots.size" in src, err_msg assert "pitch.shape[0], rho.size, alpha.size" in src, err_msg - src = inspect.getsource(desc_grid_from_field_line_coords) - assert 'indexing="ij"' in src, err_msg - assert 'meshgrid(rho, alpha, zeta, indexing="ij")' in src, err_msg @pytest.mark.unit @@ -159,16 +157,18 @@ def test_poly_root(): root = _poly_root(c.T, sort=True, distinct=True) for j in range(c.shape[0]): unique_roots = np.unique(np.roots(c[j])) + root_filter = _filter_not_nan(root[j]) + assert root_filter.size == unique_roots.size np.testing.assert_allclose( - actual=_filter_not_nan(root[j]), + actual=root_filter, desired=unique_roots, err_msg=str(j), ) c = np.array([0, 1, -1, -8, 12]) - np.testing.assert_allclose( - actual=_filter_not_nan(_poly_root(c, sort=True, distinct=True)), - desired=np.unique(np.roots(c)), - ) + root = _filter_not_nan(_poly_root(c, sort=True, distinct=True)) + unique_root = np.unique(np.roots(c)) + assert root.size == unique_root.size + np.testing.assert_allclose(root, unique_root) @pytest.mark.unit @@ -228,13 +228,13 @@ def test_pitch_of_extrema(): k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) ) B_z_ra = B.derivative() - pitch_scipy = 1 / B(B_z_ra.roots(extrapolate=False)) + pitch_scipy = np.sort(1 / B(B_z_ra.roots(extrapolate=False))) rtol = 1e-7 pitch = pitch_of_extrema(k, B.c, B_z_ra.c, relative_shift=rtol) eps = 100 * np.finfo(float).eps - np.testing.assert_allclose( - np.sort(_filter_not_nan(pitch)), np.sort(pitch_scipy), rtol=rtol + eps - 
) + pitch = np.sort(_filter_not_nan(pitch)) + assert pitch.size == pitch_scipy.size + np.testing.assert_allclose(pitch, pitch_scipy, rtol=rtol + eps) @pytest.mark.unit @@ -268,6 +268,7 @@ def test_bp1_first(): pitch, knots, B.c, B.derivative().c, check=True, plot=False ) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1, intersect[0::2]) np.testing.assert_allclose(bp2, intersect[1::2]) @@ -282,6 +283,7 @@ def test_bp2_first(): pitch, k, B.c, B.derivative().c, check=True, plot=False ) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1, intersect[1::2]) np.testing.assert_allclose(bp2, intersect[0::2][1:]) @@ -297,6 +299,7 @@ def test_bp1_before_extrema(): pitch = 1 / B(B_z_ra.roots(extrapolate=False))[3] bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True, plot=False) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + assert bp1.size and bp2.size # Our routine correctly detects intersection, while scipy, jnp.root fails. intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1[1], 1.9827671337414938) @@ -317,6 +320,7 @@ def test_bp2_before_extrema(): pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True, plot=False) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1, intersect[[0, -2]]) np.testing.assert_allclose(bp2, intersect[[1, -1]]) @@ -338,6 +342,7 @@ def test_extrema_first_and_before_bp1(plot=False): if plot: plot_field_line_with_ripple(B, pitch, bp1, bp2, start=k[2]) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + assert bp1.size and bp2.size # Our routine correctly detects intersection, while scipy, jnp.root fails. intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1[0], 0.8353192766102349) @@ -371,6 +376,7 @@ def test_extrema_first_and_before_bp2(): # otherwise the interior of the bounce points would be hills and not valleys. bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True, plot=False) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + assert bp1.size and bp2.size # Our routine correctly detects intersection, while scipy, jnp.root fails. 
intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1[0], -0.6719044147510538) @@ -440,55 +446,45 @@ def test_bounce_quadrature(): truth = v * 2 * ellipkm1(p) rtol = 1e-3 + def integrand(B, pitch, Z): + return 1 / np.sqrt(1 - pitch * m * B) + bp1 = -np.pi / 2 * v bp2 = -bp1 knots = np.linspace(bp1, bp2, 15) - bp1 = np.atleast_3d(bp1) - bp2 = np.atleast_3d(bp2) - B_sup_z = np.ones((1, knots.size)) - B = (np.sin(knots / v) ** 2).reshape(1, -1) - B_z_ra = (np.sin(2 * knots / v) / v).reshape(1, -1) - pitch = np.ones((1, 1)) - - def integrand(B, pitch, Z): - return 1 / np.sqrt(1 - pitch * m * B) + B_sup_z = np.ones(knots.size) + B = np.clip(np.sin(knots / v) ** 2, 1e-7, 1) + B_z_ra = np.sin(2 * knots / v) / v + pitch = 1 + np.finfo(np.array(1.0).dtype).eps - # augment the singularity - x_t, w_t = tanh_sinh_quad(18, grad_automorphism_arcsin) - x_t = automorphism_arcsin(x_t) - tanh_sinh_arcsin = _bounce_quadrature( - bp1, - bp2, - x_t, - w_t, - integrand, - [], + bounce_integrate, _ = bounce_integral( B_sup_z, B, B_z_ra, - pitch, knots, + quad=tanh_sinh_quad, + automorphism=(automorphism_arcsin, grad_automorphism_arcsin), + resolution=18, check=True, + plot=False, ) + tanh_sinh_arcsin = _filter_not_nan(bounce_integrate(integrand, [], pitch)) + assert tanh_sinh_arcsin.size == 1 np.testing.assert_allclose(tanh_sinh_arcsin, truth, rtol=rtol) - x_g, w_g = np.polynomial.legendre.leggauss(16) - # suppress the singularity - w_g = w_g * grad_automorphism_sin(x_g) - x_g = automorphism_sin(x_g) - leg_gauss_sin = _bounce_quadrature( - bp1, - bp2, - x_g, - w_g, - integrand, - [], + + bounce_integrate, _ = bounce_integral( B_sup_z, B, B_z_ra, - pitch, knots, + quad=np.polynomial.legendre.leggauss, + automorphism=(automorphism_sin, grad_automorphism_sin), + deg=16, check=True, + plot=False, ) + leg_gauss_sin = _filter_not_nan(bounce_integrate(integrand, [], pitch)) + assert leg_gauss_sin.size == 1 np.testing.assert_allclose(leg_gauss_sin, truth, rtol=rtol) @@ -498,6 +494,20 @@ def test_example_bounce_integral(): # This test also stress tests the bounce_points routine because # the |B| spline that is generated from this combination of knots # equilibrium etc. has many edge cases for bounce point computations. 
+ eq = get("HELIOTRON") + rho = np.linspace(1e-12, 1, 6) + alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) + knots = np.linspace(-3 * np.pi, 3 * np.pi, 40) + grid_desc, grid_fl = desc_grid_from_field_line_coords(eq, rho, alpha, knots) + data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid_desc) + bounce_integrate, spline = bounce_integral( + data["B^zeta"], + data["|B|"], + data["|B|_z|r,a"], + knots, + check=True, + plot=False, + ) def integrand_num(g_zz, B, pitch, Z): """Integrand in integral in numerator of bounce average.""" @@ -508,13 +518,8 @@ def integrand_den(B, pitch, Z): """Integrand in integral in denominator of bounce average.""" return safediv(1, _sqrt(1 - pitch * B)) - eq = get("HELIOTRON") - rho = np.linspace(1e-12, 1, 6) - alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) - - bounce_integrate, items = bounce_integral(eq, rho, alpha, check=True, plot=False) - g_zz = eq.compute("g_zz", grid=items["grid_desc"])["g_zz"] - pitch = pitch_of_extrema(items["knots"], items["B.c"], items["B_z_ra.c"]) + g_zz = eq.compute("g_zz", grid=grid_desc, data=data)["g_zz"] + pitch = pitch_of_extrema(knots, spline["B.c"], spline["B_z_ra.c"]) num = bounce_integrate(integrand_num, g_zz, pitch) den = bounce_integrate(integrand_den, [], pitch) average = num / den @@ -527,7 +532,7 @@ def integrand_den(B, pitch, Z): print(average[:, i, j]) # are the bounce averages along the field line with nodes # given in Clebsch-Type field-line coordinates ρ, α, ζ - nodes = items["grid_fl"].nodes.reshape(rho.size, alpha.size, -1, 3) + nodes = grid_fl.nodes.reshape(rho.size, alpha.size, -1, 3) print(nodes[i, j]) # for the pitch values stored in pitch = pitch.reshape(pitch.shape[0], rho.size, alpha.size) @@ -594,33 +599,30 @@ def test_bounce_averaged_drifts(): eq = Equilibrium.load(".//tests//inputs//low-beta-shifted-circle.h5") psi = 0.25 # normalized psi rho = np.sqrt(psi) - data = eq.compute(["iota", "iota_r", "a", "rho", "psi"]) + data_init = eq.compute(["iota", "iota_r", "a", "rho", "psi"]) # normalization - L_ref = data["a"] + L_ref = data_init["a"] epsilon = L_ref * rho - psi_boundary = data["psi"][np.argmax(np.abs(data["psi"]))] + psi_boundary = data_init["psi"][np.argmax(np.abs(data_init["psi"]))] B_ref = 2 * np.abs(psi_boundary) / L_ref**2 - # Creating a grid along a field line - iota = np.interp(rho, data["rho"], data["iota"]) - shear = np.interp(rho, data["rho"], data["iota_r"]) + iota = np.interp(rho, data_init["rho"], data_init["iota"]) + shear = np.interp(rho, data_init["rho"], data_init["iota_r"]) N = (2 * eq.M_grid) * 4 + 1 zeta = np.linspace(-np.pi / iota, np.pi / iota, N) alpha = 0 theta_PEST = alpha + iota * zeta - # TODO: Request: The bounce integral operator should be able to take a grid. - # Response: Currently the API is such that the method does all the - # above preprocessing for you. Let's test it for correctness - # first then do this later. + grid_desc, _ = desc_grid_from_field_line_coords(eq, rho, alpha, zeta) + data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid_desc) resolution = 50 # Whether to use monotonic or Hermite splines to interpolate |B|. 
monotonic = False - bounce_integrate, items = bounce_integral( - eq=eq, - rho=rho, - alpha=alpha, + bounce_integrate, spline = bounce_integral( + data["B^zeta"], + data["|B|"], + data["|B|_z|r,a"], knots=zeta, B_ref=B_ref, L_ref=L_ref, @@ -629,52 +631,45 @@ def test_bounce_averaged_drifts(): resolution=resolution, monotonic=monotonic, ) - data_keys = [ - "|grad(psi)|^2", - "grad(psi)", - "B", - "iota", - "|B|", - "B^zeta", - "cvdrift0", - "cvdrift", - "gbdrift", - ] # FIXME (outside scope of the bounce branch): # override_grid should not be required for the test to pass. # and anytime override_grid is true we should print a blue warning. - data_bounce = eq.compute(data_keys, grid=items["grid_desc"], override_grid=False) + data = eq.compute( + [ + "|grad(psi)|^2", + "grad(psi)", + "B", + "iota", + "|B|", + "B^zeta", + "cvdrift0", + "cvdrift", + "gbdrift", + ], + grid=grid_desc, + data=data, + override_grid=False, + ) # normalizations - bmag = data_bounce["|B|"] / B_ref + bmag = data["|B|"] / B_ref B0 = np.mean(bmag) bmag_analytic = B0 * (1 - epsilon * np.cos(theta_PEST)) np.testing.assert_allclose(bmag, bmag_analytic, atol=5e-3, rtol=5e-3) x = L_ref * rho # same as epsilon? s_hat = -x / iota * shear / L_ref - gradpar = L_ref * data_bounce["B^zeta"] / data_bounce["|B|"] - gradpar_analytic = ( - 2 * L_ref * data_bounce["iota"] * (1 - epsilon * np.cos(theta_PEST)) - ) + gradpar = L_ref * data["B^zeta"] / data["|B|"] + gradpar_analytic = 2 * L_ref * data["iota"] * (1 - epsilon * np.cos(theta_PEST)) np.testing.assert_allclose(gradpar, gradpar_analytic, atol=9e-3, rtol=5e-3) # Comparing coefficient calculation here with coefficients from compute/_metric - cvdrift = ( - -2 * np.sign(psi_boundary) * B_ref * L_ref**2 * rho * data_bounce["cvdrift"] - ) - gbdrift = ( - -2 * np.sign(psi_boundary) * B_ref * L_ref**2 * rho * data_bounce["gbdrift"] - ) - dPdrho = np.mean(-0.5 * (cvdrift - gbdrift) * data_bounce["|B|"] ** 2) - alpha_MHD = -np.mean(dPdrho * 1 / data_bounce["iota"] ** 2 * 0.5) - - gds21 = ( - -np.sign(iota) - * dot(data_bounce["grad(psi)"], data_bounce["grad(alpha)"]) - * s_hat - / B_ref - ) + cvdrift = -2 * np.sign(psi_boundary) * B_ref * L_ref**2 * rho * data["cvdrift"] + gbdrift = -2 * np.sign(psi_boundary) * B_ref * L_ref**2 * rho * data["gbdrift"] + dPdrho = np.mean(-0.5 * (cvdrift - gbdrift) * data["|B|"] ** 2) + alpha_MHD = -np.mean(dPdrho * 1 / data["iota"] ** 2 * 0.5) + + gds21 = -np.sign(iota) * dot(data["grad(psi)"], data["grad(alpha)"]) * s_hat / B_ref gds21_analytic = ( -1 * s_hat * (s_hat * theta_PEST - alpha_MHD / bmag**4 * np.sin(theta_PEST)) ) @@ -741,9 +736,9 @@ def integrand(cvdrift, gbdrift, B, pitch, Z): pitch=pitch.reshape(pitch_resolution, -1), method=method, ) - # There is only one bounce integral per pitch in this example. bounce_drift = np.squeeze(_filter_not_nan(bounce_drift)) - assert bounce_drift.shape == bounce_drift_analytic.shape + msg = "There is only one bounce integral per pitch in this example." + assert bounce_drift.size == bounce_drift_analytic.size, msg plt.plot(1 / pitch, bounce_drift_analytic, marker="o", label="analytic") plt.plot(1 / pitch, bounce_drift, marker="x", label="numerical") @@ -753,12 +748,10 @@ def integrand(cvdrift, gbdrift, B, pitch, Z): plt.tight_layout() plt.show() msg = ( - "Maybe tune these parameters?\n" f"Quadrature resolution is {resolution}.\n" f"Delta shift is {delta_shift}.\n" f"Spline method for integrand quantities is {method}.\n" f"Spline method for |B| is monotonic? 
(as opposed to Hermite): {monotonic}.\n" - f"Fudge factors: {fudge_factor_gbdrift}, {fudge_factor_cvdrift}.\n" ) np.testing.assert_allclose( bounce_drift, bounce_drift_analytic, atol=2e-2, rtol=1e-2, err_msg=msg @@ -805,15 +798,3 @@ def beta(grid, data): objective=objective, constraints=constraints, optimizer=opt ) print(result) - - rho = np.array([0.5]) - alpha = np.linspace(0, (2 - eq.sym) * np.pi, 10) - knots = np.linspace(0, 6 * np.pi, 20) - # TODO now compare result to elliptic integral - bounce_integrate, items = bounce_integral( - eq, rho, alpha, knots, check=True, plot=False - ) - pitch = pitch_of_extrema(knots, items["B.c"], items["B_z_ra.c"]) - bp1, bp2 = bounce_points( - pitch, knots, items["B.c"], items["B_z_ra.c"], check=True, plot=False - ) diff --git a/tests/test_grid.py b/tests/test_grid.py index 0c93df228a..1b0fbf4e43 100644 --- a/tests/test_grid.py +++ b/tests/test_grid.py @@ -14,8 +14,6 @@ dec_to_cf, find_least_rational_surfaces, find_most_rational_surfaces, - meshgrid_inverse_idx, - meshgrid_unique_idx, ) from desc.profiles import PowerSeriesProfile @@ -757,23 +755,22 @@ def test(surface_label, grid): test("zeta", cg_sym) @pytest.mark.unit - def test_meshgrid_idx(self): - """Test unique, inverse idx computing logic from meshgrid.""" + def test_meshgrid(self): + """Test meshgrid constructor.""" R = np.linspace(0, 1, 4) T = np.linspace(0, 2 * np.pi, 2) Z = np.linspace(0, 10 * np.pi, 3) - r, t, z = map(np.ravel, np.meshgrid(R, T, Z, indexing="ij")) - uR, uT, uZ = meshgrid_unique_idx(R.size, T.size, Z.size) - iR, iT, iZ = meshgrid_inverse_idx(R.size, T.size, Z.size) + grid = Grid.create_meshgrid(R, T, Z) + r, t, z = grid.nodes.T _, unique, inverse = np.unique(r, return_index=True, return_inverse=True) - np.testing.assert_allclose(uR, unique) - np.testing.assert_allclose(iR, inverse) + np.testing.assert_allclose(grid.unique_rho_idx, unique) + np.testing.assert_allclose(grid.inverse_rho_idx, inverse) _, unique, inverse = np.unique(t, return_index=True, return_inverse=True) - np.testing.assert_allclose(uT, unique) - np.testing.assert_allclose(iT, inverse) + np.testing.assert_allclose(grid.unique_theta_idx, unique) + np.testing.assert_allclose(grid.inverse_theta_idx, inverse) _, unique, inverse = np.unique(z, return_index=True, return_inverse=True) - np.testing.assert_allclose(uZ, unique) - np.testing.assert_allclose(iZ, inverse) + np.testing.assert_allclose(grid.unique_zeta_idx, unique) + np.testing.assert_allclose(grid.inverse_zeta_idx, inverse) @pytest.mark.unit From c7ec4356c45c8244ee34a68ef857a04cfd829e4d Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 25 Apr 2024 07:43:39 -0400 Subject: [PATCH 127/241] Remove override_grid=False, doesn't seem to matter anymore --- tests/test_bounce_integral.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index cf22adcf1f..ef444acf6b 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -631,9 +631,6 @@ def test_bounce_averaged_drifts(): resolution=resolution, monotonic=monotonic, ) - # FIXME (outside scope of the bounce branch): - # override_grid should not be required for the test to pass. - # and anytime override_grid is true we should print a blue warning. 
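
For reference, a small standalone NumPy sketch of the index bookkeeping that ``Grid.create_meshgrid`` precomputes and ``test_meshgrid`` above verifies; the arange/repeat/tile pattern reproduces what ``np.unique`` would return for the first meshgrid coordinate (the coordinate values below are illustrative, not tied to any DESC grid):

.. code-block:: python

    import numpy as np

    a = np.linspace(0, 1, 4)
    b = np.linspace(0, 2 * np.pi, 2)
    c = np.linspace(0, 10 * np.pi, 3)
    A, B, C = map(np.ravel, np.meshgrid(a, b, c, indexing="ij"))

    # First occurrence of each unique value along the first meshgrid axis...
    unique_a_idx = np.arange(a.size) * b.size * c.size
    # ...and the map from every node back to its unique value.
    inverse_a_idx = np.repeat(np.arange(a.size), b.size * c.size)

    _, unique, inverse = np.unique(A, return_index=True, return_inverse=True)
    np.testing.assert_array_equal(unique_a_idx, unique)
    np.testing.assert_array_equal(inverse_a_idx, inverse)
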
data = eq.compute( [ "|grad(psi)|^2", @@ -648,7 +645,6 @@ def test_bounce_averaged_drifts(): ], grid=grid_desc, data=data, - override_grid=False, ) # normalizations From 772ed2aa9655ad21ba1522568626fa845ca1a67e Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 26 Apr 2024 06:56:15 -0400 Subject: [PATCH 128/241] fix bugs in bounce average drift test --- desc/compute/bounce_integral.py | 95 ++++---------- tests/test_bounce_integral.py | 217 +++++++++++++++----------------- 2 files changed, 124 insertions(+), 188 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 69c4bb5702..ecba6b8d24 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -351,21 +351,6 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): Particles with λ = 1 / |B|(ζ*) where |B|(ζ*) are local maxima have fat banana orbits increasing neoclassical transport. - When computing ε ∼ ∫ db ∑ⱼ Hⱼ² / Iⱼ in equation 29 of - - V. V. Nemov, S. V. Kasilov, W. Kernbichler, M. F. Heyn. - Evaluation of 1/ν neoclassical transport in stellarators. - Phys. Plasmas 1 December 1999; 6 (12): 4622–4632. - https://doi.org/10.1063/1.873749 - - the contribution of ∑ⱼ Hⱼ² / Iⱼ to ε is largest in the intervals such that - b ∈ [|B|(ζ*) - db, |B|(ζ*)]. To see this, observe that Iⱼ ∼ √(1 − λ B), - hence Hⱼ² / Iⱼ ∼ Hⱼ² / √(1 − λ B). For λ = 1 / |B|(ζ*), near |B|(ζ*), the - quantity 1 / √(1 − λ B) is singular. The slower |B| tends to |B|(ζ*) the - less integrable this singularity becomes. Therefore, a quadrature for - ε ∼ ∫ db ∑ⱼ Hⱼ² / Iⱼ would do well to evaluate the integrand near - b = 1 / λ = |B|(ζ*). - Parameters ---------- knots : Array, shape(knots.size, ) @@ -416,7 +401,6 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): (1 + relative_shift) * B_extrema, ).reshape(S, -1) # Reshape so that last axis enumerates extrema along a field line. - B_extrema = take_mask(B_extrema, ~jnp.isnan(B_extrema)) pitch = 1 / B_extrema.T assert pitch.shape == (N * (degree - 1), S) return pitch @@ -469,13 +453,8 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True): """ B_c, B_z_ra_c, pitch = _check_shape(knots, B_c, B_z_ra_c, pitch) P, S, N, degree = pitch.shape[0], B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 - # The polynomials' intersection points with 1 / λ is given by intersect. - # In order to be JIT compilable, this must have a shape that accommodates the - # case where each polynomial intersects 1 / λ degree times. - # nan values in intersect denote a polynomial has less than degree intersects. intersect = _poly_root( c=B_c, - # New axis to use same pitches across polynomials of a particular spline. k=(1 / pitch)[..., jnp.newaxis], a_min=jnp.array([0]), a_max=jnp.diff(knots), @@ -495,9 +474,6 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True): intersect = take_mask(intersect, is_intersect) B_z_ra = take_mask(B_z_ra, is_intersect) assert intersect.shape == B_z_ra.shape == (P, S, N * degree) - # Sign of derivative determines whether an intersect is a valid bounce point. - # Need to include zero derivative intersects to compute the WFB - # (world's fattest banana) orbit bounce integrals. 
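
To make the classification used here concrete, a short scipy-based sketch on a toy field line (illustrative only; the DESC routine works on local power-basis spline coefficients and masked arrays rather than a scipy spline object): intersections of |B| with 1/λ where d|B|/dζ ≤ 0 open a bounce integral and those where d|B|/dζ ≥ 0 close it.

.. code-block:: python

    import numpy as np
    from scipy.interpolate import CubicSpline

    zeta = np.linspace(0, 6 * np.pi, 200)
    B = CubicSpline(zeta, 2 + np.cos(zeta))  # toy |B| along one field line
    pitch = 1 / 2.5                          # λ with min|B| < 1/λ < max|B|

    intersect = B.solve(1 / pitch, extrapolate=False)
    B_z = B.derivative()(intersect)
    bp1 = intersect[B_z <= 0]  # |B| decreasing through 1/λ: integral opens
    bp2 = intersect[B_z >= 0]  # |B| increasing through 1/λ: integral closes
    # Here the pairs (bp1[i], bp2[i]) bound the three bounce integrals for this λ.
    assert bp1.size == bp2.size == 3
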
is_bp1 = B_z_ra <= 0 is_bp2 = B_z_ra >= 0 # The pairs bp1[i, j, k] and bp2[i, j, k] are boundaries of an integral only @@ -514,10 +490,10 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True): # due to floating point errors grows, so the real solution is to pick a less # degenerate pitch value - one that does not ride the global extrema of |B|. is_bp2 = put_along_axis(is_bp2, jnp.array(0), edge_case, axis=-1) - # Get ζ values of bounce points from the masks. bp1 = take_mask(intersect, is_bp1) bp2 = take_mask(intersect, is_bp2) + # Consistent with (in particular the discussion on page 3 and 5 of) # V. V. Nemov, S. V. Kasilov, W. Kernbichler, M. F. Heyn. # Evaluation of 1/ν neoclassical transport in stellarators. @@ -570,7 +546,9 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=False): bp1_p, bp2_p, B_mid = map( _filter_not_nan, (bp1[p, s], bp2[p, s], B_mid) ) - plot_field_line_with_ripple(B, pitch[p, s], bp1_p, bp2_p, id=f"{p},{s}") + plot_field_line_with_ripple( + B, pitch[p, s], bp1_p, bp2_p, name=f"{p},{s}" + ) print("bp1:", bp1_p) print("bp2:", bp2_p) assert not err_1[p, s], msg_1 @@ -581,7 +559,9 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=False): ) assert not err_3, msg_3 if plot: - plot_field_line_with_ripple(B, pitch[:, s], bp1[:, s], bp2[:, s], id=str(s)) + plot_field_line_with_ripple( + B, pitch[:, s], bp1[:, s], bp2[:, s], name=str(s) + ) def plot_field_line_with_ripple( @@ -593,7 +573,7 @@ def plot_field_line_with_ripple( stop=None, num=300, show=True, - id=None, + name=None, ): """Plot the field line given spline of |B| and bounce points etc. @@ -616,7 +596,7 @@ def plot_field_line_with_ripple( Should be dense to see oscillations. show : bool Whether to show the plot. - id : str + name : str String to prepend to plot title. Returns @@ -674,8 +654,8 @@ def add(lines): ax.set_ylabel(r"$\vert B \vert \sim 1 / \lambda$") ax.legend(legend.values(), legend.keys()) title = r"Computed bounce points for $\vert B \vert$ and pitch $\lambda$" - if id is not None: - title = f"{title}. id = {id}." + if name is not None: + title = f"{title}. name = {name}." ax.set_title(title) if show: plt.tight_layout() @@ -712,7 +692,7 @@ def automorphism_arcsin(x): Parameters ---------- x : Array - Points to tranform. + Points to transform. Returns ------- @@ -821,37 +801,6 @@ def tanh_sinh_quad(resolution, w=lambda x: 1, t_max=None): return x, W -def _suppress_bad_nan(V): - """Zero out nan values induced by error. - - Assuming that V is a well-behaved function of some interpolation points Z, - then V(Z) should evaluate as NaN only if Z is NaN. This condition needs to - be enforced explicitly due to floating point and interpolation error. - - In the context of bounce integrals, the √(1 − λ |B|) terms necessitate this. - For interpolation error in |B| may yield λ |B| > 1 at quadrature points - between bounce points, which is inconsistent with our knowledge of the |B| - spline on which the bounce points were computed. This inconsistency can - be more prevalent in the limit the number of quadrature points per bounce - integration is much greater than the number of knots. - - Parameters - ---------- - V : Array - Interpolation values. - - Returns - ------- - V : Array - The interpolation values with the bad NaN values set to zero. - - """ - # This simple logic is encapsulated here to make explicit the bug it resolves. - # Don't suppress inf as that indicates catastrophic floating point error. 
- V = jnp.nan_to_num(V, posinf=jnp.inf, neginf=-jnp.inf) - return V - - def _assert_finite_and_hairy(Z, f, B_sup_z, B, B_z_ra, inner_product): """Check for floating point errors. @@ -892,8 +841,10 @@ def _assert_finite_and_hairy(Z, f, B_sup_z, B, B_z_ra, inner_product): goal = jnp.sum(1 - is_not_quad_point) // quad_resolution # Number of integrals that were actually computed. actual = jnp.isfinite(inner_product).sum() - err_msg = f"Lost {goal - actual} integrals from floating point error." - assert goal == actual, err_msg + assert goal == actual, ( + f"Lost {goal - actual} integrals " + "from floating point or spline approximation error." + ) _repeated_docstring = """w : Array, shape(w.size, ) @@ -983,11 +934,17 @@ def _interpolatory_quadrature( Z_ps = Z.reshape(Z.shape[0], Z.shape[1], -1) f = [_interp1d_vec(Z_ps, knots, ff, method=method).reshape(shape) for ff in f] B_sup_z = _interp1d_vec(Z_ps, knots, B_sup_z, method=method).reshape(shape) - # Specify derivative at knots for ≈ cubic hermite interpolation. B = _interp1d_vec_with_df(Z_ps, knots, B, B_z_ra, method=method_B).reshape(shape) - pitch = pitch[..., jnp.newaxis, jnp.newaxis] + V = integrand(*f, B=B, pitch=pitch[..., jnp.newaxis, jnp.newaxis], Z=Z) + # Assuming that V is a well-behaved function of some interpolation points Z, + # V(Z) should evaluate as NaN only if Z is NaN. This condition needs to + # be enforced explicitly due to floating point and interpolation error. + # In the context of bounce integrals, the √(1 − λ |B|) terms necessitate this. + # For interpolation error in |B| may yield λ |B| > 1 at quadrature points + # between bounce points. Don't suppress inf as that indicates catastrophic + # floating point error. inner_product = jnp.dot( - _suppress_bad_nan(integrand(*f, B=B, pitch=pitch, Z=Z)) / B_sup_z, + jnp.nan_to_num(V, posinf=jnp.inf, neginf=-jnp.inf) / B_sup_z, w, ) if check: @@ -1274,7 +1231,7 @@ def bounce_integrate(integrand, f, pitch, method="akima"): integrand : callable This callable is the composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. - It should accept the items in ``f`` as arguments as well as two additional + It should accept the items in ``f`` as arguments as well as the additional keyword arguments: ``B``, ``pitch``, and ``Z``, where ``Z`` is the set of quadrature points. A quadrature will be performed to approximate the bounce integral of ``integrand(*f, B=B, pitch=pitch, Z=Z)``. 
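
For readers following the quadrature changes in this file, a self-contained NumPy sketch of the pattern the ``automorphism`` option implements (constants are illustrative): compose Gauss-Legendre nodes with the sin automorphism and scale the weights by its gradient, which suppresses an inverse-square-root endpoint singularity. The functions below use the same core formulas as this module; the DESC version of ``automorphism_sin`` additionally clips its output slightly away from ±1 to guard against round-off.

.. code-block:: python

    import numpy as np

    def automorphism_sin(x):
        return np.sin(np.pi * x / 2)

    def grad_automorphism_sin(x):
        return np.pi / 2 * np.cos(np.pi * x / 2)

    # Substitute y = sin(pi x / 2) into the singular integral
    # int_{-1}^{1} dy / sqrt(1 - y**2) = pi. The Jacobian cancels the
    # endpoint singularity, so plain Gauss-Legendre converges immediately.
    x, w = np.polynomial.legendre.leggauss(16)
    w = w * grad_automorphism_sin(x)  # suppress the singularity
    y = automorphism_sin(x)
    result = np.sum(w / np.sqrt(1 - y**2))
    np.testing.assert_allclose(result, np.pi, rtol=1e-10)
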
diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index ef444acf6b..bc9779ed3e 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -33,19 +33,9 @@ tanh_sinh_quad, ) from desc.compute.utils import dot, safediv -from desc.continuation import solve_continuation_automatic from desc.equilibrium import Equilibrium from desc.examples import get -from desc.geometry import FourierRZToroidalSurface -from desc.grid import Grid -from desc.objectives import ( - ObjectiveFromUser, - ObjectiveFunction, - get_equilibrium_objective, - get_fixed_boundary_constraints, -) -from desc.optimize import Optimizer -from desc.profiles import PowerSeriesProfile +from desc.grid import Grid, LinearGrid from desc.utils import only1 @@ -592,34 +582,61 @@ def test_bounce_averaged_drifts(): Calculate bounce-averaged drifts using the bounce-average routine and compare it with the analytical expression - # Note 1: This test can be merged with the low beta test # Note 2: Remove tests/test_equilibrium :: test_shifted_circle_geometry # once all the epsilons and Gammas have been implemented and tested """ eq = Equilibrium.load(".//tests//inputs//low-beta-shifted-circle.h5") - psi = 0.25 # normalized psi - rho = np.sqrt(psi) - data_init = eq.compute(["iota", "iota_r", "a", "rho", "psi"]) - - # normalization - L_ref = data_init["a"] - epsilon = L_ref * rho - psi_boundary = data_init["psi"][np.argmax(np.abs(data_init["psi"]))] - B_ref = 2 * np.abs(psi_boundary) / L_ref**2 + psi_boundary = eq.Psi / (2 * np.pi) + psi = 0.25 * psi_boundary + rho = np.sqrt(psi / psi_boundary) + assert np.isclose(rho, 0.5) + + # Compute flux surface quantities on a grid that we know has + # resolution and node placement for correct flux surface quantities. + grid_flux = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym) + data_flux = eq.compute( + names=["iota", "iota_r", "a", "psi"], + grid=grid_flux, + ) + assert np.isclose(grid_flux.compress(data_flux["psi"]).item(), psi) - iota = np.interp(rho, data_init["rho"], data_init["iota"]) - shear = np.interp(rho, data_init["rho"], data_init["iota_r"]) - N = (2 * eq.M_grid) * 4 + 1 - zeta = np.linspace(-np.pi / iota, np.pi / iota, N) alpha = 0 - theta_PEST = alpha + iota * zeta - + iota = grid_flux.compress(data_flux["iota"]).item() + zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) + # Compute quantities on grid that can separate into field lines via + # a simple np.reshape operation, as expected by bounce_integral(). grid_desc, _ = desc_grid_from_field_line_coords(eq, rho, alpha, zeta) - data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid_desc) - resolution = 50 - # Whether to use monotonic or Hermite splines to interpolate |B|. + data = eq.compute( + names=[ + "B^zeta", + "|B|", + "|B|_z|r,a", + "cvdrift", + "gbdrift", + "cvdrift0", + "B", + "grad(alpha)", + "|grad(psi)|^2", + "grad(psi)", + ], + grid=grid_desc, + ) + + # normalization + L_ref = data_flux["a"] + # FIXME: + # When we (incorrectly) use psi, numerical and analytic match up to a sign + # error, but the analytic plot doesn't reproduce results that match paper, + # which makes sense. + # When we use the proper psi at the lcfs, (i.e. psi_boundary) the numerical + # no longer matches analytic. The analytic plot looks as expected though. + # So we just need to make sure we are using the correct psi in the numerical + # computations in this test and elsewhere in DESC. 
+ B_ref = 2 * np.abs(psi) / L_ref**2 + + quad_resolution = 50 monotonic = False - bounce_integrate, spline = bounce_integral( + bounce_integrate, _ = bounce_integral( data["B^zeta"], data["|B|"], data["|B|_z|r,a"], @@ -628,70 +645,71 @@ def test_bounce_averaged_drifts(): L_ref=L_ref, check=True, plot=True, - resolution=resolution, + resolution=quad_resolution, monotonic=monotonic, ) - data = eq.compute( - [ - "|grad(psi)|^2", - "grad(psi)", - "B", - "iota", - "|B|", - "B^zeta", - "cvdrift0", - "cvdrift", - "gbdrift", - ], - grid=grid_desc, - data=data, - ) - # normalizations - bmag = data["|B|"] / B_ref - B0 = np.mean(bmag) - bmag_analytic = B0 * (1 - epsilon * np.cos(theta_PEST)) - np.testing.assert_allclose(bmag, bmag_analytic, atol=5e-3, rtol=5e-3) + # FIXME: Do these have the correct normalization in the radial coordinate? + epsilon = L_ref * rho + # I wouldn't really consider 0.05 << 1... maybe for a rough approximation. + assert np.isclose(epsilon, 0.05) + x = L_ref * rho + + theta_PEST = alpha + iota * zeta + B_normalized = data["|B|"] / B_ref + B0 = np.mean(B_normalized) + # same as B0 / (1 + epsilon cos(theta)) assuming epsilon << 1 + B_normalized_analytic = B0 * (1 - epsilon * np.cos(theta_PEST)) + np.testing.assert_allclose(B_normalized, B_normalized_analytic, rtol=5e-3) - x = L_ref * rho # same as epsilon? + shear = grid_flux.compress(data_flux["iota_r"]).item() s_hat = -x / iota * shear / L_ref gradpar = L_ref * data["B^zeta"] / data["|B|"] - gradpar_analytic = 2 * L_ref * data["iota"] * (1 - epsilon * np.cos(theta_PEST)) + gradpar_analytic = 2 * L_ref * iota * (1 - epsilon * np.cos(theta_PEST)) np.testing.assert_allclose(gradpar, gradpar_analytic, atol=9e-3, rtol=5e-3) # Comparing coefficient calculation here with coefficients from compute/_metric - cvdrift = -2 * np.sign(psi_boundary) * B_ref * L_ref**2 * rho * data["cvdrift"] - gbdrift = -2 * np.sign(psi_boundary) * B_ref * L_ref**2 * rho * data["gbdrift"] + cvdrift = -2 * np.sign(psi) * B_ref * L_ref**2 * rho * data["cvdrift"] + gbdrift = -2 * np.sign(psi) * B_ref * L_ref**2 * rho * data["gbdrift"] dPdrho = np.mean(-0.5 * (cvdrift - gbdrift) * data["|B|"] ** 2) - alpha_MHD = -np.mean(dPdrho * 1 / data["iota"] ** 2 * 0.5) - - gds21 = -np.sign(iota) * dot(data["grad(psi)"], data["grad(alpha)"]) * s_hat / B_ref + alpha_MHD = -np.mean(dPdrho / iota**2 * 0.5) + gds21 = ( # noqa: F841 + -np.sign(iota) * dot(data["grad(psi)"], data["grad(alpha)"]) * s_hat / B_ref + ) gds21_analytic = ( - -1 * s_hat * (s_hat * theta_PEST - alpha_MHD / bmag**4 * np.sin(theta_PEST)) + -1 + * s_hat + * (s_hat * theta_PEST - alpha_MHD / B_normalized**4 * np.sin(theta_PEST)) ) - np.testing.assert_allclose(gds21, gds21_analytic, atol=1.7e-2, rtol=5e-4) + # np.testing.assert_allclose(gds21, gds21_analytic) # noqa: E800 fudge_factor_gbdrift = 0.19 gbdrift_analytic = fudge_factor_gbdrift * ( -s_hat + (np.cos(theta_PEST) - gds21_analytic / s_hat * np.sin(theta_PEST)) ) fudge_factor_cvdrift = 0.07 - cvdrift_analytic = gbdrift_analytic + fudge_factor_cvdrift * alpha_MHD / bmag**2 - np.testing.assert_allclose(gbdrift, gbdrift_analytic, atol=1.2e-2, rtol=5e-3) - np.testing.assert_allclose(cvdrift, cvdrift_analytic, atol=1.8e-2, rtol=5e-3) + cvdrift_analytic = ( # noqa: F841 + gbdrift_analytic + fudge_factor_cvdrift * alpha_MHD / B_normalized**2 + ) + # np.testing.assert_allclose(gbdrift, gbdrift_analytic) # noqa: E800 + # np.testing.assert_allclose(cvdrift, cvdrift_analytic) # noqa: E800 # Values of pitch angle lambda for which to evaluate the bounce averages. 
delta_shift = 1e-6 pitch_resolution = 50 pitch = np.linspace( - 1 / np.max(bmag) + delta_shift, 1 / np.min(bmag) - delta_shift, pitch_resolution + 1 / np.max(B_normalized) + delta_shift, + 1 / np.min(B_normalized) - delta_shift, + pitch_resolution, ) - k2 = 0.5 * ((1 - pitch * B0) / (pitch * B0 * epsilon) + 1) - k = np.sqrt(k2) + # Changed from RG appendix equation A10 to match equation 19 here + # https://cptc.wisc.edu/wp-content/uploads/sites/327/2017/09/UW-CPTC_15-4.pdf. + k2 = 0.5 * ((1 - pitch * B0) / epsilon + 1) + k = _sqrt(k2) # Here are the notes that explain these integrals. # https://github.com/PlasmaControl/DESC/files/15010927/bavg.pdf. - I_0 = test_integral_0(k, resolution) - I_1 = test_integral_1(k, resolution) + I_0 = test_integral_0(k, quad_resolution) + I_1 = test_integral_1(k, quad_resolution) I_2 = 16 * k * I_0 I_3 = 4 / 9 * (8 * k * (-1 + 2 * k2) * I_1 - 4 * k * (-1 + k2) * I_0) I_4 = ( @@ -723,8 +741,6 @@ def integrand(cvdrift, gbdrift, B, pitch, Z): g = _sqrt(1 - pitch * B) return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) - # Can choose method of interpolation for all quantities besides |B| from - # interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html#interpax.interp1d. method = "akima" bounce_drift = bounce_integrate( integrand=integrand, @@ -736,15 +752,20 @@ def integrand(cvdrift, gbdrift, B, pitch, Z): msg = "There is only one bounce integral per pitch in this example." assert bounce_drift.size == bounce_drift_analytic.size, msg - plt.plot(1 / pitch, bounce_drift_analytic, marker="o", label="analytic") - plt.plot(1 / pitch, bounce_drift, marker="x", label="numerical") - plt.xlabel(r"$1 / \lambda$") - plt.ylabel("Bounce averaged drift") - plt.legend() + fig, ax = plt.subplots(2) + ax[0].plot(1 / pitch, bounce_drift_analytic, marker="o", label="analytic") + ax[0].plot(1 / pitch, bounce_drift, marker="x", label="numerical") + ax[0].set_xlabel(r"$1 / \lambda$") + ax[0].set_ylabel("Bounce averaged drift") + ax[0].legend() + ax[1].plot(1 / pitch, bounce_drift_analytic, marker="o", label="analytic") + ax[1].set_xlabel(r"$1 / \lambda$") + ax[1].set_ylabel("Bounce averaged drift") + ax[1].legend() plt.tight_layout() plt.show() msg = ( - f"Quadrature resolution is {resolution}.\n" + f"Quadrature resolution is {quad_resolution}.\n" f"Delta shift is {delta_shift}.\n" f"Spline method for integrand quantities is {method}.\n" f"Spline method for |B| is monotonic? (as opposed to Hermite): {monotonic}.\n" @@ -752,45 +773,3 @@ def integrand(cvdrift, gbdrift, B, pitch, Z): np.testing.assert_allclose( bounce_drift, bounce_drift_analytic, atol=2e-2, rtol=1e-2, err_msg=msg ) - - -@pytest.mark.regression -def test_bounce_averaged_drifts_low_beta(): - """Test bounce integrals in low beta limit.""" - assert False, "Test not finished yet." - L, M, N, NFP, sym = 6, 6, 6, 1, True - surface = FourierRZToroidalSurface( - R_lmn=[1.0, 0.1], - Z_lmn=[0.0, -0.1], - modes_R=np.array([[0, 0], [1, 0]]), - modes_Z=np.array([[0, 0], [-1, 0]]), - sym=sym, - NFP=NFP, - ) - eq = Equilibrium( - L=L, - M=M, - N=N, - NFP=NFP, - surface=surface, - pressure=PowerSeriesProfile([1e2, 0, -1e2]), - iota=PowerSeriesProfile([1, 0, 2]), - Psi=1.0, - ) - eq = solve_continuation_automatic(eq)[-1] - - def beta(grid, data): - return data["_vol"] - - low_beta = 0.01 - # todo: error that objective function has no linear attribute? 
- objective = ObjectiveFunction( - (ObjectiveFromUser(fun=beta, eq=eq, target=low_beta),) - ) - - constraints = (*get_fixed_boundary_constraints(eq), get_equilibrium_objective(eq)) - opt = Optimizer("proximal-lsq-exact") - eq, result = eq.optimize( - objective=objective, constraints=constraints, optimizer=opt - ) - print(result) From 9767f07db26ffcef48f30f5d44c249ed0950d94c Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 26 Apr 2024 08:45:04 -0400 Subject: [PATCH 129/241] Add back testing assertions to analytic expressions --- tests/test_bounce_integral.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index bc9779ed3e..8085f5c201 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -632,7 +632,7 @@ def test_bounce_averaged_drifts(): # no longer matches analytic. The analytic plot looks as expected though. # So we just need to make sure we are using the correct psi in the numerical # computations in this test and elsewhere in DESC. - B_ref = 2 * np.abs(psi) / L_ref**2 + B_ref = 2 * np.abs(psi_boundary) / L_ref**2 quad_resolution = 50 monotonic = False @@ -658,41 +658,40 @@ def test_bounce_averaged_drifts(): theta_PEST = alpha + iota * zeta B_normalized = data["|B|"] / B_ref B0 = np.mean(B_normalized) - # same as B0 / (1 + epsilon cos(theta)) assuming epsilon << 1 - B_normalized_analytic = B0 * (1 - epsilon * np.cos(theta_PEST)) - np.testing.assert_allclose(B_normalized, B_normalized_analytic, rtol=5e-3) + # same as 1 / (1 + epsilon cos(theta)) assuming epsilon << 1 + taylor = 1 - epsilon * np.cos(theta_PEST) + B_normalized_analytic = B0 * taylor + np.testing.assert_allclose(B_normalized, B_normalized_analytic, atol=3e-3) shear = grid_flux.compress(data_flux["iota_r"]).item() s_hat = -x / iota * shear / L_ref gradpar = L_ref * data["B^zeta"] / data["|B|"] - gradpar_analytic = 2 * L_ref * iota * (1 - epsilon * np.cos(theta_PEST)) - np.testing.assert_allclose(gradpar, gradpar_analytic, atol=9e-3, rtol=5e-3) + gradpar_analytic = 2 * L_ref * iota * taylor + np.testing.assert_allclose(gradpar, gradpar_analytic, atol=1e-2) # Comparing coefficient calculation here with coefficients from compute/_metric cvdrift = -2 * np.sign(psi) * B_ref * L_ref**2 * rho * data["cvdrift"] gbdrift = -2 * np.sign(psi) * B_ref * L_ref**2 * rho * data["gbdrift"] dPdrho = np.mean(-0.5 * (cvdrift - gbdrift) * data["|B|"] ** 2) alpha_MHD = -np.mean(dPdrho / iota**2 * 0.5) - gds21 = ( # noqa: F841 - -np.sign(iota) * dot(data["grad(psi)"], data["grad(alpha)"]) * s_hat / B_ref - ) + gds21 = -np.sign(iota) * dot(data["grad(psi)"], data["grad(alpha)"]) * s_hat / B_ref gds21_analytic = ( -1 * s_hat * (s_hat * theta_PEST - alpha_MHD / B_normalized**4 * np.sin(theta_PEST)) ) - # np.testing.assert_allclose(gds21, gds21_analytic) # noqa: E800 + np.testing.assert_allclose(gds21, gds21_analytic, atol=2e-2) fudge_factor_gbdrift = 0.19 gbdrift_analytic = fudge_factor_gbdrift * ( -s_hat + (np.cos(theta_PEST) - gds21_analytic / s_hat * np.sin(theta_PEST)) ) fudge_factor_cvdrift = 0.07 - cvdrift_analytic = ( # noqa: F841 + cvdrift_analytic = ( gbdrift_analytic + fudge_factor_cvdrift * alpha_MHD / B_normalized**2 ) - # np.testing.assert_allclose(gbdrift, gbdrift_analytic) # noqa: E800 - # np.testing.assert_allclose(cvdrift, cvdrift_analytic) # noqa: E800 + np.testing.assert_allclose(gbdrift, gbdrift_analytic, atol=1e-2) + np.testing.assert_allclose(cvdrift, cvdrift_analytic, atol=2e-2) # Values of 
pitch angle lambda for which to evaluate the bounce averages. delta_shift = 1e-6 From 025f09d2d2ca3618373f18618f369aea3facafc0 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 27 Apr 2024 00:27:54 -0400 Subject: [PATCH 130/241] Add methods to plot interpolated integrand. --- desc/compute/bounce_integral.py | 224 +++++++++++++++++++++----------- desc/grid.py | 7 +- tests/test_bounce_integral.py | 9 +- 3 files changed, 157 insertions(+), 83 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index ecba6b8d24..1d505c8774 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -434,7 +434,7 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True): check : bool Flag for debugging. plot : bool - Whether to plot even if error was not detected during the check. + Whether to plot some things if check is true. Returns ------- @@ -512,7 +512,7 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True): return bp1, bp2 -def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=False): +def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=True): """Check that bounce points are computed correctly. Parameters @@ -526,7 +526,7 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=False): B_c : Array Input to ``bounce_points``. plot : bool - Whether to plot even if error was not detected. + Whether to plot some things. """ eps = 10 * jnp.finfo(jnp.array(1.0).dtype).eps @@ -546,16 +546,18 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=False): bp1_p, bp2_p, B_mid = map( _filter_not_nan, (bp1[p, s], bp2[p, s], B_mid) ) - plot_field_line_with_ripple( - B, pitch[p, s], bp1_p, bp2_p, name=f"{p},{s}" - ) + if plot: + plot_field_line_with_ripple( + B, pitch[p, s], bp1_p, bp2_p, name=f"{p},{s}" + ) print("bp1:", bp1_p) print("bp2:", bp2_p) assert not err_1[p, s], msg_1 assert not err_2[p, s], msg_2 msg_3 = ( - f"B midpoint = {B_mid} > {1 / pitch[p, s] + eps} = 1/pitch." - "Should use a monotonic spline." + f"Detected B midpoint = {B_mid}>{1 / pitch[p, s] + eps} = 1/pitch. " + "You need to use more knots or, if that is infeasible, switch to a " + "monotonic spline method.\n" ) assert not err_3, msg_3 if plot: @@ -571,7 +573,7 @@ def plot_field_line_with_ripple( bp2=jnp.array([]), start=None, stop=None, - num=300, + num=500, show=True, name=None, ): @@ -584,16 +586,15 @@ def plot_field_line_with_ripple( pitch : Array λ value. bp1 : Array - Bounce points with B_z_ra <= 0. + Bounce points with ∂|B|/∂_ζ <= 0. bp2 : Array - Bounce points with B_z_ra >= 0. + Bounce points with ∂|B|/∂_ζ >= 0. start : float Minimum ζ on plot. stop : float Maximum ζ of plot. num : int Number of ζ points to plot. - Should be dense to see oscillations. show : bool Whether to show the plot. name : str @@ -689,6 +690,11 @@ def automorphism_arcsin(x): the singularity in the bounce integral. Therefore, the quadrature scheme used to evaluate the integral must work well on singular integrals. + The arcsin automorphism pulls points in [−1, 1] away from the boundary. + This can reduce floating point error if paired with a quadrature + scheme that is aggressive with placing nodes near endpoints, such as + Tanh-Sinh quadrature. 
+ Parameters ---------- x : Array @@ -726,9 +732,11 @@ def automorphism_sin(x, eps=None): Therefore, this automorphism pulls the mass of the bounce integral away from the singularities, which should improve convergence of the quadrature to the true integral, so long as the quadrature performs better on less - singular integrands. If the integral was singular to begin with, - Tanh-Sinh quadrature will still work well. Otherwise, Gauss-Legendre - quadrature can outperform Tanh-Sinh. + singular integrands. Pairs well with Gauss-Legendre quadrature. + + The sin automorphism pushes points in [−1, 1] toward the boundary. + This can increase floating point error if paired with a quadrature + scheme that is aggressive with placing nodes near endpoints. Parameters ---------- @@ -801,52 +809,6 @@ def tanh_sinh_quad(resolution, w=lambda x: 1, t_max=None): return x, W -def _assert_finite_and_hairy(Z, f, B_sup_z, B, B_z_ra, inner_product): - """Check for floating point errors. - - Parameters - ---------- - Z : Array - Quadrature points at field line-following ζ coordinates. - f : iterable of Array, shape(Z.shape) - Arguments to the integrand interpolated to Z. - B_sup_z : Array, shape(Z.shape) - Contravariant field-line following toroidal component of magnetic field, - interpolated to Z. - B : Array, shape(Z.shape) - Norm of magnetic field, interpolated to Z. - B_z_ra : Array, shape(Z.shape) - Norm of magnetic field derivative with respect to field-line following label, - interpolated to Z. - inner_product : Array - Output of ``_interpolatory_quadrature``. - - """ - is_not_quad_point = jnp.isnan(Z) - # We want quantities to evaluate as finite only at quadrature points - # for the integrals with boundaries at valid bounce points. - msg = "Interpolation failed." - assert jnp.all(jnp.isfinite(B_sup_z) ^ is_not_quad_point), msg - assert jnp.all(jnp.isfinite(B) ^ is_not_quad_point), msg - assert jnp.all(jnp.isfinite(B_z_ra)), msg - for ff in f: - assert jnp.all(jnp.isfinite(ff) ^ is_not_quad_point), msg - - msg = "|B| has vanished, violating the hairy ball theorem." - assert not jnp.isclose(B, 0).any(), msg - assert not jnp.isclose(B_sup_z, 0).any(), msg - - quad_resolution = Z.shape[-1] - # Number of integrals that we should be computing. - goal = jnp.sum(1 - is_not_quad_point) // quad_resolution - # Number of integrals that were actually computed. - actual = jnp.isfinite(inner_product).sum() - assert goal == actual, ( - f"Lost {goal - actual} integrals " - "from floating point or spline approximation error." - ) - - _repeated_docstring = """w : Array, shape(w.size, ) Quadrature weights. integrand : callable @@ -882,6 +844,8 @@ def _assert_finite_and_hairy(Z, f, B_sup_z, B, B_z_ra, inner_product): See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. check : bool Flag for debugging. + plot : bool + Whether to plot some things if check is true. """ _delimiter = "Returns" @@ -906,7 +870,19 @@ def _interp1d_vec_with_df( def _interpolatory_quadrature( - Z, w, integrand, f, B_sup_z, B, B_z_ra, pitch, knots, method, method_B, check=False + Z, + w, + integrand, + f, + B_sup_z, + B, + B_z_ra, + pitch, + knots, + method, + method_B, + check=False, + plot=True, ): """Interpolate given functions to points Z and perform quadrature. @@ -932,7 +908,7 @@ def _interpolatory_quadrature( # points can be captured more accurately than can be by any polynomial. 
shape = Z.shape Z_ps = Z.reshape(Z.shape[0], Z.shape[1], -1) - f = [_interp1d_vec(Z_ps, knots, ff, method=method).reshape(shape) for ff in f] + f = [_interp1d_vec(Z_ps, knots, f_i, method=method).reshape(shape) for f_i in f] B_sup_z = _interp1d_vec(Z_ps, knots, B_sup_z, method=method).reshape(shape) B = _interp1d_vec_with_df(Z_ps, knots, B, B_z_ra, method=method_B).reshape(shape) V = integrand(*f, B=B, pitch=pitch[..., jnp.newaxis, jnp.newaxis], Z=Z) @@ -949,6 +925,9 @@ def _interpolatory_quadrature( ) if check: _assert_finite_and_hairy(Z, f, B_sup_z, B, B_z_ra, inner_product) + if plot: + _plot(Z, B, name=r"$\vert B \vert$") + _plot(Z, V, name="integrand") return inner_product @@ -957,6 +936,77 @@ def _interpolatory_quadrature( ) +def _assert_finite_and_hairy(Z, f, B_sup_z, B, B_z_ra, inner_product): + """Check for floating point errors. + + Parameters + ---------- + Z : Array + Quadrature points at field line-following ζ coordinates. + f : iterable of Array, shape(Z.shape) + Arguments to the integrand interpolated to Z. + B_sup_z : Array, shape(Z.shape) + Contravariant field-line following toroidal component of magnetic field, + interpolated to Z. + B : Array, shape(Z.shape) + Norm of magnetic field, interpolated to Z. + B_z_ra : Array, shape(Z.shape) + Norm of magnetic field derivative with respect to field-line following label, + interpolated to Z. + inner_product : Array + Output of ``_interpolatory_quadrature``. + + """ + is_not_quad_point = jnp.isnan(Z) + # We want quantities to evaluate as finite only at quadrature points + # for the integrals with boundaries at valid bounce points. + msg = "Interpolation failed." + assert jnp.all(jnp.isfinite(B_sup_z) ^ is_not_quad_point), msg + assert jnp.all(jnp.isfinite(B) ^ is_not_quad_point), msg + assert jnp.all(jnp.isfinite(B_z_ra)), msg + for ff in f: + assert jnp.all(jnp.isfinite(ff) ^ is_not_quad_point), msg + + msg = "|B| has vanished, violating the hairy ball theorem." + assert not jnp.isclose(B, 0).any(), msg + assert not jnp.isclose(B_sup_z, 0).any(), msg + + quad_resolution = Z.shape[-1] + # Number of integrals that we should be computing. + goal = jnp.sum(1 - is_not_quad_point) // quad_resolution + # Number of integrals that were actually computed. + actual = jnp.isfinite(inner_product).sum() + assert goal == actual, ( + f"Lost {goal - actual} integrals " + "from floating point or spline approximation error." + ) + + +def _plot(Z, V, name=""): + """Plot V[λ, (ρ, α), (ζ₁, ζ₂)](Z).""" + for p in range(Z.shape[0]): + for s in range(Z.shape[1]): + is_quad_point_set = jnp.nonzero(~jnp.any(jnp.isnan(Z[p, s]), axis=-1))[0] + if not is_quad_point_set.size: + continue + fig, ax = plt.subplots() + ax.set_xlabel(r"Field line $\zeta$") + ax.set_ylabel(name) + ax.set_title( + f"Interpolation of {name} to quadrature points. Index {p},{s}." + ) + for i in is_quad_point_set: + ax.plot(Z[p, s, i], V[p, s, i], marker="o") + fig.text( + 0.01, + 0.01, + f"Each color specifies the set of points and values (ζ, {name}(ζ)) " + "used to evaluate an integral.", + ) + plt.tight_layout() + plt.show() + + def _bounce_quadrature( bp1, bp2, @@ -972,6 +1022,7 @@ def _bounce_quadrature( method="akima", method_B="cubic", check=False, + plot=True, ): """Bounce integrate ∫ f(ℓ) dℓ. @@ -1012,7 +1063,19 @@ def group_data_by_field_line_and_pitch(g): Z = affine_bijection_reverse(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) # Integrate and complete the change of variable. 
result = _interpolatory_quadrature( - Z, w, integrand, f, B_sup_z, B, B_z_ra, pitch, knots, method, method_B, check + Z, + w, + integrand, + f, + B_sup_z, + B, + B_z_ra, + pitch, + knots, + method, + method_B, + check, + plot, ) * grad_affine_bijection_reverse(bp1, bp2) assert result.shape == (pitch.shape[0], S, bp1.shape[-1]) return result @@ -1054,6 +1117,14 @@ def bounce_integral( We choose the sign that corresponds the particle's guiding center trajectory traveling in the direction of increasing field-line-following label. + Notes + ----- + This function requires that the quantities `B_sup_z`, `B`, `B_z_ra`, + and the quantities in ``f`` passed to the returned method + can be separated into field lines via ``.reshape(S, knots.size)``. + One way to satisfy this is to pass in quantities computed on the grid + returned from the method ``desc_grid_from_field_line_coords``. + Parameters ---------- B_sup_z : Array, shape(S, knots.size, ) @@ -1074,18 +1145,22 @@ def bounce_integral( knots : Array, shape(knots.size, ) Field line following coordinate values at which ``B_sup_z``, ``B``, and ``B_z_ra`` were evaluated. - These knots are used to compute a spline of the integrand. - The number of knots specifies a grid resolution as increasing the - number of knots increases the accuracy of representing the integrand - and the accuracy of the locations of the bounce points. + These knots are used to compute a spline of |B| and interpolate + the integrand. The number of knots specifies a grid resolution + as increasing the number of knots increases the accuracy of + representing the integrand and the accuracy of the locations of + the bounce points. The default spline method for |B| is a cubic + Hermite spline. This is preferred because the strength of the + singularity typical in bounce integral is ~ 1 / |∂|B|/∂_ζ|, so + the derivative information should be captured without compromise. + Can also specify to use a monotonic interpolation for |B| rather + than a cubic Hermite spline with keyword argument ``monotonic=True``. quad : callable The quadrature scheme used to evaluate the integral. The returned quadrature points xₖ and weights wₖ should approximate ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). - For the default choice of the automorphism below, - Tanh-Sinh quadrature works well if the integrand is singular. - Otherwise, Gauss-Legendre quadrature with the sin automorphism - can be more competitive. + Gauss-Legendre quadrature (``orthax.legendre.leggauss``) + with ``automorphism_sin`` can be competitive against the default choice. automorphism : (callable, callable) The first callable should be an automorphism of the real interval [-1, 1]. The second callable should be the derivative of the first. @@ -1101,11 +1176,9 @@ def bounce_integral( check : bool Flag for debugging. plot : bool - Whether to plot even if error was not detected during the check. + Whether to plot some things if check is true. kwargs Can specify additional arguments to the ``quad`` method with kwargs. - Can also specify to use a monotonic interpolation for |B| rather - than a cubic Hermite spline with ``monotonic=True``. 
Returns ------- @@ -1278,6 +1351,7 @@ def bounce_integrate(integrand, f, pitch, method="akima"): method, method_B="monotonic" if monotonic else "cubic", check=check, + plot=plot, ) assert result.shape[-1] == (knots.size - 1) * degree return result diff --git a/desc/grid.py b/desc/grid.py index 3620017aee..e917b4505e 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -516,17 +516,13 @@ def create_meshgrid(cls, a, b, c): aa, bb, cc = map(jnp.ravel, jnp.meshgrid(a, b, c, indexing="ij")) nodes = jnp.column_stack([aa, bb, cc]) - ds = jnp.array([1 / a.size, 2 * jnp.pi / b.size, 2 * jnp.pi / c.size]) - num_nodes = a.size * b.size * c.size - spacing = jnp.ones(num_nodes)[:, jnp.newaxis] * ds - unique_a_idx = jnp.arange(a.size) * b.size * c.size unique_b_idx = jnp.arange(b.size) * c.size unique_c_idx = jnp.arange(c.size) inverse_a_idx = repeat( unique_a_idx // (b.size * c.size), b.size * c.size, - total_repeat_length=num_nodes, + total_repeat_length=a.size * b.size * c.size, ) inverse_b_idx = jnp.tile( repeat(unique_b_idx // c.size, c.size, total_repeat_length=b.size * c.size), @@ -535,7 +531,6 @@ def create_meshgrid(cls, a, b, c): inverse_c_idx = jnp.tile(unique_c_idx, a.size * b.size) return cls( nodes=nodes, - spacing=spacing, sort=False, jitable=True, _unique_rho_idx=unique_a_idx, diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 8085f5c201..8194c19ae3 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -643,9 +643,14 @@ def test_bounce_averaged_drifts(): knots=zeta, B_ref=B_ref, L_ref=L_ref, + quad=tanh_sinh_quad, + automorphism=(automorphism_arcsin, grad_automorphism_arcsin), + resolution=quad_resolution, + # quad=np.polynomial.legendre.leggauss, # noqa: E800 + # automorphism=(automorphism_sin, grad_automorphism_sin), # noqa: E800 + # deg=quad_resolution, # noqa: E800 check=True, plot=True, - resolution=quad_resolution, monotonic=monotonic, ) @@ -748,7 +753,7 @@ def integrand(cvdrift, gbdrift, B, pitch, Z): method=method, ) bounce_drift = np.squeeze(_filter_not_nan(bounce_drift)) - msg = "There is only one bounce integral per pitch in this example." + msg = "There should be one bounce integral per pitch in this example." assert bounce_drift.size == bounce_drift_analytic.size, msg fig, ax = plt.subplots(2) From 4cca3c1a830783490cacf3b77126f7f0ad9c6894 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 27 Apr 2024 00:32:40 -0400 Subject: [PATCH 131/241] Fix bad merge --- requirements_conda.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements_conda.yml b/requirements_conda.yml index aac42d3489..5b26b11625 100644 --- a/requirements_conda.yml +++ b/requirements_conda.yml @@ -15,7 +15,6 @@ dependencies: - interpax >= 0.3.1 # Conda only parses a single list of pip requirements. # If two pip lists are given, all but the last list is skipped. - - interpax - jax[cpu] >= 0.3.2, < 0.5.0 - nvgpu - plotly >= 5.16, < 6.0 From f303a6e6b626b5d2eded792d26dd7b54655307cc Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 27 Apr 2024 02:34:17 -0400 Subject: [PATCH 132/241] Add back override_grid=False. 
--- desc/compute/bounce_integral.py | 9 ++++++--- tests/test_bounce_integral.py | 10 +++++++--- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 1d505c8774..7a5d26c0e1 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -1217,7 +1217,11 @@ def bounce_integral( alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) knots = np.linspace(-3 * np.pi, 3 * np.pi, 40) grid_desc, grid_fl = desc_grid_from_field_line_coords(eq, rho, alpha, knots) - data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid_desc) + data = eq.compute( + ["B^zeta", "|B|", "|B|_z|r,a", "g_zz"], + grid=grid_desc, + override_grid=False, # Need to have this. + ) bounce_integrate, spline = bounce_integral( data["B^zeta"], data["|B|"], @@ -1236,9 +1240,8 @@ def integrand_den(B, pitch, Z): # Integrand in integral in denominator of bounce average. return safediv(1, jnp.sqrt(1 - pitch * B)) - g_zz = eq.compute("g_zz", grid=grid_desc, data=data)["g_zz"] pitch = pitch_of_extrema(knots, spline["B.c"], spline["B_z_ra.c"]) - num = bounce_integrate(integrand_num, g_zz, pitch) + num = bounce_integrate(integrand_num, data["g_zz"], pitch) den = bounce_integrate(integrand_den, [], pitch) average = num / den assert np.isfinite(average).any() diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 8194c19ae3..421b541a8e 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -489,7 +489,11 @@ def test_example_bounce_integral(): alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) knots = np.linspace(-3 * np.pi, 3 * np.pi, 40) grid_desc, grid_fl = desc_grid_from_field_line_coords(eq, rho, alpha, knots) - data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a"], grid=grid_desc) + data = eq.compute( + ["B^zeta", "|B|", "|B|_z|r,a", "g_zz"], + grid=grid_desc, + override_grid=False, # Need to have this. + ) bounce_integrate, spline = bounce_integral( data["B^zeta"], data["|B|"], @@ -508,9 +512,8 @@ def integrand_den(B, pitch, Z): """Integrand in integral in denominator of bounce average.""" return safediv(1, _sqrt(1 - pitch * B)) - g_zz = eq.compute("g_zz", grid=grid_desc, data=data)["g_zz"] pitch = pitch_of_extrema(knots, spline["B.c"], spline["B_z_ra.c"]) - num = bounce_integrate(integrand_num, g_zz, pitch) + num = bounce_integrate(integrand_num, data["g_zz"], pitch) den = bounce_integrate(integrand_den, [], pitch) average = num / den assert np.isfinite(average).any() @@ -620,6 +623,7 @@ def test_bounce_averaged_drifts(): "grad(psi)", ], grid=grid_desc, + override_grid=False, # Need to have this. ) # normalization From 4a457c003fdddb881a39997f84f0aa3a3f03b8ba Mon Sep 17 00:00:00 2001 From: Rahul Date: Sat, 27 Apr 2024 18:11:18 -0400 Subject: [PATCH 133/241] corrected errors in the analytical integrals, analytical and numerical drifts have the same trend now --- tests/test_bounce_integral.py | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 421b541a8e..dec082cf53 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -654,7 +654,7 @@ def test_bounce_averaged_drifts(): # automorphism=(automorphism_sin, grad_automorphism_sin), # noqa: E800 # deg=quad_resolution, # noqa: E800 check=True, - plot=True, + plot=False, monotonic=monotonic, ) @@ -718,24 +718,17 @@ def test_bounce_averaged_drifts(): # https://github.com/PlasmaControl/DESC/files/15010927/bavg.pdf. 
I_0 = test_integral_0(k, quad_resolution) I_1 = test_integral_1(k, quad_resolution) - I_2 = 16 * k * I_0 - I_3 = 4 / 9 * (8 * k * (-1 + 2 * k2) * I_1 - 4 * k * (-1 + k2) * I_0) - I_4 = ( - 2 - * np.sqrt(2) - / 3 - * (4 * np.sqrt(2) * k * (-1 + 2 * k2) * I_0 - 2 * (-1 + k2) * I_1) - ) - I_5 = ( - 2 - / 30 - * (32 * k * (1 - k2 + k2**2) * I_0 - 16 * k * (1 - 3 * k2 + 2 * k2**2) * I_1) - ) - I_6 = 2 / 3 * (k * (-2 + 4 * k2) * I_0 - 4 * (-1 + k2) * I_1) - I_7 = 4 / k * (2 * k2 * I_0 + (1 - 2 * k2) * I_1) + K = k / 4 * I_0 + E = I_1 / (4 * k) + I_2 = 16 * k * E + I_3 = 16 * k / 9 * (2 * (-1 + 2 * k2) * E - (-1 + k2) * K) + I_4 = 16 * k / 3 * ((-1 + 2 * k2) * E - (-1 + k2) * K) + I_5 = 32 * k / 30 * (2 * (1 - k2 + k2**2) * E - (1 - 3 * k2 + 2 * k2**2) * K) + I_6 = 4 / k * (2 * k2 * E + (1 - 2 * k2) * K) + I_7 = 2 * k / 3 * ((-2 + 4 * k2) * E - 4 * (-1 + k2) * K) bounce_drift_analytic = ( - fudge_factor_cvdrift * dPdrho / B0**2 * I_1 + -fudge_factor_cvdrift * dPdrho / B0**2 * I_1 - 0.5 * fudge_factor_gbdrift * ( From d6deb24ceb080a0fde32691a45d53f8ef7e07b78 Mon Sep 17 00:00:00 2001 From: Rahul Date: Sat, 27 Apr 2024 18:24:23 -0400 Subject: [PATCH 134/241] correcting k2; it's incorrect in Hegna's paper --- tests/test_bounce_integral.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index dec082cf53..abf7f1ec66 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -712,8 +712,9 @@ def test_bounce_averaged_drifts(): ) # Changed from RG appendix equation A10 to match equation 19 here # https://cptc.wisc.edu/wp-content/uploads/sites/327/2017/09/UW-CPTC_15-4.pdf. - k2 = 0.5 * ((1 - pitch * B0) / epsilon + 1) + k2 = 0.5 * ((1 - pitch * B0) / (epsilon * pitch * B0) + 1) k = _sqrt(k2) + print(k2) # Here are the notes that explain these integrals. # https://github.com/PlasmaControl/DESC/files/15010927/bavg.pdf. I_0 = test_integral_0(k, quad_resolution) From 87f5c2dddc0792abeac1832821c105a14372ad28 Mon Sep 17 00:00:00 2001 From: Rahul Date: Sat, 27 Apr 2024 19:14:37 -0400 Subject: [PATCH 135/241] replacing dPdrho with alpha_MHD terms; will take a look again later. --- tests/test_bounce_integral.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index abf7f1ec66..46cee57764 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -714,7 +714,6 @@ def test_bounce_averaged_drifts(): # https://cptc.wisc.edu/wp-content/uploads/sites/327/2017/09/UW-CPTC_15-4.pdf. k2 = 0.5 * ((1 - pitch * B0) / (epsilon * pitch * B0) + 1) k = _sqrt(k2) - print(k2) # Here are the notes that explain these integrals. # https://github.com/PlasmaControl/DESC/files/15010927/bavg.pdf. 
I_0 = test_integral_0(k, quad_resolution) @@ -729,11 +728,11 @@ def test_bounce_averaged_drifts(): I_7 = 2 * k / 3 * ((-2 + 4 * k2) * E - 4 * (-1 + k2) * K) bounce_drift_analytic = ( - -fudge_factor_cvdrift * dPdrho / B0**2 * I_1 + fudge_factor_cvdrift * alpha_MHD / B0**2 * I_1 - 0.5 * fudge_factor_gbdrift * ( - s_hat * (I_0 + I_1 + I_2 + I_3) + s_hat * (I_0 + I_1 - I_2 - I_3) + alpha_MHD / B0**4 * (I_4 + I_5) - (I_6 + I_7) ) From 5431e0c94feadbd7e19247a901600e8a71d3a51f Mon Sep 17 00:00:00 2001 From: Rahul Date: Sat, 27 Apr 2024 19:41:02 -0400 Subject: [PATCH 136/241] adding b dot grad theta in the analytical denominator --- tests/test_bounce_integral.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 46cee57764..4eb7328f0d 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -675,8 +675,9 @@ def test_bounce_averaged_drifts(): shear = grid_flux.compress(data_flux["iota_r"]).item() s_hat = -x / iota * shear / L_ref gradpar = L_ref * data["B^zeta"] / data["|B|"] - gradpar_analytic = 2 * L_ref * iota * taylor - np.testing.assert_allclose(gradpar, gradpar_analytic, atol=1e-2) + gradpar_analytic = L_ref * taylor + G0 = np.mean(gradpar_analytic) + np.testing.assert_allclose(gradpar, gradpar_analytic, atol=5e-3) # Comparing coefficient calculation here with coefficients from compute/_metric cvdrift = -2 * np.sign(psi) * B_ref * L_ref**2 * rho * data["cvdrift"] @@ -736,7 +737,7 @@ def test_bounce_averaged_drifts(): + alpha_MHD / B0**4 * (I_4 + I_5) - (I_6 + I_7) ) - ) + ) / G0 def integrand(cvdrift, gbdrift, B, pitch, Z): g = _sqrt(1 - pitch * B) From 767e74a663527626090e9772d6b4216ca9f3b483 Mon Sep 17 00:00:00 2001 From: Rahul Date: Sat, 27 Apr 2024 20:02:09 -0400 Subject: [PATCH 137/241] adding gradpar_theta_analytic = b dot grad theta_PEST for the correct denominator --- tests/test_bounce_integral.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 4eb7328f0d..7bd7d10f51 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -676,7 +676,8 @@ def test_bounce_averaged_drifts(): s_hat = -x / iota * shear / L_ref gradpar = L_ref * data["B^zeta"] / data["|B|"] gradpar_analytic = L_ref * taylor - G0 = np.mean(gradpar_analytic) + gradpar_theta_analytic = iota * gradpar_analytic + G0 = np.mean(gradpar_theta_analytic) np.testing.assert_allclose(gradpar, gradpar_analytic, atol=5e-3) # Comparing coefficient calculation here with coefficients from compute/_metric From a449715bbc3f07bf1a5f4d747673688d9f6d6373 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 28 Apr 2024 02:43:43 -0400 Subject: [PATCH 138/241] Fix the fourth integral in the bounce average test and add numerical checks --- desc/compute/bounce_integral.py | 6 +- tests/test_bounce_integral.py | 103 +++++++++++++++++++++----------- 2 files changed, 70 insertions(+), 39 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 7a5d26c0e1..ef58503c96 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -925,9 +925,9 @@ def _interpolatory_quadrature( ) if check: _assert_finite_and_hairy(Z, f, B_sup_z, B, B_z_ra, inner_product) - if plot: - _plot(Z, B, name=r"$\vert B \vert$") - _plot(Z, V, name="integrand") + # if plot: # noqa: E800 + # _plot(Z, B, name=r"$\vert B \vert$") # noqa: E800 + # _plot(Z, V, name="integrand") # noqa: E800 return 
inner_product diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 7bd7d10f51..f29f4ab831 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -535,48 +535,75 @@ def integrand_den(B, pitch, Z): print(np.nansum(average, axis=-1)) -@pytest.mark.unit -def test_integral_0(k=0.9, resolution=10): - """4 / k * ellipkinc(np.arcsin(k), 1 / k**2).""" +@partial(np.vectorize, excluded={0}) +def _adaptive_elliptic(integrand, k): + # Do quadrature since scipy's elliptic integral functions are broken. k = np.atleast_1d(k) - bp1 = np.zeros_like(k) - bp2 = np.arcsin(k) - x, w = tanh_sinh_quad(resolution, grad_automorphism_arcsin) - Z = affine_bijection_reverse( - automorphism_arcsin(x), bp1[..., np.newaxis], bp2[..., np.newaxis] - ) - k = k[..., np.newaxis] - - def integrand(Z, k): - return safediv(4 / k, np.sqrt(1 - 1 / k**2 * np.sin(Z) ** 2)) - - quad = np.dot(integrand(Z, k), w) * grad_affine_bijection_reverse(bp1, bp2) - if k.size == 1: - q = integrate.quad(integrand, bp1.item(), bp2.item(), args=(k.item(),))[0] - np.testing.assert_allclose(quad, q, rtol=1e-5) + a = np.zeros_like(k) + b = 2 * np.arcsin(k) + quad = integrate.quad(integrand, a.item(), b.item(), args=(k.item(),))[0] return quad -@pytest.mark.unit -def test_integral_1(k=0.9, resolution=10): - """4 * k * ellipeinc(np.arcsin(k), 1 / k**2).""" +def _fixed_elliptic(integrand, k, resolution): k = np.atleast_1d(k) - bp1 = np.zeros_like(k) - bp2 = np.arcsin(k) + a = np.zeros_like(k) + b = 2 * np.arcsin(k) x, w = tanh_sinh_quad(resolution, grad_automorphism_arcsin) Z = affine_bijection_reverse( - automorphism_arcsin(x), bp1[..., np.newaxis], bp2[..., np.newaxis] + automorphism_arcsin(x), a[..., np.newaxis], b[..., np.newaxis] ) k = k[..., np.newaxis] + quad = np.dot(integrand(Z, k), w) * grad_affine_bijection_reverse(a, b) + return quad - def integrand(Z, k): - return 4 * k * np.sqrt(1 - 1 / k**2 * np.sin(Z) ** 2) - quad = np.dot(integrand(Z, k), w) * grad_affine_bijection_reverse(bp1, bp2) - if k.size == 1: - q = integrate.quad(integrand, bp1.item(), bp2.item(), args=(k.item(),))[0] - np.testing.assert_allclose(quad, q, rtol=1e-4) - return quad +def _check_integrals(k, I_2, I_3, I_4, I_5, I_6, I_7): + # Check for math mistakes. + np.testing.assert_allclose( + I_2, + 2 + * _adaptive_elliptic( + lambda Z, k: 1 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * Z * np.sin(Z), k + ), + ) + np.testing.assert_allclose( + I_3, + 2 + * _adaptive_elliptic( + lambda Z, k: np.sqrt(k**2 - np.sin(Z / 2) ** 2) * Z * np.sin(Z), k + ), + ) + np.testing.assert_allclose( + I_4, + 2 + * _adaptive_elliptic( + lambda Z, k: 1 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.sin(Z) ** 2, k + ), + ) + np.testing.assert_allclose( + I_5, + 2 + * _adaptive_elliptic( + lambda Z, k: np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.sin(Z) ** 2, k + ), + ) + np.testing.assert_allclose( + I_6, + # scipy fails + 2 + * _fixed_elliptic( + lambda Z, k: np.cos(Z) / np.sqrt(k**2 - np.sin(Z / 2) ** 2), k, resolution=9 + ), + rtol=1e-2, + ) + np.testing.assert_allclose( + I_7, + 2 + * _adaptive_elliptic( + lambda Z, k: np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.cos(Z), k + ), + ) @pytest.mark.unit @@ -712,22 +739,26 @@ def test_bounce_averaged_drifts(): 1 / np.min(B_normalized) - delta_shift, pitch_resolution, ) - # Changed from RG appendix equation A10 to match equation 19 here - # https://cptc.wisc.edu/wp-content/uploads/sites/327/2017/09/UW-CPTC_15-4.pdf. 
k2 = 0.5 * ((1 - pitch * B0) / (epsilon * pitch * B0) + 1) k = _sqrt(k2) # Here are the notes that explain these integrals. # https://github.com/PlasmaControl/DESC/files/15010927/bavg.pdf. - I_0 = test_integral_0(k, quad_resolution) - I_1 = test_integral_1(k, quad_resolution) + ellipk = lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) + I_0 = _adaptive_elliptic(ellipk, k) + ellipe = lambda Z, k: 2 * np.sqrt(k**2 - np.sin(Z / 2) ** 2) + I_1 = _adaptive_elliptic(ellipe, k) + # Make sure scipy's adaptive quadrature is not broken. + np.testing.assert_allclose(I_0, _fixed_elliptic(ellipk, k, 9), rtol=1e-3) + np.testing.assert_allclose(I_1, _fixed_elliptic(ellipe, k, 9), rtol=1e-3) K = k / 4 * I_0 E = I_1 / (4 * k) I_2 = 16 * k * E I_3 = 16 * k / 9 * (2 * (-1 + 2 * k2) * E - (-1 + k2) * K) - I_4 = 16 * k / 3 * ((-1 + 2 * k2) * E - (-1 + k2) * K) + I_4 = 16 * k / 3 * ((-1 + 2 * k2) * E - 2 * (-1 + k2) * K) I_5 = 32 * k / 30 * (2 * (1 - k2 + k2**2) * E - (1 - 3 * k2 + 2 * k2**2) * K) I_6 = 4 / k * (2 * k2 * E + (1 - 2 * k2) * K) I_7 = 2 * k / 3 * ((-2 + 4 * k2) * E - 4 * (-1 + k2) * K) + _check_integrals(k, I_2, I_3, I_4, I_5, I_6, I_7) bounce_drift_analytic = ( fudge_factor_cvdrift * alpha_MHD / B0**2 * I_1 From 3a4a384704d962f6416fd9b419085856e22efe79 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 28 Apr 2024 12:30:10 -0400 Subject: [PATCH 139/241] Make changes to get_extrema to make computing eps_eff easier --- desc/compute/bounce_integral.py | 38 +++++------ tests/test_bounce_integral.py | 110 +++++++++++++++----------------- 2 files changed, 70 insertions(+), 78 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index ef58503c96..60a537da99 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -345,11 +345,8 @@ def _check_shape(knots, B_c, B_z_ra_c, pitch=None): return B_c, B_z_ra_c, pitch -def pitch_of_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): - """Return pitch values that will capture fat banana orbits. - - Particles with λ = 1 / |B|(ζ*) where |B|(ζ*) are local maxima - have fat banana orbits increasing neoclassical transport. +def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6, sort=True): + """Return |B| values at extrema. Parameters ---------- @@ -370,6 +367,8 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): relative_shift : float Relative amount to shift maxima down and minima up to avoid floating point errors in downstream routines. + sort : bool + Whether to sort output. Returns ------- @@ -379,8 +378,7 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): ``knots.size - 1``, and the number of field lines is denoted by ``S``. If there were less than ``N * (degree - 1)`` extrema detected along a - field line, then the first axis, which enumerates the pitch values for - a particular field line, is padded with nan. + field line, then the first axis is padded with nan. """ B_c, B_z_ra_c, _ = _check_shape(knots, B_c, B_z_ra_c) @@ -393,17 +391,19 @@ def pitch_of_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): B_zz_ra_extrema = _poly_val(x=extrema, c=_poly_der(B_z_ra_c)[..., jnp.newaxis]) # Floating point error impedes consistent detection of bounce points riding # extrema. Shift pitch values slightly to resolve this issue. - B_extrema = jnp.where( - # Higher priority to shift down maxima than shift up minima, so identify - # near equality with zero as maxima. 
- B_zz_ra_extrema <= 0, - (1 - relative_shift) * B_extrema, - (1 + relative_shift) * B_extrema, - ).reshape(S, -1) - # Reshape so that last axis enumerates extrema along a field line. - pitch = 1 / B_extrema.T - assert pitch.shape == (N * (degree - 1), S) - return pitch + B_extrema = ( + jnp.where( + # Higher priority to shift down maxima than shift up minima, so identify + # near equality with zero as maxima. + B_zz_ra_extrema <= 0, + (1 - relative_shift) * B_extrema, + (1 + relative_shift) * B_extrema, + ) + .reshape(S, -1) + .T + ) + assert B_extrema.shape == (N * (degree - 1), S) + return jnp.sort(B_extrema, axis=0) if sort else B_extrema def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True): @@ -1240,7 +1240,7 @@ def integrand_den(B, pitch, Z): # Integrand in integral in denominator of bounce average. return safediv(1, jnp.sqrt(1 - pitch * B)) - pitch = pitch_of_extrema(knots, spline["B.c"], spline["B_z_ra.c"]) + pitch = 1 / get_extrema(knots, spline["B.c"], spline["B_z_ra.c"]) num = bounce_integrate(integrand_num, data["g_zz"], pitch) den = bounce_integrate(integrand_den, [], pitch) average = num / den diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index f29f4ab831..7bf9c15454 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -24,10 +24,10 @@ bounce_points, composite_linspace, desc_grid_from_field_line_coords, + get_extrema, grad_affine_bijection_reverse, grad_automorphism_arcsin, grad_automorphism_sin, - pitch_of_extrema, plot_field_line_with_ripple, take_mask, tanh_sinh_quad, @@ -209,7 +209,7 @@ def test(x, c): @pytest.mark.unit -def test_pitch_of_extrema(): +def test_get_extrema(): """Test that these pitch intersect extrema of |B|.""" start = -np.pi end = -2 * start @@ -218,13 +218,13 @@ def test_pitch_of_extrema(): k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) ) B_z_ra = B.derivative() - pitch_scipy = np.sort(1 / B(B_z_ra.roots(extrapolate=False))) + extrema_scipy = np.sort(B(B_z_ra.roots(extrapolate=False))) rtol = 1e-7 - pitch = pitch_of_extrema(k, B.c, B_z_ra.c, relative_shift=rtol) + extrema = get_extrema(k, B.c, B_z_ra.c, relative_shift=rtol) eps = 100 * np.finfo(float).eps - pitch = np.sort(_filter_not_nan(pitch)) - assert pitch.size == pitch_scipy.size - np.testing.assert_allclose(pitch, pitch_scipy, rtol=rtol + eps) + extrema = _filter_not_nan(extrema) + assert extrema.size == extrema_scipy.size + np.testing.assert_allclose(extrema, extrema_scipy, rtol=rtol + eps) @pytest.mark.unit @@ -232,15 +232,12 @@ def test_composite_linspace(): """Test this utility function useful for Newton-Cotes integration over pitch.""" B_min_tz = np.array([0.1, 0.2]) B_max_tz = np.array([1, 3]) - pitch = np.linspace(1 / B_min_tz, 1 / B_max_tz, num=5) - breaks = 1 / pitch - breaks = np.sort(breaks, axis=0) + breaks = np.linspace(B_min_tz, B_max_tz, num=5) b = composite_linspace(breaks, resolution=3) print(breaks) print(b) - np.testing.assert_allclose(b, np.sort(b, axis=0), atol=0, rtol=0) - for i in range(pitch.shape[0]): - for j in range(pitch.shape[1]): + for i in range(breaks.shape[0]): + for j in range(breaks.shape[1]): assert only1(np.isclose(breaks[i, j], b[:, j]).tolist()) @@ -512,7 +509,7 @@ def integrand_den(B, pitch, Z): """Integrand in integral in denominator of bounce average.""" return safediv(1, _sqrt(1 - pitch * B)) - pitch = pitch_of_extrema(knots, spline["B.c"], spline["B_z_ra.c"]) + pitch = 1 / get_extrema(knots, spline["B.c"], spline["B_z_ra.c"]) num = 
bounce_integrate(integrand_num, data["g_zz"], pitch) den = bounce_integrate(integrand_den, [], pitch) average = num / den @@ -537,12 +534,7 @@ def integrand_den(B, pitch, Z): @partial(np.vectorize, excluded={0}) def _adaptive_elliptic(integrand, k): - # Do quadrature since scipy's elliptic integral functions are broken. - k = np.atleast_1d(k) - a = np.zeros_like(k) - b = 2 * np.arcsin(k) - quad = integrate.quad(integrand, a.item(), b.item(), args=(k.item(),))[0] - return quad + return integrate.quad(integrand, 0, 2 * np.arcsin(k), args=(k,))[0] def _fixed_elliptic(integrand, k, resolution): @@ -558,52 +550,70 @@ def _fixed_elliptic(integrand, k, resolution): return quad -def _check_integrals(k, I_2, I_3, I_4, I_5, I_6, I_7): +def _elliptic_incomplete(k2): + K_integrand = lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * (k / 4) + E_integrand = lambda Z, k: 2 * np.sqrt(k**2 - np.sin(Z / 2) ** 2) / (k * 4) + # Scipy's elliptic integrals are broken. + # https://github.com/scipy/scipy/issues/20525. + k = np.sqrt(k2) + K = _adaptive_elliptic(K_integrand, k) + E = _adaptive_elliptic(E_integrand, k) + # Make sure scipy's adaptive quadrature is not broken. + np.testing.assert_allclose(K, _fixed_elliptic(K_integrand, k, 9), rtol=1e-3) + np.testing.assert_allclose(E, _fixed_elliptic(E_integrand, k, 9), rtol=1e-3) + + # Here are the notes that explain these integrals. + # https://github.com/PlasmaControl/DESC/files/15010927/bavg.pdf. + I_0 = 4 / k * K + I_1 = 4 * k * E + I_2 = 16 * k * E + I_3 = 16 * k / 9 * (2 * (-1 + 2 * k2) * E - (-1 + k2) * K) + I_4 = 16 * k / 3 * ((-1 + 2 * k2) * E - 2 * (-1 + k2) * K) + I_5 = 32 * k / 30 * (2 * (1 - k2 + k2**2) * E - (1 - 3 * k2 + 2 * k2**2) * K) + I_6 = 4 / k * (2 * k2 * E + (1 - 2 * k2) * K) + I_7 = 2 * k / 3 * ((-2 + 4 * k2) * E - 4 * (-1 + k2) * K) # Check for math mistakes. 
np.testing.assert_allclose( I_2, - 2 - * _adaptive_elliptic( - lambda Z, k: 1 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * Z * np.sin(Z), k + _adaptive_elliptic( + lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * Z * np.sin(Z), k ), ) np.testing.assert_allclose( I_3, - 2 - * _adaptive_elliptic( - lambda Z, k: np.sqrt(k**2 - np.sin(Z / 2) ** 2) * Z * np.sin(Z), k + _adaptive_elliptic( + lambda Z, k: 2 * np.sqrt(k**2 - np.sin(Z / 2) ** 2) * Z * np.sin(Z), k ), ) np.testing.assert_allclose( I_4, - 2 - * _adaptive_elliptic( - lambda Z, k: 1 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.sin(Z) ** 2, k + _adaptive_elliptic( + lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.sin(Z) ** 2, k ), ) np.testing.assert_allclose( I_5, - 2 - * _adaptive_elliptic( - lambda Z, k: np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.sin(Z) ** 2, k + _adaptive_elliptic( + lambda Z, k: 2 * np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.sin(Z) ** 2, k ), ) + # scipy fails np.testing.assert_allclose( I_6, - # scipy fails - 2 - * _fixed_elliptic( - lambda Z, k: np.cos(Z) / np.sqrt(k**2 - np.sin(Z / 2) ** 2), k, resolution=9 + _fixed_elliptic( + lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.cos(Z), + k, + resolution=9, ), rtol=1e-2, ) np.testing.assert_allclose( I_7, - 2 - * _adaptive_elliptic( - lambda Z, k: np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.cos(Z), k + _adaptive_elliptic( + lambda Z, k: 2 * np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.cos(Z), k ), ) + return I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 @pytest.mark.unit @@ -740,25 +750,7 @@ def test_bounce_averaged_drifts(): pitch_resolution, ) k2 = 0.5 * ((1 - pitch * B0) / (epsilon * pitch * B0) + 1) - k = _sqrt(k2) - # Here are the notes that explain these integrals. - # https://github.com/PlasmaControl/DESC/files/15010927/bavg.pdf. - ellipk = lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) - I_0 = _adaptive_elliptic(ellipk, k) - ellipe = lambda Z, k: 2 * np.sqrt(k**2 - np.sin(Z / 2) ** 2) - I_1 = _adaptive_elliptic(ellipe, k) - # Make sure scipy's adaptive quadrature is not broken. 
- np.testing.assert_allclose(I_0, _fixed_elliptic(ellipk, k, 9), rtol=1e-3) - np.testing.assert_allclose(I_1, _fixed_elliptic(ellipe, k, 9), rtol=1e-3) - K = k / 4 * I_0 - E = I_1 / (4 * k) - I_2 = 16 * k * E - I_3 = 16 * k / 9 * (2 * (-1 + 2 * k2) * E - (-1 + k2) * K) - I_4 = 16 * k / 3 * ((-1 + 2 * k2) * E - 2 * (-1 + k2) * K) - I_5 = 32 * k / 30 * (2 * (1 - k2 + k2**2) * E - (1 - 3 * k2 + 2 * k2**2) * K) - I_6 = 4 / k * (2 * k2 * E + (1 - 2 * k2) * K) - I_7 = 2 * k / 3 * ((-2 + 4 * k2) * E - 4 * (-1 + k2) * K) - _check_integrals(k, I_2, I_3, I_4, I_5, I_6, I_7) + I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 = _elliptic_incomplete(k2) bounce_drift_analytic = ( fudge_factor_cvdrift * alpha_MHD / B0**2 * I_1 From 03754abce63a03c7259b48fa3c93dcb869d8342a Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 28 Apr 2024 16:37:04 -0400 Subject: [PATCH 140/241] Make sure test_bounce_average_drifts computes things on correct grid --- desc/compute/bounce_integral.py | 7 +- tests/test_bounce_integral.py | 128 +++++++++++++++++++++++--------- 2 files changed, 96 insertions(+), 39 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 60a537da99..77141625b2 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -372,11 +372,10 @@ def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6, sort=True): Returns ------- - pitch : Array, shape(N * (degree - 1), S) + B_extrema : Array, shape(N * (degree - 1), S) For the shaping notation, the ``degree`` of the spline of |B| matches ``B_c.shape[0] - 1``, the number of polynomials per spline ``N`` matches ``knots.size - 1``, and the number of field lines is denoted by ``S``. - If there were less than ``N * (degree - 1)`` extrema detected along a field line, then the first axis is padded with nan. @@ -1240,7 +1239,7 @@ def integrand_den(B, pitch, Z): # Integrand in integral in denominator of bounce average. 
return safediv(1, jnp.sqrt(1 - pitch * B)) - pitch = 1 / get_extrema(knots, spline["B.c"], spline["B_z_ra.c"]) + pitch = 1 / get_extrema(**spline) num = bounce_integrate(integrand_num, data["g_zz"], pitch) den = bounce_integrate(integrand_den, [], pitch) average = num / den @@ -1287,7 +1286,7 @@ def group_data_by_field_line(g): assert B_c.shape[0] == degree + 1 assert B_z_ra_c.shape[0] == degree assert B_c.shape[-1] == B_z_ra_c.shape[-1] == knots.size - 1 - spline = {"knots": knots, "B.c": B_c, "B_z_ra.c": B_z_ra_c} + spline = {"knots": knots, "B_c": B_c, "B_z_ra_c": B_z_ra_c} if quad == tanh_sinh_quad: kwargs.setdefault("resolution", 19) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 7bf9c15454..90baa4175c 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -11,6 +11,7 @@ from scipy.special import ellipkm1 from desc.backend import complex_sqrt, flatnonzero +from desc.compute import data_index from desc.compute.bounce_integral import ( _affine_bijection_forward, _filter_not_nan, @@ -32,11 +33,11 @@ take_mask, tanh_sinh_quad, ) -from desc.compute.utils import dot, safediv +from desc.compute.utils import dot, get_data_deps, safediv from desc.equilibrium import Equilibrium from desc.examples import get from desc.grid import Grid, LinearGrid -from desc.utils import only1 +from desc.utils import errorif, only1 def _sqrt(x): @@ -509,7 +510,7 @@ def integrand_den(B, pitch, Z): """Integrand in integral in denominator of bounce average.""" return safediv(1, _sqrt(1 - pitch * B)) - pitch = 1 / get_extrema(knots, spline["B.c"], spline["B_z_ra.c"]) + pitch = 1 / get_extrema(**spline) num = bounce_integrate(integrand_num, data["g_zz"], pitch) den = bounce_integrate(integrand_den, [], pitch) average = num / den @@ -616,6 +617,82 @@ def _elliptic_incomplete(k2): return I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 +def _compute_field_line_data( + eq, rho, alpha, field_line_names, other_0d_or_1dr_names=None +): + """Compute field line quantities on correct grids. + + Parameters + ---------- + eq : Equilibrium + Equilibrium to compute on. + rho : Array + Field line radial label. + alpha : Array + Field line poloidal label. + field_line_names : list + Field line quantities that will be computed on the returned field line grid. + other_0d_or_1dr_names : list, optional + Other quantities to compute that are constant throughout volume or over + flux surface. + + Returns + ------- + data : dict + Computed quantities. + grid_desc : Grid + Grid on which the returned quantities can be broadcast on. + grid_fl : Grid + Clebsch-Type field-line coordinates corresponding to above grid. + zeta : Array + Zeta values along field line. + + """ + errorif(alpha != 0, NotImplementedError) + if other_0d_or_1dr_names is None: + other_0d_or_1dr_names = [] + other_0d_or_1dr_names.append("iota") + p = "desc.equilibrium.equilibrium.Equilibrium" + # Gather dependencies of given quantities. + deps = ( + get_data_deps(field_line_names + other_0d_or_1dr_names, obj=p, has_axis=False) + + other_0d_or_1dr_names + ) + deps = list(set(deps)) + # Create grid with given flux surfaces. + grid1dr = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, sym=eq.sym, NFP=eq.NFP) + # Compute dependencies on correct grids. + seed_data = eq.compute(deps, grid=grid1dr) + dep1dr = {dep for dep in deps if data_index[p][dep]["coordinates"] == "r"} + dep0d = {dep for dep in deps if data_index[p][dep]["coordinates"] == ""} + + # Make a set of nodes along a single fieldline. 
+ iota = grid1dr.compress(seed_data["iota"]).item() + zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) + # Make grid that can separate into field lines via a reshape operation, + # as expected by bounce_integral(). + grid_desc, grid_fl = desc_grid_from_field_line_coords(eq, rho, alpha, zeta) + + # Collect quantities that can be used as a seed to compute the + # field line quantities over the grid mapped from field line coordinates. + # (Single field line grid won't have enough poloidal resolution to + # compute these quantities accurately). + data0d = {key: val for key, val in seed_data.items() if key in dep0d} + data1d = { + key: grid_desc.copy_data_from_other(val, grid1dr) + for key, val in seed_data.items() + if key in dep1dr + } + data = {} + data.update(data0d) + data.update(data1d) + # Compute field line quantities with precomputed dependencies. + data = eq.compute( + names=field_line_names, grid=grid_desc, data=data, override_grid=False + ) + return data, grid_desc, grid_fl, zeta + + @pytest.mark.unit def test_bounce_averaged_drifts(): """Test bounce-averaged drift with analytical expressions. @@ -630,48 +707,30 @@ def test_bounce_averaged_drifts(): psi = 0.25 * psi_boundary rho = np.sqrt(psi / psi_boundary) assert np.isclose(rho, 0.5) - - # Compute flux surface quantities on a grid that we know has - # resolution and node placement for correct flux surface quantities. - grid_flux = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym) - data_flux = eq.compute( - names=["iota", "iota_r", "a", "psi"], - grid=grid_flux, - ) - assert np.isclose(grid_flux.compress(data_flux["psi"]).item(), psi) - alpha = 0 - iota = grid_flux.compress(data_flux["iota"]).item() - zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) - # Compute quantities on grid that can separate into field lines via - # a simple np.reshape operation, as expected by bounce_integral(). - grid_desc, _ = desc_grid_from_field_line_coords(eq, rho, alpha, zeta) - data = eq.compute( - names=[ + data, grid, grid_fl, zeta = _compute_field_line_data( + eq, + rho, + alpha, + field_line_names=[ "B^zeta", "|B|", "|B|_z|r,a", "cvdrift", "gbdrift", - "cvdrift0", - "B", "grad(alpha)", - "|grad(psi)|^2", "grad(psi)", ], - grid=grid_desc, - override_grid=False, # Need to have this. + other_0d_or_1dr_names=["iota_r", "a", "psi"], ) + assert np.allclose(data["psi"], psi) # normalization - L_ref = data_flux["a"] + L_ref = data["a"] # FIXME: - # When we (incorrectly) use psi, numerical and analytic match up to a sign - # error, but the analytic plot doesn't reproduce results that match paper, - # which makes sense. - # When we use the proper psi at the lcfs, (i.e. psi_boundary) the numerical - # no longer matches analytic. The analytic plot looks as expected though. - # So we just need to make sure we are using the correct psi in the numerical + # When we use psi, numerical and analytic match better. + # When we use the psi at the lcfs, plots match worse. + # Need to make sure we are using the correct psi in the numerical # computations in this test and elsewhere in DESC. B_ref = 2 * np.abs(psi_boundary) / L_ref**2 @@ -695,12 +754,12 @@ def test_bounce_averaged_drifts(): monotonic=monotonic, ) - # FIXME: Do these have the correct normalization in the radial coordinate? epsilon = L_ref * rho # I wouldn't really consider 0.05 << 1... maybe for a rough approximation. 
assert np.isclose(epsilon, 0.05) x = L_ref * rho + iota = grid.compress(data["iota"]).item() theta_PEST = alpha + iota * zeta B_normalized = data["|B|"] / B_ref B0 = np.mean(B_normalized) @@ -709,7 +768,7 @@ def test_bounce_averaged_drifts(): B_normalized_analytic = B0 * taylor np.testing.assert_allclose(B_normalized, B_normalized_analytic, atol=3e-3) - shear = grid_flux.compress(data_flux["iota_r"]).item() + shear = grid.compress(data["iota_r"]).item() s_hat = -x / iota * shear / L_ref gradpar = L_ref * data["B^zeta"] / data["|B|"] gradpar_analytic = L_ref * taylor @@ -751,7 +810,6 @@ def test_bounce_averaged_drifts(): ) k2 = 0.5 * ((1 - pitch * B0) / (epsilon * pitch * B0) + 1) I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 = _elliptic_incomplete(k2) - bounce_drift_analytic = ( fudge_factor_cvdrift * alpha_MHD / B0**2 * I_1 - 0.5 From 8b1d066cbf2b947629a52bcd9d733dca9596b288 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 28 Apr 2024 16:57:40 -0400 Subject: [PATCH 141/241] Make sure _compute_field_line recomputes field line quantities --- tests/test_bounce_integral.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 90baa4175c..d0ff29a4b4 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -617,9 +617,7 @@ def _elliptic_incomplete(k2): return I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 -def _compute_field_line_data( - eq, rho, alpha, field_line_names, other_0d_or_1dr_names=None -): +def _compute_field_line_data(eq, rho, alpha, names_field_line, names_0d_or_1dr=None): """Compute field line quantities on correct grids. Parameters @@ -630,9 +628,10 @@ def _compute_field_line_data( Field line radial label. alpha : Array Field line poloidal label. - field_line_names : list + names_field_line : list Field line quantities that will be computed on the returned field line grid. - other_0d_or_1dr_names : list, optional + Should not include 0d or 1dr quantities. + names_0d_or_1dr : list Other quantities to compute that are constant throughout volume or over flux surface. @@ -649,14 +648,14 @@ def _compute_field_line_data( """ errorif(alpha != 0, NotImplementedError) - if other_0d_or_1dr_names is None: - other_0d_or_1dr_names = [] - other_0d_or_1dr_names.append("iota") + if names_0d_or_1dr is None: + names_0d_or_1dr = [] + names_0d_or_1dr.append("iota") p = "desc.equilibrium.equilibrium.Equilibrium" # Gather dependencies of given quantities. deps = ( - get_data_deps(field_line_names + other_0d_or_1dr_names, obj=p, has_axis=False) - + other_0d_or_1dr_names + get_data_deps(names_field_line + names_0d_or_1dr, obj=p, has_axis=False) + + names_0d_or_1dr ) deps = list(set(deps)) # Create grid with given flux surfaces. @@ -687,9 +686,13 @@ def _compute_field_line_data( data.update(data0d) data.update(data1d) # Compute field line quantities with precomputed dependencies. 
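# A schematic sketch (illustrative only, not DESC's actual compute logic) of why the
# deletion added in the lines below is needed: entries already present in the seed
# dictionary are treated as precomputed and skipped, so field line quantities that were
# evaluated earlier on the flux surface grid would otherwise shadow the values that
# should be recomputed on the field line grid.
def compute_sketch(names, data):
    for name in names:
        if name not in data:  # only missing entries are (re)computed
            data[name] = "computed on the field line grid"
    return data

seed = {"iota": 0.42, "|B|": "stale value from the flux surface grid"}
out = compute_sketch(["|B|", "B^zeta"], dict(seed))
assert out["|B|"] == "stale value from the flux surface grid"  # stale without deletion
assert out["B^zeta"] == "computed on the field line grid"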
+ for name in names_field_line: + if name in data: + del data[name] data = eq.compute( - names=field_line_names, grid=grid_desc, data=data, override_grid=False + names=names_field_line, grid=grid_desc, data=data, override_grid=False ) + assert np.allclose(data["iota"], iota) return data, grid_desc, grid_fl, zeta @@ -712,7 +715,7 @@ def test_bounce_averaged_drifts(): eq, rho, alpha, - field_line_names=[ + names_field_line=[ "B^zeta", "|B|", "|B|_z|r,a", @@ -721,7 +724,7 @@ def test_bounce_averaged_drifts(): "grad(alpha)", "grad(psi)", ], - other_0d_or_1dr_names=["iota_r", "a", "psi"], + names_0d_or_1dr=["iota_r", "a", "psi"], ) assert np.allclose(data["psi"], psi) From b508b546d14467d6e68f34baa9679c1f6e94c7dc Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 28 Apr 2024 22:22:31 -0400 Subject: [PATCH 142/241] Working now! --- tests/test_bounce_integral.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index d0ff29a4b4..35ab9b1696 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -813,6 +813,9 @@ def test_bounce_averaged_drifts(): ) k2 = 0.5 * ((1 - pitch * B0) / (epsilon * pitch * B0) + 1) I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 = _elliptic_incomplete(k2) + y = np.sqrt(epsilon * pitch * B0) + I_0, I_2, I_4, I_6 = map(lambda I: I / y, (I_0, I_2, I_4, I_6)) + I_1, I_3, I_5, I_7 = map(lambda I: I * y, (I_1, I_3, I_5, I_7)) bounce_drift_analytic = ( fudge_factor_cvdrift * alpha_MHD / B0**2 * I_1 - 0.5 From 6dc4a1ba65cd8446dc21c6fba3e4540351f7a31b Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 29 Apr 2024 01:04:34 -0400 Subject: [PATCH 143/241] Clean up some code and add image comparison test for drift --- desc/compute/bounce_integral.py | 13 ++- tests/baseline/test_drift.png | Bin 0 -> 26726 bytes tests/test_bounce_integral.py | 139 +++++++++++++------------------- 3 files changed, 59 insertions(+), 93 deletions(-) create mode 100644 tests/baseline/test_drift.png diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 77141625b2..5a2454a46a 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -1219,7 +1219,7 @@ def bounce_integral( data = eq.compute( ["B^zeta", "|B|", "|B|_z|r,a", "g_zz"], grid=grid_desc, - override_grid=False, # Need to have this. + override_grid=False, ) bounce_integrate, spline = bounce_integral( data["B^zeta"], @@ -1230,20 +1230,17 @@ def bounce_integral( plot=False, ) - def integrand_num(g_zz, B, pitch, Z): - # Integrand in integral in numerator of bounce average. + def numerator(g_zz, B, pitch, Z): f = (1 - pitch * B) * g_zz return safediv(f, jnp.sqrt(1 - pitch * B)) - def integrand_den(B, pitch, Z): - # Integrand in integral in denominator of bounce average. + def denominator(B, pitch, Z): return safediv(1, jnp.sqrt(1 - pitch * B)) pitch = 1 / get_extrema(**spline) - num = bounce_integrate(integrand_num, data["g_zz"], pitch) - den = bounce_integrate(integrand_den, [], pitch) + num = bounce_integrate(numerator, data["g_zz"], pitch) + den = bounce_integrate(denominator, [], pitch) average = num / den - assert np.isfinite(average).any() # Now we can group the data by field line. 
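# A minimal numpy illustration (not DESC code) of the grouping reshape on the next
# line, assuming the flattened field line axis of length rho.size * alpha.size orders
# field lines as (rho, alpha), with `num` bounce integrals per pitch value and line.
import numpy as np

num_pitch, num_rho, num_alpha, num = 3, 2, 5, 4
flat = np.arange(num_pitch * num_rho * num_alpha * num).reshape(
    num_pitch, num_rho * num_alpha, num
)
grouped = flat.reshape(num_pitch, num_rho, num_alpha, -1)
assert grouped.shape == (num_pitch, num_rho, num_alpha, num)
assert np.array_equal(grouped[0, 1, 0], flat[0, num_alpha, :])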
average = average.reshape(pitch.shape[0], rho.size, alpha.size, -1) diff --git a/tests/baseline/test_drift.png b/tests/baseline/test_drift.png new file mode 100644 index 0000000000000000000000000000000000000000..6e4ad73e9ca77f1ca32ffc117443921ed8cb9e6f GIT binary patch literal 26726 zcmeEuXH-*d*Ji|y1q-O42q+*$K&b*!6e*z-2%+Zzsi8`hjv}Z?Gc*C|fsjz5KKT>IMB&TDN=coS2S?fb+&f*GIzCtX_&jaI5@jI*jcc9TDiK}IXj66 z-V+qP&2H=N?&2mTB;@!%e?idM)kcVo>!t^|$VnF!LpKB<>fc|k4M{tvwNCGf zF^8I;0PBFg|1>{G2mR;iS-QX9z;gsahb}Dt{}61$|D(IH?CNo7tG~i+y2NwAsCU6L zPSCWEG6~i@!Uu=L!@|N4)4x)XA1xG73A&u;v#bvf8dFzZ)RC_$Do4cg$vdXluJ%;`^lY zHMJSYq~3?ZAWQpXcY^)D4kvWeWc^J{=?a6>qM$Qsw|dJZ0%Um#)kDp%6bjNYdoB$X z##R(+&q9!!LC1h{B-p0*w`53mEXp1-)6<8%EGa5Y{rKV0liJ@QYW${TkkhFK`__ z@-w=(+9=TL;pxPdJf?GZbanBO`UC`buS3sL?0c!W^i&y*xuF#3ESU0n`H{PR{{Cb~ zr3ZW&m+!eDOle{yDT|cthl3v-YM`dbp{U8MU7FZd2MZQ z`ZF#20a13HW*wntYM7YqOxg$USc;Q$ET|#`f|` z`nacPzBkCpQtsgubw`wHDw<^()%cdR%N=kiDmuoQ-yt4_!PxIUKRUr%rU?f>bepyg z#|RkwgLHQXx4$g7#UtFI9l^4Up@cz?u3g=J=#Z0kfHHZdGLTbT{K5KMqF$ELtFzbs zfgY35`N+8uZpLr9-EA3;C73u%ap$wc zkhM7^(v_o8eLZksS*Kt?b065g3%3-zJO_WSj=)!ywh!jqVz$vV9RVpWu#to3q26QVzLN>8s#N>Nz|1-^uY9Ob+7L4pI=Yx$8wN3K6U z7}^dCd;Md%BKI9n%dqq?uZM}V9zXj(p+Bc6J5?7g8Xe3Ef`Ss0q&!M~#EV&eG+6hV z9#8V>Sss`5S%E9m_`ci|%c)05gY~gNrtP`m$x`CiCdR?Z$E_M1&w!BjmF{8ONO;ad zSy|U;%TVAhFfK#R&fX>;hQT%;0!#a_+bedqY&f9|N4T-YY^3b#Eqt>)R=Z_sytMEUOCi@ z)Ll%@B=2|6?a8%L+uUBRsZvqyV`C@dKE}4k3g~vt2V3*4`Y@AIJB^$j1Jz`x8+yZk zYNdQeXT+A4@-^X3Sw=_6UDq$hq)RV}60BSHhUKsetXC+&C{gS|pB_&zo-H&{uM_5c zr(qqp#zz2;NalSSgNY?%>?>^8Q_~A}TK8!>1;XT#wKhbAfyl%DfZ-e|=N_HB-U4^p z-mch(cKPkNm1Z9y)M6~-H^go7oRk6s)Mi(?vH+$;8!SuoasTO-g}V|xn{K$;=ds@O`j!)o*ObXnfN!zZ zEijS+VflW~CNGjtB_pG`tI`8_=6yH4yj7;9GW|n`L|j=ZKOAFE!(d)trpd|5$B$gU z304uwwwN8Rsp2$xb9MW@6c^F~>5hu4Vr0Espz%4mD^ux||7w>+(q&Pt&Hkkxt-w9p zsQ6Cm5P&In$In^>EWH0Ryp0fkm9~J zQ^_wS6zQP~T#rkZG?PlpUR%HPu>Lo~39)w+`IaH)9P_+TyP+X(RdsmR%R#xH zmma=DWLoC;r%Ql7_96wFqsC$7xEm(E#iVN^I# zoe#-ZtczVOY-9`M%=Phz+1q=#)-04|c?ESW>8Fl>!gxbn21vR+wTJPTL$Hx}C=nSx zSd5JoKI3d>E)Nwpr8*SBU4#U-0*nqKnM z=|xykX0tSt$Zqk+ZeMlL*m9}QjgQ^jzbWHe+8%VMqdm*m>Pk4ciMGj|=aWjdv)-l8 zrTnS!MP)Myt97^|y@wG*al{)Ifv##FH*#+K!|ZBRyFUrR zDY8VQ_GVP9z*XE*t#OXA(`!_oK6G9B%V|AM*AIP*_`@c;GOpyQ3YL*B>$TngskE(&+7t}6%{7~@=0q!IOmCR5uMkspJ82xWz0c(cPZYg_FM(~ z7xwO|q`$pcl($dmY_UG6_ zZc|IQN4jdxWUS(09nZ&fC9-N9eFv2!q?;_hOn=tXPw$Q9NtpcDs?me=!*qeWf!)cIhkG&Gze~ zls*sF1Z*evRJASwyfKxNJJWI+nC{z;j-Q=aqI(1Z7Df;`oL)RSgQ^%p>{1SKi^(8Q z1(>9RS5e^P<*Dm#QnHcKi|<;~GgLKj_lm1R;vcduNRibtQWPko-DjnqEI)eYq3Ses z@Rqh`gLSGZ$oloZ$sdpNT+j>)GxAcL4$1a}eHw<|%uEpMx}aPG&1`n_O`ZK{ZAc$u zH5}M6ec)}ACy&QU7G@pkz1bgm@OM)Rb*tQG1~@#-x(sVc+r;*&2zTF#elqa9NV9u< zU+*}T>$=wLD$~(fL!hWmnghZ}frE0wMzXS@)z$ewB>hyd@P?>e9-c<+GAE9}kf>^D zF4WkPI{;g>2V_nUAcfNE9fPE3sAJZ#1*zdRPPN5W8?AidWpPYIh|ap+|P$P%8PUK0rVn zyH&#|d`ODk0qqAhk~Wxem)ekvEYP1@w-aLB8y^u{N;Ae!{Lm@jm*(1Lra%GCaAhY2 zTWoVtkAQ`1B)#|#o@*4MI}zl^8?O1f$r8(<0Fs2g?jLRdEjm+eHfJE{qthC-*0#Vf zYw+`K&Bui^3z9X;>o$}GTJiz?d)D0D`OVp=7jo{B>qWRdFVz(hBG=>~ulHKJxX-fU z?6=qd!Fi2Bnx9>7z9$BaeR|tju;RO;kMSX%-FqT`YEwY3vGm-a&cRvM2uUKaVDID_ z$_G^iG18EUr(D&Sn_t(TEJbzSbP^B2VK!_D1Q$706GyIoHHJDqqRn;vQ6sIfx(lXfgln5#M0j+}!r ztEq6}^?h?`0gY3vxztqmo~GfU3JM_0nYIO=wRSi?{ao(v-vdi*AN!@DfQA)TB7cuK z)t-QMLaK%#hlWnhbr+CxG<;tRo`jWO0neJ6kar3;lIU7#c_(JDRnYfl6%O(rS*POP z?rOcpjNXEXt%y67@@pog&jXWiiB@88OE49{I`67T7L11%D@A4*k!mgpCbvYlSR)-A7V(%AsmUy6k<(}*PK+RXjOd5RT`C-@ z3XXe+&1IJR*%mZ;cx`ZbiSgW}^o`ZNyz%FeJIXPExh4cx!zIeEA5 zhccC~{czsR%aOJ|4YNe+ym+KSLZxZR(?$#OP@R5q{Jsp4QmgF0@0nm|I_Ft2r&sV^ znfxPOqiuVY+kbOVA0Fenw{JZlC#;Z>!HgCGzUp%8$T`^cNoy%CxBV$={}kq@hz^AN 
z$o!#&@F4<|9I5TcSAGcF$jB}5Iv1BM5?vFLP|k(clw<%!XxX$RT3*D(X`*LupVTE^ zbx<~U?4@rTt`8i%(rB<2iheTMCvU|kfo@4FwOgPu6Lr24b@8rCv7DS+JOVA+78ZNx z&BS}9U1wqP<{Uw=KRibVd6xx83zU#on}97s{sDQ;Sk7yokCSi0YOfTS;yvxCuM2km z**p#ax)rZ^`ye38A`e)ZfR{OYc&mY^{DWvdbxM~aylloLD1JcC!TjvAo>Q&gPIyQ7 z<F8-R)4YOa863gwChe~el8izRpog_)Oywi^nk#!Au%4E*(ppp*(1Q<+`XGZfi#R8dZ z+?Q*em}IoP3Z*>U$*Z?)Rf;)w_Q#g`yj}575dK~oezm>XpgH$Zoq^2L-Qd2>24^1< z^#+BcBbOIiEycC^-9cxcCG?VD37?eIrcS}B%YSQa>BT!%Df#u~mtg64&db9_xChkn zmJeEw8lq{%>i}@sT>Qm1ruV`#WgweUSJ>Sk=YJcHktrh+4e~4pbXHJ!OUfHH3TYsS z>ZvN&vyW80$5+3FHm)Alsqa2OdJWliIF`X>WjUmtwkM2hN?++OW53Fdr|Y-(!89c8Mw%*nL!nvNf)-IE7$d1?k+ zNIyX{{+B|{^laEJ@&jewT>Ql4xSD%)JiNItcS^nGo8#drQt;ypPI2ZF0mqxdn2d{S zBj3E~e0BcfwFlp0NB4NV6nyN8+njC&cVJz5xoE zM}Bql7fRZ<>e^RTK!S)XJN^>OH`ynfUjZCbM`27=$?Ak=!LDP{On#Xf3feLc?n3SVI0#rk} zC!+MYJ4#18g&l$&5d0tM4F-;l?8_^0@5{tpY{_P6?I+`4@bT7^l?MjAt2v)KV_;Z!A~o&6@5 zLWqHLslMP55D5Hx^-peXho0Qz#K4ZWv}?kr-1ZYTP@B?K_T7bsl{I?{QcoAt1M0S_ zI0^2RQP{|?gkNRY`K#FxG1>9X@aE}8zq<&)Ecc{8i0Q%@_;X!1Nu-|t#FQ+XUp#!R zeD2_UeBYsR326P1qep`*Zp)=$`K^w{9F@5|lie+3=l{ni8;76ie>OZfksD*Q{Ec3Q z?VgEvzcb=%1%l#_NJ~Z3A4cE7;; z4SA)8@z-V@SCU0bGTc8VsUJ8KHru4v8+!G&>&@zP5o4Qk!l(aFSl;CM378@vi(%}u z-<>@q64ebktUyI>MK%yiZ-+UuRy|m+BMEe_3E_ln;9MQJ@q%p3aMR2|&ZW4N>^a)1 zE+WCSW4j+$%b-@~cdMzfv@2i{1-cjZh~!bN*^#DOZr3PMPm4%CTHbsOeRgYygM2{} zgI&W8YrX~rF(Bbkf84I>5qk}gdqr`oy`}a++4#(~sP6A05lO$DGBI&^+=I0biGK1d zUHUsH$=O8fj@eLQl>Mz2hGQMD;^c~_4nu1 z)YOEEUdsdoyVO&M^l&8|M8n69B>`O3bmNKe&O`9B_*1AR)&!63E!gbGp$=?FM@0}n zfCrXa)$CvItPO2!ia!2!gZ1?l{Ms7)$9*LKZHI#Hy+6@PHS4(0l52=}fsbH8R8Wb- zco=)GQUIqOooSKDY1+=a-QXEVqz#8_!e4)-*Swxv6TC4uZpOx+s~N4wZH8G7)$=b~ zGm)9y?$3Tufcu)l)vTCupNXWIqMjtffp|%+=dDpUwYo~-(?z|cns#kyF-#f=0m~Y> z24Ea4BmX>L%5B)h6jX5I1Q{tM-}{pQ*GczA=+;9#t}+ z$D{xQz%gtu5;N)kb15zb`?CSB;S0YizSjLY#@|Y>U!Y}>lbm+jf2U~P!-hJG^0i&6 zs4>FyWw;NIGS{{JsYJMoG;NlYn1{vcJ%DrhTV4r;?iEiO%==a^VrB9U_fw~!?6P3n z`x4*vukQpxw(y5xsc!&Ca3Sn@s)9zz?yd?7R&NUjM&A!md<*SW)@x98WUd@V=<2% zzRNr~&HrTiwExI@$U}vzwxG3wv2jj7rkP(6P83$E0`IRx5+jmw0pXYRd##^h$*$KH+6eSdi;Nkxw;1OE}HpfSJhz= zs;UYs@H)?UKGx`3KlJyIc%zS*#Say-LHC8;?80rDe?*D*_OpDQ0?!BQ9wnn@Eucxs z9HpgI9P$e{4xA4v=-IfQ9mc;EpAE*ls5H3+2+nLAWE`-S@T-YqNTZ%nV33t zy{f5s^ilfJID08B6vR*m-UJ>U_5$quFMJA$%wbI)3QQB%NyX@jB(pTT+9lNo!Innt=$bvVXc_P452s7@F_Z_07FI0Hhp?~~j-KhwQH`oq zUX7w<9uIUR_+kdK%A~5ZOF1^149rX`wRQNsG;`8pjASO7O`H>t!?eE2KzHT>ZGcjS z-NaFaA2YF*o`}@#>8zDm5kwBte)hXQW}#OEfzGf4@Ez5dA-_ko6eoQqT_(Ftk&gIf z=5f?1lq#MD?t2VAYgTD+%ZNeHM|+Y+s!salKp1A|(R1H*3oGn0ZPyS83$Vea&It2S zFKul5PTLGVb0z=n=QV4d3>u37M*m0U{Pc}A;0Hbj1RlWUgZ^an{DvlUw13C?&5d7* z0MIw}OFvkVPVyM93|Qdvuhb)!D(}n%3g?u)U8+XwP3~S-J=m5dm^{}zKoJ_`a4h{+ z?#Ahdk$kzs6C67tv#r41exAd6AF`Or)dy|A%LfFQ{OIE&02aL!mgdOFIK#-2Ax{@O+M+FJ~HkYGEV%*5i3*b50%=2K*pCr))5z2jod?4?Lg&cl~q6XEQ`|8RysA zVx}af?qcGfK(;*15GkT@Z6QltyL$W?@`Oiqyn4ViiJ4+SV3mKOcpS7gt=K4)h~`^# z?hOyni9$-M!UsNG(c2cd#d`)yrfGS3uP>fK5-nR!ussOIS*>QX3aOmxR+|!?4PM|Y zOAns%M5mXE^WXn1 zoP>_mZ;e&uwtoM?W7C?|zRZ|BRG6)_B;G-D+?VsHDo);cvDw~!O4;g)Qe@fpVqLoQ zTywCYLHD&=v5;B%tMlP8g{rRZfc}D{a*ky=qnX4sjaQuzCvNa74uG8mL-GE_^W{4-LP0#jz`qSvHMw#Kn1UJLH8I9WHtqH|dt0{y;dsRll#v23+p2lz2 zY!XtXXTgOHC$zE21r}^=Jw1AC1EfbS>w&q0uctcpFXD#nb4&b_5qw-cfI<(*-s>vI zJcfYEENf1laf*`^Qy^bMwr3>msPJvGYJ)eIg??=qI5^~&I`n4#r)pD-V{$xlP6Ep; zzs0};3o9!xzZbzq3)Rp6O^jt^^Y*<(^xf=+y8nLuBA0&hd*qYc&hv{nq3e={bqE_@ zF;+6{F?N}?a!>7*wi-|K2Zk_$c*e5QWiGR0q3rLwXYB8_Ck_1tq;?bH%MCVXwsq&< z=s~JG>YoDn?WemFm2zLqPDVe2U@dG+30lQ{Ensi}p6MAFjG#@iP)l;I&AckV8*Wre zAT#O-_h`Z+URmzf(yH=b>UQdi`L;ToF_e5%U{BVcs8HEYT*FuYG=mt*#R7lQoL+8L zh3_;5ROvucaFXp46Vw;@edrMEQ;4fH(!Q@Gp~PIlSy72aG7=N=@@2ENh~8rDqk2Jf 
ztc;(E;hTk%5kA%-@4?2%$eCBm)~vndTf}6)vOd0UKnP&j_!86S;h9i+Z8+20cY0R+ z3v;4nhiUO*M{t>3uEO+LJ9AP<&uuawiAI#5-0Px{h{phA*;0Gub+hg_uzMU4JQr8_76R$jj2(NRW$h~xAy{oig6Do&&3hUj3Pb<8n z-Om?jD4i)joHn}}oTi3lsp(gV$P?4w?OUJ>4Vi)eS`%15of*Ai27?XgO%aTgqY67; zUu*`AT&a*y%^wgS0Psd)i@n=kK(TkkVr^A~J}LhB^9CA|Z2W!=>cY5?06OREx*+CT z-bGF;cT0%ClN##NAELN1UChl~_WKjU@^Tt|eGvGtVSuUe28i`HLVi+QrYD1ibQ6omNUt ze{BQQPJV#e4#6rVn(s;JhE=#e8{iy&_~F9kt1OcuW@wt1VYdjxQEV*s z_Wcgb%5g}{gb4qWyeExl{XPY9m1ea4gSyJ6+nIjV{$2C?);`q-<52fa)_dY2D1RC1 zp)gmk0=&t4A?!4Bhtf+1wE&%EggWgS+Q#HW_o&c?qS{(ly*zY(;ttnD(W3u&0I#7> z#^foZcAJo2wAJc>r#r_ZoTbgk&E4J7RYUEOG16T=n)d7HJ~Zfcbj@ycf*$Pk7iWR` z^Ex}}u|Hn3DW!JNJvGB`6J3PEiZEshPcdd}CmC3*_EAdk@M2oykkGg`+ zzFR$WX~TQE0>rX5aOt)7+9*!A7qV6jd_y}*%g6S~Qaybxe>_{_jU;j=>;1QJ9&6SmluzvKxI$;CJx%t5V=bbDg`Su2Aldfm7N3Q$N5yV-#z-jEuj03cAxI5PWPSzG`n%6qYkOnhtRaE5eta@v!|qH z^1h;*?f@N9(2daZw}ECR8}xX8y@$7bd(G{!G^D2GK3mY(sH3WthWj}f^H;)X9t(== z)}6w<;BVE}SBq)`ikp*x>?gN-uTsfAGQYi6WVdU5ypX6}`CK)ghtRZeE3b&`+U-a@QjB%NIJDk!|ZQ&@Zj@ zDC3v%!CE}NrAi+gyGjKb!9vj71~E5LJ;$W#YJ#p1zk|&?r9c@YF+Y1ADM2I9`6mPi z9+&Z({)#cE$oTGjEwE*?UEp)e*lJ%31S??^b4tqOfGzV9&9rzvWFH^FEpqAYeuT6PGQHX&FXZ2n+*b_8dwX?SrPV~%}`{pTbixGo?w$Vhsw2}hcP{cjk`-x z-9HmUiTW+WA8S8HJV$0cItr-ZyZwcYfFA}&Ar9jR)Unr~^h~-as?0I`()OS6XMGax zX=mjE4Rrgobo^YaZ?q~Bmwh8BAV$prv7WVi>RSQ7pTp9$q$=gYJ&&oS6f`W*D9DEW z-`iI33;S(({&$%I36e@3n}0LiO;j&35RO`Jd7V*Seq?_w2L9y96Z+Hh1s^`hD=HS~ zy05uHUknV=A%q1;9>g!xaDE~Aa6msVOgi^87ofX0wE{^qI_*Bosh63a@I91EqZziy z*%lVEc~O+*0&s5;Zgs0UN9?AXA+fymzxqLQG_x{kdv z0jMWH>+3z!LqJwLomQK1dXdcy3&Q`HcX*@}S$)Igig0od5Bonkr4A>1%7%A{T4|B0 zsz3Y*gHB60pjtKERTorS=^#HC`$1BBr}{VlcOFi|04ker5~*sX8sGlv(#^FN=V4o? zsEQX13}64%qyZIz2~e|6Ht!OBy%Z*!r=3xcew+}KTnMv_w1ay`VX~R+SOzgAo~MU0 z&}lRzX=s<|a48x19Z3eiX**S#2vj;=-K3_%6VVcoz~AK*(4{1*b9?)u15p!SLXJ50 zS$5RF+3sh0VyQ%z8u2JoU3*OduC~Xc4Q3_M)Oy7bYXho!U@BVy6DpBmivb-a6x#Ah3XxsZmH$4(U zCSv%G*)un~I42x+G>WaebvmGSYWGSH9h7dH7FMi)LQZqs}8%hXGc1=<)8$m2TG3n{`4}9iV8%_YI`1ewcjrJ&< zIx~9Em&74gx3?5#!dq7C4a5P+LU&1-{&_=x+S=xh_g1S<=D|fsX94x#wFJEpP>Rg% zQp5gcdS>(2qi$rgdn%5NU2vZ_P?B$IEvmekWrVflHP-=>ms;<+t75qW^1?WLyll3S zi2W>@dcZ|NzKE|Y(zBMeyu?3va(bF4h zp4;}Q0omIrk$8SHTzf6ow&i3}7A}@^f%dp=u1*&nj5@7P%icf9_U8oXbi%afC&>H= zGLe+(t;DBmoyXabKykK4;Yz&h&e)2|OCMWmEBsv5^|>xsc3?FF>W0xAX=$3@Qp*#E zrA9XX?yA8Ld(^i(r?YV;frc`7+dsZJ!#c=B7}0$IGOf|h76MtS-+jDFuC2g%ka(#1 z*bL}s+|J7~xB-O-3n9Ge+r8U)v}NVzl^!B?>vMai4l}R#8^}Ig3mHJ)LIA1WCwraW zDA2tIT0f)^tdU!2DUu3ff=u_1L65P%L~_r}v*Ud;zwgdwynN_BB0stm_H#)RZLH-$ zfii`VZK`H{S_hQkH>Q#I+eLDd5 zrD)-s_)!V^?t_e7^4M#p%B($BAL~hxN`WZ)!4G0xzeduO4I=WHOuftJokojzJaz`Ajt)-WsW*Yfb4kz>&Aead3`Rv-HM z^+{EgMp}JnB-CHSHgZ8Qak0>~%mQoC<>_R6)c*qRaNxCn3|STFAtW*)B;oooqeSWM zUB`(fA&t~)c;7|dYOD2)g9Y;ZYbywqwf)NI3$(6dsqNS14>Tm)`kH15&ZdLTW@qT> zXCj~jb2gUP3Y4$E<<>f@V~KMU6gSye6)u=9Q_3)XNZ$T!y&zSgwlG!NIn?Rvwr}={ z6@vb-m1qz#_jhWO#MD6hdb@em5syQoX%huci9EqEphRbLP#Weol z@DUo58Ps1CPtm%s1$)+?0T^`HNcvTlR8ozvXKBw96QTD_mW*9(98P_UgH@CgJ6**`k87=YFU`k? zNpUqZkgzQC0-8)JeRb;i#|BEEU7(|Eso~(_d`|D64L=9hvvD8RPmms!*8n_A2q*rJ z`IS%vf_|eUf3GroUZC$vwGQ;$2iq-j9u2g`wt`6~3brBPoa~5b_&;F4<(^J}X7kQ! z>yiB(`_cFDR>Fy*!Y(-3z%{pevq%a6lu%w8SlgxBRs z;_9swS$<>8>B@azP}zp{7tk4{igM?q|0$hsXkwqPEK5g?D11mp_h?97!cXt zq1x80gfU*YfHxxhP#4NhO_ki{-Fa>tv5L`OZIj+4 z0yY*j-Vnvl#l_VyjnLKg*xPz?3k5D4mRYUY3~)Ls=9Ze){qvb!T{^$wa1 zgwv%%-wyQ{@2RS(@&Ae815wbXytlJx1=710;!Q!8^r={H-Wg?+%LTGRmJfbGW`kRp z(#`4GbBssmUIn}E$el|NLU+$m@}S|a%X-kWX6?O?_n1XIcctf%a5K_R#9Eas5xdwX8juLAAXdVPf(*B_8?3NktKZ4qsMfvzl`*T2$o#mUScO? 
z_ce4VEL=1;HMQcE6HsGjK_e>nB~4o8geWAoTge4wnkg*VptEL3I(5o8@61&n`~K2D z4pm&+VwDuzxDCF7X?;2Tr=({@WRCvlgJYv_Pq2I6^V|Bst^O|kM;t#t2gd_a?G9h_ z6fiShVXbB`!gZ#=;4YZKl3m%*)Z1Jv5c9w&<(4nW5|DZ4_V-qK;HHIJi$6PyPDAt= z5b;XxQI+c-&4MQttr>UUrzQv5wXHlz>Un(KmsHMsB>H~%HA%#?XU~Wx6-AeB8>D9_ zyu1M7M^L9_YCIE5(#QTM>n~|5liAG)WvWTBH1uJh1vCFn9GQK4eekT=rnis zR$JZt1t51Ve^};Jz;vl}gQU2gSD^FHKUZH42|-g5QOO$C{>#5F&wWS_t`u0zS)l#? zMKEx=((q?0LnlkAuh=FE8xv!7B{X1b^msNNKfR@8WR#TGd=!-Jwt&53JaAcQ|&5+;w^N`4LF)gr;+bJm*I$6A|q+;p>dPEgi+BF#;QR;A3W~K=WfT>R!JmI?! zapC8RYH2>?q)yet2@HD0b1fz1n-*V=yVhUuku|M!hXeb9NP;{yDDE3<>^^iI+w$i~ z+-u&pH*32OO;Jo~`bLY-QJ8}XaNatxH2a8qW zml^!HS?xNySI4l2iu2&!DMT>DT3*QrZu@f56WzGl z>Ws3rEtlPEL$%(&Il1XH$^Z4`U~6H=P_`Hl^-BTqrm zb$u{bK<4QS^uNb7S$3>UHk%r7fX!M5ZVFmA*oO+F8X=sCq}?YNFE2bv4!yENjj?cg zGYeG9XtzGfNKUXAcsk^2K@Q-70Meg>wRMk@&zwoB?E_SP9*~zpWUof@2BO<4MAB5)01y@5OPI6H z+bhkipCw68Za&KlfsMg{Tn0qh!K^4%JG&=yo%k}G&W~~l&hhVS|Bk(SrAp+`vYO=9 zcDj4SteP32Nf*hGtj2$!&Ks=PkLLeMJ?H1uPLV3kM2VcJuB~Nflg(Wp95x3`Eb-uM7YWli7-oWa!FHkmsP>BO)-)jK!;5c)2Fps!5^iDQ+nZ0IX&_1c! zy9iL4G0%lAC|)|m$>3Qvm@ybxvUmL|u%wYt`nm6GSTim$p!hvwDyDaFF(rK`j(Sb zJ45=P&(Dvt0O@z>9}c*6u^~u;oyu(f6PZ82e$Z<=N#v;IakX@%Kge3$H~Z{$ym`14 z9V4M7_(D~O5m>s*!JapOFq2y*2n1Hpq*+3xt>WQ+w<^LH42*xr69lZoguF+ zXNQlXn9;Pt z#f+3%V*9qlognIgIjE(L@3TRQVIxo5#EtiTF$@10SFK}j4oSWlbT8eiIliyL4SyYw zfsVtqrfyR|+W{2@-h%{mC}3Rb?(rJzOGu%wm)#FWyRk=h+beZ!t&SgZQfgB4Sa$*F3|GUKnyJS_2` zf^i{($%T}35TBq{(gc;t4+oWrn1pTNb)&Ut{k`gk=WpHA`;31MO}GLPI^dM6HY5(? z{}zCs%&m*p>S*;%!*yks1X#H2u)>RH?lN8~)rX`oK%>|DurW-Qzsp5mL8flUJ8p$f zaqExvU!*+}TD@WP3V~39&t=vL35fE_)V#TIQyFN`=663rdg#`&r)JQg<+SmAEpQT4 zw}UwPzs;Uo%CU-qxk+k(@hvzs+XZ4Oebu9K4yZ_mU9*P3fBsI@>JzaT1j4WAQm4q} zUo`*WIxhHlUu0+cWbg@&>#v|{820in5p-*2DY=7W4d$mFaL4fS`~3to0w{bq;C#A8 z;ZsLiI9@zu|9fdC6@Uf+@NZd`1f8c!FXx-^zH4Mj`_TdGk8tb-#D)J*@1VGvFG(8p zdH+1_<nf#X@?@Fz5xA6=0Rjhbw|78RiL6aHkHAAy;O#Ie7T=mMUN#DrkL#M)R@I;kzH!dPcuHlOxy+RMHAgSCci9A zwo%*V0Ceo+Vkkr)beFX>0TfNu+F5Zv=>fHSy{ z4O|sVM~ACg#MbVu^iA_0pcNy|UMslk*l@4z-tqri1K3^u+6SL?jQ?`|Wl7o1HU);+ zc4an!Xtd4L&&0jsz$L@-JF>H*&w+1elkKyAp-h=9$OduIV+X9!?+@titqO%d8NYY5 z`Oxg_td!rDOJpmnte=5s3|N%#+M)p9%|u;su~_WBWODuYv#*Xr9;=oh$RgqNUcbO7 zZecl&9|i1odCt$H8q}9`I9rn9_9gSm;#kiyD8Zh{+uG(5^$Uog%x!LM z)$*9@dYhJ(2Feb-3RkO;(9p`tN8pZS?YVD7pMDU1lIgxUFdWr?Tf83Mc4%4e$hqTs zdHNa02p=D%6HS(6aH=JU z(z$vB1dFictI*TTV{-^_n*p=QAIziUlL^@z3c{M-fr1?Xf$x;;&x$ia13?Ne&(79t z4t18;b-M4Y*}*$Nkm0Szs= z{gSyJ5((4;a}*Ss0#4+Dx61D3oB1w?U54P+NWkq&c5_|#9Dm=@K@#1&@TbJ>TD6Xg z0tI7zeSJmc{d>#++yW5V*}xWH7;rYGC^wI#%E* z7VQVh&nYTJV!ITvaMeBEEQZu>7YR_u1_Z>d=oyyi9QQ+oxK;2-HCX(E;vbw6f zI@Llcw$U=Yed(4y$mZJazi%_&G|0g&8x=ibk?N{H`k4U1O;9ITJbuh{@#0M(At7Y0 z_JirxG!zCy+}qm&s!Qvi?=paK*aCYaBC^Bs)WDToq!SNK6y_}Ntm)Fd!kZsyJ~TSY zED{FvC;9-TXr@R3G{)(_x7+}P{S@%&GV6n0!?RyPPC+~Vx2|(h(^vb>YoKxXVu{ro z_qo_Nr=IHqbGn1+;;C8dQ1bz&A~PSIgd<*G;9C23QOmX~R|k}_vA~0~1NW(DqyN#~ zwZ}ubcI}bcN`*9`U8JHS(E%HkCT(SqoDVe)DO5^w3Q^O!P-!Y^lT##&b8<>V5<;mV zkyAv36dg$3>z=WD_xpYSe*b*m@7sUuU*kF5&$`#W*0rv+?)79kUIt3w@gBPWfzMZ}L z{%itkoFa4cepj1~HRVuo_4(rq^W6PhfYRhmZU@Q8$;q96+W`elDRE4fLuve_4T}C> zZw`$Pq@+x(E*8z>ANkUk^!`PB<;ofBN4jqj5mkq)-@+<4J$Qfc3^xEXhNb{xFc|1` zz#Pr+-El2AJSs}M+`e3*1p5vxET$karq-HOZ}TF-B!1|%b1F)^Y-@a*R?a|G(U$S= zHOFM=pb~nTax;@EfWlpm+>i{;aEMd1j@`CFmgy}x=^Q^I2F;(}c5S`Ba%0#Z2kX9n z?lOzkn)2qzKtY4oCJnC--No`ypQcD`pF!6MnfK{su}8;Zca_Dy&a!4m2ukvG{5E;= zB&qDz(y8}mE!XwWQWV4jVcg~b|9xQ zaeAnt4$IF!?yOXW+RQ+T@efRkAsY0)I7^14|kUcZ{Ptv?Gsh@ zCR%Pt9;Gq_1JIq~?d1m{uxO%-hF-{q(y2l527%n+t6ZIXD4X&ykeuORR-h0;ox{M+E+? 
zu>3Q7`vm~s8fUvq4sO4QkqRvEGu&}+|7DpAbJuL1r@B8FzOf=yDfQz=SFDI}w?n7N zY@FmY%G1&M;OUodZ!hVMexaWg%8ij%<1hQDVb}iXyw&*8@v*PHqXUKYOC$Xj!H}j{ z=l47q?RVelNiIKIWq(CRR%Yq$v3wcN_pC!oPn;s@vzMU2SwBU7*6C9e*0xYZ>)I1j zroX&*ppm`f8FVcLZg&op76lj=2eCr4*g7qk7!-ap@m4>%9eC5l5-!b_(vAk^=R}m zSIpi0kHyn{$A;@Y2iwsG3kKUT*1?OGGDDi@zHYkv`TP2cCtId-A<$?MF7|hxqXhc}F=x9YYpm)@sCrx~Goob1PNM!tKo$(}h&K_E6h-u7$c-Mh*bq0n7J znX3v$#*SCi8%2HphSkH4pTUzfKepB`TD9x(o{y~$HIDwsk=YB7OBQ$Fa`@6^8tmpewxkH1m~X@= zBt4H};zXmxI$lSXlG@kCd#BxnbyhtTI>#?Md-fbEHFmhS-l=MVkferbc9UD6mX;Q< z*oxDqPUYn262)l4n_iuz1>RIn*;FB1+=TclmnOBo*I9aPZEXP;E?@(gRSh5?y~XeI z1+Gh9ww*2Jd`~hsX_jWdQVnULZ{Qk-2ab*J0G7M&*hWlH`R`?zJ6f|_4>Rf;2|zQQ zY7)_eOkLASHH_dfZnPW#ZqVzE(A0pXm<2+sjE#Qn`quQof3#y@U|`}Dpim&!ML@0= zsW#a5_7`FLhp&U#N#9kb?EX%ln0GF+irSNIwZFmO;R#~3$)ZZMDahM?cTcoq)nDac z>{^SC?|T+?T;3vuZDHe6%EbfJxgNB2#Kl1MN3h(v@1YvDHT#WMn4h1Yvg1>U0}UD4 zaeAR%+(1_Qx3$s{=S|Y)o921!E%;Qi+ygyQC@w6HmcSzbfkQ=lN(Hee1%&hRKn*hO zk4uWw0OQaAeEaiy6|+FL4-|ZjfLph=7KMN=U(4lp>+Gyv_7h7+&41)Bbn(EAD#w|a zFHWM_i!0kS6EK7MdCJ5B!9te@N9!eoGOPu|&%yL$I8^TIeil8_c6NbBYZ2ef1)2;n z2`~AD*G0j*J3|Cq@D%kVZ9ALwNQ!{VrYI(pGS)Ia772rM7#0>(1R-cr6bWnhB+Y6+ z>k-{j>WoR6OR-Ynj~JiN@AiTy(eU{ci?FD(hlSS-n342{q{y~E1s-i2931=$9YFdit_k%+X1% zqpPc{r;T&;b29+h%8-Bo(*W&Axh}oGB-oh}Q z3+X?%SH02swyy-|qK^-A;K8T?Jd+^`br&sOY?kd}&ADT=sqE5%Q2f_l@YRO5=3`$e zsA@<{oq_z%&tAF@T18Ng3?hSskuQx0vk)nkmX#$zTNP8d_6J(|f$1#)9X~qqe(CUs z1``+r*42=ZCG>r-(xOS~L(b%OPR2U!9Pdl7tx1K*(X2POtp3Uc6h)>OY85dn!a3j<48Z{5We8HNx&H(5!09x|8q zif<5Y3I&w+zcNJZQ#F{Y;#_wJ!IeP@d`;AzN`_WEf4jQGL${*oO828+CM))3^(4ec zuw*1R9AyA{JPTKMA1W2M&B@UsLn|$7+#(8lbqcDAkd(3I;{4~a^eaPkI}XxWzF+%S zAg+nwqAf^Sd;@|X{_h<26bgSm(SdRrf!oHzlG&LsN( z{gZ?2(xqJ^go&NFIV4G}2$<$A%Pmr#4(5XB=&4>i;NhkXRnT4lvs^4^DI}r1CwTM4 z`3V3!d$1P%4ic;~9p3}dz6_eXMzEuPuDV++D4(#tbfR6`A~mOc`iYzLKe9ed6H~Hb zmj;SE9Z^tKt<&aE7c41RIm1HS$W-D?bZe^}Q3b4ZS>3CK$ef|<)MVTGI79#~FGmM{ zRd4>8lD`D^E3LYdb?ffCdKHat;iCgDK4B|0{0S5G3ZPEe`=eblOrldwgc-UF4uMce zMD1saqA&@W-0Fp<3JM@lxOjZFEt4@?I%5$4a$gid3WY3sI0M0hBZb(y9~$do_4CA3Er0R`4yy+b+ARV>c*Na zV zZiPveWc6oY^@Cg72x#QymH|_gjAV}PnVnB6EA@`yBi+gRzh}R&|^6U*~DB^=I_ed?Lm9RFGrwVcXL6$ zjsLLO3abp6#UExHl9d0DkyWxeA-A2X z!;RpZ2!OD#EyCUxPly_t=Pr)?V+p~0` zJuixm^AA`3(W1j@_UV#DHqQEMllvu?f?;!l!gc8=3y#CPYhOXy4Q*inlbEyE>&V6M zF(zNibQe0(W=h%ZKMISv_UvItA1?7jtA0?NAXkU4W9% z1Z0F4E?n62Eczt!Y*8Q>XUKE62s-9tho>6`+0zAI@AU&sgm?IE?mUKxkW30X6eSpx~#zE8+T6p2}p}> zFC2TFl+NOe-Ci`cfNan>++UUj@5!qZ6>rjlnZ1%0pu+n_{0+CJn37$@wXNLqc`uEB zx=zQ&g7W>VGx?35@&aQqRJ*X7%{8P;x%)LJF5vRge;rk2i{XeQ7Y+WGBY3HHlWr!} ziJM!?nMq_RrgC5 zOgyD$t3YfH??n%QlwB)LI<8Quq7!!jM~BXZyZ}sa+OLt(MVIcJv+LJb%vf0Uk9qQ* zXC*0OP%M9M;+Oe!JeO=-CG97LbOtAeY$p)=ZQ`kE{Dh>#@m5vcBcUO5G#@UNn0V6u z)s5m06SR-*auM`{VgPAEb$M*8<7)TSi;{a2{dFO(i7 z!c#&r1bWPLV|}08JPO0PX>{@$-3FW^FPvN zfr)aOXkESsX|a8^vFDR2alY%cq|)^o(&}SVK2DThikq3PBU{bzZY&O8n+)6jJnv1h z$&&GgZ)2M@6UT~@cPmD_&+{EbTWT!4e@Y6wxn!V!j0q{|Ue~*G*J3XwfXbVIvVG|p z{Kni3QIu9M;=+RM19*?vHGJ$b|g6E3`%Exk)tMyZ^fd^i|H7n)jEef27Lq(Wq0EEX46-%I=C83pYki zrnAnZ{GN!WbKreuX-MOM>vHtgi*WgIS7tAY1VEATn#E^}&Y2zOa5iM`rtz77Wugqk zUL!z&r%jvR6@N!}l)Rep2ke_7S~`Opc_YbL?YH})wRP@wgQ;>-6Yqg~oG$OzW{y)E z=4UmioQ|d)62y)%SRL0}t&GS_ZWZ!jn^nt@U%{U#YBRaB#C*Y3y8r+Azf&4d;(f_DN6A`=|RH8V6RF-685aPqYr z2Pz#qw%q^?2+s7%x8|~f>QKELU4))7F)=T5-JQ(Xmd`nzT5^(S~LE)hEke0HHNG~vMqy;(G4CdMt`0m9vd{Q_41OU{9 zc&%loBpz2`4}k>(wf!Pw#g1^s97>TH);v>dwzzoc+izyK!89hIvPe)^_%OcKLa|uL zL}^tOD)~@Yl->K_Hn_Fzc^(NfN?eT*%~%i>5Uc_W{yamvaQtV!LBzU*fx_`-x)~q( zHj$@Nv0aT6Pw!XVk&-%hjbl)MZkH75Ij*i!pnMd8z-ZAxd8MBD3xG6r_4Q4`OTJ99 zKvz?W_f-5Z%8)TIlmy7=3{eVBn|4`-AV*Mx_0>1m-n3doDVjoFU>++XA_Y<2{K(36 
zWAgEXNyq__#9yTUmmBvB3S{Zv(Txgxhr3Tum;%Hb&Cfvj2f0=1{Z9b~;cC%!$vb*c zG*JEJuO`I>K^HEVA*#p%Ub^DT&?!tVODeQwS4@STV z?9SSr>eG)pxkT&yFDjRi+uI9vYhNAv2C|TAkfF?BayNKTm2Sk_c2YHEIwdq^5>QwU zft&3N`HkYrPfDBwO>N?B`6%n=!Gst_p;9CcYD5@qW*Ar0`XDo)74DLSXn`*40X3%> i5LJLX{l8#*e{9%MIG643Tu Date: Mon, 29 Apr 2024 03:08:45 -0400 Subject: [PATCH 144/241] Fix bug with recomputing quantities on incorrect grid --- desc/equilibrium/equilibrium.py | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/desc/equilibrium/equilibrium.py b/desc/equilibrium/equilibrium.py index 7e6faadc0c..570f77cfdb 100644 --- a/desc/equilibrium/equilibrium.py +++ b/desc/equilibrium/equilibrium.py @@ -883,7 +883,13 @@ def compute( params=params, transforms=get_transforms(dep0d, obj=self, grid=grid0d, **kwargs), profiles=get_profiles(dep0d, obj=self, grid=grid0d), - data=None, + # If a dependency of something is already computed, use it + # instead of recomputing it on a potentially bad grid. + data={ + key: data[key] + for key in data + if data_index[p][key]["coordinates"] == "" + }, **kwargs, ) # these should all be 0d quantities so don't need to compress/expand @@ -899,14 +905,22 @@ def compute( sym=self.sym, ) # TODO: Pass in data0d as a seed once there are 1d quantities that - # depend on 0d quantities in data_index. + # depend on 0d quantities in data_index. data1dr = compute_fun( self, dep1dr, params=params, transforms=get_transforms(dep1dr, obj=self, grid=grid1dr, **kwargs), profiles=get_profiles(dep1dr, obj=self, grid=grid1dr), - data=None, + # If a dependency of something is already computed, use it + # instead of recomputing it on a potentially bad grid. + data={ + key: grid1dr.copy_data_from_other( + data[key], grid, surface_label="rho" + ) + for key in data + if data_index[p][key]["coordinates"] == "r" + }, **kwargs, ) # need to make this data broadcast with the data on the original grid @@ -915,6 +929,7 @@ def compute( for key, val in data1dr.items() if key in dep1dr } + data.update(data1dr) if calc1dz and override_grid: @@ -933,7 +948,15 @@ def compute( params=params, transforms=get_transforms(dep1dz, obj=self, grid=grid1dz, **kwargs), profiles=get_profiles(dep1dz, obj=self, grid=grid1dz), - data=None, + # If a dependency of something is already computed, use it + # instead of recomputing it on a potentially bad grid. + data={ + key: grid1dz.copy_data_from_other( + data[key], grid, surface_label="zeta" + ) + for key in data + if data_index[p][key]["coordinates"] == "z" + }, **kwargs, ) # need to make this data broadcast with the data on the original grid From c1b679220fa83238582f0b0385fd06a2eef69086 Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 29 Apr 2024 03:25:52 -0400 Subject: [PATCH 145/241] Regenerate baseline image for test_drift with pip matplotlib not conda --- tests/test_bounce_integral.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index eedecbb36d..46744e9aa6 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -629,8 +629,7 @@ def _compute_field_line_data(eq, rho, alpha, names_field_line, names_0d_or_1dr=N Field line quantities that will be computed on the returned field line grid. Should not include 0d or 1dr quantities. names_0d_or_1dr : list - Other quantities to compute that are constant throughout volume or over - flux surface. + Things to compute that are constant throughout volume or over flux surface. 
Returns ------- From ab74a17d07b64380432f6feb5d61e02be2cb9e8a Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 29 Apr 2024 15:41:59 -0400 Subject: [PATCH 146/241] Remove extraneous details from image_comparison test plot in bounce_drift --- tests/baseline/test_drift.png | Bin 26726 -> 22432 bytes tests/test_bounce_integral.py | 46 +++++++++++++++++----------------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/tests/baseline/test_drift.png b/tests/baseline/test_drift.png index 6e4ad73e9ca77f1ca32ffc117443921ed8cb9e6f..1ebf6e13393987351c4d0e2537efc0cfe832ea4b 100644 GIT binary patch literal 22432 zcmeFZcR1DY`#*lHNTEU5BV<#ejEs_T2!&(MY_gA$okGbxM2UoRaBSI|jK~ZJ*<|m% z$M5;l`}h8QuHW_h`}^1Ty6QT~Ydpt&KkmoL;3dKDAtZbo z`HdMr0Ew@V$HLNDBs{#NjP^M18oT0p z`5<*t1avvLt9s*Lkgj1Sk3PB^GNJz;n~B33)%t42`TNz+nHqWD-)*uqb?}y4*CH<+ zUMZ;{p+=!ll-IAPHa0%Fod_N=c%{qsl|1qP$2=dsMjjU7sK6U2pT2zZ4j=2|wJ;6G zx>)|T8c^ZZY+uZTmW7Go3Z;VTLzB0WqAqAg33LwzL(YJ{X%ypoAp61^^5`{Y|8LTZ zI3>?5W$T~h)=Qe!Q%|stCJUb}(am9)bw}%N=7nfgbVG1X?hRo2es>5j5ydyK;Iggh zS>+V4*q&b>djg1>@7l$0#Lv$sOzL)gQ3@p0F%SjLYnwYX9{(c3SA;vTnV$aI}VbwS|5#b|=W9 z*edF}0_}tQzD!__mmkvNX=QjgnR52iIZ(?Kl-NbzRu=4xU=nC*RG{KkAh@8wGRInIjlv<8xNMz45boiB|a6LU_?j>vmDFH=GLd z{xHJ{XPPf`l6wg;pEw!B*p!p_g|>?aq@DJ=A;?XC7?(cF$ z@iX|B2TGOhN)FZRvQHH9s$OJgB61V@O7jYCwyut9YF&oG9B1~$5p@=~STVy655hw) zbFr{ku0@$Ngq|qa;GntMTE3+Q>tZBLOqhQ+UEQX?{gc3Vn~%utjJxt^J3Z^&mkaT> zXf=$pn}bp97zro1)=d_M=dI9+QcIF-zLJwDj>*x(yuThs-cs15`1^PB>dLYNK+@$9 zFu4e45p=w8m}w&km#{E+ao{DFH3^ikewMism&o8y1s+VgUXGZMsAu(h<;VE1Hm)=W z+?)y(i`)wbb1u7#2m7X2C(~5Ao`TeZJ%xeX=J1u;ljbp2V5{Cp>1WJ4pMJ~eMc5;v zNtOGuU)&Q8v&u}P`jV;G%Iz#duNfC705;=r?*VzK-r>$bLgF7L9}QXw-AwJ@^Wn%v zGr|}_^Xyz&V4C9iB&6s)&zf}Hk*00Osc`((5q9l$8!#ZekaoL|WG9qywfyMjDz#hG(ym>Xf z>|NQi5tFF<6WhH!T)dC%cJFD@_NJ=9QscpO6k6BZW@Zs3#5pb}VgRZz_R zzHhZ(?Og}6dVJG>qx}svg?D$+SuU2WF&bGGZz+NQRWbX+P`_?)W!Vc?VD-Z6QRt`c zX(bG!X~IVXRvk6Mx`lpWl4JV?J@s`Tj0E>`lauN1T2gW!I_nv)k>?nW!@s#&)xD#O%*!>LD@}WP)ksJu6}& zBYPRjm-tOdv^wHNP0$A7tc}3V6)x9*r_%L2<(hdM;w!oD;udGNa#}p(drx(e3} zW|O>S=rrlXt={N9@I%7svF>*^j>zR9^K3LOMHWmrJQjcOH3@7Y_1~2JrpOts zz3-*cnmum8CCrL=jrZDya;Sxb{Y=6pE++>TTs=%e9;}fXapZrX$tx|OUizg z<*KA`()9du>y4=d>F*9n)6Vc5y*WZu5E)G_5iI}a*MZG&*b7$gWG60;t|X1A*Nys# zsOjET6gvNfNjX0JrAKzx^3l#KogOt}TMg^?wr2yBA-Z)8$4Uw=WiqjoM zmg)ERUxckwJj|P6090xJaf1zU7rn-h)3?jgD6pQ@QgDsdjD%;`7}GzkQ-e_!mQB@) zJJt<)vGGLGOY&0j*(|qw#j#)!A0~BS{yYs^x?_Nly%i8zZeDS-{kg%QsGxFZHij(^ z(es)uvC>fmFC2#B(vx?(>J&l|p#?@r8+1RW%{oa_#v*OIO`!Q%PT-|56}WD^238I1 zIz~wk?0TKndR46Z)5pZ?5%lE2nICi0Y%p01X@?KsC;)232kW!?2B-zPG#-h22K?zt zVUOu%+Bx38Q_m4B=KV6wc!gQm`UQQ&!&`|GJbXs>NI58~dUegeZETvkJYX}V!L8AP za>j#kUYYkFBzaF;v*>;J>co3}Zh2Asvee!TcT7u^{26N)P(pQdXZ zj5?w5VoZMm#)9=ZN|08|iN_)EVasplimL#@VD1Vac!PriQq~~vk$!*Ld!6Lz!g0yo zx47$UaC6eQR>N03{6FSnH8?F!`QK3SD3*@d(9_VUsi~Wq%c3foDGGPD6 zY2x`AL?|jT=KGIRf@+VpmXmbjs`$0ir@IRM6j8Sx-}@%F;Rc*TRS}~IXAq+{L926c3HG3RKw35R&ll?i$%AF0*l$tk z+)^9BRjZ&1qK`^ahrer$f!*&O(5vY`*xn*_B+RfculU1&8eG>FCbdI2%ez%M>i8u5_>_2 zp1D=~m2Gd*m+vyI6$DG*UEGw<6v$-gW-pJ1yi*@_cBP`8s6O+$bU7ye!GrMhq}Xt# zxEliZ#n=ihmGY`B05pF$S8+!>nVKT_VK7p$f7mv$nW!q2XGDJbqMhqtZ9S(W_(2BX2nPIVbP@-+@v>F_53RB_*052ksucIFpc%P6t%OhNn8fD#_e1MeS| z_g5t)zpU9CqKqqg(Yx$o+0)6nAg$)c`MJ_A*HC9GgbnNQY-4%Gzfg?*qmoAF-i$v3 zIo3k?Z`mB9>E8xKmNR}UM-p|OZ8tMmjEZ7P`h+}?;TY>7frmXIo38!K^|P4&RiYF9 z9nFa$FPl6z?8~U{A!HGVm)b9P8QCXUCJl4u)+ZBMm&g4C7(|$<_<4)1qpvG0_6rAG zhSmB0*AgzYJI%KzFH@C%uVB|{!waUm)K*OI2mlGNilvWYBlZgeMlQDt1&sJHX^K~` zibnqtcZ>NIv+-*M52oc0Ox={^hCe<>k#`qU4VP6sU zZao8Fq_V_~ateL^K4qi!D*pm!G7v#;_CIU>uvOtojRTCCY>^M($W&}I(cf6hWYHqzux=o(|XNxEoyTn 
ztXjSTdH3{zkPwrf0*v$9KNbRugusakgvkAO`@lMTJ8e$tcm9Z%3+$V|Hq<+cKgT?X zEh;1PG&6_H*Pc%~DhS(9|1{cT)XA?CkvtvrFo?b_+EA=p6se!pP?PZvmI zug|IevvxQb^eBhB5=xOMPVK43(_9sJ(-c*n)!#;$;86Io4g*T+OlUSl-xUEi%TS`$ zPYkxM`rm*=AmY87fA{5*K=O3mX95(uJ0-dNcKJ`)My&VmO~z`!^Qa+5{aMOLxBMSm z3I`~}@yp%0cyXgyl^9kN@y|SAg9_y-GkqA(o37X~_9Okx1v68wiL)ToKz~hIDwjMv z{9}lgpqmrLhYrRXM5ZIw&-=_D2J*4!+dZri3hAnIPc4$9jN%2^Ed{+anXUWYCH}#K z9R!oV3yaLa(+5d?4wxj5iQnU0N%r@|*Ic=GV%V^{SQ}BVm?w=`c;Ic)ixpL|a{Xqz zM3#-%l%Ilt1NuGuq(-d5M|x#>}Ijn!)|H0 zD}Abj4_k*ZT%lumPSB#d@Oj~IW!<4%jGd0$t~;>(66s|UpleWgtr&Bs;+9VIA0C9@29PK&LE-qgnhLg$&5rzirzqL}Lya;j z1`r#v;QQ1rim2;FHsjpYm8S;g756~S;Rw)3CmAjGnU>U%x{@X+5dP)@jl5h?k-b6Z zNK~>+*u~YIMViqlegkxW4(1!rI0++cBK==5_NVGZC;sjt_Ul}YVPoU4Ix1K}p{E`- zQC+_g%@gw^_TBNYbKTEy_=g)ags5pVtin}O*_>?YA8NI*<8vH_6JYE4uLnpSv z_Dv#*aB)_6CL%2sWSUer8PoUYJ*r1?q72Oj({T!{LIN@78^R8rd{fgMH0k|O(UaC+pbialkf)739#;==vd8@*e zSzaBhs8IN-xax8r*pR}A#y$sR&F(AFB@sliqYsrRW1fR=_`tAx6VJMbM6vaZMyb78 zseN`f(YYJ~8$^Nlf>T2viZu}D@uep)^wJR0qu1zhtEw3N@z*{fbv*9Oo%9z)FQ2PR z+z~dUV%S*xU4p7`cgbHPfen%Xoq6IOspuuFQ(!Xy_kz{Xa&Tz|#(HVFchKA>|=D$LcVDKai=xNYN>|w`fO_ zF&EsexZq;WPvS-QzFC}>Y1&5n^?d(sTjaeaQf^>5{mg$c&i32v$M{U0CJP(Qg3mS| zb1*P@|0XGrMlpiD2kJ@e$5lp+NA47h*zN7Fy4AXeZuUVn`uRG32}Y?#tbRS=@jN>! zzul)}_1FUxGpw7^Y;Kqkq9BP0IoYFmMh!Fef~p0l^2Xo#DvH4$b%D$px0TGiauIh^%C{}yY$Xlcp`Buxg%xVmjj%yC2xZ6LM z_V3Hy(GgxoW!x&(T#D$!d2%c;gc;p6L}?U0RR+(}=i@GZwiVI=ahnP@7%3&RW?G!g z(jo8fh67y_=)L7G7>8QEOp2c+&lMHv5PAGNYm=HIIC0nHA2#gt+Bvu*=L)u_Hz8Tp zPt=Pu`W{11?0uKUeO3R*A`=;`s(I3_a#Zsso{X9r>cxu}F`qtV%+Fh8wc)|0>&snw zQ0zUW2i%fH+veUSH_eC@@+xs^|HMLc;Ra&3WTh`lOI&%C1(Cbl zzb5Yo1g$oAfdReu4sg`t4?z~eQL#~cR17!x;(EYN3WyNm?GoX+Z_*@kE5=0px=?0e(EnCP+S>;=bqgc z=Qq>uK76xO?A)c8Gh#~*vh^4?9FLZ2&MiGG!i=XlyH5?!2kH`{|2GMN!7fn=@EiD5 zJLv;)XFv6-T+M-8ykJw=Sk9NLausfTXkai+@{P6{ThE;hBWL`HKTF{BM8e#{;SU5!M=*Q zs~nzsv4(md1e2G@U@adm-9*o{VFYT0@LOLKs99y)eVV1pw2+w< z`ikug5?=sO(yj3V4>U|OuZ*+qYL#$qNd{^d)?miX<|Zy&IspFz;Kr;#@-VW3N&B|8 z$Mk34{m~LSE-X{>j2ZH?{|s;a0#Mp#Zd&*A{vE8+0fnkTOtpg%@Lbcm-L~STMt;80at$SS znxBA-QBIINd#W8eq-C8)ju6W*H&deGny&wXFB$MZ!}F)P?{Gh$<)#sJJzC)^*2|qW z6uD&aHo9%bt#aq*hEx%9PL1zR@e?I?eq3^4um9 z!kFs*XSeHK4wuZljcSAnVX3Un4P#z{8oPLUnQPYfzs5k6*%gRz|?Sc1guuy9Ov!j-CoNmnYHiTQf5YE09*{ z7MVufSkup@s?_2HA}o-}p>Y`)?ARC>tk?twznwdyApGaD|9M{JV0kaett3DG96ig+ zy%n>x3(a(4SNX;Yw8zgjs%yqKs&^BI z(wQwIb+znsphqh0@LwJX$oXIUn_oHZ2eRMVQybou;q-{UyrjO|L`rIpnKe*zFP>?M zUB2@jYzc&P583n$GF_j?EgJNJDep49#{7@&0NY0y`3VQKgwphY)O4neZcFxXFD_m< zdUL2qB+1uMYW3jxR%zG3@yb^5!uoAd1{LAFKK;>5cl7~U0~}}3)zPb;LjISP2sl@U zQ59LVM@pV(D5}J!q=e=woSmwj93HqNRdejwjP1UPFZKC)V?`T%&)Slb@mg*!t}@dr zY_+0d2**9U>G+4gPUe5DnzsQ+QeFgZw{kRx=>CjQBBN7$F#hswR0!49oFsMud+5{^ z0X=Z`SW+HP?T>8>YLyvo6=}`F6=}1=fQc)fj$Gcq^uNI1l#Ne0Udrfj-xL(oxP%cl z!~GjLXY2EcV1&=6r>e9e-BlGy8Yv zj{8^j76@siac$aswzE$cx>>9jHB3>5-N^t83eanSbOc^{RjakrgA@M#M}e=(JURR! 
z@=21>Svkn*h0RAPg?&n~w+zF(6UWD}uZCy2ge8R69uPVL zbQp^+D4g!hvwY;SavMZwtP4uOwKn*BZsL_2-e&&zdN&Zg&y=MG{)TQVY+^wP!N{4x_=A;&24 z5kKkZN@Yz?i;C6#U2?=H?MI`Mt7#Fw2MX!xs&i{$ukqgjA_9AvU+wMVUa>r&s@^n)VSf|Ld2ni`y!a5F-m79eF+e~i#-j1S*D1B; z&n8)xHJ;YL#|hkdw%Fe>|8{v?AdHUV zTi6IDeMl0+Ctor@^m%~$<%>^2T@1N6FRc%DIR7?zsJliIZ*^ z+edErfB^@ecI27C)z(knG=&tH(+(Q-l%J` z0VL2+eT9c%Maz(f2tZQ$=Pndr`8mr zc6QnmLUm9Xpn1d6UwtL3Xn=UY_uHF2roP|--;C{cpfa7hhK2*UA@` zc4{t*7linTYp$`?q~5bOtPq`$ffBYlfsg!rVT^ose{S*VbK){sMtw>tud)qhM;&Zc-?KJA;S=EFQZ*?$7Nq3Gr9p%GR`)JcD0X?Ya)3H*XD&%?AXja)qIq@6vdeRA4Dy1)l_cEik(;f> z_~&1amKkevK8T%nA-gac zjPW_Da$kGg`M4r^j100Z)ap~UpP39h?cXgaMY+@(=Ew(t4p!yvQg`9scz`7Uu}GjM z@^oLO;>2wsrxeYNKrC7lt0thwTYR1stT|qy`@voZ9r^9b&Ovqrt%`L<*L@$kE3Z}0jqvyq z=tvmoqz)?ftuFXIS-nRqQ)1{*KOgC_Hb(B6G4x1@m=6~r=Ti{rAT?q91_0-V>hS>Yp(lIc%H4i%5RF+Ns3n^XS-Xk!!!SS=4G+E}L%SXuAe|yA5?#ipFiwlEy*TyPAVsdh4o|un1UmD{4 zCq=u9g4G{W^x< zvLJO9m8mJ*i9vg;)qt8}>X(TAU9VDGCza%@x{4XQDoIm5PxGr2|@HCC^Rlllz_YH2g1x=t6Wzu+^TM=cMyM;VAL zMdly+(!%aO%rcY!oOx|E+c3{tf{(81>DrFBs!M4HVx_fsM`cQUnF(L-e7YVwJB!Vm z)k_0(JXSj^JjFKbC)?3`t27fN-8stkxY)|l3_uwwTsjjMMRHPi6Gr}mSH``D;)O8^ zw0y1AMBm_#>HI2uU$|=Fw&`X?X$%MxO!`N!Z;XCd7>=;hdc#GwJ=w+d{XGlsd7I=N zouA>rZMRR406MCz3_)#5=_;?*zA|LGbqATyA?;Rdh4;VIFK+2D%?8z7JMW&nB$C)O zQQw@q7$L>4{FN7%$?mfq6|;cB(z3r%T5PbD>dU6HSokR+m3d0HBkXtNy$1LaNb|sk zUINWc5(ntJ&#OW6(S)?AFF@6?O3NfMpCYG0IUpQn4*wk8-lkB!_lV(NkLIAX8G3sm zy@x_d!Lm~U)K*p(e@1xC`BH@-x)V-awx3p73c3B!c0EH^hJO`bO#Fi92f8IdOpYqC z!=Tv~a9Ux~YLz7g0kIEeLy1N3h0<22JDBbmT|Bx@x#bqx&+b*Q148apl!Vvr{14ET zSRMQIYp#Tsc0W0=KLg`$$S#SV*$z48@&gO3I*0X<+uBH;da;UP)xGC4A7ZuIuzbQk z4al_-k}G(PtcXmF--kO(0-h^zM!wHKawDGR0yx<(Tz3RKJrv_r>JYa$5O=!8w6>gP zSZ-#XL;d~n3ElPppV4T_O|6<1wd!?J{CC8sD-RUJU=N*VLN>s^{b`5<%}*_n$_Q9{sjxGurOk4wnH8C zRPtqjQBV@fhA0SB883hJJ*}d=&I<8>hWT}#Sw|6}-OK{d!lN&q5|gf@LQA(ny)>!z z!K2r5u=2N-Uth_oq^pBJpo-BEFWh$O+W#EnYheuHn$?~=-z)S%%$=K>3RS+$1QhwisNHigC@-$5 zS+M0BDR@@u?RqK8bTZ^F;nFqQ2lm-sYt>o|pghZ_oFKB{;3EB9gr2;5^B0drEHMlg znWHbdkd=piFfJ5i#?Gby(o4`MD77|@)#d0|7FYL?TFM%}G@1)_z`=<9lE}J5MvjY5 zYYz|Rl7!7#uGkF~!D(n{&W4DbMCgCDlx^0MDJUwcJ$)K_?fP{p1_qy|HPGskZ=VQS z4?*LqkWWQHoPzIx*2I@cEtq~!&os#7dsU^l z?%mr;kvIGSMl0)PCe5`@6lGi%$~LT5+~&lg8v3K z^?GmI&j^=^D69ajj{a?G6-7Yc6Y{Nc+MZ`g6#FQ(cH;5nCoV_s0mR*am|=NZ#YIX8 z+y~eYTG0HLwgG52tuwzwawm2n5!4ckf6c#J9*m+`TO#R}ua;87{=oF@ZPhHthPO4d zZ7Y@S^y>**v{NInYO{Lw=QDK*q&G z#Ta4R(AEV8F$K`uNk)Sz-Fpd20%^#k6qfPJ&w(iGs<&--pw4@I18@48+iE*aZ<*}? z{}L&HwH9KGk`ibi+8bdV^~~D-$Fg*`D}8>^gP){+Ro! zZxGn%yQa-nBV|s9eUA28(+~6D3L^?fNbztKI%Tiy(K_vU%&k0_HMN|EBQl0J6%_3RhvvPI@B3>8DArHd{Gx z)n4v)!zCR7khxc7={>OCG_&4!#41u*Pq(z4Rf7#XPXYCgXz>V9pWd!US@)({@2^jm z{|IIyA|$i||2&*ejevw}SY2fhSv2V7r4?4(L=G21gK5ZRDw^!RutrIo^cc2L_d8V{ zz>)KBThOvD=6hV$N(El~X$~~KOZuRFPeKiP1+)M?fn=ptTMn;0k^Rd6VNz04h2565 zfn~A{95e(|*e(wi*z};^%YFoRdl*UkZ~vBW7Oa-=REVq8wqmhl8KrWGOLO?ti2{wi z9xtXE-w&0$q=QoophzJ=#vrBvBB>gn3q`=s0mlDcyar7^)6v9~MDtlBSadbrp_!8! 
zann_eAaNP>0o5J**ZC4)Ao0SsX>Z>W8n|?TmSBspG?m0mVcY%;`3OenNAOz<*Z084 z47F;i3IzKrTzgC-493ZqJV(0k)?im_G42h6Nf@>ajYWry5iPI^hXp@a*TtBqA_ z_Kw_H!pJk0Z!dWbtmf4DOM_V0ebiL{kCC@Tmx(}BzK6|vtjf>N?zz_Urof-pe^$+B zDT#+!{J~S0yW~P6fs?LQD2chujvEb^IP38Z7eBQx9Zr}s<~gUJ)1Mnyz`+FuN!~D; zt+hNmP-716WbElr)$8CQFZm%@6?eyONGQTk%5g$k0JQzVj2#eJiXdmAkmB80bGY15 z@hSGB?ym($n?JecnFb{#<+4AV2_u!LA9^_E8i~`B!++rf+r_{c77*;}-Nt=-fDpP; zN=HX^gL`{#;4NVw=k14bZEga+-Gj+#10=Rkkat@h`RtQzSgi|e09-tti&2WR0pv}$ z0*SS+T)7W;B|J+r=bUW8l!z`Uu2q31UO)%-yYg&7mBM#$hhR&ZlsWi|>dE(=r2$nc zZjEjL-=}VN;B1Vdib{X`=X-hkC(-}Tr`)$2%6RhrnqQ?msPN?OSA!jD-=28WGu_$~ zM5Y!->sNa^TC&!661{qO@^u`M+AeEoOmNSly*rfVo^zu3t4(yQfKe_uLD`LFlVl9TCO)`fzb8>H;iA zpK7J6sbc#sVYd5)9Ts-yhxxD@ijW?CxqegH!XmB2u5v#&5wN+`Kwu*4Mu5J8c*$pA z^HO8KHiVc;3mgo9-1%u9S^q6C0;M>9&*P1s!tSdh`g3#bpHn%$dOQZFib^s-{DvKs zAKE|&+sc92-mW!rxQSC~b1P(0cq9W*fG4jig_Mdr!?Noux6XI84K5RdVpq!4$}{9P zs;M%3HA9u91k&OBk4}Hl<9>1cC54ZMf7unU^+~QgEa*M{VN3&j@yCxJF)=Zq-OJCw zpaS_E0Cfd&A?3lChs@GIo|d-uCmcI4r$I%~eY7*6)b)@RlQvt^U*T*kI&Dm?iPs>> z%m_tY1NTu(r_m9B>wQA2EaO{RvZaP7LMqdrI#r1z5ZA02d>*erVNlPJc zCMtRPCk0RzrN2KT2I}7sv%_0kTNUB(g0l08V-56ZMV`@08H#z`-zzTpI1E{CKjj4_ z5UwJlqLPR0E0uQ^alQE_4cpVvs@$TYIw&FJ{4iW#J$o7El^2x2*7H0$2TXc;7I;j>YQ@sDqsaE4X$02yafKOp&~U{P|3dX1+Fa>WoRBAxMVkmuCja3b+kW1_ zw{M~9FtFU;uL+1_P^c7;yFFoHVbRSue(ig_AK{k(1=lheU4>#l3FlaO<8(U+0516L6TEUCv5bf1%=0qh^a)n2A zS9mG5*T#(ms@4NQ;tQc;U{KIvcd|^ps7u;ym4u8aaFPK9axO%=3d4siuz%}hrQ`g{ z2kjWX;U09XrWOGNv=ilXUwgsw05z@)R0xu~B-h+3ad}2kCv!mFViR^4(}yrG^C=iZ z5}+iMcX~17C4z)m%wWuQGtS^hpyY3m9v8j*Bgi1(sbT&r=J}WVcFrAS3xfrzfl$QT zzY#3MJw@8n&qo*;;16>qTI|C%Tmc8v{JkmF8ZiVMYQ?6(8pWA7T-_HEX07@4$p)}z z2g3&~Eee-1K`>djV}GnKropKUh^hOyovZNrZTxq^&0%!uT1Gwy;6l~&JE5VMqj|u~ z)_v*&^+~g0($;EN{rAOVCwJ=lu}E#=jI1tpx)P(aHGqK5I&?ut!+HaL(N>St$d-KD&9T6GH`(1hF+$n`neH8%BQ3k$cng z`5v(J8#jZze_qLMqok)lS1bHZ57N(*&3%TrUQFM%23!sXSBj41EhyvkfDjzO0uF?# z9DXo_x-~PhdK7vQhoKezL_C#8!pukwGUu;;r>_#lTz{Ph!sA~?1n90_{1iFQMIQ?_5azmvKdHMNwMCrqqmw-x@SG5@uNChos8gqKVWrtY%QM8s}l<$!; zXz%{P zP9OLa62X7}zHx&v&x9Oeth_fVsbhHj>-?@t=hg)fU4r&yFDG$@QW8Oih1wn zH$nNpep)Sm!=`ej!6JFA`^Y_`fY7}c3{_Q zPND`Ck*^-vmx>SEl3JHSNG+Y4sNYiu(c+4>Y z9(n=-Le(TmS}EVE{yd{3pUz1~tG(4x=r}e6asM$ffFqEglm69HadL7pBqBoV<;!f~ zJwcagQh;8X<$bW}IUOaR%@1B&$3aAN>Q1h3L22U<9BLtc{P=MZuw99x4a z(r(Fy%>R~W!fnp9A<7(0&biq-94Spou35kNZ1P9oP77;9_X-$ei#+sd5F8jiCh$&n zhaEKIz>Z9~e~+DH_urQSk2mx?aTT`emIj(62!Ex>&1rZR=&gpWU(#WS>_g51OafPt zGd|1!6o3cB#)cp=bwK9qYYr^<466ng&%vHnCZCiXT&EbrnQYP!w2+?U+mojJ1&GIc zRzL`dnMTI%d|oYg%%y)6y6+kSRG6&3$P3bPjDE>T#Fv# z1+rb0CdCKy0W2;rSti(~C6Xo6sI~?-175mM7TmxR*V_aJ2m22fGiE;f1%6XiKZE>@ z#|8UDFog$+eg1n_q)9J)&5$VaK5(uEYcN#xst}xmayD=gp@-C-e@S{NKoqbXpqriI z7}z<8iEn_7V>`Y)$Z}g53VHVq@+nGc>I{&#XrCj0m$RtBGJzTl03(EO)?j$?QhqgT z)<8wlEqDiE-mdJ5DsRKTg%}oX@dJZfKwKh>30r;MSUJ9w>Z#6zg=U4$E zXYs2o3nC&Sa00$|0E8Wfb6zM3yqSwH8a4$%mB;L7OXTw4U3d?a8bFE*Sh-lhNw2Fu zO#aE_H}an;wyocQ1|Gwwr)l06+i|)Bc#p8>w$-KcFw4ZB{y`KhvWqm>DC0}*U_>O$ z;Gg+K_zLyX5CZ!15fWV}xbBJfpP$O`=lv-O&y(gs{PXpLaPK7xXC#<4Fpgj-xB^BC zRP?9(aY}GF92F|@7;r)xCy4jgd;pi7ulebLWm8j=H7G>b4CE#X3JQMDOB8jHbEr8; z3&cSdcR$FO3L5&u%@$T@LrNURb0Z`V6#xJp>3VxtUAuOTHyb1cb)~>8FStG4tNZnl zN7&~ONyVkcrnM)7dMqR3x<2vf12+`Vd>*OHf`J3noN%k0aW3cgT-o3FOG-*Ac%CJk zO*&T$5~E=dasgKbR+-1|X?J>j5c2;0-G5iqU&#)5&7yrCg%0=D@=|0!EXL~DN3pH^ zDU?@=<^2sYl%|_T+WIVD+17wDDt0~90!Mf7$WOnH7+sZq=m>N&P#-((27*csM0!xj zQP%;kJ9nOSJ=_ZQ+@3da3WU~(hqIfy^J{EW8aTWBA?r01>7hiydZO+%M0&{?#L(Z( z+cLqXXX=-K|L31ValM~xm0%kVgpoYY3SP*Lx((a?gEg&FTIq>eYW=y_qAs&mo71f~ zt9L<0wp6)RbMN2Gu1>ep1Q4Cc0;UAk#C5j)^9Mt5uifm^t)#Pmuz%LF=#Ne-q|B27 zWYZX!G5EHOn)%|}%JK7n5&h>e=L-Y?y{}ILgE2U!cw%Fd3H1(tj`3u*L3U@& 
z+n{&nigd2@QhyGv!qqmb4Loz5=Rg134N6A>6^jTU3lM>#9&BGyvz{BcAft%SVG%S* z&>K4sSl3M-Wl;KSML>Q*eD0bXbv}Ee=};9$Y;T`u?eTlQ< zEvVq|AKI1Pz!gkiq7!s;tX}RdqX@`4X@#teU1`9peOtkTKuEpe0-O%H)UV8=nVo+w z`_%%=@B{iw9fU0arMTJHo=A9ZKLb#9x*OjEj_y7KPw@96A-(*9#!yNgWY?fkm>mv+ z(>F$t&wr>1Lga&2waD&Oe_kMs)ok4&GXlM>Um|$JrIr964!{@<0 z`DEuo#Wx&;>*jEVqr3r7<%d#bHDU1E(_j|u(+FU5N@Vr7{K0=rli@X<-6d>;SelVO$&5e7l{ z9q%L}Cgu?m>I<*|6FQ~<69U-r3=oAZCGa2Xw(WQue!$D~JvLqduAvPwCj>xIT(S&t zvBUV-;&*V7H)GrSAw*o$_!z-G`#@~8+d!alk%WH2H;*CJyGIIsTjbV}> zprr)a0i;s!Y3ECKNu5{{}K`ZcfgY zb3xuazHI@;27P0IqqGS?>jwa>DM8H|B1rSy$(nJ1Hcq%Z6Gc8IyaG~`$cG}Xi!os4 z>7$kIdIqGxZ;VJth2&!HP&E313Sk4NT?rf4!N_O?pL_syCAR8~W;#*|3N66$5XCkK zXet$slL1X1p;V?Mn_R&L;>0{b#s)vYt+-Tq3P2oMXPm7Mw-@3czWOc~N}UYI9~MM4 zsQwC;OiB5(pzPk5S00WX*!zBVEqQ&w|I^Nu1~qj>;fH`iv_Qv|K_nX7*i^6}E^LNY z(7_EsS(9R1D2lSCr9coN)TLT1LRA#19g7k`#VUxBk+wjIr8Y{axCBIOK&y;c69m%l z=Dj%1==4|rwA1{{efPb4?!D)pyYe`$Z6`(Z(anr@}TV^wK9`{S^$d* zdlD7l$%R0(H~W&XLmDT=WZb3VmyQS}O>~dr0+nt+`haJ?W2md52Xg*6(4#{kR#=%@ zzR5u?M-(<(T(|vtI>2JDp(!xXrxizLgM;j5|Dq1gBlg;@vx22%ih(EP5Fbi+viU~) zn`R52Dr*Zbp=w3k;H}3=a%((n;Ls133blhTbke3s%A?1)TI8KsO5_h(EF-k_wh5rM zb+}Gqp;fe{!aN7Gpi%02#2^6ohJP2S=jh@TF;g2Xxm0fyZ58cmQV}Vr3vq}f7mqm2 zXQ%BU={QvAWRcp9d1J1yvAnpG)$)~NEa&u91iNcj=1VC5BgEP z!Soocc&`EG$<>J#T4t<@QRV40g+b&nxj?9t|J_G@MtonZOG)EFANAa0y3S`0?WM`( zo8{=gw>Dd$?+Qf_%TQtP>V5AF9Jh^hvww?o4T3A(tCYsCRwOrctKgyw;iY`116YA-U&p>waL zNjlDgie&Z%$pmT?@4|2~VsP|s-FCL|ae|n2!3ODP?;QZ%wr4t<<>+YvZ|Qriv$nX< zwk2_91H(!I$Y-*hxya>ChyQ+NZvY)l^^+O)E~aS<2%`9taD>d{QMuR9ya7{e1v@KWa-1o(uc0XpP4(4b@0oH-KWbzqMZ29(AAo*!xE=DQrr+= z0w)CS9TETn34Vnuk(5-s1aS|Tt~aXUH|9Bpwsgm#)FIIbqRj<6Nkr@)E?%ijJeJm9 z(sk`{{$LLbKcdeMISY1W!Sg63l3OUlg|ps{AEI%WQgC_bborH4Bm|xQIIe0gP=&n{ z=J>!B?hS%PX~n8UElyFa=Fz$4zB|eHh}CuLa>|RTk?GwxW+EvAQ4u6c0J=LBekrNH zPrm%-mDH5jypdFeCAmAY@AkI3clwo?<_Xu{Q-rl30sW{1kWWNk$T5%9zUPWadk%-= zi3n`#1gwa1O?;P3Q(y5bkWFM@aB+{&t;&uPtszpb+w&sQ8NFdaE^HBhd(h$|hi zH>B2PK~6k91kB-m_%mvSqE99NATsrN9^6DfGG!t-h1cMzI$3%VFs`$6J%W-yoq%N@ zLXh;Kwzv)5eY-T{qi-n!(nMU4SIg+9c0u%L zAem(XQ7E+^+VcD~tg+=}Ty$dlk=S76#^y`~(n-fQ%4C5XHgx%x3nXcY zXWD}aScixWQfrQttSI(DQMvQcC5S90cvD3b8?-HSj=j4O;7}p~Q7e<5M4XTLZ55ow zP*_UxuFUDnkKT}G7!tt5m&@&}cI8Pa%yhL%rHYW)P&`NR;Y4#3z9>cFC5cM}R&Kgp z8{T#~<$_&s*}#Qo$POggBp*ga0Zoz37)|jqyiP;ysPjv2mOG8}unnL4>5^QzDiBs| z4T5FT;V9-sH3a8}46>*d6+|IwqW`#EMdA*mo@WeEoeggIEX&kXQ{Ns88ElH8FyM4u eVDjHDk)LVVdjjN@4mAV_qx^gWxMe=P9e)A2ng%Wa literal 26726 zcmeEuXH-*d*Ji|y1q-O42q+*$K&b*!6e*z-2%+Zzsi8`hjv}Z?Gc*C|fsjz5KKT>IMB&TDN=coS2S?fb+&f*GIzCtX_&jaI5@jI*jcc9TDiK}IXj66 z-V+qP&2H=N?&2mTB;@!%e?idM)kcVo>!t^|$VnF!LpKB<>fc|k4M{tvwNCGf zF^8I;0PBFg|1>{G2mR;iS-QX9z;gsahb}Dt{}61$|D(IH?CNo7tG~i+y2NwAsCU6L zPSCWEG6~i@!Uu=L!@|N4)4x)XA1xG73A&u;v#bvf8dFzZ)RC_$Do4cg$vdXluJ%;`^lY zHMJSYq~3?ZAWQpXcY^)D4kvWeWc^J{=?a6>qM$Qsw|dJZ0%Um#)kDp%6bjNYdoB$X z##R(+&q9!!LC1h{B-p0*w`53mEXp1-)6<8%EGa5Y{rKV0liJ@QYW${TkkhFK`__ z@-w=(+9=TL;pxPdJf?GZbanBO`UC`buS3sL?0c!W^i&y*xuF#3ESU0n`H{PR{{Cb~ zr3ZW&m+!eDOle{yDT|cthl3v-YM`dbp{U8MU7FZd2MZQ z`ZF#20a13HW*wntYM7YqOxg$USc;Q$ET|#`f|` z`nacPzBkCpQtsgubw`wHDw<^()%cdR%N=kiDmuoQ-yt4_!PxIUKRUr%rU?f>bepyg z#|RkwgLHQXx4$g7#UtFI9l^4Up@cz?u3g=J=#Z0kfHHZdGLTbT{K5KMqF$ELtFzbs zfgY35`N+8uZpLr9-EA3;C73u%ap$wc zkhM7^(v_o8eLZksS*Kt?b065g3%3-zJO_WSj=)!ywh!jqVz$vV9RVpWu#to3q26QVzLN>8s#N>Nz|1-^uY9Ob+7L4pI=Yx$8wN3K6U z7}^dCd;Md%BKI9n%dqq?uZM}V9zXj(p+Bc6J5?7g8Xe3Ef`Ss0q&!M~#EV&eG+6hV z9#8V>Sss`5S%E9m_`ci|%c)05gY~gNrtP`m$x`CiCdR?Z$E_M1&w!BjmF{8ONO;ad zSy|U;%TVAhFfK#R&fX>;hQT%;0!#a_+bedqY&f9|N4T-YY^3b#Eqt>)R=Z_sytMEUOCi@ z)Ll%@B=2|6?a8%L+uUBRsZvqyV`C@dKE}4k3g~vt2V3*4`Y@AIJB^$j1Jz`x8+yZk 
zYNdQeXT+A4@-^X3Sw=_6UDq$hq)RV}60BSHhUKsetXC+&C{gS|pB_&zo-H&{uM_5c zr(qqp#zz2;NalSSgNY?%>?>^8Q_~A}TK8!>1;XT#wKhbAfyl%DfZ-e|=N_HB-U4^p z-mch(cKPkNm1Z9y)M6~-H^go7oRk6s)Mi(?vH+$;8!SuoasTO-g}V|xn{K$;=ds@O`j!)o*ObXnfN!zZ zEijS+VflW~CNGjtB_pG`tI`8_=6yH4yj7;9GW|n`L|j=ZKOAFE!(d)trpd|5$B$gU z304uwwwN8Rsp2$xb9MW@6c^F~>5hu4Vr0Espz%4mD^ux||7w>+(q&Pt&Hkkxt-w9p zsQ6Cm5P&In$In^>EWH0Ryp0fkm9~J zQ^_wS6zQP~T#rkZG?PlpUR%HPu>Lo~39)w+`IaH)9P_+TyP+X(RdsmR%R#xH zmma=DWLoC;r%Ql7_96wFqsC$7xEm(E#iVN^I# zoe#-ZtczVOY-9`M%=Phz+1q=#)-04|c?ESW>8Fl>!gxbn21vR+wTJPTL$Hx}C=nSx zSd5JoKI3d>E)Nwpr8*SBU4#U-0*nqKnM z=|xykX0tSt$Zqk+ZeMlL*m9}QjgQ^jzbWHe+8%VMqdm*m>Pk4ciMGj|=aWjdv)-l8 zrTnS!MP)Myt97^|y@wG*al{)Ifv##FH*#+K!|ZBRyFUrR zDY8VQ_GVP9z*XE*t#OXA(`!_oK6G9B%V|AM*AIP*_`@c;GOpyQ3YL*B>$TngskE(&+7t}6%{7~@=0q!IOmCR5uMkspJ82xWz0c(cPZYg_FM(~ z7xwO|q`$pcl($dmY_UG6_ zZc|IQN4jdxWUS(09nZ&fC9-N9eFv2!q?;_hOn=tXPw$Q9NtpcDs?me=!*qeWf!)cIhkG&Gze~ zls*sF1Z*evRJASwyfKxNJJWI+nC{z;j-Q=aqI(1Z7Df;`oL)RSgQ^%p>{1SKi^(8Q z1(>9RS5e^P<*Dm#QnHcKi|<;~GgLKj_lm1R;vcduNRibtQWPko-DjnqEI)eYq3Ses z@Rqh`gLSGZ$oloZ$sdpNT+j>)GxAcL4$1a}eHw<|%uEpMx}aPG&1`n_O`ZK{ZAc$u zH5}M6ec)}ACy&QU7G@pkz1bgm@OM)Rb*tQG1~@#-x(sVc+r;*&2zTF#elqa9NV9u< zU+*}T>$=wLD$~(fL!hWmnghZ}frE0wMzXS@)z$ewB>hyd@P?>e9-c<+GAE9}kf>^D zF4WkPI{;g>2V_nUAcfNE9fPE3sAJZ#1*zdRPPN5W8?AidWpPYIh|ap+|P$P%8PUK0rVn zyH&#|d`ODk0qqAhk~Wxem)ekvEYP1@w-aLB8y^u{N;Ae!{Lm@jm*(1Lra%GCaAhY2 zTWoVtkAQ`1B)#|#o@*4MI}zl^8?O1f$r8(<0Fs2g?jLRdEjm+eHfJE{qthC-*0#Vf zYw+`K&Bui^3z9X;>o$}GTJiz?d)D0D`OVp=7jo{B>qWRdFVz(hBG=>~ulHKJxX-fU z?6=qd!Fi2Bnx9>7z9$BaeR|tju;RO;kMSX%-FqT`YEwY3vGm-a&cRvM2uUKaVDID_ z$_G^iG18EUr(D&Sn_t(TEJbzSbP^B2VK!_D1Q$706GyIoHHJDqqRn;vQ6sIfx(lXfgln5#M0j+}!r ztEq6}^?h?`0gY3vxztqmo~GfU3JM_0nYIO=wRSi?{ao(v-vdi*AN!@DfQA)TB7cuK z)t-QMLaK%#hlWnhbr+CxG<;tRo`jWO0neJ6kar3;lIU7#c_(JDRnYfl6%O(rS*POP z?rOcpjNXEXt%y67@@pog&jXWiiB@88OE49{I`67T7L11%D@A4*k!mgpCbvYlSR)-A7V(%AsmUy6k<(}*PK+RXjOd5RT`C-@ z3XXe+&1IJR*%mZ;cx`ZbiSgW}^o`ZNyz%FeJIXPExh4cx!zIeEA5 zhccC~{czsR%aOJ|4YNe+ym+KSLZxZR(?$#OP@R5q{Jsp4QmgF0@0nm|I_Ft2r&sV^ znfxPOqiuVY+kbOVA0Fenw{JZlC#;Z>!HgCGzUp%8$T`^cNoy%CxBV$={}kq@hz^AN z$o!#&@F4<|9I5TcSAGcF$jB}5Iv1BM5?vFLP|k(clw<%!XxX$RT3*D(X`*LupVTE^ zbx<~U?4@rTt`8i%(rB<2iheTMCvU|kfo@4FwOgPu6Lr24b@8rCv7DS+JOVA+78ZNx z&BS}9U1wqP<{Uw=KRibVd6xx83zU#on}97s{sDQ;Sk7yokCSi0YOfTS;yvxCuM2km z**p#ax)rZ^`ye38A`e)ZfR{OYc&mY^{DWvdbxM~aylloLD1JcC!TjvAo>Q&gPIyQ7 z<F8-R)4YOa863gwChe~el8izRpog_)Oywi^nk#!Au%4E*(ppp*(1Q<+`XGZfi#R8dZ z+?Q*em}IoP3Z*>U$*Z?)Rf;)w_Q#g`yj}575dK~oezm>XpgH$Zoq^2L-Qd2>24^1< z^#+BcBbOIiEycC^-9cxcCG?VD37?eIrcS}B%YSQa>BT!%Df#u~mtg64&db9_xChkn zmJeEw8lq{%>i}@sT>Qm1ruV`#WgweUSJ>Sk=YJcHktrh+4e~4pbXHJ!OUfHH3TYsS z>ZvN&vyW80$5+3FHm)Alsqa2OdJWliIF`X>WjUmtwkM2hN?++OW53Fdr|Y-(!89c8Mw%*nL!nvNf)-IE7$d1?k+ zNIyX{{+B|{^laEJ@&jewT>Ql4xSD%)JiNItcS^nGo8#drQt;ypPI2ZF0mqxdn2d{S zBj3E~e0BcfwFlp0NB4NV6nyN8+njC&cVJz5xoE zM}Bql7fRZ<>e^RTK!S)XJN^>OH`ynfUjZCbM`27=$?Ak=!LDP{On#Xf3feLc?n3SVI0#rk} zC!+MYJ4#18g&l$&5d0tM4F-;l?8_^0@5{tpY{_P6?I+`4@bT7^l?MjAt2v)KV_;Z!A~o&6@5 zLWqHLslMP55D5Hx^-peXho0Qz#K4ZWv}?kr-1ZYTP@B?K_T7bsl{I?{QcoAt1M0S_ zI0^2RQP{|?gkNRY`K#FxG1>9X@aE}8zq<&)Ecc{8i0Q%@_;X!1Nu-|t#FQ+XUp#!R zeD2_UeBYsR326P1qep`*Zp)=$`K^w{9F@5|lie+3=l{ni8;76ie>OZfksD*Q{Ec3Q z?VgEvzcb=%1%l#_NJ~Z3A4cE7;; z4SA)8@z-V@SCU0bGTc8VsUJ8KHru4v8+!G&>&@zP5o4Qk!l(aFSl;CM378@vi(%}u z-<>@q64ebktUyI>MK%yiZ-+UuRy|m+BMEe_3E_ln;9MQJ@q%p3aMR2|&ZW4N>^a)1 zE+WCSW4j+$%b-@~cdMzfv@2i{1-cjZh~!bN*^#DOZr3PMPm4%CTHbsOeRgYygM2{} zgI&W8YrX~rF(Bbkf84I>5qk}gdqr`oy`}a++4#(~sP6A05lO$DGBI&^+=I0biGK1d zUHUsH$=O8fj@eLQl>Mz2hGQMD;^c~_4nu1 
z)YOEEUdsdoyVO&M^l&8|M8n69B>`O3bmNKe&O`9B_*1AR)&!63E!gbGp$=?FM@0}n zfCrXa)$CvItPO2!ia!2!gZ1?l{Ms7)$9*LKZHI#Hy+6@PHS4(0l52=}fsbH8R8Wb- zco=)GQUIqOooSKDY1+=a-QXEVqz#8_!e4)-*Swxv6TC4uZpOx+s~N4wZH8G7)$=b~ zGm)9y?$3Tufcu)l)vTCupNXWIqMjtffp|%+=dDpUwYo~-(?z|cns#kyF-#f=0m~Y> z24Ea4BmX>L%5B)h6jX5I1Q{tM-}{pQ*GczA=+;9#t}+ z$D{xQz%gtu5;N)kb15zb`?CSB;S0YizSjLY#@|Y>U!Y}>lbm+jf2U~P!-hJG^0i&6 zs4>FyWw;NIGS{{JsYJMoG;NlYn1{vcJ%DrhTV4r;?iEiO%==a^VrB9U_fw~!?6P3n z`x4*vukQpxw(y5xsc!&Ca3Sn@s)9zz?yd?7R&NUjM&A!md<*SW)@x98WUd@V=<2% zzRNr~&HrTiwExI@$U}vzwxG3wv2jj7rkP(6P83$E0`IRx5+jmw0pXYRd##^h$*$KH+6eSdi;Nkxw;1OE}HpfSJhz= zs;UYs@H)?UKGx`3KlJyIc%zS*#Say-LHC8;?80rDe?*D*_OpDQ0?!BQ9wnn@Eucxs z9HpgI9P$e{4xA4v=-IfQ9mc;EpAE*ls5H3+2+nLAWE`-S@T-YqNTZ%nV33t zy{f5s^ilfJID08B6vR*m-UJ>U_5$quFMJA$%wbI)3QQB%NyX@jB(pTT+9lNo!Innt=$bvVXc_P452s7@F_Z_07FI0Hhp?~~j-KhwQH`oq zUX7w<9uIUR_+kdK%A~5ZOF1^149rX`wRQNsG;`8pjASO7O`H>t!?eE2KzHT>ZGcjS z-NaFaA2YF*o`}@#>8zDm5kwBte)hXQW}#OEfzGf4@Ez5dA-_ko6eoQqT_(Ftk&gIf z=5f?1lq#MD?t2VAYgTD+%ZNeHM|+Y+s!salKp1A|(R1H*3oGn0ZPyS83$Vea&It2S zFKul5PTLGVb0z=n=QV4d3>u37M*m0U{Pc}A;0Hbj1RlWUgZ^an{DvlUw13C?&5d7* z0MIw}OFvkVPVyM93|Qdvuhb)!D(}n%3g?u)U8+XwP3~S-J=m5dm^{}zKoJ_`a4h{+ z?#Ahdk$kzs6C67tv#r41exAd6AF`Or)dy|A%LfFQ{OIE&02aL!mgdOFIK#-2Ax{@O+M+FJ~HkYGEV%*5i3*b50%=2K*pCr))5z2jod?4?Lg&cl~q6XEQ`|8RysA zVx}af?qcGfK(;*15GkT@Z6QltyL$W?@`Oiqyn4ViiJ4+SV3mKOcpS7gt=K4)h~`^# z?hOyni9$-M!UsNG(c2cd#d`)yrfGS3uP>fK5-nR!ussOIS*>QX3aOmxR+|!?4PM|Y zOAns%M5mXE^WXn1 zoP>_mZ;e&uwtoM?W7C?|zRZ|BRG6)_B;G-D+?VsHDo);cvDw~!O4;g)Qe@fpVqLoQ zTywCYLHD&=v5;B%tMlP8g{rRZfc}D{a*ky=qnX4sjaQuzCvNa74uG8mL-GE_^W{4-LP0#jz`qSvHMw#Kn1UJLH8I9WHtqH|dt0{y;dsRll#v23+p2lz2 zY!XtXXTgOHC$zE21r}^=Jw1AC1EfbS>w&q0uctcpFXD#nb4&b_5qw-cfI<(*-s>vI zJcfYEENf1laf*`^Qy^bMwr3>msPJvGYJ)eIg??=qI5^~&I`n4#r)pD-V{$xlP6Ep; zzs0};3o9!xzZbzq3)Rp6O^jt^^Y*<(^xf=+y8nLuBA0&hd*qYc&hv{nq3e={bqE_@ zF;+6{F?N}?a!>7*wi-|K2Zk_$c*e5QWiGR0q3rLwXYB8_Ck_1tq;?bH%MCVXwsq&< z=s~JG>YoDn?WemFm2zLqPDVe2U@dG+30lQ{Ensi}p6MAFjG#@iP)l;I&AckV8*Wre zAT#O-_h`Z+URmzf(yH=b>UQdi`L;ToF_e5%U{BVcs8HEYT*FuYG=mt*#R7lQoL+8L zh3_;5ROvucaFXp46Vw;@edrMEQ;4fH(!Q@Gp~PIlSy72aG7=N=@@2ENh~8rDqk2Jf ztc;(E;hTk%5kA%-@4?2%$eCBm)~vndTf}6)vOd0UKnP&j_!86S;h9i+Z8+20cY0R+ z3v;4nhiUO*M{t>3uEO+LJ9AP<&uuawiAI#5-0Px{h{phA*;0Gub+hg_uzMU4JQr8_76R$jj2(NRW$h~xAy{oig6Do&&3hUj3Pb<8n z-Om?jD4i)joHn}}oTi3lsp(gV$P?4w?OUJ>4Vi)eS`%15of*Ai27?XgO%aTgqY67; zUu*`AT&a*y%^wgS0Psd)i@n=kK(TkkVr^A~J}LhB^9CA|Z2W!=>cY5?06OREx*+CT z-bGF;cT0%ClN##NAELN1UChl~_WKjU@^Tt|eGvGtVSuUe28i`HLVi+QrYD1ibQ6omNUt ze{BQQPJV#e4#6rVn(s;JhE=#e8{iy&_~F9kt1OcuW@wt1VYdjxQEV*s z_Wcgb%5g}{gb4qWyeExl{XPY9m1ea4gSyJ6+nIjV{$2C?);`q-<52fa)_dY2D1RC1 zp)gmk0=&t4A?!4Bhtf+1wE&%EggWgS+Q#HW_o&c?qS{(ly*zY(;ttnD(W3u&0I#7> z#^foZcAJo2wAJc>r#r_ZoTbgk&E4J7RYUEOG16T=n)d7HJ~Zfcbj@ycf*$Pk7iWR` z^Ex}}u|Hn3DW!JNJvGB`6J3PEiZEshPcdd}CmC3*_EAdk@M2oykkGg`+ zzFR$WX~TQE0>rX5aOt)7+9*!A7qV6jd_y}*%g6S~Qaybxe>_{_jU;j=>;1QJ9&6SmluzvKxI$;CJx%t5V=bbDg`Su2Aldfm7N3Q$N5yV-#z-jEuj03cAxI5PWPSzG`n%6qYkOnhtRaE5eta@v!|qH z^1h;*?f@N9(2daZw}ECR8}xX8y@$7bd(G{!G^D2GK3mY(sH3WthWj}f^H;)X9t(== z)}6w<;BVE}SBq)`ikp*x>?gN-uTsfAGQYi6WVdU5ypX6}`CK)ghtRZeE3b&`+U-a@QjB%NIJDk!|ZQ&@Zj@ zDC3v%!CE}NrAi+gyGjKb!9vj71~E5LJ;$W#YJ#p1zk|&?r9c@YF+Y1ADM2I9`6mPi z9+&Z({)#cE$oTGjEwE*?UEp)e*lJ%31S??^b4tqOfGzV9&9rzvWFH^FEpqAYeuT6PGQHX&FXZ2n+*b_8dwX?SrPV~%}`{pTbixGo?w$Vhsw2}hcP{cjk`-x z-9HmUiTW+WA8S8HJV$0cItr-ZyZwcYfFA}&Ar9jR)Unr~^h~-as?0I`()OS6XMGax zX=mjE4Rrgobo^YaZ?q~Bmwh8BAV$prv7WVi>RSQ7pTp9$q$=gYJ&&oS6f`W*D9DEW z-`iI33;S(({&$%I36e@3n}0LiO;j&35RO`Jd7V*Seq?_w2L9y96Z+Hh1s^`hD=HS~ zy05uHUknV=A%q1;9>g!xaDE~Aa6msVOgi^87ofX0wE{^qI_*Bosh63a@I91EqZziy 
z*%lVEc~O+*0&s5;Zgs0UN9?AXA+fymzxqLQG_x{kdv z0jMWH>+3z!LqJwLomQK1dXdcy3&Q`HcX*@}S$)Igig0od5Bonkr4A>1%7%A{T4|B0 zsz3Y*gHB60pjtKERTorS=^#HC`$1BBr}{VlcOFi|04ker5~*sX8sGlv(#^FN=V4o? zsEQX13}64%qyZIz2~e|6Ht!OBy%Z*!r=3xcew+}KTnMv_w1ay`VX~R+SOzgAo~MU0 z&}lRzX=s<|a48x19Z3eiX**S#2vj;=-K3_%6VVcoz~AK*(4{1*b9?)u15p!SLXJ50 zS$5RF+3sh0VyQ%z8u2JoU3*OduC~Xc4Q3_M)Oy7bYXho!U@BVy6DpBmivb-a6x#Ah3XxsZmH$4(U zCSv%G*)un~I42x+G>WaebvmGSYWGSH9h7dH7FMi)LQZqs}8%hXGc1=<)8$m2TG3n{`4}9iV8%_YI`1ewcjrJ&< zIx~9Em&74gx3?5#!dq7C4a5P+LU&1-{&_=x+S=xh_g1S<=D|fsX94x#wFJEpP>Rg% zQp5gcdS>(2qi$rgdn%5NU2vZ_P?B$IEvmekWrVflHP-=>ms;<+t75qW^1?WLyll3S zi2W>@dcZ|NzKE|Y(zBMeyu?3va(bF4h zp4;}Q0omIrk$8SHTzf6ow&i3}7A}@^f%dp=u1*&nj5@7P%icf9_U8oXbi%afC&>H= zGLe+(t;DBmoyXabKykK4;Yz&h&e)2|OCMWmEBsv5^|>xsc3?FF>W0xAX=$3@Qp*#E zrA9XX?yA8Ld(^i(r?YV;frc`7+dsZJ!#c=B7}0$IGOf|h76MtS-+jDFuC2g%ka(#1 z*bL}s+|J7~xB-O-3n9Ge+r8U)v}NVzl^!B?>vMai4l}R#8^}Ig3mHJ)LIA1WCwraW zDA2tIT0f)^tdU!2DUu3ff=u_1L65P%L~_r}v*Ud;zwgdwynN_BB0stm_H#)RZLH-$ zfii`VZK`H{S_hQkH>Q#I+eLDd5 zrD)-s_)!V^?t_e7^4M#p%B($BAL~hxN`WZ)!4G0xzeduO4I=WHOuftJokojzJaz`Ajt)-WsW*Yfb4kz>&Aead3`Rv-HM z^+{EgMp}JnB-CHSHgZ8Qak0>~%mQoC<>_R6)c*qRaNxCn3|STFAtW*)B;oooqeSWM zUB`(fA&t~)c;7|dYOD2)g9Y;ZYbywqwf)NI3$(6dsqNS14>Tm)`kH15&ZdLTW@qT> zXCj~jb2gUP3Y4$E<<>f@V~KMU6gSye6)u=9Q_3)XNZ$T!y&zSgwlG!NIn?Rvwr}={ z6@vb-m1qz#_jhWO#MD6hdb@em5syQoX%huci9EqEphRbLP#Weol z@DUo58Ps1CPtm%s1$)+?0T^`HNcvTlR8ozvXKBw96QTD_mW*9(98P_UgH@CgJ6**`k87=YFU`k? zNpUqZkgzQC0-8)JeRb;i#|BEEU7(|Eso~(_d`|D64L=9hvvD8RPmms!*8n_A2q*rJ z`IS%vf_|eUf3GroUZC$vwGQ;$2iq-j9u2g`wt`6~3brBPoa~5b_&;F4<(^J}X7kQ! z>yiB(`_cFDR>Fy*!Y(-3z%{pevq%a6lu%w8SlgxBRs z;_9swS$<>8>B@azP}zp{7tk4{igM?q|0$hsXkwqPEK5g?D11mp_h?97!cXt zq1x80gfU*YfHxxhP#4NhO_ki{-Fa>tv5L`OZIj+4 z0yY*j-Vnvl#l_VyjnLKg*xPz?3k5D4mRYUY3~)Ls=9Ze){qvb!T{^$wa1 zgwv%%-wyQ{@2RS(@&Ae815wbXytlJx1=710;!Q!8^r={H-Wg?+%LTGRmJfbGW`kRp z(#`4GbBssmUIn}E$el|NLU+$m@}S|a%X-kWX6?O?_n1XIcctf%a5K_R#9Eas5xdwX8juLAAXdVPf(*B_8?3NktKZ4qsMfvzl`*T2$o#mUScO? z_ce4VEL=1;HMQcE6HsGjK_e>nB~4o8geWAoTge4wnkg*VptEL3I(5o8@61&n`~K2D z4pm&+VwDuzxDCF7X?;2Tr=({@WRCvlgJYv_Pq2I6^V|Bst^O|kM;t#t2gd_a?G9h_ z6fiShVXbB`!gZ#=;4YZKl3m%*)Z1Jv5c9w&<(4nW5|DZ4_V-qK;HHIJi$6PyPDAt= z5b;XxQI+c-&4MQttr>UUrzQv5wXHlz>Un(KmsHMsB>H~%HA%#?XU~Wx6-AeB8>D9_ zyu1M7M^L9_YCIE5(#QTM>n~|5liAG)WvWTBH1uJh1vCFn9GQK4eekT=rnis zR$JZt1t51Ve^};Jz;vl}gQU2gSD^FHKUZH42|-g5QOO$C{>#5F&wWS_t`u0zS)l#? zMKEx=((q?0LnlkAuh=FE8xv!7B{X1b^msNNKfR@8WR#TGd=!-Jwt&53JaAcQ|&5+;w^N`4LF)gr;+bJm*I$6A|q+;p>dPEgi+BF#;QR;A3W~K=WfT>R!JmI?! zapC8RYH2>?q)yet2@HD0b1fz1n-*V=yVhUuku|M!hXeb9NP;{yDDE3<>^^iI+w$i~ z+-u&pH*32OO;Jo~`bLY-QJ8}XaNatxH2a8qW zml^!HS?xNySI4l2iu2&!DMT>DT3*QrZu@f56WzGl z>Ws3rEtlPEL$%(&Il1XH$^Z4`U~6H=P_`Hl^-BTqrm zb$u{bK<4QS^uNb7S$3>UHk%r7fX!M5ZVFmA*oO+F8X=sCq}?YNFE2bv4!yENjj?cg zGYeG9XtzGfNKUXAcsk^2K@Q-70Meg>wRMk@&zwoB?E_SP9*~zpWUof@2BO<4MAB5)01y@5OPI6H z+bhkipCw68Za&KlfsMg{Tn0qh!K^4%JG&=yo%k}G&W~~l&hhVS|Bk(SrAp+`vYO=9 zcDj4SteP32Nf*hGtj2$!&Ks=PkLLeMJ?H1uPLV3kM2VcJuB~Nflg(Wp95x3`Eb-uM7YWli7-oWa!FHkmsP>BO)-)jK!;5c)2Fps!5^iDQ+nZ0IX&_1c! zy9iL4G0%lAC|)|m$>3Qvm@ybxvUmL|u%wYt`nm6GSTim$p!hvwDyDaFF(rK`j(Sb zJ45=P&(Dvt0O@z>9}c*6u^~u;oyu(f6PZ82e$Z<=N#v;IakX@%Kge3$H~Z{$ym`14 z9V4M7_(D~O5m>s*!JapOFq2y*2n1Hpq*+3xt>WQ+w<^LH42*xr69lZoguF+ zXNQlXn9;Pt z#f+3%V*9qlognIgIjE(L@3TRQVIxo5#EtiTF$@10SFK}j4oSWlbT8eiIliyL4SyYw zfsVtqrfyR|+W{2@-h%{mC}3Rb?(rJzOGu%wm)#FWyRk=h+beZ!t&SgZQfgB4Sa$*F3|GUKnyJS_2` zf^i{($%T}35TBq{(gc;t4+oWrn1pTNb)&Ut{k`gk=WpHA`;31MO}GLPI^dM6HY5(? 
z{}zCs%&m*p>S*;%!*yks1X#H2u)>RH?lN8~)rX`oK%>|DurW-Qzsp5mL8flUJ8p$f zaqExvU!*+}TD@WP3V~39&t=vL35fE_)V#TIQyFN`=663rdg#`&r)JQg<+SmAEpQT4 zw}UwPzs;Uo%CU-qxk+k(@hvzs+XZ4Oebu9K4yZ_mU9*P3fBsI@>JzaT1j4WAQm4q} zUo`*WIxhHlUu0+cWbg@&>#v|{820in5p-*2DY=7W4d$mFaL4fS`~3to0w{bq;C#A8 z;ZsLiI9@zu|9fdC6@Uf+@NZd`1f8c!FXx-^zH4Mj`_TdGk8tb-#D)J*@1VGvFG(8p zdH+1_<nf#X@?@Fz5xA6=0Rjhbw|78RiL6aHkHAAy;O#Ie7T=mMUN#DrkL#M)R@I;kzH!dPcuHlOxy+RMHAgSCci9A zwo%*V0Ceo+Vkkr)beFX>0TfNu+F5Zv=>fHSy{ z4O|sVM~ACg#MbVu^iA_0pcNy|UMslk*l@4z-tqri1K3^u+6SL?jQ?`|Wl7o1HU);+ zc4an!Xtd4L&&0jsz$L@-JF>H*&w+1elkKyAp-h=9$OduIV+X9!?+@titqO%d8NYY5 z`Oxg_td!rDOJpmnte=5s3|N%#+M)p9%|u;su~_WBWODuYv#*Xr9;=oh$RgqNUcbO7 zZecl&9|i1odCt$H8q}9`I9rn9_9gSm;#kiyD8Zh{+uG(5^$Uog%x!LM z)$*9@dYhJ(2Feb-3RkO;(9p`tN8pZS?YVD7pMDU1lIgxUFdWr?Tf83Mc4%4e$hqTs zdHNa02p=D%6HS(6aH=JU z(z$vB1dFictI*TTV{-^_n*p=QAIziUlL^@z3c{M-fr1?Xf$x;;&x$ia13?Ne&(79t z4t18;b-M4Y*}*$Nkm0Szs= z{gSyJ5((4;a}*Ss0#4+Dx61D3oB1w?U54P+NWkq&c5_|#9Dm=@K@#1&@TbJ>TD6Xg z0tI7zeSJmc{d>#++yW5V*}xWH7;rYGC^wI#%E* z7VQVh&nYTJV!ITvaMeBEEQZu>7YR_u1_Z>d=oyyi9QQ+oxK;2-HCX(E;vbw6f zI@Llcw$U=Yed(4y$mZJazi%_&G|0g&8x=ibk?N{H`k4U1O;9ITJbuh{@#0M(At7Y0 z_JirxG!zCy+}qm&s!Qvi?=paK*aCYaBC^Bs)WDToq!SNK6y_}Ntm)Fd!kZsyJ~TSY zED{FvC;9-TXr@R3G{)(_x7+}P{S@%&GV6n0!?RyPPC+~Vx2|(h(^vb>YoKxXVu{ro z_qo_Nr=IHqbGn1+;;C8dQ1bz&A~PSIgd<*G;9C23QOmX~R|k}_vA~0~1NW(DqyN#~ zwZ}ubcI}bcN`*9`U8JHS(E%HkCT(SqoDVe)DO5^w3Q^O!P-!Y^lT##&b8<>V5<;mV zkyAv36dg$3>z=WD_xpYSe*b*m@7sUuU*kF5&$`#W*0rv+?)79kUIt3w@gBPWfzMZ}L z{%itkoFa4cepj1~HRVuo_4(rq^W6PhfYRhmZU@Q8$;q96+W`elDRE4fLuve_4T}C> zZw`$Pq@+x(E*8z>ANkUk^!`PB<;ofBN4jqj5mkq)-@+<4J$Qfc3^xEXhNb{xFc|1` zz#Pr+-El2AJSs}M+`e3*1p5vxET$karq-HOZ}TF-B!1|%b1F)^Y-@a*R?a|G(U$S= zHOFM=pb~nTax;@EfWlpm+>i{;aEMd1j@`CFmgy}x=^Q^I2F;(}c5S`Ba%0#Z2kX9n z?lOzkn)2qzKtY4oCJnC--No`ypQcD`pF!6MnfK{su}8;Zca_Dy&a!4m2ukvG{5E;= zB&qDz(y8}mE!XwWQWV4jVcg~b|9xQ zaeAnt4$IF!?yOXW+RQ+T@efRkAsY0)I7^14|kUcZ{Ptv?Gsh@ zCR%Pt9;Gq_1JIq~?d1m{uxO%-hF-{q(y2l527%n+t6ZIXD4X&ykeuORR-h0;ox{M+E+? 
zu>3Q7`vm~s8fUvq4sO4QkqRvEGu&}+|7DpAbJuL1r@B8FzOf=yDfQz=SFDI}w?n7N zY@FmY%G1&M;OUodZ!hVMexaWg%8ij%<1hQDVb}iXyw&*8@v*PHqXUKYOC$Xj!H}j{ z=l47q?RVelNiIKIWq(CRR%Yq$v3wcN_pC!oPn;s@vzMU2SwBU7*6C9e*0xYZ>)I1j zroX&*ppm`f8FVcLZg&op76lj=2eCr4*g7qk7!-ap@m4>%9eC5l5-!b_(vAk^=R}m zSIpi0kHyn{$A;@Y2iwsG3kKUT*1?OGGDDi@zHYkv`TP2cCtId-A<$?MF7|hxqXhc}F=x9YYpm)@sCrx~Goob1PNM!tKo$(}h&K_E6h-u7$c-Mh*bq0n7J znX3v$#*SCi8%2HphSkH4pTUzfKepB`TD9x(o{y~$HIDwsk=YB7OBQ$Fa`@6^8tmpewxkH1m~X@= zBt4H};zXmxI$lSXlG@kCd#BxnbyhtTI>#?Md-fbEHFmhS-l=MVkferbc9UD6mX;Q< z*oxDqPUYn262)l4n_iuz1>RIn*;FB1+=TclmnOBo*I9aPZEXP;E?@(gRSh5?y~XeI z1+Gh9ww*2Jd`~hsX_jWdQVnULZ{Qk-2ab*J0G7M&*hWlH`R`?zJ6f|_4>Rf;2|zQQ zY7)_eOkLASHH_dfZnPW#ZqVzE(A0pXm<2+sjE#Qn`quQof3#y@U|`}Dpim&!ML@0= zsW#a5_7`FLhp&U#N#9kb?EX%ln0GF+irSNIwZFmO;R#~3$)ZZMDahM?cTcoq)nDac z>{^SC?|T+?T;3vuZDHe6%EbfJxgNB2#Kl1MN3h(v@1YvDHT#WMn4h1Yvg1>U0}UD4 zaeAR%+(1_Qx3$s{=S|Y)o921!E%;Qi+ygyQC@w6HmcSzbfkQ=lN(Hee1%&hRKn*hO zk4uWw0OQaAeEaiy6|+FL4-|ZjfLph=7KMN=U(4lp>+Gyv_7h7+&41)Bbn(EAD#w|a zFHWM_i!0kS6EK7MdCJ5B!9te@N9!eoGOPu|&%yL$I8^TIeil8_c6NbBYZ2ef1)2;n z2`~AD*G0j*J3|Cq@D%kVZ9ALwNQ!{VrYI(pGS)Ia772rM7#0>(1R-cr6bWnhB+Y6+ z>k-{j>WoR6OR-Ynj~JiN@AiTy(eU{ci?FD(hlSS-n342{q{y~E1s-i2931=$9YFdit_k%+X1% zqpPc{r;T&;b29+h%8-Bo(*W&Axh}oGB-oh}Q z3+X?%SH02swyy-|qK^-A;K8T?Jd+^`br&sOY?kd}&ADT=sqE5%Q2f_l@YRO5=3`$e zsA@<{oq_z%&tAF@T18Ng3?hSskuQx0vk)nkmX#$zTNP8d_6J(|f$1#)9X~qqe(CUs z1``+r*42=ZCG>r-(xOS~L(b%OPR2U!9Pdl7tx1K*(X2POtp3Uc6h)>OY85dn!a3j<48Z{5We8HNx&H(5!09x|8q zif<5Y3I&w+zcNJZQ#F{Y;#_wJ!IeP@d`;AzN`_WEf4jQGL${*oO828+CM))3^(4ec zuw*1R9AyA{JPTKMA1W2M&B@UsLn|$7+#(8lbqcDAkd(3I;{4~a^eaPkI}XxWzF+%S zAg+nwqAf^Sd;@|X{_h<26bgSm(SdRrf!oHzlG&LsN( z{gZ?2(xqJ^go&NFIV4G}2$<$A%Pmr#4(5XB=&4>i;NhkXRnT4lvs^4^DI}r1CwTM4 z`3V3!d$1P%4ic;~9p3}dz6_eXMzEuPuDV++D4(#tbfR6`A~mOc`iYzLKe9ed6H~Hb zmj;SE9Z^tKt<&aE7c41RIm1HS$W-D?bZe^}Q3b4ZS>3CK$ef|<)MVTGI79#~FGmM{ zRd4>8lD`D^E3LYdb?ffCdKHat;iCgDK4B|0{0S5G3ZPEe`=eblOrldwgc-UF4uMce zMD1saqA&@W-0Fp<3JM@lxOjZFEt4@?I%5$4a$gid3WY3sI0M0hBZb(y9~$do_4CA3Er0R`4yy+b+ARV>c*Na zV zZiPveWc6oY^@Cg72x#QymH|_gjAV}PnVnB6EA@`yBi+gRzh}R&|^6U*~DB^=I_ed?Lm9RFGrwVcXL6$ zjsLLO3abp6#UExHl9d0DkyWxeA-A2X z!;RpZ2!OD#EyCUxPly_t=Pr)?V+p~0` zJuixm^AA`3(W1j@_UV#DHqQEMllvu?f?;!l!gc8=3y#CPYhOXy4Q*inlbEyE>&V6M zF(zNibQe0(W=h%ZKMISv_UvItA1?7jtA0?NAXkU4W9% z1Z0F4E?n62Eczt!Y*8Q>XUKE62s-9tho>6`+0zAI@AU&sgm?IE?mUKxkW30X6eSpx~#zE8+T6p2}p}> zFC2TFl+NOe-Ci`cfNan>++UUj@5!qZ6>rjlnZ1%0pu+n_{0+CJn37$@wXNLqc`uEB zx=zQ&g7W>VGx?35@&aQqRJ*X7%{8P;x%)LJF5vRge;rk2i{XeQ7Y+WGBY3HHlWr!} ziJM!?nMq_RrgC5 zOgyD$t3YfH??n%QlwB)LI<8Quq7!!jM~BXZyZ}sa+OLt(MVIcJv+LJb%vf0Uk9qQ* zXC*0OP%M9M;+Oe!JeO=-CG97LbOtAeY$p)=ZQ`kE{Dh>#@m5vcBcUO5G#@UNn0V6u z)s5m06SR-*auM`{VgPAEb$M*8<7)TSi;{a2{dFO(i7 z!c#&r1bWPLV|}08JPO0PX>{@$-3FW^FPvN zfr)aOXkESsX|a8^vFDR2alY%cq|)^o(&}SVK2DThikq3PBU{bzZY&O8n+)6jJnv1h z$&&GgZ)2M@6UT~@cPmD_&+{EbTWT!4e@Y6wxn!V!j0q{|Ue~*G*J3XwfXbVIvVG|p z{Kni3QIu9M;=+RM19*?vHGJ$b|g6E3`%Exk)tMyZ^fd^i|H7n)jEef27Lq(Wq0EEX46-%I=C83pYki zrnAnZ{GN!WbKreuX-MOM>vHtgi*WgIS7tAY1VEATn#E^}&Y2zOa5iM`rtz77Wugqk zUL!z&r%jvR6@N!}l)Rep2ke_7S~`Opc_YbL?YH})wRP@wgQ;>-6Yqg~oG$OzW{y)E z=4UmioQ|d)62y)%SRL0}t&GS_ZWZ!jn^nt@U%{U#YBRaB#C*Y3y8r+Azf&4d;(f_DN6A`=|RH8V6RF-685aPqYr z2Pz#qw%q^?2+s7%x8|~f>QKELU4))7F)=T5-JQ(Xmd`nzT5^(S~LE)hEke0HHNG~vMqy;(G4CdMt`0m9vd{Q_41OU{9 zc&%loBpz2`4}k>(wf!Pw#g1^s97>TH);v>dwzzoc+izyK!89hIvPe)^_%OcKLa|uL zL}^tOD)~@Yl->K_Hn_Fzc^(NfN?eT*%~%i>5Uc_W{yamvaQtV!LBzU*fx_`-x)~q( zHj$@Nv0aT6Pw!XVk&-%hjbl)MZkH75Ij*i!pnMd8z-ZAxd8MBD3xG6r_4Q4`OTJ99 zKvz?W_f-5Z%8)TIlmy7=3{eVBn|4`-AV*Mx_0>1m-n3doDVjoFU>++XA_Y<2{K(36 
zWAgEXNyq__#9yTUmmBvB3S{Zv(Txgxhr3Tum;%Hb&Cfvj2f0=1{Z9b~;cC%!$vb*c zG*JEJuO`I>K^HEVA*#p%Ub^DT&?!tVODeQwS4@STV z?9SSr>eG)pxkT&yFDjRi+uI9vYhNAv2C|TAkfF?BayNKTm2Sk_c2YHEIwdq^5>QwU zft&3N`HkYrPfDBwO>N?B`6%n=!Gst_p;9CcYD5@qW*Ar0`XDo)74DLSXn`*40X3%> i5LJLX{l8#*e{9%MIG643Tu Date: Tue, 30 Apr 2024 23:43:20 -0400 Subject: [PATCH 147/241] Add non-batched option to save memory when computing eps_eff --- desc/backend.py | 76 ++++++++++++++++--------------- desc/compute/bounce_integral.py | 79 +++++++++++++++++++++++++-------- tests/test_bounce_integral.py | 17 +++---- 3 files changed, 105 insertions(+), 67 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index 29875fcc82..ccefd995c0 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -71,6 +71,7 @@ switch = jax.lax.switch while_loop = jax.lax.while_loop vmap = jax.vmap + imap = jax.lax.map scan = jax.lax.scan bincount = jnp.bincount repeat = jnp.repeat @@ -442,6 +443,44 @@ def complex_sqrt(x): complex_sqrt = np.emath.sqrt put_along_axis = np.put_along_axis + def imap(f, xs, out_axes=0): + """A numpy implementation of jax.lax.map.""" + if not isinstance(xs, np.ndarray): + raise NotImplementedError("Require numpy array input, or install jax.") + return np.stack([f(x) for x in xs], axis=out_axes) + + def vmap(fun, in_axes=0, out_axes=0): + """A numpy implementation of jax.lax.map whose API is a subset of jax.vmap. + + Like Python's builtin map, + except inputs and outputs are in the form of stacked arrays, + and the returned object is a vectorized version of the input function. + + Parameters + ---------- + fun: callable + Function (A -> B) + in_axes: int + Axis to map over. + out_axes: int + An integer indicating where the mapped axis should appear in the output. + + Returns + ------- + fun_vmap: callable + Vectorized version of fun. + + """ + if in_axes != 0: + raise NotImplementedError( + f"Backend for numpy vmap for in_axes={in_axes} not implemented yet." + ) + + def f(fun_inputs): + return imap(fun, fun_inputs, out_axes) + + return f + def tree_stack(*args, **kwargs): """Stack pytree for numpy backend.""" raise NotImplementedError @@ -619,43 +658,6 @@ def while_loop(cond_fun, body_fun, init_val): val = body_fun(val) return val - # TODO: generalize this, maybe use np.vectorize - def vmap(fun, in_axes=0, out_axes=0): - """A numpy implementation of jax.lax.map whose API is a subset of jax.vmap. - - Like Python's builtin map, - except inputs and outputs are in the form of stacked arrays, - and the returned object is a vectorized version of the input function. - - Parameters - ---------- - fun: callable - Function (A -> B) - in_axes: int - Axis to map over. - out_axes: int - An integer indicating where the mapped axis should appear in the output. - - Returns - ------- - fun_vmap: callable - Vectorized version of fun. - - """ - if in_axes != 0: - raise NotImplementedError( - f"Backend for numpy vmap for in_axes={in_axes} not implemented yet." - ) - - def fun_vmap(fun_inputs): - if isinstance(fun_inputs, tuple): - raise NotImplementedError( - "Backend implementation of vmap fails for multiple args in tuple." - ) - return np.stack([fun(fun_input) for fun_input in fun_inputs], axis=out_axes) - - return fun_vmap - def scan(f, init, xs, length=None, reverse=False, unroll=1): """Scan a function over leading array axes while carrying along state. 
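The numpy fallbacks above emulate jax.lax.map / jax.vmap by looping in Python and stacking the results along the leading axis. A minimal, self-contained numpy sketch of that semantics; the local imap copy, the double_row function, and the sample array are illustrative only and are not part of the patch:

import numpy as np

def imap(f, xs, out_axes=0):
    # sequential analogue of jax.lax.map: apply f to each leading-axis element
    return np.stack([f(x) for x in xs], axis=out_axes)

def double_row(row):
    return 2 * row

xs = np.arange(6.0).reshape(3, 2)
out = imap(double_row, xs)
assert out.shape == (3, 2)
assert np.array_equal(out, 2 * xs)  # matches the fully vectorized result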
diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 5a2454a46a..bf174e31d9 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -5,7 +5,7 @@ from interpax import CubicHermiteSpline, PchipInterpolator, PPoly, interp1d from matplotlib import pyplot as plt -from desc.backend import complex_sqrt, flatnonzero, jnp, put_along_axis, take +from desc.backend import complex_sqrt, flatnonzero, imap, jnp, put_along_axis, take from desc.compute.utils import safediv from desc.grid import Grid from desc.utils import errorif @@ -898,6 +898,8 @@ def _interpolatory_quadrature( """ assert pitch.ndim == 2 assert w.ndim == knots.ndim == 1 + if Z.ndim == 3: + Z = jnp.expand_dims(Z, axis=2) assert Z.shape == (pitch.shape[0], B.shape[0], Z.shape[2], w.size) assert knots.size == B.shape[-1] assert B_sup_z.shape == B.shape == B_z_ra.shape @@ -927,6 +929,8 @@ def _interpolatory_quadrature( # if plot: # noqa: E800 # _plot(Z, B, name=r"$\vert B \vert$") # noqa: E800 # _plot(Z, V, name="integrand") # noqa: E800 + if inner_product.shape[2] == 1: + inner_product = jnp.squeeze(inner_product, axis=2) return inner_product @@ -1020,6 +1024,7 @@ def _bounce_quadrature( knots, method="akima", method_B="cubic", + batched=True, check=False, plot=True, ): @@ -1059,23 +1064,56 @@ def group_data_by_field_line_and_pitch(g): return g.reshape(-1, S, knots.size) f = map(group_data_by_field_line_and_pitch, f) - Z = affine_bijection_reverse(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) + # Integrate and complete the change of variable. - result = _interpolatory_quadrature( - Z, - w, - integrand, - f, - B_sup_z, - B, - B_z_ra, - pitch, - knots, - method, - method_B, - check, - plot, - ) * grad_affine_bijection_reverse(bp1, bp2) + if batched: + Z = affine_bijection_reverse(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) + result = _interpolatory_quadrature( + Z, + w, + integrand, + f, + B_sup_z, + B, + B_z_ra, + pitch, + knots, + method, + method_B, + check, + plot, + ) + else: + f = list(f) + + def loop(bp): + bp1, bp2 = bp + bp1 = bp1.T + bp2 = bp2.T + assert bp1.shape == bp2.shape == (pitch.shape[0], S) + z = affine_bijection_reverse( + x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis] + ) + return None, _interpolatory_quadrature( + z, + w, + integrand, + f, + B_sup_z, + B, + B_z_ra, + pitch, + knots, + method, + method_B, + check, + plot, + ) + + _, result = imap(loop, (bp1.T, bp2.T)) + result = jnp.moveaxis(result, source=0, destination=-1) + + result = result * grad_affine_bijection_reverse(bp1, bp2) assert result.shape == (pitch.shape[0], S, bp1.shape[-1]) return result @@ -1295,7 +1333,7 @@ def group_data_by_field_line(g): # Apply reverse automorphism to quadrature points. x = auto(x) - def bounce_integrate(integrand, f, pitch, method="akima"): + def bounce_integrate(integrand, f, pitch, method="akima", batched=False): """Bounce integrate ∫ f(ℓ) dℓ. Parameters @@ -1326,6 +1364,10 @@ def bounce_integrate(integrand, f, pitch, method="akima"): Method of interpolation for functions contained in ``f``. Defaults to akima spline to suppress oscillation. See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. + batched : bool + Whether to perform computation in a batched manner. + If you can afford the memory expense, keeping this as true is more + efficient. 
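The batched flag above is a memory/speed trade-off: the batched path evaluates the quadrature for every bounce-point pair at once, while the non-batched path maps over the pairs one at a time with imap (jax.lax.map). A toy JAX sketch of that trade-off; one_well, x, and widths are illustrative placeholders, not the API of this module:

import jax
import jax.numpy as jnp

x = jnp.linspace(-1.0, 1.0, 64)         # fixed quadrature nodes (toy)
widths = jnp.linspace(0.1, 1.0, 512)    # one width per "bounce-point pair" (toy)

def one_well(w):
    # quadrature-like reduction over the nodes of a single well
    return jnp.sum(jnp.cos(w * x))

batched = jax.vmap(one_well)(widths)    # materializes a (512, 64) intermediate
looped = jax.lax.map(one_well, widths)  # evaluates one (64,) slice at a time
assert jnp.allclose(batched, looped)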
Returns ------- @@ -1349,6 +1391,7 @@ def bounce_integrate(integrand, f, pitch, method="akima"): knots, method, method_B="monotonic" if monotonic else "cubic", + batched=batched, check=check, plot=plot, ) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 8e67aab64b..b475f75b24 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -11,7 +11,7 @@ from scipy.special import ellipkm1 from tests.test_plotting import tol_1d -from desc.backend import complex_sqrt, flatnonzero +from desc.backend import flatnonzero, jnp from desc.compute import data_index from desc.compute.bounce_integral import ( _affine_bijection_forward, @@ -41,13 +41,6 @@ from desc.utils import errorif, only1 -def _sqrt(x): - """Reproduces jnp.sqrt with np.sqrt.""" - x = complex_sqrt(x) - x = np.where(np.isclose(np.imag(x), 0), np.real(x), np.nan) - return x - - @partial(np.vectorize, signature="(m)->()") def _last_value(a): """Return the last non-nan value in ``a``.""" @@ -436,7 +429,7 @@ def test_bounce_quadrature(): rtol = 1e-3 def integrand(B, pitch, Z): - return 1 / np.sqrt(1 - pitch * m * B) + return 1 / jnp.sqrt(1 - pitch * m * B) bp1 = -np.pi / 2 * v bp2 = -bp1 @@ -502,10 +495,10 @@ def test_example_bounce_integral(): def numerator(g_zz, B, pitch, Z): f = (1 - pitch * B) * g_zz - return safediv(f, _sqrt(1 - pitch * B)) + return safediv(f, jnp.sqrt(1 - pitch * B)) def denominator(B, pitch, Z): - return safediv(1, _sqrt(1 - pitch * B)) + return safediv(1, jnp.sqrt(1 - pitch * B)) pitch = 1 / get_extrema(**spline) num = bounce_integrate(numerator, data["g_zz"], pitch) @@ -810,7 +803,7 @@ def test_drift(): ) / G0 def integrand(cvdrift, gbdrift, B, pitch, Z): - g = np.sqrt(1 - pitch * B) + g = jnp.sqrt(1 - pitch * B) return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) drift = bounce_integrate( From 0f825881e4f4a4fd3a1d99c5ffe8e3c37837e60e Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 30 Apr 2024 23:47:46 -0400 Subject: [PATCH 148/241] Set default batched option to true --- desc/compute/bounce_integral.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index bf174e31d9..12798d744e 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -1333,7 +1333,7 @@ def group_data_by_field_line(g): # Apply reverse automorphism to quadrature points. x = auto(x) - def bounce_integrate(integrand, f, pitch, method="akima", batched=False): + def bounce_integrate(integrand, f, pitch, method="akima", batched=True): """Bounce integrate ∫ f(ℓ) dℓ. 
Parameters From a6572226a502901dca69022b42a7935279439c14 Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 1 May 2024 00:26:12 -0400 Subject: [PATCH 149/241] Reduce memory usage in interpolate --- desc/compute/bounce_integral.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 12798d744e..2fdfc037b3 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -898,9 +898,8 @@ def _interpolatory_quadrature( """ assert pitch.ndim == 2 assert w.ndim == knots.ndim == 1 - if Z.ndim == 3: - Z = jnp.expand_dims(Z, axis=2) - assert Z.shape == (pitch.shape[0], B.shape[0], Z.shape[2], w.size) + assert 3 <= Z.ndim <= 4 and Z.shape[:2] == (pitch.shape[0], B.shape[0]) + assert Z.shape[-1] == w.size assert knots.size == B.shape[-1] assert B_sup_z.shape == B.shape == B_z_ra.shape # Spline the integrand so that we can evaluate it at quadrature points @@ -912,16 +911,19 @@ def _interpolatory_quadrature( f = [_interp1d_vec(Z_ps, knots, f_i, method=method).reshape(shape) for f_i in f] B_sup_z = _interp1d_vec(Z_ps, knots, B_sup_z, method=method).reshape(shape) B = _interp1d_vec_with_df(Z_ps, knots, B, B_z_ra, method=method_B).reshape(shape) - V = integrand(*f, B=B, pitch=pitch[..., jnp.newaxis, jnp.newaxis], Z=Z) - # Assuming that V is a well-behaved function of some interpolation points Z, - # V(Z) should evaluate as NaN only if Z is NaN. This condition needs to + pitch = jnp.expand_dims(pitch, axis=(2, 3) if Z.ndim == 4 else 2) + # Assuming that the integrand is a well-behaved function of some interpolation + # points Z, it should evaluate as NaN only if Z is NaN. This condition needs to # be enforced explicitly due to floating point and interpolation error. # In the context of bounce integrals, the √(1 − λ |B|) terms necessitate this. # For interpolation error in |B| may yield λ |B| > 1 at quadrature points # between bounce points. Don't suppress inf as that indicates catastrophic # floating point error. inner_product = jnp.dot( - jnp.nan_to_num(V, posinf=jnp.inf, neginf=-jnp.inf) / B_sup_z, + jnp.nan_to_num( + integrand(*f, B=B, pitch=pitch, Z=Z), posinf=jnp.inf, neginf=-jnp.inf + ) + / B_sup_z, w, ) if check: @@ -929,8 +931,6 @@ def _interpolatory_quadrature( # if plot: # noqa: E800 # _plot(Z, B, name=r"$\vert B \vert$") # noqa: E800 # _plot(Z, V, name="integrand") # noqa: E800 - if inner_product.shape[2] == 1: - inner_product = jnp.squeeze(inner_product, axis=2) return inner_product @@ -967,8 +967,8 @@ def _assert_finite_and_hairy(Z, f, B_sup_z, B, B_z_ra, inner_product): assert jnp.all(jnp.isfinite(B_sup_z) ^ is_not_quad_point), msg assert jnp.all(jnp.isfinite(B) ^ is_not_quad_point), msg assert jnp.all(jnp.isfinite(B_z_ra)), msg - for ff in f: - assert jnp.all(jnp.isfinite(ff) ^ is_not_quad_point), msg + for f_i in f: + assert jnp.all(jnp.isfinite(f_i) ^ is_not_quad_point), msg msg = "|B| has vanished, violating the hairy ball theorem." 
assert not jnp.isclose(B, 0).any(), msg From cb2b57fede8236ac215ac7fd27d0572f92dd3680 Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 1 May 2024 18:34:14 -0400 Subject: [PATCH 150/241] Simplify bounce quad looped algorithm transposing --- desc/compute/bounce_integral.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 2fdfc037b3..e51caa057f 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -1088,9 +1088,6 @@ def group_data_by_field_line_and_pitch(g): def loop(bp): bp1, bp2 = bp - bp1 = bp1.T - bp2 = bp2.T - assert bp1.shape == bp2.shape == (pitch.shape[0], S) z = affine_bijection_reverse( x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis] ) @@ -1110,7 +1107,7 @@ def loop(bp): plot, ) - _, result = imap(loop, (bp1.T, bp2.T)) + _, result = imap(loop, (jnp.moveaxis(bp1, -1, 0), jnp.moveaxis(bp2, -1, 0))) result = jnp.moveaxis(result, source=0, destination=-1) result = result * grad_affine_bijection_reverse(bp1, bp2) @@ -1365,9 +1362,8 @@ def bounce_integrate(integrand, f, pitch, method="akima", batched=True): Defaults to akima spline to suppress oscillation. See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. batched : bool - Whether to perform computation in a batched manner. - If you can afford the memory expense, keeping this as true is more - efficient. + Whether to perform computation in a batched manner.s + If you can afford the memory expense, batched is more efficient. Returns ------- From 31dbb5e92392482675019fa3162a616ae59a501c Mon Sep 17 00:00:00 2001 From: Rahul Date: Wed, 1 May 2024 21:47:36 -0400 Subject: [PATCH 151/241] found a sneaky sqrt(2); agreement looks better now :) --- tests/test_bounce_integral.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index b475f75b24..900aebbe9c 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -769,16 +769,30 @@ def test_drift(): gds21_analytic = -shear * ( shear * theta_PEST - alpha_MHD / B**4 * np.sin(theta_PEST) ) + gds21_analytic_low_order = -shear * ( + shear * theta_PEST - alpha_MHD / B0**4 * np.sin(theta_PEST) + ) np.testing.assert_allclose(gds21, gds21_analytic, atol=2e-2) + np.testing.assert_allclose(gds21, gds21_analytic_low_order, atol=2.7e-2) fudge_1 = 0.19 gbdrift_analytic = fudge_1 * ( -shear + np.cos(theta_PEST) - gds21_analytic / shear * np.sin(theta_PEST) ) + gbdrift_analytic_low_order = fudge_1 * ( + -shear + + np.cos(theta_PEST) + - gds21_analytic_low_order / shear * np.sin(theta_PEST) + ) fudge_2 = 0.07 cvdrift_analytic = gbdrift_analytic + fudge_2 * alpha_MHD / B**2 + cvdrift_analytic_low_order = ( + gbdrift_analytic_low_order + fudge_2 * alpha_MHD / B0**2 + ) np.testing.assert_allclose(gbdrift, gbdrift_analytic, atol=1e-2) np.testing.assert_allclose(cvdrift, cvdrift_analytic, atol=2e-2) + np.testing.assert_allclose(gbdrift, gbdrift_analytic_low_order, atol=1e-2) + np.testing.assert_allclose(cvdrift, cvdrift_analytic_low_order, atol=2e-2) relative_shift = 1e-6 pitch = 1 / np.linspace( @@ -788,7 +802,7 @@ def test_drift(): ) k2 = 0.5 * ((1 - pitch * B0) / (epsilon * pitch * B0) + 1) I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 = _elliptic_incomplete(k2) - y = np.sqrt(epsilon * pitch * B0) + y = np.sqrt(1 * epsilon * pitch * B0) I_0, I_2, I_4, I_6 = map(lambda I: I / y, (I_0, I_2, I_4, I_6)) I_1, I_3, I_5, I_7 = map(lambda I: I * y, (I_1, I_3, 
I_5, I_7)) drift_analytic = ( From ef860029a50d92de36864effc7e4e514e14b2f59 Mon Sep 17 00:00:00 2001 From: Rahul Date: Wed, 1 May 2024 21:49:02 -0400 Subject: [PATCH 152/241] forgot to add the sqrt(2) + adding low-order drift expressions for comparison --- tests/test_bounce_integral.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 900aebbe9c..100772e51f 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -802,7 +802,7 @@ def test_drift(): ) k2 = 0.5 * ((1 - pitch * B0) / (epsilon * pitch * B0) + 1) I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 = _elliptic_incomplete(k2) - y = np.sqrt(1 * epsilon * pitch * B0) + y = np.sqrt(2 * epsilon * pitch * B0) I_0, I_2, I_4, I_6 = map(lambda I: I / y, (I_0, I_2, I_4, I_6)) I_1, I_3, I_5, I_7 = map(lambda I: I * y, (I_1, I_3, I_5, I_7)) drift_analytic = ( From b5f3c268750c46f038a38f01c745416641330d52 Mon Sep 17 00:00:00 2001 From: Rahul Date: Wed, 1 May 2024 22:30:49 -0400 Subject: [PATCH 153/241] removing last pitch points because the analytical integral is not close to the non-integrable singularity + adding a normalizing denominator as discussed in issue #823. All bounce-related tests passing successfully! --- tests/test_bounce_integral.py | 47 +++++++++++++++++++++++++---------- 1 file changed, 34 insertions(+), 13 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 100772e51f..a0664e107b 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -795,17 +795,20 @@ def test_drift(): np.testing.assert_allclose(cvdrift, cvdrift_analytic_low_order, atol=2e-2) relative_shift = 1e-6 - pitch = 1 / np.linspace( - np.min(B) * (1 + relative_shift), - np.max(B) * (1 - relative_shift), - 100, + pitch = ( + 1 + / np.linspace( + np.min(B) * (1 + relative_shift), + np.max(B) * (1 - relative_shift), + 100, + )[:-1] ) k2 = 0.5 * ((1 - pitch * B0) / (epsilon * pitch * B0) + 1) I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 = _elliptic_incomplete(k2) y = np.sqrt(2 * epsilon * pitch * B0) I_0, I_2, I_4, I_6 = map(lambda I: I / y, (I_0, I_2, I_4, I_6)) I_1, I_3, I_5, I_7 = map(lambda I: I * y, (I_1, I_3, I_5, I_7)) - drift_analytic = ( + drift_analytic_num = ( fudge_2 * alpha_MHD / B0**2 * I_1 - 0.5 * fudge_1 @@ -816,25 +819,43 @@ def test_drift(): ) ) / G0 - def integrand(cvdrift, gbdrift, B, pitch, Z): + drift_analytic_denom = I_0 / G0 + + drift_analytic = drift_analytic_num / drift_analytic_denom + + def integrand_num(cvdrift, gbdrift, B, pitch, Z): g = jnp.sqrt(1 - pitch * B) return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) - drift = bounce_integrate( - integrand=integrand, + def integrand_denom(B, pitch, Z): + g = jnp.sqrt(1 - pitch * B) + return 1 / g + + drift_numerical_num = bounce_integrate( + integrand=integrand_num, f=[cvdrift, gbdrift], pitch=pitch[:, np.newaxis], method="akima", ) - drift = np.squeeze(_filter_not_nan(drift)) + + drift_numerical_denom = bounce_integrate( + integrand=integrand_denom, + f=[], + pitch=pitch[:, np.newaxis], + method="akima", + ) + + drift_numerical_num = np.squeeze(_filter_not_nan(drift_numerical_num)) + drift_numerical_denom = np.squeeze(_filter_not_nan(drift_numerical_denom)) + + drift_numerical = drift_numerical_num / drift_numerical_denom msg = "There should be one bounce integral per pitch in this example." 
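The new denominator turns the bare bounce integral into a bounce average, ⟨f⟩ = ∫ f dζ / √(1 − λ|B|) divided by ∫ dζ / √(1 − λ|B|) between the same bounce points. A rough standalone check of that pattern for a single magnetic well, assuming a model field B(ζ) = B₀(1 − ε cos ζ) and scipy's adaptive quadrature; all numbers below are illustrative:

import numpy as np
from scipy.integrate import quad

B0, eps = 1.0, 0.3
B = lambda z: B0 * (1 - eps * np.cos(z))      # well with its minimum at zeta = 0
pitch = 1 / (B0 * (1 + 0.5 * eps))            # trapped: B0*(1-eps) < 1/pitch < B0*(1+eps)
zb = np.arccos((1 - 1 / (pitch * B0)) / eps)  # bounce points, where pitch * B == 1
f = lambda z: np.cos(z)                       # toy quantity to bounce average

# quad handles the integrable 1/sqrt endpoint singularity adaptively
num, _ = quad(lambda z: f(z) / np.sqrt(1 - pitch * B(z)), -zb, zb)
den, _ = quad(lambda z: 1 / np.sqrt(1 - pitch * B(z)), -zb, zb)
print(num / den)                              # the bounce average of f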
- assert drift.size == drift_analytic.size, msg + assert drift_numerical.size == drift_analytic.size, msg fig, ax = plt.subplots() ax.plot(1 / pitch, drift_analytic, label="analytic") - ax.plot(1 / pitch, drift, label="numerical") + ax.plot(1 / pitch, drift_numerical, label="numerical") ax.set_xlabel(r"$1 / \lambda$") ax.set_ylabel("Bounce averaged drift") - # FIXME: Increase tolerance or correct analytic expressions. - # np.testing.assert_allclose(drift, drift_analytic) # noqa: E800 + np.testing.assert_allclose(drift_numerical, drift_analytic, atol=5e-3, rtol=5e-2) return fig From c56c7e003d2390c9308bd8cbe72716b8f7d613f0 Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 2 May 2024 02:25:34 -0400 Subject: [PATCH 154/241] Reduce resolution and remove plotting test since numerical comparison now passes --- desc/compute/bounce_integral.py | 33 ++++------- tests/baseline/test_drift.png | Bin 22432 -> 0 bytes tests/test_bounce_integral.py | 94 +++++++++----------------------- tests/test_equilibrium.py | 1 + 4 files changed, 40 insertions(+), 88 deletions(-) delete mode 100644 tests/baseline/test_drift.png diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index e51caa057f..b69bdf7262 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -405,7 +405,7 @@ def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6, sort=True): return jnp.sort(B_extrema, axis=0) if sort else B_extrema -def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True): +def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=False): """Compute the bounce points given spline of |B| and pitch λ. Parameters @@ -511,7 +511,7 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True): return bp1, bp2 -def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=True): +def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=False): """Check that bounce points are computed correctly. Parameters @@ -881,7 +881,7 @@ def _interpolatory_quadrature( method, method_B, check=False, - plot=True, + plot=False, ): """Interpolate given functions to points Z and perform quadrature. @@ -1026,7 +1026,7 @@ def _bounce_quadrature( method_B="cubic", batched=True, check=False, - plot=True, + plot=False, ): """Bounce integrate ∫ f(ℓ) dℓ. @@ -1130,7 +1130,7 @@ def bounce_integral( B_ref=1, L_ref=1, check=False, - plot=True, + plot=False, **kwargs, ): """Returns a method to compute the bounce integral of any quantity. @@ -1142,10 +1142,9 @@ def bounce_integral( f(ℓ) is the quantity to integrate along the field line, and the boundaries of the integral are bounce points, ζ₁, ζ₂, such that (λ |B|)(ζᵢ) = 1. - Physically, the pitch angle λ is the magnetic moment over the energy - of particle. For a particle with fixed λ, bounce points are defined to be - the location on the field line such that the particle's velocity parallel - to the magnetic field is zero. + For a particle with fixed λ, bounce points are defined to be the location + on the field line such that the particle's velocity parallel to the magnetic + field is zero. The bounce integral is defined up to a sign. 
We choose the sign that corresponds the particle's guiding center trajectory @@ -1249,7 +1248,7 @@ def bounce_integral( eq = get("HELIOTRON") rho = np.linspace(1e-12, 1, 6) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) - knots = np.linspace(-3 * np.pi, 3 * np.pi, 40) + knots = np.linspace(-2 * np.pi, 2 * np.pi, 20) grid_desc, grid_fl = desc_grid_from_field_line_coords(eq, rho, alpha, knots) data = eq.compute( ["B^zeta", "|B|", "|B|_z|r,a", "g_zz"], @@ -1257,12 +1256,7 @@ def bounce_integral( override_grid=False, ) bounce_integrate, spline = bounce_integral( - data["B^zeta"], - data["|B|"], - data["|B|_z|r,a"], - knots, - check=True, - plot=False, + data["B^zeta"], data["|B|"], data["|B|_z|r,a"], knots, check=True ) def numerator(g_zz, B, pitch, Z): @@ -1362,7 +1356,7 @@ def bounce_integrate(integrand, f, pitch, method="akima", batched=True): Defaults to akima spline to suppress oscillation. See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. batched : bool - Whether to perform computation in a batched manner.s + Whether to perform computation in a batched manner. If you can afford the memory expense, batched is more efficient. Returns @@ -1400,7 +1394,7 @@ def bounce_integrate(integrand, f, pitch, method="akima", batched=True): def desc_grid_from_field_line_coords( eq, rho=jnp.linspace(1e-7, 1, 10), - alpha=None, + alpha=jnp.array([0]), zeta=jnp.linspace(-3 * jnp.pi, 3 * jnp.pi, 40), ): """Return DESC coordinate grid from given Clebsch-Type field-line coordinates. @@ -1416,7 +1410,6 @@ def desc_grid_from_field_line_coords( Unique flux surface label coordinates. alpha : ndarray Unique field line label coordinates over a constant rho surface. - Defaults to 20 linearly spaced nodes. zeta : ndarray Unique field line-following ζ coordinates. @@ -1428,8 +1421,6 @@ def desc_grid_from_field_line_coords( Clebsch-Type field-line coordinate grid. 
""" - if alpha is None: - alpha = jnp.linspace(0, (2 - eq.sym) * jnp.pi, 20) grid_fl = Grid.create_meshgrid(rho, alpha, zeta) coords_desc = eq.map_coordinates( grid_fl.nodes, diff --git a/tests/baseline/test_drift.png b/tests/baseline/test_drift.png deleted file mode 100644 index 1ebf6e13393987351c4d0e2537efc0cfe832ea4b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 22432 zcmeFZcR1DY`#*lHNTEU5BV<#ejEs_T2!&(MY_gA$okGbxM2UoRaBSI|jK~ZJ*<|m% z$M5;l`}h8QuHW_h`}^1Ty6QT~Ydpt&KkmoL;3dKDAtZbo z`HdMr0Ew@V$HLNDBs{#NjP^M18oT0p z`5<*t1avvLt9s*Lkgj1Sk3PB^GNJz;n~B33)%t42`TNz+nHqWD-)*uqb?}y4*CH<+ zUMZ;{p+=!ll-IAPHa0%Fod_N=c%{qsl|1qP$2=dsMjjU7sK6U2pT2zZ4j=2|wJ;6G zx>)|T8c^ZZY+uZTmW7Go3Z;VTLzB0WqAqAg33LwzL(YJ{X%ypoAp61^^5`{Y|8LTZ zI3>?5W$T~h)=Qe!Q%|stCJUb}(am9)bw}%N=7nfgbVG1X?hRo2es>5j5ydyK;Iggh zS>+V4*q&b>djg1>@7l$0#Lv$sOzL)gQ3@p0F%SjLYnwYX9{(c3SA;vTnV$aI}VbwS|5#b|=W9 z*edF}0_}tQzD!__mmkvNX=QjgnR52iIZ(?Kl-NbzRu=4xU=nC*RG{KkAh@8wGRInIjlv<8xNMz45boiB|a6LU_?j>vmDFH=GLd z{xHJ{XPPf`l6wg;pEw!B*p!p_g|>?aq@DJ=A;?XC7?(cF$ z@iX|B2TGOhN)FZRvQHH9s$OJgB61V@O7jYCwyut9YF&oG9B1~$5p@=~STVy655hw) zbFr{ku0@$Ngq|qa;GntMTE3+Q>tZBLOqhQ+UEQX?{gc3Vn~%utjJxt^J3Z^&mkaT> zXf=$pn}bp97zro1)=d_M=dI9+QcIF-zLJwDj>*x(yuThs-cs15`1^PB>dLYNK+@$9 zFu4e45p=w8m}w&km#{E+ao{DFH3^ikewMism&o8y1s+VgUXGZMsAu(h<;VE1Hm)=W z+?)y(i`)wbb1u7#2m7X2C(~5Ao`TeZJ%xeX=J1u;ljbp2V5{Cp>1WJ4pMJ~eMc5;v zNtOGuU)&Q8v&u}P`jV;G%Iz#duNfC705;=r?*VzK-r>$bLgF7L9}QXw-AwJ@^Wn%v zGr|}_^Xyz&V4C9iB&6s)&zf}Hk*00Osc`((5q9l$8!#ZekaoL|WG9qywfyMjDz#hG(ym>Xf z>|NQi5tFF<6WhH!T)dC%cJFD@_NJ=9QscpO6k6BZW@Zs3#5pb}VgRZz_R zzHhZ(?Og}6dVJG>qx}svg?D$+SuU2WF&bGGZz+NQRWbX+P`_?)W!Vc?VD-Z6QRt`c zX(bG!X~IVXRvk6Mx`lpWl4JV?J@s`Tj0E>`lauN1T2gW!I_nv)k>?nW!@s#&)xD#O%*!>LD@}WP)ksJu6}& zBYPRjm-tOdv^wHNP0$A7tc}3V6)x9*r_%L2<(hdM;w!oD;udGNa#}p(drx(e3} zW|O>S=rrlXt={N9@I%7svF>*^j>zR9^K3LOMHWmrJQjcOH3@7Y_1~2JrpOts zz3-*cnmum8CCrL=jrZDya;Sxb{Y=6pE++>TTs=%e9;}fXapZrX$tx|OUizg z<*KA`()9du>y4=d>F*9n)6Vc5y*WZu5E)G_5iI}a*MZG&*b7$gWG60;t|X1A*Nys# zsOjET6gvNfNjX0JrAKzx^3l#KogOt}TMg^?wr2yBA-Z)8$4Uw=WiqjoM zmg)ERUxckwJj|P6090xJaf1zU7rn-h)3?jgD6pQ@QgDsdjD%;`7}GzkQ-e_!mQB@) zJJt<)vGGLGOY&0j*(|qw#j#)!A0~BS{yYs^x?_Nly%i8zZeDS-{kg%QsGxFZHij(^ z(es)uvC>fmFC2#B(vx?(>J&l|p#?@r8+1RW%{oa_#v*OIO`!Q%PT-|56}WD^238I1 zIz~wk?0TKndR46Z)5pZ?5%lE2nICi0Y%p01X@?KsC;)232kW!?2B-zPG#-h22K?zt zVUOu%+Bx38Q_m4B=KV6wc!gQm`UQQ&!&`|GJbXs>NI58~dUegeZETvkJYX}V!L8AP za>j#kUYYkFBzaF;v*>;J>co3}Zh2Asvee!TcT7u^{26N)P(pQdXZ zj5?w5VoZMm#)9=ZN|08|iN_)EVasplimL#@VD1Vac!PriQq~~vk$!*Ld!6Lz!g0yo zx47$UaC6eQR>N03{6FSnH8?F!`QK3SD3*@d(9_VUsi~Wq%c3foDGGPD6 zY2x`AL?|jT=KGIRf@+VpmXmbjs`$0ir@IRM6j8Sx-}@%F;Rc*TRS}~IXAq+{L926c3HG3RKw35R&ll?i$%AF0*l$tk z+)^9BRjZ&1qK`^ahrer$f!*&O(5vY`*xn*_B+RfculU1&8eG>FCbdI2%ez%M>i8u5_>_2 zp1D=~m2Gd*m+vyI6$DG*UEGw<6v$-gW-pJ1yi*@_cBP`8s6O+$bU7ye!GrMhq}Xt# zxEliZ#n=ihmGY`B05pF$S8+!>nVKT_VK7p$f7mv$nW!q2XGDJbqMhqtZ9S(W_(2BX2nPIVbP@-+@v>F_53RB_*052ksucIFpc%P6t%OhNn8fD#_e1MeS| z_g5t)zpU9CqKqqg(Yx$o+0)6nAg$)c`MJ_A*HC9GgbnNQY-4%Gzfg?*qmoAF-i$v3 zIo3k?Z`mB9>E8xKmNR}UM-p|OZ8tMmjEZ7P`h+}?;TY>7frmXIo38!K^|P4&RiYF9 z9nFa$FPl6z?8~U{A!HGVm)b9P8QCXUCJl4u)+ZBMm&g4C7(|$<_<4)1qpvG0_6rAG zhSmB0*AgzYJI%KzFH@C%uVB|{!waUm)K*OI2mlGNilvWYBlZgeMlQDt1&sJHX^K~` zibnqtcZ>NIv+-*M52oc0Ox={^hCe<>k#`qU4VP6sU zZao8Fq_V_~ateL^K4qi!D*pm!G7v#;_CIU>uvOtojRTCCY>^M($W&}I(cf6hWYHqzux=o(|XNxEoyTn ztXjSTdH3{zkPwrf0*v$9KNbRugusakgvkAO`@lMTJ8e$tcm9Z%3+$V|Hq<+cKgT?X zEh;1PG&6_H*Pc%~DhS(9|1{cT)XA?CkvtvrFo?b_+EA=p6se!pP?PZvmI zug|IevvxQb^eBhB5=xOMPVK43(_9sJ(-c*n)!#;$;86Io4g*T+OlUSl-xUEi%TS`$ 
zPYkxM`rm*=AmY87fA{5*K=O3mX95(uJ0-dNcKJ`)My&VmO~z`!^Qa+5{aMOLxBMSm z3I`~}@yp%0cyXgyl^9kN@y|SAg9_y-GkqA(o37X~_9Okx1v68wiL)ToKz~hIDwjMv z{9}lgpqmrLhYrRXM5ZIw&-=_D2J*4!+dZri3hAnIPc4$9jN%2^Ed{+anXUWYCH}#K z9R!oV3yaLa(+5d?4wxj5iQnU0N%r@|*Ic=GV%V^{SQ}BVm?w=`c;Ic)ixpL|a{Xqz zM3#-%l%Ilt1NuGuq(-d5M|x#>}Ijn!)|H0 zD}Abj4_k*ZT%lumPSB#d@Oj~IW!<4%jGd0$t~;>(66s|UpleWgtr&Bs;+9VIA0C9@29PK&LE-qgnhLg$&5rzirzqL}Lya;j z1`r#v;QQ1rim2;FHsjpYm8S;g756~S;Rw)3CmAjGnU>U%x{@X+5dP)@jl5h?k-b6Z zNK~>+*u~YIMViqlegkxW4(1!rI0++cBK==5_NVGZC;sjt_Ul}YVPoU4Ix1K}p{E`- zQC+_g%@gw^_TBNYbKTEy_=g)ags5pVtin}O*_>?YA8NI*<8vH_6JYE4uLnpSv z_Dv#*aB)_6CL%2sWSUer8PoUYJ*r1?q72Oj({T!{LIN@78^R8rd{fgMH0k|O(UaC+pbialkf)739#;==vd8@*e zSzaBhs8IN-xax8r*pR}A#y$sR&F(AFB@sliqYsrRW1fR=_`tAx6VJMbM6vaZMyb78 zseN`f(YYJ~8$^Nlf>T2viZu}D@uep)^wJR0qu1zhtEw3N@z*{fbv*9Oo%9z)FQ2PR z+z~dUV%S*xU4p7`cgbHPfen%Xoq6IOspuuFQ(!Xy_kz{Xa&Tz|#(HVFchKA>|=D$LcVDKai=xNYN>|w`fO_ zF&EsexZq;WPvS-QzFC}>Y1&5n^?d(sTjaeaQf^>5{mg$c&i32v$M{U0CJP(Qg3mS| zb1*P@|0XGrMlpiD2kJ@e$5lp+NA47h*zN7Fy4AXeZuUVn`uRG32}Y?#tbRS=@jN>! zzul)}_1FUxGpw7^Y;Kqkq9BP0IoYFmMh!Fef~p0l^2Xo#DvH4$b%D$px0TGiauIh^%C{}yY$Xlcp`Buxg%xVmjj%yC2xZ6LM z_V3Hy(GgxoW!x&(T#D$!d2%c;gc;p6L}?U0RR+(}=i@GZwiVI=ahnP@7%3&RW?G!g z(jo8fh67y_=)L7G7>8QEOp2c+&lMHv5PAGNYm=HIIC0nHA2#gt+Bvu*=L)u_Hz8Tp zPt=Pu`W{11?0uKUeO3R*A`=;`s(I3_a#Zsso{X9r>cxu}F`qtV%+Fh8wc)|0>&snw zQ0zUW2i%fH+veUSH_eC@@+xs^|HMLc;Ra&3WTh`lOI&%C1(Cbl zzb5Yo1g$oAfdReu4sg`t4?z~eQL#~cR17!x;(EYN3WyNm?GoX+Z_*@kE5=0px=?0e(EnCP+S>;=bqgc z=Qq>uK76xO?A)c8Gh#~*vh^4?9FLZ2&MiGG!i=XlyH5?!2kH`{|2GMN!7fn=@EiD5 zJLv;)XFv6-T+M-8ykJw=Sk9NLausfTXkai+@{P6{ThE;hBWL`HKTF{BM8e#{;SU5!M=*Q zs~nzsv4(md1e2G@U@adm-9*o{VFYT0@LOLKs99y)eVV1pw2+w< z`ikug5?=sO(yj3V4>U|OuZ*+qYL#$qNd{^d)?miX<|Zy&IspFz;Kr;#@-VW3N&B|8 z$Mk34{m~LSE-X{>j2ZH?{|s;a0#Mp#Zd&*A{vE8+0fnkTOtpg%@Lbcm-L~STMt;80at$SS znxBA-QBIINd#W8eq-C8)ju6W*H&deGny&wXFB$MZ!}F)P?{Gh$<)#sJJzC)^*2|qW z6uD&aHo9%bt#aq*hEx%9PL1zR@e?I?eq3^4um9 z!kFs*XSeHK4wuZljcSAnVX3Un4P#z{8oPLUnQPYfzs5k6*%gRz|?Sc1guuy9Ov!j-CoNmnYHiTQf5YE09*{ z7MVufSkup@s?_2HA}o-}p>Y`)?ARC>tk?twznwdyApGaD|9M{JV0kaett3DG96ig+ zy%n>x3(a(4SNX;Yw8zgjs%yqKs&^BI z(wQwIb+znsphqh0@LwJX$oXIUn_oHZ2eRMVQybou;q-{UyrjO|L`rIpnKe*zFP>?M zUB2@jYzc&P583n$GF_j?EgJNJDep49#{7@&0NY0y`3VQKgwphY)O4neZcFxXFD_m< zdUL2qB+1uMYW3jxR%zG3@yb^5!uoAd1{LAFKK;>5cl7~U0~}}3)zPb;LjISP2sl@U zQ59LVM@pV(D5}J!q=e=woSmwj93HqNRdejwjP1UPFZKC)V?`T%&)Slb@mg*!t}@dr zY_+0d2**9U>G+4gPUe5DnzsQ+QeFgZw{kRx=>CjQBBN7$F#hswR0!49oFsMud+5{^ z0X=Z`SW+HP?T>8>YLyvo6=}`F6=}1=fQc)fj$Gcq^uNI1l#Ne0Udrfj-xL(oxP%cl z!~GjLXY2EcV1&=6r>e9e-BlGy8Yv zj{8^j76@siac$aswzE$cx>>9jHB3>5-N^t83eanSbOc^{RjakrgA@M#M}e=(JURR! 
z@=21>Svkn*h0RAPg?&n~w+zF(6UWD}uZCy2ge8R69uPVL zbQp^+D4g!hvwY;SavMZwtP4uOwKn*BZsL_2-e&&zdN&Zg&y=MG{)TQVY+^wP!N{4x_=A;&24 z5kKkZN@Yz?i;C6#U2?=H?MI`Mt7#Fw2MX!xs&i{$ukqgjA_9AvU+wMVUa>r&s@^n)VSf|Ld2ni`y!a5F-m79eF+e~i#-j1S*D1B; z&n8)xHJ;YL#|hkdw%Fe>|8{v?AdHUV zTi6IDeMl0+Ctor@^m%~$<%>^2T@1N6FRc%DIR7?zsJliIZ*^ z+edErfB^@ecI27C)z(knG=&tH(+(Q-l%J` z0VL2+eT9c%Maz(f2tZQ$=Pndr`8mr zc6QnmLUm9Xpn1d6UwtL3Xn=UY_uHF2roP|--;C{cpfa7hhK2*UA@` zc4{t*7linTYp$`?q~5bOtPq`$ffBYlfsg!rVT^ose{S*VbK){sMtw>tud)qhM;&Zc-?KJA;S=EFQZ*?$7Nq3Gr9p%GR`)JcD0X?Ya)3H*XD&%?AXja)qIq@6vdeRA4Dy1)l_cEik(;f> z_~&1amKkevK8T%nA-gac zjPW_Da$kGg`M4r^j100Z)ap~UpP39h?cXgaMY+@(=Ew(t4p!yvQg`9scz`7Uu}GjM z@^oLO;>2wsrxeYNKrC7lt0thwTYR1stT|qy`@voZ9r^9b&Ovqrt%`L<*L@$kE3Z}0jqvyq z=tvmoqz)?ftuFXIS-nRqQ)1{*KOgC_Hb(B6G4x1@m=6~r=Ti{rAT?q91_0-V>hS>Yp(lIc%H4i%5RF+Ns3n^XS-Xk!!!SS=4G+E}L%SXuAe|yA5?#ipFiwlEy*TyPAVsdh4o|un1UmD{4 zCq=u9g4G{W^x< zvLJO9m8mJ*i9vg;)qt8}>X(TAU9VDGCza%@x{4XQDoIm5PxGr2|@HCC^Rlllz_YH2g1x=t6Wzu+^TM=cMyM;VAL zMdly+(!%aO%rcY!oOx|E+c3{tf{(81>DrFBs!M4HVx_fsM`cQUnF(L-e7YVwJB!Vm z)k_0(JXSj^JjFKbC)?3`t27fN-8stkxY)|l3_uwwTsjjMMRHPi6Gr}mSH``D;)O8^ zw0y1AMBm_#>HI2uU$|=Fw&`X?X$%MxO!`N!Z;XCd7>=;hdc#GwJ=w+d{XGlsd7I=N zouA>rZMRR406MCz3_)#5=_;?*zA|LGbqATyA?;Rdh4;VIFK+2D%?8z7JMW&nB$C)O zQQw@q7$L>4{FN7%$?mfq6|;cB(z3r%T5PbD>dU6HSokR+m3d0HBkXtNy$1LaNb|sk zUINWc5(ntJ&#OW6(S)?AFF@6?O3NfMpCYG0IUpQn4*wk8-lkB!_lV(NkLIAX8G3sm zy@x_d!Lm~U)K*p(e@1xC`BH@-x)V-awx3p73c3B!c0EH^hJO`bO#Fi92f8IdOpYqC z!=Tv~a9Ux~YLz7g0kIEeLy1N3h0<22JDBbmT|Bx@x#bqx&+b*Q148apl!Vvr{14ET zSRMQIYp#Tsc0W0=KLg`$$S#SV*$z48@&gO3I*0X<+uBH;da;UP)xGC4A7ZuIuzbQk z4al_-k}G(PtcXmF--kO(0-h^zM!wHKawDGR0yx<(Tz3RKJrv_r>JYa$5O=!8w6>gP zSZ-#XL;d~n3ElPppV4T_O|6<1wd!?J{CC8sD-RUJU=N*VLN>s^{b`5<%}*_n$_Q9{sjxGurOk4wnH8C zRPtqjQBV@fhA0SB883hJJ*}d=&I<8>hWT}#Sw|6}-OK{d!lN&q5|gf@LQA(ny)>!z z!K2r5u=2N-Uth_oq^pBJpo-BEFWh$O+W#EnYheuHn$?~=-z)S%%$=K>3RS+$1QhwisNHigC@-$5 zS+M0BDR@@u?RqK8bTZ^F;nFqQ2lm-sYt>o|pghZ_oFKB{;3EB9gr2;5^B0drEHMlg znWHbdkd=piFfJ5i#?Gby(o4`MD77|@)#d0|7FYL?TFM%}G@1)_z`=<9lE}J5MvjY5 zYYz|Rl7!7#uGkF~!D(n{&W4DbMCgCDlx^0MDJUwcJ$)K_?fP{p1_qy|HPGskZ=VQS z4?*LqkWWQHoPzIx*2I@cEtq~!&os#7dsU^l z?%mr;kvIGSMl0)PCe5`@6lGi%$~LT5+~&lg8v3K z^?GmI&j^=^D69ajj{a?G6-7Yc6Y{Nc+MZ`g6#FQ(cH;5nCoV_s0mR*am|=NZ#YIX8 z+y~eYTG0HLwgG52tuwzwawm2n5!4ckf6c#J9*m+`TO#R}ua;87{=oF@ZPhHthPO4d zZ7Y@S^y>**v{NInYO{Lw=QDK*q&G z#Ta4R(AEV8F$K`uNk)Sz-Fpd20%^#k6qfPJ&w(iGs<&--pw4@I18@48+iE*aZ<*}? z{}L&HwH9KGk`ibi+8bdV^~~D-$Fg*`D}8>^gP){+Ro! zZxGn%yQa-nBV|s9eUA28(+~6D3L^?fNbztKI%Tiy(K_vU%&k0_HMN|EBQl0J6%_3RhvvPI@B3>8DArHd{Gx z)n4v)!zCR7khxc7={>OCG_&4!#41u*Pq(z4Rf7#XPXYCgXz>V9pWd!US@)({@2^jm z{|IIyA|$i||2&*ejevw}SY2fhSv2V7r4?4(L=G21gK5ZRDw^!RutrIo^cc2L_d8V{ zz>)KBThOvD=6hV$N(El~X$~~KOZuRFPeKiP1+)M?fn=ptTMn;0k^Rd6VNz04h2565 zfn~A{95e(|*e(wi*z};^%YFoRdl*UkZ~vBW7Oa-=REVq8wqmhl8KrWGOLO?ti2{wi z9xtXE-w&0$q=QoophzJ=#vrBvBB>gn3q`=s0mlDcyar7^)6v9~MDtlBSadbrp_!8! 
zann_eAaNP>0o5J**ZC4)Ao0SsX>Z>W8n|?TmSBspG?m0mVcY%;`3OenNAOz<*Z084 z47F;i3IzKrTzgC-493ZqJV(0k)?im_G42h6Nf@>ajYWry5iPI^hXp@a*TtBqA_ z_Kw_H!pJk0Z!dWbtmf4DOM_V0ebiL{kCC@Tmx(}BzK6|vtjf>N?zz_Urof-pe^$+B zDT#+!{J~S0yW~P6fs?LQD2chujvEb^IP38Z7eBQx9Zr}s<~gUJ)1Mnyz`+FuN!~D; zt+hNmP-716WbElr)$8CQFZm%@6?eyONGQTk%5g$k0JQzVj2#eJiXdmAkmB80bGY15 z@hSGB?ym($n?JecnFb{#<+4AV2_u!LA9^_E8i~`B!++rf+r_{c77*;}-Nt=-fDpP; zN=HX^gL`{#;4NVw=k14bZEga+-Gj+#10=Rkkat@h`RtQzSgi|e09-tti&2WR0pv}$ z0*SS+T)7W;B|J+r=bUW8l!z`Uu2q31UO)%-yYg&7mBM#$hhR&ZlsWi|>dE(=r2$nc zZjEjL-=}VN;B1Vdib{X`=X-hkC(-}Tr`)$2%6RhrnqQ?msPN?OSA!jD-=28WGu_$~ zM5Y!->sNa^TC&!661{qO@^u`M+AeEoOmNSly*rfVo^zu3t4(yQfKe_uLD`LFlVl9TCO)`fzb8>H;iA zpK7J6sbc#sVYd5)9Ts-yhxxD@ijW?CxqegH!XmB2u5v#&5wN+`Kwu*4Mu5J8c*$pA z^HO8KHiVc;3mgo9-1%u9S^q6C0;M>9&*P1s!tSdh`g3#bpHn%$dOQZFib^s-{DvKs zAKE|&+sc92-mW!rxQSC~b1P(0cq9W*fG4jig_Mdr!?Noux6XI84K5RdVpq!4$}{9P zs;M%3HA9u91k&OBk4}Hl<9>1cC54ZMf7unU^+~QgEa*M{VN3&j@yCxJF)=Zq-OJCw zpaS_E0Cfd&A?3lChs@GIo|d-uCmcI4r$I%~eY7*6)b)@RlQvt^U*T*kI&Dm?iPs>> z%m_tY1NTu(r_m9B>wQA2EaO{RvZaP7LMqdrI#r1z5ZA02d>*erVNlPJc zCMtRPCk0RzrN2KT2I}7sv%_0kTNUB(g0l08V-56ZMV`@08H#z`-zzTpI1E{CKjj4_ z5UwJlqLPR0E0uQ^alQE_4cpVvs@$TYIw&FJ{4iW#J$o7El^2x2*7H0$2TXc;7I;j>YQ@sDqsaE4X$02yafKOp&~U{P|3dX1+Fa>WoRBAxMVkmuCja3b+kW1_ zw{M~9FtFU;uL+1_P^c7;yFFoHVbRSue(ig_AK{k(1=lheU4>#l3FlaO<8(U+0516L6TEUCv5bf1%=0qh^a)n2A zS9mG5*T#(ms@4NQ;tQc;U{KIvcd|^ps7u;ym4u8aaFPK9axO%=3d4siuz%}hrQ`g{ z2kjWX;U09XrWOGNv=ilXUwgsw05z@)R0xu~B-h+3ad}2kCv!mFViR^4(}yrG^C=iZ z5}+iMcX~17C4z)m%wWuQGtS^hpyY3m9v8j*Bgi1(sbT&r=J}WVcFrAS3xfrzfl$QT zzY#3MJw@8n&qo*;;16>qTI|C%Tmc8v{JkmF8ZiVMYQ?6(8pWA7T-_HEX07@4$p)}z z2g3&~Eee-1K`>djV}GnKropKUh^hOyovZNrZTxq^&0%!uT1Gwy;6l~&JE5VMqj|u~ z)_v*&^+~g0($;EN{rAOVCwJ=lu}E#=jI1tpx)P(aHGqK5I&?ut!+HaL(N>St$d-KD&9T6GH`(1hF+$n`neH8%BQ3k$cng z`5v(J8#jZze_qLMqok)lS1bHZ57N(*&3%TrUQFM%23!sXSBj41EhyvkfDjzO0uF?# z9DXo_x-~PhdK7vQhoKezL_C#8!pukwGUu;;r>_#lTz{Ph!sA~?1n90_{1iFQMIQ?_5azmvKdHMNwMCrqqmw-x@SG5@uNChos8gqKVWrtY%QM8s}l<$!; zXz%{P zP9OLa62X7}zHx&v&x9Oeth_fVsbhHj>-?@t=hg)fU4r&yFDG$@QW8Oih1wn zH$nNpep)Sm!=`ej!6JFA`^Y_`fY7}c3{_Q zPND`Ck*^-vmx>SEl3JHSNG+Y4sNYiu(c+4>Y z9(n=-Le(TmS}EVE{yd{3pUz1~tG(4x=r}e6asM$ffFqEglm69HadL7pBqBoV<;!f~ zJwcagQh;8X<$bW}IUOaR%@1B&$3aAN>Q1h3L22U<9BLtc{P=MZuw99x4a z(r(Fy%>R~W!fnp9A<7(0&biq-94Spou35kNZ1P9oP77;9_X-$ei#+sd5F8jiCh$&n zhaEKIz>Z9~e~+DH_urQSk2mx?aTT`emIj(62!Ex>&1rZR=&gpWU(#WS>_g51OafPt zGd|1!6o3cB#)cp=bwK9qYYr^<466ng&%vHnCZCiXT&EbrnQYP!w2+?U+mojJ1&GIc zRzL`dnMTI%d|oYg%%y)6y6+kSRG6&3$P3bPjDE>T#Fv# z1+rb0CdCKy0W2;rSti(~C6Xo6sI~?-175mM7TmxR*V_aJ2m22fGiE;f1%6XiKZE>@ z#|8UDFog$+eg1n_q)9J)&5$VaK5(uEYcN#xst}xmayD=gp@-C-e@S{NKoqbXpqriI z7}z<8iEn_7V>`Y)$Z}g53VHVq@+nGc>I{&#XrCj0m$RtBGJzTl03(EO)?j$?QhqgT z)<8wlEqDiE-mdJ5DsRKTg%}oX@dJZfKwKh>30r;MSUJ9w>Z#6zg=U4$E zXYs2o3nC&Sa00$|0E8Wfb6zM3yqSwH8a4$%mB;L7OXTw4U3d?a8bFE*Sh-lhNw2Fu zO#aE_H}an;wyocQ1|Gwwr)l06+i|)Bc#p8>w$-KcFw4ZB{y`KhvWqm>DC0}*U_>O$ z;Gg+K_zLyX5CZ!15fWV}xbBJfpP$O`=lv-O&y(gs{PXpLaPK7xXC#<4Fpgj-xB^BC zRP?9(aY}GF92F|@7;r)xCy4jgd;pi7ulebLWm8j=H7G>b4CE#X3JQMDOB8jHbEr8; z3&cSdcR$FO3L5&u%@$T@LrNURb0Z`V6#xJp>3VxtUAuOTHyb1cb)~>8FStG4tNZnl zN7&~ONyVkcrnM)7dMqR3x<2vf12+`Vd>*OHf`J3noN%k0aW3cgT-o3FOG-*Ac%CJk zO*&T$5~E=dasgKbR+-1|X?J>j5c2;0-G5iqU&#)5&7yrCg%0=D@=|0!EXL~DN3pH^ zDU?@=<^2sYl%|_T+WIVD+17wDDt0~90!Mf7$WOnH7+sZq=m>N&P#-((27*csM0!xj zQP%;kJ9nOSJ=_ZQ+@3da3WU~(hqIfy^J{EW8aTWBA?r01>7hiydZO+%M0&{?#L(Z( z+cLqXXX=-K|L31ValM~xm0%kVgpoYY3SP*Lx((a?gEg&FTIq>eYW=y_qAs&mo71f~ zt9L<0wp6)RbMN2Gu1>ep1Q4Cc0;UAk#C5j)^9Mt5uifm^t)#Pmuz%LF=#Ne-q|B27 zWYZX!G5EHOn)%|}%JK7n5&h>e=L-Y?y{}ILgE2U!cw%Fd3H1(tj`3u*L3U@& 
diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py
index a0664e107b..6702f8da28 100644
--- a/tests/test_bounce_integral.py
+++ b/tests/test_bounce_integral.py
@@ -9,7 +9,6 @@
 from scipy import integrate
 from scipy.interpolate import CubicHermiteSpline
 from scipy.special import ellipkm1
-from tests.test_plotting import tol_1d

 from desc.backend import flatnonzero, jnp
 from desc.compute import data_index
@@ -246,9 +245,7 @@ def test_bp1_first():
     knots = np.linspace(start, end, 5)
     B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots))
     pitch = 2
-    bp1, bp2 = bounce_points(
-        pitch, knots, B.c, B.derivative().c, check=True, plot=False
-    )
+    bp1, bp2 = bounce_points(pitch, knots, B.c, B.derivative().c, check=True)
     bp1, bp2 = map(_filter_not_nan, (bp1, bp2))
     assert bp1.size and bp2.size
     intersect = B.solve(1 / pitch, extrapolate=False)
@@ -261,9 +258,7 @@ def test_bp2_first():
     k = np.linspace(start, end, 5)
     B = CubicHermiteSpline(k, np.cos(k), -np.sin(k))
     pitch = 2
-    bp1, bp2 = bounce_points(
-        pitch, k, B.c, B.derivative().c, check=True, plot=False
-    )
+    bp1, bp2 = bounce_points(pitch, k, B.c, B.derivative().c, check=True)
     bp1, bp2 =
map(_filter_not_nan, (bp1, bp2)) assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) @@ -279,7 +274,7 @@ def test_bp1_before_extrema(): ) B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[3] - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True, plot=False) + bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) assert bp1.size and bp2.size # Our routine correctly detects intersection, while scipy, jnp.root fails. @@ -300,7 +295,7 @@ def test_bp2_before_extrema(): ) B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True, plot=False) + bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) @@ -318,9 +313,7 @@ def test_extrema_first_and_before_bp1(plot=False): ) B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] - bp1, bp2 = bounce_points( - pitch, k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], check=True, plot=False - ) + bp1, bp2 = bounce_points(pitch, k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], check=True) if plot: plot_field_line_with_ripple(B, pitch, bp1, bp2, start=k[2]) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) @@ -356,7 +349,7 @@ def test_extrema_first_and_before_bp2(): # value theorem holds for the continuous spline, so when fed these sequence # of roots, the correct action is to ignore the first green root since # otherwise the interior of the bounce points would be hills and not valleys. - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True, plot=False) + bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) assert bp1.size and bp2.size # Our routine correctly detects intersection, while scipy, jnp.root fails. @@ -448,7 +441,6 @@ def integrand(B, pitch, Z): automorphism=(automorphism_arcsin, grad_automorphism_arcsin), resolution=18, check=True, - plot=False, ) tanh_sinh_arcsin = _filter_not_nan(bounce_integrate(integrand, [], pitch)) assert tanh_sinh_arcsin.size == 1 @@ -463,7 +455,6 @@ def integrand(B, pitch, Z): automorphism=(automorphism_sin, grad_automorphism_sin), deg=16, check=True, - plot=False, ) leg_gauss_sin = _filter_not_nan(bounce_integrate(integrand, [], pitch)) assert leg_gauss_sin.size == 1 @@ -475,11 +466,11 @@ def test_example_bounce_integral(): """Test example code in bounce_integral docstring.""" # This test also stress tests the bounce_points routine because # the |B| spline that is generated from this combination of knots - # equilibrium etc. has many edge cases for bounce point computations. + # equilibrium etc. has edge cases for bounce point computations. eq = get("HELIOTRON") rho = np.linspace(1e-12, 1, 6) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) - knots = np.linspace(-3 * np.pi, 3 * np.pi, 40) + knots = np.linspace(-2 * np.pi, 2 * np.pi, 20) grid_desc, grid_fl = desc_grid_from_field_line_coords(eq, rho, alpha, knots) data = eq.compute( ["B^zeta", "|B|", "|B|_z|r,a", "g_zz"], grid=grid_desc, override_grid=False @@ -490,7 +481,7 @@ def test_example_bounce_integral(): data["|B|_z|r,a"], knots, check=True, - plot=False, + resolution=3, # not checking quadrature accuracy in this test ) def numerator(g_zz, B, pitch, Z): @@ -506,22 +497,6 @@ def denominator(B, pitch, Z): average = num / den assert np.isfinite(average).any() - # Now we can group the data by field line. 
- average = average.reshape(pitch.shape[0], rho.size, alpha.size, -1) - # The bounce averages stored at index i, j - i, j = 0, 0 - print(average[:, i, j]) - # are the bounce averages along the field line with nodes - # given in Clebsch-Type field-line coordinates ρ, α, ζ - nodes = grid_fl.nodes.reshape(rho.size, alpha.size, -1, 3) - print(nodes[i, j]) - # for the pitch values stored in - pitch = pitch.reshape(pitch.shape[0], rho.size, alpha.size) - print(pitch[:, i, j]) - # Some of these bounce averages will evaluate as nan. - # You should filter out these nan values when computing stuff. - print(np.nansum(average, axis=-1)) - @partial(np.vectorize, excluded={0}) def _adaptive_elliptic(integrand, k): @@ -607,6 +582,7 @@ def _elliptic_incomplete(k2): return I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 +# kludge until GitHub issue #719 is resolved. def _get_data(eq, rho, alpha, names_field_line, names_0d_or_1dr=None): """Compute field line quantities on correct grid for test_drift(). @@ -636,7 +612,6 @@ def _get_data(eq, rho, alpha, names_field_line, names_0d_or_1dr=None): Zeta values along field line. """ - errorif(alpha != 0, NotImplementedError) if names_0d_or_1dr is None: names_0d_or_1dr = [] p = "desc.equilibrium.equilibrium.Equilibrium" @@ -655,10 +630,11 @@ def _get_data(eq, rho, alpha, names_field_line, names_0d_or_1dr=None): # Make a set of nodes along a single fieldline. iota = grid1dr.compress(seed_data["iota"]).item() + errorif(alpha != 0, NotImplementedError) zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) # Make grid that can separate into field lines via a reshape operation, # as expected by bounce_integral(). - grid_desc, grid_fl = desc_grid_from_field_line_coords(eq, rho, alpha, zeta) + grid_desc, grid_fl = desc_grid_from_field_line_coords(eq, rho, zeta=zeta) # Collect quantities that can be used as a seed to compute the # field line quantities over the grid mapped from field line coordinates. @@ -670,9 +646,7 @@ def _get_data(eq, rho, alpha, names_field_line, names_0d_or_1dr=None): for key, val in seed_data.items() if key in dep1dr } - data = {} - data.update(data0d) - data.update(data1d) + data = data0d | data1d # Compute field line quantities with precomputed dependencies. for name in names_field_line: if name in data: @@ -684,15 +658,8 @@ def _get_data(eq, rho, alpha, names_field_line, names_0d_or_1dr=None): @pytest.mark.unit -@pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_1d) def test_drift(): - """Test bounce-averaged drift with analytical expressions. 
- - Calculate bounce-averaged drifts using the bounce-average routine and - compare it with the analytical expression - # Note 2: Remove tests/test_equilibrium :: test_shifted_circle_geometry - # once all the epsilons and Gammas have been implemented and tested - """ + """Test bounce-averaged drift with analytical expressions.""" eq = Equilibrium.load(".//tests//inputs//low-beta-shifted-circle.h5") psi_boundary = eq.Psi / (2 * np.pi) psi = 0.25 * psi_boundary @@ -726,15 +693,11 @@ def test_drift(): knots=zeta, B_ref=B_ref, L_ref=L_ref, - quad=tanh_sinh_quad, # noqa: E800 - automorphism=(automorphism_arcsin, grad_automorphism_arcsin), # noqa: E800 - resolution=50, # noqa: E800 - # quad=np.polynomial.legendre.leggauss, # noqa: E800 - # automorphism=(automorphism_sin, grad_automorphism_sin), # noqa: E800 - # deg=50, # noqa: E800 + quad=np.polynomial.legendre.leggauss, + automorphism=(automorphism_sin, grad_automorphism_sin), + # tanh-sinh-arcsin quadrature requires 9 nodes to leg-gauss-sin's 5 + deg=5, check=True, - plot=False, - monotonic=False, ) B = data["|B|"] / B_ref @@ -795,13 +758,11 @@ def test_drift(): np.testing.assert_allclose(cvdrift, cvdrift_analytic_low_order, atol=2e-2) relative_shift = 1e-6 - pitch = ( - 1 - / np.linspace( - np.min(B) * (1 + relative_shift), - np.max(B) * (1 - relative_shift), - 100, - )[:-1] + pitch = 1 / np.linspace( + np.min(B) * (1 + relative_shift), + np.max(B) * (1 - relative_shift), + 100, + endpoint=False, ) k2 = 0.5 * ((1 - pitch * B0) / (epsilon * pitch * B0) + 1) I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 = _elliptic_incomplete(k2) @@ -835,14 +796,12 @@ def integrand_denom(B, pitch, Z): integrand=integrand_num, f=[cvdrift, gbdrift], pitch=pitch[:, np.newaxis], - method="akima", ) drift_numerical_denom = bounce_integrate( integrand=integrand_denom, f=[], pitch=pitch[:, np.newaxis], - method="akima", ) drift_numerical_num = np.squeeze(_filter_not_nan(drift_numerical_num)) @@ -855,7 +814,8 @@ def integrand_denom(B, pitch, Z): fig, ax = plt.subplots() ax.plot(1 / pitch, drift_analytic, label="analytic") ax.plot(1 / pitch, drift_numerical, label="numerical") - ax.set_xlabel(r"$1 / \lambda$") - ax.set_ylabel("Bounce averaged drift") + ax.set_xlabel(r"$\vert B \vert \sim 1 / \lambda$") + ax.set_ylabel("Bounce averaged binormal drift") + ax.set_title(r"Bounce averaged binormal drift, low $\beta$ shifted circle model") np.testing.assert_allclose(drift_numerical, drift_analytic, atol=5e-3, rtol=5e-2) - return fig + plt.show() diff --git a/tests/test_equilibrium.py b/tests/test_equilibrium.py index c0ba9bb023..e02f45a3c3 100644 --- a/tests/test_equilibrium.py +++ b/tests/test_equilibrium.py @@ -381,6 +381,7 @@ def test_shifted_circle_geometry(): expressions. 
These expression are available in Edmund Highcock's thesis on arxiv https://arxiv.org/pdf/1207.4419.pdf (Table 3.5) """ + # TODO: remove once all the epsilons and Gammas have been implemented and tested eq = Equilibrium.load(".//tests//inputs//low-beta-shifted-circle.h5") eq_keys = ["iota", "iota_r", "a", "rho", "psi"] From 889eb621dc7353afbf131da831415ea029f05374 Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 2 May 2024 15:45:06 -0400 Subject: [PATCH 155/241] Reduce resolution, add back image test, switch default quadrature --- desc/compute/bounce_integral.py | 20 +++++++------- devtools/dev-requirements_conda.yml | 3 ++- requirements.txt | 1 + requirements_conda.yml | 3 ++- tests/baseline/test_drift.png | Bin 0 -> 27616 bytes tests/test_bounce_integral.py | 39 ++++++++++------------------ 6 files changed, 29 insertions(+), 37 deletions(-) create mode 100644 tests/baseline/test_drift.png diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index b69bdf7262..96a3f6febe 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -2,6 +2,7 @@ from functools import partial +import orthax.legendre from interpax import CubicHermiteSpline, PchipInterpolator, PPoly, interp1d from matplotlib import pyplot as plt @@ -765,7 +766,7 @@ def grad_automorphism_sin(x): grad_automorphism_sin.__doc__ += "\n" + automorphism_sin.__doc__ -def tanh_sinh_quad(resolution, w=lambda x: 1, t_max=None): +def tanh_sinh(resolution, w=lambda x: 1, t_max=None): """Tanh-Sinh quadrature. Returns quadrature points xₖ and weights Wₖ for the approximate evaluation @@ -1103,8 +1104,8 @@ def loop(bp): knots, method, method_B, - check, - plot, + check=False, + plot=False, ) _, result = imap(loop, (jnp.moveaxis(bp1, -1, 0), jnp.moveaxis(bp2, -1, 0))) @@ -1125,8 +1126,8 @@ def bounce_integral( B, B_z_ra, knots, - quad=tanh_sinh_quad, - automorphism=(automorphism_arcsin, grad_automorphism_arcsin), + quad=orthax.legendre.leggauss, + automorphism=(automorphism_sin, grad_automorphism_sin), B_ref=1, L_ref=1, check=False, @@ -1192,8 +1193,9 @@ def bounce_integral( The quadrature scheme used to evaluate the integral. The returned quadrature points xₖ and weights wₖ should approximate ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). - Gauss-Legendre quadrature (``orthax.legendre.leggauss``) - with ``automorphism_sin`` can be competitive against the default choice. + Tanh-Sinh quadrature ``tanh_sinh`` with ``automorphism_arcsin`` + can be competitive against the default choice of Gauss-Legendre + quadrature with ``orthax.legendre.leggauss`` and `automorphism_sin``. automorphism : (callable, callable) The first callable should be an automorphism of the real interval [-1, 1]. The second callable should be the derivative of the first. @@ -1314,8 +1316,8 @@ def group_data_by_field_line(g): assert B_c.shape[-1] == B_z_ra_c.shape[-1] == knots.size - 1 spline = {"knots": knots, "B_c": B_c, "B_z_ra_c": B_z_ra_c} - if quad == tanh_sinh_quad: - kwargs.setdefault("resolution", 19) + if quad == orthax.legendre.leggauss: + kwargs.setdefault("deg", 19) x, w = quad(**kwargs) # The gradient of the transformation is the weight function w(x) of the integral. 
    auto, grad_auto = automorphism
diff --git a/devtools/dev-requirements_conda.yml b/devtools/dev-requirements_conda.yml
index a9e268ef19..1f2f694b69 100644
--- a/devtools/dev-requirements_conda.yml
+++ b/devtools/dev-requirements_conda.yml
@@ -13,11 +13,12 @@ dependencies:
   - termcolor
   - pip
   - pip:
-    - interpax >= 0.3.1
     # Conda only parses a single list of pip requirements.
     # If two pip lists are given, all but the last list is skipped.
+    - interpax >= 0.3.1
     - jax[cpu] >= 0.3.2, < 0.5.0
     - nvgpu
+    - orthax
     - plotly >= 5.16, < 6.0
     - pylatexenc >= 2.0, < 3.0
     # building the docs
diff --git a/requirements.txt b/requirements.txt
index 5ba308811b..142be71bc4 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,6 +7,7 @@ mpmath >= 1.0.0, < 2.0
 netcdf4 >= 1.5.4, < 2.0
 numpy >= 1.20.0, < 2.0.0
 nvgpu
+orthax
 plotly >= 5.16, < 6.0
 psutil
 pylatexenc >= 2.0, < 3.0
diff --git a/requirements_conda.yml b/requirements_conda.yml
index 5b26b11625..777c3f0a3e 100644
--- a/requirements_conda.yml
+++ b/requirements_conda.yml
@@ -12,10 +12,11 @@ dependencies:
   - termcolor
   - pip
   - pip:
-    - interpax >= 0.3.1
     # Conda only parses a single list of pip requirements.
     # If two pip lists are given, all but the last list is skipped.
+    - interpax >= 0.3.1
     - jax[cpu] >= 0.3.2, < 0.5.0
     - nvgpu
+    - orthax
     - plotly >= 5.16, < 6.0
     - pylatexenc >= 2.0, < 3.0
diff --git a/tests/baseline/test_drift.png b/tests/baseline/test_drift.png
new file mode 100644
index 0000000000000000000000000000000000000000..949c2a1916d988b26e9067fd8daf0f42d4c0975d
GIT binary patch
literal 27616
[... base85-encoded binary image data for the new tests/baseline/test_drift.png omitted ...]
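The switch to Gauss-Legendre as the default works because the automorphism tames the endpoint singularity: a map like x ↦ sin(πx/2) has derivative (π/2) cos(πx/2), which cancels the 1/√(1 − x²) behaviour of bounce integrands at the boundary, leaving a smooth transformed integrand that needs very few nodes. A short numpy check of that idea on ∫₋₁¹ dx / √(1 − x²) = π; the map is written out here for illustration, while the code above uses the module's own quadrature utilities:

import numpy as np

f = lambda x: 1 / np.sqrt(1 - x**2)             # integrable endpoint singularities
auto = lambda u: np.sin(np.pi * u / 2)          # automorphism of [-1, 1]
grad_auto = lambda u: np.pi / 2 * np.cos(np.pi * u / 2)

u, w = np.polynomial.legendre.leggauss(5)       # only a handful of nodes
approx = np.sum(w * f(auto(u)) * grad_auto(u))  # change of variables x = auto(u)
print(approx, np.pi)                            # agrees to machine precision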
z{yyj!9WB?_wq_~FS4}YfmgF!1Wsbng``aiFlkb>H9N&%hot`HKyVwAPS(GaP(4h$~ zDjfK~hVoSz(5^)C83_M=ewaqDsQi5RVh3H}DJh;!0XjzDwS>G%fZ5aoIS?r%Vs&;33z z-(5|K{Xb8SU({&Pdt-URPLHn0$)pg&G_O%T<5h3y^t$q{<^xkw&ysZL_21htvG#;+KgQsH|KZ zSenZYrma_5YvvtycfR)#lPo(MAa{U)MLUMTfjV}-uf(Wc+r)1t7oAPOkwPLyzu6cZ zteru>d~A5wfTLo%EG7y{IQifw=UkR<*qirS1G!##C_JH&36kMp8r$T zg5H20f4`3vM#v=CMKHl<=?0-ok(7p41}%uPv$Mg9Q1apFsUmRfE({h%qe$=lk&8>k z5)CHve|2}=aXGjD{>~$kqN41EmX^{$Qc_7vY43$5@sJd`%PJ|7(v;Gq9WA8sMADM> zpb|wiwP>IBb@w~x_d2ih=lScL=dY)_eZSZD`d;hvUiUS-ZWeLzALRibx*u!g3-7BR zzA=_m_1J%0IJK@t;nEOSl|vzA`9BKWMwlZ|b0T!ZB^Oh)vzTgL86s%hR~6bfJbaoQ zs74R4@dtB=1y_ZI?sLzF(8i3f?;Y#Q*+lk{RLkk#fJWM_9+vSM8Bp-SMBypLO^L>tK`lFtpV&edX-H4&3~IJvq$JD zHm2(67k!#66Wq(<^NXQH@Hu4JKnupYU=s6-h>lKteG^GUGG}&zI+v$zo|#>*VXj7? zBOg(A18*lPHD_vzymzfegrXz3oQXfbjuUqW`iSr>U|^De7sK}W21;IO&-}iV_1hs{ zXnd^f9Sgyw6Fi1ddM^IDVTtN8lD^9FBeo9z%pmu$z zdV6CWfTRc8;5WXUZ1Exy$~f(ZqIZuEb=V1QhYxXBVOp=R5&K)y1s8J}vo5v5og)ox z{>RznR0l^_tx=&Pz6r>Lc7O#VxERHjPkSr!Rh)(`oGRI>c}*Sad>0gc9e>+8*D)@W z*I`gAt8n;`{rY;Y-wO=0rHbnbK@ciznCno-38_7Jk(S&PyK67`*hP2LKG-nhxzRUn z_fD7dkDBS^sq+Rq^(}M14Ekp?GpexS*hgsX01mMRLguXAu(KZ=%Lo;-<=u}VUQ@RL zGe&i!xQy$VeENx>`=r8+hsyJskGrnjy>s~j0dFpj_13>|{oa)0phL9D7;@et#By(S z_N$rEyC{WDNk+ZgCllXevBQ*^tjbI5Vk4f*uWJL1f5;}e4b|sK)U8%A(o!mD0I4hT zT7nj!R(8);EyD~{I885~e?QnPPsx)@8R13u%s208wmxRcfAVZmx$wO>mFK-Hwy(oR zSG>63nZt|j+fo^>1DAoGD)hlnMu8Q9F{qP@m6l8RUM(V33h}<@U*KaybAYO*^ zAH07CRm5jCKSf3EwO}f2Y4*}fe&VCX6_K7~&Pk~4|4IHq{!hD+Pk9oJaV zd7S(Az8Qz)^mFOe-5Rjep87`uaDbTRL`#|=S+Wil8KivZtU#m+?iYrp=UFuC`J7>I z@n0cwUq-!!Ot2HGHZbG2^KAc1t}tk20MqNBl9ELS>e?mxf)p_{qo^y*C~D(wNTJjK zP3-NlZO{Gt&)*RBYaU#c(O96J))H^_a(*zMNLTz#MSQ6yH&&fFa|Y<&O4%tCNF?K6 zNwKtB$h23LZkh>LU?bR2nbV*464fgf#M$)OA~Ei{wal1Uy(8kxTOEbCt*LgAj}{Ib zkjkyPzF{r#E4a)ZPf)$TEspb$r-V3P@Mj|5So6+7Etu~XAn+7;aIvsy+gFgIY)zLH z%py*G8cr2!FunFs_n}uIxiZ>gkmAS3=HDglNh+r5?==5-AiBWdqqyU=t_c@!2-JxW4{{6BOGLcl_{d1vv|3^2M*_O+4D z_X~9Y+*qMS>@S#B*W@|j#*lGpPyv{k@4}753?4`plLG8of$E7BKFKoSdGY+lu!7K+ zSdTR3Ym=Ne7fkgBy>ycY*VHbJaev&vS z{l^-$Q9i?*BuxT`gsO-83oe}*%5ZhBa~d{wWpCYenluW2Z#^86V7$Z;_j#_9*z66D zi|VxxG`+kCrG9&3OE%WMgNc zd#I?XWT=?A5gN3MzzKfb$I654YqIxWYfcIPeu>`dtyFf8-Q0fC@$PpT0P5qgzGX&r zAXMt&`@buh+St~zoBaT+m}*t@5_-%>;0PVz2PE=Lcy_WtMD~O0B_%wC^@gq5&_~UW z`KD8=&bozaM>EV*887Q9f_XF8U*RFVFXzbxiw@Q)TRNKm7=|v?otXO@@HobT^4uygRjAlo>i>Wf+3&;XmM!Dj*gL=tn#41ID9rCk-*MY;~ zaBiD^!>rZuVbneu!<^7s>0=`dq-xNy;f@9(NX${};#T|HTyex~1-w8&TZ#(D*{>P_ z9O}w9HM+gem}dU|(TEHAQkjlq?Xv6+oztckOd5vYv0Trxu}a`C6`Y(z_|Y6eofxL6 zPgsAu@A%{WF0YV)A~h*g1YkWMQh`QFls(rfcZbhdg`{dXh~M7_1Y+8Af8_=hL&FWrjqyFGmKsdNGWoh zZto)|B+in{Tc-|Q{ab12Fp2G1fS4LUReuP#_DZ?2YF#Q$WQ1+;W!_>>-0>{xy|pKW zI3p$1`873GZ}45nXpFv@drar5*++_dVbBwTJpg!Okbrjd6BKMa zpkgN+9Nt1Y-lZ_~2(&?RPtAXQ!_*M`$M88$bh^jIMq5c9S0|}##$1+$mF@g)^S>rX zSFOImvwJ&}5A>_47|+fh^D_vxg^(CjMedpRMZys=c)sVFv}WnEgyl~ny zG{R5`wSvB{%BC`L>CwoIbkizO5;TBfI6sZpr37rgYBdsx#Z(x?NGX1og1WHk0{;K*j1y>j^t80eiVS5|X{EG{m#^bY+6jAP0GtNbd^ z`Yri^;VJuFh7raG3(W_O0BFD7Bf-M-Z!I$A3imUI0?|hEr}2wK9BKvlAcWLRO4au!lCe~828!06nZ5|0Ee$p(!nO5s> zSkFL1y13aif>j_s5Y{b|6hF}}Ibb~y+;J#P-sogqpFGI`gwLu>e8=bk=z+0z8AR`G zKpNn~eR}gV*yCGD@(teVjb(n#NOA~Eh0>;H1KS_28Uj%Hy*c(K=v!^bm$$&a(?u zZau^FdFHL!k!*H)goWKB8ZS=*c@@{&I0)X^CQ^P^d~#S!-b<93^Hlp0c|ODhg$tMT zH(b{wxbbDP=^e&n7Wu30INp*aPoRsHVSN`52mtsZ>1=yqJ_UUj)}c3(y0tu;gkDg%qxwJL6k##5;Hk+v9f9x^ zr}xILxpRZ(?#N5*{xr^W$z~0C8`2Gr?@kOxLIm0NRBT<7@|a;@R#TCh`($T}ai3&( zh`xs4$K{9amov~lEaeyk=oj_5GIH5n z(tzevhu^$R)w&rmlhJZ!pZoXd6~h}H9|ZY5qm9e@AX}oHXigIZUL)u3dzo5nFfE;1 zbCjf@z|;JZk2hX`_Liif_^gfOMb2Gwy3jT`jk7uv2LwLPLt*w=WgpEm`5vkqH~!KixGq 
znn!|y|7d)b8E6yD*jG?>frpi(j8G_#dMgnHh=K+-cVI(3C*{XIiOG_@_LySE9wlzD z8~B`Bu%vzN?Hkc@f-+9qr)O7sKSZix(wO+m@xegfVNU19+kSJjR(c3}d*S@-H0fNO z?86m|B-Tlxmcz%V?$#6awW)%N7wvk>bu*%1jzxH{wA;^{Sp$aY;bc+O-XbT2)ana# ztSPjiJ{kl6($o;YxVTew*ypTUK)Jj!=W=#0zA&w>^E$Cd`Saq^=Qv*zt`*DK&z4Z_ z*fk3;ovn@Z^Tg9qP}OOv-87`NjUnv%bvp$$UnWSjkxTiH^`7De55Pg12L`AA$){r# zTDsQ3Rcx^Gm{lGA`c_Y|bhh<`cC;z=)2!qDx*fzN@of|93tHc?#MeY7(XURtvn6Sy zWbHx}${i86xu(t8)GJ4Fc*YR+f~`G4*f7|4O{g!^JtO)Yx=SRolB5pG=NLRbL$xVm z;3gx0k`5Q6L0TAu0TZ4Q=HAlPty;u96x6AFifhe1+M-A?+QsBFbxP>SW=+lcUgG9_hX|o*mS40d zWD#!9(QHgs58yCWRavM;W-PUH*10FeXJ%eoqRxq@BY`xXy9v1zX8E~;5nc=4MqMtZ zkCF(n5$EX}wQdb7Z8A_|d%wqujUR-*!m4=gw+E)ePyO_}9MKk~#oQhU55{F;`A%C@ zYyUvZ=ll%4d~4N%Q9x2MNe59jtPpFx*F4dON+ z5GsDAU0fyjnCu(9*ke>;6=W9Obrj3A%$|P1(W$Ahf&%xTwF(XtC}l{M?GEW`ncS zj>mF<86`n7Vqbh75GBcoVZS1vu87}9sXs5mtX zJ5>gI-}U4eZ5nbY$Qz#9H(ti0M0@|-T1}Z^%_+QY{oI`|j?0EYDe@xuLUa1@oEqkv z47Ansip2Op7LiDW?_A^@uNg_1+|V-cHe^e3pVAu|U8ESH9;yMUZPWp4v)_w4mKS>` zk21|}oEiFa=9{nb4a6^Et&4DO(MmO`m*knR=M~aTS4j;`2(#yWAjDyVTuwZaZIuLJ zju0|pRU}$Ls3uqU_diOT|4O#Mj#it6G%%moMY9Kq{3Qij!PlNZ{5IAzEcAP`Z@5}K4WG?>eufs5oAV95ngv;2ck%MHc)CMm9&%b?nniehzx$XCCmeFS9sV_s%UU?6S zimsG(Ztb(QKYR^QiRdN5kS7ZFeSP=q0?*L%+{VGZ5_Gf<-rZbMGBK#7?ow2`Kd9f2 z>}Qk*s64B}v+r|Gch~QeWEdB@PXoOGb&dDmn`o#eKQw!~ z{pe%u$cHQPmwLrX!%j4*g07%Bx!V+n)eD=UIVD3VStu&=$=1QG2A|U`H6C^hN@vqA z-FIEN3my@9)3IugO;311j&qXB)TiyjMeFBWDH(3fta3oECToOU8@ z=2L8>5Iv5|*htZ~c4C^vtJFjCMV>s}4&+EeV=LTU*fsoWLa{oobg3r;Y!(W{ryi)cfL%4(5IuA>gn-jIpqf3${ch}jV^dbu%SC(b&A@5 z@tHq|*v#-JphP4*u>8&zV_b~6b!)%|@fN^Glf9MeNK8RO>w!xXYDBrgbzs5sJ3QIx zh;nu6Df1D=bv)kCQ!Wc&Kyz>Y2(K?}T)f7{t=tu>*YDs~-{&>w@!SDYv|^GKgvT*; zn+Hr_n4o%RZA_MBVLlX6Lf+ns+5QX^2AIxZW&<0R5Pv8@7+~1Jz$`?QAzjG1Ejv!V zQsES2^E_dp;39v3!hm3|fLWi8k`A)|isPw~uA-6J!nSPN>oZeBX?W+UfT9KXS9c+k z^1z=z&E`Zi#<*c;Iy$ypViTp*45h*q$Dd#j_IXaF0~SaIpr;nR)8hFf{F9T=af3vb zl?eliJ-_MUhN0`nxq;WK^V))4wJgW*mmr)?E|aNr?e#z^2RsCA+cjM{bF7s_>B zh^~;V+L1cD-*ZaqNR+fLk^E=d9CT@a#@{yJHQg6p_x=_9dTEUBfIzv;&$vOmcX{eJ z-Wk%YaB0t}L0y=>2rJ&}0bLGm48+>5nMwdYbkt7}eME} z3}ZiEt5+U8*4EuNt6{LxB$K;2MgM!k?-xjA13|~xOTx^5Rd5-?r$wyoo22ZU0Dyv> zsjI7+d)k}@wkwYKqjCLUQC?ZXzr(KrOY<^H8Ar$mI_}N;JVqZ`6V&+X4>kGm!# z;2TS#C3{~vY1*)m$vMK z&}g?3{pIYG3)-v&b=LO@Qnxm%BsL=c`s5b#>U1V+tx=UEKFkmS`10V8~*$M@O4n%7Ae>0 Wv~qvtN-UB`Qa_}vlBr~K?LPqV(%r=X literal 0 HcmV?d00001 diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 6702f8da28..c81f322690 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -9,6 +9,7 @@ from scipy import integrate from scipy.interpolate import CubicHermiteSpline from scipy.special import ellipkm1 +from tests.test_plotting import tol_1d from desc.backend import flatnonzero, jnp from desc.compute import data_index @@ -31,7 +32,7 @@ grad_automorphism_sin, plot_field_line_with_ripple, take_mask, - tanh_sinh_quad, + tanh_sinh, ) from desc.compute.utils import dot, get_data_deps, safediv from desc.equilibrium import Equilibrium @@ -398,7 +399,7 @@ def test_automorphism(): ) # test that floating point error is acceptable - x, w = tanh_sinh_quad(19) + x, w = tanh_sinh(19) assert np.all(np.abs(x) < 1) y = 1 / (1 - np.abs(x)) assert np.isfinite(y).all() @@ -437,7 +438,7 @@ def integrand(B, pitch, Z): B, B_z_ra, knots, - quad=tanh_sinh_quad, + quad=tanh_sinh, automorphism=(automorphism_arcsin, grad_automorphism_arcsin), resolution=18, check=True, @@ -446,27 +447,15 @@ def integrand(B, pitch, Z): assert tanh_sinh_arcsin.size == 1 np.testing.assert_allclose(tanh_sinh_arcsin, truth, rtol=rtol) - bounce_integrate, _ = bounce_integral( - B_sup_z, - B, - 
B_z_ra, - knots, - quad=np.polynomial.legendre.leggauss, - automorphism=(automorphism_sin, grad_automorphism_sin), - deg=16, - check=True, - ) + bounce_integrate, _ = bounce_integral(B_sup_z, B, B_z_ra, knots, deg=16, check=True) leg_gauss_sin = _filter_not_nan(bounce_integrate(integrand, [], pitch)) assert leg_gauss_sin.size == 1 np.testing.assert_allclose(leg_gauss_sin, truth, rtol=rtol) @pytest.mark.unit -def test_example_bounce_integral(): - """Test example code in bounce_integral docstring.""" - # This test also stress tests the bounce_points routine because - # the |B| spline that is generated from this combination of knots - # equilibrium etc. has edge cases for bounce point computations. +def test_bounce_integral_checks(): + """Test that all the internal correctness checks pass for real example.""" eq = get("HELIOTRON") rho = np.linspace(1e-12, 1, 6) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) @@ -481,7 +470,7 @@ def test_example_bounce_integral(): data["|B|_z|r,a"], knots, check=True, - resolution=3, # not checking quadrature accuracy in this test + deg=3, # not checking quadrature accuracy in this test ) def numerator(g_zz, B, pitch, Z): @@ -493,7 +482,7 @@ def denominator(B, pitch, Z): pitch = 1 / get_extrema(**spline) num = bounce_integrate(numerator, data["g_zz"], pitch) - den = bounce_integrate(denominator, [], pitch) + den = bounce_integrate(denominator, [], pitch, batched=False) average = num / den assert np.isfinite(average).any() @@ -507,7 +496,7 @@ def _fixed_elliptic(integrand, k, resolution): k = np.atleast_1d(k) a = np.zeros_like(k) b = 2 * np.arcsin(k) - x, w = tanh_sinh_quad(resolution, grad_automorphism_arcsin) + x, w = tanh_sinh(resolution, grad_automorphism_arcsin) Z = affine_bijection_reverse( automorphism_arcsin(x), a[..., np.newaxis], b[..., np.newaxis] ) @@ -658,6 +647,7 @@ def _get_data(eq, rho, alpha, names_field_line, names_0d_or_1dr=None): @pytest.mark.unit +@pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_1d) def test_drift(): """Test bounce-averaged drift with analytical expressions.""" eq = Equilibrium.load(".//tests//inputs//low-beta-shifted-circle.h5") @@ -693,8 +683,6 @@ def test_drift(): knots=zeta, B_ref=B_ref, L_ref=L_ref, - quad=np.polynomial.legendre.leggauss, - automorphism=(automorphism_sin, grad_automorphism_sin), # tanh-sinh-arcsin quadrature requires 9 nodes to leg-gauss-sin's 5 deg=5, check=True, @@ -704,7 +692,6 @@ def test_drift(): B0 = np.mean(B) # TODO: epsilon should be dimensionless, and probably computed in a way that # is independent of normalization length scales. - # I wouldn't really consider 0.05 << 1... maybe for a rough approximation. epsilon = L_ref * rho # Aspect ratio of the flux surface. assert np.isclose(epsilon, 0.05) iota = grid.compress(data["iota"]).item() @@ -714,7 +701,7 @@ def test_drift(): np.testing.assert_allclose(B, B_analytic, atol=3e-3) gradpar = L_ref * data["B^zeta"] / data["|B|"] - # TODO: This method of computing G0 suggests a fixed point iteration? + # This method of computing G0 suggests a fixed point iteration? 
G0 = data["a"] gradpar_analytic = G0 * (1 - epsilon * np.cos(theta_PEST)) gradpar_theta_analytic = iota * gradpar_analytic @@ -818,4 +805,4 @@ def integrand_denom(B, pitch, Z): ax.set_ylabel("Bounce averaged binormal drift") ax.set_title(r"Bounce averaged binormal drift, low $\beta$ shifted circle model") np.testing.assert_allclose(drift_numerical, drift_analytic, atol=5e-3, rtol=5e-2) - plt.show() + return fig From fb9cede556c2487b2d2f0a5f0aeac55dbb764ebc Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 2 May 2024 15:47:53 -0400 Subject: [PATCH 156/241] Clean up orthax import --- desc/compute/bounce_integral.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 96a3f6febe..cf0c010548 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -2,9 +2,9 @@ from functools import partial -import orthax.legendre from interpax import CubicHermiteSpline, PchipInterpolator, PPoly, interp1d from matplotlib import pyplot as plt +from orthax.legendre import leggauss from desc.backend import complex_sqrt, flatnonzero, imap, jnp, put_along_axis, take from desc.compute.utils import safediv @@ -1126,7 +1126,7 @@ def bounce_integral( B, B_z_ra, knots, - quad=orthax.legendre.leggauss, + quad=leggauss, automorphism=(automorphism_sin, grad_automorphism_sin), B_ref=1, L_ref=1, @@ -1195,7 +1195,7 @@ def bounce_integral( should approximate ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Tanh-Sinh quadrature ``tanh_sinh`` with ``automorphism_arcsin`` can be competitive against the default choice of Gauss-Legendre - quadrature with ``orthax.legendre.leggauss`` and `automorphism_sin``. + quadrature with `automorphism_sin``. automorphism : (callable, callable) The first callable should be an automorphism of the real interval [-1, 1]. The second callable should be the derivative of the first. @@ -1316,7 +1316,7 @@ def group_data_by_field_line(g): assert B_c.shape[-1] == B_z_ra_c.shape[-1] == knots.size - 1 spline = {"knots": knots, "B_c": B_c, "B_z_ra_c": B_z_ra_c} - if quad == orthax.legendre.leggauss: + if quad == leggauss: kwargs.setdefault("deg", 19) x, w = quad(**kwargs) # The gradient of the transformation is the weight function w(x) of the integral. From 71cae5dcd2a81de0b89aa025c5f088b23efee156 Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 2 May 2024 15:57:21 -0400 Subject: [PATCH 157/241] Generalize vmap to work with in_axes!=0 --- desc/backend.py | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index ccefd995c0..f9ff61658b 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -443,10 +443,13 @@ def complex_sqrt(x): complex_sqrt = np.emath.sqrt put_along_axis = np.put_along_axis - def imap(f, xs, out_axes=0): - """A numpy implementation of jax.lax.map.""" + def imap(f, xs, in_axes=0, out_axes=0): + """Generalizes jax.lax.map; uses numpy.""" if not isinstance(xs, np.ndarray): - raise NotImplementedError("Require numpy array input, or install jax.") + raise NotImplementedError( + "Require numpy array input, or install jax to support pytrees." + ) + xs = np.moveaxis(xs, source=in_axes, destination=0) return np.stack([f(x) for x in xs], axis=out_axes) def vmap(fun, in_axes=0, out_axes=0): @@ -471,15 +474,7 @@ def vmap(fun, in_axes=0, out_axes=0): Vectorized version of fun. """ - if in_axes != 0: - raise NotImplementedError( - f"Backend for numpy vmap for in_axes={in_axes} not implemented yet." 
-            )
-
-        def f(fun_inputs):
-            return imap(fun, fun_inputs, out_axes)
-
-        return f
+        return lambda xs: imap(fun, xs, in_axes, out_axes)

     def tree_stack(*args, **kwargs):
         """Stack pytree for numpy backend."""

From 7f9eb37fef798ea83cd317d61cc481c5fd7d8791 Mon Sep 17 00:00:00 2001
From: unalmis
Date: Fri, 3 May 2024 01:31:14 -0400
Subject: [PATCH 158/241] Reduce resolution where possible, increase resolution until convergence for drift test

---
 desc/compute/bounce_integral.py |  9 +++------
 tests/baseline/test_drift.png   | Bin 27616 -> 20083 bytes
 tests/test_bounce_integral.py   | 31 +++++++++++++++----------------
 3 files changed, 18 insertions(+), 22 deletions(-)

diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py
index cf0c010548..45d4870815 100644
--- a/desc/compute/bounce_integral.py
+++ b/desc/compute/bounce_integral.py
@@ -627,9 +627,9 @@ def add(lines):

     if pitch is not None:
         b = jnp.atleast_1d(1 / pitch)
-        for val in jnp.unique(b):
-            add(ax.axhline(val, color="tab:purple", alpha=0.75, label=r"$1 / \lambda$"))
-    bp1, bp2 = map(jnp.atleast_2d, (bp1, bp2))
+        for val in b:
+            add(ax.axhline(val, color="tab:purple", alpha=0.25, label=r"$1 / \lambda$"))
+    bp1, bp2 = jnp.atleast_2d(bp1, bp2)
     for i in range(bp1.shape[0]):
         bp1_i, bp2_i = map(_filter_not_nan, (bp1[i], bp2[i]))
         add(
@@ -1193,9 +1193,6 @@ def bounce_integral(
         The quadrature scheme used to evaluate the integral.
         The returned quadrature points xₖ and weights wₖ
         should approximate ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ).
-        Tanh-Sinh quadrature ``tanh_sinh`` with ``automorphism_arcsin``
-        can be competitive against the default choice of Gauss-Legendre
-        quadrature with `automorphism_sin``.
     automorphism : (callable, callable)
         The first callable should be an automorphism of the real interval [-1, 1].
         The second callable should be the derivative of the first.
diff --git a/tests/baseline/test_drift.png b/tests/baseline/test_drift.png
index 949c2a1916d988b26e9067fd8daf0f42d4c0975d..32530da61ecf07bb791476984af29434b5ea1b32 100644
GIT binary patch
literal 20083
[binary PNG data omitted]

literal 27616
[binary PNG data omitted]

diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py
index c81f322690..0d8847cde0 100644
--- a/tests/test_bounce_integral.py
+++ b/tests/test_bounce_integral.py
@@ -6,6 +6,7 @@
 import numpy as np
 import pytest
 from matplotlib import pyplot as plt
+from orthax.legendre import leggauss
 from scipy import integrate
 from scipy.interpolate import CubicHermiteSpline
 from scipy.special import ellipkm1
@@ -489,16 +490,19 @@ def denominator(B, pitch, Z):

 @partial(np.vectorize, excluded={0})
 def _adaptive_elliptic(integrand, k):
-    return integrate.quad(integrand, 0, 2 * np.arcsin(k), args=(k,))[0]
+    a = 0
+    b = 2 * np.arcsin(k)
+    return integrate.quad(integrand, a, b, args=(k,), points=b)[0]


-def _fixed_elliptic(integrand, k, resolution):
+def _fixed_elliptic(integrand, k, deg):
     k = np.atleast_1d(k)
     a = np.zeros_like(k)
     b = 2 * np.arcsin(k)
-    x, w = tanh_sinh(resolution, grad_automorphism_arcsin)
+    x, w = leggauss(deg)
+    w = w * grad_automorphism_sin(x)
     Z = affine_bijection_reverse(
-        automorphism_arcsin(x), a[..., np.newaxis], b[..., np.newaxis]
+        automorphism_sin(x), a[..., np.newaxis], b[..., np.newaxis]
     )
     k = k[...,
np.newaxis] quad = np.dot(integrand(Z, k), w) * grad_affine_bijection_reverse(a, b) @@ -514,8 +518,8 @@ def _elliptic_incomplete(k2): K = _adaptive_elliptic(K_integrand, k) E = _adaptive_elliptic(E_integrand, k) # Make sure scipy's adaptive quadrature is not broken. - np.testing.assert_allclose(K, _fixed_elliptic(K_integrand, k, 9), rtol=1e-3) - np.testing.assert_allclose(E, _fixed_elliptic(E_integrand, k, 9), rtol=1e-3) + np.testing.assert_allclose(K, _fixed_elliptic(K_integrand, k, 10)) + np.testing.assert_allclose(E, _fixed_elliptic(E_integrand, k, 10)) # Here are the notes that explain these integrals. # https://github.com/PlasmaControl/DESC/files/15010927/bavg.pdf. @@ -558,9 +562,8 @@ def _elliptic_incomplete(k2): _fixed_elliptic( lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.cos(Z), k, - resolution=11, + deg=10, ), - rtol=1e-2, ) np.testing.assert_allclose( I_7, @@ -683,8 +686,7 @@ def test_drift(): knots=zeta, B_ref=B_ref, L_ref=L_ref, - # tanh-sinh-arcsin quadrature requires 9 nodes to leg-gauss-sin's 5 - deg=5, + deg=28, # converges to absolute and relative tolerance of 1e-7 check=True, ) @@ -797,12 +799,9 @@ def integrand_denom(B, pitch, Z): drift_numerical = drift_numerical_num / drift_numerical_denom msg = "There should be one bounce integral per pitch in this example." assert drift_numerical.size == drift_analytic.size, msg + np.testing.assert_allclose(drift_numerical, drift_analytic, atol=5e-3, rtol=5e-2) fig, ax = plt.subplots() - ax.plot(1 / pitch, drift_analytic, label="analytic") - ax.plot(1 / pitch, drift_numerical, label="numerical") - ax.set_xlabel(r"$\vert B \vert \sim 1 / \lambda$") - ax.set_ylabel("Bounce averaged binormal drift") - ax.set_title(r"Bounce averaged binormal drift, low $\beta$ shifted circle model") - np.testing.assert_allclose(drift_numerical, drift_analytic, atol=5e-3, rtol=5e-2) + ax.plot(1 / pitch, drift_analytic) + ax.plot(1 / pitch, drift_numerical) return fig From 2c20bb519dde2ef3cb8e832df0da8000f01a6009 Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 3 May 2024 01:36:20 -0400 Subject: [PATCH 159/241] Remove changes to grid spacing as that is done in #985 --- desc/grid.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/desc/grid.py b/desc/grid.py index e917b4505e..675640440e 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -541,7 +541,7 @@ def create_meshgrid(cls, a, b, c): _inverse_zeta_idx=inverse_c_idx, ) - def __init__(self, nodes, sort=False, jitable=False, spacing=None, **kwargs): + def __init__(self, nodes, sort=False, jitable=False, **kwargs): # Python 3.3 (PEP 412) introduced key-sharing dictionaries. # This change measurably reduces memory usage of objects that # define all attributes in their __init__ method. 
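
Stepping back to the `_fixed_elliptic` change in the test diff of the previous patch: it builds a fixed quadrature from Gauss-Legendre nodes reweighted by the sin automorphism. The following is a minimal standalone sketch of that recipe using only numpy and scipy; the helpers `automorphism_sin`, `grad_automorphism_sin`, and `fixed_quad_sin` are local stand-ins that mirror the patch's utilities (nothing here is imported from DESC), and the sample value k = 0.9 is purely illustrative.

import numpy as np
from numpy.polynomial.legendre import leggauss
from scipy import integrate


def automorphism_sin(x):
    # [-1, 1] -> [-1, 1]; the derivative vanishes at the endpoints.
    return np.sin(np.pi * x / 2)


def grad_automorphism_sin(x):
    return 0.5 * np.pi * np.cos(np.pi * x / 2)


def fixed_quad_sin(f, a, b, deg=10):
    # Gauss-Legendre nodes reweighted by the automorphism derivative,
    # then mapped affinely from [-1, 1] onto [a, b].
    x, w = leggauss(deg)
    w = w * grad_automorphism_sin(x)
    z = a + (b - a) * (automorphism_sin(x) + 1) / 2
    return np.sum(f(z) * w) * (b - a) / 2


k = 0.9  # illustrative value
b = 2 * np.arcsin(k)
f = lambda z: 2 * np.cos(z) / np.sqrt(k**2 - np.sin(z / 2) ** 2)  # singular at z = b

fixed = fixed_quad_sin(f, 0.0, b, deg=10)
adaptive, _ = integrate.quad(f, 0.0, b, points=b)
print(fixed, adaptive)  # the two estimates should agree to several digits

The integrand blows up like an inverse square root at the upper endpoint, yet the vanishing derivative of the sin map suppresses that growth at the mapped nodes, which is why a ten-point rule already tracks the adaptive reference.
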
@@ -549,8 +549,6 @@ def __init__(self, nodes, sort=False, jitable=False, spacing=None, **kwargs): self._sym = False self._node_pattern = "custom" self._nodes, self._spacing = self._create_nodes(nodes) - if spacing is not None: - self._spacing = spacing if sort: self._sort_nodes() if jitable: From 81145516a0063c5cfb485c57021e4a53491d46ee Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 4 May 2024 18:31:15 -0400 Subject: [PATCH 160/241] Make derivative suppression option available --- desc/compute/bounce_integral.py | 85 ++++++++++++++++++++------------- tests/test_bounce_integral.py | 8 ++-- 2 files changed, 55 insertions(+), 38 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 45d4870815..930fb6efde 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -688,7 +688,8 @@ def automorphism_arcsin(x): The gradient of the arcsin automorphism introduces a singularity that augments the singularity in the bounce integral. Therefore, the quadrature scheme - used to evaluate the integral must work well on singular integrals. + used to evaluate the integral must work well on functions with large + derivative near the boundary. The arcsin automorphism pulls points in [−1, 1] away from the boundary. This can reduce floating point error if paired with a quadrature @@ -719,15 +720,15 @@ def grad_automorphism_arcsin(x): grad_automorphism_arcsin.__doc__ += "\n" + automorphism_arcsin.__doc__ -def automorphism_sin(x, eps=None): +def automorphism_sin(x, s=0, m=10): """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. - The derivative of the sin automorphism is Lipschitz. + The gradient of the sin automorphism is Lipschitz. When this automorphism is used as the change of variable map for the bounce integral, the Lipschitzness prevents generation of new singularities. - Furthermore, its derivative vanishes like the integrand of the elliptic - integral of the second kind E(φ | 1), suppressing the singularity in the - bounce integrand. + Furthermore, its derivative vanishes to zero slowly near the boundary, + which will suppress the large derivatives near the boundary of singular + integrals. Therefore, this automorphism pulls the mass of the bounce integral away from the singularities, which should improve convergence of the quadrature @@ -742,8 +743,10 @@ def automorphism_sin(x, eps=None): ---------- x : Array Points to transform. - eps : float - Buffer for floating point error. + s : float + Strength of derivative suppression, s ∈ [0, 1]. + m : int + Number of machine epsilons used for floating point error buffer. Returns ------- @@ -751,38 +754,55 @@ def automorphism_sin(x, eps=None): Transformed points. """ - y = jnp.sin(jnp.pi * x / 2) - if eps is None: - eps = 1e3 * jnp.finfo(jnp.array(1.0).dtype).eps + errorif(not (0 <= s <= 1)) + # s = 0 -> derivative vanishes like cosine. + # s = 1 -> derivative vanishes like cosine^k. + # Integrate cosine, cosine^k, and normalize codomain to [-1, 1] to get + # two automorphisms. Connect with homotopy, jointly continuous in s ∈ [0, 1]. + # Then derivative suppression is continuous in s for finite k. + # As k → ∞ and s → 1, all integrable singularities and oscillations + # are removed; the integrand becomes a delta function. + # Setting s = 0 is optimal to integrate singularities of the form 1 / (1 - |x|) + # Setting s = 1 is optimal to integrate singularities of the form 1 / (1 - |x|)^k. 
+ y0 = jnp.sin(jnp.pi * x / 2) + y1 = x + jnp.sin(jnp.pi * x) / jnp.pi # k = 2 + y = (1 - s) * y0 + s * y1 + eps = m * jnp.finfo(jnp.array(1.0).dtype).eps return jnp.clip(y, -1 + eps, 1 - eps) -def grad_automorphism_sin(x): +def grad_automorphism_sin(x, s=0): """Gradient of sin automorphism.""" - dy_dx = jnp.pi * jnp.cos(jnp.pi * x / 2) / 2 + dy0_dx = jnp.pi * jnp.cos(jnp.pi * x / 2) / 2 + dy1_dx = 1 + jnp.cos(jnp.pi * x) + dy_dx = (1 - s) * dy0_dx + s * dy1_dx return dy_dx grad_automorphism_sin.__doc__ += "\n" + automorphism_sin.__doc__ -def tanh_sinh(resolution, w=lambda x: 1, t_max=None): +def tanh_sinh(deg, w=lambda x: 1, m=10): """Tanh-Sinh quadrature. Returns quadrature points xₖ and weights Wₖ for the approximate evaluation of the integral ∫₋₁¹ w(x) f(x) dx ≈ ∑ₖ Wₖ f(xₖ). + Notes + ----- + This quadrature is ill-suited for high-precision accuracy due to numerical + amplification of errors in computing quadrature points. + Parameters ---------- - resolution: int - Number of quadrature points, preferably odd. + deg: int + Number of quadrature points. w : callable Weight function defined, positive, and continuous on (-1, 1). - t_max : float - The positive limit of quadrature points to be mapped. - Larger limit implies better results, but limited due to overflow in sinh. - A typical value is 3.14. - Computed automatically if not supplied. + m : int + Number of machine epsilons used for floating point error buffer. + Larger implies less floating point error, but increases the + minimum achievable error. Returns ------- @@ -792,19 +812,17 @@ def tanh_sinh(resolution, w=lambda x: 1, t_max=None): Quadrature weights. """ - if t_max is None: - # boundary of integral - x_max = jnp.array(1.0) - # buffer for floating point error - x_max = x_max - 10 * jnp.finfo(x_max.dtype).eps - # inverse of tanh-sinh transformation - t_max = jnp.arcsinh(2 * jnp.arctanh(x_max) / jnp.pi) - kh = jnp.linspace(-t_max, t_max, resolution) - h = 2 * t_max / (resolution - 1) - arg = 0.5 * jnp.pi * jnp.sinh(kh) - x = jnp.tanh(arg) + # buffer to avoid numerical instability + x_max = jnp.array(1.0) + x_max = x_max - m * jnp.finfo(x_max.dtype).eps + t_max = jnp.arcsinh(2 * jnp.arctanh(x_max) / jnp.pi) + # maximal-spacing scheme, doi.org/10.48550/arXiv.2007.15057 + t = jnp.linspace(-t_max, t_max, deg) + dt = 2 * t_max / (deg - 1) + arg = 0.5 * jnp.pi * jnp.sinh(t) + x = jnp.tanh(arg) # x = g(t) # weights for Tanh-Sinh quadrature ∫₋₁¹ f(x) dx ≈ ∑ₖ ωₖ f(xₖ) - W = 0.5 * jnp.pi * h * jnp.cosh(kh) / jnp.cosh(arg) ** 2 + W = 0.5 * jnp.pi * jnp.cosh(t) / jnp.cosh(arg) ** 2 * dt # W = (dg/dt) dt W = W * w(x) return x, W @@ -1316,7 +1334,6 @@ def group_data_by_field_line(g): if quad == leggauss: kwargs.setdefault("deg", 19) x, w = quad(**kwargs) - # The gradient of the transformation is the weight function w(x) of the integral. auto, grad_auto = automorphism w = w * grad_auto(x) # Recall x = auto_forward(_affine_bijection_forward(ζ, ζ_b₁, ζ_b₂)). 
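The quadrature composition in the hunks above (multiply the Gauss-Legendre weights by the gradient of the automorphism, then map the nodes through the automorphism itself) is easier to see in isolation. Below is a minimal standalone sketch assuming only NumPy; ``sin_auto`` and ``grad_sin_auto`` are illustrative stand-ins for the s = 0 case of ``automorphism_sin`` and ``grad_automorphism_sin``, not the DESC API::

    import numpy as np
    from numpy.polynomial.legendre import leggauss

    def sin_auto(t):
        return np.sin(np.pi * t / 2)

    def grad_sin_auto(t):
        return np.pi / 2 * np.cos(np.pi * t / 2)

    def quad_with_sin_auto(f, deg):
        # Gauss-Legendre nodes and weights on [-1, 1] in the variable t.
        t, w = leggauss(deg)
        # x = sin(pi t / 2) pulls the nodes toward the interior, and the
        # weights pick up dx/dt, which vanishes at the endpoints and damps
        # the integrand's growth there.
        return np.dot(f(sin_auto(t)), w * grad_sin_auto(t))

    # Integrand with inverse square root singularities at x = -1 and x = 1;
    # the exact value of the integral is pi.
    print(quad_with_sin_auto(lambda x: 1 / np.sqrt(1 - x**2), deg=5))
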
diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 0d8847cde0..7eb9a31b91 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -402,11 +402,11 @@ def test_automorphism(): # test that floating point error is acceptable x, w = tanh_sinh(19) assert np.all(np.abs(x) < 1) - y = 1 / (1 - np.abs(x)) + y = 1 / np.sqrt(1 - np.abs(x)) assert np.isfinite(y).all() - y = 1 / (1 - np.abs(automorphism_sin(x))) + y = 1 / np.sqrt(1 - np.abs(automorphism_sin(x))) assert np.isfinite(y).all() - y = 1 / (1 - np.abs(automorphism_arcsin(x))) + y = 1 / np.sqrt(1 - np.abs(automorphism_arcsin(x))) assert np.isfinite(y).all() @@ -441,7 +441,7 @@ def integrand(B, pitch, Z): knots, quad=tanh_sinh, automorphism=(automorphism_arcsin, grad_automorphism_arcsin), - resolution=18, + deg=18, check=True, ) tanh_sinh_arcsin = _filter_not_nan(bounce_integrate(integrand, [], pitch)) From 5ef6e95d088b979b8a7811bcf7756d49c0c461e4 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 4 May 2024 22:17:08 -0400 Subject: [PATCH 161/241] Tighten tolerance in test after trying out preiciosn things with mpmath --- desc/compute/bounce_integral.py | 34 +++++++++++++-------------------- tests/test_bounce_integral.py | 25 ++++++++++++------------ 2 files changed, 25 insertions(+), 34 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 930fb6efde..c2f8fd901a 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -782,23 +782,16 @@ def grad_automorphism_sin(x, s=0): grad_automorphism_sin.__doc__ += "\n" + automorphism_sin.__doc__ -def tanh_sinh(deg, w=lambda x: 1, m=10): +def tanh_sinh(deg, m=10): """Tanh-Sinh quadrature. - Returns quadrature points xₖ and weights Wₖ for the approximate evaluation - of the integral ∫₋₁¹ w(x) f(x) dx ≈ ∑ₖ Wₖ f(xₖ). - - Notes - ----- - This quadrature is ill-suited for high-precision accuracy due to numerical - amplification of errors in computing quadrature points. + Returns quadrature points xₖ and weights wₖ for the approximate evaluation + of the integral ∫₋₁¹ f(x) dx ≈ ∑ₖ wₖ f(xₖ). Parameters ---------- deg: int Number of quadrature points. - w : callable - Weight function defined, positive, and continuous on (-1, 1). m : int Number of machine epsilons used for floating point error buffer. Larger implies less floating point error, but increases the @@ -808,7 +801,7 @@ def tanh_sinh(deg, w=lambda x: 1, m=10): ------- x : Array Quadrature points. - W : Array + w : Array Quadrature weights. """ @@ -821,10 +814,8 @@ def tanh_sinh(deg, w=lambda x: 1, m=10): dt = 2 * t_max / (deg - 1) arg = 0.5 * jnp.pi * jnp.sinh(t) x = jnp.tanh(arg) # x = g(t) - # weights for Tanh-Sinh quadrature ∫₋₁¹ f(x) dx ≈ ∑ₖ ωₖ f(xₖ) - W = 0.5 * jnp.pi * jnp.cosh(t) / jnp.cosh(arg) ** 2 * dt # W = (dg/dt) dt - W = W * w(x) - return x, W + w = 0.5 * jnp.pi * jnp.cosh(t) / jnp.cosh(arg) ** 2 * dt # w = (dg/dt) dt + return x, w _repeated_docstring = """w : Array, shape(w.size, ) @@ -1211,7 +1202,7 @@ def bounce_integral( The quadrature scheme used to evaluate the integral. The returned quadrature points xₖ and weights wₖ should approximate ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). - automorphism : (callable, callable) + automorphism : (callable, callable) or None The first callable should be an automorphism of the real interval [-1, 1]. The second callable should be the derivative of the first. 
The inverse of the supplied automorphism is composed with the affine @@ -1334,11 +1325,12 @@ def group_data_by_field_line(g): if quad == leggauss: kwargs.setdefault("deg", 19) x, w = quad(**kwargs) - auto, grad_auto = automorphism - w = w * grad_auto(x) - # Recall x = auto_forward(_affine_bijection_forward(ζ, ζ_b₁, ζ_b₂)). - # Apply reverse automorphism to quadrature points. - x = auto(x) + if automorphism is not None: + auto, grad_auto = automorphism + w = w * grad_auto(x) + # Recall x = auto_forward(_affine_bijection_forward(ζ, ζ_b₁, ζ_b₂)). + # Apply reverse automorphism to quadrature points. + x = auto(x) def bounce_integrate(integrand, f, pitch, method="akima", batched=True): """Bounce integrate ∫ f(ℓ) dℓ. diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 7eb9a31b91..06d59d9b76 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -400,7 +400,7 @@ def test_automorphism(): ) # test that floating point error is acceptable - x, w = tanh_sinh(19) + x = tanh_sinh(19)[0] assert np.all(np.abs(x) < 1) y = 1 / np.sqrt(1 - np.abs(x)) assert np.isfinite(y).all() @@ -421,18 +421,18 @@ def test_bounce_quadrature(): # appears often in transformations. v = 7 truth = v * 2 * ellipkm1(p) - rtol = 1e-3 + rtol = 1e-4 def integrand(B, pitch, Z): return 1 / jnp.sqrt(1 - pitch * m * B) bp1 = -np.pi / 2 * v bp2 = -bp1 - knots = np.linspace(bp1, bp2, 15) + knots = np.linspace(bp1, bp2, 50) B_sup_z = np.ones(knots.size) B = np.clip(np.sin(knots / v) ** 2, 1e-7, 1) B_z_ra = np.sin(2 * knots / v) / v - pitch = 1 + np.finfo(np.array(1.0).dtype).eps + pitch = 1 + 50 * jnp.finfo(jnp.array(1.0).dtype).eps bounce_integrate, _ = bounce_integral( B_sup_z, @@ -440,15 +440,15 @@ def integrand(B, pitch, Z): B_z_ra, knots, quad=tanh_sinh, - automorphism=(automorphism_arcsin, grad_automorphism_arcsin), - deg=18, + automorphism=None, + deg=40, check=True, ) - tanh_sinh_arcsin = _filter_not_nan(bounce_integrate(integrand, [], pitch)) - assert tanh_sinh_arcsin.size == 1 - np.testing.assert_allclose(tanh_sinh_arcsin, truth, rtol=rtol) + tanh_sinh_vanilla = _filter_not_nan(bounce_integrate(integrand, [], pitch)) + assert tanh_sinh_vanilla.size == 1 + np.testing.assert_allclose(tanh_sinh_vanilla, truth, rtol=rtol) - bounce_integrate, _ = bounce_integral(B_sup_z, B, B_z_ra, knots, deg=16, check=True) + bounce_integrate, _ = bounce_integral(B_sup_z, B, B_z_ra, knots, deg=25, check=True) leg_gauss_sin = _filter_not_nan(bounce_integrate(integrand, [], pitch)) assert leg_gauss_sin.size == 1 np.testing.assert_allclose(leg_gauss_sin, truth, rtol=rtol) @@ -501,9 +501,8 @@ def _fixed_elliptic(integrand, k, deg): b = 2 * np.arcsin(k) x, w = leggauss(deg) w = w * grad_automorphism_sin(x) - Z = affine_bijection_reverse( - automorphism_sin(x), a[..., np.newaxis], b[..., np.newaxis] - ) + x = automorphism_sin(x) + Z = affine_bijection_reverse(x, a[..., np.newaxis], b[..., np.newaxis]) k = k[..., np.newaxis] quad = np.dot(integrand(Z, k), w) * grad_affine_bijection_reverse(a, b) return quad From f4710c3e38cd82579523f16d0f9da233e9ce71be Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 21 May 2024 10:13:30 -0500 Subject: [PATCH 162/241] Modify create meshgrid for new grid attributes --- desc/compute/bounce_integral.py | 3 ++- desc/grid.py | 14 ++++++++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index d319793777..3171a2f0cf 100644 --- a/desc/compute/bounce_integral.py +++ 
b/desc/compute/bounce_integral.py @@ -1420,7 +1420,7 @@ def desc_grid_from_field_line_coords( Clebsch-Type field-line coordinate grid. """ - grid_fl = Grid.create_meshgrid(rho, alpha, zeta) + grid_fl = Grid.create_meshgrid(rho, alpha, zeta, coordinates="raz") coords_desc = eq.map_coordinates( grid_fl.nodes, inbasis=("rho", "alpha", "zeta"), @@ -1431,6 +1431,7 @@ def desc_grid_from_field_line_coords( nodes=coords_desc, sort=False, jitable=True, + coordinates="rtz", _unique_rho_idx=grid_fl.unique_rho_idx, _inverse_rho_idx=grid_fl.inverse_rho_idx, ) diff --git a/desc/grid.py b/desc/grid.py index 34b4f8dbde..1f225c56a1 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -579,13 +579,18 @@ class Grid(_Grid): """ @classmethod - def create_meshgrid(cls, a, b, c): - """Create a meshgrid from the given coordinates. + def create_meshgrid(cls, a, b, c, coordinates="rtz"): + """Create a meshgrid from the given coordinates ina jitable manner. Parameters ---------- a, b, c : Array, Array, Array Sorted unique values of each coordinate. + coordinates : str + Coordinates that are specified by the nodes a, b, c, respectively. + raz : rho, alpha, zeta + rpz : rho, theta_PEST, zeta + rtz : rho, theta, zeta Returns ------- @@ -615,11 +620,12 @@ def create_meshgrid(cls, a, b, c): nodes=nodes, sort=False, jitable=True, + coordinates=coordinates, _unique_rho_idx=unique_a_idx, - _unique_theta_idx=unique_b_idx, + _unique_poloidal_idx=unique_b_idx, _unique_zeta_idx=unique_c_idx, _inverse_rho_idx=inverse_a_idx, - _inverse_theta_idx=inverse_b_idx, + _inverse_poloidal_idx=inverse_b_idx, _inverse_zeta_idx=inverse_c_idx, ) From 4961cae79950d51472a402217544c9400345b189 Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 22 May 2024 16:38:51 -0500 Subject: [PATCH 163/241] Use source grid attributes in desc_grid_from_field_line_coords --- desc/compute/bounce_integral.py | 52 ++------------------------------- desc/equilibrium/coords.py | 42 ++++++++++++++++++++++++++ desc/grid.py | 3 +- tests/test_bounce_integral.py | 14 ++++----- 4 files changed, 53 insertions(+), 58 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 3171a2f0cf..df5516839e 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -8,7 +8,6 @@ from desc.backend import complex_sqrt, flatnonzero, imap, jnp, put_along_axis, take from desc.compute.utils import safediv -from desc.grid import Grid from desc.utils import errorif @@ -1249,7 +1248,7 @@ def bounce_integral( rho = np.linspace(1e-12, 1, 6) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) knots = np.linspace(-2 * np.pi, 2 * np.pi, 20) - grid_desc, grid_fl = desc_grid_from_field_line_coords(eq, rho, alpha, knots) + grid_desc = desc_grid_from_field_line_coords(eq, rho, alpha, knots) data = eq.compute( ["B^zeta", "|B|", "|B|_z|r,a", "g_zz"], grid=grid_desc, @@ -1278,6 +1277,7 @@ def denominator(B, pitch, Z): print(average[:, i, j]) # are the bounce averages along the field line with nodes # given in Clebsch-Type field-line coordinates ρ, α, ζ + grid_fl = grid_desc.source_grid nodes = grid_fl.nodes.reshape(rho.size, alpha.size, -1, 3) print(nodes[i, j]) # for the pitch values stored in @@ -1388,51 +1388,3 @@ def bounce_integrate(integrand, f, pitch, method="akima", batched=True): return result return bounce_integrate, spline - - -def desc_grid_from_field_line_coords( - eq, - rho=jnp.linspace(1e-7, 1, 10), - alpha=jnp.array([0]), - zeta=jnp.linspace(-3 * jnp.pi, 3 * jnp.pi, 40), -): - """Return DESC coordinate grid from given Clebsch-Type 
field-line coordinates. - - Create a meshgrid from the given field line coordinates, - and return the equivalent DESC coordinate grid. - - Parameters - ---------- - eq : Equilibrium - Equilibrium on which to perform coordinate mapping. - rho : ndarray - Sorted unique flux surface label coordinates. - alpha : ndarray - Sorted unique field line label coordinates over a constant rho surface. - zeta : ndarray - Sorted unique field line-following ζ coordinates. - - Returns - ------- - grid_desc : Grid - DESC coordinate grid for the given field line coordinates. - grid_fl : Grid - Clebsch-Type field-line coordinate grid. - - """ - grid_fl = Grid.create_meshgrid(rho, alpha, zeta, coordinates="raz") - coords_desc = eq.map_coordinates( - grid_fl.nodes, - inbasis=("rho", "alpha", "zeta"), - outbasis=("rho", "theta", "zeta"), - period=(jnp.inf, 2 * jnp.pi, jnp.inf), - ) - grid_desc = Grid( - nodes=coords_desc, - sort=False, - jitable=True, - coordinates="rtz", - _unique_rho_idx=grid_fl.unique_rho_idx, - _inverse_rho_idx=grid_fl.inverse_rho_idx, - ) - return grid_desc, grid_fl diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index ba243349d5..5dbc710cec 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -504,3 +504,45 @@ def to_sfl( eq_sfl.L_lmn = L_lmn_sfl return eq_sfl + + +def desc_grid_from_field_line_coords(eq, rho, alpha, zeta): + """Return DESC coordinate grid from given Clebsch-Type field-line coordinates. + + Create a meshgrid from the given field line coordinates, + and return the equivalent DESC coordinate grid. + + Parameters + ---------- + eq : Equilibrium + Equilibrium on which to perform coordinate mapping. + rho : ndarray + Sorted unique flux surface label coordinates. + alpha : ndarray + Sorted unique field line label coordinates over a constant rho surface. + zeta : ndarray + Sorted unique field line-following ζ coordinates. + + Returns + ------- + grid_desc : Grid + DESC coordinate grid for the given field line coordinates. + + """ + grid_fl = Grid.create_meshgrid(rho, alpha, zeta, coordinates="raz") + coords_desc = eq.map_coordinates( + grid_fl.nodes, + inbasis=("rho", "alpha", "zeta"), + outbasis=("rho", "theta", "zeta"), + period=(jnp.inf, 2 * jnp.pi, jnp.inf), + ) + grid_desc = Grid( + nodes=coords_desc, + coordinates="rtz", + source_grid=grid_fl, + sort=False, + jitable=True, + _unique_rho_idx=grid_fl.unique_rho_idx, + _inverse_rho_idx=grid_fl.inverse_rho_idx, + ) + return grid_desc diff --git a/desc/grid.py b/desc/grid.py index 24ea149661..619c024e69 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -584,7 +584,7 @@ class Grid(_Grid): @classmethod def create_meshgrid(cls, a, b, c, coordinates="rtz"): - """Create a meshgrid from the given coordinates ina jitable manner. + """Create a meshgrid from the given coordinates in a jitable manner. 
Parameters ---------- @@ -631,6 +631,7 @@ def create_meshgrid(cls, a, b, c, coordinates="rtz"): _inverse_rho_idx=inverse_a_idx, _inverse_poloidal_idx=inverse_b_idx, _inverse_zeta_idx=inverse_c_idx, + is_meshgrid=True, ) def __init__( diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 23c0704fa2..3c9f335a3c 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -25,7 +25,6 @@ bounce_integral, bounce_points, composite_linspace, - desc_grid_from_field_line_coords, get_extrema, grad_affine_bijection, grad_automorphism_arcsin, @@ -36,6 +35,7 @@ ) from desc.compute.utils import dot, get_data_deps, safediv from desc.equilibrium import Equilibrium +from desc.equilibrium.coords import desc_grid_from_field_line_coords from desc.examples import get from desc.grid import Grid, LinearGrid from desc.utils import errorif, only1 @@ -466,7 +466,7 @@ def test_bounce_integral_checks(): rho = np.linspace(1e-12, 1, 6) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) knots = np.linspace(-2 * np.pi, 2 * np.pi, 20) - grid_desc, grid_fl = desc_grid_from_field_line_coords(eq, rho, alpha, knots) + grid_desc = desc_grid_from_field_line_coords(eq, rho, alpha, knots) grid_fsa = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, sym=eq.sym, NFP=eq.NFP) data = eq.compute(["iota"], grid=grid_fsa) data = {"iota": grid_desc.copy_data_from_other(data["iota"], grid_fsa)} @@ -608,8 +608,6 @@ def _get_data(eq, rho, alpha, names_field_line, names_0d_or_1dr=None): Computed quantities. grid_desc : Grid Grid on which the returned quantities can be broadcast on. - grid_fl : Grid - Clebsch-Type field-line coordinates corresponding to above grid. zeta : Array Zeta values along field line. @@ -636,7 +634,9 @@ def _get_data(eq, rho, alpha, names_field_line, names_0d_or_1dr=None): zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) # Make grid that can separate into field lines via a reshape operation, # as expected by bounce_integral(). - grid_desc, grid_fl = desc_grid_from_field_line_coords(eq, rho, zeta=zeta) + grid_desc = desc_grid_from_field_line_coords( + eq, rho, alpha=np.array([0]), zeta=zeta + ) # Collect quantities that can be used as a seed to compute the # field line quantities over the grid mapped from field line coordinates. 
@@ -656,7 +656,7 @@ def _get_data(eq, rho, alpha, names_field_line, names_0d_or_1dr=None): data = eq.compute( names=names_field_line, grid=grid_desc, data=data, override_grid=False ) - return data, grid_desc, grid_fl, zeta + return data, grid_desc, zeta @pytest.mark.unit @@ -669,7 +669,7 @@ def test_drift(): rho = np.sqrt(psi / psi_boundary) assert np.isclose(rho, 0.5) alpha = 0 - data, grid, grid_fl, zeta = _get_data( + data, grid, zeta = _get_data( eq, rho, alpha, From f919bcc4a2799ebf930412d00a851d80e41a82b6 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 26 May 2024 18:50:47 -0500 Subject: [PATCH 164/241] Pass in quadrature points to bounce integral instead of function --- desc/compute/bounce_integral.py | 16 ++++++---------- tests/test_bounce_integral.py | 17 +++++++++-------- 2 files changed, 15 insertions(+), 18 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index df5516839e..da298c20f9 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -1126,7 +1126,7 @@ def bounce_integral( B, B_z_ra, knots, - quad=leggauss, + quad=leggauss(19), automorphism=(automorphism_sin, grad_automorphism_sin), B_ref=1, L_ref=1, @@ -1189,10 +1189,9 @@ def bounce_integral( the derivative information should be captured without compromise. Can also specify to use a monotonic interpolation for |B| rather than a cubic Hermite spline with keyword argument ``monotonic=True``. - quad : callable - The quadrature scheme used to evaluate the integral. - The returned quadrature points xₖ and weights wₖ - should approximate ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). + quad : (Array, Array) + Quadrature points xₖ and weights wₖ for the approximate evaluation + of an integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). automorphism : (callable, callable) or None The first callable should be an automorphism of the real interval [-1, 1]. The second callable should be the derivative of the first. @@ -1209,8 +1208,6 @@ def bounce_integral( Flag for debugging. plot : bool Whether to plot some things if check is true. - kwargs - Can specify additional arguments to the ``quad`` method with kwargs. 
Returns ------- @@ -1314,9 +1311,8 @@ def group_data_by_field_line(g): assert B_c.shape[-1] == B_z_ra_c.shape[-1] == knots.size - 1 spline = {"knots": knots, "B_c": B_c, "B_z_ra_c": B_z_ra_c} - if quad == leggauss: - kwargs.setdefault("deg", 19) - x, w = quad(**kwargs) + x, w = quad + assert x.ndim == w.ndim == 1 if automorphism is not None: auto, grad_auto = automorphism w = w * grad_auto(x) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 3c9f335a3c..db0ed035f6 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -444,16 +444,17 @@ def integrand(B, pitch, Z): B, B_z_ra, knots, - quad=tanh_sinh, + quad=tanh_sinh(40), automorphism=None, - deg=40, check=True, ) tanh_sinh_vanilla = _filter_not_nan(bounce_integrate(integrand, [], pitch)) assert tanh_sinh_vanilla.size == 1 np.testing.assert_allclose(tanh_sinh_vanilla, truth, rtol=rtol) - bounce_integrate, _ = bounce_integral(B_sup_z, B, B_z_ra, knots, deg=25, check=True) + bounce_integrate, _ = bounce_integral( + B_sup_z, B, B_z_ra, knots, quad=leggauss(25), check=True + ) leg_gauss_sin = _filter_not_nan(bounce_integrate(integrand, [], pitch)) assert leg_gauss_sin.size == 1 np.testing.assert_allclose(leg_gauss_sin, truth, rtol=rtol) @@ -482,7 +483,7 @@ def test_bounce_integral_checks(): data["|B|_z|r,a"], knots, check=True, - deg=3, # not checking quadrature accuracy in this test + quad=leggauss(3), # not checking quadrature accuracy in this test ) def numerator(g_zz, B, pitch, Z): @@ -696,16 +697,16 @@ def test_drift(): knots=zeta, B_ref=B_ref, L_ref=L_ref, - deg=28, # converges to absolute and relative tolerance of 1e-7 + quad=leggauss(28), # converges to absolute and relative tolerance of 1e-7 check=True, ) B = data["|B|"] / B_ref B0 = np.mean(B) - # TODO: epsilon should be dimensionless, and probably computed in a way that - # is independent of normalization length scales. + # TODO: epsilon should be dimensionless, and computed in a way that + # is independent of normalization length scales, like "effective r/R0". epsilon = L_ref * rho # Aspect ratio of the flux surface. - assert np.isclose(epsilon, 0.05) + np.testing.assert_allclose(epsilon, 0.05) iota = grid.compress(data["iota"]).item() theta_PEST = alpha + iota * zeta # same as 1 / (1 + epsilon cos(theta)) assuming epsilon << 1 From f105ec32695e1f718c717fbf707faff67593762e Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 28 May 2024 17:35:59 -0500 Subject: [PATCH 165/241] Change keyword argument to batch from batched --- desc/compute/bounce_integral.py | 20 ++++++++------------ tests/test_bounce_integral.py | 8 ++------ 2 files changed, 10 insertions(+), 18 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index da298c20f9..b3a7aa43bb 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -278,7 +278,7 @@ def composite_linspace(breaks, resolution): breaks : Array First axis has values to return linearly spaced values between. The remaining axes are batch axes. - Assumes input is sorted. + Assumes input is sorted along first axis. resolution : int Number of points between each break. @@ -1027,7 +1027,7 @@ def _bounce_quadrature( knots, method="akima", method_B="cubic", - batched=True, + batch=True, check=False, plot=False, ): @@ -1069,7 +1069,7 @@ def group_data_by_field_line_and_pitch(g): f = map(group_data_by_field_line_and_pitch, f) # Integrate and complete the change of variable. 
- if batched: + if batch: Z = affine_bijection(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) result = _interpolatory_quadrature( Z, @@ -1285,15 +1285,11 @@ def denominator(B, pitch, Z): print(np.nansum(average, axis=-1)) """ - - def group_data_by_field_line(g): - errorif(g.ndim > 2) - return g.reshape(-1, knots.size) - B_sup_z = B_sup_z * L_ref / B_ref B = B / B_ref B_z_ra = B_z_ra / B_ref - B_sup_z, B, B_z_ra = map(group_data_by_field_line, (B_sup_z, B, B_z_ra)) + # group data by field line + B_sup_z, B, B_z_ra = (g.reshape(-1, knots.size) for g in [B_sup_z, B, B_z_ra]) errorif(not (B_sup_z.shape == B.shape == B_z_ra.shape)) # Compute splines. @@ -1319,7 +1315,7 @@ def group_data_by_field_line(g): # Recall affine_bijection(auto(x), ζ_b₁, ζ_b₂) = ζ. x = auto(x) - def bounce_integrate(integrand, f, pitch, method="akima", batched=True): + def bounce_integrate(integrand, f, pitch, method="akima", batch=True): """Bounce integrate ∫ f(ℓ) dℓ. Parameters @@ -1350,7 +1346,7 @@ def bounce_integrate(integrand, f, pitch, method="akima", batched=True): Method of interpolation for functions contained in ``f``. Defaults to akima spline to suppress oscillation. See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. - batched : bool + batch : bool Whether to perform computation in a batched manner. If you can afford the memory expense, batched is more efficient. @@ -1376,7 +1372,7 @@ def bounce_integrate(integrand, f, pitch, method="akima", batched=True): knots, method, method_B="monotonic" if monotonic else "cubic", - batched=batched, + batch=batch, check=check, plot=plot, ) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index db0ed035f6..739a15e0b9 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -495,7 +495,7 @@ def denominator(B, pitch, Z): pitch = 1 / get_extrema(**spline) num = bounce_integrate(numerator, data["g_zz"], pitch) - den = bounce_integrate(denominator, [], pitch, batched=False) + den = bounce_integrate(denominator, [], pitch, batch=False) average = num / den assert np.isfinite(average).any() @@ -532,8 +532,6 @@ def _elliptic_incomplete(k2): np.testing.assert_allclose(K, _fixed_elliptic(K_integrand, k, 10)) np.testing.assert_allclose(E, _fixed_elliptic(E_integrand, k, 10)) - # Here are the notes that explain these integrals. - # https://github.com/PlasmaControl/DESC/files/15010927/bavg.pdf. I_0 = 4 / k * K I_1 = 4 * k * E I_2 = 16 * k * E @@ -635,9 +633,7 @@ def _get_data(eq, rho, alpha, names_field_line, names_0d_or_1dr=None): zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) # Make grid that can separate into field lines via a reshape operation, # as expected by bounce_integral(). - grid_desc = desc_grid_from_field_line_coords( - eq, rho, alpha=np.array([0]), zeta=zeta - ) + grid_desc = desc_grid_from_field_line_coords(eq, rho, alpha=alpha, zeta=zeta) # Collect quantities that can be used as a seed to compute the # field line quantities over the grid mapped from field line coordinates. 
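After these two patches the quadrature rule is handed to ``bounce_integral`` as a precomputed (points, weights) pair, and the batching flag is spelled ``batch``. The following is a minimal usage sketch patterned on ``test_bounce_quadrature`` above; the import paths and signatures are those shown in the diffs at this point in the series, and the synthetic field line data and margin ``m`` are illustrative::

    import numpy as np
    from orthax.legendre import leggauss

    from desc.backend import jnp
    from desc.compute.bounce_integral import bounce_integral

    # One synthetic field line with a single magnetic well.
    v = 7
    knots = np.linspace(-np.pi / 2 * v, np.pi / 2 * v, 50)
    B_sup_z = np.ones(knots.size)                 # B^zeta along the line
    B = np.clip(np.sin(knots / v) ** 2, 1e-7, 1)  # |B| along the line
    B_z_ra = np.sin(2 * knots / v) / v            # d|B|/dzeta at fixed rho, alpha
    m = 1 - 1e-4                                  # keeps the integrand finite at the nodes
    pitch = 1 + 50 * np.finfo(np.float64).eps     # bounce points just inside the domain ends

    def integrand(B, pitch, Z):
        return 1 / jnp.sqrt(1 - pitch * m * B)

    # Quadrature points and weights are now passed in as a (points, weights) pair.
    bounce_integrate, spline = bounce_integral(
        B_sup_z, B, B_z_ra, knots, quad=leggauss(25), check=True
    )
    # The batching keyword is now spelled ``batch``.
    result = bounce_integrate(integrand, [], pitch, batch=False)
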
From 1d8609de77dcd1fbaef3a0f6ac4fc874362c5698 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 28 May 2024 18:09:44 -0500 Subject: [PATCH 166/241] Remove sort option from get_extrema --- desc/compute/bounce_integral.py | 55 +++++++++++++++++++-------------- tests/test_bounce_integral.py | 2 +- 2 files changed, 32 insertions(+), 25 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index b3a7aa43bb..71816a947a 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -345,9 +345,16 @@ def _check_shape(knots, B_c, B_z_ra_c, pitch=None): return B_c, B_z_ra_c, pitch -def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6, sort=True): +def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): """Return |B| values at extrema. + The quantity 1 / √(1 − λ |B|) common to bounce integrals is singular with + strength ~ |ζ_b₂ - ζ_b₁| / |∂|B|/∂_ζ|. Therefore, an integral over the pitch + angle λ may have mass concentrated near λ = 1 / |B|(ζ*) where |B|(ζ*) is a + local maximum. These correspond to fat banana orbits. Depending on the + quantity to integrate, it may be beneficial to place quadrature points near + these regions. + Parameters ---------- knots : Array, shape(knots.size, ) @@ -367,8 +374,6 @@ def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6, sort=True): relative_shift : float Relative amount to shift maxima down and minima up to avoid floating point errors in downstream routines. - sort : bool - Whether to sort output. Returns ------- @@ -376,8 +381,9 @@ def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6, sort=True): For the shaping notation, the ``degree`` of the spline of |B| matches ``B_c.shape[0] - 1``, the number of polynomials per spline ``N`` matches ``knots.size - 1``, and the number of field lines is denoted by ``S``. + If there were less than ``N * (degree - 1)`` extrema detected along a - field line, then the first axis is padded with nan. + field line, then the first axis is interspersed with nan. """ B_c, B_z_ra_c, _ = _check_shape(knots, B_c, B_z_ra_c) @@ -402,7 +408,7 @@ def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6, sort=True): .T ) assert B_extrema.shape == (N * (degree - 1), S) - return jnp.sort(B_extrema, axis=0) if sort else B_extrema + return B_extrema def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=False): @@ -547,7 +553,7 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=False): ) if plot: plot_field_line_with_ripple( - B, pitch[p, s], bp1_p, bp2_p, name=f"{p},{s}" + B, pitch[p, s], bp1_p, bp2_p, id=f"{p},{s}" ) print("bp1:", bp1_p) print("bp2:", bp2_p) @@ -560,9 +566,7 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=False): ) assert not err_3, msg_3 if plot: - plot_field_line_with_ripple( - B, pitch[:, s], bp1[:, s], bp2[:, s], name=str(s) - ) + plot_field_line_with_ripple(B, pitch[:, s], bp1[:, s], bp2[:, s], id=str(s)) def plot_field_line_with_ripple( @@ -573,8 +577,9 @@ def plot_field_line_with_ripple( start=None, stop=None, num=500, + title=r"Computed bounce points for $\vert B \vert$ and pitch $\lambda$", + id=None, show=True, - name=None, ): """Plot the field line given spline of |B| and bounce points etc. @@ -594,10 +599,12 @@ def plot_field_line_with_ripple( Maximum ζ of plot. num : int Number of ζ points to plot. + title : str + Plot title. + id : str + Identifier string to append to plot title. show : bool Whether to show the plot. - name : str - String to prepend to plot title. 
Returns ------- @@ -653,9 +660,8 @@ def add(lines): ax.set_xlabel(r"Field line $\zeta$") ax.set_ylabel(r"$\vert B \vert \sim 1 / \lambda$") ax.legend(legend.values(), legend.keys()) - title = r"Computed bounce points for $\vert B \vert$ and pitch $\lambda$" - if name is not None: - title = f"{title}. name = {name}." + if id is not None: + title = f"{title}. id = {id}." ax.set_title(title) if show: plt.tight_layout() @@ -932,8 +938,8 @@ def _interpolatory_quadrature( if check: _assert_finite_and_hairy(Z, f, B_sup_z, B, B_z_ra, inner_product) # if plot: # noqa: E800 - # _plot(Z, B, name=r"$\vert B \vert$") # noqa: E800 - # _plot(Z, V, name="integrand") # noqa: E800 + # _plot(Z, B, id=r"$\vert B \vert$") # noqa: E800 + # _plot(Z, V, id="integrand") # noqa: E800 return inner_product @@ -988,7 +994,7 @@ def _assert_finite_and_hairy(Z, f, B_sup_z, B, B_z_ra, inner_product): ) -def _plot(Z, V, name=""): +def _plot(Z, V, id=""): """Plot V[λ, (ρ, α), (ζ₁, ζ₂)](Z).""" for p in range(Z.shape[0]): for s in range(Z.shape[1]): @@ -997,16 +1003,14 @@ def _plot(Z, V, name=""): continue fig, ax = plt.subplots() ax.set_xlabel(r"Field line $\zeta$") - ax.set_ylabel(name) - ax.set_title( - f"Interpolation of {name} to quadrature points. Index {p},{s}." - ) + ax.set_ylabel(id) + ax.set_title(f"Interpolation of {id} to quadrature points. Index {p},{s}.") for i in is_quad_point_set: ax.plot(Z[p, s, i], V[p, s, i], marker="o") fig.text( 0.01, 0.01, - f"Each color specifies the set of points and values (ζ, {name}(ζ)) " + f"Each color specifies the set of points and values (ζ, {id}(ζ)) " "used to evaluate an integral.", ) plt.tight_layout() @@ -1153,7 +1157,7 @@ def bounce_integral( Notes ----- - This function requires that the quantities `B_sup_z`, `B`, `B_z_ra`, + This function requires that the quantities ``B_sup_z``, ``B``, ``B_z_ra``, and the quantities in ``f`` passed to the returned method can be separated into field lines via ``.reshape(S, knots.size)``. 
One way to satisfy this is to pass in quantities computed on the grid @@ -1246,6 +1250,9 @@ def bounce_integral( alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) knots = np.linspace(-2 * np.pi, 2 * np.pi, 20) grid_desc = desc_grid_from_field_line_coords(eq, rho, alpha, knots) + grid_fsa = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, sym=eq.sym, NFP=eq.NFP) + data = eq.compute(["iota"], grid=grid_fsa) + data = {"iota": grid_desc.copy_data_from_other(data["iota"], grid_fsa)} data = eq.compute( ["B^zeta", "|B|", "|B|_z|r,a", "g_zz"], grid=grid_desc, diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 739a15e0b9..29a234f351 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -223,7 +223,7 @@ def test_get_extrema(): rtol = 1e-7 extrema = get_extrema(k, B.c, B_z_ra.c, relative_shift=rtol) eps = 100 * np.finfo(float).eps - extrema = _filter_not_nan(extrema) + extrema = np.sort(_filter_not_nan(extrema)) assert extrema.size == extrema_scipy.size np.testing.assert_allclose(extrema, extrema_scipy, rtol=rtol + eps) From db33416ca7e2c2b5864fc2569f42c765fc221ffe Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 30 May 2024 14:29:38 -0500 Subject: [PATCH 167/241] Move things from #1003 into #854 --- desc/compute/bounce_integral.py | 62 +----------- tests/test_bounce_integral.py | 165 ++++++++++++-------------------- 2 files changed, 63 insertions(+), 164 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 71816a947a..d2393b89e6 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -1130,7 +1130,7 @@ def bounce_integral( B, B_z_ra, knots, - quad=leggauss(19), + quad=leggauss(21), automorphism=(automorphism_sin, grad_automorphism_sin), B_ref=1, L_ref=1, @@ -1161,7 +1161,8 @@ def bounce_integral( and the quantities in ``f`` passed to the returned method can be separated into field lines via ``.reshape(S, knots.size)``. One way to satisfy this is to pass in quantities computed on the grid - returned from the method ``desc_grid_from_field_line_coords``. + returned from the method ``desc.equilibrium.coords.rtz_grid``. + See ``tests.test_bounce_integral.test_bounce_integral_checks`` for example use. Parameters ---------- @@ -1234,63 +1235,6 @@ def bounce_integral( Last axis enumerates the polynomials of the spline along a particular field line. - Examples - -------- - Suppose we want to compute a bounce average of the function - f(ℓ) = (1 − λ |B|) * g_zz, where g_zz is the squared norm of the - toroidal basis vector on some set of field lines specified by (ρ, α) - coordinates. This is defined as - (∫ f(ℓ) / √(1 − λ |B|) dℓ) / (∫ 1 / √(1 − λ |B|) dℓ) - - - .. 
code-block:: python - - eq = get("HELIOTRON") - rho = np.linspace(1e-12, 1, 6) - alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) - knots = np.linspace(-2 * np.pi, 2 * np.pi, 20) - grid_desc = desc_grid_from_field_line_coords(eq, rho, alpha, knots) - grid_fsa = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, sym=eq.sym, NFP=eq.NFP) - data = eq.compute(["iota"], grid=grid_fsa) - data = {"iota": grid_desc.copy_data_from_other(data["iota"], grid_fsa)} - data = eq.compute( - ["B^zeta", "|B|", "|B|_z|r,a", "g_zz"], - grid=grid_desc, - override_grid=False, - ) - bounce_integrate, spline = bounce_integral( - data["B^zeta"], data["|B|"], data["|B|_z|r,a"], knots, check=True - ) - - def numerator(g_zz, B, pitch, Z): - f = (1 - pitch * B) * g_zz - return safediv(f, jnp.sqrt(1 - pitch * B)) - - def denominator(B, pitch, Z): - return safediv(1, jnp.sqrt(1 - pitch * B)) - - pitch = 1 / get_extrema(**spline) - num = bounce_integrate(numerator, data["g_zz"], pitch) - den = bounce_integrate(denominator, [], pitch) - average = num / den - - # Now we can group the data by field line. - average = average.reshape(pitch.shape[0], rho.size, alpha.size, -1) - # The bounce averages stored at index i, j - i, j = 0, 0 - print(average[:, i, j]) - # are the bounce averages along the field line with nodes - # given in Clebsch-Type field-line coordinates ρ, α, ζ - grid_fl = grid_desc.source_grid - nodes = grid_fl.nodes.reshape(rho.size, alpha.size, -1, 3) - print(nodes[i, j]) - # for the pitch values stored in - pitch = pitch.reshape(pitch.shape[0], rho.size, alpha.size) - print(pitch[:, i, j]) - # Some of these bounce averages will evaluate as nan. - # You should filter out these nan values when computing stuff. - print(np.nansum(average, axis=-1)) - """ B_sup_z = B_sup_z * L_ref / B_ref B = B / B_ref diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 29a234f351..6604227686 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -13,7 +13,6 @@ from tests.test_plotting import tol_1d from desc.backend import flatnonzero, jnp -from desc.compute import data_index from desc.compute.bounce_integral import ( _filter_not_nan, _poly_der, @@ -33,12 +32,13 @@ take_mask, tanh_sinh, ) -from desc.compute.utils import dot, get_data_deps, safediv +from desc.compute.utils import dot, safediv from desc.equilibrium import Equilibrium -from desc.equilibrium.coords import desc_grid_from_field_line_coords +from desc.equilibrium.coords import rtz_grid +from desc.equilibrium.equilibrium import compute_raz_data from desc.examples import get from desc.grid import Grid, LinearGrid -from desc.utils import errorif, only1 +from desc.utils import only1 def _affine_bijection_forward(x, a, b): @@ -85,7 +85,7 @@ def test_reshape_convention(): rho = np.linspace(0, 1, 3) alpha = np.linspace(0, 2 * np.pi, 4) zeta = np.linspace(0, 6 * np.pi, 5) - grid = Grid.create_meshgrid(rho, alpha, zeta) + grid = Grid.create_meshgrid(rho, alpha, zeta, coordinates="raz") r, a, z = grid.nodes.T # functions of zeta should separate along first two axes # since those are contiguous, this should work @@ -96,7 +96,7 @@ def test_reshape_convention(): f = r.reshape(rho.size, -1) for i in range(1, f.shape[-1]): np.testing.assert_allclose(f[:, i - 1], f[:, i]) - # test final reshape of bounce integral result won't mix data + # test reshape=ing result won't mix data f = (a**2 + z).reshape(rho.size, alpha.size, zeta.size) for i in range(1, f.shape[0]): np.testing.assert_allclose(f[i - 1], f[i]) @@ -109,9 +109,10 @@ def 
test_reshape_convention(): err_msg = "The ordering conventions are required for correctness." assert "P, S, N" in inspect.getsource(bounce_points), err_msg - src = inspect.getsource(bounce_integral) - assert "S, knots.size" in src, err_msg - assert "pitch.shape[0], rho.size, alpha.size" in src, err_msg + assert "S, knots.size" in inspect.getsource(bounce_integral), err_msg + assert 'meshgrid(a, b, c, indexing="ij")' in inspect.getsource( + Grid.create_meshgrid + ), err_msg @pytest.mark.unit @@ -463,17 +464,22 @@ def integrand(B, pitch, Z): @pytest.mark.unit def test_bounce_integral_checks(): """Test that all the internal correctness checks pass for real example.""" + # Suppose we want to compute a bounce average of the function + # f(ℓ) = (1 − λ |B|) * g_zz, where g_zz is the squared norm of the + # toroidal basis vector on some set of field lines specified by (ρ, α) + # coordinates. This is defined as + # (∫ f(ℓ) / √(1 − λ |B|) dℓ) / (∫ 1 / √(1 − λ |B|) dℓ) eq = get("HELIOTRON") rho = np.linspace(1e-12, 1, 6) alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) knots = np.linspace(-2 * np.pi, 2 * np.pi, 20) - grid_desc = desc_grid_from_field_line_coords(eq, rho, alpha, knots) + grid = rtz_grid(eq, rho, alpha, knots, coordinates="raz") grid_fsa = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, sym=eq.sym, NFP=eq.NFP) data = eq.compute(["iota"], grid=grid_fsa) - data = {"iota": grid_desc.copy_data_from_other(data["iota"], grid_fsa)} + data = {"iota": grid.copy_data_from_other(data["iota"], grid_fsa)} data = eq.compute( ["B^zeta", "|B|", "|B|_z|r,a", "g_zz"], - grid=grid_desc, + grid=grid, override_grid=False, data=data, ) @@ -495,9 +501,26 @@ def denominator(B, pitch, Z): pitch = 1 / get_extrema(**spline) num = bounce_integrate(numerator, data["g_zz"], pitch) + # Can reduce memory usage by specifying by not batching. den = bounce_integrate(denominator, [], pitch, batch=False) - average = num / den - assert np.isfinite(average).any() + avg = num / den + assert np.isfinite(avg).any() + + # Sum all bounce integrals across field line + avg = np.nansum(avg, axis=-1) + # Group the data by field line. + avg = avg.reshape(pitch.shape[0], rho.size, alpha.size) + # The bounce averages stored at index i, j + i, j = 0, 0 + print(avg[:, i, j]) + # are the bounce averages along the field line with nodes + # given in Clebsch-Type field-line coordinates ρ, α, ζ + raz_grid = grid.source_grid + nodes = raz_grid.nodes.reshape(rho.size, alpha.size, -1, 3) + print(nodes[i, j]) + # for the pitch values stored in + pitch = pitch.reshape(pitch.shape[0], rho.size, alpha.size) + print(pitch[:, i, j]) @partial(np.vectorize, excluded={0}) @@ -583,79 +606,6 @@ def _elliptic_incomplete(k2): return I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 -# kludge until GitHub issue #719 is resolved. -def _get_data(eq, rho, alpha, names_field_line, names_0d_or_1dr=None): - """Compute field line quantities on correct grid for test_drift(). - - Parameters - ---------- - eq : Equilibrium - Equilibrium to compute on. - rho : Array - Field line radial label. - alpha : Array - Field line poloidal label. - names_field_line : list - Field line quantities that will be computed on the returned field line grid. - Should not include 0d or 1dr quantities. - names_0d_or_1dr : list - Things to compute that are constant throughout volume or over flux surface. - - Returns - ------- - data : dict - Computed quantities. - grid_desc : Grid - Grid on which the returned quantities can be broadcast on. - zeta : Array - Zeta values along field line. 
- - """ - if names_0d_or_1dr is None: - names_0d_or_1dr = [] - p = "desc.equilibrium.equilibrium.Equilibrium" - # Gather dependencies of given quantities. - deps = ( - get_data_deps(names_field_line + names_0d_or_1dr, obj=p, has_axis=False) - + names_0d_or_1dr - ) - deps = list(set(deps)) - # Create grid with given flux surfaces. - grid1dr = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, sym=eq.sym, NFP=eq.NFP) - # Compute dependencies on correct grids. - seed_data = eq.compute(deps, grid=grid1dr) - dep1dr = {dep for dep in deps if data_index[p][dep]["coordinates"] == "r"} - dep0d = {dep for dep in deps if data_index[p][dep]["coordinates"] == ""} - - # Make a set of nodes along a single fieldline. - iota = grid1dr.compress(seed_data["iota"]).item() - errorif(alpha != 0, NotImplementedError) - zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) - # Make grid that can separate into field lines via a reshape operation, - # as expected by bounce_integral(). - grid_desc = desc_grid_from_field_line_coords(eq, rho, alpha=alpha, zeta=zeta) - - # Collect quantities that can be used as a seed to compute the - # field line quantities over the grid mapped from field line coordinates. - # (Single field line grid won't have enough poloidal resolution to - # compute these quantities accurately). - data0d = {key: val for key, val in seed_data.items() if key in dep0d} - data1d = { - key: grid_desc.copy_data_from_other(val, grid1dr) - for key, val in seed_data.items() - if key in dep1dr - } - data = data0d | data1d - # Compute field line quantities with precomputed dependencies. - for name in names_field_line: - if name in data: - del data[name] - data = eq.compute( - names=names_field_line, grid=grid_desc, data=data, override_grid=False - ) - return data, grid_desc, zeta - - @pytest.mark.unit @pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_1d) def test_drift(): @@ -664,12 +614,19 @@ def test_drift(): psi_boundary = eq.Psi / (2 * np.pi) psi = 0.25 * psi_boundary rho = np.sqrt(psi / psi_boundary) - assert np.isclose(rho, 0.5) + np.testing.assert_allclose(rho, 0.5) + + # Make a set of nodes along a single fieldline. + grid_fsa = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, sym=eq.sym, NFP=eq.NFP) + data = eq.compute(["iota"], grid=grid_fsa) + iota = grid_fsa.compress(data["iota"]).item() alpha = 0 - data, grid, zeta = _get_data( + zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) + grid = rtz_grid(eq, rho, alpha, zeta, coordinates="raz") + + data = compute_raz_data( eq, - rho, - alpha, + grid, [ "B^zeta", "|B|", @@ -680,9 +637,11 @@ def test_drift(): "grad(psi)", "|grad(psi)|", ], - ["shear", "a", "psi", "iota"], + names_0d=["a"], + names_1dr=["shear", "psi", "iota"], ) - assert np.allclose(data["psi"], psi) + np.testing.assert_allclose(data["psi"], psi) + np.testing.assert_allclose(data["iota"], iota) L_ref = data["a"] B_ref = 2 * np.abs(psi_boundary) / L_ref**2 @@ -703,7 +662,6 @@ def test_drift(): # is independent of normalization length scales, like "effective r/R0". epsilon = L_ref * rho # Aspect ratio of the flux surface. 
np.testing.assert_allclose(epsilon, 0.05) - iota = grid.compress(data["iota"]).item() theta_PEST = alpha + iota * zeta # same as 1 / (1 + epsilon cos(theta)) assuming epsilon << 1 B_analytic = B0 * (1 - epsilon * np.cos(theta_PEST)) @@ -765,6 +723,7 @@ def test_drift(): y = np.sqrt(2 * epsilon * pitch * B0) I_0, I_2, I_4, I_6 = map(lambda I: I / y, (I_0, I_2, I_4, I_6)) I_1, I_3, I_5, I_7 = map(lambda I: I * y, (I_1, I_3, I_5, I_7)) + drift_analytic_num = ( fudge_2 * alpha_MHD / B0**2 * I_1 - 0.5 @@ -775,16 +734,14 @@ def test_drift(): - (I_6 + I_7) ) ) / G0 - - drift_analytic_denom = I_0 / G0 - - drift_analytic = drift_analytic_num / drift_analytic_denom + drift_analytic_den = I_0 / G0 + drift_analytic = drift_analytic_num / drift_analytic_den def integrand_num(cvdrift, gbdrift, B, pitch, Z): g = jnp.sqrt(1 - pitch * B) return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) - def integrand_denom(B, pitch, Z): + def integrand_den(B, pitch, Z): g = jnp.sqrt(1 - pitch * B) return 1 / g @@ -793,17 +750,15 @@ def integrand_denom(B, pitch, Z): f=[cvdrift, gbdrift], pitch=pitch[:, np.newaxis], ) - - drift_numerical_denom = bounce_integrate( - integrand=integrand_denom, + drift_numerical_den = bounce_integrate( + integrand=integrand_den, f=[], pitch=pitch[:, np.newaxis], ) drift_numerical_num = np.squeeze(_filter_not_nan(drift_numerical_num)) - drift_numerical_denom = np.squeeze(_filter_not_nan(drift_numerical_denom)) - - drift_numerical = drift_numerical_num / drift_numerical_denom + drift_numerical_den = np.squeeze(_filter_not_nan(drift_numerical_den)) + drift_numerical = drift_numerical_num / drift_numerical_den msg = "There should be one bounce integral per pitch in this example." assert drift_numerical.size == drift_analytic.size, msg np.testing.assert_allclose(drift_numerical, drift_analytic, atol=5e-3, rtol=5e-2) From d5a00c88176a9807cb27db49c7206ddb3e480e6b Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 30 May 2024 16:24:56 -0500 Subject: [PATCH 168/241] =?UTF-8?q?Make=20sure=20d=E2=84=93=20parameterize?= =?UTF-8?q?s=20the=20distance=20along=20the=20field=20line=20in=20meters..?= =?UTF-8?q?.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit clean up code for final review --- desc/compute/_metric.py | 20 +++++++- desc/compute/bounce_integral.py | 42 ++++++++--------- tests/baseline/test_drift.png | Bin 20083 -> 18687 bytes tests/test_bounce_integral.py | 79 +++++++++++++------------------- 4 files changed, 70 insertions(+), 71 deletions(-) diff --git a/desc/compute/_metric.py b/desc/compute/_metric.py index 42dd5c8218..11bbf37959 100644 --- a/desc/compute/_metric.py +++ b/desc/compute/_metric.py @@ -1327,7 +1327,7 @@ def _g_sup_zz(params, transforms, profiles, data, **kwargs): label="g^{\\rho\\theta}", units="m^{-2}", units_long="inverse square meters", - description="Radial/Poloidal element of contravariant metric tensor", + description="Radial/Poloidal (ρ, θ) element of contravariant metric tensor", dim=1, params=[], transforms={}, @@ -1340,6 +1340,24 @@ def _g_sup_rt(params, transforms, profiles, data, **kwargs): return data +@register_compute_fun( + name="g^pa", + label="g^{\\psi\\alpha}", + units="Wb \\cdot m^{-2}", + units_long="Webers per square meters", + description="Radial/Poloidal (ψ, α) element of contravariant metric tensor", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["grad(psi)", "grad(alpha)"], +) +def _g_sup_pa(params, transforms, profiles, data, **kwargs): + data["g^pa"] = 
dot(data["grad(psi)"], data["grad(alpha)"]) + return data + + @register_compute_fun( name="g^rz", label="g^{\\rho\\zeta}", diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index d2393b89e6..fbadc844fe 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -823,11 +823,8 @@ def tanh_sinh(deg, m=10): This callable is the composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the items in ``f`` as arguments as well as the additional - keyword arguments: ``B``, ``pitch``, and ``Z``, where ``Z`` is the set of - quadrature points. A quadrature will be performed to approximate the - bounce integral of ``integrand(*f, B=B, pitch=pitch, Z=Z)``. - Note that any arrays baked into the callable method should broadcast - with ``Z``. + keyword arguments: ``B`` and ``pitch``. A quadrature will be performed to + approximate the bounce integral of ``integrand(*f, B=B, pitch=pitch)``. f : list of Array, shape(P, S, knots.size, ) Arguments to the callable ``integrand``. These should be the functions in the integrand of the bounce integral @@ -918,7 +915,7 @@ def _interpolatory_quadrature( shape = Z.shape Z_ps = Z.reshape(Z.shape[0], Z.shape[1], -1) f = [_interp1d_vec(Z_ps, knots, f_i, method=method).reshape(shape) for f_i in f] - B_sup_z = _interp1d_vec(Z_ps, knots, B_sup_z, method=method).reshape(shape) + b_sup_z = _interp1d_vec(Z_ps, knots, B_sup_z / B, method=method).reshape(shape) B = _interp1d_vec_with_df(Z_ps, knots, B, B_z_ra, method=method_B).reshape(shape) pitch = jnp.expand_dims(pitch, axis=(2, 3) if Z.ndim == 4 else 2) # Assuming that the integrand is a well-behaved function of some interpolation @@ -929,17 +926,18 @@ def _interpolatory_quadrature( # between bounce points. Don't suppress inf as that indicates catastrophic # floating point error. inner_product = jnp.dot( - jnp.nan_to_num( - integrand(*f, B=B, pitch=pitch, Z=Z), posinf=jnp.inf, neginf=-jnp.inf - ) - / B_sup_z, + jnp.nan_to_num(integrand(*f, B=B, pitch=pitch), posinf=jnp.inf, neginf=-jnp.inf) + / b_sup_z, w, ) if check: - _assert_finite_and_hairy(Z, f, B_sup_z, B, B_z_ra, inner_product) - # if plot: # noqa: E800 - # _plot(Z, B, id=r"$\vert B \vert$") # noqa: E800 - # _plot(Z, V, id="integrand") # noqa: E800 + _assert_finite_and_hairy(Z, f, b_sup_z, B, B_z_ra, inner_product) + if plot: + _plot(Z, B, id=r"$\vert B \vert$") + _plot(Z, b_sup_z, id=r"$ (B/\vert B \vert) \cdot e^{\zeta}$") + # Note to developer if debugging: consider plotting argument to + # inner_product to see how singular the integrand is before/after + # change of variables. return inner_product @@ -1088,7 +1086,8 @@ def group_data_by_field_line_and_pitch(g): method, method_B, check, - plot, + # Only developers doing debugging want to see these plots. + plot=False, ) else: f = list(f) @@ -1141,7 +1140,7 @@ def bounce_integral( """Returns a method to compute the bounce integral of any quantity. 
The bounce integral is defined as ∫ f(ℓ) dℓ, where - dℓ parameterizes the distance along the field line, + dℓ parameterizes the distance along the field line in meters, λ is a constant proportional to the magnetic moment over energy, |B| is the norm of the magnetic field, f(ℓ) is the quantity to integrate along the field line, @@ -1158,8 +1157,8 @@ def bounce_integral( Notes ----- This function requires that the quantities ``B_sup_z``, ``B``, ``B_z_ra``, - and the quantities in ``f`` passed to the returned method - can be separated into field lines via ``.reshape(S, knots.size)``. + and the quantities in ``f`` passed to the returned method can be separated + into field lines via ``.reshape(S, knots.size)``. One way to satisfy this is to pass in quantities computed on the grid returned from the method ``desc.equilibrium.coords.rtz_grid``. See ``tests.test_bounce_integral.test_bounce_integral_checks`` for example use. @@ -1275,11 +1274,8 @@ def bounce_integrate(integrand, f, pitch, method="akima", batch=True): This callable is the composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the items in ``f`` as arguments as well as the additional - keyword arguments: ``B``, ``pitch``, and ``Z``, where ``Z`` is the set of - quadrature points. A quadrature will be performed to approximate the - bounce integral of ``integrand(*f, B=B, pitch=pitch, Z=Z)``. - Note that any arrays baked into the callable method should broadcast - with ``Z``. + keyword arguments: ``B`` and ``pitch``. A quadrature will be performed to + approximate the bounce integral of ``integrand(*f, B=B, pitch=pitch)``. f : list of Array, shape(..., S, knots.size) Arguments to the callable ``integrand``. These should be the functions in the integrand of the bounce integral diff --git a/tests/baseline/test_drift.png b/tests/baseline/test_drift.png index 32530da61ecf07bb791476984af29434b5ea1b32..01b7a0133a601b4365129023eab2c84e70ef4bb8 100644 GIT binary patch literal 18687 zcmeIabyU>d_b)CgAgwZ#lu8aABHa>0OM|q8bk|5pDu@WuNQ!`TN`ruibV`RvgY?jK z&wKpb-~IjW_x^MLx$F1Geb#yw&wA#}IlIr^uf5N_QB#p4z`cii?bPIiuNcGhP1JuF;YtsNbBIRrQ! zvE6^+=H}!o#K~#@|198ebg|;3WuS8ho8UOf>$zULMq~>8i;*XuYklpSydC0+q^8%$ z%^A=4nihXNcZb{FKYen$;^DGH@&l{eH|gbnJV3x;H(p~r84IS6dAo=&O?Wq!S+M{8 zlb(#%7~J>esS=N(W@kJvY9%pG50%nYe~mi0|I+p?Ko)qs^G!bs9vU7Vj;2S%WZ!y? 
zxDVwQFy_Jfm3BDeK=B;_`5ey?v0m*>C?>jossz~>VF7h6H2%Z^v2$gvc>^lb88V*N zyMil7fO-V#n1Moq&C{6RL2eE=dQh1C`7B+Mwvr*EnWZ-;x*>ue z48*s5Yms$m714LbVFAEEg)1lsYhhT;{O_;T{)cfliAKC%gHE?|cVk`U#y76a#(fcs z2eaRDk$|8HZi;I#V0unpBQv=PNCu_>6#nYXP7A9mLb}Z<;Gsb~A8rqt=^r3lxJ~uP zbRi)iWdN+kR=Px|((EzPFt#gk;ZIB;YO~>F!>jqn#``JZ=-T+-E`Z%R;GrCdE-Dig zo_)H`t|MJ*uU>%5VRW)#bE|;ZtEATuuxV~^&b_b=VVOZIbSU$}^@3@%6d9cm+2 z1m2CA`JAtDT)I3eW3FAEj|*NPOuPj2W{|#R00c3KBPc5{3|zSc=$2Fk=v6}*HRwjZ zxU%*oETSg#EJm9Cd$m$0d~h7O+>HYgw@=#OG&N<@L#^wrn7P7(gg$DR2=TW1n&-d4 zJ^D6nx$^3{*TGf7ofayTH!xa=aCh?LPrbu$_x#KKZ4*`kC~JWa<-EZ*-!=lM66h@p z&Aq>D8f7dacReImD5vUtk?0J@Zee zmD>K_A!Y;(D@p-`6!22yxK%H%Wrxi6^gz8UD6%B|=YgZBdetxJkm1WN??RRXGCaVB zg4$0YkL`wI12V3-H-K64?zn7}?E(yJvet%brqCl#yzUML3G}be&UPNiG-NCKRNHj6f$%2$aEF4QcKicC zYyg20O$&o3Tszz$>X*b*le`6Lw9}fbc7P0SidIGf?h~5!%*+;#ws+p*7XO7%viY?l z03ES-ba^+;Y`@Y8Y)&oLYV=Xhky(&kg#?@+5R#))Z@u(Uf(6-Rer*4+Z23a$BPSzn z(mZH}hkJ6t1)ik)qy_x14FtP->@Z%h$end`*H=FXN~BQ#fg#st+X3|5%U zK5ao*0~MyA3ijQu4_5Q^Q?C0s^R;+a{(BY$-I3hC7^cZI2yWR-;Q;3V5M%QF7<#{5 z6wYU_JZw9k(uI*}K-Sktc}=O3$?0)j5Vg%tS*yNi(n4FZ6Il;ZDiYx+sTcy7B3l)delMgOwdu6zp%Gvo_k zvE?nI?F1_=GJ9`+*d8jdA5l+}9En@0qpg)GNkR+|w-Q*c^``w@{P@nX(tTX#tZ$v_ zi2pxFBnxqa%a5n~k9Pzay^DY2z1I_a6S1R)@0*GyuQ9;Veb{)kTdjMVj6Z2p*i$CFF@&KySGKq3Y(52JR6}#cRpE2S3@oG80 z>;C9nj=IpAAe|;^{$;c}TEcPeNqmEICx=M<2N;lV38+Qj3VBZx z>M2*BRwzKvYmNtID{WY@&@RmFz0&ne|9g~UF;?BK4^LmNj6;ng^Zx&ckZWHpEc5z< z)3aKydyh?bWRnK#9rUJxr!$~75**nGdg!cL_pR12_AiXW92`CWpy^$jqUdnB)+&1C zr$n8WXl$uixz~8?ZLvmTuJ)fl1~x3>dYy#1^4m2y1xaoSl3z+nU zJpLR_de&I(#6Sa5iYg4}j$ zaIOw(FR38+2n(F5c|!Y;Y3I0S;*y$#IjmX17F^Z@kF;)GV6F^5Y@wR8ljuG~9aa1L zPDCc?ptA*0dWUi!zVPbHZ<)k-tK;080?H-OIc3PWy+S6O<&pg5p>8v?x5VCUKI@QX zC0lTP5xkLPx%Oq<9wLsLC6On{q}gn|ypoKy46RMK_5{f?{wQwyo*>+qy+3NK(>``! zwlVb47R_;}2oTG3qSkvywMEUIh zj@B&*<7__L0?mZTXrT(raV{_9V5KfA=f5X4Awm#@A$>F@`ZH!|K5+l!;sE??5BUc8 z(|4eABAT?0j)xzwu7M`LX*Wa{0*Pq9fK~ZI2TH1XK8-^Bh+H-Gubnp^xK;`M*2`FA zB6G^u&1a1w|6LdUjx@(q%CtKz<)6ksiY9JDBiWIQWcmP}M26SU|#dxSCV&Fr^(tqJAv3vn0s z-^|8ELm7Ig-j|ZUFYjoV$ZY-I-9;9BmC39x&>F!U{HV%YyZyThIPD0~ zC3wSOLXA>PL4&yY($6Ftd7<{yBdT1YJRw`m0_9UqC2RD#WW5bKU4VQ4Xe}59P~7kj zc2`Nc7C0Pb)w#2P|+5 zaA{=P83%k{B~382@k!fT0^iqQyLgj9eO$F08G3a!2W~a@rsbHm-Bx7f0r$y)%}S55 z8ALvr608C_07kWSl;qohI?e*2P-Z_%YaTh5ISMxZjxU; z!{&ee(MWw46I?ARtTVTQSmFRjy%Ql9E`LyPM$!D&nA&p*lXyNl5IMjM{M0Cmh`wIYx)N5Wm!D2weRq(r?Ai>TyMxovs zkO2)k Date: Thu, 30 May 2024 17:12:53 -0500 Subject: [PATCH 169/241] Add g^pa magnetic axis limit --- desc/compute/_metric.py | 10 ++++++++-- desc/compute/bounce_integral.py | 2 -- desc/equilibrium/equilibrium.py | 1 - 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/desc/compute/_metric.py b/desc/compute/_metric.py index 11bbf37959..535bedb53f 100644 --- a/desc/compute/_metric.py +++ b/desc/compute/_metric.py @@ -1348,13 +1348,19 @@ def _g_sup_rt(params, transforms, profiles, data, **kwargs): description="Radial/Poloidal (ψ, α) element of contravariant metric tensor", dim=1, params=[], - transforms={}, + transforms={"grid": []}, profiles=[], coordinates="rtz", data=["grad(psi)", "grad(alpha)"], + axis_limit_data=["e^rho", "alpha_t", "e^theta*sqrt(g)", "B0"], ) def _g_sup_pa(params, transforms, profiles, data, **kwargs): - data["g^pa"] = dot(data["grad(psi)"], data["grad(alpha)"]) + data["g^pa"] = transforms["grid"].replace_at_axis( + dot(data["grad(psi)"], data["grad(alpha)"]), + lambda: dot( + data["e^rho"], (data["alpha_t"] * data["e^theta*sqrt(g)"].T * data["B0"]).T + ), + ) return data diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index fbadc844fe..46ead63373 
--- a/desc/compute/bounce_integral.py
+++ b/desc/compute/bounce_integral.py
@@ -1031,7 +1031,6 @@ def _bounce_quadrature(
     method_B="cubic",
     batch=True,
     check=False,
-    plot=False,
 ):
     """Bounce integrate ∫ f(ℓ) dℓ.
@@ -1321,7 +1320,6 @@ def bounce_integrate(integrand, f, pitch, method="akima", batch=True):
             method_B="monotonic" if monotonic else "cubic",
             batch=batch,
             check=check,
-            plot=plot,
         )
         assert result.shape[-1] == (knots.size - 1) * degree
         return result
diff --git a/desc/equilibrium/equilibrium.py b/desc/equilibrium/equilibrium.py
index 41be7c4f31..0113cd7fcf 100644
--- a/desc/equilibrium/equilibrium.py
+++ b/desc/equilibrium/equilibrium.py
@@ -963,7 +963,6 @@ def compute(
                     for key, val in data1dr.items()
                     if key in dep1dr
                 }
-
                 data.update(data1dr)

         if calc1dz and override_grid:

From 711f73453465932060673563b3fb5bc03fa63e1f Mon Sep 17 00:00:00 2001
From: unalmis
Date: Thu, 30 May 2024 17:25:42 -0500
Subject: [PATCH 170/241] Remove confusing parentheses

---
 desc/compute/bounce_integral.py | 4 ++--
 tests/test_bounce_integral.py   | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py
index 46ead63373..d0b990fd1b 100644
--- a/desc/compute/bounce_integral.py
+++ b/desc/compute/bounce_integral.py
@@ -460,7 +460,7 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=False):
     P, S, N, degree = pitch.shape[0], B_c.shape[1], knots.size - 1, B_c.shape[0] - 1
     intersect = _poly_root(
         c=B_c,
-        k=(1 / pitch)[..., jnp.newaxis],
+        k=1 / pitch[..., jnp.newaxis],
         a_min=jnp.array([0]),
         a_max=jnp.diff(knots),
         sort=True,
@@ -632,7 +632,7 @@ def add(lines):
     add(ax.plot(z, B(z), label=r"$\vert B \vert (\zeta)$"))
     if pitch is not None:
-        b = jnp.atleast_1d(1 / pitch)
+        b = 1 / jnp.atleast_1d(pitch)
         for val in b:
             add(ax.axhline(val, color="tab:purple", alpha=0.25, label=r"$1 / \lambda$"))
     bp1, bp2 = jnp.atleast_2d(bp1, bp2)
diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py
index 399b39fb14..fbac41cee0 100644
--- a/tests/test_bounce_integral.py
+++ b/tests/test_bounce_integral.py
@@ -96,7 +96,7 @@ def test_reshape_convention():
     f = r.reshape(rho.size, -1)
     for i in range(1, f.shape[-1]):
         np.testing.assert_allclose(f[:, i - 1], f[:, i])
-    # test reshape=ing result won't mix data
+    # test reshaping result won't mix data
     f = (a**2 + z).reshape(rho.size, alpha.size, zeta.size)
     for i in range(1, f.shape[0]):
         np.testing.assert_allclose(f[i - 1], f[i])
@@ -642,8 +642,8 @@ def test_drift():
     B = data["|B|"] / B_ref
     B0 = np.mean(B)
-    # TODO: epsilon should be dimensionless, and computed in a way that
-    # is independent of normalization length scales, like "effective r/R0".
+    # epsilon should be changed to dimensionless, and computed in a way that
+    # is independent of normalization length scales, like "effective r/R0".
     epsilon = L_ref * rho  # Aspect ratio of the flux surface.
     np.testing.assert_allclose(epsilon, 0.05)
     theta_PEST = alpha + data["iota"] * zeta

From 8c667bc29af2dcc0a2a4144603da59b48fdd3066 Mon Sep 17 00:00:00 2001
From: unalmis
Date: Sat, 1 Jun 2024 15:53:34 -0500
Subject: [PATCH 171/241] Update things after last merge

---
 tests/inputs/master_compute_data.pkl | Bin 6466979 -> 6504262 bytes
 tests/test_bounce_integral.py        | 40 +++++++++++++--------------
 2 files changed, 19 insertions(+), 21 deletions(-)

diff --git a/tests/inputs/master_compute_data.pkl b/tests/inputs/master_compute_data.pkl
index e5ca71d53f3ef923c1b71c07f06aea159219be4e..67fa91e841a1c77c025a875ffeada791ed24bae5 100644
GIT binary patch
delta 70634
[binary data omitted]
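The "Remove confusing parentheses" change above works because indexing binds tighter than division and because taking an elementwise reciprocal commutes with appending a trailing axis, so ``1 / pitch[..., jnp.newaxis]`` equals ``(1 / pitch)[..., jnp.newaxis]``. A minimal check of that equivalence, using a made-up ``pitch`` array rather than anything from the patch:

import jax.numpy as jnp

pitch = jnp.array([[0.5, 1.0], [2.0, 4.0]])
a = (1 / pitch)[..., jnp.newaxis]  # old spelling: reciprocal, then new axis
b = 1 / pitch[..., jnp.newaxis]    # new spelling: new axis, then reciprocal
assert a.shape == b.shape == (2, 2, 1)
assert bool(jnp.allclose(a, b))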
zx5e!nhL21XLK@Ad!Bww^$Hbk7;qg_T)9XotFfVgscrjQ2V-sV1t`m!J{UW{n>xf{$ zui$ypGj<%>j5cpNJI{@RG1b6piILFqDeiU!ODKe!a{TJLvIWrb4Z6iSNeG%a@FPx+ zNpN=6LW#=@K@{nC?Mi)U4b&|bt!IiLVAd^j-X``uXg*OqCwjgQK5kH+3GA5$!X;YI zc@;)rf*lL(0^tiReDeAF!`w;Gwws`MsAwK8ISni?RpQ`sxkkZ2x3pl;Hj4O4?=-SS!g${|O za79MeISncUHZQnSz)hAN?K%Xvsn&GBvk8|w`v#}M)Zz1v_IHLMc3yJBUB~zUS&b?l$e5jK6c=Ug9fI>SPb4j ze}boP<3{+-g^LDbae!JcQn-~m1GX=_ke9xU2kE3Io33g80QVg^e(n*8347r=s})&G zkEX*`Y$BVsK$hiBOEW{KK!4#rbIm2a@R@3Nlf8T=4nu z?fu(P(97>?o!@LHT(RgG+^j}I*bDnBB8DeH+khje5h(-vO%kcc+?mn%^T!s=&L2SV zY~~BslzyPEy6Wh3EE}r!Ii+dJoCnpDMt#S5nXr&mSGOkZ>xZavw6F0@GrZF8*|1wd zWE@OtX0^0eeuQD0Zf&@3-wFr%dj;&dM*t;)P1c#>t1c^z%ima&@{mOpNgg`)#n+#J zrdHTw7z%43sD#E#9_XSK55@EnDtJ+1nYC(@zXsCidJtK=XC->7G#D8@DTL^{oO?OX zE29Ty-BR(kx`>d%ewsm07#+X5^^539ek4%3+B5m8L|CpsT0m51;%p2v;K;M(O?Hh;XUqRs^dO z`oI~?I}$~Qc#G6DOWunkL9d;uPKj!0=e(uLf({dMAJ%}ElHp9RmDXAn`srb@HpPeqVKhggC=pbe5o60TZY-$}r18;+aT-Ao5B=zC$vqjtFe z?r4VFEm7_PP zb=CVDp4Yh?BD7&!djTJ@5kr7axBFw_H2lPSuP)#!?MvSKo%`J(4rFtZ7!9%};o*)o zbK3Y2SQ+&a@2q|UbNGA)ytgz!{>u{#j}CBPVK#CMPaZO&em}Vr52vc()rCi%c~l~j zZOCe-+#@3c*=Roz+Eq+>?E$!0JrA?qO}{Z-ErNJfisF1pCwifV(UJpZ=|ixgvVcE9 zTL?XKRCvEnZVbkI?_LbuRtw$nNw$e`7WD3Q-3bNLamcaiK>4M2T_A+eDL@Ojzr^r}jxrv|QWldOdDX0q-V$-l`cn4rCUGL zLD^(HG=y>cC=65>w@M!nK>q!2mv8>k3eAT0)3x^9kTp4HqTs0mQw@g37FFGE!X&$!Lycw&+Hu@$YWW9cE?Gt*0R5EOy zej0+NiI>6!{Te}uyC*3(xdnP{(~#|zY=iSzY3a;C3t*E86ubG7fN;8S_iV|?QuwYm zR;6|C7~Cf|m3O+G9SyFrTXu(>2lprRmb0?f!z)4~)}C8=k;>CMm+EhSfI*%Ix{a4K zg64xkR~&RW5wAd$?XA5X@PwW9n%iuHpyu8Ew0pU8aP!4j*BhpNkgU66S8II>(2jU_ zOOU@6>Ys2)c39tr!*ihSoI>Udcs}2Jc^yVT#I7Duj+O$bEY!v6P8fz8k}oQ6Ghjhy z8R#h*Qm-JZ%Iv0%%~jCyG{<5J6E`|`-YtVfxCYM%-quTYXa*-XNm(7!BO;@O`ghv> z9gzHBx2o*=Nw93gd6f;h^)R?T{)pnWRyee4Wzp?Cks-kNF6yexxlGvCW5a*2wFcU@ z?cgOYTl_IdHdpHkQ$V2rGd?Wnnv%a-5%FfIE>!gLpeta5iA}f`y8L_>^Kg$QVhmks zHD)J*cHU0ir*=aNZGE0Pl*FZj{B10XUiOJ0EWq!Gw!9k3-rldqeSR6Dv{oM~E)zrs z@~ik{X!iyu6nw*Py^=w7)r2SVIwUlvBmHP+lsLxAmiEGFI6P%c5l+XXf=@Go$_c?kH$9%b*Ur z23zF`dDN0JhT%up(K|LDA@N30^!Tn;nrOQ;l3wR|tYSS23S;--;0&ZgZ)$~S<1g`| z6>gkduR8E4$bLrKIq*C;c4zix6uVagfay2<(>8a5*_g_Md?#cu`W4lu>-Bii68$1+ z!jo}mnOb%S4pPbJWaF1NEfTZrgD zQoHhLSzZ()QhrLzLkJNSjz8H|NN315f}w4A;bXkWrgc z*J@J%wEM}n?4YnQ*el%7jSKQ1wKKJIkCzIeUALEql}OCMmxsDWg=AP!(^VLO7c}#t z;KcO<%`9^;ZrjU4g_DC&U!ioq$CHSf-UfCgxer3kCdH81Xd>3cqT=u}{~d_EULx8n zRts*t+|r>IM#dKDp5CC4Sdi_h?s;{QF33~;YHjx=R%8%)Vq;M&FKXH+e13;+KYSWE zWh`@!1%)@XSjM`N(I>WW+_A@J6y9#nx?p#R9tED-I%_n>iAsbIcraQ_!i9zD<4(QZ zP{u!VNN!*O`U(~fKUeRErYV=G5A5eb|K7V1-uDo+fVv-fI?8~t$GZ#1Qn)em$EP+q z9b-g^e&JHiyIY{#YwfIhHAW;lv}8xnBW^Tqvha{AwjBz4JI*3$|TfcI8%mVrk->?2kY810jRT-s@v%Fl-FH8Qna+)G4+$o$uH$3Md>9GfU9 z?gX?ESLP_Z7t4VTZ*R)(Wv+u4;tKcWV2e=w(3thfJ>2Nv#J%Ch_%4{BXjUj1Gzy)( zJ$aX@lF;cu@#8m?hGFU&=Ccgo6Wo`t>*&Qa4d=^js-|}KK~nZhIc(Jscqt>Fw8gsz z+Lx@@aY-p1mPPxVy`8~`?bf-ro#?k!+3=Y6jr_k$Q^#_*qi;V=r!)Y zBDS>vwpbfqGn$!(ZCcd5tqhzGMmb^oi(p|LN}g>-yo{ zPtFb*X^)}WMtpUT*eFCS#UdG~15OB;e#R09!3y3Py7R^Zu-mjgoaC5~!(*9G3oLID zF&)t*5@A6Lkj&worsx2!Nn zVOC`OvNC(g_G%aN|b504hU@oZ4Nu=aYkrYPnfuq8Fb z{0Uqe*UZ#+kANAg*56cVqJ7@(guLqMVnf2CFKr7fr{U{UH>*<8#87R-?CF)Wd`QaV z9eW350dke$suH{+sKL#yFTaMAf6u1uF?c?TMR+Wn59O;2 zlZsAqp^Ex369UH+R8rvN)9 zf7UMeIKLq3z$zZJ-}y~cNFFO1Cw15yq>Ys3b%WgevRr7Htc`o>0T!fpArRjbt2PQP zb7P4|_Y;t~FB7~tz=+-+7dIi948hZWDfgo{eg@Z`c&4R?)1g6GkGH}&geq6|a()zN z!XB+N8Y|BD2$v2hAFk=_2No=yi*<55n9nW|!&egobWpn?39D?RrR<3F@sI2X#fogn zD`G;VyhHa*ZneYM1YBKXr5_6l(R~oq>(7V+HZO&OPX^(&>#;#lat<1E~2KHLkK@;=i zf^XPJX!|-K;mc6Df?5+WdxAD3y(j@G%f}^e) zZ3Z4Ujt)Y`Cocyy{4CSdlb-joPU?eB6&W>~1g3Ws@uQuWCo736R0yRwnUj4trx`kjpyMdU4Q#Z$5> zD9Wt=L(h>nuu{L@?sXC$HmUY5C*4N|nUs`M>>e;d>cwC_NFl4X#FSa 
zO`%o%Si-om6NO&~n>Ukj@oZ#37E9TSuVv7qZMQs6RNqxV*{VX41b;qcT&l-rcZ`Tq zP2=Non55B(m7s!!!a}4wJb={wltdhN(6m8mhbL=CL9_)EZh66 zj0xMaP~Q>Z!-pH0^_4&8hvGxM>n%aW)MteFCUzE3XmvD{l1i zYARpo%OQA^%V7ll_c zZKI&4DL=BsNB{$GF6L_c_COKR9m;lI0#+8^UB!1^46{=&2z}=^4*N1VrV<4QV5QCd z^(*}a(Y@6^lH;aCG`+?(|6u44d^vlrG{%+(k?u~D7S1rE8iBCWa^?jPK=re3i6%yLRFCiE`I!Nbt*(81|0ixtLr?o4 z{qAP?b{)^n+C#J8qKi+3jTS#v==;q4ll%zGIQcAfjbR&P?>=W6u#OKU3sI7t@65x4 z2bvAyX1ZZpujHtGIU9<&62B)}jUG8{TZ@M~Uh9M+0d0?r3kRT9?2>VX(@ZFtwD-FE zDFlfxH*dN}o&-0!r4ES_=#ePj13NSGZm`w3L3zQA73+yF&;GJl3sZMllnk^?fK-V`Pgxz+8$Xa!z0s>@rKTYBTL($5g*>$aA}f+ za`Y?u>yIqL$OfkmvZl1rbNUWz#D*St;hLg=R0Ta+Qmjy^X_y6*cQZXQ9UKMrS8|zM z$LP@6p_~QQqBgM3N!ydHg%P{ndt=LnCk=41d_bwgnl=f@b#EhdXT?TCLuRIi2O(oq z$9aj1Ik36C!tKd0JBq;dqvafzOu)0+xUybN1GHMY_3$Gd29y*!C>eEk1_nOK^ksE! zgI$(tt40#LVUji3Hix!)r>dP#U$~G3y%)k$YifG%|gB{SjlE1IAeh?gX^DcLBV#E%-du+6vs}JHK>at@A=dVJmKF;eaSlH0q z;q*0IB}ZYp{x)ahlxpbgE^^*dlpguQi9L(ACt&jSh`ko5dWom*KqXi}mo2dB-$%};q(`sh{z7^-|zC}sgD<;tb?Bn`vvELel;d0 z5L5)*?4?%HE|#Ph7!RDv1i4Dv$LnfovybNelj)*o4^Bg~9C8!wAb98^QGikxT053*p{Czs{DPm#ic)Az8r zy;49e2Wb~Uv;3G^J<-B-?J}%WXP|*#LdH>-Tgok;-NM-75uyD)xjNX^&=H)_TMl&u zZ!BzdA|SC|7U7Q~^5}JZ7_amWc4TDkZFF^J93DWkVUsl?XrpVX%Y>saN|T=Z-O zThoN)TnR|_wdY#O#eT>y<6eJdH4j?yMO%L-8xN|`+sxAAeinME0SCLZI#}=9S&(~) zjFN-5+N8*jfcTZHtT}iL3$|aneltA8jb1R>X>%tofma0J-L- zfxRz1=Ac?~rtBdLB9^eUuUIt&xU*Gl`ag1GRY8_xdQDSMSN+LqZ}oW~^l`(kCVMg_ zefHhWDxG|=$>EG+XK7n zkpVf-S>tP&*%INMJ$phgNo2u{3_iO@p{z*2D2deB(hkx_Uptl^S^yR|?JoPSVML10 ztRED=7zL4m{7X+AV8yJ9nnK<)jY0ixG0Vprra-Wx`rDaj?BQxPLn7K76(zm*%@B~@y1w&FJ|mWR zeffOU!~is{6Y)EjI|hPsPET$8$by}^`snEm-a^1>FT^9iwjB1J)#mNl%8KN1*0%8r zH#?wp{oRF^ZuM|I>8jRF;YFD1x=tf^>nsf0bar^xFzvJI8&8j6 zP_$XAs^X>^J3F;fPc2iVOuSn4QDb&XIk%&(4)^*&Ug7bp@T|6iS$qneAQ0O zhqRfgZT_~Q__HZkvX^eDTjU4WRFQgS&5kk{M(3Su7(%?_VH@LC_1c5M=HT?Lk)BeI?EOBO`CHYln`Zt-bBnrfnDM9 zTq)dMEIyyLQ8Xc+9ckn#C? zCmUiru-dO}ize-QXp8d+XK@r~6v{VFn?)~j&~V|glS5DZ_2WV*s;JIpM%ri%6Xqdp z7FIi53eJVuR*J|>Xf0oIAzhjmX|0P|*s@Lx*}YotZ)G_NCYcR)W)!vpqK%uez&$C{ znL&KclS{&4yi`^uJm<$AxvI&1ULl5RwzOVtSc9`;dT&0~%rVMf!It`KJ27tbkng15 z!3kE(fBE3bTjlcD(i_}OcDs5&^c&TS&9g&LpIqI$wO9c0T#-nN3!qIlwTsQAt@!{u zg%7{u4JM&&2j|adFX2NT>jM)=)dN6VI{B94-d6CabxH3@4*{fYx{rmma{;8xSloDz zBMvMrt(>RgDlgg+K0okmnuwtdMvIZTf|!-+OU`&3Ms$;~dY0!Z5lbA~&k>L=hSC1{ z_hfueAFz0@jXgb`aJhWC-MicMwHS}WPmgo^d{ zjV5!TIo7U%={XOkls;GxwPO()ixxLGx6gv`MAtsDJU^BfqprJlcRz6CxOg+7wGA4$ z%K3I6HnjXjlE|55Bk*8cwWE=B9`q94u%5GP7OoSx8XcBELZtjq{5V5T6CkE#^;+jitXeb!F^b1vFSML!#+c--xH z;+846M$u2mxOW^BM6diPbD0|>vaC1lZtMel)yw_YoN9vD3&qo~%$QJ3o>`8$Ngw>e zIv}UHkPpj!@i49s(iG$x(TjAuNknylGK3J{YH(VMAa_doEhsANqGA?ID0SuZz2{S3 z0E^(8^DogXAn%atIwvgdQ2^R)MC4NH+*7Y{o?9_KA^UIOp6+~NQ!{-5;G~2O3=p$G0Z0O27H20XavsW$w zyORWtjfW(H*9mu@t$am?Dw6__$FFPy$*kw--SkGmi##@0V5UC$-klv7|ym) z)D6XrV3TOT9sX7VZN){_m=dloy8*xWq0mLUaSXh;xq0ikqH(B)TLg~S*Uuw{4v6XObT#Ke%clCC{@yuu-#?F9U-nweA^c)@5D1ADRS4Y4&3{8n4} z$e}?Hn7e3L)z|`~1fJe1Q|3kT=7&s!eHqc2qiOXIHJU-gnRBy`losLMq+7&$ulSK= zM)azlE7M?kMZLpSlX1|>&W`Wu7eZDcRopt7L~QL%kJ3ealn1+C3(k~=lTmfbI8SLZ z5exBCu~@1iiY+d-cg2q|BdQ|LD=*f0P;l3&`F*r?F)5RYC^t!wL{u}9&^ zOAl6LxpjY`K=1<0V=q|FIavWtB|miRT{8)TcQ-Da#OP4tV z2?82C33zxekAtCnm2>^LAQl+enYyI88w|XlJPpdPf&2Lcu1w!yK((Cw);6Cf;lLBW z;Y`Ba0tZICJgrD>03K`) z%Y2{Cfcl^o514q}zd9okZx!S%k6S&a!y$eeC>RYd$C*lDnNJGerJ#a>qz8{;M- zaqjM!Z4ZY)+_Tis)~&4AcH7)3hrPpa(^Z_g^H?`v>k2u4wUrZF`{CrPaGzE%ZuvfR zM^`xv^vM_}?VE$I@lRVhmr4)APq_!#b~8``@;|#icDjdlh8s6|)7Hfmq+%@8ZJTMH zjhkJMr~!f}54omZq(@`6Hy1;=dcaGhvuVkbS>QF3RrknZ0jichvCwYo1v*ymSZi%b zSXW=S@BE4mXkjRHNg|>J2zZxUg;%j)+SB)sJSc4fxaiAli%h{%=tL!3wd|OJQH_t? 
zZ&eRMm#z2vVm`kH76f14&DtH%MDe_1fx@@>+8T7;zoYlvkIIUOG-&Mp+j!g zmp2?c-VP{A29F-7P65{l-PS!%rXg#5Oa0}HP9PnJ_K2RK$3EoBllatmmcnisiCv*{fcdN)0r(Gd!#4aP;L^K zW|?ytpBjhd*}FUlX6@inGTYh=Q4u=K>Ou=c>qrN5pK`3h0$V_CJFCoxy|l#}f}X4r zMY3LiiDD}wyMe-0FCc`Q9?{Ov+Qk zII-7{^Lm23m{IIUIl{7=I%q4;t84R%g4lqa!?pAWB3P)@`4@GCE0A${MY%wa2$p$L z>e0xqC770{rv(btK?)p-aTQ7e*!;e!!T3ro3@}sm+T)dR#1K=zQJ#(wJ7oJ_YXeaW z)A;CUE-vx}oP!y+GS^f9_+H#ZxrmG|w0^$T5H5sPKItOv)Sm@8OPJOip>3QsIMJq_ z(=3O~?q0F!WMaZrEEn_f&Sb}UcE5Xh!&edQUMf{`mXQr3QMg*RDoJ5FEerL%+H&aG z^ro_LHrjpnnWKD+EooYQ`ol?v%r3Wy7r#o>Vva(!=L*D=}6jQLO%n`#5O&)X+-<=5kx5z{sXNd(_w|3WzTM&_h-Et$K z!;JLZIcl>%b$}In$NG(R>Y?+PP4qRyhi;r{a(o#%2A({+7yEJBA~+DxAC7-H#*c#f z4=+E-J_CZIh88=Rc`!}tq;ZKZ56as8)baMoNpOxWTICg&Aa>nKH?*mY9z9Xzdyuob z3s@d7FI;>kfH~Y%>b3LofXX(vwkxTefTyagMl4I_;DSW>tuWd~ECtK5=hbnIz~O+H zsR%<2biwyJ*4^bqdE01b9NU`#z!tpiT29Cm7z!I5P&v(w`kKw{I#|bmN+riO;s!Qs zpHY9eL^uml*PM=A=QaY=PSS;Yz30WQ@9b4J+%pU33U0+Nw`~FRF-K0jW$lmbSZ+(| z2e#$xfmgLYwusAQgU;52*e)jB&i?7?FnJSW*_Q7nJ;_E=-iWbe`ob)aG35|EURS;&}&?S*f!6@u+-v7ONM5!{WJ4+_7CG=HTxOA zy}Rcj1F7SwWm-EZZA{#m*d#*4-b}QOq=@vwC)n)M{p?lXw}e-O|ik;?9d%znRXqM}k4Z*%3Eh{a#{^r)!kay zL?WUx%C1#zEW=>Y#*oOs$BE7#^S8QhD1&wznxxjp&VzJ~bAcAMpP>BaTbtQbR8jbI z#}<3qa~OVDWhQ4BC*~IHdMzkY9a%B2nSNPDz#fRq>oVtvV_YYfbM%sw(V2LH$HQKH z45Z{<>EvjU#?D=HJM`|XC<-%t`l(B~0D?E~&fH~|#1d9=3!bu?fpart|9+=-Kp)z3 zT{vN7W%t*R05koIe5_T`a4R`X?XrJFPo<{mVYT0RXbSQ3Mus4!z? zwn^M-RWitoD5DpzMcV>=kr)q2Ih?(XKybNz;2v14|qx&Quv%!967rt3u0!g z>IPo%VA-c6?jMNjg`Ir7t2dB7f*VG+?~1vP(`R!!IH4 zfw;y--6aS3kfliQPj375tezSLIDf`NOMJXFtP#?m~)^xtRZLME%&yk(YeK?q=7CU|KW zp2fu{HL`pQNH}J;wAN<=1f6t_ zSQf~FN(ZPa@hCiqswN|h07BWCZba?Jqvyo=VPenU904&vV#TrmWLg(E&|7*0aBcpfln zR|HICE3;#?tD#krQ=86xBKj~ezfp|77I+$v2CM|W08kN2M_SE@^mUTcA8%*@?xcXx z-IGMjmiKnxhUWxC5iP%Ye5wI-A7)^l>tw^m3JjOlRd>O|GQ7``Vm56HrLXOl(zirR zh8m_N5#9{!hbER}`h`QU3kQ`CFB^j4);5kk219VDFQ$(%H4AveW^xZjw!o&1Z8}%f z7hwXYcES$Y<5Ncd_g-nN{{-H9>icrZ5KzXA@%n@1bzo}V-1)=oFo@k2Lp7o;NlBj2 z-5-$J2%^)gzZA0wvtg1gFIb$mv_s7+jA9>G=E74FHYw{(36wP?8A0m%mjYa?DareV z_^1P41h`jIK7J8Upo*#rI!#f&KI2HCm9kJ@$_g>9rVn_w{M$p4)V%#dr=}PJu=>ulI>!G@vi%&nH9Ai`YUj1FqgDLUqL-YZ=emu5G(>-vA>LDs=ok|~2ru&`0 zw@R|%5GBnt4Qa{4`yjvM4zB%%8R9YSU!#2g~iG}R99*+{_-~0herGfqzb&&AZ|Jmd#G^PHI_4m@| z!EYU@G!OatzkB+F)OPa^(jQH}f~ZnI2`hZly}wd%DRp3pLMzQGm8SB4b*8z=K$Wmo zX#4@CO4$^kNb~T$_4mkV7%F_{WmJ=;%$X}OQ^K<>z6Zv4hWT62s5E;bUy*2XDfQTk zZ>GLwf_gFQJH)rNJnXTSrG{rIe9OqUlF%B9|5-wX1jRohqDyTMQ2Z~fuSuZ}sw#f< zgYqhQuNGyVYpD_y>`?s1_tj0>1H-!{sZwhc|FED%X%f~_qaM5Ty$#Lg_mEM+rSE#a zmQZO9_kQ#Khl}hKQ=MJky}ui)_$`Oug6ILib)wSV@cfYhDh=y<=dZfIhK`CB{u?!| zgqgzeo(8eNr}Pb!hQ68NTShRTV#-QazuNtVe(dHJ$^TjGNyDMiBK}SNw>C!$EPu4I z&F1s|_IdTI_(L15=ql|;IqxY&E3*%?(9wPWNmP0(%KmB@B}#J^lM0nPP05=~7^Xfb zQ~LZ>p6F0A{(1)SgsCj0gwXbIQTK`dI1j!SQRfdRe@jN@Mq2$#HkBVIm6V;B$|IJlsZt!fEo3SGhpq39r#k!|Mg%=Q+>&yr1Vh=fUK~N(`+PEBeigs14>a1ZECnhCez)s1?KX%YbV;BjNO- z7}FZ9#L&h65JtoZ`wx}CYR*6)rpn>>rQ4lfOR<+?jH2zbi zwIYPyWDhVp+Zud34NEb-MbyNdJajKqJbl1s#Dvjj}A62#km z;-CL@ID&RfgWDM-RtE{j2E*0q;%@?m4x8k*OZ=an?jn-6ORS3GcQ)Zin7k};YFyw? 
zmse}7ocvcqg}D8y#D7TpEfS*41Bq32TIKo@n~Wq8pT0%_*401iWF7vmi8Xaf>i$z^ zwTr8G@ZEp#{7xZaou2>AU96idztv=wYem~6SGk5gtm@8cNv!Vb;9f=an^!qjlK-I) zOITAK&C8O1xq^+;8H+U~@V+m(sy|m|vB}`_BO|^)`tvs#YhQ-2kN7t^GWB2SHT~1* zTvH;9CZiOD#rB+cI=@0S#g5WdTtff$LGT4md6O05ny z;@DrCttZw8ORZA>JDE6@AXO{&U$yqEic_XVYPBb;H77jIU}N?Fyoc8EiC8|DI95CT zlaVkX@}@LD;cqj1#I$|Ve^l!~nOI}Ye-x&0pETijv;WEa+lnahjj8l%+v%cM)WF%JLI1Bn*Vf{fffGskr||@;i@M*C4&xzkj%ilS9&#|F<;(stf}Wo0$I< z%n_0K{NL>e5=b|t*AtACtau36B*99wFp>%RZydq|QkwaCf=s>fpI7V|51D^d|7L|C zA+60=V3nI#RyG$=Jw_(xw|xFX7DmLXp8QP_BpC0MUH@N01W%Rm;UY>f%Tj);;BOWX zF&2#eHyLYnoNLV2Mg`eg_Rp02AFL#AtF@tsW$LZba4x4`c2y^_ayUYR4%V07ql18r z4pE|Y`8w==X|kgt({E#A=XK))1l*vRz<*UaK$w&hk^G;6lpSaOp`Cq+Rr+n5{H=tM zuTEh1lUp5ye1B8-KQjk{u>dwUSB;VX+wgx#JNfEQW7sTv;glRd#*I~?Fe~OCek=h; z@ZN8}>o4lRwf`lUl%LTj>^NlpuklqHf6t|VtNf-hWI>jh7*HwKwZ?P!Jre#uJv~53 z%d-4e^KW<86EGv?e;HVejP+MdmA~x>ezdgu`~NQo))TOS`@c0;GqFxF5WG3ed9W&J zTm)km`85XoWA@*^GZ0*w&Hi=oZ|ZvDN|^lWOzAW z|JUFuL$PB2IK0Y4%xL*v7GdcJh?wcJO2JthtA7|)Tl=R`e>;NFf(^KxvKGX&2w4{{ z0&ZG<%U{w~3;$N7zlHC3uc9mAA_VVO+>RB)46$7V2`5E%;?RD@ncpMiZweQY<+$Sa zzbOx}GT2WE`zaG8oK&1H(l!%A+f;hFFoWvGXO)#D?D@u!>h?hv+b#c$phIT}>0^6# z5{s+%J`*6*vr ztoL_9z~0-OZCkp)n(E?6rs;uisbe+*?EOFhd$M1gNz{#;GIRB(U)u%&Xc1uXp0 za?1Bsa7Y;mr?2mT9g_-SvU+`xJnHk6aT+UkjwyLF(=a#$?>!s!y#bO>Z0-4#*#rUI z8HU#t+rW$zSL*84g9nDtv`i(D0pLGEx7Uh$6yk?!iX%;Gp=yPh(MquqT6mNc$vUlY z9ex=4Idnn%>(|sfK@=e04o(m09f5BYj)(6ZYXG;vb|}535rPNh=neO`fqiQH^wuYx zV0BL?##Xowf^R8%kiQNA^99BhugF@^%2QVUwFTb-6VyVP!$TcFbA0-p&tx}bh-Qq3 z&UFGkSN=KOtpkuAtLCL;*$Ad`j+W(b+Trf~fTd$?J#ZoPvo|-M0@0MOwPso!5PPC- z{keO6(DH)iN?1)4n11^UE;Ys&1YheBBMUiA|&&`RP^= z+FC=_y4?v{`(^vj^HG4LAKGHyP64i0MKv}$Q_z_BA+Rie7_51oUiLfPM7R^jC5KN_ zKc!Ut9HaW=&$QqzonYP3x9qyJ3%uFCnRok(~Dj7!o3Vr8}lHr<|4FCH_t&q`BGp2p3 z6(ac#kF{I2LlwUxb8%8H=I45kgVkJu3jB(`JXPDNSV*LRPZ2L#;MT;`&tq?zfmy=0 zSiHIgzSVoTf8E#%49p)aM0Qvw2558x z&9=`^V>;Tv+hcki{qANEWam6O%GL-o>>E!Hdtx9RHE<^dx#jv&9)Dx)#Q{CNQFF>2@a=sU4&d!|=!tx7h2qlb!9`N=Y z(vGVlgT7peu>Kh`$ZVSl0V@hj-BA>2`qTp%`ngFF4tPwkw@s5>t^43fu6LJH9u>lE z7>CcvP$5fsS8{6YAk6hvk1T%d2AvN#3d)pG!3V<`Jh(hLIb$F|$F54r*8 zG&%n0CN_^|yni{5jUrWbu2QcP*w&4J#;GvBA!ws{I;ys11aM;0tlu%c<(1;rZ@WYW z-n@oaJFTgZD;(5*Z5;CkvJElGb-}j6)AHZ+H#{E$nuOV8+YpTJu5KINU&MB*H*s#M zJ%u4YEU1!oXb^e`?H;qvBe1y4IxrrG`NXxIykExC2NoKg29xJ7c7IIt(YrJV40e18 zwS|K)`onhp^X3td=)K8#=;d)>H|v9ZJ(<310d;Q#4IX10_2qT#{+wZKyb^;116k9 zpnF(4P?T>PzMfz_)b_j^&YY?i*(6T^bD!4Ru_+_a^UIFGbAk$oUme@Ls|#~Fu|ePp zP$#teK`?x4yHgf+j{UON^$VgDh!m`CGmIv~b;~{BE9d$k(&yCt@%%2pv47op2^+rN zOCimASE<0hkt;dj{QyLUUtc-VPX(cP_l3!=WWdRHJfBSN01L^Z7u20Fd6`rP=qU^U zNv56AM0E%T{mU&YB?s}aM-z3l&yvC6gI1i>x(>i)KO#I;A%k`M^yTI215kFj*)M-` z2s-qyelb5k2<`ob!nZ$Az(uQO&&gk%@GWrf2Wwq2gc?`x>Q5Sgfc3o}LurRW5@t_w zj0`|rzU8FMH7YnvrD*#c=!4fU#?9E%$S^D?P-(b~$*pYcketEIA#mZN2`!zX!htXn zoBWLdu%Sq1`tGE{FYJ&$-98HZy4AYp;?qGm&Arf#!v@BJQu7D%%4{~9fiF1#Nwu=owIO*caSj(P2awFZj z>Eu&h4c_7(4ta}J0$ES_irURm@I*~-DOp88GyX$EU^)kAGQS2GZOH%{-P)jDd=m%~ zvK`{KHb6l0N>hv$`oeRlA<_!lyeJz{8o@RDKB&N!V7o6h3*R17@AEVCKgnxMt<2Jk-o z~=9F(epjE>YR%_4&<6c(md~uyHqbcm^Dfhkx zWf-SC!yXpckn!U|P&Hcxc>4@#Rl+TKNNCBFvE4|X6Jy3u|WS*T*C!}T3bJB6Q z1I>_SF@CrKNX+~8#Riw*fo96WI^@}XAT^%gv`8vMF8=9~g@xGbimr}_H>nGp_CMGl zej4NEkqz4<&Q^kV(!-)<$4EpIe|`t})ML9z`z{#o!6JXq`j6#@bOWpNL58yDO<*1Q z#!OtI1|q}q4{zj8L&pAJE5eEG$k$mUlwPYJ*p;4&kM`iZAlhr%r$w&`a2Lbhg$>mJ z&94#W7uU0qb_p;k5FPGlyl_u~kW3|b{j>(~!=L*Lmtp=@Y|md_hCJb3E?mwRw-^jQq3eo1Y4 zD}SgG32NTg>Mf+e@}sV)>iAALSQMXkW1$ra-WNt%-Nh_F63fKpl2Y(4@?{t(CHz)5 zdAzUOzNMTVusA{}r>J&eTr4#9X72!!QOvD_FWZ3T;f4(UV{L#7{cSO~{q@+IAkzNwm~JH|mf_JXzq}^k9n_{{u z7TbV*LuMf36^yH2_$J?}d;&a8>Yfz-_-cquO*%U7S_iem%7W&Px`BJvu|ecxGth*n 
z4w0nmA#Kj%YcOjy&`_k7c)yhcY3fQvuW2pV=B9C2@?Jd8TNZs^GT1TKR+3C;Jb zA+3HtN3KvQSogL&8C!h<(S*otbznlI3m(rCt`<7AfZu@ZJ1)^iXpk$9*0ZYy)eBA*{sR>-+H>K0 z#|612NI`R3$C=w8ber?ngH{c&94_o5gE^779F9Jt6!#GV%EBJ}Ow9zEi3){qC=`$_kjrDfHUI}scqL6J zbbz-Z1O;4g1LMQ<8{0|c1PFse7*Hl_&sjm%~4PI&-*=E4q(Gq>W9rM2vl$yd1rcl9prd9jGrcq#THShNtY{L#YYrD~(#Rh_U zFw50m%&gs`X#eX*C&*fFqNGVt;beZFLUG|3+~<=`ut;qOjkb1$tzAuE%+I-)RM`mL zr(A}QW?};{vg*5hBL%!;wLdH*eTG1Kt_SLg9nkv7KlnK#_Vvbv+tY0-i`mb*hpiO$ zcYsr~%*%RtDg+xE-D%xB317CTN3|U51jC6KachBAAXC3v$Y7rvG(1N(a#gfLq_fvK zPwad3W`2-Y#_I`K&+?4=E2slR+`JEdbZmtQg+fmay=E}(A@6MTYXfPa4!Uou6v*(o z9&k`TdKC5+y9wY~S|OFGP#HO6Q8WiMKb^-^cZhT=rvJ&c0tT%hXyCf@jmy9v zxmk=f#SeIPLFCc*5yQv2f%HLsgYz3q?fv=lIFU!3XIM=8v>xVtQ&`n&~7xT*OH7GxlGYg0Kf z<1ljY?AVGl_R$>6Tq-r+hX-#x^$c0FZeXzJjo35N0dXM`n#BTSSR503+Ig4)$-MF= zW+@cNy|*L9z=I6m@&ri-rTW2L4fp0;dq1#fGcGbz_JQaj{l@7GGE|)ITewZIIAv_%=Ka zq`6I$qgQ4?V*C>wuW}cZiQhKelidS?iea7#9ev=Zw5K_iV0=Xkvzgmq8w2~n{SzGjVy6*VJpQR2nL2EX|~aw9*` zZBe!BfJmdM*SK>Sd|==^a1WDLhuNJAn>rzK*QHbQd%K`RA&7%m&<&&9=|A4>z+O{r zkCTGDFnIVP<>=*haA`B9eOA>DWMpp5y1NUWRV^pib@al`h!@A?+ObuYPd744aM)a( zJ*0fdpdHw4K5#{GVPW08Rdt# z%?>aY$$Z>ChV{I00r}-Ns5{!Q|9D{!EGS-@unX*iHtDHcstW~XPOfizTRs3< zX;^4CmLBNKksgcaZ3oY;Pj;J?d!d8;qhHCOAD;C;apbv71%?gGtsM*BD^j`@E-kPfmcp@(}HxI3Q zVaiG{$FaBZB)x^bn~Nf4=nKH^vYH^RVKIab$A^pTAVI{}!H;R(xp2C!qj}v-4Dc5$ ziH^}H!+H6TQy;IDU>oFI5mUP5Fj=AA6S1`v>gAYt@>39)_4BcnZBGIXXaBVG{OMpd z-+cE{1ili|J{`RyHd+O-vF0t#66G+U6kmV3wiI68f8t=}m;s!nQD^n)Gl4T;@0qDL zwSd3%{3}}e2sIb&YdSqEKyCB-6)Ec~DBv!l`+DmG4Dc*=w|~rrlQ;A@qx~A-Xq97- z5^Wt!^QTF7a#q3JVQ1A>?`z?dZ7hC4zPA``q#H|gl87L+EGXrW(*#~8YJc7jsskVT zN7oP*y|Ozik>eO`J>1P~xoN6WO_=+nA&2je6ye18gu)kf+epr@QJ_7h&ab5#4H2e9 zw#xW0*d<|dQ~T~cxUSL|;q>(mcK+-(Q=3o%*yTP>Go@s}gxye!25lyo>TnqGQxhSR z;k0?BaUe9`#|hti7KGfQ^wnQWX2K)>SJZubiZE-ZeKNKhy6KX%vGYGpIVlD zV8=W3P(LneCni@pbNltn?4RJ7G0(fL2dg1L?sPzHDhZa`cE+0Ti35&~1-*k<6kvmQ z>a%V?${{YLxLV5(`@D)Mch?u1{R9&MnW|Tw^Mhjk=ciiI$sx z1e|%h@lV?}>{yG%kZD8%0AFlzRYMtcZxU%+xtj$i-#42+qvjzM+quBVEsb#XC$1^K zq7x36=_RW^Y{dNd`Q*#EDnMjF5{}dqfrwh@#8Y{B66(>2jME8hg{|TbSl@MagZ117 ze*KIlQ2Wlx&bq!D&i6x-^XoE*6v!`C0RgNw3|H8l@uA(hMgfuJpE2*MPxes zT3N|ncP|6n^3Srribw{_+c&shErkN#5p&vWG>_5p#fS%wZQ~GCzF^DE(0n+pBfUUu zD8PdvuQKgxn;fVk#b3!Xh=s?IGg;@(zd&|(-)A;hrK5Dl#m^$5r69Ea#%V1~4dRE) zy)4?W0bqO2|E5qXY%7*KsC@Pfav;iNtJ<-7c~6CR7FEXL*%E_hDh387_F=h$#iO z=59Y}&1`th{(e3Ex>&?K{VTFx8&5(d7vE6aPSgSap^QZOGj$-Ygasietc0)a^tVln z3c$c)^F5_A@n}zoxB60l8M^(J`QcK3BZSX*UA<@32pKpqJYM?|49n<`w4W^ouMhhz zzStz80-vR>f#z!Ddh>CJ)vupY6}a0HmEL$@?)O z3~gML-ahyx1)iIKocj@%0Q$Wj_j=!n0Hu}o&6@VJng=Hi8*5EOq66Q~7#B4sBIZ(h zmFFxupoH)bH#uek$&2E1jVlR4Y@+xE>fG>f7KruS=MxcmM{}pNR~A}iDF{do|EQr`!{?hm9b7)es>h12$Kw^ROPJ4L^kY5N>a)T8KL~IKh7Cs_q ze@{2c8CL*aS}iP5yQ6`(T5>UdiPk3Zt< z>t%Cb`($}IWxgCGIxhHsF{?#yoP7hA(@CJk^XZx8_6oS6c8+uRVm=t`*S|J$d&<2hFEQi~C!d_0cU3n8!>lbijIeKUR<0r2$Mx7p=jZzwzKnduLj;%p(T53!A7c zzlcUh?p4^;@YTWM*YosK@ z+}1B444PS>7+Yf?IR6&T@Z6K{%+5gK7u=Vc))k`Xj=Nnc%cTvgP9rl%ZqWU)MB+re2WoW6_d9$)1lb1U4%c@?q7CV) zU){MQ(Ybh!1F4b02tOCHs2+|*{PgSMV#>2dw@t0sCKMvjz1;gzQXdlWh=hntVbkx> z{Mn1e?DOI1rpm=$S%UyL5kVP$xpWV1MD{o~YsH}0mW!88oXkX@7}(mlh)(Y;IhLy|H`2Rq_4qu{?~8`(&H$$kGh$GR1`*qXKCY5h@0cndb&7p zHVQQkGs}l{6z3BC&}78rbEblOsRkW?sa003pN7sJ+$7zT7l#7g2M$+eCBwx0NpBnP za^U67x~*1`jB-oNQ~d;*QLg<&mZowna`b$7++2P+7TNL)hZ;Di!MZVxy~ErMu)ve5 z*4*lV1_SpAfEDEhV z;5$|@{Thww>#I5UK1SaYWFwMTU`OsWm_c9;d&dD^p)Rm2js$LwYx%U!r zA7FpBY4kaa^$o_EJP3uxy(y8MHTYPByR;Jcox2243U2k<%;lr`;@o(>q!1+V%;;R5 zd^o6cidCd&q{3;F3p%;c;mA4XP_$iZ1u_~BDL?SG7%e%dc~?$8M_WD6wjJWJ5S?H! 
zKl&vfYI!n@4-a2QsHNXpWA-Due!1bZ#cUbMSXsI?-t31|w!I&hkw2CM^H$x}#*GzV z#yhg*lSm-2XC}8s88@QX!dI%taFvMC*n7&)>?V?q6fig2nnsZC#N^hXLrJ`18H_wF zP7FSN6NEG~gYroRkC6{iS#$%x3ySy2!?y`}fLB1+^M`kyg5-_3+pk1pk#Llw34?7c z`gMGOEw45l5fqR3o)z#$WRJ&k58EHWg-sunT2{iK^ExX-r(g=Q^Vi~iLYs!-Y$DFM z&&HtyR{ZioGao#PNlpx)Ne%=bcBziX;?Z#D=0S(#^%=;7>*aCnV>yV%c%|~EMH-_2 zczE>vPakwk?+d}I_7!OA_ogX0yah$>8w0_SndpbzphX0IA@cv&wYzyP8$~X4P4O?^ zK@*uxEw|ssz$0S~fBMVspq!iblXy@DI`YNV{bD!182O|c=6cZ;AWqr33Nv0eRC_Rg zwkRnP%m(}Tq(MS-L_hKxX-Mo9+|i3pV#E6KvlwoJB}ft;AZ``;QPixIHI|-tCTmIz|oD# zO@cNpU%%BlXL&(e4iY zya)d8kx;4TdsWc@v}r@^Mj^>Jc!=F2J#*PJ1$6q?XTWmZZ_YlE7_X5GnpahYx;omzvC`4_%egU|h z;i%=)P!uz55|}=*|FGj{7A&~W7Mb45hn;lk%v6tZbcMRp&hRR}1SPWFGt{k$K%M-G zO}Q57K-}Z^)w?POSj_mt6nKi@t@{I`eWFz;Gts2lLa+h}zcwhgS&l%X=@9E8l>-@y zcc!-QAp#$t$ou#7B*@;#PcDwBLGQkpGnxq1ptj(G8ddBCP%u08AZ|+mIG*A!u~E%| zrSpuD>*Ws@!?uU#_?QMip#ZY>ra<}z)HI!2Qt&PaIm?u}e12FAlLve@AB)X|;uZgo z0Wu|oxgJbzs*4^wI)_8Bvy(LU!#8;#xs)!Q2|HhGk^e}Z%zP+(z1x}2%M=L{43yF# zg$U?-D5PZ&^$xueSKjB%_z?ByNpltng}}jewKpG9-$C((3ib0Ysqj|ML0wEQ75JPQ z!;DliP?E=`W0H3RkyU}ow`*4-p>@e}y`Ee;9#l?tg|ii8gVGkw_qAd*OkO0kdf~UDU^I+!To>=?ndrX6dvw;?s6giHDcbK zcQ&1n0=8-%Hq>4O?}yj5+|VqAS|-!;`a8<8h2NvbFHcpVqY|~cdf1y=DOXz7;8hxw z@#$Ga50t<@)}l=(ipwFg*gXg5Sp}sQ^0(RqYS8HcT9Y=f7}Oi6Zq3D&0VCJmp5!N1 zK;zkjQxmRL5NScoY?WuKh52YPNqP4M)LFB~CWSo_?bNu)v*AiQTx6(Xe3exTc9Zg2 z+$y!;b|d69vuHh`sSlIez4#*5Lcv=o$lAuBE6^7#@$X;suL}diy`%i4)JQlqG3UN6 zJQ5g8u1`JVZpU8BdX41Zel*ljnGw01cu^#Ix#Y*yfF4%4eE1ES89T zfBwaIuq^nL7;YDUa@)?57iXfu+P@|-Whw(4o7QQwx)AY@Cw*IpaVHUA+l zIx1Wttn@JrcIYVxoS`CkJ>dGaS+E48)m_+<0*gVf?`m!Qd4wIj#|uB2eSv~l+Va0iHc_kcctudKw^XD z)RmXjK-x25>b<23#%>wE3nrJrjaku&2J1=)mM$^KO?!nb-b{Zt;>m^j_D)W(n0lC) zxSb`VT?>^ z+galKvXA^Ih9QsKoGwGmE$e#lzqPe}|D54gSI0bWUX}c_I)*oynCb4_)vn=tdS7pd z%Yep0Mq56)q@}?55#A|gtWm?(yR$4>>dC+K(VM&Ze`*KA{L4W7xSc65Obk57y)g&c zig_GIKIej;T?f@fG7FXmOQqgOMu0R?%)F{Q67I?s#c%G-1#j<{+gK9HKt;Rr%=q;h zJScbE&zTjegh97LCZ(_pIDBiIOYu<*G}kYjvS}%VN0B=>HTu^=L6v|?VO9&!Ci?EX z`>+}6ckO$`d6Wb;hQi5x*P>u#3*BetxqRr#4v3Mz*9h_*ab?m4Js`F$Y~iSnEptXE zeiFD-4LeoMmY=-|f-=?bNBx}e89-EvpM96pf-O|_6rRW2q_%$1Af%fO!mTbjO{LT( zsG%rbjxM;5Xl6wAQ}m95)3>vId6@s+Zr1BLr5{J&yD8o@Mr|B8oGp8vEOvt3j%RzB z?NiX}69d)Lta(UTh$>j}r5}_PC%GPnj6*{V7V8}2G9o>~aC;303pRG44W379T z^%1S>>IOsb{gl6oG)El-@zfcI+RVV+$A@^j>&GGR(5$72OdA?ngE?#PCHv4=>NQGVty28fdrmEG7VsTBoJh6lBbq8 zyWce-|7y2&_$VySwi)<9Rx>*EV8_f9M+>?c7uFie+m34I<%uh2dXRAvvayhBM%DAz z@Y^ojZAJElFRN~TX+{#d>@P)hn$S(Q>+@{-ZRpT1DU0y_esp7tbz{!;R%E-r_}S2M z2Xfx-_tP`985!;sQQ6V?2_2U>;1+tQ1+DjP&t(5hK?bhE@v?o*OD4#1+ zZ-ixL-FoY|V6ZFX_1xn+g6@-;^*&u2N2NUpaGHyPamd;e-nGL4UHtj4w=}7s7(Ao2 zi0z(=6sTC~Vu4OiMHjoBkeWr?jL^x~_Wfx5t8}Yw)gaiupYhYZ{u$2r(LL)d!dwSz z@nd-9&xEH-8awfiIhA87@cro98Vr;OKH^p0i=O9uR&3(#Lu!MD3Xb)C=#aCij~p!- zg>Ab_oX91k?3g9xb2=2nEj~tXtJsUa`Vl|(G%=^&7*Io`bV=`KDe>HIZ zCk3%fyRsF!4xrpYcbeJaUX-Am;>&)a7mu{$@78=eidCpl9_h#@Bg_u&zj|Q+Wl5M` z_$ob&tiqEBV$%JHOOP{`DX|yjxSBCd<@TXy^X-|}r6?%zlG6E{`a#4!d9bqf>KL*g z5lQmG?hy<2i>zOG(2K?cqH;Iv>qo^m-ej`8p`c(kPnowI!)QmtwlAJ1@RP`oB2T%| zOhI`z&l3)}_8@8+4GZod8J#YdXN)$aBA#CFhga!Gkk&lVA7q(D@>RTc&b$LC^=QA- z_=zTDs>z(jRP=ofeqDnNYjAN5F0H}6LyBQJ zI0{NAJ--S6O_qvO5*# zY=am_=)HcO33)PzL>fZRBhg{Ro36I8ICL0`(i&VQjol_m=bso|bf_Qcei|V&j#5$C zmPdWlr-spDl-owlM)9+b zHKV9xuW+)HCl+kmxw$TE;xqC-bk@mmaumJzIr1#{Y9I1jzqhfIe;5(AbDH7lCXm_> z2f3?nr_l1oDOLf}6ymyYocrvfF*L5(rge$CAKl)v!=CtK6j|;2%q`70gCukCb$XXJ z&mvbvh0+a~Ge}gDTF~-l6s4Z(p0H;iBdh&6$BQmcpm;lG2DWQ+NDF7vQ2S#Jh5WD^ zsmPl}<%d(8qAv}hQ#=e?(}T%K`|{HEB()h7b1*jBMe+;!*u+%!T;~gl$oRpk>@tVG zy`A2H!|v)Kv(lf6yhlOb@vf)O+Y4eBHo!SMl>8NK(7w>Uujngc`D*9+Lunove_5bv 
zT*^Ti%T48$_u9~dHCV6)!`9%{HTYo-YL*o5)3$6z2QJ%d4b`+F0UESF{CNjr^N|rg zwa|rxx(Nyr@;wNj&cq|0Ins@8eR}w^*r5U0YsABaPc4Z4{!PXPoh~#&q8tdh*^jWT zm?F156x6ZtSY+roGP-lMWkkKa21N(&R-c|~LN`9rs_95{qqKqC^T+9_NV{s&GdV1# z@x;@q2Oq@;QJCT-hREY(Xj-ayOA}{3;uRPOx5@3oqy1?L--QeZk=$!ht=*aT zC(QAWFvms})(!`;a$u4k_e7%;> zy!Px_d2=HJX6+k0UNo7G$VV%#p;v*k65aUBW|B*(MQwsAxei}y(bn%p-v!JnQPu0( z>>ZUM*x3vbt=DG}+LT>F!XI5}M8bjr1t&N=QPbzrz$)fWBzV&(@zK*Jq_~^A*5_9? zFn$d12$?TLKPUO=Dr`GYm+u3<$GKF*QCr9I)QF0bUA~4hm3E*xch_{^cja)5$^N=~ zUpBIA+4;6Ay$_|iC_UP+XB@4|Os*cRA4Q6f!J@ah7Zu*3;@NKvHo~x(b6!RCI2$Hil?-g^u5^sr=ud_FKacq4@HD{3m=rJ3icxggyWnjI z!wM39f^F%N`@6kLhT-Z*-2^D+ARi zZET&4k47TX#y0~?-vFPp-Iwh<3c&n;SF4O+F*B5PMT!jPGz7dm(7~eOhBDzsbcLJo zV|Pdbstj$rMqgD9?1sO@$2(gP=kE4P`tE(mHf)pi)&UACiLc9k)YO68$|qhtNJ&Sd z3rx6?4b@;3@`5KzxEn#OX@zg*02&kFX^X@HI;L)J4?A-fyG!+~4a0d`p){oR&9(O3 ziCUl<$$eHxCZj5u&pMy9M-iu_)3t4t(@6c3{df1^LFDma({S9;Fr?Dl-Fk+x0f<+` zt4_ZfK(S)9C&2~_h|Hl@!(}#xe#KomX?h|B9tt?u`mZ#C)X23Dyi&u6 zhyA4A$^w1{Z5dbnsZh6sp3@&Ted05Xd}}+_(Jq^kGIk>jBETGVyb0&;C+hNDUA0L*(JD~i}6b$#*;M&v;t!vLO)?mYL zh`-kcUT=C&2^BTM$2&R{ZHq>bi*fpBXW0asVDs&quogJvFcY%JyB)+57U~a3cLB$; zw@iFd4e*%%lA*3t!Sqg>oH_AN5WaAJ`jUM!j69vp(6j6Wcg_pjU%u)C1Hyybmv5KA z#?da%^{fc;uY9(7_M-wK_PO82j|Vox_ycJMB;5n|3i+_zNGuvj$WnURH*DGFL`;$0 ziv*x^tI6H}z65B0{?s7*x5A+^=J>rLm=kQL&Bf#ggFt)XvE&F(3|LEEezzRxkDTc! zb-jxuXzBfGncvh2lW$+D9a%R335dp?_16eET(^DY@+%OXUoOFGKd8+_CsXbF-V2w2 z)5TvcOZUle;rNXN`h{Uw7BuQv@t%NV2eNcG>t!LFY~o@AV+rEDcy!a5u5u`x8|mJ- zZ5VV~PqBCfPrwE3#kQxM%4UTST5GT1>!_w?u}ksfc@91A1l{= zgHx61c~4oEVdjr11-)x<%>;w_wddhA`1iThA$`Q{0~u;XeRkk;F*oxTxzonGC~yPc zWGa&LkpjMcpSU^$sIWZa>}+f?0Kqjv3li`8;NlZTof7VTEC8Boz->GkIA}k5^Sr}c z#7@2E{((8f5z{>G7#tsjfJ|oLht56li^ilg4)sFR&(96Cx3FU~_D2pZOi@7lk*5n99nCrD zTB81J2%M%@0t&ZIL9a6Pm6rb$EHex;XDLoW6ZMonIn%$tkt5IMD`4{!xhsT$2>xKa0SYh` z)2wJb`PvIHhLbHXq$t=G=6rJNF_FpowPoIm9EPV`sW#CWeIOlsXu8*+2kdvNqwO-(f!o{+n2k38!E4qH`6jk8^#OgYf} zW~LuP`7+)pDG$Q~2HBLk?g?PO;70fSD-~!i@BWZO(+i0^Jczc`HsH$n{CK_(^RGL@ z*q?R;JBuLyYnYXO20RX&{BR9B+}HiR!OVWX7j$t!_zYT_M#w&z6~|Z72u^Z`41K>* zpzkK>dv3ri*5Jw>74~tktM{^yOznq5*>)xlQAJSDLCU73CIHu?>nc9d&5&cV(lK>v z4h+2;C8;CRu#45ZbH{lKyt&Fh{v?rz)^GBS-7L|Jex02fqLr;hfn6Ok4+iI;{t$k3 znFGfEtY!d{!Jp^!wda8~Shxm@iZ(vWUTlUNg##W_JgxB9mRBNorVV?!e1Da-xf8}> zqZl&6y5Qi>rc1sFU69ulb@lAACOF4W6Q17E42Mo$^zFjl6+_Q-)AB~U!5pt|ty@h` zhLfMAaSMuMU?n+}n(#NmQ$fL=<)4j^uKp_X@*>vI(P^=+=w4uY5I?KkLIn;XgP=Wc z2jFG2B1KKI0aE9D)>{mH0&-+YxyH>V&=)pyQ%%Gi0QpuD=iG*%bEe|7T=ob&nLNJ7 ziN79l6CT}e)+q!0h7C!nA!U^iy{?B|S)>Qj>aI50q7hiGcpA5)JOOz!QwD0Kb?{v& zOM;Z2g55ewez2D-8WNo>(+%=FAw=bh1#RpYsHV869eOeYwVN93S)bR#@yFF#q5K)> zit(5I3G2$ROQ4?ZyGh>;c#|DhSxU!&eENv`3H^D@St@3`RV&tGR)Ia{w-V~m+&9x_ zgz8RYEuUF=0XssMRp6pn=rRsfpWo*V7B7N{R`<B0VlUFaupC7296=pl z*=fbtYGFggyw`f{AkE3F^NQ3w6|Q(Viq8R?{8jqO~U~ z8R^!GF?($Ld!(7^Fmd*&ZY>ufZC;j2P4B;IDiq_pmd{L{5 zon<~ryiU;_`Hk@8qV6X#I*)BcRwccQ4z$ulkJoywT)*8YIf)lD^n5hwUP%0@NjIw( ztV!pxwp=WA@ZG0`S~{HcD&)Y)tUZhTfh4g%=_I+eXO1?%jU8B&XV;uhTmlEUaMpRV z6hk+9oOjW@pYh3G=m_aa&ehz_q|!r6%2#ygNOA%UzFFrt;F9{b)Z1rr<4$-_oICf0 z5vSZ;e(I#nS3G*ME2d!uixcnpWm1(jfsV8#*wbPFM~}0a=)(CDx8U@&Qg$dZu;7@u zIi8+sn?Zyx1e1yFn@I_ATvJyzt|yUgmkd4_(c{?LBDz^N^59H;$RVvkY&b?Yw{Oex z!)Uo%@R_C4X3{H88V{+G{nf6Y_=T>*e#|oohZeHYh!3LbvuK_AdEHs@HFoWeB*1_{=kltq;!&T(86{J zyC$dZ^OYbL+&l_lJuAzKyZMU8a`1H~Y{Jswd}cT+++yToiiI*4E=-^0#=c1gTt3lfvlcTQ zX>mZ8{dFG~>Dl8}p_|byh~(hV4a@mV)(9hR56pYOR71PA(UlY;E~U(bwawuqjcB5C zMMBC{#xh?-rffroYbdV7&9F?hcP=i`RZ2)?RumPACcm}U-shZsPI=$^yRY~28~x#| zp0(Dqp7pHtth1iy*=K)m1{o8^7uuhAlO-WmO&`(lHUuHOj|yI}@uCHN*39ru4Z;F= zVr8~!n7uC1+QqUkK1_qyaO{KoM1%|Xn5EO!ES=uO(!m+OE;jx-`b6~msJnH83p9xp 
zKUz6F9f01WdX4b+@o!EqA5fY5-m{LH(eOSy?dxYyygaMI?KgyuK6w75ZaYN8FRip% zZ3-);5>vzPKlIX}lcGFVRNB{2Nw!@_#rxKPA@7p9tvP5!qBP>wViLB)F<**?@JearKvqP zI;I*^I`q-?Ud5*_Hc&%ew=R^TTYg%>&g*f9jR<;ouWptz42dw!NUaRrY>-=x-Vtdp zp+&p(s`R~ubf|Xr?cCrxFsiy@U+yM^ycyVc*9l~fnmfe~%PXUwfeCje%nJ9>r=2Gz z4s16_n@*o@lV1>D2eM)|_j10jPkerKp>pdP10v`5$etTEmVz}Q;Q?JMjA)OUBS+Sk zOXvkhBWI_qtOs*`8QQD;UPEH@UTevhgZjku@%>g$+x`Juo9Vp1!(mfe|MM7~(jQIe z@W;i$bQGi`s;jdWk$h(I`ZTXX(4p+9$=Q<{G)Jcov2@Cor4vJUn`P;RiHM3#OY1e~ zzXs-KYTbtTf2C4(-&py&rH~#xAo zL9)-1g}>EPTN(;igx%1f|NLpn-dS5SK+F#tE~Tu{AWqHvBl-CnJz`yd%Z5${AHl6k z$?Mn0>!=iu!F_DwH0YhZ@;gqRmJRHG`t_vc2o0itf;X}0lmSr@wDirqgla%r?}?V( zT~J4X$feKUdJE{3SF>_RqZ|b1-q+#rm9L0W<}NKJaiwQZq&gKmOVCSDy(VjF->nSFTQ7KVhgLqrdoTXn!H%%y_~dZ};E$Mz#z zO2xEz{j4RMq*p-efO)9{7Sw?~v!Xlc|LAH!2t9xBo;;==v^F-K5W7S`+c{L3m^&HL zZMW{!>%aUY_-LB=@aU5-AS|~3+JSL~L`->u%aPD}FnqhsM88Ku+P}ve{}g)@x->~} zNO$;SD&#=3bKTvlL7w2%&&2{G;zZlBfN}Bl;8WG!m$I)~^o?H)rP4FH^pj|<68qCv z%7Krc&Wqi?pTQEFKIms$llu=Y-ONXrL25JpUr{=J9kjB!%R#_kC)O8<$ zZ>3@G?w?DkCBN2oFbJ-rra2!hZH?gK&+V$;R)Zi1idT&(5$~!6b=C)q7Mgwm16rL- zZ_%Taiq055CdF_|4Rt?G*z5NF9O`wK68eC{M=<&DtfIlAJ_Bmww!^uu^$4E$>6>NE z%BZyERE{}OLxojN4fUuzPrZHfa?0j?A3^5TmhJAo_z3imIFffJYY?J{`87N4zoRmf zV`r2;t);{fNrPX_-%Y((ILO23%?I#yRm%CaITg`hb=sGz@yi56zmoXVUl6Lgon(qb z3$X^>XJ^%m>D^9I!vjx;9tp1m>#i0XJ-_=FOo%U9wCSUONO-m>ZtLa`)a`Y3t9;O# zr<>;)I|KUyD&fn)#pq}I;QF3g>j#C;!I=1>h9?7s#P2N+OsUGOqJD_gxRG2TrpL59 zoqSj)`UBO%JMvPWunLfQ^vcH@Gwy;ROP84Yr)d%uX$8Bo27aLyUR$MI^;kmd9jb88 zb3@3t*Ut~Ot$7D#1gz8;CPTkC`FcC@!EtTE#3=D`azzbgGsd;!DP(I*z*p%ogRn*McX-V#Xe4wVV6#!REsRC12 zI!G*RzVV0b>y$5yYv<}#O|9uY`f;o4rBqBWzt_%#pMz!>*520pp&HmqUTGv3m4m^p zVwQKW$)aXge$jrpoYU z_V073|yJZDdWngjKVlA8bxnPTL{@3n~kEv-h*TkGT zq(xW2Q%fz9%c&{^ZXDxT366do{d)G^SD=69$#JK*p9i@Q8y2>Xd_hGYI`!gcshD2* zsm+nqkLsxQc_lKt%dbGnwj;|to<0E^)8X)E#TnGZk(Ztx+WUrTTM@k^>~@?Uy`#O* zaT~%hdwnl);g%17fUH|a$7Uqm0k@3q&Rq)&s4VmQN2uT5QdSG6&9K>EOzWlujl8(x z3FT8X%XDAM60kYitxfAUMIbdOwXd_`11f}DQeS3`(Gp+d)?B`%{wy7AiRRWk(D#Ek z@vMrPPrSr6S>w!7V=_M^aqSB4f1;V!VoaL+U zP=U^7PadqU1n1Au8}s7+0Ose1o+ut!KuKRWUl`iMkk)A?asRT$fZjFy#_$U50?JD_ zZPVfK7ob)$%s(eJI+ZH&(mj_t;1%UrmKi55F`;i?nQCA2vk5)yz!UdM(<)G5lqual z0=*1n=A8TB*)5QikQx(*%;~!PXy%UUNbcz7=wLl}RC7ly^OHaq!+k9+v8B1PhFg~ z&r-8l11L<~`SGA0!Vn_9ruKF%1DEQmv@flGM%hTceQtX;qen_I>-zgksdJ17ec&kGOYxNKHwtToV1PP(;rUdiM0%$ZYD+?S5@{#tVp3 zbkCm8N*chpxhMS{M6ba7*r@@BkLFP;B36`Cca_kwQ%h|7$evN}hB=i|9W@BA0k7%R zg|(o2*p=uuaYf*1p5CdzUb&Re>E5t@#|-F%gG<~$U->{C&ONq$YQJjmp?I!i2CN1i zr#eJ0Sn86C-qbREva{eGRd=x8&6TH(>9`L+>ExITXzQ*oA8Z-%0t^`9@^x;P3UDXa z&N2KV1#$rU^b7+PSSb;G=*MmF3nG) z7FV0@s9aJ)nb}rN$g*xt+sxAsK3U$3Mn9wN_=?u91$jmWgEM#Ur~HdXJX%7Zpf4E;Ar39i2ZcyN>c9kE_pyAdICE zb;XwF(Z%FerO;&A`#P$)=ck0|+>JuQQG1rDy%7o!wAtUG-^5x_lYO|sWN-bE~$M(^)(Kt$VX zlt*NR8`DuK^F}zGyGc!Lv7$Jx*9UOYs#EbKloFbowqI|+h3C}QHLrpVLdEo+ApP6n zGh5IXrw@0zcA*?x9e?>`sFK-VRyy)P{-cxBXb)?w#8iJ<%yo-%^lyVUe zGcF=hv@;r4P;a;hh$UM=hb=$m__Ft1&~NXQ4VK8K``t7$a%zCTT`S~?ndZ2w`x3(jmffZH8&M{w&h?VQYE9@T`5(R~0gXVO51`6(nM;N>j z(;X{!Jz7NT(oQceXO&(;PxwV&Xcu9XVobdIK0At*rpjBeYI}u&%Vw^ zw7a+e$>GP|Qort4*y+q=gff&goXoM*Bk1jyzV?_~0S-)DT)+9D7VWs%y;I}^V>+Sp z=vQ`m8gzE|2g9eH`vM9_^c}5-Hs9qJH8S(SN|0wYC*SdCGg@Et9Q2MfrL&Tj1Xm?A zr^P$G@Cw4jYV63?&B*QKwH z3or)_4d9kom>MH^40fy0@uBhh;z9pJTr zBhPn2pD`gLQSjLT1}tIeA1{u~WrNM+(tHZe%N8UHpiI37`!`CpbX5`sN zN?}Mi{{EGp4YgBNmPJnM|JEKg0;aqS3Fy zb>iR2S?(Dt&0*r;vx24Qm6ZdaeVlZQg8&7vGo-fUi!|vOffHL!PnV_$rja(~(vGBC zhE$u}O<}@s_Yq+>Ej=xnX2oeB)SKyYM^8bB)r^`*4!R<3OWMwswSc28ORcm7TVcpG zX%8X#+;d$TsUkHCEVQ3SY4=QCOYNYv%?^N~!-a@U(>F=?&(H1IXmEMtTU!l?vZ=BYouu_E@3z5Kf zO(@;`6EnI}shrYXs#VFHt`~k`$c=QJT&qgvbp5!VAvdY(c!_J%sy2u(cTH|u)0YLV 
zrdma44M?{fX)Cfy;A$dJE`(@#Xyt1Y>!GC6iRLCG5W435t<|`v<=S{PY{}*#*A(9N z<_!8ZqRQ<68}t=1CSZf!t%IhhuwT}oJCHHW(e#v5&LVsz%&5v$;4C6mPnFCqPP&0A znOmH4BZk~)aoU=yk}pChb62z#np_uhS&MHhjED^U!S%E--- z!Gm6|y+wit@S&gUXpwd)8X>fd%-HC^6HdzUNzfK#!$Eue$C{A|Uan$zA==f=Ot@S` z26?&LK*Q_)rXt;Jxlp08A~Is9pCg=f+uz(wPz(E8j{^+r#U0S6;R8iG6D>4IDQrW# zN||P1cU^I_ltR~DB66eqI2|)(+pW!*wv1GX_zQxTL{@Oi2PsHPqs4eG_L{>~5V?o9o9-|B2_Y1IP7-g?xr8L~aevaLVJ}s3J zTvqhH$;txX1qX~~diGls07ODGa>XbwYeuU#~lkqNSVHXAHAR&IcFvgt`=J+)!cR8j^l?EpaDV!E zJ2TUGFKu!^2?2Ow}QyAi^{W6G@cQh{BjA5Mi1CRWgHV z!l205LB~gKHmY@Uh!Y!_J;cvO)Lb$7D2Px_)c7-q6%au|sPSi@ARvN*a8_l_KtVtR z1))lYsI@v229 zx#Vxv7sIh@CiLbT@=I00SnLDXC2E4&8liXP4}}O|pfMT44A`=|P)=BYz--t<24g%B z(8g=x5LJ!3a{vTQ$jeL`5YDaaP6(n^QrkXqj022?(1fPt^;&S&Sygq1EBP|}8ZWaJL`Vs$+Ja~q zRmX%#e=GR2&s3fL#Kv|b$5Uv$T(LYU&`3iWY^bITr_Eb+DbOORNU=e=J|u!Y@MYJP zw=)JQzLnIm86d(n>8-MPA(D70e~x!~a^n4iz#1B#iKHC1;TyYZD1$oH)btRN!%`Ii z*d5M5G&FH?%26Gf8byf%fuJ24ZwQD$9{3}L+<=o>y8kFb{_Ez5Nj*i@Zxm-*E5*4= zC`y&EsH3~RNOI3qZvOXS#vb3EB12`rr??LK_jd0lGTW@^`m>^x%nl_{cJYQeQD|_9 z@?z8v8pTZN!G5EXyZzkTL5It}=*yG&h9v{N(BJ8P?v^6*z(Du5FeRI*(hqePljdvO z+mbOZW3=JGQ1@mclXzdDn&oXHT{pUSh7Lu(rXtBUTlqJr6Wn}^5kW~A0&`MTDN<;3 zL6zbS-7^?5nX8^)BHVnP`J>B3nGVcJaPKb?%!WqyE>)QXzI{13l23oY2Pr z5s6j|-+iK>_5Pr(+E(!w)iteLO(&*xr|mPxhQo;{D5Tf<}M(BLs$LY7e;Vqs_*OxE$N)|6x_8rd^f zX>$_s@_=Knd#uzn4^?u&w+%dGZ?>7?Xs;J%YdohlCpWe7(v_4c*i44yt31p2jAxRF z62kJd;PJ%w7R)nJ41Y8743qHKi19G&%y>e^G~zyybgJM(iXMv{zyhY1$pFoE2Z&A&e4T;Qm6dHku?-r5zbwh{$D{)=G6@HxMfc z-$HQ7TQPV$I4R!qi-=K4S2%X3r$7^LcV^fVg|61HyqUMk)Ui8i4zX-#>IPlBu)ai& zUJLl@ZppYt*lhDPSzp|qr68-C!04=e6zU+XYEQl!I^G&$s-#L`V!d!<#!`*o5`CX6 zP1e^kC-L#1JH^XYA{eC@;-023#Ex;ftQGYGB&PAJH5%TtJy{O+6G4)x9JQ#6 z9M%*8lcGJsJK->#>6j+iF({|x#eEZP8p(?#=t?5c({IibVW56P@~#k}p{l73!&V|- zPQEnlP9uUAfzfCwv5ZA}=z9wJ~tHDU4mur2d+ z=Arm*WrG+>pT4o)u;)TAcCcp6o5uzF^qf$PPtRZxHZ>#cm?0H&j(y`HuzQ9a`&LbC z6y($UH9?lI0q4#)=Be=B&-QI}FwY&^)J~9l+hU0D3)QqJuS39?XW7H-PffdcNA)nE z(gd&@&*?_d86I7xzR%FAH6aVESnKVq#Zxu*bDz7)n3B zv9^#O--H;9-^`JDvS5#%tDAeA=pO$eQm(Uf=`uv^b@z#AC3 z^=byHbm<$<2Gwycy}D>P1kN!=&i%wDb{3Xcj@ZcUNSt;LaL53; z{%ZRQx&53Bos@g0D-$xoopAsimvFF~DTUl1PdxLsg2i{xnJaK$eJT1ey3$z}DlS_% zMu7X_7y&L-k|;)C25%R5XKS*70!+S7qCkN;#2k#29B{PYaeFeLxBCwy>SF@KfAPAg zX(0>6p#$6%hYoNlnJ0!8!vrp?MCX&BABp^J{+*GW=;dy~D4n}QGJyhwZOdH>uWIUs zdUTQB;j$E+?`cZ>6y$s!S&Jc)4lR8g;G`;#Yy4&;uNkj*F^??t>q2JtLsv}s_*t4{ zvq3%`+50K^a;&^4)U||z%zZv+!cMJy!tot9Wf!{RqJHS2y4H?d+CGT8@#-pXyeL9Y zu?jhGlC_F!X)y1CtE(iVcz}9KDNjy;ADmvp?0=;ahVZI&m1H~2=q-Cc7dr9(VjoAq zuCfuV9`A#cm7&-#IGf42~W_WosM-;K;g4@^TS33ndAsqWSK*W@tux@^+3~ zTY@TRxEN{FBz^$*EQKNsRz*b01)AVO1Orzyq3+?vE$6BT<%68@kc1IOq7!GnKgO zznH1SJk&CkmME#1r~cf}U(8)%e)s@BY5z}hmrQo?uVpGRQ+&{#gh9pQzFVe}mnO!C z_euC?$apC~{iv3xM1enL<`5?<2bx*Me^w9QDHEN_^d$KD@nZED5I<*!FN;lPFnKCadOM91>+<_*4XWnyG?rcA@-R5HO`4rE;?W`Gh6c zSLx_9(h>ZN2}_RCw-c6J1NnAg-J8Ffu;e;pzxe-1!V*szJ0rM+6P8^>X7S~QY(zH{ zr!H|Fm%21kUY@Wsg1+*RGvdVME^$ljfZ)!p?c?y!AYVuPhpZR@Lov^Tte~3PGb@X4*3SF_t>r#Ku|6Q(1e)lF_6&G}S z_u))d;i}cbe~a^iMfiKp3-0OlXFL^I`%Gi6sC6&i5G36UR3;NpR z`7F34_E~W0f0NJB1=-I)9kqvZ{s(w5-}|x5|7Jc5b6wAWsDb|}uIt0KZ8JqfrDrv{I}`+ZIE#eSyR$!-y8bB${a|WyUwN)>c6ZXfhg)0NygmpA zok>SySzo~o7-}qgV=UA)Bs0>arevw5OppB8ST=@vHfmXY!7My~&9Av9?1kcHw zL9!a?+E=zhEGUKOkNG-+KcUNLSwwRI_bb-h-m=5;->#yvIJ)ZvQq$tcyWqd*)B2kO zqaQ-cLMM=$x5zBWu|LUlq31Z+O#yi|QD!L5#6|nd#AKN-`Y&*T>>&FSVRI5u4atta zvIL<{Kb&e}D7f7rh7z$`X3e%K@|LwA3;bj&B|81&%fRK^P>*CDHI^sFgaFw?fnYn2 zGJc{gPax^0;IWTa=NqJ2cLdi#G@;i5K`O7IUog{fw1UY+UR}UcX2`1yr8PNjn(Ub1 zHd}5Jipmd^Kcp=VmA#a&^wJq<(y@!tf1OC*aBZwk?x(Y;Y8866STI}G4%*F>EfUBT zY8WolC;tdTwQN3<#Ds`hvZaE3Wce&vvTz@#KvW?nuZAPt?Nf-%=??daijXbkRmDOy 
z81l5XvxELVM>ba=xvuEBk0l4sQzai^$)ZR`#8mc=#3-iCQ$^Q{EZJot)8;DkhXt7) zE)$W@7RlBlJt+xD7wdV-g58(M1Omxhg#{`t#GDLWDvSFcY9{xinT@2OP!RqVI<^Wi zd3TxY5N}&^i;T_Vz;YQj4<=NnY(_?GlCj;ZrkD2KDDXPc4q!3rw2uFH}z z1wm3-Xm=8B@l^F|I%PU~_jTN~wUY3TeY*{kek#0xP0k&|v%vmTm zwN1N`0j2MhX_EyzWLM?(CZ}EzUUp{BE=)PLOLj$YnGD}8E0tV!Ry(WMKHCtd^$D_c zUVSWY?8rUL*xPxO{C%<>LdjbNw|%_2J_l6mj_~Ss9b!18@+e;tnbunU<#Jubbr8*Y cgb_woCsb8}|qoc}qq zWX?I0@UN?j6zh&JR5;C6);*}Naq7u)88rhoYF0n&Rq&rTzDuk5HKw+kqMK4tSk|vL zN$FSX%X^ZUv04qKR5Qv`d=AeQu*aTUK<#7PM6PpfiBnH5x3)~#SC>2O-etMV7O9Xy zb?r}z0I!#7|0pVZxg=xRy~}1Ti>ma0tTLdob8S375IdYy^vA!U%D(v5-zkI4TB_01 zo>BJXYHQE%!<{8woRLY}s|cvAQl#*ewY4j;N5r;j|IhTS%Qi#8F+J@1m0uN2w*Jbu z1JC{ZmS}jOgW};-NZ+IQLJymKmGA3`{;IOww#Wg`NAq<9NqGl_&$bhj6PJ=ZPxlFa ztt+=p4;5?qBYLRWsl+bl^xSd%wiQl?^knihl?L94<__qc>jt@VchG;ZB$fL(8uY#R z&-Bn^JAYgcWtH4fJ+~2h%;L`Kl^d}gTpq1BtA~l7Do^W4!<(E6VwZA#EBQKKCLZqp zgFC3Fm_1;OJF-|>tb#LB6^LPJjuYwx3lv9LM<>`gokvjqUnr0~cm}QfeLej<>^mj? zHT6@T(nFtc{&PLd{>o{u-Xh|FLW$58KdU&Y=M)5)hry)^9(8>zo@$vY zs-xUGOj06@EiWq1=pl586F%=8rb2k(S1FW@2Poy29~Ag}=nW^d-pX_PsW zME>6y_+yd{hb&Z~^+qgIsbI!5)u(#m{HiKK@Si`iT~5`vX>C*rZ6#QeE3}#BA^@Qk zOh4?yE`_R5-=;Y#g{CT6i7qsWtaShgO`E1w00`F^o8|x*K0H`I`oyV18($Qm`Ot8vugU6m0@P zkc~wP0(eGu)@SX{-=*EK6mGWRZE7dn5aDeSEXfsaY@*u=K)8u9Y68H-ogXgm>i=EZ zEtSHpGPuS5!YylXYXF2>H{mS-5UxLlf4<|q`|P;y(r#NR+-?Ck;4j=Z&2AuAk}KRk z)7}&ST>m4w+<9)|yKPhoZ6#QeE3}#BA^@RPOb?&!dTYM^yR^oo&{Rb$(S;_FwGIHG zu}NA-uq1bggE9H<0Fa+HaGoT3wJKgXZJPV$TNgai+o%-UO0XnXXfw@40HbfG=^KrF@*fe<0p(OY8HY$a-5-iCT+Dvm1fY1u2ubF)QJN_?Sx}#EPs-l(XLX*f^2Y}GD zX<7wKxlcBHVc5pJf`sEi}d68n^TH%-y+xaHY$a-5-iCT+Dvm1fY1u2 z|8&f-9&fF1O?OlZO;xlKU1$i`g%HchJlNWae5+!O#v*h-i2nbPN6YF`rO;M_CAmVIX)Xc~TEX;% z2ej`T4R=m=R0>U1v=Uut5?Sj25Slhks{ly9&e+@(0C@N-x?LZPUwFys&fnA8s1({t zuq0P#GtET+LMxd5iJ`~exMsSN=BN~!s%Ry;&?K_f0U$JOnpOcITxV>W13(q#zC1+5 zr#H70n%kj;+Y8M`+_C_K=9{cV0SL_)vxLc8_H)aW=`C3bEzPi*{Dc8(`?ttF@lIYO&x>KK5~dZso5=r?WF=pi4d(pf2(AKIuCEJ)f0 zfM7L6n*b1GW6^>D8n%Ajb;}T4`VC9rW*gq7cESx2-X_74T;awhx~%|u-NYC<20+G4 z!&u0#ZE8}B0{DO~rDsc+;?F84p_EDvh zv#UbA$mT#lHN=GbPWeqyx3N3x1oeS_9;qa6jc*{SyWGu(tclg?$hC`}Dk9wPz7Up; z^4oryOses34@RTEUn=?Yv48+rQsX}fd5m@?Ke_ey2akmTZmhtj+rUrb0sR|BA4HN{Y%0pGi zY=UIa^5Txp+gFEH|38$9R-i94nLX8xx?6>!xJmHaU|)7h-8v5K0BC% zm=fIJ^uUPM^ddQy%udT!k`XES8ggk`en6uJ3FOLG#bJ=XCE{JFO9IhlCMAMlOT>0c zsmX~-AO{LNX-M!d#Q~Ov+`*_=a(YrHB`Kbo6bEJRiLDuV`U32aly&%p%O4^Fxp9C1 zy}axZh1I4HaD^p5h&{0h0~={iO(r47QNq>2P7IH^osyiK5UGJ({W_g8!vuu+4ShpV z0D?2WNgzR6A~Ym@cc*ahm~7#bK#pI?7g6j~1;3`}t3-97L%rA)b?-r@{5wB@VY0Ld z&C4GuqWnW6_F`&3P$V*YZFS^2eO+WED$bC+HYJh)u_A9Mm&ERmiL@%^F*aXR-iC_f zry=oPIl)X~YTS1`#dfh|>wVyw!s)s?!qG$nVl%%< zASEt{pDnK0SvqklF-ZDqCFUCZQZ^?*F8u^h9@O)GmKeEL}@X zFlufV91B}UiEqrBX-KuIKs492z9AMQ%oa*=;`1(v;D8?Ol_PEf4(cW@9EqFbo>=qn ziL3yPW7Z*o(B)-mBM#3C#j^F46(_APtV@7V$~d;Z%$Suh*)Pt@jIFN**H95Wvk_Zh zOZ?)t>dB;{IBE$rQUbhQ6vqaKaurwE^3fh*HzdiHnT5=T@q5Cg0lr~#1xwjFTTHq? 
zX6JN%hi%ieGg;!_MMK<|M?}FBIh{q=jL%zz$(pA|c2SC-SlH*~Xlk@oiRom`p?D=h zp&GLFP<%MtHLf$xugd2;x2JnQYr#OUrNt@%D}SXRtld#;9DMZ8&TL8}c>vi}n5?7% z;o!%e|4C)#_;(3|&A)eUFbf7`%M@j7W~P_xi2Dy+!r^LI!fSeBIGN}SU(p{ikOaYB z6_)0VeH_G_64vSA@}h)4X(K9tpnyO^l_eJ@;U`Cq+>`RV=E2r`6RQyjVDC>8A$ zMe>HwQWteTlIe|GiQ++$dftfee5^&dL+d#XE zWp?-xs4WE$OenSEoWkV(Y+kdy*31rV$@Eo{h70+2mOFDP-ARm{O+jTJGu$2X9CuXmlemS=M2;~EAbCW3w_~|K;IT|31En#sPnU$p0 zCTSlgFq7aeG2lot>IY+vh|C$J&3&S*FOkJ9NABHm6zJ|qN6g>mh#hz9J@9Mr9Z(XRRk7hUgW^0u4=MwSy##yPn=;doLttG zP9FM9@Y#c$og_|1+yk(&iu|^ttBV~iG5c6q|Y5Cl4SQFh7 ze@Y}49!l+b?RR|tP|sdLXSrNh5!n09Xl^gPv?6oLn4kc5UyM#j7sNVH@ zbX#=Qy>lv&LFwnczorMT0T+88r)3DA(xB&dZ}&-onb_?0+LS{db8+yvsSke8#X;5m zeO!#XI3F`#bwBD21=cR{?+r)x_vx03EOjn@5koBOP40^uaV*R$>HBsxvd(yZVk?6T~sss%_MUgyuyI@>W4;ZfGyGePSA2us#s1iVyUQm%8dw@(*2;Qvft)Z zG`{+Ue&@zhuUijV_x{JmE9$&glf=?YKe&8f|DDljMp)B-V-1!XLh_hcT@bG^Xh(aevc`QiTXZl2G1{576e zGAk-M7Z~b4_%n=mbf)I@hb{?&=VMF)S;67SgO{a3zukk!7_r%U{g5Y8J?nZ&Kh|SW z&IMA8L$*d+husCP&K!c4c7fwHL!L@?cL}hJ$@tQHA0)fKY3ECJZA$G8hYk)Yj>dLz ze~9g!-G@GvitpDLh91U|nWh#;WhpNuzrB10g9 zt|5mj4fP`Q6Id|7sH7XItukzo^^u|0kgAD>daYHXGC=8(ymp*L<^+ZZ?z_vNl#}lk z$s4N;SGb_Mv%KB-S^Lir8-8&&?LXJTc0MDMasv*6@^VFc2%mYOJj*|AYK*EC^NS2Dt6a3RkwHLP!J*|D|MvgE;9 zLl;gowt7z0&*waY2au#}gPuHEV#Gn4V9?Mtr6DfMaMsza1-k)Y#-a1W7nVNNXc<^n z@XJpdHp^@+TM#O;@ENeC38(PT@<`{)Kw&ox##(R;ge_*Lp($SyFHCBW?|&^_mD4UacI?>Q2`i^cwM( z=PFA`jelVre(de=FEn_OkVgwN;@0I&);yEt3cV8x!@S7KD-{~}=%}H}jd=K#XvFu~ z-x#{9xq1j5RhX(~J=F=yJMO6384Ob@3VW%!b>Q}=;e{A3o`fDUdXps?*;@GVsKHaM zt6FDuF}cvv3$~ckP*yG&NrzOfoj- z1vCC6>%I~dRIe>@Qis*8(%9{0R!+4}BG4x{mM~7o-$qWTOTZp1y{zpvs8~Dv^8#7> zTcHo}m@q;|s?$nU@brfz9Vm#IfkmwATVZ1PXAG7BRu=DASnQXC)?5~ap464Hok+J)4N&y9thKb3l_*>vGLi*2X@!TVn$V!gPYfW5k@wR=O%AH*3l zKBd{k9lZDh!Ej-4ajKf%8$zbE&krUzQ39wpuvkwnd3emE{)J8kPzsG>0JXnbYgI>z zm0|!D9jvnK5Suy}6P8_NLt&9sz{qM^BXU;b{!-lTg<@NgfB~bsZ2My+kc6V8x#IOWlSi(*(=CET}!j5cq z_*qyY5O4tp%NNH&D`P+(eV#3Pv)9f7SOJh2HPTld7|z+vK&xWosP8%$Fb9(&7vgWx zh{c4YP{yv#`Mx3^n?W{vpw^&<)JnFbcgs+t-u9?55(5~Cu^mccj&Vq{N5<}?m7N&f z!MKN`Ifh{nTTy`O&Xa4JYpW@`!uAu@5+8xt`6GMC`~EINVKzJqmxMI75}avYZU+Tp zCN@x}S|-#u+vo{;{uGo<|E0F5r9cadsDzBY!632NO2b@RRr;aJh@T<&M~pZD@^Um9 z7;14~XEsuRkr$h7ywu7Qa42oHr*SQeqBJr#3#DL4#)i{AJ9Pm=X!hs|lclkfndw0p z0*u<&Az+~vjM_L-2!k#XIvKksYzHWfMR#Cka*oZc$gq6$h@myo(5paN#x~0a!z_&v zziUM-MwM2b9BH>d(0CAFJjia}!g!ELTJM3L*IDo8)ym>U(#(?M0t^~GWV@+g&}fu{ z8I6%6yM{D!1l410w$#d<@YAXC7(6)fBx7k-CkRO$<)#jP+w$pF#+#>Ju4QjM4b>OR zc)qgKWne}5$^G<5cr~clwPg1(v=pmTJXQ$w$9sHM^;ZP zQbDLa7C>YN*( z<_0nf9PE~RS0FimYP2&r?;174i#_sH?WOa=G0S-&0CL{W%@pe)3C5&*>FYkR|A?tU z-)?n5Fr79bYAyucIy@#@>_U>Wi~S%Q!|{}!b;(TUtNsvyur4kZaa9&-u-syAIws#E zBP*$W$%z*Vf+&m*;&GXCg$(s*3}(_(H;ZS`@o=9sTCdhwrvZ9GI6a<*Yfi=$DjEH# zGbFi{b(J^$QX=8WJ2by2s8n7eHEBLmmfHw6Z?zwsM9Lx5`-`%dOP}prWyI{n##>)zBC1kF~(!tzXmV4&eXtBj!3w~w9Q_D+&#i=Ay_jTGZ(Q3_u%;eEl zjo7(@VP*$5_Qbq4_Vf-xJ#y`)A*U3_%{r!)bnXm>H4o)Kui!UCkdUm3V8FRqEZ6uk zlC!!o6g17uet8dQ`CZuU^}=9+i=m6i39Ej{k*c=7II1zn0Lsw^aLN{GbyhT9&ScJ5 z<1=t@dfxA9Iyn$&D8@rCNg(RkD* zfqP1VN%`iy_99ob$&0%Yu}l{YymFx6fwcufjLoiW-CJ6>*Kx28g5`+^O6^X(TM;H|`8$s*rQVK-(&Z!7`c zSZV_4O>kgJ;a#SiXle$>ZaQ_Lo6B2@pt`%9#}%b7+==;aMZbvB_A`6`#mVet~HoI4$?WV7!s1x*@nH|Qy{OCEs^ zI-@@&hDQLyBR|?sIkT@NMn(Xyd)PHV30tmYT5lmK2Sv7Og|S4vWBcOndr zID%<&uf)-QXEi#FhXBSy?2K-0Ktmw}Lm?7un`O-A7IJ8E3|J_nv32SAh})XX3M>>N zS^?U57T9UZLLrSgvG@drLZq25W5?nl)=^>MABQ+;P{e+ku%L*!bH5x%HfEApWTY`F z78$YU$s!{XPwTxGjEpp%5G*_*HKkJrFg#+nQ?tm3y$BW=k@_)w7#VT2Cz!9xI9jVL zMqM z_H>$Hbj6EIzPG>=7Eda8TOr0-SRKY$tbE#8`LH<4D-ln9kmCo~@t{Hswy-`d*utJz zu*DmyD+`V)_$?9S(nvh0m(MIXr9ihE11r+Ol>Fj52EPD7=`?Z@1=)`xYAynv{GtH& 
From: unalmis
Date: Sat, 1 Jun 2024 19:21:31 -0500
Subject: [PATCH 172/241] Add get_pitch utility method

---
 desc/compute/bounce_integral.py | 83 +++++++++++++++++++++------------
 tests/test_bounce_integral.py   | 11 ++---
 2 files changed, 56 insertions(+), 38 deletions(-)

diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py
index d0b990fd1b..d2d5da0591 100644
--- a/desc/compute/bounce_integral.py
+++ b/desc/compute/bounce_integral.py
@@ -270,32 +270,64 @@ def _poly_val(x, c):
     return val
 
 
-def composite_linspace(breaks, resolution):
-    """Returns linearly spaced points between breakpoints.
+def composite_linspace(x, num):
+    """Returns linearly spaced points between every pair of points ``x``.
 
     Parameters
     ----------
-    breaks : Array
+    x : Array
        First axis has values to return linearly spaced values between.
        The remaining axes are batch axes.
        Assumes input is sorted along first axis.
-    resolution : int
-        Number of points between each break.
+    num : int
+        Number of points between every pair of points in ``x``.
 
     Returns
     -------
-    pts : Array, shape((breaks.shape[0] - 1) * resolution + 1, *breaks.shape[1:])
-        Linearly spaced points between ``breaks``.
+    pts : Array, shape((x.shape[0] - 1) * num + x.shape[0], *x.shape[1:])
+        Linearly spaced points between ``x``.
""" - breaks = jnp.atleast_1d(breaks) - pts = jnp.linspace(breaks[:-1, ...], breaks[1:, ...], resolution, endpoint=False) - pts = jnp.moveaxis(pts, source=0, destination=1).reshape(-1, *breaks.shape[1:]) - pts = jnp.append(pts, breaks[jnp.newaxis, -1, ...], axis=0) - assert pts.shape == ((breaks.shape[0] - 1) * resolution + 1, *breaks.shape[1:]) + x = jnp.atleast_1d(x) + pts = jnp.linspace(x[:-1, ...], x[1:, ...], num + 1, endpoint=False) + pts = jnp.moveaxis(pts, source=0, destination=1).reshape(-1, *x.shape[1:]) + pts = jnp.append(pts, x[jnp.newaxis, -1, ...], axis=0) + assert pts.shape == ((x.shape[0] - 1) * num + x.shape[0], *x.shape[1:]) return pts +def get_pitch(min_B, max_B, num, relative_shift=1e-6): + """Return uniformly spaced pitch values between 1 / max B and 1 / min B. + + Parameters + ---------- + min_B, max_B : Array, Array + Minimum and maximum |B| values. + num : int + Number of values, not including endpoints. + relative_shift : float + Relative amount to shift maxima down and minima up to avoid floating point + errors in downstream routines. + + Returns + ------- + pitch : Array, shape(num + 2, *min_B.shape[1:]) + Pitch values. + + """ + assert min_B.shape == max_B.shape + # Floating point error impedes consistent detection of bounce points riding + # extrema. Shift values slightly to resolve this issue. + min_B = (1 + relative_shift) * min_B + max_B = (1 - relative_shift) * max_B + # λ is the pitch angle. Note Nemov dimensionless integration variable b = (λB₀)⁻¹. + # Uniformly space in pitch (as opposed to 1/pitch) to get faster convergence in + # an integration over pitch. + pitch = composite_linspace(1 / jnp.stack([max_B, min_B]), num) + assert pitch.shape == (num + 2, *min_B.shape[1:]) + return pitch + + def _check_shape(knots, B_c, B_z_ra_c, pitch=None): """Ensure inputs have compatible shape, and return them with full dimension. @@ -499,19 +531,9 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=False): bp1 = take_mask(intersect, is_bp1) bp2 = take_mask(intersect, is_bp2) - # Consistent with (in particular the discussion on page 3 and 5 of) - # V. V. Nemov, S. V. Kasilov, W. Kernbichler, M. F. Heyn. - # Evaluation of 1/ν neoclassical transport in stellarators. - # Phys. Plasmas 1 December 1999; 6 (12): 4622–4632. - # https://doi.org/10.1063/1.873749. - # we ignore the bounce points of particles assigned to a class that - # are trapped outside this snapshot of the field line. The caveat - # is that the field line discussed in the paper above specifies the - # flux surface completely as its length tends to infinity, whereas - # the field line snapshot here is for a particular alpha coordinate. - # Don't think it's necessary to stitch together the field lines using - # rotational transform to potentially capture the bounce point outside - # this snapshot of the field line. + # Following discussion on page 3 and 5 of https://doi.org/10.1063/1.873749, + # we ignore the bounce points of particles assigned to a class that are + # trapped outside this snapshot of the field line. if check: _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot) return bp1, bp2 @@ -913,11 +935,11 @@ def _interpolatory_quadrature( # Spline each function separately so that the singularity near the bounce # points can be captured more accurately than can be by any polynomial. 
shape = Z.shape - Z_ps = Z.reshape(Z.shape[0], Z.shape[1], -1) - f = [_interp1d_vec(Z_ps, knots, f_i, method=method).reshape(shape) for f_i in f] - b_sup_z = _interp1d_vec(Z_ps, knots, B_sup_z / B, method=method).reshape(shape) - B = _interp1d_vec_with_df(Z_ps, knots, B, B_z_ra, method=method_B).reshape(shape) - pitch = jnp.expand_dims(pitch, axis=(2, 3) if Z.ndim == 4 else 2) + Z = Z.reshape(Z.shape[0], Z.shape[1], -1) + f = [_interp1d_vec(Z, knots, f_i, method=method).reshape(shape) for f_i in f] + b_sup_z = _interp1d_vec(Z, knots, B_sup_z / B, method=method).reshape(shape) + B = _interp1d_vec_with_df(Z, knots, B, B_z_ra, method=method_B).reshape(shape) + pitch = jnp.expand_dims(pitch, axis=(2, 3) if len(shape) == 4 else 2) # Assuming that the integrand is a well-behaved function of some interpolation # points Z, it should evaluate as NaN only if Z is NaN. This condition needs to # be enforced explicitly due to floating point and interpolation error. @@ -931,6 +953,7 @@ def _interpolatory_quadrature( w, ) if check: + Z = Z.reshape(shape) _assert_finite_and_hairy(Z, f, b_sup_z, B, B_z_ra, inner_product) if plot: _plot(Z, B, id=r"$\vert B \vert$") diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index c3cb6d0b29..3ff75bc675 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -25,6 +25,7 @@ bounce_points, composite_linspace, get_extrema, + get_pitch, grad_affine_bijection, grad_automorphism_arcsin, grad_automorphism_sin, @@ -234,7 +235,7 @@ def test_composite_linspace(): B_min_tz = np.array([0.1, 0.2]) B_max_tz = np.array([1, 3]) breaks = np.linspace(B_min_tz, B_max_tz, num=5) - b = composite_linspace(breaks, resolution=3) + b = composite_linspace(breaks, num=3) print(breaks) print(b) for i in range(breaks.shape[0]): @@ -694,13 +695,7 @@ def test_drift(): np.testing.assert_allclose(gbdrift, gbdrift_analytic_low_order, atol=1e-2) np.testing.assert_allclose(cvdrift, cvdrift_analytic_low_order, atol=2e-2) - relative_shift = 1e-6 - pitch = 1 / np.linspace( - np.min(B) * (1 + relative_shift), - np.max(B) * (1 - relative_shift), - 100, - endpoint=False, - ) + pitch = get_pitch(np.min(B), np.max(B), 100)[1:] k2 = 0.5 * ((1 - pitch * B0) / (epsilon * pitch * B0) + 1) I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 = _elliptic_incomplete(k2) y = np.sqrt(2 * epsilon * pitch * B0) From 9ba02bb5b54011cf5737177405fa502ec27fdd51 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 1 Jun 2024 21:13:03 -0500 Subject: [PATCH 173/241] Fix assert statement for shape in get_pitch --- desc/compute/bounce_integral.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index d2d5da0591..9eaa61de4c 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -311,11 +311,10 @@ def get_pitch(min_B, max_B, num, relative_shift=1e-6): Returns ------- - pitch : Array, shape(num + 2, *min_B.shape[1:]) + pitch : Array, shape(num + 2, *min_B.shape) Pitch values. """ - assert min_B.shape == max_B.shape # Floating point error impedes consistent detection of bounce points riding # extrema. Shift values slightly to resolve this issue. min_B = (1 + relative_shift) * min_B @@ -324,7 +323,7 @@ def get_pitch(min_B, max_B, num, relative_shift=1e-6): # Uniformly space in pitch (as opposed to 1/pitch) to get faster convergence in # an integration over pitch. 
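
For scalar ``min_B`` and ``max_B`` the construction above reduces to an ordinary linspace in λ; a small sketch of the documented behavior (not the desc implementation):

import numpy as np

min_B, max_B, num, shift = 0.5, 2.0, 3, 1e-6
# num + 2 values uniformly spaced in pitch, with endpoints nudged strictly
# inside (1 / max_B, 1 / min_B) by the relative shift.
pitch = np.linspace(1 / ((1 - shift) * max_B), 1 / ((1 + shift) * min_B), num + 2)
assert pitch.shape == (num + 2,)
assert pitch[0] > 1 / max_B and pitch[-1] < 1 / min_B
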
pitch = composite_linspace(1 / jnp.stack([max_B, min_B]), num) - assert pitch.shape == (num + 2, *min_B.shape[1:]) + assert pitch.shape == (num + 2, *pitch.shape[1:]) return pitch From 61a8c3d22711ad3c8253acaf41b4c4b338845935 Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 3 Jun 2024 01:36:52 -0500 Subject: [PATCH 174/241] Fix use of rtz_grid() after merge with other branch --- tests/test_bounce_integral.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 3ff75bc675..5f0f0bf73f 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -85,7 +85,7 @@ def test_reshape_convention(): rho = np.linspace(0, 1, 3) alpha = np.linspace(0, 2 * np.pi, 4) zeta = np.linspace(0, 6 * np.pi, 5) - grid = Grid.create_meshgrid(rho, alpha, zeta, coordinates="raz") + grid = Grid.create_meshgrid([rho, alpha, zeta], coordinates="raz") r, a, z = grid.nodes.T # functions of zeta should separate along first two axes # since those are contiguous, this should work @@ -461,9 +461,11 @@ def test_bounce_integral_checks(): # (∫ f(ℓ) / √(1 − λ |B|) dℓ) / (∫ 1 / √(1 − λ |B|) dℓ) eq = get("HELIOTRON") rho = np.linspace(1e-12, 1, 6) - alpha = np.linspace(0, (2 - eq.sym) * np.pi, 5) + alpha = np.linspace(0, 2 * np.pi, 5) knots = np.linspace(-2 * np.pi, 2 * np.pi, 20) - grid = rtz_grid(eq, rho, alpha, knots, coordinates="raz") + grid = rtz_grid( + eq, rho, alpha, knots, coordinates="raz", period=(np.inf, 2 * np.pi, np.inf) + ) data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a", "g_zz"], grid=grid) bounce_integrate, spline = bounce_integral( data["B^zeta"], @@ -604,7 +606,9 @@ def test_drift(): iota = grid_fsa.compress(data["iota"]).item() alpha = 0 zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) - grid = rtz_grid(eq, rho, alpha, zeta, coordinates="raz") + grid = rtz_grid( + eq, rho, alpha, zeta, coordinates="raz", period=(np.inf, 2 * np.pi, np.inf) + ) data = eq.compute( [ From 6711df06b9ec71474de3db1ee9bcf3189751f928 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 4 Jun 2024 11:48:46 -0500 Subject: [PATCH 175/241] Update interpax requirement to 0.3.2 for differentiable spline --- devtools/dev-requirements_conda.yml | 2 +- requirements.txt | 2 +- requirements_conda.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/devtools/dev-requirements_conda.yml b/devtools/dev-requirements_conda.yml index 1f2f694b69..ce379988c8 100644 --- a/devtools/dev-requirements_conda.yml +++ b/devtools/dev-requirements_conda.yml @@ -15,7 +15,7 @@ dependencies: - pip: # Conda only parses a single list of pip requirements. # If two pip lists are given, all but the last list is skipped. - - interpax >= 0.3.1 + - interpax >= 0.3.2 - jax[cpu] >= 0.3.2, < 0.5.0 - nvgpu - orthax diff --git a/requirements.txt b/requirements.txt index 142be71bc4..ef4faaac42 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ colorama h5py >= 3.0.0, < 4.0 -interpax >= 0.3.1 +interpax >= 0.3.2 jax[cpu] >= 0.3.2, < 0.5.0 matplotlib >= 3.5.0, < 4.0.0 mpmath >= 1.0.0, < 2.0 diff --git a/requirements_conda.yml b/requirements_conda.yml index 777c3f0a3e..03e0b08812 100644 --- a/requirements_conda.yml +++ b/requirements_conda.yml @@ -14,7 +14,7 @@ dependencies: - pip: # Conda only parses a single list of pip requirements. # If two pip lists are given, all but the last list is skipped. 
- - interpax >= 0.3.1 + - interpax >= 0.3.2 - jax[cpu] >= 0.3.2, < 0.5.0 - nvgpu - orthax From 8077d4e8d7e4596ee989adb987f6e8ff8e5d8372 Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 10 Jun 2024 03:45:04 -0500 Subject: [PATCH 176/241] Avoid complex arithmetic when computing roots to fix bug in effective ripple It's not possible to numerically distinguish real roots from complex roots with small imaginary part. This commit modifies the root finding logic to compute all real roots without using complex arithmetic to avoid this problem. Prior to this commit, the issue mentioned above could, in a rare case, cause downward spikes in effective ripple near rho=0.8 flux surface when the field line has length about 300 field periods on W7-X with spline knot density 100/toroidal transit, and likewise near rho=0.6 for 200 toroidal transits. In this case a false positive root was being detected over intervals where flat polynomials (i.e. costant functions) compose the spline of B. --- desc/compute/bounce_integral.py | 178 ++++++++++++++++---------------- tests/test_bounce_integral.py | 37 ++++--- 2 files changed, 109 insertions(+), 106 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 9eaa61de4c..04e4715c6c 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -6,7 +6,7 @@ from matplotlib import pyplot as plt from orthax.legendre import leggauss -from desc.backend import complex_sqrt, flatnonzero, imap, jnp, put_along_axis, take +from desc.backend import flatnonzero, imap, jnp, put_along_axis, take from desc.compute.utils import safediv from desc.utils import errorif @@ -90,56 +90,75 @@ def _filter_real(a, a_min=-jnp.inf, a_max=jnp.inf): ) +def _nan_concat(r, num=1): + # Concat nan num times to r on last axis. 
+ nan = jnp.broadcast_to(jnp.nan, (*r.shape[:-1], num)) + return jnp.concatenate([r, nan], axis=-1) + + def _root_linear(a, b, distinct=False): """Return r such that a r + b = 0.""" return safediv(-b, a, fill=jnp.where(jnp.isclose(b, 0), 0, jnp.nan)) def _root_quadratic(a, b, c, distinct=False): - """Return r such that a r² + b r + c = 0.""" + """Return r such that a r² + b r + c = 0, assuming real coefficients.""" # numerical.recipes/book.html, page 227 discriminant = b**2 - 4 * a * c - C = complex_sqrt(discriminant) - sgn = jnp.sign(jnp.real(jnp.conj(b) * C)) - q = -0.5 * (b + sgn * C) - is_linear = jnp.isclose(a, 0) - suppress_root = distinct & jnp.isclose(discriminant, 0) - r1 = jnp.where(is_linear, _root_linear(b, c), safediv(q, a)) - r2 = jnp.where(is_linear | suppress_root, jnp.nan, safediv(c, q)) - return r1, r2 + q = -0.5 * (b + jnp.sign(b) * jnp.sqrt(discriminant)) + r1 = safediv(q, a, _root_linear(b, c, distinct)) + # more robust to remove repeated roots with discriminant + r2 = jnp.where( + distinct & jnp.isclose(discriminant, 0), jnp.nan, safediv(c, q, jnp.nan) + ) + return jnp.stack([r1, r2], axis=-1) def _root_cubic(a, b, c, d, distinct=False): - """Return r such that a r³ + b r² + c r + d = 0.""" - # https://en.wikipedia.org/wiki/Cubic_equation#General_cubic_formula - t_0 = b**2 - 3 * a * c - t_1 = 2 * b**3 - 9 * a * b * c + 27 * a**2 * d - discriminant = t_1**2 - 4 * t_0**3 - C = ((t_1 + complex_sqrt(discriminant)) / 2) ** (1 / 3) - C_is_zero = jnp.isclose(C, 0) - - def root(xi): - return safediv(b + xi * C + jnp.where(C_is_zero, 0, t_0 / (xi * C)), -3 * a) - - xi0 = 1 - xi1 = (-1 + (-3) ** 0.5) / 2 - xi2 = xi1**2 - is_quadratic = jnp.isclose(a, 0) - # C = 0 is equivalent to existence of triple root. - # Assuming the coefficients are real, it is also equivalent to - # existence of any real roots with multiplicity > 1. - suppress_root = distinct & C_is_zero - q1, q2 = _root_quadratic(b, c, d, distinct) - r1 = jnp.where(is_quadratic, q1, root(xi0)) - r2 = jnp.where(is_quadratic, q2, jnp.where(suppress_root, jnp.nan, root(xi1))) - r3 = jnp.where(is_quadratic | suppress_root, jnp.nan, root(xi2)) - return r1, r2, r3 + """Return r such that a r³ + b r² + c r + d = 0, assuming real coefficients.""" + # numerical.recipes/book.html, page 228 + + def irreducible(Q, R, b): + # Three irrational real roots. + theta = jnp.arccos(R / jnp.sqrt(Q**3)) + j = -2 * jnp.sqrt(Q) + r1 = j * jnp.cos(theta / 3) - b / 3 + r2 = j * jnp.cos((theta + 2 * jnp.pi) / 3) - b / 3 + r3 = j * jnp.cos((theta - 2 * jnp.pi) / 3) - b / 3 + return jnp.stack([r1, r2, r3], axis=-1) + + def reducible(Q, R, b): + # 1 real, two complex roots. + A = -jnp.sign(R) * (jnp.abs(R) + jnp.sqrt(R**2 - Q**3)) ** (1 / 3) + B = safediv(Q, A) + r1 = (A + B) - b / 3 + return _nan_concat(r1[..., jnp.newaxis], 2) + + def root(b, c, d): + b = safediv(b, a) + c = safediv(c, a) + d = safediv(d, a) + Q = (b**2 - 3 * c) / 9 + R = (2 * b**3 - 9 * b * c + 27 * d) / 54 + return jnp.where( + jnp.expand_dims(R**2 < Q**3, axis=-1), + irreducible(Q, R, b), + reducible(Q, R, b), + ) + + return jnp.where( + jnp.isclose(a, 0)[..., jnp.newaxis], + _nan_concat(_root_quadratic(b, c, d, distinct)), + root(b, c, d), + ) _roots = jnp.vectorize(partial(jnp.roots, strip_zeros=False), signature="(m)->(n)") -def _poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): +def _poly_root( + c, k=0, a_min=None, a_max=None, sort=False, distinct=False, real_coef=True +): """Roots of polynomial with given coefficients. 
Parameters @@ -161,6 +180,8 @@ def _poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): distinct : bool Whether to only return the distinct roots. If true, when the multiplicity is greater than one, the repeated roots are set to nan. + real_coef : bool + Whether the coefficients ``c`` and ``k`` are real. Returns ------- @@ -168,31 +189,26 @@ def _poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): The roots of the polynomial, iterated over the last axis. """ - keep_only_real = not (a_min is None and a_max is None) + just_real = not (a_min is None and a_max is None) func = {2: _root_linear, 3: _root_quadratic, 4: _root_cubic} - if c.shape[0] in func: - # Compute from analytic formula. + if c.shape[0] in func and real_coef and just_real: + # Compute from analytic formula to avoid the issue of complex roots + # with small imaginary parts. r = func[c.shape[0]](*c[:-1], c[-1] - k, distinct) - if keep_only_real: - r = [_filter_real(rr, a_min, a_max) for rr in r] - r = jnp.stack(r, axis=-1) - # We had ignored the case of double complex roots. - distinct = distinct and c.shape[0] > 3 and not keep_only_real + distinct = distinct and c.shape[0] > 3 else: # Compute from eigenvalues of polynomial companion matrix. - # This method can fail to detect roots near extrema, which is often - # where we want to detect roots for bounce integrals. c_n = c[-1] - k c = [jnp.broadcast_to(c_i, c_n.shape) for c_i in c[:-1]] c.append(c_n) c = jnp.stack(c, axis=-1) r = _roots(c) - if keep_only_real: - if a_min is not None: - a_min = a_min[..., jnp.newaxis] - if a_max is not None: - a_max = a_max[..., jnp.newaxis] - r = _filter_real(r, a_min, a_max) + if just_real: + if a_min is not None: + a_min = a_min[..., jnp.newaxis] + if a_max is not None: + a_max = a_max[..., jnp.newaxis] + r = _filter_real(r, a_min, a_max) if sort or distinct: r = jnp.sort(r, axis=-1) @@ -200,7 +216,7 @@ def _poly_root(c, k=0, a_min=None, a_max=None, sort=False, distinct=False): # Atol needs to be low enough that distinct roots which are close do not # get removed, otherwise algorithms that rely on continuity of the spline # such as bounce_points() will fail. The current atol was chosen so that - # test_bounce_points() passes when this block is forced to run. + # test_bounce_points() passes. mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=jnp.nan), 0, atol=1e-15) r = jnp.where(mask, jnp.nan, r) return r @@ -442,7 +458,7 @@ def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): return B_extrema -def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=False): +def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=False, **kwargs): """Compute the bounce points given spline of |B| and pitch λ. Parameters @@ -534,11 +550,11 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=False): # we ignore the bounce points of particles assigned to a class that are # trapped outside this snapshot of the field line. if check: - _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot) + _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot, **kwargs) return bp1, bp2 -def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=False): +def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=False, **kwargs): """Check that bounce points are computed correctly. 
Parameters @@ -573,8 +589,8 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=False): _filter_not_nan, (bp1[p, s], bp2[p, s], B_mid) ) if plot: - plot_field_line_with_ripple( - B, pitch[p, s], bp1_p, bp2_p, id=f"{p},{s}" + plot_field_line( + B, pitch[p, s], bp1_p, bp2_p, id=f"{p},{s}", **kwargs ) print("bp1:", bp1_p) print("bp2:", bp2_p) @@ -587,19 +603,20 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=False): ) assert not err_3, msg_3 if plot: - plot_field_line_with_ripple(B, pitch[:, s], bp1[:, s], bp2[:, s], id=str(s)) + plot_field_line(B, pitch[:, s], bp1[:, s], bp2[:, s], id=str(s), **kwargs) -def plot_field_line_with_ripple( +def plot_field_line( B, pitch=None, bp1=jnp.array([]), bp2=jnp.array([]), start=None, stop=None, - num=500, + num=1000, title=r"Computed bounce points for $\vert B \vert$ and pitch $\lambda$", id=None, + include_knots=True, show=True, ): """Plot the field line given spline of |B| and bounce points etc. @@ -643,8 +660,9 @@ def add(lines): legend[label] = line fig, ax = plt.subplots() - for knot in B.x: - add(ax.axvline(x=knot, color="tab:blue", alpha=0.25, label="knot")) + if include_knots: + for knot in B.x: + add(ax.axvline(x=knot, color="tab:blue", alpha=0.1, label="knot")) z = jnp.linspace( start=B.x[0] if start is None else start, stop=B.x[-1] if stop is None else stop, @@ -680,7 +698,7 @@ def add(lines): ax.set_xlabel(r"Field line $\zeta$") ax.set_ylabel(r"$\vert B \vert \sim 1 / \lambda$") - ax.legend(legend.values(), legend.keys()) + ax.legend(legend.values(), legend.keys(), loc="lower right") if id is not None: title = f"{title}. id = {id}." ax.set_title(title) @@ -706,16 +724,11 @@ def grad_affine_bijection(a, b): def automorphism_arcsin(x): """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. - The gradient of the arcsin automorphism introduces a singularity that augments - the singularity in the bounce integral. Therefore, the quadrature scheme + The arcsin transformation introduces a singularity that augments + the singularity in the bounce integral, so the quadrature scheme used to evaluate the integral must work well on functions with large derivative near the boundary. - The arcsin automorphism pulls points in [−1, 1] away from the boundary. - This can reduce floating point error if paired with a quadrature - scheme that is aggressive with placing nodes near endpoints, such as - Tanh-Sinh quadrature. - Parameters ---------- x : Array @@ -743,22 +756,18 @@ def grad_automorphism_arcsin(x): def automorphism_sin(x, s=0, m=10): """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. - The gradient of the sin automorphism is Lipschitz. - When this automorphism is used as the change of variable map for the bounce - integral, the Lipschitzness prevents generation of new singularities. + The sin transformation is Lipschitz. + When used as the change of variable map for the bounce integral, the + Lipschitzness prevents generation of new singularities. Furthermore, its derivative vanishes to zero slowly near the boundary, which will suppress the large derivatives near the boundary of singular integrals. - Therefore, this automorphism pulls the mass of the bounce integral away + In effect, this automorphism pulls the mass of the bounce integral away from the singularities, which should improve convergence of the quadrature to the true integral, so long as the quadrature performs better on less singular integrands. Pairs well with Gauss-Legendre quadrature. - The sin automorphism pushes points in [−1, 1] toward the boundary. 
- This can increase floating point error if paired with a quadrature - scheme that is aggressive with placing nodes near endpoints. - Parameters ---------- x : Array @@ -777,16 +786,11 @@ def automorphism_sin(x, s=0, m=10): errorif(not (0 <= s <= 1)) # s = 0 -> derivative vanishes like cosine. # s = 1 -> derivative vanishes like cosine^k. - # Integrate cosine, cosine^k, and normalize codomain to [-1, 1] to get - # two automorphisms. Connect with homotopy, jointly continuous in s ∈ [0, 1]. - # Then derivative suppression is continuous in s for finite k. - # As k → ∞ and s → 1, all integrable singularities and oscillations - # are removed; the integrand becomes a delta function. - # Setting s = 0 is optimal to integrate singularities of the form 1 / (1 - |x|) - # Setting s = 1 is optimal to integrate singularities of the form 1 / (1 - |x|)^k. y0 = jnp.sin(jnp.pi * x / 2) y1 = x + jnp.sin(jnp.pi * x) / jnp.pi # k = 2 y = (1 - s) * y0 + s * y1 + # y is an expansion, so y(x) > x near x ∈ {−1, 1} and there is a tendency + # for floating point error to overshoot the true value. eps = m * jnp.finfo(jnp.array(1.0).dtype).eps return jnp.clip(y, -1 + eps, 1 - eps) @@ -953,7 +957,7 @@ def _interpolatory_quadrature( ) if check: Z = Z.reshape(shape) - _assert_finite_and_hairy(Z, f, b_sup_z, B, B_z_ra, inner_product) + _check_interpolation(Z, f, b_sup_z, B, B_z_ra, inner_product) if plot: _plot(Z, B, id=r"$\vert B \vert$") _plot(Z, b_sup_z, id=r"$ (B/\vert B \vert) \cdot e^{\zeta}$") @@ -968,7 +972,7 @@ def _interpolatory_quadrature( ) -def _assert_finite_and_hairy(Z, f, B_sup_z, B, B_z_ra, inner_product): +def _check_interpolation(Z, f, B_sup_z, B, B_z_ra, inner_product): """Check for floating point errors. Parameters diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 5f0f0bf73f..39107a7af5 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -29,7 +29,7 @@ grad_affine_bijection, grad_automorphism_arcsin, grad_automorphism_sin, - plot_field_line_with_ripple, + plot_field_line, take_mask, tanh_sinh, ) @@ -281,16 +281,16 @@ def test_bp1_before_extrema(): k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) ) B_z_ra = B.derivative() - pitch = 1 / B(B_z_ra.roots(extrapolate=False))[3] + pitch = 1 / B(B_z_ra.roots(extrapolate=False))[3] + 1e-13 bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) assert bp1.size and bp2.size - # Our routine correctly detects intersection, while scipy, jnp.root fails. 
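
The failure mode motivating this change is easy to reproduce outside desc: a standalone sketch (not part of the test) of how a repeated real root can come back from ``numpy.roots`` as a complex pair with a tiny imaginary part, which a strict real-root filter then discards.

import numpy as np

c = np.poly([1.0, 1.0, -2.0])  # (x - 1)**2 * (x + 2), double root at x = 1
r = np.roots(c)  # companion-matrix eigenvalues
print(r)  # the double root may appear as roughly 1 +/- 1e-8j
print(r[np.abs(r.imag) < 1e-12].real)  # a tight imaginary-part cutoff can then drop it
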
intersect = B.solve(1 / pitch, extrapolate=False) - np.testing.assert_allclose(bp1[1], 1.9827671337414938) - intersect = np.insert(intersect, np.searchsorted(intersect, bp1[1]), bp1[1]) - np.testing.assert_allclose(bp1, intersect[[1, 2]]) - np.testing.assert_allclose(bp2, intersect[[2, 3]]) + np.testing.assert_allclose(bp1[1], 1.982767, rtol=1e-6) + np.testing.assert_allclose(bp1, intersect[[1, 2]], rtol=1e-6) + # intersect array could not resolve double root as single at index 2,3 + np.testing.assert_allclose(intersect[2], intersect[3], rtol=1e-6) + np.testing.assert_allclose(bp2, intersect[[3, 4]], rtol=1e-6) def test_bp2_before_extrema(): start = -1.2 * np.pi @@ -320,19 +320,17 @@ def test_extrema_first_and_before_bp1(plot=False): -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 20, ) B_z_ra = B.derivative() - pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] + pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] - 1e-13 bp1, bp2 = bounce_points(pitch, k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], check=True) if plot: - plot_field_line_with_ripple(B, pitch, bp1, bp2, start=k[2]) + plot_field_line(B, pitch, bp1, bp2, start=k[2]) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) assert bp1.size and bp2.size - # Our routine correctly detects intersection, while scipy, jnp.root fails. intersect = B.solve(1 / pitch, extrapolate=False) - np.testing.assert_allclose(bp1[0], 0.8353192766102349) - intersect = np.insert(intersect, np.searchsorted(intersect, bp1[0]), bp1[0]) + np.testing.assert_allclose(bp1[0], 0.835319, rtol=1e-6) intersect = intersect[intersect >= k[2]] - np.testing.assert_allclose(bp1, intersect[[0, 1, 3]]) - np.testing.assert_allclose(bp2, intersect[[0, 2, 4]]) + np.testing.assert_allclose(bp1, intersect[[0, 2, 4]], rtol=1e-6) + np.testing.assert_allclose(bp2, intersect[[0, 3, 5]], rtol=1e-6) def test_extrema_first_and_before_bp2(): start = -1.2 * np.pi @@ -344,7 +342,7 @@ def test_extrema_first_and_before_bp2(): -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 10, ) B_z_ra = B.derivative() - pitch = 1 / B(B_z_ra.roots(extrapolate=False))[1] + pitch = 1 / B(B_z_ra.roots(extrapolate=False))[1] + 1e-13 # If a regression fails this test, this note will save many hours of debugging. # If the filter in place to return only the distinct roots is too coarse, # in particular atol < 1e-15, then this test will error. In the resulting @@ -362,10 +360,11 @@ def test_extrema_first_and_before_bp2(): assert bp1.size and bp2.size # Our routine correctly detects intersection, while scipy, jnp.root fails. 
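
The analytic route used instead follows the trigonometric form of the cubic formula in ``_root_cubic`` above (numerical.recipes, page 228); a standalone check of that identity on a cubic with three known real roots:

import numpy as np

b, c, d = -6.0, 11.0, -6.0  # monic cubic x**3 + b*x**2 + c*x + d with roots 1, 2, 3
Q = (b**2 - 3 * c) / 9
R = (2 * b**3 - 9 * b * c + 27 * d) / 54
assert R**2 < Q**3  # irreducible case: three real roots
theta = np.arccos(R / np.sqrt(Q**3))
roots = -2 * np.sqrt(Q) * np.cos((theta + np.array([0.0, 2 * np.pi, -2 * np.pi])) / 3) - b / 3
np.testing.assert_allclose(np.sort(roots), [1.0, 2.0, 3.0])
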
intersect = B.solve(1 / pitch, extrapolate=False) - np.testing.assert_allclose(bp1[0], -0.6719044147510538) - intersect = np.insert(intersect, np.searchsorted(intersect, bp1[0]), bp1[0]) - np.testing.assert_allclose(bp1, intersect[0::2]) - np.testing.assert_allclose(bp2, intersect[1::2]) + np.testing.assert_allclose(bp1[0], -0.671904, rtol=1e-6) + np.testing.assert_allclose(bp1, intersect[[0, 3, 5]], rtol=1e-5) + # intersect array could not resolve double root as single at index 0,1 + np.testing.assert_allclose(intersect[0], intersect[1], rtol=1e-5) + np.testing.assert_allclose(bp2, intersect[[2, 4, 6]], rtol=1e-5) test_bp1_first() test_bp2_first() From b14426a287169758a2471516242a9d28a4f6621b Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 11 Jun 2024 15:45:36 -0500 Subject: [PATCH 177/241] Clean up some documentation and docstrings --- desc/compute/bounce_integral.py | 64 +++++++++++++++------------------ 1 file changed, 29 insertions(+), 35 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 04e4715c6c..b3ec5f35fe 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -128,7 +128,7 @@ def irreducible(Q, R, b): return jnp.stack([r1, r2, r3], axis=-1) def reducible(Q, R, b): - # 1 real, two complex roots. + # One real and two complex roots. A = -jnp.sign(R) * (jnp.abs(R) + jnp.sqrt(R**2 - Q**3)) ** (1 / 3) B = safediv(Q, A) r1 = (A + B) - b / 3 @@ -435,9 +435,7 @@ def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): """ B_c, B_z_ra_c, _ = _check_shape(knots, B_c, B_z_ra_c) S, N, degree = B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 - extrema = _poly_root( - c=B_z_ra_c, a_min=jnp.array([0]), a_max=jnp.diff(knots), distinct=True - ) + extrema = _poly_root(c=B_z_ra_c, a_min=jnp.array([0]), a_max=jnp.diff(knots)) assert extrema.shape == (S, N, degree - 1) B_extrema = _poly_val(x=extrema, c=B_c[..., jnp.newaxis]) B_zz_ra_extrema = _poly_val(x=extrema, c=_poly_der(B_z_ra_c)[..., jnp.newaxis]) @@ -617,6 +615,8 @@ def plot_field_line( title=r"Computed bounce points for $\vert B \vert$ and pitch $\lambda$", id=None, include_knots=True, + alpha_knot=0.1, + alpha_pitch=0.25, show=True, ): """Plot the field line given spline of |B| and bounce points etc. @@ -636,11 +636,17 @@ def plot_field_line( stop : float Maximum ζ of plot. num : int - Number of ζ points to plot. + Number of ζ points to plot. Pick a big number. title : str Plot title. id : str Identifier string to append to plot title. + include_knots : bool + Whether to plot vertical lines at the knots. + alpha_knot : float + Transparency of knot lines. + alpha_pitch : float + Transparency of pitch lines. show : bool Whether to show the plot. @@ -662,7 +668,7 @@ def add(lines): fig, ax = plt.subplots() if include_knots: for knot in B.x: - add(ax.axvline(x=knot, color="tab:blue", alpha=0.1, label="knot")) + add(ax.axvline(x=knot, color="tab:blue", alpha=alpha_knot, label="knot")) z = jnp.linspace( start=B.x[0] if start is None else start, stop=B.x[-1] if stop is None else stop, @@ -673,7 +679,11 @@ def add(lines): if pitch is not None: b = 1 / jnp.atleast_1d(pitch) for val in b: - add(ax.axhline(val, color="tab:purple", alpha=0.25, label=r"$1 / \lambda$")) + add( + ax.axhline( + val, color="tab:purple", alpha=alpha_pitch, label=r"$1 / \lambda$" + ) + ) bp1, bp2 = jnp.atleast_2d(bp1, bp2) for i in range(bp1.shape[0]): bp1_i, bp2_i = map(_filter_not_nan, (bp1[i], bp2[i])) @@ -702,8 +712,8 @@ def add(lines): if id is not None: title = f"{title}. id = {id}." 
ax.set_title(title) + plt.tight_layout() if show: - plt.tight_layout() plt.show() plt.close() return fig, ax @@ -882,21 +892,13 @@ def tanh_sinh(deg, m=10): _interp1d_vec = jnp.vectorize( - interp1d, - signature="(m),(n),(n)->(m)", - excluded={"method", "derivative", "extrap", "period"}, + interp1d, signature="(m),(n),(n)->(m)", excluded={"method"} ) -@partial( - jnp.vectorize, - signature="(m),(n),(n),(n)->(m)", - excluded={"method", "derivative", "extrap", "period"}, -) -def _interp1d_vec_with_df( - xq, x, f, fx, method="cubic", derivative=0, extrap=False, period=None -): - return interp1d(xq, x, f, method, derivative, extrap, period, fx=fx) +@partial(jnp.vectorize, signature="(m),(n),(n),(n)->(m)", excluded={"method"}) +def _interp1d_vec_with_df(xq, x, f, fx, method="cubic"): + return interp1d(xq, x, f, method, fx=fx) def _interpolatory_quadrature( @@ -1206,21 +1208,13 @@ def bounce_integral( the labels (ρ, α) are interpreted as the index into the first axis that corresponds to that field line. knots : Array, shape(knots.size, ) - Field line following coordinate values at which ``B_sup_z``, - ``B``, and ``B_z_ra`` were evaluated. - These knots are used to compute a spline of |B| and interpolate - the integrand. The number of knots specifies a grid resolution - as increasing the number of knots increases the accuracy of - representing the integrand and the accuracy of the locations of - the bounce points. The default spline method for |B| is a cubic - Hermite spline. This is preferred because the strength of the - singularity typical in bounce integral is ~ 1 / |∂|B|/∂_ζ|, so - the derivative information should be captured without compromise. - Can also specify to use a monotonic interpolation for |B| rather - than a cubic Hermite spline with keyword argument ``monotonic=True``. + Field line following coordinate values at which ``B_sup_z``, ``B``, and + ``B_z_ra`` were evaluated. These knots are used to compute a spline of |B| + and interpolate the integrand. A good reference density is 100 knots per + toroidal transit. quad : (Array, Array) Quadrature points xₖ and weights wₖ for the approximate evaluation - of an integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). + of an integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Default is 21 points. automorphism : (callable, callable) or None The first callable should be an automorphism of the real interval [-1, 1]. The second callable should be the derivative of the first. @@ -1230,9 +1224,9 @@ def bounce_integral( augment or suppress singularities. Keep this in mind when choosing the quadrature method. B_ref : float - Reference magnetic field strength for normalization. + Optional. Reference magnetic field strength for normalization. L_ref : float - Reference length scale for normalization. + Optional. Reference length scale for normalization. check : bool Flag for debugging. 
plot : bool From deb12d669fe49095b85f4f9c0292757429391a63 Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 12 Jun 2024 16:45:37 -0500 Subject: [PATCH 178/241] Fix type hinting and reorganize order of methods --- desc/backend.py | 35 +- desc/compute/bounce_integral.py | 1208 +++++++++++++-------------- devtools/dev-requirements_conda.yml | 2 +- requirements.txt | 2 +- requirements_conda.yml | 2 +- 5 files changed, 596 insertions(+), 653 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index 345589fb80..800c2fb2a2 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -405,25 +405,13 @@ def tangent_solve(g, y): ) return x, (jnp.linalg.norm(res), niter) - def complex_sqrt(x): - """Compute the square root of x. - - For negative input elements, a complex value is returned - (unlike numpy.sqrt which returns NaN). - - Parameters - ---------- - x : array_like - The input value(s). - - Returns - ------- - out : ndarray - The square root of x. - - """ - out = jnp.sqrt(x.astype("complex128")) - return out + def trapezoid(y, x=None, dx=1.0, axis=-1): + """Integrate along the given axis using the composite trapezoidal rule.""" + if hasattr(jnp, "trapezoid"): + # https://github.com/google/jax/issues/20410 + return jnp.trapezoid(y, x, dx, axis) + else: + return jax.scipy.integrate.trapezoid(y, x, dx, axis) # we can't really test the numpy backend stuff in automated testing, so we ignore it @@ -441,7 +429,6 @@ def complex_sqrt(x): ) from scipy.special import gammaln, logsumexp # noqa: F401 - complex_sqrt = np.emath.sqrt put_along_axis = np.put_along_axis def imap(f, xs, in_axes=0, out_axes=0): @@ -823,6 +810,14 @@ def root( out = scipy.optimize.root(fun, x0, args, jac=jac, tol=tol) return out.x, out + def trapezoid(y, x=None, dx=1.0, axis=-1): + """Integrate along the given axis using the composite trapezoidal rule.""" + if hasattr(np, "trapezoid"): + # https://github.com/numpy/numpy/issues/25586 + return np.trapezoid(y, x, dx, axis) + else: + return np.trapz(y, x, dx, axis) + def flatnonzero(a, size=None, fill_value=0): """A numpy implementation of jnp.flatnonzero.""" nz = np.flatnonzero(a) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index b3ec5f35fe..d871ee72a6 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -17,26 +17,24 @@ def take_mask(a, mask, size=None, fill_value=None): Parameters ---------- - a : Array + a : jnp.ndarray The source array. - mask : Array + mask : jnp.ndarray Boolean mask to index into ``a``. Should have same shape as ``a``. size : int Elements of ``a`` at the first size True indices of ``mask`` will be returned. If there are fewer elements than size indicates, the returned array will be padded with fill_value. Defaults to ``mask.size``. - fill_value : - When there are fewer than the indicated number of elements, - the remaining elements will be filled with ``fill_value``. - Defaults to NaN for inexact types, - the largest negative value for signed types, - the largest positive value for unsigned types, - and True for booleans. + fill_value : Any + When there are fewer than the indicated number of elements, the remaining + elements will be filled with ``fill_value``. Defaults to NaN for inexact types, + the largest negative value for signed types, the largest positive value for + unsigned types, and True for booleans. Returns ------- - a[mask][:size] : Array, shape(size, ) - Output array. + result : jnp.ndarray + Shape (size, ). 
""" assert a.shape == mask.shape @@ -62,21 +60,19 @@ def _filter_not_nan(a): def _filter_real(a, a_min=-jnp.inf, a_max=jnp.inf): - """Keep real values inside [a_min, a_max] and set others to nan. + """Keep real values inside [``a_min``, ``a_max``] and set others to nan. Parameters ---------- - a : Array - Complex-valued array. - a_min, a_max : Array, Array + a : jnp.ndarray + a_min, a_max : jnp.ndarray or float, jnp.ndarray or float Minimum and maximum value to keep real values between. Should broadcast with ``a``. Returns ------- - roots : Array + result : jnp.ndarray The real values of ``a`` in [``a_min``, ``a_max``]; others set to nan. - The returned array preserves the order of ``a``. """ if a_min is None: @@ -106,7 +102,7 @@ def _root_quadratic(a, b, c, distinct=False): # numerical.recipes/book.html, page 227 discriminant = b**2 - 4 * a * c q = -0.5 * (b + jnp.sign(b) * jnp.sqrt(discriminant)) - r1 = safediv(q, a, _root_linear(b, c, distinct)) + r1 = safediv(q, a, _root_linear(b, c)) # more robust to remove repeated roots with discriminant r2 = jnp.where( distinct & jnp.isclose(discriminant, 0), jnp.nan, safediv(c, q, jnp.nan) @@ -157,43 +153,43 @@ def root(b, c, d): def _poly_root( - c, k=0, a_min=None, a_max=None, sort=False, distinct=False, real_coef=True + c, k=0, a_min=None, a_max=None, sort=False, distinct=False, poly_is_real=True ): """Roots of polynomial with given coefficients. Parameters ---------- - c : Array - First axis should store coefficients of a polynomial. - For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, - coefficient cᵢ should be stored at ``c[n - i]``. + c : jnp.ndarray + First axis should store coefficients of a polynomial. For a polynomial given by + ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0]-1``, coefficient cᵢ should be stored at + ``c[n-i]``. k : Array - Specify to find solutions to ∑ᵢⁿ cᵢ xⁱ = ``k``. - Should broadcast with arrays of shape(*c.shape[1:]). - a_min, a_max : Array, Array - Minimum and maximum value to return roots between. - If specified only real roots are returned. - If None, returns all complex roots. - Should broadcast with arrays of shape(*c.shape[1:]). + Specify to find solutions to ∑ᵢⁿ cᵢ xⁱ = ``k``. Should broadcast with arrays of + shape c.shape[1:]. + a_min, a_max : jnp.ndarray, jnp.ndarray + Minimum and maximum value to return roots between. If specified only real roots + are returned. If None, returns all complex roots. Should broadcast with arrays + of shape c.shape[1:]. sort : bool Whether to sort the roots. distinct : bool - Whether to only return the distinct roots. If true, when the - multiplicity is greater than one, the repeated roots are set to nan. - real_coef : bool - Whether the coefficients ``c`` and ``k`` are real. + Whether to only return the distinct roots. If true, when the multiplicity is + greater than one, the repeated roots are set to nan. + poly_is_real : bool + Whether the coefficients ``c`` and ``k`` are real. Default is true. Returns ------- - r : Array, shape(..., c.shape[1:], c.shape[0] - 1) + r : jnp.ndarray + Shape (..., c.shape[1:], c.shape[0] - 1). The roots of the polynomial, iterated over the last axis. """ - just_real = not (a_min is None and a_max is None) + get_only_real_roots = not (a_min is None and a_max is None) func = {2: _root_linear, 3: _root_quadratic, 4: _root_cubic} - if c.shape[0] in func and real_coef and just_real: - # Compute from analytic formula to avoid the issue of complex roots - # with small imaginary parts. 
+ if c.shape[0] in func and poly_is_real and get_only_real_roots: + # Compute from analytic formula to avoid the issue of complex roots with small + # imaginary parts. r = func[c.shape[0]](*c[:-1], c[-1] - k, distinct) distinct = distinct and c.shape[0] > 3 else: @@ -203,7 +199,7 @@ def _poly_root( c.append(c_n) c = jnp.stack(c, axis=-1) r = _roots(c) - if just_real: + if get_only_real_roots: if a_min is not None: a_min = a_min[..., jnp.newaxis] if a_max is not None: @@ -227,17 +223,17 @@ def _poly_der(c): Parameters ---------- - c : Array - First axis should store coefficients of a polynomial. - For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, - coefficient cᵢ should be stored at ``c[n - i]``. + c : jnp.ndarray + First axis should store coefficients of a polynomial. For a polynomial given by + ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0]-1``, coefficient cᵢ should be stored at + ``c[n-i]``. Returns ------- - poly : Array - Coefficients of polynomial derivative, ignoring the arbitrary constant. - That is, ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁻¹, - where n is ``c.shape[0] - 1``. + poly : jnp.ndarray + Coefficients of polynomial derivative, ignoring the arbitrary constant. That is, + ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁻¹, where n is + ``c.shape[0]-1``. """ poly = (c[:-1].T * jnp.arange(c.shape[0] - 1, 0, -1)).T @@ -245,30 +241,29 @@ def _poly_der(c): def _poly_val(x, c): - """Evaluate the set of polynomials c at the points x. + """Evaluate the set of polynomials ``c`` at the points ``x``. - Note that this function does not perform the same operation as - ``np.polynomial.polynomial.polyval(x, c)``. + Note this function is not the same as ``np.polynomial.polynomial.polyval(x,c)``. Parameters ---------- - x : Array + x : jnp.ndarray Coordinates at which to evaluate the set of polynomials. - c : Array - First axis should store coefficients of a polynomial. - For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0] - 1``, - coefficient cᵢ should be stored at ``c[n - i]``. + c : jnp.ndarray + First axis should store coefficients of a polynomial. For a polynomial given by + ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0]-1``, coefficient cᵢ should be stored at + ``c[n-i]``. Returns ------- - val : Array + val : jnp.ndarray Polynomial with given coefficients evaluated at given points. Examples -------- .. code-block:: python - val = polyval(x, c) + val = _poly_val(x, c) if val.ndim != max(x.ndim, c.ndim - 1): raise ValueError(f"Incompatible shapes {x.shape} and {c.shape}.") for index in np.ndindex(c.shape[1:]): @@ -286,61 +281,158 @@ def _poly_val(x, c): return val -def composite_linspace(x, num): - """Returns linearly spaced points between every pair of points ``x``. +def plot_field_line( + B, + pitch=None, + bp1=jnp.array([]), + bp2=jnp.array([]), + start=None, + stop=None, + num=1000, + title=r"Computed bounce points for $\vert B \vert$ and pitch $\lambda$", + title_id=None, + include_knots=True, + alpha_knot=0.1, + alpha_pitch=0.25, + show=True, +): + """Plot the field line given spline of |B|. Parameters ---------- - x : Array - First axis has values to return linearly spaced values between. - The remaining axes are batch axes. - Assumes input is sorted along first axis. + B : PPoly + Spline of |B| over given field line. + pitch : jnp.ndarray + λ value. + bp1 : jnp.ndarray + Bounce points with ∂|B|/∂_ζ <= 0. + bp2 : jnp.ndarray + Bounce points with ∂|B|/∂_ζ >= 0. + start : float + Minimum ζ on plot. + stop : float + Maximum ζ on plot. 
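The polynomial helpers above store coefficients highest power first, i.e. cᵢ lives at ``c[n-i]``, which is the ``np.polyval`` convention rather than the ``np.polynomial.polynomial.polyval`` one. A small NumPy check of that convention and of the derivative rule used by ``_poly_der`` (plain NumPy for illustration, not calling the private helpers):

.. code-block:: python

    import numpy as np

    # p(x) = 2x^2 + 3x + 1, stored highest power first as described above.
    c = np.array([2.0, 3.0, 1.0])
    x = np.array([0.0, 1.0, 2.0])

    print(np.polyval(c, x))                              # [ 1.  6. 15.]
    # The other NumPy API expects the reversed, lowest-power-first order.
    print(np.polynomial.polynomial.polyval(x, c[::-1]))  # same values

    # Derivative coefficients, dropping the constant term: 4x + 3.
    dc = c[:-1] * np.arange(c.size - 1, 0, -1)
    print(np.polyval(dc, x))                             # [ 3.  7. 11.]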
num : int - Number of points between every pair of points in ``x``. + Number of ζ points to plot. Pick a big number. + title : str + Plot title. + title_id : str + Identifier string to append to plot title. + include_knots : bool + Whether to plot vertical lines at the knots. + alpha_knot : float + Transparency of knot lines. + alpha_pitch : float + Transparency of pitch lines. + show : bool + Whether to show the plot. Default is true. Returns ------- - pts : Array, shape((x.shape[0] - 1) * num + x.shape[0], *x.shape[1:]) - Linearly spaced points between ``x``. + fig, ax : matplotlib figure and axes. """ - x = jnp.atleast_1d(x) - pts = jnp.linspace(x[:-1, ...], x[1:, ...], num + 1, endpoint=False) - pts = jnp.moveaxis(pts, source=0, destination=1).reshape(-1, *x.shape[1:]) - pts = jnp.append(pts, x[jnp.newaxis, -1, ...], axis=0) - assert pts.shape == ((x.shape[0] - 1) * num + x.shape[0], *x.shape[1:]) - return pts + legend = {} + def add(lines): + if not hasattr(lines, "__iter__"): + lines = [lines] + for line in lines: + label = line.get_label() + if label not in legend: + legend[label] = line -def get_pitch(min_B, max_B, num, relative_shift=1e-6): - """Return uniformly spaced pitch values between 1 / max B and 1 / min B. + fig, ax = plt.subplots() + if include_knots: + for knot in B.x: + add(ax.axvline(x=knot, color="tab:blue", alpha=alpha_knot, label="knot")) + z = jnp.linspace( + start=B.x[0] if start is None else start, + stop=B.x[-1] if stop is None else stop, + num=num, + ) + add(ax.plot(z, B(z), label=r"$\vert B \vert (\zeta)$")) - Parameters - ---------- - min_B, max_B : Array, Array - Minimum and maximum |B| values. - num : int - Number of values, not including endpoints. - relative_shift : float - Relative amount to shift maxima down and minima up to avoid floating point - errors in downstream routines. + if pitch is not None: + b = 1 / jnp.atleast_1d(pitch) + for val in b: + add( + ax.axhline( + val, color="tab:purple", alpha=alpha_pitch, label=r"$1 / \lambda$" + ) + ) + bp1, bp2 = jnp.atleast_2d(bp1, bp2) + for i in range(bp1.shape[0]): + bp1_i, bp2_i = map(_filter_not_nan, (bp1[i], bp2[i])) + add( + ax.scatter( + bp1_i, + jnp.full_like(bp1_i, b[i]), + marker="v", + color="tab:red", + label="bp1", + ) + ) + add( + ax.scatter( + bp2_i, + jnp.full_like(bp2_i, b[i]), + marker="^", + color="tab:green", + label="bp2", + ) + ) - Returns - ------- - pitch : Array, shape(num + 2, *min_B.shape) - Pitch values. + ax.set_xlabel(r"Field line $\zeta$") + ax.set_ylabel(r"$\vert B \vert \sim 1 / \lambda$") + ax.legend(legend.values(), legend.keys(), loc="lower right") + if title_id is not None: + title = f"{title}. id = {title_id}." + ax.set_title(title) + plt.tight_layout() + if show: + plt.show() + plt.close() + return fig, ax - """ - # Floating point error impedes consistent detection of bounce points riding - # extrema. Shift values slightly to resolve this issue. - min_B = (1 + relative_shift) * min_B - max_B = (1 - relative_shift) * max_B - # λ is the pitch angle. Note Nemov dimensionless integration variable b = (λB₀)⁻¹. - # Uniformly space in pitch (as opposed to 1/pitch) to get faster convergence in - # an integration over pitch. 
- pitch = composite_linspace(1 / jnp.stack([max_B, min_B]), num) - assert pitch.shape == (num + 2, *pitch.shape[1:]) - return pitch + +def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot, **kwargs): + """Check that bounce points are computed correctly.""" + eps = 10 * jnp.finfo(jnp.array(1.0).dtype).eps + P, S = bp1.shape[:-1] + + msg_1 = "Bounce points have an inversion." + err_1 = jnp.any(bp1 > bp2, axis=-1) + msg_2 = "Discontinuity detected." + err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1], axis=-1) + + for s in range(S): + B = PPoly(B_c[:, s], knots) + for p in range(P): + B_mid = B((bp1[p, s] + bp2[p, s]) / 2) + err_3 = jnp.any(B_mid > 1 / pitch[p, s] + eps) + if err_1[p, s] or err_2[p, s] or err_3: + bp1_p, bp2_p, B_mid = map( + _filter_not_nan, (bp1[p, s], bp2[p, s], B_mid) + ) + if plot: + plot_field_line( + B, pitch[p, s], bp1_p, bp2_p, title_id=f"{p},{s}", **kwargs + ) + print("bp1:", bp1_p) + print("bp2:", bp2_p) + assert not err_1[p, s], msg_1 + assert not err_2[p, s], msg_2 + msg_3 = ( + f"Detected B midpoint = {B_mid}>{1 / pitch[p, s] + eps} = 1/pitch. " + "You need to use more knots or, if that is infeasible, switch to a " + "monotonic spline method.\n" + ) + assert not err_3, msg_3 + if plot: + plot_field_line( + B, pitch[:, s], bp1[:, s], bp2[:, s], title_id=str(s), **kwargs + ) def _check_shape(knots, B_c, B_z_ra_c, pitch=None): @@ -348,29 +440,21 @@ def _check_shape(knots, B_c, B_z_ra_c, pitch=None): Parameters ---------- - knots : Array, shape(knots.size, ) + knots : jnp.ndarray + Shape (knots.size, ). Field line-following ζ coordinates of spline knots. Returns ------- - B_c : Array, shape(B_c.shape[0], S, knots.size - 1) + B_c : jnp.ndarray + Shape (B_c.shape[0], S, knots.size - 1). Polynomial coefficients of the spline of |B| in local power basis. - First axis enumerates the coefficients of power series. - Second axis enumerates the splines along the field lines. - Last axis enumerates the polynomials of the spline along a particular - field line. - B_z_ra_c : Array, shape(B_c.shape[0] - 1, *B_c.shape[1:]) + B_z_ra_c : jnp.ndarray + Shape (B_c.shape[0] - 1, *B_c.shape[1:]). Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. - First axis enumerates the coefficients of power series. - Second axis enumerates the splines along the field lines. - Last axis enumerates the polynomials of the spline along a particular - field line. - pitch : Array, shape(P, S) - λ values. - λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` - where in the latter the labels (ρ, α) are interpreted as the index into - the last axis that corresponds to that field line. - If two-dimensional, the first axis is the batch axis as usual. + pitch : jnp.ndarray + Shape (P, S). + λ values to evaluate the bounce integral at each field line. """ errorif(knots.ndim != 1) @@ -392,95 +476,32 @@ def _check_shape(knots, B_c, B_z_ra_c, pitch=None): return B_c, B_z_ra_c, pitch -def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): - """Return |B| values at extrema. - - The quantity 1 / √(1 − λ |B|) common to bounce integrals is singular with - strength ~ |ζ_b₂ - ζ_b₁| / |∂|B|/∂_ζ|. Therefore, an integral over the pitch - angle λ may have mass concentrated near λ = 1 / |B|(ζ*) where |B|(ζ*) is a - local maximum. These correspond to fat banana orbits. Depending on the - quantity to integrate, it may be beneficial to place quadrature points near - these regions. - - Parameters - ---------- - knots : Array, shape(knots.size, ) - Field line-following ζ coordinates of spline knots. 
- B_c : Array, shape(B_c.shape[0], S, knots.size - 1) - Polynomial coefficients of the spline of |B| in local power basis. - First axis enumerates the coefficients of power series. - Second axis enumerates the splines along the field lines. - Last axis enumerates the polynomials of the spline along a particular - field line. - B_z_ra_c : Array, shape(B_c.shape[0] - 1, *B_c.shape[1:]) - Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. - First axis enumerates the coefficients of power series. - Second axis enumerates the splines along the field lines. - Last axis enumerates the polynomials of the spline along a particular - field line. - relative_shift : float - Relative amount to shift maxima down and minima up to avoid floating point - errors in downstream routines. - - Returns - ------- - B_extrema : Array, shape(N * (degree - 1), S) - For the shaping notation, the ``degree`` of the spline of |B| matches - ``B_c.shape[0] - 1``, the number of polynomials per spline ``N`` matches - ``knots.size - 1``, and the number of field lines is denoted by ``S``. - - If there were less than ``N * (degree - 1)`` extrema detected along a - field line, then the first axis is interspersed with nan. - - """ - B_c, B_z_ra_c, _ = _check_shape(knots, B_c, B_z_ra_c) - S, N, degree = B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 - extrema = _poly_root(c=B_z_ra_c, a_min=jnp.array([0]), a_max=jnp.diff(knots)) - assert extrema.shape == (S, N, degree - 1) - B_extrema = _poly_val(x=extrema, c=B_c[..., jnp.newaxis]) - B_zz_ra_extrema = _poly_val(x=extrema, c=_poly_der(B_z_ra_c)[..., jnp.newaxis]) - # Floating point error impedes consistent detection of bounce points riding - # extrema. Shift pitch values slightly to resolve this issue. - B_extrema = ( - jnp.where( - # Higher priority to shift down maxima than shift up minima, so identify - # near equality with zero as maxima. - B_zz_ra_extrema <= 0, - (1 - relative_shift) * B_extrema, - (1 + relative_shift) * B_extrema, - ) - .reshape(S, -1) - .T - ) - assert B_extrema.shape == (N * (degree - 1), S) - return B_extrema - - def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=False, **kwargs): """Compute the bounce points given spline of |B| and pitch λ. Parameters ---------- - pitch : Array, shape(P, S) - λ values. - λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` - where in the latter the labels (ρ, α) are interpreted as the index into - the last axis that corresponds to that field line. - If two-dimensional, the first axis is the batch axis as usual. - knots : Array, shape(knots.size, ) + pitch : jnp.ndarray + Shape (P, S). + λ values to evaluate the bounce integral at each field line. λ(ρ,α) is + specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are + interpreted as the index into the last axis that corresponds to that field + line. If two-dimensional, the first axis is the batch axis. + knots : jnp.ndarray + Shape (knots.size, ). Field line-following ζ coordinates of spline knots. - B_c : Array, shape(B_c.shape[0], S, knots.size - 1) + B_c : jnp.ndarray + Shape (B_c.shape[0], S, knots.size - 1). Polynomial coefficients of the spline of |B| in local power basis. - First axis enumerates the coefficients of power series. - Second axis enumerates the splines along the field lines. - Last axis enumerates the polynomials of the spline along a particular - field line. - B_z_ra_c : Array, shape(B_c.shape[0] - 1, *B_c.shape[1:]) + First axis enumerates the coefficients of power series. 
Second axis + enumerates the splines along the field lines. Last axis enumerates the + polynomials that compose the spline along a particular field line. + B_z_ra_c : jnp.ndarray + Shape (B_c.shape[0] - 1, *B_c.shape[1:]). Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. - First axis enumerates the coefficients of power series. - Second axis enumerates the splines along the field lines. - Last axis enumerates the polynomials of the spline along a particular - field line. + First axis enumerates the coefficients of power series. Second axis + enumerates the splines along the field lines. Last axis enumerates the + polynomials that compose the spline along a particular field line. check : bool Flag for debugging. plot : bool @@ -488,17 +509,18 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=False, **kwargs Returns ------- - bp1, bp2 : Array, Array, shape(P, S, N * degree) - For the shaping notation, the ``degree`` of the spline of |B| matches - ``B_c.shape[0] - 1``, the number of polynomials per spline ``N`` matches - ``knots.size - 1``, and the number of field lines is denoted by ``S``. + bp1, bp2 : (jnp.ndarray, jnp.ndarray) + Shape (P, S, N * degree). + The field line-following ζ coordinates of bounce points for a given pitch along + a field line. The pairs ``bp1[i,j,k]`` and ``bp2[i,j,k]`` form left and right + integration boundaries, respectively, for the bounce integrals. - The returned arrays are the field line-following ζ coordinates of bounce - points for a given pitch along a field line. The pairs bp1[i, j, k] and - bp2[i, j, k] form left and right integration boundaries, respectively, - for the bounce integrals. If there were less than ``N * degree`` bounce - points detected along a field line, then the last axis, which enumerates - the bounce points for a particular field line, is padded with nan. + For the shaping notation, the ``degree`` of the spline of |B| matches + ``B_c.shape[0]-1``, the number of polynomials per spline ``N`` matches + ``knots.size-1``, and the number of field lines is denoted by ``S``. + If there were less than ``N*degree`` bounce points detected along a field line, + then the last axis, which enumerates the bounce points for a particular field + line, is padded with nan. """ B_c, B_z_ra_c, pitch = _check_shape(knots, B_c, B_z_ra_c, pitch) @@ -547,176 +569,129 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=False, **kwargs # Following discussion on page 3 and 5 of https://doi.org/10.1063/1.873749, # we ignore the bounce points of particles assigned to a class that are # trapped outside this snapshot of the field line. + # TODO: Better to always consider boundary as bounce points. if check: _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot, **kwargs) return bp1, bp2 -def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot=False, **kwargs): - """Check that bounce points are computed correctly. +def composite_linspace(x, num): + """Returns linearly spaced points between every pair of points ``x``. Parameters ---------- - bp1, bp2 : Array, Array - Output of ``bounce_points``. - pitch : Array - Input to ``bounce_points``. - knots : Array - Input to ``bounce_points``. - B_c : Array - Input to ``bounce_points``. - plot : bool - Whether to plot some things. - - """ - eps = 10 * jnp.finfo(jnp.array(1.0).dtype).eps - P, S = bp1.shape[:-1] + x : jnp.ndarray + First axis has values to return linearly spaced values between. The remaining + axes are batch axes. 
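A standalone SciPy sketch of the root-find-and-classify idea behind ``bounce_points``, on a single model field line rather than the batched splines used above; the model field and pitch value are arbitrary choices for illustration.

.. code-block:: python

    import numpy as np
    from scipy.interpolate import CubicSpline

    # Model |B| along one field line, splined over the knots.
    knots = np.linspace(0, 4 * np.pi, 200)
    B = CubicSpline(knots, 1 + 0.5 * np.cos(knots))
    B_z = B.derivative()

    pitch = 0.8  # so 1/λ = 1.25 sits between min |B| = 0.5 and max |B| = 1.5
    # ζ where |B|(ζ) = 1/λ, i.e. where the parallel velocity vanishes.
    intersect = B.solve(1 / pitch, extrapolate=False)
    # Classify by the sign of ∂|B|/∂ζ: bp1 opens a magnetic well, bp2 closes it.
    bp1 = intersect[B_z(intersect) <= 0]
    bp2 = intersect[B_z(intersect) >= 0]
    for left, right in zip(bp1, bp2[np.searchsorted(bp2, bp1)]):
        print(f"bounce pair: ({left:.3f}, {right:.3f})")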
Assumes input is sorted along first axis. + num : int + Number of points between every pair of points in ``x``. - msg_1 = "Bounce points have an inversion." - err_1 = jnp.any(bp1 > bp2, axis=-1) - msg_2 = "Discontinuity detected." - err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1], axis=-1) + Returns + ------- + pts : jnp.ndarray + Shape ((x.shape[0] - 1) * num + x.shape[0], *x.shape[1:]). + Linearly spaced points between ``x``. - for s in range(S): - B = PPoly(B_c[:, s], knots) - for p in range(P): - B_mid = B((bp1[p, s] + bp2[p, s]) / 2) - err_3 = jnp.any(B_mid > 1 / pitch[p, s] + eps) - if err_1[p, s] or err_2[p, s] or err_3: - bp1_p, bp2_p, B_mid = map( - _filter_not_nan, (bp1[p, s], bp2[p, s], B_mid) - ) - if plot: - plot_field_line( - B, pitch[p, s], bp1_p, bp2_p, id=f"{p},{s}", **kwargs - ) - print("bp1:", bp1_p) - print("bp2:", bp2_p) - assert not err_1[p, s], msg_1 - assert not err_2[p, s], msg_2 - msg_3 = ( - f"Detected B midpoint = {B_mid}>{1 / pitch[p, s] + eps} = 1/pitch. " - "You need to use more knots or, if that is infeasible, switch to a " - "monotonic spline method.\n" - ) - assert not err_3, msg_3 - if plot: - plot_field_line(B, pitch[:, s], bp1[:, s], bp2[:, s], id=str(s), **kwargs) + """ + x = jnp.atleast_1d(x) + pts = jnp.linspace(x[:-1, ...], x[1:, ...], num + 1, endpoint=False) + pts = jnp.moveaxis(pts, source=0, destination=1).reshape(-1, *x.shape[1:]) + pts = jnp.append(pts, x[jnp.newaxis, -1, ...], axis=0) + assert pts.shape == ((x.shape[0] - 1) * num + x.shape[0], *x.shape[1:]) + return pts -def plot_field_line( - B, - pitch=None, - bp1=jnp.array([]), - bp2=jnp.array([]), - start=None, - stop=None, - num=1000, - title=r"Computed bounce points for $\vert B \vert$ and pitch $\lambda$", - id=None, - include_knots=True, - alpha_knot=0.1, - alpha_pitch=0.25, - show=True, -): - """Plot the field line given spline of |B| and bounce points etc. +def get_pitch(min_B, max_B, num, relative_shift=1e-6): + """Return uniformly spaced pitch values between 1 / max B and 1 / min B. - Parameters - ---------- - B : PPoly - Spline of |B| over given field line. - pitch : Array - λ value. - bp1 : Array - Bounce points with ∂|B|/∂_ζ <= 0. - bp2 : Array - Bounce points with ∂|B|/∂_ζ >= 0. - start : float - Minimum ζ on plot. - stop : float - Maximum ζ of plot. - num : int - Number of ζ points to plot. Pick a big number. - title : str - Plot title. - id : str - Identifier string to append to plot title. - include_knots : bool - Whether to plot vertical lines at the knots. - alpha_knot : float - Transparency of knot lines. - alpha_pitch : float - Transparency of pitch lines. - show : bool - Whether to show the plot. + Parameters + ---------- + min_B, max_B : jnp.ndarray, jnp.ndarray + Minimum and maximum |B| values. + num : int + Number of values, not including endpoints. + relative_shift : float + Relative amount to shift maxima down and minima up to avoid floating point + errors in downstream routines. Returns ------- - fig, ax : matplotlib figure and axes. + pitch : jnp.ndarray + Shape (num + 2, *min_B.shape). """ - legend = {} + # Floating point error impedes consistent detection of bounce points riding + # extrema. Shift values slightly to resolve this issue. 
+ min_B = (1 + relative_shift) * min_B + max_B = (1 - relative_shift) * max_B + pitch = composite_linspace(1 / jnp.stack([max_B, min_B]), num) + assert pitch.shape == (num + 2, *pitch.shape[1:]) + return pitch - def add(lines): - if not hasattr(lines, "__iter__"): - lines = [lines] - for line in lines: - label = line.get_label() - if label not in legend: - legend[label] = line - fig, ax = plt.subplots() - if include_knots: - for knot in B.x: - add(ax.axvline(x=knot, color="tab:blue", alpha=alpha_knot, label="knot")) - z = jnp.linspace( - start=B.x[0] if start is None else start, - stop=B.x[-1] if stop is None else stop, - num=num, - ) - add(ax.plot(z, B(z), label=r"$\vert B \vert (\zeta)$")) +def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): + """Return |B| values at extrema. - if pitch is not None: - b = 1 / jnp.atleast_1d(pitch) - for val in b: - add( - ax.axhline( - val, color="tab:purple", alpha=alpha_pitch, label=r"$1 / \lambda$" - ) - ) - bp1, bp2 = jnp.atleast_2d(bp1, bp2) - for i in range(bp1.shape[0]): - bp1_i, bp2_i = map(_filter_not_nan, (bp1[i], bp2[i])) - add( - ax.scatter( - bp1_i, - jnp.full_like(bp1_i, b[i]), - marker="v", - color="tab:red", - label="bp1", - ) - ) - add( - ax.scatter( - bp2_i, - jnp.full_like(bp2_i, b[i]), - marker="^", - color="tab:green", - label="bp2", - ) - ) + The quantity 1 / √(1 − λ |B|) common to bounce integrals is singular with + strength ~ |ζ_b₂ - ζ_b₁| / |∂|B|/∂_ζ|. Therefore, an integral over the pitch + angle λ may have mass concentrated near λ = 1 / |B|(ζ*) where |B|(ζ*) is a + local maximum. Depending on the quantity to integrate, it may be beneficial + to place quadrature points at these regions. - ax.set_xlabel(r"Field line $\zeta$") - ax.set_ylabel(r"$\vert B \vert \sim 1 / \lambda$") - ax.legend(legend.values(), legend.keys(), loc="lower right") - if id is not None: - title = f"{title}. id = {id}." - ax.set_title(title) - plt.tight_layout() - if show: - plt.show() - plt.close() - return fig, ax + Parameters + ---------- + knots : jnp.ndarray + Shape (knots.size, ). + Field line-following ζ coordinates of spline knots. + B_c : jnp.ndarray + Shape (B_c.shape[0], S, knots.size - 1). + Polynomial coefficients of the spline of |B| in local power basis. + First axis enumerates the coefficients of power series. Second axis + enumerates the splines along the field lines. Last axis enumerates the + polynomials that compose the spline along a particular field line. + B_z_ra_c : jnp.ndarray + Shape (B_c.shape[0] - 1, *B_c.shape[1:]). + Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. + First axis enumerates the coefficients of power series. Second axis + enumerates the splines along the field lines. Last axis enumerates the + polynomials that compose the spline along a particular field line. + relative_shift : float + Relative amount to shift maxima down and minima up to avoid floating point + errors in downstream routines. + + Returns + ------- + B_extrema : jnp.ndarray + Shape (N * (degree - 1), S). + For the shaping notation, the ``degree`` of the spline of |B| matches + ``B_c.shape[0]-1``, the number of polynomials per spline ``N`` matches + ``knots.size-1``, and the number of field lines is denoted by ``S``. + If there were less than ``N*degree`` bounce points detected along a field line, + then the last axis, which enumerates the bounce points for a particular field + line, is padded with nan. 
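A small NumPy sketch of the ``get_pitch`` construction above: nudge the extrema by a relative shift, then space λ uniformly between 1/max |B| and 1/min |B| (scalar model values only).

.. code-block:: python

    import numpy as np

    min_B, max_B = 0.5, 1.5            # model extrema of |B| on a field line
    shift = 1e-6                       # avoid bounce points exactly at extrema
    lo = 1 / ((1 - shift) * max_B)     # smallest pitch, λ ≈ 1/max |B|
    hi = 1 / ((1 + shift) * min_B)     # largest pitch,  λ ≈ 1/min |B|
    num = 5                            # points between the endpoints
    pitch = np.linspace(lo, hi, num + 2)
    print(pitch.shape)                 # (7,), uniform in λ rather than in 1/λ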
+ + """ + B_c, B_z_ra_c, _ = _check_shape(knots, B_c, B_z_ra_c) + S, N, degree = B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 + extrema = _poly_root(c=B_z_ra_c, a_min=jnp.array([0]), a_max=jnp.diff(knots)) + assert extrema.shape == (S, N, degree - 1) + B_extrema = _poly_val(x=extrema, c=B_c[..., jnp.newaxis]) + B_zz_ra_extrema = _poly_val(x=extrema, c=_poly_der(B_z_ra_c)[..., jnp.newaxis]) + # Floating point error impedes consistent detection of bounce points riding + # extrema. Shift pitch values slightly to resolve this issue. + B_extrema = ( + jnp.where( + # Higher priority to shift down maxima than shift up minima, so identify + # near equality with zero as maxima. + B_zz_ra_extrema <= 0, + (1 - relative_shift) * B_extrema, + (1 + relative_shift) * B_extrema, + ) + .reshape(S, -1) + .T + ) + assert B_extrema.shape == (N * (degree - 1), S) + return B_extrema def affine_bijection(x, a, b): @@ -734,19 +709,18 @@ def grad_affine_bijection(a, b): def automorphism_arcsin(x): """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. - The arcsin transformation introduces a singularity that augments - the singularity in the bounce integral, so the quadrature scheme - used to evaluate the integral must work well on functions with large - derivative near the boundary. + The arcsin transformation introduces a singularity that augments the singularity + in the bounce integral, so the quadrature scheme used to evaluate the integral must + work well on functions with large derivative near the boundary. Parameters ---------- - x : Array + x : jnp.ndarray Points to transform. Returns ------- - y : Array + y : jnp.ndarray Transformed points. """ @@ -766,21 +740,18 @@ def grad_automorphism_arcsin(x): def automorphism_sin(x, s=0, m=10): """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. - The sin transformation is Lipschitz. - When used as the change of variable map for the bounce integral, the - Lipschitzness prevents generation of new singularities. - Furthermore, its derivative vanishes to zero slowly near the boundary, - which will suppress the large derivatives near the boundary of singular - integrals. + When used as the change of variable map for the bounce integral, the Lipschitzness + of the sin transformation prevents generation of new singularities. Furthermore, + its derivative vanishes to zero slowly near the boundary, which will suppress the + large derivatives near the boundary of singular integrals. - In effect, this automorphism pulls the mass of the bounce integral away - from the singularities, which should improve convergence of the quadrature - to the true integral, so long as the quadrature performs better on less - singular integrands. Pairs well with Gauss-Legendre quadrature. + In effect, this map pulls the mass of the integral away from the singularities, + which should improve convergence if the quadrature performs better on less singular + integrands. Pairs well with Gauss-Legendre quadrature. Parameters ---------- - x : Array + x : jnp.ndarray Points to transform. s : float Strength of derivative suppression, s ∈ [0, 1]. @@ -789,7 +760,7 @@ def automorphism_sin(x, s=0, m=10): Returns ------- - y : Array + y : jnp.ndarray Transformed points. """ @@ -819,24 +790,21 @@ def grad_automorphism_sin(x, s=0): def tanh_sinh(deg, m=10): """Tanh-Sinh quadrature. - Returns quadrature points xₖ and weights wₖ for the approximate evaluation - of the integral ∫₋₁¹ f(x) dx ≈ ∑ₖ wₖ f(xₖ). + Returns quadrature points xₖ and weights wₖ for the approximate evaluation of the + integral ∫₋₁¹ f(x) dx ≈ ∑ₖ wₖ f(xₖ). 
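The automorphisms above are composed with the affine map from [−1, 1] onto the bounce points to build the change of variable. A NumPy sketch of why that helps, on a model integral with the same inverse square root endpoint singularity, ∫₀¹ dx/√(1−x) = 2. The map y = sin(πx/2) is used here as a generic example of a sin-type automorphism and is not necessarily the exact map implemented in this module.

.. code-block:: python

    import numpy as np

    def f(z):
        # Same endpoint behavior as 1/sqrt(1 - λ|B|) near a bounce point.
        return 1 / np.sqrt(1 - z)

    a, b = 0.0, 1.0                          # stand-ins for the bounce points
    x, w = np.polynomial.legendre.leggauss(21)

    # Affine change of variable [-1, 1] -> [a, b] only.
    z = a + (x + 1) / 2 * (b - a)
    plain = np.sum(w * f(z)) * (b - a) / 2

    # Compose with y = sin(pi x / 2); its derivative vanishes at the endpoints,
    # which tames the growth of the integrand near the singular endpoint z = b.
    y = np.sin(np.pi * x / 2)
    dy = np.pi / 2 * np.cos(np.pi * x / 2)
    z = a + (y + 1) / 2 * (b - a)
    mapped = np.sum(w * f(z) * dy) * (b - a) / 2

    print(plain, mapped)  # the mapped estimate is essentially exact (2.0)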
Parameters ---------- deg: int Number of quadrature points. m : int - Number of machine epsilons used for floating point error buffer. - Larger implies less floating point error, but increases the - minimum achievable error. + Number of machine epsilons used for floating point error buffer. Larger implies + less floating point error, but increases the minimum achievable error. Returns ------- - x : Array - Quadrature points. - w : Array - Quadrature weights. + x, w : (jnp.ndarray, jnp.ndarray) + Quadrature points and weights. """ # buffer to avoid numerical instability @@ -852,43 +820,80 @@ def tanh_sinh(deg, m=10): return x, w -_repeated_docstring = """w : Array, shape(w.size, ) - Quadrature weights. - integrand : callable - This callable is the composition operator on the set of functions in ``f`` - that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. - It should accept the items in ``f`` as arguments as well as the additional - keyword arguments: ``B`` and ``pitch``. A quadrature will be performed to - approximate the bounce integral of ``integrand(*f, B=B, pitch=pitch)``. - f : list of Array, shape(P, S, knots.size, ) - Arguments to the callable ``integrand``. - These should be the functions in the integrand of the bounce integral - evaluated (or interpolated to) the nodes of the returned desc - coordinate grid. - B_sup_z : Array, shape(S, knots.size, ) - Contravariant field-line following toroidal component of magnetic field. - B : Array, shape(S, knots.size, ) - Norm of magnetic field. - B_z_ra : Array, shape(S, knots.size, ) - Norm of magnetic field derivative with respect to field-line following label. - pitch : Array, shape(P, S) - λ values to evaluate the bounce integral at each field line. - λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` - where in the latter the labels (ρ, α) are interpreted as the index into the - last axis that corresponds to that field line. - The first axis is the batch axis as usual. - knots : Array, shape(knots.size, ) - Field line-following ζ coordinates of spline knots. - method : str - Method of interpolation for functions contained in ``f``. - See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. - check : bool - Flag for debugging. +def _plot(Z, V, title_id=""): + """Plot V[λ, (ρ, α), (ζ₁, ζ₂)](Z).""" + for p in range(Z.shape[0]): + for s in range(Z.shape[1]): + is_quad_point_set = jnp.nonzero(~jnp.any(jnp.isnan(Z[p, s]), axis=-1))[0] + if not is_quad_point_set.size: + continue + fig, ax = plt.subplots() + ax.set_xlabel(r"Field line $\zeta$") + ax.set_ylabel(title_id) + ax.set_title( + f"Interpolation of {title_id} to quadrature points. Index {p},{s}." + ) + for i in is_quad_point_set: + ax.plot(Z[p, s, i], V[p, s, i], marker="o") + fig.text( + 0.01, + 0.01, + f"Each color specifies the set of points and values (ζ, {title_id}(ζ)) " + "used to evaluate an integral.", + ) + plt.tight_layout() + plt.show() + + +def _check_interpolation(Z, f, B_sup_z, B, B_z_ra, inner_product, plot): + """Check for floating point errors. + + Parameters + ---------- + Z : jnp.ndarray + Quadrature points at field line-following ζ coordinates. + f : list of jnp.ndarray + Arguments to the integrand interpolated to Z. + B_sup_z : jnp.ndarray + Contravariant field-line following toroidal component of magnetic field, + interpolated to Z. + B : jnp.ndarray + Norm of magnetic field, interpolated to Z. + B_z_ra : jnp.ndarray + Norm of magnetic field, derivative with respect to field-line following + coordinate, interpolated to Z. 
+ inner_product : jnp.ndarray + Output of ``_interpolatory_quadrature``. plot : bool - Whether to plot some things if check is true. + Whether to plot stuff. """ -_delimiter = "Returns" + is_not_quad_point = jnp.isnan(Z) + # We want quantities to evaluate as finite only at quadrature points + # for the integrals with boundaries at valid bounce points. + msg = "Interpolation failed." + assert jnp.all(jnp.isfinite(B_sup_z) != is_not_quad_point), msg + assert jnp.all(jnp.isfinite(B) != is_not_quad_point), msg + assert jnp.all(jnp.isfinite(B_z_ra)), msg + for f_i in f: + assert jnp.all(jnp.isfinite(f_i) != is_not_quad_point), msg + + msg = "|B| has vanished, violating the hairy ball theorem." + assert not jnp.isclose(B, 0).any(), msg + assert not jnp.isclose(B_sup_z, 0).any(), msg + + quad_resolution = Z.shape[-1] + # Number of integrals that we should be computing. + goal = jnp.sum(1 - is_not_quad_point) // quad_resolution + # Number of integrals that were actually computed. + actual = jnp.isfinite(inner_product).sum() + assert goal == actual, ( + f"Lost {goal - actual} integrals " + "from floating point or spline approximation error." + ) + if plot: + _plot(Z, B, title_id=r"$\vert B \vert$") + _plot(Z, B_sup_z, title_id=r"$ (B/\vert B \vert) \cdot e^{\zeta}$") _interp1d_vec = jnp.vectorize( @@ -897,11 +902,11 @@ def tanh_sinh(deg, m=10): @partial(jnp.vectorize, signature="(m),(n),(n),(n)->(m)", excluded={"method"}) -def _interp1d_vec_with_df(xq, x, f, fx, method="cubic"): +def _interp1d_vec_with_df(xq, x, f, fx, method): return interp1d(xq, x, f, method, fx=fx) -def _interpolatory_quadrature( +def _interpolate_and_integrate( Z, w, integrand, @@ -912,20 +917,22 @@ def _interpolatory_quadrature( pitch, knots, method, - method_B, + method_B="cubic", check=False, plot=False, ): - """Interpolate given functions to points Z and perform quadrature. + """Interpolate given functions to points ``Z`` and perform quadrature. Parameters ---------- - Z : Array, shape(P, S, Z.shape[2], w.size) + Z : jnp.ndarray + Shape (P, S, Z.shape[2], w.size). Quadrature points at field line-following ζ coordinates. Returns ------- - inner_product : Array, shape(Z.shape[:-1]) + inner_product : jnp.ndarray + Shape Z.shape[:-1]. Quadrature for every pitch along every field line. """ @@ -935,10 +942,10 @@ def _interpolatory_quadrature( assert Z.shape[-1] == w.size assert knots.size == B.shape[-1] assert B_sup_z.shape == B.shape == B_z_ra.shape - # Spline the integrand so that we can evaluate it at quadrature points - # without expensive coordinate mappings and root finding. - # Spline each function separately so that the singularity near the bounce - # points can be captured more accurately than can be by any polynomial. + # Spline the integrand so that we can evaluate it at quadrature points without + # expensive coordinate mappings and root finding. Spline each function separately so + # that the singularity near the bounce points can be captured more accurately than + # can be by any polynomial. shape = Z.shape Z = Z.reshape(Z.shape[0], Z.shape[1], -1) f = [_interp1d_vec(Z, knots, f_i, method=method).reshape(shape) for f_i in f] @@ -946,103 +953,23 @@ def _interpolatory_quadrature( B = _interp1d_vec_with_df(Z, knots, B, B_z_ra, method=method_B).reshape(shape) pitch = jnp.expand_dims(pitch, axis=(2, 3) if len(shape) == 4 else 2) # Assuming that the integrand is a well-behaved function of some interpolation - # points Z, it should evaluate as NaN only if Z is NaN. 
This condition needs to - # be enforced explicitly due to floating point and interpolation error. - # In the context of bounce integrals, the √(1 − λ |B|) terms necessitate this. - # For interpolation error in |B| may yield λ |B| > 1 at quadrature points - # between bounce points. Don't suppress inf as that indicates catastrophic - # floating point error. + # points Z, it should evaluate as NaN only if Z is NaN. This condition needs to be + # enforced explicitly due to floating point and interpolation error. In the context + # of bounce integrals, the √(1 − λ |B|) terms necessitate this as interpolation + # error in |B| may yield λ|B| > 1 at quadrature points between bounce points. Don't + # suppress inf as that indicates catastrophic floating point error. inner_product = jnp.dot( jnp.nan_to_num(integrand(*f, B=B, pitch=pitch), posinf=jnp.inf, neginf=-jnp.inf) / b_sup_z, w, ) if check: - Z = Z.reshape(shape) - _check_interpolation(Z, f, b_sup_z, B, B_z_ra, inner_product) - if plot: - _plot(Z, B, id=r"$\vert B \vert$") - _plot(Z, b_sup_z, id=r"$ (B/\vert B \vert) \cdot e^{\zeta}$") - # Note to developer if debugging: consider plotting argument to - # inner_product to see how singular the integrand is before/after - # change of variables. + _check_interpolation( + Z.reshape(shape), f, b_sup_z, B, B_z_ra, inner_product, plot + ) return inner_product -_interpolatory_quadrature.__doc__ = _interpolatory_quadrature.__doc__.replace( - _delimiter, _repeated_docstring + _delimiter, 1 -) - - -def _check_interpolation(Z, f, B_sup_z, B, B_z_ra, inner_product): - """Check for floating point errors. - - Parameters - ---------- - Z : Array - Quadrature points at field line-following ζ coordinates. - f : iterable of Array, shape(Z.shape) - Arguments to the integrand interpolated to Z. - B_sup_z : Array, shape(Z.shape) - Contravariant field-line following toroidal component of magnetic field, - interpolated to Z. - B : Array, shape(Z.shape) - Norm of magnetic field, interpolated to Z. - B_z_ra : Array, shape(Z.shape) - Norm of magnetic field derivative with respect to field-line following label, - interpolated to Z. - inner_product : Array - Output of ``_interpolatory_quadrature``. - - """ - is_not_quad_point = jnp.isnan(Z) - # We want quantities to evaluate as finite only at quadrature points - # for the integrals with boundaries at valid bounce points. - msg = "Interpolation failed." - assert jnp.all(jnp.isfinite(B_sup_z) ^ is_not_quad_point), msg - assert jnp.all(jnp.isfinite(B) ^ is_not_quad_point), msg - assert jnp.all(jnp.isfinite(B_z_ra)), msg - for f_i in f: - assert jnp.all(jnp.isfinite(f_i) ^ is_not_quad_point), msg - - msg = "|B| has vanished, violating the hairy ball theorem." - assert not jnp.isclose(B, 0).any(), msg - assert not jnp.isclose(B_sup_z, 0).any(), msg - - quad_resolution = Z.shape[-1] - # Number of integrals that we should be computing. - goal = jnp.sum(1 - is_not_quad_point) // quad_resolution - # Number of integrals that were actually computed. - actual = jnp.isfinite(inner_product).sum() - assert goal == actual, ( - f"Lost {goal - actual} integrals " - "from floating point or spline approximation error." 
- ) - - -def _plot(Z, V, id=""): - """Plot V[λ, (ρ, α), (ζ₁, ζ₂)](Z).""" - for p in range(Z.shape[0]): - for s in range(Z.shape[1]): - is_quad_point_set = jnp.nonzero(~jnp.any(jnp.isnan(Z[p, s]), axis=-1))[0] - if not is_quad_point_set.size: - continue - fig, ax = plt.subplots() - ax.set_xlabel(r"Field line $\zeta$") - ax.set_ylabel(id) - ax.set_title(f"Interpolation of {id} to quadrature points. Index {p},{s}.") - for i in is_quad_point_set: - ax.plot(Z[p, s, i], V[p, s, i], marker="o") - fig.text( - 0.01, - 0.01, - f"Each color specifies the set of points and values (ζ, {id}(ζ)) " - "used to evaluate an integral.", - ) - plt.tight_layout() - plt.show() - - def _bounce_quadrature( bp1, bp2, @@ -1064,19 +991,58 @@ def _bounce_quadrature( Parameters ---------- - bp1, bp2 : Array, Array - Each should have shape(P, S, bp1.shape[-1]). - The field line-following ζ coordinates of bounce points for a given pitch - along a field line. The pairs bp1[i, j, k] and bp2[i, j, k] form left - and right integration boundaries, respectively, for the bounce integrals. - x : Array, shape(w.size, ) - Quadrature points in [-1, 1]. + bp1, bp2 : jnp.ndarray, jnp.ndarray + Shape (P, S, bp1.shape[-1]). + The field line-following ζ coordinates of bounce points for a given pitch along + a field line. The pairs ``bp1[i,j,k]`` and ``bp2[i,j,k]`` form left and right + integration boundaries, respectively, for the bounce integrals. + x, w : jnp.ndarray, jnp.ndarray + Shape (w.size, ). + Quadrature points in [-1, 1] and weights. + integrand : callable + The composition operator on the set of functions in ``f`` that maps the + functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the + arrays in ``f`` as arguments as well as the additional keyword arguments: + ``B`` and ``pitch``. A quadrature will be performed to approximate the + bounce integral of ``integrand(*f,B=B,pitch=pitch)``. + f : list of jnp.ndarray + Shape (S, knots.size) or (S * knots.size). + Arguments to the callable ``integrand``. These should be the scalar-valued + functions in the bounce integrand evaluated on the DESC grid. + B_sup_z : jnp.ndarray + Shape (S, knots.size) or (S * knots.size). + Contravariant field-line following toroidal component of magnetic field. + B : jnp.ndarray + Shape (S, knots.size) or (S * knots.size). + Norm of magnetic field. + B_z_ra : jnp.ndarray + Shape (S, knots.size) or (S * knots.size). + Norm of magnetic field, derivative with respect to field-line following + coordinate. + pitch : jnp.ndarray + Shape (P, S). + λ values to evaluate the bounce integral at each field line. + knots : jnp.ndarray + Shape (knots.size, ). + Field line following coordinate values where ``B_sup_z``, ``B``, and ``B_z_ra``, + and the quantities in ``f`` were evaluated. + method : str + Method of interpolation for functions contained in ``f``. + See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. + Default is akima spline. + method_B : str + Method of interpolation for |B|. Default is C1 cubic Hermite spline. + batch : bool + Whether to perform computation in a batched manner. Default is true. + check : bool + Flag for debugging. Returns ------- - result : Array, shape(P, S, bp1.shape[-1]) - First axis enumerates pitch values. Second axis enumerates the field - lines. Last axis enumerates the bounce integrals. + result : jnp.ndarray + Shape (P, S, bp1.shape[-1]). + First axis enumerates pitch values. Second axis enumerates the field lines. + Last axis enumerates the bounce integrals. 
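Putting the pieces together for a single bounce pair, outside the batched JAX machinery: map the fixed quadrature nodes onto (ζ₁, ζ₂), evaluate the integrand through a spline of the data, and scale by the Jacobian of the affine map. A SciPy sketch with a model field; the automorphism is omitted here for brevity.

.. code-block:: python

    import numpy as np
    from scipy.interpolate import CubicSpline

    knots = np.linspace(0, 2 * np.pi, 100)
    B = CubicSpline(knots, 1 + 0.5 * np.cos(knots))   # model |B| data on knots
    pitch = 0.8
    bp1, bp2 = B.solve(1 / pitch, extrapolate=False)  # single bounce pair here

    x, w = np.polynomial.legendre.leggauss(31)
    Z = bp1 + (x + 1) / 2 * (bp2 - bp1)               # affine map of the nodes
    # Interpolate |B| to the quadrature points and evaluate the integrand there;
    # the nodes exclude the endpoints, so 1 - λ|B| stays positive.
    integrand = 1 / np.sqrt(1 - pitch * B(Z))
    result = np.dot(integrand, w) * (bp2 - bp1) / 2   # Jacobian of the affine map
    print(result)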
""" errorif(bp1.ndim != 3 or bp1.shape != bp2.shape) @@ -1085,23 +1051,13 @@ def _bounce_quadrature( S = B.shape[0] if not isinstance(f, (list, tuple)): f = [f] - - def group_data_by_field_line_and_pitch(g): - msg = ( - "Should have at most three dimensions, in which case the first axis " - "is interpreted as the batch axis, which enumerates the evaluation " - "of the function at particular pitch values." - ) - errorif(g.ndim > 3, msg=msg) - return g.reshape(-1, S, knots.size) - - f = map(group_data_by_field_line_and_pitch, f) + # group data by field line + f = map(lambda f_i: f_i.reshape(S, knots.size), f) # Integrate and complete the change of variable. if batch: - Z = affine_bijection(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) - result = _interpolatory_quadrature( - Z, + result = _interpolate_and_integrate( + affine_bijection(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]), w, integrand, f, @@ -1121,9 +1077,8 @@ def group_data_by_field_line_and_pitch(g): def loop(bp): bp1, bp2 = bp - z = affine_bijection(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) - return None, _interpolatory_quadrature( - z, + return None, _interpolate_and_integrate( + affine_bijection(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]), w, integrand, f, @@ -1138,19 +1093,17 @@ def loop(bp): plot=False, ) - _, result = imap(loop, (jnp.moveaxis(bp1, -1, 0), jnp.moveaxis(bp2, -1, 0))) - result = jnp.moveaxis(result, source=0, destination=-1) + result = jnp.moveaxis( + imap(loop, (jnp.moveaxis(bp1, -1, 0), jnp.moveaxis(bp2, -1, 0)))[1], + source=0, + destination=-1, + ) result = result * grad_affine_bijection(bp1, bp2) assert result.shape == (pitch.shape[0], S, bp1.shape[-1]) return result -_bounce_quadrature.__doc__ = _bounce_quadrature.__doc__.replace( - _delimiter, _repeated_docstring + _delimiter, 1 -) - - def bounce_integral( B_sup_z, B, @@ -1164,94 +1117,92 @@ def bounce_integral( plot=False, **kwargs, ): - """Returns a method to compute the bounce integral of any quantity. + """Returns a method to compute bounce integrals. The bounce integral is defined as ∫ f(ℓ) dℓ, where dℓ parameterizes the distance along the field line in meters, λ is a constant proportional to the magnetic moment over energy, |B| is the norm of the magnetic field, f(ℓ) is the quantity to integrate along the field line, - and the boundaries of the integral are bounce points, ζ₁, ζ₂, such that - (λ |B|)(ζᵢ) = 1. - For a particle with fixed λ, bounce points are defined to be the location - on the field line such that the particle's velocity parallel to the magnetic - field is zero. + and the boundaries of the integral are bounce points ζ₁, ζ₂ s.t. λ|B|(ζᵢ) = 1. - The bounce integral is defined up to a sign. - We choose the sign that corresponds the particle's guiding center trajectory - traveling in the direction of increasing field-line-following label. + For a particle with fixed λ, bounce points are defined to be the location on the + field line such that the particle's velocity parallel to the magnetic field is zero. + The bounce integral is defined up to a sign. We choose the sign that corresponds to + the particle's guiding center trajectory traveling in the direction of increasing + field-line-following coordinate ζ. Notes ----- - This function requires that the quantities ``B_sup_z``, ``B``, ``B_z_ra``, - and the quantities in ``f`` passed to the returned method can be separated - into field lines via ``.reshape(S, knots.size)``. 
- One way to satisfy this is to pass in quantities computed on the grid - returned from the method ``desc.equilibrium.coords.rtz_grid``. - See ``tests.test_bounce_integral.test_bounce_integral_checks`` for example use. + The quantities ``B_sup_z``, ``B``, ``B_z_ra``, and those in ``f`` supplied to the + returned method must be separable into data evaluated along particular field lines + via ``.reshape(S,knots.size)``. One way to satisfy this is to compute stuff on the + grid returned from the method ``desc.equilibrium.coords.rtz_grid``. See + ``tests.test_bounce_integral.test_bounce_integral_checks`` for example use. Parameters ---------- - B_sup_z : Array, shape(S, knots.size, ) + B_sup_z : jnp.ndarray + Shape (S, knots.size) or (S * knots.size). Contravariant field-line following toroidal component of magnetic field. - B^ζ(ρ, α, ζ) is specified by ``B_sup_z[(ρ, α), ζ]``, where in the latter - the labels (ρ, α) are interpreted as the index into the first axis that - corresponds to that field line. - B : Array, shape(S, knots.size, ) - Norm of magnetic field. - |B|(ρ, α, ζ) is specified by ``B[(ρ, α), ζ]``, where in the latter - the labels (ρ, α) are interpreted as the index into the first axis that - corresponds to that field line. - B_z_ra : Array, shape(S, knots.size, ) - Norm of magnetic field derivative with respect to field-line following label. - ∂|B|/∂_ζ(ρ, α, ζ) is specified by ``B_z_ra[(ρ, α), ζ]``, where in the latter - the labels (ρ, α) are interpreted as the index into the first axis that + B^ζ(ρ, α, ζ) is specified by ``B_sup_z[(ρ,α),ζ]``, where in the latter the + labels (ρ,α) are interpreted as the index into the first axis that corresponds + to that field line. + B : jnp.ndarray + Shape (S, knots.size) or (S * knots.size). + Norm of magnetic field. |B|(ρ, α, ζ) is specified by ``B[(ρ,α),ζ]``, where in + the latter the labels (ρ,α) are interpreted as the index into the first axis + that corresponds to that field line. + B_z_ra : jnp.ndarray + Shape (S, knots.size) or (S * knots.size). + Norm of magnetic field, derivative with respect to field-line following + coordinate. ∂|B|/∂_ζ(ρ, α, ζ) is specified by ``B_z_ra[(ρ,α),ζ]``, where in the + latter the labels (ρ,α) are interpreted as the index into the first axis that corresponds to that field line. - knots : Array, shape(knots.size, ) - Field line following coordinate values at which ``B_sup_z``, ``B``, and - ``B_z_ra`` were evaluated. These knots are used to compute a spline of |B| - and interpolate the integrand. A good reference density is 100 knots per - toroidal transit. - quad : (Array, Array) - Quadrature points xₖ and weights wₖ for the approximate evaluation - of an integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Default is 21 points. - automorphism : (callable, callable) or None + knots : jnp.ndarray + Shape (knots.size, ). + Field line following coordinate values where ``B_sup_z``, ``B``, and ``B_z_ra``, + and those in ``f`` supplied to the returned method were evaluated. + These knots are used to compute a spline of |B| and interpolate the integrand. + A good reference density is 100 knots per toroidal transit. + quad : (jnp.ndarray, jnp.ndarray) + Quadrature points xₖ and weights wₖ for the approximate evaluation of an + integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Default is 21 points. + automorphism : (Callable, Callable) or None The first callable should be an automorphism of the real interval [-1, 1]. - The second callable should be the derivative of the first. 
- The supplied automorphism is composed with the affine bijection that maps - [-1, 1] to the bounce points. The resulting map defines a change of - variable for the bounce integral. The choice made for the automorphism can - augment or suppress singularities. - Keep this in mind when choosing the quadrature method. + The second callable should be the derivative of the first. This map defines a + change of variable for the bounce integral. The choice made for the automorphism + can affect the performance of the quadrature method. B_ref : float Optional. Reference magnetic field strength for normalization. L_ref : float Optional. Reference length scale for normalization. check : bool - Flag for debugging. + Flag for debugging. Must be false for jax transformations. plot : bool - Whether to plot some things if check is true. + Whether to plot stuff if ``check`` is true. Returns ------- bounce_integrate : callable This callable method computes the bounce integral ∫ f(ℓ) dℓ for every - specified field line ℓ for every λ value in ``pitch``. - spline : dict - knots : Array, shape(knots.size, ) + specified field line for every λ value in ``pitch``. + spline : dict of jnp.ndarray + knots : jnp.ndarray + Shape (knots.size, ). Field line-following ζ coordinates of spline knots. - B.c : Array, shape(4, S, knots.size - 1) + B_c : jnp.ndarray + Shape (4, S, knots.size - 1). Polynomial coefficients of the spline of |B| in local power basis. - First axis enumerates the coefficients of power series. - Second axis enumerates the splines along the field lines. - Last axis enumerates the polynomials of the spline along a particular - field line. - B_z_ra.c : Array, shape(3, S, knots.size - 1) + First axis enumerates the coefficients of power series. Second axis + enumerates the splines along the field lines. Last axis enumerates the + polynomials that compose the spline along a particular field line. + B_z_ra.c : jnp.ndarray + Shape (3, S, knots.size - 1). Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. - First axis enumerates the coefficients of power series. - Second axis enumerates the splines along the field lines. - Last axis enumerates the polynomials of the spline along a particular - field line. + First axis enumerates the coefficients of power series. Second axis + enumerates the splines along the field lines. Last axis enumerates the + polynomials that compose spline along a particular field line. """ B_sup_z = B_sup_z * L_ref / B_ref @@ -1290,37 +1241,34 @@ def bounce_integrate(integrand, f, pitch, method="akima", batch=True): Parameters ---------- integrand : callable - This callable is the composition operator on the set of functions in ``f`` - that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. - It should accept the items in ``f`` as arguments as well as the additional - keyword arguments: ``B`` and ``pitch``. A quadrature will be performed to - approximate the bounce integral of ``integrand(*f, B=B, pitch=pitch)``. - f : list of Array, shape(..., S, knots.size) - Arguments to the callable ``integrand``. - These should be the functions in the integrand of the bounce integral - evaluated (or interpolated to) DESC grid. - Should have at most three dimensions, in which case the first axis - is interpreted as the batch axis, which enumerates the evaluation - of the function at particular pitch values. - pitch : Array, shape(P, S) - λ values to evaluate the bounce integral at each field line. 
- λ(ρ, α) is specified by ``pitch[..., (ρ, α)]`` - where in the latter the labels (ρ, α) are interpreted as the index into - the last axis that corresponds to that field line. - If two-dimensional, the first axis is the batch axis as usual. + The composition operator on the set of functions in ``f`` that maps the + functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the + arrays in ``f`` as arguments as well as the additional keyword arguments: + ``B`` and ``pitch``. A quadrature will be performed to approximate the + bounce integral of ``integrand(*f,B=B,pitch=pitch)``. + f : list of jnp.ndarray + Shape (S, knots.size) or (S * knots.size). + Arguments to the callable ``integrand``. These should be the scalar-valued + functions in the bounce integrand evaluated on the DESC grid. + pitch : jnp.ndarray + Shape (P, S). + λ values to evaluate the bounce integral at each field line. λ(ρ,α) is + specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are + interpreted as the index into the last axis that corresponds to that field + line. If two-dimensional, the first axis is the batch axis. method : str Method of interpolation for functions contained in ``f``. - Defaults to akima spline to suppress oscillation. See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. + Default is akima spline. batch : bool - Whether to perform computation in a batched manner. - If you can afford the memory expense, batched is more efficient. + Whether to perform computation in a batched manner. Default is true. Returns ------- - result : Array, shape(P, S, (knots.size - 1) * degree) - First axis enumerates pitch values. Second axis enumerates the field - lines. Last axis enumerates the bounce integrals. + result : jnp.ndarray + Shape (P, S, (knots.size - 1) * degree). + First axis enumerates pitch values. Second axis enumerates the field lines. + Last axis enumerates the bounce integrals. """ bp1, bp2 = bounce_points(pitch, knots, B_c, B_z_ra_c, check, plot) diff --git a/devtools/dev-requirements_conda.yml b/devtools/dev-requirements_conda.yml index ce379988c8..19ca612fc4 100644 --- a/devtools/dev-requirements_conda.yml +++ b/devtools/dev-requirements_conda.yml @@ -15,7 +15,7 @@ dependencies: - pip: # Conda only parses a single list of pip requirements. # If two pip lists are given, all but the last list is skipped. - - interpax >= 0.3.2 + - interpax >= 0.3.3 - jax[cpu] >= 0.3.2, < 0.5.0 - nvgpu - orthax diff --git a/requirements.txt b/requirements.txt index ef4faaac42..fa5b86bba9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ colorama h5py >= 3.0.0, < 4.0 -interpax >= 0.3.2 +interpax >= 0.3.3 jax[cpu] >= 0.3.2, < 0.5.0 matplotlib >= 3.5.0, < 4.0.0 mpmath >= 1.0.0, < 2.0 diff --git a/requirements_conda.yml b/requirements_conda.yml index 03e0b08812..da2996429a 100644 --- a/requirements_conda.yml +++ b/requirements_conda.yml @@ -14,7 +14,7 @@ dependencies: - pip: # Conda only parses a single list of pip requirements. # If two pip lists are given, all but the last list is skipped. 
- - interpax >= 0.3.2 + - interpax >= 0.3.3 - jax[cpu] >= 0.3.2, < 0.5.0 - nvgpu - orthax From 433397328594e9dbe2f5f1f495734d04a643ac34 Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 17 Jun 2024 19:40:57 -0500 Subject: [PATCH 179/241] Fix sign of B^zeta and B_z_ra --- desc/compute/_field.py | 2 +- desc/compute/bounce_integral.py | 116 +++++++++++++++++++++++--------- tests/test_bounce_integral.py | 18 ++--- 3 files changed, 94 insertions(+), 42 deletions(-) diff --git a/desc/compute/_field.py b/desc/compute/_field.py index 3e68276674..fced03f261 100644 --- a/desc/compute/_field.py +++ b/desc/compute/_field.py @@ -2328,7 +2328,7 @@ def _B_mag_z(params, transforms, profiles, data, **kwargs): ) def _B_mag_alpha(params, transforms, profiles, data, **kwargs): # constant ρ and ζ - data["|B|_alpha"] = safediv(data["|B|_t"], data["alpha_t"]) + data["|B|_alpha"] = data["|B|_t"] / data["alpha_t"] return data diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index d871ee72a6..cde55fbdc2 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -8,7 +8,7 @@ from desc.backend import flatnonzero, imap, jnp, put_along_axis, take from desc.compute.utils import safediv -from desc.utils import errorif +from desc.utils import errorif, warnif @partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={2, 3}) @@ -55,7 +55,7 @@ def take_mask(a, mask, size=None, fill_value=None): def _filter_not_nan(a): """Filter out nan from ``a`` while asserting nan is padded at right.""" is_nan = jnp.isnan(a) - assert jnp.array_equal(is_nan, jnp.sort(is_nan, axis=-1)), "take_mask() has a bug." + assert jnp.array_equal(is_nan, jnp.sort(is_nan, axis=-1)) return a[~is_nan] @@ -305,9 +305,9 @@ def plot_field_line( pitch : jnp.ndarray λ value. bp1 : jnp.ndarray - Bounce points with ∂|B|/∂_ζ <= 0. + Bounce points with (∂|B|/∂ζ)|ρ,α <= 0. bp2 : jnp.ndarray - Bounce points with ∂|B|/∂_ζ >= 0. + Bounce points with (∂|B|/∂ζ)|ρ,α >= 0. start : float Minimum ζ on plot. stop : float @@ -354,7 +354,7 @@ def add(lines): add(ax.plot(z, B(z), label=r"$\vert B \vert (\zeta)$")) if pitch is not None: - b = 1 / jnp.atleast_1d(pitch) + b = jnp.reciprocal(pitch) for val in b: add( ax.axhline( @@ -451,7 +451,7 @@ def _check_shape(knots, B_c, B_z_ra_c, pitch=None): Polynomial coefficients of the spline of |B| in local power basis. B_z_ra_c : jnp.ndarray Shape (B_c.shape[0] - 1, *B_c.shape[1:]). - Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. + Polynomial coefficients of the spline of (∂|B|/∂ζ)|ρ,α in local power basis. pitch : jnp.ndarray Shape (P, S). λ values to evaluate the bounce integral at each field line. @@ -489,7 +489,7 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=False, **kwargs line. If two-dimensional, the first axis is the batch axis. knots : jnp.ndarray Shape (knots.size, ). - Field line-following ζ coordinates of spline knots. + Field line-following ζ coordinates of spline knots. Must be strictly increasing. B_c : jnp.ndarray Shape (B_c.shape[0], S, knots.size - 1). Polynomial coefficients of the spline of |B| in local power basis. @@ -498,7 +498,7 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=False, **kwargs polynomials that compose the spline along a particular field line. B_z_ra_c : jnp.ndarray Shape (B_c.shape[0] - 1, *B_c.shape[1:]). - Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. + Polynomial coefficients of the spline of (∂|B|/∂ζ)|ρ,α in local power basis. 
First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials that compose the spline along a particular field line. @@ -527,7 +527,7 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=False, **kwargs P, S, N, degree = pitch.shape[0], B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 intersect = _poly_root( c=B_c, - k=1 / pitch[..., jnp.newaxis], + k=jnp.reciprocal(pitch)[..., jnp.newaxis], a_min=jnp.array([0]), a_max=jnp.diff(knots), sort=True, @@ -624,7 +624,7 @@ def get_pitch(min_B, max_B, num, relative_shift=1e-6): # extrema. Shift values slightly to resolve this issue. min_B = (1 + relative_shift) * min_B max_B = (1 - relative_shift) * max_B - pitch = composite_linspace(1 / jnp.stack([max_B, min_B]), num) + pitch = composite_linspace(jnp.reciprocal(jnp.stack([max_B, min_B])), num) assert pitch.shape == (num + 2, *pitch.shape[1:]) return pitch @@ -633,7 +633,7 @@ def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): """Return |B| values at extrema. The quantity 1 / √(1 − λ |B|) common to bounce integrals is singular with - strength ~ |ζ_b₂ - ζ_b₁| / |∂|B|/∂_ζ|. Therefore, an integral over the pitch + strength ~ |ζ_b₂ - ζ_b₁| / |(∂|B|/∂ζ)|ρ,α|. Therefore, an integral over the pitch angle λ may have mass concentrated near λ = 1 / |B|(ζ*) where |B|(ζ*) is a local maximum. Depending on the quantity to integrate, it may be beneficial to place quadrature points at these regions. @@ -642,7 +642,7 @@ def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): ---------- knots : jnp.ndarray Shape (knots.size, ). - Field line-following ζ coordinates of spline knots. + Field line-following ζ coordinates of spline knots. Must be strictly increasing. B_c : jnp.ndarray Shape (B_c.shape[0], S, knots.size - 1). Polynomial coefficients of the spline of |B| in local power basis. @@ -651,7 +651,7 @@ def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): polynomials that compose the spline along a particular field line. B_z_ra_c : jnp.ndarray Shape (B_c.shape[0] - 1, *B_c.shape[1:]). - Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. + Polynomial coefficients of the spline of (∂|B|/∂ζ)|ρ,α in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials that compose the spline along a particular field line. @@ -949,6 +949,7 @@ def _interpolate_and_integrate( shape = Z.shape Z = Z.reshape(Z.shape[0], Z.shape[1], -1) f = [_interp1d_vec(Z, knots, f_i, method=method).reshape(shape) for f_i in f] + # TODO: Pass in derivative and use method_B. b_sup_z = _interp1d_vec(Z, knots, B_sup_z / B, method=method).reshape(shape) B = _interp1d_vec_with_df(Z, knots, B, B_z_ra, method=method_B).reshape(shape) pitch = jnp.expand_dims(pitch, axis=(2, 3) if len(shape) == 4 else 2) @@ -1024,8 +1025,9 @@ def _bounce_quadrature( λ values to evaluate the bounce integral at each field line. knots : jnp.ndarray Shape (knots.size, ). - Field line following coordinate values where ``B_sup_z``, ``B``, and ``B_z_ra``, - and the quantities in ``f`` were evaluated. + Field line following coordinate values where ``B_sup_z``, ``B``, ``B_z_ra``, and + those in ``f`` supplied to the returned method were evaluated. Must be strictly + increasing. method : str Method of interpolation for functions contained in ``f``. See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. 
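# For reference, a minimal single-field-line sketch of the coefficient layout these
# docstrings describe, assuming scipy's CubicHermiteSpline (its 1-D ``.c`` layout
# matches the per-field-line layout documented above; the ``S`` axis in the shapes
# above comes from batching many field lines):
import numpy as np
from scipy.interpolate import CubicHermiteSpline

knots = np.linspace(0, 2 * np.pi, 10)      # must be strictly increasing
B = 2 + np.cos(knots)                      # |B| samples along one field line
B_z = -np.sin(knots)                       # d|B|/dζ samples along the same line
spline = CubicHermiteSpline(knots, B, B_z)
B_c = spline.c                             # (4, knots.size - 1), local power basis
B_z_ra_c = spline.derivative().c           # (3, knots.size - 1)
assert B_c.shape == (4, knots.size - 1)
assert B_z_ra_c.shape == (3, knots.size - 1)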
@@ -1052,7 +1054,7 @@ def _bounce_quadrature( if not isinstance(f, (list, tuple)): f = [f] # group data by field line - f = map(lambda f_i: f_i.reshape(S, knots.size), f) + f = map(lambda f_i: f_i.reshape(-1, knots.size), f) # Integrate and complete the change of variable. if batch: @@ -1104,6 +1106,51 @@ def loop(bp): return result +def _fix_sign_and_normalize(B_sup_z, B, B_z_ra, B_ref=1, L_ref=1, check=False): + """Correct signs for consistency with strictly increasing zeta requirement. + + Parameters + ---------- + B_sup_z : jnp.ndarray + Shape (S, knots.size) or (S * knots.size). + Contravariant field-line following toroidal component of magnetic field. + B : jnp.ndarray + Shape (S, knots.size) or (S * knots.size). + Norm of magnetic field. + B_z_ra : jnp.ndarray + Shape (S, knots.size) or (S * knots.size). + Norm of magnetic field, derivative with respect to field-line following + coordinate. + B_ref : float + Optional. Reference magnetic field strength for normalization. + L_ref : float + Optional. Reference length scale for normalization. + check : bool + Flag for debugging. Must be false for jax transformations. + + Returns + ------- + B_sup_z, B, B_z_ra : (jnp.ndarray, jnp.ndarray, jnp.ndarray) + Same as inputs but with corrected sign and normalized by length scales. + + """ + warnif( + check and jnp.any(jnp.sign(B_sup_z) <= 0), + msg="(∂ℓ/∂ζ)|ρ,a > 0 is required. Correcting signs of B^ζ and (∂|B|/∂ζ)|ρ,α.", + ) + # Strictly increasing zeta knots enforces dζ > 0. + # To retain dℓ = (|B|/B^ζ) dζ > 0 after fixing dζ > 0, we require B^ζ = B⋅∇ζ > 0. + # This is equivalent to changing the sign of ∇ζ (or [∂/∂ζ]|ρ,a). + # Recall dζ = ∇ζ⋅dR, implying 1 = ∇ζ⋅(e_ζ|ρ,a). Hence, a sign change in ∇ζ + # induces the same sign change in e_ζ|ρ,a to retain the metric identity. For any + # quantity f, we may write df = ∇f⋅dR, implying ∂f/∂ζ|ρ,α = ∇f ⋅ e_ζ|ρ,a. Therefore, + # a sign change in e_ζ|ρ,a induces the same sign change in ∂f/∂ζ|ρ,α. + B_z_ra = B_z_ra / B_ref * jnp.sign(B_sup_z) + B_sup_z = jnp.abs(B_sup_z) * L_ref / B_ref + B = B / B_ref + return B_sup_z, B, B_z_ra + + def bounce_integral( B_sup_z, B, @@ -1134,6 +1181,12 @@ def bounce_integral( Notes ----- + The strictly increasing knots requirement enforces dζ > 0, which constraints the + signs of B^ζ and ∂/∂ζ. The signs of B^ζ and (∂|B|/∂ζ)|ρ,α will automatically be + corrected to match this requirement, but this correction cannot be automated for + arbitrary f(ℓ) in the integrand. Pass in ``check=True`` to be notified if the signs + for B^ζ and (∂|B|/∂ζ)|ρ,α required correction. + The quantities ``B_sup_z``, ``B``, ``B_z_ra``, and those in ``f`` supplied to the returned method must be separable into data evaluated along particular field lines via ``.reshape(S,knots.size)``. One way to satisfy this is to compute stuff on the @@ -1156,15 +1209,15 @@ def bounce_integral( B_z_ra : jnp.ndarray Shape (S, knots.size) or (S * knots.size). Norm of magnetic field, derivative with respect to field-line following - coordinate. ∂|B|/∂_ζ(ρ, α, ζ) is specified by ``B_z_ra[(ρ,α),ζ]``, where in the - latter the labels (ρ,α) are interpreted as the index into the first axis that - corresponds to that field line. + coordinate. (∂|B|/∂ζ)|ρ,α(ρ, α, ζ) is specified by ``B_z_ra[(ρ,α),ζ]``, where in + the latter the labels (ρ,α) are interpreted as the index into the first axis + that corresponds to that field line. knots : jnp.ndarray Shape (knots.size, ). 
- Field line following coordinate values where ``B_sup_z``, ``B``, and ``B_z_ra``, - and those in ``f`` supplied to the returned method were evaluated. - These knots are used to compute a spline of |B| and interpolate the integrand. - A good reference density is 100 knots per toroidal transit. + Field line following coordinate values where ``B_sup_z``, ``B``, ``B_z_ra``, and + those in ``f`` supplied to the returned method were evaluated. Must be strictly + increasing. These knots are used to compute a spline of |B| and interpolate the + integrand. A good reference density is 100 knots per toroidal transit. quad : (jnp.ndarray, jnp.ndarray) Quadrature points xₖ and weights wₖ for the approximate evaluation of an integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Default is 21 points. @@ -1197,23 +1250,22 @@ def bounce_integral( First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials that compose the spline along a particular field line. - B_z_ra.c : jnp.ndarray + B_z_ra_c : jnp.ndarray Shape (3, S, knots.size - 1). - Polynomial coefficients of the spline of ∂|B|/∂_ζ in local power basis. + Polynomial coefficients of the spline of (∂|B|/∂ζ)|ρ,α in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. Last axis enumerates the - polynomials that compose spline along a particular field line. + polynomials that compose the spline along a particular field line. """ - B_sup_z = B_sup_z * L_ref / B_ref - B = B / B_ref - B_z_ra = B_z_ra / B_ref - # group data by field line - B_sup_z, B, B_z_ra = (g.reshape(-1, knots.size) for g in [B_sup_z, B, B_z_ra]) - errorif(not (B_sup_z.shape == B.shape == B_z_ra.shape)) + B_sup_z, B, B_z_ra = ( + f.reshape(-1, knots.size) # group data by field line + for f in _fix_sign_and_normalize(B_sup_z, B, B_z_ra, B_ref, L_ref, check) + ) # Compute splines. monotonic = kwargs.pop("monotonic", False) + # Interpax interpolation requires strictly increasing knots. B_c = ( PchipInterpolator(knots, B, axis=-1, check=check).c if monotonic diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 39107a7af5..2e6d9271bd 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -33,7 +33,6 @@ take_mask, tanh_sinh, ) -from desc.compute.utils import safediv from desc.equilibrium import Equilibrium from desc.equilibrium.coords import rtz_grid from desc.examples import get @@ -426,7 +425,7 @@ def test_bounce_quadrature(): rtol = 1e-4 def integrand(B, pitch): - return 1 / jnp.sqrt(1 - pitch * m * B) + return jnp.reciprocal(jnp.sqrt(1 - pitch * m * B)) bp1 = -np.pi / 2 * v bp2 = -bp1 @@ -477,14 +476,15 @@ def test_bounce_integral_checks(): def numerator(g_zz, B, pitch): f = (1 - pitch * B) * g_zz - return safediv(f, jnp.sqrt(1 - pitch * B)) + return f / jnp.sqrt(1 - pitch * B) def denominator(B, pitch): - return safediv(1, jnp.sqrt(1 - pitch * B)) + return jnp.reciprocal(jnp.sqrt(1 - pitch * B)) + # Usually it's better to get values with get_pitch instead of get_extrema. pitch = 1 / get_extrema(**spline) num = bounce_integrate(numerator, data["g_zz"], pitch) - # Can reduce memory usage by specifying by not batching. + # Can reduce memory usage by not batching. den = bounce_integrate(denominator, [], pitch, batch=False) avg = num / den assert np.isfinite(avg).any() @@ -493,10 +493,10 @@ def denominator(B, pitch): avg = np.nansum(avg, axis=-1) # Group the data by field line. 
avg = avg.reshape(pitch.shape[0], rho.size, alpha.size) - # The bounce averages stored at index i, j + # The mean bounce average stored at index i, j i, j = 0, 0 print(avg[:, i, j]) - # are the bounce averages along the field line with nodes + # is the mean bounce average among wells along the field line with nodes # given in Clebsch-Type field-line coordinates ρ, α, ζ raz_grid = grid.source_grid nodes = raz_grid.nodes.reshape(rho.size, alpha.size, -1, 3) @@ -626,6 +626,7 @@ def test_drift(): ) np.testing.assert_allclose(data["psi"], psi) np.testing.assert_allclose(data["iota"], iota) + assert np.all(np.sign(data["B^zeta"]) > 0) data["iota"] = grid.compress(data["iota"]).item() data["shear"] = grid.compress(data["shear"]).item() @@ -723,8 +724,7 @@ def integrand_num(cvdrift, gbdrift, B, pitch): return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) def integrand_den(B, pitch): - g = jnp.sqrt(1 - pitch * B) - return 1 / g + return jnp.reciprocal(jnp.sqrt(1 - pitch * B)) drift_numerical_num = bounce_integrate( integrand=integrand_num, From d51aa16d126b0d8cc505c35eabfe3ce2171d3fb5 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 18 Jun 2024 00:16:28 -0500 Subject: [PATCH 180/241] Partially undo previous commit The sign of B_z|r,a was fine, it was B_sup_z that needed changing --- desc/backend.py | 34 ----------- desc/compute/bounce_integral.py | 101 ++++++++++++-------------------- tests/test_bounce_integral.py | 55 ++++++++++------- 3 files changed, 69 insertions(+), 121 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index 800c2fb2a2..e5cbb04179 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -116,38 +116,6 @@ def put(arr, inds, vals): return arr return jnp.asarray(arr).at[inds].set(vals) - def put_along_axis(arr, indices, values, axis): - """Put values into the destination array by matching 1d index and data slices. - - This iterates over matching 1d slices oriented along the specified axis in - the index and data arrays, and uses the former to place values into the - latter. - - Parameters - ---------- - arr : ndarray (Ni..., M, Nk...) - Destination array. - indices : ndarray (Ni..., J, Nk...) - Indices to change along each 1d slice of `arr`. This must match the - dimension of arr, but dimensions in Ni and Nj may be 1 to broadcast - against `arr`. - values : array_like (Ni..., J, Nk...) - values to insert at those indices. Its shape and dimension are - broadcast to match that of `indices`. - axis : int - The axis to take 1d slices along. If axis is None, the destination - array is treated as if a flattened 1d view had been created of it. - - """ - if not (axis == -1 or axis == arr.ndim - 1): - raise NotImplementedError( - f"put_along_axis for axis={axis} not implemented yet." - ) - if isinstance(arr, np.ndarray): - arr[..., indices] = values - return arr - return jnp.asarray(arr).at[..., indices].set(values) - def sign(x): """Sign function, but returns 1 for x==0. 
@@ -429,8 +397,6 @@ def trapezoid(y, x=None, dx=1.0, axis=-1): ) from scipy.special import gammaln, logsumexp # noqa: F401 - put_along_axis = np.put_along_axis - def imap(f, xs, in_axes=0, out_axes=0): """Generalizes jax.lax.map; uses numpy.""" if not isinstance(xs, np.ndarray): diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index cde55fbdc2..f4c0995330 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -6,9 +6,9 @@ from matplotlib import pyplot as plt from orthax.legendre import leggauss -from desc.backend import flatnonzero, imap, jnp, put_along_axis, take +from desc.backend import flatnonzero, imap, jnp, put, take from desc.compute.utils import safediv -from desc.utils import errorif, warnif +from desc.utils import Index, errorif, warnif @partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={2, 3}) @@ -457,26 +457,34 @@ def _check_shape(knots, B_c, B_z_ra_c, pitch=None): λ values to evaluate the bounce integral at each field line. """ - errorif(knots.ndim != 1) + errorif(knots.ndim != 1, msg=f"knots should be 1d; got shape {knots.shape}.") if B_c.ndim == 2 and B_z_ra_c.ndim == 2: # Add axis which enumerates field lines. B_c = B_c[:, jnp.newaxis] B_z_ra_c = B_z_ra_c[:, jnp.newaxis] - msg = "Supplied invalid shape for splines." + msg = ( + "Invalid shape for spline arrays. " + f"B_c.shape={B_c.shape}. B_z_ra_c.shape={B_z_ra_c.shape}." + ) errorif(not (B_c.ndim == B_z_ra_c.ndim == 3), msg=msg) errorif(B_c.shape[0] - 1 != B_z_ra_c.shape[0], msg=msg) errorif(B_c.shape[1:] != B_z_ra_c.shape[1:], msg=msg) - msg = "Last axis fails to enumerate spline polynomials." - errorif(B_c.shape[-1] != knots.size - 1, msg=msg) + errorif( + B_c.shape[-1] != knots.size - 1, + msg=( + "Last axis does not enumerate polynomials of spline. " + f"B_c.shape={B_c.shape}. knots.shape={knots.shape}." + ), + ) if pitch is not None: pitch = jnp.atleast_2d(pitch) - msg = "Supplied invalid shape for pitch angles." + msg = f"Invalid shape {pitch.shape} for pitch angles." errorif(pitch.ndim != 2, msg=msg) errorif(pitch.shape[-1] != 1 and pitch.shape[-1] != B_c.shape[1], msg=msg) return B_c, B_z_ra_c, pitch -def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=False, **kwargs): +def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True, **kwargs): """Compute the bounce points given spline of |B| and pitch λ. Parameters @@ -561,7 +569,7 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=False, **kwargs # At each step, the likelihood that an intersection has already been lost # due to floating point errors grows, so the real solution is to pick a less # degenerate pitch value - one that does not ride the global extrema of |B|. - is_bp2 = put_along_axis(is_bp2, jnp.array(0), edge_case, axis=-1) + is_bp2 = put(is_bp2, Index[..., 0], edge_case) # Get ζ values of bounce points from the masks. bp1 = take_mask(intersect, is_bp1) bp2 = take_mask(intersect, is_bp2) @@ -606,8 +614,10 @@ def get_pitch(min_B, max_B, num, relative_shift=1e-6): Parameters ---------- - min_B, max_B : jnp.ndarray, jnp.ndarray - Minimum and maximum |B| values. + min_B : jnp.ndarray + Minimum |B| value. + max_B : jnp.ndarray + Maximum |B| value. num : int Number of values, not including endpoints. relative_shift : float @@ -1106,51 +1116,6 @@ def loop(bp): return result -def _fix_sign_and_normalize(B_sup_z, B, B_z_ra, B_ref=1, L_ref=1, check=False): - """Correct signs for consistency with strictly increasing zeta requirement. 
- - Parameters - ---------- - B_sup_z : jnp.ndarray - Shape (S, knots.size) or (S * knots.size). - Contravariant field-line following toroidal component of magnetic field. - B : jnp.ndarray - Shape (S, knots.size) or (S * knots.size). - Norm of magnetic field. - B_z_ra : jnp.ndarray - Shape (S, knots.size) or (S * knots.size). - Norm of magnetic field, derivative with respect to field-line following - coordinate. - B_ref : float - Optional. Reference magnetic field strength for normalization. - L_ref : float - Optional. Reference length scale for normalization. - check : bool - Flag for debugging. Must be false for jax transformations. - - Returns - ------- - B_sup_z, B, B_z_ra : (jnp.ndarray, jnp.ndarray, jnp.ndarray) - Same as inputs but with corrected sign and normalized by length scales. - - """ - warnif( - check and jnp.any(jnp.sign(B_sup_z) <= 0), - msg="(∂ℓ/∂ζ)|ρ,a > 0 is required. Correcting signs of B^ζ and (∂|B|/∂ζ)|ρ,α.", - ) - # Strictly increasing zeta knots enforces dζ > 0. - # To retain dℓ = (|B|/B^ζ) dζ > 0 after fixing dζ > 0, we require B^ζ = B⋅∇ζ > 0. - # This is equivalent to changing the sign of ∇ζ (or [∂/∂ζ]|ρ,a). - # Recall dζ = ∇ζ⋅dR, implying 1 = ∇ζ⋅(e_ζ|ρ,a). Hence, a sign change in ∇ζ - # induces the same sign change in e_ζ|ρ,a to retain the metric identity. For any - # quantity f, we may write df = ∇f⋅dR, implying ∂f/∂ζ|ρ,α = ∇f ⋅ e_ζ|ρ,a. Therefore, - # a sign change in e_ζ|ρ,a induces the same sign change in ∂f/∂ζ|ρ,α. - B_z_ra = B_z_ra / B_ref * jnp.sign(B_sup_z) - B_sup_z = jnp.abs(B_sup_z) * L_ref / B_ref - B = B / B_ref - return B_sup_z, B, B_z_ra - - def bounce_integral( B_sup_z, B, @@ -1181,12 +1146,6 @@ def bounce_integral( Notes ----- - The strictly increasing knots requirement enforces dζ > 0, which constraints the - signs of B^ζ and ∂/∂ζ. The signs of B^ζ and (∂|B|/∂ζ)|ρ,α will automatically be - corrected to match this requirement, but this correction cannot be automated for - arbitrary f(ℓ) in the integrand. Pass in ``check=True`` to be notified if the signs - for B^ζ and (∂|B|/∂ζ)|ρ,α required correction. - The quantities ``B_sup_z``, ``B``, ``B_z_ra``, and those in ``f`` supplied to the returned method must be separable into data evaluated along particular field lines via ``.reshape(S,knots.size)``. One way to satisfy this is to compute stuff on the @@ -1258,10 +1217,22 @@ def bounce_integral( polynomials that compose the spline along a particular field line. """ - B_sup_z, B, B_z_ra = ( - f.reshape(-1, knots.size) # group data by field line - for f in _fix_sign_and_normalize(B_sup_z, B, B_z_ra, B_ref, L_ref, check) + warnif( + check and kwargs.pop("warn", True) and jnp.any(jnp.sign(B_sup_z) <= 0), + msg="(∂ℓ/∂ζ)|ρ,a > 0 is required. Enforcing positive B^ζ.", ) + # Strictly increasing zeta knots enforces dζ > 0. + # To retain dℓ = (|B|/B^ζ) dζ > 0 after fixing dζ > 0, we require B^ζ = B⋅∇ζ > 0. + # This is equivalent to changing the sign of ∇ζ (or [∂/∂ζ]|ρ,a). + # Recall dζ = ∇ζ⋅dR, implying 1 = ∇ζ⋅(e_ζ|ρ,a). Hence, a sign change in ∇ζ + # requires the same sign change in e_ζ|ρ,a to retain the metric identity. For any + # quantity f, we may write df = ∇f⋅dR, implying ∂f/∂ζ|ρ,α = ∇f ⋅ e_ζ|ρ,a. Hence, + # a sign change in e_ζ|ρ,a requires the same sign change in ∂f/∂ζ|ρ,α. + B_sup_z = jnp.abs(B_sup_z) * L_ref / B_ref + B = B / B_ref + B_z_ra = B_z_ra / B_ref # This is already the correct sign. + # group data by field line + B_sup_z, B, B_z_ra = (f.reshape(-1, knots.size) for f in [B_sup_z, B, B_z_ra]) # Compute splines. 
monotonic = kwargs.pop("monotonic", False) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 2e6d9271bd..c889e54486 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -252,7 +252,9 @@ def test_bp1_first(): knots = np.linspace(start, end, 5) B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots)) pitch = 2 - bp1, bp2 = bounce_points(pitch, knots, B.c, B.derivative().c, check=True) + bp1, bp2 = bounce_points( + pitch, knots, B.c, B.derivative().c, check=True, plot=False + ) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) @@ -265,7 +267,9 @@ def test_bp2_first(): k = np.linspace(start, end, 5) B = CubicHermiteSpline(k, np.cos(k), -np.sin(k)) pitch = 2 - bp1, bp2 = bounce_points(pitch, k, B.c, B.derivative().c, check=True) + bp1, bp2 = bounce_points( + pitch, k, B.c, B.derivative().c, check=True, plot=False + ) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) @@ -281,7 +285,7 @@ def test_bp1_before_extrema(): ) B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[3] + 1e-13 - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) + bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True, plot=False) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) @@ -302,7 +306,7 @@ def test_bp2_before_extrema(): ) B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) + bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True, plot=False) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) @@ -320,7 +324,9 @@ def test_extrema_first_and_before_bp1(plot=False): ) B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] - 1e-13 - bp1, bp2 = bounce_points(pitch, k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], check=True) + bp1, bp2 = bounce_points( + pitch, k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], check=True, plot=False + ) if plot: plot_field_line(B, pitch, bp1, bp2, start=k[2]) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) @@ -354,7 +360,7 @@ def test_extrema_first_and_before_bp2(): # value theorem holds for the continuous spline, so when fed these sequence # of roots, the correct action is to ignore the first green root since # otherwise the interior of the bounce points would be hills and not valleys. - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) + bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True, plot=False) bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) assert bp1.size and bp2.size # Our routine correctly detects intersection, while scipy, jnp.root fails. 
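# For context, a small self-contained check of the pairing convention these tests
# exercise; the field strength |B|(ζ) = 2 + cos ζ and the level 1/λ = 2.5 are
# illustrative choices. Wells are the intervals where cos ζ < 1/2, so each left
# boundary bp1 has d|B|/dζ <= 0, each right boundary bp2 has d|B|/dζ >= 0, and
# consecutive intersections alternate bp1, bp2, bp1, bp2, ...
import numpy as np
from scipy.interpolate import CubicHermiteSpline

knots = np.linspace(0, 4 * np.pi, 100)
B = CubicHermiteSpline(knots, 2 + np.cos(knots), -np.sin(knots))
intersect = B.solve(2.5, extrapolate=False)
bp1, bp2 = intersect[0::2], intersect[1::2]
np.testing.assert_allclose(bp1, [np.pi / 3, np.pi / 3 + 2 * np.pi], rtol=1e-4)
np.testing.assert_allclose(bp2, [5 * np.pi / 3, 5 * np.pi / 3 + 2 * np.pi], rtol=1e-4)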
@@ -444,7 +450,7 @@ def integrand(B, pitch): bounce_integrate, _ = bounce_integral( B, B, B_z_ra, knots, quad=leggauss(25), check=True ) - leg_gauss_sin = _filter_not_nan(bounce_integrate(integrand, [], pitch)) + leg_gauss_sin = _filter_not_nan(bounce_integrate(integrand, [], pitch, batch=False)) assert leg_gauss_sin.size == 1 np.testing.assert_allclose(leg_gauss_sin, truth, rtol=rtol) @@ -452,19 +458,30 @@ def integrand(B, pitch): @pytest.mark.unit def test_bounce_integral_checks(): """Test that all the internal correctness checks pass for real example.""" + + def numerator(g_zz, B, pitch): + f = (1 - pitch * B) * g_zz + return f / jnp.sqrt(1 - pitch * B) + + def denominator(B, pitch): + return jnp.reciprocal(jnp.sqrt(1 - pitch * B)) + # Suppose we want to compute a bounce average of the function # f(ℓ) = (1 − λ |B|) * g_zz, where g_zz is the squared norm of the # toroidal basis vector on some set of field lines specified by (ρ, α) # coordinates. This is defined as # (∫ f(ℓ) / √(1 − λ |B|) dℓ) / (∫ 1 / √(1 − λ |B|) dℓ) eq = get("HELIOTRON") + # Clebsch-Type field-line coordinates ρ, α, ζ. rho = np.linspace(1e-12, 1, 6) - alpha = np.linspace(0, 2 * np.pi, 5) + alpha = np.array([0]) knots = np.linspace(-2 * np.pi, 2 * np.pi, 20) grid = rtz_grid( eq, rho, alpha, knots, coordinates="raz", period=(np.inf, 2 * np.pi, np.inf) ) - data = eq.compute(["B^zeta", "|B|", "|B|_z|r,a", "g_zz"], grid=grid) + data = eq.compute( + ["B^zeta", "|B|", "|B|_z|r,a", "min_tz |B|", "max_tz |B|", "g_zz"], grid=grid + ) bounce_integrate, spline = bounce_integral( data["B^zeta"], data["|B|"], @@ -473,21 +490,15 @@ def test_bounce_integral_checks(): check=True, quad=leggauss(3), # not checking quadrature accuracy in this test ) - - def numerator(g_zz, B, pitch): - f = (1 - pitch * B) * g_zz - return f / jnp.sqrt(1 - pitch * B) - - def denominator(B, pitch): - return jnp.reciprocal(jnp.sqrt(1 - pitch * B)) - - # Usually it's better to get values with get_pitch instead of get_extrema. - pitch = 1 / get_extrema(**spline) + pitch = get_pitch( + grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 + ) + # To see if the knot density was sufficient to reconstruct the field line + # one can plot the field line by uncommenting the following line. + # _, _ = bounce_points(pitch, **spline, check=True, num=50000) # noqa: E800 num = bounce_integrate(numerator, data["g_zz"], pitch) - # Can reduce memory usage by not batching. - den = bounce_integrate(denominator, [], pitch, batch=False) + den = bounce_integrate(denominator, [], pitch) avg = num / den - assert np.isfinite(avg).any() # Sum all bounce integrals across field line avg = np.nansum(avg, axis=-1) From 02afa2c2a698f65d65c0effeaffa0d8d3f0364cb Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 21 Jun 2024 23:10:03 -0500 Subject: [PATCH 181/241] No more nan in effective ripple gradient Refactors compuations in bounce integral and effective ripple to avoid nan gradients that would arise due to limitations in JAX autodiff. Although it seems the issue is now that the gradient is zero. 
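A note on the autodiff limitation referenced above: nan produced in an unused branch
still propagates through ``jnp.where`` in reverse mode, since the backward pass forms
0 * nan = nan. This is why the integrands in the tests below may need the clip and
``safediv`` guards mentioned there. A minimal self-contained sketch of the guard
pattern; the functions here are illustrative, not code from this patch:

import jax
import jax.numpy as jnp

def naive(x):
    # Value is fine, but the gradient is nan for x > 1 because the unused
    # sqrt(1 - x) branch produces nan and 0 * nan = nan in the backward pass.
    return jnp.where(x < 1, jnp.sqrt(1 - x), 0.0)

def guarded(x):
    # Guard the argument as well, so the unused branch never generates nan.
    safe = jnp.where(x < 1, x, 0.0)
    return jnp.where(x < 1, jnp.sqrt(1 - safe), 0.0)

print(jax.grad(naive)(2.0))    # nan
print(jax.grad(guarded)(2.0))  # 0.0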
--- desc/backend.py | 4 +- desc/compute/bounce_integral.py | 306 +++++++++++++++++++------------- desc/grid.py | 9 +- tests/test_axis_limits.py | 12 +- tests/test_bounce_integral.py | 96 +++++----- 5 files changed, 251 insertions(+), 176 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index e5cbb04179..1fed11ba64 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -307,6 +307,8 @@ def root( This routine may be used on over or under-determined systems, in which case it will solve it in a least squares / least norm sense. """ + from desc.compute.utils import safenorm + if fixup is None: fixup = lambda x, *args: x if jac is None: @@ -371,7 +373,7 @@ def tangent_solve(g, y): x, (res, niter) = jax.lax.custom_root( res, x0, solve, tangent_solve, has_aux=True ) - return x, (jnp.linalg.norm(res), niter) + return x, (safenorm(res), niter) def trapezoid(y, x=None, dx=1.0, axis=-1): """Integrate along the given axis using the composite trapezoidal rule.""" diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index f4c0995330..3f41c446e6 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -2,6 +2,7 @@ from functools import partial +import numpy as np from interpax import CubicHermiteSpline, PchipInterpolator, PPoly, interp1d from matplotlib import pyplot as plt from orthax.legendre import leggauss @@ -51,23 +52,35 @@ def take_mask(a, mask, size=None, fill_value=None): ) -# only use for debugging +# use for debugging and testing def _filter_not_nan(a): """Filter out nan from ``a`` while asserting nan is padded at right.""" - is_nan = jnp.isnan(a) - assert jnp.array_equal(is_nan, jnp.sort(is_nan, axis=-1)) + is_nan = np.isnan(a) + assert np.array_equal(is_nan, np.sort(is_nan, axis=-1)) return a[~is_nan] -def _filter_real(a, a_min=-jnp.inf, a_max=jnp.inf): +# use for debugging and testing +def _filter_nonzero_measure(bp1, bp2): + """Return only bounce points such that |bp2 - bp1| > 0.""" + mask = (bp2 - bp1) != 0 + return bp1[mask], bp2[mask] + + +def _filter_real(a, a_min=-jnp.inf, a_max=jnp.inf, sentinel=jnp.nan, eps=0): """Keep real values inside [``a_min``, ``a_max``] and set others to nan. Parameters ---------- a : jnp.ndarray - a_min, a_max : jnp.ndarray or float, jnp.ndarray or float - Minimum and maximum value to keep real values between. - Should broadcast with ``a``. + a_min : jnp.ndarray + Minimum value to keep real values between. Should broadcast with ``a``. + a_max : jnp.ndarray + Maximum value to keep real values between. Should broadcast with ``a``. + sentinel : float + Value with which to pad array in place of filtered elements. + eps : float + Absolute tolerance with which to consider value as zero. Returns ------- @@ -80,55 +93,70 @@ def _filter_real(a, a_min=-jnp.inf, a_max=jnp.inf): if a_max is None: a_max = jnp.inf return jnp.where( - jnp.isclose(jnp.imag(a), 0) & (a_min <= a) & (a <= a_max), + (jnp.abs(jnp.imag(a)) <= eps) & (a_min <= a) & (a <= a_max), jnp.real(a), - jnp.nan, + sentinel, ) -def _nan_concat(r, num=1): - # Concat nan num times to r on last axis. - nan = jnp.broadcast_to(jnp.nan, (*r.shape[:-1], num)) - return jnp.concatenate([r, nan], axis=-1) +def _sentinel_concat(r, sentinel, num=1): + # Concat sentinel num times to r on last axis. 
+ sent = jnp.broadcast_to(sentinel, (*r.shape[:-1], num)) + return jnp.concatenate([r, sent], axis=-1) -def _root_linear(a, b, distinct=False): +def _root_linear(a, b, sentinel, eps, distinct=False): """Return r such that a r + b = 0.""" - return safediv(-b, a, fill=jnp.where(jnp.isclose(b, 0), 0, jnp.nan)) + return safediv(-b, a, jnp.where(jnp.abs(b) <= eps, 0, sentinel)) -def _root_quadratic(a, b, c, distinct=False): - """Return r such that a r² + b r + c = 0, assuming real coefficients.""" +def _root_quadratic(a, b, c, sentinel, eps, distinct): + """Return r such that a r² + b r + c = 0, assuming real coefficients and roots.""" # numerical.recipes/book.html, page 227 discriminant = b**2 - 4 * a * c - q = -0.5 * (b + jnp.sign(b) * jnp.sqrt(discriminant)) - r1 = safediv(q, a, _root_linear(b, c)) - # more robust to remove repeated roots with discriminant + q = -0.5 * (b + jnp.sign(b) * jnp.sqrt(jnp.abs(discriminant))) + r1 = jnp.where( + discriminant < 0, + sentinel, + safediv(q, a, _root_linear(b, c, sentinel, eps)), + ) r2 = jnp.where( - distinct & jnp.isclose(discriminant, 0), jnp.nan, safediv(c, q, jnp.nan) + # more robust to remove repeated roots with discriminant + (discriminant < 0) | (distinct & (discriminant <= eps)), + sentinel, + safediv(c, q, sentinel), ) return jnp.stack([r1, r2], axis=-1) -def _root_cubic(a, b, c, d, distinct=False): - """Return r such that a r³ + b r² + c r + d = 0, assuming real coefficients.""" +def _root_cubic(a, b, c, d, sentinel, eps, distinct): + """Return r such that a r³ + b r² + c r + d = 0, assuming real coef and roots.""" # numerical.recipes/book.html, page 228 - def irreducible(Q, R, b): + def irreducible(Q, R, b, mask): # Three irrational real roots. - theta = jnp.arccos(R / jnp.sqrt(Q**3)) - j = -2 * jnp.sqrt(Q) - r1 = j * jnp.cos(theta / 3) - b / 3 - r2 = j * jnp.cos((theta + 2 * jnp.pi) / 3) - b / 3 - r3 = j * jnp.cos((theta - 2 * jnp.pi) / 3) - b / 3 - return jnp.stack([r1, r2, r3], axis=-1) + theta = jnp.arccos(safediv(R, jnp.sqrt(jnp.where(mask, Q**3, R**2 + 1)))) + return jnp.moveaxis( + -2 + * jnp.sqrt(jnp.abs(Q)) + * jnp.stack( + [ + jnp.cos(theta / 3), + jnp.cos((theta + 2 * jnp.pi) / 3), + jnp.cos((theta - 2 * jnp.pi) / 3), + ] + ) + - b / 3, + source=0, + destination=-1, + ) def reducible(Q, R, b): # One real and two complex roots. - A = -jnp.sign(R) * (jnp.abs(R) + jnp.sqrt(R**2 - Q**3)) ** (1 / 3) + A = -jnp.sign(R) * (jnp.abs(R) + jnp.sqrt(jnp.abs(R**2 - Q**3))) ** (1 / 3) B = safediv(Q, A) r1 = (A + B) - b / 3 - return _nan_concat(r1[..., jnp.newaxis], 2) + return _sentinel_concat(r1[..., jnp.newaxis], sentinel, num=2) def root(b, c, d): b = safediv(b, a) @@ -136,15 +164,15 @@ def root(b, c, d): d = safediv(d, a) Q = (b**2 - 3 * c) / 9 R = (2 * b**3 - 9 * b * c + 27 * d) / 54 + mask = R**2 < Q**3 return jnp.where( - jnp.expand_dims(R**2 < Q**3, axis=-1), - irreducible(Q, R, b), - reducible(Q, R, b), + mask[..., jnp.newaxis], irreducible(Q, R, b, mask), reducible(Q, R, b) ) return jnp.where( - jnp.isclose(a, 0)[..., jnp.newaxis], - _nan_concat(_root_quadratic(b, c, d, distinct)), + # Tests catch failure here if eps < 1e-12 for 64 bit jax. + jnp.expand_dims(jnp.abs(a) <= eps, axis=-1), + _sentinel_concat(_root_quadratic(b, c, d, sentinel, eps, distinct), sentinel), root(b, c, d), ) @@ -153,7 +181,15 @@ def root(b, c, d): def _poly_root( - c, k=0, a_min=None, a_max=None, sort=False, distinct=False, poly_is_real=True + c, + k=0, + a_min=None, + a_max=None, + sort=False, + sentinel=jnp.nan, + # About 2e-12 for 64 bit jax. 
+ eps=min(jnp.finfo(jnp.array(1.0).dtype).eps * 1e4, 1e-8), + distinct=False, ): """Roots of polynomial with given coefficients. @@ -165,18 +201,22 @@ def _poly_root( ``c[n-i]``. k : Array Specify to find solutions to ∑ᵢⁿ cᵢ xⁱ = ``k``. Should broadcast with arrays of - shape c.shape[1:]. + shape ``c.shape[1:]``. a_min, a_max : jnp.ndarray, jnp.ndarray Minimum and maximum value to return roots between. If specified only real roots are returned. If None, returns all complex roots. Should broadcast with arrays - of shape c.shape[1:]. + of shape ``c.shape[1:]``. sort : bool Whether to sort the roots. + sentinel : float + Value with which to pad array in place of filtered elements. + Anything less than ``a_min`` or greater than ``a_max`` plus some floating point + error buffer will work just like nan while also avoiding nan gradient. + eps : float + Absolute tolerance with which to consider value as zero. distinct : bool Whether to only return the distinct roots. If true, when the multiplicity is greater than one, the repeated roots are set to nan. - poly_is_real : bool - Whether the coefficients ``c`` and ``k`` are real. Default is true. Returns ------- @@ -185,14 +225,17 @@ def _poly_root( The roots of the polynomial, iterated over the last axis. """ + is_real = not (jnp.iscomplexobj(c) or jnp.iscomplexobj(k)) get_only_real_roots = not (a_min is None and a_max is None) + func = {2: _root_linear, 3: _root_quadratic, 4: _root_cubic} - if c.shape[0] in func and poly_is_real and get_only_real_roots: + if c.shape[0] in func and is_real and get_only_real_roots: # Compute from analytic formula to avoid the issue of complex roots with small - # imaginary parts. - r = func[c.shape[0]](*c[:-1], c[-1] - k, distinct) + # imaginary parts and to avoid nan in gradient. + r = func[c.shape[0]](*c[:-1], c[-1] - k, sentinel, eps, distinct) distinct = distinct and c.shape[0] > 3 else: + warnif(not np.isnan(sentinel), msg="This may not prevent an nan gradient.") # Compute from eigenvalues of polynomial companion matrix. c_n = c[-1] - k c = [jnp.broadcast_to(c_i, c_n.shape) for c_i in c[:-1]] @@ -204,17 +247,15 @@ def _poly_root( a_min = a_min[..., jnp.newaxis] if a_max is not None: a_max = a_max[..., jnp.newaxis] - r = _filter_real(r, a_min, a_max) + r = _filter_real(r, a_min, a_max, sentinel, eps) if sort or distinct: r = jnp.sort(r, axis=-1) if distinct: - # Atol needs to be low enough that distinct roots which are close do not - # get removed, otherwise algorithms that rely on continuity of the spline - # such as bounce_points() will fail. The current atol was chosen so that - # test_bounce_points() passes. - mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=jnp.nan), 0, atol=1e-15) - r = jnp.where(mask, jnp.nan, r) + # eps needs to be low enough that close distinct roots do not get removed. + # Otherwise, algorithms relying on continuity will fail. + mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=sentinel), 0, atol=eps) + r = jnp.where(mask, sentinel, r) return r @@ -284,8 +325,8 @@ def _poly_val(x, c): def plot_field_line( B, pitch=None, - bp1=jnp.array([]), - bp2=jnp.array([]), + bp1=np.array([]), + bp2=np.array([]), start=None, stop=None, num=1000, @@ -302,11 +343,11 @@ def plot_field_line( ---------- B : PPoly Spline of |B| over given field line. - pitch : jnp.ndarray + pitch : np.ndarray λ value. - bp1 : jnp.ndarray + bp1 : np.ndarray Bounce points with (∂|B|/∂ζ)|ρ,α <= 0. - bp2 : jnp.ndarray + bp2 : np.ndarray Bounce points with (∂|B|/∂ζ)|ρ,α >= 0. start : float Minimum ζ on plot. 
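# For reference, a small numpy check of the "q" formulation used in
# ``_root_quadratic`` above (Numerical Recipes): it avoids the catastrophic
# cancellation the textbook formula suffers when b**2 >> 4*a*c.
import numpy as np

a, b, c = 1.0, 1e8, 1.0                                # roots ~ -1e8 and ~ -1e-8
q = -0.5 * (b + np.sign(b) * np.sqrt(b**2 - 4 * a * c))
r1, r2 = q / a, c / q                                  # both roots to full precision
naive = (-b + np.sqrt(b**2 - 4 * a * c)) / (2 * a)     # loses the small root
np.testing.assert_allclose(r2, -1e-8, rtol=1e-12)
print(naive)                                           # ~ -7.45e-9, off by ~25%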
@@ -346,7 +387,7 @@ def add(lines): if include_knots: for knot in B.x: add(ax.axvline(x=knot, color="tab:blue", alpha=alpha_knot, label="knot")) - z = jnp.linspace( + z = np.linspace( start=B.x[0] if start is None else start, stop=B.x[-1] if stop is None else stop, num=num, @@ -354,20 +395,24 @@ def add(lines): add(ax.plot(z, B(z), label=r"$\vert B \vert (\zeta)$")) if pitch is not None: - b = jnp.reciprocal(pitch) + b = 1 / np.atleast_1d(pitch) for val in b: add( ax.axhline( val, color="tab:purple", alpha=alpha_pitch, label=r"$1 / \lambda$" ) ) - bp1, bp2 = jnp.atleast_2d(bp1, bp2) + bp1, bp2 = np.atleast_2d(bp1, bp2) for i in range(bp1.shape[0]): - bp1_i, bp2_i = map(_filter_not_nan, (bp1[i], bp2[i])) + if bp1.shape == bp2.shape: + bp1_i, bp2_i = _filter_nonzero_measure(bp1[i], bp2[i]) + else: + bp1_i, bp2_i = bp1[i], bp2[i] + bp1_i, bp2_i = map(_filter_not_nan, (bp1_i, bp2_i)) add( ax.scatter( bp1_i, - jnp.full_like(bp1_i, b[i]), + np.full_like(bp1_i, b[i]), marker="v", color="tab:red", label="bp1", @@ -376,7 +421,7 @@ def add(lines): add( ax.scatter( bp2_i, - jnp.full_like(bp2_i, b[i]), + np.full_like(bp2_i, b[i]), marker="^", color="tab:green", label="bp2", @@ -396,11 +441,13 @@ def add(lines): return fig, ax -def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot, **kwargs): +def _check_bounce_points(bp1, bp2, sentinel, pitch, knots, B_c, plot, **kwargs): """Check that bounce points are computed correctly.""" - eps = 10 * jnp.finfo(jnp.array(1.0).dtype).eps - P, S = bp1.shape[:-1] + bp1 = jnp.where(bp1 > sentinel, bp1, jnp.nan) + bp2 = jnp.where(bp2 > sentinel, bp2, jnp.nan) + eps = jnp.finfo(jnp.array(1.0).dtype).eps * 10 + P, S = bp1.shape[:-1] msg_1 = "Bounce points have an inversion." err_1 = jnp.any(bp1 > bp2, axis=-1) msg_2 = "Discontinuity detected." @@ -528,62 +575,78 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True, **kwargs) ``knots.size-1``, and the number of field lines is denoted by ``S``. If there were less than ``N*degree`` bounce points detected along a field line, then the last axis, which enumerates the bounce points for a particular field - line, is padded with nan. + line, is padded with zero. """ B_c, B_z_ra_c, pitch = _check_shape(knots, B_c, B_z_ra_c, pitch) P, S, N, degree = pitch.shape[0], B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 + # Intersection points in local power basis. intersect = _poly_root( c=B_c, k=jnp.reciprocal(pitch)[..., jnp.newaxis], a_min=jnp.array([0]), a_max=jnp.diff(knots), sort=True, + sentinel=-1, distinct=True, ) assert intersect.shape == (P, S, N, degree) # Reshape so that last axis enumerates intersects of a pitch along a field line. + # Only consider intersect if it is within knots that bound that polynomial. + is_intersect = intersect.reshape(P, S, -1) >= 0 B_z_ra = _poly_val(x=intersect, c=B_z_ra_c[..., jnp.newaxis]).reshape(P, S, -1) + # Gather intersects along a field line to be contiguous. + B_z_ra = take_mask(B_z_ra, is_intersect, fill_value=0) + + sentinel = knots[0] - 1 # Transform out of local power basis expansion. intersect = (intersect + knots[:-1, jnp.newaxis]).reshape(P, S, -1) - - # Only consider intersect if it is within knots that bound that polynomial. - is_intersect = ~jnp.isnan(intersect) - # Reorder so that all intersects along a field line are contiguous. 
- intersect = take_mask(intersect, is_intersect) - B_z_ra = take_mask(B_z_ra, is_intersect) - assert intersect.shape == B_z_ra.shape == (P, S, N * degree) - is_bp1 = B_z_ra <= 0 - is_bp2 = B_z_ra >= 0 + # Gather intersects along a field line to be contiguous, followed by some sentinel. + intersect = take_mask(intersect, is_intersect, fill_value=sentinel) + is_intersect = intersect > sentinel + is_bp1 = (B_z_ra <= 0) & is_intersect + is_bp2 = (B_z_ra >= 0) & is_intersect + edge_case = ( + (B_z_ra[..., 0] == 0) + & (B_z_ra[..., 1] < 0) + & is_intersect[..., 0] + & is_intersect[..., 1] + # In theory, we need to keep propagating this edge case, + # e.g (B_z_ra[..., 1] < 0) | ((B_z_ra[..., 1] == 0) & (B_z_ra[..., 2] < 0)...). + # At each step, the likelihood that an intersection has already been lost + # due to floating point errors grows, so the real solution is to pick a less + # degenerate pitch value - one that does not ride the global extrema of |B|. + ) + is_bp2 = put(is_bp2, Index[..., 0], edge_case) + # Get ζ values of bounce points from the masks. + bp1 = take_mask(intersect, is_bp1, fill_value=sentinel) + bp2 = take_mask(intersect, is_bp2, fill_value=sentinel) # The pairs bp1[i, j, k] and bp2[i, j, k] are boundaries of an integral only # if bp1[i, j, k] <= bp2[i, j, k]. For correctness of the algorithm, it is # required that the first intersect satisfies non-positive derivative. Now, # because B_z_ra[i, j, k] <= 0 implies B_z_ra[i, j, k + 1] >= 0 by continuity, # there can be at most one inversion, and if it exists, the inversion must be # at the first pair. To correct the inversion, it suffices to disqualify the - # first intersect as a right boundary, except under the following edge case. - edge_case = (B_z_ra[..., 0] == 0) & (B_z_ra[..., 1] < 0) - # In theory, we need to keep propagating this edge case, - # e.g (B_z_ra[..., 1] < 0) | ((B_z_ra[..., 1] == 0) & (B_z_ra[..., 2] < 0)...). - # At each step, the likelihood that an intersection has already been lost - # due to floating point errors grows, so the real solution is to pick a less - # degenerate pitch value - one that does not ride the global extrema of |B|. - is_bp2 = put(is_bp2, Index[..., 0], edge_case) - # Get ζ values of bounce points from the masks. - bp1 = take_mask(intersect, is_bp1) - bp2 = take_mask(intersect, is_bp2) + # first intersect as a right boundary, except under the above edge case. # Following discussion on page 3 and 5 of https://doi.org/10.1063/1.873749, # we ignore the bounce points of particles assigned to a class that are # trapped outside this snapshot of the field line. - # TODO: Better to always consider boundary as bounce points. + # TODO: Better to always consider boundary as bounce points. Simple change; + # do in same pull request that resolves GitHub issue #1045. + if check: - _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot, **kwargs) + _check_bounce_points(bp1, bp2, sentinel, pitch, knots, B_c, plot, **kwargs) + + mask = (bp1 > sentinel) & (bp2 > sentinel) + # Set outside mask to same value so that integration is over set of measure zero. + bp1 = jnp.where(mask, bp1, 0) + bp2 = jnp.where(mask, bp2, 0) return bp1, bp2 -def composite_linspace(x, num): +def _composite_linspace(x, num): """Returns linearly spaced points between every pair of points ``x``. Parameters @@ -634,7 +697,7 @@ def get_pitch(min_B, max_B, num, relative_shift=1e-6): # extrema. Shift values slightly to resolve this issue. 
min_B = (1 + relative_shift) * min_B max_B = (1 - relative_shift) * max_B - pitch = composite_linspace(jnp.reciprocal(jnp.stack([max_B, min_B])), num) + pitch = _composite_linspace(jnp.reciprocal(jnp.stack([max_B, min_B])), num) assert pitch.shape == (num + 2, *pitch.shape[1:]) return pitch @@ -834,8 +897,8 @@ def _plot(Z, V, title_id=""): """Plot V[λ, (ρ, α), (ζ₁, ζ₂)](Z).""" for p in range(Z.shape[0]): for s in range(Z.shape[1]): - is_quad_point_set = jnp.nonzero(~jnp.any(jnp.isnan(Z[p, s]), axis=-1))[0] - if not is_quad_point_set.size: + marked = jnp.nonzero(jnp.any(Z != 0, axis=-1))[0] + if marked.size == 0: continue fig, ax = plt.subplots() ax.set_xlabel(r"Field line $\zeta$") @@ -843,7 +906,7 @@ def _plot(Z, V, title_id=""): ax.set_title( f"Interpolation of {title_id} to quadrature points. Index {p},{s}." ) - for i in is_quad_point_set: + for i in marked: ax.plot(Z[p, s, i], V[p, s, i], marker="o") fig.text( 0.01, @@ -878,28 +941,27 @@ def _check_interpolation(Z, f, B_sup_z, B, B_z_ra, inner_product, plot): Whether to plot stuff. """ - is_not_quad_point = jnp.isnan(Z) - # We want quantities to evaluate as finite only at quadrature points - # for the integrals with boundaries at valid bounce points. + assert jnp.isfinite(Z).all(), "NaN interpolation point." + # Integrals that we should be computing. + marked = jnp.any(Z != 0, axis=-1) + goal = jnp.sum(marked) + msg = "Interpolation failed." - assert jnp.all(jnp.isfinite(B_sup_z) != is_not_quad_point), msg - assert jnp.all(jnp.isfinite(B) != is_not_quad_point), msg - assert jnp.all(jnp.isfinite(B_z_ra)), msg + assert jnp.isfinite(B_z_ra).all(), msg + assert goal == jnp.sum(marked & jnp.isfinite(jnp.sum(B_sup_z, axis=-1))), msg + assert goal == jnp.sum(marked & jnp.isfinite(jnp.sum(B, axis=-1))), msg for f_i in f: - assert jnp.all(jnp.isfinite(f_i) != is_not_quad_point), msg + assert goal == jnp.sum(marked & jnp.isfinite(jnp.sum(f_i, axis=-1))), msg msg = "|B| has vanished, violating the hairy ball theorem." assert not jnp.isclose(B, 0).any(), msg assert not jnp.isclose(B_sup_z, 0).any(), msg - quad_resolution = Z.shape[-1] - # Number of integrals that we should be computing. - goal = jnp.sum(1 - is_not_quad_point) // quad_resolution - # Number of integrals that were actually computed. - actual = jnp.isfinite(inner_product).sum() + # Number of those integrals that were computed. + actual = jnp.sum(marked & jnp.isfinite(inner_product)) assert goal == actual, ( - f"Lost {goal - actual} integrals " - "from floating point or spline approximation error." + f"Lost {goal - actual} integrals from NaN generation in the integrand. This " + "can be caused by floating point error or a poor choice of quadrature nodes." ) if plot: _plot(Z, B, title_id=r"$\vert B \vert$") @@ -963,21 +1025,13 @@ def _interpolate_and_integrate( b_sup_z = _interp1d_vec(Z, knots, B_sup_z / B, method=method).reshape(shape) B = _interp1d_vec_with_df(Z, knots, B, B_z_ra, method=method_B).reshape(shape) pitch = jnp.expand_dims(pitch, axis=(2, 3) if len(shape) == 4 else 2) - # Assuming that the integrand is a well-behaved function of some interpolation - # points Z, it should evaluate as NaN only if Z is NaN. This condition needs to be - # enforced explicitly due to floating point and interpolation error. In the context - # of bounce integrals, the √(1 − λ |B|) terms necessitate this as interpolation - # error in |B| may yield λ|B| > 1 at quadrature points between bounce points. Don't - # suppress inf as that indicates catastrophic floating point error. 
- inner_product = jnp.dot( - jnp.nan_to_num(integrand(*f, B=B, pitch=pitch), posinf=jnp.inf, neginf=-jnp.inf) - / b_sup_z, - w, - ) + inner_product = jnp.dot(integrand(*f, B=B, pitch=pitch) / b_sup_z, w) + if check: _check_interpolation( Z.reshape(shape), f, b_sup_z, B, B_z_ra, inner_product, plot ) + return inner_product @@ -1002,14 +1056,22 @@ def _bounce_quadrature( Parameters ---------- - bp1, bp2 : jnp.ndarray, jnp.ndarray + bp1 : jnp.ndarray Shape (P, S, bp1.shape[-1]). The field line-following ζ coordinates of bounce points for a given pitch along a field line. The pairs ``bp1[i,j,k]`` and ``bp2[i,j,k]`` form left and right integration boundaries, respectively, for the bounce integrals. - x, w : jnp.ndarray, jnp.ndarray + bp2 : jnp.ndarray + Shape (P, S, bp1.shape[-1]). + The field line-following ζ coordinates of bounce points for a given pitch along + a field line. The pairs ``bp1[i,j,k]`` and ``bp2[i,j,k]`` form left and right + integration boundaries, respectively, for the bounce integrals. + x : jnp.ndarray + Shape (w.size, ). + Quadrature points in [-1, 1]. + w : jnp.ndarray Shape (w.size, ). - Quadrature points in [-1, 1] and weights. + Quadrature weights. integrand : callable The composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the diff --git a/desc/grid.py b/desc/grid.py index c637f65329..b9734f4fa6 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -1814,8 +1814,13 @@ def _periodic_spacing(x, period=2 * jnp.pi, sort=False, jnp=jnp): x = jnp.sort(x, axis=0) # choose dx to be half the distance between its neighbors if x.size > 1: - dx_0 = x[1] + (period - x[-1]) % period - dx_1 = x[0] + (period - x[-2]) % period + if np.isfinite(period): + dx_0 = x[1] + (period - x[-1]) % period + dx_1 = x[0] + (period - x[-2]) % period + else: + # just set to 0 to stop nan gradient, even though above gives expected value + dx_0 = 0 + dx_1 = 0 if x.size == 2: # then dx[0] == period and dx[-1] == 0, so fix this dx_1 = dx_0 diff --git a/tests/test_axis_limits.py b/tests/test_axis_limits.py index 14efc4ffaa..6ae092e029 100644 --- a/tests/test_axis_limits.py +++ b/tests/test_axis_limits.py @@ -61,7 +61,6 @@ "gbdrift", "cvdrift", "grad(alpha)", - "cvdrift0", "|e^helical|", "|grad(theta)|", " Redl", # may not exist for all configurations @@ -93,7 +92,7 @@ "K_vc", # only defined on surface "iota_num_rrr", "iota_den_rrr", - "cvdrift0", + "g^pa", # will need to refactor dependencies to avoid nan in AD } @@ -132,6 +131,14 @@ def _skip_this(eq, name): or (eq.anisotropy is None and "beta_a" in name) or (eq.pressure is not None and " Redl" in name) or (eq.current is None and "iota_num" in name) + # These quantities require a coordinate mapping to compute and special grids, so + # it's not economical to test their axis limits here. Instead, a grid that + # includes the axis should be used in existing unit tests for these quantities. 
+ or bool( + data_index["desc.equilibrium.equilibrium.Equilibrium"][name][ + "source_grid_requirement" + ] + ) ) @@ -377,3 +384,4 @@ def test_reverse_mode_ad_axis(name): obj.build(verbose=0) g = obj.grad(obj.x()) assert not np.any(np.isnan(g)) + print(np.count_nonzero(g), name) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index c889e54486..61771f08c3 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -14,6 +14,8 @@ from desc.backend import flatnonzero, jnp from desc.compute.bounce_integral import ( + _composite_linspace, + _filter_nonzero_measure, _filter_not_nan, _poly_der, _poly_root, @@ -23,7 +25,6 @@ automorphism_sin, bounce_integral, bounce_points, - composite_linspace, get_extrema, get_pitch, grad_affine_bijection, @@ -149,7 +150,7 @@ def test_poly_root(): for j in range(c.shape[0]): unique_roots = np.unique(np.roots(c[j])) root_filter = _filter_not_nan(root[j]) - assert root_filter.size == unique_roots.size + assert root_filter.size == unique_roots.size, j np.testing.assert_allclose( actual=root_filter, desired=unique_roots, @@ -234,9 +235,7 @@ def test_composite_linspace(): B_min_tz = np.array([0.1, 0.2]) B_max_tz = np.array([1, 3]) breaks = np.linspace(B_min_tz, B_max_tz, num=5) - b = composite_linspace(breaks, num=3) - print(breaks) - print(b) + b = _composite_linspace(breaks, num=3) for i in range(breaks.shape[0]): for j in range(breaks.shape[1]): assert only1(np.isclose(breaks[i, j], b[:, j]).tolist()) @@ -251,13 +250,11 @@ def test_bp1_first(): end = 6 * np.pi knots = np.linspace(start, end, 5) B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots)) - pitch = 2 - bp1, bp2 = bounce_points( - pitch, knots, B.c, B.derivative().c, check=True, plot=False - ) - bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) - assert bp1.size and bp2.size + pitch = 2.0 intersect = B.solve(1 / pitch, extrapolate=False) + bp1, bp2 = bounce_points(pitch, knots, B.c, B.derivative().c, check=True) + bp1, bp2 = _filter_nonzero_measure(bp1, bp2) + assert bp1.size and bp2.size np.testing.assert_allclose(bp1, intersect[0::2]) np.testing.assert_allclose(bp2, intersect[1::2]) @@ -266,14 +263,13 @@ def test_bp2_first(): end = -start k = np.linspace(start, end, 5) B = CubicHermiteSpline(k, np.cos(k), -np.sin(k)) - pitch = 2 - bp1, bp2 = bounce_points( - pitch, k, B.c, B.derivative().c, check=True, plot=False - ) - bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) - assert bp1.size and bp2.size + pitch = 2.0 intersect = B.solve(1 / pitch, extrapolate=False) - np.testing.assert_allclose(bp1, intersect[1::2]) + bp1, bp2 = bounce_points(pitch, k, B.c, B.derivative().c, check=True) + bp1, bp2 = _filter_nonzero_measure(bp1, bp2) + assert bp1.size and bp2.size + # Don't include intersect[-1] for now as it doesn't have a paired bp2. 
+ np.testing.assert_allclose(bp1, intersect[1:-1:2]) np.testing.assert_allclose(bp2, intersect[0::2][1:]) def test_bp1_before_extrema(): @@ -285,8 +281,8 @@ def test_bp1_before_extrema(): ) B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[3] + 1e-13 - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True, plot=False) - bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) + bp1, bp2 = _filter_nonzero_measure(bp1, bp2) assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1[1], 1.982767, rtol=1e-6) @@ -306,14 +302,14 @@ def test_bp2_before_extrema(): ) B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True, plot=False) - bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) + bp1, bp2 = _filter_nonzero_measure(bp1, bp2) assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1, intersect[[0, -2]]) np.testing.assert_allclose(bp2, intersect[[1, -1]]) - def test_extrema_first_and_before_bp1(plot=False): + def test_extrema_first_and_before_bp1(): start = -1.2 * np.pi end = -2 * start k = np.linspace(start, end, 7) @@ -327,9 +323,8 @@ def test_extrema_first_and_before_bp1(plot=False): bp1, bp2 = bounce_points( pitch, k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], check=True, plot=False ) - if plot: - plot_field_line(B, pitch, bp1, bp2, start=k[2]) - bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + plot_field_line(B, pitch, bp1, bp2, start=k[2]) + bp1, bp2 = _filter_nonzero_measure(bp1, bp2) assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1[0], 0.835319, rtol=1e-6) @@ -360,8 +355,8 @@ def test_extrema_first_and_before_bp2(): # value theorem holds for the continuous spline, so when fed these sequence # of roots, the correct action is to ignore the first green root since # otherwise the interior of the bounce points would be hills and not valleys. - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True, plot=False) - bp1, bp2 = map(_filter_not_nan, (bp1, bp2)) + bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) + bp1, bp2 = _filter_nonzero_measure(bp1, bp2) assert bp1.size and bp2.size # Our routine correctly detects intersection, while scipy, jnp.root fails. 
intersect = B.solve(1 / pitch, extrapolate=False) @@ -443,16 +438,15 @@ def integrand(B, pitch): bounce_integrate, _ = bounce_integral( B, B, B_z_ra, knots, quad=tanh_sinh(40), automorphism=None, check=True ) - tanh_sinh_vanilla = _filter_not_nan(bounce_integrate(integrand, [], pitch)) - assert tanh_sinh_vanilla.size == 1 - np.testing.assert_allclose(tanh_sinh_vanilla, truth, rtol=rtol) - + tanh_sinh_vanilla = bounce_integrate(integrand, [], pitch) + assert np.count_nonzero(tanh_sinh_vanilla) == 1 + np.testing.assert_allclose(np.sum(tanh_sinh_vanilla), truth, rtol=rtol) bounce_integrate, _ = bounce_integral( B, B, B_z_ra, knots, quad=leggauss(25), check=True ) - leg_gauss_sin = _filter_not_nan(bounce_integrate(integrand, [], pitch, batch=False)) - assert leg_gauss_sin.size == 1 - np.testing.assert_allclose(leg_gauss_sin, truth, rtol=rtol) + leg_gauss_sin = bounce_integrate(integrand, [], pitch, batch=False) + assert np.count_nonzero(tanh_sinh_vanilla) == 1 + np.testing.assert_allclose(np.sum(leg_gauss_sin), truth, rtol=rtol) @pytest.mark.unit @@ -460,22 +454,24 @@ def test_bounce_integral_checks(): """Test that all the internal correctness checks pass for real example.""" def numerator(g_zz, B, pitch): - f = (1 - pitch * B) * g_zz + f = (1 - pitch * B / 2) * g_zz + # You may need to clip and safediv to avoid nan gradient. return f / jnp.sqrt(1 - pitch * B) def denominator(B, pitch): - return jnp.reciprocal(jnp.sqrt(1 - pitch * B)) + # You may need to clip and safediv to avoid nan gradient. + return 1 / jnp.sqrt(1 - pitch * B) # Suppose we want to compute a bounce average of the function - # f(ℓ) = (1 − λ |B|) * g_zz, where g_zz is the squared norm of the + # f(ℓ) = (1 − λ|B|/2) * g_zz, where g_zz is the squared norm of the # toroidal basis vector on some set of field lines specified by (ρ, α) # coordinates. This is defined as - # (∫ f(ℓ) / √(1 − λ |B|) dℓ) / (∫ 1 / √(1 − λ |B|) dℓ) + # [∫ f(ℓ) / √(1 − λ|B|) dℓ] / [∫ 1 / √(1 − λ|B|) dℓ] eq = get("HELIOTRON") # Clebsch-Type field-line coordinates ρ, α, ζ. - rho = np.linspace(1e-12, 1, 6) + rho = np.linspace(0.1, 1, 6) alpha = np.array([0]) - knots = np.linspace(-2 * np.pi, 2 * np.pi, 20) + knots = np.linspace(-2 * np.pi, 2 * np.pi, 200) grid = rtz_grid( eq, rho, alpha, knots, coordinates="raz", period=(np.inf, 2 * np.pi, np.inf) ) @@ -488,26 +484,28 @@ def denominator(B, pitch): data["|B|_z|r,a"], knots, check=True, + plot=False, quad=leggauss(3), # not checking quadrature accuracy in this test ) pitch = get_pitch( grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 ) - # To see if the knot density was sufficient to reconstruct the field line - # one can plot the field line by uncommenting the following line. + # You can also plot the field line by uncommenting the following line. + # Useful to see if the knot density was sufficient to reconstruct the field line. # _, _ = bounce_points(pitch, **spline, check=True, num=50000) # noqa: E800 num = bounce_integrate(numerator, data["g_zz"], pitch) den = bounce_integrate(denominator, [], pitch) avg = num / den - # Sum all bounce integrals across field line + # Sum all bounce integrals across each particular field line. avg = np.nansum(avg, axis=-1) - # Group the data by field line. + assert np.count_nonzero(avg) + # Split the resulting data by field line. 
avg = avg.reshape(pitch.shape[0], rho.size, alpha.size) - # The mean bounce average stored at index i, j + # The sum stored at index i, j i, j = 0, 0 print(avg[:, i, j]) - # is the mean bounce average among wells along the field line with nodes + # is the summed bounce average among wells along the field line with nodes # given in Clebsch-Type field-line coordinates ρ, α, ζ raz_grid = grid.source_grid nodes = raz_grid.nodes.reshape(rho.size, alpha.size, -1, 3) @@ -748,8 +746,8 @@ def integrand_den(B, pitch): pitch=pitch[:, np.newaxis], ) - drift_numerical_num = np.squeeze(_filter_not_nan(drift_numerical_num)) - drift_numerical_den = np.squeeze(_filter_not_nan(drift_numerical_den)) + drift_numerical_num = np.squeeze(drift_numerical_num[drift_numerical_num != 0]) + drift_numerical_den = np.squeeze(drift_numerical_den[drift_numerical_den != 0]) drift_numerical = drift_numerical_num / drift_numerical_den msg = "There should be one bounce integral per pitch in this example." assert drift_numerical.size == drift_analytic.size, msg From 3b5e9f936499fe68cf04df29b9fe6b123edef50c Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 22 Jun 2024 14:47:44 -0500 Subject: [PATCH 182/241] Add test for finite nonzero derivative --- desc/compute/bounce_integral.py | 10 ++++++---- tests/test_bounce_integral.py | 8 ++++++++ 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 3f41c446e6..735830709c 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -25,7 +25,7 @@ def take_mask(a, mask, size=None, fill_value=None): size : int Elements of ``a`` at the first size True indices of ``mask`` will be returned. If there are fewer elements than size indicates, the returned array will be - padded with fill_value. Defaults to ``mask.size``. + padded with ``fill_value``. The size default is ``mask.size``. fill_value : Any When there are fewer than the indicated number of elements, the remaining elements will be filled with ``fill_value``. Defaults to NaN for inexact types, @@ -135,10 +135,10 @@ def _root_cubic(a, b, c, d, sentinel, eps, distinct): def irreducible(Q, R, b, mask): # Three irrational real roots. - theta = jnp.arccos(safediv(R, jnp.sqrt(jnp.where(mask, Q**3, R**2 + 1)))) + theta = jnp.arccos(R / jnp.sqrt(jnp.where(mask, Q**3, R**2 + 1))) return jnp.moveaxis( -2 - * jnp.sqrt(jnp.abs(Q)) + * jnp.sqrt(Q) * jnp.stack( [ jnp.cos(theta / 3), @@ -166,7 +166,9 @@ def root(b, c, d): R = (2 * b**3 - 9 * b * c + 27 * d) / 54 mask = R**2 < Q**3 return jnp.where( - mask[..., jnp.newaxis], irreducible(Q, R, b, mask), reducible(Q, R, b) + mask[..., jnp.newaxis], + irreducible(jnp.abs(Q), R, b, mask), + reducible(Q, R, b), ) return jnp.where( diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 61771f08c3..bb3b7cf4fa 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -5,6 +5,7 @@ import numpy as np import pytest +from jax import grad from matplotlib import pyplot as plt from orthax.legendre import leggauss from scipy import integrate @@ -756,4 +757,11 @@ def integrand_den(B, pitch): fig, ax = plt.subplots() ax.plot(1 / pitch, drift_analytic) ax.plot(1 / pitch, drift_numerical) + + # Test if differentiable. 
+ def dummy_fun(pitch): + return jnp.sum(bounce_integrate(integrand_num, [cvdrift, gbdrift], pitch)) + + assert np.isclose(grad(dummy_fun)(1.0), 650, rtol=1e-3) + return fig From 390e78215990c1086b6c1707d2815d2710de92dc Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 24 Jun 2024 23:02:24 -0500 Subject: [PATCH 183/241] move changes from ripple to bounce (make some functions private) --- desc/compute/bounce_integral.py | 98 +++++++++++---------------------- tests/test_bounce_integral.py | 8 +-- 2 files changed, 37 insertions(+), 69 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 735830709c..d4e39f3844 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -13,7 +13,7 @@ @partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={2, 3}) -def take_mask(a, mask, size=None, fill_value=None): +def _take_mask(a, mask, size=None, fill_value=None): """JIT compilable method to return ``a[mask][:size]`` padded by ``fill_value``. Parameters @@ -53,10 +53,11 @@ def take_mask(a, mask, size=None, fill_value=None): # use for debugging and testing -def _filter_not_nan(a): +def _filter_not_nan(a, check=False): """Filter out nan from ``a`` while asserting nan is padded at right.""" is_nan = np.isnan(a) - assert np.array_equal(is_nan, np.sort(is_nan, axis=-1)) + if check: + assert np.array_equal(is_nan, np.sort(is_nan, axis=-1)) return a[~is_nan] @@ -67,42 +68,10 @@ def _filter_nonzero_measure(bp1, bp2): return bp1[mask], bp2[mask] -def _filter_real(a, a_min=-jnp.inf, a_max=jnp.inf, sentinel=jnp.nan, eps=0): - """Keep real values inside [``a_min``, ``a_max``] and set others to nan. - - Parameters - ---------- - a : jnp.ndarray - a_min : jnp.ndarray - Minimum value to keep real values between. Should broadcast with ``a``. - a_max : jnp.ndarray - Maximum value to keep real values between. Should broadcast with ``a``. - sentinel : float - Value with which to pad array in place of filtered elements. - eps : float - Absolute tolerance with which to consider value as zero. - - Returns - ------- - result : jnp.ndarray - The real values of ``a`` in [``a_min``, ``a_max``]; others set to nan. - - """ - if a_min is None: - a_min = -jnp.inf - if a_max is None: - a_max = jnp.inf - return jnp.where( - (jnp.abs(jnp.imag(a)) <= eps) & (a_min <= a) & (a <= a_max), - jnp.real(a), - sentinel, - ) - - -def _sentinel_concat(r, sentinel, num=1): - # Concat sentinel num times to r on last axis. +def _sentinel_append(r, sentinel, num=1): + """Concat ``sentinel`` ``num`` times to ``r`` on last axis.""" sent = jnp.broadcast_to(sentinel, (*r.shape[:-1], num)) - return jnp.concatenate([r, sent], axis=-1) + return jnp.append(r, sent, axis=-1) def _root_linear(a, b, sentinel, eps, distinct=False): @@ -156,7 +125,7 @@ def reducible(Q, R, b): A = -jnp.sign(R) * (jnp.abs(R) + jnp.sqrt(jnp.abs(R**2 - Q**3))) ** (1 / 3) B = safediv(Q, A) r1 = (A + B) - b / 3 - return _sentinel_concat(r1[..., jnp.newaxis], sentinel, num=2) + return _sentinel_append(r1[..., jnp.newaxis], sentinel, num=2) def root(b, c, d): b = safediv(b, a) @@ -174,7 +143,7 @@ def root(b, c, d): return jnp.where( # Tests catch failure here if eps < 1e-12 for 64 bit jax. 
jnp.expand_dims(jnp.abs(a) <= eps, axis=-1), - _sentinel_concat(_root_quadratic(b, c, d, sentinel, eps, distinct), sentinel), + _sentinel_append(_root_quadratic(b, c, d, sentinel, eps, distinct), sentinel), root(b, c, d), ) @@ -237,19 +206,20 @@ def _poly_root( r = func[c.shape[0]](*c[:-1], c[-1] - k, sentinel, eps, distinct) distinct = distinct and c.shape[0] > 3 else: - warnif(not np.isnan(sentinel), msg="This may not prevent an nan gradient.") # Compute from eigenvalues of polynomial companion matrix. c_n = c[-1] - k c = [jnp.broadcast_to(c_i, c_n.shape) for c_i in c[:-1]] c.append(c_n) c = jnp.stack(c, axis=-1) - r = _roots(c) + r = jnp.nan_to_num(_roots(c), nan=sentinel) if get_only_real_roots: - if a_min is not None: - a_min = a_min[..., jnp.newaxis] - if a_max is not None: - a_max = a_max[..., jnp.newaxis] - r = _filter_real(r, a_min, a_max, sentinel, eps) + a_min = -jnp.inf if a_min is None else a_min[..., jnp.newaxis] + a_max = +jnp.inf if a_max is None else a_max[..., jnp.newaxis] + r = jnp.where( + (jnp.abs(jnp.imag(r)) <= eps) & (a_min <= r) & (r <= a_max), + jnp.real(r), + sentinel, + ) if sort or distinct: r = jnp.sort(r, axis=-1) @@ -461,9 +431,9 @@ def _check_bounce_points(bp1, bp2, sentinel, pitch, knots, B_c, plot, **kwargs): B_mid = B((bp1[p, s] + bp2[p, s]) / 2) err_3 = jnp.any(B_mid > 1 / pitch[p, s] + eps) if err_1[p, s] or err_2[p, s] or err_3: - bp1_p, bp2_p, B_mid = map( - _filter_not_nan, (bp1[p, s], bp2[p, s], B_mid) - ) + bp1_p = _filter_not_nan(bp1[p, s], check=True) + bp2_p = _filter_not_nan(bp2[p, s], check=True) + B_mid = _filter_not_nan(B_mid, check=True) if plot: plot_field_line( B, pitch[p, s], bp1_p, bp2_p, title_id=f"{p},{s}", **kwargs @@ -599,13 +569,13 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True, **kwargs) is_intersect = intersect.reshape(P, S, -1) >= 0 B_z_ra = _poly_val(x=intersect, c=B_z_ra_c[..., jnp.newaxis]).reshape(P, S, -1) # Gather intersects along a field line to be contiguous. - B_z_ra = take_mask(B_z_ra, is_intersect, fill_value=0) + B_z_ra = _take_mask(B_z_ra, is_intersect, fill_value=0) sentinel = knots[0] - 1 # Transform out of local power basis expansion. intersect = (intersect + knots[:-1, jnp.newaxis]).reshape(P, S, -1) # Gather intersects along a field line to be contiguous, followed by some sentinel. - intersect = take_mask(intersect, is_intersect, fill_value=sentinel) + intersect = _take_mask(intersect, is_intersect, fill_value=sentinel) is_intersect = intersect > sentinel is_bp1 = (B_z_ra <= 0) & is_intersect is_bp2 = (B_z_ra >= 0) & is_intersect @@ -622,8 +592,8 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True, **kwargs) ) is_bp2 = put(is_bp2, Index[..., 0], edge_case) # Get ζ values of bounce points from the masks. - bp1 = take_mask(intersect, is_bp1, fill_value=sentinel) - bp2 = take_mask(intersect, is_bp2, fill_value=sentinel) + bp1 = _take_mask(intersect, is_bp1, fill_value=sentinel) + bp2 = _take_mask(intersect, is_bp2, fill_value=sentinel) # The pairs bp1[i, j, k] and bp2[i, j, k] are boundaries of an integral only # if bp1[i, j, k] <= bp2[i, j, k]. For correctness of the algorithm, it is # required that the first intersect satisfies non-positive derivative. 
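(Aside: the contiguous gather used just above in bounce_points is _take_mask, whose documented contract is a[mask][:size] padded with fill_value, returned in a fixed shape that JIT can handle. A minimal JAX sketch of those semantics with illustrative names, not the library implementation:)

import jax
import jax.numpy as jnp

@jax.jit
def take_mask_sketch(a, mask):
    size = a.shape[-1]  # static size, so the output shape is known at trace time
    idx = jnp.flatnonzero(mask, size=size, fill_value=size)   # pad with an out-of-range index
    return jnp.take(a, idx, mode="fill", fill_value=jnp.nan)  # out-of-range -> NaN padding

a = jnp.array([1.0, 2.0, 3.0, 4.0])
mask = jnp.array([True, False, True, False])
print(take_mask_sketch(a, mask))  # [ 1.  3. nan nan]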
Now, @@ -667,9 +637,9 @@ def _composite_linspace(x, num): """ x = jnp.atleast_1d(x) - pts = jnp.linspace(x[:-1, ...], x[1:, ...], num + 1, endpoint=False) + pts = jnp.linspace(x[:-1], x[1:], num + 1, endpoint=False) pts = jnp.moveaxis(pts, source=0, destination=1).reshape(-1, *x.shape[1:]) - pts = jnp.append(pts, x[jnp.newaxis, -1, ...], axis=0) + pts = jnp.append(pts, x[jnp.newaxis, -1], axis=0) assert pts.shape == ((x.shape[0] - 1) * num + x.shape[0], *x.shape[1:]) return pts @@ -830,7 +800,7 @@ def automorphism_sin(x, s=0, m=10): Points to transform. s : float Strength of derivative suppression, s ∈ [0, 1]. - m : int + m : float Number of machine epsilons used for floating point error buffer. Returns @@ -872,7 +842,7 @@ def tanh_sinh(deg, m=10): ---------- deg: int Number of quadrature points. - m : int + m : float Number of machine epsilons used for floating point error buffer. Larger implies less floating point error, but increases the minimum achievable error. @@ -920,7 +890,7 @@ def _plot(Z, V, title_id=""): plt.show() -def _check_interpolation(Z, f, B_sup_z, B, B_z_ra, inner_product, plot): +def _check_interp(Z, f, B_sup_z, B, B_z_ra, inner_product, plot): """Check for floating point errors. Parameters @@ -1016,23 +986,21 @@ def _interpolate_and_integrate( assert Z.shape[-1] == w.size assert knots.size == B.shape[-1] assert B_sup_z.shape == B.shape == B_z_ra.shape + pitch = jnp.expand_dims(pitch, axis=(2, 3) if (Z.ndim == 4) else 2) + shape = Z.shape + Z = Z.reshape(Z.shape[0], Z.shape[1], -1) # Spline the integrand so that we can evaluate it at quadrature points without # expensive coordinate mappings and root finding. Spline each function separately so # that the singularity near the bounce points can be captured more accurately than # can be by any polynomial. - shape = Z.shape - Z = Z.reshape(Z.shape[0], Z.shape[1], -1) f = [_interp1d_vec(Z, knots, f_i, method=method).reshape(shape) for f_i in f] # TODO: Pass in derivative and use method_B. 
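(Aside: tanh_sinh above supplies a double-exponential rule because the bounce integrand behaves like 1/√(1 − λ|B|) at its endpoints, and tanh-sinh nodes cluster toward ±1 so such integrable singularities are resolved. A standalone sketch of the rule on [-1, 1]; the truncation bound t_max is an arbitrary illustrative choice rather than the machine-epsilon buffer controlled by m:)

import numpy as np

def tanh_sinh_sketch(deg, t_max=3.0):
    t = np.linspace(-t_max, t_max, deg)
    dt = t[1] - t[0]
    arg = 0.5 * np.pi * np.sinh(t)
    x = np.tanh(arg)                                       # nodes, clustered toward +-1
    w = 0.5 * np.pi * np.cosh(t) / np.cosh(arg) ** 2 * dt  # weights
    return x, w

x, w = tanh_sinh_sketch(41)
# Endpoint-singular test integrand: the integral of 1/sqrt(1 - x^2) over [-1, 1] is pi.
assert np.isclose(np.sum(w / np.sqrt(1 - x**2)), np.pi, rtol=1e-6)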
b_sup_z = _interp1d_vec(Z, knots, B_sup_z / B, method=method).reshape(shape) B = _interp1d_vec_with_df(Z, knots, B, B_z_ra, method=method_B).reshape(shape) - pitch = jnp.expand_dims(pitch, axis=(2, 3) if len(shape) == 4 else 2) inner_product = jnp.dot(integrand(*f, B=B, pitch=pitch) / b_sup_z, w) if check: - _check_interpolation( - Z.reshape(shape), f, b_sup_z, B, B_z_ra, inner_product, plot - ) + _check_interp(Z.reshape(shape), f, b_sup_z, B, B_z_ra, inner_product, plot) return inner_product diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index bb3b7cf4fa..a87d13774d 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -21,6 +21,7 @@ _poly_der, _poly_root, _poly_val, + _take_mask, affine_bijection, automorphism_arcsin, automorphism_sin, @@ -32,7 +33,6 @@ grad_automorphism_arcsin, grad_automorphism_sin, plot_field_line, - take_mask, tanh_sinh, ) from desc.equilibrium import Equilibrium @@ -64,7 +64,7 @@ def test_mask_operations(): a = np.random.rand(rows, cols) nan_idx = np.random.choice(rows * cols, size=(rows * cols) // 2, replace=False) a.ravel()[nan_idx] = np.nan - taken = take_mask(a, ~np.isnan(a)) + taken = _take_mask(a, ~np.isnan(a)) last = _last_value(taken) for i in range(rows): desired = a[i, ~np.isnan(a[i])] @@ -150,7 +150,7 @@ def test_poly_root(): root = _poly_root(c.T, sort=True, distinct=True) for j in range(c.shape[0]): unique_roots = np.unique(np.roots(c[j])) - root_filter = _filter_not_nan(root[j]) + root_filter = _filter_not_nan(root[j], check=True) assert root_filter.size == unique_roots.size, j np.testing.assert_allclose( actual=root_filter, @@ -158,7 +158,7 @@ def test_poly_root(): err_msg=str(j), ) c = np.array([0, 1, -1, -8, 12]) - root = _filter_not_nan(_poly_root(c, sort=True, distinct=True)) + root = _filter_not_nan(_poly_root(c, sort=True, distinct=True), check=True) unique_root = np.unique(np.roots(c)) assert root.size == unique_root.size np.testing.assert_allclose(root, unique_root) From fd1181622ea23391e1182fce3f2fbc82d009f00e Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 27 Jun 2024 23:51:57 -0500 Subject: [PATCH 184/241] Fix imports after merge --- desc/compute/bounce_integral.py | 8 +++----- tests/test_bounce_integral.py | 6 +++--- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index d4e39f3844..d1e764bf5d 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -1181,7 +1181,7 @@ def bounce_integral( The quantities ``B_sup_z``, ``B``, ``B_z_ra``, and those in ``f`` supplied to the returned method must be separable into data evaluated along particular field lines via ``.reshape(S,knots.size)``. One way to satisfy this is to compute stuff on the - grid returned from the method ``desc.equilibrium.coords.rtz_grid``. See + grid returned from the method ``desc.equilibrium.coords.get_rtz_grid``. See ``tests.test_bounce_integral.test_bounce_integral_checks`` for example use. Parameters @@ -1255,11 +1255,9 @@ def bounce_integral( ) # Strictly increasing zeta knots enforces dζ > 0. # To retain dℓ = (|B|/B^ζ) dζ > 0 after fixing dζ > 0, we require B^ζ = B⋅∇ζ > 0. - # This is equivalent to changing the sign of ∇ζ (or [∂/∂ζ]|ρ,a). + # This is equivalent to changing the sign of ∇ζ (or [∂ℓ/∂ζ]|ρ,a). # Recall dζ = ∇ζ⋅dR, implying 1 = ∇ζ⋅(e_ζ|ρ,a). Hence, a sign change in ∇ζ - # requires the same sign change in e_ζ|ρ,a to retain the metric identity. 
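(Aside: the core of _interpolate_and_integrate above is "interpolate onto the quadrature points, then take a weighted dot product". A deliberately simplified sketch of that step for a single bounce well, using linear jnp.interp in place of the cubic and Hermite interpolators, a plain Gauss-Legendre map with no automorphism, and illustrative names:)

import jax.numpy as jnp
from numpy.polynomial.legendre import leggauss

def bounce_quad_sketch(f_knots, B_knots, B_sup_z_knots, knots, bp1, bp2, pitch, deg=32):
    # Approximates the integral of f / sqrt(1 - pitch*|B|) dl over one well, with
    # dl = (|B| / B^zeta) dzeta, i.e. the integrand is divided by B^zeta / |B|.
    x, w = leggauss(deg)                    # Gauss-Legendre nodes/weights on [-1, 1]
    zeta = bp1 + (x + 1) / 2 * (bp2 - bp1)  # affine map onto [bp1, bp2]
    f = jnp.interp(zeta, knots, f_knots)
    B = jnp.interp(zeta, knots, B_knots)
    b_sup_z = jnp.interp(zeta, knots, B_sup_z_knots / B_knots)
    integrand = f / jnp.sqrt(jnp.clip(1 - pitch * B, 1e-12, None)) / b_sup_z
    return jnp.dot(integrand, jnp.asarray(w)) * (bp2 - bp1) / 2

knots = jnp.linspace(-jnp.pi, jnp.pi, 200)
B_toy = 1.5 - 0.5 * jnp.cos(knots)  # magnetic well centered at zeta = 0
print(bounce_quad_sketch(jnp.ones_like(knots), B_toy, B_toy, knots, -2.214, 2.214, 1 / 1.8))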
For any - # quantity f, we may write df = ∇f⋅dR, implying ∂f/∂ζ|ρ,α = ∇f ⋅ e_ζ|ρ,a. Hence, - # a sign change in e_ζ|ρ,a requires the same sign change in ∂f/∂ζ|ρ,α. + # requires the same sign change in e_ζ|ρ,a to retain the metric identity. B_sup_z = jnp.abs(B_sup_z) * L_ref / B_ref B = B / B_ref B_z_ra = B_z_ra / B_ref # This is already the correct sign. diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index a87d13774d..bd1f7043b8 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -36,7 +36,7 @@ tanh_sinh, ) from desc.equilibrium import Equilibrium -from desc.equilibrium.coords import rtz_grid +from desc.equilibrium.coords import get_rtz_grid from desc.examples import get from desc.grid import Grid, LinearGrid from desc.utils import only1 @@ -473,7 +473,7 @@ def denominator(B, pitch): rho = np.linspace(0.1, 1, 6) alpha = np.array([0]) knots = np.linspace(-2 * np.pi, 2 * np.pi, 200) - grid = rtz_grid( + grid = get_rtz_grid( eq, rho, alpha, knots, coordinates="raz", period=(np.inf, 2 * np.pi, np.inf) ) data = eq.compute( @@ -615,7 +615,7 @@ def test_drift(): iota = grid_fsa.compress(data["iota"]).item() alpha = 0 zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) - grid = rtz_grid( + grid = get_rtz_grid( eq, rho, alpha, zeta, coordinates="raz", period=(np.inf, 2 * np.pi, np.inf) ) From b9de417e0b4486a3b73d0574efc9c530c355811c Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 1 Jul 2024 01:45:54 -0500 Subject: [PATCH 185/241] Remove unneeded compute funs --- desc/compute/_basis_vectors.py | 20 -------------------- desc/compute/_field.py | 18 ------------------ 2 files changed, 38 deletions(-) diff --git a/desc/compute/_basis_vectors.py b/desc/compute/_basis_vectors.py index 7896debe3f..995e2d165f 100644 --- a/desc/compute/_basis_vectors.py +++ b/desc/compute/_basis_vectors.py @@ -547,26 +547,6 @@ def _e_sup_theta(params, transforms, profiles, data, **kwargs): return data -@register_compute_fun( - name="e^theta_PEST", - label="\\mathbf{e}^{\\theta_{PEST}}", - units="m^{-1}", - units_long="inverse meters", - description="Contravariant straight field line (PEST) poloidal basis vector", - dim=3, - params=[], - transforms={}, - profiles=[], - coordinates="rtz", - data=["e_rho", "e_phi", "sqrt(g)_PEST"], -) -def _e_sup_theta_PEST(params, transforms, profiles, data, **kwargs): - data["e^theta_PEST"] = ( - cross(data["e_phi"], data["e_rho"]).T / data["sqrt(g)_PEST"] - ).T - return data - - @register_compute_fun( name="e^theta*sqrt(g)", label="\\mathbf{e}^{\\theta} \\sqrt{g}", diff --git a/desc/compute/_field.py b/desc/compute/_field.py index f236f19991..030e7b2c69 100644 --- a/desc/compute/_field.py +++ b/desc/compute/_field.py @@ -86,24 +86,6 @@ def _B_sup_theta(params, transforms, profiles, data, **kwargs): return data -@register_compute_fun( - name="B^theta_PEST", - label="B^{\\theta}", - units="T \\cdot m^{-1}", - units_long="Tesla / meter", - description="Contravariant straight field line (PEST) component of magnetic field", - dim=1, - params=[], - transforms={}, - profiles=[], - coordinates="rtz", - data=["B", "e^theta_PEST"], -) -def _B_sup_theta_PEST(params, transforms, profiles, data, **kwargs): - data["B^theta_PEST"] = dot(data["B"], data["e^theta_PEST"]) - return data - - @register_compute_fun( name="B^zeta", label="B^{\\zeta}", From 670ad66b5141de1d4f7941ae3bc0decfc80c6be7 Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 1 Jul 2024 20:26:51 -0500 Subject: [PATCH 186/241] Change label per Rory's request --- 
desc/compute/_field.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/desc/compute/_field.py b/desc/compute/_field.py index 030e7b2c69..80a0e22aa2 100644 --- a/desc/compute/_field.py +++ b/desc/compute/_field.py @@ -2316,7 +2316,7 @@ def _B_mag_alpha(params, transforms, profiles, data, **kwargs): @register_compute_fun( name="|B|_z|r,a", - label="\\(partial_{\\zeta} |\\mathbf{B}|)_{\\rho, \\alpha}", + label="\\(partial_{\\zeta} (|\\mathbf{B}|) |_{\\rho, \\alpha}", units="T", units_long="Tesla", description="Magnitude of magnetic field, derivative along field line", From f07cdae5a871ea5f2b0d27b9b7e48f17be53e49c Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 1 Jul 2024 20:29:38 -0500 Subject: [PATCH 187/241] Fix label --- desc/compute/_field.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/desc/compute/_field.py b/desc/compute/_field.py index 80a0e22aa2..a2239939b9 100644 --- a/desc/compute/_field.py +++ b/desc/compute/_field.py @@ -2297,7 +2297,7 @@ def _B_mag_z(params, transforms, profiles, data, **kwargs): @register_compute_fun( name="|B|_a", - label="\\partial_{\\alpha} |\\mathbf{B}|", + label="\\partial_{\\alpha} (|\\mathbf{B}|) |_{\\rho, \\zeta}", units="T", units_long="Tesla", description="Magnitude of magnetic field, derivative wrt field line angle", @@ -2316,7 +2316,7 @@ def _B_mag_alpha(params, transforms, profiles, data, **kwargs): @register_compute_fun( name="|B|_z|r,a", - label="\\(partial_{\\zeta} (|\\mathbf{B}|) |_{\\rho, \\alpha}", + label="\\partial_{\\zeta} (|\\mathbf{B}|) |_{\\rho, \\alpha}", units="T", units_long="Tesla", description="Magnitude of magnetic field, derivative along field line", From 3322e94ddceaa4ba0cc4dc416a73de92988982b3 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 2 Jul 2024 15:25:39 -0500 Subject: [PATCH 188/241] Remove g^pa per review request --- desc/compute/_metric.py | 24 ------------------------ tests/test_axis_limits.py | 1 - tests/test_bounce_integral.py | 11 +++++++++-- 3 files changed, 9 insertions(+), 27 deletions(-) diff --git a/desc/compute/_metric.py b/desc/compute/_metric.py index 535bedb53f..305a815ad8 100644 --- a/desc/compute/_metric.py +++ b/desc/compute/_metric.py @@ -1340,30 +1340,6 @@ def _g_sup_rt(params, transforms, profiles, data, **kwargs): return data -@register_compute_fun( - name="g^pa", - label="g^{\\psi\\alpha}", - units="Wb \\cdot m^{-2}", - units_long="Webers per square meters", - description="Radial/Poloidal (ψ, α) element of contravariant metric tensor", - dim=1, - params=[], - transforms={"grid": []}, - profiles=[], - coordinates="rtz", - data=["grad(psi)", "grad(alpha)"], - axis_limit_data=["e^rho", "alpha_t", "e^theta*sqrt(g)", "B0"], -) -def _g_sup_pa(params, transforms, profiles, data, **kwargs): - data["g^pa"] = transforms["grid"].replace_at_axis( - dot(data["grad(psi)"], data["grad(alpha)"]), - lambda: dot( - data["e^rho"], (data["alpha_t"] * data["e^theta*sqrt(g)"].T * data["B0"]).T - ), - ) - return data - - @register_compute_fun( name="g^rz", label="g^{\\rho\\zeta}", diff --git a/tests/test_axis_limits.py b/tests/test_axis_limits.py index a83635af16..04040f3687 100644 --- a/tests/test_axis_limits.py +++ b/tests/test_axis_limits.py @@ -92,7 +92,6 @@ "K_vc", # only defined on surface "iota_num_rrr", "iota_den_rrr", - "g^pa", # will need to refactor dependencies to avoid nan in AD } diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index bd1f7043b8..380c62cc65 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py 
@@ -35,6 +35,7 @@ plot_field_line, tanh_sinh, ) +from desc.compute.utils import dot from desc.equilibrium import Equilibrium from desc.equilibrium.coords import get_rtz_grid from desc.examples import get @@ -626,7 +627,8 @@ def test_drift(): "|B|_z|r,a", "cvdrift", "gbdrift", - "g^pa", + "grad(psi)", + "grad(alpha)", "shear", "iota", "psi", @@ -678,7 +680,12 @@ def test_drift(): gbdrift = data["gbdrift"] * normalization dPdrho = np.mean(-0.5 * (cvdrift - gbdrift) * data["|B|"] ** 2) alpha_MHD = -0.5 * dPdrho / data["iota"] ** 2 - gds21 = -np.sign(data["iota"]) * data["shear"] * data["g^pa"] / B_ref + gds21 = ( + -np.sign(data["iota"]) + * data["shear"] + * dot(data["grad(psi)"], data["grad(alpha)"]) + / B_ref + ) gds21_analytic = -data["shear"] * ( data["shear"] * theta_PEST - alpha_MHD / B**4 * np.sin(theta_PEST) ) From c10a59a8a61b995f6aacc2e48fe8c0a1721b6fd7 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 2 Jul 2024 15:29:43 -0500 Subject: [PATCH 189/241] Remove old code --- tests/test_axis_limits.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/test_axis_limits.py b/tests/test_axis_limits.py index 04040f3687..4459595137 100644 --- a/tests/test_axis_limits.py +++ b/tests/test_axis_limits.py @@ -42,7 +42,6 @@ "curvature_k2_zeta", "e^helical", "e^theta", - "e^theta_PEST", "e^theta_r", "e^theta_t", "e^theta_z", @@ -67,7 +66,6 @@ } not_implemented_limits = { # reliant limits will be added to this set automatically - "B^theta_PEST", "D_current", "n_rho_z", "|e_theta x e_zeta|_z", From b00ddc5812701e3673efd5a90a0470283c2efbdf Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 11 Jul 2024 18:33:43 -0400 Subject: [PATCH 190/241] Merging fieldline_compute branch --- tests/inputs/master_compute_data_xyz.pkl | Bin 7718930 -> 7703174 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/tests/inputs/master_compute_data_xyz.pkl b/tests/inputs/master_compute_data_xyz.pkl index 41faf549882321c2449dde4ac15d9de39f7f692f..cc1413097fe5f4abc625880596645d39adea78df 100644
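(Aside: with g^pa removed in the commit above, test_drift now assembles ∇ψ ⋅ ∇α from its constituents via dot. The same quantity is just a row-wise dot product of two compute outputs; a hedged sketch on an example equilibrium, where the grid resolution and the B_ref normalization are illustrative choices rather than the values used in the test:)

import numpy as np
from desc.equilibrium.coords import get_rtz_grid
from desc.examples import get

eq = get("HELIOTRON")
rho, alpha = np.array([0.5]), np.array([0.0])
zeta = np.linspace(-np.pi, np.pi, 65)
grid = get_rtz_grid(
    eq, rho, alpha, zeta, coordinates="raz", period=(np.inf, 2 * np.pi, np.inf)
)
data = eq.compute(["grad(psi)", "grad(alpha)", "shear", "iota"], grid=grid)
# Row-wise dot product, equivalent to dot(data["grad(psi)"], data["grad(alpha)"]).
g_sup_pa = np.sum(data["grad(psi)"] * data["grad(alpha)"], axis=-1)
B_ref = 1.0  # illustrative reference field strength
gds21 = -np.sign(data["iota"]) * data["shear"] * g_sup_pa / B_ref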
zw`Z*Ra$eQY2_!c9uZ=re028wFkL^eMa0?}1v5)ZyH_jY5LB5vO4vpuJejhtXu~3>G}ZS3UYu01X-byJMpBa7dj;=6H)YY+~PWvePyR!*BHpKRp%?hxaJz ze6c9NC-P3hJAT(f_PTGI1MIsYQglz^u07xI&~+;%5uH}Jd&tyWo>)5FEq^dlq_G|S zD|^e@RXf3@=s*$ptcO-&Fuo(>l-rSBU* z(hnh9qPBQjOTy6P`yw2wQPFPtpMlku!7}7&?eb76$HboUh|ka_jZ{9ftiF zM0aG`Fh7H5w37LUOVI?th=uxn~|}Xmq(6R!F+knI9p_;0MEpU&WqaK z!%R*yivngYtQnpCu~y_I8n$bvUAtNgX5x&ZOJ#L<|HAL*$C`=>pY8i_?yb47$RgJz zHXjM8mveq4t=B4xA>R!&x(jfn2R zG?~cE)o*J-q4iQA-`h-V7cZZexEle*ws(c^wPcZTc~9=oGSe7X=G1=`KKcY0Cfixp zt$T}UEpsuiZ+L+cwalNYpMZS&gSDC)sv%|4%W(M%9esy%OvSzz!!PoGn-@(U;q~U9 zk2Eqxzqs8&%jn`+lkef+x%F{f-w$K7thfJhHv2Nz-_PSBnNks?o(x_hGxfz@yg8 zc=StRQ#;Rf2kyJ7f8Oy(7w^cNo~9a=5htp1s}t|N1^0^;DjVve@M51#$#F=8qSNOX z{HI@{ZnA~+aS{@z);LAG$eNEZpg~|leBT3jt<=rjax4IJr2m`Dvw^uL&$33G{ujB?Aupa8#S{=?9LF?}Jz*`mxn|^w|Br z5Ad39C9$pK9rD;*qAUHahb)D@dtBSfaY4*`?HSYCF#M8Vo2)&Y2WzC-)58?AaidGm z=fwI-u>4wBvh?mLdYgT--e+466OsxxODScT_VJ=gYIiQ4%2(esWj_TU?DD8tX$8i&4L3HH&~nXtx^8wVR`rIC~P-5YG})%h1V}hr?+rf zVPN3pH9CifU!gMPsyRuDto2LRf+yhvO7MKRzV3b_$OaEpwRNOJ$u9pLndBTOlpmP6 z%XJxjHm+(M-xF z4jwxH64?Zh9Osn=ayNN<)z@0%XVQ;DPwKapA5QFbWzTX@ZA{&DhGHMFToaavw$fz@4-PfAE_5LV2=&Z*b|%S9@7 zX^U#3I7~Xw;%U(Z+cUN7gdf)9MT2)IMN^tUJgkW$zM&jvQ(L1?3>2V-=)v}DB*F+7 z#9M=0hA94JD9PqPFtH10j6}G_OrDzmed0L)hP=gWvDWk0(&84EHa7)ndybCf$kA{U zWuFBx_jgJ7FeSy9@T+OHec+8=j5NA*gKpK2;{)==({U?6&6UYh68wv6>~H3O6Zj6o zt0Ht+w$z}Xmx|1HM~AWPd52J_QBP86HUQ20Tc>jUy| zCK`1feFG5`^9w}vEVEUfCQNrgL28J(aoub1P0U}abg9BMH*)4aJ&8m^!=wa`?k!Nf z;Re~}SujN2@$^~IOb1-waO3ZM1)3!-%NJgG0b4YGR>x1);WHm)*Zkli$n;wIsWvi= zhujr^aK9Xa?%hMilZ78arBCE))}CRgzM44tS)-PWLcR-*PfOq9Q%Bbp{VQ$AU3N+> zQTi1w1ZUcvc6kBetBh+uGe$7BCw>u{=ioqZ=euP8Ib?e{F_(6R347UZ+Jt0%fXL3E zBcHpz!zR_R)fU4pV7BhD-Cw#a#5QIfHRpE5LyTvx9r3ZaVpmSm=nTLGnV!3SMZ2=0 z#pann*12p{H#SOt)$$zC@suFvMq=ONfvmccgaY_PNw*}#H zdsd~;)2ahHzi4vae*P3JC_O|na+Tevgw4+ZVlS}G@0K{C*B;yyc37tq%OCye=%4pO ztBe!PY-gNNk;(Sk^rIb|Jyx@Pt1lGUA|mGY4BUWy=PU31$li&*nQ6lXZz*8&_pTeXtc3>+PKMT>T2cJN_uwZVdVx#CG4iohAI9!zDr4d5Bf}a0sA#E523p0` z(^A}CprEW&{l*m%G|$do(J;7&^GSY1J>kUCt&o}7JhKHQSi4L3TH0XKR~Z@Zif%Nl ztiN)>YXD^KiV5#k?!>46MVQ^HeF}lsRhAzMP~m==357x>gVn__C9MV%D0w7)oj21R zJ};`X-ZUm(L%*vcbqN8NpjBK}`GR;ltO~Zg?zNnU7X#)xdp6vK{jc6g-o9Rno?cv^ zrWs|BQhR2C`{P@DUv%9M9JSlfUIwDELKfjN?+~S*@#&HETOiYt$2*ieHb$YTkJJ z3F6)Rj;`aZMJmTP^Xcj~*l=U_#-;9UP~_+TQIR;owP5vUFFno$w6b?cK|rh+5Y@z>xhHD_G(R~AxR2VVNt6Y4`A&$f-n@-bN@V#=nB0-N=6e~^o>7C>b9 zr=C43r8u4~P49xcP=3U0qQ-yMSY_;E#HU(b-h2diyQT z$64sFuYCvOGW+A(rJ8X!2MbHVgV)fOMA6(-m5d=}@5ekeo?-FzH>-W!@8Zd$r`tbN zP_SEj-}&k0|6oQiiySxp*cN;gyg*;h2BubW;vJuaVf)Ug#RMKVRM|eT)~Y!cBrcwv zWiQUca=BhkA)aDLRj~f)Sf7Ws?7xokE@i>@t_!?clGzZucJFA&#|PNQ?|Zyt{5lL3 zcT+|6Q!%}o79`bi39d&_l)L%MaT|ZjOM#46U~5fI-=+Dc5ig5JZ)cjcg6uiAKvB1P z410e&a+h8mj4p8rx$38&rStob4=$IWOIE4>GG8e%%6#xzr)+`IR=sLmf}!xTKzRDU zr}kjH-#WrM+6!#YJ9gW7UBguoIlH*cJc#{70?s}cZ-bUwr{eNh4hr#82F?oKgilWW zGub)?g*YLVnkScD0<&EUHkTw@Q22k^`|^OAv-f}ZmTq(-T1kruB~+uu5}gQH#*#JZ zrhTQ2D+ZOp*g}>%*^6vRN^}rTDTOQ{CK1^(_RQFqncsPzyS?AH%V%aj=KEQGpMP}j zbDr~@=RD7I&ikC#`#EpTXTEuqdI{L}yLQn(^E`H~@z8I7oa7JfJTwEpJxCC$^h4`9 zI%BoWBjkf6`?1KgYC~=7W59h%%fx!yDqOet zpK8Z~qB@X^XMTFtrV69JcIVdD<$=M^FRv-+g1}O+xAcd&(I7djPS8?JJ`6BtRpoEv|;ZKz8%K6&E4sQk)fpw@AK?#|lB z*lJ_bvom;izyimH(6^@L_}IOS>qT$i;vnzV2{VT7JF=@j6KLLw8TiYSSkTtu$RpiN zbHHwd7WL|}9$3Y0UvdAJ16Z`U<-1|k2f(&_rCPRq%CW|bpYA_(s|RWEPSy2eUt=#P z&l{IkJOHy673MbgtOifcuNgVa_#Qat-|(S+b16<6zp|!S`zoGMRP6p+NhK~YJn7uq z{=_#}&q-a!Q3nCb=(Bon((AF_pibEbi^(gk+!Gyx?!F?Iyq$x+&5S)a*sQXhlZUU& zE*MwRZ8cfdOr9*+<^!5ENrvKJZ^widyVAe08ma55(ap-ZTC2WB*Fch!*LL2b zlc1n1@7nt6mpJ{=)sv=E8^AZ(`V;L!FM{y*q966E?}2W|YfjW^t_LUMjjzNwljNlK 
z8Z)e~o!f!EHqR^X@SI#Fn`T;=|2Q7#-Y7F7pWXp0<3q$wMMv=D!Mi;t^f?aRyi2`M z{;U+Az3-jcGp!CRu5dOC?DPU3sjqMv9!Or=JU&0LyY~%ns3vZhZTda1aMqa&fo%zH zfA+ce4DFxr8=oz0mb+cTPRZ6UU#BFT!5>`D+CJY>2xf=HI6pa9j~ypB?=&dsHRw4e zzq4oQYdj*j+x(M7WkBcX0HOBaYv5|&i^+S0kHDEz?Td#^3ISFtE?+q{dIGl23m%ic zBMv|5y~}*rnA!O0hp5dh%p;C=J+$`YQJ&Hdf)} z(T8ul!f4#$gW=f2Lx$rW7yGmvT^bGs>Q)q(*e%361l>&*H>?8f4;R$j@$tjilWO`8 zHAx0eU0sYq>JWCDm9zWO^b?@Vv%Thy=P%&hZtpVF;%b3zN_^@48@I>}v+(s>eki;J zddzg|fALB&zTT--*@PE`SU0nI!TJtAfK9t+Y@5|(7v8nG8$Lm@zuwOB82UE35@X#@ z<6BLy0-GcAOl&l7;LF43?NV=b2OMtIWw!2+t9bKwFZVvT$_G~Z)>)Ax*XfjYb2RR7 zJkU8ma?8e@-NDG;23~!$IUyWe%*ksq=kOw&*?L4)@^5jV=b$&iJs-#K)*AVZ`BjFU%>lgCq0QO zIRl<=$(hr@FT(5RsqYM(TZzpoU(7dpRtt7_t@yDrtqhl%?!UhC#YLd`_~XYCGdn!l z(faTP`ykLIye8ps6#1mV@q&b)CO_avk;@GG^%L-cxF4sFKd}&;GHsJ=@3k58wFlWX z`;Y^S+q_$CJZ=ZR{%ZfFgvD{m;NBewcb4aXEO0G1&-MV$d|!FhD`FRz_3Ko>v*Z-m z_e$RJTC-G;^Te&mrY(ioI``Vv?K{tawe5=US$bW>DR2E9!|zrBY_zF%b{TmyQf@Nr z*!|NWymZ>qb~?$pMOAI#fXj#QdCYt8-gXkWc>lh)z3onXET>OGuO3Ccz{=j6*FLxs z0L)kIxiR#YG2o47V8DX2iP+G2QP8W2({M>j6bPD?1X3UN>$*E_6OL|jbCU7dY!F(f zR=WK6O#HYm-%->AIl1C~60`eYB~nU)Mv})ok5^^;T(kXJJ8 zUamS93v~Rmj6_)omyX*0WOdF7kPdt2ohZJ7AKY2CY1I6SzMamya3Mc`>+a-Oam6To^$22JPImY%93({Zbd(+#>v4`Pd!pG)@Aamij#P zCKvJT;%_RCBd_Bd`CT{VudM+yoF?DQ{8*BJHMG9<4Igm;m#pvQ*er2A zPWS6*d&OlLo@L_xae`hUux>R!VrtJa+-Kw zF~9Z_?&sqlmlT!@P@BQ&2i5a{!|A$}ffccMeay=9-R`Z!D|3eV?i-&)E^|C`8{xPO zAF{b2w9`shh)?K| z7oXmL9Zy?fA09vH4j8?A(c{$7kFa6oM7OpVEAh!8b%zqP3UN~(gA>BuxnSx;zokvA zuYhj$J=Z4~BJgyl<=6$!&j1%a9e+#mesQ)<`mgQa?^C^X6Uz1L_gVU%!duE~kD8}m z#a_K;eAF+#0h0Zr@5k5P#v80>l;H=3Al_ny|BlY{z!A+h^gMn6h=S1==`GgXD~FWED(jf#g3T~V_s)G0x>Zy&U^~H z0^0dPwZk*-4FykMHN*FNl;Vhu0gH}wxPpr^d24daEAc&#${P-okKuNyr(Hrf902DE z@2DUDt{&gNWn%qn$ZL>T^}~?oO<$4EGey{E?>GXq;PpuF9-ZTm*Sg1(*W-?-vi8E{ ze9$(3^7vfe)41K<6(YaZ)nI9U+mg|uQ@|HGM|t;1s6LV3b@faFcRg;sFrJ*E?`^Z` z z_O?x@4|rY$<(p^vyR_YhU#@&N%s^`a-VlDONm+FQSYR0;EK1%09J?m|zNGIl?A6k- zx7oa6_#(=?O~L`!D45l{3iK4%&pI1f3l3}VPI@x)KJb4f9(CTM7`L0En-$gJCwx9* zxm~sCWn9qm*|)7HUBHI7uOBxfw~911+J)J~*W-kjUY)9VufdrOg6DN*ukjq4(%&*d z&w!$i2BFCVYrwXrhq4A*JOm@o8#Igy9}ddL*PQOPFo|5}?@`m^XwGc%eV65Vug-+y z45MowTImhPP8n5I2Og{iBXJTr4B5^yMOAt zxo5{g7s zt|R*x?`APglhlGIyFrhZd~+0j$2(h(e}#{X-gsB*@wL^^FIu8Idg{!DYBcg~({p-i zDGn{6UA;)3G>e`V2K#03En(u@rk#;%1%I|))9`YVe~DcCtN%pG+G?cSW`g=vz2-@h z``As#2En|!nijC=(pUj zPLJQ4j}AGCZtAH!Gb$|V)3lbTQ^6e%F&0iy5)inN4pIWh(eEZK$X_zn%yC zMTk1WQ30Youx?g>0Bsv4DpljpYst_pVA4p@LH>5>kKj?F9AO6w>HYQaGqkM*bn_79 zXer1Spx@yj1Lztc(u9ViMF&_dPD=&7A1%ru$&tIqhyv7kBapVI$bnD(_V*I?W&S35 zi~2BskNJpPn7U^QGys>1MCh%zpM^DLsJHkt$qHLD>k^xh0O}H;il&x~qI`Gz7kp`p6 z4pv*q&@ZxDxJy)WokCR78X;H7gOVZ9R0GY76#d5IkFbD#Nn$f-=PA-gnbBPK0Ev{V zv0U;L30YIjC5t8G-f>)VJVQ2wzOzJyyoIo6r>H&D87&gP0pE)bXswj1wwhtnhn3l6 zc&4*OdeHcLQ8VZ~TV(SGoO6)>Ij=aQvf_NO%=9@}5VYedHXl$W6QaFG#OmMVI}Yvd~n zxFe0TG{{zr`Rh1j*^1%2{&MoBIsZXO?$O$wEjz5gF;DjnYntjP=!0R_HK~6$3u; z&h5!Q=EyoKN>?D!(~uAIeu>_KoNc4lYQQNcM4EhaK78>m+7d<8i?jv8)pEt^d8l76 zUwZ-VM%bA=Yr?ma%5ZBwB;8x1GBr^y~h*%0QpR{(*<`48t7QDyj3)3sXWeP-v+w*Q*5|&2#k~ z#8&A!ybZ+$F!!ZjYqVuwl(s-i(a;$3Zi(8Xr~Sk91o{dxUtKoN4lt%Nv@-%j!%W#S zognXclp(a+Hn};fo$o_NqG$}{b*)fkaF{k*@ZhF0!9y4>i-J2*9oTY5)3!(_$k&uD zl5}@Vv<{j3xg={qJEQ(WR(F$FP0SQp1S)!x4<-m^%$oi!+H}l^W8(xq9 zM&Wcc>euKL4VJF&8M|1ZN6RO3LgJoe2qe@vA@Ni~j?N-vR^+~xkL(TO>@Mhh-eiuM zTL~reZ?8d&Y&qy4WahFJnsk1$DSKeuq^!U`Mks)Jk0u-Odr;PI(9=gmNmfL-3Yles zOw*q_$(FPsQmHm~!#Pq%H)ahKGHV%U(mtl!P2`&EYbFpXR*9)X$@FG*oxn%Y+heA( zR|pbHqe^0IP}g1&K#BgsIrJM?*UwRik`Kf~6pLgwX)GF7;}k1W)o>oON( zRaPJz0NrX4>!#G*sxqPhOKgN(W#YVfyZw3>m@2n{?T3Z*hvvDy>Tt_hQ$tB_ED=35 zWn(2G$Z*_X!-ZgTX$UDyM=Hs{+{xnxP>l@*@Vh6zty#}J!VzOV#T||46YI>@af@;X 
z=-Guv0rsuCAAq?V$(k5FELJ)C$f(_6@}}T+jm)}VdHkrMxu25J7`wQ@$}(Tgf(;Qp z*qZfk#u_>`c}`d(^D&*HQJ)$G_L=AJNC=CFSy1DG} zr^uI@1pdLiY0Cbf(eq@kvaXQO-H?vXH03Ekf`nR7<;0``PBtIvsuoTfg=By|RmB+7 z64}`_>W4M?WKhaJ-y6LE73t(b`vDS*|7POc&-p6-A?*`Y9Hx}@BN`Ec4WPbHEl|1vO#!r(y^9NCq6>90^ zuPfk>lrlU`nx-cZrd!K~>?x&pTg#>UQF; zk;%_v6dNAnEl>;p(G+Eb`a28w$lnzm>9pnhr(vO=;c4549*us2j7Zm-{ zm*Y;dMw8u#HM*HOoii=VrR`Xrm#_|1HPwg+8zd~8&RIC!%~|I}z6a+Qf#aAHC?qG) znJ7^dxlx0)qhm0IUcPClD3H)u=Rq0Llj}R5nILO4U~+6^GZsU&>K&rwan&1{eIMz; zcB_;QeQI>f8byYz(eUl68hOc&vwHe)%FBROyf_&DGQdT8{=f1vz?i3pUc3yLp~WYs z?c!r0oer)HJg3I5lKeoG=R(M@TInx4-_Vn4DU$;=8Q_LtmedRBPHu2lvdECLeQ`k0 zR4sbw=3A5fT0l^wmexp8CqV`1DccYhIR&;xzLtT5w1kQ}a=@lSlMe=!sR_Ht+t?F& zK#bqwb44vSJHpdAJx?aPGpa&4`);g$pfWyWi9E(mkJyyXr$#t zmX<<=EYHvM;r|&6eUQ3O*>ZkTIp&4IWoizJLDD@g)roG#X^HN$Xo+GNv=kxLnB`Ow zpyUAR0<+J!n4!e6!Cq|JNGf|Xp{>cZicgT+7JVG||mZfpgjOq@LwyRycM-dEEn_vIA6t5jBciTSyq~q#>K=73fL9m(B{a z(ETrU8a;i`5(STFsTb4yGt-pqv{bz*?HWbAjN#TeYfV@O{8VyC9jXZ{iz19vh(Ls) zNXg`c1w-gFminAG5{ecF_ExqND}2-#$iqN| zf^y`khpGUOg%5$HRt|3*`0Afgnt*`H3zHLEB?>e|i3fwOsHWc*aj z^*;C{`{_9KGm^JsNi4OAtd9d>~60KbI-V_Lz8~a#>hM@ zu(~mu4QhxB{-8uB>z6gm*0i@!)reHuz;?O5ni9}MwODINdMCS0)qO(JCwVY1c!z2Z zC+Jfh?8Ly!d1?}PS@BR@E?FyCbA~fuf`FJymd!>SHgL+Y*bbkmw1#%6js$2nB>%X^ z#Hu5LiYS{^hSh1J!x_PE8XL_9e$<_!vUh5<8^BLxCGn>XV-j^VDyNZHQt9y-D|Lqi zc2Fs?{h6NDu&7rLNrXTWAtNLYP_9iVo6d?J>@e}gjlzVmQIB_kWWOYR5_1Jn-0?|3 z4-2HCIj#I{rs_(W3YO4rP@u8$g3axP>Tz^&u**MWb3!D3RC>>RQ!h>U_IzM_q*EBo zS$Rnm$uyNLfOHAtYCsvPfnNv7`o^uK>V20(;+CvrWG*X0mH?dXp9XhU=a3-bmw1pd zYimg(14Sc>sM7VEXhtBk09AoYP(@XAL;%I^>Xw5wlmG^Wbc&j?q_a^|6?2k8c)x3e zD6bR*!c)~h1x!;-RZli0)Ko=pGOV;WYXwlUog6S#(VW3bl@}0}VJT`V8#?Xkb8ChI z7$eZB%85oHOuACAI-t<0tOJ7g^hQPBHnJRp4^ikebpx|eV$LTl5fnd_iC~Nm34W?_ z>7ocO0a3|rjFG`L6N808f~2afRDh_BZHS1I9F^mh0A+|g_>~tvVkMO^K-};-x*Kcd zVvU-jrm|5pxT*v-ZLFR$YAOs0QJ(DtvXu7D>VQJ0vJPaxA31ywQS7G8v2^5P@A=s)LNz1*aEigw8j8Wv5`)1q0)z?yxoq5q!CF~6WP-KI zHn=C%w8L}kWqa3`(h3R&LD*`lp~NEmQ}*zJ(Nr_(f^tgU;JCB_-2^jSEzMPM5h#}S zN4a>Xpx?ugZxP*H!#kt>4I#05E9vh`wxL$iqQ4)ficg?XvQZAwGEI5cLg^u3+Y#fT^pKXLlqxS5@m!Q~RkZ*(fZ0F8LcXJ891m^SL{36&VBa7gHOf)FaI0Abr z`p{C)DeNgdp0LOi>}-5$Auj`{6pDVaus&uB6BXJU5#hbPMQW2HP1{9BPd zz{-0{(O|vx`*QLV$uv^DexZw&$dRm%e`5xewmw{A7{<=H`4eeSRJpI121Vul9cfTh z@c&5~l%yx-p+-x8Ee%Rv>1~2MA?V*J1ImP*HTI?CCu#`#+@+=eqU0w!o`0PDL>2}` zuF@aKfFikNlgq-JvEJqV3v!_h1W>NE2{K5J@6`45H5x6^H&wJmUoX)T zeJ?~y^c@T>eT9T5shqE!5G4~!Hqn1IAxb7fcF0$mcfm=BQc8ULLM{|r_SepZlF4R< z{^xU{WUaqmE|gs2mvf?oO7?r~mqc9e{lInVy7>?mo&|IqBHhUwgt zz(1Z0MaNA~-?Y>?8H$oWmkdQKzmyC`8R$Wrmi}5Yl&(NKe4#v{NtPGYEsjY#Mks0*|GN#CmVSwe-)~uwOzaedkXnRqvtt55Io+w%)r?IlA7*pJp zSFjSQh>$08`rn&X#maK(2njsU=&A@S%gN>+G5iaYthxwvQw*9aE#3dBv?~XJFsq9U zXY!!ASA>)=C;j&%U`g*2@#2tEW#nyZl89yWA#&`0JW4^9ig=qS_P+jWK^2aD{$8kp zbCKvzpcGWEuZU7md4C5=K?VPxpcImx7?gsR{u)Xl)HhA>xYP#Qy)mDoA+zHlO~_u!{d0RuLXYVcuwQdq}S6%|z!0h{@eI1zuA^ z>zM0%pko{Q+kM6tU*K|p#%iLK~WvQ1(Wm>4DA!OHn7iyO+M1^lUyuoXKj&WURl=1#RiI8J<> z$McZ5bxo9ZfWd7u`^@pRSU(tc*YsD65%_8w^YF5(50@^Pjyu_^ZN9_`Eyf8{W5MohEUb<8qucaVMjd@`S5p1(e zynxk$7?ZZRy^PbsUDEUNB+@gnBUPj83h{IvFO9KdH*|g_`Jt62?>Rui6TON{o+2T) zUc)7e5s?Vl?c*6nLs+?ln9w6tyi8(3qHznzgW`o86K;iK7k0F6(+)o-530t2dpS*< zA~!B8W|8}P;zr>mc~e>74%oS9Tt;ck4&T8gPmx6E$X#5rG;lq#xMX(Vj>r~g@*Uk^@lKJMBzDO{U= zij%Zlx}SU|Nu(GP!lT$}Z$egUtCG&z=JBSz)=$bFCV1iusgS0*%J}yiiAe*wp`n1mQptw{^L6$~( z%Y&p-UV*rj)q+GiX>psyOurRo0BQGih;&aPoqWemN&!d24t)OM0g_MbQKpQegrs#? 
zt_1y7n3P=&bL;2_!=!aV#oRjj)i47@WcFi=qhAd(fK>SoW#S6S9H2j2l#44kbKq6E H*yR5KO7~)H delta 105944 zcmdRXXFwEJ_xDf*1Vvc|0Ra&aQ4z6GmYEe46;TmUQBg6m#fk=v8g;)A&*1UJ_IRm(?dEXE3|HJ?J5clpq_q2P;Z|)BFnVNOcW`5=+ z8#}#imK8J4F1(z5*WNK1!$;2JvK=O!YP)HWG{5=0XLYQtIxSck$HZNFfFjhD4m z7j@`xVBrNH&6Oyfuz5~5&4OxH(F@L6g)i7^-O$QgSbDon%LPu>?1IKN(N@hC=&d81 z26~UmSeUeMmQ{0=!vfLDOUS?7rkqg!PMgL~13Mx?SA{^(-)>XG+B!)X(a=Vth!-xj zP0~0tLmFY_xt8$t?X)C~Rd=B%Eh*dyXC1ZU7xc06TQJyKW7SN!db>??>wz5?WZNVV zRT8aQ+5?%Gu(;kCjqrC|CkHD#;ot3cUZVa<)OL;az(ZT}Jg#XR4uLEip~mQ>PGaU! z>+dzfv)i15^FW-jOaJ~mA#ZutAYta<9>HRd8`eM4)qmE%r74dz_LN5IQB&l;v)!Of zi*-`%Z_y7fBkZrz1(`8_Ypf1!9oF>@`A006Vt-vzd&0t?fu7R`da1&kz8jv=r}wZ? zLwk>0IM6<;YDFtX`JeE-O{@QNj_KR+4Tu_m6M3LSj1>IUJ ze=&3|M^)K+_m3MIXH`>00GN~GVjV2b{Iedgi`bcZNmE|c!@f)J`L=Zg!_W2VZmnS& zuPD6#w$)Wd|1O`;Yn-Uw(*}A?Sh#h?dF5+yMUmBQ<=?KERu?sHS+=NoiGkwbj#f7n ziU+x|SEy%p_e%xOG@Nu^nJCnfrQ~0>ne}z$-xH&(FDd^%YMXSiL^a!C{S^J{Z1vf& zR+){`_qb3pYdx%=xOsAe<5a*AsmCSy;pFfJCp6w!Ze^5$&9_bdlI5gd8=R->x^HT5 zQRAXI#kPCD{{2h1KP_l*nNl6M3cWx-q<-pgPUEDKTeT57F0Pj-UUeZcRjI@gAhVqC zd5V2~fo_70dnjx|l_26{HK?41em87E+UMF^Cl4X}IB2pM(y+m8jm@CRR$_(AM6c>u z?q9LTZsfq>!idQ>Nn+Zd=yZ*6W+zv^B%Z$=i1AEikI3o)vY-FnMfn zT4L;}REXy#<5G`n+{#$dsrQ0^ZGy#1GpyGun{g(y<4uk2WD>QaR|4$b!&6pQl&Lg- zYbE~0m$y={QEye;1rz!QJx-H|sa?hx@l90f-<0%nV(Ja$!`9a}G-AZ{{jDw&W?RQT zq>*y&ZR$-*IA$(kUP0XfJ8gV$3`+`)pXl4idZQx!=enUZ2zw0faT3Frrk|+)5rQw; z&FYfI5#3-nOgtm?IIQt1&O~zu*tQb#l)V3Dt<}cjoumF&N{0CKT(#v;Pn4RFvL^L9 zRkQat;u}?n{W#&dzphqszKZ;B)>OxaeUfgKB<{F=n7Cr*lc=>CN7YDsL7rQ|TPW;k z=A+vZiVYH>Ap-R1G%SRAa4Q+lcW6gz8I~ zBaZ6)#Y8LHjhgCZR*zFwGt4aee!a&DldjlT6HO1QJ&V?4xz&UsSga-C-6YxewnltC z!8T8$D0F1L?G=rC2}=i58}#0>-nJU+{JPk9+azIL^W>Vj_lO?K>P_!0uLgToRI~$o zcDA$ohIN}?p{zC`v+PJkBb*#pk`EU2ptgzcwi8YzdJOvBR@i*z`))$w*2)#dz+Ah6 zXvJ+AVfMT0h|3SyU(mReSX3^im+((wTrF|U6??vpCd;XrnHofTh)p??S!Zp<@*d?p z=oZxKQjX+HGh5MWaJhR)CHqXoeArVikaa0>ie0~6N(-zeq{rDfi=94}yG>jlZNb>) zkq(3>5m&tf`QTBaqND0`@LR>c4xVP#K3~`?R*);ya1iyY92{A}kNj6Ld3ipv9Q*n! zEMX%DJ2_TjHCgHAwi6b=w068&I@s*G3-ek#28f+6H`%E1v7n?HZVvr=3oGAN2omcZ zt5FdB_ zdMEJN^@UClDdO+sn~<`4Wl-mlG{6up852E6TyPB%=VsS5*OU77FQMKw)z)j|O6|#PZjo{j!>t)m1GKc1Z+C%xmN9FGj8jgGBLL zP53EW|3NH&RTET-F>ZZEu~*>>7>l1yl&{1JCr*@i{F;AFgVJn4`TTn|Uqn+{#mQ6K zID^DKy`0@ypRDp_YXRzu*~AKIKe^h9rANySJ(cfmVE)Wy13^m2jsNC)Bc%x~RAqUg(LwNjjEYs*FN%h4OGSud@52@IG{IR@%`>q~NMt|&ixsX{ z^rlSfdAb4%hX&+BsWyP+BcnKmocamPN4NJC5;xf_6ANAHrhX+mH6qz^5??Mqxl%_= zF}Z?~{V%vGj@V+Zq*5;2Dt)maT)T|Q`>-0alK}j^2+}yw9bNsVg4Z8fPR#W54 z$P*?NR*4nTnzi>QXiTHD{#O=wkK1W!T`R$KD%e&Ct<=R+Ad8oY+%8QjErJ!BR`H_( z8)j4~EpQfzuQS#OzCd1WYn4vXK3Px83M?rPBGIoWWtAUN)y%B&L#u*WAsebzr!?&z zR#l=er0i~q!)m*ON}2uK-C1FFR<*K)F;ING+C7YN^1VgK%T&%lfe?5I>D$}ema^UT zef{NP>lz+O#Cwm*8Y8alJZ5RgeD^;g4SOtOic~p=&k8xH~Ro_kM9! zH85`1xoUZUed*)3xaL7M4Yg-ipeMm8q>Tts_6mXC``j(G}TqJkM%0 zS#jnijqsSV`fEG@hK$|eS(z15zxOKZ;z&UsIM7z8*RDp0IJSvb>4Z_b38}HP88`@q z_eXdEN-Ikyj`-dyg}B1EY-bYBZ}tL1Hy}HPfmIN^EhqQ%WE9OQ7|SLzxN6jiM-pQMfN@_gQYHm5=?u>GO{#8g3tb+OSv`>MZ=X}uzqY^Z?dq0YSB#9% zb$fqz^2~kVQ&hG~;4Y?jBc(X)eqVPnbF0q>3uL*Ye023rHy%l;$8jd*c}>{L z_+uwO7gqRu+RrSVsNIF$BT2hfo@Oj8&pThIZK;T~L_s1_B##GG2+;HUPcPHhC4jf) z*QGTMYE>$4El5)WTOfh$D&%Y;xk3|0X&<8Bu^L6Rb9g%VL;Ak;CLb)-DeRVy8~Rr! zI@|nTy(|(b7N8s}BwUZg9h;c$Z|?Y#OeNkA7QaS8peg#}YZSCs=!*K|CJ{0vINq1- zP^E%{K3ga~SqUgNG3}GnU9;3NbEJY-6mX0v{>#EZxRvhI^jY)Sy_%%*eEsP_Lm>gg z^MBI1RvhF}sufb^6F<8s53)zg+9e-+^QB}fTsR10?At~L2!Vdqs? 
z`!2Osbqi^@Xq1nGFm31%Or|+YYnkt+4*R7OY zJjJw>I$vu_w&K-lbu1Kw7$LJC=cBAtI?jJOTY)L8Fry-&=C1k0o#Ot0E`HUY{?{7_Ti89VK zaC#@U?3ApWy3TMR>e!_ZBtPoF@u=v!uikn1JE3N7^+!czaZLHF;@66ufC_~ zbGQDZB&Fy$g@^pDaqDC5mIE&0`-Bj9`170@a!+GTzOgohJkVInwqn7y5O_E$|1!ki zEE*?u4h<1LZW&QiNc+{sMZDQLbhSnp(mB+zQT1ru`TrM%gypiT{D07rLe_^! zPrOa6^|Kp*)>>Gwcl3y0F(R*w!&Docg(2O@aiH={0Uis{JDGc|m{ug)V&-{~Y?o8n zV1^wOPMwPm5`S|gU8hXAhloof!^}gK?)^Le0X1{Sh2?zBqm)S9Yw>j7FbhF~JFMeJ z`Vv$o7Q@^(u7<5_`(xcIg*KwHjOYT9M5eh@)Ch7FGNaRIutp1{5juzovlLwSSlp+M&P2$r83FezwFmL`c&#Gw(6ufA%#uvBVQfkICKCshL^41Pj>Cj64omui&iEdN1kyJ!y)Y}QyT(C1kV z^B1_F!4BfsdQB}9AKXQ=?sBwCQ*$M$E69!`}WmqU-DUz@>UrEE2oc`1k zpH3*VNO6-$YJ$Z=LcT8*ZYAv{bdNki7BO*vNLk5lR$s%+b5NcuqKUBMb+;n;BUkhjH6Trnq{{J=>TPFx*M zezzhYdhZA~_aiw~sVRgeM0g9=7kSg(Ai5TXEB3#w^k4RutTPTutF5dt_e2RrVb4ec z17eh;=i(<6>7=41(doEz#MgD&cyt7Hq>HFeh%gVy5&`7c&snVBEW*4`DHI|x?jw~H z(kJ4kS%KT!Gm;aC0nW-%wWr`3ZRI5XHM5K)LKX`K(4RqQ$)IG40X+?t*(zs|i^hdj z5N{rf*kD1fxan8~c@>Iqm~}Ja0&)1f|8zJxi4v!+QIiWidwCBHyFnfPVaE!EI zC|Xeb69AN)V~&yLex=ryoC#5DDVd_{q1NWSMMCdohTy4RiGAe3KsipQEc|*ANeYYC zzeJLkF~7(r)i}T6v&#Un&2prFyV+c%P{MzRkqr`qw^%ZQ6H-I#1&V%KB6F$Sz~_+{ z$S-I;%eq;#w~s3QQ#4Vs&@;+{4_lSsPjOsmlzD1XwY0yI)3h94gsDKLnC;s=%97b4 z+0$24zZ#P*iR1*zkmAq{zgiYa>~s*)d#EBQnIbZAKM|>X2)#^LmpT=-qrWo3afjJ zem!vzNFoO*ZvvXfW%HB$Ew0@AvOin=dy}W~ZgZ|#cy0cEa8FC7HGgJVmoE&l=oCWH zN+9P878NBPi&ahw>shj5bJob%;zI^YN{x8-RWqp(IT~Rw7i|XFN1RWyXdzLBdev6Q z{74|Hh3zF?ZAFotrOB0DhP0yR1C}Pqs~P^HA0J(4=Iu)HBQ5Gec7ZK9YqrF(#P6g^ zS=5$LSd0PkeXQ_XWSB*l|1*4~#YM$iLCydyZY1$dtf0U%MOh{O&;MuqQv;Q^UC91g z`rSZ_XS-uaZh5&T*XXl7{<36>a<6wWe>h7X8=2Yn0K zm)CKCMJEyszV>~rVCX);;x-n0iq=z0I(94^8AGe6>iR$a6OO6&dJDQ=`*?kYp+(>C z9XJw0y5^X6GF!nYFfOwM0CGnady@hom*NQEyms8j->autFqb-}C;OU{_w&fwqnDb)3txW}Ad?wZG4MLhdM*ao{*oL6tG@d# z23ED47z3*U7a)y&c8nYAmR0r#L$TgHq;>fabA=paMU5&a7FLWUe@j6$J{%AWo?90Y zdq-nExtchueJp{D`7Yw1Z(~7x{21jMLL4+Z7W8mh6$^SCI35dntj#A3zj)DZ&gjXJA2l{KY5}WewQd2@2ls42ep@BdTaRqvQTlyp$M|4@O}4EL5wIwyML~3_ zx2YXs=v&3!V$QMl1b`70huE|Pr4u|_da=G)5oUkxFsS$krCB!dz3EH);)*UUr_^zd z@Kle+Y~qE672^HHj{25leY|l4 zt~$JE*5*_kylA$*d@J8Cf1AJBrWM(1#!fW$YIT!9+EeB~L6NC--?zH15sY)XIf%2T zwITua)Kl!cycO9}@*#FhtD5u|uz`iGu4$Z~7Q44xOtI6}+O_`DX)&rLNuI5#({dZM zCjKBEx*prwEWi$?;MB^Yt$o@RC5hIh@oK{e!^11qeHdRO%_ck}5gmLuRHUX?>NkG}f^dc{p;on!-kkC~A zBt%?(rwv3=+jnhW_1T{g+RoM8T4`a{|OM6JyLN6g5TS!&hc2 z6;(vyG>F&diS5WPlX`NpUppw!8^*PRdOBlnyYpX`bq_M;;v(5XsO^|Z?=C-Dp!12+$qj|)!tI@io^||YrEDRppXUk#1G|0k`E<6@hCp| zC#OT0==VzpaN7H&<^&=ku&N@Q$mu{|hkSF;oIv5w6e2n5S_g3B!)F~x@{wRUZJS8m zJRl#$YKhm09Sh8hB$-+fl6ar?v0q{pV$z$2rzGB`AG!=pgbF-nL!#Mx-m4!b5)mY{ zr+-#fkq;x>I+9i7LrPG`OGMUSb6KQ4M@Dyq`sbb05j39wnG?jOnI1wg{)wv_ycHnh{{k{_B?b$ZaMo8Q?Fes$wE5orBoa;X#nrG9n$ zPqN~lbws^e=db=!U&40$@6nOs>7dRlz9QI9G)8we|9wMU?kBXq9vLbYM0YmVoBTCH zO`+NZVp-b!&bESMO)J+UM&~`aF zM%Y@mrj&Z!R8=ZKFhvuk$P2jvqQj{Q=DHVKC;pxHAEqlBh_3fLn?LN6b`>n7yMIf6 zdWsfDf9!02nn@_yz|rl@C7sNrlgSWaMg4E#$9VD4Q6-&7?XMQqWqTc^?40S_g(R$9 z4RO=hF3|U8&r!Z1Q-fFIH}N~VoFQ>p-$Jg`I@yJWb?zcSMXb!t3&woxQ@i( z&+$VQ`;=>%igq>B2NU}Ny)sCDooo{XmlSmLN+BL0l211vgo-X3Tcn)TcohG=(-%ue zW{@*joxhOAbqf`rcf+N_hbc=5kA$C*N1es^js2ha&F#G@@GNlfV#*bweY%a=GaX6p z-yZ|b_L<$tg$(53V0e{oB<;zEq~LC3Pe_3a<&+OZRE6YjrP0Y*Tj9;XZjJa=e>4<- zy52!+sJ*kQuf1u7%rKP`9^a4ia-#a3k!N8gWAZjI~CB+=m%r@JDBJ6yC^-I4eRADAb=}s zKn2JHaNGjtTkin+0l2=V!jC^ULY^is&N>^>G9q*8`QJ@FyJ=_sx^-0Zfr;8(PitlN z+umBc{KZ?()OGRN8S(E2jhvpUD~xQTWpTU-=XW6e8eBgO`OTpGo9vteW8(^# zH%f$_CjyMfz&H*V8d(rv?2lu0ej5ldE<*+=z&IVYz^FkFZ9xljHh{(c&eX&5PCvQ@ zTz(LEXrJG=ys{@;Qx_n(yapDmKnAMIr@$6ytKpVxgn2pTcSi|SsKq@A;|hnZoV#I`m)y70;CmgLl;hI=z1Sde&1;FT zhhG#S!5LiO;Rp;EkBO9_Jrz*f$P`imQV|H?_5rKhx+mb1;d+L(c`y_*SU32yQ 
zbBXzIBxCTl15)<>)Y<3xIiK15Pw+(!5~TUV0>37}AoLY5)W<3Uw(Mp=0BeGIRtNN~ z3D&a=@MEVb0_J>{Odl!#;(5OS%AeH}1$m+&oA`nXVD*)#1-o-@!B`hS6<~Eyuwcw) zY5}WrKB`#2UOoZ>@+-i22hRI71$u+J{x@r!@l)p@NeBAMnuP+cbuZ%pb5NnICBk$;?jJG6uKs7;fnd51s}QSA<{a zk-)GE7Puk)Y`sw0eDaegk+Ps8=h8sO(c`PbGmv!G7! z1F=v{fCU_bb)XO!a`_*iP!!-86_BSPeLAjZ^uTaE;t5>{6sDJ0s77Mp3dKTp1ll{# z6=9`ojex=)2^Qi4Uj!8TM!;|;0t!5Mf1IB~>G=fN z0~wea0>i!tD7>-RjKOA6xEq?sJ!~dg1QbEg@441EW)M$!YzO+jxIPH^r6HcEp^TOO zkBEiK+({R(NeCDav2cYU&@Q_ppm+m_Motrfei#VgD4-zxF6Zm=y*ETfd}!mhyzspt{o8V_HNVeD6P?H*$bh{_uU%vSLuqnJKw_!<1JWWOoSE2 zA7O=Y6qAA3#7k}>t7+i!3YpM!3FY9sBtJjBA(4g0N{fr zGH7xTj&spu-DWU(2r@vQ%KZ@_AYkCeqR;bj51esN>f#<1)dvt5MaKjLZbOtfW-Dyk zXk>ssLCi* z7nnCQF%1HMfviP9pr%bCj{R_cG19AfXTr330EI=%CX#k`$G@|af;!kM9f5XL}d z;Y{x#@pbAL=d;Zd-<78T;S5w30uZPGDG@8rLZFv(U_FQY6-XlQK!V?J zK^ihp8K$TJL5A9EdIta@ANnz?*4AxUYu91z&BrnJcUdn&tv29#orwpAEf67?K!B78 z1hO9>5Gf6tg9->%AwW0_0%X0~JOF_duh*HN8!>MtQh<>80J;@Z3jqSJgLbC2H+@BD zcbVA!@+-mkjN1NKpwCAJeh-o1M@7Jv-L4>jO$)^S9S!{$*R#lv)notW4X)6@FRKk4 z1hAw*EB%>GyAKVVsRjVS^&X5(!7$HoAmGRr00?Oa5K;~!0zrWA0O$P>P^8tR>v>kE z3aAbU0R>2Y^(cYl7YP<$1_qKI0YWGUU=0Wmis!XJ?*{@Tzq{a}ArM1G2Dc=WZZQFY z`M77UxQ8kTT!VXz4O}+hybc<^q{A37A;VONAuY2PVhZOuomr5cz@RiJ{CNUn&*K+~ zAfPx(3lYYu4F6C8!VpL^9cIaom^DW*i&9j80R5Zw!?8LqAibWf=gE@-#~6U&LKPv5 zheDA80*V_xrm5_WbwP&!!PTrz%mV?O3NV-q;3ptJ$j1YMqwVMO^u*yutXzRu$r7-# zok4(b1_6RC&M!lHETuAom5xV%z#u><#8NvA=;g|=zW9LQvWbU>xsQktypaGAi2MW@ zFbEI^AV5%0942C~VBkPNI)VT}Z8nRcnQ@J=`CP?b6N0s>@HATZc*1Q?E8)v?(P+fl}u zAV8Yr3kvM}01DMb1q)DNW)~R1L~pBgf**E>4Oo3NIca!fZnD*fpF?Uz%5Un`bn|wK z=Z-YoWYs=9R30W;pKXzHn=Pn!eeL639@>$+=~#LJ^PF{kN-xiX^;|gclc|7VhoqJ` z*5mv=;4g0?>v$({j=HAF`J!n^ivCp`!zw_PV>$h0V{7rd3Ua7?eH;wCYtY@fvKJ46*k%zpvxxK@lb#EFd&)YgMe`cDq zaaHdyUipHvZ$t^?hOmJ100uHLlyozw0O`y=DnP1;^EHruF9?tag8*)LaGz(b>W67J zpuk4Jp9@2UJW)ZzJ{~mI%?1nf$KAYA#pA4gKbw)i<0)(0y&!$#0XJEHI)CG|MyI6! 
zxkumM3ZAoXmSs%5KG;=0^JkSo0rJh=A7IRR!@Q(BMd{fbSkLJXQhpNhmzaGpp3EZw za|RZ)Mh47sDuBzcTlMgcUv0HGme)w2faxSCl#L29Ab{1M1Ou414QHjL++=mp|J<$q z^DCBVvVC3(^Ime8kUxC?-g2Djc{sDuzewV{Jo7M(uUy2Y+*>we!H(Ne;V_m8fq?ac z1+pJ7U`;G#C^7L=fP?}hb-r*s6(FMk_Q6xiU(#FI%y{in-q;uAdAy(s@KTVfQDe}M z0Rb%24lH2e&=UOK&Pu!5ZpFk zAfEw-lD-?{d(Jt?_Z*zpnC1I-$j}D!JsSl!zq3rJt}NU#&oa4Lj}VU$@Rqw zSAZq>B$nVcSc0Em3EqSy_#Kwuw^&jKVu|gjmfSj&zw|AR`{Vp|q~FRTgDfn;hp_~A zz!DsWC78hyJQ5Y`g(X-Y3DUzw*cDr0SLBaiTU>!{@g!}F zfwB&};#0LNCPG)lE+)Ulu6Pi;;y~<*KVerKja{)DcE$SG6~jJ4S8N9S%gYbdw%A(O z{c|^;|Am0WcQgegYJabYu5XU6w*${`uhI1}==z#}0e93otu+|8H2 zj~;eH58pu#UqKI#Ll5Vohu5Qr#~|*giF)7;a!eJ_Q=bkYQw2k4BZkz!81si9B(3H8 z!ni5jYV*4=W`oq2hV@co9OHWKImYw`3~4_M>D3t07ma`_f-$7sFr+1v-%t(d5ul&x z7n-CztG^6oW))^}P0Z>wSY{?;)<eevu#U;LSZ3;@LR(P5P*m7770b*FteZQaj;8GbRAI`8!e5AmzXCLU zIsY#x{2FD<4|e79)yfYG*i>M^F}tzyg8)en0;GC4ue$}6-_Jx*gXDpzLKgz4!W{*U zM+HnbL7@~>P`{JS;1wLCS1o7)z04E^u)=^%I}MvS16^G*U~im;&3Ga<<4~Aq#~`XO z!Fra#-pc@gNgoL)gYf4S+`=Z}f+FnZA;{2x&3Fq6OhE71t&vanXFTAe9cYOb1&={X2C9|-16CIUn(KQ|0g?^`NP2Z1=@W5%fr=`u z0Ur%PLky@uI**ps=Js8Wqri04V_#WIzDRxPt+v zW{@puCn4SMKsNE*nm}#6%Q18-YH)8#uA)~C70Pp1xPD#Tpj06U8J!Rt0<8K13B45LLv&dJbWV%p?G+ zxQ8g?1NOX=*z@KE=5xx^ynGs)-bieE25fps*z_V1Rz#wLcT`jn`@a=c41;d{3Jq~W z!>kZhv_@3%7(L$-J@1B|?~bVAEZr$`%n^jHFG5!{L#P1uEspAhXFB(*Hs;;8-1XduUw*04QfBTC zJ{B~-zN~Ng`AEM;Rb)Mm=iq!c((7=2S`6^hQU3jnCg3l(hj|X^dAU0<;PiK?0Paj1 zDu81i!}uqh&j$f=I5NZUIaiE9H z{IqP=_P_!*{CHSY(Bu@^P}j?&y7wpP2p#h&vVIp3Fu366Bw!GO3_ofV0dtIDFqSKT z04@~hIpzsnF9m@B&g4%8un%y|oPl{%h_hAKEBtt`WpYT$JGTeF-`P2(-HjV-YX5CK zGIZ_qpC61h`hD7X-eccEqZ4=A+XM4*I?&7d7}&ya5FqP@z#cEgF%t;$g-E{**C+e(5Hx36&@3>TLTS{> z(=f;!>}eLri6YGcj%hSR$pVgPg+bZ`#@0CRhx7(DNE-sbLdw6t)&TrDoR|H0SioJz zG}4DrhNOo;nla%pevk75k$w&cko`aar;GmU`L*D0w9J?zefrPfxKAyo$2Yh@qTibv z(0p4mcV=OMeN+E1S)ViHV}p%#IOg^4X9HXLa6E%wq$7dB3l=nuLk8o4p`KL)BpI0r zbS%fiJeLOaauBTNke{soKn2Lm7AjCq!FdzXbFiMgBsCEjG9#z>IIygiInuD9_l)w~ z;`dvsXl^y)Os`J;{rf**9P>iBRAbdg)>O@K;eEMCZraL5?jI*b$vRIMn+joG{~G9b z;CjXc{Fn&}{}3aSO~JK-J4IbRZ$nZzD>(oRe`t)xdIc8Jg{p$By$OA7&u3DSlnA5+R`)!r`mF2Ws zwGZs&ZC*6F^}tX^1;lBYp>)jU!o17?Jr@e=W#%d6$1&DH2lm>$ zWc9CL{OS@_K-MGuC9<9;8p`?+z)*jQTAIT+S+df>J})n-#20EIpsnu*~U0190rXf`=G)@Q>!gY=IPPmDx< z36%dF<7j2vg7kk+>~{Iy*e`LFGF%fNz4YuqJ-cu*X_(yEHjn*--#y}j0;Jk$N;}T$fhXLKjcR@KXvdxL@bQ8)!Nc|?XN(u?R$&z*M?}F~utDD({GdAtV`d7>yHa|-2j_|mZXrVhAUEwH zgbmTSlc6Ahyof*f!+mRLRuWeqP`-G&tohGA$_R8FX`sa8x(#7i$`MUhr=|t3{yj20YHH%9#h8+ z#`|zS9qEH`eLnKjH${-p6_S5F=B~~Ia^C>Og_oISknE-+KndzN5eQ@k7u->kzXvd6 z!Vx6&hHCQ$<~f5qR4)gtUd&RcR{DRjdM$^sj?VL8Ts|(ij|>@hXhYiCS zL4v6=G!Q)~Aeo*)14+a6D!=?w021`8pkuMv!SXjl&tFJ7UwrJk0oRj;p`aK} zcY-R+YxRrZi;89f!c_qdz)*h)1=zsY0p}A@q4KJLrKJTBP%xd2S7~B%mBiJGaw~WCp91k(WF$fYU zu;f>LlvETkP2E#O8B7X-g3AC33^32>A48L8USm*#0L~j?MrKq{pav}laS*OY6vHu* ze?8|3xHmF`A*~i;R`_{BTIn&Z^y?sv7!?%YI8;qLq`#Q(*r}!fQJnF@>Bq)$9cvH z3OAw_E)Dq0M|kK}+Rs?}Q?V8J`64Lj39Z5dTZthGT1g6ynME+KL;8Oa6a*qDIDv-@ zPB4~3u$6@ZJ;&=0QwCBHHbYYwG(!}?>HmQFOk5C%4E&Ix^MX%NKi7FKKWH^q?AE&( zxA>o`9UKZHxF_iW``-N#%Jtg&V^Hm3HROe-JABqQc`m)&wkTrx%lTTouKoiIzUf2-a4mYSO5b?NU<|!M1;~$1f&%^X zL7`9(AP0Z|E)EUQuUhs)hk21)TDK2-{uthsyE9PyWyQCB0cto1H6~D(TA({O#Qb?TJDJnX-V^)!T_@SP=e(y&Cq;1WX8rbik6)tYmd~FadG{jB$itY6fO)wY z(33}Qx?Tc1Dnd$LuJkK%mmq;zqribr&A;O-{$QnFLG$Y8JM?o zjt_4t=lAQz#ecLnmb0Q`?gq@uDHkX`iFV3>O96%)3gEJFEDeEq7U|XXYpsDF6MC+G zjn;Q}^En_uK8Xq}g9Wk=YG^=>O+{btbMJwkmkWRa$7EB6ER#$H z5UdE}%Q&A43*`6vsQ_+(A24LD-9Nf2yruC67<0*}K-*TJ&>2)vRhUUZ1E?W9v1A)q zvI7avqa}&R$OkPkpe1|Jk}$NS02Rwo4KbP-k^^G{&aXpyYh^t@I-DDYmgvxuU(k~7 zs6Z52vRJhQ6=o8^5*==hD{fU)+&V4BmVFGw)>1XL`a*2|OU672Htrq<*F+4KU<|Hw 
z1C3UW0Rdb-j;~{I^+)NbD>ra9K`n6ENreZ-miv{j6mf-bRf=xWB!Ac1(#1iaz7D}cA zlt``+OKKjLSc6)EL4Z6T$H#E~G}4d95}bGf_+?@V_Pq-724M;2i%_FWs39`s^qatf zzSvaWVOMm*rZet5G@(bgp(&kkg|2uKyW-*p&=vo<1Wm2;WoUAJu`7B&)8b_mz}?5T z*bUk)w*$LkAa=!DNzfG&u`6!;8@gf$cEzXI6+5AVzStF21JpMEB~ao2pOMP{3{c_! zpOH!lQ1QVnu|f;aAW+$eK;;esm0k!`R^yiVBT&gkpfZC16`mj)IT-h(1~8O;5ve@I zE%C-Jd2|8xFz^=a=`aK;4RK4FB2a033Sz<^x1=g=Nf0XRrvjCU%Xr%7gE38PzXPQ5 z0)uiPrb#o*62mJ1D)SJiyuc`bk5O+8>$%oymQ18UEH}sTznCQ#k^Z|`JVhehNfoKY z08;rCkxCa#lh3G7c~me86=tSFmM~bjlCchWU|m4qA?HC|;Uch(yu-pZSVbxhW|gZB zR<6-lsZfBt7{`t{Ulj}21T0*RSh(6_;hK*ECt=~5i-oH?D#(YU#=1PLTu-1B=dS{! z5(2ee&cJ5EVKdo^%_MCIG!u6OD%D|L-if^-4tqr+@Z)sApUZ(U&%GE7NM#8w@I(eX z)Mk>ON0G__Y$ndAz&mUv?NPzSsIdMi0+kck=leo?S8>2gZ10h@yYk_(ql#2|0#ez8 z{oMomy9=~m`H0%zGqAsd0GUaL@iv@4j{Us=fyy`7-u34JsEow+9*6y3PQ(6gRQvlt z(3m-c7Q7k;kZU7+A6^|G7eXj*JmLf`8sgUn49Q1BQqd3%8d49|Q{clB497A>RDi5P zFh3;7Lrdx*gN3Rk`C!Q!v}778&<_pqMgoR|y2G@KHt`(@j zkEl>CA{7H_tSg5W9Dr<@hS{U0QzcBVJDBf@n10tV-}Nx&yfNLKc+6iFsbph*n?Qhk z0>>_x@6Tq@Cm(JO=DYe}q5_rQQQ$(%_wP_4FUaRcv{)FR9oIeQkygU*a#3E8*SSZ2nh*Y{` z34VeKEky;NswMa?7~qagr3isaMQl2$2voXbS4_dKxQcc~zOOa_+oBJ)#VAB7-(gp* zf?d%AyW(B!imBKYmmpA?gFs~ncE!5b6&qt$T!ld8EOy1t3V-taFMDHKtcn&`Mk@a^ zK!yMRf22|ZRJ3SNIW*9OK&2l7mGO6DczUGr8TVv6B9(o(CAupBG-B{b#RIn_5VwTI zEqR4o;*48jK%kO~K&5IjU`HKp$#vY41E+XED%WlTnzhHE2|Moj2kD zH7Ms`mSkd~NRWpM1|kDL%#vv>AWNPh zQ0a(S@(!~k02LgjX31bMU}46>;#;@+`IG+}$En-8h4IG0y5QYiPOx}E6blzdHI(}; z{^k8)n?@X5C{;<`THvu{f4}=|*2YdF+8ll$wTgNYFd#Rdg$t>y z(XlM%$kgMK_xHC9*CwA~`@J(9vsrmW%AfOiV#Kz`Y~fN{^cjq@vyjgi$m2;kzV&hqxxCu;p@xFo^FQjQ#JZRh(cuAz^%6 zy!nglhWHWLx~}#z+)VY3y<_Mfwni^YH5`+BKfW3&KlErFa_>n5SM%jd=9FDMt#_3% zZhsXvk>@ttyRoh&hG6?!TUfXm>!H-kH`L}`zZdMC5tm|LjN2m(=skT+gVBX- z78_+;v+a(wVfOevvzQHRt7SK?wHKP?cJu0}-aQ3ZMd`FB+f++?Iwn*LS4jnHr~G@& zduV~Z_IysX{&k+8HZI`my4}4QocGkGe{&*u!J8`DZ|5c%2kJbvys$Et{Cw*+DgXl$)gW@wEMW@ZSLZ~M@6-e@vaXUE{5vNJ-$I*%)-S{j&f%! 
z>SCT7HB<5NMad3a%)!l5@1(vzQ5Umaz{T86aIu^UE;eew#oQHeu{^vDb+H6DUp-_m z_)r&1-uSDI@_lk>cHc0D)y(0PbbBFYD%S9%S#rvNmxDTo!mkvQL z!+lYexacP|$fdLmG|1Vt5abfvH)X{&fgqQky3-)%R{u(aT$+b581)Mca&{;LxjY?% zoP!&$o=bC&(;#QtkEB5^J%b=;XG4%nts%&{{t)ESp4T+U+4Y!3FCX>aV4ZV@#cQBg zxEHFb-1|{s!(?qWytwLvlsoEK#|raqv339O$*OXVKOwb$?mS8}^(qS&Pd%10pT9nS z>ny_^GCXPNbn>~>^VMmq$Qc=|Lw1vWeRe*L(tOA`)S}M_X?iE8KPwD-$Udu4yGLy3 zE2)THcyo;n=TJBA)x7j>yX>iPS( zQ}**A`RuY4IoV6sek;`i0lUxMUwr=H#upOa2gSnuP@h=+sh0CUhwo+K{-}KE?CN7( zXO1XjJCE+U@cpTOr3;2Z6IM;Q&%*r_XQacs%k9s8vyNT2Aj0r@&Ly^I+Kvr{TQ;$e zug^^PcDxi7H9MjUfAih%(zO;NM_xU5M(RINTfOg`r4rnLWn$f(7j7Ala!%5&S>Uq1 z@m)6I@U8Evb;y(2dEa%|I%*d?Y{H)7S2it>^4`l&cMLixZJDOgaXptyg&<&ea&mR2 zJ+Jnv*Fs6kJyG^W=cEf){eK+O>UW9UGsTmetn#EKq2=Co&NN6lm!f-x_dd@K+j=+c zx8DVYfXwfe?nJsLC0@;x8cqL@7X9)ssi*D96K|Jpl$h?x@|Z?rqE-wTS+jAIY*y4> z{x0$CI(CU$hX(Z{CbMu6)@HVCn`^yRj{liWJ+^*=q0Vk8*0IXNpN3CjPuAl5)W|%r zk)3*__Q1Ed$Fs98uc))J{BpK^tDvjXdM;%*BvApo`(Ko=I%xK@cs~>i_eGr}FMCl- zSh#Q_k6jW?4Pm{mQA4C_ho~Ve+()&S-H<>HVH*vnhOq8@!3o6@wzDq?I+#Q)k*?LD zmPi|ds3j8Ih_zX2+l^Wxy*ouMVUM{}OQe@x)DUTZ5j8~WX`+Tmv7yuusk1wH>c_^D z`#l->M#5{NBpxoN`XpuL(cqHc@{@e_F$9-n2f@X@gW!_<=(Q3g%}g}7B>f^9T+%Kr z4KB&+Dh;mP-wcoFLSl=(8%JY{J^2%jEf(%JF-c7zws_VNf=e0z!NoT6qQNEY*VEu) z^`B^Pv9J5n;9~tiKosM5f84IM7g@Y7N`m{Ns&eHa-(|R8>I1tcgXX(bcde4|C)lUY zXu3;qKNbI&?F9KQcY=IpS8t&CE}dIP^L_W`tu)`+yGbQ>U$cJ@25?&FgvxaX?8wpI4tgI9;y zX@8nx8<>#fpiSQ!a&n_<741A-U7g#=7vauA8@XG9YEF*2`{O(=?k@P0U0-hSW_R}B zNx$3nezAM1Fi$2udh5%vuZOXnRlmw`jYns$?K1!uaOaexTm;~PGt38UF%Gc>Tw3)` z+OQa~MF?UGgl}>qv>M&Y84z1+=tAAvU4mP)9DI`c+C( z@Y`A+`8RMY7lHd`fBeFs@po>qc$bv)QkY~S_PHRoz`$lR5nI4jRaRUeVv9$JE%Igq zwovhH!Fa$H{SaFqfac(?tmo1ggccarQXWE!T@cs2gaDfBj@aVSL%j7JAUNDMci=$9$*aL_yO!P9IYixI@HInyGz!vYI)=1w&tzj!8w)h)r4g09wW(qCx zf&cFD)gLz5ZtWoBO;RkqnTj9ziLG)PumzOoeCZlui~5KyZXvcvgYqxUF#@*GEd*?_ zD;2Os55yL=XVCIbZbkrT@dFRg0ow6u!Y}Mz!t*} z1Ga$qy{va4V2hJu0b9&^4$xvlAV7=FT>)CmZwSzW2?1#FfTwq1ZI{^qu*K`fh%IUWw&?$^?%ZD1Rb;$jN`f1wK1mxOwj{WbDxduup+#MU z7W_lR7COWhbM^qXXavC}&FTf%;z!*3gAiP78wf78g&welfvz`60T5iQA3}?62rb$n zw5Y`cwCD)2#k2FK0Jg})==I;^5@|c+J&RXONpSU4Rn8r;1*GE#_9|kF&T9Z$_(8r) zP!ArnsR%9N5Lz_ouH<`$l#c0mu)UJ*{0R0MVv8+^E#PvkYmyeRMQ6x&=>}qpp@=Pt z5L-M%Y!Utmu*GbwC(KVdy`7uead_ht3pY?Tl)pu2L2szy6D!Hb5L+BaY|#U;1>A8} za)xmaO0e8u24IWMSRt>B1#E#OnEM_c?_{`R>pANKC0NoUwD=jJ#cEQ5dF2cv0H0k-Ie*kVxnN12%|qe!>ErvN7LkZ8YE1`h(H(jqXFzDt zSQv4b^tzojeY-uF`hrcqGQo5<=pSjpSF`#jX+-$8R|)IG<-wJz|IwwxImu7l)8Vv#w7kw` z_>r(aw(^~G^YdTZzGC6htqW4E5%=!wTc71AW!xQ~vS#7usP6h0mlhpYuyAYFU3RHZ z;|S4@DO$EHGkdgj-+tb|M`na^;|q4rjBn;Icdb$9V9LdO_SBYY?u!!7vF)Cp3+R0~ zNBVnX#X+Apg&D8@xpL)p(-s!4wz@2(W=6CcKmWeeB>2YAa)x@``kFIZXa?xy4c5yE z|EX+F3e$1cH?eY2;)4?_92;}PI@sMau6fBzaJd#aMe9EM;;@5<*!rHEw|Nq1^>e_x zgN^?=Ey3knMeL4O<+^$O;DVQTb(7W0ygILK8Rq(M0vnb!qD|_s0@h`X+nxjD?|ypT zHRKguwBdgI`F4}r>mErJ3Z;9=@dj2i`$ej7jTsEal zK2%;7tp@Jy_RlBw=4@YaRvl6XArSG2PJZre~ljL;j$g-J_Y`ME>_u34rl1qIWk17oZ1lY>jgm zosE9+Q%?tvtJg9r8oH$L|i`WH()|(SAhX1F24G#lL!lUu2gzSv2OSZMUTJ z1rg7=pu#ApMXA;QzJG^>tE{d`jW@ej_a60}U9BmYG`r}5G}>l-(XFQIrOrDP{=CGD zZIb-m@C}k%_RNf<&WhJ20A?dxIR~ctE+afOmpMkO}ieI=D(b>PABeRul)JLj)&|{X~R^7{~F1CWvk~O zFYJ-@!3Wm6ojkxY{6F3mYMQN!ErimKdgT>nqa&#@rj zvXs`bZkFwz&+u}tN9tW#EHecR7>5RA903E~Fa6R04>W*51NatZ7A(<$1i7P>*^3h)iH?RuB|+BCImr5FXK8uf(S3?Vn#q3FFOTke04OW@ls6V zKuqJMn8q1O8V8U(b}-B1K+NN2n8#5<-bqra_pgVvwIyk*WG!5Y^;oL)f7pBTxEi;& zf4l)nB7}+tDrqj!fVwJEr4h~1s5wp2>`t1K2C)+n(x4=gqT1JP?i-D_Rzn%1?h_geS8_jR*Y?S9s(^=GYGW^IcR zt#H<=WtQBU55JY*Vmru8oQ|x-IednhIL%p!b3ZF_u45)nLQ9;LIqm!Y&mSb}>t6ebX`;VqzDw#FjO_pOTS9d#G<&?hR(SSA^x>_bm7Nv)tRsa_@JRdzsaz9#VN&?vd$hX9LsstEax$cV13iFWC1_3NR~DJRy3>lJu`mAS@A2+0)7`({O)1J 
z@3W6g;OAt9@9OG-mlMc;n#!y?^-1eCD}H-f@!P}#e)B$N{F<@iSAkM=htyw>h3m|s zSA2Mmh3m|sRxx$YkXMCl;);vC;vvmedof0xKOz zGt&V<39LqYAMSL6wQf@Og=$*LT9FA`Jc>`JaYjP*AH%BrsS8bl+3Ilb&&hnTRQ zbdhO*c?Qz}#eAj#EwW5l*Jc^OtXb8l^?(KIn>z`n1@l=JFsoiYq|#UxFsovb7O<*h zv0Y;Z(WP8w5c#rT{Yx1$hO}9G!zvc6N3CPVPz)=En5CjFQnS06F~m(gUs?xG63ifC7OSeGUSkE3ENgH0;|FtZ$Sih6#?Vt{3^9vgkuk(7j#X^T zg7tdlGA86$+j9rjIv%lQZqE;~)^QqZ9sfMc+@3RQL_ML7mNB>IUMyHon!{Sg8<@-Z zKmv0a53rUo?G6*?nK=G$%b3uL{15AxS>$R&OOLgVnN_gn!*5lwipSUg>x6WHl_+aj zK(F^7fZl<1LNaIJd=v}knWdpVY32VbPZB)1k(DS-e*=0BW}@t6CCV}u(BJ+KK(EdM z`XSZ{N%ubheFiI&GK*iy;BUpS%&nO?|6&4&2=b5`&qAV0EF>yqA<+&N5NtV1dX3+ErRpo&?O>Mhlggev)oL#iwbt1hvy zidoc(EQ!kkdS=lpYkdBH1<(_Zm_Xmp0{Y!7_kI*(-UC=#&b$ZU#&T~r%e`+|?q$|} zdPr?bW!?i=#&U1TZ|;4~ya#ZZ<=%eQJ%BI9|GEc2m1b6+dP}uuZ3Prq?k#4ymsz>$ zn-)98@sYK_^o1M$70>aMeUHdpln)E%RaoJx$O>O0R`^a7bDB8cH?Uw{goAm$SNtt{ z`6t+=P-|JyYs5O=zhy-)vzQbcu(6`|J1cr$Gxq^PuW_+Xxcl%>N{OF1) zCCBmK513?n;_nvl3;x7EZt?FGBFH(IWS9O+0FV6_q~8sMHpO1)3u4i*d;B$LjEkXonB`T;-94_{_KEBrYHWg1%FK;V)9-3rxYT{Iio9n-LCIsgP9#e zCvJ}i;$`{FkJ}c-;^m=^y)(iRv2byI-i@`97~^9aASmwvlSxnf-2#5WpZLcu{=GuP z@2 zJ`Dfc0h3Hm{6V8h7Vr!H#6NEF?-e4*IhZ7iqe{5MxeI0CiM(>nh}9l=x75ija6ux> z>f1ZA2o8gn>oK|E#1n9E(WP%^sHfpy^u(VXFv;}9A2ga|0l(l+{Mto-t3vSS=1h3U zChIxvUceToqI2wX3gl3B_F0S0`JdA~zn^^4QaBd#YI(n2^WY>A&4r$BX$-pQd05irU0#NRF87yOBT+~VIWL`=R*|4jh-54^*Ln(S;7 zpMU!+-fd1cwxHdaTQU#K2)5Vtr6_*1Rv->&moA$zN`t`z90z9{uY^5(TZ|Jc)4?x6 zJ+a^r4bOcYf77JghTS6>Z#p;AVUp>IzgxgB_!IxQ#lKgGn0%N1n}92#aqInd)1b9f z>cG$JYM2!|ZRw}F3&1xV|M2VSB3xV_(Mr41^j|P z@oN|TtqLiTbMU7GP^Jx9OU=oWd346)bPcXuP5(N7VH0Xnv<69l`oFZ9_=Da#eib?j zH}DWON7h!O00e5-4v%CVh8Blw*RS0xB!3fd>_Bl99Sv?CiE6`od{?}6Sn_P{f0v&4 zy9NA$Kk=6>_=SL9@FynUrT=Ax1UDFbnfm2sI-DYZHEJ!zJ#qd2|0S`19c=~MOLx3E z>uZPl!6I~j9lrW~LKUXmT>hc$;zgX=L%gTH$%o=u{_~EUy$X{|PyF2ie!-vk$1VQ7 zLd4{|^!EZt{y7V_Qi!#&=*)Ne&ANkmSRwX${lJn!Ty;)v17Vhy01vZzy*ctO!IrPz zQ|<^={-*;bnV$HAMw2Yy7yOBT+~VIWM38eZ85Vz0G41H#b%9MHCuUXd3q-ZZ4`sRG|2*f!JqiGi~d%H*8Yh( z`@E}V_D8PMu+?*=_}!lcu#e(8PVR;NXEhT?zM1MhjjI5gVDywZR|)$XA8+k5s|1fn zoDX}qmy_=j9i5$#kOw;Bd2MzE=W&wh$yeR{y#jv0pFF4i!xsNuA!71f`g;K+fA`35 zF{vUn=u5Td@9{;5y7Q*6a?Ldud!4tP&{8`OVq6!wm4i=V4J`1Ny?5h39WcrC#2+-8 zWC6e6PyFK+|6U<;at->sm5csXg$QyE{;Yu0`)>Csx&B1$(8XpUx5shyNh_{5 z565tDO&{aShf&;2i3rs?H;TFAdoPR$j^TesPyE>dlT1(iWefhALd36K^w$LZf7C5|^m5(f+&i z#NRF87yOC8Y{4%C{DMD;|C8;~|EfYkM^9|aB5ttLR1dkGdTz^3i~SJOFusVL7RDWe z8p7emVTX6QE^-zq37y?)i-B4Pk5_+k!6VAk z=Jz|fWAIK(f7e_WJl0!0E!@czXD7uUSWu*ko4R-X+_&BqPh6A^Ycn(Cav{R+0&mdq!{ea{6A#|`uObllP8=Jp5v z`(1GUf^EFpH=CmWwQ}*;L|v?ODY4wXz!tCm@Sd}v&IR3;2$#%|amVUCyOmb?y5RB9 ztm0{FOflf=ttq`>y7>4K#;;|!wZ&DVZE3U1TyS$mw?ibqJFeTFa9_&S1urPZ&X1BY z#j4hYg^T@kasSh+DuDwwsKTcsP;uG?6?gHg7jn2``=JF|`FbvBwc$`{uaGHng+%1} zyXm4N#a+lo%4~c1x>+vXx)}4qWa`d0Hu%1~Ja`F()Ne*`*SephORb17*P-4eA+G>?aB!}&1BXIWQt zQBLmYchO^3SWBTzbf?S^xUA@^4QGPCO7WEHV!6@b>LmxPP?qv(qPt;qs`yDGZ8#9I zy}9+AF6x+HRHa2*;d+YLVzOH%@g8Tkf;Pz1!Q+olg&>I?;V5uS;wl zw>IF>+tfuRx;Q6ZYFdAo6`rF+O?2Dc?Q*(t4Pfmx^J&K)>EcZOv_`?RRyay2BfE*^ zd1(>qn*pRU?2^Zw0VQ(=o&^?~;WxEUi#2y!;C8`TVLk6m&@O54f-lz!IP|JpdToy# ztXxqWL){K|sx@lnNS+z4uQ)a1p05QS$iJC$>XiwGDy8y26YtvuNOgS1o1bik2RY8YRx+``^O7^i(@IT{e&*1r(KIC( zH2-YWJz@*4_ulQTsRg_;?5+3Usu@~Xt!@d9v%q^?LOtRqOz`wJ7q9*;O3-v@=PcC+ zwov(+Lx6Kc1r<&nUA%XLF7hh|s}kSR*J5?YeH)|Is?e5eKSX#~gAbR7cSA{W6X9Xss`o0W zcfj)jRb3aYiiestiM3eMzNp`7nJQ!&XIxD)w}x?_`{pyhIsx4=fk>$7Qo-X>Vp4b% zbn)vaOpXj(iy6}uLKbkTLaByIU5TbO>^v78lHB72Iq!%R!wwas?YAjkEUk+&-wY+L zhpxrhtK*Dlw^u=UV$8iIBGw==D`a!sT_E9c+!O!`}Ih2DPAR;c}`t@9U&>9x8mYpGq8u8l0 zn&i8C)4z8o`JY#X#5c=3YO1%vt~|k3hK~dIZ{hHhNwtTmS@%zT8k4~CONSQwko2E> 
z0@P|!RUtk%v&5rd8v$nHS7JHr9bnAW5@!Y3Lu=f5&DA3k80GXlr6F+%p4weJtJFpn ztlEk)s*<+BlJ(m&H4Po$sa?r~G)sGkw@Jt!;E}|NnG3Tu+n1m>uW-(fX{yklB3hl^+fDWu7W0`h6{FnChh9>S^DbrQErVwAgQ?f_*eN<90hbKiIsyuEQ!_{#ik&|*+{!hgC0 zM1`=GvJ2S5j}*%xy@!%0uK00o{{nHeO;uUpLdMR?#aCB`ecuY(fRS|O73uoTPRmA! z8TRnLDLQmVk`&f*Jqy(Ilt4p;=>>ykE5WEoxnAVnR`?OGaq8ezdw5yoWc!-O9^}2F zlNqXP zBqY9j6)cA@C$1X!&(nu_I=4)TmW7t!Nw-NCDz<{*;4>eRpBY1>|Jb?mr*mxay1 z;pNbGP+PE*Qy+T97p)oPwuEEmN_wY`Tft~U>+ILJj6r5UyH9uyk1fJl9iD%aD2|Zpr>R32^~KMr@yj<@!ku& zl5C9Ooy48FvT@TeRXnH0rcx64Oikyb=O*YSd^U?lkDG&%D^0Vt#u9EC-}$yx!x(Nh z9%6e+5dzg+^{(zxD`ClJr2wUk8{ou)j+E=?_2HVr^kj(;O;EpJuuw#glcDGFO8c4YY z4MO}f__5+9D!T}Q-msY8Rg}_gkZSy2w?KDp+D3+~Y z7Tj{E*>vN8EPA}X)KzqO6ZYNkTR-cPIljLrVehLz8qnDh6%Z@{t=_5xzv$|jpyIkT z_X(FA)+$Z+ZXg8!&zg^i&Y9z?eHPtHmlgoMOTsHhbs@~k=i&KUJsphHUaOR~$YJj6 zd#ARvZ^AdepIJA4Q`hkH> z*!5&}muZKkuU!-!;;-i&MpZd)4JG`uZ69Dpy?F(;VZ72OE!nYuJH&UkmfgUpe8Fo59mV zy85WG2^)kVS`{mwzTBF-ULS947xa>FHAmB^S?=R?J5c-V{7q~xoRDE6dcCN(k7{%9 z0MF-ORb17_EjC?UAIA&J%zJFiaiEX0QupEx%&r{f3F~meL7Jw5f$DpzB|Y@5$1YVQ zB&y@{eMfg-bg;_hpEsQFo=ZFJoK!zmU1OQ3slO`LTAw(w zafv>bW(cf#O}>pG%BimAY}5|qh>yNoUEzcUqJekP=6s~OjV2k)Oi{)1I>D0-GWw{T z6(4nYwK*<3{75d|X9tSz+-H}V??hn6+ehyCZJ(%zw(qJcs8dBz$iJqvNgtQ|T)v1` z*BqOyqFyJu>_G0KI?r3joUltVY<|m~0cxd1#4q2UDec^9j%L%j4espO zfw3+;!&2mc%d0nDx;^JJb%}4^bmdv?D_fnK5nlm?^dbZ)4 zES}biPRbRWXm*%rUVYeEsp6F^@~LQV$m29d8%o+lv)7x=S1Q|N5%!j!vj1R&iz&2; zX0;z)-E-)&Xk$6$q2CK5l%sr_Xcj3`DjTbo#mo?$X;YpXp(jO5mTcxL&k(tEO%~k_ zK5UnHX@s{ZrW4H;p;fu`4p|%%p$F-*86y`ZYNEOBS-q#Hh8${04u0PmHql%rYa=Dt z9JNQ~RlFt2O;=m{V39NSepxW{qL%xFXj$@*x!%iv#B{$U$`5aAUM%2@CX#YrK5MvR zGDS>|>^|0U_JiFXOO$-)mU)2J8P#TY-|H`0gCC%U<2Ri2hmBAL&P-Rjovgt4<|0iT|@;av)!JlQ=|ruliVz|{-0@s;*9{+1{3f~&^1akK zA7Y8`M#1FjXD9qz<)X#^(+yiFx$-uPIG*!TYUOQK5i^` zTJ#e${E*sgVpOQ^56rOP z%0377OHpv%eRijxNgA+Agc#j(ipGQsCKcP}?8Db1Lc{4-gE8)tUZq8g8E#ZBlJ_l) zB4BON3^TrsY4DqS4TrS4M+lAFlD5UTKsUj05gbGe1?EVTf2{NXgMCu)DIB z60ep927#Al3$&u~$rHnM$38}(o^9%(Yl*>_tZVN0=!_Xw?H=;n9Uld}GyPR|N~S@! 
z&qE0->1d4XRlj=rNfb&6ioNOZCW5hLHt#y)STk%d36Eso9R)*bx!!4W(tscJ?VE0%s_l^KqU+*1xSjRKXdBZ5&6Q(=7B?jH`s_k9?$LNB5DTojt7 zaUIw#9gJN|7QeqeV2Vw7;u}-fM?vzFVTEdHDl9hNrns3s8fVRrI?PMbZ&F$5yHhq8 zhb>d)bwrwCJmbrGd;KV|oZ0cds5})ax*3I!4oBgQ-~dV9_HgXh6_~2Z?uT2iJ!hZx zzyp?*P~wTh%MxJ*hwb_yMk<6g?TJgzh{6x53sjO?!f}L9yURD~ivrvCQu^+AK*?NA z&g>!Ywo zp3Z`(~Xv4}V9G|x+&Rs4X zOZRuKWYhG;rjlHpTmuid#QtNTG${dkr-i+g7&{0)DtsE9GEq4D^4BSjg5h}Btg2d6 z-WS<+TGrWFctE<+kY>h{fCSK!nl^B&`ylxBZz*t9h{DX9x{{sz;pq0Z_x9*UUz|@> zyk1i30pBRclx?Jls52 zu{nbyTj@pw%$Ce}hH6Rh;C8IiOoKRBE3&Ue1(zC0@@bC)rmy0o#b zsw4uIEg0>$7fXWfzNLpXRO4W9`KlB3ANHU@mEqlUjlRe=lCw~8Gl@j@=REhNM1W~z zp0)#L61-ZO{60rG4z7GUlCIRT2L;z?Z|chT#n`(AFO8*)(Jnvewy$>ttm%8pz4R54 z2$zI*m(Jph1I|oY$z8Yyd(Y^w56Amr-bkDpOgF}iO}CA*4I`kW_Zes0^+dqEA5R?b zjfF0Cr(L7x_TYji)Y>0jzPL>DLG5x9da90dJ}pp;fD5kn8{G>NVQKd5X;SxMp}h6# zu+_;uNT;1%Z))v}DR*Puz99;XP+Xj!?}cy#cvw0-aZ4o^QNWZLDh;tzQq7VP?b zTY-E@!M=?;RWIG;m%>g#Mq#lV|=Z3*8ym8CUn4{vm$g1Vf+~KODL9R1K zgS(OtQ4rD)1Iou@m;0P|fUa9sRt@rQxHaZM;^#DTjLg$eK1p*2v6jy03+JLiux6$t z$Hf>>v^guWIM)Hb^D1urDCmZ>LNX);_M4;52R^~Mr`%zC-V1xnOp^X7KHG6L2F{D- zznD*PfEo9R7{?iIn5ll~{k>3gTyTxhn6@|C9Ujlha$UJM8m`aJn*S_11`-nKx2GI& z0NzasF~MW5WMOA@b7^mLjDIsCa>&aatgXvhojsx<*!_yx8lM<=;od7eBgX-f&C>6E z`{9b2^t|B`2XidkzR(-p+#z&)o{^?aG>D8Sne*7hKzbg3`}|4=xT7wUQ5?eKhHbHB zdbOtJ==S50@ct-wNT4ne?zV`AM4gBG;tgX!!97*$1&1S8Xs>FS_rVMkDS@kOq_|b) zrL`S?XNH56GZUAyBtO}AJ~zX7O8dm+;#1tO{MyYhi^8c&UVajrKhS%FyoE|pnYg5a zS7Im443|@!CobLJ2R->zXNEs0hbAs#LT<5ZlD#V_jET!(ImI3gx*53@8l1R%A-Oa4 zGO0!YMO=+cBI0Mu6XW=$VG7rV=ZoncFi_rjvG!UBL|%%ZQ}6qOX8MS+&H^{oU2`d= z=us$&w(Keyn8LNQdT zYn#f*axiu(+wq*t?#HL?UmRBu0>$6w-XCl5CxDM?y^eE=8#d!pGr#Mh*nT31*Yoyr z$aPk^M?LHT4WjXBBIzNZ9de>bzsetGTT7&s_PgTlZ_aFS=R?sbwb%N5&~orD$_v&c z(|d)R>-`hEL*Rg{`O$BA{xI75aM1F$D@sLdx#Dp!6oV>9M)SI);M4YP?`LlzJYb}j z$A0g&5ZJNve9g~Pe_#u%?itT>#qjgiofe*3=*fW%=!Gm9iDfZ-e=UcbLs|6$Yd|P6)b^`

syPz1PhtH(4II9i?;G8eBoaS%^Bmd`=tcC~c!F}PdcfE}Zu-#l+voE;_-_Tb(X_pG`M*F$BFVAyIQ7x1+2{&MP^8w`FU>`wCP zVNKi4g4ydxj}9*kn^Um|gWSi;{W#ZyLA23Ar4eUv$elqO&vb)NM6a-A`S(po-f}8? zCKrHrFQz25=k7tzuJbP!#;k)^;sVw@H=JST%|m0`2~wf3Z4Y?MH(`?KcyHFy0OWdM zygx8;4>~6MuRp0OfyUQNI<^$4KoL)vyZ37Y+&Fug`7>>ATxFedGp!;J+uo{ORvZq* zD{{w%VqWZRS%I@E0Q`#*Ry;RA5&z9&>(+SV78|v?vljzVTX5;BRi6WK{lTc< zK6wdrc3N=l{t*>89;5mF$Q=U=@3^^Pnk-3wu6*j+{6L(#Q$94MClDVu5$W!$Brv*0 zZTw)U3XCnC+nrTofM)~-EGy=Cqv+>*uRSsY32YU4-7a}A5J!GKUG~ID0u{;`L)$i} zz}=Wd$MLKIdfBmaL=JeN=Zf+C!N@>-9WZsYxh4?5Mptg&Zj(UGoDK8N4y*v(V=B(N z83tI~J7?q9W-lDi8{gLK5{MOt998$83B|3E*z{G)FyF0aZqV!%KuM>@eD^cJWF6(cuLrzv zP${|CZbcyK*xS8N-4lr6j_vPHWJuv-^UG~V!j$2mcIC52j-&yq*AJyccwrwVFa9JF zh~J9%R-AMS#C!Ei&3>#ck-q%5gQF2Fa%E(&sM7(atH1DzHPUvls54U99F^D3oX(Kw!PZA9kCTAhqbZs67tGAJz7T8}iAW1HgjD{AB)Chr#J53V93Oz{T!f{BAJ_@390Hq*ifJYV>%6%P$UZ(Hk9 z&g@`pMCS}c?O<$u9x`p6Q6TyWFO72zbis4|Hv>Ma`9N@5jNSpKAY31GZPyq<9!6as zYVS{!2*!i2j^C133B(PNb4H(=xuDQyrLNP0K5&pwR5mmY!aFwQ`)9k87B7CTxO_%1 z-m^&#pGgKxRM3iD%`z^Sd3aEr^3xl7bgP1=X$GOVQ+~eR)?i!}0b{;{drj$Cw_c^0Jq=5BjkEw~08~Akn=EPW1fb7(1t1TLx(LpHe zHeJ&P!YzI#7(f&r3BBl6eSyHPtzADx{K9bN*>%&2^Z*|e+rHyoLAE1Ip2c5zaC_!WwkWLJ^2Aqt6@f3t7P5;@55rCu zmo+_KywR~-_4^ciN7!UXd_2ll5C=*-nr~Kh5*YibB;e6p0^5%Bm>Ttl;+^_*uIiiK z7$#=jT5RD6gN6^-t|!OA4lnU9u`~jI&d370FMBbHdA1_n8@H~u{4wa^2;9BG zb9-Fk$Oo?5s_W|r++{90-|ppJJY;>k>vLHsE?f1ysQDtH>f$C%X z<*XQx&bhm-&6NQn>swAg-N%6J+Ccxl1_m@4Zgj2ku7{XgCuYwUxejCg&vu$`V}Pt% z^V4A`1`OvrM2tl;;F|r>qHzWT?mwHUQS4q1T`~%9It8yo!Rq-U%k&x0Bia{>mJArW z-dI){z<|T2Z!H00t`@N=icSFodIcuZtIVYGoXH} zcJ{lHdXN;Z4|aY;hs+BeA64GcVbM9O=+_(!urog4kiLllJ~zj@72l8w2YOtHA~Ng2 zrtFzi<4ro)zk68Q{*Vre(>Hen4$|RE!|QaKIs+sEG$o=t7$BW(wslr?JqY*tE|^zO zhn$)r{(*KnygpT-+ul!ym6sP(v#By*6sEpwy32qt35`r@Xg%nD@i_N!5d)GV1JtkB zGeFRO{<}A23`htoiX&Q9)Wh~^-Ir5t)We63TlZE9U;qY`s+#({d+ zyUlMZEv_D3r)xd8Y@x$qwvt`_vl(z%Nl5rMX?b@0$*-@w7@*p&-CFKn4-y%~qZixB z=-y!)@uL1R9Smj+RXrc3ldC|v$&w7d$a&|U_grVd71{U?Yc1=+cJG>oyJq!}oxkhg zjFV(Jw7_*csn6-~Zo0(j*JSX`*KPSZR>}bNOAAc=pdQXEH@Hn-O~ypNK!8~)9b7cB zxw_j(dgW^;(nT3SC06&$RXRz|?E!OzEAsV_{Osy{ujTb{y62UbVImzmZjT)ZzD0+F zUY8H96J)?nO&|N`#~BcpTq;|V!AsZU4^8Mm$R9eoUFQlNL#k~ zU?XYCM}@O7ugGs;aO{?3zzw^{2J1G^piN-#2Y~x3`u7Mr$ae zKOZt6#YV(_hY$m{96e2`T1f}9m8(0Wvgwd`V?c{-F#}db)(d17GoU7(t8e56x&H5Z z1x8NiW9AXG)rDEHbail&oxxBnbu|Jam?g@^uE6J!Ge0A%s2)R0l zMF%>3r_;eoYvx^PA37Yh+`BKBTm>S!VVw8E8BmbTyX)f-(qq?#;Mrq3L`!}MnKe*H z{%j)2LBpI5ntai%-|i7~c*OsDyq(NsSBK{8LNb#TN!z@5MW>Um+2-fn@U#x3I)lvL z0v#-UwX#)_4g;EA`_c``jH%=WmFEkjY?3ieFKF$76>kbXJ?^)&%(n*6j%E5_d zs`|%XgBlu8)qW1L-=cvpg-SNFw^BOwZ4~ERJ-qzLqvJHVM=z&+tDwOYiiiQZeRMkR z*LW_Q2KrOzuh-6`foO@?mPBJ3xaV)K*q%UxGXp~cfo?Q-M7e+b#&Lv?HX}>=Hc%l# zr>B9Jn+BW1Gu9eyra{Y~ruu?d8klE%ktH5uYNN7R12Ji*~u#o6A0;hc4qLAmQPcQ;S<_GgL%Usm?A-g zBi_$umMubfwyn+W(al;o)x^I$#T&uAF~NPwM}(w7mB)HYG}!;p>}b_&8aOz=5$GMH z!ffX$&E02e;j)GL-8wx4{ka+5{I?L+X}Z#{2-86GLwd`q9|#9u3EdSZ?oeUg?9sP_ z(X}8GF|MJdNNTt>WTxCDgyIGD$EWhq;J~G`f|oiGOa&_pqpGNIykL>R46j-cF6MeU zR|28DFHc>*1R-2JV`e%zB`t=tggow$ziTcLv*awLf{8~8=c4Vkpq#|f6!DA-~y z{!}&8)4B(^)u|vgx<+B<4k|oTG3&dcK?Ql;R~7lbwGa}r`Q7{tHL$m-c#%kWHQWu0 zT$??g3I?A(l?tt)g6reU(=RNbg6fNt16fA3aC_#Vp$Lf@(A3IE&$FtAuj&fnJI8Cm z`V!^t(xp^rCH$;b<&M=t?74lf>{r!-hn5rLJ#P)Xb=x^~1*)Ns{NrHY&sqqhcPcqA zqQd55#L-F;p^1xau1mLzn!(=MNr_Vfj z_h~IuN+(rQ>TBSTbIpVIud2b_-~YNEwHk<4vy2{#N-9jrJQ<-ux~S%s)YyxTT98mk zFz%$*Kz>J^gMC*u@O5XD(5tE;{aUD2>RBqBZQ}Mx38cbF+hvxN>$NcQSwu><2QlU%C;atBZ6%^gi5AHu#3$F$o1GqD52;gm&VpLRA zL$+ej8MV}E5Kom9DsiPkk<-#^!z-y!x}dCiNkT2W(A{#sKA;9JM3<*6I$RC1_u{wu z?W%@hy0pb%GQs!=xU>!NQeljt_w9^hEo{6cxpBWy4bbnF3~BgPL&eIw@sISXLH~zo z>>dFs2zDO%QP)G%LY}q1?yJ?c;I#B0|BzG-TqeFQT 
zIuvXiWz<6C^>)K3fm$$_dhGN5X*EE}DQgdxt_J18X?G0Rsv-GJr>ozyT8J**xT}@4 zG|JM&!r*5OeBvPzd6t%ZfBjGa^GZjku$?=r z>AVXSQu|e+xC3h8hyO*|tE3uu-bLS0xUmMBN{RVz_gB=wy26}eO=PAEFbGz+(jhZe zZC&62(i7LLu7Bn7r~!@H?9rT(HPBe3HRH~q8i){D-{?dxGg^s>i?I|HWVX}ur>WLL z(vdAPWhSJhTR>=trv}z2cf9%@Rs-Gk-XlY0RA_eISRo-q1icKRs|_KjqYsFA2w)9)Fo!A62oNBnfD90a=yajTS{ zAaphAd(5$(qLth$%1xb*RdM>%|1qs8+$=~NczlX5o{_7R3 zTWN5A?sd83RvHjCZ{Jnyv8TaG?)I3YZxB*H&vnfuVfc=EvurdS;mVzJR;~s#04w28 zeHsn)BF*9so08iHhlmx~O$etKG>7rJBWx~GJamT)$`6!>i)+`=;Q8t9f{!IM5X(6_ zy;PG1!5mqZ)ntE^QpC2KBn*G)680tbApu-6>u2gqla{_rPPU`az|&e?I(<0}v~16b z?vEe`L_~LQB4OCWde!4u)(CI=(#wbWY0#h5($~0;25HsmvwY{#Ailk&NMb9($+}LH z+|N`nUtl?dri3s*&R^#Z>AB@g9=^J;iv}IiV*OO|I>f4{-cfE zRD)bjPold<2c9A9R}QGicOW+sMN(%4dJwKR?~gw=58+{w{`s16DunZn>RliOeOq%T zX^iw#-08)5rGSv^Jp6)bOQzD?{V!L9xnf6Wzyodwfv z%7}M$V0-z~kSI|HvWeb1YqZGCU*^}R-=Pi&Ss%Bd{dM5=bPlm_867;2v%OjJrVf^F z-#?fiUI!mr8y1+Zse`?-KNhB}tplaSo1TV6)q!ZRc2vYtI!K5G{AeO~&W`7feLLu1 z2j2^(U9nTHgS6{u+LRS_pg2vgny}qd2W4>~U&j{GA;L!|KCihBu4jL^((G6Vo;NQQ zw=b!K66H;FF7Y}ro3D8_(X|e)RKB1b;iAJwgBQ=0tLk7!X14lMBXV4ymW1Lga?h($ z{lI#59n6ySZBN`%2ktIBIWIrgf!nUfraMp7LD1@NRt4l%S;+e&F~02^4Z;R)9Eto+ zgR7TbN_vo%Y)QF0EI?{5+q1DPKB*4Gty?6#NkujYa##euBxB_hy|1#H20yM>D4btb z2lCNXJa4bpLEM&A$5cpz2GuO?7x31>(KN4~!P_*LSzjgV*+%lODY`K}uMXB}2zhHkpIHjvO~#?c*8pj9~NGd(Ry@jhOmk_Xm2xo3~aQ__VW<$^~J(Baf^+mX>Q zI<)v+Hwnlhk5@t+%S3p{+c?Ijr7m_eAo|lW=k|+azsKpX^l{bP?_P8W7`pE~ON|aqy`fdNCFyWirEmx8(jm##>Hll*%j0U?zW<#{ zb7@XUgNRUxq*OaXGA2@vj^>e~;V4t5A}OJw9yF6eY1U)jO%y3b(X0qJg^J4Tx6gAr zozvichTpxP-}m!-eeWOFbJkva?X}lhdwM@>KNW1xEkbWx?d&Upivjl!hqSECMc_O> z-Et<1^5l(%T38apoT!m&t z5s-SeLv9+vnbMFf=fP=qhkN&BwLZnbi++C_Ub`t)1Wxla5akJKuvd#U?H0?mggyHjky%w@*M__1r??d;MT@ z>$6R7rTT%A;ywO3N?l;PIN#gGqW3_2{)_`8qaLt9BROG~MV}a0{<9Sv$m|2;mC`~udFG>_Pt<=*ft4K^Iq_dYiVlZvR)AP#Z|jkvj^N=omiZR_W<4V7G3wY_X4k= ztJ4nie*|^5#p}EsKZ3XBxd{(EK7s>SXb`gJ4(u}eM`5A#TH}7K7sygEsCvYSB zOXw%%K46B=iS6X?1FqQHAY{tJn95=D2|n$z+<^Plyt2c*W#oO5edFF3xpnX5dj7kJxF zo?RyT5!8^52^30w1gd!sO;pQnASPyX%wDwz+&nHa&2vdF0)K2nP}KNgks;(vQYN-E z6h%#H#<6y#yYB#3-TE)kpa(3>nct_hvlob9fvA|Ij9m^TJMmHH>KA=n*b8E?Nk_?6 zqJ=Vn{ia%<0O$R!mqe33g1uPiQPhvUSi?~=hC1mPMmCZ$>wEjesi6l{H0kHgsOSMP z^X@)8R@?&)VUNR*^)p zhU4TD+C2w@H_9Q$qy@Jr$3O1{uGNn}t+4K*CkK>>tat~E3WZHN6uJS2<(EC=hdsbz zGt4`6s|W1I&YeKe?0E3uq# zaxjS#(}_SfdXBw~Ag>o~TG3a>{i6q*zBLdL5!?r^W2+*`CuBa#xoH)}z5_o7{AaiJ zya#J{D07Q&_kaahZzMTY#!tnu+a&)j__3kFo%FpE`0mSJ^rPql*oNnvB=01VGBCv` zve7JMt?zgE*Yty@d79cc$bBH~*rXrtsyc!GY~RD%UUY%@UlvO)uIUEvu*fK+AU9SN zMczoC5Tv8g#P5*sT}yUR9e8~xMDHo;Lf!*_e`ALP= zfmNYDbAtI>K;y-$U1x&d0CVxVVjLa=P@JpKRKKhnh{{@7G%kvPZ!h5Ew^{~+OONer z%|mD{z_?Vr%lnUdVD;jgiOsDx05uobEkh8wp77eK-*s9c)bNQdO?wC5%$i}B9s3jL z<^3@sJ?u05l=&dOd1@<=N!E3Iq@NCtZOOm$l`9dvJF!5PPc#$0d)a-mPdysAX0hUwQ+CgaaHpe*jX&t)Hd;WC}rQv)(- z&|M?~=2f@DsPqTTcS0KA#Hth?zd0|VG`{E60oNL6WpUH5YkfZm=dq?#?fnERwvPLL zcjE(4WLb4&TzM|outu0x9{!>Qnz?<{_{fSCd~O zII(HP_G6P0LD`v?33Ywn;pruj{+t^*41R2$)|pb-134)hO%@lI12Z$}$CL?9FwnB1 zScM}2>{^+bpTGJ94Ad2WDXn4*DBc;7&*tw0E5ekd?r_}zwR>pOg;hJ=!h)r`yW{ej zp~(1&MK%ME0k`=~W4Haa!2IDRj_)e>!EQLd$IP%3-WB{|e11VYcpsA>lvDp0+Fown z^r)~5EVPR^)%<)P3ZBmTe!vokOWYJ?fkQQ%s~$R;nvebl1)t*ko>jH*p{M2X#Jz6- zza{6ZO*iktdfJ>;t7_R4=&fJlf8ON=oVo4Z?wgs}V6K;oz>3?cU{YFokbqt)v{pIu z*rdZBBvfSee=<#kZJaYoUYe$Y>dTSVF-veL9=}jou>eID`v{-`*pV|^my6AQxMjk9rz{seRxSYP zTf(b4zu$qIo(HTaErB3a=!JXdq4RKx6cmUyu!oB^EaL{=tpoFZ2u>@#6A0Sh$@s64 zHm8BF$`h`i=x~FaDNp|h`+=@Be>Dnj(rkqmwe21EW;_R-!E0B$$kxI0H$!@VEPM)% zeVHz0aj6R0-O>uWgANSuOs53z)X#>Zx{rRSD#XF~kEYZucQ1k)UG`?@R@{VMJ0;e? 
z=DP^&HyeMwwz3#rv&eiY^C<<;>^MRy)o7?^kxD8n&cuN)1J887q+%fB*UO&*P0`S* z)>u7Ypw$nW->$qYHSqFJoUk3<2M^oyjU78^N$7&2gL7d4dgbg(ueh zsDcv{KjHW8wSpVVWRGg~)IqZ#*%l|I=RlCm!=?73243Rq@EhHNHbqV@{N zd`0=+b_KDTB3B_fCfz4Os|tvyjvF_%_9V=kdc0A|JOap0b)!}CZ@mT~pKRo+=v!d^ zMvfiFYf@mg+v&EAM#-SpKyUmM-|cWp-<7Bn0^z{TG+t%Z1XuX9XnaJc{0gu>BUj9P zoiB_|YyR40qy{-&EiTd)x&RDz>cm|NZw2eTWObfxZH1>#G#7^WJOi2YwkO^6tAdo6 z$*o7x8*o2B^C@=9y95Jgc5xgp*am{qUx>bYe-RY?5wW&nM>Nc9U+O#F5C?`5jmfjF zILp`OQ1fcVJ`d+0xQBMJuV~=SR+xfE1;0yQ0mu12%(xd* z25U7>Bsi|9gR8_2Pl`->4csUh4c~Vb!?Yinhj_)SVZn9PqSFJ7Am>P&gQIFayj{F3 zVK%KCxJRjP$@ql<%j?V3bzWZtX%7x9NDSNsx~y9UxLUkmz~uU5w{6cQaJnei~vi`PNhH5y#i*D-A@%MmwM*|)U z=^X))tF-Jl)u9_Z1Mj}@-&_sNXgfaV8Tmr=L5X~aPxYXW6l>rqTm$PjYz>VA_hIZQ zgQHRMccF<$z#47q7vS}`#eaN%TuXy-CfhIFommPh4)1?!*jx&6k&{id$SPoImvo>E zAk-`U(1C>k55a_8Kaa09ss`IWE#uo6T?59EPwBcH=!Ct84lepeZ2`+ZIL3Yrd<<8t z*eUpCay5{KFY*Iru0ad0>e$y-sn9|9Ny(OiL=dLxb1~$uC9o=W?~FPvxEW~PGyQf& z(;LM1Vg?E1sWJ=0w)c(?8-QFVRtXmN_JDOB?W&s(ix;7Q6OGS@#waN%fO;J zA{x83oPbqvtf%Dh6!0NqsRWQIhXFCC)u=De!sp)Oe3e=bf&KXrVw<-<0H3=IEgH*8 zVcbm01^%oHpxgL;K}`B35R;|*vgF(;8vM9)&49K>Hu$v2L2zCBMbMpFzdm7c7SKAhRF^G$?iQKjshh4lJ>Z0hIG2<*I_|3v; z@UASk-iMWk!NU3(Uwm@xpcH@Af{kCz;f+rZ`eVeGLVh`4{RmzM_-%D?!=7cdvv9U_ z+)P2PTJ$BCRCh_Y26$g!&FOFBUci6@!MhEc8{zGjHqCnd{a^!Lz5zb^2v>-81pItj z0&MRlKRq6u38G7`^mhMjfg+RC#qK?Oh*DpJj@zGXgpr5Icbs&KAyl(IE9cS#WOwDK ztSGMrPYhG@ZQuQXi!=*(?Xbm3WC7YL>zAL??eBs8>N}2{xfBSyBVJs4_V_&TrhL@N zeh`7yf4xujl?%Z)?pK?Zm^p*#V!r%GbyC3x^?@r69W78A+w-lZs0rmA&s}=t^b>G@ zlg``Dd5^*QQ=2Zx{wxDeI-8TG9QXr1%G$g9txOxZ znUM4l?or&7*1q}%%-=ZwcG21@IO#>MS+jaI%>U`DG-FQnyE? z6cgc%c@OfyVLScT&L{3dPHkNsjw^ZK>rSO{XY@xp&d?s2FGK?<|Nr_K5Us)t$YOehI;&|yqC~)^>?KDXx7(T%`MAY9C25sgXf8y$4u!lb*MlfP7)Ldxh z-1c%Fw54otyYCVPv#Y{Vt+Gl@D_=Ui#!jMV^(^7w?(pbB_f zMM!WydJ&qj(L9kh^=B$vcT{0O!R{Q);U!a2i<5z9N8fw?sW+hclQpN09LxX|rJ{82 z+`BMm+vlr`A7+3}_D=%^CME+Oj`?+p13};jHK{v->jJpvJN5DFMdx5)-6^r8dlx`r zyTd8h+Bmp-lXPQxO9k&l%M)CMCmX zUSTVEV(-q zCQ@ltamQdl1gGI9zO%q}$BO52Y3pFL+RBf&H|N5@~jYZ-sZ@@K?6jfbIi0AEVBI`l?#&_+%2N(lc85i;}Z#OKhPIFhw7u40awkb`lB`f zq7D$z%ZhK>cL{!_{0xgNi3hyzj&oF)*g)5lnp$oj55wmcI%amHRj}PlHKvl!8t&gm zU8~uC4;o+-Qpn1)R(G$X2%dNbl?0!K37;Vky7*KoxPrcwXgFo9}M%d0h0M=nXsR$4=&)NDbdsiDU7s}ua zwHbhwl{=1}?(YR>XX$h%&A$VMF&wecTBdsy&rmD|y$`t@_8o7AhciLu4 zx}-+u;}sLZ=hkDw2X+}i?yfcJA0N5{!-T6ZcB9vZ*LSY=A1}dYu=uT$W9O6$lc8|= z`Wch@LZF8A9eG86Q=tFZ8Z@Bm0k_v-yDpNYNa9%VMKa31H8WdpI`=j7#U5QGOCt>J z%|)cWWuigOiH|iPHQH~h`Q1|B6`u0_n(IwC?s!svQ+5!nw~si%Q!foNM2^#J^A3RI zv%9>08pnX^6CND@HGo62h#y*)_Fe}QjX&Hv)^iu;dg*UZIq(w1dHnof`1&K%bklw> zaN{L-I`gG@&!q;iq-E;C_*v}$9t`hzZd5V}PTu|1DJSbG%yHUYX|lH(3gj*~3`c)j z@iUfi2?fWT=Gw=RzWs3A<#+w3R~Lgh<4@Z9#Xkc^r90erN-&>9(IMkKTj&^CSW^*VaPUv@bQS10IkT zyR?FHwF^94%=vjsgFSFtT=?Tui$4@Nbf|w;+i{>_eeK-)M0vO;?sR+VBMeZkJT3Q7 zy#-&kuf4cuR|&ZIz}d{EsR~Yi_Vz&bvu40M)hXzTbql(}^Dvjh*FpovzK4UAuN6T` zNZFmJ9oM0$nsS-gPfPH;Z(y&{hI8;!gim^7AOsI4k;wzal~9SJK2x-^8c4iRQxm}J zq1m;vz1uw6z}y5ySrz?Scy+-c@~M&>;1y=@}isRSzt{iFBnf zp*S}%^ZN>pljgJ|(D2~YyC?Q|gIXnZ{cTDyU|_#hn8%L`&}dJ8ZKFs6P`h`3#+k4z z=;OiH^sy)xoGn@XPN3lttW6K2NVPu!(F5~Bmi(-O9VwTdwaXR(!Fj7{C%wvt;!4Xc zWR~rR7FWGi`+dFyt8aAJi#*8%3ZauIfohN8E!8~H^%!S5S)A6P6KZ_?Q4-@!h(aU14A# zqyDynNi^Jj>&|}V8gCGJ2-B|@&xexYl{chf?*Rv!m}$oM%b~YY@RVYUDxk5EbDsij zM;Y{gdM0Sv`n%x6cfLv6O`@QM%hQI7)J(W5KFjxqXcp|<^vpw8p8|uen+^G7{6T(- z?8gN;4uCSp=8#LU2ROOOsovh>2z+uVX_^3eC-8}qa`AFM16J**)&9|$1f|8XHdEP? 
z;7#kN#I>oHV7GGgh50d=ps(($t(o_XN~pl^JMcF8F;H7lUu~OQ3H2H_NGja91N>62 z?S`MP!X@#E*Un5|496$mK%Y*@fz}o^G0kEqF>1#I|JQ0Y@cEJN5vPw<0X0fU%bf+q zFa~qVAj`QNP6_TFC*=z_CC|F;xyBpro*K0+t=0v^Q$tEq_wI+gC$tpmW`}~r`(<8X za$CU-;Z&DXZ{oqF%W2DOwnM1>E=xYz>@JWrh-`eJQw^Ul-uSw&pgL44D#A3nTMPT zfG5wZg{)CO4+1m3CHbW#!8JGVldJS{;HQ?ZTQPHUX~0DL+`0=(@1bu6?F;Q|sDT=W z8z=sF*apFYipeanw}Sqzv|6{cG&tX zva}86x3KQ@rhaI-(4y(_@i$;^@XaiKi#qVG^5pu}liNY5ZTGj=@g*>7^NEH+p{LNN zpwQ@NaTUyzzPngwei59skV`~U1P98y9!d-(zJ}r}W~5&&><5-vMbjN#_Q7vhX%<;- z^86Up2(|s$@YcS3v*NjtFmsx0SIGHoAnw-*jFe5*s5o=IxPdqZve8yT_t#R5 zIqLne_Sp$u^F^&Nqtoy9zJb?JFz>Xe-o$zkiP5srVy7S^xkcT%8n}F=9G0wW2h;B~ z6gBgn1rPit*Lp05fQNgxN_|BNY>E#kRr=rs+q|aU%^69)_95XW*yrN0xa;a` zsApl5V^HXP-ta6cxvBcl_vNWl$Rp<+8@}(17nZkkhXycb)il2tU{L?k0q@= zAAKVPS{B&NP_N4Z^LHe4ZqI*$Fq45Cv{GLxF)KP>{2B&4)V#O3pbcauM@0IZy$7$K zTYkMawg$f4aJ|g{+XY{Lm)>;UcrCPktUJN9#1;67m*>l>2f;QT4cQ~z5g_D7e#47x zHV|%YSh`9!9B9tbpXk(z(DywVmmYf;0gd-pyd0ZH!JvZ}mIuy$?0MJ~}7;JWDSed5MXVe*0isA4#d-3!JCZiar>_a!`Wi%E{Fg00fyIRzc#mw1s_~IlU{{g zg6|402+o;$0loZ6Z{nBAgMz<;dnX;jfkM8g)Z$xJaM8RKuj-X*!Pn=vQx{x%47XLn zO*0$NrKgu2ZG9#r!lps8Yq1G{=HaE9mZ*@ zT#uRW0FJpd&^mW|0P>xDn!~t2_|;B-_x!WwP|)|V_vWG?@OG+9_F5xCr&#FInA@u=>oKo1XOssH&cC68u&sU(TUeE#z+c;?|v* z0}W+qS>9U;iovg!!H?gQOQE~6#-<8#IdIFY+7RJd3~?!?hcnTgq3jv4#rw)ISZr`e zgR(OPD!+}ZSu*Dul$Ja5=IqRq;JI$m5zh`9*duY!weXQUkgz=LX?>6ib{zb2t^*u_ z9ZM%QOn0{jwC)Y^wQ3lUQ8QRG@ck@&OG`Sk@tE{}pnve21UCB|^zyUMUFVYu41F)_ zds;t)VKEyu^Yp5L)syALjqA#xT9UoC(5maeeD&pPbHei=U(S?I5fz?5!uIk+%4HlD zn_TFM%%CHpI@v{qweaWUO5o4D@rO=ER>H8fO-kns8vreEAO5t4 zbPJMgaSQjHDsZu1%2{7H5U#QlJ$UE9UMS$*@jTYh7qpxX+HODH4}N-Iy3u6T5wK$R zT+4AjF5p4?iqyEH=Ro7$P<^$m0=TMlQ_2+XyMR1FlEgKx8nzWGmdiyqgKxKTmr~a^ z!j{}Qak-}&!1je!Ey>@D(I1i#m}l*}E)S-i)z;+mKM2iirmNNJAB7>ZM&HdPZUPn2 zgWKH}-GgeIDCC?gC18I!rz+*)6L`qwqVRUER*>E^+dsGYF*Mj_>l5aZ1@dK9CSK*r z1`o80S#Xw$cF za@7{YN!3-b#klnvP2eVcOOYxP)X4(Q9X6rkRBD0J{6%MJ(l~gNs=VpYhX+7kLc~@X zy;R$$n)q=FcJvxqe8Vlt*O|(;WiYX{`Idb2Q%Kg{{av!K37oLq^~$fg4F;&}z6amj z1kuxu+osG4KreA+z$SzJU=KOw!296Spy*(+m_k7|OoVetOCcv^#UUPS%>$h?CEBTAhW{ z%PwER!gIm=T&vAl*7u?8mE#l}u{sc*JE_=|QUDvmcJH>+hieAI^ z^Qtqfv+95;{_XMB#k$WKu0MPa3EP&at8Wv87dwFE?5vA1LWrT{|eE&4L3~C z)R7A)#17mb%h0YE?8GjM)c{SN`FkF^H-l?Jm%_SyUxDp?RFldLg-~IK_~o5S&tQ4d zG4;BsPareK0M5U=c6s@<4)kI}s@u$UE%>~-;l6fI7!4+BHfY_-*axON9?R}~fL^0b zt^Xz}u@*Wf)zsv@IRz?2#HyVyordeub2(Prya9RZlK0XUJ_5T6XH`TmzX!vg>BTRa z?g+D66c_MjHGvxq{0pbd=z;~)UVnM-RSv~l&$P^xJOx5$C6%2`&jBHm-z|R9`WWPi zTV!*NpM4qd^~SzzyA%XZ&)B<4@v9eX%v|zip-DYZctWdG&w2?>?|ZKE^lSs|IJ`;b z^AL{ugOfCl{$CiUqm?23k0jvl=4?%u;5a3O$1K@eC&BTU3#V@0`a%MI0uR>^vuu^% z(Bi@+lr38%1jmgZ`~wTruxXf$9%VAtAV?7+y~L8r?WHj5>6Dw8h$NKZ zViXGcACRD=3$m*I93^{_Bqf8Sh2NK?*m03;@VS#H3pmifwo@p2^nVklQgrG6Hp^4= z>HkidMw!dO)}dPhQbT&lV6B@evoX^dlmwDI{aF^9piH@f^05k?DAVv(WeOLEpz07i zv-<_JDHAYBm0^00wn4WFdq>%9z>5i~ppHJ8O-X0jh|Lfp*a>}P+MGF*ls`2@@LWm; zht@Pq#FQ$BnM|Sx;pkgvTpR*6Lkimy{lHK^xbYZu%1;tonH!^zr5eMl>}HU^EErCv zG03yE5Se6y0bPm$roNUUh*PwOGe*!Er5O2jgsRC>N*&(|#rT)8 z7qNMW7kh(>*dz{%(vszryy1RHSuxy548P!YD~FQ@UBbV5IC;<|x@(7%2VLU69+8K+ zByq!VMs}A}Y#hratnr++2?gz#&N*a8IOmKRMT5lVvo~gx$^Wvm&R8G~f9EV>UU^8S zWU$$Hn8z3s&(`0JE@AAjC412@x1ky4w{9cE8f+Umrlwd8x8|Vl*4Ydv5BkpEZa8_+ zcR3Ej$ut}Vz4CCkNx2T^VRxJ1j^DaX-VH_DxoOyqd^>rp-;E+mnt77MIT2g4%2}N6 zz$vQtDbrKVZUm{t*cl!6-6=dA&cOuX_9|xyUM&WT96|BF<1QgUnLapiJD(tk6X|SZ z1{+3Dbazsi6@dtI2cPBGX;OJAFi^vjqxr`9)Sx?4P6;4 z72fKyZ7L7Zy1)#sapPRCAqH1vx=d-T^_q(W{(*bv6iLC*p?c4FaD~@aE7(czd9k(@ zD|uZ0EJd70$TfC|_cJesc-8UxMLVS=IYOsnYI+WGxX8Mla_pLr=&jAf((wF-13mK5PPpFqkHW*)a$sRUON$%orTL; z?2=_~4kfN@Z;fD!}?Y) zga-t;&gQ{hq+7{wycEIZ)Y%fjeSD&R%hb-F1QYh>7S!nMTAFF73sznbK(h=ZBijYiNBtHz&cpo;I< 
zisY|kjm!c`=6HO^$$VNfG;3`gnrFoMn5Ak%Psl_b_dN{rb3I0?9N=mlI6}1^5gULN zEcN8VtfNss<%>KPv&Wk+n^YW09a$Yo1X%YJPhM=UmiCwiUQ*#`M_&r$*u|<3bpR<_ z_|Z`xD{!^u#Xj^;k{-iJc(&K}1MJm*uxqvC!zitWbxNMzx)cxQmtif&Hfx}U5t|X_ zCq8dbZ?-Ado}Xz}#D@B4h=tFbCpU&_Say`BVe|VzC)h@mFAuuNU17lwHrqq_+2Qnnd33Zq7QXSvg&QBI6y z*Byn>QhbL!5;VMl!ktb_F_R{p(LqlZVi^>x-rzJiY=>3Hk$xjPg-C{2BH3xoP#iMh zSTp&CZqKE7mfWZro)Lh{k%bJ-jO+l8Dvb;~)F)yXN%6==SqvgC%x$Xc(4^|zh;<8j zO~VVgy~gu!+Pcyh2nd=TMCmkP9@+fq%ylk|W6&1P_Zj=v81(XZGerXP|K%x)3!UA; z!y|ZPBGX?>ImR#?(7ifp`R*vN$1(fVRz|dVr!qyMt=B^Nyf@!oY3B! zZjgy9y@YtM*p*a1tR%rnicS>3_y4ky;34H=)tzp$@mt)s5R6A>)ek|0W|1e?vFq3p+_Q!?oSRr5SHEc_S1~Ow0H_u@K{YW&AeUl&krxkIu8$HUt zp(`ES!H~Y;F>$sVB*ijEhFd89Gso6~y@~HQvI06FeH+?Et^Jcm(VydB?B!YPmc(HK zM~8{YlBe|e*p4XUaRpsFw7qm?F;fRyV|n2ogCfYHAd8@_iVD6<+7A6mv7rIt>Nf@_ zjyOkds}M;QkDqI|n7#5(x-Pmr9J~tj4$Nnq{OIr``Xx`_kNV0Qe&2W~$kw0VoF>ROUbtrBZXqu^X>FWjN`iqZa0~EwyrX@ z7st(TyR`x&0o4>6&G^OE`YGjX9Wb}QH`>(FBo$<()h#wlPl#BRtSw@yZ6Ns}Qmu@@z@BVu{9 zF2?kj9Tdl5GzTk0J3xG-6k;DsNFhRePzsy=B)&oYSqky+^BP=K=#nO$~6BAM; z_;A!$clK^hWz$QLB~^B?A-0}xW-|zyepK}wbZ~1|y=+db>N*f)t2=|x$Otbu?x?_t zWt|gL#=;{UDGOK{5!+otBVw0MJP|uC;)y^bh$r4K!XA!PITU##N`i;HEh;E>@ zNGF;)b=p`)KyF$co$G;SpH%CKV;LDS7_ps|#)t*PVTg%n&SZ}THd9$KqI#syiS={z z_+!MD3Z6L6;TZcUy3H`7f3)MU$SM16o zkt!3=-Ds7+aWke2wvl`tmsfOr$3CkSVTdvlFUWMf%PG%tIDxbg&Lxg3#M46o#vC$y zBn&!{7dzs%<97p&aKrE1Y)HqrkdNl0jX2>&whp(BZCwIW9Nl3A8=?tOnckUOoY<2> z2Qjv>EIevSxGhAtPbXtJLfFu#-l=nsA|cF*m`B-cP%lJp6>WKQohmpHc!aSQ6*ew} zAR#Wa{*<8`l#E3RAxLAgA_Qq9D?*S)Hj5A>1{Z5VlMxe$R3#T4_b(|xG_455GXo^F_NDtQU1CH{g}&z zAs}PSUL&0Kdisyl4`ZXUsUmT^}&WK&-{Q(b)?o zW5@>cN3Q*&DzQftF_8`DKz32VX0BD3_D@J7cgydxE|J4)(?L%Rfu6kBo;eHU{-qAEf*|qPSV5qIqkfJYU`X2y z^j5?f)Aw*?2$c)G9W}-%#8KfaE-BWyAV5}Pl1GDp4w({OA7j8H1ki4*kvxQB4WBP6 zIcG*K29$y5U%H*KK zsThJ&$BYIRrb_gTtrY@O9h8A1Om*alMf#0tcRHquBTRLGlT8@$x|XTXkVP@{ifl}+ z2y|854%viFhAUB4IO@A5*DTiTM!f1LDiLp2i6`Q% zCH={{5m)qfdcjE|uvcY#j<3@_PT4I(umSHBd~-6%@iD9YPW6aot+4j3i^Q={Gqk66K2uo7A>5fnfk zbwEYvC}VftF|^%ZGMll7XFJ|}CmJ4{=2)Bv_#`R|ut?)E1Sukg2uzAaCZOX%vLMS0 zNGm$3i6K;Ta5iE)1NyNyIDdfsJyflamc6x|}z)u@4`?u_n9RfcYW*uU3MQ;HRX%q^^hVwCegfQM2 z&LJZZ2m;0#j&x!M%bjHC5TnItUiNeh;;2fp#e9vO*RqnD*wd4S(wZa$9GI_D{+`sP zVK_dK9y&6=iBI4pQ-zT^PV9KID=X0nky&u~pJg`5@kp}X$q;kiE=@GE{tf9(LXuc( zy|dWAmE%O{^f%-<5pw_X94A8R-;(3RkW63th^If#aXKoA$ro(nV@pg!V`*3x(EpG` zCxrY2H@UDUHBf$&?L_pNSdWRP|Eg>!LgRlj+erxJ2(pru{ygysy*6XLHT%zHKcRdp z+ZI=s35*n{5h+k%FLucOeJN1lD6gk|NE=o*6rKKVh|WrkqSO0_=&T$m)Tv)XPsRd{QdwVt$PGY1|p^fUj zA=3Xp`BYORNuhXHqi?Nc}{u)qWrS9JYD*T&wrU-TZ z20Vd~`;TX{SDXxQ}n-tEifznZP)^{fq#H446^-0 zY+>}xDrN(J9g@J*<*!8&n0o$uNCH!*zZgkiD)kSL1SZ?Ri6k(q|5qXj^qW=ABd@v) zp;`Yjwm`Q9!WIaV{0>_n3jY*aAUOU3wm|R@*aGqN--9juz1OS$|HKybB&SAYjEqv& zR{MVj+JJ6?{V%pK)}5@s^sd&_sNRFb7r4d^T?AzX!lT8EduM7V-A{5$4l%G$mZyu^ z@31u!?DDvdBvpb(@aj?~Jk!3I{xJkabcd0uguo0eX&!YZ^Se8(^QgHb;#*ddSl3o6 zFXl!@|GTb1jbeN$iJ)FUSI3gvsnS@MBUKQeyMWqAqFu@{g7o; z{<4r7EAW1(^}(+qp@w*|J6hCaRuTHV&OB@C{*HBN@mZCBaX>=7@ zmxA7oNpFrSHZwHM` zi0>O#gp3@pmYN{Q;zd?v3KhoIucP{r0I~mz?s7lMm;Z=}5hsylTP#H(spgq-0kv$d7hqEJlT2L?1O=*G5`e6-~8{4@B znTySyB4nyQ)1D}*(gaHju5JX@Lc$ZMB?EeA6K88;K From 311425b253f42be6c20701129cc0bbebed1121c9 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 23 Jul 2024 17:23:31 -0400 Subject: [PATCH 191/241] Clean up desc.backend and add eigh_tridiagonal --- desc/backend.py | 48 +++++++++++++++++++----------------------------- 1 file changed, 19 insertions(+), 29 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index 1fed11ba64..ad9f41c4bf 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -65,21 +65,20 @@ ) if use_jax: # noqa: C901 - FIXME: simplify 
this, define globally and then assign? - jit = jax.jit - fori_loop = jax.lax.fori_loop - cond = jax.lax.cond - switch = jax.lax.switch - while_loop = jax.lax.while_loop - vmap = jax.vmap + from jax import custom_jvp, jit, vmap + imap = jax.lax.map - scan = jax.lax.scan - bincount = jnp.bincount - repeat = jnp.repeat - flatnonzero = jnp.flatnonzero - take = jnp.take - from jax import custom_jvp from jax.experimental.ode import odeint - from jax.scipy.linalg import block_diag, cho_factor, cho_solve, qr, solve_triangular + from jax.lax import cond, fori_loop, scan, switch, while_loop + from jax.numpy import bincount, flatnonzero, repeat, take + from jax.scipy.linalg import ( + block_diag, + cho_factor, + cho_solve, + eigh_tridiagonal, + qr, + solve_triangular, + ) from jax.scipy.special import gammaln, logsumexp from jax.tree_util import ( register_pytree_node, @@ -91,6 +90,10 @@ treedef_is_leaf, ) + trapezoid = ( + jnp.trapezoid if hasattr(jnp, "trapezoid") else jax.scipy.integrate.trapezoid + ) + def put(arr, inds, vals): """Functional interface for array "fancy indexing". @@ -375,14 +378,6 @@ def tangent_solve(g, y): ) return x, (safenorm(res), niter) - def trapezoid(y, x=None, dx=1.0, axis=-1): - """Integrate along the given axis using the composite trapezoidal rule.""" - if hasattr(jnp, "trapezoid"): - # https://github.com/google/jax/issues/20410 - return jnp.trapezoid(y, x, dx, axis) - else: - return jax.scipy.integrate.trapezoid(y, x, dx, axis) - # we can't really test the numpy backend stuff in automated testing, so we ignore it # for coverage purposes @@ -394,11 +389,14 @@ def trapezoid(y, x=None, dx=1.0, axis=-1): block_diag, cho_factor, cho_solve, + eigh_tridiagonal, qr, solve_triangular, ) from scipy.special import gammaln, logsumexp # noqa: F401 + trapezoid = np.trapezoid if hasattr(np, "trapezoid") else np.trapz + def imap(f, xs, in_axes=0, out_axes=0): """Generalizes jax.lax.map; uses numpy.""" if not isinstance(xs, np.ndarray): @@ -778,14 +776,6 @@ def root( out = scipy.optimize.root(fun, x0, args, jac=jac, tol=tol) return out.x, out - def trapezoid(y, x=None, dx=1.0, axis=-1): - """Integrate along the given axis using the composite trapezoidal rule.""" - if hasattr(np, "trapezoid"): - # https://github.com/numpy/numpy/issues/25586 - return np.trapezoid(y, x, dx, axis) - else: - return np.trapz(y, x, dx, axis) - def flatnonzero(a, size=None, fill_value=0): """A numpy implementation of jnp.flatnonzero.""" nz = np.flatnonzero(a) From d9214475c56adb47db621229708d044bb3f099e7 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 23 Jul 2024 17:24:48 -0400 Subject: [PATCH 192/241] Add Guass-Lobatto quadrature for effective ripple, and speed up bounce_points() algorithm --- desc/compute/bounce_integral.py | 159 +++++++++++++++++++++----------- tests/test_bounce_integral.py | 80 ++++++++++------ 2 files changed, 156 insertions(+), 83 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index d1e764bf5d..82f2c8494e 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -5,14 +5,14 @@ import numpy as np from interpax import CubicHermiteSpline, PchipInterpolator, PPoly, interp1d from matplotlib import pyplot as plt -from orthax.legendre import leggauss +from orthax.legendre import legder, leggauss, legval -from desc.backend import flatnonzero, imap, jnp, put, take +from desc.backend import eigh_tridiagonal, flatnonzero, imap, jnp, put, take from desc.compute.utils import safediv -from desc.utils import Index, errorif, 
warnif +from desc.utils import errorif, warnif -@partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={2, 3}) +@partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={"size", "fill_value"}) def _take_mask(a, mask, size=None, fill_value=None): """JIT compilable method to return ``a[mask][:size]`` padded by ``fill_value``. @@ -170,13 +170,17 @@ def _poly_root( First axis should store coefficients of a polynomial. For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0]-1``, coefficient cᵢ should be stored at ``c[n-i]``. - k : Array + k : jnp.ndarray Specify to find solutions to ∑ᵢⁿ cᵢ xⁱ = ``k``. Should broadcast with arrays of shape ``c.shape[1:]``. - a_min, a_max : jnp.ndarray, jnp.ndarray - Minimum and maximum value to return roots between. If specified only real roots - are returned. If None, returns all complex roots. Should broadcast with arrays - of shape ``c.shape[1:]``. + a_min : jnp.ndarray + Minimum ``a_min`` and maximum ``a_max`` value to return roots between. + If specified only real roots are returned. If None, returns all complex roots. + Should broadcast with arrays of shape ``c.shape[1:]``. + a_max : jnp.ndarray + Minimum ``a_min`` and maximum ``a_max`` value to return roots between. + If specified only real roots are returned. If None, returns all complex roots. + Should broadcast with arrays of shape ``c.shape[1:]``. sort : bool Whether to sort the roots. sentinel : float @@ -187,7 +191,7 @@ def _poly_root( Absolute tolerance with which to consider value as zero. distinct : bool Whether to only return the distinct roots. If true, when the multiplicity is - greater than one, the repeated roots are set to nan. + greater than one, the repeated roots are set to ``sentinel``. Returns ------- @@ -211,7 +215,7 @@ def _poly_root( c = [jnp.broadcast_to(c_i, c_n.shape) for c_i in c[:-1]] c.append(c_n) c = jnp.stack(c, axis=-1) - r = jnp.nan_to_num(_roots(c), nan=sentinel) + r = _roots(c) if get_only_real_roots: a_min = -jnp.inf if a_min is None else a_min[..., jnp.newaxis] a_max = +jnp.inf if a_max is None else a_max[..., jnp.newaxis] @@ -223,11 +227,14 @@ def _poly_root( if sort or distinct: r = jnp.sort(r, axis=-1) - if distinct: - # eps needs to be low enough that close distinct roots do not get removed. - # Otherwise, algorithms relying on continuity will fail. - mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=sentinel), 0, atol=eps) - r = jnp.where(mask, sentinel, r) + return _filter_distinct(r, sentinel, eps) if distinct else r + + +def _filter_distinct(r, sentinel, eps): + # eps needs to be low enough that close distinct roots do not get removed. + # Otherwise, algorithms relying on continuity will fail. + mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=sentinel), 0, atol=eps) + r = jnp.where(mask, sentinel, r) return r @@ -288,7 +295,7 @@ def _poly_val(x, c): ) """ - # Fine instead of Horner's method as we expect to evaluate cubic polynomials. + # Better than Horner's method as we expect to evaluate low order polynomials. 
X = x[..., jnp.newaxis] ** jnp.arange(c.shape[0] - 1, -1, -1) val = jnp.einsum("...i,i...->...", X, c) return val @@ -503,6 +510,31 @@ def _check_shape(knots, B_c, B_z_ra_c, pitch=None): return B_c, B_z_ra_c, pitch +@partial(jnp.vectorize, signature="(m),(m)->(m)") +def _correct_inversion(is_intersect, B_z_ra): + # idx of first two intersects + idx = flatnonzero(is_intersect, size=2, fill_value=-1) + edge_case = ( + (B_z_ra[idx[0]] == 0) + & (B_z_ra[idx[1]] < 0) + & is_intersect[idx[0]] + & is_intersect[idx[1]] + # In theory, we need to keep propagating this edge case, + # e.g. (B_z_ra[..., 1] < 0) | ((B_z_ra[..., 1] == 0) & (B_z_ra[..., 2] < 0)...). + # At each step, the likelihood that an intersection has already been lost + # due to floating point errors grows, so the real solution is to pick a less + # degenerate pitch value - one that does not ride the global extrema of |B|. + ) + # The pairs bp1[i, j, k] and bp2[i, j, k] are boundaries of an integral only + # if bp1[i, j, k] <= bp2[i, j, k]. For correctness of the algorithm, it is + # required that the first intersect satisfies non-positive derivative. Now, + # because B_z_ra[i, j, k] <= 0 implies B_z_ra[i, j, k + 1] >= 0 by continuity, + # there can be at most one inversion, and if it exists, the inversion must be + # at the first pair. To correct the inversion, it suffices to disqualify the + # first intersect as a right boundary, except under the above edge case. + return put(is_intersect, idx[0], edge_case) + + def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True, **kwargs): """Compute the bounce points given spline of |B| and pitch λ. @@ -565,48 +597,21 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True, **kwargs) assert intersect.shape == (P, S, N, degree) # Reshape so that last axis enumerates intersects of a pitch along a field line. + B_z_ra = _poly_val(x=intersect, c=B_z_ra_c[..., jnp.newaxis]).reshape(P, S, -1) # Only consider intersect if it is within knots that bound that polynomial. is_intersect = intersect.reshape(P, S, -1) >= 0 - B_z_ra = _poly_val(x=intersect, c=B_z_ra_c[..., jnp.newaxis]).reshape(P, S, -1) - # Gather intersects along a field line to be contiguous. - B_z_ra = _take_mask(B_z_ra, is_intersect, fill_value=0) + # Following discussion on page 3 and 5 of https://doi.org/10.1063/1.873749, + # we ignore the bounce points of particles assigned to a class that are + # trapped outside this snapshot of the field line. + is_bp1 = (B_z_ra <= 0) & is_intersect + is_bp2 = (B_z_ra >= 0) & _correct_inversion(is_intersect, B_z_ra) - sentinel = knots[0] - 1 # Transform out of local power basis expansion. intersect = (intersect + knots[:-1, jnp.newaxis]).reshape(P, S, -1) - # Gather intersects along a field line to be contiguous, followed by some sentinel. - intersect = _take_mask(intersect, is_intersect, fill_value=sentinel) - is_intersect = intersect > sentinel - is_bp1 = (B_z_ra <= 0) & is_intersect - is_bp2 = (B_z_ra >= 0) & is_intersect - edge_case = ( - (B_z_ra[..., 0] == 0) - & (B_z_ra[..., 1] < 0) - & is_intersect[..., 0] - & is_intersect[..., 1] - # In theory, we need to keep propagating this edge case, - # e.g (B_z_ra[..., 1] < 0) | ((B_z_ra[..., 1] == 0) & (B_z_ra[..., 2] < 0)...). - # At each step, the likelihood that an intersection has already been lost - # due to floating point errors grows, so the real solution is to pick a less - # degenerate pitch value - one that does not ride the global extrema of |B|. 
- ) - is_bp2 = put(is_bp2, Index[..., 0], edge_case) - # Get ζ values of bounce points from the masks. + sentinel = knots[0] - 1 + # Get ζ values of bounce points. bp1 = _take_mask(intersect, is_bp1, fill_value=sentinel) bp2 = _take_mask(intersect, is_bp2, fill_value=sentinel) - # The pairs bp1[i, j, k] and bp2[i, j, k] are boundaries of an integral only - # if bp1[i, j, k] <= bp2[i, j, k]. For correctness of the algorithm, it is - # required that the first intersect satisfies non-positive derivative. Now, - # because B_z_ra[i, j, k] <= 0 implies B_z_ra[i, j, k + 1] >= 0 by continuity, - # there can be at most one inversion, and if it exists, the inversion must be - # at the first pair. To correct the inversion, it suffices to disqualify the - # first intersect as a right boundary, except under the above edge case. - - # Following discussion on page 3 and 5 of https://doi.org/10.1063/1.873749, - # we ignore the bounce points of particles assigned to a class that are - # trapped outside this snapshot of the field line. - # TODO: Better to always consider boundary as bounce points. Simple change; - # do in same pull request that resolves GitHub issue #1045. if check: _check_bounce_points(bp1, bp2, sentinel, pitch, knots, B_c, plot, **kwargs) @@ -638,7 +643,7 @@ def _composite_linspace(x, num): """ x = jnp.atleast_1d(x) pts = jnp.linspace(x[:-1], x[1:], num + 1, endpoint=False) - pts = jnp.moveaxis(pts, source=0, destination=1).reshape(-1, *x.shape[1:]) + pts = jnp.swapaxes(pts, 0, 1).reshape(-1, *x.shape[1:]) pts = jnp.append(pts, x[jnp.newaxis, -1], axis=0) assert pts.shape == ((x.shape[0] - 1) * num + x.shape[0], *x.shape[1:]) return pts @@ -739,6 +744,12 @@ def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): return B_extrema +def affine_bijection_to_disc(x, a, b): + """[a, b] ∋ x ↦ y ∈ [−1, 1].""" + y = 2 * (x - a) / (b - a) - 1 + return y + + def affine_bijection(x, a, b): """[−1, 1] ∋ x ↦ y ∈ [a, b].""" y = (x + 1) / 2 * (b - a) + a @@ -840,7 +851,7 @@ def tanh_sinh(deg, m=10): Parameters ---------- - deg: int + deg : int Number of quadrature points. m : float Number of machine epsilons used for floating point error buffer. Larger implies @@ -865,6 +876,46 @@ def tanh_sinh(deg, m=10): return x, w +# TODO: upstream to orthax? +def leggausslob(deg): + """Lobatto-Gauss-Legendre quadrature. + + Returns quadrature points xₖ and weights wₖ for the approximate evaluation of the + integral ∫₋₁¹ f(x) dx ≈ ∑ₖ wₖ f(xₖ). + + Parameters + ---------- + deg : int + Number of (interior) quadrature points to return. + + Returns + ------- + x, w : (jnp.ndarray, jnp.ndarray) + Quadrature points in (-1, 1) and associated weights. + Excludes points and weights at -1 and 1. + + """ + # Designate two degrees for endpoints. 
+ deg = int(deg) + 2 + + n = jnp.arange(2, deg - 1) + x = eigh_tridiagonal( + jnp.zeros(deg - 2), + jnp.sqrt((n**2 - 1) / (4 * n**2 - 1)), + eigvals_only=True, + ) + c0 = put(jnp.zeros(deg), -1, 1) + + # improve (single multiplicity) roots by one application of Newton + c = legder(c0) + dy = legval(x=x, c=c) + df = legval(x=x, c=legder(c)) + x -= dy / df + + w = 2 / (deg * (deg - 1) * legval(x=x, c=c0) ** 2) + return x, w + + def _plot(Z, V, title_id=""): """Plot V[λ, (ρ, α), (ζ₁, ζ₂)](Z).""" for p in range(Z.shape[0]): diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 380c62cc65..e5ab8a3eac 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -7,10 +7,11 @@ import pytest from jax import grad from matplotlib import pyplot as plt +from orthax.chebyshev import chebgauss, chebweight from orthax.legendre import leggauss from scipy import integrate from scipy.interpolate import CubicHermiteSpline -from scipy.special import ellipkm1 +from scipy.special import ellipe, ellipkm1, roots_chebyu from tests.test_plotting import tol_1d from desc.backend import flatnonzero, jnp @@ -23,6 +24,7 @@ _poly_val, _take_mask, affine_bijection, + affine_bijection_to_disc, automorphism_arcsin, automorphism_sin, bounce_integral, @@ -32,6 +34,7 @@ grad_affine_bijection, grad_automorphism_arcsin, grad_automorphism_sin, + leggausslob, plot_field_line, tanh_sinh, ) @@ -43,12 +46,6 @@ from desc.utils import only1 -def _affine_bijection_forward(x, a, b): - """[a, b] ∋ x ↦ y ∈ [−1, 1].""" - y = 2 * (x - a) / (b - a) - 1 - return y - - @partial(np.vectorize, signature="(m)->()") def _last_value(a): """Return the last non-nan value in ``a``.""" @@ -384,10 +381,10 @@ def test_automorphism(): """Test automorphisms.""" a, b = -312, 786 x = np.linspace(a, b, 10) - y = _affine_bijection_forward(x, a, b) + y = affine_bijection_to_disc(x, a, b) x_1 = affine_bijection(y, a, b) np.testing.assert_allclose(x_1, x) - np.testing.assert_allclose(_affine_bijection_forward(x_1, a, b), y) + np.testing.assert_allclose(affine_bijection_to_disc(x_1, a, b), y) np.testing.assert_allclose(automorphism_arcsin(automorphism_sin(y)), y, atol=5e-7) np.testing.assert_allclose(automorphism_sin(automorphism_arcsin(y)), y, atol=5e-7) @@ -424,31 +421,56 @@ def test_bounce_quadrature(): # (bp2 - bp1) / pi = pi / (bp2 - bp1) which could mask errors since pi # appears often in transformations. 
v = 7 - truth = v * 2 * ellipkm1(p) - rtol = 1e-4 - - def integrand(B, pitch): - return jnp.reciprocal(jnp.sqrt(1 - pitch * m * B)) - bp1 = -np.pi / 2 * v bp2 = -bp1 knots = np.linspace(bp1, bp2, 50) - B = np.clip(np.sin(knots / v) ** 2, 1e-7, 1) - B_z_ra = np.sin(2 * knots / v) / v pitch = 1 + 50 * jnp.finfo(jnp.array(1.0).dtype).eps - bounce_integrate, _ = bounce_integral( - B, B, B_z_ra, knots, quad=tanh_sinh(40), automorphism=None, check=True - ) - tanh_sinh_vanilla = bounce_integrate(integrand, [], pitch) - assert np.count_nonzero(tanh_sinh_vanilla) == 1 - np.testing.assert_allclose(np.sum(tanh_sinh_vanilla), truth, rtol=rtol) - bounce_integrate, _ = bounce_integral( - B, B, B_z_ra, knots, quad=leggauss(25), check=True - ) - leg_gauss_sin = bounce_integrate(integrand, [], pitch, batch=False) - assert np.count_nonzero(tanh_sinh_vanilla) == 1 - np.testing.assert_allclose(np.sum(leg_gauss_sin), truth, rtol=rtol) + def b_field(knots): + b = np.clip(np.sin(knots / v) ** 2, 1e-7, 1) + db = np.sin(2 * knots / v) / v + return b, db + + b, db = b_field(knots) + + def test(f, truth, quad, rtol=1e-4): + bounce_integrate, _ = bounce_integral( + b, + b, + db, + knots, + quad[0], + automorphism=None, + check=True, + plot=True, + ) + result = bounce_integrate(f, [], pitch) + assert np.count_nonzero(result) == 1 + np.testing.assert_allclose(np.sum(result), truth, rtol=rtol) + + bounce_integrate, _ = bounce_integral(b, b, db, knots, quad[1], check=True) + result = bounce_integrate(f, [], pitch) + assert np.count_nonzero(result) == 1 + np.testing.assert_allclose(np.sum(result), truth, rtol=rtol) + + # sin automorphism still helps out chebyshev quadrature + bounce_integrate, _ = bounce_integral(b, b, db, knots, quad[2], check=True) + result = bounce_integrate(f, [], pitch) + assert np.count_nonzero(result) == 1 + np.testing.assert_allclose(np.sum(result), truth, rtol=rtol) + + def strong(B, pitch): + return 1 / jnp.sqrt(1 - pitch * m * B) + + def weak(B, pitch): + return jnp.sqrt(1 - pitch * m * B) + + x, w = chebgauss(30) + w /= chebweight(x) + test(strong, v * 2 * ellipkm1(p), [tanh_sinh(40), leggauss(25), (x, w)]) + x, w = roots_chebyu(10) + w *= chebweight(x) + test(weak, v * 2 * ellipe(m), [tanh_sinh(20), leggausslob(10), (x, w)]) @pytest.mark.unit From 6ed9d92441cf54d4bd0b469f6b7990cbbb04882a Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 24 Jul 2024 18:22:34 -0400 Subject: [PATCH 193/241] skeleton for fourier bounce integrals --- desc/backend.py | 9 + desc/compute/bounce_integral.py | 45 ++- desc/compute/fourier_bounce_integral.py | 453 ++++++++++++++++++++++++ tests/test_bounce_integral.py | 20 ++ 4 files changed, 503 insertions(+), 24 deletions(-) create mode 100644 desc/compute/fourier_bounce_integral.py diff --git a/desc/backend.py b/desc/backend.py index ad9f41c4bf..4268692b86 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -71,6 +71,8 @@ from jax.experimental.ode import odeint from jax.lax import cond, fori_loop, scan, switch, while_loop from jax.numpy import bincount, flatnonzero, repeat, take + from jax.numpy.fft import irfft, rfft + from jax.scipy.fft import dct, idct from jax.scipy.linalg import ( block_diag, cho_factor, @@ -94,6 +96,10 @@ jnp.trapezoid if hasattr(jnp, "trapezoid") else jax.scipy.integrate.trapezoid ) + trapezoid = ( + jnp.trapezoid if hasattr(jnp, "trapezoid") else jax.scipy.integrate.trapezoid + ) + def put(arr, inds, vals): """Functional interface for array "fancy indexing". 
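The hunk above binds a single trapezoid name to whichever implementation the installed JAX provides, since jnp.trapezoid only exists in newer releases (see the jax issue 20410 reference earlier in the series). A minimal standalone sketch of that feature-detection pattern follows; it assumes only that jax.scipy.integrate.trapezoid and NumPy's np.trapz/np.trapezoid are interchangeable for this call signature, and none of it is code from the patch:

import numpy as np

try:
    import jax.numpy as jnp
    from jax.scipy.integrate import trapezoid as _fallback_trapezoid

    # Prefer jnp.trapezoid when the installed JAX exposes it.
    trapezoid = jnp.trapezoid if hasattr(jnp, "trapezoid") else _fallback_trapezoid
except ImportError:
    # Without JAX, mirror the numpy branch; np.trapz was renamed np.trapezoid in NumPy 2.0.
    trapezoid = np.trapezoid if hasattr(np, "trapezoid") else np.trapz

x = np.linspace(0, np.pi, 101)
print(trapezoid(np.sin(x), x))  # ~2.0 with either backend

Downstream modules can then import the one name without caring which library or version supplied it, which is the point of the backend cleanup in this patch.
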
@@ -384,6 +390,7 @@ def tangent_solve(g, y): else: # pragma: no cover jit = lambda func, *args, **kwargs: func import scipy.optimize + from scipy.fft import dct, idct, irfft, rfft # noqa: F401 from scipy.integrate import odeint # noqa: F401 from scipy.linalg import ( # noqa: F401 block_diag, @@ -397,6 +404,8 @@ def tangent_solve(g, y): trapezoid = np.trapezoid if hasattr(np, "trapezoid") else np.trapz + trapezoid = np.trapezoid if hasattr(np, "trapezoid") else np.trapz + def imap(f, xs, in_axes=0, out_axes=0): """Generalizes jax.lax.map; uses numpy.""" if not isinstance(xs, np.ndarray): diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 82f2c8494e..a5d93c5371 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -68,6 +68,16 @@ def _filter_nonzero_measure(bp1, bp2): return bp1[mask], bp2[mask] +def _filter_distinct(r, sentinel, eps): + """Set all but one of matching adjacent elements in ``r`` to ``sentinel``.""" + # eps needs to be low enough that close distinct roots do not get removed. + # Otherwise, algorithms relying on continuity will fail. + # TODO: check if numpy even tries to make isclose fast + mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=sentinel), 0, atol=eps) + r = jnp.where(mask, sentinel, r) + return r + + def _sentinel_append(r, sentinel, num=1): """Concat ``sentinel`` ``num`` times to ``r`` on last axis.""" sent = jnp.broadcast_to(sentinel, (*r.shape[:-1], num)) @@ -186,7 +196,7 @@ def _poly_root( sentinel : float Value with which to pad array in place of filtered elements. Anything less than ``a_min`` or greater than ``a_max`` plus some floating point - error buffer will work just like nan while also avoiding nan gradient. + error buffer will work just like nan while avoiding nan gradient. eps : float Absolute tolerance with which to consider value as zero. distinct : bool @@ -230,14 +240,6 @@ def _poly_root( return _filter_distinct(r, sentinel, eps) if distinct else r -def _filter_distinct(r, sentinel, eps): - # eps needs to be low enough that close distinct roots do not get removed. - # Otherwise, algorithms relying on continuity will fail. - mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=sentinel), 0, atol=eps) - r = jnp.where(mask, sentinel, r) - return r - - def _poly_der(c): """Coefficients for the derivatives of the given set of polynomials. @@ -511,7 +513,7 @@ def _check_shape(knots, B_c, B_z_ra_c, pitch=None): @partial(jnp.vectorize, signature="(m),(m)->(m)") -def _correct_inversion(is_intersect, B_z_ra): +def _fix_inversion(is_intersect, B_z_ra): # idx of first two intersects idx = flatnonzero(is_intersect, size=2, fill_value=-1) edge_case = ( @@ -570,16 +572,13 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True, **kwargs) ------- bp1, bp2 : (jnp.ndarray, jnp.ndarray) Shape (P, S, N * degree). - The field line-following ζ coordinates of bounce points for a given pitch along - a field line. The pairs ``bp1[i,j,k]`` and ``bp2[i,j,k]`` form left and right - integration boundaries, respectively, for the bounce integrals. + The field line-following coordinates of bounce points for a given pitch along + a field line. The pairs ``bp1`` and ``bp2`` form left and right integration + boundaries, respectively, for the bounce integrals. - For the shaping notation, the ``degree`` of the spline of |B| matches - ``B_c.shape[0]-1``, the number of polynomials per spline ``N`` matches - ``knots.size-1``, and the number of field lines is denoted by ``S``. 
- If there were less than ``N*degree`` bounce points detected along a field line, - then the last axis, which enumerates the bounce points for a particular field - line, is padded with zero. + If there were less than ``N * degree`` bounce points detected + along a field line, then the last axis, which enumerates the bounce points for + a particular field line, is padded with zero. """ B_c, B_z_ra_c, pitch = _check_shape(knots, B_c, B_z_ra_c, pitch) @@ -601,15 +600,14 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True, **kwargs) # Only consider intersect if it is within knots that bound that polynomial. is_intersect = intersect.reshape(P, S, -1) >= 0 # Following discussion on page 3 and 5 of https://doi.org/10.1063/1.873749, - # we ignore the bounce points of particles assigned to a class that are + # we ignore the bounce points of particles only assigned to a class that are # trapped outside this snapshot of the field line. is_bp1 = (B_z_ra <= 0) & is_intersect - is_bp2 = (B_z_ra >= 0) & _correct_inversion(is_intersect, B_z_ra) + is_bp2 = (B_z_ra >= 0) & _fix_inversion(is_intersect, B_z_ra) # Transform out of local power basis expansion. intersect = (intersect + knots[:-1, jnp.newaxis]).reshape(P, S, -1) sentinel = knots[0] - 1 - # Get ζ values of bounce points. bp1 = _take_mask(intersect, is_bp1, fill_value=sentinel) bp2 = _take_mask(intersect, is_bp2, fill_value=sentinel) @@ -617,7 +615,7 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True, **kwargs) _check_bounce_points(bp1, bp2, sentinel, pitch, knots, B_c, plot, **kwargs) mask = (bp1 > sentinel) & (bp2 > sentinel) - # Set outside mask to same value so that integration is over set of measure zero. + # Set outside mask to same value so integration is over set of measure zero. bp1 = jnp.where(mask, bp1, 0) bp2 = jnp.where(mask, bp2, 0) return bp1, bp2 @@ -876,7 +874,6 @@ def tanh_sinh(deg, m=10): return x, w -# TODO: upstream to orthax? def leggausslob(deg): """Lobatto-Gauss-Legendre quadrature. diff --git a/desc/compute/fourier_bounce_integral.py b/desc/compute/fourier_bounce_integral.py new file mode 100644 index 0000000000..39c4b32cd9 --- /dev/null +++ b/desc/compute/fourier_bounce_integral.py @@ -0,0 +1,453 @@ +"""Methods for computing bounce integrals.""" + +from orthax.chebyshev import chebpts1, chebpts2 + +from desc.backend import dct, idct, irfft, jnp, put, rfft +from desc.compute.bounce_integral import _filter_distinct, _fix_inversion, _take_mask +from desc.compute.bounce_integral import affine_bijection as map_domain +from desc.compute.bounce_integral import affine_bijection_to_disc as map_domain_to_disc +from desc.grid import Grid +from desc.utils import Index, errorif + +_eps = min(jnp.finfo(jnp.array(1.0).dtype).eps * 1e2, 1e-10) + + +def _fourier_pts(M): + return -jnp.pi + 2 * jnp.pi * jnp.arange(1, M + 1) / M + + +# Y = [a, b] evaluate on grid -> y = [-1, 1] chebyshev points -> y = cos(z) +# evenly spaced z. +# So I find coefficients to chebyshev series T_n(y) = cos(n arcos(y)) = cos(n z). +# So evaluating my chebyshev series in y is same as evaluting cosine series in +# z = arcos(y). +# for y = inversemap[a, b]. +# Open questions is finding roots y using chebroots better or is finding roots z +# of trig poly. +# answer: research shows doesn't really matter. +# TODO: could try boyd. eq. 
16.46 pg 336 +def _chebyshev_pts(N, lobatto, domain=(-1, 1)): + y = chebpts2(N) if lobatto else chebpts1(N) + return map_domain(y, domain[0], domain[-1]) + + +# Vectorized versions of numpy functions. Need root finding to be as efficient as +# possible, so manually vectorize to solve stack of matrices with single LAPACK call. +# Also skip the slow input massaging because we don't allow duck typed lists. + + +def _chebcompanion(c): + # Adapted from + # https://numpy.org/doc/stable/reference/generated/ + # numpy.polynomial.chebyshev.chebcompanion.html. + # https://github.com/f0uriest/orthax/blob/main/orthax/chebyshev.py. + errorif(c.shape[-1] < 2, msg="Series must have maximum degree of at least 1.") + if c.shape[-1] == 2: + return jnp.array([[-c[..., 0] / c[..., 1]]]) + + n = c.shape[-1] - 1 + scl = jnp.hstack([1.0, jnp.full(n - 1, jnp.sqrt(0.5))]) + mat = jnp.zeros((*c.shape[:-1], n, n), dtype=c.dtype) + mat = put(mat, Index[..., 0, 0], jnp.sqrt(0.5)) + mat = put(mat, Index[..., 0, 1:], 0.5) + mat = put(mat, Index[..., -1, :], mat[..., 0, :]) + mat = put( + mat, + Index[..., -1], + mat[..., -1] - c[..., :-1] / c[..., -1] * scl / scl[-1] * 0.5, + ) + return mat + + +def _chebroots(c): + # Adapted from + # https://numpy.org/doc/stable/reference/generated/ + # numpy.polynomial.chebyshev.chebroots.html. + # https://github.com/f0uriest/orthax/blob/main/orthax/chebyshev.py, + if c.shape[-1] < 2: + return jnp.reshape([], (0,) * c.ndim) + if c.shape[-1] == 2: + return jnp.array([-c[..., 0] / c[..., 1]]) + + # rotated companion matrix reduces error + m = _chebcompanion(c)[..., ::-1, ::-1] + # Low priority: + # there are better techniques to find eigenvalues of Chebyshev colleague matrix. + r = jnp.sort(jnp.linalg.eigvals(m)) + return r + + +class FourierChebyshevBasis: + """Fourier-Chebyshev series. + + f(x, y) = ∑ₘₙ aₘₙ ψₘ(x) Tₙ(y) + where ψₘ are trigonometric functions and Tₙ are Chebyshev polynomials + on domain [−yₘᵢₙ, yₘₐₓ]. + + Attributes + ---------- + L : int + Batch dimension size. + M : int + Fourier spectral resolution. + N : int + Chebyshev spectral resolution. + lobatto : bool + Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) + or interior roots grid for Chebyshev points. + domain : (float, float) + Domain for y coordinates. + + """ + + def __init__(self, f, M, N, lobatto=False, domain=(-1, 1)): + """Interpolate Fourier-Chebyshev basis to ``f``. + + Parameters + ---------- + f : jnp.ndarray + Shape (..., M, N). + Samples of function on the ``FourierChebyshevBasis.nodes`` grid. + M : int + Grid resolution in x direction. Preferably power of 2. + N : int + Grid resolution in y direction. Preferably power of 2. + lobatto : bool + Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) + or interior roots grid for Chebyshev points. + domain : (float, float) + Domain for y coordinates. Default is [-1, 1]. + + """ + errorif(domain[0] > domain[-1], msg="Got inverted y coordinate domain.") + errorif(lobatto, NotImplementedError, "JAX has not implemented type 1 DCT.") + self.domain = domain + self.lobatto = bool(lobatto) + self._c = rfft( + dct(f.reshape(-1, M, N), type=2 - self.lobatto, axis=-1), + axis=-2, + ) + self.N = N + self.M = M + self.L = self._c.shape[0] + self._a_n = None + + @classmethod + def nodes(cls, M, N, lobatto=False, domain=(-1, 1), **kwargs): + """Tensor product grid of optimal collocation nodes for this basis. + + Parameters + ---------- + M : int + Grid resolution in x direction. Preferably power of 2. 
+ N : int + Grid resolution in y direction. Preferably power of 2. + lobatto : bool + Whether to use the Gauss-Lobatto (Extrema-plus-Endpoint) + or interior roots grid for Chebyshev points. + domain : (float, float) + Domain for y coordinates. Default is [-1, 1]. + + Returns + ------- + grid : jnp.ndarray + Shape (M * N, 2). + Grid of (x, y) points for optimal interpolation. + + """ + x = _fourier_pts(M) + y = _chebyshev_pts(N, lobatto, domain) + if "rho" in kwargs: + # then user wants a 3D DESC grid + grid = Grid.create_meshgrid([kwargs.pop("rho"), x, y], **kwargs) + else: + xx, yy = map(jnp.ravel, jnp.meshgrid(x, y, indexing="ij")) + grid = jnp.column_stack([xx, yy]) + return grid + + def evaluate(self, M, N): + """Evaluate Fourier-Chebyshev series. + + Parameters + ---------- + M : int + Grid resolution in x direction. Preferably power of 2. + N : int + Grid resolution in y direction. Preferably power of 2. + + Returns + ------- + f : jnp.ndarray + Shape (L, M, N) + Fourier-Chebyshev series evaluated at ``FourierChebyshevBasis.nodes(M, N)``. + + """ + f = ( + idct( + irfft(self._c, n=M, axis=-2) * M / self.M, + type=2 - self.lobatto, + n=N, + axis=-1, + ) + * (N - self.lobatto) + / (self.N - self.lobatto) + ) + return f + + def _harmonics(self): + """Spectral coefficients aₘₙ of the interpolating polynomial. + + Transform Fourier interpolant harmonics to Nyquist trigonometric + interpolant harmonics so that the coefficients are all real. + + Returns + ------- + a_mn : jnp.ndarray + Shape (L, μ, N) where μ ∈ {M, M+1}. + Real valued spectral coefficients for Fourier-Chebyshev basis. + + """ + # ∂ₓ = 0 coefficients + a0 = jnp.real(self._c[:, 0])[:, jnp.newaxis] + # cos(mx) Tₙ(y) coefficients + an = jnp.real(self._c[:, 1:]) * 2 + # sin(mx) Tₙ(y) coefficients + bn = jnp.imag(self._c[:, 1:]) * (-2) + # Note 2*(M//2)+1 <= M+1 and bM = 0 if equality. + a_mn = jnp.hstack([a0, an, bn]) + assert a_mn.shape[-2] in (self.M, self.M + 1) and a_mn.shape[-1] == self.N + return a_mn + + def _evaluate_fourier_basis(self, x): + """Evaluate Fourier basis at points ``x`` and cache the coefficients. + + Parameters + ---------- + x : jnp.ndarray + Shape (L, x.shape[-1]) or (x.shape[-1], ). + Evaluation points. If 1d assumes batch dimension over L is implicit. + + Returns + ------- + a_n : jnp.ndarray + Shape (L, N, x.shape[-1]) + + """ + # TODO: do in desc.basis too for potentially significant performance boost. + # Partial summation technique; see Boyd p. 185, eq. 10.2. + x = jnp.atleast_2d(x)[:, jnp.newaxis] + m = jnp.arange(1, self.M // 2 + 1)[:, jnp.newaxis] + psi = jnp.dstack([jnp.ones(x.shape), jnp.cos(m * x), jnp.sin(m * x)]) + # batch matrix product (L, N, μ) @ (L, μ, x) = (L, N, x) + self._a_n = jnp.swapaxes(self._harmonics(), -1, -2) @ psi + assert self._a_n.shape == (self.L, self.N, x.shape[-1]) + return self._a_n + + def y_intersect(self, x, k=0, eps=_eps): + """Coordinates yᵢ such that f(x, yᵢ) = k(x). + + Parameters + ---------- + x : jnp.ndarray + Shape (L, x.shape[-1]) or broadcastable of lower dimension. + Evaluation points. If 1d assumes batch dimension over L is implicit + (i.e. standard numpy broadcasting rules). + k : jnp.ndarray + Shape (P, L, x.shape[-1]) or broadcastable of lower dimension. + Specify to find solutions yᵢ to f(x, yᵢ) = k(x). Default 0. + eps : float + Absolute tolerance with which to consider value as zero. + + Returns + ------- + y : jnp.ndarray + Shape (P, L, x.shape[-1], N - 1) or (L, x.shape[-1], N - 1). + Solutions yᵢ of f(x, yᵢ) = k(x), in ascending order. 
+ is_decreasing : jnp.ndarray + Shape y.shape. + Whether ∂f/∂y (x, yᵢ) is decreasing. + is_increasing : jnp.ndarray + Shape y.shape. + Whether ∂f/∂y (x, yᵢ) is increasing. + is_intersect : jnp.ndarray + Shape y.shape. + Boolean array into ``y`` indicating whether element is an intersect. + + """ + a_n = self._evaluate_fourier_basis(x) + if k.ndim == 3: + a_n = a_n[jnp.newaxis] + a_n = put(a_n, Index[..., 0, :], a_n[..., 0, :] - k) + a_n = jnp.swapaxes(a_n, -1, -2) # shape is (P, L, x, N) + # roots yᵢ of f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y) - k(x) + y = _chebroots(a_n) + assert y.shape[-3:] == (self.L, x.shape[-1], self.N - 1) + + # Pick sentinel such that only distinct roots are considered intersects. + y = _filter_distinct(y, sentinel=-2, eps=eps) + is_intersect = (jnp.abs(jnp.imag(y)) <= eps) & (jnp.abs(jnp.real(y)) <= 1) + y = jnp.where(is_intersect, jnp.real(y), 0) # ensure y is in domain of arcos + # ∂f/∂y = ∑ₙ₌₀ᴺ⁻¹ aₙ(x) n Uₙ₋₁(y) + # sign ∂f/∂y = sign ∑ₙ₌₁ᴺ⁻¹ aₙ(x) sin(n arcos y) + s = jnp.inner( + a_n, jnp.sin(jnp.arange(self.N) * jnp.arccos(y)[..., jnp.newaxis]) + ) + is_decreasing = s <= 0 + is_increasing = s >= 0 + + y = map_domain(y, self.domain[0], self.domain[-1]) + return y, is_decreasing, is_increasing, is_intersect + + def _isomorphism_1d(self, y): + """Return coordinates z ∈ ℂ isomorphic to (x, y) ∈ ℂ². + + Maps row x of y to z = α(x) + y where α(x) = x * |domain|. + + Parameters + ---------- + y : jnp.ndarray + Shape (..., *y.shape[-2:]). + Second to last axis iterates the rows. + Leading axes are considered batch axes in usual numpy broadcasting. + + Returns + ------- + z : jnp.ndarray + Shape (..., y.shape[-2] * y.shape[-1]). + Isomorphic coordinates. + + """ + alpha = (self.domain[-1] - self.domain[0]) * jnp.arange(y.shape[-2]) + z = _flatten_matrix(alpha[:, jnp.newaxis] + y) + return z + + def _isomorphism_2d(self, z): + """Return coordinates (x, y) ∈ ℂ² isomorphic to z ∈ ℂ. + + Returns index x and value y such that z = α(x) + y where α(x) = x * |domain|. + + Parameters + ---------- + z : jnp.ndarray + + Returns + ------- + x_index : jnp.ndarray + Shape y.shape. + Isomorphic coordinates. + + """ + period = self.domain[-1] - self.domain[0] + x_index = z // period + y_value = z % period + return x_index, y_value + + def bounce_points(self, y, is_decreasing, is_increasing, is_intersect): + """Compute bounce points given intersections. + + Parameters + ---------- + y : jnp.ndarray + Shape (..., *y.shape[-2:]). + Solutions yᵢ of f(x, yᵢ) = k(x), in ascending order. + Assumes the -2nd axis enumerates over poloidal coordinates + all belonging to a single field line. See ``alpha_sequence``. + is_decreasing : jnp.ndarray + Shape y.shape. + Whether ∂f/∂y (x, yᵢ) is decreasing. + is_increasing : jnp.ndarray + Shape y.shape. + Whether ∂f/∂y (x, yᵢ) is increasing. + is_intersect : jnp.ndarray + Shape y.shape. + Boolean array into ``y`` indicating whether element is an intersect. + + Returns + ------- + bp1, bp2 : (jnp.ndarray, jnp.ndarray) + Shape (*y.shape[:-2], y.shape[-1] * y.shape[-2]). + The field line-following coordinates of bounce points for a given pitch along + a field line. The pairs ``bp1`` and ``bp2`` form left and right integration + boundaries, respectively, for the bounce integrals. + + If there were less than ``y.shape[-1] * y.shape[-2]`` bounce points detected + along a field line, then the last axis, which enumerates the bounce points for + a particular field line, is padded with zero. + + """ + # Last axis enumerates intersects of a pitch along a field line. 
+ y = self._isomorphism_1d(y) + is_decreasing = _flatten_matrix(is_decreasing) + is_increasing = _flatten_matrix(is_increasing) + is_intersect = _flatten_matrix(is_intersect) + is_bp1 = is_decreasing & is_intersect + is_bp2 = is_increasing & _fix_inversion(is_intersect, is_increasing) + + sentinel = self.domain[0] - 1 + bp1 = _take_mask(y, is_bp1, fill_value=sentinel) + bp2 = _take_mask(y, is_bp2, fill_value=sentinel) + + mask = (bp1 > sentinel) & (bp2 > sentinel) + # Set outside mask to same value so integration is over set of measure zero. + bp1 = jnp.where(mask, bp1, 0) + bp2 = jnp.where(mask, bp2, 0) + return bp1, bp2 + + def _interp1d( + self, z + ): # assumes z is on x points from a_n generated after evaluate fourier + """Evaluate basis at coordinates z ∈ ℝ isomorphic to (x, y) ∈ ℝ². + + Parameters + ---------- + z : jnp.ndarray + Shape (P, L, B, Q). + Isomorphic coordinates. + Pitch, radial, bounce points, quad points. + + Returns + ------- + f : jnp.ndarray + Shape z.shape. + This basis evaluated at z. + + """ + # Will have shape (P, L, BQ) + x_index, y_values = map(_flatten_matrix, self._isomorphism_2d(z)) + y_values = map_domain_to_disc(y_values, self.domain[0], self.domain[1]) + a_n = jnp.swapaxes(self._a_n, -1, -2) # changes to shape (L, x, N) + n = jnp.arange(self.N) + T = jnp.cos(n * jnp.arccos(y_values)[..., jnp.newaxis]) + # f(z) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x[z]) Tₙ(y[z]) + f = jnp.inner(a_n[x_index], T).reshape(z.shape) + return f + + +def alpha_sequence(alpha_0, m, iota, period): + """Get sequence of poloidal coordinates (α₀, α₁, …, αₘ₋₁) of field line. + + Parameters + ---------- + alpha_0 : float + Starting field line poloidal label. + m : float + Number of periods to follow field line. + iota : jnp.ndarray + Shape (L, ) + Rotational transform normalized by 2π. + period : float + Toroidal period after which to update label. + + Returns + ------- + alpha : jnp.ndarray + Shape (L, m) + Sequence of poloidal coordinates (α₀, α₁, …, αₘ₋₁) that specify field line. 
+ + """ + # Δz (∂α/∂ζ) = Δz ι̅ = Δz ι/2π = Δz data["iota"] + return (alpha_0 + period * iota[:, jnp.newaxis] * jnp.arange(m)) % (2 * jnp.pi) + + +def _flatten_matrix(y): + return y.reshape(*y.shape[:-2], -1) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index e5ab8a3eac..4266fb2542 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -38,6 +38,7 @@ plot_field_line, tanh_sinh, ) +from desc.compute.fourier_bounce_integral import FourierChebyshevBasis from desc.compute.utils import dot from desc.equilibrium import Equilibrium from desc.equilibrium.coords import get_rtz_grid @@ -794,3 +795,22 @@ def dummy_fun(pitch): assert np.isclose(grad(dummy_fun)(1.0), 650, rtol=1e-3) return fig + + +# todo: +@pytest.mark.unit +def test_fcb_interp(): + """Test interpolation for this basis function.""" + domain = (0, 2 * np.pi) + M, N = 1, 5 + xy0 = FourierChebyshevBasis.nodes(M, N, domain=domain) + f0 = jnp.mean(xy0.reshape(M, N, 2), axis=-1) + fcb = FourierChebyshevBasis(f0, M, N, domain=domain) + f1 = fcb.evaluate(1, fcb.N * 10) + xy1 = FourierChebyshevBasis.nodes(1, fcb.N * 10, domain=domain) + + fig, ax = plt.subplots() + ax.plot(xy0[:, 1], f0[0, :], linestyle="--") + ax.plot(xy1[:, 1], f1[0, :], marker="x") + plt.show() + return fig From 30b7c1be56f23e0a273ce118df5aca74b50d500f Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 24 Jul 2024 19:14:16 -0400 Subject: [PATCH 194/241] Move more efficient bounce points computation from fourier bounce to here --- desc/compute/bounce_integral.py | 136 +++++++++++++++++--------------- 1 file changed, 72 insertions(+), 64 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index d1e764bf5d..4fcb139f83 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -9,10 +9,10 @@ from desc.backend import flatnonzero, imap, jnp, put, take from desc.compute.utils import safediv -from desc.utils import Index, errorif, warnif +from desc.utils import errorif, warnif -@partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={2, 3}) +@partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={"size", "fill_value"}) def _take_mask(a, mask, size=None, fill_value=None): """JIT compilable method to return ``a[mask][:size]`` padded by ``fill_value``. @@ -68,6 +68,15 @@ def _filter_nonzero_measure(bp1, bp2): return bp1[mask], bp2[mask] +def _filter_distinct(r, sentinel, eps): + """Set all but one of matching adjacent elements in ``r`` to ``sentinel``.""" + # eps needs to be low enough that close distinct roots do not get removed. + # Otherwise, algorithms relying on continuity will fail. + mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=sentinel), 0, atol=eps) + r = jnp.where(mask, sentinel, r) + return r + + def _sentinel_append(r, sentinel, num=1): """Concat ``sentinel`` ``num`` times to ``r`` on last axis.""" sent = jnp.broadcast_to(sentinel, (*r.shape[:-1], num)) @@ -170,24 +179,28 @@ def _poly_root( First axis should store coefficients of a polynomial. For a polynomial given by ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0]-1``, coefficient cᵢ should be stored at ``c[n-i]``. - k : Array + k : jnp.ndarray Specify to find solutions to ∑ᵢⁿ cᵢ xⁱ = ``k``. Should broadcast with arrays of shape ``c.shape[1:]``. - a_min, a_max : jnp.ndarray, jnp.ndarray - Minimum and maximum value to return roots between. If specified only real roots - are returned. If None, returns all complex roots. Should broadcast with arrays - of shape ``c.shape[1:]``. 
+ a_min : jnp.ndarray + Minimum ``a_min`` and maximum ``a_max`` value to return roots between. + If specified only real roots are returned. If None, returns all complex roots. + Should broadcast with arrays of shape ``c.shape[1:]``. + a_max : jnp.ndarray + Minimum ``a_min`` and maximum ``a_max`` value to return roots between. + If specified only real roots are returned. If None, returns all complex roots. + Should broadcast with arrays of shape ``c.shape[1:]``. sort : bool Whether to sort the roots. sentinel : float Value with which to pad array in place of filtered elements. Anything less than ``a_min`` or greater than ``a_max`` plus some floating point - error buffer will work just like nan while also avoiding nan gradient. + error buffer will work just like nan while avoiding nan gradient. eps : float Absolute tolerance with which to consider value as zero. distinct : bool Whether to only return the distinct roots. If true, when the multiplicity is - greater than one, the repeated roots are set to nan. + greater than one, the repeated roots are set to ``sentinel``. Returns ------- @@ -211,7 +224,7 @@ def _poly_root( c = [jnp.broadcast_to(c_i, c_n.shape) for c_i in c[:-1]] c.append(c_n) c = jnp.stack(c, axis=-1) - r = jnp.nan_to_num(_roots(c), nan=sentinel) + r = _roots(c) if get_only_real_roots: a_min = -jnp.inf if a_min is None else a_min[..., jnp.newaxis] a_max = +jnp.inf if a_max is None else a_max[..., jnp.newaxis] @@ -223,12 +236,7 @@ def _poly_root( if sort or distinct: r = jnp.sort(r, axis=-1) - if distinct: - # eps needs to be low enough that close distinct roots do not get removed. - # Otherwise, algorithms relying on continuity will fail. - mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=sentinel), 0, atol=eps) - r = jnp.where(mask, sentinel, r) - return r + return _filter_distinct(r, sentinel, eps) if distinct else r def _poly_der(c): @@ -288,7 +296,7 @@ def _poly_val(x, c): ) """ - # Fine instead of Horner's method as we expect to evaluate cubic polynomials. + # Better than Horner's method as we expect to evaluate low order polynomials. X = x[..., jnp.newaxis] ** jnp.arange(c.shape[0] - 1, -1, -1) val = jnp.einsum("...i,i...->...", X, c) return val @@ -503,6 +511,31 @@ def _check_shape(knots, B_c, B_z_ra_c, pitch=None): return B_c, B_z_ra_c, pitch +@partial(jnp.vectorize, signature="(m),(m)->(m)") +def _fix_inversion(is_intersect, B_z_ra): + # idx of first two intersects + idx = flatnonzero(is_intersect, size=2, fill_value=-1) + edge_case = ( + (B_z_ra[idx[0]] == 0) + & (B_z_ra[idx[1]] < 0) + & is_intersect[idx[0]] + & is_intersect[idx[1]] + # In theory, we need to keep propagating this edge case, + # e.g. (B_z_ra[..., 1] < 0) | ((B_z_ra[..., 1] == 0) & (B_z_ra[..., 2] < 0)...). + # At each step, the likelihood that an intersection has already been lost + # due to floating point errors grows, so the real solution is to pick a less + # degenerate pitch value - one that does not ride the global extrema of |B|. + ) + # The pairs bp1[i, j, k] and bp2[i, j, k] are boundaries of an integral only + # if bp1[i, j, k] <= bp2[i, j, k]. For correctness of the algorithm, it is + # required that the first intersect satisfies non-positive derivative. Now, + # because B_z_ra[i, j, k] <= 0 implies B_z_ra[i, j, k + 1] >= 0 by continuity, + # there can be at most one inversion, and if it exists, the inversion must be + # at the first pair. To correct the inversion, it suffices to disqualify the + # first intersect as a right boundary, except under the above edge case. 
+ return put(is_intersect, idx[0], edge_case) + + def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True, **kwargs): """Compute the bounce points given spline of |B| and pitch λ. @@ -538,16 +571,13 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True, **kwargs) ------- bp1, bp2 : (jnp.ndarray, jnp.ndarray) Shape (P, S, N * degree). - The field line-following ζ coordinates of bounce points for a given pitch along - a field line. The pairs ``bp1[i,j,k]`` and ``bp2[i,j,k]`` form left and right - integration boundaries, respectively, for the bounce integrals. + The field line-following coordinates of bounce points for a given pitch along + a field line. The pairs ``bp1`` and ``bp2`` form left and right integration + boundaries, respectively, for the bounce integrals. - For the shaping notation, the ``degree`` of the spline of |B| matches - ``B_c.shape[0]-1``, the number of polynomials per spline ``N`` matches - ``knots.size-1``, and the number of field lines is denoted by ``S``. - If there were less than ``N*degree`` bounce points detected along a field line, - then the last axis, which enumerates the bounce points for a particular field - line, is padded with zero. + If there were less than ``N * degree`` bounce points detected + along a field line, then the last axis, which enumerates the bounce points for + a particular field line, is padded with zero. """ B_c, B_z_ra_c, pitch = _check_shape(knots, B_c, B_z_ra_c, pitch) @@ -565,54 +595,26 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True, **kwargs) assert intersect.shape == (P, S, N, degree) # Reshape so that last axis enumerates intersects of a pitch along a field line. + B_z_ra = _poly_val(x=intersect, c=B_z_ra_c[..., jnp.newaxis]).reshape(P, S, -1) # Only consider intersect if it is within knots that bound that polynomial. is_intersect = intersect.reshape(P, S, -1) >= 0 - B_z_ra = _poly_val(x=intersect, c=B_z_ra_c[..., jnp.newaxis]).reshape(P, S, -1) - # Gather intersects along a field line to be contiguous. - B_z_ra = _take_mask(B_z_ra, is_intersect, fill_value=0) + # Following discussion on page 3 and 5 of https://doi.org/10.1063/1.873749, + # we ignore the bounce points of particles only assigned to a class that are + # trapped outside this snapshot of the field line. + is_bp1 = (B_z_ra <= 0) & is_intersect + is_bp2 = (B_z_ra >= 0) & _fix_inversion(is_intersect, B_z_ra) - sentinel = knots[0] - 1 # Transform out of local power basis expansion. intersect = (intersect + knots[:-1, jnp.newaxis]).reshape(P, S, -1) - # Gather intersects along a field line to be contiguous, followed by some sentinel. - intersect = _take_mask(intersect, is_intersect, fill_value=sentinel) - is_intersect = intersect > sentinel - is_bp1 = (B_z_ra <= 0) & is_intersect - is_bp2 = (B_z_ra >= 0) & is_intersect - edge_case = ( - (B_z_ra[..., 0] == 0) - & (B_z_ra[..., 1] < 0) - & is_intersect[..., 0] - & is_intersect[..., 1] - # In theory, we need to keep propagating this edge case, - # e.g (B_z_ra[..., 1] < 0) | ((B_z_ra[..., 1] == 0) & (B_z_ra[..., 2] < 0)...). - # At each step, the likelihood that an intersection has already been lost - # due to floating point errors grows, so the real solution is to pick a less - # degenerate pitch value - one that does not ride the global extrema of |B|. - ) - is_bp2 = put(is_bp2, Index[..., 0], edge_case) - # Get ζ values of bounce points from the masks. 
+ sentinel = knots[0] - 1 bp1 = _take_mask(intersect, is_bp1, fill_value=sentinel) bp2 = _take_mask(intersect, is_bp2, fill_value=sentinel) - # The pairs bp1[i, j, k] and bp2[i, j, k] are boundaries of an integral only - # if bp1[i, j, k] <= bp2[i, j, k]. For correctness of the algorithm, it is - # required that the first intersect satisfies non-positive derivative. Now, - # because B_z_ra[i, j, k] <= 0 implies B_z_ra[i, j, k + 1] >= 0 by continuity, - # there can be at most one inversion, and if it exists, the inversion must be - # at the first pair. To correct the inversion, it suffices to disqualify the - # first intersect as a right boundary, except under the above edge case. - - # Following discussion on page 3 and 5 of https://doi.org/10.1063/1.873749, - # we ignore the bounce points of particles assigned to a class that are - # trapped outside this snapshot of the field line. - # TODO: Better to always consider boundary as bounce points. Simple change; - # do in same pull request that resolves GitHub issue #1045. if check: _check_bounce_points(bp1, bp2, sentinel, pitch, knots, B_c, plot, **kwargs) mask = (bp1 > sentinel) & (bp2 > sentinel) - # Set outside mask to same value so that integration is over set of measure zero. + # Set outside mask to same value so integration is over set of measure zero. bp1 = jnp.where(mask, bp1, 0) bp2 = jnp.where(mask, bp2, 0) return bp1, bp2 @@ -638,7 +640,7 @@ def _composite_linspace(x, num): """ x = jnp.atleast_1d(x) pts = jnp.linspace(x[:-1], x[1:], num + 1, endpoint=False) - pts = jnp.moveaxis(pts, source=0, destination=1).reshape(-1, *x.shape[1:]) + pts = jnp.swapaxes(pts, 0, 1).reshape(-1, *x.shape[1:]) pts = jnp.append(pts, x[jnp.newaxis, -1], axis=0) assert pts.shape == ((x.shape[0] - 1) * num + x.shape[0], *x.shape[1:]) return pts @@ -739,6 +741,12 @@ def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): return B_extrema +def affine_bijection_to_disc(x, a, b): + """[a, b] ∋ x ↦ y ∈ [−1, 1].""" + y = 2 * (x - a) / (b - a) - 1 + return y + + def affine_bijection(x, a, b): """[−1, 1] ∋ x ↦ y ∈ [a, b].""" y = (x + 1) / 2 * (b - a) + a @@ -840,7 +848,7 @@ def tanh_sinh(deg, m=10): Parameters ---------- - deg: int + deg : int Number of quadrature points. m : float Number of machine epsilons used for floating point error buffer. Larger implies From 263930d7a7357cc505b91f52ce3c667a1fa6a9d4 Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 24 Jul 2024 20:59:20 -0400 Subject: [PATCH 195/241] Fix dynamic jaxpr shape error induced from previous commit --- desc/compute/bounce_integral.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 4fcb139f83..e2a93e819d 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -12,7 +12,7 @@ from desc.utils import errorif, warnif -@partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={"size", "fill_value"}) +@partial(jnp.vectorize, signature="(m),(m)->(n)") def _take_mask(a, mask, size=None, fill_value=None): """JIT compilable method to return ``a[mask][:size]`` padded by ``fill_value``. 
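The hunks above route every bounce integral through the pair affine_bijection_to_disc / affine_bijection, so that fixed quadrature nodes on [−1, 1] can be reused for an arbitrary bounce interval [ζ_b₁, ζ_b₂]. A small standalone check of that change of variables: the two helper definitions are copied from the hunk above, while the Gauss-Legendre usage (via numpy rather than orthax, to keep the sketch self-contained) is illustrative only:

import numpy as np
from numpy.polynomial.legendre import leggauss

def affine_bijection_to_disc(x, a, b):
    # [a, b] -> [-1, 1], as in the patch
    return 2 * (x - a) / (b - a) - 1

def affine_bijection(x, a, b):
    # [-1, 1] -> [a, b], as in the patch
    return (x + 1) / 2 * (b - a) + a

a, b = 0.0, np.pi                       # stand-in bounce interval [zeta_b1, zeta_b2]
x, w = leggauss(10)                     # nodes and weights on [-1, 1]
zeta = affine_bijection(x, a, b)        # nodes mapped onto the interval
jac = (b - a) / 2                       # d(zeta)/dx of the affine map
print(np.sum(w * np.sin(zeta)) * jac)   # ~2.0, the integral of sin on [0, pi]
print(np.allclose(affine_bijection_to_disc(zeta, a, b), x))  # the maps are inverses

The Jacobian factor (ζ_b₂ − ζ_b₁)/2 is what the bounce quadrature multiplies in after composing the affine map with an automorphism of [−1, 1].
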
From 95ed8a8c49f29ec73325a9538de5e1ecde1c0d54 Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 25 Jul 2024 14:55:56 -0400 Subject: [PATCH 196/241] Add num_wells parameter to reduce size of bounce points matrix by factor of 200 --- desc/compute/bounce_integral.py | 74 +++++++++++++++++++++++---------- 1 file changed, 52 insertions(+), 22 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index e2a93e819d..ca1bf040c2 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -9,10 +9,10 @@ from desc.backend import flatnonzero, imap, jnp, put, take from desc.compute.utils import safediv -from desc.utils import errorif, warnif +from desc.utils import errorif, setdefault, warnif -@partial(jnp.vectorize, signature="(m),(m)->(n)") +@partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={"size", "fill_value"}) def _take_mask(a, mask, size=None, fill_value=None): """JIT compilable method to return ``a[mask][:size]`` padded by ``fill_value``. @@ -39,9 +39,7 @@ def _take_mask(a, mask, size=None, fill_value=None): """ assert a.shape == mask.shape - idx = flatnonzero( - mask, size=mask.size if size is None else size, fill_value=mask.size - ) + idx = flatnonzero(mask, size=setdefault(size, mask.size), fill_value=mask.size) return take( a, idx, @@ -314,7 +312,7 @@ def plot_field_line( title_id=None, include_knots=True, alpha_knot=0.1, - alpha_pitch=0.25, + alpha_pitch=0.3, show=True, ): """Plot the field line given spline of |B|. @@ -368,8 +366,8 @@ def add(lines): for knot in B.x: add(ax.axvline(x=knot, color="tab:blue", alpha=alpha_knot, label="knot")) z = np.linspace( - start=B.x[0] if start is None else start, - stop=B.x[-1] if stop is None else stop, + start=setdefault(start, B.x[0]), + stop=setdefault(stop, B.x[-1]), num=num, ) add(ax.plot(z, B(z), label=r"$\vert B \vert (\zeta)$")) @@ -536,7 +534,9 @@ def _fix_inversion(is_intersect, B_z_ra): return put(is_intersect, idx[0], edge_case) -def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True, **kwargs): +def bounce_points( + pitch, knots, B_c, B_z_ra_c, num_wells=None, check=False, plot=True, **kwargs +): """Compute the bounce points given spline of |B| and pitch λ. Parameters @@ -562,22 +562,35 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True, **kwargs) First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials that compose the spline along a particular field line. + num_wells : int + If not specified, then all bounce points are returned in an array whose + last axis has size ``(knots.size - 1) * (B_c.shape[0] - 1)``. If there + were less than that many wells detected along a field line, then the last + axis of the returned arrays, which enumerates bounce points for a particular + field line and pitch, is padded with zero. + + Specify to only return the first ``size`` pairs of bounce points for each + pitch along each field line. This is useful if ``size`` is a close upper + bound to the actual number of wells. To get an intuition for a good estimate + for ``size``, plot the field line with all the bounce points identified + by calling this function with ``check=True``. + As a reference, there are typically <= 5 wells per toroidal transit. check : bool Flag for debugging. plot : bool - Whether to plot some things if check is true. + Whether to plot some things if check is true. Default is true. 
Returns ------- bp1, bp2 : (jnp.ndarray, jnp.ndarray) - Shape (P, S, N * degree). + Shape (P, S, num_wells). The field line-following coordinates of bounce points for a given pitch along a field line. The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, respectively, for the bounce integrals. - If there were less than ``N * degree`` bounce points detected - along a field line, then the last axis, which enumerates the bounce points for - a particular field line, is padded with zero. + If there were less than ``size`` wells detected along a field line, then + the last axis, which enumerates bounce points for a particular field line + and pitch, is padded with zero. """ B_c, B_z_ra_c, pitch = _check_shape(knots, B_c, B_z_ra_c, pitch) @@ -606,9 +619,10 @@ def bounce_points(pitch, knots, B_c, B_z_ra_c, check=False, plot=True, **kwargs) # Transform out of local power basis expansion. intersect = (intersect + knots[:-1, jnp.newaxis]).reshape(P, S, -1) - sentinel = knots[0] - 1 - bp1 = _take_mask(intersect, is_bp1, fill_value=sentinel) - bp2 = _take_mask(intersect, is_bp2, fill_value=sentinel) + # New versions of jax only like static sentinels. + sentinel = -10000000.0 # knots[0] - 1 + bp1 = _take_mask(intersect, is_bp1, size=num_wells, fill_value=sentinel) + bp2 = _take_mask(intersect, is_bp2, size=num_wells, fill_value=sentinel) if check: _check_bounce_points(bp1, bp2, sentinel, pitch, knots, B_c, plot, **kwargs) @@ -1232,7 +1246,7 @@ def bounce_integral( check : bool Flag for debugging. Must be false for jax transformations. plot : bool - Whether to plot stuff if ``check`` is true. + Whether to plot stuff if ``check`` is true. Default is false. Returns ------- @@ -1296,7 +1310,9 @@ def bounce_integral( # Recall affine_bijection(auto(x), ζ_b₁, ζ_b₂) = ζ. x = auto(x) - def bounce_integrate(integrand, f, pitch, method="akima", batch=True): + def bounce_integrate( + integrand, f, pitch, method="akima", batch=True, num_wells=None + ): """Bounce integrate ∫ f(ℓ) dℓ. Parameters @@ -1323,16 +1339,30 @@ def bounce_integrate(integrand, f, pitch, method="akima", batch=True): Default is akima spline. batch : bool Whether to perform computation in a batched manner. Default is true. + num_wells : int + If not specified, then all bounce integrals are returned in an array whose + last axis has size ``(knots.size - 1) * degree``. If there + were less than that many wells detected along a field line, then the last + axis of the returned array, which enumerates bounce integrals for a + particular field line and pitch, is padded with zero. + + Specify to only return the bounce integrals between the first ``size`` + wells for each pitch along each field line. This is useful if ``size`` is a + close upper bound to the actual number of wells. To get an intuition for a + good estimate for ``size``, plot the field line with all the bounce points + identified. This will be done automatically if the ``bounce_integral`` + function is called with ``check=True`` and ``plot=True``. + As a reference, there are typically <= 5 wells per toroidal transit. Returns ------- result : jnp.ndarray - Shape (P, S, (knots.size - 1) * degree). + Shape (P, S, num_wells). First axis enumerates pitch values. Second axis enumerates the field lines. Last axis enumerates the bounce integrals. 
""" - bp1, bp2 = bounce_points(pitch, knots, B_c, B_z_ra_c, check, plot) + bp1, bp2 = bounce_points(pitch, knots, B_c, B_z_ra_c, num_wells, check, plot) result = _bounce_quadrature( bp1, bp2, @@ -1350,7 +1380,7 @@ def bounce_integrate(integrand, f, pitch, method="akima", batch=True): batch=batch, check=check, ) - assert result.shape[-1] == (knots.size - 1) * degree + assert result.shape[-1] == setdefault(num_wells, (knots.size - 1) * degree) return result return bounce_integrate, spline From 1992e15d95a110750d6d785d4708b979935ef92e Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 25 Jul 2024 15:29:02 -0400 Subject: [PATCH 197/241] Specify num_wells expclitly in test to make sure it doesnt affect autodiff --- tests/test_bounce_integral.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 380c62cc65..08793b57fa 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -747,15 +747,17 @@ def integrand_den(B, pitch): integrand=integrand_num, f=[cvdrift, gbdrift], pitch=pitch[:, np.newaxis], + num_wells=1, # don't need to specify but will reduce memory and improve speed ) drift_numerical_den = bounce_integrate( integrand=integrand_den, f=[], pitch=pitch[:, np.newaxis], + num_wells=1, ) - drift_numerical_num = np.squeeze(drift_numerical_num[drift_numerical_num != 0]) - drift_numerical_den = np.squeeze(drift_numerical_den[drift_numerical_den != 0]) + drift_numerical_num = np.squeeze(drift_numerical_num) + drift_numerical_den = np.squeeze(drift_numerical_den) drift_numerical = drift_numerical_num / drift_numerical_den msg = "There should be one bounce integral per pitch in this example." assert drift_numerical.size == drift_analytic.size, msg From e6b38bf2268b826400098aa5545bdfd7e9e8a6e7 Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 25 Jul 2024 20:56:26 -0400 Subject: [PATCH 198/241] Clean up docstring comments --- desc/compute/bounce_integral.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index ca1bf040c2..e344dd3383 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -158,6 +158,7 @@ def root(b, c, d): _roots = jnp.vectorize(partial(jnp.roots, strip_zeros=False), signature="(m)->(n)") +# TODO: upstream to interpax def _poly_root( c, k=0, @@ -569,12 +570,12 @@ def bounce_points( axis of the returned arrays, which enumerates bounce points for a particular field line and pitch, is padded with zero. - Specify to only return the first ``size`` pairs of bounce points for each - pitch along each field line. This is useful if ``size`` is a close upper - bound to the actual number of wells. To get an intuition for a good estimate - for ``size``, plot the field line with all the bounce points identified - by calling this function with ``check=True``. - As a reference, there are typically <= 5 wells per toroidal transit. + Specify to only return the first ``num_wells`` pairs of bounce points for each + pitch along each field line. This is useful if ``num_wells`` is a close upper + bound to the actual number of wells. To obtain a good choice for ``num_wells``, + plot the field line with all the bounce points identified by calling this + function with ``check=True``. As a reference, there are typically <= 5 wells + per toroidal transit. check : bool Flag for debugging. plot : bool @@ -588,7 +589,7 @@ def bounce_points( a field line. 
The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, respectively, for the bounce integrals. - If there were less than ``size`` wells detected along a field line, then + If there were less than ``num_wells`` wells detected along a field line, then the last axis, which enumerates bounce points for a particular field line and pitch, is padded with zero. @@ -1346,13 +1347,13 @@ def bounce_integrate( axis of the returned array, which enumerates bounce integrals for a particular field line and pitch, is padded with zero. - Specify to only return the bounce integrals between the first ``size`` - wells for each pitch along each field line. This is useful if ``size`` is a - close upper bound to the actual number of wells. To get an intuition for a - good estimate for ``size``, plot the field line with all the bounce points + Specify to only return the bounce integrals between the first ``num_wells`` + wells for each pitch along each field line. This is useful if ``num_wells`` + is a close upper bound to the actual number of wells. To obtain a good + choice for ``num_wells``, plot the field line with all the bounce points identified. This will be done automatically if the ``bounce_integral`` - function is called with ``check=True`` and ``plot=True``. - As a reference, there are typically <= 5 wells per toroidal transit. + function is called with ``check=True`` and ``plot=True``. As a reference, + there are typically <= 5 wells per toroidal transit. Returns ------- From ad30aa0e107fd25e15071fa59f26748d248aa18e Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 31 Jul 2024 00:44:12 -0400 Subject: [PATCH 199/241] Add remaining fourier bounce methods --- desc/backend.py | 9 +- desc/basis.py | 4 + desc/compute/_interp_utils.py | 299 ++++++++++++++++ desc/compute/_quadrature_utils.py | 177 ++++++++++ desc/compute/bounce_integral.py | 416 +--------------------- desc/compute/fourier_bounce_integral.py | 450 +++++++++++++----------- desc/compute/utils.py | 43 ++- tests/test_bounce_integral.py | 99 +++++- 8 files changed, 868 insertions(+), 629 deletions(-) create mode 100644 desc/compute/_interp_utils.py create mode 100644 desc/compute/_quadrature_utils.py diff --git a/desc/backend.py b/desc/backend.py index 9685366750..80c2323a00 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -72,7 +72,7 @@ from jax.experimental.ode import odeint from jax.lax import cond, fori_loop, scan, switch, while_loop from jax.numpy import bincount, flatnonzero, repeat, take - from jax.numpy.fft import irfft, rfft + from jax.numpy.fft import irfft, rfft, rfft2 from jax.scipy.fft import dct, idct from jax.scipy.linalg import ( block_diag, @@ -414,7 +414,8 @@ def tangent_solve(g, y): jit = lambda func, *args, **kwargs: func execute_on_cpu = lambda func: func import scipy.optimize - from scipy.fft import dct, idct, irfft, rfft # noqa: F401 + from numpy.fft import irfft, rfft, rfft2 # noqa: F401 + from scipy.fft import dct, idct # noqa: F401 from scipy.integrate import odeint # noqa: F401 from scipy.linalg import ( # noqa: F401 block_diag, @@ -430,7 +431,7 @@ def tangent_solve(g, y): trapezoid = np.trapezoid if hasattr(np, "trapezoid") else np.trapz - def imap(f, xs, in_axes=0, out_axes=0): + def imap(f, xs, batch_size=None, in_axes=0, out_axes=0): """Generalizes jax.lax.map; uses numpy.""" if not isinstance(xs, np.ndarray): raise NotImplementedError( @@ -461,7 +462,7 @@ def vmap(fun, in_axes=0, out_axes=0): Vectorized version of fun. 
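# Illustrative sketch (hypothetical helper, not part of the patch): what the
# numpy fallback for ``imap``/``vmap`` above is emulating, namely applying
# ``fun`` to each slice along the mapped axis and stacking the results back.
import numpy as np

def imap_sketch(f, xs, in_axes=0, out_axes=0):
    xs = np.moveaxis(xs, in_axes, 0)
    out = np.stack([f(x) for x in xs])
    return np.moveaxis(out, 0, out_axes)

f = lambda x: x**2 + 1
xs = np.arange(6.0).reshape(2, 3)
print(np.allclose(imap_sketch(f, xs), f(xs)))  # True for an elementwise f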
""" - return lambda xs: imap(fun, xs, in_axes, out_axes) + return lambda xs: imap(fun, xs, in_axes=in_axes, out_axes=out_axes) def tree_stack(*args, **kwargs): """Stack pytree for numpy backend.""" diff --git a/desc/basis.py b/desc/basis.py index b01ec871f9..19c8d99ddb 100644 --- a/desc/basis.py +++ b/desc/basis.py @@ -1131,6 +1131,10 @@ def evaluate( m = m[midx] n = n[nidx] + # TODO: in map_clebsch_root findign + # lambda should be fixed to rho and zeta + # so lambda is slimmed to 1d fourier series for fixed rho zeta. + # cache radial and toroidal for rootfinding radial = zernike_radial(r[:, np.newaxis], lm[:, 0], lm[:, 1], dr=derivatives[0]) poloidal = fourier(t[:, np.newaxis], m, dt=derivatives[1]) toroidal = fourier(z[:, np.newaxis], n, NFP=self.NFP, dt=derivatives[2]) diff --git a/desc/compute/_interp_utils.py b/desc/compute/_interp_utils.py new file mode 100644 index 0000000000..28e2d54a6d --- /dev/null +++ b/desc/compute/_interp_utils.py @@ -0,0 +1,299 @@ +"""FFT interpolation.""" + +from functools import partial + +from desc.backend import jnp, rfft, rfft2 +from desc.compute.utils import safediv + +# TODO: upstream fft methods to interpax. +# TODO: For inverse transforms, do multipoint evaluation with FFT. +# FFT cost is 𝒪(M N log[M N]) while direct evaluation is 𝒪(M² N²). +# Chapter 10, https://doi.org/10.1017/CBO9781139856065. +# Likely better than using approximate NFFT to evaluate f(xq) given fourier +# coefficients because evaluation points are quadratically packed near edges as +# required by algebraic polynomial quadrature to avoid runge. + + +def interp_rfft(xq, f): + """Interpolate real-valued ``f`` to ``xq`` with FFT. + + Parameters + ---------- + xq : jnp.ndarray + Shape (..., xq.shape[-1]). + Query points where interpolation is desired. + f : jnp.ndarray + Shape (..., f.shape[-1]). + Function values on 2π periodic grid to interpolate. + + Returns + ------- + fq : jnp.ndarray + Shape (..., xq.shape[-1]) + Function value at query points. + + """ + assert xq.ndim == f.ndim >= 1 + return irfft_non_uniform(xq, rfft(f, norm="forward"), f.shape[-1]) + + +def irfft_non_uniform(xq, a, M): + """Evaluate Fourier coefficients ``a`` at ``xq`` ∈ [0, 2π] periodic. + + Parameters + ---------- + xq : jnp.ndarray + Shape (..., xq.shape[-1]). + Query points where interpolation is desired. + Dimension should match ``a``, though size of last axis may differ. + a : jnp.ndarray + Fourier coefficients ``a = rfft(f, norm="forward")``. + M : int + Spectral resolution of ``a``. + + Returns + ------- + fq : jnp.ndarray + Shape (..., xq.shape[-1]) + Function value at query points. + + """ + a = a.at[..., 0].divide(2.0).at[..., -1].divide(1.0 + ((M % 2) == 0)) + m = jnp.fft.rfftfreq(M, d=1 / M) + basis = jnp.exp(1j * m * xq[..., jnp.newaxis]) + fq = 2 * jnp.real(jnp.einsum("...m,...m", basis, a)) + # ℜ einsum(basis, a) = einsum(cos(mx), ℜ(a)) - einsum(sin(mx), ℑ(a)) + return fq + + +def interp_rfft2(xq, f): + """Interpolate real-valued ``f`` to ``xq`` with FFT. + + Parameters + ---------- + xq : jnp.ndarray + Shape (..., xq.shape[-2], 2). + Query points where interpolation is desired. + f : jnp.ndarray + Shape (..., f.shape[-2], f.shape[-1]). + Function values on (2π × 2π) periodic tensor-product grid to interpolate. + + Returns + ------- + fq : jnp.ndarray + Shape (..., xq.shape[-2]). + Function value at query points. 
+ + """ + assert xq.ndim == f.ndim >= 2 + return irfft2_non_uniform(xq, rfft2(f, norm="forward"), *f.shape[-2:]) + + +def irfft2_non_uniform(xq, a, M, N): + """Evaluate Fourier coefficients ``a`` at ``xq`` ∈ [0, 2π]² periodic. + + Parameters + ---------- + xq : jnp.ndarray + Shape (..., xq.shape[-2], 2). + Query points where interpolation is desired. + a : jnp.ndarray + Fourier coefficients ``a = rfft2(f, norm="forward")``. + M : int + Spectral resolution of ``a`` along second to last axis. + N : int + Spectral resolution of ``a`` along last axis. + + Returns + ------- + fq : jnp.ndarray + Shape (..., xq.shape[-2]). + Function value at query points. + + """ + a = a.at[..., 0].divide(2.0).at[..., -1].divide(1.0 + ((N % 2) == 0)) + m = jnp.fft.fftfreq(M, d=1 / M) + n = jnp.fft.rfftfreq(N, d=1 / N) + basis = jnp.exp( + 1j + * ( + (m * xq[..., 0, jnp.newaxis])[..., jnp.newaxis] + + (n * xq[..., -1, jnp.newaxis])[..., jnp.newaxis, :] + ) + ) + fq = 2 * jnp.real(jnp.einsum("...mn,...mn", basis, a)) + return fq + + +# TODO: upstream cubic spline polynomial root finding to interpax + + +def _filter_distinct(r, sentinel, eps): + """Set all but one of matching adjacent elements in ``r`` to ``sentinel``.""" + # eps needs to be low enough that close distinct roots do not get removed. + # Otherwise, algorithms relying on continuity will fail. + mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=sentinel), 0, atol=eps) + r = jnp.where(mask, sentinel, r) + return r + + +def _sentinel_append(r, sentinel, num=1): + """Concat ``sentinel`` ``num`` times to ``r`` on last axis.""" + sent = jnp.broadcast_to(sentinel, (*r.shape[:-1], num)) + return jnp.append(r, sent, axis=-1) + + +def _root_linear(a, b, sentinel, eps, distinct=False): + """Return r such that a r + b = 0.""" + return safediv(-b, a, jnp.where(jnp.abs(b) <= eps, 0, sentinel)) + + +def _root_quadratic(a, b, c, sentinel, eps, distinct): + """Return r such that a r² + b r + c = 0, assuming real coefficients and roots.""" + # numerical.recipes/book.html, page 227 + discriminant = b**2 - 4 * a * c + q = -0.5 * (b + jnp.sign(b) * jnp.sqrt(jnp.abs(discriminant))) + r1 = jnp.where( + discriminant < 0, + sentinel, + safediv(q, a, _root_linear(b, c, sentinel, eps)), + ) + r2 = jnp.where( + # more robust to remove repeated roots with discriminant + (discriminant < 0) | (distinct & (discriminant <= eps)), + sentinel, + safediv(c, q, sentinel), + ) + return jnp.stack([r1, r2], axis=-1) + + +def _root_cubic(a, b, c, d, sentinel, eps, distinct): + """Return r such that a r³ + b r² + c r + d = 0, assuming real coef and roots.""" + # numerical.recipes/book.html, page 228 + + def irreducible(Q, R, b, mask): + # Three irrational real roots. + theta = jnp.arccos(R / jnp.sqrt(jnp.where(mask, Q**3, R**2 + 1))) + return jnp.moveaxis( + -2 + * jnp.sqrt(Q) + * jnp.stack( + [ + jnp.cos(theta / 3), + jnp.cos((theta + 2 * jnp.pi) / 3), + jnp.cos((theta - 2 * jnp.pi) / 3), + ] + ) + - b / 3, + source=0, + destination=-1, + ) + + def reducible(Q, R, b): + # One real and two complex roots. 
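# Illustrative sketch (not part of the patch): the numerically stable
# quadratic root recipe used above (Numerical Recipes, p. 227),
# q = -(b + sign(b) sqrt(b^2 - 4ac)) / 2 with roots q/a and c/q, compared to
# the textbook formula on a case with severe cancellation.
import numpy as np

a, b, c = 1.0, 1e8, 1.0                    # true roots near -1e8 and -1e-8
disc = np.sqrt(b**2 - 4 * a * c)
q = -0.5 * (b + np.sign(b) * disc)
print(q / a, c / q)                        # both roots accurate
print((-b + disc) / (2 * a))               # small root ruined by cancellation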
+ A = -jnp.sign(R) * (jnp.abs(R) + jnp.sqrt(jnp.abs(R**2 - Q**3))) ** (1 / 3) + B = safediv(Q, A) + r1 = (A + B) - b / 3 + return _sentinel_append(r1[..., jnp.newaxis], sentinel, num=2) + + def root(b, c, d): + b = safediv(b, a) + c = safediv(c, a) + d = safediv(d, a) + Q = (b**2 - 3 * c) / 9 + R = (2 * b**3 - 9 * b * c + 27 * d) / 54 + mask = R**2 < Q**3 + return jnp.where( + mask[..., jnp.newaxis], + irreducible(jnp.abs(Q), R, b, mask), + reducible(Q, R, b), + ) + + return jnp.where( + # Tests catch failure here if eps < 1e-12 for 64 bit jax. + jnp.expand_dims(jnp.abs(a) <= eps, axis=-1), + _sentinel_append(_root_quadratic(b, c, d, sentinel, eps, distinct), sentinel), + root(b, c, d), + ) + + +_roots = jnp.vectorize(partial(jnp.roots, strip_zeros=False), signature="(m)->(n)") + + +def _poly_root( + c, + k=0, + a_min=None, + a_max=None, + sort=False, + sentinel=jnp.nan, + # About 2e-12 for 64 bit jax. + eps=min(jnp.finfo(jnp.array(1.0).dtype).eps * 1e4, 1e-8), + distinct=False, +): + """Roots of polynomial with given coefficients. + + Parameters + ---------- + c : jnp.ndarray + First axis should store coefficients of a polynomial. For a polynomial given by + ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0]-1``, coefficient cᵢ should be stored at + ``c[n-i]``. + k : jnp.ndarray + Specify to find solutions to ∑ᵢⁿ cᵢ xⁱ = ``k``. Should broadcast with arrays of + shape ``c.shape[1:]``. + a_min : jnp.ndarray + Minimum ``a_min`` and maximum ``a_max`` value to return roots between. + If specified only real roots are returned. If None, returns all complex roots. + Should broadcast with arrays of shape ``c.shape[1:]``. + a_max : jnp.ndarray + Minimum ``a_min`` and maximum ``a_max`` value to return roots between. + If specified only real roots are returned. If None, returns all complex roots. + Should broadcast with arrays of shape ``c.shape[1:]``. + sort : bool + Whether to sort the roots. + sentinel : float + Value with which to pad array in place of filtered elements. + Anything less than ``a_min`` or greater than ``a_max`` plus some floating point + error buffer will work just like nan while avoiding nan gradient. + eps : float + Absolute tolerance with which to consider value as zero. + distinct : bool + Whether to only return the distinct roots. If true, when the multiplicity is + greater than one, the repeated roots are set to ``sentinel``. + + Returns + ------- + r : jnp.ndarray + Shape (..., c.shape[1:], c.shape[0] - 1). + The roots of the polynomial, iterated over the last axis. + + """ + is_real = not (jnp.iscomplexobj(c) or jnp.iscomplexobj(k)) + get_only_real_roots = not (a_min is None and a_max is None) + + func = {2: _root_linear, 3: _root_quadratic, 4: _root_cubic} + if c.shape[0] in func and is_real and get_only_real_roots: + # Compute from analytic formula to avoid the issue of complex roots with small + # imaginary parts and to avoid nan in gradient. + r = func[c.shape[0]](*c[:-1], c[-1] - k, sentinel, eps, distinct) + distinct = distinct and c.shape[0] > 3 + else: + # Compute from eigenvalues of polynomial companion matrix. 
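# Illustrative sketch (not part of the patch): the companion-matrix fallback
# mentioned above. For degree > 3 the roots are the eigenvalues of the
# companion matrix, built here by hand for x^3 - 6x^2 + 11x - 6 (roots 1, 2, 3).
import numpy as np

c = np.array([1.0, -6.0, 11.0, -6.0])      # highest-degree coefficient first
n = c.size - 1
companion = np.zeros((n, n))
companion[1:, :-1] = np.eye(n - 1)         # ones on the subdiagonal
companion[:, -1] = -c[:0:-1] / c[0]        # last column: -c_i / c_n, low to high
print(np.sort(np.linalg.eigvals(companion).real))  # approx [1. 2. 3.]
print(np.sort(np.roots(c).real))                   # numpy's np.roots agrees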
+ c_n = c[-1] - k + c = [jnp.broadcast_to(c_i, c_n.shape) for c_i in c[:-1]] + c.append(c_n) + c = jnp.stack(c, axis=-1) + r = _roots(c) + if get_only_real_roots: + a_min = -jnp.inf if a_min is None else a_min[..., jnp.newaxis] + a_max = +jnp.inf if a_max is None else a_max[..., jnp.newaxis] + r = jnp.where( + (jnp.abs(jnp.imag(r)) <= eps) & (a_min <= r) & (r <= a_max), + jnp.real(r), + sentinel, + ) + + if sort or distinct: + r = jnp.sort(r, axis=-1) + return _filter_distinct(r, sentinel, eps) if distinct else r diff --git a/desc/compute/_quadrature_utils.py b/desc/compute/_quadrature_utils.py new file mode 100644 index 0000000000..b8055b2e34 --- /dev/null +++ b/desc/compute/_quadrature_utils.py @@ -0,0 +1,177 @@ +"""Utilities for quadratures.""" + +from orthax.legendre import legder, legval + +from desc.backend import eigh_tridiagonal, jnp +from desc.utils import errorif + + +def affine_bijection_to_disc(x, a, b): + """[a, b] ∋ x ↦ y ∈ [−1, 1].""" + y = 2 * (x - a) / (b - a) - 1 + return y + + +def affine_bijection(x, a, b): + """[−1, 1] ∋ x ↦ y ∈ [a, b].""" + y = (x + 1) / 2 * (b - a) + a + return y + + +def grad_affine_bijection(a, b): + """Gradient of affine bijection.""" + dy_dx = (b - a) / 2 + return dy_dx + + +def automorphism_arcsin(x): + """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. + + The arcsin transformation introduces a singularity that augments the singularity + in the bounce integral, so the quadrature scheme used to evaluate the integral must + work well on functions with large derivative near the boundary. + + Parameters + ---------- + x : jnp.ndarray + Points to transform. + + Returns + ------- + y : jnp.ndarray + Transformed points. + + """ + y = 2 * jnp.arcsin(x) / jnp.pi + return y + + +def grad_automorphism_arcsin(x): + """Gradient of arcsin automorphism.""" + dy_dx = 2 / (jnp.sqrt(1 - x**2) * jnp.pi) + return dy_dx + + +grad_automorphism_arcsin.__doc__ += "\n" + automorphism_arcsin.__doc__ + + +def automorphism_sin(x, s=0, m=10): + """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. + + When used as the change of variable map for the bounce integral, the Lipschitzness + of the sin transformation prevents generation of new singularities. Furthermore, + its derivative vanishes to zero slowly near the boundary, which will suppress the + large derivatives near the boundary of singular integrals. + + In effect, this map pulls the mass of the integral away from the singularities, + which should improve convergence if the quadrature performs better on less singular + integrands. Pairs well with Gauss-Legendre quadrature. + + Parameters + ---------- + x : jnp.ndarray + Points to transform. + s : float + Strength of derivative suppression, s ∈ [0, 1]. + m : float + Number of machine epsilons used for floating point error buffer. + + Returns + ------- + y : jnp.ndarray + Transformed points. + + """ + errorif(not (0 <= s <= 1)) + # s = 0 -> derivative vanishes like cosine. + # s = 1 -> derivative vanishes like cosine^k. + y0 = jnp.sin(jnp.pi * x / 2) + y1 = x + jnp.sin(jnp.pi * x) / jnp.pi # k = 2 + y = (1 - s) * y0 + s * y1 + # y is an expansion, so y(x) > x near x ∈ {−1, 1} and there is a tendency + # for floating point error to overshoot the true value. 
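# Illustrative sketch (not part of the patch): why the sin automorphism above
# pairs well with Gauss-Legendre. The bounce-type integrand 1/sqrt(1 - x^2)
# is singular at the endpoints; substituting x = sin(pi t / 2), i.e.
# automorphism_sin with s = 0, removes the singularity, and plain
# Gauss-Legendre then recovers the exact value pi to machine precision.
import numpy as np

t, w = np.polynomial.legendre.leggauss(10)
x = np.sin(np.pi * t / 2)                  # change of variable
dx_dt = np.pi * np.cos(np.pi * t / 2) / 2  # its gradient
print(np.sum(w * dx_dt / np.sqrt(1 - x**2)), np.pi)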
+ eps = m * jnp.finfo(jnp.array(1.0).dtype).eps + return jnp.clip(y, -1 + eps, 1 - eps) + + +def grad_automorphism_sin(x, s=0): + """Gradient of sin automorphism.""" + dy0_dx = jnp.pi * jnp.cos(jnp.pi * x / 2) / 2 + dy1_dx = 1 + jnp.cos(jnp.pi * x) + dy_dx = (1 - s) * dy0_dx + s * dy1_dx + return dy_dx + + +grad_automorphism_sin.__doc__ += "\n" + automorphism_sin.__doc__ + + +def tanh_sinh(deg, m=10): + """Tanh-Sinh quadrature. + + Returns quadrature points xₖ and weights wₖ for the approximate evaluation of the + integral ∫₋₁¹ f(x) dx ≈ ∑ₖ wₖ f(xₖ). + + Parameters + ---------- + deg : int + Number of quadrature points. + m : float + Number of machine epsilons used for floating point error buffer. Larger implies + less floating point error, but increases the minimum achievable error. + + Returns + ------- + x, w : (jnp.ndarray, jnp.ndarray) + Quadrature points and weights. + + """ + # buffer to avoid numerical instability + x_max = jnp.array(1.0) + x_max = x_max - m * jnp.finfo(x_max.dtype).eps + t_max = jnp.arcsinh(2 * jnp.arctanh(x_max) / jnp.pi) + # maximal-spacing scheme, doi.org/10.48550/arXiv.2007.15057 + t = jnp.linspace(-t_max, t_max, deg) + dt = 2 * t_max / (deg - 1) + arg = 0.5 * jnp.pi * jnp.sinh(t) + x = jnp.tanh(arg) # x = g(t) + w = 0.5 * jnp.pi * jnp.cosh(t) / jnp.cosh(arg) ** 2 * dt # w = (dg/dt) dt + return x, w + + +def leggausslob(deg): + """Lobatto-Gauss-Legendre quadrature. + + Returns quadrature points xₖ and weights wₖ for the approximate evaluation of the + integral ∫₋₁¹ f(x) dx ≈ ∑ₖ wₖ f(xₖ). + + Parameters + ---------- + deg : int + Number of (interior) quadrature points to return. + + Returns + ------- + x, w : (jnp.ndarray, jnp.ndarray) + Quadrature points in (-1, 1) and associated weights. + Excludes points and weights at -1 and 1. + + """ + # Designate two degrees for endpoints. + deg = int(deg) + 2 + + n = jnp.arange(2, deg - 1) + x = eigh_tridiagonal( + jnp.zeros(deg - 2), + jnp.sqrt((n**2 - 1) / (4 * n**2 - 1)), + eigvals_only=True, + ) + c0 = jnp.zeros(deg).at[-1].set(1) + + # improve (single multiplicity) roots by one application of Newton + c = legder(c0) + dy = legval(x=x, c=c) + df = legval(x=x, c=legder(c)) + x -= dy / df + + w = 2 / (deg * (deg - 1) * legval(x=x, c=c0) ** 2) + return x, w diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 3568d15fec..8f41d707aa 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -5,51 +5,20 @@ import numpy as np from interpax import CubicHermiteSpline, PchipInterpolator, PPoly, interp1d from matplotlib import pyplot as plt -from orthax.legendre import legder, leggauss, legval - -from desc.backend import eigh_tridiagonal, flatnonzero, imap, jnp, put, take -from desc.compute.utils import safediv +from orthax.legendre import leggauss + +from desc.backend import flatnonzero, imap, jnp, put +from desc.compute._interp_utils import _poly_root +from desc.compute._quadrature_utils import ( + affine_bijection, + automorphism_sin, + grad_affine_bijection, + grad_automorphism_sin, +) +from desc.compute.utils import take_mask from desc.utils import errorif, setdefault, warnif -@partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={"size", "fill_value"}) -def _take_mask(a, mask, size=None, fill_value=None): - """JIT compilable method to return ``a[mask][:size]`` padded by ``fill_value``. - - Parameters - ---------- - a : jnp.ndarray - The source array. - mask : jnp.ndarray - Boolean mask to index into ``a``. Should have same shape as ``a``. 
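# Illustrative sketch (not part of the patch): the tanh-sinh rule defined
# above, reproduced with plain numpy and applied to the endpoint-singular
# integral of 1/sqrt(1 - x^2) over [-1, 1], whose exact value is pi. The
# double-exponential map x = tanh(pi/2 sinh t) crushes the endpoint
# singularity, so a modest number of nodes already agrees to several digits.
import numpy as np

deg, m = 39, 10
x_max = 1.0 - m * np.finfo(np.float64).eps
t_max = np.arcsinh(2 * np.arctanh(x_max) / np.pi)
t = np.linspace(-t_max, t_max, deg)
dt = 2 * t_max / (deg - 1)
arg = 0.5 * np.pi * np.sinh(t)
x = np.tanh(arg)
w = 0.5 * np.pi * np.cosh(t) / np.cosh(arg) ** 2 * dt
print(np.sum(w / np.sqrt(1 - x**2)), np.pi)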
- size : int - Elements of ``a`` at the first size True indices of ``mask`` will be returned. - If there are fewer elements than size indicates, the returned array will be - padded with ``fill_value``. The size default is ``mask.size``. - fill_value : Any - When there are fewer than the indicated number of elements, the remaining - elements will be filled with ``fill_value``. Defaults to NaN for inexact types, - the largest negative value for signed types, the largest positive value for - unsigned types, and True for booleans. - - Returns - ------- - result : jnp.ndarray - Shape (size, ). - - """ - assert a.shape == mask.shape - idx = flatnonzero(mask, size=setdefault(size, mask.size), fill_value=mask.size) - return take( - a, - idx, - mode="fill", - fill_value=fill_value, - unique_indices=True, - indices_are_sorted=True, - ) - - # use for debugging and testing def _filter_not_nan(a, check=False): """Filter out nan from ``a`` while asserting nan is padded at right.""" @@ -66,178 +35,6 @@ def _filter_nonzero_measure(bp1, bp2): return bp1[mask], bp2[mask] -def _filter_distinct(r, sentinel, eps): - """Set all but one of matching adjacent elements in ``r`` to ``sentinel``.""" - # eps needs to be low enough that close distinct roots do not get removed. - # Otherwise, algorithms relying on continuity will fail. - mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=sentinel), 0, atol=eps) - r = jnp.where(mask, sentinel, r) - return r - - -def _sentinel_append(r, sentinel, num=1): - """Concat ``sentinel`` ``num`` times to ``r`` on last axis.""" - sent = jnp.broadcast_to(sentinel, (*r.shape[:-1], num)) - return jnp.append(r, sent, axis=-1) - - -def _root_linear(a, b, sentinel, eps, distinct=False): - """Return r such that a r + b = 0.""" - return safediv(-b, a, jnp.where(jnp.abs(b) <= eps, 0, sentinel)) - - -def _root_quadratic(a, b, c, sentinel, eps, distinct): - """Return r such that a r² + b r + c = 0, assuming real coefficients and roots.""" - # numerical.recipes/book.html, page 227 - discriminant = b**2 - 4 * a * c - q = -0.5 * (b + jnp.sign(b) * jnp.sqrt(jnp.abs(discriminant))) - r1 = jnp.where( - discriminant < 0, - sentinel, - safediv(q, a, _root_linear(b, c, sentinel, eps)), - ) - r2 = jnp.where( - # more robust to remove repeated roots with discriminant - (discriminant < 0) | (distinct & (discriminant <= eps)), - sentinel, - safediv(c, q, sentinel), - ) - return jnp.stack([r1, r2], axis=-1) - - -def _root_cubic(a, b, c, d, sentinel, eps, distinct): - """Return r such that a r³ + b r² + c r + d = 0, assuming real coef and roots.""" - # numerical.recipes/book.html, page 228 - - def irreducible(Q, R, b, mask): - # Three irrational real roots. - theta = jnp.arccos(R / jnp.sqrt(jnp.where(mask, Q**3, R**2 + 1))) - return jnp.moveaxis( - -2 - * jnp.sqrt(Q) - * jnp.stack( - [ - jnp.cos(theta / 3), - jnp.cos((theta + 2 * jnp.pi) / 3), - jnp.cos((theta - 2 * jnp.pi) / 3), - ] - ) - - b / 3, - source=0, - destination=-1, - ) - - def reducible(Q, R, b): - # One real and two complex roots. 
- A = -jnp.sign(R) * (jnp.abs(R) + jnp.sqrt(jnp.abs(R**2 - Q**3))) ** (1 / 3) - B = safediv(Q, A) - r1 = (A + B) - b / 3 - return _sentinel_append(r1[..., jnp.newaxis], sentinel, num=2) - - def root(b, c, d): - b = safediv(b, a) - c = safediv(c, a) - d = safediv(d, a) - Q = (b**2 - 3 * c) / 9 - R = (2 * b**3 - 9 * b * c + 27 * d) / 54 - mask = R**2 < Q**3 - return jnp.where( - mask[..., jnp.newaxis], - irreducible(jnp.abs(Q), R, b, mask), - reducible(Q, R, b), - ) - - return jnp.where( - # Tests catch failure here if eps < 1e-12 for 64 bit jax. - jnp.expand_dims(jnp.abs(a) <= eps, axis=-1), - _sentinel_append(_root_quadratic(b, c, d, sentinel, eps, distinct), sentinel), - root(b, c, d), - ) - - -_roots = jnp.vectorize(partial(jnp.roots, strip_zeros=False), signature="(m)->(n)") - - -# TODO: upstream to interpax -def _poly_root( - c, - k=0, - a_min=None, - a_max=None, - sort=False, - sentinel=jnp.nan, - # About 2e-12 for 64 bit jax. - eps=min(jnp.finfo(jnp.array(1.0).dtype).eps * 1e4, 1e-8), - distinct=False, -): - """Roots of polynomial with given coefficients. - - Parameters - ---------- - c : jnp.ndarray - First axis should store coefficients of a polynomial. For a polynomial given by - ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0]-1``, coefficient cᵢ should be stored at - ``c[n-i]``. - k : jnp.ndarray - Specify to find solutions to ∑ᵢⁿ cᵢ xⁱ = ``k``. Should broadcast with arrays of - shape ``c.shape[1:]``. - a_min : jnp.ndarray - Minimum ``a_min`` and maximum ``a_max`` value to return roots between. - If specified only real roots are returned. If None, returns all complex roots. - Should broadcast with arrays of shape ``c.shape[1:]``. - a_max : jnp.ndarray - Minimum ``a_min`` and maximum ``a_max`` value to return roots between. - If specified only real roots are returned. If None, returns all complex roots. - Should broadcast with arrays of shape ``c.shape[1:]``. - sort : bool - Whether to sort the roots. - sentinel : float - Value with which to pad array in place of filtered elements. - Anything less than ``a_min`` or greater than ``a_max`` plus some floating point - error buffer will work just like nan while avoiding nan gradient. - eps : float - Absolute tolerance with which to consider value as zero. - distinct : bool - Whether to only return the distinct roots. If true, when the multiplicity is - greater than one, the repeated roots are set to ``sentinel``. - - Returns - ------- - r : jnp.ndarray - Shape (..., c.shape[1:], c.shape[0] - 1). - The roots of the polynomial, iterated over the last axis. - - """ - is_real = not (jnp.iscomplexobj(c) or jnp.iscomplexobj(k)) - get_only_real_roots = not (a_min is None and a_max is None) - - func = {2: _root_linear, 3: _root_quadratic, 4: _root_cubic} - if c.shape[0] in func and is_real and get_only_real_roots: - # Compute from analytic formula to avoid the issue of complex roots with small - # imaginary parts and to avoid nan in gradient. - r = func[c.shape[0]](*c[:-1], c[-1] - k, sentinel, eps, distinct) - distinct = distinct and c.shape[0] > 3 - else: - # Compute from eigenvalues of polynomial companion matrix. 
- c_n = c[-1] - k - c = [jnp.broadcast_to(c_i, c_n.shape) for c_i in c[:-1]] - c.append(c_n) - c = jnp.stack(c, axis=-1) - r = _roots(c) - if get_only_real_roots: - a_min = -jnp.inf if a_min is None else a_min[..., jnp.newaxis] - a_max = +jnp.inf if a_max is None else a_max[..., jnp.newaxis] - r = jnp.where( - (jnp.abs(jnp.imag(r)) <= eps) & (a_min <= r) & (r <= a_max), - jnp.real(r), - sentinel, - ) - - if sort or distinct: - r = jnp.sort(r, axis=-1) - return _filter_distinct(r, sentinel, eps) if distinct else r - - def _poly_der(c): """Coefficients for the derivatives of the given set of polynomials. @@ -297,7 +94,7 @@ def _poly_val(x, c): """ # Better than Horner's method as we expect to evaluate low order polynomials. X = x[..., jnp.newaxis] ** jnp.arange(c.shape[0] - 1, -1, -1) - val = jnp.einsum("...i,i...->...", X, c) + val = jnp.einsum("...i,i...", X, c) return val @@ -570,9 +367,9 @@ def bounce_points( axis of the returned arrays, which enumerates bounce points for a particular field line and pitch, is padded with zero. - Specify to only return the first ``num_wells`` pairs of bounce points for each - pitch along each field line. This is useful if ``num_wells`` is a close upper - bound to the actual number of wells. To obtain a good choice for ``num_wells``, + Specify to return the first ``num_wells`` pairs of bounce points for each + pitch along each field line. This is useful if ``num_wells`` tightly + bounds the actual number of wells. To obtain a good choice for ``num_wells``, plot the field line with all the bounce points identified by calling this function with ``check=True``. As a reference, there are typically <= 5 wells per toroidal transit. @@ -589,10 +386,6 @@ def bounce_points( a field line. The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, respectively, for the bounce integrals. - If there were less than ``num_wells`` wells detected along a field line, then - the last axis, which enumerates bounce points for a particular field line - and pitch, is padded with zero. - """ B_c, B_z_ra_c, pitch = _check_shape(knots, B_c, B_z_ra_c, pitch) P, S, N, degree = pitch.shape[0], B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 @@ -622,8 +415,8 @@ def bounce_points( intersect = (intersect + knots[:-1, jnp.newaxis]).reshape(P, S, -1) # New versions of jax only like static sentinels. sentinel = -10000000.0 # knots[0] - 1 - bp1 = _take_mask(intersect, is_bp1, size=num_wells, fill_value=sentinel) - bp2 = _take_mask(intersect, is_bp2, size=num_wells, fill_value=sentinel) + bp1 = take_mask(intersect, is_bp1, size=num_wells, fill_value=sentinel) + bp2 = take_mask(intersect, is_bp2, size=num_wells, fill_value=sentinel) if check: _check_bounce_points(bp1, bp2, sentinel, pitch, knots, B_c, plot, **kwargs) @@ -756,177 +549,6 @@ def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): return B_extrema -def affine_bijection_to_disc(x, a, b): - """[a, b] ∋ x ↦ y ∈ [−1, 1].""" - y = 2 * (x - a) / (b - a) - 1 - return y - - -def affine_bijection(x, a, b): - """[−1, 1] ∋ x ↦ y ∈ [a, b].""" - y = (x + 1) / 2 * (b - a) + a - return y - - -def grad_affine_bijection(a, b): - """Gradient of affine bijection.""" - dy_dx = (b - a) / 2 - return dy_dx - - -def automorphism_arcsin(x): - """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. - - The arcsin transformation introduces a singularity that augments the singularity - in the bounce integral, so the quadrature scheme used to evaluate the integral must - work well on functions with large derivative near the boundary. 
- - Parameters - ---------- - x : jnp.ndarray - Points to transform. - - Returns - ------- - y : jnp.ndarray - Transformed points. - - """ - y = 2 * jnp.arcsin(x) / jnp.pi - return y - - -def grad_automorphism_arcsin(x): - """Gradient of arcsin automorphism.""" - dy_dx = 2 / (jnp.sqrt(1 - x**2) * jnp.pi) - return dy_dx - - -grad_automorphism_arcsin.__doc__ += "\n" + automorphism_arcsin.__doc__ - - -def automorphism_sin(x, s=0, m=10): - """[-1, 1] ∋ x ↦ y ∈ [−1, 1]. - - When used as the change of variable map for the bounce integral, the Lipschitzness - of the sin transformation prevents generation of new singularities. Furthermore, - its derivative vanishes to zero slowly near the boundary, which will suppress the - large derivatives near the boundary of singular integrals. - - In effect, this map pulls the mass of the integral away from the singularities, - which should improve convergence if the quadrature performs better on less singular - integrands. Pairs well with Gauss-Legendre quadrature. - - Parameters - ---------- - x : jnp.ndarray - Points to transform. - s : float - Strength of derivative suppression, s ∈ [0, 1]. - m : float - Number of machine epsilons used for floating point error buffer. - - Returns - ------- - y : jnp.ndarray - Transformed points. - - """ - errorif(not (0 <= s <= 1)) - # s = 0 -> derivative vanishes like cosine. - # s = 1 -> derivative vanishes like cosine^k. - y0 = jnp.sin(jnp.pi * x / 2) - y1 = x + jnp.sin(jnp.pi * x) / jnp.pi # k = 2 - y = (1 - s) * y0 + s * y1 - # y is an expansion, so y(x) > x near x ∈ {−1, 1} and there is a tendency - # for floating point error to overshoot the true value. - eps = m * jnp.finfo(jnp.array(1.0).dtype).eps - return jnp.clip(y, -1 + eps, 1 - eps) - - -def grad_automorphism_sin(x, s=0): - """Gradient of sin automorphism.""" - dy0_dx = jnp.pi * jnp.cos(jnp.pi * x / 2) / 2 - dy1_dx = 1 + jnp.cos(jnp.pi * x) - dy_dx = (1 - s) * dy0_dx + s * dy1_dx - return dy_dx - - -grad_automorphism_sin.__doc__ += "\n" + automorphism_sin.__doc__ - - -def tanh_sinh(deg, m=10): - """Tanh-Sinh quadrature. - - Returns quadrature points xₖ and weights wₖ for the approximate evaluation of the - integral ∫₋₁¹ f(x) dx ≈ ∑ₖ wₖ f(xₖ). - - Parameters - ---------- - deg : int - Number of quadrature points. - m : float - Number of machine epsilons used for floating point error buffer. Larger implies - less floating point error, but increases the minimum achievable error. - - Returns - ------- - x, w : (jnp.ndarray, jnp.ndarray) - Quadrature points and weights. - - """ - # buffer to avoid numerical instability - x_max = jnp.array(1.0) - x_max = x_max - m * jnp.finfo(x_max.dtype).eps - t_max = jnp.arcsinh(2 * jnp.arctanh(x_max) / jnp.pi) - # maximal-spacing scheme, doi.org/10.48550/arXiv.2007.15057 - t = jnp.linspace(-t_max, t_max, deg) - dt = 2 * t_max / (deg - 1) - arg = 0.5 * jnp.pi * jnp.sinh(t) - x = jnp.tanh(arg) # x = g(t) - w = 0.5 * jnp.pi * jnp.cosh(t) / jnp.cosh(arg) ** 2 * dt # w = (dg/dt) dt - return x, w - - -def leggausslob(deg): - """Lobatto-Gauss-Legendre quadrature. - - Returns quadrature points xₖ and weights wₖ for the approximate evaluation of the - integral ∫₋₁¹ f(x) dx ≈ ∑ₖ wₖ f(xₖ). - - Parameters - ---------- - deg : int - Number of (interior) quadrature points to return. - - Returns - ------- - x, w : (jnp.ndarray, jnp.ndarray) - Quadrature points in (-1, 1) and associated weights. - Excludes points and weights at -1 and 1. - - """ - # Designate two degrees for endpoints. 
- deg = int(deg) + 2 - - n = jnp.arange(2, deg - 1) - x = eigh_tridiagonal( - jnp.zeros(deg - 2), - jnp.sqrt((n**2 - 1) / (4 * n**2 - 1)), - eigvals_only=True, - ) - c0 = put(jnp.zeros(deg), -1, 1) - - # improve (single multiplicity) roots by one application of Newton - c = legder(c0) - dy = legval(x=x, c=c) - df = legval(x=x, c=legder(c)) - x -= dy / df - - w = 2 / (deg * (deg - 1) * legval(x=x, c=c0) ** 2) - return x, w - - def _plot(Z, V, title_id=""): """Plot V[λ, (ρ, α), (ζ₁, ζ₂)](Z).""" for p in range(Z.shape[0]): @@ -1386,9 +1008,9 @@ def bounce_integrate( axis of the returned array, which enumerates bounce integrals for a particular field line and pitch, is padded with zero. - Specify to only return the bounce integrals between the first ``num_wells`` + Specify to return the bounce integrals between the first ``num_wells`` wells for each pitch along each field line. This is useful if ``num_wells`` - is a close upper bound to the actual number of wells. To obtain a good + tightly bounds the actual number of wells. To obtain a good choice for ``num_wells``, plot the field line with all the bounce points identified. This will be done automatically if the ``bounce_integral`` function is called with ``check=True`` and ``plot=True``. As a reference, diff --git a/desc/compute/fourier_bounce_integral.py b/desc/compute/fourier_bounce_integral.py index 39c4b32cd9..d6d6ebe2e2 100644 --- a/desc/compute/fourier_bounce_integral.py +++ b/desc/compute/fourier_bounce_integral.py @@ -1,46 +1,27 @@ -"""Methods for computing bounce integrals.""" +"""Methods for constructing f(α, ζ) splines and bounce integrals.""" -from orthax.chebyshev import chebpts1, chebpts2 +from orthax.chebyshev import chebpts1, chebpts2, chebval from desc.backend import dct, idct, irfft, jnp, put, rfft -from desc.compute.bounce_integral import _filter_distinct, _fix_inversion, _take_mask -from desc.compute.bounce_integral import affine_bijection as map_domain -from desc.compute.bounce_integral import affine_bijection_to_disc as map_domain_to_disc -from desc.grid import Grid +from desc.compute._interp_utils import _filter_distinct, irfft_non_uniform +from desc.compute._quadrature_utils import affine_bijection as map_domain +from desc.compute._quadrature_utils import ( + affine_bijection_to_disc as map_domain_to_disc, +) +from desc.compute.bounce_integral import _fix_inversion +from desc.compute.utils import take_mask from desc.utils import Index, errorif -_eps = min(jnp.finfo(jnp.array(1.0).dtype).eps * 1e2, 1e-10) - - -def _fourier_pts(M): - return -jnp.pi + 2 * jnp.pi * jnp.arange(1, M + 1) / M - - -# Y = [a, b] evaluate on grid -> y = [-1, 1] chebyshev points -> y = cos(z) -# evenly spaced z. -# So I find coefficients to chebyshev series T_n(y) = cos(n arcos(y)) = cos(n z). -# So evaluating my chebyshev series in y is same as evaluting cosine series in -# z = arcos(y). -# for y = inversemap[a, b]. -# Open questions is finding roots y using chebroots better or is finding roots z -# of trig poly. -# answer: research shows doesn't really matter. -# TODO: could try boyd. eq. 16.46 pg 336 -def _chebyshev_pts(N, lobatto, domain=(-1, 1)): - y = chebpts2(N) if lobatto else chebpts1(N) - return map_domain(y, domain[0], domain[-1]) - - # Vectorized versions of numpy functions. Need root finding to be as efficient as -# possible, so manually vectorize to solve stack of matrices with single LAPACK call. -# Also skip the slow input massaging because we don't allow duck typed lists. +# possible, so vectorize to solve stack of matrices. 
Also skip the slow input +# massaging because we don't allow duck typed lists. def _chebcompanion(c): # Adapted from - # https://numpy.org/doc/stable/reference/generated/ + # numpy.org/doc/stable/reference/generated/ # numpy.polynomial.chebyshev.chebcompanion.html. - # https://github.com/f0uriest/orthax/blob/main/orthax/chebyshev.py. + # github.com/f0uriest/orthax/blob/main/orthax/chebyshev.py. errorif(c.shape[-1] < 2, msg="Series must have maximum degree of at least 1.") if c.shape[-1] == 2: return jnp.array([[-c[..., 0] / c[..., 1]]]) @@ -61,9 +42,9 @@ def _chebcompanion(c): def _chebroots(c): # Adapted from - # https://numpy.org/doc/stable/reference/generated/ + # numpy.org/doc/stable/reference/generated/ # numpy.polynomial.chebyshev.chebroots.html. - # https://github.com/f0uriest/orthax/blob/main/orthax/chebyshev.py, + # github.com/f0uriest/orthax/blob/main/orthax/chebyshev.py, if c.shape[-1] < 2: return jnp.reshape([], (0,) * c.ndim) if c.shape[-1] == 2: @@ -77,17 +58,51 @@ def _chebroots(c): return r +def _cheb_from_dct(c): + # Return Chebshev polynomial coefficients given forward dct type 2. + return c.at[..., 0].divide(2.0) * 2 + + +def _flatten_matrix(y): + # Flatten batch of matrix to batch of vector. + return y.reshape(*y.shape[:-2], -1) + + +def alpha_sequence(alpha_0, m, iota, period): + """Get sequence of poloidal coordinates (α₀, α₁, …, αₘ₋₁) of field line. + + Parameters + ---------- + alpha_0 : float + Starting field line poloidal label. + m : float + Number of periods to follow field line. + iota : jnp.ndarray + Shape (iota.size, ). + Rotational transform normalized by 2π. + period : float + Toroidal period after which to update label. + + Returns + ------- + alpha : jnp.ndarray + Shape (iota.size, m). + Sequence of poloidal coordinates (α₀, α₁, …, αₘ₋₁) that specify field line. + + """ + # Δz (∂α/∂ζ) = Δz ι̅ = Δz ι/2π = Δz data["iota"] + return (alpha_0 + period * iota[:, jnp.newaxis] * jnp.arange(m)) % (2 * jnp.pi) + + class FourierChebyshevBasis: """Fourier-Chebyshev series. f(x, y) = ∑ₘₙ aₘₙ ψₘ(x) Tₙ(y) - where ψₘ are trigonometric functions and Tₙ are Chebyshev polynomials - on domain [−yₘᵢₙ, yₘₐₓ]. + where ψₘ are trigonometric polynomials on [0, 2π] + and Tₙ are Chebyshev polynomials on [−yₘᵢₙ, yₘₐₓ]. Attributes ---------- - L : int - Batch dimension size. M : int Fourier spectral resolution. N : int @@ -100,18 +115,17 @@ class FourierChebyshevBasis: """ - def __init__(self, f, M, N, lobatto=False, domain=(-1, 1)): + _eps = min(jnp.finfo(jnp.array(1.0).dtype).eps * 1e2, 1e-10) + + def __init__(self, f, lobatto=False, domain=(-1, 1)): """Interpolate Fourier-Chebyshev basis to ``f``. Parameters ---------- f : jnp.ndarray Shape (..., M, N). - Samples of function on the ``FourierChebyshevBasis.nodes`` grid. - M : int - Grid resolution in x direction. Preferably power of 2. - N : int - Grid resolution in y direction. Preferably power of 2. + Samples of real function on the ``FourierChebyshevBasis.nodes`` grid. + M, N preferably power of 2. lobatto : bool Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) or interior roots grid for Chebyshev points. 
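# Illustrative sketch (not part of the patch): the classical DCT/Chebyshev
# identity that FourierChebyshevBasis builds on; the class combines it with an
# rfft over the Fourier axis and its own normalization. Sampling a Chebyshev
# series on the interior (first-kind) points and taking a type-II DCT
# recovers its coefficients, here for f(y) = 2 T0(y) - T1(y) + 3 T2(y).
import numpy as np
from numpy.polynomial.chebyshev import chebval
from scipy.fft import dct

N = 8
theta = np.pi * (np.arange(N) + 0.5) / N
y = np.cos(theta)                        # interior Chebyshev points
f = chebval(y, np.array([2.0, -1.0, 3.0]))

X = dct(f, type=2)                       # X_k = 2 sum_n f_n cos(k theta_n)
c = X / N
c[0] /= 2.0
print(np.round(c, 12))                   # approx [2, -1, 3, 0, ..., 0]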
@@ -121,19 +135,38 @@ def __init__(self, f, M, N, lobatto=False, domain=(-1, 1)): """ errorif(domain[0] > domain[-1], msg="Got inverted y coordinate domain.") errorif(lobatto, NotImplementedError, "JAX has not implemented type 1 DCT.") - self.domain = domain + self.M = f.shape[-2] + self.N = f.shape[-1] self.lobatto = bool(lobatto) - self._c = rfft( - dct(f.reshape(-1, M, N), type=2 - self.lobatto, axis=-1), - axis=-2, + self.domain = domain + self._c = ( + rfft( + dct(f, type=2 - self.lobatto, axis=-1) / (self.N - self.lobatto), + axis=-2, + ) + / self.M ) - self.N = N - self.M = M - self.L = self._c.shape[0] - self._a_n = None - @classmethod - def nodes(cls, M, N, lobatto=False, domain=(-1, 1), **kwargs): + @staticmethod + def _fourier_pts(M): + return -jnp.pi + 2 * jnp.pi * jnp.arange(1, M + 1) / M + + # Y = [a, b] evaluate on grid -> y = [-1, 1] chebyshev points -> y = cos(z) + # evenly spaced z. + # So I find coefficients to chebyshev series T_n(y) = cos(n arcos(y)) = cos(n z). + # So evaluating my chebyshev series in y is same as evaluting cosine series in + # z = arcos(y). + # for y = inversemap[a, b]. + # Open questions is finding roots y using chebroots better or is finding roots z + # of trig poly. + # answer: research shows doesn't really matter. + @staticmethod + def _chebyshev_pts(N, lobatto, domain=(-1, 1)): + y = chebpts2(N) if lobatto else chebpts1(N) + return map_domain(y, domain[0], domain[-1]) + + @staticmethod + def nodes(M, N, lobatto=False, domain=(-1, 1), **kwargs): """Tensor product grid of optimal collocation nodes for this basis. Parameters @@ -150,20 +183,17 @@ def nodes(cls, M, N, lobatto=False, domain=(-1, 1), **kwargs): Returns ------- - grid : jnp.ndarray + coords : jnp.ndarray Shape (M * N, 2). Grid of (x, y) points for optimal interpolation. """ - x = _fourier_pts(M) - y = _chebyshev_pts(N, lobatto, domain) - if "rho" in kwargs: - # then user wants a 3D DESC grid - grid = Grid.create_meshgrid([kwargs.pop("rho"), x, y], **kwargs) - else: - xx, yy = map(jnp.ravel, jnp.meshgrid(x, y, indexing="ij")) - grid = jnp.column_stack([xx, yy]) - return grid + x = FourierChebyshevBasis._fourier_pts(M) + y = FourierChebyshevBasis._chebyshev_pts(N, lobatto, domain) + coords = [kwargs.pop("rho"), x, y] if "rho" in kwargs else [x, y] + coords = list(map(jnp.ravel, jnp.meshgrid(*coords, indexing="ij"))) + coords = jnp.column_stack(coords) + return coords def evaluate(self, M, N): """Evaluate Fourier-Chebyshev series. @@ -178,23 +208,19 @@ def evaluate(self, M, N): Returns ------- f : jnp.ndarray - Shape (L, M, N) + Shape (..., M, N) Fourier-Chebyshev series evaluated at ``FourierChebyshevBasis.nodes(M, N)``. """ - f = ( - idct( - irfft(self._c, n=M, axis=-2) * M / self.M, - type=2 - self.lobatto, - n=N, - axis=-1, - ) - * (N - self.lobatto) - / (self.N - self.lobatto) - ) + f = idct( + irfft(self._c, n=M, axis=-2) * M, + type=2 - self.lobatto, + n=N, + axis=-1, + ) * (N - self.lobatto) return f - def _harmonics(self): + def harmonics(self): """Spectral coefficients aₘₙ of the interpolating polynomial. Transform Fourier interpolant harmonics to Nyquist trigonometric @@ -203,57 +229,60 @@ def _harmonics(self): Returns ------- a_mn : jnp.ndarray - Shape (L, μ, N) where μ ∈ {M, M+1}. + Shape (..., M, N). Real valued spectral coefficients for Fourier-Chebyshev basis. """ + c = _cheb_from_dct(self._c) + # Convert rfft to Nyquist trigonometric harmonics. 
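# Illustrative sketch (not part of the patch): the rfft-to-real-harmonic
# conversion performed above, checked on a signal with known harmonics. The
# mean is Re(c0), cosine amplitudes are 2 Re(c_m), sine amplitudes are
# -2 Im(c_m); for even M the Nyquist cosine term needs the extra halving
# applied in the code above.
import numpy as np

M = 8
x = 2 * np.pi * np.arange(M) / M
f = 1.5 + 2.0 * np.cos(2 * x) - 0.5 * np.sin(3 * x)

c = np.fft.rfft(f) / M                       # "forward" normalization
print(np.round(np.real(c[0]), 12))           # 1.5, the mean (a0)
print(np.round(2 * np.real(c[1:]), 12))      # approx [0, 2, 0, 0], cos amplitudes
print(np.round(-2 * np.imag(c[1:]), 12))     # approx [0, 0, -0.5, 0], sin amplitudes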
+ is_even = (self.M % 2) == 0 # ∂ₓ = 0 coefficients - a0 = jnp.real(self._c[:, 0])[:, jnp.newaxis] + a0 = jnp.real(c[..., 0, :])[..., jnp.newaxis, :] # cos(mx) Tₙ(y) coefficients - an = jnp.real(self._c[:, 1:]) * 2 + an = jnp.real(c[..., 1:, :].at[..., -1, :].divide(1.0 + is_even)) * 2 # sin(mx) Tₙ(y) coefficients - bn = jnp.imag(self._c[:, 1:]) * (-2) - # Note 2*(M//2)+1 <= M+1 and bM = 0 if equality. - a_mn = jnp.hstack([a0, an, bn]) - assert a_mn.shape[-2] in (self.M, self.M + 1) and a_mn.shape[-1] == self.N + bn = jnp.imag(c[..., 1 : c.shape[-2] - is_even, :]) * (-2) + + a_mn = jnp.concatenate([a0, an, bn], axis=-2) + assert a_mn.shape[-2:] == (self.M, self.N) return a_mn - def _evaluate_fourier_basis(self, x): - """Evaluate Fourier basis at points ``x`` and cache the coefficients. + def compute_cheb(self, x): + """Evaluate Fourier basis at ``x`` to obtain set of 1d Chebyshev coefficients. Parameters ---------- x : jnp.ndarray - Shape (L, x.shape[-1]) or (x.shape[-1], ). - Evaluation points. If 1d assumes batch dimension over L is implicit. + Shape (..., x.shape[-1]). + Evaluation points. If 1d assumes batch dimension over L is implicit + (i.e. standard numpy broadcasting rules). Returns ------- - a_n : jnp.ndarray - Shape (L, N, x.shape[-1]) + cheb : jnp.ndarray + Shape (..., x.shape[-1], N). + Chebyshev coefficients αₙ(x=``x``) for f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). """ - # TODO: do in desc.basis too for potentially significant performance boost. - # Partial summation technique; see Boyd p. 185, eq. 10.2. - x = jnp.atleast_2d(x)[:, jnp.newaxis] - m = jnp.arange(1, self.M // 2 + 1)[:, jnp.newaxis] - psi = jnp.dstack([jnp.ones(x.shape), jnp.cos(m * x), jnp.sin(m * x)]) - # batch matrix product (L, N, μ) @ (L, μ, x) = (L, N, x) - self._a_n = jnp.swapaxes(self._harmonics(), -1, -2) @ psi - assert self._a_n.shape == (self.L, self.N, x.shape[-1]) - return self._a_n - - def y_intersect(self, x, k=0, eps=_eps): + x = jnp.array(x, ndmin=self._c.ndim) + errorif(x.ndim != self._c.ndim, NotImplementedError) + cheb = _cheb_from_dct( + irfft_non_uniform(x, jnp.swapaxes(self._c, -1, -2), self.M) + ) + cheb = jnp.swapaxes(cheb, -1, -2) + assert cheb.shape == (*self._c.shape[:-2], x.shape[-1], self.N) + return cheb + + def y_intersect(self, cheb, k=0, eps=_eps): """Coordinates yᵢ such that f(x, yᵢ) = k(x). Parameters ---------- - x : jnp.ndarray - Shape (L, x.shape[-1]) or broadcastable of lower dimension. - Evaluation points. If 1d assumes batch dimension over L is implicit - (i.e. standard numpy broadcasting rules). + cheb : jnp.ndarray + Shape (..., N). + Chebyshev coefficients αₙ(x=``x``) for f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). k : jnp.ndarray - Shape (P, L, x.shape[-1]) or broadcastable of lower dimension. + Shape (..., *cheb.shape). Specify to find solutions yᵢ to f(x, yᵢ) = k(x). Default 0. eps : float Absolute tolerance with which to consider value as zero. @@ -261,7 +290,7 @@ def y_intersect(self, x, k=0, eps=_eps): Returns ------- y : jnp.ndarray - Shape (P, L, x.shape[-1], N - 1) or (L, x.shape[-1], N - 1). + Shape (..., *cheb.shape[:-1], N - 1). Solutions yᵢ of f(x, yᵢ) = k(x), in ascending order. is_decreasing : jnp.ndarray Shape y.shape. @@ -274,23 +303,25 @@ def y_intersect(self, x, k=0, eps=_eps): Boolean array into ``y`` indicating whether element is an intersect. 
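# Illustrative sketch (not part of the patch): the root-finding step described
# above, for a single Chebyshev series. Solving f(y) = k amounts to
# subtracting k from the constant coefficient and taking Chebyshev roots; the
# sign of df/dy at each root classifies it as a decreasing or increasing
# crossing, which is the bp1/bp2 distinction used below.
import numpy as np
from numpy.polynomial import chebyshev as cheb

c = np.array([1.0, 0.0, 0.5])            # f(y) = 1 + 0.5 T2(y) = 0.5 + y**2
k = 0.75
shifted = c.copy()
shifted[0] -= k
y = cheb.chebroots(shifted)
y = np.sort(y[(np.abs(y.imag) < 1e-12) & (np.abs(y.real) <= 1)].real)
print(y)                                          # [-0.5  0.5]
print(np.sign(cheb.chebval(y, cheb.chebder(c))))  # [-1.  1.]: decreasing, then increasing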
""" - a_n = self._evaluate_fourier_basis(x) - if k.ndim == 3: - a_n = a_n[jnp.newaxis] - a_n = put(a_n, Index[..., 0, :], a_n[..., 0, :] - k) - a_n = jnp.swapaxes(a_n, -1, -2) # shape is (P, L, x, N) + assert cheb.shape[-1] == self.N + c = cheb[jnp.newaxis] if k.ndim > cheb.ndim else cheb + c = c.at[..., 0].add(-k) # roots yᵢ of f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y) - k(x) - y = _chebroots(a_n) - assert y.shape[-3:] == (self.L, x.shape[-1], self.N - 1) + y = _chebroots(c) + assert y.shape == (*c.shape[:-1], self.N - 1) - # Pick sentinel such that only distinct roots are considered intersects. y = _filter_distinct(y, sentinel=-2, eps=eps) + # Pick sentinel above such that only distinct roots are considered intersects. is_intersect = (jnp.abs(jnp.imag(y)) <= eps) & (jnp.abs(jnp.real(y)) <= 1) y = jnp.where(is_intersect, jnp.real(y), 0) # ensure y is in domain of arcos # ∂f/∂y = ∑ₙ₌₀ᴺ⁻¹ aₙ(x) n Uₙ₋₁(y) # sign ∂f/∂y = sign ∑ₙ₌₁ᴺ⁻¹ aₙ(x) sin(n arcos y) - s = jnp.inner( - a_n, jnp.sin(jnp.arange(self.N) * jnp.arccos(y)[..., jnp.newaxis]) + s = jnp.einsum( + # TODO: Multipoint evaluation with FFT. + # Chapter 10, https://doi.org/10.1017/CBO9781139856065. + "...n,...yn", + cheb, + jnp.sin(jnp.arange(self.N) * jnp.arccos(y)[..., jnp.newaxis]), ) is_decreasing = s <= 0 is_increasing = s >= 0 @@ -298,51 +329,9 @@ def y_intersect(self, x, k=0, eps=_eps): y = map_domain(y, self.domain[0], self.domain[-1]) return y, is_decreasing, is_increasing, is_intersect - def _isomorphism_1d(self, y): - """Return coordinates z ∈ ℂ isomorphic to (x, y) ∈ ℂ². - - Maps row x of y to z = α(x) + y where α(x) = x * |domain|. - - Parameters - ---------- - y : jnp.ndarray - Shape (..., *y.shape[-2:]). - Second to last axis iterates the rows. - Leading axes are considered batch axes in usual numpy broadcasting. - - Returns - ------- - z : jnp.ndarray - Shape (..., y.shape[-2] * y.shape[-1]). - Isomorphic coordinates. - - """ - alpha = (self.domain[-1] - self.domain[0]) * jnp.arange(y.shape[-2]) - z = _flatten_matrix(alpha[:, jnp.newaxis] + y) - return z - - def _isomorphism_2d(self, z): - """Return coordinates (x, y) ∈ ℂ² isomorphic to z ∈ ℂ. - - Returns index x and value y such that z = α(x) + y where α(x) = x * |domain|. - - Parameters - ---------- - z : jnp.ndarray - - Returns - ------- - x_index : jnp.ndarray - Shape y.shape. - Isomorphic coordinates. - - """ - period = self.domain[-1] - self.domain[0] - x_index = z // period - y_value = z % period - return x_index, y_value - - def bounce_points(self, y, is_decreasing, is_increasing, is_intersect): + def bounce_points( + self, y, is_decreasing, is_increasing, is_intersect, num_wells=None + ): """Compute bounce points given intersections. Parameters @@ -361,22 +350,29 @@ def bounce_points(self, y, is_decreasing, is_increasing, is_intersect): is_intersect : jnp.ndarray Shape y.shape. Boolean array into ``y`` indicating whether element is an intersect. + num_wells : int + If not specified, then all bounce points are returned in an array whose + last axis has size ``y.shape[-1] * y.shape[-2]``. If there + were less than that many wells detected along a field line, then the last + axis of the returned arrays, which enumerates bounce points for a particular + field line and pitch, is padded with zero. + + Specify to return the first ``num_wells`` pairs of bounce points for each + pitch along each field line. This is useful if ``num_wells`` tightly + bounds the actual number of wells. As a reference, there are + typically <= 5 wells per toroidal transit. 
Returns ------- bp1, bp2 : (jnp.ndarray, jnp.ndarray) - Shape (*y.shape[:-2], y.shape[-1] * y.shape[-2]). - The field line-following coordinates of bounce points for a given pitch along - a field line. The pairs ``bp1`` and ``bp2`` form left and right integration - boundaries, respectively, for the bounce integrals. - - If there were less than ``y.shape[-1] * y.shape[-2]`` bounce points detected - along a field line, then the last axis, which enumerates the bounce points for - a particular field line, is padded with zero. + Shape (*y.shape[:-2], num_wells). + The field line-following coordinates of bounce points for a given pitch + along a field line. The pairs ``bp1`` and ``bp2`` form left and right + integration boundaries, respectively, for the bounce integrals. """ - # Last axis enumerates intersects of a pitch along a field line. - y = self._isomorphism_1d(y) + # Flatten so that last axis enumerates intersects of a pitch along a field line. + y = _flatten_matrix(self._isomorphism_1d(y)) is_decreasing = _flatten_matrix(is_decreasing) is_increasing = _flatten_matrix(is_increasing) is_intersect = _flatten_matrix(is_intersect) @@ -384,70 +380,100 @@ def bounce_points(self, y, is_decreasing, is_increasing, is_intersect): is_bp2 = is_increasing & _fix_inversion(is_intersect, is_increasing) sentinel = self.domain[0] - 1 - bp1 = _take_mask(y, is_bp1, fill_value=sentinel) - bp2 = _take_mask(y, is_bp2, fill_value=sentinel) + bp1 = take_mask(y, is_bp1, size=num_wells, fill_value=sentinel) + bp2 = take_mask(y, is_bp2, size=num_wells, fill_value=sentinel) mask = (bp1 > sentinel) & (bp2 > sentinel) # Set outside mask to same value so integration is over set of measure zero. bp1 = jnp.where(mask, bp1, 0) bp2 = jnp.where(mask, bp2, 0) + # These typically have shape (num pitch, num rho, num wells). return bp1, bp2 - def _interp1d( - self, z - ): # assumes z is on x points from a_n generated after evaluate fourier - """Evaluate basis at coordinates z ∈ ℝ isomorphic to (x, y) ∈ ℝ². + def interp_cheb_spline(self, z, cheb): + """Evaluate piecewise Chebyshev spline at coordinates z. + + The coordinates z ∈ ℝ are assumed isomorphic to (x, y) ∈ ℝ² + where z integer division domain yields index into the proper + Chebyshev series of the spline and z mod domain is the coordinate + value along the domain of that Chebyshev series. Parameters ---------- z : jnp.ndarray - Shape (P, L, B, Q). - Isomorphic coordinates. - Pitch, radial, bounce points, quad points. + Shape (*cheb.shape[:-2], num wells, num quadrature points). + Isomorphic coordinates along field line [0, inf]. + cheb: jnp.ndarray + Shape (..., num cheb series, N). + Chebyshev coefficients αₙ for f(z) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x[z]) Tₙ(y[z]). Returns ------- f : jnp.ndarray Shape z.shape. - This basis evaluated at z. + Chebyshev basis evaluated at z. """ - # Will have shape (P, L, BQ) - x_index, y_values = map(_flatten_matrix, self._isomorphism_2d(z)) - y_values = map_domain_to_disc(y_values, self.domain[0], self.domain[1]) - a_n = jnp.swapaxes(self._a_n, -1, -2) # changes to shape (L, x, N) - n = jnp.arange(self.N) - T = jnp.cos(n * jnp.arccos(y_values)[..., jnp.newaxis]) - # f(z) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x[z]) Tₙ(y[z]) - f = jnp.inner(a_n[x_index], T).reshape(z.shape) + # TODO: Multipoint evaluation with FFT. + # Chapter 10, https://doi.org/10.1017/CBO9781139856065. 
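# Illustrative sketch (not part of the patch): the 1d <-> 2d isomorphism used
# here. A field-line coordinate z encodes (which Chebyshev series, local
# coordinate) through integer division and modulo by the domain length, which
# is exactly the pair of maps _isomorphism_1d and _isomorphism_2d below invert.
import numpy as np

domain = (0.0, 2.0 * np.pi)
period = domain[1] - domain[0]

y = np.array([[1.0, 2.0], [0.5, 3.0], [4.0, 5.0]])  # rows are consecutive periods
z = period * np.arange(y.shape[0])[:, None] + y      # forward map

x_index, y_back = z // period, z % period            # inverse map
print(x_index)                  # [[0. 0.] [1. 1.] [2. 2.]]
print(np.allclose(y_back, y))   # True, up to roundoff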
+ x_idx, y = map(_flatten_matrix, self._isomorphism_2d(z)) + y = map_domain_to_disc(y, self.domain[0], self.domain[1]) + cheb = jnp.moveaxis(cheb, source=-1, destination=0) + cheb = jnp.take_along_axis(cheb, x_idx, axis=-1, mode="promise_in_bounds") + f = chebval(y, cheb, tensor=False).reshape(z.shape) + # TODO: Add below as unit test. + # n = jnp.arange(self.N) # noqa: E800 + # T = jnp.cos(n * jnp.arccos(y)[..., jnp.newaxis]) # noqa: E800 + # f = jnp.einsum("...n,n...", T, cheb).reshape(z.shape) # noqa: E800 return f + def _isomorphism_1d(self, y): + """Return coordinates z ∈ ℂ isomorphic to (x, y) ∈ ℂ². -def alpha_sequence(alpha_0, m, iota, period): - """Get sequence of poloidal coordinates (α₀, α₁, …, αₘ₋₁) of field line. + Maps row x of y to z = α(x) + y where α(x) = x * |domain|. - Parameters - ---------- - alpha_0 : float - Starting field line poloidal label. - m : float - Number of periods to follow field line. - iota : jnp.ndarray - Shape (L, ) - Rotational transform normalized by 2π. - period : float - Toroidal period after which to update label. + Parameters + ---------- + y : jnp.ndarray + Shape (..., *y.shape[-2:]). + Second to last axis iterates the rows. - Returns - ------- - alpha : jnp.ndarray - Shape (L, m) - Sequence of poloidal coordinates (α₀, α₁, …, αₘ₋₁) that specify field line. + Returns + ------- + z : jnp.ndarray + Shape y.shape. + Isomorphic coordinates. - """ - # Δz (∂α/∂ζ) = Δz ι̅ = Δz ι/2π = Δz data["iota"] - return (alpha_0 + period * iota[:, jnp.newaxis] * jnp.arange(m)) % (2 * jnp.pi) + """ + period = self.domain[-1] - self.domain[0] + zeta_shift = period * jnp.arange(y.shape[-2]) + z = zeta_shift[:, jnp.newaxis] + y + return z + def _isomorphism_2d(self, z): + """Return coordinates (x, y) ∈ ℂ² isomorphic to z ∈ ℂ. -def _flatten_matrix(y): - return y.reshape(*y.shape[:-2], -1) + Returns index x and value y such that z = α(x) + y where α(x) = x * |domain|. + + Parameters + ---------- + z : jnp.ndarray + Shape z.shape. + + Returns + ------- + x_index : jnp.ndarray + Shape y.shape. + Isomorphic coordinates. + + """ + period = self.domain[-1] - self.domain[0] + x_index = z // period + y_value = z % period + return x_index, y_value + + +def bounce_integral(data, M, N, rho): + """WIP.""" + cheb_nodes = FourierChebyshevBasis.nodes(M, N, domain=(0, 2 * jnp.pi), rho=rho) + return cheb_nodes diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 4567637be3..78a1236402 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -2,14 +2,15 @@ import copy import inspect +from functools import partial import numpy as np from termcolor import colored -from desc.backend import cond, execute_on_cpu, fori_loop, jnp, put +from desc.backend import cond, execute_on_cpu, flatnonzero, fori_loop, jnp, put, take from desc.grid import ConcentricGrid, Grid, LinearGrid -from ..utils import errorif, warnif +from ..utils import errorif, setdefault, warnif from .data_index import allowed_kwargs, data_index # map from profile name to equilibrium parameter name @@ -1581,3 +1582,41 @@ def body(i, mins): # The above implementation was benchmarked to be more efficient than # alternatives without explicit loops in GitHub pull request #501. return grid.expand(mins, surface_label) + + +@partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={"size", "fill_value"}) +def take_mask(a, mask, size=None, fill_value=None): + """JIT compilable method to return ``a[mask][:size]`` padded by ``fill_value``. + + Parameters + ---------- + a : jnp.ndarray + The source array. 
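# Illustrative sketch (not part of the patch): the contract documented here,
# ``a[mask][:size]`` padded to a fixed length, reproduced with the same
# flatnonzero/take pattern and checked against plain numpy.
import numpy as np
import jax.numpy as jnp

a = jnp.array([3.0, 7.0, 1.0, 9.0, 5.0])
mask = jnp.array([True, False, True, True, False])
size = 4

idx = jnp.flatnonzero(mask, size=size, fill_value=mask.size)
padded = jnp.take(a, idx, mode="fill", fill_value=jnp.nan)
print(padded)  # [ 3.  1.  9. nan]

reference = np.full(size, np.nan)
kept = np.asarray(a)[np.asarray(mask)][:size]
reference[: kept.size] = kept
print(np.array_equal(np.asarray(padded), reference, equal_nan=True))  # True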
+ mask : jnp.ndarray + Boolean mask to index into ``a``. Should have same shape as ``a``. + size : int + Elements of ``a`` at the first size True indices of ``mask`` will be returned. + If there are fewer elements than size indicates, the returned array will be + padded with ``fill_value``. The size default is ``mask.size``. + fill_value : Any + When there are fewer than the indicated number of elements, the remaining + elements will be filled with ``fill_value``. Defaults to NaN for inexact types, + the largest negative value for signed types, the largest positive value for + unsigned types, and True for booleans. + + Returns + ------- + result : jnp.ndarray + Shape (size, ). + + """ + assert a.shape == mask.shape + idx = flatnonzero(mask, size=setdefault(size, mask.size), fill_value=mask.size) + return take( + a, + idx, + mode="fill", + fill_value=fill_value, + unique_indices=True, + indices_are_sorted=True, + ) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index d1b24c89ee..3610193bb6 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -14,7 +14,19 @@ from scipy.special import ellipe, ellipkm1, roots_chebyu from tests.test_plotting import tol_1d -from desc.backend import flatnonzero, jnp +from desc.backend import flatnonzero, jnp, put, rfft +from desc.compute._interp_utils import interp_rfft, interp_rfft2 +from desc.compute._quadrature_utils import ( + affine_bijection, + affine_bijection_to_disc, + automorphism_arcsin, + automorphism_sin, + grad_affine_bijection, + grad_automorphism_arcsin, + grad_automorphism_sin, + leggausslob, + tanh_sinh, +) from desc.compute.bounce_integral import ( _composite_linspace, _filter_nonzero_measure, @@ -22,29 +34,19 @@ _poly_der, _poly_root, _poly_val, - _take_mask, - affine_bijection, - affine_bijection_to_disc, - automorphism_arcsin, - automorphism_sin, bounce_integral, bounce_points, get_extrema, get_pitch, - grad_affine_bijection, - grad_automorphism_arcsin, - grad_automorphism_sin, - leggausslob, plot_field_line, - tanh_sinh, ) from desc.compute.fourier_bounce_integral import FourierChebyshevBasis -from desc.compute.utils import dot +from desc.compute.utils import dot, take_mask from desc.equilibrium import Equilibrium from desc.equilibrium.coords import get_rtz_grid from desc.examples import get from desc.grid import Grid, LinearGrid -from desc.utils import only1 +from desc.utils import Index, only1 @partial(np.vectorize, signature="(m)->()") @@ -63,7 +65,7 @@ def test_mask_operations(): a = np.random.rand(rows, cols) nan_idx = np.random.choice(rows * cols, size=(rows * cols) // 2, replace=False) a.ravel()[nan_idx] = np.nan - taken = _take_mask(a, ~np.isnan(a)) + taken = take_mask(a, ~np.isnan(a)) last = _last_value(taken) for i in range(rows): desired = a[i, ~np.isnan(a[i])] @@ -799,6 +801,75 @@ def dummy_fun(pitch): return fig +# TODO: upstream to interpax +@pytest.mark.unit +def test_interp_rfft(): + """Test FFT interpolation.""" + + def _interp_rfft(xq, f): + assert xq.ndim == f.ndim >= 1 + M = f.shape[-1] + a = rfft(f, norm="forward") + a = put(a, Index[..., 0], a[..., 0] / 2) + a = put(a, Index[..., -1], a[..., -1] / (1 + ((M % 2) == 0))) + + m = np.fft.rfftfreq(M, d=1 / M) + np.testing.assert_allclose(m, np.arange(M // 2 + 1), err_msg="rfftfreq wrong.") + + mx = m * xq[..., np.newaxis] + fq = 2 * ( + np.sum(np.cos(mx) * np.real(a), axis=-1) + - np.sum(np.sin(mx) * np.imag(a), axis=-1) + ) + return fq + + def test(xq, func, n): + x = np.linspace(0, 2 * np.pi, n, endpoint=False) + 
assert not np.any(np.isclose(xq[..., np.newaxis], x)) + f = func(x) + np.testing.assert_allclose(_interp_rfft(xq, f), func(xq)) + np.testing.assert_allclose(interp_rfft(xq, f), func(xq)) + + xq = np.array([7.34, 1.10134, 2.28]) + freq_nyquist = 7 + f = lambda x: np.cos(freq_nyquist * x) + np.sin(x) + test(xq, f, 2 * freq_nyquist) + test(xq, f, 2 * freq_nyquist + 1) + + +@pytest.mark.xfail(reason="Numpy, jax, and scipy need to fix bug on their end.") +@pytest.mark.unit +def test_interp_rfft2(): + """Test FFT interpolation.""" + + def test(xq, func, m, n): + x = np.linspace(0, 2 * np.pi, m, endpoint=False) + y = np.linspace(0, 2 * np.pi, n, endpoint=False) + assert not np.any(np.isclose(xq[..., 0, np.newaxis], x)) + assert not np.any(np.isclose(xq[..., 1, np.newaxis], y)) + x, y = map(np.ravel, list(np.meshgrid(x, y, indexing="ij"))) + np.testing.assert_allclose( + interp_rfft2( + xq, + jnp.array(func(x, y).reshape(m, n), ndmin=xq.ndim), + ), + func(xq[..., 0], xq[..., 1]), + ) + + def f(x, y): + # something that's not separable + return np.cos(x_freq * x) * np.sin(2 * x + y) + np.sin(y_freq * y) * np.cos( + x + 3 * y + ) + + xq = np.array([[7.34, 1.10134, 2.28], [1.1, 3.78432, 8.542]]).T + x_freq, y_freq = 3, 5 + x_rate_nyquist, y_rate_nyquist = 2 * (x_freq + 2), 2 * (y_freq + 3) + test(xq, f, x_rate_nyquist + 1, y_rate_nyquist + 1) + # FIXME: Bug with numpy's computation of nyquist freq fourier coefficient. + test(xq, f, x_rate_nyquist, y_rate_nyquist) + + # todo: @pytest.mark.unit def test_fcb_interp(): From 8c9c531d5244854194bfe44d073ecde4e65cff6e Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 6 Aug 2024 21:23:15 -0400 Subject: [PATCH 200/241] Downstream changes needed to implement Nemov's Gamma_c from Gamma_c branch --- desc/compute/bounce_integral.py | 139 +++++++++++++++++--------------- tests/test_bounce_integral.py | 25 +++--- 2 files changed, 90 insertions(+), 74 deletions(-) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index e344dd3383..5d2c008510 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -3,7 +3,8 @@ from functools import partial import numpy as np -from interpax import CubicHermiteSpline, PchipInterpolator, PPoly, interp1d +from interpax import CubicHermiteSpline, PPoly, interp1d +from jax.nn import softmax from matplotlib import pyplot as plt from orthax.legendre import leggauss @@ -563,7 +564,7 @@ def bounce_points( First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials that compose the spline along a particular field line. - num_wells : int + num_wells : int or None If not specified, then all bounce points are returned in an array whose last axis has size ``(knots.size - 1) * (B_c.shape[0] - 1)``. If there were less than that many wells detected along a field line, then the last @@ -600,7 +601,7 @@ def bounce_points( intersect = _poly_root( c=B_c, k=jnp.reciprocal(pitch)[..., jnp.newaxis], - a_min=jnp.array([0]), + a_min=jnp.array([0.0]), a_max=jnp.diff(knots), sort=True, sentinel=-1, @@ -691,14 +692,8 @@ def get_pitch(min_B, max_B, num, relative_shift=1e-6): return pitch -def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): - """Return |B| values at extrema. - - The quantity 1 / √(1 − λ |B|) common to bounce integrals is singular with - strength ~ |ζ_b₂ - ζ_b₁| / |(∂|B|/∂ζ)|ρ,α|. 
Therefore, an integral over the pitch - angle λ may have mass concentrated near λ = 1 / |B|(ζ*) where |B|(ζ*) is a - local maximum. Depending on the quantity to integrate, it may be beneficial - to place quadrature points at these regions. +def _get_extrema(knots, B_c, B_z_ra_c, sentinel=jnp.nan): + """Return extrema of |B| along field line. Sort order is arbitrary. Parameters ---------- @@ -717,43 +712,25 @@ def get_extrema(knots, B_c, B_z_ra_c, relative_shift=1e-6): First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials that compose the spline along a particular field line. - relative_shift : float - Relative amount to shift maxima down and minima up to avoid floating point - errors in downstream routines. + sentinel : float + Value with which to pad array to return fixed shape. Returns ------- - B_extrema : jnp.ndarray - Shape (N * (degree - 1), S). - For the shaping notation, the ``degree`` of the spline of |B| matches - ``B_c.shape[0]-1``, the number of polynomials per spline ``N`` matches - ``knots.size-1``, and the number of field lines is denoted by ``S``. - If there were less than ``N*degree`` bounce points detected along a field line, - then the last axis, which enumerates the bounce points for a particular field - line, is padded with nan. + extrema, B_extrema : jnp.ndarray + Shape (S, N * (degree - 1)). """ B_c, B_z_ra_c, _ = _check_shape(knots, B_c, B_z_ra_c) S, N, degree = B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 - extrema = _poly_root(c=B_z_ra_c, a_min=jnp.array([0]), a_max=jnp.diff(knots)) - assert extrema.shape == (S, N, degree - 1) - B_extrema = _poly_val(x=extrema, c=B_c[..., jnp.newaxis]) - B_zz_ra_extrema = _poly_val(x=extrema, c=_poly_der(B_z_ra_c)[..., jnp.newaxis]) - # Floating point error impedes consistent detection of bounce points riding - # extrema. Shift pitch values slightly to resolve this issue. - B_extrema = ( - jnp.where( - # Higher priority to shift down maxima than shift up minima, so identify - # near equality with zero as maxima. - B_zz_ra_extrema <= 0, - (1 - relative_shift) * B_extrema, - (1 + relative_shift) * B_extrema, - ) - .reshape(S, -1) - .T + extrema = _poly_root( + c=B_z_ra_c, a_min=jnp.array([0.0]), a_max=jnp.diff(knots), sentinel=sentinel ) - assert B_extrema.shape == (N * (degree - 1), S) - return B_extrema + assert extrema.shape == (S, N, degree - 1) + B_extrema = _poly_val(x=extrema, c=B_c[..., jnp.newaxis]).reshape(S, -1) + # Transform out of local power basis expansion. + extrema = (extrema + knots[:-1, jnp.newaxis]).reshape(S, -1) + return extrema, B_extrema def affine_bijection_to_disc(x, a, b): @@ -913,7 +890,7 @@ def _plot(Z, V, title_id=""): plt.show() -def _check_interp(Z, f, B_sup_z, B, B_z_ra, inner_product, plot): +def _check_interp(Z, f, B_sup_z, B, B_z_ra, result, plot): """Check for floating point errors. Parameters @@ -930,8 +907,8 @@ def _check_interp(Z, f, B_sup_z, B, B_z_ra, inner_product, plot): B_z_ra : jnp.ndarray Norm of magnetic field, derivative with respect to field-line following coordinate, interpolated to Z. - inner_product : jnp.ndarray - Output of ``_interpolatory_quadrature``. + result : jnp.ndarray + Output of ``_interpolate_and_integrate``. plot : bool Whether to plot stuff. @@ -953,7 +930,7 @@ def _check_interp(Z, f, B_sup_z, B, B_z_ra, inner_product, plot): assert not jnp.isclose(B_sup_z, 0).any(), msg # Number of those integrals that were computed. 
- actual = jnp.sum(marked & jnp.isfinite(inner_product)) + actual = jnp.sum(marked & jnp.isfinite(result)) assert goal == actual, ( f"Lost {goal - actual} integrals from NaN generation in the integrand. This " "can be caused by floating point error or a poor choice of quadrature nodes." @@ -984,7 +961,6 @@ def _interpolate_and_integrate( pitch, knots, method, - method_B="cubic", check=False, plot=False, ): @@ -998,7 +974,7 @@ def _interpolate_and_integrate( Returns ------- - inner_product : jnp.ndarray + result : jnp.ndarray Shape Z.shape[:-1]. Quadrature for every pitch along every field line. @@ -1017,15 +993,15 @@ def _interpolate_and_integrate( # that the singularity near the bounce points can be captured more accurately than # can be by any polynomial. f = [_interp1d_vec(Z, knots, f_i, method=method).reshape(shape) for f_i in f] - # TODO: Pass in derivative and use method_B. + # TODO: Pass in derivative and use cubic method. b_sup_z = _interp1d_vec(Z, knots, B_sup_z / B, method=method).reshape(shape) - B = _interp1d_vec_with_df(Z, knots, B, B_z_ra, method=method_B).reshape(shape) - inner_product = jnp.dot(integrand(*f, B=B, pitch=pitch) / b_sup_z, w) + B = _interp1d_vec_with_df(Z, knots, B, B_z_ra, method="cubic").reshape(shape) + result = jnp.dot(integrand(*f, B=B, pitch=pitch) / b_sup_z, w) if check: - _check_interp(Z.reshape(shape), f, b_sup_z, B, B_z_ra, inner_product, plot) + _check_interp(Z.reshape(shape), f, b_sup_z, B, B_z_ra, result, plot) - return inner_product + return result def _bounce_quadrature( @@ -1041,7 +1017,6 @@ def _bounce_quadrature( pitch, knots, method="akima", - method_B="cubic", batch=True, check=False, ): @@ -1097,8 +1072,6 @@ def _bounce_quadrature( Method of interpolation for functions contained in ``f``. See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. Default is akima spline. - method_B : str - Method of interpolation for |B|. Default is C1 cubic Hermite spline. batch : bool Whether to perform computation in a batched manner. Default is true. check : bool @@ -1134,7 +1107,6 @@ def _bounce_quadrature( pitch, knots, method, - method_B, check, # Only developers doing debugging want to see these plots. plot=False, @@ -1155,7 +1127,6 @@ def loop(bp): pitch, knots, method, - method_B, check=False, plot=False, ) @@ -1288,13 +1259,8 @@ def bounce_integral( B_sup_z, B, B_z_ra = (f.reshape(-1, knots.size) for f in [B_sup_z, B, B_z_ra]) # Compute splines. - monotonic = kwargs.pop("monotonic", False) # Interpax interpolation requires strictly increasing knots. - B_c = ( - PchipInterpolator(knots, B, axis=-1, check=check).c - if monotonic - else CubicHermiteSpline(knots, B, B_z_ra, axis=-1, check=check).c - ) + B_c = CubicHermiteSpline(knots, B, B_z_ra, axis=-1, check=check).c B_c = jnp.moveaxis(B_c, source=1, destination=-1) B_z_ra_c = _poly_der(B_c) degree = 3 @@ -1312,7 +1278,13 @@ def bounce_integral( x = auto(x) def bounce_integrate( - integrand, f, pitch, method="akima", batch=True, num_wells=None + integrand, + f, + pitch, + method="akima", + batch=True, + num_wells=None, + weight=None, ): """Bounce integrate ∫ f(ℓ) dℓ. @@ -1340,7 +1312,7 @@ def bounce_integrate( Default is akima spline. batch : bool Whether to perform computation in a batched manner. Default is true. - num_wells : int + num_wells : int or None If not specified, then all bounce integrals are returned in an array whose last axis has size ``(knots.size - 1) * degree``. 
If there were less than that many wells detected along a field line, then the last @@ -1354,6 +1326,11 @@ def bounce_integrate( identified. This will be done automatically if the ``bounce_integral`` function is called with ``check=True`` and ``plot=True``. As a reference, there are typically <= 5 wells per toroidal transit. + weight : jnp.ndarray + Shape (S, knots.size) or (S * knots.size). + If supplied, the bounce integral labeled by well j is weighted such that + the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` + evaluated at the deepest point in the magnetic well. Returns ------- @@ -1377,11 +1354,43 @@ def bounce_integrate( pitch, knots, method, - method_B="monotonic" if monotonic else "cubic", batch=batch, check=check, ) + if weight is not None: + result *= _compute_at_deepest( + bp1, bp2, knots, B_c, B_z_ra_c, weight.reshape(-1, knots.size), method + ) assert result.shape[-1] == setdefault(num_wells, (knots.size - 1) * degree) return result return bounce_integrate, spline + + +def _compute_at_deepest(bp1, bp2, knots, B_c, B_z_ra_c, f, method, beta=-50): + """Compute ``f`` at deepest point in the magnetic well. + + Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A = argmin_E |B|(ζ). Returns f_min = mean_A f. + + Parameters + ---------- + beta : float + More negative gives exponentially better approximation to f_min at the + expense of sharper gradients. + + """ + extrema, B_extrema = _get_extrema(knots, B_c, B_z_ra_c, sentinel=0) + P, S, num_wells = bp1.shape + assert extrema.shape == B_extrema.shape == (S, extrema.shape[-1]) + B = jnp.where( + (bp1[..., jnp.newaxis] < extrema[:, jnp.newaxis]) + & (extrema[:, jnp.newaxis] < bp2[..., jnp.newaxis]), + (B_extrema / jnp.mean(B_extrema, axis=-1, keepdims=True))[:, jnp.newaxis], + 100, # 100 >> max(|B|) / mean(|B|) + ) + f_min = jnp.linalg.vecdot( + softmax(beta * B, axis=-1), + _interp1d_vec(extrema, knots, f, method=method)[:, jnp.newaxis], + ) + assert f_min.shape == (P, S, num_wells) + return f_min diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 08793b57fa..b222a0918a 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -18,6 +18,7 @@ _composite_linspace, _filter_nonzero_measure, _filter_not_nan, + _get_extrema, _poly_der, _poly_root, _poly_val, @@ -27,7 +28,6 @@ automorphism_sin, bounce_integral, bounce_points, - get_extrema, get_pitch, grad_affine_bijection, grad_automorphism_arcsin, @@ -214,7 +214,7 @@ def test(x, c): @pytest.mark.unit def test_get_extrema(): - """Test that these pitch intersect extrema of |B|.""" + """Test computation of extrema of |B|.""" start = -np.pi end = -2 * start k = np.linspace(start, end, 5) @@ -222,13 +222,15 @@ def test_get_extrema(): k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) ) B_z_ra = B.derivative() - extrema_scipy = np.sort(B(B_z_ra.roots(extrapolate=False))) - rtol = 1e-7 - extrema = get_extrema(k, B.c, B_z_ra.c, relative_shift=rtol) - eps = 100 * np.finfo(float).eps - extrema = np.sort(_filter_not_nan(extrema)) + extrema, B_extrema = _get_extrema(k, B.c, B_z_ra.c) + extrema, B_extrema = map(_filter_not_nan, (extrema, B_extrema)) + idx = np.argsort(extrema) + + extrema_scipy = np.sort(B_z_ra.roots(extrapolate=False)) + B_extrema_scipy = B(extrema_scipy) assert extrema.size == extrema_scipy.size - np.testing.assert_allclose(extrema, extrema_scipy, rtol=rtol + eps) + np.testing.assert_allclose(extrema[idx], extrema_scipy) + np.testing.assert_allclose(B_extrema[idx], B_extrema_scipy) @pytest.mark.unit @@ -754,6 +756,7 @@ 
def integrand_den(B, pitch): f=[], pitch=pitch[:, np.newaxis], num_wells=1, + weight=np.ones(zeta.size), ) drift_numerical_num = np.squeeze(drift_numerical_num) @@ -769,7 +772,11 @@ def integrand_den(B, pitch): # Test if differentiable. def dummy_fun(pitch): - return jnp.sum(bounce_integrate(integrand_num, [cvdrift, gbdrift], pitch)) + return jnp.sum( + bounce_integrate( + integrand_num, [cvdrift, gbdrift], pitch, weight=np.ones(zeta.size) + ) + ) assert np.isclose(grad(dummy_fun)(1.0), 650, rtol=1e-3) From 4ef040d4a7b0fa43de34d073c18fe1d54141bdcb Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 7 Aug 2024 00:09:15 -0400 Subject: [PATCH 201/241] WIP: Commit before merge to save progress --- desc/backend.py | 6 -- desc/compute/_interp_utils.py | 10 +- desc/compute/fourier_bounce_integral.py | 136 +++++++++++------------- desc/grid.py | 5 +- tests/test_bounce_integral.py | 7 +- 5 files changed, 73 insertions(+), 91 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index 80c2323a00..3916d53329 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -97,10 +97,6 @@ jnp.trapezoid if hasattr(jnp, "trapezoid") else jax.scipy.integrate.trapezoid ) - trapezoid = ( - jnp.trapezoid if hasattr(jnp, "trapezoid") else jax.scipy.integrate.trapezoid - ) - def put(arr, inds, vals): """Functional interface for array "fancy indexing". @@ -429,8 +425,6 @@ def tangent_solve(g, y): trapezoid = np.trapezoid if hasattr(np, "trapezoid") else np.trapz - trapezoid = np.trapezoid if hasattr(np, "trapezoid") else np.trapz - def imap(f, xs, batch_size=None, in_axes=0, out_axes=0): """Generalizes jax.lax.map; uses numpy.""" if not isinstance(xs, np.ndarray): diff --git a/desc/compute/_interp_utils.py b/desc/compute/_interp_utils.py index 28e2d54a6d..7cfa15f9b9 100644 --- a/desc/compute/_interp_utils.py +++ b/desc/compute/_interp_utils.py @@ -1,17 +1,16 @@ -"""FFT interpolation.""" +"""Interpolation utilities.""" from functools import partial from desc.backend import jnp, rfft, rfft2 from desc.compute.utils import safediv -# TODO: upstream fft methods to interpax. # TODO: For inverse transforms, do multipoint evaluation with FFT. # FFT cost is 𝒪(M N log[M N]) while direct evaluation is 𝒪(M² N²). # Chapter 10, https://doi.org/10.1017/CBO9781139856065. -# Likely better than using approximate NFFT to evaluate f(xq) given fourier +# Likely better than using NFFT to evaluate f(xq) given fourier # coefficients because evaluation points are quadratically packed near edges as -# required by algebraic polynomial quadrature to avoid runge. +# required by quadrature to avoid runge. NFFT is only approximation anyway. 
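As a rough illustration of the direct non-uniform evaluation that the comment block above refers to, here is a minimal NumPy sketch (the helper name and test values are made up for illustration; the DESC routines below additionally handle axis selection and batching):

.. code-block:: python

    import numpy as np

    def eval_trig_interpolant(xq, f):
        # f holds real samples on the uniform grid x_j = 2*pi*j/n, j = 0, ..., n-1.
        n = f.shape[-1]
        a = 2.0 * np.fft.rfft(f) / n       # one-sided Fourier coefficients
        a[..., 0] /= 2.0                   # the DC term appears only once
        if n % 2 == 0:
            a[..., -1] /= 2.0              # so does the Nyquist term for even n
        m = np.fft.rfftfreq(n, d=1.0 / n)  # harmonics 0, 1, ..., n//2
        # Direct evaluation of Re( sum_m a_m exp(i m xq) ) at arbitrary xq.
        return np.real(np.exp(1j * m * xq[..., None]) @ a)

    x = np.linspace(0, 2 * np.pi, 8, endpoint=False)
    xq = np.array([0.37, 2.9, 5.11])
    np.testing.assert_allclose(
        eval_trig_interpolant(xq, np.cos(3 * x) + np.sin(x)),
        np.cos(3 * xq) + np.sin(xq),
    )

Each query point costs O(n) here versus O(log n) per point for a uniform inverse FFT, which is why the comment above flags multipoint evaluation or NFFT as a possible future optimization.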
def interp_rfft(xq, f): @@ -61,8 +60,7 @@ def irfft_non_uniform(xq, a, M): a = a.at[..., 0].divide(2.0).at[..., -1].divide(1.0 + ((M % 2) == 0)) m = jnp.fft.rfftfreq(M, d=1 / M) basis = jnp.exp(1j * m * xq[..., jnp.newaxis]) - fq = 2 * jnp.real(jnp.einsum("...m,...m", basis, a)) - # ℜ einsum(basis, a) = einsum(cos(mx), ℜ(a)) - einsum(sin(mx), ℑ(a)) + fq = 2 * jnp.real(jnp.linalg.vecdot(jnp.conj(basis), a)) return fq diff --git a/desc/compute/fourier_bounce_integral.py b/desc/compute/fourier_bounce_integral.py index d6d6ebe2e2..8bb36c5d12 100644 --- a/desc/compute/fourier_bounce_integral.py +++ b/desc/compute/fourier_bounce_integral.py @@ -1,61 +1,19 @@ """Methods for constructing f(α, ζ) splines and bounce integrals.""" -from orthax.chebyshev import chebpts1, chebpts2, chebval +from orthax.chebyshev import chebpts1, chebpts2, chebroots, chebval -from desc.backend import dct, idct, irfft, jnp, put, rfft -from desc.compute._interp_utils import _filter_distinct, irfft_non_uniform +from desc.backend import dct, idct, irfft, jnp, rfft +from desc.compute._interp_utils import _filter_distinct, interp_rfft2, irfft_non_uniform from desc.compute._quadrature_utils import affine_bijection as map_domain from desc.compute._quadrature_utils import ( affine_bijection_to_disc as map_domain_to_disc, ) from desc.compute.bounce_integral import _fix_inversion from desc.compute.utils import take_mask -from desc.utils import Index, errorif - -# Vectorized versions of numpy functions. Need root finding to be as efficient as -# possible, so vectorize to solve stack of matrices. Also skip the slow input -# massaging because we don't allow duck typed lists. - - -def _chebcompanion(c): - # Adapted from - # numpy.org/doc/stable/reference/generated/ - # numpy.polynomial.chebyshev.chebcompanion.html. - # github.com/f0uriest/orthax/blob/main/orthax/chebyshev.py. - errorif(c.shape[-1] < 2, msg="Series must have maximum degree of at least 1.") - if c.shape[-1] == 2: - return jnp.array([[-c[..., 0] / c[..., 1]]]) - - n = c.shape[-1] - 1 - scl = jnp.hstack([1.0, jnp.full(n - 1, jnp.sqrt(0.5))]) - mat = jnp.zeros((*c.shape[:-1], n, n), dtype=c.dtype) - mat = put(mat, Index[..., 0, 0], jnp.sqrt(0.5)) - mat = put(mat, Index[..., 0, 1:], 0.5) - mat = put(mat, Index[..., -1, :], mat[..., 0, :]) - mat = put( - mat, - Index[..., -1], - mat[..., -1] - c[..., :-1] / c[..., -1] * scl / scl[-1] * 0.5, - ) - return mat - +from desc.equilibrium.coords import map_clebsch_coords +from desc.utils import errorif -def _chebroots(c): - # Adapted from - # numpy.org/doc/stable/reference/generated/ - # numpy.polynomial.chebyshev.chebroots.html. - # github.com/f0uriest/orthax/blob/main/orthax/chebyshev.py, - if c.shape[-1] < 2: - return jnp.reshape([], (0,) * c.ndim) - if c.shape[-1] == 2: - return jnp.array([-c[..., 0] / c[..., 1]]) - - # rotated companion matrix reduces error - m = _chebcompanion(c)[..., ::-1, ::-1] - # Low priority: - # there are better techniques to find eigenvalues of Chebyshev colleague matrix. - r = jnp.sort(jnp.linalg.eigvals(m)) - return r +chebroots = jnp.vectorize(chebroots, signature="(m)->(m)") def _cheb_from_dct(c): @@ -90,7 +48,7 @@ def alpha_sequence(alpha_0, m, iota, period): Sequence of poloidal coordinates (α₀, α₁, …, αₘ₋₁) that specify field line. 
""" - # Δz (∂α/∂ζ) = Δz ι̅ = Δz ι/2π = Δz data["iota"] + # Δϕ (∂α/∂ϕ) = Δϕ ι̅ = Δϕ ι/2π = Δϕ data["iota"] return (alpha_0 + period * iota[:, jnp.newaxis] * jnp.arange(m)) % (2 * jnp.pi) @@ -117,7 +75,7 @@ class FourierChebyshevBasis: _eps = min(jnp.finfo(jnp.array(1.0).dtype).eps * 1e2, 1e-10) - def __init__(self, f, lobatto=False, domain=(-1, 1)): + def __init__(self, f, lobatto=False, domain=(0, 2 * jnp.pi)): """Interpolate Fourier-Chebyshev basis to ``f``. Parameters @@ -130,7 +88,7 @@ def __init__(self, f, lobatto=False, domain=(-1, 1)): Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) or interior roots grid for Chebyshev points. domain : (float, float) - Domain for y coordinates. Default is [-1, 1]. + Domain for y coordinates. Default is [0, 2π]. """ errorif(domain[0] > domain[-1], msg="Got inverted y coordinate domain.") @@ -161,12 +119,12 @@ def _fourier_pts(M): # of trig poly. # answer: research shows doesn't really matter. @staticmethod - def _chebyshev_pts(N, lobatto, domain=(-1, 1)): + def _chebyshev_pts(N, lobatto, domain=(0, 2 * jnp.pi)): y = chebpts2(N) if lobatto else chebpts1(N) return map_domain(y, domain[0], domain[-1]) @staticmethod - def nodes(M, N, lobatto=False, domain=(-1, 1), **kwargs): + def nodes(M, N, lobatto=False, domain=(0, 2 * jnp.pi), **kwargs): """Tensor product grid of optimal collocation nodes for this basis. Parameters @@ -179,7 +137,7 @@ def nodes(M, N, lobatto=False, domain=(-1, 1), **kwargs): Whether to use the Gauss-Lobatto (Extrema-plus-Endpoint) or interior roots grid for Chebyshev points. domain : (float, float) - Domain for y coordinates. Default is [-1, 1]. + Domain for y coordinates. Default is [0, 2π]. Returns ------- @@ -234,7 +192,7 @@ def harmonics(self): """ c = _cheb_from_dct(self._c) - # Convert rfft to Nyquist trigonometric harmonics. + # convert rfft to Nyquist trigonometric harmonics is_even = (self.M % 2) == 0 # ∂ₓ = 0 coefficients a0 = jnp.real(c[..., 0, :])[..., jnp.newaxis, :] @@ -273,7 +231,7 @@ def compute_cheb(self, x): assert cheb.shape == (*self._c.shape[:-2], x.shape[-1], self.N) return cheb - def y_intersect(self, cheb, k=0, eps=_eps): + def intersect(self, cheb, k=0, eps=_eps): """Coordinates yᵢ such that f(x, yᵢ) = k(x). Parameters @@ -307,7 +265,7 @@ def y_intersect(self, cheb, k=0, eps=_eps): c = cheb[jnp.newaxis] if k.ndim > cheb.ndim else cheb c = c.at[..., 0].add(-k) # roots yᵢ of f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y) - k(x) - y = _chebroots(c) + y = chebroots(c) assert y.shape == (*c.shape[:-1], self.N - 1) y = _filter_distinct(y, sentinel=-2, eps=eps) @@ -316,11 +274,10 @@ def y_intersect(self, cheb, k=0, eps=_eps): y = jnp.where(is_intersect, jnp.real(y), 0) # ensure y is in domain of arcos # ∂f/∂y = ∑ₙ₌₀ᴺ⁻¹ aₙ(x) n Uₙ₋₁(y) # sign ∂f/∂y = sign ∑ₙ₌₁ᴺ⁻¹ aₙ(x) sin(n arcos y) - s = jnp.einsum( + s = jnp.linalg.vecdot( # TODO: Multipoint evaluation with FFT. # Chapter 10, https://doi.org/10.1017/CBO9781139856065. - "...n,...yn", - cheb, + cheb[..., jnp.newaxis, :], jnp.sin(jnp.arange(self.N) * jnp.arccos(y)[..., jnp.newaxis]), ) is_decreasing = s <= 0 @@ -387,10 +344,9 @@ def bounce_points( # Set outside mask to same value so integration is over set of measure zero. bp1 = jnp.where(mask, bp1, 0) bp2 = jnp.where(mask, bp2, 0) - # These typically have shape (num pitch, num rho, num wells). return bp1, bp2 - def interp_cheb_spline(self, z, cheb): + def interp_cheb(self, z, cheb): """Evaluate piecewise Chebyshev spline at coordinates z. 
The coordinates z ∈ ℝ are assumed isomorphic to (x, y) ∈ ℝ² @@ -401,7 +357,7 @@ def interp_cheb_spline(self, z, cheb): Parameters ---------- z : jnp.ndarray - Shape (*cheb.shape[:-2], num wells, num quadrature points). + Shape (*cheb.shape[:-2], z.shape[-1]). Isomorphic coordinates along field line [0, inf]. cheb: jnp.ndarray Shape (..., num cheb series, N). @@ -414,17 +370,17 @@ def interp_cheb_spline(self, z, cheb): Chebyshev basis evaluated at z. """ - # TODO: Multipoint evaluation with FFT. - # Chapter 10, https://doi.org/10.1017/CBO9781139856065. - x_idx, y = map(_flatten_matrix, self._isomorphism_2d(z)) + x_idx, y = self._isomorphism_2d(z) y = map_domain_to_disc(y, self.domain[0], self.domain[1]) cheb = jnp.moveaxis(cheb, source=-1, destination=0) cheb = jnp.take_along_axis(cheb, x_idx, axis=-1, mode="promise_in_bounds") - f = chebval(y, cheb, tensor=False).reshape(z.shape) + # TODO: Multipoint evaluation with FFT. + # Chapter 10, https://doi.org/10.1017/CBO9781139856065. + f = chebval(y, cheb, tensor=False) # TODO: Add below as unit test. # n = jnp.arange(self.N) # noqa: E800 # T = jnp.cos(n * jnp.arccos(y)[..., jnp.newaxis]) # noqa: E800 - # f = jnp.einsum("...n,n...", T, cheb).reshape(z.shape) # noqa: E800 + # f = jnp.einsum("...n,n...", T, cheb) # noqa: E800 return f def _isomorphism_1d(self, y): @@ -473,7 +429,43 @@ def _isomorphism_2d(self, z): return x_index, y_value -def bounce_integral(data, M, N, rho): - """WIP.""" - cheb_nodes = FourierChebyshevBasis.nodes(M, N, domain=(0, 2 * jnp.pi), rho=rho) - return cheb_nodes +def bounce_integral( + grid, + data, + L_lmn, + L_basis, + M, + N, + alpha, + pitch, + num_wells, + quad, + automorphism, + **kwargs, +): + """TODO.""" + raz = FourierChebyshevBasis.nodes(M, N, rho=grid.compress(data["rho"])) + rtz = map_clebsch_coords(raz, data["iota"], L_lmn, L_basis, **kwargs) + # Make θ(α, ζ) and B(α, ζ) splines. + theta = FourierChebyshevBasis(rtz[:, 1].reshape(grid.num_rho, M, N)) # noqa: F841 + B = FourierChebyshevBasis( + interp_rfft2( + xq=rtz[:, 1:].reshape(grid.num_rho, -1, 2), + f=data["|B|"].reshape(grid.num_rho, grid.num_theta, grid.num_zeta), + ).reshape(grid.num_rho, M, N), + ) + cheb = B.compute_cheb(alpha) + bp1, bp2 = B.bounce_points(*B.intersect(cheb, jnp.reciprocal(pitch)), num_wells) + + x, w = quad + assert x.ndim == w.ndim == 1 + if automorphism is not None: + auto, grad_auto = automorphism + w = w * grad_auto(x) + # Recall affine_bijection(auto(x), ζ_b₁, ζ_b₂) = ζ. 
+ x = auto(x) + + shape = (*bp1.shape, x.size) + # P, rho, num wells * num quad + Q_az = _flatten_matrix(map_domain(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis])) + B_quad = B.interp_cheb(Q_az, cheb).reshape(shape) # noqa: F841 diff --git a/desc/grid.py b/desc/grid.py index 2e2bf18950..c5c811b76d 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -715,9 +715,8 @@ def __init__( self._N = self.num_nodes errorif(len(kwargs), ValueError, f"Got unexpected kwargs {kwargs.keys()}") - @classmethod + @staticmethod def create_meshgrid( - cls, nodes, spacing=None, coordinates="rtz", @@ -790,7 +789,7 @@ def create_meshgrid( a.size, ) inverse_c_idx = jnp.tile(unique_c_idx, a.size * b.size) - return cls( + return Grid( nodes=nodes, spacing=spacing, weights=weights, diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 3610193bb6..c9978e9b32 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -874,13 +874,12 @@ def f(x, y): @pytest.mark.unit def test_fcb_interp(): """Test interpolation for this basis function.""" - domain = (0, 2 * np.pi) M, N = 1, 5 - xy0 = FourierChebyshevBasis.nodes(M, N, domain=domain) + xy0 = FourierChebyshevBasis.nodes(M, N) f0 = jnp.mean(xy0.reshape(M, N, 2), axis=-1) - fcb = FourierChebyshevBasis(f0, M, N, domain=domain) + fcb = FourierChebyshevBasis(f0, M, N) f1 = fcb.evaluate(1, fcb.N * 10) - xy1 = FourierChebyshevBasis.nodes(1, fcb.N * 10, domain=domain) + xy1 = FourierChebyshevBasis.nodes(1, fcb.N * 10) fig, ax = plt.subplots() ax.plot(xy0[:, 1], f0[0, :], linestyle="--") From 62219b3b98613c686ac8cf953f67a0095baf517a Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 14 Aug 2024 21:59:41 -0400 Subject: [PATCH 202/241] Adding tests part 1 --- desc/compute/_interp_utils.py | 321 +++++++-- .../{_quadrature_utils.py => _quad_utils.py} | 7 +- desc/compute/bounce_integral.py | 277 ++++---- desc/compute/fourier_bounce_integral.py | 641 ++++++++++++++---- desc/equilibrium/coords.py | 2 +- desc/equilibrium/equilibrium.py | 8 +- desc/grid.py | 3 +- desc/utils.py | 7 + tests/test_bounce_integral.py | 452 ++++++------ tests/test_interp_utils.py | 296 ++++++++ 10 files changed, 1451 insertions(+), 563 deletions(-) rename desc/compute/{_quadrature_utils.py => _quad_utils.py} (96%) create mode 100644 tests/test_interp_utils.py diff --git a/desc/compute/_interp_utils.py b/desc/compute/_interp_utils.py index 7cfa15f9b9..772829c33e 100644 --- a/desc/compute/_interp_utils.py +++ b/desc/compute/_interp_utils.py @@ -2,8 +2,115 @@ from functools import partial -from desc.backend import jnp, rfft, rfft2 +from orthax.chebyshev import chebvander + +from desc.backend import dct, jnp, rfft, rfft2, take +from desc.compute._quad_utils import bijection_from_disc from desc.compute.utils import safediv +from desc.utils import Index, errorif + + +# Y = [a, b] evaluate on grid -> y = [-1, 1] chebyshev points -> y = cos(z) +# evenly spaced z. +# So I find coefficients to chebyshev series T_n(y) = cos(n arcos(y)) = cos(n z). +# So evaluating my chebyshev series in y is same as evaluting cosine series in +# z = arcos(y). +# for y = inversemap[a, b]. +# Open questions is finding roots y using chebroots better or is finding roots z +# of trig poly. +# answer: research shows doesn't really matter. +# TODO: Transformation to make nodes uniform Boyd eq. 16.46 pg 336. +# Shouldn't really change locations of complex poles for us, so convergence +# rate will still be good. 
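The comment block above leans on the identity Tₙ(cos θ) = cos(nθ): sampling on the interior Chebyshev nodes turns Chebyshev interpolation into a type-II DCT. A minimal sketch of that identity, using SciPy's ``dct`` and NumPy's ``chebval`` rather than the DESC helpers (test function and resolution are arbitrary):

.. code-block:: python

    import numpy as np
    from numpy.polynomial.chebyshev import chebval
    from scipy.fft import dct

    N = 20
    # Interior Chebyshev nodes, ordered to match the DCT-II cosine basis.
    y = np.cos(np.pi * (2 * np.arange(N) + 1) / (2 * N))
    f = np.exp(y) * np.sin(3 * y)   # any smooth test function

    c = dct(f, type=2) / N          # DCT-II of the samples
    c[0] /= 2.0                     # DCT output -> Chebyshev series coefficients

    yq = np.linspace(-1, 1, 7)
    np.testing.assert_allclose(chebval(yq, c), np.exp(yq) * np.sin(3 * yq), atol=1e-9)

The ``interp_dct``/``idct_non_uniform`` pair added later in this file follows the same pattern, just with batched coefficients and evaluation at arbitrary points in [-1, 1].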
+def cheb_pts(N, lobatto=False, domain=(-1, 1)): + """Get ``N`` Chebyshev points mapped to given domain.""" + n = jnp.arange(N) + # These are the standard definitions of the Chebyshev points. + # Reference: Wikipedia or Boyd p. 498. These are the points demanded by + # Discrete Cosine Transformations to interpolate Chebyshev series because + # the cosine basis for the DCT is defined on [0, π]. These points differ + # from numpy's chebpts1 and chebpts2 in ordering. + if lobatto: + y = jnp.cos(jnp.pi * n / (N - 1)) + else: + y = jnp.cos(jnp.pi * (2 * n + 1) / (2 * N)) + return bijection_from_disc(y, domain[0], domain[-1]) + + +def fourier_pts(M): + """Get ``M`` Fourier points.""" + m = jnp.arange(1, M + 1) + return -jnp.pi + 2 * jnp.pi * m / M + + +def harmonic(a, M, axis=-1): + """Spectral coefficients of the Nyquist trigonometric interpolant. + + Parameters + ---------- + a : jnp.ndarray + Fourier coefficients ``a=rfft(f,norm="forward",axis=axis)``. + M : int + Spectral resolution of ``a``. + axis : int + Axis along which coefficients are stored. + + Returns + ------- + h : jnp.ndarray + Nyquist trigonometric interpolant coefficients. + Coefficients ordered along ``axis`` of size ``M`` to match ordering of + [1, cos(x), ..., cos(mx), sin(x), sin(2x), ..., sin(mx)] basis. + + """ + is_even = (M % 2) == 0 + # cos(mx) coefficients + an = 2.0 * ( + jnp.real(a) + .at[Index.get(0, axis, a.ndim)] + .divide(2.0) + .at[Index.get(-1, axis, a.ndim)] + .divide(1.0 + is_even) + ) + # sin(mx) coefficients + bn = -2.0 * take( + jnp.imag(a), + jnp.arange(1, a.shape[axis] - is_even), + axis, + unique_indices=True, + indices_are_sorted=True, + ) + h = jnp.concatenate([an, bn], axis=axis) + assert h.shape[axis] == M + return h + + +def harmonic_basis(x, M): + """Nyquist trigonometric interpolant basis evaluated at ``x``. + + Parameters + ---------- + x : jnp.ndarray + Points to evaluate. + M : int + Spectral resolution. + + Returns + ------- + basis : jnp.ndarray + Shape (*x.shape, M). + Basis evaluated at points ``x``. + Last axis ordered as [1, cos(x), ..., cos(mx), sin(x), sin(2x), ..., sin(mx)]. + + """ + m = jnp.fft.rfftfreq(M, d=1 / M) + mx = m * x[..., jnp.newaxis] + basis = jnp.concatenate( + [jnp.cos(mx), jnp.sin(mx[..., 1 : m.size - ((M % 2) == 0)])], axis=-1 + ) + assert basis.shape == (*x.shape, M) + return basis + # TODO: For inverse transforms, do multipoint evaluation with FFT. # FFT cost is 𝒪(M N log[M N]) while direct evaluation is 𝒪(M² N²). @@ -11,115 +118,230 @@ # Likely better than using NFFT to evaluate f(xq) given fourier # coefficients because evaluation points are quadratically packed near edges as # required by quadrature to avoid runge. NFFT is only approximation anyway. +# https://github.com/flatironinstitute/jax-finufft. -def interp_rfft(xq, f): +def interp_rfft(xq, f, axis=-1): """Interpolate real-valued ``f`` to ``xq`` with FFT. Parameters ---------- xq : jnp.ndarray - Shape (..., xq.shape[-1]). - Query points where interpolation is desired. + Real query points where interpolation is desired. + Shape of ``xq`` must broadcast with ``f`` except along ``axis``. f : jnp.ndarray - Shape (..., f.shape[-1]). - Function values on 2π periodic grid to interpolate. + Real function values on uniform 2π periodic grid to interpolate. + axis : int + Axis along which to transform. Returns ------- fq : jnp.ndarray - Shape (..., xq.shape[-1]) - Function value at query points. + Real function value at query points. 
""" - assert xq.ndim == f.ndim >= 1 - return irfft_non_uniform(xq, rfft(f, norm="forward"), f.shape[-1]) + assert f.ndim >= 1 + a = rfft(f, axis=axis, norm="forward") + fq = irfft_non_uniform(xq, a, f.shape[axis], axis) + return fq -def irfft_non_uniform(xq, a, M): +def irfft_non_uniform(xq, a, n, axis=-1): """Evaluate Fourier coefficients ``a`` at ``xq`` ∈ [0, 2π] periodic. Parameters ---------- xq : jnp.ndarray - Shape (..., xq.shape[-1]). - Query points where interpolation is desired. - Dimension should match ``a``, though size of last axis may differ. + Real query points where interpolation is desired. + Shape of ``xq`` must broadcast with ``a`` except along ``axis``. a : jnp.ndarray - Fourier coefficients ``a = rfft(f, norm="forward")``. - M : int + Fourier coefficients ``a=rfft(f,axis=axis,norm="forward")``. + n : int Spectral resolution of ``a``. + axis : int + Axis along which to transform. Returns ------- fq : jnp.ndarray - Shape (..., xq.shape[-1]) - Function value at query points. + Real function value at query points. """ - a = a.at[..., 0].divide(2.0).at[..., -1].divide(1.0 + ((M % 2) == 0)) - m = jnp.fft.rfftfreq(M, d=1 / M) - basis = jnp.exp(1j * m * xq[..., jnp.newaxis]) - fq = 2 * jnp.real(jnp.linalg.vecdot(jnp.conj(basis), a)) + assert a.ndim >= 1 + a = ( + (2.0 * a) + .at[Index.get(0, axis, a.ndim)] + .divide(2.0) + .at[Index.get(-1, axis, a.ndim)] + .divide(1.0 + ((n % 2) == 0)) + ) + a = jnp.swapaxes(a[..., jnp.newaxis], axis % a.ndim, -1) + m = jnp.fft.rfftfreq(n, d=1 / n) + basis = jnp.exp(-1j * m * xq[..., jnp.newaxis]) + fq = jnp.real(jnp.linalg.vecdot(basis, a)) + # ℜ〈 basis, a 〉= cos(m xq)⋅ℜ(a) − sin(m xq)⋅ℑ(a) return fq -def interp_rfft2(xq, f): +def interp_rfft2(xq, f, axes=(-2, -1)): """Interpolate real-valued ``f`` to ``xq`` with FFT. Parameters ---------- xq : jnp.ndarray - Shape (..., xq.shape[-2], 2). - Query points where interpolation is desired. + Shape (..., 2). + Real query points where interpolation is desired. + Last axis must hold coordinates for a given point. + Shape of ``xq`` must broadcast ``f`` except along ``axes``. f : jnp.ndarray Shape (..., f.shape[-2], f.shape[-1]). - Function values on (2π × 2π) periodic tensor-product grid to interpolate. + Real function values on uniform (2π × 2π) periodic tensor-product grid to + interpolate. + axes : tuple[int, int] + Axes along which to transform. Returns ------- fq : jnp.ndarray - Shape (..., xq.shape[-2]). - Function value at query points. + Real function value at query points. """ - assert xq.ndim == f.ndim >= 2 - return irfft2_non_uniform(xq, rfft2(f, norm="forward"), *f.shape[-2:]) + assert xq.shape[-1] == 2 + assert f.ndim >= 2 + a = rfft2(f, axes=axes, norm="forward") + fq = irfft2_non_uniform(xq, a, f.shape[axes[0]], f.shape[axes[-1]], axes) + return fq -def irfft2_non_uniform(xq, a, M, N): +def irfft2_non_uniform(xq, a, M, N, axes=(-2, -1)): """Evaluate Fourier coefficients ``a`` at ``xq`` ∈ [0, 2π]² periodic. Parameters ---------- xq : jnp.ndarray - Shape (..., xq.shape[-2], 2). - Query points where interpolation is desired. + Shape (..., 2). + Real query points where interpolation is desired. + Last axis must hold coordinates for a given point. + Shape of ``xq`` must broadcast ``a`` except along ``axes``. a : jnp.ndarray - Fourier coefficients ``a = rfft2(f, norm="forward")``. + Shape (..., a.shape[-2], a.shape[-1]). + Fourier coefficients ``a=rfft2(f,axes=axes,norm="forward")``. M : int - Spectral resolution of ``a`` along second to last axis. 
+ Spectral resolution of ``a`` along ``axes[0]``. N : int - Spectral resolution of ``a`` along last axis. + Spectral resolution of ``a`` along ``axes[-1]``. + axes : tuple[int, int] + Axes along which to transform. Returns ------- fq : jnp.ndarray - Shape (..., xq.shape[-2]). - Function value at query points. + Real function value at query points. """ - a = a.at[..., 0].divide(2.0).at[..., -1].divide(1.0 + ((N % 2) == 0)) + errorif(axes != (-2, -1), NotImplementedError) # need to swap axes before reshape + assert xq.shape[-1] == 2 + assert a.ndim >= 2 + a = ( + (2.0 * a) + .at[Index.get(0, axes[-1], a.ndim)] + .divide(2.0) + .at[Index.get(-1, axes[-1], a.ndim)] + .divide(1.0 + ((N % 2) == 0)) + ).reshape(*a.shape[:-2], 1, -1) + m = jnp.fft.fftfreq(M, d=1 / M) n = jnp.fft.rfftfreq(N, d=1 / N) basis = jnp.exp( - 1j + -1j * ( (m * xq[..., 0, jnp.newaxis])[..., jnp.newaxis] + (n * xq[..., -1, jnp.newaxis])[..., jnp.newaxis, :] ) - ) - fq = 2 * jnp.real(jnp.einsum("...mn,...mn", basis, a)) + ).reshape(*xq.shape[:-1], m.size * n.size) + + fq = jnp.real(jnp.linalg.vecdot(basis, a)) + return fq + + +def cheb_from_dct(a, axis=-1): + """Get Chebyshev coefficients from DCT. + + Parameters + ---------- + a : jnp.ndarray + DCT coefficients ``a=dct(f,type=2,axis=axis,norm="forward")``. + axis : int + Axis along which to transform. + + Returns + ------- + cheb : jnp.ndarray + Chebyshev coefficients along ``axis``. + + """ + # See link below for DCT definition. + # docs.scipy.org/doc/scipy/reference/generated/scipy.fft.dct.html#scipy.fft.dct + cheb = a.copy().at[Index.get(0, axis, a.ndim)].divide(2.0) + return cheb + + +def interp_dct(xq, f, lobatto=False, axis=-1): + """Interpolate ``f`` to ``xq`` with DCT. + + Parameters + ---------- + xq : jnp.ndarray + Real query points where interpolation is desired. + Shape of ``xq`` must broadcast with ``f`` except along ``axis``. + f : jnp.ndarray + Real function values on Chebyshev points to interpolate. + lobatto : bool + Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) + or interior roots grid for Chebyshev points. + axis : int + Axis along which to transform. + + Returns + ------- + fq : jnp.ndarray + Real function value at query points. + + """ + errorif(lobatto, NotImplementedError) + assert f.ndim >= 1 + lobatto = bool(lobatto) + a = dct(f, type=2 - lobatto, axis=axis) / (f.shape[axis] - lobatto) + fq = idct_non_uniform(xq, a, f.shape[axis], axis) + return fq + + +def idct_non_uniform(xq, a, n, axis=-1): + """Evaluate DCT coefficients ``a`` at ``xq`` ∈ [-1, 1]. + + Parameters + ---------- + xq : jnp.ndarray + Real query points where interpolation is desired. + Shape of ``xq`` must broadcast with ``a`` except along ``axis``. + a : jnp.ndarray + DCT coefficients. + n : int + Spectral resolution of ``a``. + axis : int + Axis along which to transform. + + Returns + ------- + fq : jnp.ndarray + Real function value at query points. 
+ + """ + assert a.ndim >= 1 + a = cheb_from_dct(a, axis) + a = jnp.swapaxes(a[..., jnp.newaxis], axis % a.ndim, -1) + basis = chebvander(xq, n - 1) + fq = jnp.linalg.vecdot(basis, a) return fq @@ -135,7 +357,7 @@ def _filter_distinct(r, sentinel, eps): return r -def _sentinel_append(r, sentinel, num=1): +def _concat_sentinel(r, sentinel, num=1): """Concat ``sentinel`` ``num`` times to ``r`` on last axis.""" sent = jnp.broadcast_to(sentinel, (*r.shape[:-1], num)) return jnp.append(r, sent, axis=-1) @@ -192,7 +414,7 @@ def reducible(Q, R, b): A = -jnp.sign(R) * (jnp.abs(R) + jnp.sqrt(jnp.abs(R**2 - Q**3))) ** (1 / 3) B = safediv(Q, A) r1 = (A + B) - b / 3 - return _sentinel_append(r1[..., jnp.newaxis], sentinel, num=2) + return _concat_sentinel(r1[..., jnp.newaxis], sentinel, num=2) def root(b, c, d): b = safediv(b, a) @@ -210,7 +432,7 @@ def root(b, c, d): return jnp.where( # Tests catch failure here if eps < 1e-12 for 64 bit jax. jnp.expand_dims(jnp.abs(a) <= eps, axis=-1), - _sentinel_append(_root_quadratic(b, c, d, sentinel, eps, distinct), sentinel), + _concat_sentinel(_root_quadratic(b, c, d, sentinel, eps, distinct), sentinel), root(b, c, d), ) @@ -218,7 +440,7 @@ def root(b, c, d): _roots = jnp.vectorize(partial(jnp.roots, strip_zeros=False), signature="(m)->(n)") -def _poly_root( +def poly_root( c, k=0, a_min=None, @@ -267,11 +489,14 @@ def _poly_root( The roots of the polynomial, iterated over the last axis. """ - is_real = not (jnp.iscomplexobj(c) or jnp.iscomplexobj(k)) get_only_real_roots = not (a_min is None and a_max is None) func = {2: _root_linear, 3: _root_quadratic, 4: _root_cubic} - if c.shape[0] in func and is_real and get_only_real_roots: + if ( + c.shape[0] in func + and get_only_real_roots + and not (jnp.iscomplexobj(c) or jnp.iscomplexobj(k)) + ): # Compute from analytic formula to avoid the issue of complex roots with small # imaginary parts and to avoid nan in gradient. r = func[c.shape[0]](*c[:-1], c[-1] - k, sentinel, eps, distinct) diff --git a/desc/compute/_quadrature_utils.py b/desc/compute/_quad_utils.py similarity index 96% rename from desc/compute/_quadrature_utils.py rename to desc/compute/_quad_utils.py index b8055b2e34..347207ba34 100644 --- a/desc/compute/_quadrature_utils.py +++ b/desc/compute/_quad_utils.py @@ -6,19 +6,19 @@ from desc.utils import errorif -def affine_bijection_to_disc(x, a, b): +def bijection_to_disc(x, a, b): """[a, b] ∋ x ↦ y ∈ [−1, 1].""" y = 2 * (x - a) / (b - a) - 1 return y -def affine_bijection(x, a, b): +def bijection_from_disc(x, a, b): """[−1, 1] ∋ x ↦ y ∈ [a, b].""" y = (x + 1) / 2 * (b - a) + a return y -def grad_affine_bijection(a, b): +def grad_bijection_from_disc(a, b): """Gradient of affine bijection.""" dy_dx = (b - a) / 2 return dy_dx @@ -159,6 +159,7 @@ def leggausslob(deg): # Designate two degrees for endpoints. 
deg = int(deg) + 2 + # Golub-Welsh algorithm for eigenvalues of orthogonal polynomials n = jnp.arange(2, deg - 1) x = eigh_tridiagonal( jnp.zeros(deg - 2), diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 5294b67f39..f2c2092581 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -9,12 +9,12 @@ from orthax.legendre import leggauss from desc.backend import flatnonzero, imap, jnp, put -from desc.compute._interp_utils import _poly_root -from desc.compute._quadrature_utils import ( - affine_bijection, +from desc.compute._interp_utils import poly_root +from desc.compute._quad_utils import ( automorphism_sin, - grad_affine_bijection, + bijection_from_disc, grad_automorphism_sin, + grad_bijection_from_disc, ) from desc.compute.utils import take_mask from desc.utils import errorif, setdefault, warnif @@ -334,7 +334,7 @@ def _fix_inversion(is_intersect, B_z_ra): def bounce_points( - pitch, knots, B_c, B_z_ra_c, num_wells=None, check=False, plot=True, **kwargs + pitch, knots, B_c, B_z_ra_c, num_well=None, check=False, plot=True, **kwargs ): """Compute the bounce points given spline of |B| and pitch λ. @@ -361,16 +361,16 @@ def bounce_points( First axis enumerates the coefficients of power series. Second axis enumerates the splines along the field lines. Last axis enumerates the polynomials that compose the spline along a particular field line. - num_wells : int or None + num_well : int or None If not specified, then all bounce points are returned in an array whose last axis has size ``(knots.size - 1) * (B_c.shape[0] - 1)``. If there were less than that many wells detected along a field line, then the last axis of the returned arrays, which enumerates bounce points for a particular field line and pitch, is padded with zero. - Specify to return the first ``num_wells`` pairs of bounce points for each - pitch along each field line. This is useful if ``num_wells`` tightly - bounds the actual number of wells. To obtain a good choice for ``num_wells``, + Specify to return the first ``num_well`` pairs of bounce points for each + pitch along each field line. This is useful if ``num_well`` tightly + bounds the actual number of wells. To obtain a good choice for ``num_well``, plot the field line with all the bounce points identified by calling this function with ``check=True``. As a reference, there are typically <= 5 wells per toroidal transit. @@ -382,7 +382,7 @@ def bounce_points( Returns ------- bp1, bp2 : (jnp.ndarray, jnp.ndarray) - Shape (P, S, num_wells). + Shape (P, S, num_well). The field line-following coordinates of bounce points for a given pitch along a field line. The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, respectively, for the bounce integrals. @@ -391,9 +391,9 @@ def bounce_points( B_c, B_z_ra_c, pitch = _check_shape(knots, B_c, B_z_ra_c, pitch) P, S, N, degree = pitch.shape[0], B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 # Intersection points in local power basis. - intersect = _poly_root( + intersect = poly_root( c=B_c, - k=jnp.reciprocal(pitch)[..., jnp.newaxis], + k=(1 / pitch)[..., jnp.newaxis], a_min=jnp.array([0.0]), a_max=jnp.diff(knots), sort=True, @@ -416,8 +416,8 @@ def bounce_points( intersect = (intersect + knots[:-1, jnp.newaxis]).reshape(P, S, -1) # New versions of jax only like static sentinels. 
sentinel = -10000000.0 # knots[0] - 1 - bp1 = take_mask(intersect, is_bp1, size=num_wells, fill_value=sentinel) - bp2 = take_mask(intersect, is_bp2, size=num_wells, fill_value=sentinel) + bp1 = take_mask(intersect, is_bp1, size=num_well, fill_value=sentinel) + bp2 = take_mask(intersect, is_bp2, size=num_well, fill_value=sentinel) if check: _check_bounce_points(bp1, bp2, sentinel, pitch, knots, B_c, plot, **kwargs) @@ -480,7 +480,7 @@ def get_pitch(min_B, max_B, num, relative_shift=1e-6): # extrema. Shift values slightly to resolve this issue. min_B = (1 + relative_shift) * min_B max_B = (1 - relative_shift) * max_B - pitch = _composite_linspace(jnp.reciprocal(jnp.stack([max_B, min_B])), num) + pitch = _composite_linspace(1 / jnp.stack([max_B, min_B]), num) assert pitch.shape == (num + 2, *pitch.shape[1:]) return pitch @@ -516,7 +516,7 @@ def _get_extrema(knots, B_c, B_z_ra_c, sentinel=jnp.nan): """ B_c, B_z_ra_c, _ = _check_shape(knots, B_c, B_z_ra_c) S, N, degree = B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 - extrema = _poly_root( + extrema = poly_root( c=B_z_ra_c, a_min=jnp.array([0.0]), a_max=jnp.diff(knots), sentinel=sentinel ) assert extrema.shape == (S, N, degree - 1) @@ -551,7 +551,7 @@ def _plot(Z, V, title_id=""): plt.show() -def _check_interp(Z, f, B_sup_z, B, B_z_ra, result, plot): +def _check_interp(Z, f, b_sup_z, B, B_z_ra, result, plot): """Check for floating point errors. Parameters @@ -560,7 +560,7 @@ def _check_interp(Z, f, B_sup_z, B, B_z_ra, result, plot): Quadrature points at field line-following ζ coordinates. f : list of jnp.ndarray Arguments to the integrand interpolated to Z. - B_sup_z : jnp.ndarray + b_sup_z : jnp.ndarray Contravariant field-line following toroidal component of magnetic field, interpolated to Z. B : jnp.ndarray @@ -581,14 +581,14 @@ def _check_interp(Z, f, B_sup_z, B, B_z_ra, result, plot): msg = "Interpolation failed." assert jnp.isfinite(B_z_ra).all(), msg - assert goal == jnp.sum(marked & jnp.isfinite(jnp.sum(B_sup_z, axis=-1))), msg + assert goal == jnp.sum(marked & jnp.isfinite(jnp.sum(b_sup_z, axis=-1))), msg assert goal == jnp.sum(marked & jnp.isfinite(jnp.sum(B, axis=-1))), msg for f_i in f: assert goal == jnp.sum(marked & jnp.isfinite(jnp.sum(f_i, axis=-1))), msg msg = "|B| has vanished, violating the hairy ball theorem." assert not jnp.isclose(B, 0).any(), msg - assert not jnp.isclose(B_sup_z, 0).any(), msg + assert not jnp.isclose(b_sup_z, 0).any(), msg # Number of those integrals that were computed. actual = jnp.sum(marked & jnp.isfinite(result)) @@ -598,7 +598,7 @@ def _check_interp(Z, f, B_sup_z, B, B_z_ra, result, plot): ) if plot: _plot(Z, B, title_id=r"$\vert B \vert$") - _plot(Z, B_sup_z, title_id=r"$ (B/\vert B \vert) \cdot e^{\zeta}$") + _plot(Z, b_sup_z, title_id=r"$ (B/\vert B \vert) \cdot e^{\zeta}$") _interp1d_vec = jnp.vectorize( @@ -606,17 +606,18 @@ def _check_interp(Z, f, B_sup_z, B, B_z_ra, result, plot): ) -@partial(jnp.vectorize, signature="(m),(n),(n),(n)->(m)", excluded={"method"}) -def _interp1d_vec_with_df(xq, x, f, fx, method): - return interp1d(xq, x, f, method, fx=fx) +@partial(jnp.vectorize, signature="(m),(n),(n),(n)->(m)") +def _interp1d_vec_with_df(xq, x, f, fx): + return interp1d(xq, x, f, method="cubic", fx=fx) def _interpolate_and_integrate( - Z, + Q, w, integrand, f, B_sup_z, + B_sup_z_ra, B, B_z_ra, pitch, @@ -625,42 +626,44 @@ def _interpolate_and_integrate( check=False, plot=False, ): - """Interpolate given functions to points ``Z`` and perform quadrature. 
+ """Interpolate given functions to points ``Q`` and perform quadrature. Parameters ---------- - Z : jnp.ndarray - Shape (P, S, Z.shape[2], w.size). + Q : jnp.ndarray + Shape (P, S, Q.shape[2], w.size). Quadrature points at field line-following ζ coordinates. Returns ------- result : jnp.ndarray - Shape Z.shape[:-1]. + Shape Q.shape[:-1]. Quadrature for every pitch along every field line. """ assert pitch.ndim == 2 assert w.ndim == knots.ndim == 1 - assert 3 <= Z.ndim <= 4 and Z.shape[:2] == (pitch.shape[0], B.shape[0]) - assert Z.shape[-1] == w.size + assert 3 <= Q.ndim <= 4 and Q.shape[:2] == (pitch.shape[0], B.shape[0]) + assert Q.shape[-1] == w.size assert knots.size == B.shape[-1] - assert B_sup_z.shape == B.shape == B_z_ra.shape - pitch = jnp.expand_dims(pitch, axis=(2, 3) if (Z.ndim == 4) else 2) - shape = Z.shape - Z = Z.reshape(Z.shape[0], Z.shape[1], -1) + assert B_sup_z.shape == B_sup_z_ra.shape == B.shape == B_z_ra.shape + + pitch = jnp.expand_dims(pitch, axis=(2, 3) if (Q.ndim == 4) else 2) + shape = Q.shape + Q = Q.reshape(Q.shape[0], Q.shape[1], -1) + b_sup_z = _interp1d_vec_with_df( + Q, knots, B_sup_z / B, B_sup_z_ra / B - B_sup_z * B_z_ra / B**2 + ).reshape(shape) + B = _interp1d_vec_with_df(Q, knots, B, B_z_ra).reshape(shape) # Spline the integrand so that we can evaluate it at quadrature points without # expensive coordinate mappings and root finding. Spline each function separately so # that the singularity near the bounce points can be captured more accurately than # can be by any polynomial. - f = [_interp1d_vec(Z, knots, f_i, method=method).reshape(shape) for f_i in f] - # TODO: Pass in derivative and use cubic method. - b_sup_z = _interp1d_vec(Z, knots, B_sup_z / B, method=method).reshape(shape) - B = _interp1d_vec_with_df(Z, knots, B, B_z_ra, method="cubic").reshape(shape) + f = [_interp1d_vec(Q, knots, f_i, method=method).reshape(shape) for f_i in f] result = jnp.dot(integrand(*f, B=B, pitch=pitch) / b_sup_z, w) if check: - _check_interp(Z.reshape(shape), f, b_sup_z, B, B_z_ra, result, plot) + _check_interp(Q.reshape(shape), f, b_sup_z, B, B_z_ra, result, plot) return result @@ -673,6 +676,7 @@ def _bounce_quadrature( integrand, f, B_sup_z, + B_sup_z_ra, B, B_z_ra, pitch, @@ -686,12 +690,12 @@ def _bounce_quadrature( Parameters ---------- bp1 : jnp.ndarray - Shape (P, S, bp1.shape[-1]). + Shape (P, S, num_well). The field line-following ζ coordinates of bounce points for a given pitch along a field line. The pairs ``bp1[i,j,k]`` and ``bp2[i,j,k]`` form left and right integration boundaries, respectively, for the bounce integrals. bp2 : jnp.ndarray - Shape (P, S, bp1.shape[-1]). + Shape (P, S, num_well). The field line-following ζ coordinates of bounce points for a given pitch along a field line. The pairs ``bp1[i,j,k]`` and ``bp2[i,j,k]`` form left and right integration boundaries, respectively, for the bounce integrals. @@ -714,11 +718,15 @@ def _bounce_quadrature( B_sup_z : jnp.ndarray Shape (S, knots.size) or (S * knots.size). Contravariant field-line following toroidal component of magnetic field. - B : jnp.ndarray + B_sup_z_ra : jnp.ndarray Shape (S, knots.size) or (S * knots.size). + Contravariant field-line following toroidal component of magnetic field, + derivative with respect to field-line following coordinate. + B : jnp.ndarray + Shape (S, knots.size). Norm of magnetic field. B_z_ra : jnp.ndarray - Shape (S, knots.size) or (S * knots.size). + Shape (S, knots.size). Norm of magnetic field, derivative with respect to field-line following coordinate. 
pitch : jnp.ndarray @@ -758,11 +766,12 @@ def _bounce_quadrature( # Integrate and complete the change of variable. if batch: result = _interpolate_and_integrate( - affine_bijection(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]), + bijection_from_disc(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]), w, integrand, f, B_sup_z, + B_sup_z_ra, B, B_z_ra, pitch, @@ -775,14 +784,16 @@ def _bounce_quadrature( else: f = list(f) + # TODO: Use batched vmap. def loop(bp): bp1, bp2 = bp return None, _interpolate_and_integrate( - affine_bijection(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]), + bijection_from_disc(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]), w, integrand, f, B_sup_z, + B_sup_z_ra, B, B_z_ra, pitch, @@ -798,20 +809,18 @@ def loop(bp): destination=-1, ) - result = result * grad_affine_bijection(bp1, bp2) + result = result * grad_bijection_from_disc(bp1, bp2) assert result.shape == (pitch.shape[0], S, bp1.shape[-1]) return result def bounce_integral( - B_sup_z, - B, - B_z_ra, + data, knots, quad=leggauss(21), automorphism=(automorphism_sin, grad_automorphism_sin), - B_ref=1, - L_ref=1, + B_ref=1.0, + L_ref=1.0, check=False, plot=False, **kwargs, @@ -833,35 +842,22 @@ def bounce_integral( Notes ----- - The quantities ``B_sup_z``, ``B``, ``B_z_ra``, and those in ``f`` supplied to the - returned method must be separable into data evaluated along particular field lines + The quantities in ``data`` and those in ``f`` supplied to the returned method + must be separable into data evaluated along particular field lines via ``.reshape(S,knots.size)``. One way to satisfy this is to compute stuff on the grid returned from the method ``desc.equilibrium.coords.get_rtz_grid``. See ``tests.test_bounce_integral.test_bounce_integral_checks`` for example use. Parameters ---------- - B_sup_z : jnp.ndarray - Shape (S, knots.size) or (S * knots.size). - Contravariant field-line following toroidal component of magnetic field. - B^ζ(ρ, α, ζ) is specified by ``B_sup_z[(ρ,α),ζ]``, where in the latter the - labels (ρ,α) are interpreted as the index into the first axis that corresponds - to that field line. - B : jnp.ndarray - Shape (S, knots.size) or (S * knots.size). - Norm of magnetic field. |B|(ρ, α, ζ) is specified by ``B[(ρ,α),ζ]``, where in - the latter the labels (ρ,α) are interpreted as the index into the first axis - that corresponds to that field line. - B_z_ra : jnp.ndarray - Shape (S, knots.size) or (S * knots.size). - Norm of magnetic field, derivative with respect to field-line following - coordinate. (∂|B|/∂ζ)|ρ,α(ρ, α, ζ) is specified by ``B_z_ra[(ρ,α),ζ]``, where in - the latter the labels (ρ,α) are interpreted as the index into the first axis - that corresponds to that field line. + data : dict of jnp.ndarray + Data evaluated on grid. + Shape (S * knots.size, ) or (S, knots.size). + Should contain ``B^zeta``, ``B^zeta_z|r,a``, ``|B|``, and ``|B|_z|r,a``. knots : jnp.ndarray Shape (knots.size, ). - Field line following coordinate values where ``B_sup_z``, ``B``, ``B_z_ra``, and - those in ``f`` supplied to the returned method were evaluated. Must be strictly + Field line following coordinate values where arrays in ``data`` and ``f`` + supplied to the returned method were evaluated. Must be strictly increasing. These knots are used to compute a spline of |B| and interpolate the integrand. A good reference density is 100 knots per toroidal transit. 
quad : (jnp.ndarray, jnp.ndarray) @@ -905,7 +901,7 @@ def bounce_integral( """ warnif( - check and kwargs.pop("warn", True) and jnp.any(jnp.sign(B_sup_z) <= 0), + check and kwargs.pop("warn", True) and jnp.any(data["B^zeta"] <= 0), msg="(∂ℓ/∂ζ)|ρ,a > 0 is required. Enforcing positive B^ζ.", ) # Strictly increasing zeta knots enforces dζ > 0. @@ -913,14 +909,17 @@ def bounce_integral( # This is equivalent to changing the sign of ∇ζ (or [∂ℓ/∂ζ]|ρ,a). # Recall dζ = ∇ζ⋅dR, implying 1 = ∇ζ⋅(e_ζ|ρ,a). Hence, a sign change in ∇ζ # requires the same sign change in e_ζ|ρ,a to retain the metric identity. - B_sup_z = jnp.abs(B_sup_z) * L_ref / B_ref - B = B / B_ref - B_z_ra = B_z_ra / B_ref # This is already the correct sign. - # group data by field line - B_sup_z, B, B_z_ra = (f.reshape(-1, knots.size) for f in [B_sup_z, B, B_z_ra]) + B_sup_z = jnp.abs(data["B^zeta"]).reshape(-1, knots.size) * L_ref / B_ref + B_sup_z_ra = ( + (data["B^zeta_z|r,a"] * jnp.sign(data["B^zeta"])).reshape(-1, knots.size) + * L_ref + / B_ref + ) + B = data["|B|"].reshape(-1, knots.size) / B_ref + # This is already the correct sign. + B_z_ra = data["|B|_z|r,a"].reshape(-1, knots.size) / B_ref - # Compute splines. - # Interpax interpolation requires strictly increasing knots. + # Compute local splines. B_c = CubicHermiteSpline(knots, B, B_z_ra, axis=-1, check=check).c B_c = jnp.moveaxis(B_c, source=1, destination=-1) B_z_ra_c = _poly_der(B_c) @@ -942,10 +941,10 @@ def bounce_integrate( integrand, f, pitch, + weight=None, + num_well=None, method="akima", batch=True, - num_wells=None, - weight=None, ): """Bounce integrate ∫ f(ℓ) dℓ. @@ -959,7 +958,7 @@ def bounce_integrate( bounce integral of ``integrand(*f,B=B,pitch=pitch)``. f : list of jnp.ndarray Shape (S, knots.size) or (S * knots.size). - Arguments to the callable ``integrand``. These should be the scalar-valued + Arguments to the callable ``integrand``. These should be real scalar-valued functions in the bounce integrand evaluated on the DESC grid. pitch : jnp.ndarray Shape (P, S). @@ -967,41 +966,41 @@ def bounce_integrate( specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are interpreted as the index into the last axis that corresponds to that field line. If two-dimensional, the first axis is the batch axis. - method : str - Method of interpolation for functions contained in ``f``. - See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. - Default is akima spline. - batch : bool - Whether to perform computation in a batched manner. Default is true. - num_wells : int or None + weight : jnp.ndarray + Shape (S, knots.size) or (S * knots.size). + If supplied, the bounce integral labeled by well j is weighted such that + the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` + interpolated to the deepest point in the magnetic well. + num_well : int or None If not specified, then all bounce integrals are returned in an array whose - last axis has size ``(knots.size - 1) * degree``. If there + last axis has size ``(knots.size-1)*degree``. If there were less than that many wells detected along a field line, then the last axis of the returned array, which enumerates bounce integrals for a particular field line and pitch, is padded with zero. - Specify to return the bounce integrals between the first ``num_wells`` - wells for each pitch along each field line. This is useful if ``num_wells`` + Specify to return the bounce integrals between the first ``num_well`` + wells for each pitch along each field line. 
This is useful if ``num_well`` tightly bounds the actual number of wells. To obtain a good - choice for ``num_wells``, plot the field line with all the bounce points + choice for ``num_well``, plot the field line with all the bounce points identified. This will be done automatically if the ``bounce_integral`` function is called with ``check=True`` and ``plot=True``. As a reference, there are typically <= 5 wells per toroidal transit. - weight : jnp.ndarray - Shape (S, knots.size) or (S * knots.size). - If supplied, the bounce integral labeled by well j is weighted such that - the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` - evaluated at the deepest point in the magnetic well. + method : str + Method of interpolation for functions contained in ``f``. + See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. + Default is akima spline. + batch : bool + Whether to perform computation in a batched manner. Default is true. Returns ------- result : jnp.ndarray - Shape (P, S, num_wells). + Shape (P, S, num_well). First axis enumerates pitch values. Second axis enumerates the field lines. Last axis enumerates the bounce integrals. """ - bp1, bp2 = bounce_points(pitch, knots, B_c, B_z_ra_c, num_wells, check, plot) + bp1, bp2 = bounce_points(pitch, knots, B_c, B_z_ra_c, num_well, check, plot) result = _bounce_quadrature( bp1, bp2, @@ -1010,48 +1009,78 @@ def bounce_integrate( integrand, f, B_sup_z, + B_sup_z_ra, B, B_z_ra, pitch, knots, method, - batch=batch, - check=check, + batch, + check, ) if weight is not None: - result *= _compute_at_deepest( - bp1, bp2, knots, B_c, B_z_ra_c, weight.reshape(-1, knots.size), method + result *= _interp_to_argmin_B_soft( + weight, bp1, bp2, knots, B_c, B_z_ra_c, method ) - assert result.shape[-1] == setdefault(num_wells, (knots.size - 1) * degree) + assert result.shape[-1] == setdefault(num_well, (knots.size - 1) * degree) return result return bounce_integrate, spline -def _compute_at_deepest(bp1, bp2, knots, B_c, B_z_ra_c, f, method, beta=-50): +def _interp_to_argmin_B_soft(f, bp1, bp2, knots, B_c, B_z_ra_c, method, beta=-50): """Compute ``f`` at deepest point in the magnetic well. - Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A = argmin_E |B|(ζ). Returns f_min = mean_A f. + Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A = argmin_E |B|(ζ). Returns mean_A f(ζ). Parameters ---------- beta : float - More negative gives exponentially better approximation to f_min at the - expense of sharper gradients. + More negative gives exponentially better approximation at the + expense of noisier gradients. 
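The selection controlled by ``beta`` is a smooth surrogate for a hard argmin: with ``beta`` negative, a softmax of the normalized |B| values at the candidate extrema concentrates its weight on the deepest point of the well while remaining differentiable. A simplified standalone sketch of the idea (made-up numbers; the actual implementation also masks extrema outside the well and interpolates ``f`` to them):

.. code-block:: python

    import numpy as np
    from scipy.special import softmax

    beta = -50
    B_extrema = np.array([1.1, 0.7, 2.3])     # |B| at candidate extrema inside a well
    f_extrema = np.array([10.0, 20.0, 30.0])  # f interpolated to those extrema

    weights = softmax(beta * B_extrema / B_extrema.mean())
    soft_argmin = np.dot(weights, f_extrema)  # ~ f at the deepest point, where |B| = 0.7
    np.testing.assert_allclose(soft_argmin, 20.0, rtol=1e-5)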
""" - extrema, B_extrema = _get_extrema(knots, B_c, B_z_ra_c, sentinel=0) - P, S, num_wells = bp1.shape - assert extrema.shape == B_extrema.shape == (S, extrema.shape[-1]) - B = jnp.where( - (bp1[..., jnp.newaxis] < extrema[:, jnp.newaxis]) - & (extrema[:, jnp.newaxis] < bp2[..., jnp.newaxis]), - (B_extrema / jnp.mean(B_extrema, axis=-1, keepdims=True))[:, jnp.newaxis], - 100, # 100 >> max(|B|) / mean(|B|) + ext, B = _get_extrema(knots, B_c, B_z_ra_c, sentinel=0) + assert ext.shape[0] == B.shape[0] == bp1.shape[1] == bp2.shape[1] + argmin = softmax( + beta + * jnp.where( + (bp1[..., jnp.newaxis] < ext[:, jnp.newaxis]) + & (ext[:, jnp.newaxis] < bp2[..., jnp.newaxis]), + jnp.expand_dims(B / jnp.mean(B, axis=-1, keepdims=True), axis=1), + 1e2, # >> max(|B|) / mean(|B|) + ), + axis=-1, + ) + f = jnp.linalg.vecdot( + argmin, + _interp1d_vec(ext, knots, f.reshape(-1, knots.size), method=method)[ + :, jnp.newaxis + ], ) - f_min = jnp.linalg.vecdot( - softmax(beta * B, axis=-1), - _interp1d_vec(extrema, knots, f, method=method)[:, jnp.newaxis], + assert f.shape == bp1.shape == bp2.shape + return f + + +# Less efficient than above if P >> 1. +def _interp_to_argmin_B_hard(f, bp1, bp2, knots, B_c, B_z_ra_c, method): + """Compute ``f`` at deepest point in the magnetic well. + + Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A ∈ argmin_E |B|(ζ). Returns f(A). + + """ + ext, B = _get_extrema(knots, B_c, B_z_ra_c, sentinel=0) + assert ext.shape[0] == B.shape[0] == bp1.shape[1] == bp2.shape[1] + argmin = jnp.argmin( + jnp.where( + (bp1[..., jnp.newaxis] < ext[:, jnp.newaxis]) + & (ext[:, jnp.newaxis] < bp2[..., jnp.newaxis]), + B[:, jnp.newaxis], + 1e2 + jnp.max(B), + ), + axis=-1, ) - assert f_min.shape == (P, S, num_wells) - return f_min + A = jnp.take_along_axis(ext[jnp.newaxis], argmin, axis=-1) + f = _interp1d_vec(A, knots, f.reshape(-1, knots.size), method=method) + assert f.shape == bp1.shape == bp2.shape + return f diff --git a/desc/compute/fourier_bounce_integral.py b/desc/compute/fourier_bounce_integral.py index 8bb36c5d12..341bcbf3c9 100644 --- a/desc/compute/fourier_bounce_integral.py +++ b/desc/compute/fourier_bounce_integral.py @@ -1,24 +1,33 @@ -"""Methods for constructing f(α, ζ) splines and bounce integrals.""" - -from orthax.chebyshev import chebpts1, chebpts2, chebroots, chebval - -from desc.backend import dct, idct, irfft, jnp, rfft -from desc.compute._interp_utils import _filter_distinct, interp_rfft2, irfft_non_uniform -from desc.compute._quadrature_utils import affine_bijection as map_domain -from desc.compute._quadrature_utils import ( - affine_bijection_to_disc as map_domain_to_disc, +"""Methods for computing Fourier Chebyshev FFTs and bounce integrals.""" + +import numpy as np +from matplotlib import pyplot as plt +from orthax.chebyshev import chebroots, chebvander +from orthax.legendre import leggauss + +from desc.backend import dct, idct, irfft, jnp, rfft, rfft2 +from desc.compute._interp_utils import ( + _filter_distinct, + cheb_from_dct, + cheb_pts, + fourier_pts, + harmonic, + interp_rfft2, + irfft2_non_uniform, + irfft_non_uniform, ) -from desc.compute.bounce_integral import _fix_inversion +from desc.compute._quad_utils import ( + automorphism_sin, + bijection_from_disc, + bijection_to_disc, + grad_automorphism_sin, +) +from desc.compute.bounce_integral import _filter_nonzero_measure, _fix_inversion from desc.compute.utils import take_mask -from desc.equilibrium.coords import map_clebsch_coords -from desc.utils import errorif - -chebroots = jnp.vectorize(chebroots, signature="(m)->(m)") 
+from desc.utils import errorif, warnif - -def _cheb_from_dct(c): - # Return Chebshev polynomial coefficients given forward dct type 2. - return c.at[..., 0].divide(2.0) * 2 +# TODO: There are better techniques to find eigenvalues of Chebyshev colleague matrix. +_chebroots_vec = jnp.vectorize(chebroots, signature="(m)->(n)") def _flatten_matrix(y): @@ -26,18 +35,18 @@ def _flatten_matrix(y): return y.reshape(*y.shape[:-2], -1) -def alpha_sequence(alpha_0, m, iota, period): +def _alpha_sequence(alpha_0, iota, num_period, period=2 * jnp.pi): """Get sequence of poloidal coordinates (α₀, α₁, …, αₘ₋₁) of field line. Parameters ---------- alpha_0 : float Starting field line poloidal label. - m : float - Number of periods to follow field line. iota : jnp.ndarray Shape (iota.size, ). Rotational transform normalized by 2π. + num_period : float + Number of periods to follow field line. period : float Toroidal period after which to update label. @@ -49,7 +58,9 @@ def alpha_sequence(alpha_0, m, iota, period): """ # Δϕ (∂α/∂ϕ) = Δϕ ι̅ = Δϕ ι/2π = Δϕ data["iota"] - return (alpha_0 + period * iota[:, jnp.newaxis] * jnp.arange(m)) % (2 * jnp.pi) + return (alpha_0 + period * iota[:, jnp.newaxis] * jnp.arange(num_period)) % ( + 2 * jnp.pi + ) class FourierChebyshevBasis: @@ -73,8 +84,6 @@ class FourierChebyshevBasis: """ - _eps = min(jnp.finfo(jnp.array(1.0).dtype).eps * 1e2, 1e-10) - def __init__(self, f, lobatto=False, domain=(0, 2 * jnp.pi)): """Interpolate Fourier-Chebyshev basis to ``f``. @@ -105,24 +114,6 @@ def __init__(self, f, lobatto=False, domain=(0, 2 * jnp.pi)): / self.M ) - @staticmethod - def _fourier_pts(M): - return -jnp.pi + 2 * jnp.pi * jnp.arange(1, M + 1) / M - - # Y = [a, b] evaluate on grid -> y = [-1, 1] chebyshev points -> y = cos(z) - # evenly spaced z. - # So I find coefficients to chebyshev series T_n(y) = cos(n arcos(y)) = cos(n z). - # So evaluating my chebyshev series in y is same as evaluting cosine series in - # z = arcos(y). - # for y = inversemap[a, b]. - # Open questions is finding roots y using chebroots better or is finding roots z - # of trig poly. - # answer: research shows doesn't really matter. - @staticmethod - def _chebyshev_pts(N, lobatto, domain=(0, 2 * jnp.pi)): - y = chebpts2(N) if lobatto else chebpts1(N) - return map_domain(y, domain[0], domain[-1]) - @staticmethod def nodes(M, N, lobatto=False, domain=(0, 2 * jnp.pi), **kwargs): """Tensor product grid of optimal collocation nodes for this basis. @@ -146,8 +137,8 @@ def nodes(M, N, lobatto=False, domain=(0, 2 * jnp.pi), **kwargs): Grid of (x, y) points for optimal interpolation. """ - x = FourierChebyshevBasis._fourier_pts(M) - y = FourierChebyshevBasis._chebyshev_pts(N, lobatto, domain) + x = fourier_pts(M) + y = cheb_pts(N, lobatto, domain) coords = [kwargs.pop("rho"), x, y] if "rho" in kwargs else [x, y] coords = list(map(jnp.ravel, jnp.meshgrid(*coords, indexing="ij"))) coords = jnp.column_stack(coords) @@ -165,18 +156,18 @@ def evaluate(self, M, N): Returns ------- - f : jnp.ndarray + fq : jnp.ndarray Shape (..., M, N) Fourier-Chebyshev series evaluated at ``FourierChebyshevBasis.nodes(M, N)``. """ - f = idct( + fq = idct( irfft(self._c, n=M, axis=-2) * M, type=2 - self.lobatto, n=N, axis=-1, ) * (N - self.lobatto) - return f + return fq def harmonics(self): """Spectral coefficients aₘₙ of the interpolating polynomial. @@ -191,22 +182,12 @@ def harmonics(self): Real valued spectral coefficients for Fourier-Chebyshev basis. 
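The ``cheb_from_dct`` conversion these coefficients rely on uses the standard identity that a type-II DCT of samples taken at Chebyshev points of the first kind yields, up to normalization, the Chebyshev coefficients of the interpolant. A standalone check of that identity with SciPy's DCT convention (illustrative; the helper itself lives in ``desc.compute._interp_utils``):

.. code-block:: python

    import numpy as np
    from numpy.polynomial.chebyshev import chebinterpolate, chebpts1
    from scipy.fft import dct

    f = lambda y: np.exp(y) * np.sin(3 * y)
    N = 16
    y = chebpts1(N)[::-1]      # descending ordering compatible with the DCT
    c = dct(f(y), type=2) / N  # SciPy's DCT-II carries an extra factor of 2
    c[0] /= 2
    np.testing.assert_allclose(c, chebinterpolate(f, N - 1), atol=1e-12)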
""" - c = _cheb_from_dct(self._c) - # convert rfft to Nyquist trigonometric harmonics - is_even = (self.M % 2) == 0 - # ∂ₓ = 0 coefficients - a0 = jnp.real(c[..., 0, :])[..., jnp.newaxis, :] - # cos(mx) Tₙ(y) coefficients - an = jnp.real(c[..., 1:, :].at[..., -1, :].divide(1.0 + is_even)) * 2 - # sin(mx) Tₙ(y) coefficients - bn = jnp.imag(c[..., 1 : c.shape[-2] - is_even, :]) * (-2) - - a_mn = jnp.concatenate([a0, an, bn], axis=-2) + a_mn = harmonic(cheb_from_dct(self._c, axis=-1), self.M, axis=-2) assert a_mn.shape[-2:] == (self.M, self.N) return a_mn def compute_cheb(self, x): - """Evaluate Fourier basis at ``x`` to obtain set of 1d Chebyshev coefficients. + """Evaluate Fourier basis at ``x`` to obtain set of 1D Chebyshev coefficients. Parameters ---------- @@ -217,30 +198,58 @@ def compute_cheb(self, x): Returns ------- - cheb : jnp.ndarray - Shape (..., x.shape[-1], N). + cheb : _PiecewiseChebyshevBasis Chebyshev coefficients αₙ(x=``x``) for f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). """ - x = jnp.array(x, ndmin=self._c.ndim) - errorif(x.ndim != self._c.ndim, NotImplementedError) - cheb = _cheb_from_dct( - irfft_non_uniform(x, jnp.swapaxes(self._c, -1, -2), self.M) - ) - cheb = jnp.swapaxes(cheb, -1, -2) - assert cheb.shape == (*self._c.shape[:-2], x.shape[-1], self.N) - return cheb + # Always add new axis to broadcast against Chebyshev coefficients. + x = jnp.atleast_1d(x)[..., jnp.newaxis] + cheb = cheb_from_dct(irfft_non_uniform(x, self._c, self.M, axis=-2), axis=-1) + assert cheb.shape[-2:] == (x.shape[-1], self.N) + return _PiecewiseChebyshevBasis(cheb, self.domain) - def intersect(self, cheb, k=0, eps=_eps): - """Coordinates yᵢ such that f(x, yᵢ) = k(x). + +class _PiecewiseChebyshevBasis: + """Chebyshev series. + + { fₓ | fₓ : y ↦ ∑ₙ₌₀ᴺ⁻¹ aₙ(x) Tₙ(y) } + and Tₙ are Chebyshev polynomials on [−yₘᵢₙ, yₘₐₓ]. + + Attributes + ---------- + cheb : jnp.ndarray + Shape (..., N). + Chebyshev coefficients αₙ(x) for fₓ(y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). + N : int + Chebyshev spectral resolution. + domain : (float, float) + Domain for y coordinates. + + """ + + _eps = min(jnp.finfo(jnp.array(1.0).dtype).eps * 1e2, 1e-10) + + def __init__(self, cheb, domain): + """Make Chebyshev series basis from given coefficients. Parameters ---------- cheb : jnp.ndarray Shape (..., N). Chebyshev coefficients αₙ(x=``x``) for f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). + + """ + self.cheb = cheb + self.N = cheb.shape[-1] + self.domain = domain + + def intersect(self, k=0, eps=_eps): + """Coordinates yᵢ such that f(x, yᵢ) = k(x). + + Parameters + ---------- k : jnp.ndarray - Shape (..., *cheb.shape). + Shape cheb.shape[:-1] or (k.shape[0], *cheb.shape[:-1]). Specify to find solutions yᵢ to f(x, yᵢ) = k(x). Default 0. eps : float Absolute tolerance with which to consider value as zero. @@ -261,11 +270,15 @@ def intersect(self, cheb, k=0, eps=_eps): Boolean array into ``y`` indicating whether element is an intersect. 
""" - assert cheb.shape[-1] == self.N - c = cheb[jnp.newaxis] if k.ndim > cheb.ndim else cheb - c = c.at[..., 0].add(-k) + errorif( + k.ndim > self.cheb.ndim, + NotImplementedError, + msg=f"Got k.ndim {k.ndim} > cheb.ndim {self.cheb.ndim}.", + ) + c = self.cheb if k.ndim < self.cheb.ndim else self.cheb[jnp.newaxis] + c = c.copy().at[..., 0].add(-k) # roots yᵢ of f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y) - k(x) - y = chebroots(c) + y = _chebroots_vec(c) assert y.shape == (*c.shape[:-1], self.N - 1) y = _filter_distinct(y, sentinel=-2, eps=eps) @@ -277,17 +290,17 @@ def intersect(self, cheb, k=0, eps=_eps): s = jnp.linalg.vecdot( # TODO: Multipoint evaluation with FFT. # Chapter 10, https://doi.org/10.1017/CBO9781139856065. - cheb[..., jnp.newaxis, :], + self.cheb[..., jnp.newaxis, :], jnp.sin(jnp.arange(self.N) * jnp.arccos(y)[..., jnp.newaxis]), ) is_decreasing = s <= 0 is_increasing = s >= 0 - y = map_domain(y, self.domain[0], self.domain[-1]) + y = bijection_from_disc(y, self.domain[0], self.domain[-1]) return y, is_decreasing, is_increasing, is_intersect def bounce_points( - self, y, is_decreasing, is_increasing, is_intersect, num_wells=None + self, y, is_decreasing, is_increasing, is_intersect, num_well=None ): """Compute bounce points given intersections. @@ -307,22 +320,22 @@ def bounce_points( is_intersect : jnp.ndarray Shape y.shape. Boolean array into ``y`` indicating whether element is an intersect. - num_wells : int + num_well : int or None If not specified, then all bounce points are returned in an array whose - last axis has size ``y.shape[-1] * y.shape[-2]``. If there + last axis has size ``y.shape[-1]*y.shape[-2]``. If there were less than that many wells detected along a field line, then the last axis of the returned arrays, which enumerates bounce points for a particular field line and pitch, is padded with zero. - Specify to return the first ``num_wells`` pairs of bounce points for each - pitch along each field line. This is useful if ``num_wells`` tightly + Specify to return the first ``num_well`` pairs of bounce points for each + pitch along each field line. This is useful if ``num_well`` tightly bounds the actual number of wells. As a reference, there are typically <= 5 wells per toroidal transit. Returns ------- bp1, bp2 : (jnp.ndarray, jnp.ndarray) - Shape (*y.shape[:-2], num_wells). + Shape (*y.shape[:-2], num_well). The field line-following coordinates of bounce points for a given pitch along a field line. The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, respectively, for the bounce integrals. @@ -333,12 +346,17 @@ def bounce_points( is_decreasing = _flatten_matrix(is_decreasing) is_increasing = _flatten_matrix(is_increasing) is_intersect = _flatten_matrix(is_intersect) + # We ignore the degenerate edge case where the boundary shared by adjacent + # polynomials is a left bounce point i.e. ``is_bp1`` because the subset of + # pitch values that generate this edge case has zero measure. Note that + # the technique to account for this would be to disqualify intersects + # within ``_eps`` from ``domain[-1]``. 
is_bp1 = is_decreasing & is_intersect is_bp2 = is_increasing & _fix_inversion(is_intersect, is_increasing) sentinel = self.domain[0] - 1 - bp1 = take_mask(y, is_bp1, size=num_wells, fill_value=sentinel) - bp2 = take_mask(y, is_bp2, size=num_wells, fill_value=sentinel) + bp1 = take_mask(y, is_bp1, size=num_well, fill_value=sentinel) + bp2 = take_mask(y, is_bp2, size=num_well, fill_value=sentinel) mask = (bp1 > sentinel) & (bp2 > sentinel) # Set outside mask to same value so integration is over set of measure zero. @@ -346,7 +364,161 @@ def bounce_points( bp2 = jnp.where(mask, bp2, 0) return bp1, bp2 - def interp_cheb(self, z, cheb): + def plot_field_line( + self, + start, + stop, + num=1000, + bp1=np.array([]), + bp2=np.array([]), + pitch=np.array([]), + title=r"Computed bounce points for $\vert B \vert$ and pitch $\lambda$", + title_id=None, + transparency_pitch=0.3, + show=True, + ): + """Plot the field line given spline of |B|. + + Parameters + ---------- + start : float + Minimum ζ on plot. + stop : float + Maximum ζ on plot. + num : int + Number of ζ points to plot. Pick a big number. + bp1 : np.ndarray + Bounce points with (∂|B|/∂ζ)|ρ,α <= 0. + bp2 : np.ndarray + Bounce points with (∂|B|/∂ζ)|ρ,α >= 0. + pitch : np.ndarray + λ value. + title : str + Plot title. + title_id : str + Identifier string to append to plot title. + transparency_pitch : float + Transparency of pitch lines. + show : bool + Whether to show the plot. Default is true. + + Returns + ------- + fig, ax : matplotlib figure and axes. + + """ + errorif(start is None or stop is None) + legend = {} + + def add(lines): + if not hasattr(lines, "__iter__"): + lines = [lines] + for line in lines: + label = line.get_label() + if label not in legend: + legend[label] = line + + fig, ax = plt.subplots() + z = np.linspace(start=start, stop=stop, num=num) + add(ax.plot(z, self.eval1d(z), label=r"$\vert B \vert (\zeta)$")) + + if pitch is not None: + b = 1 / np.atleast_1d(pitch) + for val in b: + add( + ax.axhline( + val, + color="tab:purple", + alpha=transparency_pitch, + label=r"$1 / \lambda$", + ) + ) + bp1, bp2 = np.atleast_2d(bp1, bp2) + for i in range(bp1.shape[0]): + bp1_i, bp2_i = _filter_nonzero_measure(bp1[i], bp2[i]) + add( + ax.scatter( + bp1_i, + np.full_like(bp1_i, b[i]), + marker="v", + color="tab:red", + label="bp1", + ) + ) + add( + ax.scatter( + bp2_i, + np.full_like(bp2_i, b[i]), + marker="^", + color="tab:green", + label="bp2", + ) + ) + + ax.set_xlabel(r"Field line $\zeta$") + ax.set_ylabel(r"$\vert B \vert \sim 1 / \lambda$") + ax.legend(legend.values(), legend.keys(), loc="lower right") + if title_id is not None: + title = f"{title}. id = {title_id}." + ax.set_title(title) + plt.tight_layout() + if show: + plt.show() + plt.close() + return fig, ax + + def check_bounce_points( + self, bp1, bp2, pitch, plot=True, start=None, stop=None, **kwargs + ): + """Check that bounce points are computed correctly.""" + pitch = jnp.atleast_3d(pitch) + errorif(not (pitch.ndim == bp1.ndim == bp2.ndim == 3), NotImplementedError) + errorif(bp1.shape != bp2.shape) + + P, L, num_wells = bp1.shape + msg_1 = "Bounce points have an inversion." + err_1 = jnp.any(bp1 > bp2, axis=-1) + msg_2 = "Discontinuity detected." 
+ err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1], axis=-1) + + for l in range(L): + for p in range(P): + B_mid = self.eval1d((bp1[p, l] + bp2[p, l]) / 2) + err_3 = jnp.any(B_mid > 1 / pitch[p, l] + self._eps) + if err_1[p, l] or err_2[p, l] or err_3: + bp1_p, bp2_p = _filter_nonzero_measure(bp1[p, l], bp2[p, l]) + B_mid = B_mid[(bp1[p, l] - bp2[p, l]) != 0] + if plot: + self.plot_field_line( + start=start, + stop=stop, + pitch=pitch[p, l], + bp1=bp1_p, + bp2=bp2_p, + title_id=f"{p},{l}", + **kwargs, + ) + print("bp1:", bp1_p) + print("bp2:", bp2_p) + assert not err_1[p, l], msg_1 + assert not err_2[p, l], msg_2 + msg_3 = ( + f"Detected B midpoint = {B_mid}>{1 / pitch[p, l] + self._eps} =" + " 1/pitch. You need to use more knots." + ) + assert not err_3, msg_3 + if plot: + self.plot_field_line( + start=start, + stop=stop, + pitch=pitch[:, l], + bp1=bp1[:, l], + bp2=bp2[:, l], + title_id=str(l), + **kwargs, + ) + + def eval1d(self, z): """Evaluate piecewise Chebyshev spline at coordinates z. The coordinates z ∈ ℝ are assumed isomorphic to (x, y) ∈ ℝ² @@ -357,11 +529,8 @@ def interp_cheb(self, z, cheb): Parameters ---------- z : jnp.ndarray - Shape (*cheb.shape[:-2], z.shape[-1]). - Isomorphic coordinates along field line [0, inf]. - cheb: jnp.ndarray - Shape (..., num cheb series, N). - Chebyshev coefficients αₙ for f(z) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x[z]) Tₙ(y[z]). + Shape (..., *cheb.shape[:-2], z.shape[-1]). + Isomorphic coordinates along field line [0, ∞). Returns ------- @@ -371,16 +540,14 @@ def interp_cheb(self, z, cheb): """ x_idx, y = self._isomorphism_2d(z) - y = map_domain_to_disc(y, self.domain[0], self.domain[1]) - cheb = jnp.moveaxis(cheb, source=-1, destination=0) - cheb = jnp.take_along_axis(cheb, x_idx, axis=-1, mode="promise_in_bounds") + y = bijection_to_disc(y, self.domain[0], self.domain[1]) + # Chebyshev coefficients αₙ for f(z) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x[z]) Tₙ(y[z]) + # are held in self.cheb with shape (..., num cheb series, N). + cheb = jnp.moveaxis(self.cheb, source=-1, destination=0) + cheb = jnp.take_along_axis(cheb, x_idx, axis=-1) # TODO: Multipoint evaluation with FFT. # Chapter 10, https://doi.org/10.1017/CBO9781139856065. - f = chebval(y, cheb, tensor=False) - # TODO: Add below as unit test. - # n = jnp.arange(self.N) # noqa: E800 - # T = jnp.cos(n * jnp.arccos(y)[..., jnp.newaxis]) # noqa: E800 - # f = jnp.einsum("...n,n...", T, cheb) # noqa: E800 + f = jnp.linalg.vecdot(chebvander(y, self.N - 1), cheb) return f def _isomorphism_1d(self, y): @@ -391,7 +558,7 @@ def _isomorphism_1d(self, y): Parameters ---------- y : jnp.ndarray - Shape (..., *y.shape[-2:]). + Shape (..., y.shape[-2], y.shape[-1]). Second to last axis iterates the rows. Returns @@ -401,6 +568,7 @@ def _isomorphism_1d(self, y): Isomorphic coordinates. """ + assert y.ndim >= 2 period = self.domain[-1] - self.domain[0] zeta_shift = period * jnp.arange(y.shape[-2]) z = zeta_shift[:, jnp.newaxis] + y @@ -418,8 +586,8 @@ def _isomorphism_2d(self, z): Returns ------- - x_index : jnp.ndarray - Shape y.shape. + x_index, y_value : (jnp.ndarray, jnp.ndarray) + Shape z.shape. Isomorphic coordinates. """ @@ -429,33 +597,196 @@ def _isomorphism_2d(self, z): return x_index, y_value +def _bounce_quadrature(bp1, bp2, x, w, m, n, integrand, f, b_sup_z, B, T, pitch): + """Bounce integrate ∫ f(ℓ) dℓ. + + Parameters + ---------- + bp1 : jnp.ndarray + Shape (P, L, num_well). + The field line-following coordinates of bounce points for a given pitch + along a field line. 
The pairs ``bp1`` and ``bp2`` form left and right + integration boundaries, respectively, for the bounce integrals. + bp2 : jnp.ndarray + Shape (P, L, num_well). + The field line-following coordinates of bounce points for a given pitch + along a field line. The pairs ``bp1`` and ``bp2`` form left and right + integration boundaries, respectively, for the bounce integrals. + x : jnp.ndarray + Shape (w.size, ). + Quadrature points in [-1, 1]. + w : jnp.ndarray + Shape (w.size, ). + Quadrature weights. + m : int + Poloidal periodic DESC coordinate resolution on which the given + ``f`` and ``b_sup_z`` were evaluated. + n : int + Toroidal periodic DESC coordinate resolution on which the given + ``f`` and ``b_sup_z`` were evaluated. + integrand : callable + The composition operator on the set of functions in ``f`` that maps the + functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the + arrays in ``f`` as arguments as well as the additional keyword arguments: + ``B`` and ``pitch``. A quadrature will be performed to approximate the + bounce integral of ``integrand(*f,B=B,pitch=pitch)``. + f : list of jnp.ndarray + Shape (L * m * n, ) or (L, m, n) or (L, 1, m, n). + Arguments to the callable ``integrand``. These should be real scalar-valued + functions in the bounce integrand evaluated on the periodic DESC coordinate + (ρ, θ, ζ) tensor-product grid. + b_sup_z : jnp.ndarray + Shape (L, 1, m, n). + Set of 2D Fourier spectral coefficients of B^ζ/|B|. + B : jnp.ndarray + Set of 1D Chebyshev spectral coefficients of |B| along field line. + T : jnp.ndarray + Set of 1D Chebyshev spectral coefficients of θ along field line. + pitch : jnp.ndarray + Shape (P, L, 1). + λ values to evaluate the bounce integral at each field line. + + Returns + ------- + result : jnp.ndarray + Shape (P, S, num_well). + First axis enumerates pitch values. Second axis enumerates the field lines. + Last axis enumerates the bounce integrals. + + """ + errorif(bp1.ndim != 3 or bp1.shape != bp2.shape) + errorif(pitch.ndim != 3) + errorif(x.ndim != 1 or x.shape != w.shape) + errorif( + B.cheb.shape != T.cheb.shape + or B.cheb.ndim != 3 + or B.cheb.shape[0] != bp1.shape[1] + ) + + P, L, num_well = bp1.shape + shape = (P, L, num_well, x.size) + # Quadrature points parameterized by ζ, for each pitch and flux surface. + Q_zeta = _flatten_matrix( + bijection_from_disc(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) + ) + # Quadrature points in DESC (θ, ζ) coordinates. + Q_desc = jnp.stack([T.eval1d(Q_zeta), Q_zeta], axis=-1) + f = [interp_rfft2(Q_desc, f_i.reshape(L, 1, m, n)).reshape(shape) for f_i in f] + result = jnp.dot( + integrand(*f, B=B.eval1d(Q_zeta).reshape(shape), pitch=pitch[..., jnp.newaxis]) + / irfft2_non_uniform(Q_desc, b_sup_z, m, n).reshape(shape), + w, + ) + assert result.shape == (P, L, num_well) + + +# TODO: Assumes zeta = phi +# input is +# that clebsch = FourierChebyshevBasis.nodes(M, N, rho=grid.compress(data["rho"])) +# then get desc_from_clebsch = map_coordinates(clebsch) def bounce_integral( grid, data, - L_lmn, - L_basis, M, N, - alpha, - pitch, - num_wells, - quad, - automorphism, + desc_from_clebsch, + alpha_0, + num_transit, + quad=leggauss(21), + automorphism=(automorphism_sin, grad_automorphism_sin), + B_ref=1.0, + L_ref=1.0, + check=False, **kwargs, ): - """TODO.""" - raz = FourierChebyshevBasis.nodes(M, N, rho=grid.compress(data["rho"])) - rtz = map_clebsch_coords(raz, data["iota"], L_lmn, L_basis, **kwargs) - # Make θ(α, ζ) and B(α, ζ) splines. 
- theta = FourierChebyshevBasis(rtz[:, 1].reshape(grid.num_rho, M, N)) # noqa: F841 - B = FourierChebyshevBasis( + """Returns a method to compute bounce integrals. + + The bounce integral is defined as ∫ f(ℓ) dℓ, where + dℓ parameterizes the distance along the field line in meters, + λ is a constant proportional to the magnetic moment over energy, + |B| is the norm of the magnetic field, + f(ℓ) is the quantity to integrate along the field line, + and the boundaries of the integral are bounce points ζ₁, ζ₂ s.t. λ|B|(ζᵢ) = 1. + + For a particle with fixed λ, bounce points are defined to be the location on the + field line such that the particle's velocity parallel to the magnetic field is zero. + The bounce integral is defined up to a sign. We choose the sign that corresponds to + the particle's guiding center trajectory traveling in the direction of increasing + field-line-following coordinate ζ. + + Parameters + ---------- + grid : Grid + Periodic tensor-product grid in (ρ, θ, ζ). + Note that below shape notation uses ``L=grid.num_rho``, ``m=grid.num_theta``, + and ``n=grid.num_zeta``. + data : dict of jnp.ndarray + Data evaluated on grid. + M : int + Grid resolution in poloidal direction for Clebsch coordinates. + Preferably power of 2. A good choice is ``grid.num_theta``. + N : int + Grid resolution in toroidal direction for Clebsch coordinates. + Preferably power of 2. + desc_from_clebsch : jnp.ndarray + Shape (L * M * N, 3). + DESC coordinate grid (ρ, θ, ζ) sourced from the Clebsch coordinate + tensor-product grid (ρ, α, ζ) returned by ``FourierChebyshevBasis.nodes(M, N)``. + alpha_0 : float + Starting field line poloidal label. + TODO: Allow multiple starting labels for near-rational surfaces. + num_transit : int + Number of toroidal transits to follow field line. + quad : (jnp.ndarray, jnp.ndarray) + Quadrature points xₖ and weights wₖ for the approximate evaluation of an + integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Default is 21 points. + automorphism : (Callable, Callable) or None + The first callable should be an automorphism of the real interval [-1, 1]. + The second callable should be the derivative of the first. This map defines a + change of variable for the bounce integral. The choice made for the automorphism + can affect the performance of the quadrature method. + B_ref : float + Optional. Reference magnetic field strength for normalization. + L_ref : float + Optional. Reference length scale for normalization. + check : bool + Flag for debugging. Must be false for jax transformations. + + Returns + ------- + bounce_integrate : callable + This callable method computes the bounce integral ∫ f(ℓ) dℓ for every + specified field line for every λ value in ``pitch``. + + """ + # Resolution of periodic DESC coordinate tensor-product grid. + L, m, n = grid.num_rho, grid.num_theta, grid.num_zeta + # Strictly increasing zeta knots enforces dζ > 0. + # To retain dℓ = (|B|/B^ζ) dζ > 0 after fixing dζ > 0, we require B^ζ = B⋅∇ζ > 0. + # This is equivalent to changing the sign of ∇ζ. + warnif( + check and kwargs.pop("warn", True) and jnp.any(data["B^zeta"] <= 0), + msg="(∂ℓ/∂ζ)|ρ,a > 0 is required. Enforcing positive B^ζ.", + ) + + # Transform to periodic DESC spectral domain. + b_sup_z = rfft2( + (jnp.abs(data["B^zeta"]) / data["|B|"] * L_ref).reshape(L, 1, m, n), + norm="forward", + ) + # Transform to non-periodic Clebsch spectral domain. 
+ T = FourierChebyshevBasis(desc_from_clebsch[:, 1].reshape(L, M, N)) # θ(α, ζ) + B = FourierChebyshevBasis( # |B|(α, ζ) interp_rfft2( - xq=rtz[:, 1:].reshape(grid.num_rho, -1, 2), - f=data["|B|"].reshape(grid.num_rho, grid.num_theta, grid.num_zeta), - ).reshape(grid.num_rho, M, N), + xq=desc_from_clebsch[:, 1:].reshape(L, -1, 2), + f=data["|B|"].reshape(L, m, n) / B_ref, + ).reshape(L, M, N), ) - cheb = B.compute_cheb(alpha) - bp1, bp2 = B.bounce_points(*B.intersect(cheb, jnp.reciprocal(pitch)), num_wells) + # Peel off field lines. + alpha = _alpha_sequence(alpha_0, grid.compress(data["iota"]), num_transit) + T = T.compute_cheb(alpha) + B = B.compute_cheb(alpha) + assert T.cheb.shape == B.cheb.shape == (L, num_transit, N) x, w = quad assert x.ndim == w.ndim == 1 @@ -465,7 +796,67 @@ def bounce_integral( # Recall affine_bijection(auto(x), ζ_b₁, ζ_b₂) = ζ. x = auto(x) - shape = (*bp1.shape, x.size) - # P, rho, num wells * num quad - Q_az = _flatten_matrix(map_domain(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis])) - B_quad = B.interp_cheb(Q_az, cheb).reshape(shape) # noqa: F841 + def bounce_integrate(integrand, f, pitch, weight=None, num_well=None): + """Bounce integrate ∫ f(ℓ) dℓ. + + Parameters + ---------- + integrand : callable + The composition operator on the set of functions in ``f`` that maps the + functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the + arrays in ``f`` as arguments as well as the additional keyword arguments: + ``B`` and ``pitch``. A quadrature will be performed to approximate the + bounce integral of ``integrand(*f,B=B,pitch=pitch)``. + f : list of jnp.ndarray + Shape (L * m * n, ) or (L, m, n). + Arguments to the callable ``integrand``. These should be real scalar-valued + functions in the bounce integrand evaluated on ``grid``. + pitch : jnp.ndarray + Shape (P, L). + λ values to evaluate the bounce integral at each field line. λ(ρ) is + specified by ``pitch[...,ρ]`` where in the latter the labels ρ are + interpreted as the index into the last axis that corresponds to that field + line. If two-dimensional, the first axis is the batch axis. + weight : jnp.ndarray + Shape (L * m * n, ) or (L, m, n). + If supplied, the bounce integral labeled by well j is weighted such that + the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` + evaluated at the deepest point in the magnetic well. + num_well : int or None + If not specified, then all bounce integrals are returned in an array whose + last axis has size ``(N-1)*num_transit``. If there + were less than that many wells detected along a field line, then the last + axis of the returned array, which enumerates bounce integrals for a + particular field line and pitch, is padded with zero. + + Specify to return the bounce integrals between the first ``num_well`` + wells for each pitch along each field line. This is useful if ``num_well`` + tightly bounds the actual number of wells. To obtain a good + choice for ``num_well``, plot the field line with all the bounce points + identified. This will be done automatically if the ``bounce_integral`` + function is called with ``check=True`` and ``plot=True``. As a reference, + there are typically <= 5 wells per toroidal transit. + + Returns + ------- + result : jnp.ndarray + Shape (P, L, num_well). + First axis enumerates pitch values. Second axis enumerates the field lines. + Last axis enumerates the bounce integrals. + + """ + errorif(weight is not None, NotImplementedError) + # Compute bounce points. 
+ pitch = jnp.atleast_3d(pitch) + P = pitch.shape[0] + assert pitch.shape[1:] == B.cheb.shape[:-1] + bp1, bp2 = B.bounce_points(*B.intersect(1 / pitch), num_well) + num_well = bp1.shape[-1] + assert bp1.shape == bp2.shape == (P, L, num_well) + + result = _bounce_quadrature( + bp1, bp2, x, w, m, n, integrand, f, b_sup_z, B, T, pitch + ) + return result + + return bounce_integrate diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index 2fda119f04..8291cdd423 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -677,7 +677,7 @@ def get_rtz_grid( rtz : rho, theta, zeta period : tuple of float Assumed periodicity for each quantity in inbasis. - Use np.inf to denote no periodicity. + Use ``np.inf`` to denote no periodicity. jitable : bool, optional If false the returned grid has additional attributes. Required to be false to retain nodes at magnetic axis. diff --git a/desc/equilibrium/equilibrium.py b/desc/equilibrium/equilibrium.py index 41d205a595..8929adec0b 100644 --- a/desc/equilibrium/equilibrium.py +++ b/desc/equilibrium/equilibrium.py @@ -1161,8 +1161,6 @@ def map_coordinates( Parameters ---------- - eq : Equilibrium - Equilibrium to use. coords : ndarray Shape (k, 3). 2D array of input coordinates. Each row is a different point in space. @@ -1252,7 +1250,11 @@ def compute_theta_coords( point. Only returned if ``full_output`` is True. """ - warnif(True, DeprecationWarning, msg="Use map_coordinates instead.") + warnif( + True, + DeprecationWarning, + "Use map_coordinates instead of compute_theta_coords.", + ) return map_coordinates( self, flux_coords, diff --git a/desc/grid.py b/desc/grid.py index c5c811b76d..359917c10b 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -745,7 +745,8 @@ def create_meshgrid( Use np.inf to denote no periodicity. NFP : int Number of field periods (Default = 1). - Only makes sense to change from 1 if ``period[2]==2π``. + Only makes sense to change from 1 if last coordinate is periodic + with some constant divided by ``NFP``. Returns ------- diff --git a/desc/utils.py b/desc/utils.py index a1df551229..1547fc9e34 100644 --- a/desc/utils.py +++ b/desc/utils.py @@ -184,6 +184,13 @@ class _Indexable: def __getitem__(self, index): return index + @staticmethod + def get(stuff, axis, ndim): + slices = [slice(None)] * ndim + slices[axis] = stuff + slices = tuple(slices) + return slices + """ Helper object for building indexes for indexed update functions. 
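A quick usage sketch of the slice-building pattern added to the indexing helper above; the standalone function below mirrors its logic for illustration and is not the DESC helper itself:

.. code-block:: python

    import numpy as np

    def get(stuff, axis, ndim):
        """Build an index that applies ``stuff`` along ``axis`` of an ``ndim``-dimensional array."""
        slices = [slice(None)] * ndim
        slices[axis] = stuff
        return tuple(slices)

    a = np.arange(24).reshape(2, 3, 4)
    np.testing.assert_allclose(a[get(1, axis=1, ndim=a.ndim)], a[:, 1, :])
    np.testing.assert_allclose(a[get(slice(0, 2), axis=-1, ndim=a.ndim)], a[..., 0:2])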
diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 28b13cbbbc..365b5ab65d 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -14,16 +14,15 @@ from scipy.special import ellipe, ellipkm1, roots_chebyu from tests.test_plotting import tol_1d -from desc.backend import flatnonzero, jnp, put, rfft -from desc.compute._interp_utils import interp_rfft, interp_rfft2 -from desc.compute._quadrature_utils import ( - affine_bijection, - affine_bijection_to_disc, +from desc.backend import flatnonzero, jnp +from desc.compute._quad_utils import ( automorphism_arcsin, automorphism_sin, - grad_affine_bijection, + bijection_from_disc, + bijection_to_disc, grad_automorphism_arcsin, grad_automorphism_sin, + grad_bijection_from_disc, leggausslob, tanh_sinh, ) @@ -32,21 +31,21 @@ _filter_nonzero_measure, _filter_not_nan, _get_extrema, + _interp_to_argmin_B_hard, + _interp_to_argmin_B_soft, _poly_der, - _poly_root, _poly_val, bounce_integral, bounce_points, get_pitch, plot_field_line, ) -from desc.compute.fourier_bounce_integral import FourierChebyshevBasis from desc.compute.utils import dot, take_mask from desc.equilibrium import Equilibrium from desc.equilibrium.coords import get_rtz_grid from desc.examples import get from desc.grid import Grid, LinearGrid -from desc.utils import Index, only1 +from desc.utils import only1 @partial(np.vectorize, signature="(m)->()") @@ -73,12 +72,12 @@ def test_mask_operations(): taken[i], np.pad(desired, (0, cols - desired.size), constant_values=np.nan), equal_nan=True, - ), "take_mask has bugs." + ) assert np.array_equal( last[i], desired[-1] if desired.size else np.nan, equal_nan=True, - ), "flatnonzero has bugs." + ) @pytest.mark.unit @@ -117,54 +116,6 @@ def test_reshape_convention(): ), err_msg -@pytest.mark.unit -def test_poly_root(): - """Test vectorized computation of cubic polynomial exact roots.""" - cubic = 4 - c = np.arange(-24, 24).reshape(cubic, 6, -1) * np.pi - # make sure broadcasting won't hide error in implementation - assert np.unique(c.shape).size == c.ndim - constant = np.broadcast_to(np.arange(c.shape[-1]), c.shape[1:]) - constant = np.stack([constant, constant]) - root = _poly_root(c, constant, sort=True) - - for i in range(constant.shape[0]): - for j in range(c.shape[1]): - for k in range(c.shape[2]): - d = c[-1, j, k] - constant[i, j, k] - np.testing.assert_allclose( - actual=root[i, j, k], - desired=np.sort(np.roots([*c[:-1, j, k], d])), - ) - - c = np.array( - [ - [1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1], - [1, -1, -8, 12], - [1, -6, 11, -6], - [0, -6, 11, -2], - ] - ) - root = _poly_root(c.T, sort=True, distinct=True) - for j in range(c.shape[0]): - unique_roots = np.unique(np.roots(c[j])) - root_filter = _filter_not_nan(root[j], check=True) - assert root_filter.size == unique_roots.size, j - np.testing.assert_allclose( - actual=root_filter, - desired=unique_roots, - err_msg=str(j), - ) - c = np.array([0, 1, -1, -8, 12]) - root = _filter_not_nan(_poly_root(c, sort=True, distinct=True), check=True) - unique_root = np.unique(np.roots(c)) - assert root.size == unique_root.size - np.testing.assert_allclose(root, unique_root) - - @pytest.mark.unit def test_poly_der(): """Test vectorized computation of polynomial derivative.""" @@ -245,11 +196,13 @@ def test_composite_linspace(): assert only1(np.isclose(breaks[i, j], b[:, j]).tolist()) -@pytest.mark.unit -def test_bounce_points(): +class TestBouncePoints: """Test that bounce points are computed correctly.""" + @staticmethod + 
@pytest.mark.unit def test_bp1_first(): + """Test that bounce points are computed correctly.""" start = np.pi / 3 end = 6 * np.pi knots = np.linspace(start, end, 5) @@ -262,7 +215,10 @@ def test_bp1_first(): np.testing.assert_allclose(bp1, intersect[0::2]) np.testing.assert_allclose(bp2, intersect[1::2]) + @staticmethod + @pytest.mark.unit def test_bp2_first(): + """Test that bounce points are computed correctly.""" start = -3 * np.pi end = -start k = np.linspace(start, end, 5) @@ -272,11 +228,13 @@ def test_bp2_first(): bp1, bp2 = bounce_points(pitch, k, B.c, B.derivative().c, check=True) bp1, bp2 = _filter_nonzero_measure(bp1, bp2) assert bp1.size and bp2.size - # Don't include intersect[-1] for now as it doesn't have a paired bp2. np.testing.assert_allclose(bp1, intersect[1:-1:2]) np.testing.assert_allclose(bp2, intersect[0::2][1:]) + @staticmethod + @pytest.mark.unit def test_bp1_before_extrema(): + """Test that bounce points are computed correctly.""" start = -np.pi end = -2 * start k = np.linspace(start, end, 5) @@ -295,7 +253,10 @@ def test_bp1_before_extrema(): np.testing.assert_allclose(intersect[2], intersect[3], rtol=1e-6) np.testing.assert_allclose(bp2, intersect[[3, 4]], rtol=1e-6) + @staticmethod + @pytest.mark.unit def test_bp2_before_extrema(): + """Test that bounce points are computed correctly.""" start = -1.2 * np.pi end = -2 * start k = np.linspace(start, end, 7) @@ -313,7 +274,25 @@ def test_bp2_before_extrema(): np.testing.assert_allclose(bp1, intersect[[0, -2]]) np.testing.assert_allclose(bp2, intersect[[1, -1]]) + @staticmethod + @pytest.mark.unit def test_extrema_first_and_before_bp1(): + """Test that bounce points are computed correctly.""" + # In theory, this test should only pass if distinct=True when computing the + # intersections in bounce points. However, we can get lucky due to floating + # point errors, and it may also pass when distinct=False. + # If a regression fails this test, this note will save many hours of debugging. + # If the filter in place to return only the distinct roots is too coarse, + # in particular atol < 1e-15, then this test will error. In the resulting + # plot that the error will produce the red bounce point on the first hump + # disappears. The true sequence is green, double red, green, red, green. + # The first green was close to the double red and hence the first of the + # double red root pair was erased as it was falsely detected as a duplicate. + # The second of the double red root pair is correctly erased. All that is + # left is the green. Now the bounce_points method assumes the intermediate + # value theorem holds for the continuous spline, so when fed these sequence + # of roots, the correct action is to ignore the first green root since + # otherwise the interior of the bounce points would be hills and not valleys. start = -1.2 * np.pi end = -2 * start k = np.linspace(start, end, 7) @@ -336,7 +315,10 @@ def test_extrema_first_and_before_bp1(): np.testing.assert_allclose(bp1, intersect[[0, 2, 4]], rtol=1e-6) np.testing.assert_allclose(bp2, intersect[[0, 3, 5]], rtol=1e-6) + @staticmethod + @pytest.mark.unit def test_extrema_first_and_before_bp2(): + """Test that bounce points are computed correctly.""" start = -1.2 * np.pi end = -2 * start + 1 k = np.linspace(start, end, 7) @@ -347,18 +329,6 @@ def test_extrema_first_and_before_bp2(): ) B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[1] + 1e-13 - # If a regression fails this test, this note will save many hours of debugging. 
- # If the filter in place to return only the distinct roots is too coarse, - # in particular atol < 1e-15, then this test will error. In the resulting - # plot that the error will produce the red bounce point on the first hump - # disappears. The true sequence is green, double red, green, red, green. - # The first green was close to the double red and hence the first of the - # double red root pair was erased as it was falsely detected as a duplicate. - # The second of the double red root pair is correctly erased. All that is - # left is the green. Now the bounce_points method assumes the intermediate - # value theorem holds for the continuous spline, so when fed these sequence - # of roots, the correct action is to ignore the first green root since - # otherwise the interior of the bounce points would be hills and not valleys. bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) bp1, bp2 = _filter_nonzero_measure(bp1, bp2) assert bp1.size and bp2.size @@ -370,30 +340,20 @@ def test_extrema_first_and_before_bp2(): np.testing.assert_allclose(intersect[0], intersect[1], rtol=1e-5) np.testing.assert_allclose(bp2, intersect[[2, 4, 6]], rtol=1e-5) - test_bp1_first() - test_bp2_first() - test_bp1_before_extrema() - test_bp2_before_extrema() - # In theory, this test should only pass if distinct=True when computing the - # intersections in bounce points. However, we can get lucky due to floating - # point errors, and it may also pass when distinct=False. - test_extrema_first_and_before_bp1() - test_extrema_first_and_before_bp2() - @pytest.mark.unit def test_automorphism(): """Test automorphisms.""" a, b = -312, 786 x = np.linspace(a, b, 10) - y = affine_bijection_to_disc(x, a, b) - x_1 = affine_bijection(y, a, b) + y = bijection_to_disc(x, a, b) + x_1 = bijection_from_disc(y, a, b) np.testing.assert_allclose(x_1, x) - np.testing.assert_allclose(affine_bijection_to_disc(x_1, a, b), y) + np.testing.assert_allclose(bijection_to_disc(x_1, a, b), y) np.testing.assert_allclose(automorphism_arcsin(automorphism_sin(y)), y, atol=5e-7) np.testing.assert_allclose(automorphism_sin(automorphism_arcsin(y)), y, atol=5e-7) - np.testing.assert_allclose(grad_affine_bijection(a, b), 1 / (2 / (b - a))) + np.testing.assert_allclose(grad_bijection_from_disc(a, b), 1 / (2 / (b - a))) np.testing.assert_allclose( grad_automorphism_sin(y), 1 / grad_automorphism_arcsin(automorphism_sin(y)), @@ -416,66 +376,64 @@ def test_automorphism(): assert np.isfinite(y).all() -@pytest.mark.unit -def test_bounce_quadrature(): - """Test bounce integral matches elliptic integral.""" - p = 1e-4 - m = 1 - p - # Some prime number that doesn't appear anywhere in calculation. - # Ensures no lucky cancellation occurs from this test case since otherwise - # (bp2 - bp1) / pi = pi / (bp2 - bp1) which could mask errors since pi - # appears often in transformations. 
- v = 7 - bp1 = -np.pi / 2 * v - bp2 = -bp1 - knots = np.linspace(bp1, bp2, 50) - pitch = 1 + 50 * jnp.finfo(jnp.array(1.0).dtype).eps - - def b_field(knots): - b = np.clip(np.sin(knots / v) ** 2, 1e-7, 1) - db = np.sin(2 * knots / v) / v - return b, db - - b, db = b_field(knots) - - def test(f, truth, quad, rtol=1e-4): - bounce_integrate, _ = bounce_integral( - b, - b, - db, - knots, - quad[0], - automorphism=None, - check=True, - plot=True, - ) - result = bounce_integrate(f, [], pitch) - assert np.count_nonzero(result) == 1 - np.testing.assert_allclose(np.sum(result), truth, rtol=rtol) - - bounce_integrate, _ = bounce_integral(b, b, db, knots, quad[1], check=True) - result = bounce_integrate(f, [], pitch) - assert np.count_nonzero(result) == 1 - np.testing.assert_allclose(np.sum(result), truth, rtol=rtol) - - # sin automorphism still helps out chebyshev quadrature - bounce_integrate, _ = bounce_integral(b, b, db, knots, quad[2], check=True) - result = bounce_integrate(f, [], pitch) - assert np.count_nonzero(result) == 1 - np.testing.assert_allclose(np.sum(result), truth, rtol=rtol) +class TestBounceQuadrature: + """Test bounce quadrature accuracy.""" - def strong(B, pitch): - return 1 / jnp.sqrt(1 - pitch * m * B) + @staticmethod + def _mod_cheb_gauss(deg): + x, w = chebgauss(deg) + w /= chebweight(x) + return x, w - def weak(B, pitch): - return jnp.sqrt(1 - pitch * m * B) + @staticmethod + def _mod_chebu_gauss(deg): + x, w = roots_chebyu(deg) + w *= chebweight(x) + return x, w - x, w = chebgauss(30) - w /= chebweight(x) - test(strong, v * 2 * ellipkm1(p), [tanh_sinh(40), leggauss(25), (x, w)]) - x, w = roots_chebyu(10) - w *= chebweight(x) - test(weak, v * 2 * ellipe(m), [tanh_sinh(20), leggausslob(10), (x, w)]) + @pytest.mark.unit + @pytest.mark.parametrize( + "is_strong, quad, automorphism", + [ + (True, tanh_sinh(40), None), + (True, leggauss(25), "default"), + (False, tanh_sinh(20), None), + (False, leggausslob(10), "default"), + # sin automorphism still helps out chebyshev quadrature + (True, _mod_cheb_gauss(30), "default"), + (False, _mod_chebu_gauss(10), "default"), + ], + ) + def test_bounce_quadrature(self, is_strong, quad, automorphism): + """Test bounce integral matches elliptic integrals.""" + p = 1e-4 + m = 1 - p + # Some prime number that doesn't appear anywhere in calculation. + # Ensures no lucky cancellation occurs from this test case since otherwise + # (bp2 - bp1) / pi = pi / (bp2 - bp1) which could mask errors since pi + # appears often in transformations. + v = 7 + bp1 = -np.pi / 2 * v + bp2 = -bp1 + knots = np.linspace(bp1, bp2, 50) + pitch = 1 + 50 * jnp.finfo(jnp.array(1.0).dtype).eps + b = np.clip(np.sin(knots / v) ** 2, 1e-7, 1) + db = np.sin(2 * knots / v) / v + data = {"B^zeta": b, "B^zeta_z|r,a": db, "|B|": b, "|B|_z|r,a": db} + + if is_strong: + integrand = lambda B, pitch: 1 / jnp.sqrt(1 - pitch * m * B) + truth = v * 2 * ellipkm1(p) + else: + integrand = lambda B, pitch: jnp.sqrt(1 - pitch * m * B) + truth = v * 2 * ellipe(m) + kwargs = {} + if automorphism != "default": + kwargs["automorphism"] = automorphism + bounce_integrate, _ = bounce_integral(data, knots, quad, check=True, **kwargs) + result = bounce_integrate(integrand, [], pitch) + assert np.count_nonzero(result) == 1 + np.testing.assert_allclose(np.sum(result), truth, rtol=1e-4) @pytest.mark.unit @@ -484,11 +442,9 @@ def test_bounce_integral_checks(): def numerator(g_zz, B, pitch): f = (1 - pitch * B / 2) * g_zz - # You may need to clip and safediv to avoid nan gradient. 
return f / jnp.sqrt(1 - pitch * B) def denominator(B, pitch): - # You may need to clip and safediv to avoid nan gradient. return 1 / jnp.sqrt(1 - pitch * B) # Suppose we want to compute a bounce average of the function @@ -505,12 +461,19 @@ def denominator(B, pitch): eq, rho, alpha, knots, coordinates="raz", period=(np.inf, 2 * np.pi, np.inf) ) data = eq.compute( - ["B^zeta", "|B|", "|B|_z|r,a", "min_tz |B|", "max_tz |B|", "g_zz"], grid=grid + [ + "B^zeta", + "B^zeta_z|r,a", + "|B|", + "|B|_z|r,a", + "min_tz |B|", + "max_tz |B|", + "g_zz", + ], + grid=grid, ) bounce_integrate, spline = bounce_integral( - data["B^zeta"], - data["|B|"], - data["|B|_z|r,a"], + data, knots, check=True, plot=False, @@ -544,6 +507,49 @@ def denominator(B, pitch): print(pitch[:, i, j]) +@pytest.mark.unit +@pytest.mark.parametrize("func", [_interp_to_argmin_B_soft, _interp_to_argmin_B_hard]) +def test_interp_to_argmin_B(func): + """Test argmin interpolation.""" + + def f(z): + return np.cos(3 * z) * np.sin(2 * np.cos(z)) + np.cos(1.2 * z) + + def B(z): + return np.sin(3 * z) * np.cos(1 / (1 + z)) * np.cos(z**2) * z + + def dB_dz(z): + return ( + 3 * z * np.cos(3 * z) * np.cos(z**2) * np.cos(1 / (1 + z)) + - 2 * z**2 * np.sin(3 * z) * np.sin(z**2) * np.cos(1 / (1 + z)) + + z * np.sin(3 * z) * np.sin(1 / (1 + z)) * np.cos(z**2) / (1 + z) ** 2 + + np.sin(3 * z) * np.cos(z**2) * np.cos(1 / (1 + z)) + ) + + zeta = np.linspace(0, 3 * np.pi, 175) + _, spline = bounce_integral( + { + "B^zeta": np.ones_like(zeta), + "B^zeta_z|r,a": np.ones_like(zeta), + "|B|": B(zeta), + "|B|_z|r,a": dB_dz(zeta), + }, + zeta, + ) + argmin = 5.61719 + np.testing.assert_allclose( + f(argmin), + func( + f(zeta), + bp1=np.array(0, ndmin=3), + bp2=np.array(2 * np.pi, ndmin=3), + **spline, + method="cubic", + ), + rtol=1e-3, + ) + + @partial(np.vectorize, excluded={0}) def _adaptive_elliptic(integrand, k): a = 0 @@ -558,9 +564,9 @@ def _fixed_elliptic(integrand, k, deg): x, w = leggauss(deg) w = w * grad_automorphism_sin(x) x = automorphism_sin(x) - Z = affine_bijection(x, a[..., np.newaxis], b[..., np.newaxis]) + Z = bijection_from_disc(x, a[..., np.newaxis], b[..., np.newaxis]) k = k[..., np.newaxis] - quad = np.dot(integrand(Z, k), w) * grad_affine_bijection(a, b) + quad = np.dot(integrand(Z, k), w) * grad_bijection_from_disc(a, b) return quad @@ -650,6 +656,7 @@ def test_drift(): data = eq.compute( [ "B^zeta", + "B^zeta_z|r,a", "|B|", "|B|_z|r,a", "cvdrift", @@ -665,19 +672,16 @@ def test_drift(): ) np.testing.assert_allclose(data["psi"], psi) np.testing.assert_allclose(data["iota"], iota) - assert np.all(np.sign(data["B^zeta"]) > 0) + assert np.all(data["B^zeta"] > 0) data["iota"] = grid.compress(data["iota"]).item() data["shear"] = grid.compress(data["shear"]).item() - L_ref = data["a"] - B_ref = 2 * np.abs(psi_boundary) / L_ref**2 + B_ref = 2 * np.abs(psi_boundary) / data["a"] ** 2 bounce_integrate, _ = bounce_integral( - data["B^zeta"], - data["|B|"], - data["|B|_z|r,a"], + data, knots=zeta, B_ref=B_ref, - L_ref=L_ref, + L_ref=data["a"], quad=leggauss(28), # converges to absolute and relative tolerance of 1e-7 check=True, ) @@ -686,14 +690,14 @@ def test_drift(): B0 = np.mean(B) # epsilon should be changed to dimensionless, and computed in a way that # is independent of normalization length scales, like "effective r/R0". - epsilon = L_ref * rho # Aspect ratio of the flux surface. + epsilon = data["a"] * rho # Aspect ratio of the flux surface. 
np.testing.assert_allclose(epsilon, 0.05) theta_PEST = alpha + data["iota"] * zeta # same as 1 / (1 + epsilon cos(theta)) assuming epsilon << 1 B_analytic = B0 * (1 - epsilon * np.cos(theta_PEST)) np.testing.assert_allclose(B, B_analytic, atol=3e-3) - gradpar = L_ref * data["B^zeta"] / data["|B|"] + gradpar = data["a"] * data["B^zeta"] / data["|B|"] # This method of computing G0 suggests a fixed point iteration. G0 = data["a"] gradpar_analytic = G0 * (1 - epsilon * np.cos(theta_PEST)) @@ -702,7 +706,7 @@ def test_drift(): np.testing.assert_allclose(gradpar, gradpar_analytic, atol=5e-3) # Comparing coefficient calculation here with coefficients from compute/_metric - normalization = -np.sign(psi) * B_ref * L_ref**2 + normalization = -np.sign(psi) * B_ref * data["a"] ** 2 cvdrift = data["cvdrift"] * normalization gbdrift = data["gbdrift"] * normalization dPdrho = np.mean(-0.5 * (cvdrift - gbdrift) * data["|B|"] ** 2) @@ -768,19 +772,19 @@ def integrand_num(cvdrift, gbdrift, B, pitch): return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) def integrand_den(B, pitch): - return jnp.reciprocal(jnp.sqrt(1 - pitch * B)) + return 1 / jnp.sqrt(1 - pitch * B) drift_numerical_num = bounce_integrate( integrand=integrand_num, f=[cvdrift, gbdrift], pitch=pitch[:, np.newaxis], - num_wells=1, # don't need to specify but will reduce memory and improve speed + num_well=1, ) drift_numerical_den = bounce_integrate( integrand=integrand_den, f=[], pitch=pitch[:, np.newaxis], - num_wells=1, + num_well=1, weight=np.ones(zeta.size), ) @@ -791,105 +795,37 @@ def integrand_den(B, pitch): assert drift_numerical.size == drift_analytic.size, msg np.testing.assert_allclose(drift_numerical, drift_analytic, atol=5e-3, rtol=5e-2) + _test_bounce_autodiff( + bounce_integrate, + integrand_num, + f=[cvdrift, gbdrift], + weight=np.ones(zeta.size), + ) + fig, ax = plt.subplots() ax.plot(1 / pitch, drift_analytic) ax.plot(1 / pitch, drift_numerical) - - # Test if differentiable. 
- def dummy_fun(pitch): - return jnp.sum( - bounce_integrate( - integrand_num, [cvdrift, gbdrift], pitch, weight=np.ones(zeta.size) - ) - ) - - assert np.isclose(grad(dummy_fun)(1.0), 650, rtol=1e-3) - return fig -# TODO: upstream to interpax -@pytest.mark.unit -def test_interp_rfft(): - """Test FFT interpolation.""" - - def _interp_rfft(xq, f): - assert xq.ndim == f.ndim >= 1 - M = f.shape[-1] - a = rfft(f, norm="forward") - a = put(a, Index[..., 0], a[..., 0] / 2) - a = put(a, Index[..., -1], a[..., -1] / (1 + ((M % 2) == 0))) - - m = np.fft.rfftfreq(M, d=1 / M) - np.testing.assert_allclose(m, np.arange(M // 2 + 1), err_msg="rfftfreq wrong.") - - mx = m * xq[..., np.newaxis] - fq = 2 * ( - np.sum(np.cos(mx) * np.real(a), axis=-1) - - np.sum(np.sin(mx) * np.imag(a), axis=-1) - ) - return fq - - def test(xq, func, n): - x = np.linspace(0, 2 * np.pi, n, endpoint=False) - assert not np.any(np.isclose(xq[..., np.newaxis], x)) - f = func(x) - np.testing.assert_allclose(_interp_rfft(xq, f), func(xq)) - np.testing.assert_allclose(interp_rfft(xq, f), func(xq)) - - xq = np.array([7.34, 1.10134, 2.28]) - freq_nyquist = 7 - f = lambda x: np.cos(freq_nyquist * x) + np.sin(x) - test(xq, f, 2 * freq_nyquist) - test(xq, f, 2 * freq_nyquist + 1) +def _test_bounce_autodiff(bounce_integrate, integrand, **kwargs): + """Make sure reverse mode AD works correctly on this algorithm.""" + def fun1(pitch): + return jnp.sum(bounce_integrate(integrand, pitch=pitch, **kwargs)) -@pytest.mark.xfail(reason="Numpy, jax, and scipy need to fix bug on their end.") -@pytest.mark.unit -def test_interp_rfft2(): - """Test FFT interpolation.""" - - def test(xq, func, m, n): - x = np.linspace(0, 2 * np.pi, m, endpoint=False) - y = np.linspace(0, 2 * np.pi, n, endpoint=False) - assert not np.any(np.isclose(xq[..., 0, np.newaxis], x)) - assert not np.any(np.isclose(xq[..., 1, np.newaxis], y)) - x, y = map(np.ravel, list(np.meshgrid(x, y, indexing="ij"))) - np.testing.assert_allclose( - interp_rfft2( - xq, - jnp.array(func(x, y).reshape(m, n), ndmin=xq.ndim), - ), - func(xq[..., 0], xq[..., 1]), - ) + def fun2(pitch): + return jnp.sum(bounce_integrate(integrand_grad, pitch=pitch, **kwargs)) - def f(x, y): - # something that's not separable - return np.cos(x_freq * x) * np.sin(2 * x + y) + np.sin(y_freq * y) * np.cos( - x + 3 * y + def integrand_grad(*args, **kwargs2): + fun = jnp.vectorize( + grad(integrand, -1), signature="()," * len(kwargs["f"]) + "(),()->()" ) - - xq = np.array([[7.34, 1.10134, 2.28], [1.1, 3.78432, 8.542]]).T - x_freq, y_freq = 3, 5 - x_rate_nyquist, y_rate_nyquist = 2 * (x_freq + 2), 2 * (y_freq + 3) - test(xq, f, x_rate_nyquist + 1, y_rate_nyquist + 1) - # FIXME: Bug with numpy's computation of nyquist freq fourier coefficient. - test(xq, f, x_rate_nyquist, y_rate_nyquist) - - -# todo: -@pytest.mark.unit -def test_fcb_interp(): - """Test interpolation for this basis function.""" - M, N = 1, 5 - xy0 = FourierChebyshevBasis.nodes(M, N) - f0 = jnp.mean(xy0.reshape(M, N, 2), axis=-1) - fcb = FourierChebyshevBasis(f0, M, N) - f1 = fcb.evaluate(1, fcb.N * 10) - xy1 = FourierChebyshevBasis.nodes(1, fcb.N * 10) - - fig, ax = plt.subplots() - ax.plot(xy0[:, 1], f0[0, :], linestyle="--") - ax.plot(xy1[:, 1], f1[0, :], marker="x") - plt.show() - return fig + return fun(*args, *kwargs2.values()) + + pitch = 1.0 + truth = 650 # Extrapolated from plot. + assert np.isclose(grad(fun1)(pitch), truth, rtol=1e-3) + # Make sure bounce points get differentiated too. 
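    # ``fun2`` only differentiates the integrand with respect to pitch; it omits
    # the dependence of the bounce points on pitch, so its value should differ
    # from ``truth``, which was obtained by differentiating the full integral.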
+ result = fun2(pitch) + assert np.isfinite(result) and not np.isclose(result, truth, rtol=1e-3) diff --git a/tests/test_interp_utils.py b/tests/test_interp_utils.py new file mode 100644 index 0000000000..a20d556465 --- /dev/null +++ b/tests/test_interp_utils.py @@ -0,0 +1,296 @@ +"""Test interpolation utilities.""" + +import numpy as np +import pytest +from matplotlib import pyplot as plt +from numpy.polynomial.chebyshev import ( + cheb2poly, + chebinterpolate, + chebpts1, + chebpts2, + chebval, +) +from scipy.fft import dct as sdct +from scipy.fft import idct as sidct + +from desc.backend import dct as jdct +from desc.backend import idct as jidct +from desc.backend import jnp, rfft +from desc.compute._interp_utils import ( + cheb_from_dct, + cheb_pts, + harmonic, + harmonic_basis, + interp_dct, + interp_rfft, + interp_rfft2, + poly_root, +) +from desc.compute._quad_utils import bijection_to_disc +from desc.compute.bounce_integral import _filter_not_nan +from desc.compute.fourier_bounce_integral import FourierChebyshevBasis + + +@pytest.mark.unit +def test_poly_root(): + """Test vectorized computation of cubic polynomial exact roots.""" + cubic = 4 + c = np.arange(-24, 24).reshape(cubic, 6, -1) * np.pi + # make sure broadcasting won't hide error in implementation + assert np.unique(c.shape).size == c.ndim + constant = np.broadcast_to(np.arange(c.shape[-1]), c.shape[1:]) + constant = np.stack([constant, constant]) + root = poly_root(c, constant, sort=True) + + for i in range(constant.shape[0]): + for j in range(c.shape[1]): + for k in range(c.shape[2]): + d = c[-1, j, k] - constant[i, j, k] + np.testing.assert_allclose( + actual=root[i, j, k], + desired=np.sort(np.roots([*c[:-1, j, k], d])), + ) + + c = np.array( + [ + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [1, -1, -8, 12], + [1, -6, 11, -6], + [0, -6, 11, -2], + ] + ) + root = poly_root(c.T, sort=True, distinct=True) + for j in range(c.shape[0]): + unique_roots = np.unique(np.roots(c[j])) + root_filter = _filter_not_nan(root[j], check=True) + assert root_filter.size == unique_roots.size, j + np.testing.assert_allclose( + actual=root_filter, + desired=unique_roots, + err_msg=str(j), + ) + c = np.array([0, 1, -1, -8, 12]) + root = _filter_not_nan(poly_root(c, sort=True, distinct=True), check=True) + unique_root = np.unique(np.roots(c)) + assert root.size == unique_root.size + np.testing.assert_allclose(root, unique_root) + + +class TestInterp: + """Test RFFT and DCT interpolation.""" + + @pytest.mark.unit + @pytest.mark.parametrize("N", [2, 6, 7]) + def test_cheb_pts(self, N): + """Test we use Chebyshev points compatible with standard definition of DCT.""" + np.testing.assert_allclose(cheb_pts(N), chebpts1(N)[::-1], atol=1e-15) + np.testing.assert_allclose( + cheb_pts(N, lobatto=True, domain=(-np.pi, np.pi)), + np.pi * chebpts2(N)[::-1], + atol=1e-15, + ) + + @pytest.mark.unit + def test_rfftfreq(self): + """Test rfft frequency.""" + M = 8 + np.testing.assert_allclose(np.fft.rfftfreq(M, d=1 / M), np.arange(M // 2 + 1)) + M = 9 + np.testing.assert_allclose(np.fft.rfftfreq(M, d=1 / M), np.arange(M // 2 + 1)) + + @staticmethod + def _interp_rfft_harmonic(xq, f): + M = f.shape[-1] + fq = jnp.linalg.vecdot( + harmonic_basis(xq, M), harmonic(rfft(f, norm="forward"), M) + ) + return fq + + @staticmethod + def _f_1d(x): + """Test function for 1D FFT.""" + return np.cos(7 * x) + np.sin(x) - 33.2 + + @staticmethod + def _f_1d_nyquist_freq(): + return 7 + + @pytest.mark.unit + @pytest.mark.parametrize( + "func, n", + [ + 
(_f_1d, 2 * _f_1d_nyquist_freq() + 1), + (_f_1d, 2 * _f_1d_nyquist_freq()), + ], + ) + def test_interp_rfft(self, func, n): + """Test non-uniform FFT interpolation.""" + xq = np.array([7.34, 1.10134, 2.28]) + x = np.linspace(0, 2 * np.pi, n, endpoint=False) + assert not np.any(np.isclose(xq[..., np.newaxis], x)) + f, fq = func(x), func(xq) + np.testing.assert_allclose(self._interp_rfft_harmonic(xq, f), fq) + np.testing.assert_allclose(interp_rfft(xq, f), fq) + + @staticmethod + def _f_2d(x, y): + """Test function for 2D FFT.""" + x_freq, y_freq = 3, 5 + return ( + # something that's not separable + np.cos(x_freq * x) * np.sin(2 * x + y) + + np.sin(y_freq * y) * np.cos(x + 3 * y) + # DC terms + - 33.2 + + np.cos(x) + + np.cos(y) + ) + + @staticmethod + def _f_2d_nyquist_freq(): + x_freq_nyquist = 3 + 2 + y_freq_nyquist = 5 + 3 + return x_freq_nyquist, y_freq_nyquist + + @pytest.mark.xfail( + reason="Numpy, jax, and scipy need to fix bug with 2D FFT (fft2)." + ) + @pytest.mark.unit + @pytest.mark.parametrize( + "func, m, n", + [ + (_f_2d, 2 * _f_2d_nyquist_freq()[0] + 1, 2 * _f_2d_nyquist_freq()[1] + 1), + (_f_2d, 2 * _f_2d_nyquist_freq()[0], 2 * _f_2d_nyquist_freq()[1]), + ], + ) + def test_interp_rfft2(self, func, m, n): + """Test non-uniform FFT interpolation.""" + xq = np.array([[7.34, 1.10134, 2.28], [1.1, 3.78432, 8.542]]).T + x = np.linspace(0, 2 * np.pi, m, endpoint=False) + y = np.linspace(0, 2 * np.pi, n, endpoint=False) + assert not np.any(np.isclose(xq[..., 0, np.newaxis], x)) + assert not np.any(np.isclose(xq[..., 1, np.newaxis], y)) + x, y = map(np.ravel, list(np.meshgrid(x, y, indexing="ij"))) + np.testing.assert_allclose( + interp_rfft2(xq, func(x, y).reshape(m, n)), + func(xq[..., 0], xq[..., 1]), + ) + + @staticmethod + def _identity(x): + # Identity map known for bad Gibbs; + # only if distribution of spectral coefficients is correct will DCT + # recover Chebyshev interpolation, avoiding Gibbs and Runge. + return x + + @pytest.mark.unit + @pytest.mark.parametrize( + "f, M, lobatto", + [ + (_identity, 2, False), + (_identity, 3, False), + (_identity, 3, True), + (_identity, 4, True), + ], + ) + def test_dct(self, f, M, lobatto): + """Test discrete cosine transform interpolation. + + Parameters + ---------- + f : callable + Function to test. + M : int + Fourier spectral resolution. + lobatto : bool + Whether ``f`` should be sampled on the Gauss-Lobatto (extrema-plus-endpoint) + or interior roots grid for Chebyshev points. + + """ + # Want to unit test external code used in Fourier Chebyshev interpolation + # due to issues like + # https://github.com/scipy/scipy/issues/15033 + # https://github.com/scipy/scipy/issues/21198 + # https://github.com/google/jax/issues/22466, + domain = (0, 2 * np.pi) + m = cheb_pts(M, lobatto, domain) + n = cheb_pts(m.size * 10, lobatto, domain) + norm = (n.size - lobatto) / (m.size - lobatto) + + dct_type = 2 - lobatto + fq_1 = np.sqrt(norm) * sidct( + sdct(f(m), type=dct_type, norm="ortho", orthogonalize=False), + type=dct_type, + n=n.size, + norm="ortho", + orthogonalize=False, + ) + if lobatto: + # JAX has yet to implement type 1 DCT. + fq_2 = norm * sidct(sdct(f(m), type=dct_type), n=n.size, type=dct_type) + else: + fq_2 = norm * jidct(jdct(f(m), type=dct_type), n=n.size, type=dct_type) + np.testing.assert_allclose(fq_1, f(n), atol=1e-14) + # JAX is much less accurate than scipy. 
+ np.testing.assert_allclose(fq_2, f(n), atol=1e-6) + + fig, ax = plt.subplots() + ax.scatter(m, f(m)) + ax.plot(n, fq_1) + ax.plot(n, fq_2) + return fig + + @staticmethod + def _f_non_periodic(z): + return np.sin(np.sqrt(2) * z) * np.cos(1 / (2 + z)) * np.cos(z**2) * z + + @staticmethod + def _f_algebraic(z): + return z**3 - 10 * z**6 - z - np.e + z**4 + + @pytest.mark.unit + @pytest.mark.parametrize( + "f, M", + [(_f_non_periodic, 5), (_f_non_periodic, 6), (_f_algebraic, 7)], + ) + def test_interp_dct(self, f, M): + """Test non-uniform DCT interpolation.""" + c0 = chebinterpolate(f, M - 1) + assert not np.allclose(c0, cheb_from_dct(jdct(f(chebpts1(M)), 2) / M)), ( + "Interpolation should fail because cosine basis is in different domain. " + "Use better test function." + ) + # test interpolation + z = cheb_pts(M) + fz = f(z) + np.testing.assert_allclose(c0, cheb_from_dct(jdct(fz, 2) / M), atol=1e-13) + if np.allclose(self._f_algebraic(z), fz): + np.testing.assert_allclose( + cheb2poly(c0), np.array([-np.e, -1, 0, 1, 1, 0, -10]), atol=1e-13 + ) + # test evaluation + xq = np.arange(10 * 3 * 2).reshape(10, 3, 2) + xq = bijection_to_disc(xq, 0, xq.size) + fq = chebval(xq, c0, tensor=False) + np.testing.assert_allclose(fq, interp_dct(xq, fz), atol=1e-13) + + +# todo: +@pytest.mark.unit +def test_fcb_interp(): + """Test interpolation for this basis function.""" + M, N = 1, 5 + xy0 = FourierChebyshevBasis.nodes(M, N) + f0 = jnp.mean(xy0.reshape(M, N, 2), axis=-1) + fcb = FourierChebyshevBasis(f0, M, N) + f1 = fcb.evaluate(1, fcb.N * 10) + xy1 = FourierChebyshevBasis.nodes(1, fcb.N * 10) + + fig, ax = plt.subplots() + ax.plot(xy0[:, 1], f0[0, :], linestyle="--") + ax.plot(xy1[:, 1], f1[0, :], marker="x") + plt.show() + return fig From 672b163f2da114e62af5be63fb6d59b733b3dcd5 Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 15 Aug 2024 12:06:09 -0400 Subject: [PATCH 203/241] Making progress on tests --- desc/compute/_interp_utils.py | 32 ++++++------ desc/compute/bounce_integral.py | 8 ++- desc/compute/fourier_bounce_integral.py | 65 +++++++++++++++---------- tests/test_bounce_integral.py | 28 +++-------- tests/test_fourier_bounce.py | 54 ++++++++++++++++++++ tests/test_interp_utils.py | 57 +++++++--------------- 6 files changed, 141 insertions(+), 103 deletions(-) create mode 100644 tests/test_fourier_bounce.py diff --git a/desc/compute/_interp_utils.py b/desc/compute/_interp_utils.py index 772829c33e..eda5d0b4d5 100644 --- a/desc/compute/_interp_utils.py +++ b/desc/compute/_interp_utils.py @@ -66,15 +66,14 @@ def harmonic(a, M, axis=-1): is_even = (M % 2) == 0 # cos(mx) coefficients an = 2.0 * ( - jnp.real(a) - .at[Index.get(0, axis, a.ndim)] + a.real.at[Index.get(0, axis, a.ndim)] .divide(2.0) .at[Index.get(-1, axis, a.ndim)] .divide(1.0 + is_even) ) # sin(mx) coefficients bn = -2.0 * take( - jnp.imag(a), + a.imag, jnp.arange(1, a.shape[axis] - is_even), axis, unique_indices=True, @@ -178,7 +177,8 @@ def irfft_non_uniform(xq, a, n, axis=-1): a = jnp.swapaxes(a[..., jnp.newaxis], axis % a.ndim, -1) m = jnp.fft.rfftfreq(n, d=1 / n) basis = jnp.exp(-1j * m * xq[..., jnp.newaxis]) - fq = jnp.real(jnp.linalg.vecdot(basis, a)) + fq = jnp.linalg.vecdot(basis, a).real + # TODO: Test JAX does this optimization automatically. 
# ℜ〈 basis, a 〉= cos(m xq)⋅ℜ(a) − sin(m xq)⋅ℑ(a) return fq @@ -260,17 +260,20 @@ def irfft2_non_uniform(xq, a, M, N, axes=(-2, -1)): ) ).reshape(*xq.shape[:-1], m.size * n.size) - fq = jnp.real(jnp.linalg.vecdot(basis, a)) + fq = jnp.linalg.vecdot(basis, a).real return fq def cheb_from_dct(a, axis=-1): - """Get Chebyshev coefficients from DCT. + """Get Discrete Chebyshev Transform from Discrete Cosine Transform. Parameters ---------- a : jnp.ndarray - DCT coefficients ``a=dct(f,type=2,axis=axis,norm="forward")``. + Discrete Cosine Transform coefficients, e.g. + ``a=dct(f,type=2,axis=axis,norm="forward")``. + The discrete cosine transformation used by scipy is defined here. + docs.scipy.org/doc/scipy/reference/generated/scipy.fft.dct.html#scipy.fft.dct axis : int Axis along which to transform. @@ -280,14 +283,12 @@ def cheb_from_dct(a, axis=-1): Chebyshev coefficients along ``axis``. """ - # See link below for DCT definition. - # docs.scipy.org/doc/scipy/reference/generated/scipy.fft.dct.html#scipy.fft.dct cheb = a.copy().at[Index.get(0, axis, a.ndim)].divide(2.0) return cheb def interp_dct(xq, f, lobatto=False, axis=-1): - """Interpolate ``f`` to ``xq`` with DCT. + """Interpolate ``f`` to ``xq`` with Discrete Chebyshev Transform. Parameters ---------- @@ -317,7 +318,7 @@ def interp_dct(xq, f, lobatto=False, axis=-1): def idct_non_uniform(xq, a, n, axis=-1): - """Evaluate DCT coefficients ``a`` at ``xq`` ∈ [-1, 1]. + """Evaluate Discrete Cosine Transform coefficients ``a`` at ``xq`` ∈ [-1, 1]. Parameters ---------- @@ -325,7 +326,10 @@ def idct_non_uniform(xq, a, n, axis=-1): Real query points where interpolation is desired. Shape of ``xq`` must broadcast with ``a`` except along ``axis``. a : jnp.ndarray - DCT coefficients. + Discrete Cosine Transform coefficients, e.g. + ``a=dct(f,type=2,axis=axis,norm="forward")``. + The discrete cosine transformation used by scipy is defined here. + docs.scipy.org/doc/scipy/reference/generated/scipy.fft.dct.html#scipy.fft.dct n : int Spectral resolution of ``a``. axis : int @@ -512,9 +516,7 @@ def poly_root( a_min = -jnp.inf if a_min is None else a_min[..., jnp.newaxis] a_max = +jnp.inf if a_max is None else a_max[..., jnp.newaxis] r = jnp.where( - (jnp.abs(jnp.imag(r)) <= eps) & (a_min <= r) & (r <= a_max), - jnp.real(r), - sentinel, + (jnp.abs(r.imag) <= eps) & (a_min <= r) & (r <= a_max), r.real, sentinel ) if sort or distinct: diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index f2c2092581..3e5d4ba996 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -814,6 +814,11 @@ def loop(bp): return result +def required_names(): + """Return names in ``data_index`` required to compute bounce integrals.""" + return ["B^zeta", "B^zeta_z|r,a", "|B|", "|B|_z|r,a"] + + def bounce_integral( data, knots, @@ -853,7 +858,7 @@ def bounce_integral( data : dict of jnp.ndarray Data evaluated on grid. Shape (S * knots.size, ) or (S, knots.size). - Should contain ``B^zeta``, ``B^zeta_z|r,a``, ``|B|``, and ``|B|_z|r,a``. + Should contain all names in ``required_names()``. knots : jnp.ndarray Shape (knots.size, ). Field line following coordinate values where arrays in ``data`` and ``f`` @@ -1067,7 +1072,6 @@ def _interp_to_argmin_B_hard(f, bp1, bp2, knots, B_c, B_z_ra_c, method): """Compute ``f`` at deepest point in the magnetic well. Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A ∈ argmin_E |B|(ζ). Returns f(A). 
- """ ext, B = _get_extrema(knots, B_c, B_z_ra_c, sentinel=0) assert ext.shape[0] == B.shape[0] == bp1.shape[1] == bp2.shape[1] diff --git a/desc/compute/fourier_bounce_integral.py b/desc/compute/fourier_bounce_integral.py index 341bcbf3c9..b46a7f5bac 100644 --- a/desc/compute/fourier_bounce_integral.py +++ b/desc/compute/fourier_bounce_integral.py @@ -36,7 +36,7 @@ def _flatten_matrix(y): def _alpha_sequence(alpha_0, iota, num_period, period=2 * jnp.pi): - """Get sequence of poloidal coordinates (α₀, α₁, …, αₘ₋₁) of field line. + """Get sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) of field line. Parameters ---------- @@ -52,15 +52,14 @@ def _alpha_sequence(alpha_0, iota, num_period, period=2 * jnp.pi): Returns ------- - alpha : jnp.ndarray - Shape (iota.size, m). - Sequence of poloidal coordinates (α₀, α₁, …, αₘ₋₁) that specify field line. + alphas : jnp.ndarray + Shape (iota.size, num_period). + Sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) that specify field line. """ # Δϕ (∂α/∂ϕ) = Δϕ ι̅ = Δϕ ι/2π = Δϕ data["iota"] - return (alpha_0 + period * iota[:, jnp.newaxis] * jnp.arange(num_period)) % ( - 2 * jnp.pi - ) + alphas = alpha_0 + period * iota[:, jnp.newaxis] * jnp.arange(num_period) + return alphas class FourierChebyshevBasis: @@ -139,7 +138,9 @@ def nodes(M, N, lobatto=False, domain=(0, 2 * jnp.pi), **kwargs): """ x = fourier_pts(M) y = cheb_pts(N, lobatto, domain) - coords = [kwargs.pop("rho"), x, y] if "rho" in kwargs else [x, y] + coords = ( + [jnp.atleast_1d(kwargs.pop("rho")), x, y] if "rho" in kwargs else [x, y] + ) coords = list(map(jnp.ravel, jnp.meshgrid(*coords, indexing="ij"))) coords = jnp.column_stack(coords) return coords @@ -205,7 +206,10 @@ def compute_cheb(self, x): # Always add new axis to broadcast against Chebyshev coefficients. x = jnp.atleast_1d(x)[..., jnp.newaxis] cheb = cheb_from_dct(irfft_non_uniform(x, self._c, self.M, axis=-2), axis=-1) - assert cheb.shape[-2:] == (x.shape[-1], self.N) + assert cheb.shape[-2:] == ( + x.shape[-2], + self.N, + ), f"{cheb.shape}; {x.shape}; {self.N}" return _PiecewiseChebyshevBasis(cheb, self.domain) @@ -283,8 +287,8 @@ def intersect(self, k=0, eps=_eps): y = _filter_distinct(y, sentinel=-2, eps=eps) # Pick sentinel above such that only distinct roots are considered intersects. - is_intersect = (jnp.abs(jnp.imag(y)) <= eps) & (jnp.abs(jnp.real(y)) <= 1) - y = jnp.where(is_intersect, jnp.real(y), 0) # ensure y is in domain of arcos + is_intersect = (jnp.abs(y.imag) <= eps) & (jnp.abs(y.real) <= 1) + y = jnp.where(is_intersect, y.real, 0) # ensure y is in domain of arcos # ∂f/∂y = ∑ₙ₌₀ᴺ⁻¹ aₙ(x) n Uₙ₋₁(y) # sign ∂f/∂y = sign ∑ₙ₌₁ᴺ⁻¹ aₙ(x) sin(n arcos y) s = jnp.linalg.vecdot( @@ -638,10 +642,12 @@ def _bounce_quadrature(bp1, bp2, x, w, m, n, integrand, f, b_sup_z, B, T, pitch) b_sup_z : jnp.ndarray Shape (L, 1, m, n). Set of 2D Fourier spectral coefficients of B^ζ/|B|. - B : jnp.ndarray + B : _PiecewiseChebyshevBasis Set of 1D Chebyshev spectral coefficients of |B| along field line. - T : jnp.ndarray + {|B|_α : ζ |B|(α, ζ) | α ∈ A } . + T : _PiecewiseChebyshevBasis Set of 1D Chebyshev spectral coefficients of θ along field line. + {θ_α : ζ θ(α, ζ) | α ∈ A }. pitch : jnp.ndarray Shape (P, L, 1). λ values to evaluate the bounce integral at each field line. 
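# A minimal, self-contained sketch (illustrative, not part of this patch) of the
# quadrature step documented above: fixed Gauss-Legendre nodes x in [-1, 1] with
# weights w are mapped affinely onto a bounce interval [bp1, bp2], mirroring
# ``bijection_from_disc`` and ``grad_bijection_from_disc``. The helper name
# ``quad_between`` is hypothetical.
import numpy as np
from numpy.polynomial.legendre import leggauss


def quad_between(fun, bp1, bp2, deg=21):
    """Integrate ``fun`` over [bp1, bp2] with fixed Gauss-Legendre quadrature."""
    x, w = leggauss(deg)
    # Affine bijection from [-1, 1] to [bp1, bp2]; its Jacobian is (bp2 - bp1) / 2.
    z = 0.5 * (bp2 - bp1) * x + 0.5 * (bp2 + bp1)
    return np.sum(fun(z) * w) * 0.5 * (bp2 - bp1)


# e.g. the integral of sin(z) over [0, pi] is 2
np.testing.assert_allclose(quad_between(np.sin, 0.0, np.pi), 2.0)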
@@ -680,18 +686,20 @@ def _bounce_quadrature(bp1, bp2, x, w, m, n, integrand, f, b_sup_z, B, T, pitch) assert result.shape == (P, L, num_well) +def required_names(): + """Return names in ``data_index`` required to compute bounce integrals.""" + return ["B^zeta", "|B|"] + + # TODO: Assumes zeta = phi -# input is -# that clebsch = FourierChebyshevBasis.nodes(M, N, rho=grid.compress(data["rho"])) -# then get desc_from_clebsch = map_coordinates(clebsch) def bounce_integral( grid, data, M, N, desc_from_clebsch, - alpha_0, - num_transit, + alpha_0=0.0, + num_transit=50, quad=leggauss(21), automorphism=(automorphism_sin, grad_automorphism_sin), B_ref=1.0, @@ -731,7 +739,7 @@ def bounce_integral( desc_from_clebsch : jnp.ndarray Shape (L * M * N, 3). DESC coordinate grid (ρ, θ, ζ) sourced from the Clebsch coordinate - tensor-product grid (ρ, α, ζ) returned by ``FourierChebyshevBasis.nodes(M, N)``. + tensor-product grid (ρ, α, ζ) returned by ``FourierChebyshevBasis.nodes(M,N)``. alpha_0 : float Starting field line poloidal label. TODO: Allow multiple starting labels for near-rational surfaces. @@ -757,7 +765,14 @@ def bounce_integral( bounce_integrate : callable This callable method computes the bounce integral ∫ f(ℓ) dℓ for every specified field line for every λ value in ``pitch``. - + alphas : jnp.ndarray + Sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) that specify field line. + B : _PiecewiseChebyshevBasis + Set of 1D Chebyshev spectral coefficients of |B| along field line. + {|B|_α : ζ |B|(α, ζ) | α ∈ A } . + T : _PiecewiseChebyshevBasis + Set of 1D Chebyshev spectral coefficients of θ along field line. + {θ_α : ζ θ(α, ζ) | α ∈ A }. """ # Resolution of periodic DESC coordinate tensor-product grid. L, m, n = grid.num_rho, grid.num_theta, grid.num_zeta @@ -783,9 +798,9 @@ def bounce_integral( ).reshape(L, M, N), ) # Peel off field lines. - alpha = _alpha_sequence(alpha_0, grid.compress(data["iota"]), num_transit) - T = T.compute_cheb(alpha) - B = B.compute_cheb(alpha) + alphas = _alpha_sequence(alpha_0, grid.compress(data["iota"]), num_transit) + T = T.compute_cheb(alphas) + B = B.compute_cheb(alphas) assert T.cheb.shape == B.cheb.shape == (L, num_transit, N) x, w = quad @@ -849,7 +864,7 @@ def bounce_integrate(integrand, f, pitch, weight=None, num_well=None): # Compute bounce points. 
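        # For a given pitch λ, bounce points are the ordered pairs (ζ_b1, ζ_b2)
        # where the interpolant of |B| crosses 1/λ with ∂|B|/∂ζ ≤ 0 at ζ_b1 and
        # ∂|B|/∂ζ ≥ 0 at ζ_b2, so that 1 − λ|B| ≥ 0 on the integration interval.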
pitch = jnp.atleast_3d(pitch) P = pitch.shape[0] - assert pitch.shape[1:] == B.cheb.shape[:-1] + assert pitch.shape[1:] == B.cheb.shape[:-1], f"{pitch.shape}; {B.cheb.shape}" bp1, bp2 = B.bounce_points(*B.intersect(1 / pitch), num_well) num_well = bp1.shape[-1] assert bp1.shape == bp2.shape == (P, L, num_well) @@ -859,4 +874,4 @@ def bounce_integrate(integrand, f, pitch, weight=None, num_well=None): ) return result - return bounce_integrate + return bounce_integrate, (alphas, B, T) diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 365b5ab65d..513d3c31f4 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -39,6 +39,7 @@ bounce_points, get_pitch, plot_field_line, + required_names, ) from desc.compute.utils import dot, take_mask from desc.equilibrium import Equilibrium @@ -461,23 +462,10 @@ def denominator(B, pitch): eq, rho, alpha, knots, coordinates="raz", period=(np.inf, 2 * np.pi, np.inf) ) data = eq.compute( - [ - "B^zeta", - "B^zeta_z|r,a", - "|B|", - "|B|_z|r,a", - "min_tz |B|", - "max_tz |B|", - "g_zz", - ], - grid=grid, + required_names() + ["min_tz |B|", "max_tz |B|", "g_zz"], grid=grid ) bounce_integrate, spline = bounce_integral( - data, - knots, - check=True, - plot=False, - quad=leggauss(3), # not checking quadrature accuracy in this test + data, knots, check=True, plot=False, quad=leggauss(3) ) pitch = get_pitch( grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 @@ -510,8 +498,9 @@ def denominator(B, pitch): @pytest.mark.unit @pytest.mark.parametrize("func", [_interp_to_argmin_B_soft, _interp_to_argmin_B_hard]) def test_interp_to_argmin_B(func): - """Test argmin interpolation.""" + """Test argmin interpolation.""" # noqa: D202 + # Test functions chosen with purpose; don't change unless plotted and compared. def f(z): return np.cos(3 * z) * np.sin(2 * np.cos(z)) + np.cos(1.2 * z) @@ -654,11 +643,8 @@ def test_drift(): ) data = eq.compute( - [ - "B^zeta", - "B^zeta_z|r,a", - "|B|", - "|B|_z|r,a", + required_names() + + [ "cvdrift", "gbdrift", "grad(psi)", diff --git a/tests/test_fourier_bounce.py b/tests/test_fourier_bounce.py new file mode 100644 index 0000000000..1a01ea970b --- /dev/null +++ b/tests/test_fourier_bounce.py @@ -0,0 +1,54 @@ +"""Test interpolation to Clebsch coordinates and Fourier bounce integration.""" + +import numpy as np +import pytest + +from desc.compute.bounce_integral import get_pitch +from desc.compute.fourier_bounce_integral import ( + FourierChebyshevBasis, + _alpha_sequence, + bounce_integral, + required_names, +) +from desc.equilibrium.coords import map_coordinates +from desc.examples import get +from desc.grid import LinearGrid + + +@pytest.mark.unit +@pytest.mark.parametrize( + "alpha_0, iota, num_period, period", + [(0, np.sqrt(2), 1, 2 * np.pi), (0, np.arange(1, 3) * np.sqrt(2), 5, 2 * np.pi)], +) +def test_alpha_sequence(alpha_0, iota, num_period, period): + """Test field line poloidal label tracking utility.""" + iota = np.atleast_1d(iota) + alphas = _alpha_sequence(alpha_0, iota, num_period, period) + assert alphas.shape == (iota.size, num_period) + for i in range(iota.size): + assert np.unique(alphas[i]).size == num_period, "Is iota irrational?" 
+ print(alphas) + + +@pytest.mark.unit +def test_fourier_chebyshev(rho=1, M=8, N=32, f=lambda x: x): + """Test bounce points...""" + eq = get("W7-X") + clebsch = FourierChebyshevBasis.nodes(M, N, rho=rho) + desc_from_clebsch = map_coordinates( + eq, + clebsch, + inbasis=("rho", "alpha", "zeta"), + period=(np.inf, 2 * np.pi, np.inf), + ) + grid = LinearGrid( + rho=rho, M=eq.M_grid, N=eq.N_grid, sym=False, NFP=eq.NFP + ) # check if NFP!=1 works + data = eq.compute(names=required_names() + ["min_tz |B|", "max_tz |B|"], grid=grid) + bounce_integrate, _ = bounce_integral( + grid, data, M, N, desc_from_clebsch, check=True, warn=False + ) # TODO check true + pitch = get_pitch( + grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 + ) + result = bounce_integrate(f, [], pitch) # noqa: F841 diff --git a/tests/test_interp_utils.py b/tests/test_interp_utils.py index a20d556465..7805641579 100644 --- a/tests/test_interp_utils.py +++ b/tests/test_interp_utils.py @@ -13,9 +13,7 @@ from scipy.fft import dct as sdct from scipy.fft import idct as sidct -from desc.backend import dct as jdct -from desc.backend import idct as jidct -from desc.backend import jnp, rfft +from desc.backend import dct, idct, jnp, rfft from desc.compute._interp_utils import ( cheb_from_dct, cheb_pts, @@ -28,7 +26,6 @@ ) from desc.compute._quad_utils import bijection_to_disc from desc.compute.bounce_integral import _filter_not_nan -from desc.compute.fourier_bounce_integral import FourierChebyshevBasis @pytest.mark.unit @@ -79,8 +76,8 @@ def test_poly_root(): np.testing.assert_allclose(root, unique_root) -class TestInterp: - """Test RFFT and DCT interpolation.""" +class TestFastInterp: + """Test fast interpolation.""" @pytest.mark.unit @pytest.mark.parametrize("N", [2, 6, 7]) @@ -94,11 +91,9 @@ def test_cheb_pts(self, N): ) @pytest.mark.unit - def test_rfftfreq(self): - """Test rfft frequency.""" - M = 8 - np.testing.assert_allclose(np.fft.rfftfreq(M, d=1 / M), np.arange(M // 2 + 1)) - M = 9 + @pytest.mark.parametrize("M", [1, 8, 9]) + def test_rfftfreq(self, M): + """Make sure numpy uses Nyquist interpolant frequencies.""" np.testing.assert_allclose(np.fft.rfftfreq(M, d=1 / M), np.arange(M // 2 + 1)) @staticmethod @@ -186,6 +181,14 @@ def _identity(x): # recover Chebyshev interpolation, avoiding Gibbs and Runge. return x + @staticmethod + def _f_non_periodic(z): + return np.sin(np.sqrt(2) * z) * np.cos(1 / (2 + z)) * np.cos(z**2) * z + + @staticmethod + def _f_algebraic(z): + return z**3 - 10 * z**6 - z - np.e + z**4 + @pytest.mark.unit @pytest.mark.parametrize( "f, M, lobatto", @@ -232,7 +235,7 @@ def test_dct(self, f, M, lobatto): # JAX has yet to implement type 1 DCT. fq_2 = norm * sidct(sdct(f(m), type=dct_type), n=n.size, type=dct_type) else: - fq_2 = norm * jidct(jdct(f(m), type=dct_type), n=n.size, type=dct_type) + fq_2 = norm * idct(dct(f(m), type=dct_type), n=n.size, type=dct_type) np.testing.assert_allclose(fq_1, f(n), atol=1e-14) # JAX is much less accurate than scipy. 
np.testing.assert_allclose(fq_2, f(n), atol=1e-6) @@ -243,14 +246,6 @@ def test_dct(self, f, M, lobatto): ax.plot(n, fq_2) return fig - @staticmethod - def _f_non_periodic(z): - return np.sin(np.sqrt(2) * z) * np.cos(1 / (2 + z)) * np.cos(z**2) * z - - @staticmethod - def _f_algebraic(z): - return z**3 - 10 * z**6 - z - np.e + z**4 - @pytest.mark.unit @pytest.mark.parametrize( "f, M", @@ -259,14 +254,14 @@ def _f_algebraic(z): def test_interp_dct(self, f, M): """Test non-uniform DCT interpolation.""" c0 = chebinterpolate(f, M - 1) - assert not np.allclose(c0, cheb_from_dct(jdct(f(chebpts1(M)), 2) / M)), ( + assert not np.allclose(c0, cheb_from_dct(dct(f(chebpts1(M)), 2) / M)), ( "Interpolation should fail because cosine basis is in different domain. " "Use better test function." ) # test interpolation z = cheb_pts(M) fz = f(z) - np.testing.assert_allclose(c0, cheb_from_dct(jdct(fz, 2) / M), atol=1e-13) + np.testing.assert_allclose(c0, cheb_from_dct(dct(fz, 2) / M), atol=1e-13) if np.allclose(self._f_algebraic(z), fz): np.testing.assert_allclose( cheb2poly(c0), np.array([-np.e, -1, 0, 1, 1, 0, -10]), atol=1e-13 @@ -276,21 +271,3 @@ def test_interp_dct(self, f, M): xq = bijection_to_disc(xq, 0, xq.size) fq = chebval(xq, c0, tensor=False) np.testing.assert_allclose(fq, interp_dct(xq, fz), atol=1e-13) - - -# todo: -@pytest.mark.unit -def test_fcb_interp(): - """Test interpolation for this basis function.""" - M, N = 1, 5 - xy0 = FourierChebyshevBasis.nodes(M, N) - f0 = jnp.mean(xy0.reshape(M, N, 2), axis=-1) - fcb = FourierChebyshevBasis(f0, M, N) - f1 = fcb.evaluate(1, fcb.N * 10) - xy1 = FourierChebyshevBasis.nodes(1, fcb.N * 10) - - fig, ax = plt.subplots() - ax.plot(xy0[:, 1], f0[0, :], linestyle="--") - ax.plot(xy1[:, 1], f1[0, :], marker="x") - plt.show() - return fig From ff991cc89b0b579a3e26dd1aa4b715912c355350 Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 15 Aug 2024 17:25:29 -0400 Subject: [PATCH 204/241] Replace einsum with vandermode matrix --- desc/compute/_interp_utils.py | 71 +++++++++++++++++++++++++- desc/compute/_quad_utils.py | 4 +- desc/compute/bounce_integral.py | 71 ++------------------------ tests/test_bounce_integral.py | 89 ++------------------------------- tests/test_compute_utils.py | 37 +++++++++++++- tests/test_interp_utils.py | 43 ++++++++++++++++ 6 files changed, 159 insertions(+), 156 deletions(-) diff --git a/desc/compute/_interp_utils.py b/desc/compute/_interp_utils.py index eda5d0b4d5..4fa1ec327a 100644 --- a/desc/compute/_interp_utils.py +++ b/desc/compute/_interp_utils.py @@ -3,6 +3,7 @@ from functools import partial from orthax.chebyshev import chebvander +from orthax.polynomial import polyvander from desc.backend import dct, jnp, rfft, rfft2, take from desc.compute._quad_utils import bijection_from_disc @@ -516,9 +517,77 @@ def poly_root( a_min = -jnp.inf if a_min is None else a_min[..., jnp.newaxis] a_max = +jnp.inf if a_max is None else a_max[..., jnp.newaxis] r = jnp.where( - (jnp.abs(r.imag) <= eps) & (a_min <= r) & (r <= a_max), r.real, sentinel + # Order operations default to real part on complex numbers. + (jnp.abs(r.imag) <= eps) & (a_min <= r) & (r <= a_max), + r.real, + sentinel, ) if sort or distinct: r = jnp.sort(r, axis=-1) return _filter_distinct(r, sentinel, eps) if distinct else r + + +def polyder_vec(c): + """Coefficients for the derivatives of the given set of polynomials. + + Parameters + ---------- + c : jnp.ndarray + First axis should store coefficients of a polynomial. 
For a polynomial given by + ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0]-1``, coefficient cᵢ should be stored at + ``c[n-i]``. + + Returns + ------- + poly : jnp.ndarray + Coefficients of polynomial derivative, ignoring the arbitrary constant. That is, + ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁻¹, where n is + ``c.shape[0]-1``. + + """ + poly = (c[:-1].T * jnp.arange(c.shape[0] - 1, 0, -1)).T + return poly + + +def polyval_vec(x, c): + """Evaluate the set of polynomials ``c`` at the points ``x``. + + Note this function is not the same as ``np.polynomial.polynomial.polyval(x,c)``. + + Parameters + ---------- + x : jnp.ndarray + Real coordinates at which to evaluate the set of polynomials. + c : jnp.ndarray + First axis should store coefficients of a polynomial. For a polynomial given by + ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0]-1``, coefficient cᵢ should be stored at + ``c[n-i]``. + + Returns + ------- + val : jnp.ndarray + Polynomial with given coefficients evaluated at given points. + + Examples + -------- + .. code-block:: python + + val = _poly_val(x, c) + if val.ndim != max(x.ndim, c.ndim - 1): + raise ValueError(f"Incompatible shapes {x.shape} and {c.shape}.") + for index in np.ndindex(c.shape[1:]): + idx = (..., *index) + np.testing.assert_allclose( + actual=val[idx], + desired=np.poly1d(c[idx])(x[idx]), + err_msg=f"Failed with shapes {x.shape} and {c.shape}.", + ) + + """ + # Better than Horner's method as we expect to evaluate low order polynomials. + # No need to use fast multipoint evaluation techniques for the same reason. + val = jnp.linalg.vecdot( + polyvander(x, c.shape[0] - 1), jnp.moveaxis(jnp.flipud(c), 0, -1) + ) + return val diff --git a/desc/compute/_quad_utils.py b/desc/compute/_quad_utils.py index 347207ba34..8c4ab9ff77 100644 --- a/desc/compute/_quad_utils.py +++ b/desc/compute/_quad_utils.py @@ -2,7 +2,7 @@ from orthax.legendre import legder, legval -from desc.backend import eigh_tridiagonal, jnp +from desc.backend import eigh_tridiagonal, jnp, put from desc.utils import errorif @@ -166,7 +166,7 @@ def leggausslob(deg): jnp.sqrt((n**2 - 1) / (4 * n**2 - 1)), eigvals_only=True, ) - c0 = jnp.zeros(deg).at[-1].set(1) + c0 = put(jnp.zeros(deg), -1, 1) # improve (single multiplicity) roots by one application of Newton c = legder(c0) diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 3e5d4ba996..94036dae70 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -9,7 +9,7 @@ from orthax.legendre import leggauss from desc.backend import flatnonzero, imap, jnp, put -from desc.compute._interp_utils import poly_root +from desc.compute._interp_utils import poly_root, polyder_vec, polyval_vec from desc.compute._quad_utils import ( automorphism_sin, bijection_from_disc, @@ -36,69 +36,6 @@ def _filter_nonzero_measure(bp1, bp2): return bp1[mask], bp2[mask] -def _poly_der(c): - """Coefficients for the derivatives of the given set of polynomials. - - Parameters - ---------- - c : jnp.ndarray - First axis should store coefficients of a polynomial. For a polynomial given by - ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0]-1``, coefficient cᵢ should be stored at - ``c[n-i]``. - - Returns - ------- - poly : jnp.ndarray - Coefficients of polynomial derivative, ignoring the arbitrary constant. That is, - ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁻¹, where n is - ``c.shape[0]-1``. 
- - """ - poly = (c[:-1].T * jnp.arange(c.shape[0] - 1, 0, -1)).T - return poly - - -def _poly_val(x, c): - """Evaluate the set of polynomials ``c`` at the points ``x``. - - Note this function is not the same as ``np.polynomial.polynomial.polyval(x,c)``. - - Parameters - ---------- - x : jnp.ndarray - Coordinates at which to evaluate the set of polynomials. - c : jnp.ndarray - First axis should store coefficients of a polynomial. For a polynomial given by - ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0]-1``, coefficient cᵢ should be stored at - ``c[n-i]``. - - Returns - ------- - val : jnp.ndarray - Polynomial with given coefficients evaluated at given points. - - Examples - -------- - .. code-block:: python - - val = _poly_val(x, c) - if val.ndim != max(x.ndim, c.ndim - 1): - raise ValueError(f"Incompatible shapes {x.shape} and {c.shape}.") - for index in np.ndindex(c.shape[1:]): - idx = (..., *index) - np.testing.assert_allclose( - actual=val[idx], - desired=np.poly1d(c[idx])(x[idx]), - err_msg=f"Failed with shapes {x.shape} and {c.shape}.", - ) - - """ - # Better than Horner's method as we expect to evaluate low order polynomials. - X = x[..., jnp.newaxis] ** jnp.arange(c.shape[0] - 1, -1, -1) - val = jnp.einsum("...i,i...", X, c) - return val - - def plot_field_line( B, pitch=None, @@ -403,7 +340,7 @@ def bounce_points( assert intersect.shape == (P, S, N, degree) # Reshape so that last axis enumerates intersects of a pitch along a field line. - B_z_ra = _poly_val(x=intersect, c=B_z_ra_c[..., jnp.newaxis]).reshape(P, S, -1) + B_z_ra = polyval_vec(x=intersect, c=B_z_ra_c[..., jnp.newaxis]).reshape(P, S, -1) # Only consider intersect if it is within knots that bound that polynomial. is_intersect = intersect.reshape(P, S, -1) >= 0 # Following discussion on page 3 and 5 of https://doi.org/10.1063/1.873749, @@ -520,7 +457,7 @@ def _get_extrema(knots, B_c, B_z_ra_c, sentinel=jnp.nan): c=B_z_ra_c, a_min=jnp.array([0.0]), a_max=jnp.diff(knots), sentinel=sentinel ) assert extrema.shape == (S, N, degree - 1) - B_extrema = _poly_val(x=extrema, c=B_c[..., jnp.newaxis]).reshape(S, -1) + B_extrema = polyval_vec(x=extrema, c=B_c[..., jnp.newaxis]).reshape(S, -1) # Transform out of local power basis expansion. extrema = (extrema + knots[:-1, jnp.newaxis]).reshape(S, -1) return extrema, B_extrema @@ -927,7 +864,7 @@ def bounce_integral( # Compute local splines. 
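    # The cubic Hermite spline matches both |B| and ∂|B|/∂ζ at the knots, so its
    # piecewise polynomial coefficients can be passed to the cubic root solver
    # to locate bounce points between knots.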
B_c = CubicHermiteSpline(knots, B, B_z_ra, axis=-1, check=check).c B_c = jnp.moveaxis(B_c, source=1, destination=-1) - B_z_ra_c = _poly_der(B_c) + B_z_ra_c = polyder_vec(B_c) degree = 3 assert B_c.shape[0] == degree + 1 assert B_z_ra_c.shape[0] == degree diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 513d3c31f4..3f9409e889 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -7,14 +7,14 @@ import pytest from jax import grad from matplotlib import pyplot as plt -from orthax.chebyshev import chebgauss, chebweight -from orthax.legendre import leggauss +from numpy.polynomial.chebyshev import chebgauss, chebweight +from numpy.polynomial.legendre import leggauss from scipy import integrate from scipy.interpolate import CubicHermiteSpline from scipy.special import ellipe, ellipkm1, roots_chebyu from tests.test_plotting import tol_1d -from desc.backend import flatnonzero, jnp +from desc.backend import jnp from desc.compute._quad_utils import ( automorphism_arcsin, automorphism_sin, @@ -33,15 +33,13 @@ _get_extrema, _interp_to_argmin_B_hard, _interp_to_argmin_B_soft, - _poly_der, - _poly_val, bounce_integral, bounce_points, get_pitch, plot_field_line, required_names, ) -from desc.compute.utils import dot, take_mask +from desc.compute.utils import dot from desc.equilibrium import Equilibrium from desc.equilibrium.coords import get_rtz_grid from desc.examples import get @@ -49,38 +47,6 @@ from desc.utils import only1 -@partial(np.vectorize, signature="(m)->()") -def _last_value(a): - """Return the last non-nan value in ``a``.""" - a = a[::-1] - idx = np.squeeze(flatnonzero(~np.isnan(a), size=1, fill_value=0)) - return a[idx] - - -@pytest.mark.unit -def test_mask_operations(): - """Test custom masked array operation.""" - rows = 5 - cols = 7 - a = np.random.rand(rows, cols) - nan_idx = np.random.choice(rows * cols, size=(rows * cols) // 2, replace=False) - a.ravel()[nan_idx] = np.nan - taken = take_mask(a, ~np.isnan(a)) - last = _last_value(taken) - for i in range(rows): - desired = a[i, ~np.isnan(a[i])] - assert np.array_equal( - taken[i], - np.pad(desired, (0, cols - desired.size), constant_values=np.nan), - equal_nan=True, - ) - assert np.array_equal( - last[i], - desired[-1] if desired.size else np.nan, - equal_nan=True, - ) - - @pytest.mark.unit def test_reshape_convention(): """Test the reshaping convention separates data across field lines.""" @@ -117,53 +83,6 @@ def test_reshape_convention(): ), err_msg -@pytest.mark.unit -def test_poly_der(): - """Test vectorized computation of polynomial derivative.""" - quintic = 6 - c = np.arange(-18, 18).reshape(quintic, 3, -1) * np.pi - # make sure broadcasting won't hide error in implementation - assert np.unique(c.shape).size == c.ndim - derivative = _poly_der(c) - for j in range(c.shape[1]): - for k in range(c.shape[2]): - np.testing.assert_allclose( - actual=derivative[:, j, k], desired=np.polyder(c[:, j, k]) - ) - - -@pytest.mark.unit -def test_poly_val(): - """Test vectorized computation of polynomial evaluation.""" - - def test(x, c): - val = _poly_val(x=x, c=c) - if val.ndim != max(x.ndim, c.ndim - 1): - raise ValueError(f"Incompatible shapes {x.shape} and {c.shape}.") - for index in np.ndindex(c.shape[1:]): - idx = (..., *index) - np.testing.assert_allclose( - actual=val[idx], - desired=np.poly1d(c[idx])(x[idx]), - err_msg=f"Failed with shapes {x.shape} and {c.shape}.", - ) - - quartic = 5 - c = np.arange(-60, 60).reshape(quartic, 3, -1) * np.pi - # make sure broadcasting won't hide 
error in implementation - assert np.unique(c.shape).size == c.ndim - x = np.linspace(0, 20, c.shape[1] * c.shape[2]).reshape(c.shape[1], c.shape[2]) - test(x, c) - - x = np.stack([x, x * 2], axis=0) - x = np.stack([x, x * 2, x * 3, x * 4], axis=0) - # make sure broadcasting won't hide error in implementation - assert np.unique(x.shape).size == x.ndim - assert c.shape[1:] == x.shape[x.ndim - (c.ndim - 1) :] - assert np.unique((c.shape[0],) + x.shape[c.ndim - 1 :]).size == x.ndim - 1 - test(x, c) - - @pytest.mark.unit def test_get_extrema(): """Test computation of extrema of |B|.""" diff --git a/tests/test_compute_utils.py b/tests/test_compute_utils.py index 12dcf64fea..1a71059473 100644 --- a/tests/test_compute_utils.py +++ b/tests/test_compute_utils.py @@ -1,10 +1,12 @@ """Tests compute utilities.""" +from functools import partial + import jax import numpy as np import pytest -from desc.backend import jnp +from desc.backend import flatnonzero, jnp from desc.basis import FourierZernikeBasis from desc.compute.geom_utils import rotation_matrix from desc.compute.utils import ( @@ -16,6 +18,7 @@ surface_max, surface_min, surface_variance, + take_mask, ) from desc.examples import get from desc.grid import ConcentricGrid, LinearGrid, QuadratureGrid @@ -622,3 +625,35 @@ def test_rotation_matrix(): np.testing.assert_allclose(rotation_matrix(x0), np.eye(3)) np.testing.assert_allclose(dfdx_fwd(x0), np.zeros((3, 3, 3))) np.testing.assert_allclose(dfdx_rev(x0), np.zeros((3, 3, 3))) + + +@partial(np.vectorize, signature="(m)->()") +def _last_value(a): + """Return the last non-nan value in ``a``.""" + a = a[::-1] + idx = np.squeeze(flatnonzero(~np.isnan(a), size=1, fill_value=0)) + return a[idx] + + +@pytest.mark.unit +def test_mask_operations(): + """Test custom masked array operation.""" + rows = 5 + cols = 7 + a = np.random.rand(rows, cols) + nan_idx = np.random.choice(rows * cols, size=(rows * cols) // 2, replace=False) + a.ravel()[nan_idx] = np.nan + taken = take_mask(a, ~np.isnan(a)) + last = _last_value(taken) + for i in range(rows): + desired = a[i, ~np.isnan(a[i])] + assert np.array_equal( + taken[i], + np.pad(desired, (0, cols - desired.size), constant_values=np.nan), + equal_nan=True, + ) + assert np.array_equal( + last[i], + desired[-1] if desired.size else np.nan, + equal_nan=True, + ) diff --git a/tests/test_interp_utils.py b/tests/test_interp_utils.py index 7805641579..2dffd69b5c 100644 --- a/tests/test_interp_utils.py +++ b/tests/test_interp_utils.py @@ -23,6 +23,8 @@ interp_rfft, interp_rfft2, poly_root, + polyder_vec, + polyval_vec, ) from desc.compute._quad_utils import bijection_to_disc from desc.compute.bounce_integral import _filter_not_nan @@ -76,6 +78,47 @@ def test_poly_root(): np.testing.assert_allclose(root, unique_root) +@pytest.mark.unit +def test_polyder_vec(): + """Test vectorized computation of polynomial derivative.""" + quintic = 6 + c = np.arange(-18, 18).reshape(quintic, 3, -1) * np.pi + # make sure broadcasting won't hide error in implementation + assert np.unique(c.shape).size == c.ndim + derivative = polyder_vec(c) + desired = np.vectorize(np.polyder, signature="(m)->(n)")(c.T).T + np.testing.assert_allclose(derivative, desired) + + +@pytest.mark.unit +def test_polyval_vec(): + """Test vectorized computation of polynomial evaluation.""" + + def test(x, c): + val = polyval_vec(x=x, c=c) + np.testing.assert_allclose( + val, + np.vectorize(np.polyval, signature="(m),(n)->(n)")( + np.moveaxis(c, 0, -1), x[..., np.newaxis] + ).squeeze(axis=-1), + ) + + quartic = 5 + c = 
np.arange(-60, 60).reshape(quartic, 3, -1) * np.pi + # make sure broadcasting won't hide error in implementation + assert np.unique(c.shape).size == c.ndim + x = np.linspace(0, 20, c.shape[1] * c.shape[2]).reshape(c.shape[1], c.shape[2]) + test(x, c) + + x = np.stack([x, x * 2], axis=0) + x = np.stack([x, x * 2, x * 3, x * 4], axis=0) + # make sure broadcasting won't hide error in implementation + assert np.unique(x.shape).size == x.ndim + assert c.shape[1:] == x.shape[x.ndim - (c.ndim - 1) :] + assert np.unique((c.shape[0],) + x.shape[c.ndim - 1 :]).size == x.ndim - 1 + test(x, c) + + class TestFastInterp: """Test fast interpolation.""" From 744540a5028cac37eb4fb2d454756a5cb714c48a Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 16 Aug 2024 01:00:23 -0400 Subject: [PATCH 205/241] Adding tests part 2 --- desc/compute/_interp_utils.py | 42 ++++---- desc/compute/bounce_integral.py | 2 +- desc/compute/fourier_bounce_integral.py | 99 ++++++++++-------- desc/equilibrium/coords.py | 2 + tests/test_bounce_integral.py | 127 ++++++++++++++---------- tests/test_fourier_bounce.py | 111 ++++++++++++++++++++- tests/test_interp_utils.py | 4 +- 7 files changed, 264 insertions(+), 123 deletions(-) diff --git a/desc/compute/_interp_utils.py b/desc/compute/_interp_utils.py index 4fa1ec327a..8284c1a02d 100644 --- a/desc/compute/_interp_utils.py +++ b/desc/compute/_interp_utils.py @@ -85,13 +85,13 @@ def harmonic(a, M, axis=-1): return h -def harmonic_basis(x, M): +def harmonic_vander(x, M): """Nyquist trigonometric interpolant basis evaluated at ``x``. Parameters ---------- x : jnp.ndarray - Points to evaluate. + Points at which to evaluate pseudo-Vandermonde matrix. M : int Spectral resolution. @@ -99,7 +99,7 @@ def harmonic_basis(x, M): ------- basis : jnp.ndarray Shape (*x.shape, M). - Basis evaluated at points ``x``. + Pseudo-Vandermonde matrix of degree ``M-1`` and sample points ``x``. Last axis ordered as [1, cos(x), ..., cos(mx), sin(x), sin(2x), ..., sin(mx)]. """ @@ -128,7 +128,7 @@ def interp_rfft(xq, f, axis=-1): ---------- xq : jnp.ndarray Real query points where interpolation is desired. - Shape of ``xq`` must broadcast with ``f`` except along ``axis``. + Shape of ``xq`` must broadcast with arrays of shape ``np.delete(f.shape,axis)``. f : jnp.ndarray Real function values on uniform 2π periodic grid to interpolate. axis : int @@ -153,7 +153,7 @@ def irfft_non_uniform(xq, a, n, axis=-1): ---------- xq : jnp.ndarray Real query points where interpolation is desired. - Shape of ``xq`` must broadcast with ``a`` except along ``axis``. + Shape of ``xq`` must broadcast with arrays of shape ``np.delete(a.shape,axis)``. a : jnp.ndarray Fourier coefficients ``a=rfft(f,axis=axis,norm="forward")``. n : int @@ -175,7 +175,7 @@ def irfft_non_uniform(xq, a, n, axis=-1): .at[Index.get(-1, axis, a.ndim)] .divide(1.0 + ((n % 2) == 0)) ) - a = jnp.swapaxes(a[..., jnp.newaxis], axis % a.ndim, -1) + a = jnp.moveaxis(a, axis, -1) m = jnp.fft.rfftfreq(n, d=1 / n) basis = jnp.exp(-1j * m * xq[..., jnp.newaxis]) fq = jnp.linalg.vecdot(basis, a).real @@ -193,7 +193,7 @@ def interp_rfft2(xq, f, axes=(-2, -1)): Shape (..., 2). Real query points where interpolation is desired. Last axis must hold coordinates for a given point. - Shape of ``xq`` must broadcast ``f`` except along ``axes``. + Shape ``xq.shape[:-1]`` must broadcast with shape ``np.delete(f.shape,axes)``. f : jnp.ndarray Shape (..., f.shape[-2], f.shape[-1]). 
Real function values on uniform (2π × 2π) periodic tensor-product grid to @@ -223,7 +223,7 @@ def irfft2_non_uniform(xq, a, M, N, axes=(-2, -1)): Shape (..., 2). Real query points where interpolation is desired. Last axis must hold coordinates for a given point. - Shape of ``xq`` must broadcast ``a`` except along ``axes``. + Shape ``xq.shape[:-1]`` must broadcast with shape ``np.delete(a.shape,axes)``. a : jnp.ndarray Shape (..., a.shape[-2], a.shape[-1]). Fourier coefficients ``a=rfft2(f,axes=axes,norm="forward")``. @@ -240,7 +240,6 @@ def irfft2_non_uniform(xq, a, M, N, axes=(-2, -1)): Real function value at query points. """ - errorif(axes != (-2, -1), NotImplementedError) # need to swap axes before reshape assert xq.shape[-1] == 2 assert a.ndim >= 2 a = ( @@ -249,7 +248,9 @@ def irfft2_non_uniform(xq, a, M, N, axes=(-2, -1)): .divide(2.0) .at[Index.get(-1, axes[-1], a.ndim)] .divide(1.0 + ((N % 2) == 0)) - ).reshape(*a.shape[:-2], 1, -1) + ) + a = jnp.moveaxis(a, source=axes, destination=(-2, -1)) + a = a.reshape(*a.shape[:-2], -1) m = jnp.fft.fftfreq(M, d=1 / M) n = jnp.fft.rfftfreq(N, d=1 / N) @@ -295,7 +296,7 @@ def interp_dct(xq, f, lobatto=False, axis=-1): ---------- xq : jnp.ndarray Real query points where interpolation is desired. - Shape of ``xq`` must broadcast with ``f`` except along ``axis``. + Shape of ``xq`` must broadcast with shape ``np.delete(f.shape,axis)``. f : jnp.ndarray Real function values on Chebyshev points to interpolate. lobatto : bool @@ -310,27 +311,26 @@ def interp_dct(xq, f, lobatto=False, axis=-1): Real function value at query points. """ + lobatto = bool(lobatto) errorif(lobatto, NotImplementedError) assert f.ndim >= 1 - lobatto = bool(lobatto) - a = dct(f, type=2 - lobatto, axis=axis) / (f.shape[axis] - lobatto) + a = cheb_from_dct( + dct(f, type=2 - lobatto, axis=axis) / (f.shape[axis] - lobatto), axis + ) fq = idct_non_uniform(xq, a, f.shape[axis], axis) return fq def idct_non_uniform(xq, a, n, axis=-1): - """Evaluate Discrete Cosine Transform coefficients ``a`` at ``xq`` ∈ [-1, 1]. + """Evaluate Discrete Chebyshev Transform coefficients ``a`` at ``xq`` ∈ [-1, 1]. Parameters ---------- xq : jnp.ndarray Real query points where interpolation is desired. - Shape of ``xq`` must broadcast with ``a`` except along ``axis``. + Shape of ``xq`` must broadcast with shape ``np.delete(a.shape,axis)``. a : jnp.ndarray - Discrete Cosine Transform coefficients, e.g. - ``a=dct(f,type=2,axis=axis,norm="forward")``. - The discrete cosine transformation used by scipy is defined here. - docs.scipy.org/doc/scipy/reference/generated/scipy.fft.dct.html#scipy.fft.dct + Discrete Chebyshev Transform coefficients. n : int Spectral resolution of ``a``. axis : int @@ -343,9 +343,9 @@ def idct_non_uniform(xq, a, n, axis=-1): """ assert a.ndim >= 1 - a = cheb_from_dct(a, axis) - a = jnp.swapaxes(a[..., jnp.newaxis], axis % a.ndim, -1) + a = jnp.moveaxis(a, axis, -1) basis = chebvander(xq, n - 1) + # Could instead use Clenshaw recursion with ``fq=chebval(xq,a,tensor=False)``. 
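    # Each row of the pseudo-Vandermonde matrix holds T₀(xq), …, T_{n-1}(xq), so
    # the dot product below evaluates the Chebyshev series ∑ₙ aₙ Tₙ(xq).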
fq = jnp.linalg.vecdot(basis, a) return fq diff --git a/desc/compute/bounce_integral.py b/desc/compute/bounce_integral.py index 94036dae70..bff1b9cdf6 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/compute/bounce_integral.py @@ -6,7 +6,7 @@ from interpax import CubicHermiteSpline, PPoly, interp1d from jax.nn import softmax from matplotlib import pyplot as plt -from orthax.legendre import leggauss +from numpy.polynomial.legendre import leggauss from desc.backend import flatnonzero, imap, jnp, put from desc.compute._interp_utils import poly_root, polyder_vec, polyval_vec diff --git a/desc/compute/fourier_bounce_integral.py b/desc/compute/fourier_bounce_integral.py index b46a7f5bac..0459996601 100644 --- a/desc/compute/fourier_bounce_integral.py +++ b/desc/compute/fourier_bounce_integral.py @@ -2,7 +2,7 @@ import numpy as np from matplotlib import pyplot as plt -from orthax.chebyshev import chebroots, chebvander +from orthax.chebyshev import chebroots from orthax.legendre import leggauss from desc.backend import dct, idct, irfft, jnp, rfft, rfft2 @@ -12,6 +12,7 @@ cheb_pts, fourier_pts, harmonic, + idct_non_uniform, interp_rfft2, irfft2_non_uniform, irfft_non_uniform, @@ -35,7 +36,7 @@ def _flatten_matrix(y): return y.reshape(*y.shape[:-2], -1) -def _alpha_sequence(alpha_0, iota, num_period, period=2 * jnp.pi): +def alpha_sequence(alpha_0, iota, num_period, period=2 * jnp.pi): """Get sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) of field line. Parameters @@ -62,6 +63,19 @@ def _alpha_sequence(alpha_0, iota, num_period, period=2 * jnp.pi): return alphas +def _subtract(c, k): + # subtract k from last axis of c, obeying numpy broadcasting + c_0 = c[..., 0] - k + c = jnp.concatenate( + [ + jnp.broadcast_to(c[..., 1:], (*c_0.shape, c.shape[-1] - 1)), + c_0[..., jnp.newaxis], + ], + axis=-1, + ) + return c + + class FourierChebyshevBasis: """Fourier-Chebyshev series. @@ -206,10 +220,7 @@ def compute_cheb(self, x): # Always add new axis to broadcast against Chebyshev coefficients. x = jnp.atleast_1d(x)[..., jnp.newaxis] cheb = cheb_from_dct(irfft_non_uniform(x, self._c, self.M, axis=-2), axis=-1) - assert cheb.shape[-2:] == ( - x.shape[-2], - self.N, - ), f"{cheb.shape}; {x.shape}; {self.N}" + assert cheb.shape[-2:] == (x.shape[-2], self.N) return _PiecewiseChebyshevBasis(cheb, self.domain) @@ -247,6 +258,16 @@ def __init__(self, cheb, domain): self.N = cheb.shape[-1] self.domain = domain + def _chebcast(self, arr): + # Input should not have rightmost dimension of cheb that iterates coefficients, + # but may have additional leftmost dimensions for batch operations. + errorif( + arr.ndim > self.cheb.ndim, + NotImplementedError, + msg=f"Got ndim {arr.ndim} > cheb.ndim {self.cheb.ndim}.", + ) + return self.cheb if arr.ndim < self.cheb.ndim else self.cheb[jnp.newaxis] + def intersect(self, k=0, eps=_eps): """Coordinates yᵢ such that f(x, yᵢ) = k(x). @@ -274,13 +295,7 @@ def intersect(self, k=0, eps=_eps): Boolean array into ``y`` indicating whether element is an intersect. 
""" - errorif( - k.ndim > self.cheb.ndim, - NotImplementedError, - msg=f"Got k.ndim {k.ndim} > cheb.ndim {self.cheb.ndim}.", - ) - c = self.cheb if k.ndim < self.cheb.ndim else self.cheb[jnp.newaxis] - c = c.copy().at[..., 0].add(-k) + c = _subtract(self._chebcast(k), k) # roots yᵢ of f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y) - k(x) y = _chebroots_vec(c) assert y.shape == (*c.shape[:-1], self.N - 1) @@ -289,13 +304,15 @@ def intersect(self, k=0, eps=_eps): # Pick sentinel above such that only distinct roots are considered intersects. is_intersect = (jnp.abs(y.imag) <= eps) & (jnp.abs(y.real) <= 1) y = jnp.where(is_intersect, y.real, 0) # ensure y is in domain of arcos + + # TODO: Multipoint evaluation with FFT. + # Chapter 10, https://doi.org/10.1017/CBO9781139856065. + n = jnp.arange(self.N) # ∂f/∂y = ∑ₙ₌₀ᴺ⁻¹ aₙ(x) n Uₙ₋₁(y) - # sign ∂f/∂y = sign ∑ₙ₌₁ᴺ⁻¹ aₙ(x) sin(n arcos y) + # sign ∂f/∂y = sign ∑ₙ₌₀ᴺ⁻¹ aₙ(x) n sin(n arcos y) s = jnp.linalg.vecdot( - # TODO: Multipoint evaluation with FFT. - # Chapter 10, https://doi.org/10.1017/CBO9781139856065. + n * jnp.sin(n * jnp.arccos(y)[..., jnp.newaxis]), self.cheb[..., jnp.newaxis, :], - jnp.sin(jnp.arange(self.N) * jnp.arccos(y)[..., jnp.newaxis]), ) is_decreasing = s <= 0 is_increasing = s >= 0 @@ -547,11 +564,9 @@ def eval1d(self, z): y = bijection_to_disc(y, self.domain[0], self.domain[1]) # Chebyshev coefficients αₙ for f(z) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x[z]) Tₙ(y[z]) # are held in self.cheb with shape (..., num cheb series, N). - cheb = jnp.moveaxis(self.cheb, source=-1, destination=0) - cheb = jnp.take_along_axis(cheb, x_idx, axis=-1) - # TODO: Multipoint evaluation with FFT. - # Chapter 10, https://doi.org/10.1017/CBO9781139856065. - f = jnp.linalg.vecdot(chebvander(y, self.N - 1), cheb) + cheb = jnp.take_along_axis(self._chebcast(z), x_idx[..., jnp.newaxis], axis=-2) + f = idct_non_uniform(y, cheb, self.N) + assert f.shape == z.shape return f def _isomorphism_1d(self, y): @@ -595,10 +610,8 @@ def _isomorphism_2d(self, z): Isomorphic coordinates. """ - period = self.domain[-1] - self.domain[0] - x_index = z // period - y_value = z % period - return x_index, y_value + x_index, y_value = jnp.divmod(z, self.domain[-1] - self.domain[0]) + return x_index.astype(int), y_value def _bounce_quadrature(bp1, bp2, x, w, m, n, integrand, f, b_sup_z, B, T, pitch): @@ -655,7 +668,7 @@ def _bounce_quadrature(bp1, bp2, x, w, m, n, integrand, f, b_sup_z, B, T, pitch) Returns ------- result : jnp.ndarray - Shape (P, S, num_well). + Shape (P, L, num_well). First axis enumerates pitch values. Second axis enumerates the field lines. Last axis enumerates the bounce integrals. @@ -684,6 +697,7 @@ def _bounce_quadrature(bp1, bp2, x, w, m, n, integrand, f, b_sup_z, B, T, pitch) w, ) assert result.shape == (P, L, num_well) + return result def required_names(): @@ -691,7 +705,7 @@ def required_names(): return ["B^zeta", "|B|"] -# TODO: Assumes zeta = phi +# TODO: Assumes zeta = phi (alpha sequence) def bounce_integral( grid, data, @@ -765,14 +779,16 @@ def bounce_integral( bounce_integrate : callable This callable method computes the bounce integral ∫ f(ℓ) dℓ for every specified field line for every λ value in ``pitch``. - alphas : jnp.ndarray - Sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) that specify field line. - B : _PiecewiseChebyshevBasis - Set of 1D Chebyshev spectral coefficients of |B| along field line. - {|B|_α : ζ |B|(α, ζ) | α ∈ A } . - T : _PiecewiseChebyshevBasis - Set of 1D Chebyshev spectral coefficients of θ along field line. - {θ_α : ζ θ(α, ζ) | α ∈ A }. 
+ spline : tuple(ndarray, _PiecewiseChebyshevBasis, _PiecewiseChebyshevBasis) + alphas : jnp.ndarray + Poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) that specify field line. + B : _PiecewiseChebyshevBasis + Set of 1D Chebyshev spectral coefficients of |B| along field line. + {|B|_α : ζ |B|(α, ζ) | α ∈ A } . + T : _PiecewiseChebyshevBasis + Set of 1D Chebyshev spectral coefficients of θ along field line. + {θ_α : ζ θ(α, ζ) | α ∈ A }. + """ # Resolution of periodic DESC coordinate tensor-product grid. L, m, n = grid.num_rho, grid.num_theta, grid.num_zeta @@ -798,7 +814,7 @@ def bounce_integral( ).reshape(L, M, N), ) # Peel off field lines. - alphas = _alpha_sequence(alpha_0, grid.compress(data["iota"]), num_transit) + alphas = alpha_sequence(alpha_0, grid.compress(data["iota"]), num_transit) T = T.compute_cheb(alphas) B = B.compute_cheb(alphas) assert T.cheb.shape == B.cheb.shape == (L, num_transit, N) @@ -863,12 +879,15 @@ def bounce_integrate(integrand, f, pitch, weight=None, num_well=None): errorif(weight is not None, NotImplementedError) # Compute bounce points. pitch = jnp.atleast_3d(pitch) - P = pitch.shape[0] - assert pitch.shape[1:] == B.cheb.shape[:-1], f"{pitch.shape}; {B.cheb.shape}" + assert ( + pitch.shape[1] == B.cheb.shape[0] + or pitch.shape[1] == 1 + or B.cheb.shape[0] == 1 + ) bp1, bp2 = B.bounce_points(*B.intersect(1 / pitch), num_well) + P = pitch.shape[0] num_well = bp1.shape[-1] assert bp1.shape == bp2.shape == (P, L, num_well) - result = _bounce_quadrature( bp1, bp2, x, w, m, n, integrand, f, b_sup_z, B, T, pitch ) diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index 8291cdd423..c052be4040 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -699,6 +699,8 @@ def get_rtz_grid( "z": "zeta", "p": "phi", } + if "iota" in kwargs: + kwargs["iota"] = grid.expand(kwargs["iota"], surface_label="rho") rtz_nodes = map_coordinates( eq, grid.nodes, diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index 3f9409e889..e6e2719010 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -541,63 +541,15 @@ def _elliptic_incomplete(k2): return I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 -@pytest.mark.unit -@pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_1d) -def test_drift(): - """Test bounce-averaged drift with analytical expressions.""" - eq = Equilibrium.load(".//tests//inputs//low-beta-shifted-circle.h5") - psi_boundary = eq.Psi / (2 * np.pi) - psi = 0.25 * psi_boundary - rho = np.sqrt(psi / psi_boundary) - np.testing.assert_allclose(rho, 0.5) - - # Make a set of nodes along a single fieldline. 
- grid_fsa = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, sym=eq.sym, NFP=eq.NFP) - data = eq.compute(["iota"], grid=grid_fsa) - iota = grid_fsa.compress(data["iota"]).item() - alpha = 0 - zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) - grid = get_rtz_grid( - eq, rho, alpha, zeta, coordinates="raz", period=(np.inf, 2 * np.pi, np.inf) - ) - - data = eq.compute( - required_names() - + [ - "cvdrift", - "gbdrift", - "grad(psi)", - "grad(alpha)", - "shear", - "iota", - "psi", - "a", - ], - grid=grid, - ) - np.testing.assert_allclose(data["psi"], psi) - np.testing.assert_allclose(data["iota"], iota) - assert np.all(data["B^zeta"] > 0) - data["iota"] = grid.compress(data["iota"]).item() - data["shear"] = grid.compress(data["shear"]).item() - - B_ref = 2 * np.abs(psi_boundary) / data["a"] ** 2 - bounce_integrate, _ = bounce_integral( - data, - knots=zeta, - B_ref=B_ref, - L_ref=data["a"], - quad=leggauss(28), # converges to absolute and relative tolerance of 1e-7 - check=True, - ) - - B = data["|B|"] / B_ref +def _drift_analytic(data): + """Compute analytic approximation for bounce-averaged binormal drift.""" + B = data["|B|"] / data["B ref"] B0 = np.mean(B) # epsilon should be changed to dimensionless, and computed in a way that # is independent of normalization length scales, like "effective r/R0". - epsilon = data["a"] * rho # Aspect ratio of the flux surface. + epsilon = data["a"] * data["rho"] # Aspect ratio of the flux surface. np.testing.assert_allclose(epsilon, 0.05) - theta_PEST = alpha + data["iota"] * zeta + theta_PEST = data["alpha"] + data["iota"] * data["zeta"] # same as 1 / (1 + epsilon cos(theta)) assuming epsilon << 1 B_analytic = B0 * (1 - epsilon * np.cos(theta_PEST)) np.testing.assert_allclose(B, B_analytic, atol=3e-3) @@ -611,7 +563,7 @@ def test_drift(): np.testing.assert_allclose(gradpar, gradpar_analytic, atol=5e-3) # Comparing coefficient calculation here with coefficients from compute/_metric - normalization = -np.sign(psi) * B_ref * data["a"] ** 2 + normalization = -np.sign(data["psi"]) * data["B ref"] * data["a"] ** 2 cvdrift = data["cvdrift"] * normalization gbdrift = data["gbdrift"] * normalization dPdrho = np.mean(-0.5 * (cvdrift - gbdrift) * data["|B|"] ** 2) @@ -620,7 +572,7 @@ def test_drift(): -np.sign(data["iota"]) * data["shear"] * dot(data["grad(psi)"], data["grad(alpha)"]) - / B_ref + / data["B ref"] ) gds21_analytic = -data["shear"] * ( data["shear"] * theta_PEST - alpha_MHD / B**4 * np.sin(theta_PEST) @@ -671,6 +623,71 @@ def test_drift(): ) / G0 drift_analytic_den = I_0 / G0 drift_analytic = drift_analytic_num / drift_analytic_den + return drift_analytic, cvdrift, gbdrift, pitch + + +@pytest.mark.unit +@pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_1d) +def test_drift(): + """Test bounce-averaged drift with analytical expressions.""" + eq = Equilibrium.load(".//tests//inputs//low-beta-shifted-circle.h5") + psi_boundary = eq.Psi / (2 * np.pi) + psi = 0.25 * psi_boundary + rho = np.sqrt(psi / psi_boundary) + np.testing.assert_allclose(rho, 0.5) + + # Make a set of nodes along a single fieldline. 
+ grid_fsa = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, sym=eq.sym, NFP=eq.NFP) + data = eq.compute(["iota"], grid=grid_fsa) + iota = grid_fsa.compress(data["iota"]).item() + alpha = 0 + zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) + grid = get_rtz_grid( + eq, + rho, + alpha, + zeta, + coordinates="raz", + period=(np.inf, 2 * np.pi, np.inf), + iota=np.array([iota]), + ) + data = eq.compute( + required_names() + + [ + "cvdrift", + "gbdrift", + "grad(psi)", + "grad(alpha)", + "shear", + "iota", + "psi", + "a", + ], + grid=grid, + ) + np.testing.assert_allclose(data["psi"], psi) + np.testing.assert_allclose(data["iota"], iota) + assert np.all(data["B^zeta"] > 0) + B_ref = 2 * np.abs(psi_boundary) / data["a"] ** 2 + data["B ref"] = B_ref + data["rho"] = rho + data["alpha"] = alpha + data["zeta"] = zeta + data["psi"] = grid.compress(data["psi"]) + data["iota"] = grid.compress(data["iota"]) + data["shear"] = grid.compress(data["shear"]) + + # Compute analytic approximation. + drift_analytic, cvdrift, gbdrift, pitch = _drift_analytic(data) + # Compute numerical result. + bounce_integrate, _ = bounce_integral( + data, + knots=zeta, + B_ref=B_ref, + L_ref=data["a"], + quad=leggauss(28), # converges to absolute and relative tolerance of 1e-7 + check=True, + ) def integrand_num(cvdrift, gbdrift, B, pitch): g = jnp.sqrt(1 - pitch * B) diff --git a/tests/test_fourier_bounce.py b/tests/test_fourier_bounce.py index 1a01ea970b..8718695766 100644 --- a/tests/test_fourier_bounce.py +++ b/tests/test_fourier_bounce.py @@ -2,15 +2,21 @@ import numpy as np import pytest +from matplotlib import pyplot as plt +from numpy.polynomial.legendre import leggauss +from tests.test_bounce_integral import _drift_analytic +from tests.test_plotting import tol_1d +from desc.backend import jnp from desc.compute.bounce_integral import get_pitch from desc.compute.fourier_bounce_integral import ( FourierChebyshevBasis, - _alpha_sequence, + alpha_sequence, bounce_integral, required_names, ) -from desc.equilibrium.coords import map_coordinates +from desc.equilibrium import Equilibrium +from desc.equilibrium.coords import get_rtz_grid, map_coordinates from desc.examples import get from desc.grid import LinearGrid @@ -23,7 +29,7 @@ def test_alpha_sequence(alpha_0, iota, num_period, period): """Test field line poloidal label tracking utility.""" iota = np.atleast_1d(iota) - alphas = _alpha_sequence(alpha_0, iota, num_period, period) + alphas = alpha_sequence(alpha_0, iota, num_period, period) assert alphas.shape == (iota.size, num_period) for i in range(iota.size): assert np.unique(alphas[i]).size == num_period, "Is iota irrational?" 
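A minimal standalone sketch of the field-line label recurrence exercised by ``test_alpha_sequence`` above, αⱼ = α₀ + (period · ι) j for j = 0, …, m−1, matching the one-line body of ``alpha_sequence`` in this patch; ``alpha_sequence_sketch`` is an illustrative name, not part of the DESC API.

.. code-block:: python

    import numpy as np

    def alpha_sequence_sketch(alpha_0, iota, num_transit, period=2 * np.pi):
        """Poloidal labels of successive toroidal transits, one row per iota."""
        iota = np.atleast_1d(iota)
        return alpha_0 + period * iota[:, None] * np.arange(num_transit)

    # For irrational iota the labels never repeat modulo the period.
    alphas = alpha_sequence_sketch(0.0, np.sqrt(2), num_transit=5)
    assert alphas.shape == (1, 5)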
@@ -31,7 +37,7 @@ def test_alpha_sequence(alpha_0, iota, num_period, period): @pytest.mark.unit -def test_fourier_chebyshev(rho=1, M=8, N=32, f=lambda x: x): +def test_fourier_chebyshev(rho=1, M=8, N=32, f=lambda B, pitch: B * pitch): """Test bounce points...""" eq = get("W7-X") clebsch = FourierChebyshevBasis.nodes(M, N, rho=rho) @@ -52,3 +58,100 @@ def test_fourier_chebyshev(rho=1, M=8, N=32, f=lambda x: x): grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 ) result = bounce_integrate(f, [], pitch) # noqa: F841 + + +@pytest.mark.unit +@pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_1d) +def test_drift(): + """Test bounce-averaged drift with analytical expressions.""" + eq = Equilibrium.load(".//tests//inputs//low-beta-shifted-circle.h5") + psi_boundary = eq.Psi / (2 * np.pi) + psi = 0.25 * psi_boundary + rho = np.sqrt(psi / psi_boundary) + np.testing.assert_allclose(rho, 0.5) + + # Make a set of nodes along a single fieldline. + grid_fsa = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, sym=eq.sym, NFP=eq.NFP) + data = eq.compute(["iota"], grid=grid_fsa) + iota = grid_fsa.compress(data["iota"]).item() + alpha = 0 + zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) + grid = get_rtz_grid( + eq, + rho, + alpha, + zeta, + coordinates="raz", + period=(np.inf, 2 * np.pi, np.inf), + iota=np.array([iota]), + ) + data = eq.compute( + required_names() + + [ + "cvdrift", + "gbdrift", + "grad(psi)", + "grad(alpha)", + "shear", + "iota", + "psi", + "a", + ], + grid=grid, + ) + np.testing.assert_allclose(data["psi"], psi) + np.testing.assert_allclose(data["iota"], iota) + assert np.all(data["B^zeta"] > 0) + B_ref = 2 * np.abs(psi_boundary) / data["a"] ** 2 + data["B ref"] = B_ref + data["rho"] = rho + data["alpha"] = alpha + data["zeta"] = zeta + data["psi"] = grid.compress(data["psi"]) + data["iota"] = grid.compress(data["iota"]) + data["shear"] = grid.compress(data["shear"]) + + # Compute analytic approximation. + drift_analytic, cvdrift, gbdrift, pitch = _drift_analytic(data) + # Compute numerical result. + bounce_integrate, _ = bounce_integral( + data, + knots=zeta, + B_ref=B_ref, + L_ref=data["a"], + quad=leggauss(28), # converges to absolute and relative tolerance of 1e-7 + check=True, + ) + + def integrand_num(cvdrift, gbdrift, B, pitch): + g = jnp.sqrt(1 - pitch * B) + return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) + + def integrand_den(B, pitch): + return 1 / jnp.sqrt(1 - pitch * B) + + drift_numerical_num = bounce_integrate( + integrand=integrand_num, + f=[cvdrift, gbdrift], + pitch=pitch[:, np.newaxis], + num_well=1, + ) + drift_numerical_den = bounce_integrate( + integrand=integrand_den, + f=[], + pitch=pitch[:, np.newaxis], + num_well=1, + weight=np.ones(zeta.size), + ) + + drift_numerical_num = np.squeeze(drift_numerical_num) + drift_numerical_den = np.squeeze(drift_numerical_den) + drift_numerical = drift_numerical_num / drift_numerical_den + msg = "There should be one bounce integral per pitch in this example." 
+ assert drift_numerical.size == drift_analytic.size, msg + np.testing.assert_allclose(drift_numerical, drift_analytic, atol=5e-3, rtol=5e-2) + + fig, ax = plt.subplots() + ax.plot(1 / pitch, drift_analytic) + ax.plot(1 / pitch, drift_numerical) + return fig diff --git a/tests/test_interp_utils.py b/tests/test_interp_utils.py index 2dffd69b5c..1f47e74418 100644 --- a/tests/test_interp_utils.py +++ b/tests/test_interp_utils.py @@ -18,7 +18,7 @@ cheb_from_dct, cheb_pts, harmonic, - harmonic_basis, + harmonic_vander, interp_dct, interp_rfft, interp_rfft2, @@ -143,7 +143,7 @@ def test_rfftfreq(self, M): def _interp_rfft_harmonic(xq, f): M = f.shape[-1] fq = jnp.linalg.vecdot( - harmonic_basis(xq, M), harmonic(rfft(f, norm="forward"), M) + harmonic_vander(xq, M), harmonic(rfft(f, norm="forward"), M) ) return fq From 8197f71ce1c3495ed26597b7ce86966e077bf073 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 20 Aug 2024 12:19:01 -0400 Subject: [PATCH 206/241] Force push with lease to avoid diverging branch with remote due to commit 0a5216c --- desc/compute/utils.py | 43 +- desc/grid.py | 2 +- desc/integrals/__init__.py | 3 + desc/{compute => integrals}/_interp_utils.py | 8 +- desc/{compute => integrals}/_quad_utils.py | 0 .../{compute => integrals}/bounce_integral.py | 122 ++-- .../fourier_bounce_integral.py | 526 ++++++++++-------- desc/utils.py | 73 ++- tests/test_bounce_integral.py | 52 +- tests/test_fourier_bounce.py | 114 +++- tests/test_interp_utils.py | 16 +- 11 files changed, 550 insertions(+), 409 deletions(-) create mode 100644 desc/integrals/__init__.py rename desc/{compute => integrals}/_interp_utils.py (98%) rename desc/{compute => integrals}/_quad_utils.py (100%) rename desc/{compute => integrals}/bounce_integral.py (92%) rename desc/{compute => integrals}/fourier_bounce_integral.py (75%) diff --git a/desc/compute/utils.py b/desc/compute/utils.py index 7d7a2562dd..92c41a000f 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -2,14 +2,13 @@ import copy import inspect -from functools import partial import numpy as np -from desc.backend import cond, execute_on_cpu, flatnonzero, fori_loop, jnp, put, take +from desc.backend import cond, execute_on_cpu, fori_loop, jnp, put from desc.grid import ConcentricGrid, Grid, LinearGrid -from ..utils import errorif, setdefault, warnif +from ..utils import errorif, warnif from .data_index import allowed_kwargs, data_index # map from profile name to equilibrium parameter name @@ -1580,41 +1579,3 @@ def body(i, mins): # The above implementation was benchmarked to be more efficient than # alternatives without explicit loops in GitHub pull request #501. return grid.expand(mins, surface_label) - - -@partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={"size", "fill_value"}) -def take_mask(a, mask, size=None, fill_value=None): - """JIT compilable method to return ``a[mask][:size]`` padded by ``fill_value``. - - Parameters - ---------- - a : jnp.ndarray - The source array. - mask : jnp.ndarray - Boolean mask to index into ``a``. Should have same shape as ``a``. - size : int - Elements of ``a`` at the first size True indices of ``mask`` will be returned. - If there are fewer elements than size indicates, the returned array will be - padded with ``fill_value``. The size default is ``mask.size``. - fill_value : Any - When there are fewer than the indicated number of elements, the remaining - elements will be filled with ``fill_value``. 
Defaults to NaN for inexact types, - the largest negative value for signed types, the largest positive value for - unsigned types, and True for booleans. - - Returns - ------- - result : jnp.ndarray - Shape (size, ). - - """ - assert a.shape == mask.shape - idx = flatnonzero(mask, size=setdefault(size, mask.size), fill_value=mask.size) - return take( - a, - idx, - mode="fill", - fill_value=fill_value, - unique_indices=True, - indices_are_sorted=True, - ) diff --git a/desc/grid.py b/desc/grid.py index 359917c10b..b5afa3ab16 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -742,7 +742,7 @@ def create_meshgrid( rtz : rho, theta, zeta period : tuple of float Assumed periodicity for each coordinate. - Use np.inf to denote no periodicity. + Use ``np.inf`` to denote no periodicity. NFP : int Number of field periods (Default = 1). Only makes sense to change from 1 if last coordinate is periodic diff --git a/desc/integrals/__init__.py b/desc/integrals/__init__.py new file mode 100644 index 0000000000..419801a33e --- /dev/null +++ b/desc/integrals/__init__.py @@ -0,0 +1,3 @@ +"""Classes for integration.""" + +from .fourier_bounce_integral import FourierChebyshevBasis, PiecewiseChebyshevBasis diff --git a/desc/compute/_interp_utils.py b/desc/integrals/_interp_utils.py similarity index 98% rename from desc/compute/_interp_utils.py rename to desc/integrals/_interp_utils.py index 8284c1a02d..ea022891c1 100644 --- a/desc/compute/_interp_utils.py +++ b/desc/integrals/_interp_utils.py @@ -6,8 +6,8 @@ from orthax.polynomial import polyvander from desc.backend import dct, jnp, rfft, rfft2, take -from desc.compute._quad_utils import bijection_from_disc from desc.compute.utils import safediv +from desc.integrals._quad_utils import bijection_from_disc from desc.utils import Index, errorif @@ -314,8 +314,8 @@ def interp_dct(xq, f, lobatto=False, axis=-1): lobatto = bool(lobatto) errorif(lobatto, NotImplementedError) assert f.ndim >= 1 - a = cheb_from_dct( - dct(f, type=2 - lobatto, axis=axis) / (f.shape[axis] - lobatto), axis + a = cheb_from_dct(dct(f, type=2 - lobatto, axis=axis), axis) / ( + f.shape[axis] - lobatto ) fq = idct_non_uniform(xq, a, f.shape[axis], axis) return fq @@ -345,7 +345,7 @@ def idct_non_uniform(xq, a, n, axis=-1): assert a.ndim >= 1 a = jnp.moveaxis(a, axis, -1) basis = chebvander(xq, n - 1) - # Could instead use Clenshaw recursion with ``fq=chebval(xq,a,tensor=False)``. + # Could use Clenshaw recursion with fq = chebval(xq, a, tensor=False). 
fq = jnp.linalg.vecdot(basis, a) return fq diff --git a/desc/compute/_quad_utils.py b/desc/integrals/_quad_utils.py similarity index 100% rename from desc/compute/_quad_utils.py rename to desc/integrals/_quad_utils.py diff --git a/desc/compute/bounce_integral.py b/desc/integrals/bounce_integral.py similarity index 92% rename from desc/compute/bounce_integral.py rename to desc/integrals/bounce_integral.py index bff1b9cdf6..2b61dfece5 100644 --- a/desc/compute/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -2,45 +2,34 @@ from functools import partial -import numpy as np from interpax import CubicHermiteSpline, PPoly, interp1d from jax.nn import softmax from matplotlib import pyplot as plt -from numpy.polynomial.legendre import leggauss +from orthax.legendre import leggauss +from tests.test_interp_utils import filter_not_nan from desc.backend import flatnonzero, imap, jnp, put -from desc.compute._interp_utils import poly_root, polyder_vec, polyval_vec -from desc.compute._quad_utils import ( +from desc.integrals._interp_utils import poly_root, polyder_vec, polyval_vec +from desc.integrals._quad_utils import ( automorphism_sin, bijection_from_disc, grad_automorphism_sin, grad_bijection_from_disc, ) -from desc.compute.utils import take_mask -from desc.utils import errorif, setdefault, warnif +from desc.utils import errorif, setdefault, take_mask, warnif -# use for debugging and testing -def _filter_not_nan(a, check=False): - """Filter out nan from ``a`` while asserting nan is padded at right.""" - is_nan = np.isnan(a) - if check: - assert np.array_equal(is_nan, np.sort(is_nan, axis=-1)) - return a[~is_nan] - - -# use for debugging and testing -def _filter_nonzero_measure(bp1, bp2): +def filter_bounce_points(bp1, bp2): """Return only bounce points such that |bp2 - bp1| > 0.""" - mask = (bp2 - bp1) != 0 + mask = (bp2 - bp1) != 0.0 return bp1[mask], bp2[mask] def plot_field_line( B, pitch=None, - bp1=np.array([]), - bp2=np.array([]), + bp1=jnp.array([]), + bp2=jnp.array([]), start=None, stop=None, num=1000, @@ -57,11 +46,11 @@ def plot_field_line( ---------- B : PPoly Spline of |B| over given field line. - pitch : np.ndarray + pitch : jnp.ndarray λ value. - bp1 : np.ndarray + bp1 : jnp.ndarray Bounce points with (∂|B|/∂ζ)|ρ,α <= 0. - bp2 : np.ndarray + bp2 : jnp.ndarray Bounce points with (∂|B|/∂ζ)|ρ,α >= 0. start : float Minimum ζ on plot. 
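The padded bounce-point convention used by this module can be shown standalone: bounce-point arrays are right-padded with pairs where bp1 == bp2, which contribute intervals of zero measure to the quadrature, and ``filter_bounce_points`` drops those pairs for plotting and testing. A hedged NumPy sketch with illustrative values:

.. code-block:: python

    import numpy as np

    def filter_bounce_points_sketch(bp1, bp2):
        """Keep only bounce-point pairs enclosing an interval of nonzero length."""
        mask = (bp2 - bp1) != 0.0
        return bp1[mask], bp2[mask]

    bp1 = np.array([0.3, 1.7, 0.0, 0.0])  # trailing entries are padding
    bp2 = np.array([0.9, 2.4, 0.0, 0.0])
    bp1, bp2 = filter_bounce_points_sketch(bp1, bp2)
    # bp1 -> [0.3, 1.7], bp2 -> [0.9, 2.4]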
@@ -90,9 +79,7 @@ def plot_field_line( legend = {} def add(lines): - if not hasattr(lines, "__iter__"): - lines = [lines] - for line in lines: + for line in setdefault(lines, [lines], hasattr(lines, "__iter__")): label = line.get_label() if label not in legend: legend[label] = line @@ -101,7 +88,7 @@ def add(lines): if include_knots: for knot in B.x: add(ax.axvline(x=knot, color="tab:blue", alpha=alpha_knot, label="knot")) - z = np.linspace( + z = jnp.linspace( start=setdefault(start, B.x[0]), stop=setdefault(stop, B.x[-1]), num=num, @@ -109,24 +96,24 @@ def add(lines): add(ax.plot(z, B(z), label=r"$\vert B \vert (\zeta)$")) if pitch is not None: - b = 1 / np.atleast_1d(pitch) + b = 1 / jnp.atleast_1d(pitch) for val in b: add( ax.axhline( val, color="tab:purple", alpha=alpha_pitch, label=r"$1 / \lambda$" ) ) - bp1, bp2 = np.atleast_2d(bp1, bp2) + bp1, bp2 = jnp.atleast_2d(bp1, bp2) for i in range(bp1.shape[0]): if bp1.shape == bp2.shape: - bp1_i, bp2_i = _filter_nonzero_measure(bp1[i], bp2[i]) + bp1_i, bp2_i = filter_bounce_points(bp1[i], bp2[i]) else: bp1_i, bp2_i = bp1[i], bp2[i] - bp1_i, bp2_i = map(_filter_not_nan, (bp1_i, bp2_i)) + bp1_i, bp2_i = bp1_i[~jnp.isnan(bp1_i)], bp2_i[~jnp.isnan(bp2_i)] add( ax.scatter( bp1_i, - np.full_like(bp1_i, b[i]), + jnp.full_like(bp1_i, b[i]), marker="v", color="tab:red", label="bp1", @@ -135,7 +122,7 @@ def add(lines): add( ax.scatter( bp2_i, - np.full_like(bp2_i, b[i]), + jnp.full_like(bp2_i, b[i]), marker="^", color="tab:green", label="bp2", @@ -155,44 +142,55 @@ def add(lines): return fig, ax -def _check_bounce_points(bp1, bp2, sentinel, pitch, knots, B_c, plot, **kwargs): +def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot, **kwargs): """Check that bounce points are computed correctly.""" - bp1 = jnp.where(bp1 > sentinel, bp1, jnp.nan) - bp2 = jnp.where(bp2 > sentinel, bp2, jnp.nan) + assert bp1.shape == bp2.shape + mask = (bp1 - bp2) == 0 + bp1 = jnp.where(mask, jnp.nan, bp1) + bp2 = jnp.where(mask, jnp.nan, bp2) eps = jnp.finfo(jnp.array(1.0).dtype).eps * 10 - P, S = bp1.shape[:-1] - msg_1 = "Bounce points have an inversion." + msg_1 = "Bounce points have an inversion.\n" err_1 = jnp.any(bp1 > bp2, axis=-1) - msg_2 = "Discontinuity detected." + msg_2 = "Discontinuity detected.\n" err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1], axis=-1) + P, S, _ = bp1.shape for s in range(S): B = PPoly(B_c[:, s], knots) for p in range(P): - B_mid = B((bp1[p, s] + bp2[p, s]) / 2) - err_3 = jnp.any(B_mid > 1 / pitch[p, s] + eps) + B_m_ps = B((bp1[p, s] + bp2[p, s]) / 2) + err_3 = jnp.any(B_m_ps > 1 / pitch[p, s] + eps) if err_1[p, s] or err_2[p, s] or err_3: - bp1_p = _filter_not_nan(bp1[p, s], check=True) - bp2_p = _filter_not_nan(bp2[p, s], check=True) - B_mid = _filter_not_nan(B_mid, check=True) + bp1_ps, bp2_ps, B_m_ps = map( + filter_not_nan, (bp1[p, s], bp2[p, s], B_m_ps) + ) if plot: plot_field_line( - B, pitch[p, s], bp1_p, bp2_p, title_id=f"{p},{s}", **kwargs + B, + pitch[p, s], + bp1_ps, + bp2_ps, + title_id=f"{p},{s}", + **kwargs, ) - print("bp1:", bp1_p) - print("bp2:", bp2_p) + print("bp1:", bp1_ps) + print("bp2:", bp2_ps) assert not err_1[p, s], msg_1 assert not err_2[p, s], msg_2 msg_3 = ( - f"Detected B midpoint = {B_mid}>{1 / pitch[p, s] + eps} = 1/pitch. " - "You need to use more knots or, if that is infeasible, switch to a " - "monotonic spline method.\n" + f"Detected |B| = {B_m_ps} > {1 / pitch[p, s] + eps} = 1/λ in well. 
" + "Use more knots or switch to a monotonic spline method.\n" ) assert not err_3, msg_3 if plot: plot_field_line( - B, pitch[:, s], bp1[:, s], bp2[:, s], title_id=str(s), **kwargs + B, + pitch[:, s], + bp1[:, s], + bp2[:, s], + title_id=str(s), + **kwargs, ) @@ -334,7 +332,7 @@ def bounce_points( a_min=jnp.array([0.0]), a_max=jnp.diff(knots), sort=True, - sentinel=-1, + sentinel=-1.0, distinct=True, ) assert intersect.shape == (P, S, N, degree) @@ -356,13 +354,14 @@ def bounce_points( bp1 = take_mask(intersect, is_bp1, size=num_well, fill_value=sentinel) bp2 = take_mask(intersect, is_bp2, size=num_well, fill_value=sentinel) - if check: - _check_bounce_points(bp1, bp2, sentinel, pitch, knots, B_c, plot, **kwargs) - mask = (bp1 > sentinel) & (bp2 > sentinel) # Set outside mask to same value so integration is over set of measure zero. - bp1 = jnp.where(mask, bp1, 0) - bp2 = jnp.where(mask, bp2, 0) + bp1 = jnp.where(mask, bp1, 0.0) + bp2 = jnp.where(mask, bp2, 0.0) + + if check: + _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot, **kwargs) + return bp1, bp2 @@ -626,12 +625,7 @@ def _bounce_quadrature( Parameters ---------- - bp1 : jnp.ndarray - Shape (P, S, num_well). - The field line-following ζ coordinates of bounce points for a given pitch along - a field line. The pairs ``bp1[i,j,k]`` and ``bp2[i,j,k]`` form left and right - integration boundaries, respectively, for the bounce integrals. - bp2 : jnp.ndarray + bp1, bp2 : jnp.ndarray Shape (P, S, num_well). The field line-following ζ coordinates of bounce points for a given pitch along a field line. The pairs ``bp1[i,j,k]`` and ``bp2[i,j,k]`` form left and right @@ -876,7 +870,7 @@ def bounce_integral( if automorphism is not None: auto, grad_auto = automorphism w = w * grad_auto(x) - # Recall affine_bijection(auto(x), ζ_b₁, ζ_b₂) = ζ. + # Recall bijection_from_disc(auto(x), ζ_b₁, ζ_b₂) = ζ. x = auto(x) def bounce_integrate( diff --git a/desc/compute/fourier_bounce_integral.py b/desc/integrals/fourier_bounce_integral.py similarity index 75% rename from desc/compute/fourier_bounce_integral.py rename to desc/integrals/fourier_bounce_integral.py index 0459996601..cf03b7596b 100644 --- a/desc/compute/fourier_bounce_integral.py +++ b/desc/integrals/fourier_bounce_integral.py @@ -6,7 +6,7 @@ from orthax.legendre import leggauss from desc.backend import dct, idct, irfft, jnp, rfft, rfft2 -from desc.compute._interp_utils import ( +from desc.integrals._interp_utils import ( _filter_distinct, cheb_from_dct, cheb_pts, @@ -17,17 +17,23 @@ irfft2_non_uniform, irfft_non_uniform, ) -from desc.compute._quad_utils import ( +from desc.integrals._quad_utils import ( automorphism_sin, bijection_from_disc, bijection_to_disc, grad_automorphism_sin, ) -from desc.compute.bounce_integral import _filter_nonzero_measure, _fix_inversion -from desc.compute.utils import take_mask -from desc.utils import errorif, warnif +from desc.integrals.bounce_integral import _fix_inversion, filter_bounce_points +from desc.utils import ( + atleast_2d_end, + atleast_3d_mid, + atleast_nd, + errorif, + setdefault, + take_mask, + warnif, +) -# TODO: There are better techniques to find eigenvalues of Chebyshev colleague matrix. _chebroots_vec = jnp.vectorize(chebroots, signature="(m)->(n)") @@ -36,7 +42,7 @@ def _flatten_matrix(y): return y.reshape(*y.shape[:-2], -1) -def alpha_sequence(alpha_0, iota, num_period, period=2 * jnp.pi): +def alpha_sequence(alpha_0, iota, num_transit, period=2 * jnp.pi): """Get sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) of field line. 
Parameters @@ -46,36 +52,23 @@ def alpha_sequence(alpha_0, iota, num_period, period=2 * jnp.pi): iota : jnp.ndarray Shape (iota.size, ). Rotational transform normalized by 2π. - num_period : float - Number of periods to follow field line. + num_transit : float + Number of ``period``s to follow field line. period : float Toroidal period after which to update label. Returns ------- alphas : jnp.ndarray - Shape (iota.size, num_period). + Shape (iota.size, num_transit). Sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) that specify field line. """ # Δϕ (∂α/∂ϕ) = Δϕ ι̅ = Δϕ ι/2π = Δϕ data["iota"] - alphas = alpha_0 + period * iota[:, jnp.newaxis] * jnp.arange(num_period) + alphas = alpha_0 + period * iota[:, jnp.newaxis] * jnp.arange(num_transit) return alphas -def _subtract(c, k): - # subtract k from last axis of c, obeying numpy broadcasting - c_0 = c[..., 0] - k - c = jnp.concatenate( - [ - jnp.broadcast_to(c[..., 1:], (*c_0.shape, c.shape[-1] - 1)), - c_0[..., jnp.newaxis], - ], - axis=-1, - ) - return c - - class FourierChebyshevBasis: """Fourier-Chebyshev series. @@ -113,19 +106,14 @@ def __init__(self, f, lobatto=False, domain=(0, 2 * jnp.pi)): Domain for y coordinates. Default is [0, 2π]. """ - errorif(domain[0] > domain[-1], msg="Got inverted y coordinate domain.") + lobatto = bool(lobatto) errorif(lobatto, NotImplementedError, "JAX has not implemented type 1 DCT.") + self.lobatto = lobatto + errorif(domain[0] > domain[-1], msg="Got inverted domain.") + self.domain = domain self.M = f.shape[-2] self.N = f.shape[-1] - self.lobatto = bool(lobatto) - self.domain = domain - self._c = ( - rfft( - dct(f, type=2 - self.lobatto, axis=-1) / (self.N - self.lobatto), - axis=-2, - ) - / self.M - ) + self._c = self._fast_transform(f, lobatto) @staticmethod def nodes(M, N, lobatto=False, domain=(0, 2 * jnp.pi), **kwargs): @@ -145,19 +133,23 @@ def nodes(M, N, lobatto=False, domain=(0, 2 * jnp.pi), **kwargs): Returns ------- - coords : jnp.ndarray + coord : jnp.ndarray Shape (M * N, 2). Grid of (x, y) points for optimal interpolation. """ x = fourier_pts(M) y = cheb_pts(N, lobatto, domain) - coords = ( - [jnp.atleast_1d(kwargs.pop("rho")), x, y] if "rho" in kwargs else [x, y] - ) - coords = list(map(jnp.ravel, jnp.meshgrid(*coords, indexing="ij"))) - coords = jnp.column_stack(coords) - return coords + coord = [jnp.atleast_1d(kwargs.pop("rho")), x, y] if "rho" in kwargs else [x, y] + coord = list(map(jnp.ravel, jnp.meshgrid(*coord, indexing="ij"))) + coord = jnp.column_stack(coord) + return coord + + @staticmethod + def _fast_transform(f, lobatto): + M = f.shape[-2] + N = f.shape[-1] + return rfft(dct(f, type=2 - lobatto, axis=-1), axis=-2) / (M * (N - lobatto)) def evaluate(self, M, N): """Evaluate Fourier-Chebyshev series. @@ -176,12 +168,9 @@ def evaluate(self, M, N): Fourier-Chebyshev series evaluated at ``FourierChebyshevBasis.nodes(M, N)``. """ - fq = idct( - irfft(self._c, n=M, axis=-2) * M, - type=2 - self.lobatto, - n=N, - axis=-1, - ) * (N - self.lobatto) + fq = idct(irfft(self._c, n=M, axis=-2), type=2 - self.lobatto, n=N, axis=-1) * ( + M * (N - self.lobatto) + ) return fq def harmonics(self): @@ -213,7 +202,7 @@ def compute_cheb(self, x): Returns ------- - cheb : _PiecewiseChebyshevBasis + cheb : PiecewiseChebyshevBasis Chebyshev coefficients αₙ(x=``x``) for f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). 
""" @@ -221,10 +210,23 @@ def compute_cheb(self, x): x = jnp.atleast_1d(x)[..., jnp.newaxis] cheb = cheb_from_dct(irfft_non_uniform(x, self._c, self.M, axis=-2), axis=-1) assert cheb.shape[-2:] == (x.shape[-2], self.N) - return _PiecewiseChebyshevBasis(cheb, self.domain) + return PiecewiseChebyshevBasis(cheb, self.domain) -class _PiecewiseChebyshevBasis: +def _subtract(c, k): + # subtract k from last axis of c, obeying numpy broadcasting + c_0 = c[..., 0] - k + c = jnp.concatenate( + [ + c_0[..., jnp.newaxis], + jnp.broadcast_to(c[..., 1:], (*c_0.shape, c.shape[-1] - 1)), + ], + axis=-1, + ) + return c + + +class PiecewiseChebyshevBasis: """Chebyshev series. { fₓ | fₓ : y ↦ ∑ₙ₌₀ᴺ⁻¹ aₙ(x) Tₙ(y) } @@ -233,8 +235,10 @@ class _PiecewiseChebyshevBasis: Attributes ---------- cheb : jnp.ndarray - Shape (..., N). + Shape (..., M, N). Chebyshev coefficients αₙ(x) for fₓ(y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). + M : int + Number of function in this basis set. N : int Chebyshev spectral resolution. domain : (float, float) @@ -250,25 +254,38 @@ def __init__(self, cheb, domain): Parameters ---------- cheb : jnp.ndarray - Shape (..., N). + Shape (..., M, N). Chebyshev coefficients αₙ(x=``x``) for f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). """ - self.cheb = cheb - self.N = cheb.shape[-1] + errorif(domain[0] > domain[-1], msg="Got inverted domain.") self.domain = domain + self.cheb = jnp.atleast_2d(cheb) + + @property + def M(self): + """Number of function in this basis set.""" + return self.cheb.shape[-2] - def _chebcast(self, arr): + @property + def N(self): + """Chebyshev spectral resolution.""" + return self.cheb.shape[-1] + + @staticmethod + def _chebcast(cheb, arr): # Input should not have rightmost dimension of cheb that iterates coefficients, - # but may have additional leftmost dimensions for batch operations. + # but may have additional leftmost dimension for batch operation. errorif( - arr.ndim > self.cheb.ndim, + arr.ndim > cheb.ndim, NotImplementedError, - msg=f"Got ndim {arr.ndim} > cheb.ndim {self.cheb.ndim}.", + msg=f"Only one additional axis for batch dimension is allowed. " + f"Got {arr.ndim - cheb.ndim + 1} additional axes.", ) - return self.cheb if arr.ndim < self.cheb.ndim else self.cheb[jnp.newaxis] + # Don't add additional axis unless necessary to appease JIT compilation. + return cheb if arr.ndim < cheb.ndim else cheb[jnp.newaxis] - def intersect(self, k=0, eps=_eps): + def intersect(self, k, eps=_eps): """Coordinates yᵢ such that f(x, yᵢ) = k(x). Parameters @@ -295,15 +312,17 @@ def intersect(self, k=0, eps=_eps): Boolean array into ``y`` indicating whether element is an intersect. """ - c = _subtract(self._chebcast(k), k) + k = jnp.atleast_1d(k) + c = _subtract(self._chebcast(self.cheb, k), k) # roots yᵢ of f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y) - k(x) y = _chebroots_vec(c) assert y.shape == (*c.shape[:-1], self.N - 1) - y = _filter_distinct(y, sentinel=-2, eps=eps) - # Pick sentinel above such that only distinct roots are considered intersects. - is_intersect = (jnp.abs(y.imag) <= eps) & (jnp.abs(y.real) <= 1) - y = jnp.where(is_intersect, y.real, 0) # ensure y is in domain of arcos + # Intersects must satisfy y ∈ [-1, 1]. + # Pick sentinel such that only distinct roots are considered intersects. + y = _filter_distinct(y, sentinel=-2.0, eps=eps) + is_intersect = (jnp.abs(y.imag) <= eps) & (jnp.abs(y.real) <= 1.0) + y = jnp.where(is_intersect, y.real, 1.0) # ensure y is in domain of arcos # TODO: Multipoint evaluation with FFT. # Chapter 10, https://doi.org/10.1017/CBO9781139856065. 
@@ -317,7 +336,7 @@ def intersect(self, k=0, eps=_eps): is_decreasing = s <= 0 is_increasing = s >= 0 - y = bijection_from_disc(y, self.domain[0], self.domain[-1]) + y = bijection_from_disc(y, *self.domain) return y, is_decreasing, is_increasing, is_intersect def bounce_points( @@ -357,13 +376,15 @@ def bounce_points( ------- bp1, bp2 : (jnp.ndarray, jnp.ndarray) Shape (*y.shape[:-2], num_well). - The field line-following coordinates of bounce points for a given pitch - along a field line. The pairs ``bp1`` and ``bp2`` form left and right - integration boundaries, respectively, for the bounce integrals. + The field line-following coordinates of bounce points. + The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, + respectively, for the bounce integrals. """ + errorif(self.N < 2, NotImplementedError, f"Got self.N = {self.N} < 2.") + # Flatten so that last axis enumerates intersects of a pitch along a field line. - y = _flatten_matrix(self._isomorphism_1d(y)) + y = _flatten_matrix(self._isomorphism_to_C1(y)) is_decreasing = _flatten_matrix(is_decreasing) is_increasing = _flatten_matrix(is_increasing) is_intersect = _flatten_matrix(is_intersect) @@ -375,44 +396,202 @@ def bounce_points( is_bp1 = is_decreasing & is_intersect is_bp2 = is_increasing & _fix_inversion(is_intersect, is_increasing) - sentinel = self.domain[0] - 1 + sentinel = self.domain[0] - 1.0 bp1 = take_mask(y, is_bp1, size=num_well, fill_value=sentinel) bp2 = take_mask(y, is_bp2, size=num_well, fill_value=sentinel) mask = (bp1 > sentinel) & (bp2 > sentinel) # Set outside mask to same value so integration is over set of measure zero. - bp1 = jnp.where(mask, bp1, 0) - bp2 = jnp.where(mask, bp2, 0) + bp1 = jnp.where(mask, bp1, 0.0) + bp2 = jnp.where(mask, bp2, 0.0) return bp1, bp2 + def eval1d(self, z, cheb=None): + """Evaluate piecewise Chebyshev spline at coordinates z. + + The coordinates z ∈ ℝ are assumed isomorphic to (x, y) ∈ ℝ² + where z integer division domain yields index into the proper + Chebyshev series of the spline and z mod domain is the coordinate + value along the domain of that Chebyshev series. + + Parameters + ---------- + z : jnp.ndarray + Shape (..., *cheb.shape[:-2], z.shape[-1]). + Isomorphic coordinates along field line [0, ∞). + cheb : jnp.ndarray + Shape (..., M, N). + Chebyshev coefficients to use. If not given, uses ``self.cheb``. + + Returns + ------- + f : jnp.ndarray + Shape z.shape. + Chebyshev basis evaluated at z. + + """ + cheb = self._chebcast(setdefault(cheb, self.cheb), z) + N = cheb.shape[-1] + x_idx, y = self._isomorphism_to_C2(z) + y = bijection_to_disc(y, self.domain[0], self.domain[1]) + # Chebyshev coefficients αₙ for f(z) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x[z]) Tₙ(y[z]) + # are held in cheb with shape (..., num cheb series, N). + cheb = jnp.take_along_axis(cheb, x_idx[..., jnp.newaxis], axis=-2) + f = idct_non_uniform(y, cheb, N) + assert f.shape == z.shape + return f + + def _isomorphism_to_C1(self, y): + """Return coordinates z ∈ ℂ isomorphic to (x, y) ∈ ℂ². + + Maps row x of y to z = y + f(x) where f(x) = x * |domain|. + + Parameters + ---------- + y : jnp.ndarray + Shape (..., y.shape[-2], y.shape[-1]). + Second to last axis iterates the rows. + + Returns + ------- + z : jnp.ndarray + Shape y.shape. + Isomorphic coordinates. + + """ + assert y.ndim >= 2 + z_shift = jnp.arange(y.shape[-2]) * (self.domain[-1] - self.domain[0]) + return y + z_shift[:, jnp.newaxis] + + def _isomorphism_to_C2(self, z): + """Return coordinates (x, y) ∈ ℂ² isomorphic to z ∈ ℂ. 
+ + Returns index x and value y such that z = f(x) + y where f(x) = x * |domain|. + + Parameters + ---------- + z : jnp.ndarray + Shape z.shape. + + Returns + ------- + x_idx, y_val : (jnp.ndarray, jnp.ndarray) + Shape z.shape. + Isomorphic coordinates. + + """ + x_idx, y_val = jnp.divmod(z - self.domain[0], self.domain[-1] - self.domain[0]) + return x_idx.astype(int), y_val + self.domain[0] + + def _check_shape(self, bp1, bp2, pitch): + """Return shapes that broadcast with (P, *self.cheb.shape[:-2], W).""" + # Ensure pitch batch dim exists and add back dim to broadcast with wells. + pitch = atleast_nd(self.cheb.ndim - 1, pitch)[..., jnp.newaxis] + # Same but back dim already exists. + bp1, bp2 = atleast_nd(self.cheb.ndim, bp1, bp2) + # Cheb has shape (..., M, N) and others + # have shape (P, ..., W) + errorif(not (bp1.ndim == bp2.ndim == pitch.ndim == self.cheb.ndim)) + return bp1, bp2, pitch + + def check_bounce_points(self, bp1, bp2, pitch, plot=True, **kwargs): + """Check that bounce points are computed correctly. + + Parameters + ---------- + bp1, bp2 : jnp.ndarray + Shape must broadcast with (P, *self.cheb.shape[:-2], W). + The field line-following coordinates of bounce points. + The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, + respectively, for the bounce integrals. + pitch : jnp.ndarray + Shape must broadcast with (P, *self.cheb.shape[:-2]). + λ values to evaluate the bounce integral. + plot : bool + Whether to plot stuff. Default is true. + kwargs : dict + Keyword arguments into ``plot_field_line``. + + """ + assert bp1.shape == bp2.shape + mask = (bp1 - bp2) != 0.0 + bp1 = jnp.where(mask, bp1, jnp.nan) + bp2 = jnp.where(mask, bp2, jnp.nan) + bp1, bp2, pitch = self._check_shape(bp1, bp2, pitch) + + err_1 = jnp.any(bp1 > bp2, axis=-1) + err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1], axis=-1) + B_m = self.eval1d((bp1 + bp2) / 2) + assert B_m.shape == bp1.shape + err_3 = jnp.any(B_m > 1 / pitch + self._eps, axis=-1) + if not (plot or jnp.any(err_1 | err_2 | err_3)): + return + + # Ensure l axis exists for iteration in below loop. + cheb = atleast_nd(3, self.cheb) + mask, bp1, bp2, B_m = atleast_3d_mid(mask, bp1, bp2, B_m) + err_1, err_2, err_3 = atleast_2d_end(err_1, err_2, err_3) + + print(np.sum(mask)) + + for l in np.ndindex(cheb.shape[:-2]): + for p in range(pitch.shape[0]): + if not (err_1[p, l] or err_2[p, l] or err_3[p, l]): + continue + _bp1 = bp1[p, l][mask[p, l]] + _bp2 = bp2[p, l][mask[p, l]] + if plot: + self.plot_field_line( + cheb[l], + pitch=pitch[p, l], + bp1=_bp1, + bp2=_bp2, + title_id=f"{p},{l}", + **kwargs, + ) + print(" bp1 | bp2") + print(jnp.column_stack([_bp1, _bp2])) + assert not err_1[p, l], "Bounce points have an inversion.\n" + assert not err_2[p, l], "Detected discontinuity.\n" + assert not err_3[p, l], ( + "Detected |B| > 1/λ in well. Increase Chebyshev resolution.\n" + f"{B_m[p, l][mask[p, l]]} > {1 / pitch[p, l] + self._eps}" + ) + if plot: + self.plot_field_line( + cheb[l], + pitch=pitch[:, l], + bp1=bp1[:, l], + bp2=bp2[:, l], + title_id=str(l), + **kwargs, + ) + def plot_field_line( self, - start, - stop, + cheb, + bp1=jnp.array([[]]), + bp2=jnp.array([[]]), + pitch=jnp.array([]), num=1000, - bp1=np.array([]), - bp2=np.array([]), - pitch=np.array([]), title=r"Computed bounce points for $\vert B \vert$ and pitch $\lambda$", title_id=None, - transparency_pitch=0.3, + transparency_pitch=0.5, show=True, ): """Plot the field line given spline of |B|. Parameters ---------- - start : float - Minimum ζ on plot. 
- stop : float - Maximum ζ on plot. + cheb : jnp.ndarray + Piecewise Chebyshev coefficients of |B| along the field line. num : int Number of ζ points to plot. Pick a big number. - bp1 : np.ndarray + bp1 : jnp.ndarray Bounce points with (∂|B|/∂ζ)|ρ,α <= 0. - bp2 : np.ndarray + bp2 : jnp.ndarray Bounce points with (∂|B|/∂ζ)|ρ,α >= 0. - pitch : np.ndarray + pitch : jnp.ndarray λ value. title : str Plot title. @@ -428,23 +607,24 @@ def plot_field_line( fig, ax : matplotlib figure and axes. """ - errorif(start is None or stop is None) legend = {} def add(lines): - if not hasattr(lines, "__iter__"): - lines = [lines] - for line in lines: + for line in setdefault(lines, [lines], hasattr(lines, "__iter__")): label = line.get_label() if label not in legend: legend[label] = line fig, ax = plt.subplots() - z = np.linspace(start=start, stop=stop, num=num) - add(ax.plot(z, self.eval1d(z), label=r"$\vert B \vert (\zeta)$")) + z = jnp.linspace( + start=self.domain[0], + stop=self.domain[0] + (self.domain[1] - self.domain[0]) * self.M, + num=num, + ) + add(ax.plot(z, self.eval1d(z, cheb), label=r"$\vert B \vert (\zeta)$")) if pitch is not None: - b = 1 / np.atleast_1d(pitch) + b = 1 / jnp.atleast_1d(pitch) for val in b: add( ax.axhline( @@ -454,13 +634,16 @@ def add(lines): label=r"$1 / \lambda$", ) ) - bp1, bp2 = np.atleast_2d(bp1, bp2) + bp1, bp2 = jnp.atleast_2d(bp1, bp2) for i in range(bp1.shape[0]): - bp1_i, bp2_i = _filter_nonzero_measure(bp1[i], bp2[i]) + if bp1.shape == bp2.shape: + _bp1, _bp2 = filter_bounce_points(bp1[i], bp2[i]) + else: + _bp1, _bp2 = bp1[i], bp2[i] add( ax.scatter( - bp1_i, - np.full_like(bp1_i, b[i]), + _bp1, + jnp.full_like(_bp1, b[i]), marker="v", color="tab:red", label="bp1", @@ -468,8 +651,8 @@ def add(lines): ) add( ax.scatter( - bp2_i, - np.full_like(bp2_i, b[i]), + _bp2, + jnp.full_like(_bp2, b[i]), marker="^", color="tab:green", label="bp2", @@ -480,7 +663,7 @@ def add(lines): ax.set_ylabel(r"$\vert B \vert \sim 1 / \lambda$") ax.legend(legend.values(), legend.keys(), loc="lower right") if title_id is not None: - title = f"{title}. id = {title_id}." + title = f"{title}. ID={title_id}." ax.set_title(title) plt.tight_layout() if show: @@ -488,131 +671,6 @@ def add(lines): plt.close() return fig, ax - def check_bounce_points( - self, bp1, bp2, pitch, plot=True, start=None, stop=None, **kwargs - ): - """Check that bounce points are computed correctly.""" - pitch = jnp.atleast_3d(pitch) - errorif(not (pitch.ndim == bp1.ndim == bp2.ndim == 3), NotImplementedError) - errorif(bp1.shape != bp2.shape) - - P, L, num_wells = bp1.shape - msg_1 = "Bounce points have an inversion." - err_1 = jnp.any(bp1 > bp2, axis=-1) - msg_2 = "Discontinuity detected." - err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1], axis=-1) - - for l in range(L): - for p in range(P): - B_mid = self.eval1d((bp1[p, l] + bp2[p, l]) / 2) - err_3 = jnp.any(B_mid > 1 / pitch[p, l] + self._eps) - if err_1[p, l] or err_2[p, l] or err_3: - bp1_p, bp2_p = _filter_nonzero_measure(bp1[p, l], bp2[p, l]) - B_mid = B_mid[(bp1[p, l] - bp2[p, l]) != 0] - if plot: - self.plot_field_line( - start=start, - stop=stop, - pitch=pitch[p, l], - bp1=bp1_p, - bp2=bp2_p, - title_id=f"{p},{l}", - **kwargs, - ) - print("bp1:", bp1_p) - print("bp2:", bp2_p) - assert not err_1[p, l], msg_1 - assert not err_2[p, l], msg_2 - msg_3 = ( - f"Detected B midpoint = {B_mid}>{1 / pitch[p, l] + self._eps} =" - " 1/pitch. You need to use more knots." 
- ) - assert not err_3, msg_3 - if plot: - self.plot_field_line( - start=start, - stop=stop, - pitch=pitch[:, l], - bp1=bp1[:, l], - bp2=bp2[:, l], - title_id=str(l), - **kwargs, - ) - - def eval1d(self, z): - """Evaluate piecewise Chebyshev spline at coordinates z. - - The coordinates z ∈ ℝ are assumed isomorphic to (x, y) ∈ ℝ² - where z integer division domain yields index into the proper - Chebyshev series of the spline and z mod domain is the coordinate - value along the domain of that Chebyshev series. - - Parameters - ---------- - z : jnp.ndarray - Shape (..., *cheb.shape[:-2], z.shape[-1]). - Isomorphic coordinates along field line [0, ∞). - - Returns - ------- - f : jnp.ndarray - Shape z.shape. - Chebyshev basis evaluated at z. - - """ - x_idx, y = self._isomorphism_2d(z) - y = bijection_to_disc(y, self.domain[0], self.domain[1]) - # Chebyshev coefficients αₙ for f(z) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x[z]) Tₙ(y[z]) - # are held in self.cheb with shape (..., num cheb series, N). - cheb = jnp.take_along_axis(self._chebcast(z), x_idx[..., jnp.newaxis], axis=-2) - f = idct_non_uniform(y, cheb, self.N) - assert f.shape == z.shape - return f - - def _isomorphism_1d(self, y): - """Return coordinates z ∈ ℂ isomorphic to (x, y) ∈ ℂ². - - Maps row x of y to z = α(x) + y where α(x) = x * |domain|. - - Parameters - ---------- - y : jnp.ndarray - Shape (..., y.shape[-2], y.shape[-1]). - Second to last axis iterates the rows. - - Returns - ------- - z : jnp.ndarray - Shape y.shape. - Isomorphic coordinates. - - """ - assert y.ndim >= 2 - period = self.domain[-1] - self.domain[0] - zeta_shift = period * jnp.arange(y.shape[-2]) - z = zeta_shift[:, jnp.newaxis] + y - return z - - def _isomorphism_2d(self, z): - """Return coordinates (x, y) ∈ ℂ² isomorphic to z ∈ ℂ. - - Returns index x and value y such that z = α(x) + y where α(x) = x * |domain|. - - Parameters - ---------- - z : jnp.ndarray - Shape z.shape. - - Returns - ------- - x_index, y_value : (jnp.ndarray, jnp.ndarray) - Shape z.shape. - Isomorphic coordinates. - - """ - x_index, y_value = jnp.divmod(z, self.domain[-1] - self.domain[0]) - return x_index.astype(int), y_value - def _bounce_quadrature(bp1, bp2, x, w, m, n, integrand, f, b_sup_z, B, T, pitch): """Bounce integrate ∫ f(ℓ) dℓ. @@ -655,12 +713,12 @@ def _bounce_quadrature(bp1, bp2, x, w, m, n, integrand, f, b_sup_z, B, T, pitch) b_sup_z : jnp.ndarray Shape (L, 1, m, n). Set of 2D Fourier spectral coefficients of B^ζ/|B|. - B : _PiecewiseChebyshevBasis + B : PiecewiseChebyshevBasis Set of 1D Chebyshev spectral coefficients of |B| along field line. - {|B|_α : ζ |B|(α, ζ) | α ∈ A } . - T : _PiecewiseChebyshevBasis + {|B|_α : ζ ↦ |B|(α, ζ) | α ∈ A }. + T : PiecewiseChebyshevBasis Set of 1D Chebyshev spectral coefficients of θ along field line. - {θ_α : ζ θ(α, ζ) | α ∈ A }. + {θ_α : ζ ↦ θ(α, ζ) | α ∈ A }. pitch : jnp.ndarray Shape (P, L, 1). λ values to evaluate the bounce integral at each field line. @@ -702,7 +760,7 @@ def _bounce_quadrature(bp1, bp2, x, w, m, n, integrand, f, b_sup_z, B, T, pitch) def required_names(): """Return names in ``data_index`` required to compute bounce integrals.""" - return ["B^zeta", "|B|"] + return ["B^zeta", "|B|", "iota"] # TODO: Assumes zeta = phi (alpha sequence) @@ -784,10 +842,10 @@ def bounce_integral( Poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) that specify field line. B : _PiecewiseChebyshevBasis Set of 1D Chebyshev spectral coefficients of |B| along field line. - {|B|_α : ζ |B|(α, ζ) | α ∈ A } . + {|B|_α : ζ ↦ |B|(α, ζ) | α ∈ A }. 
T : _PiecewiseChebyshevBasis Set of 1D Chebyshev spectral coefficients of θ along field line. - {θ_α : ζ θ(α, ζ) | α ∈ A }. + {θ_α : ζ ↦ θ(α, ζ) | α ∈ A }. """ # Resolution of periodic DESC coordinate tensor-product grid. @@ -824,7 +882,7 @@ def bounce_integral( if automorphism is not None: auto, grad_auto = automorphism w = w * grad_auto(x) - # Recall affine_bijection(auto(x), ζ_b₁, ζ_b₂) = ζ. + # Recall bijection_from_disc(auto(x), ζ_b₁, ζ_b₂) = ζ. x = auto(x) def bounce_integrate(integrand, f, pitch, weight=None, num_well=None): @@ -885,6 +943,8 @@ def bounce_integrate(integrand, f, pitch, weight=None, num_well=None): or B.cheb.shape[0] == 1 ) bp1, bp2 = B.bounce_points(*B.intersect(1 / pitch), num_well) + if check: + B.check_bounce_points(bp1, bp2, pitch, plot=True) P = pitch.shape[0] num_well = bp1.shape[-1] assert bp1.shape == bp2.shape == (P, L, num_well) diff --git a/desc/utils.py b/desc/utils.py index 1547fc9e34..eb8e459fd7 100644 --- a/desc/utils.py +++ b/desc/utils.py @@ -2,13 +2,14 @@ import operator import warnings +from functools import partial from itertools import combinations_with_replacement, permutations import numpy as np from scipy.special import factorial from termcolor import colored -from desc.backend import fori_loop, jit, jnp +from desc.backend import flatnonzero, fori_loop, jit, jnp, take class Timer: @@ -689,3 +690,73 @@ def broadcast_tree(tree_in, tree_out, dtype=int): # invalid tree structure else: raise ValueError("trees must be nested lists of dicts") + + +@partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={"size", "fill_value"}) +def take_mask(a, mask, size=None, fill_value=None): + """JIT compilable method to return ``a[mask][:size]`` padded by ``fill_value``. + + Parameters + ---------- + a : jnp.ndarray + The source array. + mask : jnp.ndarray + Boolean mask to index into ``a``. Should have same shape as ``a``. + size : int + Elements of ``a`` at the first size True indices of ``mask`` will be returned. + If there are fewer elements than size indicates, the returned array will be + padded with ``fill_value``. The size default is ``mask.size``. + fill_value : Any + When there are fewer than the indicated number of elements, the remaining + elements will be filled with ``fill_value``. Defaults to NaN for inexact types, + the largest negative value for signed types, the largest positive value for + unsigned types, and True for booleans. + + Returns + ------- + result : jnp.ndarray + Shape (size, ). + + """ + assert a.shape == mask.shape + idx = flatnonzero(mask, size=setdefault(size, mask.size), fill_value=mask.size) + return take( + a, + idx, + mode="fill", + fill_value=fill_value, + unique_indices=True, + indices_are_sorted=True, + ) + + +# TODO: Eventually remove and use numpy's stuff. 
+# https://github.com/numpy/numpy/issues/25805 +def atleast_nd(ndmin, *arys): + """Adds dimensions to front if necessary.""" + if ndmin == 1: + return jnp.atleast_1d(*arys) + if ndmin == 2: + return jnp.atleast_2d(*arys) + tup = tuple(jnp.array(ary, ndmin=ndmin) for ary in arys) + if len(tup) == 1: + tup = tup[0] + return tup + + +def atleast_3d_mid(*arys): + """Like np.atleast3d but if adds dim at axis 1 for 2d arrays.""" + arys = jnp.atleast_2d(*arys) + tup = tuple(ary[:, jnp.newaxis] if ary.ndim == 2 else ary for ary in arys) + if len(tup) == 1: + tup = tup[0] + return tup + + +def atleast_2d_end(*arys): + """Like np.atleast2d but if adds dim at axis 1 for 1d arrays.""" + arys = jnp.atleast_1d(*arys) + tup = tuple(ary[:, jnp.newaxis] if ary.ndim == 1 else ary for ary in arys) + if len(tup) == 1: + tup = tup[0] + return tup diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index e6e2719010..a09273657c 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -15,7 +15,12 @@ from tests.test_plotting import tol_1d from desc.backend import jnp -from desc.compute._quad_utils import ( +from desc.compute.utils import dot +from desc.equilibrium import Equilibrium +from desc.equilibrium.coords import get_rtz_grid +from desc.examples import get +from desc.grid import Grid, LinearGrid +from desc.integrals._quad_utils import ( automorphism_arcsin, automorphism_sin, bijection_from_disc, @@ -26,24 +31,18 @@ leggausslob, tanh_sinh, ) -from desc.compute.bounce_integral import ( +from desc.integrals.bounce_integral import ( _composite_linspace, - _filter_nonzero_measure, - _filter_not_nan, _get_extrema, _interp_to_argmin_B_hard, _interp_to_argmin_B_soft, bounce_integral, bounce_points, + filter_bounce_points, get_pitch, plot_field_line, required_names, ) -from desc.compute.utils import dot -from desc.equilibrium import Equilibrium -from desc.equilibrium.coords import get_rtz_grid -from desc.examples import get -from desc.grid import Grid, LinearGrid from desc.utils import only1 @@ -94,7 +93,8 @@ def test_get_extrema(): ) B_z_ra = B.derivative() extrema, B_extrema = _get_extrema(k, B.c, B_z_ra.c) - extrema, B_extrema = map(_filter_not_nan, (extrema, B_extrema)) + mask = ~np.isnan(extrema) + extrema, B_extrema = extrema[mask], B_extrema[mask] idx = np.argsort(extrema) extrema_scipy = np.sort(B_z_ra.roots(extrapolate=False)) @@ -130,7 +130,7 @@ def test_bp1_first(): pitch = 2.0 intersect = B.solve(1 / pitch, extrapolate=False) bp1, bp2 = bounce_points(pitch, knots, B.c, B.derivative().c, check=True) - bp1, bp2 = _filter_nonzero_measure(bp1, bp2) + bp1, bp2 = filter_bounce_points(bp1, bp2) assert bp1.size and bp2.size np.testing.assert_allclose(bp1, intersect[0::2]) np.testing.assert_allclose(bp2, intersect[1::2]) @@ -146,7 +146,7 @@ def test_bp2_first(): pitch = 2.0 intersect = B.solve(1 / pitch, extrapolate=False) bp1, bp2 = bounce_points(pitch, k, B.c, B.derivative().c, check=True) - bp1, bp2 = _filter_nonzero_measure(bp1, bp2) + bp1, bp2 = filter_bounce_points(bp1, bp2) assert bp1.size and bp2.size np.testing.assert_allclose(bp1, intersect[1:-1:2]) np.testing.assert_allclose(bp2, intersect[0::2][1:]) @@ -164,7 +164,7 @@ def test_bp1_before_extrema(): B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[3] + 1e-13 bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) - bp1, bp2 = _filter_nonzero_measure(bp1, bp2) + bp1, bp2 = filter_bounce_points(bp1, bp2) assert bp1.size and bp2.size intersect = B.solve(1 / pitch, 
extrapolate=False) np.testing.assert_allclose(bp1[1], 1.982767, rtol=1e-6) @@ -188,7 +188,7 @@ def test_bp2_before_extrema(): B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) - bp1, bp2 = _filter_nonzero_measure(bp1, bp2) + bp1, bp2 = filter_bounce_points(bp1, bp2) assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1, intersect[[0, -2]]) @@ -198,21 +198,6 @@ def test_bp2_before_extrema(): @pytest.mark.unit def test_extrema_first_and_before_bp1(): """Test that bounce points are computed correctly.""" - # In theory, this test should only pass if distinct=True when computing the - # intersections in bounce points. However, we can get lucky due to floating - # point errors, and it may also pass when distinct=False. - # If a regression fails this test, this note will save many hours of debugging. - # If the filter in place to return only the distinct roots is too coarse, - # in particular atol < 1e-15, then this test will error. In the resulting - # plot that the error will produce the red bounce point on the first hump - # disappears. The true sequence is green, double red, green, red, green. - # The first green was close to the double red and hence the first of the - # double red root pair was erased as it was falsely detected as a duplicate. - # The second of the double red root pair is correctly erased. All that is - # left is the green. Now the bounce_points method assumes the intermediate - # value theorem holds for the continuous spline, so when fed these sequence - # of roots, the correct action is to ignore the first green root since - # otherwise the interior of the bounce points would be hills and not valleys. start = -1.2 * np.pi end = -2 * start k = np.linspace(start, end, 7) @@ -227,7 +212,7 @@ def test_extrema_first_and_before_bp1(): pitch, k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], check=True, plot=False ) plot_field_line(B, pitch, bp1, bp2, start=k[2]) - bp1, bp2 = _filter_nonzero_measure(bp1, bp2) + bp1, bp2 = filter_bounce_points(bp1, bp2) assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1[0], 0.835319, rtol=1e-6) @@ -250,7 +235,7 @@ def test_extrema_first_and_before_bp2(): B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[1] + 1e-13 bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) - bp1, bp2 = _filter_nonzero_measure(bp1, bp2) + bp1, bp2 = filter_bounce_points(bp1, bp2) assert bp1.size and bp2.size # Our routine correctly detects intersection, while scipy, jnp.root fails. intersect = B.solve(1 / pitch, extrapolate=False) @@ -709,10 +694,7 @@ def integrand_den(B, pitch): num_well=1, weight=np.ones(zeta.size), ) - - drift_numerical_num = np.squeeze(drift_numerical_num) - drift_numerical_den = np.squeeze(drift_numerical_den) - drift_numerical = drift_numerical_num / drift_numerical_den + drift_numerical = np.squeeze(drift_numerical_num / drift_numerical_den) msg = "There should be one bounce integral per pitch in this example." 
assert drift_numerical.size == drift_analytic.size, msg np.testing.assert_allclose(drift_numerical, drift_analytic, atol=5e-3, rtol=5e-2) diff --git a/tests/test_fourier_bounce.py b/tests/test_fourier_bounce.py index 8718695766..e6b44aa4ac 100644 --- a/tests/test_fourier_bounce.py +++ b/tests/test_fourier_bounce.py @@ -3,22 +3,24 @@ import numpy as np import pytest from matplotlib import pyplot as plt +from numpy.polynomial.chebyshev import chebinterpolate, chebroots from numpy.polynomial.legendre import leggauss from tests.test_bounce_integral import _drift_analytic from tests.test_plotting import tol_1d from desc.backend import jnp -from desc.compute.bounce_integral import get_pitch -from desc.compute.fourier_bounce_integral import ( +from desc.equilibrium import Equilibrium +from desc.equilibrium.coords import get_rtz_grid, map_coordinates +from desc.examples import get +from desc.grid import Grid, LinearGrid +from desc.integrals._interp_utils import fourier_pts +from desc.integrals.bounce_integral import filter_bounce_points, get_pitch +from desc.integrals.fourier_bounce_integral import ( FourierChebyshevBasis, alpha_sequence, bounce_integral, required_names, ) -from desc.equilibrium import Equilibrium -from desc.equilibrium.coords import get_rtz_grid, map_coordinates -from desc.examples import get -from desc.grid import LinearGrid @pytest.mark.unit @@ -27,15 +29,56 @@ [(0, np.sqrt(2), 1, 2 * np.pi), (0, np.arange(1, 3) * np.sqrt(2), 5, 2 * np.pi)], ) def test_alpha_sequence(alpha_0, iota, num_period, period): - """Test field line poloidal label tracking utility.""" + """Test field line poloidal label tracking.""" iota = np.atleast_1d(iota) alphas = alpha_sequence(alpha_0, iota, num_period, period) assert alphas.shape == (iota.size, num_period) for i in range(iota.size): - assert np.unique(alphas[i]).size == num_period, "Is iota irrational?" + assert np.unique(alphas[i]).size == num_period, f"{iota} is irrational" print(alphas) +class TestBouncePoints: + """Test that bounce points are computed correctly.""" + + @staticmethod + def _cheb_intersect(cheb, k): + cheb = cheb.copy() + cheb[0] = cheb[0] - k + roots = chebroots(cheb) + intersect = roots[ + np.logical_and(np.isreal(roots), np.abs(roots.real) <= 1) + ].real + return intersect + + @staticmethod + def _periodic_fun(nodes, M, N): + alpha, zeta = nodes.T + f = -2 * np.cos(1 / (0.1 + zeta**2)) + 2 + return f.reshape(M, N) + + @pytest.mark.unit + def test_bp1_first(self): + """Test that bounce points are computed correctly.""" + pitch = 1 / np.linspace(1, 4, 20).reshape(20, 1) + M, N = 1, 10 + domain = (-1, 1) + nodes = FourierChebyshevBasis.nodes(M, N, domain=domain) + f = self._periodic_fun(nodes, M, N) + fcb = FourierChebyshevBasis(f, domain=domain) + pcb = fcb.compute_cheb(fourier_pts(M)) + bp1, bp2 = pcb.bounce_points(*pcb.intersect(1 / pitch)) + pcb.check_bounce_points(bp1, bp2, pitch.ravel()) + bp1, bp2 = filter_bounce_points(bp1, bp2) + + def f(z): + return -2 * np.cos(1 / (0.1 + z**2)) + 2 + + r = self._cheb_intersect(chebinterpolate(f, N), 1 / pitch) + np.testing.assert_allclose(bp1, r[::2], rtol=1e-3) + np.testing.assert_allclose(bp2, r[1::2], rtol=1e-3) + + @pytest.mark.unit def test_fourier_chebyshev(rho=1, M=8, N=32, f=lambda B, pitch: B * pitch): """Test bounce points...""" @@ -71,12 +114,18 @@ def test_drift(): np.testing.assert_allclose(rho, 0.5) # Make a set of nodes along a single fieldline. 
- grid_fsa = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, sym=eq.sym, NFP=eq.NFP) - data = eq.compute(["iota"], grid=grid_fsa) - iota = grid_fsa.compress(data["iota"]).item() + grid_rtz = Grid.create_meshgrid( + [ + rho, + np.linspace(0, 2 * np.pi, eq.M_grid), + np.linspace(0, 2 * np.pi, eq.N_grid + 1), + ], + ) + data = eq.compute(["iota"], grid=grid_rtz) + iota = grid_rtz.compress(data["iota"]).item() alpha = 0 zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) - grid = get_rtz_grid( + grid_raz = get_rtz_grid( eq, rho, alpha, @@ -97,7 +146,7 @@ def test_drift(): "psi", "a", ], - grid=grid, + grid=grid_raz, ) np.testing.assert_allclose(data["psi"], psi) np.testing.assert_allclose(data["iota"], iota) @@ -107,20 +156,38 @@ def test_drift(): data["rho"] = rho data["alpha"] = alpha data["zeta"] = zeta - data["psi"] = grid.compress(data["psi"]) - data["iota"] = grid.compress(data["iota"]) - data["shear"] = grid.compress(data["shear"]) - + data["psi"] = grid_raz.compress(data["psi"]) + data["iota"] = grid_raz.compress(data["iota"]) + data["shear"] = grid_raz.compress(data["shear"]) # Compute analytic approximation. drift_analytic, cvdrift, gbdrift, pitch = _drift_analytic(data) + # Compute numerical result. + M, N = eq.M_grid, 100 + clebsch = FourierChebyshevBasis.nodes(M=eq.M_grid, N=N, rho=rho) + data_2 = eq.compute(names=required_names() + ["cvdrift", "gbdrift"], grid=grid_rtz) + normalization = -np.sign(data["psi"]) * data["B ref"] * data["a"] ** 2 + cvdrift = data_2["cvdrift"] * normalization + gbdrift = data_2["gbdrift"] * normalization bounce_integrate, _ = bounce_integral( - data, - knots=zeta, - B_ref=B_ref, + grid_rtz, + data_2, + M, + N, + desc_from_clebsch=map_coordinates( + eq, + clebsch, + inbasis=("rho", "alpha", "zeta"), + period=(np.inf, 2 * np.pi, np.inf), + iota=np.broadcast_to(data["iota"], (M * N)), + ), + alpha_0=data["alpha"], + num_transit=5, + B_ref=data["B ref"], L_ref=data["a"], quad=leggauss(28), # converges to absolute and relative tolerance of 1e-7 check=True, + plot=True, ) def integrand_num(cvdrift, gbdrift, B, pitch): @@ -141,12 +208,8 @@ def integrand_den(B, pitch): f=[], pitch=pitch[:, np.newaxis], num_well=1, - weight=np.ones(zeta.size), ) - - drift_numerical_num = np.squeeze(drift_numerical_num) - drift_numerical_den = np.squeeze(drift_numerical_den) - drift_numerical = drift_numerical_num / drift_numerical_den + drift_numerical = np.squeeze(drift_numerical_num / drift_numerical_den) msg = "There should be one bounce integral per pitch in this example." 
assert drift_numerical.size == drift_analytic.size, msg np.testing.assert_allclose(drift_numerical, drift_analytic, atol=5e-3, rtol=5e-2) @@ -154,4 +217,5 @@ def integrand_den(B, pitch): fig, ax = plt.subplots() ax.plot(1 / pitch, drift_analytic) ax.plot(1 / pitch, drift_numerical) + plt.show() return fig diff --git a/tests/test_interp_utils.py b/tests/test_interp_utils.py index 1f47e74418..9cfd2239eb 100644 --- a/tests/test_interp_utils.py +++ b/tests/test_interp_utils.py @@ -14,7 +14,7 @@ from scipy.fft import idct as sidct from desc.backend import dct, idct, jnp, rfft -from desc.compute._interp_utils import ( +from desc.integrals._interp_utils import ( cheb_from_dct, cheb_pts, harmonic, @@ -26,8 +26,14 @@ polyder_vec, polyval_vec, ) -from desc.compute._quad_utils import bijection_to_disc -from desc.compute.bounce_integral import _filter_not_nan +from desc.integrals._quad_utils import bijection_to_disc + + +def filter_not_nan(a): + """Filter out nan from ``a`` while asserting nan is padded at right.""" + is_nan = jnp.isnan(a) + assert jnp.array_equal(is_nan, jnp.sort(is_nan, axis=-1)) + return a[~is_nan] @pytest.mark.unit @@ -64,7 +70,7 @@ def test_poly_root(): root = poly_root(c.T, sort=True, distinct=True) for j in range(c.shape[0]): unique_roots = np.unique(np.roots(c[j])) - root_filter = _filter_not_nan(root[j], check=True) + root_filter = filter_not_nan(root[j]) assert root_filter.size == unique_roots.size, j np.testing.assert_allclose( actual=root_filter, @@ -72,7 +78,7 @@ def test_poly_root(): err_msg=str(j), ) c = np.array([0, 1, -1, -8, 12]) - root = _filter_not_nan(poly_root(c, sort=True, distinct=True), check=True) + root = filter_not_nan(poly_root(c, sort=True, distinct=True)) unique_root = np.unique(np.roots(c)) assert root.size == unique_root.size np.testing.assert_allclose(root, unique_root) From 8b64e0dfccf3fa1f79a2ab22d0db4539edfb19c7 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 20 Aug 2024 21:13:04 -0400 Subject: [PATCH 207/241] Move integration algorithms to integrals subfolder --- desc/compute/__init__.py | 2 - desc/compute/_bootstrap.py | 2 +- desc/compute/_equil.py | 3 +- desc/compute/_field.py | 9 +- desc/compute/_geometry.py | 3 +- desc/compute/_metric.py | 3 +- desc/compute/_profiles.py | 3 +- desc/compute/_stability.py | 3 +- desc/compute/utils.py | 716 +------------------------ desc/{ => integrals}/singularities.py | 0 desc/integrals/surface_integral.py | 718 ++++++++++++++++++++++++++ desc/magnetic_fields/_core.py | 2 +- desc/objectives/_coils.py | 2 +- desc/objectives/_free_boundary.py | 6 +- desc/plotting.py | 3 +- desc/vmec.py | 2 +- tests/test_axis_limits.py | 3 +- tests/test_compute_utils.py | 606 +--------------------- tests/test_integrals.py | 526 +++++++++++++++++++ tests/test_plotting.py | 2 +- tests/test_singularities.py | 2 +- 21 files changed, 1274 insertions(+), 1342 deletions(-) rename desc/{ => integrals}/singularities.py (100%) create mode 100644 desc/integrals/surface_integral.py create mode 100644 tests/test_integrals.py diff --git a/desc/compute/__init__.py b/desc/compute/__init__.py index 87f03068bb..1b345a6492 100644 --- a/desc/compute/__init__.py +++ b/desc/compute/__init__.py @@ -26,8 +26,6 @@ # just need to import all the submodules here to register everything in the # data_index -from desc.utils import flatten_list - from . 
import ( _basis_vectors, _bootstrap, diff --git a/desc/compute/_bootstrap.py b/desc/compute/_bootstrap.py index 9bfd532775..2329682c06 100644 --- a/desc/compute/_bootstrap.py +++ b/desc/compute/_bootstrap.py @@ -13,8 +13,8 @@ from scipy.special import roots_legendre from ..backend import fori_loop, jnp +from ..integrals.surface_integral import surface_averages_map from .data_index import register_compute_fun -from .utils import surface_averages_map @register_compute_fun( diff --git a/desc/compute/_equil.py b/desc/compute/_equil.py index d1ac38f637..3ca0eebe20 100644 --- a/desc/compute/_equil.py +++ b/desc/compute/_equil.py @@ -13,8 +13,9 @@ from desc.backend import jnp +from ..integrals.surface_integral import surface_averages from .data_index import register_compute_fun -from .utils import cross, dot, safediv, safenorm, surface_averages +from .utils import cross, dot, safediv, safenorm @register_compute_fun( diff --git a/desc/compute/_field.py b/desc/compute/_field.py index e53f033464..2c39ea748d 100644 --- a/desc/compute/_field.py +++ b/desc/compute/_field.py @@ -13,17 +13,14 @@ from desc.backend import jnp -from .data_index import register_compute_fun -from .utils import ( - cross, - dot, - safediv, - safenorm, +from ..integrals.surface_integral import ( surface_averages, surface_integrals_map, surface_max, surface_min, ) +from .data_index import register_compute_fun +from .utils import cross, dot, safediv, safenorm @register_compute_fun( diff --git a/desc/compute/_geometry.py b/desc/compute/_geometry.py index 5e38a5c89b..139f91f537 100644 --- a/desc/compute/_geometry.py +++ b/desc/compute/_geometry.py @@ -11,8 +11,9 @@ from desc.backend import jnp +from ..integrals.surface_integral import line_integrals, surface_integrals from .data_index import register_compute_fun -from .utils import cross, dot, line_integrals, safenorm, surface_integrals +from .utils import cross, dot, safenorm @register_compute_fun( diff --git a/desc/compute/_metric.py b/desc/compute/_metric.py index 96228ffc06..ceb6703386 100644 --- a/desc/compute/_metric.py +++ b/desc/compute/_metric.py @@ -13,8 +13,9 @@ from desc.backend import jnp +from ..integrals.surface_integral import surface_averages from .data_index import register_compute_fun -from .utils import cross, dot, safediv, safenorm, surface_averages +from .utils import cross, dot, safediv, safenorm @register_compute_fun( diff --git a/desc/compute/_profiles.py b/desc/compute/_profiles.py index 1cfbc2f23f..56ebfc7220 100644 --- a/desc/compute/_profiles.py +++ b/desc/compute/_profiles.py @@ -13,8 +13,9 @@ from desc.backend import cond, jnp +from ..integrals.surface_integral import surface_averages, surface_integrals from .data_index import register_compute_fun -from .utils import cumtrapz, dot, safediv, surface_averages, surface_integrals +from .utils import cumtrapz, dot, safediv @register_compute_fun( diff --git a/desc/compute/_stability.py b/desc/compute/_stability.py index 57a7eef98d..3b820f83b0 100644 --- a/desc/compute/_stability.py +++ b/desc/compute/_stability.py @@ -13,8 +13,9 @@ from desc.backend import jnp +from ..integrals.surface_integral import surface_integrals_map from .data_index import register_compute_fun -from .utils import dot, surface_integrals_map +from .utils import dot @register_compute_fun( diff --git a/desc/compute/utils.py b/desc/compute/utils.py index f6c7b12e68..0c6e2f7de3 100644 --- a/desc/compute/utils.py +++ b/desc/compute/utils.py @@ -5,10 +5,10 @@ import numpy as np -from desc.backend import cond, execute_on_cpu, fori_loop, jnp, 
put -from desc.grid import ConcentricGrid, Grid, LinearGrid +from desc.backend import execute_on_cpu, jnp +from desc.grid import Grid -from ..utils import errorif, warnif +from ..utils import errorif from .data_index import allowed_kwargs, data_index # map from profile name to equilibrium parameter name @@ -869,713 +869,3 @@ def tupleset(t, i, value): ) return res - - -def _get_grid_surface(grid, surface_label): - """Return grid quantities associated with the given surface label. - - Parameters - ---------- - grid : Grid - Collocation grid containing the nodes to evaluate at. - surface_label : str - The surface label of rho, poloidal, or zeta. - - Returns - ------- - unique_size : int - The number of the unique values of the surface_label. - inverse_idx : ndarray - Indexing array to go from unique values to full grid. - spacing : ndarray - The relevant columns of grid.spacing. - has_endpoint_dupe : bool - Whether this surface label's nodes have a duplicate at the endpoint - of a periodic domain. (e.g. a node at 0 and 2π). - has_idx : bool - Whether the grid knows the number of unique nodes and inverse idx. - - """ - assert surface_label in {"rho", "poloidal", "zeta"} - if surface_label == "rho": - spacing = grid.spacing[:, 1:] - has_endpoint_dupe = False - elif surface_label == "poloidal": - spacing = grid.spacing[:, [0, 2]] - has_endpoint_dupe = isinstance(grid, LinearGrid) and grid._poloidal_endpoint - else: - spacing = grid.spacing[:, :2] - has_endpoint_dupe = isinstance(grid, LinearGrid) and grid._toroidal_endpoint - has_idx = hasattr(grid, f"num_{surface_label}") and hasattr( - grid, f"_inverse_{surface_label}_idx" - ) - unique_size = getattr(grid, f"num_{surface_label}", -1) - inverse_idx = getattr(grid, f"_inverse_{surface_label}_idx", jnp.array([])) - return unique_size, inverse_idx, spacing, has_endpoint_dupe, has_idx - - -def line_integrals( - grid, - q=jnp.array([1.0]), - line_label="poloidal", - fix_surface=("rho", 1.0), - expand_out=True, - tol=1e-14, -): - """Compute line integrals over curves covering the given surface. - - As an example, by specifying the combination of ``line_label="poloidal"`` and - ``fix_surface=("rho", 1.0)``, the intention is to integrate along the - outermost perimeter of a particular zeta surface (toroidal cross-section), - for each zeta surface in the grid. - - Notes - ----- - It is assumed that the integration curve has length 1 when the line - label is rho and length 2π when the line label is theta or zeta. - You may want to multiply the input by the line length Jacobian. - - The grid must have nodes on the specified surface in ``fix_surface``. - - Correctness is not guaranteed on grids with duplicate nodes. - An attempt to print a warning is made if the given grid has duplicate - nodes and is one of the predefined grid types - (``Linear``, ``Concentric``, ``Quadrature``). - If the grid is custom, no attempt is made to warn. - - Parameters - ---------- - grid : Grid - Collocation grid containing the nodes to evaluate at. - q : ndarray - Quantity to integrate. - The first dimension of the array should have size ``grid.num_nodes``. - When ``q`` is n-dimensional, the intention is to integrate, - over the domain parameterized by rho, poloidal, and zeta, - an n-dimensional function over the previously mentioned domain. - line_label : str - The coordinate curve to compute the integration over. - To clarify, a theta (poloidal) curve is the intersection of a - rho surface (flux surface) and zeta (toroidal) surface. 
- fix_surface : str, float - A tuple of the form: label, value. - ``fix_surface`` label should differ from ``line_label``. - By default, ``fix_surface`` is chosen to be the flux surface at rho=1. - expand_out : bool - Whether to expand the output array so that the output has the same - shape as the input. Defaults to true so that the output may be - broadcast in the same way as the input. Setting to false will save - memory. - tol : float - Tolerance for considering nodes the same. - Only relevant if the grid object doesn't already have this information. - - Returns - ------- - integrals : ndarray - Line integrals of the input over curves covering the given surface. - By default, the returned array has the same shape as the input. - - """ - line_label = grid.get_label(line_label) - fix_label = grid.get_label(fix_surface[0]) - errorif( - line_label == fix_label, - msg="There is no valid use for this combination of inputs.", - ) - errorif( - line_label != "poloidal" and isinstance(grid, ConcentricGrid), - msg="ConcentricGrid should only be used for poloidal line integrals.", - ) - warnif( - isinstance(grid, LinearGrid) and grid.endpoint, - msg="Correctness not guaranteed on grids with duplicate nodes.", - ) - # Generate a new quantity q_prime which is zero everywhere - # except on the fixed surface, on which q_prime takes the value of q. - # Then forward the computation to surface_integrals(). - # The differential element of the line integral, denoted dl, - # should correspond to the line label's spacing. - # The differential element of the surface integral is - # ds = dl * fix_surface_dl, so we scale q_prime by 1 / fix_surface_dl. - axis = {"rho": 0, "poloidal": 1, "zeta": 2} - column_id = axis[fix_label] - mask = grid.nodes[:, column_id] == fix_surface[1] - q_prime = (mask * jnp.atleast_1d(q).T / grid.spacing[:, column_id]).T - (surface_label,) = axis.keys() - {line_label, fix_label} - return surface_integrals(grid, q_prime, surface_label, expand_out, tol) - - -def surface_integrals( - grid, q=jnp.array([1.0]), surface_label="rho", expand_out=True, tol=1e-14 -): - """Compute a surface integral for each surface in the grid. - - Notes - ----- - It is assumed that the integration surface has area 4π² when the - surface label is rho and area 2π when the surface label is theta or - zeta. You may want to multiply the input by the surface area Jacobian. - - Parameters - ---------- - grid : Grid - Collocation grid containing the nodes to evaluate at. - q : ndarray - Quantity to integrate. - The first dimension of the array should have size ``grid.num_nodes``. - When ``q`` is n-dimensional, the intention is to integrate, - over the domain parameterized by rho, poloidal, and zeta, - an n-dimensional function over the previously mentioned domain. - surface_label : str - The surface label of rho, poloidal, or zeta to compute the integration over. - expand_out : bool - Whether to expand the output array so that the output has the same - shape as the input. Defaults to true so that the output may be - broadcast in the same way as the input. Setting to false will save - memory. - tol : float - Tolerance for considering nodes the same. - Only relevant if the grid object doesn't already have this information. - - Returns - ------- - integrals : ndarray - Surface integral of the input over each surface in the grid. - By default, the returned array has the same shape as the input. 
- - """ - return surface_integrals_map(grid, surface_label, expand_out, tol)(q) - - -def surface_integrals_map(grid, surface_label="rho", expand_out=True, tol=1e-14): - """Returns a method to compute any surface integral for each surface in the grid. - - Parameters - ---------- - grid : Grid - Collocation grid containing the nodes to evaluate at. - surface_label : str - The surface label of rho, poloidal, or zeta to compute the integration over. - expand_out : bool - Whether to expand the output array so that the output has the same - shape as the input. Defaults to true so that the output may be - broadcast in the same way as the input. Setting to false will save - memory. - tol : float - Tolerance for considering nodes the same. - Only relevant if the grid object doesn't already have this information. - - Returns - ------- - function : callable - Method to compute any surface integral of the input ``q`` over each - surface in the grid with code: ``function(q)``. - - """ - surface_label = grid.get_label(surface_label) - warnif( - surface_label == "poloidal" and isinstance(grid, ConcentricGrid), - msg="Integrals over constant poloidal surfaces" - " are poorly defined for ConcentricGrid.", - ) - unique_size, inverse_idx, spacing, has_endpoint_dupe, has_idx = _get_grid_surface( - grid, surface_label - ) - spacing = jnp.prod(spacing, axis=1) - - # Todo: Define mask as a sparse matrix once sparse matrices are no longer - # experimental in jax. - if has_idx: - # The ith row of masks is True only at the indices which correspond to the - # ith surface. The integral over the ith surface is the dot product of the - # ith row vector and the integrand defined over all the surfaces. - mask = inverse_idx == jnp.arange(unique_size)[:, jnp.newaxis] - # Imagine a torus cross-section at zeta=π. - # A grid with a duplicate zeta=π node has 2 of those cross-sections. - # In grid.py, we multiply by 1/n the areas of surfaces with - # duplicity n. This prevents the area of that surface from being - # double-counted, as surfaces with the same node value are combined - # into 1 integral, which sums their areas. Thus, if the zeta=π - # cross-section has duplicity 2, we ensure that the area on the zeta=π - # surface will have the correct total area of π+π = 2π. - # An edge case exists if the duplicate surface has nodes with - # different values for the surface label, which only occurs when - # has_endpoint_dupe is true. If ``has_endpoint_dupe`` is true, this grid - # has a duplicate surface at surface_label=0 and - # surface_label=max surface value. Although the modulo of these values - # are equal, their numeric values are not, so the integration - # would treat them as different surfaces. We solve this issue by - # combining the indices corresponding to the integrands of the duplicated - # surface, so that the duplicate surface is treated as one, like in the - # previous paragraph. - mask = cond( - has_endpoint_dupe, - lambda _: put(mask, jnp.array([0, -1]), mask[0] | mask[-1]), - lambda _: mask, - operand=None, - ) - else: - # If we don't have the idx attributes, we are forced to expand out. 
- errorif( - not has_idx and not expand_out, - msg=f"Grid lacks attributes 'num_{surface_label}' and " - f"'inverse_{surface_label}_idx', so this method " - f"can't satisfy the request expand_out={expand_out}.", - ) - # don't try to expand if already expanded - expand_out = expand_out and has_idx - axis = {"rho": 0, "poloidal": 1, "zeta": 2}[surface_label] - # Converting nodes from numpy.ndarray to jaxlib.xla_extension.ArrayImpl - # reduces memory usage by > 400% for the forward computation and Jacobian. - nodes = jnp.asarray(grid.nodes[:, axis]) - # This branch will execute for custom grids, which don't have a use - # case for having duplicate nodes, so we don't bother to modulo nodes - # by 2pi or 2pi/NFP. - mask = jnp.abs(nodes - nodes[:, jnp.newaxis]) <= tol - # The above implementation was benchmarked to be more efficient than - # alternatives with explicit loops in GitHub pull request #934. - - def integrate(q=jnp.array([1.0])): - """Compute a surface integral for each surface in the grid. - - Notes - ----- - It is assumed that the integration surface has area 4π² when the - surface label is rho and area 2π when the surface label is theta or - zeta. You may want to multiply the input by the surface area Jacobian. - - Parameters - ---------- - q : ndarray - Quantity to integrate. - The first dimension of the array should have size ``grid.num_nodes``. - When ``q`` is n-dimensional, the intention is to integrate, - over the domain parameterized by rho, poloidal, and zeta, - an n-dimensional function over the previously mentioned domain. - - Returns - ------- - integrals : ndarray - Surface integral of the input over each surface in the grid. - - """ - integrands = (spacing * jnp.nan_to_num(q).T).T - integrals = jnp.tensordot(mask, integrands, axes=1) - return grid.expand(integrals, surface_label) if expand_out else integrals - - return integrate - - -def surface_averages( - grid, - q, - sqrt_g=jnp.array([1.0]), - surface_label="rho", - denominator=None, - expand_out=True, - tol=1e-14, -): - """Compute a surface average for each surface in the grid. - - Notes - ----- - Implements the flux-surface average formula given by equation 4.9.11 in - W.D. D'haeseleer et al. (1991) doi:10.1007/978-3-642-75595-8. - - Parameters - ---------- - grid : Grid - Collocation grid containing the nodes to evaluate at. - q : ndarray - Quantity to average. - The first dimension of the array should have size ``grid.num_nodes``. - When ``q`` is n-dimensional, the intention is to average, - over the domain parameterized by rho, poloidal, and zeta, - an n-dimensional function over the previously mentioned domain. - sqrt_g : ndarray - Coordinate system Jacobian determinant; see ``data_index["sqrt(g)"]``. - surface_label : str - The surface label of rho, poloidal, or zeta to compute the average over. - denominator : ndarray - By default, the denominator is computed as the surface integral of - ``sqrt_g``. This parameter can optionally be supplied to avoid - redundant computations or to use a different denominator to compute - the average. This array should broadcast with arrays of size - ``grid.num_nodes`` (``grid.num_surface_label``) if ``expand_out`` - is true (false). - expand_out : bool - Whether to expand the output array so that the output has the same - shape as the input. Defaults to true so that the output may be - broadcast in the same way as the input. Setting to false will save - memory. - tol : float - Tolerance for considering nodes the same. 
- Only relevant if the grid object doesn't already have this information. - - Returns - ------- - averages : ndarray - Surface average of the input over each surface in the grid. - By default, the returned array has the same shape as the input. - - """ - return surface_averages_map(grid, surface_label, expand_out, tol)( - q, sqrt_g, denominator - ) - - -def surface_averages_map(grid, surface_label="rho", expand_out=True, tol=1e-14): - """Returns a method to compute any surface average for each surface in the grid. - - Parameters - ---------- - grid : Grid - Collocation grid containing the nodes to evaluate at. - surface_label : str - The surface label of rho, poloidal, or zeta to compute the average over. - expand_out : bool - Whether to expand the output array so that the output has the same - shape as the input. Defaults to true so that the output may be - broadcast in the same way as the input. Setting to false will save - memory. - tol : float - Tolerance for considering nodes the same. - Only relevant if the grid object doesn't already have this information. - - Returns - ------- - function : callable - Method to compute any surface average of the input ``q`` and optionally - the volume Jacobian ``sqrt_g`` over each surface in the grid with code: - ``function(q, sqrt_g)``. - - """ - surface_label = grid.get_label(surface_label) - has_idx = hasattr(grid, f"num_{surface_label}") and hasattr( - grid, f"_inverse_{surface_label}_idx" - ) - # If we don't have the idx attributes, we are forced to expand out. - errorif( - not has_idx and not expand_out, - msg=f"Grid lacks attributes 'num_{surface_label}' and " - f"'inverse_{surface_label}_idx', so this method " - f"can't satisfy the request expand_out={expand_out}.", - ) - integrate = surface_integrals_map( - grid, surface_label, expand_out=not has_idx, tol=tol - ) - # don't try to expand if already expanded - expand_out = expand_out and has_idx - - def _surface_averages(q, sqrt_g=jnp.array([1.0]), denominator=None): - """Compute a surface average for each surface in the grid. - - Notes - ----- - Implements the flux-surface average formula given by equation 4.9.11 in - W.D. D'haeseleer et al. (1991) doi:10.1007/978-3-642-75595-8. - - Parameters - ---------- - q : ndarray - Quantity to average. - The first dimension of the array should have size ``grid.num_nodes``. - When ``q`` is n-dimensional, the intention is to average, - over the domain parameterized by rho, poloidal, and zeta, - an n-dimensional function over the previously mentioned domain. - sqrt_g : ndarray - Coordinate system Jacobian determinant; see ``data_index["sqrt(g)"]``. - denominator : ndarray - By default, the denominator is computed as the surface integral of - ``sqrt_g``. This parameter can optionally be supplied to avoid - redundant computations or to use a different denominator to compute - the average. This array should broadcast with arrays of size - ``grid.num_nodes`` (``grid.num_surface_label``) if ``expand_out`` - is true (false). - - Returns - ------- - averages : ndarray - Surface average of the input over each surface in the grid. 
- - """ - q, sqrt_g = jnp.atleast_1d(q, sqrt_g) - numerator = integrate((sqrt_g * q.T).T) - # memory optimization to call expand() at most once - if denominator is None: - # skip integration if constant - denominator = ( - (4 * jnp.pi**2 if surface_label == "rho" else 2 * jnp.pi) * sqrt_g - if sqrt_g.size == 1 - else integrate(sqrt_g) - ) - averages = (numerator.T / denominator).T - if expand_out: - averages = grid.expand(averages, surface_label) - else: - if expand_out: - # implies denominator given with size grid.num_nodes - numerator = grid.expand(numerator, surface_label) - averages = (numerator.T / denominator).T - return averages - - return _surface_averages - - -def surface_integrals_transform(grid, surface_label="rho"): - """Returns a method to compute any integral transform over each surface in grid. - - The returned method takes an array input ``q`` and returns an array output. - - Given a set of kernel functions in ``q``, each parameterized by at most - five variables, the returned method computes an integral transform, - reducing ``q`` to a set of functions of at most three variables. - - Define the domain D = u₁ × u₂ × u₃ and the codomain C = u₄ × u₅ × u₆. - For every surface of constant u₁ in the domain, the returned method - evaluates the transform Tᵤ₁ : u₂ × u₃ × C → C, where Tᵤ₁ projects - away the parameters u₂ and u₃ via an integration of the given kernel - function Kᵤ₁ over the corresponding surface of constant u₁. - - Notes - ----- - It is assumed that the integration surface has area 4π² when the - surface label is rho and area 2π when the surface label is theta or - zeta. You may want to multiply the input ``q`` by the surface area - Jacobian. - - Parameters - ---------- - grid : Grid - Collocation grid containing the nodes to evaluate at. - surface_label : str - The surface label of rho, poloidal, or zeta to compute the integration over. - These correspond to the domain parameters discussed in this method's - description. In particular, ``surface_label`` names u₁. - - Returns - ------- - function : callable - Method to compute any surface integral transform of the input ``q`` over - each surface in the grid with code: ``function(q)``. - - The first dimension of ``q`` should always discretize some function, g, - over the domain, and therefore, have size ``grid.num_nodes``. - The second dimension may discretize some function, f, over the - codomain, and therefore, have size that matches the desired number of - points at which the output is evaluated. - - This method can also be used to compute the output one point at a time, - in which case ``q`` can have shape (``grid.num_nodes``, ). - - Input - ----- - If ``q`` has one dimension, then it should have shape - (``grid.num_nodes``, ). - If ``q`` has multiple dimensions, then it should have shape - (``grid.num_nodes``, *f.shape). - - Output - ------ - Each element along the first dimension of the returned array, stores - Tᵤ₁ for a particular surface of constant u₁ in the given grid. - The order is sorted in increasing order of the values which specify u₁. - - If ``q`` has one dimension, the returned array has shape - (grid.num_surface_label, ). - If ``q`` has multiple dimensions, the returned array has shape - (grid.num_surface_label, *f.shape). - - """ - # Expansion should not occur here. 
The typical use case of this method is to - # transform into the computational domain, so the second dimension that - # discretizes f over the codomain will typically have size grid.num_nodes - # to broadcast with quantities in data_index. - surface_label = grid.get_label(surface_label) - has_idx = hasattr(grid, f"num_{surface_label}") and hasattr( - grid, f"_inverse_{surface_label}_idx" - ) - errorif( - not has_idx, - msg=f"Grid lacks attributes 'num_{surface_label}' and " - f"'inverse_{surface_label}_idx', which are required for this function.", - ) - return surface_integrals_map(grid, surface_label, expand_out=False) - - -def surface_variance( - grid, - q, - weights=jnp.array([1.0]), - bias=False, - surface_label="rho", - expand_out=True, - tol=1e-14, -): - """Compute the weighted sample variance of ``q`` on each surface of the grid. - - Computes nₑ / (nₑ − b) * (∑ᵢ₌₁ⁿ (qᵢ − q̅)² wᵢ) / (∑ᵢ₌₁ⁿ wᵢ). - wᵢ is the weight assigned to qᵢ given by the product of ``weights[i]`` and - the differential surface area element (not already weighted by the area - Jacobian) at the node where qᵢ is evaluated, - q̅ is the weighted mean of q, - b is 0 if the biased sample variance is to be returned and 1 otherwise, - n is the number of samples on a surface, and - nₑ ≝ (∑ᵢ₌₁ⁿ wᵢ)² / ∑ᵢ₌₁ⁿ wᵢ² is the effective number of samples. - - As the weights wᵢ approach each other, nₑ approaches n, and the output - converges to ∑ᵢ₌₁ⁿ (qᵢ − q̅)² / (n − b). - - Notes - ----- - There are three different methods to unbias the variance of a weighted - sample so that the computed variance better estimates the true variance. - Whether the method is correct for a particular use case depends on what - the weights assigned to each sample represent. - - This function implements the first case, where the weights are not random - and are intended to assign more weight to some samples for reasons - unrelated to differences in uncertainty between samples. See - https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Reliability_weights. - - The second case is when the weights are intended to assign more weight - to samples with less uncertainty. See - https://en.wikipedia.org/wiki/Inverse-variance_weighting. - The unbiased sample variance for this case is obtained by replacing the - effective number of samples in the formula this function implements, - nₑ, with the actual number of samples n. - - The third case is when the weights denote the integer frequency of each - sample. See - https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Frequency_weights. - This is indeed a distinct case from the above two because here the - weights encode additional information about the distribution. - - Parameters - ---------- - grid : Grid - Collocation grid containing the nodes to evaluate at. - q : ndarray - Quantity to compute the sample variance. - weights : ndarray - Weight assigned to each sample of ``q``. - A good candidate for this parameter is the surface area Jacobian. - bias : bool - If this condition is true, then the biased estimator of the sample - variance is returned. This is desirable if you are only concerned with - computing the variance of the given set of numbers and not the - distribution the numbers are (potentially) sampled from. - surface_label : str - The surface label of rho, poloidal, or zeta to compute the variance over. - expand_out : bool - Whether to expand the output array so that the output has the same - shape as the input. Defaults to true so that the output may be - broadcast in the same way as the input. 
Setting to false will save - memory. - tol : float - Tolerance for considering nodes the same. - Only relevant if the grid object doesn't already have this information. - - Returns - ------- - variance : ndarray - Variance of the given weighted sample over each surface in the grid. - By default, the returned array has the same shape as the input. - - """ - surface_label = grid.get_label(surface_label) - _, _, spacing, _, has_idx = _get_grid_surface(grid, surface_label) - # If we don't have the idx attributes, we are forced to expand out. - errorif( - not has_idx and not expand_out, - msg=f"Grid lacks attributes 'num_{surface_label}' and " - f"'inverse_{surface_label}_idx', so this method " - f"can't satisfy the request expand_out={expand_out}.", - ) - integrate = surface_integrals_map( - grid, surface_label, expand_out=not has_idx, tol=tol - ) - - v1 = integrate(weights) - v2 = integrate(weights**2 * jnp.prod(spacing, axis=-1)) - # effective number of samples per surface - n_e = v1**2 / v2 - # analogous to Bessel's bias correction - correction = n_e / (n_e - (not bias)) - - q = jnp.atleast_1d(q) - # compute variance in two passes to avoid catastrophic round off error - mean = (integrate((weights * q.T).T).T / v1).T - if has_idx: # guard so that we don't try to expand when already expanded - mean = grid.expand(mean, surface_label) - variance = (correction * integrate((weights * ((q - mean) ** 2).T).T).T / v1).T - if expand_out and has_idx: - return grid.expand(variance, surface_label) - else: - return variance - - -def surface_max(grid, x, surface_label="rho"): - """Get the max of x for each surface in the grid. - - Parameters - ---------- - grid : Grid - Collocation grid containing the nodes to evaluate at. - x : ndarray - Quantity to find max. - The array should have size grid.num_nodes. - surface_label : str - The surface label of rho, poloidal, or zeta to compute max over. - - Returns - ------- - maxs : ndarray - Maximum of x over each surface in grid. - The returned array has the same shape as the input. - - """ - return -surface_min(grid, -x, surface_label) - - -def surface_min(grid, x, surface_label="rho"): - """Get the min of x for each surface in the grid. - - Parameters - ---------- - grid : Grid - Collocation grid containing the nodes to evaluate at. - x : ndarray - Quantity to find min. - The array should have size grid.num_nodes. - surface_label : str - The surface label of rho, poloidal, or zeta to compute min over. - - Returns - ------- - mins : ndarray - Minimum of x over each surface in grid. - The returned array has the same shape as the input. - - """ - surface_label = grid.get_label(surface_label) - unique_size, inverse_idx, _, _, has_idx = _get_grid_surface(grid, surface_label) - errorif( - not has_idx, - NotImplementedError, - msg=f"Grid lacks attributes 'num_{surface_label}' and " - f"'inverse_{surface_label}_idx', which are required for this function.", - ) - inverse_idx = jnp.asarray(inverse_idx) - x = jnp.asarray(x) - mins = jnp.full(unique_size, jnp.inf) - - def body(i, mins): - mins = put(mins, inverse_idx[i], jnp.minimum(x[i], mins[inverse_idx[i]])) - return mins - - mins = fori_loop(0, inverse_idx.size, body, mins) - # The above implementation was benchmarked to be more efficient than - # alternatives without explicit loops in GitHub pull request #501. 
- return grid.expand(mins, surface_label) diff --git a/desc/singularities.py b/desc/integrals/singularities.py similarity index 100% rename from desc/singularities.py rename to desc/integrals/singularities.py diff --git a/desc/integrals/surface_integral.py b/desc/integrals/surface_integral.py new file mode 100644 index 0000000000..ae9f62be46 --- /dev/null +++ b/desc/integrals/surface_integral.py @@ -0,0 +1,718 @@ +"""Surface integrals of non-singular functions.""" + +from desc.backend import cond, fori_loop, jnp, put +from desc.grid import ConcentricGrid, LinearGrid +from desc.utils import errorif, warnif + +# TODO: Make these objects that override callable method instead of returning callables. +# Would make simpler to default to more efficient methods on tensor product grids. + + +def _get_grid_surface(grid, surface_label): + """Return grid quantities associated with the given surface label. + + Parameters + ---------- + grid : Grid + Collocation grid containing the nodes to evaluate at. + surface_label : str + The surface label of rho, poloidal, or zeta. + + Returns + ------- + unique_size : int + The number of the unique values of the surface_label. + inverse_idx : ndarray + Indexing array to go from unique values to full grid. + spacing : ndarray + The relevant columns of grid.spacing. + has_endpoint_dupe : bool + Whether this surface label's nodes have a duplicate at the endpoint + of a periodic domain. (e.g. a node at 0 and 2π). + has_idx : bool + Whether the grid knows the number of unique nodes and inverse idx. + + """ + assert surface_label in {"rho", "poloidal", "zeta"} + if surface_label == "rho": + spacing = grid.spacing[:, 1:] + has_endpoint_dupe = False + elif surface_label == "poloidal": + spacing = grid.spacing[:, [0, 2]] + has_endpoint_dupe = isinstance(grid, LinearGrid) and grid._poloidal_endpoint + else: + spacing = grid.spacing[:, :2] + has_endpoint_dupe = isinstance(grid, LinearGrid) and grid._toroidal_endpoint + has_idx = hasattr(grid, f"num_{surface_label}") and hasattr( + grid, f"_inverse_{surface_label}_idx" + ) + unique_size = getattr(grid, f"num_{surface_label}", -1) + inverse_idx = getattr(grid, f"_inverse_{surface_label}_idx", jnp.array([])) + return unique_size, inverse_idx, spacing, has_endpoint_dupe, has_idx + + +def line_integrals( + grid, + q=jnp.array([1.0]), + line_label="poloidal", + fix_surface=("rho", 1.0), + expand_out=True, + tol=1e-14, +): + """Compute line integrals over curves covering the given surface. + + As an example, by specifying the combination of ``line_label="poloidal"`` and + ``fix_surface=("rho", 1.0)``, the intention is to integrate along the + outermost perimeter of a particular zeta surface (toroidal cross-section), + for each zeta surface in the grid. + + Notes + ----- + It is assumed that the integration curve has length 1 when the line + label is rho and length 2π when the line label is theta or zeta. + You may want to multiply the input by the line length Jacobian. + + The grid must have nodes on the specified surface in ``fix_surface``. + + Correctness is not guaranteed on grids with duplicate nodes. + An attempt to print a warning is made if the given grid has duplicate + nodes and is one of the predefined grid types + (``Linear``, ``Concentric``, ``Quadrature``). + If the grid is custom, no attempt is made to warn. + + Parameters + ---------- + grid : Grid + Collocation grid containing the nodes to evaluate at. + q : ndarray + Quantity to integrate. + The first dimension of the array should have size ``grid.num_nodes``. 
+ When ``q`` is n-dimensional, the intention is to integrate, + over the domain parameterized by rho, poloidal, and zeta, + an n-dimensional function over the previously mentioned domain. + line_label : str + The coordinate curve to compute the integration over. + To clarify, a theta (poloidal) curve is the intersection of a + rho surface (flux surface) and zeta (toroidal) surface. + fix_surface : str, float + A tuple of the form: label, value. + ``fix_surface`` label should differ from ``line_label``. + By default, ``fix_surface`` is chosen to be the flux surface at rho=1. + expand_out : bool + Whether to expand the output array so that the output has the same + shape as the input. Defaults to true so that the output may be + broadcast in the same way as the input. Setting to false will save + memory. + tol : float + Tolerance for considering nodes the same. + Only relevant if the grid object doesn't already have this information. + + Returns + ------- + integrals : ndarray + Line integrals of the input over curves covering the given surface. + By default, the returned array has the same shape as the input. + + """ + line_label = grid.get_label(line_label) + fix_label = grid.get_label(fix_surface[0]) + errorif( + line_label == fix_label, + msg="There is no valid use for this combination of inputs.", + ) + errorif( + line_label != "poloidal" and isinstance(grid, ConcentricGrid), + msg="ConcentricGrid should only be used for poloidal line integrals.", + ) + warnif( + isinstance(grid, LinearGrid) and grid.endpoint, + msg="Correctness not guaranteed on grids with duplicate nodes.", + ) + # Generate a new quantity q_prime which is zero everywhere + # except on the fixed surface, on which q_prime takes the value of q. + # Then forward the computation to surface_integrals(). + # The differential element of the line integral, denoted dl, + # should correspond to the line label's spacing. + # The differential element of the surface integral is + # ds = dl * fix_surface_dl, so we scale q_prime by 1 / fix_surface_dl. + axis = {"rho": 0, "poloidal": 1, "zeta": 2} + column_id = axis[fix_label] + mask = grid.nodes[:, column_id] == fix_surface[1] + q_prime = (mask * jnp.atleast_1d(q).T / grid.spacing[:, column_id]).T + (surface_label,) = axis.keys() - {line_label, fix_label} + return surface_integrals(grid, q_prime, surface_label, expand_out, tol) + + +def surface_integrals( + grid, q=jnp.array([1.0]), surface_label="rho", expand_out=True, tol=1e-14 +): + """Compute a surface integral for each surface in the grid. + + Notes + ----- + It is assumed that the integration surface has area 4π² when the + surface label is rho and area 2π when the surface label is theta or + zeta. You may want to multiply the input by the surface area Jacobian. + + Parameters + ---------- + grid : Grid + Collocation grid containing the nodes to evaluate at. + q : ndarray + Quantity to integrate. + The first dimension of the array should have size ``grid.num_nodes``. + When ``q`` is n-dimensional, the intention is to integrate, + over the domain parameterized by rho, poloidal, and zeta, + an n-dimensional function over the previously mentioned domain. + surface_label : str + The surface label of rho, poloidal, or zeta to compute the integration over. + expand_out : bool + Whether to expand the output array so that the output has the same + shape as the input. Defaults to true so that the output may be + broadcast in the same way as the input. Setting to false will save + memory. 
+ tol : float + Tolerance for considering nodes the same. + Only relevant if the grid object doesn't already have this information. + + Returns + ------- + integrals : ndarray + Surface integral of the input over each surface in the grid. + By default, the returned array has the same shape as the input. + + """ + return surface_integrals_map(grid, surface_label, expand_out, tol)(q) + + +def surface_integrals_map(grid, surface_label="rho", expand_out=True, tol=1e-14): + """Returns a method to compute any surface integral for each surface in the grid. + + Parameters + ---------- + grid : Grid + Collocation grid containing the nodes to evaluate at. + surface_label : str + The surface label of rho, poloidal, or zeta to compute the integration over. + expand_out : bool + Whether to expand the output array so that the output has the same + shape as the input. Defaults to true so that the output may be + broadcast in the same way as the input. Setting to false will save + memory. + tol : float + Tolerance for considering nodes the same. + Only relevant if the grid object doesn't already have this information. + + Returns + ------- + function : callable + Method to compute any surface integral of the input ``q`` over each + surface in the grid with code: ``function(q)``. + + """ + surface_label = grid.get_label(surface_label) + warnif( + surface_label == "poloidal" and isinstance(grid, ConcentricGrid), + msg="Integrals over constant poloidal surfaces" + " are poorly defined for ConcentricGrid.", + ) + unique_size, inverse_idx, spacing, has_endpoint_dupe, has_idx = _get_grid_surface( + grid, surface_label + ) + spacing = jnp.prod(spacing, axis=1) + + # Todo: Define mask as a sparse matrix once sparse matrices are no longer + # experimental in jax. + if has_idx: + # The ith row of masks is True only at the indices which correspond to the + # ith surface. The integral over the ith surface is the dot product of the + # ith row vector and the integrand defined over all the surfaces. + mask = inverse_idx == jnp.arange(unique_size)[:, jnp.newaxis] + # Imagine a torus cross-section at zeta=π. + # A grid with a duplicate zeta=π node has 2 of those cross-sections. + # In grid.py, we multiply by 1/n the areas of surfaces with + # duplicity n. This prevents the area of that surface from being + # double-counted, as surfaces with the same node value are combined + # into 1 integral, which sums their areas. Thus, if the zeta=π + # cross-section has duplicity 2, we ensure that the area on the zeta=π + # surface will have the correct total area of π+π = 2π. + # An edge case exists if the duplicate surface has nodes with + # different values for the surface label, which only occurs when + # has_endpoint_dupe is true. If ``has_endpoint_dupe`` is true, this grid + # has a duplicate surface at surface_label=0 and + # surface_label=max surface value. Although the modulo of these values + # are equal, their numeric values are not, so the integration + # would treat them as different surfaces. We solve this issue by + # combining the indices corresponding to the integrands of the duplicated + # surface, so that the duplicate surface is treated as one, like in the + # previous paragraph. + mask = cond( + has_endpoint_dupe, + lambda _: put(mask, jnp.array([0, -1]), mask[0] | mask[-1]), + lambda _: mask, + operand=None, + ) + else: + # If we don't have the idx attributes, we are forced to expand out. 
+ errorif( + not has_idx and not expand_out, + msg=f"Grid lacks attributes 'num_{surface_label}' and " + f"'inverse_{surface_label}_idx', so this method " + f"can't satisfy the request expand_out={expand_out}.", + ) + # don't try to expand if already expanded + expand_out = expand_out and has_idx + axis = {"rho": 0, "poloidal": 1, "zeta": 2}[surface_label] + # Converting nodes from numpy.ndarray to jaxlib.xla_extension.ArrayImpl + # reduces memory usage by > 400% for the forward computation and Jacobian. + nodes = jnp.asarray(grid.nodes[:, axis]) + # This branch will execute for custom grids, which don't have a use + # case for having duplicate nodes, so we don't bother to modulo nodes + # by 2pi or 2pi/NFP. + mask = jnp.abs(nodes - nodes[:, jnp.newaxis]) <= tol + # The above implementation was benchmarked to be more efficient than + # alternatives with explicit loops in GitHub pull request #934. + + def integrate(q=jnp.array([1.0])): + """Compute a surface integral for each surface in the grid. + + Notes + ----- + It is assumed that the integration surface has area 4π² when the + surface label is rho and area 2π when the surface label is theta or + zeta. You may want to multiply the input by the surface area Jacobian. + + Parameters + ---------- + q : ndarray + Quantity to integrate. + The first dimension of the array should have size ``grid.num_nodes``. + When ``q`` is n-dimensional, the intention is to integrate, + over the domain parameterized by rho, poloidal, and zeta, + an n-dimensional function over the previously mentioned domain. + + Returns + ------- + integrals : ndarray + Surface integral of the input over each surface in the grid. + + """ + integrands = (spacing * jnp.nan_to_num(q).T).T + integrals = jnp.tensordot(mask, integrands, axes=1) + return grid.expand(integrals, surface_label) if expand_out else integrals + + return integrate + + +def surface_averages( + grid, + q, + sqrt_g=jnp.array([1.0]), + surface_label="rho", + denominator=None, + expand_out=True, + tol=1e-14, +): + """Compute a surface average for each surface in the grid. + + Notes + ----- + Implements the flux-surface average formula given by equation 4.9.11 in + W.D. D'haeseleer et al. (1991) doi:10.1007/978-3-642-75595-8. + + Parameters + ---------- + grid : Grid + Collocation grid containing the nodes to evaluate at. + q : ndarray + Quantity to average. + The first dimension of the array should have size ``grid.num_nodes``. + When ``q`` is n-dimensional, the intention is to average, + over the domain parameterized by rho, poloidal, and zeta, + an n-dimensional function over the previously mentioned domain. + sqrt_g : ndarray + Coordinate system Jacobian determinant; see ``data_index["sqrt(g)"]``. + surface_label : str + The surface label of rho, poloidal, or zeta to compute the average over. + denominator : ndarray + By default, the denominator is computed as the surface integral of + ``sqrt_g``. This parameter can optionally be supplied to avoid + redundant computations or to use a different denominator to compute + the average. This array should broadcast with arrays of size + ``grid.num_nodes`` (``grid.num_surface_label``) if ``expand_out`` + is true (false). + expand_out : bool + Whether to expand the output array so that the output has the same + shape as the input. Defaults to true so that the output may be + broadcast in the same way as the input. Setting to false will save + memory. + tol : float + Tolerance for considering nodes the same. 
+ Only relevant if the grid object doesn't already have this information. + + Returns + ------- + averages : ndarray + Surface average of the input over each surface in the grid. + By default, the returned array has the same shape as the input. + + """ + return surface_averages_map(grid, surface_label, expand_out, tol)( + q, sqrt_g, denominator + ) + + +def surface_averages_map(grid, surface_label="rho", expand_out=True, tol=1e-14): + """Returns a method to compute any surface average for each surface in the grid. + + Parameters + ---------- + grid : Grid + Collocation grid containing the nodes to evaluate at. + surface_label : str + The surface label of rho, poloidal, or zeta to compute the average over. + expand_out : bool + Whether to expand the output array so that the output has the same + shape as the input. Defaults to true so that the output may be + broadcast in the same way as the input. Setting to false will save + memory. + tol : float + Tolerance for considering nodes the same. + Only relevant if the grid object doesn't already have this information. + + Returns + ------- + function : callable + Method to compute any surface average of the input ``q`` and optionally + the volume Jacobian ``sqrt_g`` over each surface in the grid with code: + ``function(q, sqrt_g)``. + + """ + surface_label = grid.get_label(surface_label) + has_idx = hasattr(grid, f"num_{surface_label}") and hasattr( + grid, f"_inverse_{surface_label}_idx" + ) + # If we don't have the idx attributes, we are forced to expand out. + errorif( + not has_idx and not expand_out, + msg=f"Grid lacks attributes 'num_{surface_label}' and " + f"'inverse_{surface_label}_idx', so this method " + f"can't satisfy the request expand_out={expand_out}.", + ) + integrate = surface_integrals_map( + grid, surface_label, expand_out=not has_idx, tol=tol + ) + # don't try to expand if already expanded + expand_out = expand_out and has_idx + + def _surface_averages(q, sqrt_g=jnp.array([1.0]), denominator=None): + """Compute a surface average for each surface in the grid. + + Notes + ----- + Implements the flux-surface average formula given by equation 4.9.11 in + W.D. D'haeseleer et al. (1991) doi:10.1007/978-3-642-75595-8. + + Parameters + ---------- + q : ndarray + Quantity to average. + The first dimension of the array should have size ``grid.num_nodes``. + When ``q`` is n-dimensional, the intention is to average, + over the domain parameterized by rho, poloidal, and zeta, + an n-dimensional function over the previously mentioned domain. + sqrt_g : ndarray + Coordinate system Jacobian determinant; see ``data_index["sqrt(g)"]``. + denominator : ndarray + By default, the denominator is computed as the surface integral of + ``sqrt_g``. This parameter can optionally be supplied to avoid + redundant computations or to use a different denominator to compute + the average. This array should broadcast with arrays of size + ``grid.num_nodes`` (``grid.num_surface_label``) if ``expand_out`` + is true (false). + + Returns + ------- + averages : ndarray + Surface average of the input over each surface in the grid. 
+ + """ + q, sqrt_g = jnp.atleast_1d(q, sqrt_g) + numerator = integrate((sqrt_g * q.T).T) + # memory optimization to call expand() at most once + if denominator is None: + # skip integration if constant + denominator = ( + (4 * jnp.pi**2 if surface_label == "rho" else 2 * jnp.pi) * sqrt_g + if sqrt_g.size == 1 + else integrate(sqrt_g) + ) + averages = (numerator.T / denominator).T + if expand_out: + averages = grid.expand(averages, surface_label) + else: + if expand_out: + # implies denominator given with size grid.num_nodes + numerator = grid.expand(numerator, surface_label) + averages = (numerator.T / denominator).T + return averages + + return _surface_averages + + +def surface_integrals_transform(grid, surface_label="rho"): + """Returns a method to compute any integral transform over each surface in grid. + + The returned method takes an array input ``q`` and returns an array output. + + Given a set of kernel functions in ``q``, each parameterized by at most + five variables, the returned method computes an integral transform, + reducing ``q`` to a set of functions of at most three variables. + + Define the domain D = u₁ × u₂ × u₃ and the codomain C = u₄ × u₅ × u₆. + For every surface of constant u₁ in the domain, the returned method + evaluates the transform Tᵤ₁ : u₂ × u₃ × C → C, where Tᵤ₁ projects + away the parameters u₂ and u₃ via an integration of the given kernel + function Kᵤ₁ over the corresponding surface of constant u₁. + + Notes + ----- + It is assumed that the integration surface has area 4π² when the + surface label is rho and area 2π when the surface label is theta or + zeta. You may want to multiply the input ``q`` by the surface area + Jacobian. + + Parameters + ---------- + grid : Grid + Collocation grid containing the nodes to evaluate at. + surface_label : str + The surface label of rho, poloidal, or zeta to compute the integration over. + These correspond to the domain parameters discussed in this method's + description. In particular, ``surface_label`` names u₁. + + Returns + ------- + function : callable + Method to compute any surface integral transform of the input ``q`` over + each surface in the grid with code: ``function(q)``. + + The first dimension of ``q`` should always discretize some function, g, + over the domain, and therefore, have size ``grid.num_nodes``. + The second dimension may discretize some function, f, over the + codomain, and therefore, have size that matches the desired number of + points at which the output is evaluated. + + This method can also be used to compute the output one point at a time, + in which case ``q`` can have shape (``grid.num_nodes``, ). + + Input + ----- + If ``q`` has one dimension, then it should have shape + (``grid.num_nodes``, ). + If ``q`` has multiple dimensions, then it should have shape + (``grid.num_nodes``, *f.shape). + + Output + ------ + Each element along the first dimension of the returned array, stores + Tᵤ₁ for a particular surface of constant u₁ in the given grid. + The order is sorted in increasing order of the values which specify u₁. + + If ``q`` has one dimension, the returned array has shape + (grid.num_surface_label, ). + If ``q`` has multiple dimensions, the returned array has shape + (grid.num_surface_label, *f.shape). + + """ + # Expansion should not occur here. 
The typical use case of this method is to + # transform into the computational domain, so the second dimension that + # discretizes f over the codomain will typically have size grid.num_nodes + # to broadcast with quantities in data_index. + surface_label = grid.get_label(surface_label) + has_idx = hasattr(grid, f"num_{surface_label}") and hasattr( + grid, f"_inverse_{surface_label}_idx" + ) + errorif( + not has_idx, + msg=f"Grid lacks attributes 'num_{surface_label}' and " + f"'inverse_{surface_label}_idx', which are required for this function.", + ) + return surface_integrals_map(grid, surface_label, expand_out=False) + + +def surface_variance( + grid, + q, + weights=jnp.array([1.0]), + bias=False, + surface_label="rho", + expand_out=True, + tol=1e-14, +): + """Compute the weighted sample variance of ``q`` on each surface of the grid. + + Computes nₑ / (nₑ − b) * (∑ᵢ₌₁ⁿ (qᵢ − q̅)² wᵢ) / (∑ᵢ₌₁ⁿ wᵢ). + wᵢ is the weight assigned to qᵢ given by the product of ``weights[i]`` and + the differential surface area element (not already weighted by the area + Jacobian) at the node where qᵢ is evaluated, + q̅ is the weighted mean of q, + b is 0 if the biased sample variance is to be returned and 1 otherwise, + n is the number of samples on a surface, and + nₑ ≝ (∑ᵢ₌₁ⁿ wᵢ)² / ∑ᵢ₌₁ⁿ wᵢ² is the effective number of samples. + + As the weights wᵢ approach each other, nₑ approaches n, and the output + converges to ∑ᵢ₌₁ⁿ (qᵢ − q̅)² / (n − b). + + Notes + ----- + There are three different methods to unbias the variance of a weighted + sample so that the computed variance better estimates the true variance. + Whether the method is correct for a particular use case depends on what + the weights assigned to each sample represent. + + This function implements the first case, where the weights are not random + and are intended to assign more weight to some samples for reasons + unrelated to differences in uncertainty between samples. See + https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Reliability_weights. + + The second case is when the weights are intended to assign more weight + to samples with less uncertainty. See + https://en.wikipedia.org/wiki/Inverse-variance_weighting. + The unbiased sample variance for this case is obtained by replacing the + effective number of samples in the formula this function implements, + nₑ, with the actual number of samples n. + + The third case is when the weights denote the integer frequency of each + sample. See + https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Frequency_weights. + This is indeed a distinct case from the above two because here the + weights encode additional information about the distribution. + + Parameters + ---------- + grid : Grid + Collocation grid containing the nodes to evaluate at. + q : ndarray + Quantity to compute the sample variance. + weights : ndarray + Weight assigned to each sample of ``q``. + A good candidate for this parameter is the surface area Jacobian. + bias : bool + If this condition is true, then the biased estimator of the sample + variance is returned. This is desirable if you are only concerned with + computing the variance of the given set of numbers and not the + distribution the numbers are (potentially) sampled from. + surface_label : str + The surface label of rho, poloidal, or zeta to compute the variance over. + expand_out : bool + Whether to expand the output array so that the output has the same + shape as the input. Defaults to true so that the output may be + broadcast in the same way as the input. 
Setting to false will save + memory. + tol : float + Tolerance for considering nodes the same. + Only relevant if the grid object doesn't already have this information. + + Returns + ------- + variance : ndarray + Variance of the given weighted sample over each surface in the grid. + By default, the returned array has the same shape as the input. + + """ + surface_label = grid.get_label(surface_label) + _, _, spacing, _, has_idx = _get_grid_surface(grid, surface_label) + # If we don't have the idx attributes, we are forced to expand out. + errorif( + not has_idx and not expand_out, + msg=f"Grid lacks attributes 'num_{surface_label}' and " + f"'inverse_{surface_label}_idx', so this method " + f"can't satisfy the request expand_out={expand_out}.", + ) + integrate = surface_integrals_map( + grid, surface_label, expand_out=not has_idx, tol=tol + ) + + v1 = integrate(weights) + v2 = integrate(weights**2 * jnp.prod(spacing, axis=-1)) + # effective number of samples per surface + n_e = v1**2 / v2 + # analogous to Bessel's bias correction + correction = n_e / (n_e - (not bias)) + + q = jnp.atleast_1d(q) + # compute variance in two passes to avoid catastrophic round off error + mean = (integrate((weights * q.T).T).T / v1).T + if has_idx: # guard so that we don't try to expand when already expanded + mean = grid.expand(mean, surface_label) + variance = (correction * integrate((weights * ((q - mean) ** 2).T).T).T / v1).T + if expand_out and has_idx: + return grid.expand(variance, surface_label) + else: + return variance + + +def surface_max(grid, x, surface_label="rho"): + """Get the max of x for each surface in the grid. + + Parameters + ---------- + grid : Grid + Collocation grid containing the nodes to evaluate at. + x : ndarray + Quantity to find max. + The array should have size grid.num_nodes. + surface_label : str + The surface label of rho, poloidal, or zeta to compute max over. + + Returns + ------- + maxs : ndarray + Maximum of x over each surface in grid. + The returned array has the same shape as the input. + + """ + return -surface_min(grid, -x, surface_label) + + +def surface_min(grid, x, surface_label="rho"): + """Get the min of x for each surface in the grid. + + Parameters + ---------- + grid : Grid + Collocation grid containing the nodes to evaluate at. + x : ndarray + Quantity to find min. + The array should have size grid.num_nodes. + surface_label : str + The surface label of rho, poloidal, or zeta to compute min over. + + Returns + ------- + mins : ndarray + Minimum of x over each surface in grid. + The returned array has the same shape as the input. + + """ + surface_label = grid.get_label(surface_label) + unique_size, inverse_idx, _, _, has_idx = _get_grid_surface(grid, surface_label) + errorif( + not has_idx, + NotImplementedError, + msg=f"Grid lacks attributes 'num_{surface_label}' and " + f"'inverse_{surface_label}_idx', which are required for this function.", + ) + inverse_idx = jnp.asarray(inverse_idx) + x = jnp.asarray(x) + mins = jnp.full(unique_size, jnp.inf) + + def body(i, mins): + mins = put(mins, inverse_idx[i], jnp.minimum(x[i], mins[inverse_idx[i]])) + return mins + + mins = fori_loop(0, inverse_idx.size, body, mins) + # The above implementation was benchmarked to be more efficient than + # alternatives without explicit loops in GitHub pull request #501. 
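As a quick orientation for the surface utilities defined in this hunk (the averaging map, variance, and min/max reducers above, with the ``return`` that closes ``surface_min`` just below), here is a minimal usage sketch. It assumes the import path used later in this series, ``desc.integrals.surface_integral`` (older revisions expose the same names from ``desc.compute.utils``); the grid and the quantity ``q`` are placeholders, not part of the patch.

import numpy as np

from desc.grid import LinearGrid
from desc.integrals.surface_integral import (  # ``desc.compute.utils`` on older commits
    surface_averages_map,
    surface_integrals_transform,
    surface_max,
    surface_min,
    surface_variance,
)

grid = LinearGrid(L=5, M=5, N=2, NFP=3)
q = np.arange(grid.num_nodes) ** 2            # any quantity sampled on the grid nodes
sqrt_g = np.ones(grid.num_nodes)              # stand-in Jacobian

average = surface_averages_map(grid, "rho")   # build the map once ...
avg_q = average(q, sqrt_g)                    # ... then reuse it for many quantities
var_q = surface_variance(grid, q, weights=sqrt_g, surface_label="rho")
lo, hi = surface_min(grid, q), surface_max(grid, q)
kernel = np.abs(np.outer(np.cos(q), np.sin(q)))
T = surface_integrals_transform(grid, "rho")(kernel)  # shape (grid.num_rho, grid.num_nodes)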
+ return grid.expand(mins, surface_label) diff --git a/desc/magnetic_fields/_core.py b/desc/magnetic_fields/_core.py index 8cf4a4b35c..aff0d7b3c3 100644 --- a/desc/magnetic_fields/_core.py +++ b/desc/magnetic_fields/_core.py @@ -20,9 +20,9 @@ from desc.derivatives import Derivative from desc.equilibrium import EquilibriaFamily, Equilibrium from desc.grid import LinearGrid, _Grid +from desc.integrals.singularities import compute_B_plasma from desc.io import IOAble from desc.optimizable import Optimizable, OptimizableCollection, optimizable_parameter -from desc.singularities import compute_B_plasma from desc.transform import Transform from desc.utils import copy_coeffs, errorif, flatten_list, setdefault, warnif from desc.vmec_utils import ptolemy_identity_fwd, ptolemy_identity_rev diff --git a/desc/objectives/_coils.py b/desc/objectives/_coils.py index 62ef664622..93c7781a8c 100644 --- a/desc/objectives/_coils.py +++ b/desc/objectives/_coils.py @@ -14,7 +14,7 @@ from desc.compute.utils import _compute as compute_fun from desc.compute.utils import safenorm from desc.grid import LinearGrid, _Grid -from desc.singularities import compute_B_plasma +from desc.integrals.singularities import compute_B_plasma from desc.utils import Timer, errorif, warnif from .normalization import compute_scaling_factors diff --git a/desc/objectives/_free_boundary.py b/desc/objectives/_free_boundary.py index 3093f40f1e..9421ee0794 100644 --- a/desc/objectives/_free_boundary.py +++ b/desc/objectives/_free_boundary.py @@ -9,13 +9,13 @@ from desc.compute import get_params, get_profiles, get_transforms from desc.compute.utils import _compute as compute_fun from desc.grid import LinearGrid -from desc.nestor import Nestor -from desc.objectives.objective_funs import _Objective -from desc.singularities import ( +from desc.integrals.singularities import ( DFTInterpolator, FFTInterpolator, virtual_casing_biot_savart, ) +from desc.nestor import Nestor +from desc.objectives.objective_funs import _Objective from desc.utils import Timer, errorif, warnif from .normalization import compute_scaling_factors diff --git a/desc/plotting.py b/desc/plotting.py index fdcb9a393c..dab9c08184 100644 --- a/desc/plotting.py +++ b/desc/plotting.py @@ -18,9 +18,10 @@ from desc.basis import fourier, zernike_radial_poly from desc.coils import CoilSet, _Coil from desc.compute import data_index, get_transforms -from desc.compute.utils import _parse_parameterization, surface_averages_map +from desc.compute.utils import _parse_parameterization from desc.equilibrium.coords import map_coordinates from desc.grid import Grid, LinearGrid +from desc.integrals.surface_integral import surface_averages_map from desc.magnetic_fields import field_line_integrate from desc.utils import errorif, only1, parse_argname_change, setdefault from desc.vmec_utils import ptolemy_linear_transform diff --git a/desc/vmec.py b/desc/vmec.py index a0212dbd31..c11074715c 100644 --- a/desc/vmec.py +++ b/desc/vmec.py @@ -12,10 +12,10 @@ from desc.basis import DoubleFourierSeries from desc.compat import ensure_positive_jacobian -from desc.compute.utils import surface_averages from desc.equilibrium import Equilibrium from desc.geometry import FourierRZToroidalSurface from desc.grid import Grid, LinearGrid +from desc.integrals.surface_integral import surface_averages from desc.objectives import ( ObjectiveFunction, get_fixed_axis_constraints, diff --git a/tests/test_axis_limits.py b/tests/test_axis_limits.py index 4459595137..1a61c1cd39 100644 --- a/tests/test_axis_limits.py +++ 
b/tests/test_axis_limits.py @@ -12,10 +12,11 @@ import pytest from desc.compute import data_index -from desc.compute.utils import _grow_seeds, dot, surface_integrals_map +from desc.compute.utils import _grow_seeds, dot from desc.equilibrium import Equilibrium from desc.examples import get from desc.grid import LinearGrid +from desc.integrals.surface_integral import surface_integrals_map from desc.objectives import GenericObjective, ObjectiveFunction # Unless mentioned in the source code of the compute function, the assumptions diff --git a/tests/test_compute_utils.py b/tests/test_compute_utils.py index 1a71059473..5ecef83cc5 100644 --- a/tests/test_compute_utils.py +++ b/tests/test_compute_utils.py @@ -7,612 +7,8 @@ import pytest from desc.backend import flatnonzero, jnp -from desc.basis import FourierZernikeBasis from desc.compute.geom_utils import rotation_matrix -from desc.compute.utils import ( - _get_grid_surface, - line_integrals, - surface_averages, - surface_integrals, - surface_integrals_transform, - surface_max, - surface_min, - surface_variance, - take_mask, -) -from desc.examples import get -from desc.grid import ConcentricGrid, LinearGrid, QuadratureGrid -from desc.transform import Transform - -# arbitrary choice -L = 5 -M = 5 -N = 2 -NFP = 3 - - -class TestComputeUtils: - """Tests for compute utilities related to surface averaging, etc.""" - - @staticmethod - def surface_integrals(grid, q=np.array([1.0]), surface_label="rho"): - """Compute a surface integral for each surface in the grid. - - Notes - ----- - It is assumed that the integration surface has area 4π² when the - surface label is rho and area 2π when the surface label is theta or - zeta. You may want to multiply q by the surface area Jacobian. - - Parameters - ---------- - grid : Grid - Collocation grid containing the nodes to evaluate at. - q : ndarray - Quantity to integrate. - The first dimension of the array should have size ``grid.num_nodes``. - - When ``q`` is 1-dimensional, the intention is to integrate, - over the domain parameterized by rho, theta, and zeta, - a scalar function over the previously mentioned domain. - - When ``q`` is 2-dimensional, the intention is to integrate, - over the domain parameterized by rho, theta, and zeta, - a vector-valued function over the previously mentioned domain. - - When ``q`` is 3-dimensional, the intention is to integrate, - over the domain parameterized by rho, theta, and zeta, - a matrix-valued function over the previously mentioned domain. - surface_label : str - The surface label of rho, theta, or zeta to compute the integration over. - - Returns - ------- - integrals : ndarray - Surface integral of the input over each surface in the grid. 
- - """ - _, _, spacing, _, _ = _get_grid_surface(grid, grid.get_label(surface_label)) - if surface_label == "rho": - has_endpoint_dupe = False - elif surface_label == "theta": - has_endpoint_dupe = (grid.nodes[grid.unique_theta_idx[0], 1] == 0) & ( - grid.nodes[grid.unique_theta_idx[-1], 1] == 2 * np.pi - ) - else: - has_endpoint_dupe = (grid.nodes[grid.unique_zeta_idx[0], 2] == 0) & ( - grid.nodes[grid.unique_zeta_idx[-1], 2] == 2 * np.pi / grid.NFP - ) - weights = (spacing.prod(axis=1) * np.nan_to_num(q).T).T - - surfaces = {} - nodes = grid.nodes[:, {"rho": 0, "theta": 1, "zeta": 2}[surface_label]] - # collect node indices for each surface_label surface - for grid_row_idx, surface_label_value in enumerate(nodes): - surfaces.setdefault(surface_label_value, []).append(grid_row_idx) - # integration over non-contiguous elements - integrals = [weights[surfaces[key]].sum(axis=0) for key in sorted(surfaces)] - if has_endpoint_dupe: - integrals[0] = integrals[-1] = integrals[0] + integrals[-1] - return np.asarray(integrals) - - @pytest.mark.unit - def test_surface_integrals(self): - """Test surface_integrals against a more intuitive implementation. - - This test should ensure that the algorithm in implementation is correct - on different types of grids (e.g. LinearGrid, ConcentricGrid). Each test - should also be done on grids with duplicate nodes (e.g. endpoint=True). - """ - - def test_b_theta(surface_label, grid, eq): - q = eq.compute("B_theta", grid=grid)["B_theta"] - integrals = surface_integrals(grid, q, surface_label, expand_out=False) - unique_size = { - "rho": grid.num_rho, - "theta": grid.num_theta, - "zeta": grid.num_zeta, - }[surface_label] - assert integrals.shape == (unique_size,), surface_label - - desired = self.surface_integrals(grid, q, surface_label) - np.testing.assert_allclose( - integrals, desired, atol=1e-16, err_msg=surface_label - ) - - eq = get("W7-X") - with pytest.warns(UserWarning, match="Reducing radial"): - eq.change_resolution(3, 3, 3, 6, 6, 6) - lg = LinearGrid(L=L, M=M, N=N, NFP=eq.NFP, endpoint=False) - lg_endpoint = LinearGrid(L=L, M=M, N=N, NFP=eq.NFP, endpoint=True) - cg_sym = ConcentricGrid(L=L, M=M, N=N, NFP=eq.NFP, sym=True) - for label in ("rho", "theta", "zeta"): - test_b_theta(label, lg, eq) - test_b_theta(label, lg_endpoint, eq) - if label != "theta": - # theta integrals are poorly defined on concentric grids - test_b_theta(label, cg_sym, eq) - - @pytest.mark.unit - def test_unknown_unique_grid_integral(self): - """Test that averages are invariant to whether grids have unique_idx.""" - lg = LinearGrid(L=L, M=M, N=N, NFP=NFP, endpoint=False) - q = jnp.arange(lg.num_nodes) ** 2 - result = surface_integrals(lg, q, surface_label="rho") - del lg._unique_rho_idx - np.testing.assert_allclose( - surface_integrals(lg, q, surface_label="rho"), result - ) - result = surface_averages(lg, q, surface_label="theta") - del lg._unique_poloidal_idx - np.testing.assert_allclose( - surface_averages(lg, q, surface_label="theta"), result - ) - result = surface_variance(lg, q, surface_label="zeta") - del lg._unique_zeta_idx - np.testing.assert_allclose( - surface_variance(lg, q, surface_label="zeta"), result - ) - - @pytest.mark.unit - def test_surface_integrals_transform(self): - """Test surface integral of a kernel function.""" - - def test(surface_label, grid): - ints = np.arange(grid.num_nodes) - # better to test when all elements have the same sign - q = np.abs(np.outer(np.cos(ints), np.sin(ints))) - # This q represents the kernel function - # K_{u_1} = |cos(x(u_1, 
u_2, u_3)) * sin(x(u_4, u_5, u_6))| - # The first dimension of q varies the domain u_1, u_2, and u_3 - # and the second dimension varies the codomain u_4, u_5, u_6. - integrals = surface_integrals_transform(grid, surface_label)(q) - unique_size = { - "rho": grid.num_rho, - "theta": grid.num_theta, - "zeta": grid.num_zeta, - }[surface_label] - assert integrals.shape == (unique_size, grid.num_nodes), surface_label - - desired = self.surface_integrals(grid, q, surface_label) - np.testing.assert_allclose(integrals, desired, err_msg=surface_label) - - cg = ConcentricGrid(L=L, M=M, N=N, sym=True, NFP=NFP) - lg = LinearGrid(L=L, M=M, N=N, sym=True, NFP=NFP, endpoint=True) - test("rho", cg) - test("theta", lg) - test("zeta", cg) - - @pytest.mark.unit - def test_surface_averages_vector_functions(self): - """Test surface averages of vector-valued, function-valued integrands.""" - - def test(surface_label, grid): - g_size = grid.num_nodes # not a choice; required - f_size = g_size // 10 + (g_size < 10) - # arbitrary choice, but f_size != v_size != g_size is better to test - v_size = g_size // 20 + (g_size < 20) - g = np.cos(np.arange(g_size)) - fv = np.sin(np.arange(f_size * v_size).reshape(f_size, v_size)) - # better to test when all elements have the same sign - q = np.abs(np.einsum("g,fv->gfv", g, fv)) - sqrt_g = np.arange(g_size).astype(float) - - averages = surface_averages(grid, q, sqrt_g, surface_label) - assert averages.shape == q.shape == (g_size, f_size, v_size), surface_label - - desired = ( - self.surface_integrals(grid, (sqrt_g * q.T).T, surface_label).T - / self.surface_integrals(grid, sqrt_g, surface_label) - ).T - np.testing.assert_allclose( - grid.compress(averages, surface_label), desired, err_msg=surface_label - ) - - cg = ConcentricGrid(L=L, M=M, N=N, sym=True, NFP=NFP) - lg = LinearGrid(L=L, M=M, N=N, sym=True, NFP=NFP, endpoint=True) - test("rho", cg) - test("theta", lg) - test("zeta", cg) - - @pytest.mark.unit - def test_surface_area(self): - """Test that surface_integrals(ds) is 4π² for rho, 2pi for theta, zeta. - - This test should ensure that surfaces have the correct area on grids - constructed by specifying L, M, N and by specifying an array of nodes. - Each test should also be done on grids with duplicate nodes - (e.g. endpoint=True) and grids with symmetry. 
- """ - - def test(surface_label, grid): - areas = surface_integrals( - grid, surface_label=surface_label, expand_out=False - ) - correct_area = 4 * np.pi**2 if surface_label == "rho" else 2 * np.pi - np.testing.assert_allclose(areas, correct_area, err_msg=surface_label) - - lg = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=False, endpoint=False) - lg_sym = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=True, endpoint=False) - lg_endpoint = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=False, endpoint=True) - lg_sym_endpoint = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=True, endpoint=True) - rho = np.linspace(1, 0, L)[::-1] - theta = np.linspace(0, 2 * np.pi, M, endpoint=False) - theta_endpoint = np.linspace(0, 2 * np.pi, M, endpoint=True) - zeta = np.linspace(0, 2 * np.pi / NFP, N, endpoint=False) - zeta_endpoint = np.linspace(0, 2 * np.pi / NFP, N, endpoint=True) - lg_2 = LinearGrid( - rho=rho, theta=theta, zeta=zeta, NFP=NFP, sym=False, endpoint=False - ) - lg_2_sym = LinearGrid( - rho=rho, theta=theta, zeta=zeta, NFP=NFP, sym=True, endpoint=False - ) - lg_2_endpoint = LinearGrid( - rho=rho, - theta=theta_endpoint, - zeta=zeta_endpoint, - NFP=NFP, - sym=False, - endpoint=True, - ) - lg_2_sym_endpoint = LinearGrid( - rho=rho, - theta=theta_endpoint, - zeta=zeta_endpoint, - NFP=NFP, - sym=True, - endpoint=True, - ) - cg = ConcentricGrid(L=L, M=M, N=N, NFP=NFP, sym=False) - cg_sym = ConcentricGrid(L=L, M=M, N=N, NFP=NFP, sym=True) - - for label in ("rho", "theta", "zeta"): - test(label, lg) - test(label, lg_sym) - test(label, lg_endpoint) - test(label, lg_sym_endpoint) - test(label, lg_2) - test(label, lg_2_sym) - test(label, lg_2_endpoint) - test(label, lg_2_sym_endpoint) - if label != "theta": - # theta integrals are poorly defined on concentric grids - test(label, cg) - test(label, cg_sym) - - @pytest.mark.unit - def test_line_length(self): - """Test that line_integrals(dl) is 1 for rho, 2π for theta, zeta. - - This test should ensure that lines have the correct length on grids - constructed by specifying L, M, N and by specifying an array of nodes. 
- """ - - def test(grid): - if not isinstance(grid, ConcentricGrid): - for theta_val in grid.nodes[grid.unique_theta_idx, 1]: - result = line_integrals( - grid, - line_label="rho", - fix_surface=("theta", theta_val), - expand_out=False, - ) - np.testing.assert_allclose(result, 1) - for rho_val in grid.nodes[grid.unique_rho_idx, 0]: - result = line_integrals( - grid, - line_label="zeta", - fix_surface=("rho", rho_val), - expand_out=False, - ) - np.testing.assert_allclose(result, 2 * np.pi) - for zeta_val in grid.nodes[grid.unique_zeta_idx, 2]: - result = line_integrals( - grid, - line_label="theta", - fix_surface=("zeta", zeta_val), - expand_out=False, - ) - np.testing.assert_allclose(result, 2 * np.pi) - - lg = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=False) - lg_sym = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=True) - rho = np.linspace(1, 0, L)[::-1] - theta = np.linspace(0, 2 * np.pi, M, endpoint=False) - zeta = np.linspace(0, 2 * np.pi / NFP, N, endpoint=False) - lg_2 = LinearGrid(rho=rho, theta=theta, zeta=zeta, NFP=NFP, sym=False) - lg_2_sym = LinearGrid(rho=rho, theta=theta, zeta=zeta, NFP=NFP, sym=True) - cg = ConcentricGrid(L=L, M=M, N=N, NFP=NFP, sym=False) - cg_sym = ConcentricGrid(L=L, M=M, N=N, NFP=NFP, sym=True) - - test(lg) - test(lg_sym) - test(lg_2) - test(lg_2_sym) - test(cg) - test(cg_sym) - - @pytest.mark.unit - def test_surface_averages_identity_op(self): - """Test flux surface averages of surface functions are identity operations.""" - eq = get("W7-X") - with pytest.warns(UserWarning, match="Reducing radial"): - eq.change_resolution(3, 3, 3, 6, 6, 6) - grid = ConcentricGrid(L=L, M=M, N=N, NFP=eq.NFP, sym=eq.sym) - data = eq.compute(["p", "sqrt(g)"], grid=grid) - pressure_average = surface_averages(grid, data["p"], data["sqrt(g)"]) - np.testing.assert_allclose(data["p"], pressure_average) - - @pytest.mark.unit - def test_surface_averages_homomorphism(self): - """Test flux surface averages of surface functions are additive homomorphisms. - - Meaning average(a + b) = average(a) + average(b). - """ - eq = get("W7-X") - with pytest.warns(UserWarning, match="Reducing radial"): - eq.change_resolution(3, 3, 3, 6, 6, 6) - grid = ConcentricGrid(L=L, M=M, N=N, NFP=eq.NFP, sym=eq.sym) - data = eq.compute(["|B|", "|B|_t", "sqrt(g)"], grid=grid) - a = surface_averages(grid, data["|B|"], data["sqrt(g)"]) - b = surface_averages(grid, data["|B|_t"], data["sqrt(g)"]) - a_plus_b = surface_averages(grid, data["|B|"] + data["|B|_t"], data["sqrt(g)"]) - np.testing.assert_allclose(a_plus_b, a + b) - - @pytest.mark.unit - def test_surface_integrals_against_shortcut(self): - """Test integration against less general methods.""" - grid = ConcentricGrid(L=L, M=M, N=N, NFP=NFP) - ds = grid.spacing[:, :2].prod(axis=-1) - # something arbitrary that will give different sum across surfaces - q = np.arange(grid.num_nodes) ** 2 - # The predefined grids sort nodes in zeta surface chunks. - # To compute a quantity local to a surface, we can reshape it into zeta - # surface chunks and compute across the chunks. 
- result = grid.expand( - (ds * q).reshape((grid.num_zeta, -1)).sum(axis=-1), - surface_label="zeta", - ) - np.testing.assert_allclose( - surface_integrals(grid, q, surface_label="zeta"), - desired=result, - ) - - @pytest.mark.unit - def test_surface_averages_against_shortcut(self): - """Test averaging against less general methods.""" - # test on zeta surfaces - grid = LinearGrid(L=L, M=M, N=N, NFP=NFP) - # something arbitrary that will give different average across surfaces - q = np.arange(grid.num_nodes) ** 2 - # The predefined grids sort nodes in zeta surface chunks. - # To compute a quantity local to a surface, we can reshape it into zeta - # surface chunks and compute across the chunks. - mean = grid.expand( - q.reshape((grid.num_zeta, -1)).mean(axis=-1), - surface_label="zeta", - ) - # number of nodes per surface - n = grid.num_rho * grid.num_theta - np.testing.assert_allclose(np.bincount(grid.inverse_zeta_idx), desired=n) - ds = grid.spacing[:, :2].prod(axis=-1) - np.testing.assert_allclose( - surface_integrals(grid, q / ds, surface_label="zeta") / n, - desired=mean, - ) - np.testing.assert_allclose( - surface_averages(grid, q, surface_label="zeta"), - desired=mean, - ) - - # test on grids with a single rho surface - eq = get("W7-X") - with pytest.warns(UserWarning, match="Reducing radial"): - eq.change_resolution(3, 3, 3, 6, 6, 6) - rho = np.array((1 - 1e-4) * np.random.default_rng().random() + 1e-4) - grid = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym) - data = eq.compute(["|B|", "sqrt(g)"], grid=grid) - np.testing.assert_allclose( - surface_averages(grid, data["|B|"], data["sqrt(g)"]), - np.mean(data["sqrt(g)"] * data["|B|"]) / np.mean(data["sqrt(g)"]), - err_msg="average with sqrt(g) fail", - ) - np.testing.assert_allclose( - surface_averages(grid, data["|B|"]), - np.mean(data["|B|"]), - err_msg="average without sqrt(g) fail", - ) - - @pytest.mark.unit - def test_symmetry_surface_average_1(self): - """Test surface average of a symmetric function.""" - - def test(grid): - r = grid.nodes[:, 0] - t = grid.nodes[:, 1] - z = grid.nodes[:, 2] * grid.NFP - true_surface_avg = 5 - function_of_rho = 1 / (r + 0.35) - f = ( - true_surface_avg - + np.cos(t) - - 0.5 * np.cos(z) - + 3 * np.cos(t) * np.cos(z) ** 2 - - 2 * np.sin(z) * np.sin(t) - ) * function_of_rho - np.testing.assert_allclose( - surface_averages(grid, f), - true_surface_avg * function_of_rho, - rtol=1e-15, - err_msg=type(grid), - ) - - # these tests should be run on relatively low resolution grids, - # or at least low enough so that the asymmetric spacing test fails - L = [3, 3, 5, 3] - M = [3, 6, 5, 7] - N = [2, 2, 2, 2] - NFP = [5, 3, 5, 3] - sym = np.array([True, True, False, False]) - # to test code not tested on grids made with M=. 
- even_number = 4 - n_theta = even_number - sym - - # asymmetric spacing - with pytest.raises(AssertionError): - theta = 2 * np.pi * np.array([t**2 for t in np.linspace(0, 1, max(M))]) - test(LinearGrid(L=max(L), theta=theta, N=max(N), sym=False)) - - for i in range(len(L)): - test(LinearGrid(L=L[i], M=M[i], N=N[i], NFP=NFP[i], sym=sym[i])) - test(LinearGrid(L=L[i], theta=n_theta[i], N=N[i], NFP=NFP[i], sym=sym[i])) - test( - LinearGrid( - L=L[i], - theta=np.linspace(0, 2 * np.pi, n_theta[i]), - N=N[i], - NFP=NFP[i], - sym=sym[i], - ) - ) - test( - LinearGrid( - L=L[i], - theta=np.linspace(0, 2 * np.pi, n_theta[i] + 1), - N=N[i], - NFP=NFP[i], - sym=sym[i], - ) - ) - test(QuadratureGrid(L=L[i], M=M[i], N=N[i], NFP=NFP[i])) - test(ConcentricGrid(L=L[i], M=M[i], N=N[i], NFP=NFP[i], sym=sym[i])) - # nonuniform spacing when sym is False, but spacing is still symmetric - test( - LinearGrid( - L=L[i], - theta=np.linspace(0, np.pi, n_theta[i]), - N=N[i], - NFP=NFP[i], - sym=sym[i], - ) - ) - test( - LinearGrid( - L=L[i], - theta=np.linspace(0, np.pi, n_theta[i] + 1), - N=N[i], - NFP=NFP[i], - sym=sym[i], - ) - ) - - @pytest.mark.unit - def test_symmetry_surface_average_2(self): - """Tests that surface averages are correct using specified basis.""" - - def test(grid, basis, true_avg=1): - transform = Transform(grid, basis) - - # random data with specified average on each surface - coeffs = np.random.rand(basis.num_modes) - coeffs[np.all(basis.modes[:, 1:] == [0, 0], axis=1)] = 0 - coeffs[np.all(basis.modes == [0, 0, 0], axis=1)] = true_avg - - # compute average for each surface in grid - values = transform.transform(coeffs) - numerical_avg = surface_averages(grid, values, expand_out=False) - np.testing.assert_allclose( - # values closest to axis are never accurate enough - numerical_avg[isinstance(grid, ConcentricGrid) :], - true_avg, - err_msg=str(type(grid)) + " " + str(grid.sym), - ) - - M = 5 - M_grid = 13 - test( - QuadratureGrid(L=M_grid, M=M_grid, N=0), FourierZernikeBasis(L=M, M=M, N=0) - ) - test( - LinearGrid(L=M_grid, M=M_grid, N=0, sym=True), - FourierZernikeBasis(L=M, M=M, N=0, sym="cos"), - ) - test( - ConcentricGrid(L=M_grid, M=M_grid, N=0), FourierZernikeBasis(L=M, M=M, N=0) - ) - test( - ConcentricGrid(L=M_grid, M=M_grid, N=0, sym=True), - FourierZernikeBasis(L=M, M=M, N=0, sym="cos"), - ) - - @pytest.mark.unit - def test_surface_variance(self): - """Test correctness of variance against less general methods.""" - grid = LinearGrid(L=L, M=M, N=N, NFP=NFP) - # something arbitrary that will give different variance across surfaces - q = np.arange(grid.num_nodes) ** 2 - - # Test weighted sample variance with different weights. - # positive weights to prevent cancellations that may hide implementation error - weights = np.cos(q) * np.sin(q) + 5 - biased = surface_variance( - grid, q, weights, bias=True, surface_label="zeta", expand_out=False - ) - unbiased = surface_variance( - grid, q, weights, surface_label="zeta", expand_out=False - ) - # The predefined grids sort nodes in zeta surface chunks. - # To compute a quantity local to a surface, we can reshape it into zeta - # surface chunks and compute across the chunks. - chunks = q.reshape((grid.num_zeta, -1)) - # The ds weights are built into the surface variance function. - # So weights for np.cov should be ds * weights. Since ds is constant on - # LinearGrid, we need to get the same result if we don't multiply by ds. 
- weights = weights.reshape((grid.num_zeta, -1)) - for i in range(grid.num_zeta): - np.testing.assert_allclose( - biased[i], - desired=np.cov(chunks[i], bias=True, aweights=weights[i]), - ) - np.testing.assert_allclose( - unbiased[i], - desired=np.cov(chunks[i], aweights=weights[i]), - ) - - # Test weighted sample variance converges to unweighted sample variance - # when all weights are equal. - chunks = grid.expand(chunks, surface_label="zeta") - np.testing.assert_allclose( - surface_variance(grid, q, np.e, bias=True, surface_label="zeta"), - desired=chunks.var(axis=-1), - ) - np.testing.assert_allclose( - surface_variance(grid, q, np.e, surface_label="zeta"), - desired=chunks.var(axis=-1, ddof=1), - ) - - @pytest.mark.unit - def test_surface_min_max(self): - """Test the surface_min and surface_max functions.""" - for grid_type in [LinearGrid, QuadratureGrid, ConcentricGrid]: - grid = grid_type(L=L, M=M, N=N, NFP=NFP) - rho = grid.nodes[:, 0] - theta = grid.nodes[:, 1] - zeta = grid.nodes[:, 2] - # Make up an arbitrary function of the coordinates: - B = ( - 1.7 - + 0.4 * rho * np.cos(theta) - + 0.8 * rho * rho * np.cos(2 * theta - 3 * zeta) - ) - Bmax_alt = np.zeros(grid.num_rho) - Bmin_alt = np.zeros(grid.num_rho) - for j in range(grid.num_rho): - mask = grid.inverse_rho_idx == j - Bmax_alt[j] = np.max(B[mask]) - Bmin_alt[j] = np.min(B[mask]) - np.testing.assert_allclose(Bmax_alt, grid.compress(surface_max(grid, B))) - np.testing.assert_allclose(Bmin_alt, grid.compress(surface_min(grid, B))) +from desc.utils import take_mask @pytest.mark.unit diff --git a/tests/test_integrals.py b/tests/test_integrals.py new file mode 100644 index 0000000000..fc181dc808 --- /dev/null +++ b/tests/test_integrals.py @@ -0,0 +1,526 @@ +"""Test integration algorithms.""" + +import numpy as np +import pytest + +from desc.basis import FourierZernikeBasis +from desc.examples import get +from desc.grid import ConcentricGrid, LinearGrid, QuadratureGrid +from desc.integrals.surface_integral import ( + _get_grid_surface, + line_integrals, + surface_averages, + surface_integrals, + surface_integrals_transform, + surface_max, + surface_min, + surface_variance, +) +from desc.transform import Transform + +# arbitrary choice +L = 5 +M = 5 +N = 2 +NFP = 3 + + +class TestSurfaceIntegral: + """Tests for non-singular surface integrals.""" + + @staticmethod + def _surface_integrals(grid, q=np.array([1.0]), surface_label="rho"): + """Compute a surface integral for each surface in the grid.""" + _, _, spacing, has_endpoint_dupe, _ = _get_grid_surface( + grid, grid.get_label(surface_label) + ) + weights = (spacing.prod(axis=1) * np.nan_to_num(q).T).T + surfaces = {} + nodes = grid.nodes[:, {"rho": 0, "theta": 1, "zeta": 2}[surface_label]] + for grid_row_idx, surface_label_value in enumerate(nodes): + surfaces.setdefault(surface_label_value, []).append(grid_row_idx) + integrals = [weights[surfaces[key]].sum(axis=0) for key in sorted(surfaces)] + if has_endpoint_dupe: + integrals[0] = integrals[-1] = integrals[0] + integrals[-1] + return np.asarray(integrals) + + @pytest.mark.unit + def test_unknown_unique_grid_integral(self): + """Test that averages are invariant to whether grids have unique_idx.""" + lg = LinearGrid(L=L, M=M, N=N, NFP=NFP, endpoint=False) + q = np.arange(lg.num_nodes) ** 2 + result = surface_integrals(lg, q, surface_label="rho") + del lg._unique_rho_idx + np.testing.assert_allclose( + surface_integrals(lg, q, surface_label="rho"), result + ) + result = surface_averages(lg, q, surface_label="theta") + del 
lg._unique_poloidal_idx + np.testing.assert_allclose( + surface_averages(lg, q, surface_label="theta"), result + ) + result = surface_variance(lg, q, surface_label="zeta") + del lg._unique_zeta_idx + np.testing.assert_allclose( + surface_variance(lg, q, surface_label="zeta"), result + ) + + @pytest.mark.unit + def test_surface_integrals_transform(self): + """Test surface integral of a kernel function.""" + + def test(surface_label, grid): + ints = np.arange(grid.num_nodes) + # better to test when all elements have the same sign + q = np.abs(np.outer(np.cos(ints), np.sin(ints))) + # This q represents the kernel function + # K_{u_1} = |cos(x(u_1, u_2, u_3)) * sin(x(u_4, u_5, u_6))| + # The first dimension of q varies the domain u_1, u_2, and u_3 + # and the second dimension varies the codomain u_4, u_5, u_6. + integrals = surface_integrals_transform(grid, surface_label)(q) + unique_size = { + "rho": grid.num_rho, + "theta": grid.num_theta, + "zeta": grid.num_zeta, + }[surface_label] + assert integrals.shape == (unique_size, grid.num_nodes), surface_label + + desired = self._surface_integrals(grid, q, surface_label) + np.testing.assert_allclose(integrals, desired, err_msg=surface_label) + + cg = ConcentricGrid(L=L, M=M, N=N, sym=True, NFP=NFP) + lg = LinearGrid(L=L, M=M, N=N, sym=True, NFP=NFP, endpoint=True) + test("rho", cg) + test("theta", lg) + test("zeta", cg) + + @pytest.mark.unit + def test_surface_averages_vector_functions(self): + """Test surface averages of vector-valued, function-valued integrands.""" + + def test(surface_label, grid): + g_size = grid.num_nodes # not a choice; required + f_size = g_size // 10 + (g_size < 10) + # arbitrary choice, but f_size != v_size != g_size is better to test + v_size = g_size // 20 + (g_size < 20) + g = np.cos(np.arange(g_size)) + fv = np.sin(np.arange(f_size * v_size).reshape(f_size, v_size)) + # better to test when all elements have the same sign + q = np.abs(np.einsum("g,fv->gfv", g, fv)) + sqrt_g = np.arange(g_size).astype(float) + + averages = surface_averages(grid, q, sqrt_g, surface_label) + assert averages.shape == q.shape == (g_size, f_size, v_size), surface_label + + desired = ( + self._surface_integrals(grid, (sqrt_g * q.T).T, surface_label).T + / self._surface_integrals(grid, sqrt_g, surface_label) + ).T + np.testing.assert_allclose( + grid.compress(averages, surface_label), desired, err_msg=surface_label + ) + + cg = ConcentricGrid(L=L, M=M, N=N, sym=True, NFP=NFP) + lg = LinearGrid(L=L, M=M, N=N, sym=True, NFP=NFP, endpoint=True) + test("rho", cg) + test("theta", lg) + test("zeta", cg) + + @pytest.mark.unit + def test_surface_area(self): + """Test that surface_integrals(ds) is 4π² for rho, 2pi for theta, zeta. + + This test should ensure that surfaces have the correct area on grids + constructed by specifying L, M, N and by specifying an array of nodes. + Each test should also be done on grids with duplicate nodes + (e.g. endpoint=True) and grids with symmetry. 
+ """ + + def test(surface_label, grid): + areas = surface_integrals( + grid, surface_label=surface_label, expand_out=False + ) + correct_area = 4 * np.pi**2 if surface_label == "rho" else 2 * np.pi + np.testing.assert_allclose(areas, correct_area, err_msg=surface_label) + + lg = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=False, endpoint=False) + lg_sym = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=True, endpoint=False) + lg_endpoint = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=False, endpoint=True) + lg_sym_endpoint = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=True, endpoint=True) + rho = np.linspace(1, 0, L)[::-1] + theta = np.linspace(0, 2 * np.pi, M, endpoint=False) + theta_endpoint = np.linspace(0, 2 * np.pi, M, endpoint=True) + zeta = np.linspace(0, 2 * np.pi / NFP, N, endpoint=False) + zeta_endpoint = np.linspace(0, 2 * np.pi / NFP, N, endpoint=True) + lg_2 = LinearGrid( + rho=rho, theta=theta, zeta=zeta, NFP=NFP, sym=False, endpoint=False + ) + lg_2_sym = LinearGrid( + rho=rho, theta=theta, zeta=zeta, NFP=NFP, sym=True, endpoint=False + ) + lg_2_endpoint = LinearGrid( + rho=rho, + theta=theta_endpoint, + zeta=zeta_endpoint, + NFP=NFP, + sym=False, + endpoint=True, + ) + lg_2_sym_endpoint = LinearGrid( + rho=rho, + theta=theta_endpoint, + zeta=zeta_endpoint, + NFP=NFP, + sym=True, + endpoint=True, + ) + cg = ConcentricGrid(L=L, M=M, N=N, NFP=NFP, sym=False) + cg_sym = ConcentricGrid(L=L, M=M, N=N, NFP=NFP, sym=True) + + for label in ("rho", "theta", "zeta"): + test(label, lg) + test(label, lg_sym) + test(label, lg_endpoint) + test(label, lg_sym_endpoint) + test(label, lg_2) + test(label, lg_2_sym) + test(label, lg_2_endpoint) + test(label, lg_2_sym_endpoint) + if label != "theta": + # theta integrals are poorly defined on concentric grids + test(label, cg) + test(label, cg_sym) + + @pytest.mark.unit + def test_line_length(self): + """Test that line_integrals(dl) is 1 for rho, 2π for theta, zeta. + + This test should ensure that lines have the correct length on grids + constructed by specifying L, M, N and by specifying an array of nodes. 
+ """ + + def test(grid): + if not isinstance(grid, ConcentricGrid): + for theta_val in grid.nodes[grid.unique_theta_idx, 1]: + result = line_integrals( + grid, + line_label="rho", + fix_surface=("theta", theta_val), + expand_out=False, + ) + np.testing.assert_allclose(result, 1) + for rho_val in grid.nodes[grid.unique_rho_idx, 0]: + result = line_integrals( + grid, + line_label="zeta", + fix_surface=("rho", rho_val), + expand_out=False, + ) + np.testing.assert_allclose(result, 2 * np.pi) + for zeta_val in grid.nodes[grid.unique_zeta_idx, 2]: + result = line_integrals( + grid, + line_label="theta", + fix_surface=("zeta", zeta_val), + expand_out=False, + ) + np.testing.assert_allclose(result, 2 * np.pi) + + lg = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=False) + lg_sym = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=True) + rho = np.linspace(1, 0, L)[::-1] + theta = np.linspace(0, 2 * np.pi, M, endpoint=False) + zeta = np.linspace(0, 2 * np.pi / NFP, N, endpoint=False) + lg_2 = LinearGrid(rho=rho, theta=theta, zeta=zeta, NFP=NFP, sym=False) + lg_2_sym = LinearGrid(rho=rho, theta=theta, zeta=zeta, NFP=NFP, sym=True) + cg = ConcentricGrid(L=L, M=M, N=N, NFP=NFP, sym=False) + cg_sym = ConcentricGrid(L=L, M=M, N=N, NFP=NFP, sym=True) + + test(lg) + test(lg_sym) + test(lg_2) + test(lg_2_sym) + test(cg) + test(cg_sym) + + @pytest.mark.unit + def test_surface_averages_identity_op(self): + """Test flux surface averages of surface functions are identity operations.""" + eq = get("W7-X") + with pytest.warns(UserWarning, match="Reducing radial"): + eq.change_resolution(3, 3, 3, 6, 6, 6) + grid = ConcentricGrid(L=L, M=M, N=N, NFP=eq.NFP, sym=eq.sym) + data = eq.compute(["p", "sqrt(g)"], grid=grid) + pressure_average = surface_averages(grid, data["p"], data["sqrt(g)"]) + np.testing.assert_allclose(data["p"], pressure_average) + + @pytest.mark.unit + def test_surface_averages_homomorphism(self): + """Test flux surface averages of surface functions are additive homomorphisms. + + Meaning average(a + b) = average(a) + average(b). + """ + eq = get("W7-X") + with pytest.warns(UserWarning, match="Reducing radial"): + eq.change_resolution(3, 3, 3, 6, 6, 6) + grid = ConcentricGrid(L=L, M=M, N=N, NFP=eq.NFP, sym=eq.sym) + data = eq.compute(["|B|", "|B|_t", "sqrt(g)"], grid=grid) + a = surface_averages(grid, data["|B|"], data["sqrt(g)"]) + b = surface_averages(grid, data["|B|_t"], data["sqrt(g)"]) + a_plus_b = surface_averages(grid, data["|B|"] + data["|B|_t"], data["sqrt(g)"]) + np.testing.assert_allclose(a_plus_b, a + b) + + @pytest.mark.unit + def test_surface_integrals_against_shortcut(self): + """Test integration against less general methods.""" + grid = ConcentricGrid(L=L, M=M, N=N, NFP=NFP) + ds = grid.spacing[:, :2].prod(axis=-1) + # something arbitrary that will give different sum across surfaces + q = np.arange(grid.num_nodes) ** 2 + # The predefined grids sort nodes in zeta surface chunks. + # To compute a quantity local to a surface, we can reshape it into zeta + # surface chunks and compute across the chunks. 
+ result = grid.expand( + (ds * q).reshape((grid.num_zeta, -1)).sum(axis=-1), + surface_label="zeta", + ) + np.testing.assert_allclose( + surface_integrals(grid, q, surface_label="zeta"), + desired=result, + ) + + @pytest.mark.unit + def test_surface_averages_against_shortcut(self): + """Test averaging against less general methods.""" + # test on zeta surfaces + grid = LinearGrid(L=L, M=M, N=N, NFP=NFP) + # something arbitrary that will give different average across surfaces + q = np.arange(grid.num_nodes) ** 2 + # The predefined grids sort nodes in zeta surface chunks. + # To compute a quantity local to a surface, we can reshape it into zeta + # surface chunks and compute across the chunks. + mean = grid.expand( + q.reshape((grid.num_zeta, -1)).mean(axis=-1), + surface_label="zeta", + ) + # number of nodes per surface + n = grid.num_rho * grid.num_theta + np.testing.assert_allclose(np.bincount(grid.inverse_zeta_idx), desired=n) + ds = grid.spacing[:, :2].prod(axis=-1) + np.testing.assert_allclose( + surface_integrals(grid, q / ds, surface_label="zeta") / n, + desired=mean, + ) + np.testing.assert_allclose( + surface_averages(grid, q, surface_label="zeta"), + desired=mean, + ) + + # test on grids with a single rho surface + eq = get("W7-X") + with pytest.warns(UserWarning, match="Reducing radial"): + eq.change_resolution(3, 3, 3, 6, 6, 6) + rho = np.array((1 - 1e-4) * np.random.default_rng().random() + 1e-4) + grid = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym) + data = eq.compute(["|B|", "sqrt(g)"], grid=grid) + np.testing.assert_allclose( + surface_averages(grid, data["|B|"], data["sqrt(g)"]), + np.mean(data["sqrt(g)"] * data["|B|"]) / np.mean(data["sqrt(g)"]), + err_msg="average with sqrt(g) fail", + ) + np.testing.assert_allclose( + surface_averages(grid, data["|B|"]), + np.mean(data["|B|"]), + err_msg="average without sqrt(g) fail", + ) + + @pytest.mark.unit + def test_symmetry_surface_average_1(self): + """Test surface average of a symmetric function.""" + + def test(grid): + r = grid.nodes[:, 0] + t = grid.nodes[:, 1] + z = grid.nodes[:, 2] * grid.NFP + true_surface_avg = 5 + function_of_rho = 1 / (r + 0.35) + f = ( + true_surface_avg + + np.cos(t) + - 0.5 * np.cos(z) + + 3 * np.cos(t) * np.cos(z) ** 2 + - 2 * np.sin(z) * np.sin(t) + ) * function_of_rho + np.testing.assert_allclose( + surface_averages(grid, f), + true_surface_avg * function_of_rho, + rtol=1e-15, + err_msg=type(grid), + ) + + # these tests should be run on relatively low resolution grids, + # or at least low enough so that the asymmetric spacing test fails + L = [3, 3, 5, 3] + M = [3, 6, 5, 7] + N = [2, 2, 2, 2] + NFP = [5, 3, 5, 3] + sym = np.array([True, True, False, False]) + # to test code not tested on grids made with M=. 
+ even_number = 4 + n_theta = even_number - sym + + # asymmetric spacing + with pytest.raises(AssertionError): + theta = 2 * np.pi * np.array([t**2 for t in np.linspace(0, 1, max(M))]) + test(LinearGrid(L=max(L), theta=theta, N=max(N), sym=False)) + + for i in range(len(L)): + test(LinearGrid(L=L[i], M=M[i], N=N[i], NFP=NFP[i], sym=sym[i])) + test(LinearGrid(L=L[i], theta=n_theta[i], N=N[i], NFP=NFP[i], sym=sym[i])) + test( + LinearGrid( + L=L[i], + theta=np.linspace(0, 2 * np.pi, n_theta[i]), + N=N[i], + NFP=NFP[i], + sym=sym[i], + ) + ) + test( + LinearGrid( + L=L[i], + theta=np.linspace(0, 2 * np.pi, n_theta[i] + 1), + N=N[i], + NFP=NFP[i], + sym=sym[i], + ) + ) + test(QuadratureGrid(L=L[i], M=M[i], N=N[i], NFP=NFP[i])) + test(ConcentricGrid(L=L[i], M=M[i], N=N[i], NFP=NFP[i], sym=sym[i])) + # nonuniform spacing when sym is False, but spacing is still symmetric + test( + LinearGrid( + L=L[i], + theta=np.linspace(0, np.pi, n_theta[i]), + N=N[i], + NFP=NFP[i], + sym=sym[i], + ) + ) + test( + LinearGrid( + L=L[i], + theta=np.linspace(0, np.pi, n_theta[i] + 1), + N=N[i], + NFP=NFP[i], + sym=sym[i], + ) + ) + + @pytest.mark.unit + def test_symmetry_surface_average_2(self): + """Tests that surface averages are correct using specified basis.""" + + def test(grid, basis, true_avg=1): + transform = Transform(grid, basis) + + # random data with specified average on each surface + coeffs = np.random.rand(basis.num_modes) + coeffs[np.all(basis.modes[:, 1:] == [0, 0], axis=1)] = 0 + coeffs[np.all(basis.modes == [0, 0, 0], axis=1)] = true_avg + + # compute average for each surface in grid + values = transform.transform(coeffs) + numerical_avg = surface_averages(grid, values, expand_out=False) + np.testing.assert_allclose( + # values closest to axis are never accurate enough + numerical_avg[isinstance(grid, ConcentricGrid) :], + true_avg, + err_msg=str(type(grid)) + " " + str(grid.sym), + ) + + M = 5 + M_grid = 13 + test( + QuadratureGrid(L=M_grid, M=M_grid, N=0), FourierZernikeBasis(L=M, M=M, N=0) + ) + test( + LinearGrid(L=M_grid, M=M_grid, N=0, sym=True), + FourierZernikeBasis(L=M, M=M, N=0, sym="cos"), + ) + test( + ConcentricGrid(L=M_grid, M=M_grid, N=0), FourierZernikeBasis(L=M, M=M, N=0) + ) + test( + ConcentricGrid(L=M_grid, M=M_grid, N=0, sym=True), + FourierZernikeBasis(L=M, M=M, N=0, sym="cos"), + ) + + @pytest.mark.unit + def test_surface_variance(self): + """Test correctness of variance against less general methods.""" + grid = LinearGrid(L=L, M=M, N=N, NFP=NFP) + # something arbitrary that will give different variance across surfaces + q = np.arange(grid.num_nodes) ** 2 + + # Test weighted sample variance with different weights. + # positive weights to prevent cancellations that may hide implementation error + weights = np.cos(q) * np.sin(q) + 5 + biased = surface_variance( + grid, q, weights, bias=True, surface_label="zeta", expand_out=False + ) + unbiased = surface_variance( + grid, q, weights, surface_label="zeta", expand_out=False + ) + # The predefined grids sort nodes in zeta surface chunks. + # To compute a quantity local to a surface, we can reshape it into zeta + # surface chunks and compute across the chunks. + chunks = q.reshape((grid.num_zeta, -1)) + # The ds weights are built into the surface variance function. + # So weights for np.cov should be ds * weights. Since ds is constant on + # LinearGrid, we need to get the same result if we don't multiply by ds. 
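The invariance asserted in the comment above, that the constant ``ds`` factor can be dropped from the ``np.cov`` weights, holds because ``np.cov`` normalizes ``aweights``. A standalone check (illustrative only, not part of the test):

import numpy as np

rng = np.random.default_rng(0)
x = rng.random(7)
w = rng.random(7) + 1.0                       # strictly positive weights
ds = 0.37                                     # any constant area element
np.testing.assert_allclose(np.cov(x, aweights=w), np.cov(x, aweights=ds * w))
np.testing.assert_allclose(
    np.cov(x, bias=True, aweights=w), np.cov(x, bias=True, aweights=ds * w)
)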
+ weights = weights.reshape((grid.num_zeta, -1)) + for i in range(grid.num_zeta): + np.testing.assert_allclose( + biased[i], + desired=np.cov(chunks[i], bias=True, aweights=weights[i]), + ) + np.testing.assert_allclose( + unbiased[i], + desired=np.cov(chunks[i], aweights=weights[i]), + ) + + # Test weighted sample variance converges to unweighted sample variance + # when all weights are equal. + chunks = grid.expand(chunks, surface_label="zeta") + np.testing.assert_allclose( + surface_variance(grid, q, np.e, bias=True, surface_label="zeta"), + desired=chunks.var(axis=-1), + ) + np.testing.assert_allclose( + surface_variance(grid, q, np.e, surface_label="zeta"), + desired=chunks.var(axis=-1, ddof=1), + ) + + @pytest.mark.unit + def test_surface_min_max(self): + """Test the surface_min and surface_max functions.""" + for grid_type in [LinearGrid, QuadratureGrid, ConcentricGrid]: + grid = grid_type(L=L, M=M, N=N, NFP=NFP) + rho = grid.nodes[:, 0] + theta = grid.nodes[:, 1] + zeta = grid.nodes[:, 2] + # Make up an arbitrary function of the coordinates: + B = ( + 1.7 + + 0.4 * rho * np.cos(theta) + + 0.8 * rho * rho * np.cos(2 * theta - 3 * zeta) + ) + Bmax_alt = np.zeros(grid.num_rho) + Bmin_alt = np.zeros(grid.num_rho) + for j in range(grid.num_rho): + mask = grid.inverse_rho_idx == j + Bmax_alt[j] = np.max(B[mask]) + Bmin_alt[j] = np.min(B[mask]) + np.testing.assert_allclose(Bmax_alt, grid.compress(surface_max(grid, B))) + np.testing.assert_allclose(Bmin_alt, grid.compress(surface_min(grid, B))) diff --git a/tests/test_plotting.py b/tests/test_plotting.py index 1351b46c93..399ea173af 100644 --- a/tests/test_plotting.py +++ b/tests/test_plotting.py @@ -15,10 +15,10 @@ ) from desc.coils import CoilSet, FourierXYZCoil, MixedCoilSet from desc.compute import data_index -from desc.compute.utils import surface_averages from desc.examples import get from desc.geometry import FourierRZToroidalSurface, FourierXYZCurve from desc.grid import ConcentricGrid, Grid, LinearGrid, QuadratureGrid +from desc.integrals.surface_integral import surface_averages from desc.io import load from desc.magnetic_fields import ( OmnigenousField, diff --git a/tests/test_singularities.py b/tests/test_singularities.py index fd44ce05c4..1c3a0bd48c 100644 --- a/tests/test_singularities.py +++ b/tests/test_singularities.py @@ -25,7 +25,7 @@ import desc from desc.equilibrium import Equilibrium from desc.grid import LinearGrid -from desc.singularities import ( +from desc.integrals.singularities import ( DFTInterpolator, FFTInterpolator, _get_quadrature_nodes, From 7c2d7c2e51241c459e58a193bf9ef8696ab46674 Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 21 Aug 2024 17:27:33 -0400 Subject: [PATCH 208/241] Simplify some broadcasting add short comments explaining theory --- desc/integrals/_interp_utils.py | 9 - desc/integrals/bounce_integral.py | 4 +- desc/integrals/fourier_bounce_integral.py | 191 +++++++++++----------- tests/test_compute_utils.py | 37 +---- tests/test_fourier_bounce.py | 10 +- tests/test_utils.py | 38 ++++- 6 files changed, 140 insertions(+), 149 deletions(-) diff --git a/desc/integrals/_interp_utils.py b/desc/integrals/_interp_utils.py index ea022891c1..c7a8609704 100644 --- a/desc/integrals/_interp_utils.py +++ b/desc/integrals/_interp_utils.py @@ -11,15 +11,6 @@ from desc.utils import Index, errorif -# Y = [a, b] evaluate on grid -> y = [-1, 1] chebyshev points -> y = cos(z) -# evenly spaced z. -# So I find coefficients to chebyshev series T_n(y) = cos(n arcos(y)) = cos(n z). 
-# So evaluating my chebyshev series in y is same as evaluting cosine series in -# z = arcos(y). -# for y = inversemap[a, b]. -# Open questions is finding roots y using chebroots better or is finding roots z -# of trig poly. -# answer: research shows doesn't really matter. # TODO: Transformation to make nodes uniform Boyd eq. 16.46 pg 336. # Shouldn't really change locations of complex poles for us, so convergence # rate will still be good. diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 2b61dfece5..c5e03df6ea 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -803,11 +803,13 @@ def bounce_integral( The first callable should be an automorphism of the real interval [-1, 1]. The second callable should be the derivative of the first. This map defines a change of variable for the bounce integral. The choice made for the automorphism - can affect the performance of the quadrature method. + will affect the performance of the quadrature method. B_ref : float Optional. Reference magnetic field strength for normalization. + Has no effect on computation, but may be useful for analysis. L_ref : float Optional. Reference length scale for normalization. + Has no effect on computation, but may be useful for analysis. check : bool Flag for debugging. Must be false for jax transformations. plot : bool diff --git a/desc/integrals/fourier_bounce_integral.py b/desc/integrals/fourier_bounce_integral.py index cf03b7596b..be247f9249 100644 --- a/desc/integrals/fourier_bounce_integral.py +++ b/desc/integrals/fourier_bounce_integral.py @@ -1,4 +1,4 @@ -"""Methods for computing Fourier Chebyshev FFTs and bounce integrals.""" +"""Methods for computing Fast Fourier Chebyshev transforms and bounce integrals.""" import numpy as np from matplotlib import pyplot as plt @@ -42,7 +42,7 @@ def _flatten_matrix(y): return y.reshape(*y.shape[:-2], -1) -def alpha_sequence(alpha_0, iota, num_transit, period=2 * jnp.pi): +def get_alphas(alpha_0, iota, num_transit, period): """Get sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) of field line. Parameters @@ -90,7 +90,7 @@ class FourierChebyshevBasis: """ - def __init__(self, f, lobatto=False, domain=(0, 2 * jnp.pi)): + def __init__(self, f, domain, lobatto=False): """Interpolate Fourier-Chebyshev basis to ``f``. Parameters @@ -99,24 +99,23 @@ def __init__(self, f, lobatto=False, domain=(0, 2 * jnp.pi)): Shape (..., M, N). Samples of real function on the ``FourierChebyshevBasis.nodes`` grid. M, N preferably power of 2. + domain : (float, float) + Domain for y coordinates. lobatto : bool Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) or interior roots grid for Chebyshev points. - domain : (float, float) - Domain for y coordinates. Default is [0, 2π]. 
""" - lobatto = bool(lobatto) - errorif(lobatto, NotImplementedError, "JAX has not implemented type 1 DCT.") - self.lobatto = lobatto - errorif(domain[0] > domain[-1], msg="Got inverted domain.") - self.domain = domain self.M = f.shape[-2] self.N = f.shape[-1] - self._c = self._fast_transform(f, lobatto) + errorif(domain[0] > domain[-1], msg="Got inverted domain.") + self.domain = domain + errorif(lobatto, NotImplementedError, "JAX has not implemented type 1 DCT.") + self.lobatto = bool(lobatto) + self._c = self._fast_transform(f, self.lobatto) @staticmethod - def nodes(M, N, lobatto=False, domain=(0, 2 * jnp.pi), **kwargs): + def nodes(M, N, domain, lobatto=False, **kwargs): """Tensor product grid of optimal collocation nodes for this basis. Parameters @@ -125,11 +124,11 @@ def nodes(M, N, lobatto=False, domain=(0, 2 * jnp.pi), **kwargs): Grid resolution in x direction. Preferably power of 2. N : int Grid resolution in y direction. Preferably power of 2. + domain : (float, float) + Domain for y coordinates. lobatto : bool Whether to use the Gauss-Lobatto (Extrema-plus-Endpoint) or interior roots grid for Chebyshev points. - domain : (float, float) - Domain for y coordinates. Default is [0, 2π]. Returns ------- @@ -196,9 +195,7 @@ def compute_cheb(self, x): Parameters ---------- x : jnp.ndarray - Shape (..., x.shape[-1]). - Evaluation points. If 1d assumes batch dimension over L is implicit - (i.e. standard numpy broadcasting rules). + Points to evaluate Fourier basis. Returns ------- @@ -277,13 +274,12 @@ def _chebcast(cheb, arr): # Input should not have rightmost dimension of cheb that iterates coefficients, # but may have additional leftmost dimension for batch operation. errorif( - arr.ndim > cheb.ndim, + jnp.ndim(arr) > cheb.ndim, NotImplementedError, msg=f"Only one additional axis for batch dimension is allowed. " - f"Got {arr.ndim - cheb.ndim + 1} additional axes.", + f"Got {jnp.ndim(arr) - cheb.ndim + 1} additional axes.", ) - # Don't add additional axis unless necessary to appease JIT compilation. - return cheb if arr.ndim < cheb.ndim else cheb[jnp.newaxis] + return cheb if jnp.ndim(arr) < cheb.ndim else cheb[jnp.newaxis] def intersect(self, k, eps=_eps): """Coordinates yᵢ such that f(x, yᵢ) = k(x). @@ -312,7 +308,6 @@ def intersect(self, k, eps=_eps): Boolean array into ``y`` indicating whether element is an intersect. """ - k = jnp.atleast_1d(k) c = _subtract(self._chebcast(self.cheb, k), k) # roots yᵢ of f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y) - k(x) y = _chebroots_vec(c) @@ -339,33 +334,20 @@ def intersect(self, k, eps=_eps): y = bijection_from_disc(y, *self.domain) return y, is_decreasing, is_increasing, is_intersect - def bounce_points( - self, y, is_decreasing, is_increasing, is_intersect, num_well=None - ): + def bounce_points(self, pitch, num_well=None): """Compute bounce points given intersections. Parameters ---------- - y : jnp.ndarray - Shape (..., *y.shape[-2:]). - Solutions yᵢ of f(x, yᵢ) = k(x), in ascending order. - Assumes the -2nd axis enumerates over poloidal coordinates - all belonging to a single field line. See ``alpha_sequence``. - is_decreasing : jnp.ndarray - Shape y.shape. - Whether ∂f/∂y (x, yᵢ) is decreasing. - is_increasing : jnp.ndarray - Shape y.shape. - Whether ∂f/∂y (x, yᵢ) is increasing. - is_intersect : jnp.ndarray - Shape y.shape. - Boolean array into ``y`` indicating whether element is an intersect. + pitch : jnp.ndarray + Shape must broadcast with (P, *self.cheb.shape[:-2]). + λ values to evaluate the bounce integral. 
num_well : int or None If not specified, then all bounce points are returned in an array whose - last axis has size ``y.shape[-1]*y.shape[-2]``. If there - were less than that many wells detected along a field line, then the last - axis of the returned arrays, which enumerates bounce points for a particular - field line and pitch, is padded with zero. + last axis has size ``self.M*(self.N-1)``. If there were less than that many + wells detected along a field line, then the last axis of the returned + arrays, which enumerates bounce points for a particular field line and + pitch, is padded with zero. Specify to return the first ``num_well`` pairs of bounce points for each pitch along each field line. This is useful if ``num_well`` tightly @@ -374,15 +356,20 @@ def bounce_points( Returns ------- - bp1, bp2 : (jnp.ndarray, jnp.ndarray) - Shape (*y.shape[:-2], num_well). + bp1, bp2 : jnp.ndarray + Shape broadcasts with (P, *self.cheb.shape[:-2], num_well). The field line-following coordinates of bounce points. The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, respectively, for the bounce integrals. """ + # _fix_inversion assumes N > 1. errorif(self.N < 2, NotImplementedError, f"Got self.N = {self.N} < 2.") - + y, is_decreasing, is_increasing, is_intersect = self.intersect( + # Add axis to use same pitch over all cuts of field line. + 1 + / jnp.atleast_1d(pitch)[..., jnp.newaxis] + ) # Flatten so that last axis enumerates intersects of a pitch along a field line. y = _flatten_matrix(self._isomorphism_to_C1(y)) is_decreasing = _flatten_matrix(is_decreasing) @@ -532,8 +519,6 @@ def check_bounce_points(self, bp1, bp2, pitch, plot=True, **kwargs): mask, bp1, bp2, B_m = atleast_3d_mid(mask, bp1, bp2, B_m) err_1, err_2, err_3 = atleast_2d_end(err_1, err_2, err_3) - print(np.sum(mask)) - for l in np.ndindex(cheb.shape[:-2]): for p in range(pitch.shape[0]): if not (err_1[p, l] or err_2[p, l] or err_3[p, l]): @@ -706,7 +691,7 @@ def _bounce_quadrature(bp1, bp2, x, w, m, n, integrand, f, b_sup_z, B, T, pitch) ``B`` and ``pitch``. A quadrature will be performed to approximate the bounce integral of ``integrand(*f,B=B,pitch=pitch)``. f : list of jnp.ndarray - Shape (L * m * n, ) or (L, m, n) or (L, 1, m, n). + Shape (L * m * n, ). Arguments to the callable ``integrand``. These should be real scalar-valued functions in the bounce integrand evaluated on the periodic DESC coordinate (ρ, θ, ζ) tensor-product grid. @@ -720,7 +705,7 @@ def _bounce_quadrature(bp1, bp2, x, w, m, n, integrand, f, b_sup_z, B, T, pitch) Set of 1D Chebyshev spectral coefficients of θ along field line. {θ_α : ζ ↦ θ(α, ζ) | α ∈ A }. pitch : jnp.ndarray - Shape (P, L, 1). + Shape (P, L). λ values to evaluate the bounce integral at each field line. Returns @@ -731,26 +716,33 @@ def _bounce_quadrature(bp1, bp2, x, w, m, n, integrand, f, b_sup_z, B, T, pitch) Last axis enumerates the bounce integrals. """ - errorif(bp1.ndim != 3 or bp1.shape != bp2.shape) - errorif(pitch.ndim != 3) - errorif(x.ndim != 1 or x.shape != w.shape) - errorif( - B.cheb.shape != T.cheb.shape - or B.cheb.ndim != 3 - or B.cheb.shape[0] != bp1.shape[1] - ) + assert bp1.ndim == 3 + assert bp1.shape == bp2.shape + assert x.ndim == 1 + assert x.shape == w.shape + assert B.cheb.ndim == 3 + assert B.cheb.shape == T.cheb.shape + assert pitch.ndim == 2 P, L, num_well = bp1.shape shape = (P, L, num_well, x.size) # Quadrature points parameterized by ζ, for each pitch and flux surface. 
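# --- Illustrative sketch (not part of the patch) -------------------------------
# The quadrature below maps fixed nodes x_k in [-1, 1] onto each bounce interval
# [bp1, bp2] with an affine change of variable (presumably what
# ``bijection_from_disc`` implements), so that
#     ζ_k = (x_k + 1) (bp2 - bp1) / 2 + bp1,
#     ∫_{bp1}^{bp2} g(ζ) dζ ≈ (bp2 - bp1) / 2 * Σ_k w_k g(ζ_k).
# A minimal standalone check of that rule with Gauss-Legendre points:
import numpy as np

def affine_from_disc(x, a, b):
    """Map points x in [-1, 1] onto the interval [a, b]."""
    return 0.5 * (b - a) * (x + 1.0) + a

x_k, w_k = np.polynomial.legendre.leggauss(21)
a, b = 0.0, np.pi
approx = 0.5 * (b - a) * np.dot(np.sin(affine_from_disc(x_k, a, b)), w_k)
# approx ≈ 2.0, the exact value of ∫_0^π sin ζ dζ
# --------------------------------------------------------------------------------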
Q_zeta = _flatten_matrix( - bijection_from_disc(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) + bijection_from_disc( + x, + bp1[..., jnp.newaxis], + bp2[..., jnp.newaxis], + ) ) - # Quadrature points in DESC (θ, ζ) coordinates. + # Quadrature points in (θ, ζ) coordinates. Q_desc = jnp.stack([T.eval1d(Q_zeta), Q_zeta], axis=-1) f = [interp_rfft2(Q_desc, f_i.reshape(L, 1, m, n)).reshape(shape) for f_i in f] result = jnp.dot( - integrand(*f, B=B.eval1d(Q_zeta).reshape(shape), pitch=pitch[..., jnp.newaxis]) + integrand( + *f, + B=B.eval1d(Q_zeta).reshape(shape), + pitch=pitch[..., jnp.newaxis, jnp.newaxis], + ) / irfft2_non_uniform(Q_desc, b_sup_z, m, n).reshape(shape), w, ) @@ -777,6 +769,7 @@ def bounce_integral( B_ref=1.0, L_ref=1.0, check=False, + plot=False, **kwargs, ): """Returns a method to compute bounce integrals. @@ -798,23 +791,27 @@ def bounce_integral( ---------- grid : Grid Periodic tensor-product grid in (ρ, θ, ζ). - Note that below shape notation uses ``L=grid.num_rho``, ``m=grid.num_theta``, - and ``n=grid.num_zeta``. + Note that below shape notation defines + L = ``grid.num_rho``, m = ``grid.num_theta``, and n = ``grid.num_zeta``. data : dict of jnp.ndarray Data evaluated on grid. M : int Grid resolution in poloidal direction for Clebsch coordinates. - Preferably power of 2. A good choice is ``grid.num_theta``. + Preferably power of 2. A good choice is ``m``. If the poloidal stream + function condenses the Fourier spectrum of |B| significantly, then a + larger number may be beneficial. N : int Grid resolution in toroidal direction for Clebsch coordinates. Preferably power of 2. desc_from_clebsch : jnp.ndarray Shape (L * M * N, 3). DESC coordinate grid (ρ, θ, ζ) sourced from the Clebsch coordinate - tensor-product grid (ρ, α, ζ) returned by ``FourierChebyshevBasis.nodes(M,N)``. + tensor-product grid (ρ, α, ζ) returned by + ``FourierChebyshevBasis.nodes(M,N,domain=(0,2π))``. alpha_0 : float Starting field line poloidal label. TODO: Allow multiple starting labels for near-rational surfaces. + Concatenate along second to last axis of cheb. num_transit : int Number of toroidal transits to follow field line. quad : (jnp.ndarray, jnp.ndarray) @@ -824,58 +821,68 @@ def bounce_integral( The first callable should be an automorphism of the real interval [-1, 1]. The second callable should be the derivative of the first. This map defines a change of variable for the bounce integral. The choice made for the automorphism - can affect the performance of the quadrature method. + will affect the performance of the quadrature method. B_ref : float Optional. Reference magnetic field strength for normalization. + Has no effect on computation, but may be useful for analysis. L_ref : float Optional. Reference length scale for normalization. + Has no effect on computation, but may be useful for analysis. check : bool Flag for debugging. Must be false for jax transformations. + plot : bool + Whether to plot stuff if ``check`` is true. Default is false. Returns ------- bounce_integrate : callable This callable method computes the bounce integral ∫ f(ℓ) dℓ for every specified field line for every λ value in ``pitch``. - spline : tuple(ndarray, _PiecewiseChebyshevBasis, _PiecewiseChebyshevBasis) + spline : tuple(ndarray, PiecewiseChebyshevBasis, PiecewiseChebyshevBasis) alphas : jnp.ndarray Poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) that specify field line. - B : _PiecewiseChebyshevBasis + B : PiecewiseChebyshevBasis Set of 1D Chebyshev spectral coefficients of |B| along field line. 
{|B|_α : ζ ↦ |B|(α, ζ) | α ∈ A }. - T : _PiecewiseChebyshevBasis + T : PiecewiseChebyshevBasis Set of 1D Chebyshev spectral coefficients of θ along field line. {θ_α : ζ ↦ θ(α, ζ) | α ∈ A }. """ - # Resolution of periodic DESC coordinate tensor-product grid. - L, m, n = grid.num_rho, grid.num_theta, grid.num_zeta # Strictly increasing zeta knots enforces dζ > 0. # To retain dℓ = (|B|/B^ζ) dζ > 0 after fixing dζ > 0, we require B^ζ = B⋅∇ζ > 0. - # This is equivalent to changing the sign of ∇ζ. + # This is equivalent to changing the sign of ∇ζ (or [∂ℓ/∂ζ]|ρ,a). warnif( check and kwargs.pop("warn", True) and jnp.any(data["B^zeta"] <= 0), msg="(∂ℓ/∂ζ)|ρ,a > 0 is required. Enforcing positive B^ζ.", ) - # Transform to periodic DESC spectral domain. - b_sup_z = rfft2( + # Resolution of periodic DESC coordinate tensor-product grid. + L, m, n = grid.num_rho, grid.num_theta, grid.num_zeta + # Transform to DESC spectral domain. + b_sup_z = rfft2( # B^ζ(θ,ζ) (jnp.abs(data["B^zeta"]) / data["|B|"] * L_ref).reshape(L, 1, m, n), norm="forward", ) - # Transform to non-periodic Clebsch spectral domain. - T = FourierChebyshevBasis(desc_from_clebsch[:, 1].reshape(L, M, N)) # θ(α, ζ) - B = FourierChebyshevBasis( # |B|(α, ζ) + domain = (0, 2 * jnp.pi) + # Transform to Clebsch spectral domain. + # We compute θ(α,ζ) to avoid nonlinear root finding later, and |B|(α,ζ) + # so that roots are computable without inferior local search algorithms. + T = FourierChebyshevBasis(desc_from_clebsch[:, 1].reshape(L, M, N), domain) + B = FourierChebyshevBasis( interp_rfft2( xq=desc_from_clebsch[:, 1:].reshape(L, -1, 2), - f=data["|B|"].reshape(L, m, n) / B_ref, + f=data["|B|"].reshape(L, 1, m, n) / B_ref, ).reshape(L, M, N), + domain, ) # Peel off field lines. - alphas = alpha_sequence(alpha_0, grid.compress(data["iota"]), num_transit) + alphas = get_alphas(alpha_0, grid.compress(data["iota"]), num_transit, domain[-1]) T = T.compute_cheb(alphas) B = B.compute_cheb(alphas) assert T.cheb.shape == B.cheb.shape == (L, num_transit, N) + # Evaluation of a set of Chebyshev series is always more efficient than evaluating + # single Fourier Chebyshev series, so we also get Chebyshev series for θ. x, w = quad assert x.ndim == w.ndim == 1 @@ -913,10 +920,10 @@ def bounce_integrate(integrand, f, pitch, weight=None, num_well=None): evaluated at the deepest point in the magnetic well. num_well : int or None If not specified, then all bounce integrals are returned in an array whose - last axis has size ``(N-1)*num_transit``. If there - were less than that many wells detected along a field line, then the last - axis of the returned array, which enumerates bounce integrals for a - particular field line and pitch, is padded with zero. + last axis has size ``num_transit*(N-1)``. If there were less than that many + wells detected along a field line, then the last axis of the returned array, + which enumerates bounce integrals for a particular field line and + pitch, is padded with zero. Specify to return the bounce integrals between the first ``num_well`` wells for each pitch along each field line. This is useful if ``num_well`` @@ -931,26 +938,18 @@ def bounce_integrate(integrand, f, pitch, weight=None, num_well=None): result : jnp.ndarray Shape (P, L, num_well). First axis enumerates pitch values. Second axis enumerates the field lines. - Last axis enumerates the bounce integrals. + Last axis enumerates the bounce integrals.cd """ errorif(weight is not None, NotImplementedError) - # Compute bounce points. 
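# --- Illustrative sketch (not part of the patch) -------------------------------
# Bounce points are the roots of λ|B|(ζ) = 1 (see the docstrings above). For the
# toy field |B|(ζ) = B0 (1 − ε cos ζ) this reduces to
#     cos ζ_b = (1 − 1 / (λ B0)) / ε,
# and the magnetic well around ζ = 0 is bounded by the pair (−ζ_b, +ζ_b).
# Assuming only that toy model (B0 and ε are illustrative, not DESC quantities):
import numpy as np

def toy_bounce_points(pitch, B0=1.0, eps=0.3):
    c = np.clip((1.0 - 1.0 / (pitch * B0)) / eps, -1.0, 1.0)  # trapped orbits only
    zb = np.arccos(c)
    return -zb, zb

# e.g. pitch = 1 / 0.85 gives cos ζ_b = 0.5, i.e. bounce points at ζ ≈ ±1.047
# --------------------------------------------------------------------------------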
- pitch = jnp.atleast_3d(pitch) - assert ( - pitch.shape[1] == B.cheb.shape[0] - or pitch.shape[1] == 1 - or B.cheb.shape[0] == 1 - ) - bp1, bp2 = B.bounce_points(*B.intersect(1 / pitch), num_well) + pitch = jnp.atleast_2d(pitch) + bp1, bp2 = B.bounce_points(pitch, num_well) if check: - B.check_bounce_points(bp1, bp2, pitch, plot=True) - P = pitch.shape[0] - num_well = bp1.shape[-1] - assert bp1.shape == bp2.shape == (P, L, num_well) + B.check_bounce_points(bp1, bp2, pitch, plot) result = _bounce_quadrature( bp1, bp2, x, w, m, n, integrand, f, b_sup_z, B, T, pitch ) + assert result.shape == (pitch.shape[0], L, setdefault(num_well, N - 1)) return result return bounce_integrate, (alphas, B, T) diff --git a/tests/test_compute_utils.py b/tests/test_compute_utils.py index 938cec7f57..83a31ed3bb 100644 --- a/tests/test_compute_utils.py +++ b/tests/test_compute_utils.py @@ -1,14 +1,11 @@ """Tests compute utilities.""" -from functools import partial - import jax import numpy as np import pytest -from desc.backend import flatnonzero, jnp +from desc.backend import jnp from desc.compute.geom_utils import rotation_matrix -from desc.utils import take_mask @pytest.mark.unit @@ -21,35 +18,3 @@ def test_rotation_matrix(): np.testing.assert_allclose(rotation_matrix(x0), np.eye(3)) np.testing.assert_allclose(dfdx_fwd(x0), np.zeros((3, 3, 3))) np.testing.assert_allclose(dfdx_rev(x0), np.zeros((3, 3, 3))) - - -@partial(jnp.vectorize, signature="(m)->()") -def _last_value(a): - """Return the last non-nan value in ``a``.""" - a = a[::-1] - idx = np.squeeze(flatnonzero(~np.isnan(a), size=1, fill_value=0)) - return a[idx] - - -@pytest.mark.unit -def test_mask_operations(): - """Test custom masked array operation.""" - rows = 5 - cols = 7 - a = np.random.rand(rows, cols) - nan_idx = np.random.choice(rows * cols, size=(rows * cols) // 2, replace=False) - a.ravel()[nan_idx] = np.nan - taken = take_mask(a, ~np.isnan(a)) - last = _last_value(taken) - for i in range(rows): - desired = a[i, ~np.isnan(a[i])] - assert np.array_equal( - taken[i], - np.pad(desired, (0, cols - desired.size), constant_values=np.nan), - equal_nan=True, - ) - assert np.array_equal( - last[i], - desired[-1] if desired.size else np.nan, - equal_nan=True, - ) diff --git a/tests/test_fourier_bounce.py b/tests/test_fourier_bounce.py index e6b44aa4ac..6628dccd45 100644 --- a/tests/test_fourier_bounce.py +++ b/tests/test_fourier_bounce.py @@ -17,8 +17,8 @@ from desc.integrals.bounce_integral import filter_bounce_points, get_pitch from desc.integrals.fourier_bounce_integral import ( FourierChebyshevBasis, - alpha_sequence, bounce_integral, + get_alphas, required_names, ) @@ -31,7 +31,7 @@ def test_alpha_sequence(alpha_0, iota, num_period, period): """Test field line poloidal label tracking.""" iota = np.atleast_1d(iota) - alphas = alpha_sequence(alpha_0, iota, num_period, period) + alphas = get_alphas(alpha_0, iota, num_period, period) assert alphas.shape == (iota.size, num_period) for i in range(iota.size): assert np.unique(alphas[i]).size == num_period, f"{iota} is irrational" @@ -60,15 +60,15 @@ def _periodic_fun(nodes, M, N): @pytest.mark.unit def test_bp1_first(self): """Test that bounce points are computed correctly.""" - pitch = 1 / np.linspace(1, 4, 20).reshape(20, 1) M, N = 1, 10 domain = (-1, 1) nodes = FourierChebyshevBasis.nodes(M, N, domain=domain) f = self._periodic_fun(nodes, M, N) fcb = FourierChebyshevBasis(f, domain=domain) pcb = fcb.compute_cheb(fourier_pts(M)) - bp1, bp2 = pcb.bounce_points(*pcb.intersect(1 / pitch)) - 
pcb.check_bounce_points(bp1, bp2, pitch.ravel()) + pitch = 0.5 # 1 / np.linspace(1, 4, 20) + bp1, bp2 = pcb.bounce_points(pitch) + pcb.check_bounce_points(bp1, bp2, pitch) bp1, bp2 = filter_bounce_points(bp1, bp2) def f(z): diff --git a/tests/test_utils.py b/tests/test_utils.py index 6bfadb4008..aa042fcbc8 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,11 +1,13 @@ """Tests for utility functions.""" +from functools import partial + import numpy as np import pytest -from desc.backend import tree_leaves, tree_structure +from desc.backend import flatnonzero, jnp, tree_leaves, tree_structure from desc.grid import LinearGrid -from desc.utils import broadcast_tree, isalmostequal, islinspaced +from desc.utils import broadcast_tree, isalmostequal, islinspaced, take_mask @pytest.mark.unit @@ -197,3 +199,35 @@ def test_broadcast_tree(): ] for leaf, leaf_correct in zip(tree_leaves(tree), tree_leaves(tree_correct)): np.testing.assert_allclose(leaf, leaf_correct) + + +@partial(jnp.vectorize, signature="(m)->()") +def _last_value(a): + """Return the last non-nan value in ``a``.""" + a = a[::-1] + idx = np.squeeze(flatnonzero(~np.isnan(a), size=1, fill_value=0)) + return a[idx] + + +@pytest.mark.unit +def test_take_mask(): + """Test custom masked array operation.""" + rows = 5 + cols = 7 + a = np.random.rand(rows, cols) + nan_idx = np.random.choice(rows * cols, size=(rows * cols) // 2, replace=False) + a.ravel()[nan_idx] = np.nan + taken = take_mask(a, ~np.isnan(a)) + last = _last_value(taken) + for i in range(rows): + desired = a[i, ~np.isnan(a[i])] + assert np.array_equal( + taken[i], + np.pad(desired, (0, cols - desired.size), constant_values=np.nan), + equal_nan=True, + ) + assert np.array_equal( + last[i], + desired[-1] if desired.size else np.nan, + equal_nan=True, + ) From 809420982502f1fcdf5c7fcb84e1c818556bb6bd Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 22 Aug 2024 05:08:33 -0400 Subject: [PATCH 209/241] Make compatible with new meshgrid structure on master --- desc/equilibrium/coords.py | 2 - desc/grid.py | 3 +- desc/integrals/__init__.py | 5 + desc/integrals/bounce_integral.py | 33 +- desc/integrals/fourier_bounce_integral.py | 642 +++++++++++------- .../{_interp_utils.py => interp_utils.py} | 5 +- .../{_quad_utils.py => quad_utils.py} | 58 ++ tests/test_bounce_integral.py | 151 +--- tests/test_fourier_bounce.py | 66 +- tests/test_interp_utils.py | 4 +- tests/test_quad_utils.py | 64 ++ 11 files changed, 578 insertions(+), 455 deletions(-) rename desc/integrals/{_interp_utils.py => interp_utils.py} (98%) rename desc/integrals/{_quad_utils.py => quad_utils.py} (71%) create mode 100644 tests/test_quad_utils.py diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index d082692152..a89742b40f 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -704,8 +704,6 @@ def get_rtz_grid( "z": "zeta", "p": "phi", } - if "iota" in kwargs: - kwargs["iota"] = grid.expand(kwargs["iota"], surface_label="rho") rtz_nodes = map_coordinates( eq, grid.nodes, diff --git a/desc/grid.py b/desc/grid.py index 2779a5e534..d1dc9c49b7 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -619,6 +619,7 @@ def meshgrid_reshape(self, x, order): ------- x : ndarray Data reshaped to align with grid nodes. 
+ """ errorif( not self.is_meshgrid, @@ -637,7 +638,7 @@ def meshgrid_reshape(self, x, order): vec = True shape += (-1,) x = x.reshape(shape, order="F") - x = jnp.moveaxis(x, 1, 0) # now shape rtz/raz etc + x = jnp.swapaxes(x, 1, 0) # now shape rtz/raz etc newax = tuple(self.coordinates.index(c) for c in order) if vec: newax += (3,) diff --git a/desc/integrals/__init__.py b/desc/integrals/__init__.py index f223e39606..0ac381f051 100644 --- a/desc/integrals/__init__.py +++ b/desc/integrals/__init__.py @@ -1,5 +1,10 @@ """Classes for function integration.""" +from .fourier_bounce_integral import ( + FourierBounce, + FourierChebyshevBasis, + PiecewiseChebyshevBasis, +) from .singularities import ( DFTInterpolator, FFTInterpolator, diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index c5e03df6ea..3cf3c3df60 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -9,10 +9,11 @@ from tests.test_interp_utils import filter_not_nan from desc.backend import flatnonzero, imap, jnp, put -from desc.integrals._interp_utils import poly_root, polyder_vec, polyval_vec -from desc.integrals._quad_utils import ( +from desc.integrals.interp_utils import poly_root, polyder_vec, polyval_vec +from desc.integrals.quad_utils import ( automorphism_sin, bijection_from_disc, + composite_linspace, grad_automorphism_sin, grad_bijection_from_disc, ) @@ -365,32 +366,6 @@ def bounce_points( return bp1, bp2 -def _composite_linspace(x, num): - """Returns linearly spaced points between every pair of points ``x``. - - Parameters - ---------- - x : jnp.ndarray - First axis has values to return linearly spaced values between. The remaining - axes are batch axes. Assumes input is sorted along first axis. - num : int - Number of points between every pair of points in ``x``. - - Returns - ------- - pts : jnp.ndarray - Shape ((x.shape[0] - 1) * num + x.shape[0], *x.shape[1:]). - Linearly spaced points between ``x``. - - """ - x = jnp.atleast_1d(x) - pts = jnp.linspace(x[:-1], x[1:], num + 1, endpoint=False) - pts = jnp.swapaxes(pts, 0, 1).reshape(-1, *x.shape[1:]) - pts = jnp.append(pts, x[jnp.newaxis, -1], axis=0) - assert pts.shape == ((x.shape[0] - 1) * num + x.shape[0], *x.shape[1:]) - return pts - - def get_pitch(min_B, max_B, num, relative_shift=1e-6): """Return uniformly spaced pitch values between 1 / max B and 1 / min B. @@ -416,7 +391,7 @@ def get_pitch(min_B, max_B, num, relative_shift=1e-6): # extrema. Shift values slightly to resolve this issue. 
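    # Illustrative worked example (not part of the patch): with min_B = 0.5,
    # max_B = 2.0, num = 3 this returns num + 2 = 5 values, approximately
    # [0.50, 0.875, 1.25, 1.625, 2.00] (spanning 1/max_B up to 1/min_B),
    # with both endpoints nudged inward by ``relative_shift``.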
min_B = (1 + relative_shift) * min_B max_B = (1 - relative_shift) * max_B - pitch = _composite_linspace(1 / jnp.stack([max_B, min_B]), num) + pitch = composite_linspace(1 / jnp.stack([max_B, min_B]), num) assert pitch.shape == (num + 2, *pitch.shape[1:]) return pitch diff --git a/desc/integrals/fourier_bounce_integral.py b/desc/integrals/fourier_bounce_integral.py index be247f9249..d739ada397 100644 --- a/desc/integrals/fourier_bounce_integral.py +++ b/desc/integrals/fourier_bounce_integral.py @@ -6,7 +6,8 @@ from orthax.legendre import leggauss from desc.backend import dct, idct, irfft, jnp, rfft, rfft2 -from desc.integrals._interp_utils import ( +from desc.integrals.bounce_integral import _fix_inversion, filter_bounce_points +from desc.integrals.interp_utils import ( _filter_distinct, cheb_from_dct, cheb_pts, @@ -17,13 +18,13 @@ irfft2_non_uniform, irfft_non_uniform, ) -from desc.integrals._quad_utils import ( +from desc.integrals.quad_utils import ( automorphism_sin, bijection_from_disc, bijection_to_disc, + get_quad_points, grad_automorphism_sin, ) -from desc.integrals.bounce_integral import _fix_inversion, filter_bounce_points from desc.utils import ( atleast_2d_end, atleast_3d_mid, @@ -34,39 +35,11 @@ warnif, ) -_chebroots_vec = jnp.vectorize(chebroots, signature="(m)->(n)") - - -def _flatten_matrix(y): - # Flatten batch of matrix to batch of vector. - return y.reshape(*y.shape[:-2], -1) - - -def get_alphas(alpha_0, iota, num_transit, period): - """Get sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) of field line. - - Parameters - ---------- - alpha_0 : float - Starting field line poloidal label. - iota : jnp.ndarray - Shape (iota.size, ). - Rotational transform normalized by 2π. - num_transit : float - Number of ``period``s to follow field line. - period : float - Toroidal period after which to update label. - - Returns - ------- - alphas : jnp.ndarray - Shape (iota.size, num_transit). - Sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) that specify field line. - """ - # Δϕ (∂α/∂ϕ) = Δϕ ι̅ = Δϕ ι/2π = Δϕ data["iota"] - alphas = alpha_0 + period * iota[:, jnp.newaxis] * jnp.arange(num_transit) - return alphas +def _fast_transform(f, lobatto): + M = f.shape[-2] + N = f.shape[-1] + return rfft(dct(f, type=2 - lobatto, axis=-1), axis=-2) / (M * (N - lobatto)) class FourierChebyshevBasis: @@ -112,7 +85,7 @@ def __init__(self, f, domain, lobatto=False): self.domain = domain errorif(lobatto, NotImplementedError, "JAX has not implemented type 1 DCT.") self.lobatto = bool(lobatto) - self._c = self._fast_transform(f, self.lobatto) + self._c = _fast_transform(f, self.lobatto) @staticmethod def nodes(M, N, domain, lobatto=False, **kwargs): @@ -144,12 +117,6 @@ def nodes(M, N, domain, lobatto=False, **kwargs): coord = jnp.column_stack(coord) return coord - @staticmethod - def _fast_transform(f, lobatto): - M = f.shape[-2] - N = f.shape[-1] - return rfft(dct(f, type=2 - lobatto, axis=-1), axis=-2) / (M * (N - lobatto)) - def evaluate(self, M, N): """Evaluate Fourier-Chebyshev series. @@ -210,6 +177,14 @@ def compute_cheb(self, x): return PiecewiseChebyshevBasis(cheb, self.domain) +_chebroots_vec = jnp.vectorize(chebroots, signature="(m)->(n)") + + +def _flatten_matrix(y): + # Flatten batch of matrix to batch of vector. 
+ return y.reshape(*y.shape[:-2], -1) + + def _subtract(c, k): # subtract k from last axis of c, obeying numpy broadcasting c_0 = c[..., 0] - k @@ -657,264 +632,336 @@ def add(lines): return fig, ax -def _bounce_quadrature(bp1, bp2, x, w, m, n, integrand, f, b_sup_z, B, T, pitch): - """Bounce integrate ∫ f(ℓ) dℓ. +def _get_alphas(alpha_0, iota, num_transit, period): + """Get sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) of field line. Parameters ---------- - bp1 : jnp.ndarray - Shape (P, L, num_well). - The field line-following coordinates of bounce points for a given pitch - along a field line. The pairs ``bp1`` and ``bp2`` form left and right - integration boundaries, respectively, for the bounce integrals. - bp2 : jnp.ndarray - Shape (P, L, num_well). - The field line-following coordinates of bounce points for a given pitch - along a field line. The pairs ``bp1`` and ``bp2`` form left and right - integration boundaries, respectively, for the bounce integrals. - x : jnp.ndarray - Shape (w.size, ). - Quadrature points in [-1, 1]. - w : jnp.ndarray - Shape (w.size, ). - Quadrature weights. - m : int - Poloidal periodic DESC coordinate resolution on which the given - ``f`` and ``b_sup_z`` were evaluated. - n : int - Toroidal periodic DESC coordinate resolution on which the given - ``f`` and ``b_sup_z`` were evaluated. - integrand : callable - The composition operator on the set of functions in ``f`` that maps the - functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the - arrays in ``f`` as arguments as well as the additional keyword arguments: - ``B`` and ``pitch``. A quadrature will be performed to approximate the - bounce integral of ``integrand(*f,B=B,pitch=pitch)``. - f : list of jnp.ndarray - Shape (L * m * n, ). - Arguments to the callable ``integrand``. These should be real scalar-valued - functions in the bounce integrand evaluated on the periodic DESC coordinate - (ρ, θ, ζ) tensor-product grid. - b_sup_z : jnp.ndarray - Shape (L, 1, m, n). - Set of 2D Fourier spectral coefficients of B^ζ/|B|. - B : PiecewiseChebyshevBasis - Set of 1D Chebyshev spectral coefficients of |B| along field line. - {|B|_α : ζ ↦ |B|(α, ζ) | α ∈ A }. - T : PiecewiseChebyshevBasis - Set of 1D Chebyshev spectral coefficients of θ along field line. - {θ_α : ζ ↦ θ(α, ζ) | α ∈ A }. - pitch : jnp.ndarray - Shape (P, L). - λ values to evaluate the bounce integral at each field line. + alpha_0 : float + Starting field line poloidal label. + iota : jnp.ndarray + Shape (iota.size, ). + Rotational transform normalized by 2π. + num_transit : float + Number of ``period``s to follow field line. + period : float + Toroidal period after which to update label. Returns ------- - result : jnp.ndarray - Shape (P, L, num_well). - First axis enumerates pitch values. Second axis enumerates the field lines. - Last axis enumerates the bounce integrals. + alphas : jnp.ndarray + Shape (iota.size, num_transit). + Sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) that specify field line. """ - assert bp1.ndim == 3 - assert bp1.shape == bp2.shape - assert x.ndim == 1 - assert x.shape == w.shape - assert B.cheb.ndim == 3 - assert B.cheb.shape == T.cheb.shape - assert pitch.ndim == 2 - - P, L, num_well = bp1.shape - shape = (P, L, num_well, x.size) - # Quadrature points parameterized by ζ, for each pitch and flux surface. - Q_zeta = _flatten_matrix( - bijection_from_disc( - x, - bp1[..., jnp.newaxis], - bp2[..., jnp.newaxis], - ) - ) - # Quadrature points in (θ, ζ) coordinates. 
- Q_desc = jnp.stack([T.eval1d(Q_zeta), Q_zeta], axis=-1) - f = [interp_rfft2(Q_desc, f_i.reshape(L, 1, m, n)).reshape(shape) for f_i in f] - result = jnp.dot( - integrand( - *f, - B=B.eval1d(Q_zeta).reshape(shape), - pitch=pitch[..., jnp.newaxis, jnp.newaxis], - ) - / irfft2_non_uniform(Q_desc, b_sup_z, m, n).reshape(shape), - w, - ) - assert result.shape == (P, L, num_well) - return result - - -def required_names(): - """Return names in ``data_index`` required to compute bounce integrals.""" - return ["B^zeta", "|B|", "iota"] - - -# TODO: Assumes zeta = phi (alpha sequence) -def bounce_integral( - grid, - data, - M, - N, - desc_from_clebsch, - alpha_0=0.0, - num_transit=50, - quad=leggauss(21), - automorphism=(automorphism_sin, grad_automorphism_sin), - B_ref=1.0, - L_ref=1.0, - check=False, - plot=False, - **kwargs, -): - """Returns a method to compute bounce integrals. + # Δϕ (∂α/∂ϕ) = Δϕ ι̅ = Δϕ ι/2π = Δϕ data["iota"] + alphas = alpha_0 + period * iota[:, jnp.newaxis] * jnp.arange(num_transit) + return alphas - The bounce integral is defined as ∫ f(ℓ) dℓ, where - dℓ parameterizes the distance along the field line in meters, - λ is a constant proportional to the magnetic moment over energy, - |B| is the norm of the magnetic field, - f(ℓ) is the quantity to integrate along the field line, - and the boundaries of the integral are bounce points ζ₁, ζ₂ s.t. λ|B|(ζᵢ) = 1. - For a particle with fixed λ, bounce points are defined to be the location on the - field line such that the particle's velocity parallel to the magnetic field is zero. - The bounce integral is defined up to a sign. We choose the sign that corresponds to - the particle's guiding center trajectory traveling in the direction of increasing - field-line-following coordinate ζ. +def _transform_to_desc(grid, f): + """Transform to DESC spectral domain. Parameters ---------- grid : Grid Periodic tensor-product grid in (ρ, θ, ζ). Note that below shape notation defines - L = ``grid.num_rho``, m = ``grid.num_theta``, and n = ``grid.num_zeta``. - data : dict of jnp.ndarray - Data evaluated on grid. + L = ``grid.num_rho``. + f : jnp.ndarray + Function evaluated on ``grid``. + + Returns + ------- + a : jnp.ndarray + Coefficients 2D real FFT. + + """ + f = grid.meshgrid_reshape(f, order="rtz")[:, jnp.newaxis] + return rfft2(f, norm="forward") + + +def _transform_to_clebsch(grid, M, N, desc_from_clebsch, B): + """Transform to Clebsch spectral domain. + + Parameters + ---------- + grid : Grid + Periodic tensor-product grid in (ρ, θ, ζ). + Note that below shape notation defines + L = ``grid.num_rho``. M : int - Grid resolution in poloidal direction for Clebsch coordinates. + Grid resolution in poloidal direction for Clebsch coordinate grid. Preferably power of 2. A good choice is ``m``. If the poloidal stream function condenses the Fourier spectrum of |B| significantly, then a larger number may be beneficial. N : int - Grid resolution in toroidal direction for Clebsch coordinates. + Grid resolution in toroidal direction for Clebsch coordinate grid. Preferably power of 2. desc_from_clebsch : jnp.ndarray Shape (L * M * N, 3). DESC coordinate grid (ρ, θ, ζ) sourced from the Clebsch coordinate - tensor-product grid (ρ, α, ζ) returned by - ``FourierChebyshevBasis.nodes(M,N,domain=(0,2π))``. - alpha_0 : float - Starting field line poloidal label. - TODO: Allow multiple starting labels for near-rational surfaces. - Concatenate along second to last axis of cheb. - num_transit : int - Number of toroidal transits to follow field line. 
- quad : (jnp.ndarray, jnp.ndarray) - Quadrature points xₖ and weights wₖ for the approximate evaluation of an - integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Default is 21 points. - automorphism : (Callable, Callable) or None - The first callable should be an automorphism of the real interval [-1, 1]. - The second callable should be the derivative of the first. This map defines a - change of variable for the bounce integral. The choice made for the automorphism - will affect the performance of the quadrature method. - B_ref : float - Optional. Reference magnetic field strength for normalization. - Has no effect on computation, but may be useful for analysis. - L_ref : float - Optional. Reference length scale for normalization. - Has no effect on computation, but may be useful for analysis. - check : bool - Flag for debugging. Must be false for jax transformations. - plot : bool - Whether to plot stuff if ``check`` is true. Default is false. + tensor-product grid (ρ, α, ζ) returned by + ``FourierChebyshevBasis.nodes(M,N,domain=FourierBounce.domain)``. + B : jnp.ndarray + |B| evaluated on ``grid``. Returns ------- - bounce_integrate : callable - This callable method computes the bounce integral ∫ f(ℓ) dℓ for every - specified field line for every λ value in ``pitch``. - spline : tuple(ndarray, PiecewiseChebyshevBasis, PiecewiseChebyshevBasis) - alphas : jnp.ndarray - Poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) that specify field line. - B : PiecewiseChebyshevBasis - Set of 1D Chebyshev spectral coefficients of |B| along field line. - {|B|_α : ζ ↦ |B|(α, ζ) | α ∈ A }. - T : PiecewiseChebyshevBasis - Set of 1D Chebyshev spectral coefficients of θ along field line. - {θ_α : ζ ↦ θ(α, ζ) | α ∈ A }. + T, B : (FourierChebyshevBasis, FourierChebyshevBasis) """ - # Strictly increasing zeta knots enforces dζ > 0. - # To retain dℓ = (|B|/B^ζ) dζ > 0 after fixing dζ > 0, we require B^ζ = B⋅∇ζ > 0. - # This is equivalent to changing the sign of ∇ζ (or [∂ℓ/∂ζ]|ρ,a). - warnif( - check and kwargs.pop("warn", True) and jnp.any(data["B^zeta"] <= 0), - msg="(∂ℓ/∂ζ)|ρ,a > 0 is required. Enforcing positive B^ζ.", + T = FourierChebyshevBasis( + # θ is computed on the optimal nodes in Clebsch space, + # which is a tensor product node set in Clebsch space. + f=desc_from_clebsch[:, 1].reshape(grid.num_rho, M, N), + domain=FourierBounce.domain, ) - - # Resolution of periodic DESC coordinate tensor-product grid. - L, m, n = grid.num_rho, grid.num_theta, grid.num_zeta - # Transform to DESC spectral domain. - b_sup_z = rfft2( # B^ζ(θ,ζ) - (jnp.abs(data["B^zeta"]) / data["|B|"] * L_ref).reshape(L, 1, m, n), - norm="forward", - ) - domain = (0, 2 * jnp.pi) - # Transform to Clebsch spectral domain. - # We compute θ(α,ζ) to avoid nonlinear root finding later, and |B|(α,ζ) - # so that roots are computable without inferior local search algorithms. - T = FourierChebyshevBasis(desc_from_clebsch[:, 1].reshape(L, M, N), domain) + # Transformation from spectral domain of periodic basis to spectral + # domain of non-periodic basis is best done through interpolation. + # No shortcuts. B = FourierChebyshevBasis( - interp_rfft2( - xq=desc_from_clebsch[:, 1:].reshape(L, -1, 2), - f=data["|B|"].reshape(L, 1, m, n) / B_ref, - ).reshape(L, M, N), - domain, + f=interp_rfft2( + # Interpolate to optimal nodes in Clebsch space, + # which is not a tensor product node set in DESC space. 
+ xq=desc_from_clebsch[:, 1:].reshape(grid.num_rho, -1, 2), + f=grid.meshgrid_reshape(B, order="rtz")[:, jnp.newaxis], + ).reshape(grid.num_rho, M, N), + domain=FourierBounce.domain, ) - # Peel off field lines. - alphas = get_alphas(alpha_0, grid.compress(data["iota"]), num_transit, domain[-1]) - T = T.compute_cheb(alphas) - B = B.compute_cheb(alphas) - assert T.cheb.shape == B.cheb.shape == (L, num_transit, N) - # Evaluation of a set of Chebyshev series is always more efficient than evaluating - # single Fourier Chebyshev series, so we also get Chebyshev series for θ. - - x, w = quad - assert x.ndim == w.ndim == 1 - if automorphism is not None: - auto, grad_auto = automorphism - w = w * grad_auto(x) - # Recall bijection_from_disc(auto(x), ζ_b₁, ζ_b₂) = ζ. - x = auto(x) - - def bounce_integrate(integrand, f, pitch, weight=None, num_well=None): + # We compute |B|(α,ζ) so that roots are obtainable without inferior + # local search algorithms and θ(α,ζ) to avoid coordinate mapping + # of quadrature points in Clebsch space to DESC space. The root finding + # required to solve the nonlinear relation in the latter is not "local" + # because there is a global minima or unique mapping between coordinate + # systems. However, it should still be avoided as the number of + # quadrature points is higher due to the large number of integrals that + # need to be computed. + return T, B + + +class FourierBounce: + """Computes bounce integrals with pseudo-spectral methods. + + The bounce integral is defined as ∫ f(ℓ) dℓ, where + dℓ parameterizes the distance along the field line in meters, + λ is a constant proportional to the magnetic moment over energy, + |B| is the norm of the magnetic field, + f(ℓ) is the quantity to integrate along the field line, + and the boundaries of the integral are bounce points ζ₁, ζ₂ s.t. λ|B|(ζᵢ) = 1. + + For a particle with fixed λ, bounce points are defined to be the location on the + field line such that the particle's velocity parallel to the magnetic field is zero. + The bounce integral is defined up to a sign. We choose the sign that corresponds to + the particle's guiding center trajectory traveling in the direction of increasing + field-line-following coordinate ζ. + + Attributes + ---------- + B : PiecewiseChebyshevBasis + Set of 1D Chebyshev spectral coefficients of |B| along field line. + {|B|_α : ζ ↦ |B|(α, ζ) | α ∈ A } where A = (α₀, α₁, …, αₘ₋₁) is the + sequence of poloidal coordinates that specify the field line. + T : PiecewiseChebyshevBasis + Set of 1D Chebyshev spectral coefficients of θ along field line. + {θ_α : ζ ↦ θ(α, ζ) | α ∈ A } where A = (α₀, α₁, …, αₘ₋₁) is the + sequence of poloidal coordinates that specify the field line. + L : int + Number of flux surfaces to compute on. + num_transit : int + Number of toroidal transits to follow field line. + N : int + Chebyshev spectral resolution. + _b_sup_z : jnp.ndarray + Shape (L, 1, m, n). + Set of 2D (θ, ζ) Fourier spectral coefficients of B^ζ/|B|. + _x : jnp.ndarray + Shape (w.size, ). + Quadrature points in [-1, 1]. + _w : jnp.ndarray + Shape (w.size, ). + Quadrature weights. + _check : bool + Flag for debugging. Must be false for jax transformations. + _plot : bool + Whether to plot stuff if ``check`` is true. Default is false. 
+ + """ + + domain = (0, 2 * jnp.pi) + + # TODO: Assumes zeta = phi (alpha sequence) + def __init__( + self, + grid, + data, + M, + N, + desc_from_clebsch, + alpha_0=0.0, + num_transit=50, + quad=leggauss(21), + automorphism=(automorphism_sin, grad_automorphism_sin), + B_ref=1.0, + L_ref=1.0, + check=False, + plot=False, + **kwargs, + ): + """Returns an object to compute bounce integrals. + + Parameters + ---------- + grid : Grid + Periodic tensor-product grid in (ρ, θ, ζ). + Note that below shape notation defines + L = ``grid.num_rho``, m = ``grid.num_theta``, and n = ``grid.num_zeta``. + data : dict[str, jnp.ndarray] + Data evaluated on grid. Must include ``FourierBounce.required_names()``. + M : int + Grid resolution in poloidal direction for Clebsch coordinate grid. + Preferably power of 2. A good choice is ``m``. If the poloidal stream + function condenses the Fourier spectrum of |B| significantly, then a + larger number may be beneficial. + N : int + Grid resolution in toroidal direction for Clebsch coordinate grid. + Preferably power of 2. + desc_from_clebsch : jnp.ndarray + Shape (L * M * N, 3). + DESC coordinate grid (ρ, θ, ζ) sourced from the Clebsch coordinate + tensor-product grid (ρ, α, ζ) returned by + ``FourierChebyshevBasis.nodes(M,N,domain=FourierBounce.domain)``. + alpha_0 : float + Starting field line poloidal label. + TODO: Allow multiple starting labels for near-rational surfaces. + Concatenate along second to last axis of cheb. + num_transit : int + Number of toroidal transits to follow field line. + quad : (jnp.ndarray, jnp.ndarray) + Quadrature points xₖ and weights wₖ for the approximate evaluation of an + integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Default is 21 points. + automorphism : (Callable, Callable) or None + The first callable should be an automorphism of the real interval [-1, 1]. + The second callable should be the derivative of the first. This map defines + a change of variable for the bounce integral. The choice made for the + automorphism will affect the performance of the quadrature method. + B_ref : float + Optional. Reference magnetic field strength for normalization. + Has no effect on computation, but may be useful for analysis. + L_ref : float + Optional. Reference length scale for normalization. + Has no effect on computation, but may be useful for analysis. + check : bool + Flag for debugging. Must be false for jax transformations. + plot : bool + Whether to plot stuff if ``check`` is true. Default is false. + + """ + # Strictly increasing zeta knots enforces dζ > 0. + # To retain dℓ = (|B|/B^ζ) dζ > 0 after fixing dζ > 0, we require + # B^ζ = B⋅∇ζ > 0. This is equivalent to changing the sign of ∇ζ. + warnif( + check and kwargs.pop("warn", True) and jnp.any(data["B^zeta"] <= 0), + msg="(∂ℓ/∂ζ)|ρ,a > 0 is required. Enforcing positive B^ζ.", + ) + + T, B = _transform_to_clebsch(grid, M, N, desc_from_clebsch, data["|B|"] / B_ref) + alphas = _get_alphas( + alpha_0, + grid.compress(data["iota"]), + num_transit, + period=FourierBounce.domain[-1], + ) + # Peel off field lines. + self.B = B.compute_cheb(alphas) + # Evaluating a set of Chebyshev series is more efficient than evaluating + # single Fourier Chebyshev series, so we also get Chebyshev series for θ. + # This statement holds even if fast 2D transform methods are used, such + # as non-uniform fast transforms or fast multipoint transforms. 
+ self.T = T.compute_cheb(alphas) + assert self.B.cheb.shape == self.T.cheb.shape + assert self.B.cheb.shape == (grid.num_rho, num_transit, N) + + # Cache these since they are used in every integral. + self._b_sup_z = _transform_to_desc( + grid, jnp.abs(data["B^zeta"]) / data["|B|"] * L_ref + ) + self._x, self._w = get_quad_points(quad, automorphism) + self._check = check + self._plot = plot + + @staticmethod + def required_names(): + """Return names in ``data_index`` required to compute bounce integrals.""" + return ["B^zeta", "|B|", "iota"] + + @staticmethod + def reshape_data(grid, data, names): + """Reshape``data`` given by ``names`` for input to ``bounce_integrate``. + + Parameters + ---------- + grid : Grid + Periodic tensor-product grid in (ρ, θ, ζ). + data : dict[str, jnp.ndarray] + Data evaluated on grid. + names : list[str] + Strings of keys in ``data`` dict to reshape. + + Returns + ------- + f : list[jnp.ndarray] + List of reshaped data which may be given to ``bounce_integrate``. + + """ + if isinstance(names, str): + names = [names] + # Add dim to broadcast with axis of quadrature points. + f = [grid.meshgrid_reshape(data[name], "rtz")[:, jnp.newaxis] for name in names] + return f + + @property + def L(self): + """int: Number of flux surfaces to compute on.""" + return self.B.cheb.shape[0] + + @property + def num_transit(self): + """int: Number of toroidal transits to follow field line.""" + return self.B.cheb.shape[-2] + + @property + def N(self): + """int: Chebyshev spectral resolution.""" + return self.B.cheb.shape[-1] + + def bounce_integrate(self, pitch, integrand, f, weight=None, num_well=None): """Bounce integrate ∫ f(ℓ) dℓ. + Computes the bounce integral ∫ f(ℓ) dℓ for every specified field line + for every λ value in ``pitch``. + Parameters ---------- + pitch : jnp.ndarray + Shape (P, L). + λ values to evaluate the bounce integral at each field line. λ(ρ) is + specified by ``pitch[...,ρ]`` where in the latter the labels ρ are + interpreted as the index into the last axis that corresponds to that field + line. If two-dimensional, the first axis is the batch axis. integrand : callable The composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the arrays in ``f`` as arguments as well as the additional keyword arguments: ``B`` and ``pitch``. A quadrature will be performed to approximate the bounce integral of ``integrand(*f,B=B,pitch=pitch)``. - f : list of jnp.ndarray - Shape (L * m * n, ) or (L, m, n). + f : list[jnp.ndarray] + Shape (L, 1, m, n). Arguments to the callable ``integrand``. These should be real scalar-valued - functions in the bounce integrand evaluated on ``grid``. - pitch : jnp.ndarray - Shape (P, L). - λ values to evaluate the bounce integral at each field line. λ(ρ) is - specified by ``pitch[...,ρ]`` where in the latter the labels ρ are - interpreted as the index into the last axis that corresponds to that field - line. If two-dimensional, the first axis is the batch axis. + functions in the bounce integrand evaluated on the periodic DESC coordinate + (ρ, θ, ζ) tensor-product grid. weight : jnp.ndarray - Shape (L * m * n, ) or (L, m, n). + Shape (L, 1, m * n). If supplied, the bounce integral labeled by well j is weighted such that the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` evaluated at the deepest point in the magnetic well. 
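# --- Illustrative sketch (not part of the patch) -------------------------------
# Intended call pattern for this class, mirroring tests/test_fourier_bounce.py in
# this series; treat it as a sketch rather than a verified recipe. ``integrand``
# is the user-supplied callable described in the docstring, ``get_pitch`` comes
# from desc.integrals.bounce_integral, and ``desc_from_clebsch`` is the mapped
# Clebsch node set described in __init__.
#
#     names = FourierBounce.required_names() + ["min_tz |B|", "max_tz |B|", "cvdrift"]
#     data = eq.compute(names, grid=grid)
#     fb = FourierBounce(grid, data, M, N, desc_from_clebsch)
#     f = FourierBounce.reshape_data(grid, data, ["cvdrift"])
#     pitch = get_pitch(grid.compress(data["min_tz |B|"]),
#                       grid.compress(data["max_tz |B|"]), num=10)
#     result = fb.bounce_integrate(pitch=pitch, integrand=integrand, f=f, num_well=1)
# --------------------------------------------------------------------------------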
@@ -938,18 +985,87 @@ def bounce_integrate(integrand, f, pitch, weight=None, num_well=None): result : jnp.ndarray Shape (P, L, num_well). First axis enumerates pitch values. Second axis enumerates the field lines. - Last axis enumerates the bounce integrals.cd + Last axis enumerates the bounce integrals. """ errorif(weight is not None, NotImplementedError) pitch = jnp.atleast_2d(pitch) - bp1, bp2 = B.bounce_points(pitch, num_well) - if check: - B.check_bounce_points(bp1, bp2, pitch, plot) - result = _bounce_quadrature( - bp1, bp2, x, w, m, n, integrand, f, b_sup_z, B, T, pitch + bp1, bp2 = self.B.bounce_points(pitch, num_well) + if self._check: + self.B.check_bounce_points(bp1, bp2, pitch, self._plot) + result = self._bounce_quadrature(bp1, bp2, pitch, integrand, f) + assert result.shape == ( + pitch.shape[0], + self.L, + setdefault(num_well, self.N - 1), ) - assert result.shape == (pitch.shape[0], L, setdefault(num_well, N - 1)) return result - return bounce_integrate, (alphas, B, T) + def _bounce_quadrature(self, bp1, bp2, pitch, integrand, f): + """Bounce integrate ∫ f(ℓ) dℓ. + + Parameters + ---------- + bp1, bp2 : jnp.ndarray + Shape (P, L, num_well). + The field line-following coordinates of bounce points for a given pitch + along a field line. The pairs ``bp1`` and ``bp2`` form left and right + integration boundaries, respectively, for the bounce integrals. + pitch : jnp.ndarray + Shape (P, L). + λ values to evaluate the bounce integral at each field line. λ(ρ) is + specified by ``pitch[...,ρ]`` where in the latter the labels ρ are + interpreted as the index into the last axis that corresponds to that field + line. If two-dimensional, the first axis is the batch axis. + integrand : callable + The composition operator on the set of functions in ``f`` that maps the + functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the + arrays in ``f`` as arguments as well as the additional keyword arguments: + ``B`` and ``pitch``. A quadrature will be performed to approximate the + bounce integral of ``integrand(*f,B=B,pitch=pitch)``. + f : list[jnp.ndarray] + Shape (L, 1, m, n). + Arguments to the callable ``integrand``. These should be real scalar-valued + functions in the bounce integrand evaluated on the periodic DESC coordinate + (ρ, θ, ζ) tensor-product grid. + + Returns + ------- + result : jnp.ndarray + Shape (P, L, num_well). + First axis enumerates pitch values. Second axis enumerates the field lines. + Last axis enumerates the bounce integrals. 
+ + """ + assert bp1.ndim == 3 + assert bp1.shape == bp2.shape + assert pitch.ndim == 2 + assert self.L == f[0].shape[0] + m = f[0].shape[-2] + n = f[0].shape[-1] + W = bp1.shape[-1] # number of wells + shape = (pitch.shape[0], self.L, W, self._x.size) + + # quadrature points parameterized by ζ for each pitch and flux surface + Q_zeta = _flatten_matrix( + bijection_from_disc( + self._x, + bp1[..., jnp.newaxis], + bp2[..., jnp.newaxis], + ) + ) + # quadrature points in (θ, ζ) coordinates + Q_desc = jnp.stack([self.T.eval1d(Q_zeta), Q_zeta], axis=-1) + # interpolate and integrate + f = [interp_rfft2(Q_desc, f_i).reshape(shape) for f_i in f] + result = jnp.dot( + integrand( + *f, + B=self.B.eval1d(Q_zeta).reshape(shape), + pitch=pitch[..., jnp.newaxis, jnp.newaxis], + ) + / irfft2_non_uniform(Q_desc, self._b_sup_z, m, n).reshape(shape), + self._w, + ) + assert result.shape == (pitch.shape[0], self.L, W) + return result diff --git a/desc/integrals/_interp_utils.py b/desc/integrals/interp_utils.py similarity index 98% rename from desc/integrals/_interp_utils.py rename to desc/integrals/interp_utils.py index c7a8609704..4875202f21 100644 --- a/desc/integrals/_interp_utils.py +++ b/desc/integrals/interp_utils.py @@ -7,7 +7,7 @@ from desc.backend import dct, jnp, rfft, rfft2, take from desc.compute.utils import safediv -from desc.integrals._quad_utils import bijection_from_disc +from desc.integrals.quad_utils import bijection_from_disc from desc.utils import Index, errorif @@ -508,8 +508,7 @@ def poly_root( a_min = -jnp.inf if a_min is None else a_min[..., jnp.newaxis] a_max = +jnp.inf if a_max is None else a_max[..., jnp.newaxis] r = jnp.where( - # Order operations default to real part on complex numbers. - (jnp.abs(r.imag) <= eps) & (a_min <= r) & (r <= a_max), + (jnp.abs(r.imag) <= eps) & (a_min <= r.real) & (r.real <= a_max), r.real, sentinel, ) diff --git a/desc/integrals/_quad_utils.py b/desc/integrals/quad_utils.py similarity index 71% rename from desc/integrals/_quad_utils.py rename to desc/integrals/quad_utils.py index 8c4ab9ff77..17bddcd44c 100644 --- a/desc/integrals/_quad_utils.py +++ b/desc/integrals/quad_utils.py @@ -176,3 +176,61 @@ def leggausslob(deg): w = 2 / (deg * (deg - 1) * legval(x=x, c=c0) ** 2) return x, w + + +def get_quad_points(quad, automorphism): + """Apply automorphism to given quadrature points and weights. + + Parameters + ---------- + quad : (jnp.ndarray, jnp.ndarray) + Quadrature points xₖ and weights wₖ for the approximate evaluation of an + integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). + automorphism : (Callable, Callable) or None + The first callable should be an automorphism of the real interval [-1, 1]. + The second callable should be the derivative of the first. This map defines + a change of variable for the bounce integral. The choice made for the + automorphism will affect the performance of the quadrature method. + + Returns + ------- + x, w : (jnp.ndarray, jnp.ndarray) + Quadrature points in [-1, 1] and associated weights. + + """ + x, w = quad + assert x.ndim == w.ndim == 1 + assert x.shape == w.shape + if automorphism is not None: + # Apply automorphisms to supress singularities. + auto, grad_auto = automorphism + w = w * grad_auto(x) + # Recall bijection_from_disc(auto(x), ζ_b₁, ζ_b₂) = ζ. + x = auto(x) + return x, w + + +def composite_linspace(x, num): + """Returns linearly spaced points between every pair of points ``x``. + + Parameters + ---------- + x : jnp.ndarray + First axis has values to return linearly spaced values between. 
The remaining + axes are batch axes. Assumes input is sorted along first axis. + num : int + Number of points between every pair of points in ``x``. + + Returns + ------- + pts : jnp.ndarray + Shape ((x.shape[0] - 1) * num + x.shape[0], *x.shape[1:]). + Linearly spaced points between ``x``. + + """ + x = jnp.atleast_1d(x) + pts = jnp.linspace(x[:-1], x[1:], num + 1, endpoint=False) + pts = jnp.swapaxes(pts, 0, 1).reshape(-1, *x.shape[1:]) + pts = jnp.append(pts, x[jnp.newaxis, -1], axis=0) + assert pts.shape == ((x.shape[0] - 1) * num + x.shape[0], *x.shape[1:]) + return pts diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py index a09273657c..d805387b72 100644 --- a/tests/test_bounce_integral.py +++ b/tests/test_bounce_integral.py @@ -1,6 +1,5 @@ """Test bounce integral methods.""" -import inspect from functools import partial import numpy as np @@ -19,20 +18,8 @@ from desc.equilibrium import Equilibrium from desc.equilibrium.coords import get_rtz_grid from desc.examples import get -from desc.grid import Grid, LinearGrid -from desc.integrals._quad_utils import ( - automorphism_arcsin, - automorphism_sin, - bijection_from_disc, - bijection_to_disc, - grad_automorphism_arcsin, - grad_automorphism_sin, - grad_bijection_from_disc, - leggausslob, - tanh_sinh, -) +from desc.grid import LinearGrid from desc.integrals.bounce_integral import ( - _composite_linspace, _get_extrema, _interp_to_argmin_B_hard, _interp_to_argmin_B_soft, @@ -43,77 +30,14 @@ plot_field_line, required_names, ) -from desc.utils import only1 - - -@pytest.mark.unit -def test_reshape_convention(): - """Test the reshaping convention separates data across field lines.""" - rho = np.linspace(0, 1, 3) - alpha = np.linspace(0, 2 * np.pi, 4) - zeta = np.linspace(0, 6 * np.pi, 5) - grid = Grid.create_meshgrid([rho, alpha, zeta], coordinates="raz") - r, a, z = grid.nodes.T - # functions of zeta should separate along first two axes - # since those are contiguous, this should work - f = z.reshape(-1, zeta.size) - for i in range(1, f.shape[0]): - np.testing.assert_allclose(f[i - 1], f[i]) - # likewise for rho - f = r.reshape(rho.size, -1) - for i in range(1, f.shape[-1]): - np.testing.assert_allclose(f[:, i - 1], f[:, i]) - # test reshaping result won't mix data - f = (a**2 + z).reshape(rho.size, alpha.size, zeta.size) - for i in range(1, f.shape[0]): - np.testing.assert_allclose(f[i - 1], f[i]) - f = (r**2 + z).reshape(rho.size, alpha.size, zeta.size) - for i in range(1, f.shape[1]): - np.testing.assert_allclose(f[:, i - 1], f[:, i]) - f = (r**2 + a).reshape(rho.size, alpha.size, zeta.size) - for i in range(1, f.shape[-1]): - np.testing.assert_allclose(f[..., i - 1], f[..., i]) - - err_msg = "The ordering conventions are required for correctness." 
- assert "P, S, N" in inspect.getsource(bounce_points), err_msg - assert "S, knots.size" in inspect.getsource(bounce_integral), err_msg - assert 'meshgrid(a, b, c, indexing="ij")' in inspect.getsource( - Grid.create_meshgrid - ), err_msg - - -@pytest.mark.unit -def test_get_extrema(): - """Test computation of extrema of |B|.""" - start = -np.pi - end = -2 * start - k = np.linspace(start, end, 5) - B = CubicHermiteSpline( - k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) - ) - B_z_ra = B.derivative() - extrema, B_extrema = _get_extrema(k, B.c, B_z_ra.c) - mask = ~np.isnan(extrema) - extrema, B_extrema = extrema[mask], B_extrema[mask] - idx = np.argsort(extrema) - - extrema_scipy = np.sort(B_z_ra.roots(extrapolate=False)) - B_extrema_scipy = B(extrema_scipy) - assert extrema.size == extrema_scipy.size - np.testing.assert_allclose(extrema[idx], extrema_scipy) - np.testing.assert_allclose(B_extrema[idx], B_extrema_scipy) - - -@pytest.mark.unit -def test_composite_linspace(): - """Test this utility function useful for Newton-Cotes integration over pitch.""" - B_min_tz = np.array([0.1, 0.2]) - B_max_tz = np.array([1, 3]) - breaks = np.linspace(B_min_tz, B_max_tz, num=5) - b = _composite_linspace(breaks, num=3) - for i in range(breaks.shape[0]): - for j in range(breaks.shape[1]): - assert only1(np.isclose(breaks[i, j], b[:, j]).tolist()) +from desc.integrals.quad_utils import ( + automorphism_sin, + bijection_from_disc, + grad_automorphism_sin, + grad_bijection_from_disc, + leggausslob, + tanh_sinh, +) class TestBouncePoints: @@ -246,41 +170,6 @@ def test_extrema_first_and_before_bp2(): np.testing.assert_allclose(bp2, intersect[[2, 4, 6]], rtol=1e-5) -@pytest.mark.unit -def test_automorphism(): - """Test automorphisms.""" - a, b = -312, 786 - x = np.linspace(a, b, 10) - y = bijection_to_disc(x, a, b) - x_1 = bijection_from_disc(y, a, b) - np.testing.assert_allclose(x_1, x) - np.testing.assert_allclose(bijection_to_disc(x_1, a, b), y) - np.testing.assert_allclose(automorphism_arcsin(automorphism_sin(y)), y, atol=5e-7) - np.testing.assert_allclose(automorphism_sin(automorphism_arcsin(y)), y, atol=5e-7) - - np.testing.assert_allclose(grad_bijection_from_disc(a, b), 1 / (2 / (b - a))) - np.testing.assert_allclose( - grad_automorphism_sin(y), - 1 / grad_automorphism_arcsin(automorphism_sin(y)), - atol=2e-6, - ) - np.testing.assert_allclose( - 1 / grad_automorphism_arcsin(y), - grad_automorphism_sin(automorphism_arcsin(y)), - atol=2e-6, - ) - - # test that floating point error is acceptable - x = tanh_sinh(19)[0] - assert np.all(np.abs(x) < 1) - y = 1 / np.sqrt(1 - np.abs(x)) - assert np.isfinite(y).all() - y = 1 / np.sqrt(1 - np.abs(automorphism_sin(x))) - assert np.isfinite(y).all() - y = 1 / np.sqrt(1 - np.abs(automorphism_arcsin(x))) - assert np.isfinite(y).all() - - class TestBounceQuadrature: """Test bounce quadrature accuracy.""" @@ -399,6 +288,28 @@ def denominator(B, pitch): print(pitch[:, i, j]) +@pytest.mark.unit +def test_get_extrema(): + """Test computation of extrema of |B|.""" + start = -np.pi + end = -2 * start + k = np.linspace(start, end, 5) + B = CubicHermiteSpline( + k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) + ) + B_z_ra = B.derivative() + extrema, B_extrema = _get_extrema(k, B.c, B_z_ra.c) + mask = ~np.isnan(extrema) + extrema, B_extrema = extrema[mask], B_extrema[mask] + idx = np.argsort(extrema) + + extrema_scipy = np.sort(B_z_ra.roots(extrapolate=False)) + B_extrema_scipy = B(extrema_scipy) + assert extrema.size == extrema_scipy.size + 
np.testing.assert_allclose(extrema[idx], extrema_scipy) + np.testing.assert_allclose(B_extrema[idx], B_extrema_scipy) + + @pytest.mark.unit @pytest.mark.parametrize("func", [_interp_to_argmin_B_soft, _interp_to_argmin_B_hard]) def test_interp_to_argmin_B(func): diff --git a/tests/test_fourier_bounce.py b/tests/test_fourier_bounce.py index 6628dccd45..8e808d3a4c 100644 --- a/tests/test_fourier_bounce.py +++ b/tests/test_fourier_bounce.py @@ -12,15 +12,11 @@ from desc.equilibrium import Equilibrium from desc.equilibrium.coords import get_rtz_grid, map_coordinates from desc.examples import get -from desc.grid import Grid, LinearGrid -from desc.integrals._interp_utils import fourier_pts +from desc.grid import LinearGrid +from desc.integrals import FourierBounce from desc.integrals.bounce_integral import filter_bounce_points, get_pitch -from desc.integrals.fourier_bounce_integral import ( - FourierChebyshevBasis, - bounce_integral, - get_alphas, - required_names, -) +from desc.integrals.fourier_bounce_integral import FourierChebyshevBasis, _get_alphas +from desc.integrals.interp_utils import fourier_pts @pytest.mark.unit @@ -31,7 +27,7 @@ def test_alpha_sequence(alpha_0, iota, num_period, period): """Test field line poloidal label tracking.""" iota = np.atleast_1d(iota) - alphas = get_alphas(alpha_0, iota, num_period, period) + alphas = _get_alphas(alpha_0, iota, num_period, period) assert alphas.shape == (iota.size, num_period) for i in range(iota.size): assert np.unique(alphas[i]).size == num_period, f"{iota} is irrational" @@ -66,7 +62,7 @@ def test_bp1_first(self): f = self._periodic_fun(nodes, M, N) fcb = FourierChebyshevBasis(f, domain=domain) pcb = fcb.compute_cheb(fourier_pts(M)) - pitch = 0.5 # 1 / np.linspace(1, 4, 20) + pitch = 1 / np.linspace(1, 4, 20) bp1, bp2 = pcb.bounce_points(pitch) pcb.check_bounce_points(bp1, bp2, pitch) bp1, bp2 = filter_bounce_points(bp1, bp2) @@ -93,14 +89,16 @@ def test_fourier_chebyshev(rho=1, M=8, N=32, f=lambda B, pitch: B * pitch): grid = LinearGrid( rho=rho, M=eq.M_grid, N=eq.N_grid, sym=False, NFP=eq.NFP ) # check if NFP!=1 works - data = eq.compute(names=required_names() + ["min_tz |B|", "max_tz |B|"], grid=grid) - bounce_integrate, _ = bounce_integral( + data = eq.compute( + names=FourierBounce.required_names() + ["min_tz |B|", "max_tz |B|"], grid=grid + ) + fb = FourierBounce( grid, data, M, N, desc_from_clebsch, check=True, warn=False ) # TODO check true pitch = get_pitch( grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 ) - result = bounce_integrate(f, [], pitch) # noqa: F841 + result = fb.bounce_integrate(f, [], pitch) # noqa: F841 @pytest.mark.unit @@ -114,18 +112,12 @@ def test_drift(): np.testing.assert_allclose(rho, 0.5) # Make a set of nodes along a single fieldline. 
- grid_rtz = Grid.create_meshgrid( - [ - rho, - np.linspace(0, 2 * np.pi, eq.M_grid), - np.linspace(0, 2 * np.pi, eq.N_grid + 1), - ], - ) - data = eq.compute(["iota"], grid=grid_rtz) - iota = grid_rtz.compress(data["iota"]).item() + grid_fsa = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, sym=eq.sym, NFP=eq.NFP) + data = eq.compute(["iota"], grid=grid_fsa) + iota = grid_fsa.compress(data["iota"]).item() alpha = 0 zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) - grid_raz = get_rtz_grid( + grid = get_rtz_grid( eq, rho, alpha, @@ -135,7 +127,7 @@ def test_drift(): iota=np.array([iota]), ) data = eq.compute( - required_names() + FourierBounce.required_names() + [ "cvdrift", "gbdrift", @@ -146,7 +138,7 @@ def test_drift(): "psi", "a", ], - grid=grid_raz, + grid=grid, ) np.testing.assert_allclose(data["psi"], psi) np.testing.assert_allclose(data["iota"], iota) @@ -156,21 +148,25 @@ def test_drift(): data["rho"] = rho data["alpha"] = alpha data["zeta"] = zeta - data["psi"] = grid_raz.compress(data["psi"]) - data["iota"] = grid_raz.compress(data["iota"]) - data["shear"] = grid_raz.compress(data["shear"]) + data["psi"] = grid.compress(data["psi"]) + data["iota"] = grid.compress(data["iota"]) + data["shear"] = grid.compress(data["shear"]) + # Compute analytic approximation. drift_analytic, cvdrift, gbdrift, pitch = _drift_analytic(data) - # Compute numerical result. M, N = eq.M_grid, 100 - clebsch = FourierChebyshevBasis.nodes(M=eq.M_grid, N=N, rho=rho) - data_2 = eq.compute(names=required_names() + ["cvdrift", "gbdrift"], grid=grid_rtz) + clebsch = FourierChebyshevBasis.nodes( + M=eq.M_grid, N=N, domain=FourierBounce.domain, rho=rho + ) + data_2 = eq.compute( + names=FourierBounce.required_names() + ["cvdrift", "gbdrift"], grid=grid + ) normalization = -np.sign(data["psi"]) * data["B ref"] * data["a"] ** 2 cvdrift = data_2["cvdrift"] * normalization gbdrift = data_2["gbdrift"] * normalization - bounce_integrate, _ = bounce_integral( - grid_rtz, + fb = FourierBounce( + grid, data_2, M, N, @@ -197,13 +193,13 @@ def integrand_num(cvdrift, gbdrift, B, pitch): def integrand_den(B, pitch): return 1 / jnp.sqrt(1 - pitch * B) - drift_numerical_num = bounce_integrate( + drift_numerical_num = fb.bounce_integrate( integrand=integrand_num, f=[cvdrift, gbdrift], pitch=pitch[:, np.newaxis], num_well=1, ) - drift_numerical_den = bounce_integrate( + drift_numerical_den = fb.bounce_integrate( integrand=integrand_den, f=[], pitch=pitch[:, np.newaxis], diff --git a/tests/test_interp_utils.py b/tests/test_interp_utils.py index 9cfd2239eb..14b8456a28 100644 --- a/tests/test_interp_utils.py +++ b/tests/test_interp_utils.py @@ -14,7 +14,7 @@ from scipy.fft import idct as sidct from desc.backend import dct, idct, jnp, rfft -from desc.integrals._interp_utils import ( +from desc.integrals.interp_utils import ( cheb_from_dct, cheb_pts, harmonic, @@ -26,7 +26,7 @@ polyder_vec, polyval_vec, ) -from desc.integrals._quad_utils import bijection_to_disc +from desc.integrals.quad_utils import bijection_to_disc def filter_not_nan(a): diff --git a/tests/test_quad_utils.py b/tests/test_quad_utils.py new file mode 100644 index 0000000000..130b2732b8 --- /dev/null +++ b/tests/test_quad_utils.py @@ -0,0 +1,64 @@ +"""Tests for quadrature utilities.""" + +import numpy as np +import pytest + +from desc.integrals.quad_utils import ( + automorphism_arcsin, + automorphism_sin, + bijection_from_disc, + bijection_to_disc, + composite_linspace, + grad_automorphism_arcsin, + grad_automorphism_sin, + 
grad_bijection_from_disc, + tanh_sinh, +) +from desc.utils import only1 + + +@pytest.mark.unit +def test_composite_linspace(): + """Test this utility function useful for Newton-Cotes integration over pitch.""" + B_min_tz = np.array([0.1, 0.2]) + B_max_tz = np.array([1, 3]) + breaks = np.linspace(B_min_tz, B_max_tz, num=5) + b = composite_linspace(breaks, num=3) + for i in range(breaks.shape[0]): + for j in range(breaks.shape[1]): + assert only1(np.isclose(breaks[i, j], b[:, j]).tolist()) + + +@pytest.mark.unit +def test_automorphism(): + """Test automorphisms.""" + a, b = -312, 786 + x = np.linspace(a, b, 10) + y = bijection_to_disc(x, a, b) + x_1 = bijection_from_disc(y, a, b) + np.testing.assert_allclose(x_1, x) + np.testing.assert_allclose(bijection_to_disc(x_1, a, b), y) + np.testing.assert_allclose(automorphism_arcsin(automorphism_sin(y)), y, atol=5e-7) + np.testing.assert_allclose(automorphism_sin(automorphism_arcsin(y)), y, atol=5e-7) + + np.testing.assert_allclose(grad_bijection_from_disc(a, b), 1 / (2 / (b - a))) + np.testing.assert_allclose( + grad_automorphism_sin(y), + 1 / grad_automorphism_arcsin(automorphism_sin(y)), + atol=2e-6, + ) + np.testing.assert_allclose( + 1 / grad_automorphism_arcsin(y), + grad_automorphism_sin(automorphism_arcsin(y)), + atol=2e-6, + ) + + # test that floating point error is acceptable + x = tanh_sinh(19)[0] + assert np.all(np.abs(x) < 1) + y = 1 / np.sqrt(1 - np.abs(x)) + assert np.isfinite(y).all() + y = 1 / np.sqrt(1 - np.abs(automorphism_sin(x))) + assert np.isfinite(y).all() + y = 1 / np.sqrt(1 - np.abs(automorphism_arcsin(x))) + assert np.isfinite(y).all() From 2724c5b84dc2834db438f68e032de53124b88dfe Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 22 Aug 2024 16:43:33 -0400 Subject: [PATCH 210/241] Making progress on tests. All the bounce pointand splines seem to working fine but the quadrature seems innaccurate. --- desc/integrals/bounce_integral.py | 2 +- desc/integrals/fourier_bounce_integral.py | 132 ++++++++++++++-------- desc/integrals/interp_utils.py | 46 ++++++-- tests/test_fourier_bounce.py | 28 ++--- tests/test_grid.py | 13 +-- 5 files changed, 138 insertions(+), 83 deletions(-) diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 3cf3c3df60..bf8a1677dd 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -819,7 +819,7 @@ def bounce_integral( ) # Strictly increasing zeta knots enforces dζ > 0. # To retain dℓ = (|B|/B^ζ) dζ > 0 after fixing dζ > 0, we require B^ζ = B⋅∇ζ > 0. - # This is equivalent to changing the sign of ∇ζ (or [∂ℓ/∂ζ]|ρ,a). + # This is equivalent to changing the sign of ∇ζ or [∂ℓ/∂ζ]|ρ,a. # Recall dζ = ∇ζ⋅dR, implying 1 = ∇ζ⋅(e_ζ|ρ,a). Hence, a sign change in ∇ζ # requires the same sign change in e_ζ|ρ,a to retain the metric identity. B_sup_z = jnp.abs(data["B^zeta"]).reshape(-1, knots.size) * L_ref / B_ref diff --git a/desc/integrals/fourier_bounce_integral.py b/desc/integrals/fourier_bounce_integral.py index d739ada397..2783af0bec 100644 --- a/desc/integrals/fourier_bounce_integral.py +++ b/desc/integrals/fourier_bounce_integral.py @@ -57,7 +57,7 @@ class FourierChebyshevBasis: Chebyshev spectral resolution. lobatto : bool Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) - or interior roots grid for Chebyshev points. + instead of the interior roots grid for Chebyshev points. domain : (float, float) Domain for y coordinates. @@ -76,7 +76,7 @@ def __init__(self, f, domain, lobatto=False): Domain for y coordinates. 
lobatto : bool Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) - or interior roots grid for Chebyshev points. + instead of the interior roots grid for Chebyshev points. """ self.M = f.shape[-2] @@ -101,7 +101,7 @@ def nodes(M, N, domain, lobatto=False, **kwargs): Domain for y coordinates. lobatto : bool Whether to use the Gauss-Lobatto (Extrema-plus-Endpoint) - or interior roots grid for Chebyshev points. + instead of the interior roots grid for Chebyshev points. Returns ------- @@ -496,33 +496,35 @@ def check_bounce_points(self, bp1, bp2, pitch, plot=True, **kwargs): for l in np.ndindex(cheb.shape[:-2]): for p in range(pitch.shape[0]): - if not (err_1[p, l] or err_2[p, l] or err_3[p, l]): + idx = (p, *l) + if not (err_1[idx] or err_2[idx] or err_3[idx]): continue - _bp1 = bp1[p, l][mask[p, l]] - _bp2 = bp2[p, l][mask[p, l]] + _bp1 = bp1[idx][mask[idx]] + _bp2 = bp2[idx][mask[idx]] if plot: self.plot_field_line( - cheb[l], - pitch=pitch[p, l], + cheb=cheb[l], bp1=_bp1, bp2=_bp2, - title_id=f"{p},{l}", + pitch=pitch[idx], + title_id=str(idx), **kwargs, ) print(" bp1 | bp2") print(jnp.column_stack([_bp1, _bp2])) - assert not err_1[p, l], "Bounce points have an inversion.\n" - assert not err_2[p, l], "Detected discontinuity.\n" - assert not err_3[p, l], ( + assert not err_1[idx], "Bounce points have an inversion.\n" + assert not err_2[idx], "Detected discontinuity.\n" + assert not err_3[idx], ( "Detected |B| > 1/λ in well. Increase Chebyshev resolution.\n" - f"{B_m[p, l][mask[p, l]]} > {1 / pitch[p, l] + self._eps}" + f"{B_m[idx][mask[idx]]} > {1 / pitch[idx] + self._eps}" ) + idx = (slice(None), *l) if plot: self.plot_field_line( - cheb[l], - pitch=pitch[:, l], - bp1=bp1[:, l], - bp2=bp2[:, l], + cheb=cheb[l], + bp1=bp1[idx], + bp2=bp2[idx], + pitch=pitch[idx], title_id=str(l), **kwargs, ) @@ -548,11 +550,14 @@ def plot_field_line( num : int Number of ζ points to plot. Pick a big number. bp1 : jnp.ndarray + Shape (P, W). Bounce points with (∂|B|/∂ζ)|ρ,α <= 0. bp2 : jnp.ndarray + Shape (P, W). Bounce points with (∂|B|/∂ζ)|ρ,α >= 0. pitch : jnp.ndarray - λ value. + Shape (P, ). + λ values. title : str Plot title. title_id : str @@ -584,7 +589,12 @@ def add(lines): add(ax.plot(z, self.eval1d(z, cheb), label=r"$\vert B \vert (\zeta)$")) if pitch is not None: - b = 1 / jnp.atleast_1d(pitch) + b = 1 / jnp.atleast_1d(jnp.squeeze(pitch)) + assert b.ndim == 1 + bp1, bp2 = jnp.atleast_2d(bp1, bp2) + assert bp1.ndim == bp2.ndim == 2 + assert b.shape[0] == bp1.shape[0] + for val in b: add( ax.axhline( @@ -594,7 +604,6 @@ def add(lines): label=r"$1 / \lambda$", ) ) - bp1, bp2 = jnp.atleast_2d(bp1, bp2) for i in range(bp1.shape[0]): if bp1.shape == bp2.shape: _bp1, _bp2 = filter_bounce_points(bp1[i], bp2[i]) @@ -674,11 +683,14 @@ def _transform_to_desc(grid, f): Returns ------- a : jnp.ndarray - Coefficients 2D real FFT. + Shape (grid.num_rho, 1, grid.num_theta, grid.num_zeta // 2 + 1) + Coefficients of 2D real FFT. """ - f = grid.meshgrid_reshape(f, order="rtz")[:, jnp.newaxis] - return rfft2(f, norm="forward") + f = grid.meshgrid_reshape(f, order="rtz") + a = rfft2(f, norm="forward")[:, jnp.newaxis] + assert a.shape == (grid.num_rho, 1, grid.num_theta, grid.num_zeta // 2 + 1) + return a def _transform_to_clebsch(grid, M, N, desc_from_clebsch, B): @@ -736,7 +748,12 @@ def _transform_to_clebsch(grid, M, N, desc_from_clebsch, B): # because there is a global minima or unique mapping between coordinate # systems. 
However, it should still be avoided as the number of # quadrature points is higher due to the large number of integrals that - # need to be computed. + # need to be computed. (An alternative would be to also transform functions + # in the integrand of the quadrature like |B| and evaluate quadrature + # points in Clebsch space. This may be less efficient if there are + # multiple functions in the integrand that need to be transformed + # independently, perhaps because the composition defined by the + # integrand is less smooth than the individual components.) return T, B @@ -772,23 +789,10 @@ class FourierBounce: Number of toroidal transits to follow field line. N : int Chebyshev spectral resolution. - _b_sup_z : jnp.ndarray - Shape (L, 1, m, n). - Set of 2D (θ, ζ) Fourier spectral coefficients of B^ζ/|B|. - _x : jnp.ndarray - Shape (w.size, ). - Quadrature points in [-1, 1]. - _w : jnp.ndarray - Shape (w.size, ). - Quadrature weights. - _check : bool - Flag for debugging. Must be false for jax transformations. - _plot : bool - Whether to plot stuff if ``check`` is true. Default is false. """ - domain = (0, 2 * jnp.pi) + domain = (0, 4 * jnp.pi) # TODO: Assumes zeta = phi (alpha sequence) def __init__( @@ -847,19 +851,22 @@ def __init__( automorphism will affect the performance of the quadrature method. B_ref : float Optional. Reference magnetic field strength for normalization. - Has no effect on computation, but may be useful for analysis. L_ref : float Optional. Reference length scale for normalization. - Has no effect on computation, but may be useful for analysis. check : bool Flag for debugging. Must be false for jax transformations. plot : bool Whether to plot stuff if ``check`` is true. Default is false. """ + errorif( + grid.sym, NotImplementedError, msg="Need grid that samples full domain." + ) # Strictly increasing zeta knots enforces dζ > 0. # To retain dℓ = (|B|/B^ζ) dζ > 0 after fixing dζ > 0, we require - # B^ζ = B⋅∇ζ > 0. This is equivalent to changing the sign of ∇ζ. + # B^ζ = B⋅∇ζ > 0. This is equivalent to changing the sign of ∇ζ or [∂ℓ/∂ζ]|ρ,a. + # Recall dζ = ∇ζ⋅dR, implying 1 = ∇ζ⋅(e_ζ|ρ,a). Hence, a sign change in ∇ζ + # requires the same sign change in e_ζ|ρ,a to retain the metric identity. warnif( check and kwargs.pop("warn", True) and jnp.any(data["B^zeta"] <= 0), msg="(∂ℓ/∂ζ)|ρ,a > 0 is required. Enforcing positive B^ζ.", @@ -889,6 +896,42 @@ def __init__( self._x, self._w = get_quad_points(quad, automorphism) self._check = check self._plot = plot + self.m, self.n = grid.num_theta, grid.num_zeta + + @staticmethod + def desc_from_clebsch(eq, rho, M, N, **kwargs): + """Return DESC coordinates of optimal Fourier Chebyshev basis nodes. + + Parameters + ---------- + eq : Equilibrium + Equilibrium to use defining the coordinate mapping. + rho : jnp.ndarray + Flux surface coordinate values. + M : int + Grid resolution in poloidal direction for Clebsch coordinate grid. + Preferably power of 2. A good choice is ``m``. If the poloidal stream + function condenses the Fourier spectrum of |B| significantly, then a + larger number may be beneficial. + N : int + Grid resolution in toroidal direction for Clebsch coordinate grid. + Preferably power of 2. + + Returns + ------- + coords : jnp.ndarray + Shape (L * M * N, 3). + DESC coordinate grid (ρ, θ, ζ) sourced from the Clebsch coordinate + tensor-product grid (ρ, α, ζ). 
+ + """ + coords = FourierChebyshevBasis.nodes(M, N, FourierBounce.domain, rho=rho) + return eq.map_coordinates( + coords, + inbasis=("rho", "alpha", "zeta"), + period=(jnp.inf, 2 * jnp.pi, jnp.inf), + **kwargs, + ) @staticmethod def required_names(): @@ -961,7 +1004,7 @@ def bounce_integrate(self, pitch, integrand, f, weight=None, num_well=None): functions in the bounce integrand evaluated on the periodic DESC coordinate (ρ, θ, ζ) tensor-product grid. weight : jnp.ndarray - Shape (L, 1, m * n). + Shape (L, 1, m, n). If supplied, the bounce integral labeled by well j is weighted such that the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` evaluated at the deepest point in the magnetic well. @@ -1040,9 +1083,6 @@ def _bounce_quadrature(self, bp1, bp2, pitch, integrand, f): assert bp1.ndim == 3 assert bp1.shape == bp2.shape assert pitch.ndim == 2 - assert self.L == f[0].shape[0] - m = f[0].shape[-2] - n = f[0].shape[-1] W = bp1.shape[-1] # number of wells shape = (pitch.shape[0], self.L, W, self._x.size) @@ -1064,7 +1104,7 @@ def _bounce_quadrature(self, bp1, bp2, pitch, integrand, f): B=self.B.eval1d(Q_zeta).reshape(shape), pitch=pitch[..., jnp.newaxis, jnp.newaxis], ) - / irfft2_non_uniform(Q_desc, self._b_sup_z, m, n).reshape(shape), + / irfft2_non_uniform(Q_desc, self._b_sup_z, self.m, self.n).reshape(shape), self._w, ) assert result.shape == (pitch.shape[0], self.L, W) diff --git a/desc/integrals/interp_utils.py b/desc/integrals/interp_utils.py index 4875202f21..3180ddea90 100644 --- a/desc/integrals/interp_utils.py +++ b/desc/integrals/interp_utils.py @@ -10,18 +10,43 @@ from desc.integrals.quad_utils import bijection_from_disc from desc.utils import Index, errorif - -# TODO: Transformation to make nodes uniform Boyd eq. 16.46 pg 336. +# TODO: Transformation to make nodes uniform Boyd eq. 16.46 pg. 336. # Shouldn't really change locations of complex poles for us, so convergence -# rate will still be good. +# rate will still be good. This will basically do spectral condensation. + + def cheb_pts(N, lobatto=False, domain=(-1, 1)): - """Get ``N`` Chebyshev points mapped to given domain.""" + """Get ``N`` Chebyshev points mapped to given domain. + + Notes + ----- + This is a common definition of the Chebyshev points (see Boyd, Chebyshev and + Fourier Spectral Methods p. 498). These are the points demanded by Discrete + Cosine Transformations to interpolate Chebyshev series because the cosine + basis for the DCT is defined on [0, π]. + + They differ in ordering from the points returned by + ``numpy.polynomial.chebyshev.chebpts1`` and + ``numpy.polynomial.chebyshev.chebpts2``. + + Parameters + ---------- + N : int + Number of points. + lobatto : bool + Whether to return the Gauss-Lobatto (extrema-plus-endpoint) + instead of the interior roots for Chebyshev points. + domain : (float, float) + Domain for points. + + Returns + ------- + pts : jnp.ndarray + Shape (N, ). + Chebyshev points mapped to given domain. + + """ n = jnp.arange(N) - # These are the standard definitions of the Chebyshev points. - # Reference: Wikipedia or Boyd p. 498. These are the points demanded by - # Discrete Cosine Transformations to interpolate Chebyshev series because - # the cosine basis for the DCT is defined on [0, π]. These points differ - # from numpy's chebpts1 and chebpts2 in ordering. if lobatto: y = jnp.cos(jnp.pi * n / (N - 1)) else: @@ -106,7 +131,8 @@ def harmonic_vander(x, M): # TODO: For inverse transforms, do multipoint evaluation with FFT. 
# FFT cost is 𝒪(M N log[M N]) while direct evaluation is 𝒪(M² N²). # Chapter 10, https://doi.org/10.1017/CBO9781139856065. -# Likely better than using NFFT to evaluate f(xq) given fourier +# Right now we just do an MMT with the Vandermode matrix. +# Multipoint is likely better than using NFFT to evaluate f(xq) given fourier # coefficients because evaluation points are quadratically packed near edges as # required by quadrature to avoid runge. NFFT is only approximation anyway. # https://github.com/flatironinstitute/jax-finufft. diff --git a/tests/test_fourier_bounce.py b/tests/test_fourier_bounce.py index 8e808d3a4c..8f60ab9517 100644 --- a/tests/test_fourier_bounce.py +++ b/tests/test_fourier_bounce.py @@ -155,33 +155,25 @@ def test_drift(): # Compute analytic approximation. drift_analytic, cvdrift, gbdrift, pitch = _drift_analytic(data) # Compute numerical result. - M, N = eq.M_grid, 100 - clebsch = FourierChebyshevBasis.nodes( - M=eq.M_grid, N=N, domain=FourierBounce.domain, rho=rho - ) + grid = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP) data_2 = eq.compute( names=FourierBounce.required_names() + ["cvdrift", "gbdrift"], grid=grid ) normalization = -np.sign(data["psi"]) * data["B ref"] * data["a"] ** 2 - cvdrift = data_2["cvdrift"] * normalization - gbdrift = data_2["gbdrift"] * normalization + data_2["cvdrift"] = data_2["cvdrift"] * normalization + data_2["gbdrift"] = data_2["gbdrift"] * normalization + M, N = eq.M_grid, 20 fb = FourierBounce( grid, data_2, M, N, - desc_from_clebsch=map_coordinates( - eq, - clebsch, - inbasis=("rho", "alpha", "zeta"), - period=(np.inf, 2 * np.pi, np.inf), - iota=np.broadcast_to(data["iota"], (M * N)), - ), + desc_from_clebsch=FourierBounce.desc_from_clebsch(eq, rho, M, N), alpha_0=data["alpha"], - num_transit=5, + num_transit=1, B_ref=data["B ref"], L_ref=data["a"], - quad=leggauss(28), # converges to absolute and relative tolerance of 1e-7 + quad=leggauss(50), # converges to absolute and relative tolerance of 1e-7 check=True, plot=True, ) @@ -194,15 +186,15 @@ def integrand_den(B, pitch): return 1 / jnp.sqrt(1 - pitch * B) drift_numerical_num = fb.bounce_integrate( - integrand=integrand_num, - f=[cvdrift, gbdrift], pitch=pitch[:, np.newaxis], + integrand=integrand_num, + f=FourierBounce.reshape_data(grid, data_2, ["cvdrift", "gbdrift"]), num_well=1, ) drift_numerical_den = fb.bounce_integrate( + pitch=pitch[:, np.newaxis], integrand=integrand_den, f=[], - pitch=pitch[:, np.newaxis], num_well=1, ) drift_numerical = np.squeeze(drift_numerical_num / drift_numerical_den) diff --git a/tests/test_grid.py b/tests/test_grid.py index 160c6aac9c..67ea849209 100644 --- a/tests/test_grid.py +++ b/tests/test_grid.py @@ -791,26 +791,23 @@ def test_meshgrid_reshape(self): zeta = np.linspace(0, 6 * np.pi, 5) grid = Grid.create_meshgrid([rho, alpha, zeta], coordinates="raz") r, a, z = grid.nodes.T - r = grid.meshgrid_reshape(r, "raz") - a = grid.meshgrid_reshape(a, "raz") - z = grid.meshgrid_reshape(z, "raz") # functions of zeta should separate along first two axes # since those are contiguous, this should work - f = z.reshape(-1, zeta.size) + f = grid.meshgrid_reshape(z, "raz").reshape(-1, zeta.size) for i in range(1, f.shape[0]): np.testing.assert_allclose(f[i - 1], f[i]) # likewise for rho - f = r.reshape(rho.size, -1) + f = grid.meshgrid_reshape(r, "raz").reshape(rho.size, -1) for i in range(1, f.shape[-1]): np.testing.assert_allclose(f[:, i - 1], f[:, i]) # test reshaping result won't mix data - f = (a**2 + z).reshape(rho.size, alpha.size, 
zeta.size) + f = grid.meshgrid_reshape(a**2 + z, "raz") for i in range(1, f.shape[0]): np.testing.assert_allclose(f[i - 1], f[i]) - f = (r**2 + z).reshape(rho.size, alpha.size, zeta.size) + f = grid.meshgrid_reshape(r**2 + z, "raz") for i in range(1, f.shape[1]): np.testing.assert_allclose(f[:, i - 1], f[:, i]) - f = (r**2 + a).reshape(rho.size, alpha.size, zeta.size) + f = grid.meshgrid_reshape(r**2 + a, "raz") for i in range(1, f.shape[-1]): np.testing.assert_allclose(f[..., i - 1], f[..., i]) From c683bb8b40cc405f820f128b3e1f3414e3c59632 Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 22 Aug 2024 16:56:53 -0400 Subject: [PATCH 211/241] Fix comment --- desc/integrals/fourier_bounce_integral.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/desc/integrals/fourier_bounce_integral.py b/desc/integrals/fourier_bounce_integral.py index 2783af0bec..0f1fc7ad9f 100644 --- a/desc/integrals/fourier_bounce_integral.py +++ b/desc/integrals/fourier_bounce_integral.py @@ -882,9 +882,9 @@ def __init__( # Peel off field lines. self.B = B.compute_cheb(alphas) # Evaluating a set of Chebyshev series is more efficient than evaluating - # single Fourier Chebyshev series, so we also get Chebyshev series for θ. - # This statement holds even if fast 2D transform methods are used, such - # as non-uniform fast transforms or fast multipoint transforms. + # single Fourier Chebyshev series, so we get the Chebyshev series for + # all the other functions whose Fourier Chebyshev series is available. + # This statement holds even if fast 2D transform methods are used. self.T = T.compute_cheb(alphas) assert self.B.cheb.shape == self.T.cheb.shape assert self.B.cheb.shape == (grid.num_rho, num_transit, N) From 32d64e91b43fcee9dce72f2fba3225c5c10e8437 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 25 Aug 2024 02:57:05 -0400 Subject: [PATCH 212/241] Commit before I start modifying bounce_integral.py --- desc/equilibrium/coords.py | 3 + desc/integrals/__init__.py | 6 +- desc/integrals/_bounce_utils.py | 175 +++ desc/integrals/bounce_integral.py | 73 +- desc/integrals/fourier_bounce_integral.py | 1363 ++++++++++++++------- desc/integrals/interp_utils.py | 314 ++--- desc/integrals/quad_utils.py | 3 +- tests/test_fourier_bounce.py | 26 +- 8 files changed, 1277 insertions(+), 686 deletions(-) create mode 100644 desc/integrals/_bounce_utils.py diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index a89742b40f..f9b831986b 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -684,6 +684,9 @@ def get_rtz_grid( jitable : bool, optional If false the returned grid has additional attributes. Required to be false to retain nodes at magnetic axis. + kwargs : dict + Additional parameters to supply to the coordinate mapping function. + See ``desc.equilibrium.coords.map_coordinates``. 
Returns ------- diff --git a/desc/integrals/__init__.py b/desc/integrals/__init__.py index 0ac381f051..559e054166 100644 --- a/desc/integrals/__init__.py +++ b/desc/integrals/__init__.py @@ -1,10 +1,6 @@ """Classes for function integration.""" -from .fourier_bounce_integral import ( - FourierBounce, - FourierChebyshevBasis, - PiecewiseChebyshevBasis, -) +from .fourier_bounce_integral import Bounce2D, ChebyshevBasisSet, FourierChebyshevBasis from .singularities import ( DFTInterpolator, FFTInterpolator, diff --git a/desc/integrals/_bounce_utils.py b/desc/integrals/_bounce_utils.py new file mode 100644 index 0000000000..04bc899a3c --- /dev/null +++ b/desc/integrals/_bounce_utils.py @@ -0,0 +1,175 @@ +from functools import partial + +from orthax.chebyshev import chebroots + +from desc.backend import flatnonzero, jnp, put +from desc.integrals.quad_utils import composite_linspace +from desc.utils import setdefault + +# TODO: Boyd's method 𝒪(N²) instead of Chebyshev companion matrix 𝒪(N³). +# John P. Boyd, Computing real roots of a polynomial in Chebyshev series +# form through subdivision. https://doi.org/10.1016/j.apnum.2005.09.007. +chebroots_vec = jnp.vectorize(chebroots, signature="(m)->(n)") + + +def flatten_matrix(y): + """Flatten batch of matrix to batch of vector.""" + return y.reshape(*y.shape[:-2], -1) + + +def subtract(c, k): + """Subtract ``k`` from last axis of ``c``, obeying numpy broadcasting.""" + c_0 = c[..., 0] - k + c = jnp.concatenate( + [ + c_0[..., jnp.newaxis], + jnp.broadcast_to(c[..., 1:], (*c_0.shape, c.shape[-1] - 1)), + ], + axis=-1, + ) + return c + + +def filter_bounce_points(bp1, bp2): + """Return only bounce points such that ``bp2-bp1`` ≠ 0.""" + mask = (bp2 - bp1) != 0.0 + return bp1[mask], bp2[mask] + + +def add2legend(legend, lines): + """Add lines to legend if it's not already in it.""" + for line in setdefault(lines, [lines], hasattr(lines, "__iter__")): + label = line.get_label() + if label not in legend: + legend[label] = line + + +def plot_intersect(ax, legend, z1, z2, k, k_transparency): + """Plot intersects on ``ax``.""" + if k is None: + return + + k = jnp.atleast_1d(jnp.squeeze(k)) + assert k.ndim == 1 + z1, z2 = jnp.atleast_2d(z1, z2) + assert z1.ndim == z2.ndim == 2 + assert k.shape[0] == z1.shape[0] == z2.shape[0] + for p in k: + add2legend( + legend, + ax.axhline(p, color="tab:purple", alpha=k_transparency), + ) + for i in range(k.size): + _z1, _z2 = z1[i], z2[i] + if _z1.size == _z2.size: + _z1, _z2 = filter_bounce_points(_z1, _z2) + add2legend( + legend, + ax.scatter(_z1, jnp.full(z1.shape[1], k[i]), marker="v", color="tab:red"), + ) + add2legend( + legend, + ax.scatter(_z2, jnp.full(z2.shape[1], k[i]), marker="^", color="tab:green"), + ) + + +@partial(jnp.vectorize, signature="(m),(m)->(m)") +def fix_inversion(is_intersect, df_dy_sign): + """Disqualify first intersect except under an edge case. + + The pairs ``y1`` and ``y2`` are boundaries of an integral only if + ``y1 <= y2``. It is required that the first intersect satisfies + non-positive derivative. Now, because + ``df_dy_sign[...,k]<=0`` implies ``df_dy_sign[...,k+1]>=0`` + by continuity, there can be at most one inversion, and if it exists, + the inversion must be at the first pair. To correct the inversion, + it suffices to disqualify the first intersect as a right boundary, + except under an edge case. + + Parameters + ---------- + is_intersect : jnp.ndarray + Boolean array into ``y`` indicating whether element is an intersect. + df_dy_sign : jnp.ndarray + Shape ``is_intersect.shape``. 
+ Sign of ∂f/∂y (x, yᵢ). + + Returns + ------- + is_intersect : jnp.ndarray + + """ + # idx of first two intersects + idx = flatnonzero(is_intersect, size=2, fill_value=-1) + edge_case = ( + (df_dy_sign[idx[0]] == 0) + & (df_dy_sign[idx[1]] < 0) + & is_intersect[idx[0]] + & is_intersect[idx[1]] + # In theory, we need to keep propagating this edge case, e.g. + # (df_dy_sign[..., 1] < 0) | ( + # (df_dy_sign[..., 1] == 0) & (df_dy_sign[..., 2] < 0)... + # ). + # At each step, the likelihood that an intersection has already been lost + # due to floating point errors grows, so the real solution is to pick a less + # degenerate pitch value - one that does not ride the global extrema of |B|. + ) + return put(is_intersect, idx[0], edge_case) + + +def get_pitch(min_B, max_B, num, relative_shift=1e-6): + """Return uniformly spaced pitch values between ``1/max_B`` and ``1/min_B``. + + Parameters + ---------- + min_B : jnp.ndarray + Minimum |B| value. + max_B : jnp.ndarray + Maximum |B| value. + num : int + Number of values, not including endpoints. + relative_shift : float + Relative amount to shift maxima down and minima up to avoid floating point + errors in downstream routines. + + Returns + ------- + pitch : jnp.ndarray + Shape (num + 2, *min_B.shape). + + """ + # Floating point error impedes consistent detection of bounce points riding + # extrema. Shift values slightly to resolve this issue. + min_B = (1 + relative_shift) * min_B + max_B = (1 - relative_shift) * max_B + pitch = composite_linspace(1 / jnp.stack([max_B, min_B]), num) + assert pitch.shape == (num + 2, *min_B.shape) + return pitch + + +# TODO: Generalize this beyond ζ = ϕ or just map to Clebsch with ϕ +def get_alpha(alpha_0, iota, num_transit, period): + """Get sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) of field line. + + Parameters + ---------- + alpha_0 : float + Starting field line poloidal label. + iota : jnp.ndarray + Shape (iota.size, ). + Rotational transform normalized by 2π. + num_transit : float + Number of ``period``s to follow field line. + period : float + Toroidal period after which to update label. + + Returns + ------- + alpha : jnp.ndarray + Shape (iota.size, num_transit). + Sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) that specify field line. 
+ + """ + # Δϕ (∂α/∂ϕ) = Δϕ ι̅ = Δϕ ι/2π = Δϕ data["iota"] + alpha = alpha_0 + period * iota[:, jnp.newaxis] * jnp.arange(num_transit) + return alpha diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index bf8a1677dd..822184901b 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -1,4 +1,4 @@ -"""Methods for computing bounce integrals.""" +"""Functional programming methods for ``Bounce1D``.""" from functools import partial @@ -8,24 +8,18 @@ from orthax.legendre import leggauss from tests.test_interp_utils import filter_not_nan -from desc.backend import flatnonzero, imap, jnp, put +from desc.backend import imap, jnp +from desc.integrals._bounce_utils import filter_bounce_points, fix_inversion from desc.integrals.interp_utils import poly_root, polyder_vec, polyval_vec from desc.integrals.quad_utils import ( automorphism_sin, bijection_from_disc, - composite_linspace, grad_automorphism_sin, grad_bijection_from_disc, ) from desc.utils import errorif, setdefault, take_mask, warnif -def filter_bounce_points(bp1, bp2): - """Return only bounce points such that |bp2 - bp1| > 0.""" - mask = (bp2 - bp1) != 0.0 - return bp1[mask], bp2[mask] - - def plot_field_line( B, pitch=None, @@ -244,31 +238,6 @@ def _check_shape(knots, B_c, B_z_ra_c, pitch=None): return B_c, B_z_ra_c, pitch -@partial(jnp.vectorize, signature="(m),(m)->(m)") -def _fix_inversion(is_intersect, B_z_ra): - # idx of first two intersects - idx = flatnonzero(is_intersect, size=2, fill_value=-1) - edge_case = ( - (B_z_ra[idx[0]] == 0) - & (B_z_ra[idx[1]] < 0) - & is_intersect[idx[0]] - & is_intersect[idx[1]] - # In theory, we need to keep propagating this edge case, - # e.g. (B_z_ra[..., 1] < 0) | ((B_z_ra[..., 1] == 0) & (B_z_ra[..., 2] < 0)...). - # At each step, the likelihood that an intersection has already been lost - # due to floating point errors grows, so the real solution is to pick a less - # degenerate pitch value - one that does not ride the global extrema of |B|. - ) - # The pairs bp1[i, j, k] and bp2[i, j, k] are boundaries of an integral only - # if bp1[i, j, k] <= bp2[i, j, k]. For correctness of the algorithm, it is - # required that the first intersect satisfies non-positive derivative. Now, - # because B_z_ra[i, j, k] <= 0 implies B_z_ra[i, j, k + 1] >= 0 by continuity, - # there can be at most one inversion, and if it exists, the inversion must be - # at the first pair. To correct the inversion, it suffices to disqualify the - # first intersect as a right boundary, except under the above edge case. - return put(is_intersect, idx[0], edge_case) - - def bounce_points( pitch, knots, B_c, B_z_ra_c, num_well=None, check=False, plot=True, **kwargs ): @@ -346,7 +315,7 @@ def bounce_points( # we ignore the bounce points of particles only assigned to a class that are # trapped outside this snapshot of the field line. is_bp1 = (B_z_ra <= 0) & is_intersect - is_bp2 = (B_z_ra >= 0) & _fix_inversion(is_intersect, B_z_ra) + is_bp2 = (B_z_ra >= 0) & fix_inversion(is_intersect, B_z_ra) # Transform out of local power basis expansion. intersect = (intersect + knots[:-1, jnp.newaxis]).reshape(P, S, -1) @@ -366,36 +335,6 @@ def bounce_points( return bp1, bp2 -def get_pitch(min_B, max_B, num, relative_shift=1e-6): - """Return uniformly spaced pitch values between 1 / max B and 1 / min B. - - Parameters - ---------- - min_B : jnp.ndarray - Minimum |B| value. - max_B : jnp.ndarray - Maximum |B| value. - num : int - Number of values, not including endpoints. 
- relative_shift : float - Relative amount to shift maxima down and minima up to avoid floating point - errors in downstream routines. - - Returns - ------- - pitch : jnp.ndarray - Shape (num + 2, *min_B.shape). - - """ - # Floating point error impedes consistent detection of bounce points riding - # extrema. Shift values slightly to resolve this issue. - min_B = (1 + relative_shift) * min_B - max_B = (1 - relative_shift) * max_B - pitch = composite_linspace(1 / jnp.stack([max_B, min_B]), num) - assert pitch.shape == (num + 2, *pitch.shape[1:]) - return pitch - - def _get_extrema(knots, B_c, B_z_ra_c, sentinel=jnp.nan): """Return extrema of |B| along field line. Sort order is arbitrary. @@ -728,7 +667,7 @@ def required_names(): def bounce_integral( data, knots, - quad=leggauss(21), + quad=leggauss(32), automorphism=(automorphism_sin, grad_automorphism_sin), B_ref=1.0, L_ref=1.0, @@ -773,7 +712,7 @@ def bounce_integral( integrand. A good reference density is 100 knots per toroidal transit. quad : (jnp.ndarray, jnp.ndarray) Quadrature points xₖ and weights wₖ for the approximate evaluation of an - integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Default is 21 points. + integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Default is 32 points. automorphism : (Callable, Callable) or None The first callable should be an automorphism of the real interval [-1, 1]. The second callable should be the derivative of the first. This map defines a diff --git a/desc/integrals/fourier_bounce_integral.py b/desc/integrals/fourier_bounce_integral.py index 0f1fc7ad9f..a269aee7c6 100644 --- a/desc/integrals/fourier_bounce_integral.py +++ b/desc/integrals/fourier_bounce_integral.py @@ -1,12 +1,25 @@ """Methods for computing Fast Fourier Chebyshev transforms and bounce integrals.""" import numpy as np +from interpax import CubicHermiteSpline, PPoly from matplotlib import pyplot as plt -from orthax.chebyshev import chebroots from orthax.legendre import leggauss -from desc.backend import dct, idct, irfft, jnp, rfft, rfft2 -from desc.integrals.bounce_integral import _fix_inversion, filter_bounce_points +from desc.backend import dct, idct, irfft, jnp, rfft +from desc.integrals._bounce_utils import ( + add2legend, + chebroots_vec, + fix_inversion, + flatten_matrix, + get_alpha, + plot_intersect, + subtract, +) +from desc.integrals.bounce_integral import ( + _bounce_quadrature, + _interp_to_argmin_B_soft, + bounce_points, +) from desc.integrals.interp_utils import ( _filter_distinct, cheb_from_dct, @@ -17,12 +30,14 @@ interp_rfft2, irfft2_non_uniform, irfft_non_uniform, + polyder_vec, + transform_to_desc, ) from desc.integrals.quad_utils import ( automorphism_sin, bijection_from_disc, bijection_to_disc, - get_quad_points, + get_quad, grad_automorphism_sin, ) from desc.utils import ( @@ -30,18 +45,13 @@ atleast_3d_mid, atleast_nd, errorif, + isposint, setdefault, take_mask, warnif, ) -def _fast_transform(f, lobatto): - M = f.shape[-2] - N = f.shape[-1] - return rfft(dct(f, type=2 - lobatto, axis=-1), axis=-2) / (M * (N - lobatto)) - - class FourierChebyshevBasis: """Fourier-Chebyshev series. @@ -49,6 +59,11 @@ class FourierChebyshevBasis: where ψₘ are trigonometric polynomials on [0, 2π] and Tₙ are Chebyshev polynomials on [−yₘᵢₙ, yₘₐₓ]. + Notes + ----- + Performance may improve significantly + if the spectral resolutions ``M`` and ``N`` are powers of two. 
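A brief usage sketch of this class, under the assumption that ``nodes``, ``evaluate``, and ``compute_cheb`` behave as documented in this module (the test function and resolutions are arbitrary):

# Sketch only: fit a Fourier-Chebyshev series to samples taken on the
# collocation nodes, then restrict to fixed x to obtain Chebyshev series in y.
import numpy as np
from desc.integrals import FourierChebyshevBasis

M, N = 8, 16                                  # powers of two, per the note above
domain = (0, 4 * np.pi)                       # y domain
nodes = FourierChebyshevBasis.nodes(M, N, domain=domain)   # shape (M * N, 2)
x, y = nodes[:, 0], nodes[:, 1]
f = (np.cos(x) * np.exp(-y / (4 * np.pi))).reshape(M, N)   # f sampled on the grid
basis = FourierChebyshevBasis(f, domain=domain)
f_nodes = basis.evaluate(M, N)                # should reproduce ``f`` to round-off
cheb = basis.compute_cheb(np.array([0.0, np.pi]))  # ChebyshevBasisSet for two x values
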
+ Attributes ---------- M : int @@ -63,7 +78,7 @@ class FourierChebyshevBasis: """ - def __init__(self, f, domain, lobatto=False): + def __init__(self, f, domain=(-1, 1), lobatto=False): """Interpolate Fourier-Chebyshev basis to ``f``. Parameters @@ -71,9 +86,8 @@ def __init__(self, f, domain, lobatto=False): f : jnp.ndarray Shape (..., M, N). Samples of real function on the ``FourierChebyshevBasis.nodes`` grid. - M, N preferably power of 2. domain : (float, float) - Domain for y coordinates. + Domain for y coordinates. Default is [-1, 1]. lobatto : bool Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) instead of the interior roots grid for Chebyshev points. @@ -82,13 +96,19 @@ def __init__(self, f, domain, lobatto=False): self.M = f.shape[-2] self.N = f.shape[-1] errorif(domain[0] > domain[-1], msg="Got inverted domain.") - self.domain = domain + self.domain = tuple(domain) errorif(lobatto, NotImplementedError, "JAX has not implemented type 1 DCT.") self.lobatto = bool(lobatto) - self._c = _fast_transform(f, self.lobatto) + self._c = FourierChebyshevBasis._fast_transform(f, self.lobatto) + + @staticmethod + def _fast_transform(f, lobatto): + M = f.shape[-2] + N = f.shape[-1] + return rfft(dct(f, type=2 - lobatto, axis=-1), axis=-2) / (M * (N - lobatto)) @staticmethod - def nodes(M, N, domain, lobatto=False, **kwargs): + def nodes(M, N, L=None, domain=(-1, 1), lobatto=False): """Tensor product grid of optimal collocation nodes for this basis. Parameters @@ -97,25 +117,34 @@ def nodes(M, N, domain, lobatto=False, **kwargs): Grid resolution in x direction. Preferably power of 2. N : int Grid resolution in y direction. Preferably power of 2. + L : int or jnp.ndarray + Optional, resolution in radial direction of domain [0, 1]. + May also be an array of coordinates values. If given, then the + returned ``coords`` is a 3D tensor-product with shape (L * M * N, 3). domain : (float, float) - Domain for y coordinates. + Domain for y coordinates. Default is [-1, 1]. lobatto : bool Whether to use the Gauss-Lobatto (Extrema-plus-Endpoint) instead of the interior roots grid for Chebyshev points. Returns ------- - coord : jnp.ndarray + coords : jnp.ndarray Shape (M * N, 2). Grid of (x, y) points for optimal interpolation. """ x = fourier_pts(M) y = cheb_pts(N, lobatto, domain) - coord = [jnp.atleast_1d(kwargs.pop("rho")), x, y] if "rho" in kwargs else [x, y] - coord = list(map(jnp.ravel, jnp.meshgrid(*coord, indexing="ij"))) - coord = jnp.column_stack(coord) - return coord + if L is not None: + if isposint(L): + L = jnp.flipud(jnp.linspace(1, 0, L, endpoint=False)) + coords = (L, x, y) + else: + coords = (x, y) + coords = list(map(jnp.ravel, jnp.meshgrid(*coords, indexing="ij"))) + coords = jnp.column_stack(coords) + return coords def evaluate(self, M, N): """Evaluate Fourier-Chebyshev series. @@ -166,7 +195,7 @@ def compute_cheb(self, x): Returns ------- - cheb : PiecewiseChebyshevBasis + cheb : ChebyshevBasisSet Chebyshev coefficients αₙ(x=``x``) for f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). """ @@ -174,35 +203,14 @@ def compute_cheb(self, x): x = jnp.atleast_1d(x)[..., jnp.newaxis] cheb = cheb_from_dct(irfft_non_uniform(x, self._c, self.M, axis=-2), axis=-1) assert cheb.shape[-2:] == (x.shape[-2], self.N) - return PiecewiseChebyshevBasis(cheb, self.domain) - - -_chebroots_vec = jnp.vectorize(chebroots, signature="(m)->(n)") - - -def _flatten_matrix(y): - # Flatten batch of matrix to batch of vector. 
- return y.reshape(*y.shape[:-2], -1) + return ChebyshevBasisSet(cheb, self.domain) -def _subtract(c, k): - # subtract k from last axis of c, obeying numpy broadcasting - c_0 = c[..., 0] - k - c = jnp.concatenate( - [ - c_0[..., jnp.newaxis], - jnp.broadcast_to(c[..., 1:], (*c_0.shape, c.shape[-1] - 1)), - ], - axis=-1, - ) - return c - - -class PiecewiseChebyshevBasis: +class ChebyshevBasisSet: """Chebyshev series. { fₓ | fₓ : y ↦ ∑ₙ₌₀ᴺ⁻¹ aₙ(x) Tₙ(y) } - and Tₙ are Chebyshev polynomials on [−yₘᵢₙ, yₘₐₓ]. + and Tₙ are Chebyshev polynomials on [−yₘᵢₙ, yₘₐₓ] Attributes ---------- @@ -220,7 +228,7 @@ class PiecewiseChebyshevBasis: _eps = min(jnp.finfo(jnp.array(1.0).dtype).eps * 1e2, 1e-10) - def __init__(self, cheb, domain): + def __init__(self, cheb, domain=(-1, 1)): """Make Chebyshev series basis from given coefficients. Parameters @@ -228,11 +236,13 @@ def __init__(self, cheb, domain): cheb : jnp.ndarray Shape (..., M, N). Chebyshev coefficients αₙ(x=``x``) for f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). + domain : (float, float) + Domain for y coordinates. Default is [-1, 1]. """ - errorif(domain[0] > domain[-1], msg="Got inverted domain.") - self.domain = domain self.cheb = jnp.atleast_2d(cheb) + errorif(domain[0] > domain[-1], msg="Got inverted domain.") + self.domain = tuple(domain) @property def M(self): @@ -256,13 +266,13 @@ def _chebcast(cheb, arr): ) return cheb if jnp.ndim(arr) < cheb.ndim else cheb[jnp.newaxis] - def intersect(self, k, eps=_eps): + def intersect2d(self, k=0.0, eps=_eps): """Coordinates yᵢ such that f(x, yᵢ) = k(x). Parameters ---------- k : jnp.ndarray - Shape cheb.shape[:-1] or (k.shape[0], *cheb.shape[:-1]). + Shape must broadcast with (..., *cheb.shape[:-1]). Specify to find solutions yᵢ to f(x, yᵢ) = k(x). Default 0. eps : float Absolute tolerance with which to consider value as zero. @@ -272,20 +282,17 @@ def intersect(self, k, eps=_eps): y : jnp.ndarray Shape (..., *cheb.shape[:-1], N - 1). Solutions yᵢ of f(x, yᵢ) = k(x), in ascending order. - is_decreasing : jnp.ndarray - Shape y.shape. - Whether ∂f/∂y (x, yᵢ) is decreasing. - is_increasing : jnp.ndarray - Shape y.shape. - Whether ∂f/∂y (x, yᵢ) is increasing. is_intersect : jnp.ndarray Shape y.shape. Boolean array into ``y`` indicating whether element is an intersect. + df_dy_sign : jnp.ndarray + Shape y.shape. + Sign of ∂f/∂y (x, yᵢ). """ - c = _subtract(self._chebcast(self.cheb, k), k) + c = subtract(ChebyshevBasisSet._chebcast(self.cheb, k), k) # roots yᵢ of f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y) - k(x) - y = _chebroots_vec(c) + y = chebroots_vec(c) assert y.shape == (*c.shape[:-1], self.N - 1) # Intersects must satisfy y ∈ [-1, 1]. @@ -299,88 +306,88 @@ def intersect(self, k, eps=_eps): n = jnp.arange(self.N) # ∂f/∂y = ∑ₙ₌₀ᴺ⁻¹ aₙ(x) n Uₙ₋₁(y) # sign ∂f/∂y = sign ∑ₙ₌₀ᴺ⁻¹ aₙ(x) n sin(n arcos y) - s = jnp.linalg.vecdot( - n * jnp.sin(n * jnp.arccos(y)[..., jnp.newaxis]), - self.cheb[..., jnp.newaxis, :], + df_dy_sign = jnp.sign( + jnp.linalg.vecdot( + n * jnp.sin(n * jnp.arccos(y)[..., jnp.newaxis]), + self.cheb[..., jnp.newaxis, :], + ) ) - is_decreasing = s <= 0 - is_increasing = s >= 0 + y = bijection_from_disc(y, self.domain[0], self.domain[-1]) + return y, is_intersect, df_dy_sign - y = bijection_from_disc(y, *self.domain) - return y, is_decreasing, is_increasing, is_intersect - - def bounce_points(self, pitch, num_well=None): - """Compute bounce points given intersections. + def intersect1d(self, k=0.0, num_intersect=None, pad_value=0.0): + """Coordinates z(x, yᵢ) such that fₓ(yᵢ) = k for every x. 
Parameters ---------- - pitch : jnp.ndarray - Shape must broadcast with (P, *self.cheb.shape[:-2]). - λ values to evaluate the bounce integral. - num_well : int or None - If not specified, then all bounce points are returned in an array whose + k : jnp.ndarray + Shape must broadcast with (..., *cheb.shape[:-2]). + Specify to find solutions yᵢ to fₓ(yᵢ) = k. Default 0. + num_intersect : int or None + If not specified, then all intersects are returned in an array whose last axis has size ``self.M*(self.N-1)``. If there were less than that many - wells detected along a field line, then the last axis of the returned - arrays, which enumerates bounce points for a particular field line and - pitch, is padded with zero. - - Specify to return the first ``num_well`` pairs of bounce points for each - pitch along each field line. This is useful if ``num_well`` tightly - bounds the actual number of wells. As a reference, there are - typically <= 5 wells per toroidal transit. + intersects detected, then the last axis of the returned arrays is padded + with ``pad_value``. Specify to return the first ``num_intersect`` pairs + of intersects. This is useful if ``num_intersect`` tightly bounds the + actual number. + pad_value : float + Value with which to pad array. Default 0. Returns ------- - bp1, bp2 : jnp.ndarray - Shape broadcasts with (P, *self.cheb.shape[:-2], num_well). - The field line-following coordinates of bounce points. - The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, - respectively, for the bounce integrals. + z1, z2 : (jnp.ndarray, jnp.ndarray) + Shape broadcasts with (..., *self.cheb.shape[:-2], num_intersect). + ``z1``, ``z2`` holds intersects satisfying ∂f/∂y <= 0, ∂f/∂y >= 0, + respectively. """ - # _fix_inversion assumes N > 1. - errorif(self.N < 2, NotImplementedError, f"Got self.N = {self.N} < 2.") - y, is_decreasing, is_increasing, is_intersect = self.intersect( - # Add axis to use same pitch over all cuts of field line. - 1 - / jnp.atleast_1d(pitch)[..., jnp.newaxis] + errorif( + self.N < 2, + NotImplementedError, + "This method requires the Chebyshev spectral resolution of at " + f"least 2, but got N={self.N}.", + ) + + # Add axis to use same k over all Chebyshev series of the piecewise object. + y, is_intersect, df_dy_sign = self.intersect2d( + jnp.atleast_1d(k)[..., jnp.newaxis] ) - # Flatten so that last axis enumerates intersects of a pitch along a field line. - y = _flatten_matrix(self._isomorphism_to_C1(y)) - is_decreasing = _flatten_matrix(is_decreasing) - is_increasing = _flatten_matrix(is_increasing) - is_intersect = _flatten_matrix(is_intersect) + # Flatten so that last axis enumerates intersects along the piecewise object. + y, is_intersect, df_dy_sign = map( + flatten_matrix, (self.isomorphism_to_C1(y), is_intersect, df_dy_sign) + ) + + # Note for bounce point applications: # We ignore the degenerate edge case where the boundary shared by adjacent - # polynomials is a left bounce point i.e. ``is_bp1`` because the subset of + # polynomials is a left intersect point i.e. ``is_z1`` because the subset of # pitch values that generate this edge case has zero measure. Note that # the technique to account for this would be to disqualify intersects # within ``_eps`` from ``domain[-1]``. 
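The classification performed next (``is_z1`` and ``is_z2``) follows the usual recipe for locating where a Chebyshev series crosses a level k. A stand-alone NumPy sketch of that recipe on a single series, omitting the padding and the ``fix_inversion`` edge-case handling used here, reads:

# NumPy-only illustration: roots of f(y) - k on [-1, 1] classified by the sign
# of df/dy, analogous to is_z1 / is_z2 below. The test function is arbitrary.
import numpy as np
from numpy.polynomial import chebyshev as cheb

c = cheb.chebinterpolate(lambda y: np.cos(2 * np.pi * y) + 0.3 * y, deg=16)
k = 0.4
ck = c.copy()
ck[0] -= k                                  # coefficients of f(y) - k
r = cheb.chebroots(ck)                      # companion-matrix roots
r = np.real(r[np.isreal(r)])                # keep real roots
r = np.sort(r[(r >= -1) & (r <= 1)])        # restrict to the Chebyshev domain
df_sign = np.sign(cheb.chebval(r, cheb.chebder(c)))
z1, z2 = r[df_sign <= 0], r[df_sign >= 0]   # left / right crossings of the level
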
- is_bp1 = is_decreasing & is_intersect - is_bp2 = is_increasing & _fix_inversion(is_intersect, is_increasing) + is_z1 = (df_dy_sign <= 0) & is_intersect + is_z2 = (df_dy_sign >= 0) & fix_inversion(is_intersect, df_dy_sign) sentinel = self.domain[0] - 1.0 - bp1 = take_mask(y, is_bp1, size=num_well, fill_value=sentinel) - bp2 = take_mask(y, is_bp2, size=num_well, fill_value=sentinel) + z1 = take_mask(y, is_z1, size=num_intersect, fill_value=sentinel) + z2 = take_mask(y, is_z2, size=num_intersect, fill_value=sentinel) - mask = (bp1 > sentinel) & (bp2 > sentinel) + mask = (z1 > sentinel) & (z2 > sentinel) # Set outside mask to same value so integration is over set of measure zero. - bp1 = jnp.where(mask, bp1, 0.0) - bp2 = jnp.where(mask, bp2, 0.0) - return bp1, bp2 + z1 = jnp.where(mask, z1, pad_value) + z2 = jnp.where(mask, z2, pad_value) + return z1, z2 def eval1d(self, z, cheb=None): """Evaluate piecewise Chebyshev spline at coordinates z. - The coordinates z ∈ ℝ are assumed isomorphic to (x, y) ∈ ℝ² - where z integer division domain yields index into the proper - Chebyshev series of the spline and z mod domain is the coordinate - value along the domain of that Chebyshev series. - Parameters ---------- z : jnp.ndarray Shape (..., *cheb.shape[:-2], z.shape[-1]). - Isomorphic coordinates along field line [0, ∞). + Coordinates in [sef.domain[0], ∞). + The coordinates z ∈ ℝ are assumed isomorphic to (x, y) ∈ ℝ² where + ``z // domain`` yields the index into the proper Chebyshev series + along the second to last axis of ``cheb`` and ``z % domain`` is + the coordinate value on the domain of that Chebyshev series. cheb : jnp.ndarray Shape (..., M, N). Chebyshev coefficients to use. If not given, uses ``self.cheb``. @@ -394,7 +401,7 @@ def eval1d(self, z, cheb=None): """ cheb = self._chebcast(setdefault(cheb, self.cheb), z) N = cheb.shape[-1] - x_idx, y = self._isomorphism_to_C2(z) + x_idx, y = self.isomorphism_to_C2(z) y = bijection_to_disc(y, self.domain[0], self.domain[1]) # Chebyshev coefficients αₙ for f(z) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x[z]) Tₙ(y[z]) # are held in cheb with shape (..., num cheb series, N). @@ -403,7 +410,7 @@ def eval1d(self, z, cheb=None): assert f.shape == z.shape return f - def _isomorphism_to_C1(self, y): + def isomorphism_to_C1(self, y): """Return coordinates z ∈ ℂ isomorphic to (x, y) ∈ ℂ². Maps row x of y to z = y + f(x) where f(x) = x * |domain|. @@ -423,9 +430,10 @@ def _isomorphism_to_C1(self, y): """ assert y.ndim >= 2 z_shift = jnp.arange(y.shape[-2]) * (self.domain[-1] - self.domain[0]) - return y + z_shift[:, jnp.newaxis] + z = y + z_shift[:, jnp.newaxis] + return z - def _isomorphism_to_C2(self, z): + def isomorphism_to_C2(self, z): """Return coordinates (x, y) ∈ ℂ² isomorphic to z ∈ ℂ. Returns index x and value y such that z = f(x) + y where f(x) = x * |domain|. @@ -443,196 +451,154 @@ def _isomorphism_to_C2(self, z): """ x_idx, y_val = jnp.divmod(z - self.domain[0], self.domain[-1] - self.domain[0]) - return x_idx.astype(int), y_val + self.domain[0] + x_idx = x_idx.astype(int) + y_val += self.domain[0] + return x_idx, y_val - def _check_shape(self, bp1, bp2, pitch): - """Return shapes that broadcast with (P, *self.cheb.shape[:-2], W).""" + def _check_shape(self, z1, z2, k): + """Return shapes that broadcast with (k.shape[0], *self.cheb.shape[:-2], W).""" # Ensure pitch batch dim exists and add back dim to broadcast with wells. 
- pitch = atleast_nd(self.cheb.ndim - 1, pitch)[..., jnp.newaxis] + k = atleast_nd(self.cheb.ndim - 1, k)[..., jnp.newaxis] # Same but back dim already exists. - bp1, bp2 = atleast_nd(self.cheb.ndim, bp1, bp2) + z1, z2 = atleast_nd(self.cheb.ndim, z1, z2) # Cheb has shape (..., M, N) and others - # have shape (P, ..., W) - errorif(not (bp1.ndim == bp2.ndim == pitch.ndim == self.cheb.ndim)) - return bp1, bp2, pitch + # have shape (K, ..., W) + errorif(not (z1.ndim == z2.ndim == k.ndim == self.cheb.ndim)) + return z1, z2, k - def check_bounce_points(self, bp1, bp2, pitch, plot=True, **kwargs): - """Check that bounce points are computed correctly. + def check_intersect1d(self, z1, z2, k, pad_value=0.0, plot=True, **kwargs): + """Check that intersects are computed correctly. Parameters ---------- - bp1, bp2 : jnp.ndarray - Shape must broadcast with (P, *self.cheb.shape[:-2], W). - The field line-following coordinates of bounce points. - The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, - respectively, for the bounce integrals. - pitch : jnp.ndarray - Shape must broadcast with (P, *self.cheb.shape[:-2]). - λ values to evaluate the bounce integral. + z1, z2 : jnp.ndarray + Shape must broadcast with (k, *self.cheb.shape[:-2], W). + ``z1``, ``z2`` holds intersects satisfying ∂f/∂y <= 0, ∂f/∂y >= 0, + respectively. + k : jnp.ndarray + Shape must broadcast with (k.shape[0], *self.cheb.shape[:-2]). + k such that fₓ(yᵢ) = k. + pad_value : float + Value that pads ``z1`` and ``z2`` arrays. plot : bool Whether to plot stuff. Default is true. kwargs : dict - Keyword arguments into ``plot_field_line``. + Keyword arguments into ``self.plot``. """ - assert bp1.shape == bp2.shape - mask = (bp1 - bp2) != 0.0 - bp1 = jnp.where(mask, bp1, jnp.nan) - bp2 = jnp.where(mask, bp2, jnp.nan) - bp1, bp2, pitch = self._check_shape(bp1, bp2, pitch) - - err_1 = jnp.any(bp1 > bp2, axis=-1) - err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1], axis=-1) - B_m = self.eval1d((bp1 + bp2) / 2) - assert B_m.shape == bp1.shape - err_3 = jnp.any(B_m > 1 / pitch + self._eps, axis=-1) + assert z1.shape == z2.shape + mask = (z1 - z2) != pad_value + z1 = jnp.where(mask, z1, jnp.nan) + z2 = jnp.where(mask, z2, jnp.nan) + z1, z2, k = self._check_shape(z1, z2, k) + + err_1 = jnp.any(z1 > z2, axis=-1) + err_2 = jnp.any(z1[..., 1:] < z2[..., :-1], axis=-1) + f_m = self.eval1d((z1 + z2) / 2) + assert f_m.shape == z1.shape + err_3 = jnp.any(f_m > k + self._eps, axis=-1) if not (plot or jnp.any(err_1 | err_2 | err_3)): return # Ensure l axis exists for iteration in below loop. 
cheb = atleast_nd(3, self.cheb) - mask, bp1, bp2, B_m = atleast_3d_mid(mask, bp1, bp2, B_m) + mask, z1, z2, f_m = atleast_3d_mid(mask, z1, z2, f_m) err_1, err_2, err_3 = atleast_2d_end(err_1, err_2, err_3) for l in np.ndindex(cheb.shape[:-2]): - for p in range(pitch.shape[0]): + for p in range(k.shape[0]): idx = (p, *l) if not (err_1[idx] or err_2[idx] or err_3[idx]): continue - _bp1 = bp1[idx][mask[idx]] - _bp2 = bp2[idx][mask[idx]] + _z1 = z1[idx][mask[idx]] + _z2 = z2[idx][mask[idx]] if plot: - self.plot_field_line( + self.plot1d( cheb=cheb[l], - bp1=_bp1, - bp2=_bp2, - pitch=pitch[idx], - title_id=str(idx), + z1=_z1, + z2=_z2, + k=k[idx], **kwargs, ) - print(" bp1 | bp2") - print(jnp.column_stack([_bp1, _bp2])) - assert not err_1[idx], "Bounce points have an inversion.\n" + print(" z1 | z2") + print(jnp.column_stack([_z1, _z2])) + assert not err_1[idx], "Intersects have an inversion.\n" assert not err_2[idx], "Detected discontinuity.\n" assert not err_3[idx], ( - "Detected |B| > 1/λ in well. Increase Chebyshev resolution.\n" - f"{B_m[idx][mask[idx]]} > {1 / pitch[idx] + self._eps}" + "Detected f > k in well. Increase Chebyshev resolution.\n" + f"{f_m[idx][mask[idx]]} > {k[idx] + self._eps}" ) idx = (slice(None), *l) if plot: - self.plot_field_line( + self.plot1d( cheb=cheb[l], - bp1=bp1[idx], - bp2=bp2[idx], - pitch=pitch[idx], - title_id=str(l), + z1=z1[idx], + z2=z2[idx], + k=k[idx], **kwargs, ) - def plot_field_line( + def plot1d( self, cheb, - bp1=jnp.array([[]]), - bp2=jnp.array([[]]), - pitch=jnp.array([]), + z1=None, + z2=None, + k=None, + k_transparency=0.5, num=1000, - title=r"Computed bounce points for $\vert B \vert$ and pitch $\lambda$", - title_id=None, - transparency_pitch=0.5, + title=r"Intersects $z$ for $f(z) - k = 0$", + hlabel=r"$z$", + vlabel=r"$f(z)$", show=True, ): - """Plot the field line given spline of |B|. + """Plot the function ``f`` defined by the Chebyshev coefficients. Parameters ---------- cheb : jnp.ndarray - Piecewise Chebyshev coefficients of |B| along the field line. + Shape (M, N). + Piecewise Chebyshev coefficients. + z1 : jnp.ndarray + Shape (k.shape[0], W). + Optional, intersects with ∂f/∂y <= 0. + z2 : jnp.ndarray + Shape (k.shape[0], W). + Optional, intersects with ∂f/∂y >= 0. + k : jnp.ndarray + Shape (k.shape[0], ). + Optional, k such that fₓ(yᵢ) = k. + k_transparency : float + Transparency of pitch lines. num : int - Number of ζ points to plot. Pick a big number. - bp1 : jnp.ndarray - Shape (P, W). - Bounce points with (∂|B|/∂ζ)|ρ,α <= 0. - bp2 : jnp.ndarray - Shape (P, W). - Bounce points with (∂|B|/∂ζ)|ρ,α >= 0. - pitch : jnp.ndarray - Shape (P, ). - λ values. + Number of points to evaluate ``cheb`` for plot. title : str Plot title. - title_id : str - Identifier string to append to plot title. - transparency_pitch : float - Transparency of pitch lines. + hlabel : str + Horizontal axis label. + vlabel : str + Vertical axis label. show : bool Whether to show the plot. Default is true. Returns ------- - fig, ax : matplotlib figure and axes. 
+ fig, ax : matplotlib figure and axes """ + fig, ax = plt.subplots() legend = {} - def add(lines): - for line in setdefault(lines, [lines], hasattr(lines, "__iter__")): - label = line.get_label() - if label not in legend: - legend[label] = line - - fig, ax = plt.subplots() z = jnp.linspace( start=self.domain[0], stop=self.domain[0] + (self.domain[1] - self.domain[0]) * self.M, num=num, ) - add(ax.plot(z, self.eval1d(z, cheb), label=r"$\vert B \vert (\zeta)$")) - - if pitch is not None: - b = 1 / jnp.atleast_1d(jnp.squeeze(pitch)) - assert b.ndim == 1 - bp1, bp2 = jnp.atleast_2d(bp1, bp2) - assert bp1.ndim == bp2.ndim == 2 - assert b.shape[0] == bp1.shape[0] - - for val in b: - add( - ax.axhline( - val, - color="tab:purple", - alpha=transparency_pitch, - label=r"$1 / \lambda$", - ) - ) - for i in range(bp1.shape[0]): - if bp1.shape == bp2.shape: - _bp1, _bp2 = filter_bounce_points(bp1[i], bp2[i]) - else: - _bp1, _bp2 = bp1[i], bp2[i] - add( - ax.scatter( - _bp1, - jnp.full_like(_bp1, b[i]), - marker="v", - color="tab:red", - label="bp1", - ) - ) - add( - ax.scatter( - _bp2, - jnp.full_like(_bp2, b[i]), - marker="^", - color="tab:green", - label="bp2", - ) - ) + add2legend(legend, ax.plot(z, self.eval1d(z, cheb), label=vlabel)) + plot_intersect(ax, legend, z1, z2, k, k_transparency) - ax.set_xlabel(r"Field line $\zeta$") - ax.set_ylabel(r"$\vert B \vert \sim 1 / \lambda$") + ax.set_xlabel(hlabel) + ax.set_ylabel(vlabel) ax.legend(legend.values(), legend.keys(), loc="lower right") - if title_id is not None: - title = f"{title}. ID={title_id}." ax.set_title(title) plt.tight_layout() if show: @@ -641,67 +607,20 @@ def add(lines): return fig, ax -def _get_alphas(alpha_0, iota, num_transit, period): - """Get sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) of field line. - - Parameters - ---------- - alpha_0 : float - Starting field line poloidal label. - iota : jnp.ndarray - Shape (iota.size, ). - Rotational transform normalized by 2π. - num_transit : float - Number of ``period``s to follow field line. - period : float - Toroidal period after which to update label. - - Returns - ------- - alphas : jnp.ndarray - Shape (iota.size, num_transit). - Sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) that specify field line. - - """ - # Δϕ (∂α/∂ϕ) = Δϕ ι̅ = Δϕ ι/2π = Δϕ data["iota"] - alphas = alpha_0 + period * iota[:, jnp.newaxis] * jnp.arange(num_transit) - return alphas - - -def _transform_to_desc(grid, f): - """Transform to DESC spectral domain. - - Parameters - ---------- - grid : Grid - Periodic tensor-product grid in (ρ, θ, ζ). - Note that below shape notation defines - L = ``grid.num_rho``. - f : jnp.ndarray - Function evaluated on ``grid``. - - Returns - ------- - a : jnp.ndarray - Shape (grid.num_rho, 1, grid.num_theta, grid.num_zeta // 2 + 1) - Coefficients of 2D real FFT. - - """ - f = grid.meshgrid_reshape(f, order="rtz") - a = rfft2(f, norm="forward")[:, jnp.newaxis] - assert a.shape == (grid.num_rho, 1, grid.num_theta, grid.num_zeta // 2 + 1) - return a - - -def _transform_to_clebsch(grid, M, N, desc_from_clebsch, B): +def _transform_to_clebsch(grid, desc_from_clebsch, M, N, B): """Transform to Clebsch spectral domain. Parameters ---------- grid : Grid - Periodic tensor-product grid in (ρ, θ, ζ). + Tensor-product grid in (ρ, θ, ζ) with uniformly spaced nodes in + (2π × 2π) poloidal and toroidal coordinates. Note that below shape notation defines - L = ``grid.num_rho``. + L = ``grid.num_rho``, m = ``grid.num_theta``, and n = ``grid.num_zeta``. 
+ desc_from_clebsch : jnp.ndarray + Shape (L * M * N, 3). + DESC coordinates (ρ, θ, ζ) sourced from the Clebsch coordinates + ``FourierChebyshevBasis.nodes(M,N,domain=FourierBounce.domain)``. M : int Grid resolution in poloidal direction for Clebsch coordinate grid. Preferably power of 2. A good choice is ``m``. If the poloidal stream @@ -710,11 +629,6 @@ def _transform_to_clebsch(grid, M, N, desc_from_clebsch, B): N : int Grid resolution in toroidal direction for Clebsch coordinate grid. Preferably power of 2. - desc_from_clebsch : jnp.ndarray - Shape (L * M * N, 3). - DESC coordinate grid (ρ, θ, ζ) sourced from the Clebsch coordinate - tensor-product grid (ρ, α, ζ) returned by - ``FourierChebyshevBasis.nodes(M,N,domain=FourierBounce.domain)``. B : jnp.ndarray |B| evaluated on ``grid``. @@ -727,45 +641,40 @@ def _transform_to_clebsch(grid, M, N, desc_from_clebsch, B): # θ is computed on the optimal nodes in Clebsch space, # which is a tensor product node set in Clebsch space. f=desc_from_clebsch[:, 1].reshape(grid.num_rho, M, N), - domain=FourierBounce.domain, + domain=Bounce2D.domain, ) - # Transformation from spectral domain of periodic basis to spectral - # domain of non-periodic basis is best done through interpolation. - # No shortcuts. B = FourierChebyshevBasis( f=interp_rfft2( # Interpolate to optimal nodes in Clebsch space, # which is not a tensor product node set in DESC space. xq=desc_from_clebsch[:, 1:].reshape(grid.num_rho, -1, 2), f=grid.meshgrid_reshape(B, order="rtz")[:, jnp.newaxis], + axes=(-1, -2), ).reshape(grid.num_rho, M, N), - domain=FourierBounce.domain, + domain=Bounce2D.domain, ) - # We compute |B|(α,ζ) so that roots are obtainable without inferior - # local search algorithms and θ(α,ζ) to avoid coordinate mapping - # of quadrature points in Clebsch space to DESC space. The root finding - # required to solve the nonlinear relation in the latter is not "local" - # because there is a global minima or unique mapping between coordinate - # systems. However, it should still be avoided as the number of - # quadrature points is higher due to the large number of integrals that - # need to be computed. (An alternative would be to also transform functions - # in the integrand of the quadrature like |B| and evaluate quadrature - # points in Clebsch space. This may be less efficient if there are - # multiple functions in the integrand that need to be transformed - # independently, perhaps because the composition defined by the - # integrand is less smooth than the individual components.) return T, B -class FourierBounce: - """Computes bounce integrals with pseudo-spectral methods. +# TODO: +# After GitHub issue #1034 is resolved, we can also pass in the previous +# θ(α) coordinates as an initial guess for the next coordinate mapping. +# Perhaps tell the optimizer to perturb the coefficients of the +# |B|(α, ζ) directly? Maybe auto diff to see change on |B|(θ, ζ) +# and hence stream functions. just guessing. not sure if feasible / useful. +# TODO: Allow multiple starting labels for near-rational surfaces. +# can just concatenate along second to last axis of cheb. + + +class Bounce2D: + """Computes bounce integrals using two-dimensional pseudo-spectral methods. 
The bounce integral is defined as ∫ f(ℓ) dℓ, where dℓ parameterizes the distance along the field line in meters, - λ is a constant proportional to the magnetic moment over energy, - |B| is the norm of the magnetic field, f(ℓ) is the quantity to integrate along the field line, - and the boundaries of the integral are bounce points ζ₁, ζ₂ s.t. λ|B|(ζᵢ) = 1. + and the boundaries of the integral are bounce points ζ₁, ζ₂ s.t. λ|B|(ζᵢ) = 1, + where λ is a constant proportional to the magnetic moment over energy + and |B| is the norm of the magnetic field. For a particle with fixed λ, bounce points are defined to be the location on the field line such that the particle's velocity parallel to the magnetic field is zero. @@ -773,55 +682,153 @@ class FourierBounce: the particle's guiding center trajectory traveling in the direction of increasing field-line-following coordinate ζ. + Notes + ----- + Motivation and description of algorithm for developers. + + For applications which reduce to computing a nonlinear function of distance + along field lines between bounce points, it is required to identify these + points with field-line-following coordinates. In the special case of a linear + function summing integrals between bounce points over a flux surface, arbitrary + coordinate systems may be used as this operation becomes a surface integral, + which is invariant to the order of summation. + + The DESC coordinate system is related to field-line-following coordinate + systems by a relation whose solution is best found with Newton iteration. + There is a unique real solution to this equation, so Newton iteration is a + globally convergent root-finding algorithm here. For the task of finding + bounce points, even if the inverse map: θ(α, ζ) was known, Newton iteration + is not a globally convergent algorithm to find the real roots of + f : ζ ↦ |B|(ζ) − 1/λ where ζ is a field-line-following coordinate. + For this, function approximation of |B| is necessary. + + Therefore, to compute bounce points {(ζ₁, ζ₂)}, we approximate |B| by a + series expansion of basis functions in (α, ζ) coordinates restricting the + class of basis functions to low order (e.g. N = 2ᵏ where k is small) + algebraic or trigonometric polynomial with integer frequencies. These are + the two classes useful for function approximation and for which there exists + globally convergent root-finding algorithms. We require low order because + the computation expenses grow with the number of potential roots, and the + theorem of algebra states that number is N (2N) for algebraic + (trigonometric) polynomials of degree N. + + The frequency transform of a map under the chosen basis must be concentrated + at low frequencies for the series to converge to the true function fast. + For periodic (non-periodic) maps, the best basis is a Fourier (Chebyshev) + series. Both converge exponentially, but the larger region of convergence in + the complex plane of Fourier series make it preferable in practice to choose + coordinate systems such that the function to approximate is periodic. The + Chebyshev series is preferred to other orthogonal polynomial series since + fast discrete polynomial transforms (DPT) are implemented via fast transform + to Chebyshev then DCT. Although nothing prohibits a direct DPT, we want to + rely on existing, optimized libraries. There are other reasons to prefer + Chebyshev series not discussed here. + + Therefore, |B| is interpolated to a Fourier-Chebyshev series in (α, ζ). 
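A minimal sketch of this procedure for a single field line, written with plain
NumPy and none of the DESC machinery; the |B| profile, resolution, and pitch
value below are made-up stand-ins for illustration only.

.. code-block:: python

    import numpy as np
    from numpy.polynomial import chebyshev as cheb

    def B_slice(zeta):
        # Stand-in for |B|(ζ) along one field line at fixed α; not DESC output.
        return 1.0 + 0.3 * np.cos(zeta) + 0.05 * np.cos(2 * zeta)

    domain = (0.0, 2 * np.pi)
    N = 16  # Chebyshev resolution

    def to_zeta(x):
        # Affine map from the Chebyshev interval [-1, 1] to the ζ domain.
        return (x + 1) / 2 * (domain[1] - domain[0]) + domain[0]

    # Interpolate |B| at Chebyshev points mapped into the domain.
    c = cheb.chebinterpolate(lambda x: B_slice(to_zeta(x)), N)

    # Bounce points solve |B|(ζ) = 1/λ. Subtracting the constant 1/λ only
    # shifts the zeroth Chebyshev coefficient; chebroots then finds the roots
    # from the eigenvalues of the Chebyshev companion matrix.
    pitch = 1 / 1.2
    c[0] -= 1 / pitch
    r = cheb.chebroots(c)
    r = r.real[(np.abs(r.imag) < 1e-9) & (np.abs(r.real) <= 1)]
    print(np.sort(to_zeta(r)))  # the two intersections bracketing the well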
+    The roots of f are computed as the eigenvalues of the Chebyshev companion
+    matrix. This will later be replaced with Boyd's method:
+    Computing real roots of a polynomial in Chebyshev series form through
+    subdivision. https://doi.org/10.1016/j.apnum.2005.09.007.
+
+    Computing accurate series expansions in (α, ζ) coordinates demands
+    particular interpolation points in that coordinate system. Newton iteration
+    is used to compute θ at these interpolation points. Note that interpolation
+    is necessary because there is no transformation that converts series
+    coefficients in periodic coordinates, e.g. (ϑ, ϕ), to a low order
+    polynomial basis in non-periodic coordinates. For example, one can obtain
+    series coefficients in (α, ϕ) coordinates from those in (ϑ, ϕ) as follows
+        g : ϑ, ϕ ↦ ∑ₘₙ aₘₙ exp(j [mϑ + nϕ])
+
+        g : α, ϕ ↦ ∑ₘₙ aₘₙ exp(j [mα + (m ι + n)ϕ])
+    However, the basis for the latter consists of trigonometric functions with
+    irrational frequencies since the rotational transform is irrational.
+    Globally convergent root-finding schemes for that basis (at fixed α) are
+    not known. The denominator of a close rational could be absorbed into the
+    coordinate ϕ, but this balloons the frequency, and hence the degree of the
+    series. Since Fourier series may converge faster than Chebyshev series,
+    an alternate strategy that should work is to interpolate |B| to a double
+    Fourier series in (ϑ, ϕ), then apply bisection methods to find roots of f
+    with mesh size inversely proportional to the max frequency along the field
+    line: M ι + N. ``Bounce2D`` does not use this approach because the
+    root-finding scheme is inferior.
+
+    After obtaining the bounce points, the supplied quadrature is performed.
+    By default, Gauss quadrature is performed after removing the singularity.
+    Fast Fourier transforms interpolate functions in the integrand to the
+    quadrature nodes.
+
+    Fast transforms are used where possible, though fast multipoint methods
+    are not yet implemented. For non-uniform interpolation, Vandermonde MMT
+    with the linear algebra libraries of JAX is used. It should be worthwhile
+    to use the inverse non-uniform fast transforms. Fast multipoint methods are
+    preferable because they are exact, but this requires more development work.
+    Future work may implement these techniques, along with empirical testing of
+    a few changes of variables for the Chebyshev interpolation that may allow
+    earlier truncation of the series without loss of accuracy.
+
+    See Also
+    --------
+    Bounce1D
+        Uses one-dimensional local spline methods for the same task.
+        An advantage of ``Bounce2D`` over ``Bounce1D`` is that the number of
+        coordinates on which the root-finding must be done to map from DESC to
+        Clebsch coordinates is fixed to ``M*N``, independent of the number of
+        toroidal transits.
+
+    Warnings
+    --------
+    It is assumed that ζ = ϕ.
+
     Attributes
     ----------
-    B : PiecewiseChebyshevBasis
+    _B : ChebyshevBasisSet
         Set of 1D Chebyshev spectral coefficients of |B| along field line.
         {|B|_α : ζ ↦ |B|(α, ζ) | α ∈ A } where A = (α₀, α₁, …, αₘ₋₁) is the
         sequence of poloidal coordinates that specify the field line.
-    T : PiecewiseChebyshevBasis
+    _T : ChebyshevBasisSet
         Set of 1D Chebyshev spectral coefficients of θ along field line.
         {θ_α : ζ ↦ θ(α, ζ) | α ∈ A } where A = (α₀, α₁, …, αₘ₋₁) is the
         sequence of poloidal coordinates that specify the field line.
-    L : int
-        Number of flux surfaces to compute on.
-    num_transit : int
-        Number of toroidal transits to follow field line.
-    N : int
-        Chebyshev spectral resolution.
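A rough end-to-end usage sketch based only on the signatures in this class;
the example equilibrium, resolutions, number of transits, and pitch values are
placeholders rather than a tested recipe.

.. code-block:: python

    from desc.backend import jnp
    from desc.examples import get
    from desc.grid import LinearGrid
    from desc.integrals import Bounce2D

    eq = get("HELIOTRON")  # placeholder equilibrium
    rho = jnp.array([0.5])
    grid = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, sym=False, NFP=eq.NFP)
    data = eq.compute(
        Bounce2D.required_names() + ["min_tz |B|", "max_tz |B|"], grid=grid
    )

    M, N = 8, 32  # Clebsch-grid resolutions, preferably powers of two
    bounce = Bounce2D(
        grid, data, Bounce2D.desc_from_clebsch(eq, rho, M, N), M, N, num_transit=3
    )

    def integrand(B, pitch):
        # Any scalar function of the interpolated arrays; B and pitch arrive
        # as keyword arguments.
        return jnp.sqrt(1 - pitch * B)

    # Pitch values strictly between 1/max|B| and 1/min|B| on each surface.
    pitch = jnp.linspace(
        1 / grid.compress(data["max_tz |B|"]),
        1 / grid.compress(data["min_tz |B|"]),
        12,
    )[1:-1]
    result = bounce.integrate(pitch, integrand, f=[])  # (pitch, surface, well)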
""" - domain = (0, 4 * jnp.pi) + domain = (0, 2 * jnp.pi) - # TODO: Assumes zeta = phi (alpha sequence) def __init__( self, grid, data, + desc_from_clebsch, M, N, - desc_from_clebsch, alpha_0=0.0, num_transit=50, - quad=leggauss(21), + quad=leggauss(32), automorphism=(automorphism_sin, grad_automorphism_sin), B_ref=1.0, L_ref=1.0, check=False, - plot=False, **kwargs, ): """Returns an object to compute bounce integrals. + Notes + ----- + Performance may improve significantly + if the spectral resolutions ``M`` and ``N`` are powers of two. + Parameters ---------- grid : Grid - Periodic tensor-product grid in (ρ, θ, ζ). + Tensor-product grid in (ρ, θ, ζ) with uniformly spaced nodes in + (2π × 2π) poloidal and toroidal coordinates. Note that below shape notation defines L = ``grid.num_rho``, m = ``grid.num_theta``, and n = ``grid.num_zeta``. data : dict[str, jnp.ndarray] - Data evaluated on grid. Must include ``FourierBounce.required_names()``. + Data evaluated on ``grid``. Must include ``FourierBounce.required_names()``. + desc_from_clebsch : jnp.ndarray + Shape (L * M * N, 3). + DESC coordinates (ρ, θ, ζ) sourced from the Clebsch coordinates + ``FourierChebyshevBasis.nodes(M,N,domain=FourierBounce.domain)``. M : int Grid resolution in poloidal direction for Clebsch coordinate grid. Preferably power of 2. A good choice is ``m``. If the poloidal stream @@ -830,20 +837,13 @@ def __init__( N : int Grid resolution in toroidal direction for Clebsch coordinate grid. Preferably power of 2. - desc_from_clebsch : jnp.ndarray - Shape (L * M * N, 3). - DESC coordinate grid (ρ, θ, ζ) sourced from the Clebsch coordinate - tensor-product grid (ρ, α, ζ) returned by - ``FourierChebyshevBasis.nodes(M,N,domain=FourierBounce.domain)``. alpha_0 : float Starting field line poloidal label. - TODO: Allow multiple starting labels for near-rational surfaces. - Concatenate along second to last axis of cheb. num_transit : int Number of toroidal transits to follow field line. quad : (jnp.ndarray, jnp.ndarray) Quadrature points xₖ and weights wₖ for the approximate evaluation of an - integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Default is 21 points. + integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Default is 32 points. automorphism : (Callable, Callable) or None The first callable should be an automorphism of the real interval [-1, 1]. The second callable should be the derivative of the first. This map defines @@ -855,13 +855,9 @@ def __init__( Optional. Reference length scale for normalization. check : bool Flag for debugging. Must be false for jax transformations. - plot : bool - Whether to plot stuff if ``check`` is true. Default is false. """ - errorif( - grid.sym, NotImplementedError, msg="Need grid that samples full domain." - ) + errorif(grid.sym, NotImplementedError, msg="Need grid that works with FFTs.") # Strictly increasing zeta knots enforces dζ > 0. # To retain dℓ = (|B|/B^ζ) dζ > 0 after fixing dζ > 0, we require # B^ζ = B⋅∇ζ > 0. This is equivalent to changing the sign of ∇ζ or [∂ℓ/∂ζ]|ρ,a. @@ -871,43 +867,44 @@ def __init__( check and kwargs.pop("warn", True) and jnp.any(data["B^zeta"] <= 0), msg="(∂ℓ/∂ζ)|ρ,a > 0 is required. Enforcing positive B^ζ.", ) + self._m = grid.num_theta + self._n = grid.num_zeta + self._b_sup_z = jnp.expand_dims( + transform_to_desc(grid, jnp.abs(data["B^zeta"]) / data["|B|"] * L_ref), + axis=1, + ) + self._x, self._w = get_quad(quad, automorphism) - T, B = _transform_to_clebsch(grid, M, N, desc_from_clebsch, data["|B|"] / B_ref) - alphas = _get_alphas( + # Compute global splines. 
+ T, B = _transform_to_clebsch(grid, desc_from_clebsch, M, N, data["|B|"] / B_ref) + # peel off field lines + alphas = get_alpha( alpha_0, grid.compress(data["iota"]), num_transit, - period=FourierBounce.domain[-1], + period=Bounce2D.domain[-1], ) - # Peel off field lines. - self.B = B.compute_cheb(alphas) - # Evaluating a set of Chebyshev series is more efficient than evaluating - # single Fourier Chebyshev series, so we get the Chebyshev series for - # all the other functions whose Fourier Chebyshev series is available. - # This statement holds even if fast 2D transform methods are used. - self.T = T.compute_cheb(alphas) - assert self.B.cheb.shape == self.T.cheb.shape - assert self.B.cheb.shape == (grid.num_rho, num_transit, N) - - # Cache these since they are used in every integral. - self._b_sup_z = _transform_to_desc( - grid, jnp.abs(data["B^zeta"]) / data["|B|"] * L_ref + self._B = B.compute_cheb(alphas) + # Evaluating set of Chebyshev series more efficient than evaluating + # Fourier Chebyshev series, so we project θ to Chebyshev series as well. + self._T = T.compute_cheb(alphas) + assert self._B.M == self._T.M == num_transit + assert self._B.N == self._T.N == N + assert ( + self._B.cheb.shape == self._T.cheb.shape == (grid.num_rho, num_transit, N) ) - self._x, self._w = get_quad_points(quad, automorphism) - self._check = check - self._plot = plot - self.m, self.n = grid.num_theta, grid.num_zeta @staticmethod - def desc_from_clebsch(eq, rho, M, N, **kwargs): + def desc_from_clebsch(eq, L, M, N, clebsch=None, **kwargs): """Return DESC coordinates of optimal Fourier Chebyshev basis nodes. Parameters ---------- eq : Equilibrium Equilibrium to use defining the coordinate mapping. - rho : jnp.ndarray - Flux surface coordinate values. + L : int or jnp.ndarray + Number of flux surfaces uniformly in [0, 1] on which to compute. + May also be an array of non-uniform coordinates. M : int Grid resolution in poloidal direction for Clebsch coordinate grid. Preferably power of 2. A good choice is ``m``. If the poloidal stream @@ -916,22 +913,30 @@ def desc_from_clebsch(eq, rho, M, N, **kwargs): N : int Grid resolution in toroidal direction for Clebsch coordinate grid. Preferably power of 2. + clebsch : jnp.ndarray + Optional, Clebsch coordinate tensor-product grid (ρ, α, ζ). + If given, ``L``, ``M``, and ``N`` are ignored. + kwargs : dict + Additional parameters to supply to the coordinate mapping function. + See ``desc.equilibrium.Equilibrium.map_coordinates``. Returns ------- - coords : jnp.ndarray + desc_coords : jnp.ndarray Shape (L * M * N, 3). DESC coordinate grid (ρ, θ, ζ) sourced from the Clebsch coordinate tensor-product grid (ρ, α, ζ). """ - coords = FourierChebyshevBasis.nodes(M, N, FourierBounce.domain, rho=rho) - return eq.map_coordinates( - coords, + if clebsch is None: + clebsch = FourierChebyshevBasis.nodes(M, N, L, Bounce2D.domain) + desc_coords = eq.map_coordinates( + coords=clebsch, inbasis=("rho", "alpha", "zeta"), period=(jnp.inf, 2 * jnp.pi, jnp.inf), **kwargs, ) + return desc_coords @staticmethod def required_names(): @@ -940,45 +945,77 @@ def required_names(): @staticmethod def reshape_data(grid, data, names): - """Reshape``data`` given by ``names`` for input to ``bounce_integrate``. + """Reshape``data`` given by ``names`` for input to ``self.integrate``. Parameters ---------- grid : Grid - Periodic tensor-product grid in (ρ, θ, ζ). + Tensor-product grid in (ρ, θ, ζ). data : dict[str, jnp.ndarray] Data evaluated on grid. 
names : list[str] - Strings of keys in ``data`` dict to reshape. + Strings of keys in ``data`` to reshape. Returns ------- f : list[jnp.ndarray] - List of reshaped data which may be given to ``bounce_integrate``. + List of reshaped data which may be given to ``self.integrate``. """ if isinstance(names, str): names = [names] - # Add dim to broadcast with axis of quadrature points. f = [grid.meshgrid_reshape(data[name], "rtz")[:, jnp.newaxis] for name in names] return f @property - def L(self): + def _L(self): """int: Number of flux surfaces to compute on.""" - return self.B.cheb.shape[0] + return self._B.cheb.shape[0] - @property - def num_transit(self): - """int: Number of toroidal transits to follow field line.""" - return self.B.cheb.shape[-2] + def bounce_points(self, pitch, num_well=None): + """Compute bounce points. - @property - def N(self): - """int: Chebyshev spectral resolution.""" - return self.B.cheb.shape[-1] + Parameters + ---------- + pitch : jnp.ndarray + Shape (P, L). + λ values to evaluate the bounce integral at each field line. λ(ρ) is + specified by ``pitch[...,ρ]`` where in the latter the labels ρ are + interpreted as the index into the last axis that corresponds to that field + line. If two-dimensional, the first axis is the batch axis. + num_well : int or None + If not specified, then all bounce points are returned in an array whose + last axis has size ``num_transit*(N-1)``. If there were less than that many + wells detected along a field line, then the last axis of the returned + arrays, which enumerates bounce points for a particular field line and + pitch, is padded with zero. - def bounce_integrate(self, pitch, integrand, f, weight=None, num_well=None): + Specify to return the first ``num_well`` pairs of bounce points for each + pitch along each field line. This is useful if ``num_well`` tightly + bounds the actual number of wells. As a reference, there are typically + at most 5 wells per toroidal transit for a given pitch. + + Returns + ------- + bp1, bp2 : (jnp.ndarray, jnp.ndarray) + Shape (P, L, num_well). + The field line-following coordinates of bounce points. + The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, + respectively, for the bounce integrals. + + """ + return self._B.intersect1d(1 / jnp.atleast_2d(pitch), num_well) + + def check_bounce_points(self, bp1, bp2, pitch, plot=True, **kwargs): + """Check that bounce points are computed correctly and plot them.""" + kwargs.setdefault( + "title", r"Intersects $\zeta$ for $\vertB(\zeta)\vert = 1/\lambda$" + ) + kwargs.setdefault("hlabel", r"$\zeta$") + kwargs.setdefault("vlabel", r"$\vertB\vert(\zeta)$") + self._B.check_intersect1d(bp1, bp2, 1 / pitch, plot, **kwargs) + + def integrate(self, pitch, integrand, f, weight=None, num_well=None): """Bounce integrate ∫ f(ℓ) dℓ. Computes the bounce integral ∫ f(ℓ) dℓ for every specified field line @@ -1000,28 +1037,27 @@ def bounce_integrate(self, pitch, integrand, f, weight=None, num_well=None): bounce integral of ``integrand(*f,B=B,pitch=pitch)``. f : list[jnp.ndarray] Shape (L, 1, m, n). - Arguments to the callable ``integrand``. These should be real scalar-valued - functions in the bounce integrand evaluated on the periodic DESC coordinate - (ρ, θ, ζ) tensor-product grid. + Real scalar-valued (2π × 2π) periodic in (θ, ζ) functions evaluated + on the ``grid`` supplied to construct this object. These functions + should be arguments to the callable ``integrand``. 
Use the method + ``self.reshape_data`` to reshape the data into the expected shape. weight : jnp.ndarray Shape (L, 1, m, n). If supplied, the bounce integral labeled by well j is weighted such that the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` - evaluated at the deepest point in the magnetic well. + interpolated to the deepest point in the magnetic well. Use the method + ``self.reshape_data`` to reshape the data into the expected shape. num_well : int or None If not specified, then all bounce integrals are returned in an array whose last axis has size ``num_transit*(N-1)``. If there were less than that many wells detected along a field line, then the last axis of the returned array, - which enumerates bounce integrals for a particular field line and - pitch, is padded with zero. + which enumerates bounce integrals for a particular field line and pitch, + is padded with zero. Specify to return the bounce integrals between the first ``num_well`` wells for each pitch along each field line. This is useful if ``num_well`` - tightly bounds the actual number of wells. To obtain a good - choice for ``num_well``, plot the field line with all the bounce points - identified. This will be done automatically if the ``bounce_integral`` - function is called with ``check=True`` and ``plot=True``. As a reference, - there are typically <= 5 wells per toroidal transit. + tightly bounds the actual number of wells. As a reference, there are + typically at most 5 wells per toroidal transit for a given pitch. Returns ------- @@ -1031,33 +1067,357 @@ def bounce_integrate(self, pitch, integrand, f, weight=None, num_well=None): Last axis enumerates the bounce integrals. """ - errorif(weight is not None, NotImplementedError) pitch = jnp.atleast_2d(pitch) - bp1, bp2 = self.B.bounce_points(pitch, num_well) - if self._check: - self.B.check_bounce_points(bp1, bp2, pitch, self._plot) - result = self._bounce_quadrature(bp1, bp2, pitch, integrand, f) - assert result.shape == ( - pitch.shape[0], - self.L, - setdefault(num_well, self.N - 1), + bp1, bp2 = self.bounce_points(pitch, num_well) + result = self._integrate(bp1, bp2, pitch, integrand, f) + errorif(weight is not None, NotImplementedError) + return result + + def _integrate(self, bp1, bp2, pitch, integrand, f): + assert bp1.ndim == 3 + assert bp1.shape == bp2.shape + assert pitch.ndim == 2 + W = bp1.shape[-1] # number of wells + shape = (pitch.shape[0], self._L, W, self._x.size) + + # quadrature points parameterized by ζ for each pitch and flux surface + Q_zeta = flatten_matrix( + bijection_from_disc(self._x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) + ) + # quadrature points in (θ, ζ) coordinates + Q = jnp.stack([self._T.eval1d(Q_zeta), Q_zeta], axis=-1) + + # interpolate and integrate + f = [interp_rfft2(Q, f_i, axes=(-1, -2)).reshape(shape) for f_i in f] + result = jnp.dot( + integrand( + *f, + B=self._B.eval1d(Q_zeta).reshape(shape), + pitch=pitch[..., jnp.newaxis, jnp.newaxis], + ) + / irfft2_non_uniform( + Q, self._b_sup_z, self._m, self._n, axes=(-1, -2) + ).reshape(shape), + self._w, ) + assert result.shape == (pitch.shape[0], self._L, W) return result - def _bounce_quadrature(self, bp1, bp2, pitch, integrand, f): - """Bounce integrate ∫ f(ℓ) dℓ. + +class Bounce1D: + """Computes bounce integrals using one-dimensional local spline methods. 
+ + The bounce integral is defined as ∫ f(ℓ) dℓ, where + dℓ parameterizes the distance along the field line in meters, + f(ℓ) is the quantity to integrate along the field line, + and the boundaries of the integral are bounce points ζ₁, ζ₂ s.t. λ|B|(ζᵢ) = 1, + where λ is a constant proportional to the magnetic moment over energy + and |B| is the norm of the magnetic field. + + For a particle with fixed λ, bounce points are defined to be the location on the + field line such that the particle's velocity parallel to the magnetic field is zero. + The bounce integral is defined up to a sign. We choose the sign that corresponds to + the particle's guiding center trajectory traveling in the direction of increasing + field-line-following coordinate ζ. + + Notes + ----- + Motivation and description of algorithm for developers. + + For applications which reduce to computing a nonlinear function of distance + along field lines between bounce points, it is required to identify these + points with field-line-following coordinates. In the special case of a linear + function summing integrals between bounce points over a flux surface, arbitrary + coordinate systems may be used as this operation becomes a surface integral, + which is invariant to the order of summation. + + The DESC coordinate system is related to field-line-following coordinate + systems by a relation whose solution is best found with Newton iteration. + There is a unique real solution to this equation, so Newton iteration is a + globally convergent root-finding algorithm here. For the task of finding + bounce points, even if the inverse map: θ(α, ζ) was known, Newton iteration + is not a globally convergent algorithm to find the real roots of + f : ζ ↦ |B|(ζ) − 1/λ where ζ is a field-line-following coordinate. + For this, function approximation of |B| is necessary. + + The function approximation in ``Bounce1D`` is ignorant that the objects to + approximate are defined on a bounded subset of ℝ². Instead, the domain is + projected to ℝ, where information sampled about the function at infinity + cannot support reconstruction of the function near the origin. As the + functions of interest do not vanish at infinity, pseudo-spectral techniques + are not used. Instead, function approximation is done with local splines. + This is useful if one can efficiently obtain data along field lines. + + After obtaining the bounce points, the supplied quadrature is performed. + By default, Gauss quadrature is performed after removing the singularity. + Local splines interpolate functions in the integrand to the quadrature nodes. + + See Also + -------- + Bounce2D : Uses two-dimensional pseudo-spectral techniques for the same task. + + Warnings + -------- + The supplied data must be from a Clebsch coordinate (ρ, α, ζ) tensor-product grid. + The field-line-following coordinate ζ must be strictly increasing. + The ζ coordinate is preferably uniformly spaced, although this is not required. + These are used as knots to construct splines. + A good reference density is 100 knots per toroidal transit. + + Attributes + ---------- + zeta : jnp.ndarray + Shape (N, ). + Field line-following ζ coordinates of spline knots. + B : jnp.ndarray + Shape (4, L * M, N - 1). + Polynomial coefficients of the spline of |B| in local power basis. + First axis enumerates the coefficients of power series. Second axis + enumerates the splines along the field lines. Last axis enumerates the + polynomials that compose the spline along a particular field line. 
+    dB_dz : jnp.ndarray
+        Shape (3, L * M, N - 1).
+        Polynomial coefficients of the spline of (∂|B|/∂ζ)|ρ,α in local power basis.
+        First axis enumerates the coefficients of power series. Second axis
+        enumerates the splines along the field lines. Last axis enumerates the
+        polynomials that compose the spline along a particular field line.
+
+    """
+
+    def __init__(
+        self,
+        grid,
+        data,
+        quad=leggauss(32),
+        automorphism=(automorphism_sin, grad_automorphism_sin),
+        Bref=1.0,
+        Lref=1.0,
+        check=False,
+        **kwargs,
+    ):
+        """Returns an object to compute bounce integrals.
+
+        Parameters
+        ----------
+        grid : Grid
+            Clebsch coordinate (ρ, α, ζ) tensor-product grid.
+            Note that below shape notation defines
+            L = ``grid.num_rho``, M = ``grid.num_alpha``, and N = ``grid.num_zeta``.
+        data : dict[str, jnp.ndarray]
+            Data evaluated on grid. Must include names in
+            ``Bounce1D.required_names()``.
+        quad : (jnp.ndarray, jnp.ndarray)
+            Quadrature points xₖ and weights wₖ for the approximate evaluation of an
+            integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Default is 32 points.
+        automorphism : (Callable, Callable) or None
+            The first callable should be an automorphism of the real interval [-1, 1].
+            The second callable should be the derivative of the first. This map defines
+            a change of variable for the bounce integral. The choice made for the
+            automorphism will affect the performance of the quadrature method.
+        Bref : float
+            Optional. Reference magnetic field strength for normalization.
+        Lref : float
+            Optional. Reference length scale for normalization.
+        check : bool
+            Flag for debugging. Must be false for jax transformations.
+
+        """
+        # Strictly increasing zeta knots enforces dζ > 0.
+        # To retain dℓ = (|B|/B^ζ) dζ > 0 after fixing dζ > 0, we require
+        # B^ζ = B⋅∇ζ > 0. This is equivalent to changing the sign of ∇ζ or [∂ℓ/∂ζ]|ρ,a.
+        # Recall dζ = ∇ζ⋅dR, implying 1 = ∇ζ⋅(e_ζ|ρ,a). Hence, a sign change in ∇ζ
+        # requires the same sign change in e_ζ|ρ,a to retain the metric identity.
+        warnif(
+            check and kwargs.pop("warn", True) and jnp.any(data["B^zeta"] <= 0),
+            msg="(∂ℓ/∂ζ)|ρ,a > 0 is required. Enforcing positive B^ζ.",
+        )
+        data = {
+            "B^zeta": jnp.abs(data["B^zeta"]) * Lref / Bref,
+            "B^zeta_z|r,a": data["B^zeta_z|r,a"]
+            * jnp.sign(data["B^zeta"])
+            * Lref
+            / Bref,
+            "|B|": data["|B|"] / Bref,
+            "|B|_z|r,a": data["|B|_z|r,a"] / Bref,  # This is already the correct sign.
+        }
+        self._data = {
+            key: grid.meshgrid_reshape(val, "raz").reshape(-1, grid.num_zeta)
+            for key, val in data.items()
+        }
+        self._x, self._w = get_quad(quad, automorphism)
+
+        # Compute local splines.
+        self.zeta = grid.compress(grid.nodes[:, 2], surface_label="zeta")
+        self.B = jnp.moveaxis(
+            CubicHermiteSpline(
+                x=self.zeta,
+                y=self._data["|B|"],
+                dydx=self._data["|B|_z|r,a"],
+                axis=-1,
+                check=check,
+            ).c,
+            source=1,
+            destination=-1,
+        )
+        self.dB_dz = polyder_vec(self.B)
+        degree = 3
+        assert self.B.shape[0] == degree + 1
+        assert self.dB_dz.shape[0] == degree
+        assert self.B.shape[-1] == self.dB_dz.shape[-1] == grid.num_zeta - 1
+
+    @staticmethod
+    def required_names():
+        """Return names in ``data_index`` required to compute bounce integrals."""
+        return ["B^zeta", "B^zeta_z|r,a", "|B|", "|B|_z|r,a"]
+
+    @staticmethod
+    def reshape_data(grid, data, names):
+        """Reshape ``data`` given by ``names`` for input to ``self.integrate``.
+
+        Parameters
+        ----------
+        grid : Grid
+            Clebsch coordinate (ρ, α, ζ) tensor-product grid.
+        data : dict[str, jnp.ndarray]
+            Data evaluated on grid.
+ names : list[str] + Strings of keys in ``data`` dict to reshape. + + Returns + ------- + f : list[jnp.ndarray] + List of reshaped data which may be given to ``self.integrate``. + + """ + if isinstance(names, str): + names = [names] + f = [ + grid.meshgrid_reshape(data[name], "raz").reshape(-1, grid.num_zeta) + for name in names + ] + return f + + def bounce_points(self, pitch, num_well=None): + """Compute bounce points. + + Parameters + ---------- + pitch : jnp.ndarray + Shape (P, L). + λ values to evaluate the bounce integral at each field line. λ(ρ,α) is + specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are + interpreted as the index into the last axis that corresponds to that field + line. If two-dimensional, the first axis is the batch axis. + num_well : int or None + If not specified, then all bounce points are returned in an array whose + last axis has size ``num_transit*(N-1)``. If there were less than that many + wells detected along a field line, then the last axis of the returned + arrays, which enumerates bounce points for a particular field line and + pitch, is padded with zero. + + Specify to return the first ``num_well`` pairs of bounce points for each + pitch along each field line. This is useful if ``num_well`` tightly + bounds the actual number of wells. As a reference, there are typically + at most 5 wells per toroidal transit for a given pitch. + + Returns + ------- + bp1, bp2 : (jnp.ndarray, jnp.ndarray) Shape (P, L, num_well). - The field line-following coordinates of bounce points for a given pitch - along a field line. The pairs ``bp1`` and ``bp2`` form left and right - integration boundaries, respectively, for the bounce integrals. + The field line-following coordinates of bounce points. + The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, + respectively, for the bounce integrals. + + """ + return bounce_points( + jnp.atleast_2d(pitch), + self.zeta, + self.B, + self.dB_dz, + num_well, + check=False, + ) + + def check_bounce_points(self, bp1, bp2, pitch, plot=True, **kwargs): + """Check that bounce points are computed correctly and plot them.""" + eps = jnp.finfo(jnp.array(1.0).dtype).eps * 10 + + assert bp1.shape == bp2.shape + mask = (bp1 - bp2) == 0 + bp1 = jnp.where(mask, jnp.nan, bp1) + bp2 = jnp.where(mask, jnp.nan, bp2) + + err_1 = jnp.any(bp1 > bp2, axis=-1) + err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1], axis=-1) + + P, S, _ = bp1.shape + for s in range(S): + B = PPoly(self.B[:, s], self.zeta) + for p in range(P): + B_m = B((bp1[p, s] + bp2[p, s]) / 2) + err_3 = jnp.any(B_m > 1 / pitch[p, s] + eps) + if not (err_1[p, s] or err_2[p, s] or err_3): + continue + _bp1 = bp1[p, s][mask[p, s]] + _bp2 = bp2[p, s][mask[p, s]] + if plot: + self._plot1d( + ppoly=B, + z1=_bp1, + z2=_bp2, + k=1 / pitch[p, s], + title=kwargs.pop( + "title", + r"Intersects $\zeta$ for $\vertB(\zeta)\vert = 1/\lambda$", + ), + hlabel=kwargs.pop("hlabel", r"$\zeta$"), + vlabel=kwargs.pop("vlabel", r"$\vertB\vert(\zeta)$"), + **kwargs, + ) + print(" bp1 | bp2") + print(jnp.column_stack([_bp1, _bp2])) + assert not err_1[p, s], "Intersects have an inversion.\n" + assert not err_2[p, s], "Detected discontinuity.\n" + assert not err_3, ( + f"Detected |B| = {B_m[mask[p, s]]} > {1 / pitch[p, s] + eps} = 1/λ " + "in well. 
Use more knots.\n" + ) + if plot: + self._plot1d( + ppoly=B, + z1=bp1[:, s], + z2=bp2[:, s], + k=1 / pitch[:, s], + title=kwargs.pop( + "title", + r"Intersects $\zeta$ for $\vertB(\zeta)\vert = 1/\lambda$", + ), + hlabel=kwargs.pop("hlabel", r"$\zeta$"), + vlabel=kwargs.pop("vlabel", r"$\vertB\vert(\zeta)$"), + **kwargs, + ) + + def integrate( + self, + pitch, + integrand, + f, + weight=None, + num_well=None, + method="cubic", + batch=True, + check=False, + ): + """Bounce integrate ∫ f(ℓ) dℓ. + + Computes the bounce integral ∫ f(ℓ) dℓ for every specified field line + for every λ value in ``pitch``. + + Parameters + ---------- pitch : jnp.ndarray Shape (P, L). - λ values to evaluate the bounce integral at each field line. λ(ρ) is - specified by ``pitch[...,ρ]`` where in the latter the labels ρ are + λ values to evaluate the bounce integral at each field line. λ(ρ,α) is + specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are interpreted as the index into the last axis that corresponds to that field line. If two-dimensional, the first axis is the batch axis. integrand : callable @@ -1067,45 +1427,136 @@ def _bounce_quadrature(self, bp1, bp2, pitch, integrand, f): ``B`` and ``pitch``. A quadrature will be performed to approximate the bounce integral of ``integrand(*f,B=B,pitch=pitch)``. f : list[jnp.ndarray] - Shape (L, 1, m, n). - Arguments to the callable ``integrand``. These should be real scalar-valued - functions in the bounce integrand evaluated on the periodic DESC coordinate - (ρ, θ, ζ) tensor-product grid. + Shape (L * M, N). + Real scalar-valued functions evaluated on the ``grid`` supplied to + construct this object. These functions should be arguments to the callable + ``integrand``. Use the method ``self.reshape_data`` to reshape the data + into the expected shape. + weight : jnp.ndarray + Shape (L * M, N). + If supplied, the bounce integral labeled by well j is weighted such that + the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` + interpolated to the deepest point in the magnetic well. Use the method + ``self.reshape_data`` to reshape the data into the expected shape. + num_well : int or None + If not specified, then all bounce integrals are returned in an array whose + last axis has size ``(N-1)*degree``. If there were less than that many + wells detected along a field line, then the last axis of the returned array, + which enumerates bounce integrals for a particular field line and pitch, + is padded with zero. + + Specify to return the bounce integrals between the first ``num_well`` + wells for each pitch along each field line. This is useful if ``num_well`` + tightly bounds the actual number of wells. As a reference, there are + typically at most 5 wells per toroidal transit for a given pitch. + method : str + Method of interpolation for functions contained in ``f``. + See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. + Default is cubic C1 local spline. + batch : bool + Whether to perform computation in a batched manner. Default is true. + check : bool + Flag for debugging. Must be false for jax transformations. Returns ------- result : jnp.ndarray - Shape (P, L, num_well). + Shape (P, L*M, num_well). First axis enumerates pitch values. Second axis enumerates the field lines. Last axis enumerates the bounce integrals. 
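As a rough usage sketch, assuming a ``Bounce1D`` instance ``bounce`` already
built from a field-line-following ``grid`` and ``data`` that also include the
drift quantities named below; the pitch values are placeholders.

.. code-block:: python

    from desc.backend import jnp

    # Quantities entering the integrand, reshaped for interpolation.
    f = Bounce1D.reshape_data(grid, data, ["cvdrift", "gbdrift"])

    def integrand(cvdrift, gbdrift, B, pitch):
        # Placeholder kernel; the arrays in ``f`` arrive positionally and
        # B, pitch as keywords, all evaluated at the quadrature points.
        return (cvdrift + gbdrift) * jnp.sqrt(1 - pitch * B)

    pitch = jnp.array([[0.4], [0.5]])  # shape (P, L*M), single field line
    result = bounce.integrate(pitch, integrand, f)
    # result.shape == (2, 1, (bounce.zeta.size - 1) * 3) when num_well is None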
""" - assert bp1.ndim == 3 - assert bp1.shape == bp2.shape - assert pitch.ndim == 2 - W = bp1.shape[-1] # number of wells - shape = (pitch.shape[0], self.L, W, self._x.size) - - # quadrature points parameterized by ζ for each pitch and flux surface - Q_zeta = _flatten_matrix( - bijection_from_disc( - self._x, - bp1[..., jnp.newaxis], - bp2[..., jnp.newaxis], - ) + pitch = jnp.atleast_2d(pitch) + bp1, bp2 = self.bounce_points(pitch, num_well) + result = _bounce_quadrature( + bp1=bp1, + bp2=bp2, + x=self._x, + w=self._w, + integrand=integrand, + f=f, + B_sup_z=self._data["B^zeta"], + B_sup_z_ra=self._data["B^zeta_z"], + B=self._data["|B|"], + B_z_ra=self._data["|B|"], + pitch=pitch, + knots=self.zeta, + method=method, + batch=batch, + check=check, ) - # quadrature points in (θ, ζ) coordinates - Q_desc = jnp.stack([self.T.eval1d(Q_zeta), Q_zeta], axis=-1) - # interpolate and integrate - f = [interp_rfft2(Q_desc, f_i).reshape(shape) for f_i in f] - result = jnp.dot( - integrand( - *f, - B=self.B.eval1d(Q_zeta).reshape(shape), - pitch=pitch[..., jnp.newaxis, jnp.newaxis], + if weight is not None: + result *= _interp_to_argmin_B_soft( + f=weight, + bp1=bp1, + bp2=bp2, + knots=self.zeta, + B_c=self.B, + B_z_ra_c=self.dB_dz, + method=method, ) - / irfft2_non_uniform(Q_desc, self._b_sup_z, self.m, self.n).reshape(shape), - self._w, - ) - assert result.shape == (pitch.shape[0], self.L, W) + assert result.shape[-1] == setdefault(num_well, (self.zeta.size - 1) * 3) return result + + def _plot1d( + self, + ppoly, + z1=None, + z2=None, + k=None, + k_transparency=0.5, + num=1000, + title=r"Intersects $z$ for $f(z) - k = 0$", + hlabel=r"$z$", + vlabel=r"$f(z)$", + show=True, + ): + """Plot the function ``f`` defined by the piecewise polynomial. + + Parameters + ---------- + ppoly : PPoly + Spline of f over ``self.zeta``. + z1 : jnp.ndarray + Shape (k.shape[0], W). + Optional, intersects with ∂f/∂z <= 0. + z2 : jnp.ndarray + Shape (k.shape[0], W). + Optional, intersects with ∂f/∂z >= 0. + k : jnp.ndarray + Shape (k.shape[0], ). + Optional, k such that f(z) = k. + k_transparency : float + Transparency of pitch lines. + num : int + Number of points to evaluate for plot. + title : str + Plot title. + hlabel : str + Horizontal axis label. + vlabel : str + Vertical axis label. + show : bool + Whether to show the plot. Default is true. + + Returns + ------- + fig, ax : matplotlib figure and axes + + """ + fig, ax = plt.subplots() + legend = {} + + z = jnp.linspace(start=self.zeta[0], stop=self.zeta[-1], num=num) + add2legend(legend, ax.plot(z, ppoly(z), label=vlabel)) + plot_intersect(ax, legend, z1, z2, k, k_transparency) + + ax.set_xlabel(hlabel) + ax.set_ylabel(vlabel) + ax.legend(legend.values(), legend.keys(), loc="lower right") + ax.set_title(title) + plt.tight_layout() + if show: + plt.show() + plt.close() + return fig, ax diff --git a/desc/integrals/interp_utils.py b/desc/integrals/interp_utils.py index 3180ddea90..4a9d4e09bb 100644 --- a/desc/integrals/interp_utils.py +++ b/desc/integrals/interp_utils.py @@ -10,9 +10,9 @@ from desc.integrals.quad_utils import bijection_from_disc from desc.utils import Index, errorif -# TODO: Transformation to make nodes uniform Boyd eq. 16.46 pg. 336. -# Shouldn't really change locations of complex poles for us, so convergence -# rate will still be good. This will basically do spectral condensation. +# TODO: Transformation to make nodes more uniform Boyd eq. 16.46 pg. 336. 
+# Have a hunch it won't change locations of complex poles much, so using +# more uniformly spaced nodes could speed up convergence. def cheb_pts(N, lobatto=False, domain=(-1, 1)): @@ -21,8 +21,8 @@ def cheb_pts(N, lobatto=False, domain=(-1, 1)): Notes ----- This is a common definition of the Chebyshev points (see Boyd, Chebyshev and - Fourier Spectral Methods p. 498). These are the points demanded by Discrete - Cosine Transformations to interpolate Chebyshev series because the cosine + Fourier Spectral Methods p. 498). These are the points demanded by discrete + cosine transformations to interpolate Chebyshev series because the cosine basis for the DCT is defined on [0, π]. They differ in ordering from the points returned by @@ -147,7 +147,7 @@ def interp_rfft(xq, f, axis=-1): Real query points where interpolation is desired. Shape of ``xq`` must broadcast with arrays of shape ``np.delete(f.shape,axis)``. f : jnp.ndarray - Real function values on uniform 2π periodic grid to interpolate. + Real 2π periodic function values on uniform grid to interpolate. axis : int Axis along which to transform. @@ -213,10 +213,12 @@ def interp_rfft2(xq, f, axes=(-2, -1)): Shape ``xq.shape[:-1]`` must broadcast with shape ``np.delete(f.shape,axes)``. f : jnp.ndarray Shape (..., f.shape[-2], f.shape[-1]). - Real function values on uniform (2π × 2π) periodic tensor-product grid to - interpolate. + Real (2π × 2π) periodic function values on uniform tensor-product grid + to interpolate. axes : tuple[int, int] Axes along which to transform. + The real transform is done along ``axes[-1]``, so it will be more + efficient for that to denote the larger size axis in ``axes``. Returns ------- @@ -283,13 +285,38 @@ def irfft2_non_uniform(xq, a, M, N, axes=(-2, -1)): return fq +def transform_to_desc(grid, f): + """Transform to DESC spectral domain. + + Parameters + ---------- + grid : Grid + Tensor-product grid in (θ, ζ) with uniformly spaced nodes in + (2π × 2π) poloidal and toroidal coordinates. + f : jnp.ndarray + Function evaluated on ``grid``. + + Returns + ------- + a : jnp.ndarray + Shape (grid.num_rho, grid.num_theta // 2 + 1, grid.num_zeta) + Coefficients of 2D real FFT. + + """ + f = grid.meshgrid_reshape(f, order="rtz") + a = rfft2(f, axes=(-1, -2), norm="forward") + # Real fft done over poloidal since grid.num_theta > grid.num_zeta usually. + assert a.shape == (grid.num_rho, grid.num_theta // 2 + 1, grid.num_zeta) + return a + + def cheb_from_dct(a, axis=-1): - """Get Discrete Chebyshev Transform from Discrete Cosine Transform. + """Get discrete Chebyshev transform from discrete cosine transform. Parameters ---------- a : jnp.ndarray - Discrete Cosine Transform coefficients, e.g. + Discrete cosine transform coefficients, e.g. ``a=dct(f,type=2,axis=axis,norm="forward")``. The discrete cosine transformation used by scipy is defined here. docs.scipy.org/doc/scipy/reference/generated/scipy.fft.dct.html#scipy.fft.dct @@ -307,7 +334,7 @@ def cheb_from_dct(a, axis=-1): def interp_dct(xq, f, lobatto=False, axis=-1): - """Interpolate ``f`` to ``xq`` with Discrete Chebyshev Transform. + """Interpolate ``f`` to ``xq`` with discrete Chebyshev transform. 
Parameters ---------- @@ -329,7 +356,7 @@ def interp_dct(xq, f, lobatto=False, axis=-1): """ lobatto = bool(lobatto) - errorif(lobatto, NotImplementedError) + errorif(lobatto, NotImplementedError, "JAX hasn't implemented type 1 DCT.") assert f.ndim >= 1 a = cheb_from_dct(dct(f, type=2 - lobatto, axis=axis), axis) / ( f.shape[axis] - lobatto @@ -339,7 +366,7 @@ def interp_dct(xq, f, lobatto=False, axis=-1): def idct_non_uniform(xq, a, n, axis=-1): - """Evaluate Discrete Chebyshev Transform coefficients ``a`` at ``xq`` ∈ [-1, 1]. + """Evaluate discrete Chebyshev transform coefficients ``a`` at ``xq`` ∈ [-1, 1]. Parameters ---------- @@ -347,7 +374,7 @@ def idct_non_uniform(xq, a, n, axis=-1): Real query points where interpolation is desired. Shape of ``xq`` must broadcast with shape ``np.delete(a.shape,axis)``. a : jnp.ndarray - Discrete Chebyshev Transform coefficients. + Discrete Chebyshev transform coefficients. n : int Spectral resolution of ``a``. axis : int @@ -361,102 +388,78 @@ def idct_non_uniform(xq, a, n, axis=-1): """ assert a.ndim >= 1 a = jnp.moveaxis(a, axis, -1) - basis = chebvander(xq, n - 1) # Could use Clenshaw recursion with fq = chebval(xq, a, tensor=False). + basis = chebvander(xq, n - 1) fq = jnp.linalg.vecdot(basis, a) return fq -# TODO: upstream cubic spline polynomial root finding to interpax - +def polyder_vec(c): + """Coefficients for the derivatives of the given set of polynomials. -def _filter_distinct(r, sentinel, eps): - """Set all but one of matching adjacent elements in ``r`` to ``sentinel``.""" - # eps needs to be low enough that close distinct roots do not get removed. - # Otherwise, algorithms relying on continuity will fail. - mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=sentinel), 0, atol=eps) - r = jnp.where(mask, sentinel, r) - return r + Parameters + ---------- + c : jnp.ndarray + First axis should store coefficients of a polynomial. For a polynomial given by + ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0]-1``, coefficient cᵢ should be stored at + ``c[n-i]``. + Returns + ------- + poly : jnp.ndarray + Coefficients of polynomial derivative, ignoring the arbitrary constant. That is, + ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁻¹, where n is + ``c.shape[0]-1``. -def _concat_sentinel(r, sentinel, num=1): - """Concat ``sentinel`` ``num`` times to ``r`` on last axis.""" - sent = jnp.broadcast_to(sentinel, (*r.shape[:-1], num)) - return jnp.append(r, sent, axis=-1) + """ + poly = (c[:-1].T * jnp.arange(c.shape[0] - 1, 0, -1)).T + return poly -def _root_linear(a, b, sentinel, eps, distinct=False): - """Return r such that a r + b = 0.""" - return safediv(-b, a, jnp.where(jnp.abs(b) <= eps, 0, sentinel)) +def polyval_vec(x, c): + """Evaluate the set of polynomials ``c`` at the points ``x``. + Note this function is not the same as ``np.polynomial.polynomial.polyval(x,c)``. 
-def _root_quadratic(a, b, c, sentinel, eps, distinct): - """Return r such that a r² + b r + c = 0, assuming real coefficients and roots.""" - # numerical.recipes/book.html, page 227 - discriminant = b**2 - 4 * a * c - q = -0.5 * (b + jnp.sign(b) * jnp.sqrt(jnp.abs(discriminant))) - r1 = jnp.where( - discriminant < 0, - sentinel, - safediv(q, a, _root_linear(b, c, sentinel, eps)), - ) - r2 = jnp.where( - # more robust to remove repeated roots with discriminant - (discriminant < 0) | (distinct & (discriminant <= eps)), - sentinel, - safediv(c, q, sentinel), - ) - return jnp.stack([r1, r2], axis=-1) + Parameters + ---------- + x : jnp.ndarray + Real coordinates at which to evaluate the set of polynomials. + c : jnp.ndarray + First axis should store coefficients of a polynomial. For a polynomial given by + ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0]-1``, coefficient cᵢ should be stored at + ``c[n-i]``. + Returns + ------- + val : jnp.ndarray + Polynomial with given coefficients evaluated at given points. -def _root_cubic(a, b, c, d, sentinel, eps, distinct): - """Return r such that a r³ + b r² + c r + d = 0, assuming real coef and roots.""" - # numerical.recipes/book.html, page 228 + Examples + -------- + .. code-block:: python - def irreducible(Q, R, b, mask): - # Three irrational real roots. - theta = jnp.arccos(R / jnp.sqrt(jnp.where(mask, Q**3, R**2 + 1))) - return jnp.moveaxis( - -2 - * jnp.sqrt(Q) - * jnp.stack( - [ - jnp.cos(theta / 3), - jnp.cos((theta + 2 * jnp.pi) / 3), - jnp.cos((theta - 2 * jnp.pi) / 3), - ] + val = polyval_vec(x, c) + if val.ndim != max(x.ndim, c.ndim - 1): + raise ValueError(f"Incompatible shapes {x.shape} and {c.shape}.") + for index in np.ndindex(c.shape[1:]): + idx = (..., *index) + np.testing.assert_allclose( + actual=val[idx], + desired=np.poly1d(c[idx])(x[idx]), + err_msg=f"Failed with shapes {x.shape} and {c.shape}.", ) - - b / 3, - source=0, - destination=-1, - ) - def reducible(Q, R, b): - # One real and two complex roots. - A = -jnp.sign(R) * (jnp.abs(R) + jnp.sqrt(jnp.abs(R**2 - Q**3))) ** (1 / 3) - B = safediv(Q, A) - r1 = (A + B) - b / 3 - return _concat_sentinel(r1[..., jnp.newaxis], sentinel, num=2) + """ + # Better than Horner's method as we expect to evaluate low order polynomials. + # No need to use fast multipoint evaluation techniques for the same reason. + val = jnp.linalg.vecdot( + polyvander(x, c.shape[0] - 1), jnp.moveaxis(jnp.flipud(c), 0, -1) + ) + return val - def root(b, c, d): - b = safediv(b, a) - c = safediv(c, a) - d = safediv(d, a) - Q = (b**2 - 3 * c) / 9 - R = (2 * b**3 - 9 * b * c + 27 * d) / 54 - mask = R**2 < Q**3 - return jnp.where( - mask[..., jnp.newaxis], - irreducible(jnp.abs(Q), R, b, mask), - reducible(Q, R, b), - ) - return jnp.where( - # Tests catch failure here if eps < 1e-12 for 64 bit jax. - jnp.expand_dims(jnp.abs(a) <= eps, axis=-1), - _concat_sentinel(_root_quadratic(b, c, d, sentinel, eps, distinct), sentinel), - root(b, c, d), - ) +# TODO: Eventually do a PR to move this stuff into interpax. _roots = jnp.vectorize(partial(jnp.roots, strip_zeros=False), signature="(m)->(n)") @@ -522,6 +525,7 @@ def poly_root( # Compute from analytic formula to avoid the issue of complex roots with small # imaginary parts and to avoid nan in gradient. r = func[c.shape[0]](*c[:-1], c[-1] - k, sentinel, eps, distinct) + # We already filtered distinct roots for quadratics. distinct = distinct and c.shape[0] > 3 else: # Compute from eigenvalues of polynomial companion matrix. 
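A quick check of the coefficient convention shared by ``polyder_vec`` and
``polyval_vec`` (highest power first along the leading axis), written against
plain NumPy with arbitrary coefficients.

.. code-block:: python

    import numpy as np

    c = np.array([2.0, -3.0, 0.5])  # 2x² - 3x + 0.5, highest power first
    # Same operation polyder_vec performs: drop the constant term and scale
    # each remaining coefficient by its exponent.
    dc = (c[:-1].T * np.arange(c.shape[0] - 1, 0, -1)).T
    x = np.linspace(-1.0, 1.0, 5)
    np.testing.assert_allclose(np.poly1d(dc)(x), np.poly1d(c).deriv()(x))  # 4x - 3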
@@ -544,66 +548,90 @@ def poly_root( return _filter_distinct(r, sentinel, eps) if distinct else r -def polyder_vec(c): - """Coefficients for the derivatives of the given set of polynomials. +def _root_cubic(a, b, c, d, sentinel, eps, distinct): + """Return r such that a r³ + b r² + c r + d = 0, assuming real coef and roots.""" + # numerical.recipes/book.html, page 228 - Parameters - ---------- - c : jnp.ndarray - First axis should store coefficients of a polynomial. For a polynomial given by - ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0]-1``, coefficient cᵢ should be stored at - ``c[n-i]``. + def irreducible(Q, R, b, mask): + # Three irrational real roots. + theta = jnp.arccos(R / jnp.sqrt(jnp.where(mask, Q**3, R**2 + 1))) + return jnp.moveaxis( + -2 + * jnp.sqrt(Q) + * jnp.stack( + [ + jnp.cos(theta / 3), + jnp.cos((theta + 2 * jnp.pi) / 3), + jnp.cos((theta - 2 * jnp.pi) / 3), + ] + ) + - b / 3, + source=0, + destination=-1, + ) - Returns - ------- - poly : jnp.ndarray - Coefficients of polynomial derivative, ignoring the arbitrary constant. That is, - ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁻¹, where n is - ``c.shape[0]-1``. + def reducible(Q, R, b): + # One real and two complex roots. + A = -jnp.sign(R) * (jnp.abs(R) + jnp.sqrt(jnp.abs(R**2 - Q**3))) ** (1 / 3) + B = safediv(Q, A) + r1 = (A + B) - b / 3 + return _concat_sentinel(r1[..., jnp.newaxis], sentinel, num=2) - """ - poly = (c[:-1].T * jnp.arange(c.shape[0] - 1, 0, -1)).T - return poly + def root(b, c, d): + b = safediv(b, a) + c = safediv(c, a) + d = safediv(d, a) + Q = (b**2 - 3 * c) / 9 + R = (2 * b**3 - 9 * b * c + 27 * d) / 54 + mask = R**2 < Q**3 + return jnp.where( + mask[..., jnp.newaxis], + irreducible(jnp.abs(Q), R, b, mask), + reducible(Q, R, b), + ) + return jnp.where( + # Tests catch failure here if eps < 1e-12 for 64 bit jax. + jnp.expand_dims(jnp.abs(a) <= eps, axis=-1), + _concat_sentinel(_root_quadratic(b, c, d, sentinel, eps, distinct), sentinel), + root(b, c, d), + ) -def polyval_vec(x, c): - """Evaluate the set of polynomials ``c`` at the points ``x``. - Note this function is not the same as ``np.polynomial.polynomial.polyval(x,c)``. +def _root_quadratic(a, b, c, sentinel, eps, distinct): + """Return r such that a r² + b r + c = 0, assuming real coefficients and roots.""" + # numerical.recipes/book.html, page 227 + discriminant = b**2 - 4 * a * c + q = -0.5 * (b + jnp.sign(b) * jnp.sqrt(jnp.abs(discriminant))) + r1 = jnp.where( + discriminant < 0, + sentinel, + safediv(q, a, _root_linear(b, c, sentinel, eps)), + ) + r2 = jnp.where( + # more robust to remove repeated roots with discriminant + (discriminant < 0) | (distinct & (discriminant <= eps)), + sentinel, + safediv(c, q, sentinel), + ) + return jnp.stack([r1, r2], axis=-1) - Parameters - ---------- - x : jnp.ndarray - Real coordinates at which to evaluate the set of polynomials. - c : jnp.ndarray - First axis should store coefficients of a polynomial. For a polynomial given by - ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0]-1``, coefficient cᵢ should be stored at - ``c[n-i]``. - Returns - ------- - val : jnp.ndarray - Polynomial with given coefficients evaluated at given points. +def _root_linear(a, b, sentinel, eps, distinct=False): + """Return r such that a r + b = 0.""" + return safediv(-b, a, jnp.where(jnp.abs(b) <= eps, 0, sentinel)) - Examples - -------- - .. 
code-block:: python - val = _poly_val(x, c) - if val.ndim != max(x.ndim, c.ndim - 1): - raise ValueError(f"Incompatible shapes {x.shape} and {c.shape}.") - for index in np.ndindex(c.shape[1:]): - idx = (..., *index) - np.testing.assert_allclose( - actual=val[idx], - desired=np.poly1d(c[idx])(x[idx]), - err_msg=f"Failed with shapes {x.shape} and {c.shape}.", - ) +def _concat_sentinel(r, sentinel, num=1): + """Concat ``sentinel`` ``num`` times to ``r`` on last axis.""" + sent = jnp.broadcast_to(sentinel, (*r.shape[:-1], num)) + return jnp.append(r, sent, axis=-1) - """ - # Better than Horner's method as we expect to evaluate low order polynomials. - # No need to use fast multipoint evaluation techniques for the same reason. - val = jnp.linalg.vecdot( - polyvander(x, c.shape[0] - 1), jnp.moveaxis(jnp.flipud(c), 0, -1) - ) - return val + +def _filter_distinct(r, sentinel, eps): + """Set all but one of matching adjacent elements in ``r`` to ``sentinel``.""" + # eps needs to be low enough that close distinct roots do not get removed. + # Otherwise, algorithms relying on continuity will fail. + mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=sentinel), 0, atol=eps) + r = jnp.where(mask, sentinel, r) + return r diff --git a/desc/integrals/quad_utils.py b/desc/integrals/quad_utils.py index 17bddcd44c..ff4e0ab55d 100644 --- a/desc/integrals/quad_utils.py +++ b/desc/integrals/quad_utils.py @@ -178,7 +178,7 @@ def leggausslob(deg): return x, w -def get_quad_points(quad, automorphism): +def get_quad(quad, automorphism): """Apply automorphism to given quadrature points and weights. Parameters @@ -200,7 +200,6 @@ def get_quad_points(quad, automorphism): """ x, w = quad assert x.ndim == w.ndim == 1 - assert x.shape == w.shape if automorphism is not None: # Apply automorphisms to supress singularities. 
auto, grad_auto = automorphism diff --git a/tests/test_fourier_bounce.py b/tests/test_fourier_bounce.py index 8f60ab9517..fcf4fb128d 100644 --- a/tests/test_fourier_bounce.py +++ b/tests/test_fourier_bounce.py @@ -13,7 +13,7 @@ from desc.equilibrium.coords import get_rtz_grid, map_coordinates from desc.examples import get from desc.grid import LinearGrid -from desc.integrals import FourierBounce +from desc.integrals import Bounce2D from desc.integrals.bounce_integral import filter_bounce_points, get_pitch from desc.integrals.fourier_bounce_integral import FourierChebyshevBasis, _get_alphas from desc.integrals.interp_utils import fourier_pts @@ -63,8 +63,8 @@ def test_bp1_first(self): fcb = FourierChebyshevBasis(f, domain=domain) pcb = fcb.compute_cheb(fourier_pts(M)) pitch = 1 / np.linspace(1, 4, 20) - bp1, bp2 = pcb.bounce_points(pitch) - pcb.check_bounce_points(bp1, bp2, pitch) + bp1, bp2 = pcb.intersect1d(pitch) + pcb.check_intersect1d(bp1, bp2, pitch) bp1, bp2 = filter_bounce_points(bp1, bp2) def f(z): @@ -90,15 +90,15 @@ def test_fourier_chebyshev(rho=1, M=8, N=32, f=lambda B, pitch: B * pitch): rho=rho, M=eq.M_grid, N=eq.N_grid, sym=False, NFP=eq.NFP ) # check if NFP!=1 works data = eq.compute( - names=FourierBounce.required_names() + ["min_tz |B|", "max_tz |B|"], grid=grid + names=Bounce2D.required_names() + ["min_tz |B|", "max_tz |B|"], grid=grid ) - fb = FourierBounce( + fb = Bounce2D( grid, data, M, N, desc_from_clebsch, check=True, warn=False ) # TODO check true pitch = get_pitch( grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 ) - result = fb.bounce_integrate(f, [], pitch) # noqa: F841 + result = fb.integrate(f, [], pitch) # noqa: F841 @pytest.mark.unit @@ -127,7 +127,7 @@ def test_drift(): iota=np.array([iota]), ) data = eq.compute( - FourierBounce.required_names() + Bounce2D.required_names() + [ "cvdrift", "gbdrift", @@ -157,18 +157,18 @@ def test_drift(): # Compute numerical result. 
grid = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP) data_2 = eq.compute( - names=FourierBounce.required_names() + ["cvdrift", "gbdrift"], grid=grid + names=Bounce2D.required_names() + ["cvdrift", "gbdrift"], grid=grid ) normalization = -np.sign(data["psi"]) * data["B ref"] * data["a"] ** 2 data_2["cvdrift"] = data_2["cvdrift"] * normalization data_2["gbdrift"] = data_2["gbdrift"] * normalization M, N = eq.M_grid, 20 - fb = FourierBounce( + fb = Bounce2D( grid, data_2, M, N, - desc_from_clebsch=FourierBounce.desc_from_clebsch(eq, rho, M, N), + desc_from_clebsch=Bounce2D.desc_from_clebsch(eq, rho, M, N), alpha_0=data["alpha"], num_transit=1, B_ref=data["B ref"], @@ -185,13 +185,13 @@ def integrand_num(cvdrift, gbdrift, B, pitch): def integrand_den(B, pitch): return 1 / jnp.sqrt(1 - pitch * B) - drift_numerical_num = fb.bounce_integrate( + drift_numerical_num = fb.integrate( pitch=pitch[:, np.newaxis], integrand=integrand_num, - f=FourierBounce.reshape_data(grid, data_2, ["cvdrift", "gbdrift"]), + f=Bounce2D.reshape_data(grid, data_2, ["cvdrift", "gbdrift"]), num_well=1, ) - drift_numerical_den = fb.bounce_integrate( + drift_numerical_den = fb.integrate( pitch=pitch[:, np.newaxis], integrand=integrand_den, f=[], From 819bff25dd68337e57363905e5f6634651961648 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 25 Aug 2024 17:02:33 -0400 Subject: [PATCH 213/241] Major refactoring of bounce integrals --- desc/backend.py | 3 +- desc/integrals/__init__.py | 2 +- desc/integrals/_bounce_utils.py | 175 -- desc/integrals/bounce_integral.py | 2153 +++++++++++++-------- desc/integrals/bounce_utils.py | 874 +++++++++ desc/integrals/fourier_bounce_integral.py | 1562 --------------- desc/integrals/interp_utils.py | 12 + desc/integrals/quad_utils.py | 2 +- tests/test_bounce_integral.py | 646 ------- tests/test_fourier_bounce.py | 2 +- tests/test_integrals.py | 669 ++++++- 11 files changed, 2896 insertions(+), 3204 deletions(-) delete mode 100644 desc/integrals/_bounce_utils.py create mode 100644 desc/integrals/bounce_utils.py delete mode 100644 desc/integrals/fourier_bounce_integral.py delete mode 100644 tests/test_bounce_integral.py diff --git a/desc/backend.py b/desc/backend.py index 3916d53329..c237ba1504 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -71,6 +71,7 @@ imap = jax.lax.map from jax.experimental.ode import odeint from jax.lax import cond, fori_loop, scan, switch, while_loop + from jax.nn import softmax from jax.numpy import bincount, flatnonzero, repeat, take from jax.numpy.fft import irfft, rfft, rfft2 from jax.scipy.fft import dct, idct @@ -421,7 +422,7 @@ def tangent_solve(g, y): qr, solve_triangular, ) - from scipy.special import gammaln, logsumexp # noqa: F401 + from scipy.special import gammaln, logsumexp, softmax # noqa: F401 trapezoid = np.trapezoid if hasattr(np, "trapezoid") else np.trapz diff --git a/desc/integrals/__init__.py b/desc/integrals/__init__.py index 559e054166..e3d59d02ef 100644 --- a/desc/integrals/__init__.py +++ b/desc/integrals/__init__.py @@ -1,6 +1,6 @@ """Classes for function integration.""" -from .fourier_bounce_integral import Bounce2D, ChebyshevBasisSet, FourierChebyshevBasis +from .bounce_integral import Bounce1D, Bounce2D from .singularities import ( DFTInterpolator, FFTInterpolator, diff --git a/desc/integrals/_bounce_utils.py b/desc/integrals/_bounce_utils.py deleted file mode 100644 index 04bc899a3c..0000000000 --- a/desc/integrals/_bounce_utils.py +++ /dev/null @@ -1,175 +0,0 @@ -from functools import partial - -from orthax.chebyshev 
import chebroots - -from desc.backend import flatnonzero, jnp, put -from desc.integrals.quad_utils import composite_linspace -from desc.utils import setdefault - -# TODO: Boyd's method 𝒪(N²) instead of Chebyshev companion matrix 𝒪(N³). -# John P. Boyd, Computing real roots of a polynomial in Chebyshev series -# form through subdivision. https://doi.org/10.1016/j.apnum.2005.09.007. -chebroots_vec = jnp.vectorize(chebroots, signature="(m)->(n)") - - -def flatten_matrix(y): - """Flatten batch of matrix to batch of vector.""" - return y.reshape(*y.shape[:-2], -1) - - -def subtract(c, k): - """Subtract ``k`` from last axis of ``c``, obeying numpy broadcasting.""" - c_0 = c[..., 0] - k - c = jnp.concatenate( - [ - c_0[..., jnp.newaxis], - jnp.broadcast_to(c[..., 1:], (*c_0.shape, c.shape[-1] - 1)), - ], - axis=-1, - ) - return c - - -def filter_bounce_points(bp1, bp2): - """Return only bounce points such that ``bp2-bp1`` ≠ 0.""" - mask = (bp2 - bp1) != 0.0 - return bp1[mask], bp2[mask] - - -def add2legend(legend, lines): - """Add lines to legend if it's not already in it.""" - for line in setdefault(lines, [lines], hasattr(lines, "__iter__")): - label = line.get_label() - if label not in legend: - legend[label] = line - - -def plot_intersect(ax, legend, z1, z2, k, k_transparency): - """Plot intersects on ``ax``.""" - if k is None: - return - - k = jnp.atleast_1d(jnp.squeeze(k)) - assert k.ndim == 1 - z1, z2 = jnp.atleast_2d(z1, z2) - assert z1.ndim == z2.ndim == 2 - assert k.shape[0] == z1.shape[0] == z2.shape[0] - for p in k: - add2legend( - legend, - ax.axhline(p, color="tab:purple", alpha=k_transparency), - ) - for i in range(k.size): - _z1, _z2 = z1[i], z2[i] - if _z1.size == _z2.size: - _z1, _z2 = filter_bounce_points(_z1, _z2) - add2legend( - legend, - ax.scatter(_z1, jnp.full(z1.shape[1], k[i]), marker="v", color="tab:red"), - ) - add2legend( - legend, - ax.scatter(_z2, jnp.full(z2.shape[1], k[i]), marker="^", color="tab:green"), - ) - - -@partial(jnp.vectorize, signature="(m),(m)->(m)") -def fix_inversion(is_intersect, df_dy_sign): - """Disqualify first intersect except under an edge case. - - The pairs ``y1`` and ``y2`` are boundaries of an integral only if - ``y1 <= y2``. It is required that the first intersect satisfies - non-positive derivative. Now, because - ``df_dy_sign[...,k]<=0`` implies ``df_dy_sign[...,k+1]>=0`` - by continuity, there can be at most one inversion, and if it exists, - the inversion must be at the first pair. To correct the inversion, - it suffices to disqualify the first intersect as a right boundary, - except under an edge case. - - Parameters - ---------- - is_intersect : jnp.ndarray - Boolean array into ``y`` indicating whether element is an intersect. - df_dy_sign : jnp.ndarray - Shape ``is_intersect.shape``. - Sign of ∂f/∂y (x, yᵢ). - - Returns - ------- - is_intersect : jnp.ndarray - - """ - # idx of first two intersects - idx = flatnonzero(is_intersect, size=2, fill_value=-1) - edge_case = ( - (df_dy_sign[idx[0]] == 0) - & (df_dy_sign[idx[1]] < 0) - & is_intersect[idx[0]] - & is_intersect[idx[1]] - # In theory, we need to keep propagating this edge case, e.g. - # (df_dy_sign[..., 1] < 0) | ( - # (df_dy_sign[..., 1] == 0) & (df_dy_sign[..., 2] < 0)... - # ). - # At each step, the likelihood that an intersection has already been lost - # due to floating point errors grows, so the real solution is to pick a less - # degenerate pitch value - one that does not ride the global extrema of |B|. 
- ) - return put(is_intersect, idx[0], edge_case) - - -def get_pitch(min_B, max_B, num, relative_shift=1e-6): - """Return uniformly spaced pitch values between ``1/max_B`` and ``1/min_B``. - - Parameters - ---------- - min_B : jnp.ndarray - Minimum |B| value. - max_B : jnp.ndarray - Maximum |B| value. - num : int - Number of values, not including endpoints. - relative_shift : float - Relative amount to shift maxima down and minima up to avoid floating point - errors in downstream routines. - - Returns - ------- - pitch : jnp.ndarray - Shape (num + 2, *min_B.shape). - - """ - # Floating point error impedes consistent detection of bounce points riding - # extrema. Shift values slightly to resolve this issue. - min_B = (1 + relative_shift) * min_B - max_B = (1 - relative_shift) * max_B - pitch = composite_linspace(1 / jnp.stack([max_B, min_B]), num) - assert pitch.shape == (num + 2, *min_B.shape) - return pitch - - -# TODO: Generalize this beyond ζ = ϕ or just map to Clebsch with ϕ -def get_alpha(alpha_0, iota, num_transit, period): - """Get sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) of field line. - - Parameters - ---------- - alpha_0 : float - Starting field line poloidal label. - iota : jnp.ndarray - Shape (iota.size, ). - Rotational transform normalized by 2π. - num_transit : float - Number of ``period``s to follow field line. - period : float - Toroidal period after which to update label. - - Returns - ------- - alpha : jnp.ndarray - Shape (iota.size, num_transit). - Sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) that specify field line. - - """ - # Δϕ (∂α/∂ϕ) = Δϕ ι̅ = Δϕ ι/2π = Δϕ data["iota"] - alpha = alpha_0 + period * iota[:, jnp.newaxis] * jnp.arange(num_transit) - return alpha diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 822184901b..b4fb5ab416 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -1,688 +1,1122 @@ -"""Functional programming methods for ``Bounce1D``.""" +"""Bounce integrals, along field lines or otherwise.""" -from functools import partial - -from interpax import CubicHermiteSpline, PPoly, interp1d -from jax.nn import softmax +import numpy as np +from interpax import CubicHermiteSpline from matplotlib import pyplot as plt from orthax.legendre import leggauss -from tests.test_interp_utils import filter_not_nan -from desc.backend import imap, jnp -from desc.integrals._bounce_utils import filter_bounce_points, fix_inversion -from desc.integrals.interp_utils import poly_root, polyder_vec, polyval_vec +from desc.backend import dct, idct, irfft, jnp, rfft +from desc.integrals.bounce_utils import ( + _add2legend, + _check_bounce_points, + _interp_to_argmin_B_soft, + _plot_intersect, + bounce_points, + bounce_quadrature, + chebroots_vec, + epigraph_and, + flatten_matrix, + get_alpha, + plot_ppoly, + subtract, +) +from desc.integrals.interp_utils import ( + _filter_distinct, + cheb_from_dct, + cheb_pts, + fourier_pts, + harmonic, + idct_non_uniform, + interp_rfft2, + irfft2_non_uniform, + irfft_non_uniform, + polyder_vec, + transform_to_desc, +) from desc.integrals.quad_utils import ( automorphism_sin, bijection_from_disc, + bijection_to_disc, + get_quadrature, grad_automorphism_sin, - grad_bijection_from_disc, ) -from desc.utils import errorif, setdefault, take_mask, warnif - - -def plot_field_line( - B, - pitch=None, - bp1=jnp.array([]), - bp2=jnp.array([]), - start=None, - stop=None, - num=1000, - title=r"Computed bounce points for $\vert B \vert$ and pitch $\lambda$", - 
title_id=None, - include_knots=True, - alpha_knot=0.1, - alpha_pitch=0.3, - show=True, -): - """Plot the field line given spline of |B|. +from desc.utils import ( + atleast_2d_end, + atleast_3d_mid, + atleast_nd, + errorif, + isposint, + setdefault, + take_mask, + warnif, +) - Parameters + +class FourierChebyshevBasis: + """Fourier-Chebyshev series. + + f(x, y) = ∑ₘₙ aₘₙ ψₘ(x) Tₙ(y) + where ψₘ are trigonometric polynomials on [0, 2π] + and Tₙ are Chebyshev polynomials on [−yₘᵢₙ, yₘₐₓ]. + + Notes + ----- + Performance may improve significantly + if the spectral resolutions ``M`` and ``N`` are powers of two. + + Attributes ---------- - B : PPoly - Spline of |B| over given field line. - pitch : jnp.ndarray - λ value. - bp1 : jnp.ndarray - Bounce points with (∂|B|/∂ζ)|ρ,α <= 0. - bp2 : jnp.ndarray - Bounce points with (∂|B|/∂ζ)|ρ,α >= 0. - start : float - Minimum ζ on plot. - stop : float - Maximum ζ on plot. - num : int - Number of ζ points to plot. Pick a big number. - title : str - Plot title. - title_id : str - Identifier string to append to plot title. - include_knots : bool - Whether to plot vertical lines at the knots. - alpha_knot : float - Transparency of knot lines. - alpha_pitch : float - Transparency of pitch lines. - show : bool - Whether to show the plot. Default is true. + M : int + Fourier spectral resolution. + N : int + Chebyshev spectral resolution. + lobatto : bool + Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) + instead of the interior roots grid for Chebyshev points. + domain : (float, float) + Domain for y coordinates. - Returns - ------- - fig, ax : matplotlib figure and axes. + """ + + def __init__(self, f, domain=(-1, 1), lobatto=False): + """Interpolate Fourier-Chebyshev basis to ``f``. + + Parameters + ---------- + f : jnp.ndarray + Shape (..., M, N). + Samples of real function on the ``FourierChebyshevBasis.nodes`` grid. + domain : (float, float) + Domain for y coordinates. Default is [-1, 1]. + lobatto : bool + Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) + instead of the interior roots grid for Chebyshev points. + + """ + self.M = f.shape[-2] + self.N = f.shape[-1] + errorif(domain[0] > domain[-1], msg="Got inverted domain.") + self.domain = tuple(domain) + errorif(lobatto, NotImplementedError, "JAX has not implemented type 1 DCT.") + self.lobatto = bool(lobatto) + self._c = FourierChebyshevBasis._fast_transform(f, self.lobatto) + + @staticmethod + def _fast_transform(f, lobatto): + M = f.shape[-2] + N = f.shape[-1] + return rfft(dct(f, type=2 - lobatto, axis=-1), axis=-2) / (M * (N - lobatto)) + + @staticmethod + def nodes(M, N, L=None, domain=(-1, 1), lobatto=False): + """Tensor product grid of optimal collocation nodes for this basis. + + Parameters + ---------- + M : int + Grid resolution in x direction. Preferably power of 2. + N : int + Grid resolution in y direction. Preferably power of 2. + L : int or jnp.ndarray + Optional, resolution in radial direction of domain [0, 1]. + May also be an array of coordinates values. If given, then the + returned ``coords`` is a 3D tensor-product with shape (L * M * N, 3). + domain : (float, float) + Domain for y coordinates. Default is [-1, 1]. + lobatto : bool + Whether to use the Gauss-Lobatto (Extrema-plus-Endpoint) + instead of the interior roots grid for Chebyshev points. + + Returns + ------- + coords : jnp.ndarray + Shape (M * N, 2). + Grid of (x, y) points for optimal interpolation. 
+ + """ + x = fourier_pts(M) + y = cheb_pts(N, lobatto, domain) + if L is not None: + if isposint(L): + L = jnp.flipud(jnp.linspace(1, 0, L, endpoint=False)) + coords = (L, x, y) + else: + coords = (x, y) + coords = list(map(jnp.ravel, jnp.meshgrid(*coords, indexing="ij"))) + coords = jnp.column_stack(coords) + return coords + + def evaluate(self, M, N): + """Evaluate Fourier-Chebyshev series. + + Parameters + ---------- + M : int + Grid resolution in x direction. Preferably power of 2. + N : int + Grid resolution in y direction. Preferably power of 2. + + Returns + ------- + fq : jnp.ndarray + Shape (..., M, N) + Fourier-Chebyshev series evaluated at ``FourierChebyshevBasis.nodes(M, N)``. + + """ + fq = idct(irfft(self._c, n=M, axis=-2), type=2 - self.lobatto, n=N, axis=-1) * ( + M * (N - self.lobatto) + ) + return fq + + def harmonics(self): + """Spectral coefficients aₘₙ of the interpolating polynomial. + + Transform Fourier interpolant harmonics to Nyquist trigonometric + interpolant harmonics so that the coefficients are all real. + + Returns + ------- + a_mn : jnp.ndarray + Shape (..., M, N). + Real valued spectral coefficients for Fourier-Chebyshev basis. + + """ + a_mn = harmonic(cheb_from_dct(self._c, axis=-1), self.M, axis=-2) + assert a_mn.shape[-2:] == (self.M, self.N) + return a_mn + + def compute_cheb(self, x): + """Evaluate Fourier basis at ``x`` to obtain set of 1D Chebyshev coefficients. + + Parameters + ---------- + x : jnp.ndarray + Points to evaluate Fourier basis. + + Returns + ------- + cheb : ChebyshevBasisSet + Chebyshev coefficients αₙ(x=``x``) for f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). + + """ + # Always add new axis to broadcast against Chebyshev coefficients. + x = jnp.atleast_1d(x)[..., jnp.newaxis] + cheb = cheb_from_dct(irfft_non_uniform(x, self._c, self.M, axis=-2), axis=-1) + assert cheb.shape[-2:] == (x.shape[-2], self.N) + return ChebyshevBasisSet(cheb, self.domain) + + +class ChebyshevBasisSet: + """Chebyshev series. + + { fₓ | fₓ : y ↦ ∑ₙ₌₀ᴺ⁻¹ aₙ(x) Tₙ(y) } + and Tₙ are Chebyshev polynomials on [−yₘᵢₙ, yₘₐₓ] + + Attributes + ---------- + cheb : jnp.ndarray + Shape (..., M, N). + Chebyshev coefficients αₙ(x) for fₓ(y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). + M : int + Number of function in this basis set. + N : int + Chebyshev spectral resolution. + domain : (float, float) + Domain for y coordinates. 
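# Usage sketch for the basis above, mirroring ``tests/test_fourier_bounce.py``.
# Assumptions: the import paths are those of this patch, and ``M``, ``N``, and the
# sample function are placeholders.
import numpy as np
from desc.integrals.bounce_integral import FourierChebyshevBasis
from desc.integrals.interp_utils import fourier_pts

M, N, domain = 8, 32, (0, 2 * np.pi)
xy = FourierChebyshevBasis.nodes(M, N, domain=domain)      # shape (M * N, 2)
f = (np.cos(xy[:, 0]) + np.sin(xy[:, 1])).reshape(M, N)    # samples on the nodes
fcb = FourierChebyshevBasis(f, domain=domain)
pcb = fcb.compute_cheb(fourier_pts(M))   # ChebyshevBasisSet: one 1D series per x
z1, z2 = pcb.intersect1d(k=0.5)          # intersects f(z) = 0.5, paired by slope sign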
""" - legend = {} - - def add(lines): - for line in setdefault(lines, [lines], hasattr(lines, "__iter__")): - label = line.get_label() - if label not in legend: - legend[label] = line - - fig, ax = plt.subplots() - if include_knots: - for knot in B.x: - add(ax.axvline(x=knot, color="tab:blue", alpha=alpha_knot, label="knot")) - z = jnp.linspace( - start=setdefault(start, B.x[0]), - stop=setdefault(stop, B.x[-1]), - num=num, - ) - add(ax.plot(z, B(z), label=r"$\vert B \vert (\zeta)$")) - - if pitch is not None: - b = 1 / jnp.atleast_1d(pitch) - for val in b: - add( - ax.axhline( - val, color="tab:purple", alpha=alpha_pitch, label=r"$1 / \lambda$" - ) - ) - bp1, bp2 = jnp.atleast_2d(bp1, bp2) - for i in range(bp1.shape[0]): - if bp1.shape == bp2.shape: - bp1_i, bp2_i = filter_bounce_points(bp1[i], bp2[i]) - else: - bp1_i, bp2_i = bp1[i], bp2[i] - bp1_i, bp2_i = bp1_i[~jnp.isnan(bp1_i)], bp2_i[~jnp.isnan(bp2_i)] - add( - ax.scatter( - bp1_i, - jnp.full_like(bp1_i, b[i]), - marker="v", - color="tab:red", - label="bp1", - ) - ) - add( - ax.scatter( - bp2_i, - jnp.full_like(bp2_i, b[i]), - marker="^", - color="tab:green", - label="bp2", - ) + + _eps = min(jnp.finfo(jnp.array(1.0).dtype).eps * 1e2, 1e-10) + + def __init__(self, cheb, domain=(-1, 1)): + """Make Chebyshev series basis from given coefficients. + + Parameters + ---------- + cheb : jnp.ndarray + Shape (..., M, N). + Chebyshev coefficients αₙ(x=``x``) for f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). + domain : (float, float) + Domain for y coordinates. Default is [-1, 1]. + + """ + self.cheb = jnp.atleast_2d(cheb) + errorif(domain[0] > domain[-1], msg="Got inverted domain.") + self.domain = tuple(domain) + + @property + def M(self): + """Number of function in this basis set.""" + return self.cheb.shape[-2] + + @property + def N(self): + """Chebyshev spectral resolution.""" + return self.cheb.shape[-1] + + @staticmethod + def _chebcast(cheb, arr): + # Input should not have rightmost dimension of cheb that iterates coefficients, + # but may have additional leftmost dimension for batch operation. + errorif( + jnp.ndim(arr) > cheb.ndim, + NotImplementedError, + msg=f"Only one additional axis for batch dimension is allowed. " + f"Got {jnp.ndim(arr) - cheb.ndim + 1} additional axes.", + ) + return cheb if jnp.ndim(arr) < cheb.ndim else cheb[jnp.newaxis] + + def intersect2d(self, k=0.0, eps=_eps): + """Coordinates yᵢ such that f(x, yᵢ) = k(x). + + Parameters + ---------- + k : jnp.ndarray + Shape must broadcast with (..., *cheb.shape[:-1]). + Specify to find solutions yᵢ to f(x, yᵢ) = k(x). Default 0. + eps : float + Absolute tolerance with which to consider value as zero. + + Returns + ------- + y : jnp.ndarray + Shape (..., *cheb.shape[:-1], N - 1). + Solutions yᵢ of f(x, yᵢ) = k(x), in ascending order. + is_intersect : jnp.ndarray + Shape y.shape. + Boolean array into ``y`` indicating whether element is an intersect. + df_dy_sign : jnp.ndarray + Shape y.shape. + Sign of ∂f/∂y (x, yᵢ). + + """ + c = subtract(ChebyshevBasisSet._chebcast(self.cheb, k), k) + # roots yᵢ of f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y) - k(x) + y = chebroots_vec(c) + assert y.shape == (*c.shape[:-1], self.N - 1) + + # Intersects must satisfy y ∈ [-1, 1]. + # Pick sentinel such that only distinct roots are considered intersects. + y = _filter_distinct(y, sentinel=-2.0, eps=eps) + is_intersect = (jnp.abs(y.imag) <= eps) & (jnp.abs(y.real) <= 1.0) + y = jnp.where(is_intersect, y.real, 1.0) # ensure y is in domain of arcos + + # TODO: Multipoint evaluation with FFT. 
+ # Chapter 10, https://doi.org/10.1017/CBO9781139856065. + n = jnp.arange(self.N) + # ∂f/∂y = ∑ₙ₌₀ᴺ⁻¹ aₙ(x) n Uₙ₋₁(y) + # sign ∂f/∂y = sign ∑ₙ₌₀ᴺ⁻¹ aₙ(x) n sin(n arcos y) + df_dy_sign = jnp.sign( + jnp.linalg.vecdot( + n * jnp.sin(n * jnp.arccos(y)[..., jnp.newaxis]), + self.cheb[..., jnp.newaxis, :], ) + ) + y = bijection_from_disc(y, self.domain[0], self.domain[-1]) + return y, is_intersect, df_dy_sign - ax.set_xlabel(r"Field line $\zeta$") - ax.set_ylabel(r"$\vert B \vert \sim 1 / \lambda$") - ax.legend(legend.values(), legend.keys(), loc="lower right") - if title_id is not None: - title = f"{title}. id = {title_id}." - ax.set_title(title) - plt.tight_layout() - if show: - plt.show() - plt.close() - return fig, ax - - -def _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot, **kwargs): - """Check that bounce points are computed correctly.""" - assert bp1.shape == bp2.shape - mask = (bp1 - bp2) == 0 - bp1 = jnp.where(mask, jnp.nan, bp1) - bp2 = jnp.where(mask, jnp.nan, bp2) - - eps = jnp.finfo(jnp.array(1.0).dtype).eps * 10 - msg_1 = "Bounce points have an inversion.\n" - err_1 = jnp.any(bp1 > bp2, axis=-1) - msg_2 = "Discontinuity detected.\n" - err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1], axis=-1) - - P, S, _ = bp1.shape - for s in range(S): - B = PPoly(B_c[:, s], knots) - for p in range(P): - B_m_ps = B((bp1[p, s] + bp2[p, s]) / 2) - err_3 = jnp.any(B_m_ps > 1 / pitch[p, s] + eps) - if err_1[p, s] or err_2[p, s] or err_3: - bp1_ps, bp2_ps, B_m_ps = map( - filter_not_nan, (bp1[p, s], bp2[p, s], B_m_ps) - ) + def intersect1d(self, k=0.0, num_intersect=None, pad_value=0.0): + """Coordinates z(x, yᵢ) such that fₓ(yᵢ) = k for every x. + + Parameters + ---------- + k : jnp.ndarray + Shape must broadcast with (..., *cheb.shape[:-2]). + Specify to find solutions yᵢ to fₓ(yᵢ) = k. Default 0. + num_intersect : int or None + If not specified, then all intersects are returned in an array whose + last axis has size ``self.M*(self.N-1)``. If there were less than that many + intersects detected, then the last axis of the returned arrays is padded + with ``pad_value``. Specify to return the first ``num_intersect`` pairs + of intersects. This is useful if ``num_intersect`` tightly bounds the + actual number. + pad_value : float + Value with which to pad array. Default 0. + + Returns + ------- + z1, z2 : (jnp.ndarray, jnp.ndarray) + Shape broadcasts with (..., *self.cheb.shape[:-2], num_intersect). + ``z1``, ``z2`` holds intersects satisfying ∂f/∂y <= 0, ∂f/∂y >= 0, + respectively. + + """ + errorif( + self.N < 2, + NotImplementedError, + "This method requires the Chebyshev spectral resolution of at " + f"least 2, but got N={self.N}.", + ) + + # Add axis to use same k over all Chebyshev series of the piecewise object. + y, is_intersect, df_dy_sign = self.intersect2d( + jnp.atleast_1d(k)[..., jnp.newaxis] + ) + # Flatten so that last axis enumerates intersects along the piecewise object. + y, is_intersect, df_dy_sign = map( + flatten_matrix, (self.isomorphism_to_C1(y), is_intersect, df_dy_sign) + ) + + # Note for bounce point applications: + # We ignore the degenerate edge case where the boundary shared by adjacent + # polynomials is a left intersect point i.e. ``is_z1`` because the subset of + # pitch values that generate this edge case has zero measure. Note that + # the technique to account for this would be to disqualify intersects + # within ``_eps`` from ``domain[-1]``. 
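# Standalone NumPy check (an illustration, not part of the class) of the identity
# used above: d/dy T_n(y) = n U_{n-1}(y) and U_{n-1}(cos t) = sin(n t)/sin(t), so for
# y in (-1, 1) the sign of df/dy equals the sign of sum_n a_n n sin(n arccos y).
import numpy as np
from numpy.polynomial import chebyshev

a = np.array([0.3, -1.2, 0.5, 0.7])                 # arbitrary coefficients a_n
y = np.linspace(-0.99, 0.99, 101)
df_dy = chebyshev.chebval(y, chebyshev.chebder(a))  # derivative of the series
n = np.arange(a.size)
proxy = np.sum(a * n * np.sin(n * np.arccos(y)[:, None]), axis=-1)
assert np.all(np.sign(df_dy) == np.sign(proxy))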
+ is_z1 = (df_dy_sign <= 0) & is_intersect + is_z2 = (df_dy_sign >= 0) & epigraph_and(is_intersect, df_dy_sign) + + sentinel = self.domain[0] - 1.0 + z1 = take_mask(y, is_z1, size=num_intersect, fill_value=sentinel) + z2 = take_mask(y, is_z2, size=num_intersect, fill_value=sentinel) + + mask = (z1 > sentinel) & (z2 > sentinel) + # Set outside mask to same value so integration is over set of measure zero. + z1 = jnp.where(mask, z1, pad_value) + z2 = jnp.where(mask, z2, pad_value) + return z1, z2 + + def eval1d(self, z, cheb=None): + """Evaluate piecewise Chebyshev spline at coordinates z. + + Parameters + ---------- + z : jnp.ndarray + Shape (..., *cheb.shape[:-2], z.shape[-1]). + Coordinates in [sef.domain[0], ∞). + The coordinates z ∈ ℝ are assumed isomorphic to (x, y) ∈ ℝ² where + ``z // domain`` yields the index into the proper Chebyshev series + along the second to last axis of ``cheb`` and ``z % domain`` is + the coordinate value on the domain of that Chebyshev series. + cheb : jnp.ndarray + Shape (..., M, N). + Chebyshev coefficients to use. If not given, uses ``self.cheb``. + + Returns + ------- + f : jnp.ndarray + Shape z.shape. + Chebyshev basis evaluated at z. + + """ + cheb = self._chebcast(setdefault(cheb, self.cheb), z) + N = cheb.shape[-1] + x_idx, y = self.isomorphism_to_C2(z) + y = bijection_to_disc(y, self.domain[0], self.domain[1]) + # Chebyshev coefficients αₙ for f(z) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x[z]) Tₙ(y[z]) + # are held in cheb with shape (..., num cheb series, N). + cheb = jnp.take_along_axis(cheb, x_idx[..., jnp.newaxis], axis=-2) + f = idct_non_uniform(y, cheb, N) + assert f.shape == z.shape + return f + + def isomorphism_to_C1(self, y): + """Return coordinates z ∈ ℂ isomorphic to (x, y) ∈ ℂ². + + Maps row x of y to z = y + f(x) where f(x) = x * |domain|. + + Parameters + ---------- + y : jnp.ndarray + Shape (..., y.shape[-2], y.shape[-1]). + Second to last axis iterates the rows. + + Returns + ------- + z : jnp.ndarray + Shape y.shape. + Isomorphic coordinates. + + """ + assert y.ndim >= 2 + z_shift = jnp.arange(y.shape[-2]) * (self.domain[-1] - self.domain[0]) + z = y + z_shift[:, jnp.newaxis] + return z + + def isomorphism_to_C2(self, z): + """Return coordinates (x, y) ∈ ℂ² isomorphic to z ∈ ℂ. + + Returns index x and value y such that z = f(x) + y where f(x) = x * |domain|. + + Parameters + ---------- + z : jnp.ndarray + Shape z.shape. + + Returns + ------- + x_idx, y_val : (jnp.ndarray, jnp.ndarray) + Shape z.shape. + Isomorphic coordinates. + + """ + x_idx, y_val = jnp.divmod(z - self.domain[0], self.domain[-1] - self.domain[0]) + x_idx = x_idx.astype(int) + y_val += self.domain[0] + return x_idx, y_val + + def _check_shape(self, z1, z2, k): + """Return shapes that broadcast with (k.shape[0], *self.cheb.shape[:-2], W).""" + # Ensure pitch batch dim exists and add back dim to broadcast with wells. + k = atleast_nd(self.cheb.ndim - 1, k)[..., jnp.newaxis] + # Same but back dim already exists. + z1, z2 = atleast_nd(self.cheb.ndim, z1, z2) + # Cheb has shape (..., M, N) and others + # have shape (K, ..., W) + errorif(not (z1.ndim == z2.ndim == k.ndim == self.cheb.ndim)) + return z1, z2, k + + def check_intersect1d(self, z1, z2, k, pad_value=0.0, plot=True, **kwargs): + """Check that intersects are computed correctly. + + Parameters + ---------- + z1, z2 : jnp.ndarray + Shape must broadcast with (k, *self.cheb.shape[:-2], W). + ``z1``, ``z2`` holds intersects satisfying ∂f/∂y <= 0, ∂f/∂y >= 0, + respectively. 
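# Standalone sketch (plain Python, not the class API) of the isomorphism used above:
# stacking M Chebyshev pieces of width d = domain[1] - domain[0] maps a
# (piece index x, local coordinate y) pair to the global coordinate z = y + x * d,
# and divmod recovers (x, y) from z.
import numpy as np

domain = (0.0, 2 * np.pi)
d = domain[1] - domain[0]
x, y = 2, 1.3                          # third piece, local y inside [0, 2*pi)
z = y + x * d                          # isomorphism_to_C1
x_back, y_back = divmod(z - domain[0], d)
y_back += domain[0]                    # isomorphism_to_C2
assert int(x_back) == x and np.isclose(y_back, y)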
+ k : jnp.ndarray + Shape must broadcast with (k.shape[0], *self.cheb.shape[:-2]). + k such that fₓ(yᵢ) = k. + pad_value : float + Value that pads ``z1`` and ``z2`` arrays. + plot : bool + Whether to plot stuff. Default is true. + kwargs : dict + Keyword arguments into ``self.plot``. + + """ + assert z1.shape == z2.shape + mask = (z1 - z2) != pad_value + z1 = jnp.where(mask, z1, jnp.nan) + z2 = jnp.where(mask, z2, jnp.nan) + z1, z2, k = self._check_shape(z1, z2, k) + + err_1 = jnp.any(z1 > z2, axis=-1) + err_2 = jnp.any(z1[..., 1:] < z2[..., :-1], axis=-1) + f_m = self.eval1d((z1 + z2) / 2) + assert f_m.shape == z1.shape + err_3 = jnp.any(f_m > k + self._eps, axis=-1) + if not (plot or jnp.any(err_1 | err_2 | err_3)): + return + + # Ensure l axis exists for iteration in below loop. + cheb = atleast_nd(3, self.cheb) + mask, z1, z2, f_m = atleast_3d_mid(mask, z1, z2, f_m) + err_1, err_2, err_3 = atleast_2d_end(err_1, err_2, err_3) + + for l in np.ndindex(cheb.shape[:-2]): + for p in range(k.shape[0]): + idx = (p, *l) + if not (err_1[idx] or err_2[idx] or err_3[idx]): + continue + _z1 = z1[idx][mask[idx]] + _z2 = z2[idx][mask[idx]] if plot: - plot_field_line( - B, - pitch[p, s], - bp1_ps, - bp2_ps, - title_id=f"{p},{s}", + self.plot1d( + cheb=cheb[l], + z1=_z1, + z2=_z2, + k=k[idx], **kwargs, ) - print("bp1:", bp1_ps) - print("bp2:", bp2_ps) - assert not err_1[p, s], msg_1 - assert not err_2[p, s], msg_2 - msg_3 = ( - f"Detected |B| = {B_m_ps} > {1 / pitch[p, s] + eps} = 1/λ in well. " - "Use more knots or switch to a monotonic spline method.\n" + print(" z1 | z2") + print(jnp.column_stack([_z1, _z2])) + assert not err_1[idx], "Intersects have an inversion.\n" + assert not err_2[idx], "Detected discontinuity.\n" + assert not err_3[idx], ( + "Detected f > k in well. Increase Chebyshev resolution.\n" + f"{f_m[idx][mask[idx]]} > {k[idx] + self._eps}" ) - assert not err_3, msg_3 - if plot: - plot_field_line( - B, - pitch[:, s], - bp1[:, s], - bp2[:, s], - title_id=str(s), - **kwargs, - ) + idx = (slice(None), *l) + if plot: + self.plot1d( + cheb=cheb[l], + z1=z1[idx], + z2=z2[idx], + k=k[idx], + **kwargs, + ) + + def plot1d( + self, + cheb, + num=1000, + z1=None, + z2=None, + k=None, + k_transparency=0.5, + klabel=r"$k$", + title=r"Intersects $z$ in epigraph of $f(z) = k$", + hlabel=r"$z$", + vlabel=r"$f(z)$", + show=True, + pad_value=0.0, + ): + """Plot the piecewise Chebyshev series. + + Parameters + ---------- + cheb : jnp.ndarray + Shape (M, N). + Piecewise Chebyshev series f. + num : int + Number of points to evaluate ``cheb`` for plot. + z1 : jnp.ndarray + Shape (k.shape[0], W). + Optional, intersects with ∂f/∂y <= 0. + z2 : jnp.ndarray + Shape (k.shape[0], W). + Optional, intersects with ∂f/∂y >= 0. + k : jnp.ndarray + Shape (k.shape[0], ). + Optional, k such that fₓ(yᵢ) = k. + k_transparency : float + Transparency of pitch lines. + klabel : float + Label of intersect lines. + title : str + Plot title. + hlabel : str + Horizontal axis label. + vlabel : str + Vertical axis label. + show : bool + Whether to show the plot. Default is true. + pad_value : float + Doesn't plot intersects where ``z1-z2==pad_value``. 
+ + Returns + ------- + fig, ax : matplotlib figure and axes + + """ + fig, ax = plt.subplots() + legend = {} + z = jnp.linspace( + start=self.domain[0], + stop=self.domain[0] + (self.domain[1] - self.domain[0]) * self.M, + num=num, + ) + _add2legend(legend, ax.plot(z, self.eval1d(z, cheb), label=vlabel)) + _plot_intersect( + ax=ax, + legend=legend, + z1=z1, + z2=z2, + k=k, + k_transparency=k_transparency, + klabel=klabel, + pad_value=pad_value, + ) + ax.set_xlabel(hlabel) + ax.set_ylabel(vlabel) + ax.legend(legend.values(), legend.keys()) + ax.set_title(title) + plt.tight_layout() + if show: + plt.show() + plt.close() + return fig, ax -def _check_shape(knots, B_c, B_z_ra_c, pitch=None): - """Ensure inputs have compatible shape, and return them with full dimension. +def _transform_to_clebsch(grid, desc_from_clebsch, M, N, B): + """Transform to Clebsch spectral domain. Parameters ---------- - knots : jnp.ndarray - Shape (knots.size, ). - Field line-following ζ coordinates of spline knots. + grid : Grid + Tensor-product grid in (ρ, θ, ζ) with uniformly spaced nodes in + (2π × 2π) poloidal and toroidal coordinates. + Note that below shape notation defines + L = ``grid.num_rho``, m = ``grid.num_theta``, and n = ``grid.num_zeta``. + desc_from_clebsch : jnp.ndarray + Shape (L * M * N, 3). + DESC coordinates (ρ, θ, ζ) sourced from the Clebsch coordinates + ``FourierChebyshevBasis.nodes(M,N,domain=FourierBounce.domain)``. + M : int + Grid resolution in poloidal direction for Clebsch coordinate grid. + Preferably power of 2. A good choice is ``m``. If the poloidal stream + function condenses the Fourier spectrum of |B| significantly, then a + larger number may be beneficial. + N : int + Grid resolution in toroidal direction for Clebsch coordinate grid. + Preferably power of 2. + B : jnp.ndarray + |B| evaluated on ``grid``. Returns ------- - B_c : jnp.ndarray - Shape (B_c.shape[0], S, knots.size - 1). - Polynomial coefficients of the spline of |B| in local power basis. - B_z_ra_c : jnp.ndarray - Shape (B_c.shape[0] - 1, *B_c.shape[1:]). - Polynomial coefficients of the spline of (∂|B|/∂ζ)|ρ,α in local power basis. - pitch : jnp.ndarray - Shape (P, S). - λ values to evaluate the bounce integral at each field line. + T, B : (FourierChebyshevBasis, FourierChebyshevBasis) """ - errorif(knots.ndim != 1, msg=f"knots should be 1d; got shape {knots.shape}.") - if B_c.ndim == 2 and B_z_ra_c.ndim == 2: - # Add axis which enumerates field lines. - B_c = B_c[:, jnp.newaxis] - B_z_ra_c = B_z_ra_c[:, jnp.newaxis] - msg = ( - "Invalid shape for spline arrays. " - f"B_c.shape={B_c.shape}. B_z_ra_c.shape={B_z_ra_c.shape}." + T = FourierChebyshevBasis( + # θ is computed on the optimal nodes in Clebsch space, + # which is a tensor product node set in Clebsch space. + f=desc_from_clebsch[:, 1].reshape(grid.num_rho, M, N), + domain=Bounce2D.domain, ) - errorif(not (B_c.ndim == B_z_ra_c.ndim == 3), msg=msg) - errorif(B_c.shape[0] - 1 != B_z_ra_c.shape[0], msg=msg) - errorif(B_c.shape[1:] != B_z_ra_c.shape[1:], msg=msg) - errorif( - B_c.shape[-1] != knots.size - 1, - msg=( - "Last axis does not enumerate polynomials of spline. " - f"B_c.shape={B_c.shape}. knots.shape={knots.shape}." - ), + B = FourierChebyshevBasis( + f=interp_rfft2( + # Interpolate to optimal nodes in Clebsch space, + # which is not a tensor product node set in DESC space. 
+ xq=desc_from_clebsch[:, 1:].reshape(grid.num_rho, -1, 2), + f=grid.meshgrid_reshape(B, order="rtz")[:, jnp.newaxis], + axes=(-1, -2), + ).reshape(grid.num_rho, M, N), + domain=Bounce2D.domain, ) - if pitch is not None: - pitch = jnp.atleast_2d(pitch) - msg = f"Invalid shape {pitch.shape} for pitch angles." - errorif(pitch.ndim != 2, msg=msg) - errorif(pitch.shape[-1] != 1 and pitch.shape[-1] != B_c.shape[1], msg=msg) - return B_c, B_z_ra_c, pitch + return T, B -def bounce_points( - pitch, knots, B_c, B_z_ra_c, num_well=None, check=False, plot=True, **kwargs -): - """Compute the bounce points given spline of |B| and pitch λ. +# TODO: +# After GitHub issue #1034 is resolved, we can also pass in the previous +# θ(α) coordinates as an initial guess for the next coordinate mapping. +# Perhaps tell the optimizer to perturb the coefficients of the +# |B|(α, ζ) directly? Maybe auto diff to see change on |B|(θ, ζ) +# and hence stream functions. just guessing. not sure if feasible / useful. +# TODO: Allow multiple starting labels for near-rational surfaces. +# can just concatenate along second to last axis of cheb. - Parameters - ---------- - pitch : jnp.ndarray - Shape (P, S). - λ values to evaluate the bounce integral at each field line. λ(ρ,α) is - specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are - interpreted as the index into the last axis that corresponds to that field - line. If two-dimensional, the first axis is the batch axis. - knots : jnp.ndarray - Shape (knots.size, ). - Field line-following ζ coordinates of spline knots. Must be strictly increasing. - B_c : jnp.ndarray - Shape (B_c.shape[0], S, knots.size - 1). - Polynomial coefficients of the spline of |B| in local power basis. - First axis enumerates the coefficients of power series. Second axis - enumerates the splines along the field lines. Last axis enumerates the - polynomials that compose the spline along a particular field line. - B_z_ra_c : jnp.ndarray - Shape (B_c.shape[0] - 1, *B_c.shape[1:]). - Polynomial coefficients of the spline of (∂|B|/∂ζ)|ρ,α in local power basis. - First axis enumerates the coefficients of power series. Second axis - enumerates the splines along the field lines. Last axis enumerates the - polynomials that compose the spline along a particular field line. - num_well : int or None - If not specified, then all bounce points are returned in an array whose - last axis has size ``(knots.size - 1) * (B_c.shape[0] - 1)``. If there - were less than that many wells detected along a field line, then the last - axis of the returned arrays, which enumerates bounce points for a particular - field line and pitch, is padded with zero. - - Specify to return the first ``num_well`` pairs of bounce points for each - pitch along each field line. This is useful if ``num_well`` tightly - bounds the actual number of wells. To obtain a good choice for ``num_well``, - plot the field line with all the bounce points identified by calling this - function with ``check=True``. As a reference, there are typically <= 5 wells - per toroidal transit. - check : bool - Flag for debugging. - plot : bool - Whether to plot some things if check is true. Default is true. - Returns - ------- - bp1, bp2 : (jnp.ndarray, jnp.ndarray) - Shape (P, S, num_well). - The field line-following coordinates of bounce points for a given pitch along - a field line. The pairs ``bp1`` and ``bp2`` form left and right integration - boundaries, respectively, for the bounce integrals. 
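# NumPy sketch of the field-line labels used below to peel one Chebyshev series off
# per toroidal transit (assumption: mirrors the ``get_alpha`` helper imported above):
# alpha_k = alpha_0 + period * iota * k for k = 0, ..., num_transit - 1.
import numpy as np

alpha_0, num_transit, period = 0.0, 4, 2 * np.pi
iota = np.array([0.42, 1.18])          # rotational transform per flux surface
alpha = alpha_0 + period * iota[:, np.newaxis] * np.arange(num_transit)
print(alpha.shape)                     # (2, 4): one row of labels per flux surface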
+class Bounce2D: + """Computes bounce integrals using two-dimensional pseudo-spectral methods. + + The bounce integral is defined as ∫ f(ℓ) dℓ, where + dℓ parameterizes the distance along the field line in meters, + f(ℓ) is the quantity to integrate along the field line, + and the boundaries of the integral are bounce points ζ₁, ζ₂ s.t. λ|B|(ζᵢ) = 1, + where λ is a constant proportional to the magnetic moment over energy + and |B| is the norm of the magnetic field. + + For a particle with fixed λ, bounce points are defined to be the location on the + field line such that the particle's velocity parallel to the magnetic field is zero. + The bounce integral is defined up to a sign. We choose the sign that corresponds to + the particle's guiding center trajectory traveling in the direction of increasing + field-line-following coordinate ζ. + + Notes + ----- + Brief motivation and description of algorithm for developers. + + For applications which reduce to computing a nonlinear function of distance + along field lines between bounce points, it is required to identify these + points with field-line-following coordinates. In the special case of a linear + function summing integrals between bounce points over a flux surface, arbitrary + coordinate systems may be used as this operation becomes a surface integral, + which is invariant to the order of summation. + + The DESC coordinate system is related to field-line-following coordinate + systems by a relation whose solution is best found with Newton iteration. + There is a unique real solution to this equation, so Newton iteration is a + globally convergent root-finding algorithm here. For the task of finding + bounce points, even if the inverse map: θ(α, ζ) was known, Newton iteration + is not a globally convergent algorithm to find the real roots of + f : ζ ↦ |B|(ζ) − 1/λ where ζ is a field-line-following coordinate. + For this, function approximation of |B| is necessary. + + Therefore, to compute bounce points {(ζ₁, ζ₂)}, we approximate |B| by a + series expansion of basis functions in (α, ζ) coordinates restricting the + class of basis functions to low order (e.g. N = 2ᵏ where k is small) + algebraic or trigonometric polynomial with integer frequencies. These are + the two classes useful for function approximation and for which there exists + globally convergent root-finding algorithms. We require low order because + the computation expenses grow with the number of potential roots, and the + theorem of algebra states that number is N (2N) for algebraic + (trigonometric) polynomials of degree N. + + The frequency transform of a map under the chosen basis must be concentrated + at low frequencies for the series to converge to the true function fast. + For periodic (non-periodic) maps, the best basis is a Fourier (Chebyshev) + series. Both converge exponentially, but the larger region of convergence in + the complex plane of Fourier series make it preferable in practice to choose + coordinate systems such that the function to approximate is periodic. The + Chebyshev series is preferred to other orthogonal polynomial series since + fast discrete polynomial transforms (DPT) are implemented via fast transform + to Chebyshev then DCT. Although nothing prohibits a direct DPT, we want to + rely on existing, optimized libraries. There are other reasons to prefer + Chebyshev series not discussed here. + + Therefore, |B| is interpolated to a Fourier-Chebyshev series in (α, ζ). 
+ The roots of f are computed as the eigenvalues of the Chebyshev companion + matrix. This will later be replaced with Boyd's method: + Computing real roots of a polynomial in Chebyshev series form through + subdivision. https://doi.org/10.1016/j.apnum.2005.09.007. + + Computing accurate series expansions in (α, ζ) coordinates demands + particular interpolation points in that coordinate system. Newton iteration + is used to compute θ at these interpolation points. Note that interpolation + is necessary because there is no transformation that converts series + coefficients in periodic coordinates, e.g. (ϑ, ϕ), to a low order + polynomial basis in non-periodic coordinates. For example, one can obtain + series coefficients in (α, ϕ) coordinates from those in (ϑ, ϕ) as follows + g : ϑ, ϕ ↦ ∑ₘₙ aₘₙ exp(j [mϑ + nϕ]) + + g : α, ϕ ↦ ∑ₘₙ aₘₙ exp(j [mα + (m ι + n)ϕ]) + However, the basis for the latter are trigonometric functions with + irrational frequencies since the rotational transform is irrational. + Globally convergent root-finding schemes for that basis (at fixed α) are + not known. The denominator of a close rational could be absorbed into the + coordinate ϕ, but this balloons the frequency, and hence the degree of the + series. Although since Fourier series may converge faster than Chebyshev, + an alternate strategy that should work is to interpolate |B| to a double + Fourier series in (ϑ, ϕ), then apply bisection methods to find roots of f + with mesh size inversely proportional to the max frequency along the field + line: M ι + N. ``Bounce2D`` does not use this approach because the + root-finding scheme is inferior. + + After obtaining the bounce points, the supplied quadrature is performed. + By default, Gauss quadrature is performed after removing the singularity. + Fast fourier transforms interpolate functions in the integrand to the + quadrature nodes. + + Fast transforms are used where possible, though fast multipoint methods + are not yet implemented. For non-uniform interpolation, Vandermode MMT with + the linear algebra libraries of JAX are used. It should be worthwhile to use + the inverse non-uniform fast transforms. Fast multipoint methods are + preferable because they are exact, but this requires more development work. + Future work may implement these techniques, along with empirical testing of + a few change of variables for the Chebyshev interpolation that may allow + earlier truncation of the series without loss of accuracy. + + See Also + -------- + Bounce1D + Uses one-dimensional local spline methods for the same task. + An advantage of ``Bounce2D`` over ``Bounce1D`` is that the coordinates on + which the root-finding must be done to map from DESC to Clebsch coords is + fixed to ``M*N``, independent of the number of toroidal transits. + + Warnings + -------- + It is assumed that ζ = ϕ. + + Attributes + ---------- + _B : ChebyshevBasisSet + Set of 1D Chebyshev spectral coefficients of |B| along field line. + {|B|_α : ζ ↦ |B|(α, ζ) | α ∈ A } where A = (α₀, α₁, …, αₘ₋₁) is the + sequence of poloidal coordinates that specify the field line. + _T : ChebyshevBasisSet + Set of 1D Chebyshev spectral coefficients of θ along field line. + {θ_α : ζ ↦ θ(α, ζ) | α ∈ A } where A = (α₀, α₁, …, αₘ₋₁) is the + sequence of poloidal coordinates that specify the field line. """ - B_c, B_z_ra_c, pitch = _check_shape(knots, B_c, B_z_ra_c, pitch) - P, S, N, degree = pitch.shape[0], B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 - # Intersection points in local power basis. 
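# NumPy sketch of the bounce-point root-finding described above (assumption: NumPy's
# chebroots stands in for the vectorized orthax/JAX path; the coefficients are made
# up). Given Chebyshev coefficients c of |B| on [-1, 1], bounce points for pitch
# lambda are the real roots in [-1, 1] of |B|(y) - 1/lambda, obtained from the
# eigenvalues of the Chebyshev companion matrix.
import numpy as np
from numpy.polynomial import chebyshev

c = np.array([1.5, 0.0, 0.4])          # |B|(y) = 1.5 + 0.4 T_2(y) = 1.1 + 0.8 y**2
pitch = 1 / 1.6                        # solve |B|(y) = 1/pitch = 1.6
c_shifted = c.copy()
c_shifted[0] -= 1 / pitch              # subtract the constant term, as in ``subtract``
roots = chebyshev.chebroots(c_shifted)
real = roots[np.isreal(roots) & (np.abs(roots.real) <= 1)].real
print(np.sort(real))                   # approximately [-0.7906, 0.7906]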
- intersect = poly_root( - c=B_c, - k=(1 / pitch)[..., jnp.newaxis], - a_min=jnp.array([0.0]), - a_max=jnp.diff(knots), - sort=True, - sentinel=-1.0, - distinct=True, - ) - assert intersect.shape == (P, S, N, degree) - # Reshape so that last axis enumerates intersects of a pitch along a field line. - B_z_ra = polyval_vec(x=intersect, c=B_z_ra_c[..., jnp.newaxis]).reshape(P, S, -1) - # Only consider intersect if it is within knots that bound that polynomial. - is_intersect = intersect.reshape(P, S, -1) >= 0 - # Following discussion on page 3 and 5 of https://doi.org/10.1063/1.873749, - # we ignore the bounce points of particles only assigned to a class that are - # trapped outside this snapshot of the field line. - is_bp1 = (B_z_ra <= 0) & is_intersect - is_bp2 = (B_z_ra >= 0) & fix_inversion(is_intersect, B_z_ra) + domain = (0, 2 * jnp.pi) + + def __init__( + self, + grid, + data, + desc_from_clebsch, + M, + N, + alpha_0=0.0, + num_transit=50, + quad=leggauss(32), + automorphism=(automorphism_sin, grad_automorphism_sin), + B_ref=1.0, + L_ref=1.0, + check=False, + **kwargs, + ): + """Returns an object to compute bounce integrals. - # Transform out of local power basis expansion. - intersect = (intersect + knots[:-1, jnp.newaxis]).reshape(P, S, -1) - # New versions of jax only like static sentinels. - sentinel = -10000000.0 # knots[0] - 1 - bp1 = take_mask(intersect, is_bp1, size=num_well, fill_value=sentinel) - bp2 = take_mask(intersect, is_bp2, size=num_well, fill_value=sentinel) + Notes + ----- + Performance may improve significantly + if the spectral resolutions ``M`` and ``N`` are powers of two. - mask = (bp1 > sentinel) & (bp2 > sentinel) - # Set outside mask to same value so integration is over set of measure zero. - bp1 = jnp.where(mask, bp1, 0.0) - bp2 = jnp.where(mask, bp2, 0.0) + Parameters + ---------- + grid : Grid + Tensor-product grid in (ρ, θ, ζ) with uniformly spaced nodes in + (2π × 2π) poloidal and toroidal coordinates. + Note that below shape notation defines + L = ``grid.num_rho``, m = ``grid.num_theta``, and n = ``grid.num_zeta``. + data : dict[str, jnp.ndarray] + Data evaluated on ``grid``. Must include ``FourierBounce.required_names()``. + desc_from_clebsch : jnp.ndarray + Shape (L * M * N, 3). + DESC coordinates (ρ, θ, ζ) sourced from the Clebsch coordinates + ``FourierChebyshevBasis.nodes(M,N,domain=FourierBounce.domain)``. + M : int + Grid resolution in poloidal direction for Clebsch coordinate grid. + Preferably power of 2. A good choice is ``m``. If the poloidal stream + function condenses the Fourier spectrum of |B| significantly, then a + larger number may be beneficial. + N : int + Grid resolution in toroidal direction for Clebsch coordinate grid. + Preferably power of 2. + alpha_0 : float + Starting field line poloidal label. + num_transit : int + Number of toroidal transits to follow field line. + quad : (jnp.ndarray, jnp.ndarray) + Quadrature points xₖ and weights wₖ for the approximate evaluation of an + integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Default is 32 points. + automorphism : (Callable, Callable) or None + The first callable should be an automorphism of the real interval [-1, 1]. + The second callable should be the derivative of the first. This map defines + a change of variable for the bounce integral. The choice made for the + automorphism will affect the performance of the quadrature method. + B_ref : float + Optional. Reference magnetic field strength for normalization. + L_ref : float + Optional. Reference length scale for normalization. 
+ check : bool + Flag for debugging. Must be false for JAX transformations. - if check: - _check_bounce_points(bp1, bp2, pitch, knots, B_c, plot, **kwargs) + """ + errorif(grid.sym, NotImplementedError, msg="Need grid that works with FFTs.") + # Strictly increasing zeta knots enforces dζ > 0. + # To retain dℓ = (|B|/B^ζ) dζ > 0 after fixing dζ > 0, we require + # B^ζ = B⋅∇ζ > 0. This is equivalent to changing the sign of ∇ζ or [∂ℓ/∂ζ]|ρ,a. + # Recall dζ = ∇ζ⋅dR, implying 1 = ∇ζ⋅(e_ζ|ρ,a). Hence, a sign change in ∇ζ + # requires the same sign change in e_ζ|ρ,a to retain the metric identity. + warnif( + check and kwargs.pop("warn", True) and jnp.any(data["B^zeta"] <= 0), + msg="(∂ℓ/∂ζ)|ρ,a > 0 is required. Enforcing positive B^ζ.", + ) + self._m = grid.num_theta + self._n = grid.num_zeta + self._b_sup_z = jnp.expand_dims( + transform_to_desc(grid, jnp.abs(data["B^zeta"]) / data["|B|"] * L_ref), + axis=1, + ) + self._x, self._w = get_quadrature(quad, automorphism) + + # Compute global splines. + T, B = _transform_to_clebsch(grid, desc_from_clebsch, M, N, data["|B|"] / B_ref) + # peel off field lines + alphas = get_alpha( + alpha_0, + grid.compress(data["iota"]), + num_transit, + period=Bounce2D.domain[-1], + ) + self._B = B.compute_cheb(alphas) + # Evaluating set of Chebyshev series more efficient than evaluating + # Fourier Chebyshev series, so we project θ to Chebyshev series as well. + self._T = T.compute_cheb(alphas) + assert self._B.M == self._T.M == num_transit + assert self._B.N == self._T.N == N + assert ( + self._B.cheb.shape == self._T.cheb.shape == (grid.num_rho, num_transit, N) + ) - return bp1, bp2 + @staticmethod + def desc_from_clebsch(eq, L, M, N, clebsch=None, **kwargs): + """Return DESC coordinates of optimal Fourier Chebyshev basis nodes. + Parameters + ---------- + eq : Equilibrium + Equilibrium to use defining the coordinate mapping. + L : int or jnp.ndarray + Number of flux surfaces uniformly in [0, 1] on which to compute. + May also be an array of non-uniform coordinates. + M : int + Grid resolution in poloidal direction for Clebsch coordinate grid. + Preferably power of 2. A good choice is ``m``. If the poloidal stream + function condenses the Fourier spectrum of |B| significantly, then a + larger number may be beneficial. + N : int + Grid resolution in toroidal direction for Clebsch coordinate grid. + Preferably power of 2. + clebsch : jnp.ndarray + Optional, Clebsch coordinate tensor-product grid (ρ, α, ζ). + If given, ``L``, ``M``, and ``N`` are ignored. + kwargs : dict + Additional parameters to supply to the coordinate mapping function. + See ``desc.equilibrium.Equilibrium.map_coordinates``. -def _get_extrema(knots, B_c, B_z_ra_c, sentinel=jnp.nan): - """Return extrema of |B| along field line. Sort order is arbitrary. + Returns + ------- + desc_coords : jnp.ndarray + Shape (L * M * N, 3). + DESC coordinate grid (ρ, θ, ζ) sourced from the Clebsch coordinate + tensor-product grid (ρ, α, ζ). - Parameters - ---------- - knots : jnp.ndarray - Shape (knots.size, ). - Field line-following ζ coordinates of spline knots. Must be strictly increasing. - B_c : jnp.ndarray - Shape (B_c.shape[0], S, knots.size - 1). - Polynomial coefficients of the spline of |B| in local power basis. - First axis enumerates the coefficients of power series. Second axis - enumerates the splines along the field lines. Last axis enumerates the - polynomials that compose the spline along a particular field line. - B_z_ra_c : jnp.ndarray - Shape (B_c.shape[0] - 1, *B_c.shape[1:]). 
- Polynomial coefficients of the spline of (∂|B|/∂ζ)|ρ,α in local power basis. - First axis enumerates the coefficients of power series. Second axis - enumerates the splines along the field lines. Last axis enumerates the - polynomials that compose the spline along a particular field line. - sentinel : float - Value with which to pad array to return fixed shape. + """ + if clebsch is None: + clebsch = FourierChebyshevBasis.nodes(M, N, L, Bounce2D.domain) + desc_coords = eq.map_coordinates( + coords=clebsch, + inbasis=("rho", "alpha", "zeta"), + period=(jnp.inf, 2 * jnp.pi, jnp.inf), + **kwargs, + ) + return desc_coords - Returns - ------- - extrema, B_extrema : jnp.ndarray - Shape (S, N * (degree - 1)). + @staticmethod + def required_names(): + """Return names in ``data_index`` required to compute bounce integrals.""" + return ["B^zeta", "|B|", "iota"] - """ - B_c, B_z_ra_c, _ = _check_shape(knots, B_c, B_z_ra_c) - S, N, degree = B_c.shape[1], knots.size - 1, B_c.shape[0] - 1 - extrema = poly_root( - c=B_z_ra_c, a_min=jnp.array([0.0]), a_max=jnp.diff(knots), sentinel=sentinel - ) - assert extrema.shape == (S, N, degree - 1) - B_extrema = polyval_vec(x=extrema, c=B_c[..., jnp.newaxis]).reshape(S, -1) - # Transform out of local power basis expansion. - extrema = (extrema + knots[:-1, jnp.newaxis]).reshape(S, -1) - return extrema, B_extrema - - -def _plot(Z, V, title_id=""): - """Plot V[λ, (ρ, α), (ζ₁, ζ₂)](Z).""" - for p in range(Z.shape[0]): - for s in range(Z.shape[1]): - marked = jnp.nonzero(jnp.any(Z != 0, axis=-1))[0] - if marked.size == 0: - continue - fig, ax = plt.subplots() - ax.set_xlabel(r"Field line $\zeta$") - ax.set_ylabel(title_id) - ax.set_title( - f"Interpolation of {title_id} to quadrature points. Index {p},{s}." - ) - for i in marked: - ax.plot(Z[p, s, i], V[p, s, i], marker="o") - fig.text( - 0.01, - 0.01, - f"Each color specifies the set of points and values (ζ, {title_id}(ζ)) " - "used to evaluate an integral.", - ) - plt.tight_layout() - plt.show() + @staticmethod + def reshape_data(grid, *data): + """Reshape``data`` given by ``names`` for input to ``self.integrate``. + Parameters + ---------- + grid : Grid + Tensor-product grid in (ρ, θ, ζ). + data : jnp.ndarray + Data evaluated on grid. -def _check_interp(Z, f, b_sup_z, B, B_z_ra, result, plot): - """Check for floating point errors. + Returns + ------- + f : list[jnp.ndarray] + List of reshaped data which may be given to ``self.integrate``. - Parameters - ---------- - Z : jnp.ndarray - Quadrature points at field line-following ζ coordinates. - f : list of jnp.ndarray - Arguments to the integrand interpolated to Z. - b_sup_z : jnp.ndarray - Contravariant field-line following toroidal component of magnetic field, - interpolated to Z. - B : jnp.ndarray - Norm of magnetic field, interpolated to Z. - B_z_ra : jnp.ndarray - Norm of magnetic field, derivative with respect to field-line following - coordinate, interpolated to Z. - result : jnp.ndarray - Output of ``_interpolate_and_integrate``. - plot : bool - Whether to plot stuff. + """ + return [grid.meshgrid_reshape(d, "rtz")[:, jnp.newaxis] for d in data] - """ - assert jnp.isfinite(Z).all(), "NaN interpolation point." - # Integrals that we should be computing. - marked = jnp.any(Z != 0, axis=-1) - goal = jnp.sum(marked) - - msg = "Interpolation failed." 
- assert jnp.isfinite(B_z_ra).all(), msg - assert goal == jnp.sum(marked & jnp.isfinite(jnp.sum(b_sup_z, axis=-1))), msg - assert goal == jnp.sum(marked & jnp.isfinite(jnp.sum(B, axis=-1))), msg - for f_i in f: - assert goal == jnp.sum(marked & jnp.isfinite(jnp.sum(f_i, axis=-1))), msg - - msg = "|B| has vanished, violating the hairy ball theorem." - assert not jnp.isclose(B, 0).any(), msg - assert not jnp.isclose(b_sup_z, 0).any(), msg - - # Number of those integrals that were computed. - actual = jnp.sum(marked & jnp.isfinite(result)) - assert goal == actual, ( - f"Lost {goal - actual} integrals from NaN generation in the integrand. This " - "can be caused by floating point error or a poor choice of quadrature nodes." - ) - if plot: - _plot(Z, B, title_id=r"$\vert B \vert$") - _plot(Z, b_sup_z, title_id=r"$ (B/\vert B \vert) \cdot e^{\zeta}$") + @property + def _L(self): + """int: Number of flux surfaces to compute on.""" + return self._B.cheb.shape[0] + def bounce_points(self, pitch, num_well=None): + """Compute bounce points. -_interp1d_vec = jnp.vectorize( - interp1d, signature="(m),(n),(n)->(m)", excluded={"method"} -) + Parameters + ---------- + pitch : jnp.ndarray + Shape (P, L). + λ values to evaluate the bounce integral at each field line. λ(ρ) is + specified by ``pitch[...,ρ]`` where in the latter the labels ρ are + interpreted as the index into the last axis that corresponds to that field + line. If two-dimensional, the first axis is the batch axis. + num_well : int or None + Specify to return the first ``num_well`` pairs of bounce points for each + pitch along each field line. This is useful if ``num_well`` tightly + bounds the actual number of wells. As a reference, there are typically + at most 5 wells per toroidal transit for a given pitch. + If not specified, then all bounce points are returned. If there were fewer + wells detected along a field line than the size of the last axis of the + returned arrays, then that axis is padded with zero. -@partial(jnp.vectorize, signature="(m),(n),(n),(n)->(m)") -def _interp1d_vec_with_df(xq, x, f, fx): - return interp1d(xq, x, f, method="cubic", fx=fx) - - -def _interpolate_and_integrate( - Q, - w, - integrand, - f, - B_sup_z, - B_sup_z_ra, - B, - B_z_ra, - pitch, - knots, - method, - check=False, - plot=False, -): - """Interpolate given functions to points ``Q`` and perform quadrature. + Returns + ------- + bp1, bp2 : (jnp.ndarray, jnp.ndarray) + Shape (P, L, num_well). + The field line-following coordinates of bounce points. + The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, + respectively, for the bounce integrals. - Parameters - ---------- - Q : jnp.ndarray - Shape (P, S, Q.shape[2], w.size). - Quadrature points at field line-following ζ coordinates. + """ + return self._B.intersect1d(1 / jnp.atleast_2d(pitch), num_well) - Returns - ------- - result : jnp.ndarray - Shape Q.shape[:-1]. - Quadrature for every pitch along every field line. 
+ def check_bounce_points(self, bp1, bp2, pitch, plot=True, **kwargs): + """Check that bounce points are computed correctly and plot them.""" + kwargs.setdefault( + "title", r"Intersects $\zeta$ for $\vertB(\zeta)\vert = 1/\lambda$" + ) + kwargs.setdefault("hlabel", r"$\zeta$") + kwargs.setdefault("vlabel", r"$\vertB\vert(\zeta)$") + self._B.check_intersect1d(bp1, bp2, 1 / pitch, plot, **kwargs) - """ - assert pitch.ndim == 2 - assert w.ndim == knots.ndim == 1 - assert 3 <= Q.ndim <= 4 and Q.shape[:2] == (pitch.shape[0], B.shape[0]) - assert Q.shape[-1] == w.size - assert knots.size == B.shape[-1] - assert B_sup_z.shape == B_sup_z_ra.shape == B.shape == B_z_ra.shape - - pitch = jnp.expand_dims(pitch, axis=(2, 3) if (Q.ndim == 4) else 2) - shape = Q.shape - Q = Q.reshape(Q.shape[0], Q.shape[1], -1) - b_sup_z = _interp1d_vec_with_df( - Q, knots, B_sup_z / B, B_sup_z_ra / B - B_sup_z * B_z_ra / B**2 - ).reshape(shape) - B = _interp1d_vec_with_df(Q, knots, B, B_z_ra).reshape(shape) - # Spline the integrand so that we can evaluate it at quadrature points without - # expensive coordinate mappings and root finding. Spline each function separately so - # that the singularity near the bounce points can be captured more accurately than - # can be by any polynomial. - f = [_interp1d_vec(Q, knots, f_i, method=method).reshape(shape) for f_i in f] - result = jnp.dot(integrand(*f, B=B, pitch=pitch) / b_sup_z, w) - - if check: - _check_interp(Q.reshape(shape), f, b_sup_z, B, B_z_ra, result, plot) - - return result - - -def _bounce_quadrature( - bp1, - bp2, - x, - w, - integrand, - f, - B_sup_z, - B_sup_z_ra, - B, - B_z_ra, - pitch, - knots, - method="akima", - batch=True, - check=False, -): - """Bounce integrate ∫ f(ℓ) dℓ. + def integrate(self, pitch, integrand, f, weight=None, num_well=None): + """Bounce integrate ∫ f(ℓ) dℓ. - Parameters - ---------- - bp1, bp2 : jnp.ndarray - Shape (P, S, num_well). - The field line-following ζ coordinates of bounce points for a given pitch along - a field line. The pairs ``bp1[i,j,k]`` and ``bp2[i,j,k]`` form left and right - integration boundaries, respectively, for the bounce integrals. - x : jnp.ndarray - Shape (w.size, ). - Quadrature points in [-1, 1]. - w : jnp.ndarray - Shape (w.size, ). - Quadrature weights. - integrand : callable - The composition operator on the set of functions in ``f`` that maps the - functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the - arrays in ``f`` as arguments as well as the additional keyword arguments: - ``B`` and ``pitch``. A quadrature will be performed to approximate the - bounce integral of ``integrand(*f,B=B,pitch=pitch)``. - f : list of jnp.ndarray - Shape (S, knots.size) or (S * knots.size). - Arguments to the callable ``integrand``. These should be the scalar-valued - functions in the bounce integrand evaluated on the DESC grid. - B_sup_z : jnp.ndarray - Shape (S, knots.size) or (S * knots.size). - Contravariant field-line following toroidal component of magnetic field. - B_sup_z_ra : jnp.ndarray - Shape (S, knots.size) or (S * knots.size). - Contravariant field-line following toroidal component of magnetic field, - derivative with respect to field-line following coordinate. - B : jnp.ndarray - Shape (S, knots.size). - Norm of magnetic field. - B_z_ra : jnp.ndarray - Shape (S, knots.size). - Norm of magnetic field, derivative with respect to field-line following - coordinate. - pitch : jnp.ndarray - Shape (P, S). - λ values to evaluate the bounce integral at each field line. 
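The ``integrand`` contract spelled out in the parameters that follow (positional arrays from ``f`` plus the keyword arguments ``B`` and ``pitch``) is satisfied by an ordinary function. The example below is a toy integrand written under that assumption, not one of the DESC objectives.

.. code-block:: python

    import jax.numpy as jnp

    def example_integrand(f_1, f_2, B, pitch):
        """Toy integrand built from two interpolated quantities plus the keywords B, pitch."""
        # abs() only guards round-off where 1 - pitch*|B| is slightly negative.
        return (f_1 + pitch * f_2 * B) / jnp.sqrt(jnp.abs(1 - pitch * B))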
- knots : jnp.ndarray - Shape (knots.size, ). - Field line following coordinate values where ``B_sup_z``, ``B``, ``B_z_ra``, and - those in ``f`` supplied to the returned method were evaluated. Must be strictly - increasing. - method : str - Method of interpolation for functions contained in ``f``. - See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. - Default is akima spline. - batch : bool - Whether to perform computation in a batched manner. Default is true. - check : bool - Flag for debugging. + Computes the bounce integral ∫ f(ℓ) dℓ for every specified field line + for every λ value in ``pitch``. - Returns - ------- - result : jnp.ndarray - Shape (P, S, bp1.shape[-1]). - First axis enumerates pitch values. Second axis enumerates the field lines. - Last axis enumerates the bounce integrals. + Parameters + ---------- + pitch : jnp.ndarray + Shape (P, L). + λ values to evaluate the bounce integral at each field line. λ(ρ) is + specified by ``pitch[...,ρ]`` where in the latter the labels ρ are + interpreted as the index into the last axis that corresponds to that field + line. If two-dimensional, the first axis is the batch axis. + integrand : callable + The composition operator on the set of functions in ``f`` that maps the + functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the + arrays in ``f`` as arguments as well as the additional keyword arguments: + ``B`` and ``pitch``. A quadrature will be performed to approximate the + bounce integral of ``integrand(*f,B=B,pitch=pitch)``. + f : list[jnp.ndarray] + Shape (L, 1, m, n). + Real scalar-valued (2π × 2π) periodic in (θ, ζ) functions evaluated + on the ``grid`` supplied to construct this object. These functions + should be arguments to the callable ``integrand``. Use the method + ``self.reshape_data`` to reshape the data into the expected shape. + weight : jnp.ndarray + Shape (L, 1, m, n). + If supplied, the bounce integral labeled by well j is weighted such that + the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` + interpolated to the deepest point in the magnetic well. Use the method + ``self.reshape_data`` to reshape the data into the expected shape. + num_well : int or None + Specify to return the first ``num_well`` pairs of bounce points for each + pitch along each field line. This is useful if ``num_well`` tightly + bounds the actual number of wells. As a reference, there are typically + at most 5 wells per toroidal transit for a given pitch. - """ - errorif(bp1.ndim != 3 or bp1.shape != bp2.shape) - errorif(x.ndim != 1 or x.shape != w.shape) - pitch = jnp.atleast_2d(pitch) - S = B.shape[0] - if not isinstance(f, (list, tuple)): - f = [f] - # group data by field line - f = map(lambda f_i: f_i.reshape(-1, knots.size), f) - - # Integrate and complete the change of variable. - if batch: - result = _interpolate_and_integrate( - bijection_from_disc(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]), - w, - integrand, - f, - B_sup_z, - B_sup_z_ra, - B, - B_z_ra, - pitch, - knots, - method, - check, - # Only developers doing debugging want to see these plots. - plot=False, - ) - else: - f = list(f) - - # TODO: Use batched vmap. - def loop(bp): - bp1, bp2 = bp - return None, _interpolate_and_integrate( - bijection_from_disc(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]), - w, - integrand, - f, - B_sup_z, - B_sup_z_ra, - B, - B_z_ra, - pitch, - knots, - method, - check=False, - plot=False, - ) + If not specified, then all bounce points are returned. 
If there were fewer + wells detected along a field line than the size of the last axis of the + returned arrays, then that axis is padded with zero. - result = jnp.moveaxis( - imap(loop, (jnp.moveaxis(bp1, -1, 0), jnp.moveaxis(bp2, -1, 0)))[1], - source=0, - destination=-1, - ) + Returns + ------- + result : jnp.ndarray + Shape (P, L, num_well). + First axis enumerates pitch values. Second axis enumerates the field lines. + Last axis enumerates the bounce integrals. - result = result * grad_bijection_from_disc(bp1, bp2) - assert result.shape == (pitch.shape[0], S, bp1.shape[-1]) - return result + """ + pitch = jnp.atleast_2d(pitch) + bp1, bp2 = self.bounce_points(pitch, num_well) + result = self._integrate(bp1, bp2, pitch, integrand, f) + errorif(weight is not None, NotImplementedError) + return result + def _integrate(self, bp1, bp2, pitch, integrand, f): + assert bp1.ndim == 3 + assert bp1.shape == bp2.shape + assert pitch.ndim == 2 + W = bp1.shape[-1] # number of wells + shape = (pitch.shape[0], self._L, W, self._x.size) -def required_names(): - """Return names in ``data_index`` required to compute bounce integrals.""" - return ["B^zeta", "B^zeta_z|r,a", "|B|", "|B|_z|r,a"] + # quadrature points parameterized by ζ for each pitch and flux surface + Q_zeta = flatten_matrix( + bijection_from_disc(self._x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) + ) + # quadrature points in (θ, ζ) coordinates + Q = jnp.stack([self._T.eval1d(Q_zeta), Q_zeta], axis=-1) + + # interpolate and integrate + f = [interp_rfft2(Q, f_i, axes=(-1, -2)).reshape(shape) for f_i in f] + result = jnp.dot( + integrand( + *f, + B=self._B.eval1d(Q_zeta).reshape(shape), + pitch=pitch[..., jnp.newaxis, jnp.newaxis], + ) + / irfft2_non_uniform( + Q, self._b_sup_z, self._m, self._n, axes=(-1, -2) + ).reshape(shape), + self._w, + ) + assert result.shape == (pitch.shape[0], self._L, W) + return result -def bounce_integral( - data, - knots, - quad=leggauss(32), - automorphism=(automorphism_sin, grad_automorphism_sin), - B_ref=1.0, - L_ref=1.0, - check=False, - plot=False, - **kwargs, -): - """Returns a method to compute bounce integrals. +class Bounce1D: + """Computes bounce integrals using one-dimensional local spline methods. The bounce integral is defined as ∫ f(ℓ) dℓ, where dℓ parameterizes the distance along the field line in meters, - λ is a constant proportional to the magnetic moment over energy, - |B| is the norm of the magnetic field, f(ℓ) is the quantity to integrate along the field line, - and the boundaries of the integral are bounce points ζ₁, ζ₂ s.t. λ|B|(ζᵢ) = 1. + and the boundaries of the integral are bounce points ζ₁, ζ₂ s.t. λ|B|(ζᵢ) = 1, + where λ is a constant proportional to the magnetic moment over energy + and |B| is the norm of the magnetic field. For a particle with fixed λ, bounce points are defined to be the location on the field line such that the particle's velocity parallel to the magnetic field is zero. @@ -692,246 +1126,335 @@ def bounce_integral( Notes ----- - The quantities in ``data`` and those in ``f`` supplied to the returned method - must be separable into data evaluated along particular field lines - via ``.reshape(S,knots.size)``. One way to satisfy this is to compute stuff on the - grid returned from the method ``desc.equilibrium.coords.get_rtz_grid``. See - ``tests.test_bounce_integral.test_bounce_integral_checks`` for example use. - - Parameters + Brief description of algorithm for developers. 
+ + For applications which reduce to computing a nonlinear function of distance + along field lines between bounce points, it is required to identify these + points with field-line-following coordinates. In the special case of a linear + function summing integrals between bounce points over a flux surface, arbitrary + coordinate systems may be used as this operation becomes a surface integral, + which is invariant to the order of summation. + + The DESC coordinate system is related to field-line-following coordinate + systems by a relation whose solution is best found with Newton iteration. + There is a unique real solution to this equation, so Newton iteration is a + globally convergent root-finding algorithm here. For the task of finding + bounce points, even if the inverse map: θ(α, ζ) was known, Newton iteration + is not a globally convergent algorithm to find the real roots of + f : ζ ↦ |B|(ζ) − 1/λ where ζ is a field-line-following coordinate. + For this, function approximation of |B| is necessary. + + The function approximation in ``Bounce1D`` is ignorant that the objects to + approximate are defined on a bounded subset of ℝ². Instead, the domain is + projected to ℝ, where information sampled about the function at infinity + cannot support reconstruction of the function near the origin. As the + functions of interest do not vanish at infinity, pseudo-spectral techniques + are not used. Instead, function approximation is done with local splines. + This is useful if one can efficiently obtain data along field lines. + + After obtaining the bounce points, the supplied quadrature is performed. + By default, Gauss quadrature is performed after removing the singularity. + Local splines interpolate functions in the integrand to the quadrature nodes. + + See Also + -------- + Bounce2D : Uses two-dimensional pseudo-spectral techniques for the same task. + + Warnings + -------- + The supplied data must be from a Clebsch coordinate (ρ, α, ζ) tensor-product grid. + The field-line-following coordinate ζ must be strictly increasing. + The ζ coordinate is preferably uniformly spaced, although this is not required. + These are used as knots to construct splines. + A reference density is 100 knots per toroidal transit. + + Attributes ---------- - data : dict of jnp.ndarray - Data evaluated on grid. - Shape (S * knots.size, ) or (S, knots.size). - Should contain all names in ``required_names()``. - knots : jnp.ndarray - Shape (knots.size, ). - Field line following coordinate values where arrays in ``data`` and ``f`` - supplied to the returned method were evaluated. Must be strictly - increasing. These knots are used to compute a spline of |B| and interpolate the - integrand. A good reference density is 100 knots per toroidal transit. - quad : (jnp.ndarray, jnp.ndarray) - Quadrature points xₖ and weights wₖ for the approximate evaluation of an - integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Default is 32 points. - automorphism : (Callable, Callable) or None - The first callable should be an automorphism of the real interval [-1, 1]. - The second callable should be the derivative of the first. This map defines a - change of variable for the bounce integral. The choice made for the automorphism - will affect the performance of the quadrature method. - B_ref : float - Optional. Reference magnetic field strength for normalization. - Has no effect on computation, but may be useful for analysis. - L_ref : float - Optional. Reference length scale for normalization. 
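The statement above that Gauss quadrature is performed after removing the singularity can be checked on a scalar model integral; this assumes the default sin automorphism is x ↦ sin(πx/2) with derivative (π/2) cos(πx/2).

.. code-block:: python

    import numpy as np
    from numpy.polynomial.legendre import leggauss

    # ∫₋₁¹ dx / √(1 − x²) = π has inverse-square-root endpoint singularities.
    # Composing with the sin map makes the integrand constant: the Jacobian
    # (π/2) cos(πt/2) cancels √(1 − sin²(πt/2)).
    t, w = leggauss(32)
    x = np.sin(np.pi * t / 2)
    jac = np.pi / 2 * np.cos(np.pi * t / 2)
    approx = np.sum(w * jac / np.sqrt(1 - x**2))
    print(approx, np.pi)    # agree to machine precision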
- Has no effect on computation, but may be useful for analysis. - check : bool - Flag for debugging. Must be false for jax transformations. - plot : bool - Whether to plot stuff if ``check`` is true. Default is false. - - Returns - ------- - bounce_integrate : callable - This callable method computes the bounce integral ∫ f(ℓ) dℓ for every - specified field line for every λ value in ``pitch``. - spline : dict of jnp.ndarray - knots : jnp.ndarray - Shape (knots.size, ). - Field line-following ζ coordinates of spline knots. - B_c : jnp.ndarray - Shape (4, S, knots.size - 1). - Polynomial coefficients of the spline of |B| in local power basis. - First axis enumerates the coefficients of power series. Second axis - enumerates the splines along the field lines. Last axis enumerates the - polynomials that compose the spline along a particular field line. - B_z_ra_c : jnp.ndarray - Shape (3, S, knots.size - 1). - Polynomial coefficients of the spline of (∂|B|/∂ζ)|ρ,α in local power basis. - First axis enumerates the coefficients of power series. Second axis - enumerates the splines along the field lines. Last axis enumerates the - polynomials that compose the spline along a particular field line. + B : jnp.ndarray + Shape (4, L * M, N - 1). + Polynomial coefficients of the spline of |B| in local power basis. + First axis enumerates the coefficients of power series. Second axis + enumerates the splines along the field lines. Last axis enumerates the + polynomials that compose the spline along a particular field line. """ - warnif( - check and kwargs.pop("warn", True) and jnp.any(data["B^zeta"] <= 0), - msg="(∂ℓ/∂ζ)|ρ,a > 0 is required. Enforcing positive B^ζ.", - ) - # Strictly increasing zeta knots enforces dζ > 0. - # To retain dℓ = (|B|/B^ζ) dζ > 0 after fixing dζ > 0, we require B^ζ = B⋅∇ζ > 0. - # This is equivalent to changing the sign of ∇ζ or [∂ℓ/∂ζ]|ρ,a. - # Recall dζ = ∇ζ⋅dR, implying 1 = ∇ζ⋅(e_ζ|ρ,a). Hence, a sign change in ∇ζ - # requires the same sign change in e_ζ|ρ,a to retain the metric identity. - B_sup_z = jnp.abs(data["B^zeta"]).reshape(-1, knots.size) * L_ref / B_ref - B_sup_z_ra = ( - (data["B^zeta_z|r,a"] * jnp.sign(data["B^zeta"])).reshape(-1, knots.size) - * L_ref - / B_ref - ) - B = data["|B|"].reshape(-1, knots.size) / B_ref - # This is already the correct sign. - B_z_ra = data["|B|_z|r,a"].reshape(-1, knots.size) / B_ref - - # Compute local splines. - B_c = CubicHermiteSpline(knots, B, B_z_ra, axis=-1, check=check).c - B_c = jnp.moveaxis(B_c, source=1, destination=-1) - B_z_ra_c = polyder_vec(B_c) - degree = 3 - assert B_c.shape[0] == degree + 1 - assert B_z_ra_c.shape[0] == degree - assert B_c.shape[-1] == B_z_ra_c.shape[-1] == knots.size - 1 - spline = {"knots": knots, "B_c": B_c, "B_z_ra_c": B_z_ra_c} - - x, w = quad - assert x.ndim == w.ndim == 1 - if automorphism is not None: - auto, grad_auto = automorphism - w = w * grad_auto(x) - # Recall bijection_from_disc(auto(x), ζ_b₁, ζ_b₂) = ζ. - x = auto(x) - - def bounce_integrate( + + plot_ppoly = staticmethod(plot_ppoly) + + def __init__( + self, + grid, + data, + quad=leggauss(32), + automorphism=(automorphism_sin, grad_automorphism_sin), + Bref=1.0, + Lref=1.0, + check=False, + **kwargs, + ): + """Returns an object to compute bounce integrals. + + Parameters + ---------- + grid : Grid + Clebsch coordinate (ρ, α, ζ) tensor-product grid. + Note that below shape notation defines + L = ``grid.num_rho``, M = ``grid.num_alpha``, and N = ``grid.num_zeta``. + data : dict[str, jnp.ndarray] + Data evaluated on ``grid``. 
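The layout of the ``B`` attribute described above can be previewed on a single field line with SciPy's ``CubicHermiteSpline``, standing in for the interpax spline used in ``__init__`` below; the coefficient array is in the local power basis with shape (4, knots.size − 1).

.. code-block:: python

    import numpy as np
    from scipy.interpolate import CubicHermiteSpline

    zeta = np.linspace(0, 2 * np.pi, 10)        # knots along one field line
    B = 1 + 0.2 * np.cos(zeta)                  # toy |B|(ζ)
    dB_dz = -0.2 * np.sin(zeta)
    spline = CubicHermiteSpline(zeta, B, dB_dz)
    print(spline.c.shape)                       # (4, 9): power-basis coefficients per interval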
+ Must include names in ``Bounce1D.required_names()``. + quad : (jnp.ndarray, jnp.ndarray) + Quadrature points xₖ and weights wₖ for the approximate evaluation of an + integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Default is 32 points. + automorphism : (Callable, Callable) or None + The first callable should be an automorphism of the real interval [-1, 1]. + The second callable should be the derivative of the first. This map defines + a change of variable for the bounce integral. The choice made for the + automorphism will affect the performance of the quadrature method. + Bref : float + Optional. Reference magnetic field strength for normalization. + Lref : float + Optional. Reference length scale for normalization. + check : bool + Flag for debugging. Must be false for JAX transformations. + + """ + # Strictly increasing zeta knots enforces dζ > 0. + # To retain dℓ = (|B|/B^ζ) dζ > 0 after fixing dζ > 0, we require + # B^ζ = B⋅∇ζ > 0. This is equivalent to changing the sign of ∇ζ or [∂ℓ/∂ζ]|ρ,a. + # Recall dζ = ∇ζ⋅dR, implying 1 = ∇ζ⋅(e_ζ|ρ,a). Hence, a sign change in ∇ζ + # requires the same sign change in e_ζ|ρ,a to retain the metric identity. + warnif( + check and kwargs.pop("warn", True) and jnp.any(data["B^zeta"] <= 0), + msg="(∂ℓ/∂ζ)|ρ,a > 0 is required. Enforcing positive B^ζ.", + ) + data = { + "B^zeta": jnp.abs(data["B^zeta"]) * Lref / Bref, + "B^zeta_z|r,a": data["B^zeta_z|r,a"] + * jnp.sign(data["B^zeta"]) + * Lref + / Bref, + "|B|": data["|B|"] / Bref, + "|B|_z|r,a": data["|B|_z|r,a"] / Bref, # This is already the correct sign. + } + self._data = { + key: grid.meshgrid_reshape(val, "raz").reshape(-1, grid.num_zeta) + for key, val in data.items() + } + self._x, self._w = get_quadrature(quad, automorphism) + + # Compute local splines. + self._zeta = grid.compress(grid.nodes[:, 2], surface_label="zeta") + self.B = jnp.moveaxis( + CubicHermiteSpline( + x=self._zeta, + y=self._data["|B|"], + dydx=self._data["|B|_z|r,a"], + axis=-1, + check=check, + ).c, + source=1, + destination=-1, + ) + self._dB_dz = polyder_vec(self.B) + degree = 3 + assert self.B.shape[0] == degree + 1 + assert self._dB_dz.shape[0] == degree + assert self.B.shape[-1] == self._dB_dz.shape[-1] == grid.num_zeta - 1 + + @staticmethod + def required_names(): + """Return names in ``data_index`` required to compute bounce integrals.""" + return ["B^zeta", "B^zeta_z|r,a", "|B|", "|B|_z|r,a"] + + @staticmethod + def reshape_data(grid, *data): + """Reshape ``data`` given by ``names`` for input to ``self.integrate``. + + Parameters + ---------- + grid : Grid + Clebsch coordinate (ρ, α, ζ) tensor-product grid. + data : jnp.ndarray + Data evaluated on grid. + + Returns + ------- + f : list[jnp.ndarray] + List of reshaped data which may be given to ``self.integrate``. + + """ + return [ + grid.meshgrid_reshape(d, "raz").reshape(-1, grid.num_zeta) for d in data + ] + + def bounce_points(self, pitch, num_well=None): + """Compute bounce points. + + Parameters + ---------- + pitch : jnp.ndarray + Shape must broadcast with (P, L * M). + λ values to evaluate the bounce integral at each field line. λ(ρ,α) is + specified by ``pitch[...,ρ]`` where in the latter the labels (ρ,α) are + interpreted as the index into the last axis that corresponds to that field + line. If two-dimensional, the first axis is the batch axis. + num_well : int or None + Specify to return the first ``num_well`` pairs of bounce points for each + pitch along each field line. This is useful if ``num_well`` tightly + bounds the actual number of wells. 
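A hedged construction sketch for the ``grid`` and ``data`` parameters above: ``eq`` stands for a DESC ``Equilibrium`` and ``grid`` for a Clebsch (ρ, α, ζ) tensor-product grid, e.g. one obtained from ``desc.equilibrium.coords.get_rtz_grid``; how that grid is built is not shown in this diff.

.. code-block:: python

    # Placeholder objects ``eq`` and ``grid`` as described in the lead-in above.
    data = eq.compute(Bounce1D.required_names(), grid=grid)
    bounce = Bounce1D(grid, data, check=True)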
As a reference, there are typically + at most 5 wells per toroidal transit for a given pitch. + + If not specified, then all bounce points are returned. If there were fewer + wells detected along a field line than the size of the last axis of the + returned arrays, then that axis is padded with zero. + + Returns + ------- + bp1, bp2 : (jnp.ndarray, jnp.ndarray) + Shape (P, L * M, num_well). + The field line-following coordinates of bounce points. + The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, + respectively, for the bounce integrals. + + If there were less than ``num_wells`` wells detected along a field line, + then the last axis, which enumerates bounce points for a particular field + line and pitch, is padded with zero. + + """ + return bounce_points( + pitch=pitch, + knots=self._zeta, + B=self.B, + dB_dz=self._dB_dz, + num_well=num_well, + ) + + def check_bounce_points(self, bp1, bp2, pitch, plot=True, **kwargs): + """Check that bounce points are computed correctly. + + Parameters + ---------- + bp1, bp2 : (jnp.ndarray, jnp.ndarray) + Shape (P, L * M, num_well). + The field line-following coordinates of bounce points. + The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, + respectively, for the bounce integrals. + pitch : jnp.ndarray + Shape must broadcast with (P, L * M). + λ values to evaluate the bounce integral at each field line. λ(ρ,α) is + specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are + interpreted as the index into the last axis that corresponds to that field + line. If two-dimensional, the first axis is the batch axis. + plot : bool + Whether to plot stuff. + kwargs : dict + Keyword arguments into ``self.plot_ppoly``. + + """ + _check_bounce_points( + bp1=bp1, + bp2=bp2, + pitch=jnp.atleast_2d(pitch), + knots=self._zeta, + B=self.B, + plot=plot, + **kwargs, + ) + + def integrate( + self, + pitch, integrand, f, - pitch, weight=None, num_well=None, - method="akima", + method="cubic", batch=True, + check=False, ): """Bounce integrate ∫ f(ℓ) dℓ. + Computes the bounce integral ∫ f(ℓ) dℓ for every specified field line + for every λ value in ``pitch``. + Parameters ---------- + pitch : jnp.ndarray + Shape must broadcast with (P, L * M). + λ values to evaluate the bounce integral at each field line. λ(ρ,α) is + specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are + interpreted as the index into the last axis that corresponds to that field + line. If two-dimensional, the first axis is the batch axis. integrand : callable The composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the arrays in ``f`` as arguments as well as the additional keyword arguments: ``B`` and ``pitch``. A quadrature will be performed to approximate the bounce integral of ``integrand(*f,B=B,pitch=pitch)``. - f : list of jnp.ndarray - Shape (S, knots.size) or (S * knots.size). - Arguments to the callable ``integrand``. These should be real scalar-valued - functions in the bounce integrand evaluated on the DESC grid. - pitch : jnp.ndarray - Shape (P, S). - λ values to evaluate the bounce integral at each field line. λ(ρ,α) is - specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are - interpreted as the index into the last axis that corresponds to that field - line. If two-dimensional, the first axis is the batch axis. + f : list[jnp.ndarray] + Shape (L * M, N). 
+ Real scalar-valued functions evaluated on the ``grid`` supplied to + construct this object. These functions should be arguments to the callable + ``integrand``. Use the method ``self.reshape_data`` to reshape the data + into the expected shape. weight : jnp.ndarray - Shape (S, knots.size) or (S * knots.size). + Shape (L * M, N). If supplied, the bounce integral labeled by well j is weighted such that the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` - interpolated to the deepest point in the magnetic well. + interpolated to the deepest point in the magnetic well. Use the method + ``self.reshape_data`` to reshape the data into the expected shape. num_well : int or None - If not specified, then all bounce integrals are returned in an array whose - last axis has size ``(knots.size-1)*degree``. If there - were less than that many wells detected along a field line, then the last - axis of the returned array, which enumerates bounce integrals for a - particular field line and pitch, is padded with zero. - - Specify to return the bounce integrals between the first ``num_well`` - wells for each pitch along each field line. This is useful if ``num_well`` - tightly bounds the actual number of wells. To obtain a good - choice for ``num_well``, plot the field line with all the bounce points - identified. This will be done automatically if the ``bounce_integral`` - function is called with ``check=True`` and ``plot=True``. As a reference, - there are typically <= 5 wells per toroidal transit. + Specify to return the first ``num_well`` pairs of bounce points for each + pitch along each field line. This is useful if ``num_well`` tightly + bounds the actual number of wells. As a reference, there are typically + at most 5 wells per toroidal transit for a given pitch. + + If not specified, then all bounce points are returned. If there were fewer + wells detected along a field line than the size of the last axis of the + returned arrays, then that axis is padded with zero. method : str Method of interpolation for functions contained in ``f``. See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. - Default is akima spline. + Default is cubic C1 local spline. batch : bool Whether to perform computation in a batched manner. Default is true. + check : bool + Flag for debugging. Must be false for JAX transformations. Returns ------- result : jnp.ndarray - Shape (P, S, num_well). + Shape (P, L*M, num_well). First axis enumerates pitch values. Second axis enumerates the field lines. Last axis enumerates the bounce integrals. 
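Continuing the construction sketch above, a call to ``integrate`` might look like the following; ``grid``, ``bounce``, and the quantity name ``"g"`` are placeholders, and the integrand is a toy one obeying the contract described in these parameters.

.. code-block:: python

    import jax.numpy as jnp

    def integrand(g, B, pitch):
        # abs() only guards round-off where 1 - pitch*|B| is slightly negative
        return g / jnp.sqrt(jnp.abs(1 - pitch * B))

    f = Bounce1D.reshape_data(grid, data["g"])            # list with one (L*M, N) array
    pitch = jnp.array([0.5, 0.7, 0.9])[:, jnp.newaxis]    # toy λ values; broadcasts with (P, L*M)
    result = bounce.integrate(pitch, integrand, f, num_well=10)   # shape (P, L*M, 10)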
""" - bp1, bp2 = bounce_points(pitch, knots, B_c, B_z_ra_c, num_well, check, plot) - result = _bounce_quadrature( - bp1, - bp2, - x, - w, - integrand, - f, - B_sup_z, - B_sup_z_ra, - B, - B_z_ra, - pitch, - knots, - method, - batch, - check, + pitch = jnp.atleast_2d(pitch) + bp1, bp2 = self.bounce_points(pitch, num_well) + result = bounce_quadrature( + x=self._x, + w=self._w, + bp1=bp1, + bp2=bp2, + pitch=pitch, + integrand=integrand, + f=f, + data=self._data, + knots=self._zeta, + method=method, + batch=batch, + check=check, ) if weight is not None: result *= _interp_to_argmin_B_soft( - weight, bp1, bp2, knots, B_c, B_z_ra_c, method + g=weight, + bp1=bp1, + bp2=bp2, + knots=self._zeta, + B=self.B, + dB_dz=self._dB_dz, + method=method, ) - assert result.shape[-1] == setdefault(num_well, (knots.size - 1) * degree) + assert result.shape[-1] == setdefault(num_well, (self._zeta.size - 1) * 3) return result - - return bounce_integrate, spline - - -def _interp_to_argmin_B_soft(f, bp1, bp2, knots, B_c, B_z_ra_c, method, beta=-50): - """Compute ``f`` at deepest point in the magnetic well. - - Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A = argmin_E |B|(ζ). Returns mean_A f(ζ). - - Parameters - ---------- - beta : float - More negative gives exponentially better approximation at the - expense of noisier gradients. - - """ - ext, B = _get_extrema(knots, B_c, B_z_ra_c, sentinel=0) - assert ext.shape[0] == B.shape[0] == bp1.shape[1] == bp2.shape[1] - argmin = softmax( - beta - * jnp.where( - (bp1[..., jnp.newaxis] < ext[:, jnp.newaxis]) - & (ext[:, jnp.newaxis] < bp2[..., jnp.newaxis]), - jnp.expand_dims(B / jnp.mean(B, axis=-1, keepdims=True), axis=1), - 1e2, # >> max(|B|) / mean(|B|) - ), - axis=-1, - ) - f = jnp.linalg.vecdot( - argmin, - _interp1d_vec(ext, knots, f.reshape(-1, knots.size), method=method)[ - :, jnp.newaxis - ], - ) - assert f.shape == bp1.shape == bp2.shape - return f - - -# Less efficient than above if P >> 1. -def _interp_to_argmin_B_hard(f, bp1, bp2, knots, B_c, B_z_ra_c, method): - """Compute ``f`` at deepest point in the magnetic well. - - Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A ∈ argmin_E |B|(ζ). Returns f(A). - """ - ext, B = _get_extrema(knots, B_c, B_z_ra_c, sentinel=0) - assert ext.shape[0] == B.shape[0] == bp1.shape[1] == bp2.shape[1] - argmin = jnp.argmin( - jnp.where( - (bp1[..., jnp.newaxis] < ext[:, jnp.newaxis]) - & (ext[:, jnp.newaxis] < bp2[..., jnp.newaxis]), - B[:, jnp.newaxis], - 1e2 + jnp.max(B), - ), - axis=-1, - ) - A = jnp.take_along_axis(ext[jnp.newaxis], argmin, axis=-1) - f = _interp1d_vec(A, knots, f.reshape(-1, knots.size), method=method) - assert f.shape == bp1.shape == bp2.shape - return f diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py new file mode 100644 index 0000000000..3482e5d4c9 --- /dev/null +++ b/desc/integrals/bounce_utils.py @@ -0,0 +1,874 @@ +"""Utilities for bounce integrals.""" + +from functools import partial + +from interpax import PPoly +from matplotlib import pyplot as plt +from orthax.chebyshev import chebroots + +from desc.backend import flatnonzero, imap, jnp, put, softmax +from desc.integrals.interp_utils import ( + interp1d_vec, + interp1d_vec_with_df, + poly_root, + polyval_vec, +) +from desc.integrals.quad_utils import ( + bijection_from_disc, + composite_linspace, + grad_bijection_from_disc, +) +from desc.utils import atleast_3d_mid, errorif, setdefault, take_mask + +# TODO: Boyd's method 𝒪(N²) instead of Chebyshev companion matrix 𝒪(N³). +# John P. 
Boyd, Computing real roots of a polynomial in Chebyshev series +# form through subdivision. https://doi.org/10.1016/j.apnum.2005.09.007. +chebroots_vec = jnp.vectorize(chebroots, signature="(m)->(n)") + + +def flatten_matrix(y): + """Flatten matrix to vector.""" + return y.reshape(*y.shape[:-2], -1) + + +def subtract(c, k): + """Subtract ``k`` from first index of last axis of ``c``. + + Semantically same as ``return c.copy().at[...,0].add(-k)``, + but allows dimension to increase. + """ + c_0 = c[..., 0] - k + c = jnp.concatenate( + [ + c_0[..., jnp.newaxis], + jnp.broadcast_to(c[..., 1:], (*c_0.shape, c.shape[-1] - 1)), + ], + axis=-1, + ) + return c + + +def get_pitch(min_B, max_B, num, relative_shift=1e-6): + """Return uniformly spaced values between ``1/max_B`` and ``1/min_B``. + + Parameters + ---------- + min_B : jnp.ndarray + Minimum |B| value. + max_B : jnp.ndarray + Maximum |B| value. + num : int + Number of values, not including endpoints. + relative_shift : float + Relative amount to shift maxima down and minima up to avoid floating point + errors in downstream routines. + + Returns + ------- + pitch : jnp.ndarray + Shape (num + 2, *min_B.shape). + + """ + # Floating point error impedes consistent detection of bounce points riding + # extrema. Shift values slightly to resolve this issue. + min_B = (1 + relative_shift) * min_B + max_B = (1 - relative_shift) * max_B + pitch = composite_linspace(1 / jnp.stack([max_B, min_B]), num) + assert pitch.shape == (num + 2, *min_B.shape) + return pitch + + +# TODO: Generalize this beyond ζ = ϕ or just map to Clebsch with ϕ. +def get_alpha(alpha_0, iota, num_transit, period): + """Get sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) of field line. + + Parameters + ---------- + alpha_0 : float + Starting field line poloidal label. + iota : jnp.ndarray + Shape (iota.size, ). + Rotational transform normalized by 2π. + num_transit : float + Number of ``period``s to follow field line. + period : float + Toroidal period after which to update label. + + Returns + ------- + alpha : jnp.ndarray + Shape (iota.size, num_transit). + Sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) that specify field line. + + """ + # Δϕ (∂α/∂ϕ) = Δϕ ι̅ = Δϕ ι/2π = Δϕ data["iota"] + alpha = alpha_0 + period * iota[:, jnp.newaxis] * jnp.arange(num_transit) + return alpha + + +@partial(jnp.vectorize, signature="(m),(m)->(m)") +def epigraph_and(is_intersect, df_dy_sign): + """Set and epigraph of f with ``is_intersect``. + + Remove intersects for which there does not exist a connected path between + adjacent intersects in the epigraph of a continuous map ``f``. + + Parameters + ---------- + is_intersect : jnp.ndarray + Boolean array indicating whether element is an intersect. + df_dy_sign : jnp.ndarray + Shape ``is_intersect.shape``. + Sign of ∂f/∂y (yᵢ) for f(yᵢ) = 0. + + Returns + ------- + is_intersect : jnp.ndarray + Boolean array indicating whether element is an intersect + and satisfies the stated condition. + + """ + # The pairs ``y1`` and ``y2`` are boundaries of an integral only if ``y1 <= y2``. + # For the integrals to be over wells, it is required that the first intersect + # has a non-positive derivative. Now, by continuity, + # ``df_dy_sign[...,k]<=0`` implies ``df_dy_sign[...,k+1]>=0``, + # so there can be at most one inversion, and if it exists, the inversion + # must be at the first pair. To correct the inversion, it suffices to disqualify the + # first intersect as a right boundary, except under an edge case of a series of + # inflection points. 
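To make the preceding comment concrete, consider intersects whose first crossing rises through 1/λ (∂f/∂y > 0): it cannot close a well, so ``epigraph_and`` drops it from the set of admissible right boundaries while keeping the rest.

.. code-block:: python

    import jax.numpy as jnp

    is_intersect = jnp.array([True, True, True, False])
    df_dy_sign = jnp.array([1, -1, 1, 0])
    # epigraph_and(is_intersect, df_dy_sign) is expected to give [False, True, True, False]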
+ idx = flatnonzero(is_intersect, size=2, fill_value=-1) # idx of first 2 intersects + edge_case = ( + (df_dy_sign[idx[0]] == 0) + & (df_dy_sign[idx[1]] < 0) + & is_intersect[idx[0]] + & is_intersect[idx[1]] + # In theory, we need to keep propagating this edge case, e.g. + # (df_dy_sign[..., 1] < 0) | ( + # (df_dy_sign[..., 1] == 0) & (df_dy_sign[..., 2] < 0)... + # ). + # At each step, the likelihood that an intersection has already been lost + # due to floating point errors grows, so the real solution is to pick a less + # degenerate pitch value - one that does not ride the global extrema of |B|. + ) + return put(is_intersect, idx[0], edge_case) + + +def _check_spline_shape(knots, B, dB_dz, pitch=None): + """Ensure inputs have compatible shape, and return them with full dimension. + + Parameters + ---------- + knots : jnp.ndarray + Shape (knots.size, ). + Field line-following ζ coordinates of spline knots. Must be strictly increasing. + B : jnp.ndarray + Shape (B.shape[0], S, knots.size - 1). + Polynomial coefficients of the spline of |B| in local power basis. + First axis enumerates the coefficients of power series. Second axis + enumerates the splines along the field lines. Last axis enumerates the + polynomials that compose the spline along a particular field line. + dB_dz : jnp.ndarray + Shape (B.shape[0] - 1, *B.shape[1:]). + Polynomial coefficients of the spline of (∂|B|/∂ζ)|ρ,α in local power basis. + First axis enumerates the coefficients of power series. Second axis + enumerates the splines along the field lines. Last axis enumerates the + polynomials that compose the spline along a particular field line. + pitch : jnp.ndarray + Shape must broadcast with (P, S). + λ values to evaluate the bounce integral at each field line. λ(ρ,α) is + specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are + interpreted as the index into the last axis that corresponds to that field + line. If two-dimensional, the first axis is the batch axis. + + """ + errorif(knots.ndim != 1, msg=f"knots should be 1d; got shape {knots.shape}.") + errorif( + B.shape[-1] != (knots.size - 1), + msg=( + "Last axis does not enumerate polynomials of spline. " + f"B.shape={B.shape}. knots.shape={knots.shape}." + ), + ) + errorif( + B.ndim > 3 + or dB_dz.ndim > 3 + or (B.shape[0] - 1) != dB_dz.shape[0] + or B.shape[1:] != dB_dz.shape[1:], + msg=f"Invalid shape for spline. B.shape={B.shape}. dB_dz.shape={dB_dz.shape}.", + ) + # Add axis which enumerates field lines if necessary. + B, dB_dz = atleast_3d_mid(B, dB_dz) + if pitch is not None: + pitch = jnp.atleast_2d(pitch) + errorif( + pitch.ndim != 2 + or not (pitch.shape[-1] == 1 or pitch.shape[-1] == B.shape[1]), + msg=f"Invalid shape {pitch.shape} for pitch angles.", + ) + return B, dB_dz, pitch + + +def bounce_points( + pitch, knots, B, dB_dz, num_well=None, check=False, plot=True, **kwargs +): + """Compute the bounce points given spline of |B| and pitch λ. + + Parameters + ---------- + pitch : jnp.ndarray + Shape must broadcast with (P, S). + λ values to evaluate the bounce integral at each field line. λ(ρ,α) is + specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are + interpreted as the index into the last axis that corresponds to that field + line. If two-dimensional, the first axis is the batch axis. + knots : jnp.ndarray + Shape (knots.size, ). + Field line-following ζ coordinates of spline knots. Must be strictly increasing. + B : jnp.ndarray + Shape (B.shape[0], S, knots.size - 1). 
+ Polynomial coefficients of the spline of |B| in local power basis. + First axis enumerates the coefficients of power series. Second axis + enumerates the splines along the field lines. Last axis enumerates the + polynomials that compose the spline along a particular field line. + dB_dz : jnp.ndarray + Shape (B.shape[0] - 1, *B.shape[1:]). + Polynomial coefficients of the spline of (∂|B|/∂ζ)|ρ,α in local power basis. + First axis enumerates the coefficients of power series. Second axis + enumerates the splines along the field lines. Last axis enumerates the + polynomials that compose the spline along a particular field line. + num_well : int or None + Specify to return the first ``num_well`` pairs of bounce points for each + pitch along each field line. This is useful if ``num_well`` tightly + bounds the actual number of wells. As a reference, there are typically + at most 5 wells per toroidal transit for a given pitch. + + If not specified, then all bounce points are returned. If there were fewer + wells detected along a field line than the size of the last axis of the + returned arrays, then that axis is padded with zero. + check : bool + Flag for debugging. Must be false for JAX transformations. + plot : bool + Whether to plot some things if check is true. Default is true. + kwargs : dict + Keyword arguments into ``plot_ppoly``. + + Returns + ------- + bp1, bp2 : (jnp.ndarray, jnp.ndarray) + Shape (P, S, num_well). + The field line-following coordinates of bounce points. + The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, + respectively, for the bounce integrals. + + If there were less than ``num_wells`` wells detected along a field line, + then the last axis, which enumerates bounce points for a particular field + line and pitch, is padded with zero. + + """ + B, dB_dz, pitch = _check_spline_shape(knots, B, dB_dz, pitch) + P, S, degree = pitch.shape[0], B.shape[1], B.shape[0] - 1 + # Intersection points in local power basis. + intersect = poly_root( + c=B, + k=(1 / pitch)[..., jnp.newaxis], + a_min=jnp.array([0.0]), + a_max=jnp.diff(knots), + sort=True, + sentinel=-1.0, + distinct=True, + ) + assert intersect.shape == (P, S, knots.size - 1, degree) + + # Reshape so that last axis enumerates intersects of a pitch along a field line. + dB_dz_sign = jnp.sign( + polyval_vec(x=intersect, c=dB_dz[..., jnp.newaxis]).reshape(P, S, -1) + ) + # Only consider intersect if it is within knots that bound that polynomial. + is_intersect = intersect.reshape(P, S, -1) >= 0 + # Following discussion on page 3 and 5 of https://doi.org/10.1063/1.873749, + # we ignore the bounce points of particles only assigned to a class that are + # trapped outside this snapshot of the field line. + is_bp1 = (dB_dz_sign <= 0) & is_intersect + is_bp2 = (dB_dz_sign >= 0) & epigraph_and(is_intersect, dB_dz_sign) + + # Transform out of local power basis expansion. + intersect = (intersect + knots[:-1, jnp.newaxis]).reshape(P, S, -1) + # New versions of JAX only like static sentinels. + sentinel = -10000000.0 # instead of knots[0] - 1 + bp1 = take_mask(intersect, is_bp1, size=num_well, fill_value=sentinel) + bp2 = take_mask(intersect, is_bp2, size=num_well, fill_value=sentinel) + + mask = (bp1 > sentinel) & (bp2 > sentinel) + # Set outside mask to same value so integration is over set of measure zero. 
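A self-contained numerical illustration of the classification implemented above, for the model field |B|(ζ) = 1 + 0.3 cos ζ and 1/λ = 1.1: crossings where |B| decreases through 1/λ open a well (``bp1``) and crossings where it increases close one (``bp2``).

.. code-block:: python

    import numpy as np

    zeta = np.linspace(0, 4 * np.pi, 4001)
    B = 1 + 0.3 * np.cos(zeta)
    resid = B - 1.1
    idx = np.nonzero(np.diff(np.sign(resid)))[0]   # samples bracketing each crossing
    dB_dz = -0.3 * np.sin(zeta[idx])
    bp1 = zeta[idx][dB_dz <= 0]                    # |B| decreasing: left boundaries
    bp2 = zeta[idx][dB_dz >= 0]                    # |B| increasing: right boundaries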
+ bp1 = jnp.where(mask, bp1, 0.0) + bp2 = jnp.where(mask, bp2, 0.0) + + if check: + _check_bounce_points(bp1, bp2, pitch, knots, B, plot, **kwargs) + + return bp1, bp2 + + +def _check_bounce_points(bp1, bp2, pitch, knots, B, plot=True, **kwargs): + """Check that bounce points are computed correctly.""" + eps = jnp.finfo(jnp.array(1.0).dtype).eps * 10 + title = kwargs.pop( + "title", + r"Intersects $\zeta$ in epigraph of $\vert B \vert(\zeta) = 1/\lambda$", + ) + klabel = kwargs.pop("klabel", r"$1/\lambda$") + hlabel = kwargs.pop("hlabel", r"$\zeta$") + vlabel = kwargs.pop("vlabel", r"$\vert B \vert(\zeta)$") + + assert bp1.shape == bp2.shape + mask = (bp1 - bp2) != 0.0 + bp1 = jnp.where(mask, bp1, jnp.nan) + bp2 = jnp.where(mask, bp2, jnp.nan) + + err_1 = jnp.any(bp1 > bp2, axis=-1) + err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1], axis=-1) + + P, S, _ = bp1.shape + for s in range(S): + Bs = PPoly(B[:, s], knots) + for p in range(P): + Bs_midpoint = Bs((bp1[p, s] + bp2[p, s]) / 2) + err_3 = jnp.any(Bs_midpoint > 1 / pitch[p, s] + eps) + if not (err_1[p, s] or err_2[p, s] or err_3): + continue + _bp1 = bp1[p, s][mask[p, s]] + _bp2 = bp2[p, s][mask[p, s]] + if plot: + plot_ppoly( + ppoly=Bs, + z1=_bp1, + z2=_bp2, + k=1 / pitch[p, s], + klabel=klabel, + title=title, + hlabel=hlabel, + vlabel=vlabel, + **kwargs, + ) + print(" bp1 | bp2") + print(jnp.column_stack([_bp1, _bp2])) + assert not err_1[p, s], "Intersects have an inversion.\n" + assert not err_2[p, s], "Detected discontinuity.\n" + assert not err_3, ( + f"Detected |B| = {Bs_midpoint[mask[p, s]]} > {1 / pitch[p, s] + eps} " + f"= 1/λ in well. Use more knots.\n" + ) + if plot: + plot_ppoly( + ppoly=Bs, + z1=bp1[:, s], + z2=bp2[:, s], + k=1 / pitch[:, s], + klabel=klabel, + title=title, + hlabel=hlabel, + vlabel=vlabel, + **kwargs, + ) + + +def bounce_quadrature( + x, + w, + bp1, + bp2, + pitch, + integrand, + f, + data, + knots, + method="cubic", + batch=True, + check=False, + plot=False, +): + """Bounce integrate ∫ f(ℓ) dℓ. + + Parameters + ---------- + x : jnp.ndarray + Shape (w.size, ). + Quadrature points in [-1, 1]. + w : jnp.ndarray + Shape (w.size, ). + Quadrature weights. + bp1, bp2 : jnp.ndarray + Shape (P, S, num_well). + The field line-following coordinates of bounce points. + The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, + respectively, for the bounce integrals. + pitch : jnp.ndarray + Shape must broadcast with (P, S). + λ values to evaluate the bounce integral at each field line. λ(ρ,α) is + specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are + interpreted as the index into the last axis that corresponds to that field + line. If two-dimensional, the first axis is the batch axis. + integrand : callable + The composition operator on the set of functions in ``f`` that maps the + functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the + arrays in ``f`` as arguments as well as the additional keyword arguments: + ``B`` and ``pitch``. A quadrature will be performed to approximate the + bounce integral of ``integrand(*f,B=B,pitch=pitch)``. + f : list[jnp.ndarray] + Shape (S, knots.size). + Real scalar-valued functions evaluated on the ``knots``. + These functions should be arguments to the callable ``integrand``. + data : dict[str, jnp.ndarray] + Data evaluated on ``grid`` and reshaped with ``Bounce1D.reshape_data``. + Must include names in ``Bounce1D.required_names()``. + knots : jnp.ndarray + Shape (knots.size, ). 
+ Field line-following sorted, unique ζ coordinates where the arrays in + ``data`` and ``f`` were evaluated. + method : str + Method of interpolation for functions contained in ``f``. + See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. + Default is cubic C1 local spline. + batch : bool + Whether to perform computation in a batched manner. Default is true. + check : bool + Flag for debugging. Must be false for JAX transformations. + Ignored if ``batch`` is false. + plot : bool + Whether to plot stuff if ``check`` is true. Default is false. + Only developers doing debugging want to see these plots. + + Returns + ------- + result : jnp.ndarray + Shape (P, S, num_well). + Quadrature for every pitch along every field line. + First axis enumerates pitch values. Second axis enumerates the field lines. + Last axis enumerates the bounce integrals. + + """ + errorif(bp1.ndim != 3 or bp1.shape != bp2.shape) + errorif(x.ndim != 1 or x.shape != w.shape) + pitch = jnp.atleast_2d(pitch) + if not isinstance(f, (list, tuple)): + f = [f] + + # Integrate and complete the change of variable. + if batch: + result = _interpolate_and_integrate( + w=w, + Q=bijection_from_disc(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]), + pitch=pitch, + integrand=integrand, + f=f, + data=data, + knots=knots, + method=method, + check=check, + plot=plot, + ) + else: + f = list(f) + + # TODO: Use batched vmap. + def loop(bp): + bp1, bp2 = bp + # Need to return tuple because input was tuple; artifact of JAX map. + return None, _interpolate_and_integrate( + w=w, + Q=bijection_from_disc(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]), + pitch=pitch, + integrand=integrand, + f=f, + data=data, + knots=knots, + method=method, + check=False, + plot=False, + ) + + result = jnp.moveaxis( + imap(loop, (jnp.moveaxis(bp1, -1, 0), jnp.moveaxis(bp2, -1, 0)))[1], + source=0, + destination=-1, + ) + + result = result * grad_bijection_from_disc(bp1, bp2) + assert result.shape == (pitch.shape[0], data["|B|"].shape[0], bp1.shape[-1]) + return result + + +def _interpolate_and_integrate( + w, + Q, + pitch, + integrand, + f, + data, + knots, + method, + check=False, + plot=False, +): + """Interpolate given functions to points ``Q`` and perform quadrature. + + Parameters + ---------- + w : jnp.ndarray + Shape (w.size, ). + Quadrature weights. + Q : jnp.ndarray + Shape (P, S, Q.shape[2], w.size). + Quadrature points at field line-following ζ coordinates. + data : dict[str, jnp.ndarray] + Data evaluated on ``grid`` and reshaped with ``Bounce1D.reshape_data``. + Must include names in ``Bounce1D.required_names()``. + + Returns + ------- + result : jnp.ndarray + Shape Q.shape[:-1]. + Quadrature for every pitch along every field line. 
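The change of variable assumed for ``bijection_from_disc`` and ``grad_bijection_from_disc`` (an affine map of the fixed quadrature nodes on [−1, 1] into each bounce well, with the corresponding Jacobian applied afterwards) can be verified on a scalar integral:

.. code-block:: python

    import numpy as np
    from numpy.polynomial.legendre import leggauss

    x, w = leggauss(32)
    zeta1, zeta2 = 0.3, 1.7                        # one toy bounce well
    zeta = (zeta2 - zeta1) / 2 * (x + 1) + zeta1   # nodes pushed into [ζ₁, ζ₂]
    jac = (zeta2 - zeta1) / 2                      # dζ/dx
    approx = jac * np.sum(w * np.cos(zeta))
    print(approx, np.sin(zeta2) - np.sin(zeta1))   # agree to machine precision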
+ + """ + assert pitch.ndim == 2 + assert w.ndim == knots.ndim == 1 + assert 3 <= Q.ndim <= 4 and Q.shape[:2] == (pitch.shape[0], data["|B|"].shape[0]) + assert Q.shape[-1] == w.size + assert knots.size == data["|B|"].shape[-1] + assert ( + data["B^zeta"].shape + == data["B^zeta_z|r,a"].shape + == data["|B|"].shape + == data["|B|_z|r,a"].shape + ) + + pitch = jnp.expand_dims(pitch, axis=(2, 3) if (Q.ndim == 4) else 2) + shape = Q.shape + Q = Q.reshape(Q.shape[0], Q.shape[1], -1) + b_sup_z = interp1d_vec_with_df( + Q, + knots, + data["B^zeta"] / data["|B|"], + data["B^zeta_z|r,a"] / data["|B|"] + - data["B^zeta"] * data["|B|_z|r,a"] / data["|B|"] ** 2, + ).reshape(shape) + B = interp1d_vec_with_df(Q, knots, data["|B|"], data["|B|_z|r,a"]).reshape(shape) + # Spline the integrand so that we can evaluate it at quadrature points without + # expensive coordinate mappings and root finding. Spline each function separately so + # that the singularity near the bounce points can be captured more accurately than + # can be by any polynomial. + f = [interp1d_vec(Q, knots, f_i, method=method).reshape(shape) for f_i in f] + result = jnp.dot(integrand(*f, B=B, pitch=pitch) / b_sup_z, w) + + if check: + _check_interp(Q.reshape(shape), f, b_sup_z, B, data["|B|_z|r,a"], result, plot) + + return result + + +def _check_interp(Z, f, b_sup_z, B, B_z_ra, result, plot): + """Check for floating point errors. + + Parameters + ---------- + Z : jnp.ndarray + Quadrature points at field line-following ζ coordinates. + f : list of jnp.ndarray + Arguments to the integrand interpolated to Z. + b_sup_z : jnp.ndarray + Contravariant field-line following toroidal component of magnetic field, + interpolated to Z. + B : jnp.ndarray + Norm of magnetic field, interpolated to Z. + B_z_ra : jnp.ndarray + Norm of magnetic field, derivative with respect to field-line following + coordinate. + result : jnp.ndarray + Output of ``_interpolate_and_integrate``. + plot : bool + Whether to plot stuff. + + """ + assert jnp.isfinite(Z).all(), "NaN interpolation point." + # Integrals that we should be computing. + marked = jnp.any(Z != 0, axis=-1) + goal = jnp.sum(marked) + + msg = "Interpolation failed." + assert jnp.isfinite(B_z_ra).all(), msg + assert goal == jnp.sum(marked & jnp.isfinite(jnp.sum(b_sup_z, axis=-1))), msg + assert goal == jnp.sum(marked & jnp.isfinite(jnp.sum(B, axis=-1))), msg + for f_i in f: + assert goal == jnp.sum(marked & jnp.isfinite(jnp.sum(f_i, axis=-1))), msg + + msg = "|B| has vanished, violating the hairy ball theorem." + assert not jnp.isclose(B, 0).any(), msg + assert not jnp.isclose(b_sup_z, 0).any(), msg + + # Number of those integrals that were computed. + actual = jnp.sum(marked & jnp.isfinite(result)) + assert goal == actual, ( + f"Lost {goal - actual} integrals from NaN generation in the integrand. This " + "can be caused by floating point error or a poor choice of quadrature nodes." + ) + if plot: + _plot_check_interp(Z, B, name=r"$\vert B \vert$") + _plot_check_interp(Z, b_sup_z, name=r"$ (B / \vert B \vert) \cdot e^{\zeta}$") + + +def _plot_check_interp(Z, V, name=""): + """Plot V[λ, (ρ, α), (ζ₁, ζ₂)](Z).""" + for p in range(Z.shape[0]): + for s in range(Z.shape[1]): + marked = jnp.nonzero(jnp.any(Z != 0, axis=-1))[0] + if marked.size == 0: + continue + fig, ax = plt.subplots() + ax.set_xlabel(r"Field line $\zeta$") + ax.set_ylabel(name) + ax.set_title( + f"Interpolation of {name} to quadrature points. Index {p},{s}." 
+ ) + for i in marked: + ax.plot(Z[p, s, i], V[p, s, i], marker="o") + fig.text( + 0.01, + 0.01, + f"Each color specifies the set of points and values (ζ, {name}(ζ)) " + "used to evaluate an integral.", + ) + plt.tight_layout() + plt.show() + + +def _get_extrema(knots, B, dB_dz, sentinel=jnp.nan): + """Return extrema (ζ*, |B|(ζ*)) along field line. + + Parameters + ---------- + knots : jnp.ndarray + Shape (knots.size, ). + Field line-following ζ coordinates of spline knots. Must be strictly increasing. + B : jnp.ndarray + Shape (B.shape[0], S, knots.size - 1). + Polynomial coefficients of the spline of |B| in local power basis. + First axis enumerates the coefficients of power series. Second axis + enumerates the splines along the field lines. Last axis enumerates the + polynomials that compose the spline along a particular field line. + dB_dz : jnp.ndarray + Shape (B.shape[0] - 1, *B.shape[1:]). + Polynomial coefficients of the spline of (∂|B|/∂ζ)|ρ,α in local power basis. + First axis enumerates the coefficients of power series. Second axis + enumerates the splines along the field lines. Last axis enumerates the + polynomials that compose the spline along a particular field line. + sentinel : float + Value with which to pad array to return fixed shape. + + Returns + ------- + extrema, B_extrema : jnp.ndarray + Shape (S, (knots.size - 1) * (degree - 1)). + First array enumerates ζ*. Second array enumerates |B|(ζ*) + Sorted order of ζ* is not promised. + + """ + B, dB_dz, _ = _check_spline_shape(knots, B, dB_dz) + S, degree = B.shape[1], B.shape[0] - 1 + extrema = poly_root( + c=dB_dz, a_min=jnp.array([0.0]), a_max=jnp.diff(knots), sentinel=sentinel + ) + assert extrema.shape == (S, knots.size - 1, degree - 1) + B_extrema = polyval_vec(x=extrema, c=B[..., jnp.newaxis]).reshape(S, -1) + # Transform out of local power basis expansion. + extrema = (extrema + knots[:-1, jnp.newaxis]).reshape(S, -1) + return extrema, B_extrema + + +def _interp_to_argmin_B_soft(g, bp1, bp2, knots, B, dB_dz, method="cubic", beta=-50): + """Interpolate ``g`` to the deepest point in the magnetic well. + + Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A = argmin_E |B|(ζ). Returns mean_A g(ζ). + + Parameters + ---------- + beta : float + More negative gives exponentially better approximation at the + expense of noisier gradients - noisier in the physics sense (unrelated + to the automatic differentiation). + + """ + ext, B = _get_extrema(knots, B, dB_dz, sentinel=0) + assert ext.shape[0] == B.shape[0] == bp1.shape[1] == bp2.shape[1] + argmin = softmax( + beta + * jnp.where( + (bp1[..., jnp.newaxis] < ext[:, jnp.newaxis]) + & (ext[:, jnp.newaxis] < bp2[..., jnp.newaxis]), + jnp.expand_dims(B / jnp.mean(B, axis=-1, keepdims=True), axis=1), + 1e2, # >> max(|B|) / mean(|B|) + ), + axis=-1, + ) + g = jnp.linalg.vecdot( + argmin, + interp1d_vec(ext, knots, g.reshape(-1, knots.size), method=method)[ + :, jnp.newaxis + ], + ) + assert g.shape == bp1.shape == bp2.shape + return g + + +# Less efficient than soft if P >> 1. +def _interp_to_argmin_B_hard(g, bp1, bp2, knots, B, dB_dz, method="cubic"): + """Interpolate ``g`` to the deepest point in the magnetic well. + + Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A ∈ argmin_E |B|(ζ). Returns g(A). 
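The soft argmin behind ``_interp_to_argmin_B_soft`` above can be seen in isolation: with strongly negative ``beta`` the softmax weights concentrate on the smallest |B| extremum, giving a differentiable selection. The actual routine additionally normalizes |B| by its mean and masks extrema outside the well.

.. code-block:: python

    import numpy as np

    B_ext = np.array([1.05, 0.92, 1.10])   # |B| at extrema inside one well
    g_ext = np.array([2.0, 3.0, 4.0])      # g at those extrema
    beta = -50
    w = np.exp(beta * B_ext)
    w /= w.sum()
    print(w)           # nearly all weight on the minimum of |B|
    print(w @ g_ext)   # ≈ 3.0, the value of g at the argmin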
+ """ + ext, B = _get_extrema(knots, B, dB_dz, sentinel=0) + assert ext.shape[0] == B.shape[0] == bp1.shape[1] == bp2.shape[1] + argmin = jnp.argmin( + jnp.where( + (bp1[..., jnp.newaxis] < ext[:, jnp.newaxis]) + & (ext[:, jnp.newaxis] < bp2[..., jnp.newaxis]), + B[:, jnp.newaxis], + 1e2 + jnp.max(B), + ), + axis=-1, + ) + A = jnp.take_along_axis(ext[jnp.newaxis], argmin, axis=-1) + g = interp1d_vec(A, knots, g.reshape(-1, knots.size), method=method) + assert g.shape == bp1.shape == bp2.shape + return g + + +def plot_ppoly( + ppoly, + num=1000, + z1=None, + z2=None, + k=None, + k_transparency=0.5, + klabel=r"$k$", + title=r"Intersects $z$ in epigraph of $f(z) = k$", + hlabel=r"$z$", + vlabel=r"$f(z)$", + show=True, + start=None, + stop=None, + include_knots=False, + knot_transparency=0.1, +): + """Plot the piecewise polynomial ``ppoly``. + + Parameters + ---------- + ppoly : PPoly + Piecewise polynomial f. + num : int + Number of points to evaluate for plot. + z1 : jnp.ndarray + Shape (k.shape[0], W). + Optional, intersects with ∂f/∂ζ <= 0. + z2 : jnp.ndarray + Shape (k.shape[0], W). + Optional, intersects with ∂f/∂ζ >= 0. + k : jnp.ndarray + Shape (k.shape[0], ). + Optional, k such that f(ζ) = k. + k_transparency : float + Transparency of intersect lines. + klabel : float + Label of intersect lines. + title : str + Plot title. + hlabel : str + Horizontal axis label. + vlabel : str + Vertical axis label. + show : bool + Whether to show the plot. Default is true. + start : float + Minimum ζ on plot. + stop : float + Maximum ζ on plot. + include_knots : bool + Whether to plot vertical lines at the knots. + knot_transparency : float + Transparency of knot lines. + + Returns + ------- + fig, ax : matplotlib figure and axes + + """ + fig, ax = plt.subplots() + legend = {} + if include_knots: + for knot in ppoly.x: + _add2legend( + legend, + ax.axvline( + x=knot, color="tab:blue", alpha=knot_transparency, label="knot" + ), + ) + + z = jnp.linspace( + start=setdefault(start, ppoly.x[0]), + stop=setdefault(stop, ppoly.x[-1]), + num=num, + ) + _add2legend(legend, ax.plot(z, ppoly(z), label=vlabel)) + _plot_intersect( + ax=ax, + legend=legend, + z1=z1, + z2=z2, + k=k, + k_transparency=k_transparency, + klabel=klabel, + pad_value=0.0, + ) + ax.set_xlabel(hlabel) + ax.set_ylabel(vlabel) + ax.legend(legend.values(), legend.keys()) + ax.set_title(title) + plt.tight_layout() + if show: + plt.show() + plt.close() + return fig, ax + + +def _add2legend(legend, lines): + """Add lines to legend if it's not already in it.""" + for line in setdefault(lines, [lines], hasattr(lines, "__iter__")): + label = line.get_label() + if label not in legend: + legend[label] = line + + +def _plot_intersect(ax, legend, z1, z2, k, k_transparency, klabel, pad_value=0.0): + """Plot intersects on ``ax``.""" + if k is None: + return + + k = jnp.atleast_1d(jnp.squeeze(k)) + assert k.ndim == 1 + z1, z2 = jnp.atleast_2d(z1, z2) + assert z1.ndim == z2.ndim >= 2 + assert k.shape[0] == z1.shape[0] == z2.shape[0] + for p in k: + _add2legend( + legend, + ax.axhline(p, color="tab:purple", alpha=k_transparency, label=klabel), + ) + for i in range(k.size): + _z1, _z2 = z1[i], z2[i] + if _z1.size == _z2.size: + mask = (z1 - z2) != pad_value + _z1 = z1[mask] + _z2 = z2[mask] + ax.scatter(_z1, jnp.full_like(_z1, k[i]), marker="v", color="tab:red") + ax.scatter(_z2, jnp.full_like(_z2, k[i]), marker="^", color="tab:green") diff --git a/desc/integrals/fourier_bounce_integral.py b/desc/integrals/fourier_bounce_integral.py deleted file mode 100644 
index a269aee7c6..0000000000 --- a/desc/integrals/fourier_bounce_integral.py +++ /dev/null @@ -1,1562 +0,0 @@ -"""Methods for computing Fast Fourier Chebyshev transforms and bounce integrals.""" - -import numpy as np -from interpax import CubicHermiteSpline, PPoly -from matplotlib import pyplot as plt -from orthax.legendre import leggauss - -from desc.backend import dct, idct, irfft, jnp, rfft -from desc.integrals._bounce_utils import ( - add2legend, - chebroots_vec, - fix_inversion, - flatten_matrix, - get_alpha, - plot_intersect, - subtract, -) -from desc.integrals.bounce_integral import ( - _bounce_quadrature, - _interp_to_argmin_B_soft, - bounce_points, -) -from desc.integrals.interp_utils import ( - _filter_distinct, - cheb_from_dct, - cheb_pts, - fourier_pts, - harmonic, - idct_non_uniform, - interp_rfft2, - irfft2_non_uniform, - irfft_non_uniform, - polyder_vec, - transform_to_desc, -) -from desc.integrals.quad_utils import ( - automorphism_sin, - bijection_from_disc, - bijection_to_disc, - get_quad, - grad_automorphism_sin, -) -from desc.utils import ( - atleast_2d_end, - atleast_3d_mid, - atleast_nd, - errorif, - isposint, - setdefault, - take_mask, - warnif, -) - - -class FourierChebyshevBasis: - """Fourier-Chebyshev series. - - f(x, y) = ∑ₘₙ aₘₙ ψₘ(x) Tₙ(y) - where ψₘ are trigonometric polynomials on [0, 2π] - and Tₙ are Chebyshev polynomials on [−yₘᵢₙ, yₘₐₓ]. - - Notes - ----- - Performance may improve significantly - if the spectral resolutions ``M`` and ``N`` are powers of two. - - Attributes - ---------- - M : int - Fourier spectral resolution. - N : int - Chebyshev spectral resolution. - lobatto : bool - Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) - instead of the interior roots grid for Chebyshev points. - domain : (float, float) - Domain for y coordinates. - - """ - - def __init__(self, f, domain=(-1, 1), lobatto=False): - """Interpolate Fourier-Chebyshev basis to ``f``. - - Parameters - ---------- - f : jnp.ndarray - Shape (..., M, N). - Samples of real function on the ``FourierChebyshevBasis.nodes`` grid. - domain : (float, float) - Domain for y coordinates. Default is [-1, 1]. - lobatto : bool - Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) - instead of the interior roots grid for Chebyshev points. - - """ - self.M = f.shape[-2] - self.N = f.shape[-1] - errorif(domain[0] > domain[-1], msg="Got inverted domain.") - self.domain = tuple(domain) - errorif(lobatto, NotImplementedError, "JAX has not implemented type 1 DCT.") - self.lobatto = bool(lobatto) - self._c = FourierChebyshevBasis._fast_transform(f, self.lobatto) - - @staticmethod - def _fast_transform(f, lobatto): - M = f.shape[-2] - N = f.shape[-1] - return rfft(dct(f, type=2 - lobatto, axis=-1), axis=-2) / (M * (N - lobatto)) - - @staticmethod - def nodes(M, N, L=None, domain=(-1, 1), lobatto=False): - """Tensor product grid of optimal collocation nodes for this basis. - - Parameters - ---------- - M : int - Grid resolution in x direction. Preferably power of 2. - N : int - Grid resolution in y direction. Preferably power of 2. - L : int or jnp.ndarray - Optional, resolution in radial direction of domain [0, 1]. - May also be an array of coordinates values. If given, then the - returned ``coords`` is a 3D tensor-product with shape (L * M * N, 3). - domain : (float, float) - Domain for y coordinates. Default is [-1, 1]. - lobatto : bool - Whether to use the Gauss-Lobatto (Extrema-plus-Endpoint) - instead of the interior roots grid for Chebyshev points. 
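The ``_fast_transform`` above combines two standard identities: a real FFT over the periodic axis and a type-II DCT over the Chebyshev axis. A minimal stand-alone sketch of the Chebyshev half, using only NumPy and SciPy rather than the module's ``cheb_from_dct`` helper, shows how samples at the interior Chebyshev-Gauss nodes become series coefficients.

.. code-block:: python

    import numpy as np
    from scipy.fft import dct

    N = 8
    # Interior Chebyshev-Gauss nodes y_j = cos(pi (j + 1/2) / N).
    y = np.cos(np.pi * (np.arange(N) + 0.5) / N)
    f = np.exp(y) * np.sin(2 * y)  # arbitrary smooth test function

    # A scaled DCT-II of the samples gives the coefficients a_n of
    # f(y) = sum_n a_n T_n(y); the n = 0 term carries an extra factor of 1/2.
    a = dct(f, type=2) / N
    a[0] /= 2

    # The series reproduces the samples at the nodes.
    print(np.allclose(np.polynomial.chebyshev.chebval(y, a), f))  # True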
- - Returns - ------- - coords : jnp.ndarray - Shape (M * N, 2). - Grid of (x, y) points for optimal interpolation. - - """ - x = fourier_pts(M) - y = cheb_pts(N, lobatto, domain) - if L is not None: - if isposint(L): - L = jnp.flipud(jnp.linspace(1, 0, L, endpoint=False)) - coords = (L, x, y) - else: - coords = (x, y) - coords = list(map(jnp.ravel, jnp.meshgrid(*coords, indexing="ij"))) - coords = jnp.column_stack(coords) - return coords - - def evaluate(self, M, N): - """Evaluate Fourier-Chebyshev series. - - Parameters - ---------- - M : int - Grid resolution in x direction. Preferably power of 2. - N : int - Grid resolution in y direction. Preferably power of 2. - - Returns - ------- - fq : jnp.ndarray - Shape (..., M, N) - Fourier-Chebyshev series evaluated at ``FourierChebyshevBasis.nodes(M, N)``. - - """ - fq = idct(irfft(self._c, n=M, axis=-2), type=2 - self.lobatto, n=N, axis=-1) * ( - M * (N - self.lobatto) - ) - return fq - - def harmonics(self): - """Spectral coefficients aₘₙ of the interpolating polynomial. - - Transform Fourier interpolant harmonics to Nyquist trigonometric - interpolant harmonics so that the coefficients are all real. - - Returns - ------- - a_mn : jnp.ndarray - Shape (..., M, N). - Real valued spectral coefficients for Fourier-Chebyshev basis. - - """ - a_mn = harmonic(cheb_from_dct(self._c, axis=-1), self.M, axis=-2) - assert a_mn.shape[-2:] == (self.M, self.N) - return a_mn - - def compute_cheb(self, x): - """Evaluate Fourier basis at ``x`` to obtain set of 1D Chebyshev coefficients. - - Parameters - ---------- - x : jnp.ndarray - Points to evaluate Fourier basis. - - Returns - ------- - cheb : ChebyshevBasisSet - Chebyshev coefficients αₙ(x=``x``) for f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). - - """ - # Always add new axis to broadcast against Chebyshev coefficients. - x = jnp.atleast_1d(x)[..., jnp.newaxis] - cheb = cheb_from_dct(irfft_non_uniform(x, self._c, self.M, axis=-2), axis=-1) - assert cheb.shape[-2:] == (x.shape[-2], self.N) - return ChebyshevBasisSet(cheb, self.domain) - - -class ChebyshevBasisSet: - """Chebyshev series. - - { fₓ | fₓ : y ↦ ∑ₙ₌₀ᴺ⁻¹ aₙ(x) Tₙ(y) } - and Tₙ are Chebyshev polynomials on [−yₘᵢₙ, yₘₐₓ] - - Attributes - ---------- - cheb : jnp.ndarray - Shape (..., M, N). - Chebyshev coefficients αₙ(x) for fₓ(y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). - M : int - Number of function in this basis set. - N : int - Chebyshev spectral resolution. - domain : (float, float) - Domain for y coordinates. - - """ - - _eps = min(jnp.finfo(jnp.array(1.0).dtype).eps * 1e2, 1e-10) - - def __init__(self, cheb, domain=(-1, 1)): - """Make Chebyshev series basis from given coefficients. - - Parameters - ---------- - cheb : jnp.ndarray - Shape (..., M, N). - Chebyshev coefficients αₙ(x=``x``) for f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). - domain : (float, float) - Domain for y coordinates. Default is [-1, 1]. - - """ - self.cheb = jnp.atleast_2d(cheb) - errorif(domain[0] > domain[-1], msg="Got inverted domain.") - self.domain = tuple(domain) - - @property - def M(self): - """Number of function in this basis set.""" - return self.cheb.shape[-2] - - @property - def N(self): - """Chebyshev spectral resolution.""" - return self.cheb.shape[-1] - - @staticmethod - def _chebcast(cheb, arr): - # Input should not have rightmost dimension of cheb that iterates coefficients, - # but may have additional leftmost dimension for batch operation. - errorif( - jnp.ndim(arr) > cheb.ndim, - NotImplementedError, - msg=f"Only one additional axis for batch dimension is allowed. 
" - f"Got {jnp.ndim(arr) - cheb.ndim + 1} additional axes.", - ) - return cheb if jnp.ndim(arr) < cheb.ndim else cheb[jnp.newaxis] - - def intersect2d(self, k=0.0, eps=_eps): - """Coordinates yᵢ such that f(x, yᵢ) = k(x). - - Parameters - ---------- - k : jnp.ndarray - Shape must broadcast with (..., *cheb.shape[:-1]). - Specify to find solutions yᵢ to f(x, yᵢ) = k(x). Default 0. - eps : float - Absolute tolerance with which to consider value as zero. - - Returns - ------- - y : jnp.ndarray - Shape (..., *cheb.shape[:-1], N - 1). - Solutions yᵢ of f(x, yᵢ) = k(x), in ascending order. - is_intersect : jnp.ndarray - Shape y.shape. - Boolean array into ``y`` indicating whether element is an intersect. - df_dy_sign : jnp.ndarray - Shape y.shape. - Sign of ∂f/∂y (x, yᵢ). - - """ - c = subtract(ChebyshevBasisSet._chebcast(self.cheb, k), k) - # roots yᵢ of f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y) - k(x) - y = chebroots_vec(c) - assert y.shape == (*c.shape[:-1], self.N - 1) - - # Intersects must satisfy y ∈ [-1, 1]. - # Pick sentinel such that only distinct roots are considered intersects. - y = _filter_distinct(y, sentinel=-2.0, eps=eps) - is_intersect = (jnp.abs(y.imag) <= eps) & (jnp.abs(y.real) <= 1.0) - y = jnp.where(is_intersect, y.real, 1.0) # ensure y is in domain of arcos - - # TODO: Multipoint evaluation with FFT. - # Chapter 10, https://doi.org/10.1017/CBO9781139856065. - n = jnp.arange(self.N) - # ∂f/∂y = ∑ₙ₌₀ᴺ⁻¹ aₙ(x) n Uₙ₋₁(y) - # sign ∂f/∂y = sign ∑ₙ₌₀ᴺ⁻¹ aₙ(x) n sin(n arcos y) - df_dy_sign = jnp.sign( - jnp.linalg.vecdot( - n * jnp.sin(n * jnp.arccos(y)[..., jnp.newaxis]), - self.cheb[..., jnp.newaxis, :], - ) - ) - y = bijection_from_disc(y, self.domain[0], self.domain[-1]) - return y, is_intersect, df_dy_sign - - def intersect1d(self, k=0.0, num_intersect=None, pad_value=0.0): - """Coordinates z(x, yᵢ) such that fₓ(yᵢ) = k for every x. - - Parameters - ---------- - k : jnp.ndarray - Shape must broadcast with (..., *cheb.shape[:-2]). - Specify to find solutions yᵢ to fₓ(yᵢ) = k. Default 0. - num_intersect : int or None - If not specified, then all intersects are returned in an array whose - last axis has size ``self.M*(self.N-1)``. If there were less than that many - intersects detected, then the last axis of the returned arrays is padded - with ``pad_value``. Specify to return the first ``num_intersect`` pairs - of intersects. This is useful if ``num_intersect`` tightly bounds the - actual number. - pad_value : float - Value with which to pad array. Default 0. - - Returns - ------- - z1, z2 : (jnp.ndarray, jnp.ndarray) - Shape broadcasts with (..., *self.cheb.shape[:-2], num_intersect). - ``z1``, ``z2`` holds intersects satisfying ∂f/∂y <= 0, ∂f/∂y >= 0, - respectively. - - """ - errorif( - self.N < 2, - NotImplementedError, - "This method requires the Chebyshev spectral resolution of at " - f"least 2, but got N={self.N}.", - ) - - # Add axis to use same k over all Chebyshev series of the piecewise object. - y, is_intersect, df_dy_sign = self.intersect2d( - jnp.atleast_1d(k)[..., jnp.newaxis] - ) - # Flatten so that last axis enumerates intersects along the piecewise object. - y, is_intersect, df_dy_sign = map( - flatten_matrix, (self.isomorphism_to_C1(y), is_intersect, df_dy_sign) - ) - - # Note for bounce point applications: - # We ignore the degenerate edge case where the boundary shared by adjacent - # polynomials is a left intersect point i.e. ``is_z1`` because the subset of - # pitch values that generate this edge case has zero measure. 
Note that - # the technique to account for this would be to disqualify intersects - # within ``_eps`` from ``domain[-1]``. - is_z1 = (df_dy_sign <= 0) & is_intersect - is_z2 = (df_dy_sign >= 0) & fix_inversion(is_intersect, df_dy_sign) - - sentinel = self.domain[0] - 1.0 - z1 = take_mask(y, is_z1, size=num_intersect, fill_value=sentinel) - z2 = take_mask(y, is_z2, size=num_intersect, fill_value=sentinel) - - mask = (z1 > sentinel) & (z2 > sentinel) - # Set outside mask to same value so integration is over set of measure zero. - z1 = jnp.where(mask, z1, pad_value) - z2 = jnp.where(mask, z2, pad_value) - return z1, z2 - - def eval1d(self, z, cheb=None): - """Evaluate piecewise Chebyshev spline at coordinates z. - - Parameters - ---------- - z : jnp.ndarray - Shape (..., *cheb.shape[:-2], z.shape[-1]). - Coordinates in [sef.domain[0], ∞). - The coordinates z ∈ ℝ are assumed isomorphic to (x, y) ∈ ℝ² where - ``z // domain`` yields the index into the proper Chebyshev series - along the second to last axis of ``cheb`` and ``z % domain`` is - the coordinate value on the domain of that Chebyshev series. - cheb : jnp.ndarray - Shape (..., M, N). - Chebyshev coefficients to use. If not given, uses ``self.cheb``. - - Returns - ------- - f : jnp.ndarray - Shape z.shape. - Chebyshev basis evaluated at z. - - """ - cheb = self._chebcast(setdefault(cheb, self.cheb), z) - N = cheb.shape[-1] - x_idx, y = self.isomorphism_to_C2(z) - y = bijection_to_disc(y, self.domain[0], self.domain[1]) - # Chebyshev coefficients αₙ for f(z) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x[z]) Tₙ(y[z]) - # are held in cheb with shape (..., num cheb series, N). - cheb = jnp.take_along_axis(cheb, x_idx[..., jnp.newaxis], axis=-2) - f = idct_non_uniform(y, cheb, N) - assert f.shape == z.shape - return f - - def isomorphism_to_C1(self, y): - """Return coordinates z ∈ ℂ isomorphic to (x, y) ∈ ℂ². - - Maps row x of y to z = y + f(x) where f(x) = x * |domain|. - - Parameters - ---------- - y : jnp.ndarray - Shape (..., y.shape[-2], y.shape[-1]). - Second to last axis iterates the rows. - - Returns - ------- - z : jnp.ndarray - Shape y.shape. - Isomorphic coordinates. - - """ - assert y.ndim >= 2 - z_shift = jnp.arange(y.shape[-2]) * (self.domain[-1] - self.domain[0]) - z = y + z_shift[:, jnp.newaxis] - return z - - def isomorphism_to_C2(self, z): - """Return coordinates (x, y) ∈ ℂ² isomorphic to z ∈ ℂ. - - Returns index x and value y such that z = f(x) + y where f(x) = x * |domain|. - - Parameters - ---------- - z : jnp.ndarray - Shape z.shape. - - Returns - ------- - x_idx, y_val : (jnp.ndarray, jnp.ndarray) - Shape z.shape. - Isomorphic coordinates. - - """ - x_idx, y_val = jnp.divmod(z - self.domain[0], self.domain[-1] - self.domain[0]) - x_idx = x_idx.astype(int) - y_val += self.domain[0] - return x_idx, y_val - - def _check_shape(self, z1, z2, k): - """Return shapes that broadcast with (k.shape[0], *self.cheb.shape[:-2], W).""" - # Ensure pitch batch dim exists and add back dim to broadcast with wells. - k = atleast_nd(self.cheb.ndim - 1, k)[..., jnp.newaxis] - # Same but back dim already exists. - z1, z2 = atleast_nd(self.cheb.ndim, z1, z2) - # Cheb has shape (..., M, N) and others - # have shape (K, ..., W) - errorif(not (z1.ndim == z2.ndim == k.ndim == self.cheb.ndim)) - return z1, z2, k - - def check_intersect1d(self, z1, z2, k, pad_value=0.0, plot=True, **kwargs): - """Check that intersects are computed correctly. - - Parameters - ---------- - z1, z2 : jnp.ndarray - Shape must broadcast with (k, *self.cheb.shape[:-2], W). 
- ``z1``, ``z2`` holds intersects satisfying ∂f/∂y <= 0, ∂f/∂y >= 0, - respectively. - k : jnp.ndarray - Shape must broadcast with (k.shape[0], *self.cheb.shape[:-2]). - k such that fₓ(yᵢ) = k. - pad_value : float - Value that pads ``z1`` and ``z2`` arrays. - plot : bool - Whether to plot stuff. Default is true. - kwargs : dict - Keyword arguments into ``self.plot``. - - """ - assert z1.shape == z2.shape - mask = (z1 - z2) != pad_value - z1 = jnp.where(mask, z1, jnp.nan) - z2 = jnp.where(mask, z2, jnp.nan) - z1, z2, k = self._check_shape(z1, z2, k) - - err_1 = jnp.any(z1 > z2, axis=-1) - err_2 = jnp.any(z1[..., 1:] < z2[..., :-1], axis=-1) - f_m = self.eval1d((z1 + z2) / 2) - assert f_m.shape == z1.shape - err_3 = jnp.any(f_m > k + self._eps, axis=-1) - if not (plot or jnp.any(err_1 | err_2 | err_3)): - return - - # Ensure l axis exists for iteration in below loop. - cheb = atleast_nd(3, self.cheb) - mask, z1, z2, f_m = atleast_3d_mid(mask, z1, z2, f_m) - err_1, err_2, err_3 = atleast_2d_end(err_1, err_2, err_3) - - for l in np.ndindex(cheb.shape[:-2]): - for p in range(k.shape[0]): - idx = (p, *l) - if not (err_1[idx] or err_2[idx] or err_3[idx]): - continue - _z1 = z1[idx][mask[idx]] - _z2 = z2[idx][mask[idx]] - if plot: - self.plot1d( - cheb=cheb[l], - z1=_z1, - z2=_z2, - k=k[idx], - **kwargs, - ) - print(" z1 | z2") - print(jnp.column_stack([_z1, _z2])) - assert not err_1[idx], "Intersects have an inversion.\n" - assert not err_2[idx], "Detected discontinuity.\n" - assert not err_3[idx], ( - "Detected f > k in well. Increase Chebyshev resolution.\n" - f"{f_m[idx][mask[idx]]} > {k[idx] + self._eps}" - ) - idx = (slice(None), *l) - if plot: - self.plot1d( - cheb=cheb[l], - z1=z1[idx], - z2=z2[idx], - k=k[idx], - **kwargs, - ) - - def plot1d( - self, - cheb, - z1=None, - z2=None, - k=None, - k_transparency=0.5, - num=1000, - title=r"Intersects $z$ for $f(z) - k = 0$", - hlabel=r"$z$", - vlabel=r"$f(z)$", - show=True, - ): - """Plot the function ``f`` defined by the Chebyshev coefficients. - - Parameters - ---------- - cheb : jnp.ndarray - Shape (M, N). - Piecewise Chebyshev coefficients. - z1 : jnp.ndarray - Shape (k.shape[0], W). - Optional, intersects with ∂f/∂y <= 0. - z2 : jnp.ndarray - Shape (k.shape[0], W). - Optional, intersects with ∂f/∂y >= 0. - k : jnp.ndarray - Shape (k.shape[0], ). - Optional, k such that fₓ(yᵢ) = k. - k_transparency : float - Transparency of pitch lines. - num : int - Number of points to evaluate ``cheb`` for plot. - title : str - Plot title. - hlabel : str - Horizontal axis label. - vlabel : str - Vertical axis label. - show : bool - Whether to show the plot. Default is true. - - Returns - ------- - fig, ax : matplotlib figure and axes - - """ - fig, ax = plt.subplots() - legend = {} - - z = jnp.linspace( - start=self.domain[0], - stop=self.domain[0] + (self.domain[1] - self.domain[0]) * self.M, - num=num, - ) - add2legend(legend, ax.plot(z, self.eval1d(z, cheb), label=vlabel)) - plot_intersect(ax, legend, z1, z2, k, k_transparency) - - ax.set_xlabel(hlabel) - ax.set_ylabel(vlabel) - ax.legend(legend.values(), legend.keys(), loc="lower right") - ax.set_title(title) - plt.tight_layout() - if show: - plt.show() - plt.close() - return fig, ax - - -def _transform_to_clebsch(grid, desc_from_clebsch, M, N, B): - """Transform to Clebsch spectral domain. - - Parameters - ---------- - grid : Grid - Tensor-product grid in (ρ, θ, ζ) with uniformly spaced nodes in - (2π × 2π) poloidal and toroidal coordinates. 
- Note that below shape notation defines - L = ``grid.num_rho``, m = ``grid.num_theta``, and n = ``grid.num_zeta``. - desc_from_clebsch : jnp.ndarray - Shape (L * M * N, 3). - DESC coordinates (ρ, θ, ζ) sourced from the Clebsch coordinates - ``FourierChebyshevBasis.nodes(M,N,domain=FourierBounce.domain)``. - M : int - Grid resolution in poloidal direction for Clebsch coordinate grid. - Preferably power of 2. A good choice is ``m``. If the poloidal stream - function condenses the Fourier spectrum of |B| significantly, then a - larger number may be beneficial. - N : int - Grid resolution in toroidal direction for Clebsch coordinate grid. - Preferably power of 2. - B : jnp.ndarray - |B| evaluated on ``grid``. - - Returns - ------- - T, B : (FourierChebyshevBasis, FourierChebyshevBasis) - - """ - T = FourierChebyshevBasis( - # θ is computed on the optimal nodes in Clebsch space, - # which is a tensor product node set in Clebsch space. - f=desc_from_clebsch[:, 1].reshape(grid.num_rho, M, N), - domain=Bounce2D.domain, - ) - B = FourierChebyshevBasis( - f=interp_rfft2( - # Interpolate to optimal nodes in Clebsch space, - # which is not a tensor product node set in DESC space. - xq=desc_from_clebsch[:, 1:].reshape(grid.num_rho, -1, 2), - f=grid.meshgrid_reshape(B, order="rtz")[:, jnp.newaxis], - axes=(-1, -2), - ).reshape(grid.num_rho, M, N), - domain=Bounce2D.domain, - ) - return T, B - - -# TODO: -# After GitHub issue #1034 is resolved, we can also pass in the previous -# θ(α) coordinates as an initial guess for the next coordinate mapping. -# Perhaps tell the optimizer to perturb the coefficients of the -# |B|(α, ζ) directly? Maybe auto diff to see change on |B|(θ, ζ) -# and hence stream functions. just guessing. not sure if feasible / useful. -# TODO: Allow multiple starting labels for near-rational surfaces. -# can just concatenate along second to last axis of cheb. - - -class Bounce2D: - """Computes bounce integrals using two-dimensional pseudo-spectral methods. - - The bounce integral is defined as ∫ f(ℓ) dℓ, where - dℓ parameterizes the distance along the field line in meters, - f(ℓ) is the quantity to integrate along the field line, - and the boundaries of the integral are bounce points ζ₁, ζ₂ s.t. λ|B|(ζᵢ) = 1, - where λ is a constant proportional to the magnetic moment over energy - and |B| is the norm of the magnetic field. - - For a particle with fixed λ, bounce points are defined to be the location on the - field line such that the particle's velocity parallel to the magnetic field is zero. - The bounce integral is defined up to a sign. We choose the sign that corresponds to - the particle's guiding center trajectory traveling in the direction of increasing - field-line-following coordinate ζ. - - Notes - ----- - Motivation and description of algorithm for developers. - - For applications which reduce to computing a nonlinear function of distance - along field lines between bounce points, it is required to identify these - points with field-line-following coordinates. In the special case of a linear - function summing integrals between bounce points over a flux surface, arbitrary - coordinate systems may be used as this operation becomes a surface integral, - which is invariant to the order of summation. - - The DESC coordinate system is related to field-line-following coordinate - systems by a relation whose solution is best found with Newton iteration. - There is a unique real solution to this equation, so Newton iteration is a - globally convergent root-finding algorithm here. 
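Concretely, at fixed ρ and ζ (taking ζ = ϕ) the relation mentioned above reduces to solving α = θ + λ(θ, ζ) − ι ζ for θ, where λ is DESC's poloidal stream function (not the pitch). For the toy model below ∂(θ + λ)/∂θ stays positive, so the residual is monotone in θ and Newton converges from the trivial guess; the λ used here is invented purely for illustration.

.. code-block:: python

    import numpy as np

    iota, zeta, alpha = 0.41, 1.3, 0.2

    def lam(theta):  # model poloidal stream function at fixed rho, zeta
        return 0.1 * np.sin(theta - zeta)

    def dlam_dtheta(theta):
        return 0.1 * np.cos(theta - zeta)

    theta = alpha + iota * zeta  # guess corresponding to lambda = 0
    for _ in range(10):
        residual = theta + lam(theta) - iota * zeta - alpha
        theta -= residual / (1.0 + dlam_dtheta(theta))

    print(theta, theta + lam(theta) - iota * zeta - alpha)  # residual ~ 1e-16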
For the task of finding - bounce points, even if the inverse map: θ(α, ζ) was known, Newton iteration - is not a globally convergent algorithm to find the real roots of - f : ζ ↦ |B|(ζ) − 1/λ where ζ is a field-line-following coordinate. - For this, function approximation of |B| is necessary. - - Therefore, to compute bounce points {(ζ₁, ζ₂)}, we approximate |B| by a - series expansion of basis functions in (α, ζ) coordinates restricting the - class of basis functions to low order (e.g. N = 2ᵏ where k is small) - algebraic or trigonometric polynomial with integer frequencies. These are - the two classes useful for function approximation and for which there exists - globally convergent root-finding algorithms. We require low order because - the computation expenses grow with the number of potential roots, and the - theorem of algebra states that number is N (2N) for algebraic - (trigonometric) polynomials of degree N. - - The frequency transform of a map under the chosen basis must be concentrated - at low frequencies for the series to converge to the true function fast. - For periodic (non-periodic) maps, the best basis is a Fourier (Chebyshev) - series. Both converge exponentially, but the larger region of convergence in - the complex plane of Fourier series make it preferable in practice to choose - coordinate systems such that the function to approximate is periodic. The - Chebyshev series is preferred to other orthogonal polynomial series since - fast discrete polynomial transforms (DPT) are implemented via fast transform - to Chebyshev then DCT. Although nothing prohibits a direct DPT, we want to - rely on existing, optimized libraries. There are other reasons to prefer - Chebyshev series not discussed here. - - Therefore, |B| is interpolated to a Fourier-Chebyshev series in (α, ζ). - The roots of f are computed as the eigenvalues of the Chebyshev companion - matrix. This will later be replaced with Boyd's method: - Computing real roots of a polynomial in Chebyshev series form through - subdivision. https://doi.org/10.1016/j.apnum.2005.09.007. - - Computing accurate series expansions in (α, ζ) coordinates demands - particular interpolation points in that coordinate system. Newton iteration - is used to compute θ at these interpolation points. Note that interpolation - is necessary because there is no transformation that converts series - coefficients in periodic coordinates, e.g. (ϑ, ϕ), to a low order - polynomial basis in non-periodic coordinates. For example, one can obtain - series coefficients in (α, ϕ) coordinates from those in (ϑ, ϕ) as follows - g : ϑ, ϕ ↦ ∑ₘₙ aₘₙ exp(j [mϑ + nϕ]) - - g : α, ϕ ↦ ∑ₘₙ aₘₙ exp(j [mα + (m ι + n)ϕ]) - However, the basis for the latter are trigonometric functions with - irrational frequencies since the rotational transform is irrational. - Globally convergent root-finding schemes for that basis (at fixed α) are - not known. The denominator of a close rational could be absorbed into the - coordinate ϕ, but this balloons the frequency, and hence the degree of the - series. Although since Fourier series may converge faster than Chebyshev, - an alternate strategy that should work is to interpolate |B| to a double - Fourier series in (ϑ, ϕ), then apply bisection methods to find roots of f - with mesh size inversely proportional to the max frequency along the field - line: M ι + N. ``Bounce2D`` does not use this approach because the - root-finding scheme is inferior. - - After obtaining the bounce points, the supplied quadrature is performed. 
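The frequency claim above is easy to verify numerically: substituting ϑ = α + ι ϕ into a double Fourier series in (ϑ, ϕ) leaves the coefficients aₘₙ unchanged but shifts the toroidal frequencies to m ι + n, which are irrational whenever ι is. The coefficients and ι below are arbitrary toy values.

.. code-block:: python

    import numpy as np

    rng = np.random.default_rng(0)
    m = np.arange(-2, 3)[:, None]
    n = np.arange(-2, 3)[None, :]
    a = rng.normal(size=(5, 5)) + 1j * rng.normal(size=(5, 5))
    iota = np.sqrt(2)  # irrational rotational transform

    def g_vartheta_phi(vartheta, phi):
        return np.sum(a * np.exp(1j * (m * vartheta + n * phi))).real

    def g_alpha_phi(alpha, phi):
        # same coefficients, toroidal frequencies m * iota + n at fixed alpha
        return np.sum(a * np.exp(1j * (m * alpha + (m * iota + n) * phi))).real

    alpha, phi = 0.3, 1.7
    print(np.isclose(g_vartheta_phi(alpha + iota * phi, phi), g_alpha_phi(alpha, phi)))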
- By default, Gauss quadrature is performed after removing the singularity. - Fast fourier transforms interpolate functions in the integrand to the - quadrature nodes. - - Fast transforms are used where possible, though fast multipoint methods - are not yet implemented. For non-uniform interpolation, Vandermode MMT with - the linear algebra libraries of JAX are used. It should be worthwhile to use - the inverse non-uniform fast transforms. Fast multipoint methods are - preferable because they are exact, but this requires more development work. - Future work may implement these techniques, along with empirical testing of - a few change of variables for the Chebyshev interpolation that may allow - earlier truncation of the series without loss of accuracy. - - See Also - -------- - Bounce1D - Uses one-dimensional local spline methods for the same task. - An advantage of ``Bounce2D`` over ``Bounce1D`` is that the coordinates on - which the root-finding must be done to map from DESC to Clebsch coords is - fixed to ``M*N``, independent of the number of toroidal transits. - - Warnings - -------- - It is assumed that ζ = ϕ. - - Attributes - ---------- - _B : ChebyshevBasisSet - Set of 1D Chebyshev spectral coefficients of |B| along field line. - {|B|_α : ζ ↦ |B|(α, ζ) | α ∈ A } where A = (α₀, α₁, …, αₘ₋₁) is the - sequence of poloidal coordinates that specify the field line. - _T : ChebyshevBasisSet - Set of 1D Chebyshev spectral coefficients of θ along field line. - {θ_α : ζ ↦ θ(α, ζ) | α ∈ A } where A = (α₀, α₁, …, αₘ₋₁) is the - sequence of poloidal coordinates that specify the field line. - - """ - - domain = (0, 2 * jnp.pi) - - def __init__( - self, - grid, - data, - desc_from_clebsch, - M, - N, - alpha_0=0.0, - num_transit=50, - quad=leggauss(32), - automorphism=(automorphism_sin, grad_automorphism_sin), - B_ref=1.0, - L_ref=1.0, - check=False, - **kwargs, - ): - """Returns an object to compute bounce integrals. - - Notes - ----- - Performance may improve significantly - if the spectral resolutions ``M`` and ``N`` are powers of two. - - Parameters - ---------- - grid : Grid - Tensor-product grid in (ρ, θ, ζ) with uniformly spaced nodes in - (2π × 2π) poloidal and toroidal coordinates. - Note that below shape notation defines - L = ``grid.num_rho``, m = ``grid.num_theta``, and n = ``grid.num_zeta``. - data : dict[str, jnp.ndarray] - Data evaluated on ``grid``. Must include ``FourierBounce.required_names()``. - desc_from_clebsch : jnp.ndarray - Shape (L * M * N, 3). - DESC coordinates (ρ, θ, ζ) sourced from the Clebsch coordinates - ``FourierChebyshevBasis.nodes(M,N,domain=FourierBounce.domain)``. - M : int - Grid resolution in poloidal direction for Clebsch coordinate grid. - Preferably power of 2. A good choice is ``m``. If the poloidal stream - function condenses the Fourier spectrum of |B| significantly, then a - larger number may be beneficial. - N : int - Grid resolution in toroidal direction for Clebsch coordinate grid. - Preferably power of 2. - alpha_0 : float - Starting field line poloidal label. - num_transit : int - Number of toroidal transits to follow field line. - quad : (jnp.ndarray, jnp.ndarray) - Quadrature points xₖ and weights wₖ for the approximate evaluation of an - integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Default is 32 points. - automorphism : (Callable, Callable) or None - The first callable should be an automorphism of the real interval [-1, 1]. - The second callable should be the derivative of the first. This map defines - a change of variable for the bounce integral. 
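The role of the automorphism is easiest to see on a model integral with the same inverse square root behavior at the endpoints that the bounce integrand has at the bounce points. The map x ↦ sin(πx/2) below is a stand-in for the sin automorphism used by default; its Jacobian vanishes at ±1 and cancels the singularity, so ordinary Gauss-Legendre becomes accurate.

.. code-block:: python

    import numpy as np

    x, w = np.polynomial.legendre.leggauss(32)
    g = lambda x: 1.0 / np.sqrt(1.0 - x**2)  # integrates to pi on (-1, 1)

    # Plain Gauss-Legendre converges slowly because of the endpoint singularity.
    naive = np.sum(w * g(x))

    # After x -> sin(pi x / 2) the Jacobian (pi/2) cos(pi x / 2) cancels the
    # singularity; for this g the transformed integrand is exactly constant.
    jac = 0.5 * np.pi * np.cos(0.5 * np.pi * x)
    mapped = np.sum(w * g(np.sin(0.5 * np.pi * x)) * jac)

    print(naive, mapped, np.pi)  # mapped recovers pi to machine precision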
The choice made for the - automorphism will affect the performance of the quadrature method. - B_ref : float - Optional. Reference magnetic field strength for normalization. - L_ref : float - Optional. Reference length scale for normalization. - check : bool - Flag for debugging. Must be false for jax transformations. - - """ - errorif(grid.sym, NotImplementedError, msg="Need grid that works with FFTs.") - # Strictly increasing zeta knots enforces dζ > 0. - # To retain dℓ = (|B|/B^ζ) dζ > 0 after fixing dζ > 0, we require - # B^ζ = B⋅∇ζ > 0. This is equivalent to changing the sign of ∇ζ or [∂ℓ/∂ζ]|ρ,a. - # Recall dζ = ∇ζ⋅dR, implying 1 = ∇ζ⋅(e_ζ|ρ,a). Hence, a sign change in ∇ζ - # requires the same sign change in e_ζ|ρ,a to retain the metric identity. - warnif( - check and kwargs.pop("warn", True) and jnp.any(data["B^zeta"] <= 0), - msg="(∂ℓ/∂ζ)|ρ,a > 0 is required. Enforcing positive B^ζ.", - ) - self._m = grid.num_theta - self._n = grid.num_zeta - self._b_sup_z = jnp.expand_dims( - transform_to_desc(grid, jnp.abs(data["B^zeta"]) / data["|B|"] * L_ref), - axis=1, - ) - self._x, self._w = get_quad(quad, automorphism) - - # Compute global splines. - T, B = _transform_to_clebsch(grid, desc_from_clebsch, M, N, data["|B|"] / B_ref) - # peel off field lines - alphas = get_alpha( - alpha_0, - grid.compress(data["iota"]), - num_transit, - period=Bounce2D.domain[-1], - ) - self._B = B.compute_cheb(alphas) - # Evaluating set of Chebyshev series more efficient than evaluating - # Fourier Chebyshev series, so we project θ to Chebyshev series as well. - self._T = T.compute_cheb(alphas) - assert self._B.M == self._T.M == num_transit - assert self._B.N == self._T.N == N - assert ( - self._B.cheb.shape == self._T.cheb.shape == (grid.num_rho, num_transit, N) - ) - - @staticmethod - def desc_from_clebsch(eq, L, M, N, clebsch=None, **kwargs): - """Return DESC coordinates of optimal Fourier Chebyshev basis nodes. - - Parameters - ---------- - eq : Equilibrium - Equilibrium to use defining the coordinate mapping. - L : int or jnp.ndarray - Number of flux surfaces uniformly in [0, 1] on which to compute. - May also be an array of non-uniform coordinates. - M : int - Grid resolution in poloidal direction for Clebsch coordinate grid. - Preferably power of 2. A good choice is ``m``. If the poloidal stream - function condenses the Fourier spectrum of |B| significantly, then a - larger number may be beneficial. - N : int - Grid resolution in toroidal direction for Clebsch coordinate grid. - Preferably power of 2. - clebsch : jnp.ndarray - Optional, Clebsch coordinate tensor-product grid (ρ, α, ζ). - If given, ``L``, ``M``, and ``N`` are ignored. - kwargs : dict - Additional parameters to supply to the coordinate mapping function. - See ``desc.equilibrium.Equilibrium.map_coordinates``. - - Returns - ------- - desc_coords : jnp.ndarray - Shape (L * M * N, 3). - DESC coordinate grid (ρ, θ, ζ) sourced from the Clebsch coordinate - tensor-product grid (ρ, α, ζ). - - """ - if clebsch is None: - clebsch = FourierChebyshevBasis.nodes(M, N, L, Bounce2D.domain) - desc_coords = eq.map_coordinates( - coords=clebsch, - inbasis=("rho", "alpha", "zeta"), - period=(jnp.inf, 2 * jnp.pi, jnp.inf), - **kwargs, - ) - return desc_coords - - @staticmethod - def required_names(): - """Return names in ``data_index`` required to compute bounce integrals.""" - return ["B^zeta", "|B|", "iota"] - - @staticmethod - def reshape_data(grid, data, names): - """Reshape``data`` given by ``names`` for input to ``self.integrate``. 
- - Parameters - ---------- - grid : Grid - Tensor-product grid in (ρ, θ, ζ). - data : dict[str, jnp.ndarray] - Data evaluated on grid. - names : list[str] - Strings of keys in ``data`` to reshape. - - Returns - ------- - f : list[jnp.ndarray] - List of reshaped data which may be given to ``self.integrate``. - - """ - if isinstance(names, str): - names = [names] - f = [grid.meshgrid_reshape(data[name], "rtz")[:, jnp.newaxis] for name in names] - return f - - @property - def _L(self): - """int: Number of flux surfaces to compute on.""" - return self._B.cheb.shape[0] - - def bounce_points(self, pitch, num_well=None): - """Compute bounce points. - - Parameters - ---------- - pitch : jnp.ndarray - Shape (P, L). - λ values to evaluate the bounce integral at each field line. λ(ρ) is - specified by ``pitch[...,ρ]`` where in the latter the labels ρ are - interpreted as the index into the last axis that corresponds to that field - line. If two-dimensional, the first axis is the batch axis. - num_well : int or None - If not specified, then all bounce points are returned in an array whose - last axis has size ``num_transit*(N-1)``. If there were less than that many - wells detected along a field line, then the last axis of the returned - arrays, which enumerates bounce points for a particular field line and - pitch, is padded with zero. - - Specify to return the first ``num_well`` pairs of bounce points for each - pitch along each field line. This is useful if ``num_well`` tightly - bounds the actual number of wells. As a reference, there are typically - at most 5 wells per toroidal transit for a given pitch. - - Returns - ------- - bp1, bp2 : (jnp.ndarray, jnp.ndarray) - Shape (P, L, num_well). - The field line-following coordinates of bounce points. - The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, - respectively, for the bounce integrals. - - """ - return self._B.intersect1d(1 / jnp.atleast_2d(pitch), num_well) - - def check_bounce_points(self, bp1, bp2, pitch, plot=True, **kwargs): - """Check that bounce points are computed correctly and plot them.""" - kwargs.setdefault( - "title", r"Intersects $\zeta$ for $\vertB(\zeta)\vert = 1/\lambda$" - ) - kwargs.setdefault("hlabel", r"$\zeta$") - kwargs.setdefault("vlabel", r"$\vertB\vert(\zeta)$") - self._B.check_intersect1d(bp1, bp2, 1 / pitch, plot, **kwargs) - - def integrate(self, pitch, integrand, f, weight=None, num_well=None): - """Bounce integrate ∫ f(ℓ) dℓ. - - Computes the bounce integral ∫ f(ℓ) dℓ for every specified field line - for every λ value in ``pitch``. - - Parameters - ---------- - pitch : jnp.ndarray - Shape (P, L). - λ values to evaluate the bounce integral at each field line. λ(ρ) is - specified by ``pitch[...,ρ]`` where in the latter the labels ρ are - interpreted as the index into the last axis that corresponds to that field - line. If two-dimensional, the first axis is the batch axis. - integrand : callable - The composition operator on the set of functions in ``f`` that maps the - functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the - arrays in ``f`` as arguments as well as the additional keyword arguments: - ``B`` and ``pitch``. A quadrature will be performed to approximate the - bounce integral of ``integrand(*f,B=B,pitch=pitch)``. - f : list[jnp.ndarray] - Shape (L, 1, m, n). - Real scalar-valued (2π × 2π) periodic in (θ, ζ) functions evaluated - on the ``grid`` supplied to construct this object. These functions - should be arguments to the callable ``integrand``. 
Use the method - ``self.reshape_data`` to reshape the data into the expected shape. - weight : jnp.ndarray - Shape (L, 1, m, n). - If supplied, the bounce integral labeled by well j is weighted such that - the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` - interpolated to the deepest point in the magnetic well. Use the method - ``self.reshape_data`` to reshape the data into the expected shape. - num_well : int or None - If not specified, then all bounce integrals are returned in an array whose - last axis has size ``num_transit*(N-1)``. If there were less than that many - wells detected along a field line, then the last axis of the returned array, - which enumerates bounce integrals for a particular field line and pitch, - is padded with zero. - - Specify to return the bounce integrals between the first ``num_well`` - wells for each pitch along each field line. This is useful if ``num_well`` - tightly bounds the actual number of wells. As a reference, there are - typically at most 5 wells per toroidal transit for a given pitch. - - Returns - ------- - result : jnp.ndarray - Shape (P, L, num_well). - First axis enumerates pitch values. Second axis enumerates the field lines. - Last axis enumerates the bounce integrals. - - """ - pitch = jnp.atleast_2d(pitch) - bp1, bp2 = self.bounce_points(pitch, num_well) - result = self._integrate(bp1, bp2, pitch, integrand, f) - errorif(weight is not None, NotImplementedError) - return result - - def _integrate(self, bp1, bp2, pitch, integrand, f): - assert bp1.ndim == 3 - assert bp1.shape == bp2.shape - assert pitch.ndim == 2 - W = bp1.shape[-1] # number of wells - shape = (pitch.shape[0], self._L, W, self._x.size) - - # quadrature points parameterized by ζ for each pitch and flux surface - Q_zeta = flatten_matrix( - bijection_from_disc(self._x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) - ) - # quadrature points in (θ, ζ) coordinates - Q = jnp.stack([self._T.eval1d(Q_zeta), Q_zeta], axis=-1) - - # interpolate and integrate - f = [interp_rfft2(Q, f_i, axes=(-1, -2)).reshape(shape) for f_i in f] - result = jnp.dot( - integrand( - *f, - B=self._B.eval1d(Q_zeta).reshape(shape), - pitch=pitch[..., jnp.newaxis, jnp.newaxis], - ) - / irfft2_non_uniform( - Q, self._b_sup_z, self._m, self._n, axes=(-1, -2) - ).reshape(shape), - self._w, - ) - assert result.shape == (pitch.shape[0], self._L, W) - return result - - -class Bounce1D: - """Computes bounce integrals using one-dimensional local spline methods. - - The bounce integral is defined as ∫ f(ℓ) dℓ, where - dℓ parameterizes the distance along the field line in meters, - f(ℓ) is the quantity to integrate along the field line, - and the boundaries of the integral are bounce points ζ₁, ζ₂ s.t. λ|B|(ζᵢ) = 1, - where λ is a constant proportional to the magnetic moment over energy - and |B| is the norm of the magnetic field. - - For a particle with fixed λ, bounce points are defined to be the location on the - field line such that the particle's velocity parallel to the magnetic field is zero. - The bounce integral is defined up to a sign. We choose the sign that corresponds to - the particle's guiding center trajectory traveling in the direction of increasing - field-line-following coordinate ζ. - - Notes - ----- - Motivation and description of algorithm for developers. - - For applications which reduce to computing a nonlinear function of distance - along field lines between bounce points, it is required to identify these - points with field-line-following coordinates. 
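A self-contained sketch of this local-spline strategy with SciPy and a model field, along the lines of the tests removed further below: samples of |B|(ζ) and its ζ derivative at the knots define a C1 Hermite spline, bounce points are the roots of |B|(ζ) = 1/λ, and the sign of the spline derivative separates left and right crossings of each well.

.. code-block:: python

    import numpy as np
    from scipy.interpolate import CubicHermiteSpline

    zeta = np.linspace(0, 6 * np.pi, 100)  # knots along the field line
    B = 1.0 + 0.3 * np.cos(zeta)           # model |B|(zeta)
    dB_dz = -0.3 * np.sin(zeta)
    spline = CubicHermiteSpline(zeta, B, dB_dz)

    pitch = 1 / 1.1  # lambda such that 1/lambda lies between min and max of |B|
    intersects = np.sort(spline.solve(1 / pitch, extrapolate=False))
    sign = spline.derivative()(intersects)
    bp1 = intersects[sign <= 0]  # |B| decreasing through 1/lambda
    bp2 = intersects[sign >= 0]  # |B| increasing through 1/lambda
    print(np.column_stack([bp1, bp2]))  # one (bp1, bp2) pair per magnetic well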
In the special case of a linear - function summing integrals between bounce points over a flux surface, arbitrary - coordinate systems may be used as this operation becomes a surface integral, - which is invariant to the order of summation. - - The DESC coordinate system is related to field-line-following coordinate - systems by a relation whose solution is best found with Newton iteration. - There is a unique real solution to this equation, so Newton iteration is a - globally convergent root-finding algorithm here. For the task of finding - bounce points, even if the inverse map: θ(α, ζ) was known, Newton iteration - is not a globally convergent algorithm to find the real roots of - f : ζ ↦ |B|(ζ) − 1/λ where ζ is a field-line-following coordinate. - For this, function approximation of |B| is necessary. - - The function approximation in ``Bounce1D`` is ignorant that the objects to - approximate are defined on a bounded subset of ℝ². Instead, the domain is - projected to ℝ, where information sampled about the function at infinity - cannot support reconstruction of the function near the origin. As the - functions of interest do not vanish at infinity, pseudo-spectral techniques - are not used. Instead, function approximation is done with local splines. - This is useful if one can efficiently obtain data along field lines. - - After obtaining the bounce points, the supplied quadrature is performed. - By default, Gauss quadrature is performed after removing the singularity. - Local splines interpolate functions in the integrand to the quadrature nodes. - - See Also - -------- - Bounce2D : Uses two-dimensional pseudo-spectral techniques for the same task. - - Warnings - -------- - The supplied data must be from a Clebsch coordinate (ρ, α, ζ) tensor-product grid. - The field-line-following coordinate ζ must be strictly increasing. - The ζ coordinate is preferably uniformly spaced, although this is not required. - These are used as knots to construct splines. - A good reference density is 100 knots per toroidal transit. - - Attributes - ---------- - zeta : jnp.ndarray - Shape (N, ). - Field line-following ζ coordinates of spline knots. - B : jnp.ndarray - Shape (4, L * M, N - 1). - Polynomial coefficients of the spline of |B| in local power basis. - First axis enumerates the coefficients of power series. Second axis - enumerates the splines along the field lines. Last axis enumerates the - polynomials that compose the spline along a particular field line. - dB_dz : jnp.ndarray - Shape (3, L * M, N - 1). - Polynomial coefficients of the spline of (∂|B|/∂ζ)|ρ,α in local power basis. - First axis enumerates the coefficients of power series. Second axis - enumerates the splines along the field lines. Last axis enumerates the - polynomials that compose the spline along a particular field line. - - """ - - def __init__( - self, - grid, - data, - quad=leggauss(32), - automorphism=(automorphism_sin, grad_automorphism_sin), - Bref=1.0, - Lref=1.0, - check=False, - **kwargs, - ): - """Returns an object to compute bounce integrals. - - Parameters - ---------- - grid : Grid - Clebsch coordinate (ρ, α, ζ) tensor-product grid. - Note that below shape notation defines - L = ``grid.num_rho``, M = ``grid.num_alpha``, and N = ``grid.num_zeta``. - data : dict[str, jnp.ndarray] - Data evaluated on grid. Must include names in ``Bounce.required_names()``. - quad : (jnp.ndarray, jnp.ndarray) - Quadrature points xₖ and weights wₖ for the approximate evaluation of an - integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). 
Default is 32 points. - automorphism : (Callable, Callable) or None - The first callable should be an automorphism of the real interval [-1, 1]. - The second callable should be the derivative of the first. This map defines - a change of variable for the bounce integral. The choice made for the - automorphism will affect the performance of the quadrature method. - Bref : float - Optional. Reference magnetic field strength for normalization. - Lref : float - Optional. Reference length scale for normalization. - check : bool - Flag for debugging. Must be false for jax transformations. - - """ - # Strictly increasing zeta knots enforces dζ > 0. - # To retain dℓ = (|B|/B^ζ) dζ > 0 after fixing dζ > 0, we require - # B^ζ = B⋅∇ζ > 0. This is equivalent to changing the sign of ∇ζ or [∂ℓ/∂ζ]|ρ,a. - # Recall dζ = ∇ζ⋅dR, implying 1 = ∇ζ⋅(e_ζ|ρ,a). Hence, a sign change in ∇ζ - # requires the same sign change in e_ζ|ρ,a to retain the metric identity. - warnif( - check and kwargs.pop("warn", True) and jnp.any(data["B^zeta"] <= 0), - msg="(∂ℓ/∂ζ)|ρ,a > 0 is required. Enforcing positive B^ζ.", - ) - data = { - "B^zeta": jnp.abs(data["B^zeta"]) * Lref / Bref, - "B^zeta_z|r,a": data["B^zeta_z|r,a"] - * jnp.sign(data["B^zeta"]) - * Lref - / Bref, - "|B|": data["|B|"] / Bref, - "|B|_z|r,a": data["|B|_z|r,a"] / Bref, # This is already the correct sign. - } - self._data = { - key: grid.meshgrid_reshape(val, "raz").reshape(-1, grid.num_zeta) - for key, val in data.items() - } - self._x, self._w = get_quad(quad, automorphism) - - # Compute local splines. - self.zeta = grid.compress(grid.nodes[:, 2], surface_label="zeta") - self.B = jnp.moveaxis( - CubicHermiteSpline( - x=self._zeta, - y=self._data["|B|"], - dydx=self._data["|B|_z|r,a"], - axis=-1, - check=check, - ).c, - source=1, - destination=-1, - ) - self.dB_dz = polyder_vec(self.B) - degree = 3 - assert self.B.shape[0] == degree + 1 - assert self.dB_dz.shape[0] == degree - assert self.B.shape[-1] == self.dB_dz.shape[-1] == grid.num_zeta - 1 - - @staticmethod - def required_names(): - """Return names in ``data_index`` required to compute bounce integrals.""" - return ["B^zeta", "B^zeta_z|r,a", "|B|", "|B|_z|r,a"] - - @staticmethod - def reshape_data(grid, data, names): - """Reshape ``data`` given by ``names`` for input to ``self.integrate``. - - Parameters - ---------- - grid : Grid - Clebsch coordinate (ρ, α, ζ) tensor-product grid. - data : dict[str, jnp.ndarray] - Data evaluated on grid. - names : list[str] - Strings of keys in ``data`` dict to reshape. - - Returns - ------- - f : list[jnp.ndarray] - List of reshaped data which may be given to ``self.integrate``. - - """ - if isinstance(names, str): - names = [names] - f = [ - grid.meshgrid_reshape(data[name], "raz").reshape(-1, grid.num_zeta) - for name in names - ] - return f - - def bounce_points(self, pitch, num_well=None): - """Compute bounce points. - - Parameters - ---------- - pitch : jnp.ndarray - Shape (P, L). - λ values to evaluate the bounce integral at each field line. λ(ρ,α) is - specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are - interpreted as the index into the last axis that corresponds to that field - line. If two-dimensional, the first axis is the batch axis. - num_well : int or None - If not specified, then all bounce points are returned in an array whose - last axis has size ``num_transit*(N-1)``. 
If there were less than that many - wells detected along a field line, then the last axis of the returned - arrays, which enumerates bounce points for a particular field line and - pitch, is padded with zero. - - Specify to return the first ``num_well`` pairs of bounce points for each - pitch along each field line. This is useful if ``num_well`` tightly - bounds the actual number of wells. As a reference, there are typically - at most 5 wells per toroidal transit for a given pitch. - - Returns - ------- - bp1, bp2 : (jnp.ndarray, jnp.ndarray) - Shape (P, L, num_well). - The field line-following coordinates of bounce points. - The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, - respectively, for the bounce integrals. - - """ - return bounce_points( - jnp.atleast_2d(pitch), - self.zeta, - self.B, - self.dB_dz, - num_well, - check=False, - ) - - def check_bounce_points(self, bp1, bp2, pitch, plot=True, **kwargs): - """Check that bounce points are computed correctly and plot them.""" - eps = jnp.finfo(jnp.array(1.0).dtype).eps * 10 - - assert bp1.shape == bp2.shape - mask = (bp1 - bp2) == 0 - bp1 = jnp.where(mask, jnp.nan, bp1) - bp2 = jnp.where(mask, jnp.nan, bp2) - - err_1 = jnp.any(bp1 > bp2, axis=-1) - err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1], axis=-1) - - P, S, _ = bp1.shape - for s in range(S): - B = PPoly(self.B[:, s], self.zeta) - for p in range(P): - B_m = B((bp1[p, s] + bp2[p, s]) / 2) - err_3 = jnp.any(B_m > 1 / pitch[p, s] + eps) - if not (err_1[p, s] or err_2[p, s] or err_3): - continue - _bp1 = bp1[p, s][mask[p, s]] - _bp2 = bp2[p, s][mask[p, s]] - if plot: - self._plot1d( - ppoly=B, - z1=_bp1, - z2=_bp2, - k=1 / pitch[p, s], - title=kwargs.pop( - "title", - r"Intersects $\zeta$ for $\vertB(\zeta)\vert = 1/\lambda$", - ), - hlabel=kwargs.pop("hlabel", r"$\zeta$"), - vlabel=kwargs.pop("vlabel", r"$\vertB\vert(\zeta)$"), - **kwargs, - ) - print(" bp1 | bp2") - print(jnp.column_stack([_bp1, _bp2])) - assert not err_1[p, s], "Intersects have an inversion.\n" - assert not err_2[p, s], "Detected discontinuity.\n" - assert not err_3, ( - f"Detected |B| = {B_m[mask[p, s]]} > {1 / pitch[p, s] + eps} = 1/λ " - "in well. Use more knots.\n" - ) - if plot: - self._plot1d( - ppoly=B, - z1=bp1[:, s], - z2=bp2[:, s], - k=1 / pitch[:, s], - title=kwargs.pop( - "title", - r"Intersects $\zeta$ for $\vertB(\zeta)\vert = 1/\lambda$", - ), - hlabel=kwargs.pop("hlabel", r"$\zeta$"), - vlabel=kwargs.pop("vlabel", r"$\vertB\vert(\zeta)$"), - **kwargs, - ) - - def integrate( - self, - pitch, - integrand, - f, - weight=None, - num_well=None, - method="cubic", - batch=True, - check=False, - ): - """Bounce integrate ∫ f(ℓ) dℓ. - - Computes the bounce integral ∫ f(ℓ) dℓ for every specified field line - for every λ value in ``pitch``. - - Parameters - ---------- - pitch : jnp.ndarray - Shape (P, L). - λ values to evaluate the bounce integral at each field line. λ(ρ,α) is - specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are - interpreted as the index into the last axis that corresponds to that field - line. If two-dimensional, the first axis is the batch axis. - integrand : callable - The composition operator on the set of functions in ``f`` that maps the - functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the - arrays in ``f`` as arguments as well as the additional keyword arguments: - ``B`` and ``pitch``. A quadrature will be performed to approximate the - bounce integral of ``integrand(*f,B=B,pitch=pitch)``. 
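For concreteness, the kind of callable ``integrand`` expects, taken from the bounce-average example in the tests removed further below: it receives the arrays in ``f`` positionally plus the keyword arrays ``B`` and ``pitch`` and returns the integrand pointwise. The instance name ``bounce`` and the reshaped array in the commented usage are placeholders.

.. code-block:: python

    from desc.backend import jnp

    def numerator(g_zz, B, pitch):
        # integrand of the bounce average of (1 - lambda |B| / 2) * g_zz
        return (1 - pitch * B / 2) * g_zz / jnp.sqrt(1 - pitch * B)

    def denominator(B, pitch):
        # integrand of the normalizing time integral dl / sqrt(1 - lambda |B|)
        return 1 / jnp.sqrt(1 - pitch * B)

    # num = bounce.integrate(pitch, numerator, f=[g_zz_reshaped])
    # den = bounce.integrate(pitch, denominator, f=[])
    # bounce_average = num / den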
- f : list[jnp.ndarray] - Shape (L * M, N). - Real scalar-valued functions evaluated on the ``grid`` supplied to - construct this object. These functions should be arguments to the callable - ``integrand``. Use the method ``self.reshape_data`` to reshape the data - into the expected shape. - weight : jnp.ndarray - Shape (L * M, N). - If supplied, the bounce integral labeled by well j is weighted such that - the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` - interpolated to the deepest point in the magnetic well. Use the method - ``self.reshape_data`` to reshape the data into the expected shape. - num_well : int or None - If not specified, then all bounce integrals are returned in an array whose - last axis has size ``(N-1)*degree``. If there were less than that many - wells detected along a field line, then the last axis of the returned array, - which enumerates bounce integrals for a particular field line and pitch, - is padded with zero. - - Specify to return the bounce integrals between the first ``num_well`` - wells for each pitch along each field line. This is useful if ``num_well`` - tightly bounds the actual number of wells. As a reference, there are - typically at most 5 wells per toroidal transit for a given pitch. - method : str - Method of interpolation for functions contained in ``f``. - See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. - Default is cubic C1 local spline. - batch : bool - Whether to perform computation in a batched manner. Default is true. - check : bool - Flag for debugging. Must be false for jax transformations. - - Returns - ------- - result : jnp.ndarray - Shape (P, L*M, num_well). - First axis enumerates pitch values. Second axis enumerates the field lines. - Last axis enumerates the bounce integrals. - - """ - pitch = jnp.atleast_2d(pitch) - bp1, bp2 = self.bounce_points(pitch, num_well) - result = _bounce_quadrature( - bp1=bp1, - bp2=bp2, - x=self._x, - w=self._w, - integrand=integrand, - f=f, - B_sup_z=self._data["B^zeta"], - B_sup_z_ra=self._data["B^zeta_z"], - B=self._data["|B|"], - B_z_ra=self._data["|B|"], - pitch=pitch, - knots=self.zeta, - method=method, - batch=batch, - check=check, - ) - if weight is not None: - result *= _interp_to_argmin_B_soft( - f=weight, - bp1=bp1, - bp2=bp2, - knots=self.zeta, - B_c=self.B, - B_z_ra_c=self.dB_dz, - method=method, - ) - assert result.shape[-1] == setdefault(num_well, (self.zeta.size - 1) * 3) - return result - - def _plot1d( - self, - ppoly, - z1=None, - z2=None, - k=None, - k_transparency=0.5, - num=1000, - title=r"Intersects $z$ for $f(z) - k = 0$", - hlabel=r"$z$", - vlabel=r"$f(z)$", - show=True, - ): - """Plot the function ``f`` defined by the piecewise polynomial. - - Parameters - ---------- - ppoly : PPoly - Spline of f over ``self.zeta``. - z1 : jnp.ndarray - Shape (k.shape[0], W). - Optional, intersects with ∂f/∂z <= 0. - z2 : jnp.ndarray - Shape (k.shape[0], W). - Optional, intersects with ∂f/∂z >= 0. - k : jnp.ndarray - Shape (k.shape[0], ). - Optional, k such that f(z) = k. - k_transparency : float - Transparency of pitch lines. - num : int - Number of points to evaluate for plot. - title : str - Plot title. - hlabel : str - Horizontal axis label. - vlabel : str - Vertical axis label. - show : bool - Whether to show the plot. Default is true. 
- - Returns - ------- - fig, ax : matplotlib figure and axes - - """ - fig, ax = plt.subplots() - legend = {} - - z = jnp.linspace(start=self.zeta[0], stop=self.zeta[-1], num=num) - add2legend(legend, ax.plot(z, ppoly(z), label=vlabel)) - plot_intersect(ax, legend, z1, z2, k, k_transparency) - - ax.set_xlabel(hlabel) - ax.set_ylabel(vlabel) - ax.legend(legend.values(), legend.keys(), loc="lower right") - ax.set_title(title) - plt.tight_layout() - if show: - plt.show() - plt.close() - return fig, ax diff --git a/desc/integrals/interp_utils.py b/desc/integrals/interp_utils.py index 4a9d4e09bb..f8b75c05b9 100644 --- a/desc/integrals/interp_utils.py +++ b/desc/integrals/interp_utils.py @@ -2,6 +2,7 @@ from functools import partial +from interpax import interp1d from orthax.chebyshev import chebvander from orthax.polynomial import polyvander @@ -459,6 +460,17 @@ def polyval_vec(x, c): return val +interp1d_vec = jnp.vectorize( + interp1d, signature="(m),(n),(n)->(m)", excluded={"method"} +) + + +@partial(jnp.vectorize, signature="(m),(n),(n),(n)->(m)") +def interp1d_vec_with_df(xq, x, f, fx): + """Vectorized interp1d.""" + return interp1d(xq, x, f, method="cubic", fx=fx) + + # TODO: Eventually do a PR to move this stuff into interpax. diff --git a/desc/integrals/quad_utils.py b/desc/integrals/quad_utils.py index ff4e0ab55d..d9950ad07b 100644 --- a/desc/integrals/quad_utils.py +++ b/desc/integrals/quad_utils.py @@ -178,7 +178,7 @@ def leggausslob(deg): return x, w -def get_quad(quad, automorphism): +def get_quadrature(quad, automorphism): """Apply automorphism to given quadrature points and weights. Parameters diff --git a/tests/test_bounce_integral.py b/tests/test_bounce_integral.py deleted file mode 100644 index d805387b72..0000000000 --- a/tests/test_bounce_integral.py +++ /dev/null @@ -1,646 +0,0 @@ -"""Test bounce integral methods.""" - -from functools import partial - -import numpy as np -import pytest -from jax import grad -from matplotlib import pyplot as plt -from numpy.polynomial.chebyshev import chebgauss, chebweight -from numpy.polynomial.legendre import leggauss -from scipy import integrate -from scipy.interpolate import CubicHermiteSpline -from scipy.special import ellipe, ellipkm1, roots_chebyu -from tests.test_plotting import tol_1d - -from desc.backend import jnp -from desc.compute.utils import dot -from desc.equilibrium import Equilibrium -from desc.equilibrium.coords import get_rtz_grid -from desc.examples import get -from desc.grid import LinearGrid -from desc.integrals.bounce_integral import ( - _get_extrema, - _interp_to_argmin_B_hard, - _interp_to_argmin_B_soft, - bounce_integral, - bounce_points, - filter_bounce_points, - get_pitch, - plot_field_line, - required_names, -) -from desc.integrals.quad_utils import ( - automorphism_sin, - bijection_from_disc, - grad_automorphism_sin, - grad_bijection_from_disc, - leggausslob, - tanh_sinh, -) - - -class TestBouncePoints: - """Test that bounce points are computed correctly.""" - - @staticmethod - @pytest.mark.unit - def test_bp1_first(): - """Test that bounce points are computed correctly.""" - start = np.pi / 3 - end = 6 * np.pi - knots = np.linspace(start, end, 5) - B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots)) - pitch = 2.0 - intersect = B.solve(1 / pitch, extrapolate=False) - bp1, bp2 = bounce_points(pitch, knots, B.c, B.derivative().c, check=True) - bp1, bp2 = filter_bounce_points(bp1, bp2) - assert bp1.size and bp2.size - np.testing.assert_allclose(bp1, intersect[0::2]) - np.testing.assert_allclose(bp2, 
intersect[1::2]) - - @staticmethod - @pytest.mark.unit - def test_bp2_first(): - """Test that bounce points are computed correctly.""" - start = -3 * np.pi - end = -start - k = np.linspace(start, end, 5) - B = CubicHermiteSpline(k, np.cos(k), -np.sin(k)) - pitch = 2.0 - intersect = B.solve(1 / pitch, extrapolate=False) - bp1, bp2 = bounce_points(pitch, k, B.c, B.derivative().c, check=True) - bp1, bp2 = filter_bounce_points(bp1, bp2) - assert bp1.size and bp2.size - np.testing.assert_allclose(bp1, intersect[1:-1:2]) - np.testing.assert_allclose(bp2, intersect[0::2][1:]) - - @staticmethod - @pytest.mark.unit - def test_bp1_before_extrema(): - """Test that bounce points are computed correctly.""" - start = -np.pi - end = -2 * start - k = np.linspace(start, end, 5) - B = CubicHermiteSpline( - k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) - ) - B_z_ra = B.derivative() - pitch = 1 / B(B_z_ra.roots(extrapolate=False))[3] + 1e-13 - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) - bp1, bp2 = filter_bounce_points(bp1, bp2) - assert bp1.size and bp2.size - intersect = B.solve(1 / pitch, extrapolate=False) - np.testing.assert_allclose(bp1[1], 1.982767, rtol=1e-6) - np.testing.assert_allclose(bp1, intersect[[1, 2]], rtol=1e-6) - # intersect array could not resolve double root as single at index 2,3 - np.testing.assert_allclose(intersect[2], intersect[3], rtol=1e-6) - np.testing.assert_allclose(bp2, intersect[[3, 4]], rtol=1e-6) - - @staticmethod - @pytest.mark.unit - def test_bp2_before_extrema(): - """Test that bounce points are computed correctly.""" - start = -1.2 * np.pi - end = -2 * start - k = np.linspace(start, end, 7) - B = CubicHermiteSpline( - k, - np.cos(k) + 2 * np.sin(-2 * k) + k / 4, - -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 4, - ) - B_z_ra = B.derivative() - pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) - bp1, bp2 = filter_bounce_points(bp1, bp2) - assert bp1.size and bp2.size - intersect = B.solve(1 / pitch, extrapolate=False) - np.testing.assert_allclose(bp1, intersect[[0, -2]]) - np.testing.assert_allclose(bp2, intersect[[1, -1]]) - - @staticmethod - @pytest.mark.unit - def test_extrema_first_and_before_bp1(): - """Test that bounce points are computed correctly.""" - start = -1.2 * np.pi - end = -2 * start - k = np.linspace(start, end, 7) - B = CubicHermiteSpline( - k, - np.cos(k) + 2 * np.sin(-2 * k) + k / 20, - -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 20, - ) - B_z_ra = B.derivative() - pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] - 1e-13 - bp1, bp2 = bounce_points( - pitch, k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], check=True, plot=False - ) - plot_field_line(B, pitch, bp1, bp2, start=k[2]) - bp1, bp2 = filter_bounce_points(bp1, bp2) - assert bp1.size and bp2.size - intersect = B.solve(1 / pitch, extrapolate=False) - np.testing.assert_allclose(bp1[0], 0.835319, rtol=1e-6) - intersect = intersect[intersect >= k[2]] - np.testing.assert_allclose(bp1, intersect[[0, 2, 4]], rtol=1e-6) - np.testing.assert_allclose(bp2, intersect[[0, 3, 5]], rtol=1e-6) - - @staticmethod - @pytest.mark.unit - def test_extrema_first_and_before_bp2(): - """Test that bounce points are computed correctly.""" - start = -1.2 * np.pi - end = -2 * start + 1 - k = np.linspace(start, end, 7) - B = CubicHermiteSpline( - k, - np.cos(k) + 2 * np.sin(-2 * k) + k / 10, - -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 10, - ) - B_z_ra = B.derivative() - pitch = 1 / B(B_z_ra.roots(extrapolate=False))[1] + 1e-13 - bp1, bp2 = 
bounce_points(pitch, k, B.c, B_z_ra.c, check=True) - bp1, bp2 = filter_bounce_points(bp1, bp2) - assert bp1.size and bp2.size - # Our routine correctly detects intersection, while scipy, jnp.root fails. - intersect = B.solve(1 / pitch, extrapolate=False) - np.testing.assert_allclose(bp1[0], -0.671904, rtol=1e-6) - np.testing.assert_allclose(bp1, intersect[[0, 3, 5]], rtol=1e-5) - # intersect array could not resolve double root as single at index 0,1 - np.testing.assert_allclose(intersect[0], intersect[1], rtol=1e-5) - np.testing.assert_allclose(bp2, intersect[[2, 4, 6]], rtol=1e-5) - - -class TestBounceQuadrature: - """Test bounce quadrature accuracy.""" - - @staticmethod - def _mod_cheb_gauss(deg): - x, w = chebgauss(deg) - w /= chebweight(x) - return x, w - - @staticmethod - def _mod_chebu_gauss(deg): - x, w = roots_chebyu(deg) - w *= chebweight(x) - return x, w - - @pytest.mark.unit - @pytest.mark.parametrize( - "is_strong, quad, automorphism", - [ - (True, tanh_sinh(40), None), - (True, leggauss(25), "default"), - (False, tanh_sinh(20), None), - (False, leggausslob(10), "default"), - # sin automorphism still helps out chebyshev quadrature - (True, _mod_cheb_gauss(30), "default"), - (False, _mod_chebu_gauss(10), "default"), - ], - ) - def test_bounce_quadrature(self, is_strong, quad, automorphism): - """Test bounce integral matches elliptic integrals.""" - p = 1e-4 - m = 1 - p - # Some prime number that doesn't appear anywhere in calculation. - # Ensures no lucky cancellation occurs from this test case since otherwise - # (bp2 - bp1) / pi = pi / (bp2 - bp1) which could mask errors since pi - # appears often in transformations. - v = 7 - bp1 = -np.pi / 2 * v - bp2 = -bp1 - knots = np.linspace(bp1, bp2, 50) - pitch = 1 + 50 * jnp.finfo(jnp.array(1.0).dtype).eps - b = np.clip(np.sin(knots / v) ** 2, 1e-7, 1) - db = np.sin(2 * knots / v) / v - data = {"B^zeta": b, "B^zeta_z|r,a": db, "|B|": b, "|B|_z|r,a": db} - - if is_strong: - integrand = lambda B, pitch: 1 / jnp.sqrt(1 - pitch * m * B) - truth = v * 2 * ellipkm1(p) - else: - integrand = lambda B, pitch: jnp.sqrt(1 - pitch * m * B) - truth = v * 2 * ellipe(m) - kwargs = {} - if automorphism != "default": - kwargs["automorphism"] = automorphism - bounce_integrate, _ = bounce_integral(data, knots, quad, check=True, **kwargs) - result = bounce_integrate(integrand, [], pitch) - assert np.count_nonzero(result) == 1 - np.testing.assert_allclose(np.sum(result), truth, rtol=1e-4) - - -@pytest.mark.unit -def test_bounce_integral_checks(): - """Test that all the internal correctness checks pass for real example.""" - - def numerator(g_zz, B, pitch): - f = (1 - pitch * B / 2) * g_zz - return f / jnp.sqrt(1 - pitch * B) - - def denominator(B, pitch): - return 1 / jnp.sqrt(1 - pitch * B) - - # Suppose we want to compute a bounce average of the function - # f(ℓ) = (1 − λ|B|/2) * g_zz, where g_zz is the squared norm of the - # toroidal basis vector on some set of field lines specified by (ρ, α) - # coordinates. This is defined as - # [∫ f(ℓ) / √(1 − λ|B|) dℓ] / [∫ 1 / √(1 − λ|B|) dℓ] - eq = get("HELIOTRON") - # Clebsch-Type field-line coordinates ρ, α, ζ. 
- rho = np.linspace(0.1, 1, 6) - alpha = np.array([0]) - knots = np.linspace(-2 * np.pi, 2 * np.pi, 200) - grid = get_rtz_grid( - eq, rho, alpha, knots, coordinates="raz", period=(np.inf, 2 * np.pi, np.inf) - ) - data = eq.compute( - required_names() + ["min_tz |B|", "max_tz |B|", "g_zz"], grid=grid - ) - bounce_integrate, spline = bounce_integral( - data, knots, check=True, plot=False, quad=leggauss(3) - ) - pitch = get_pitch( - grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 - ) - # You can also plot the field line by uncommenting the following line. - # Useful to see if the knot density was sufficient to reconstruct the field line. - # _, _ = bounce_points(pitch, **spline, check=True, num=50000) # noqa: E800 - num = bounce_integrate(numerator, data["g_zz"], pitch) - den = bounce_integrate(denominator, [], pitch) - avg = num / den - - # Sum all bounce integrals across each particular field line. - avg = np.nansum(avg, axis=-1) - assert np.count_nonzero(avg) - # Split the resulting data by field line. - avg = avg.reshape(pitch.shape[0], rho.size, alpha.size) - # The sum stored at index i, j - i, j = 0, 0 - print(avg[:, i, j]) - # is the summed bounce average among wells along the field line with nodes - # given in Clebsch-Type field-line coordinates ρ, α, ζ - raz_grid = grid.source_grid - nodes = raz_grid.nodes.reshape(rho.size, alpha.size, -1, 3) - print(nodes[i, j]) - # for the pitch values stored in - pitch = pitch.reshape(pitch.shape[0], rho.size, alpha.size) - print(pitch[:, i, j]) - - -@pytest.mark.unit -def test_get_extrema(): - """Test computation of extrema of |B|.""" - start = -np.pi - end = -2 * start - k = np.linspace(start, end, 5) - B = CubicHermiteSpline( - k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) - ) - B_z_ra = B.derivative() - extrema, B_extrema = _get_extrema(k, B.c, B_z_ra.c) - mask = ~np.isnan(extrema) - extrema, B_extrema = extrema[mask], B_extrema[mask] - idx = np.argsort(extrema) - - extrema_scipy = np.sort(B_z_ra.roots(extrapolate=False)) - B_extrema_scipy = B(extrema_scipy) - assert extrema.size == extrema_scipy.size - np.testing.assert_allclose(extrema[idx], extrema_scipy) - np.testing.assert_allclose(B_extrema[idx], B_extrema_scipy) - - -@pytest.mark.unit -@pytest.mark.parametrize("func", [_interp_to_argmin_B_soft, _interp_to_argmin_B_hard]) -def test_interp_to_argmin_B(func): - """Test argmin interpolation.""" # noqa: D202 - - # Test functions chosen with purpose; don't change unless plotted and compared. 
- def f(z): - return np.cos(3 * z) * np.sin(2 * np.cos(z)) + np.cos(1.2 * z) - - def B(z): - return np.sin(3 * z) * np.cos(1 / (1 + z)) * np.cos(z**2) * z - - def dB_dz(z): - return ( - 3 * z * np.cos(3 * z) * np.cos(z**2) * np.cos(1 / (1 + z)) - - 2 * z**2 * np.sin(3 * z) * np.sin(z**2) * np.cos(1 / (1 + z)) - + z * np.sin(3 * z) * np.sin(1 / (1 + z)) * np.cos(z**2) / (1 + z) ** 2 - + np.sin(3 * z) * np.cos(z**2) * np.cos(1 / (1 + z)) - ) - - zeta = np.linspace(0, 3 * np.pi, 175) - _, spline = bounce_integral( - { - "B^zeta": np.ones_like(zeta), - "B^zeta_z|r,a": np.ones_like(zeta), - "|B|": B(zeta), - "|B|_z|r,a": dB_dz(zeta), - }, - zeta, - ) - argmin = 5.61719 - np.testing.assert_allclose( - f(argmin), - func( - f(zeta), - bp1=np.array(0, ndmin=3), - bp2=np.array(2 * np.pi, ndmin=3), - **spline, - method="cubic", - ), - rtol=1e-3, - ) - - -@partial(np.vectorize, excluded={0}) -def _adaptive_elliptic(integrand, k): - a = 0 - b = 2 * np.arcsin(k) - return integrate.quad(integrand, a, b, args=(k,), points=b)[0] - - -def _fixed_elliptic(integrand, k, deg): - k = np.atleast_1d(k) - a = np.zeros_like(k) - b = 2 * np.arcsin(k) - x, w = leggauss(deg) - w = w * grad_automorphism_sin(x) - x = automorphism_sin(x) - Z = bijection_from_disc(x, a[..., np.newaxis], b[..., np.newaxis]) - k = k[..., np.newaxis] - quad = np.dot(integrand(Z, k), w) * grad_bijection_from_disc(a, b) - return quad - - -def _elliptic_incomplete(k2): - K_integrand = lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * (k / 4) - E_integrand = lambda Z, k: 2 * np.sqrt(k**2 - np.sin(Z / 2) ** 2) / (k * 4) - # Scipy's elliptic integrals are broken. - # https://github.com/scipy/scipy/issues/20525. - k = np.sqrt(k2) - K = _adaptive_elliptic(K_integrand, k) - E = _adaptive_elliptic(E_integrand, k) - # Make sure scipy's adaptive quadrature is not broken. - np.testing.assert_allclose(K, _fixed_elliptic(K_integrand, k, 10)) - np.testing.assert_allclose(E, _fixed_elliptic(E_integrand, k, 10)) - - I_0 = 4 / k * K - I_1 = 4 * k * E - I_2 = 16 * k * E - I_3 = 16 * k / 9 * (2 * (-1 + 2 * k2) * E - (-1 + k2) * K) - I_4 = 16 * k / 3 * ((-1 + 2 * k2) * E - 2 * (-1 + k2) * K) - I_5 = 32 * k / 30 * (2 * (1 - k2 + k2**2) * E - (1 - 3 * k2 + 2 * k2**2) * K) - I_6 = 4 / k * (2 * k2 * E + (1 - 2 * k2) * K) - I_7 = 2 * k / 3 * ((-2 + 4 * k2) * E - 4 * (-1 + k2) * K) - # Check for math mistakes. 
- np.testing.assert_allclose( - I_2, - _adaptive_elliptic( - lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * Z * np.sin(Z), k - ), - ) - np.testing.assert_allclose( - I_3, - _adaptive_elliptic( - lambda Z, k: 2 * np.sqrt(k**2 - np.sin(Z / 2) ** 2) * Z * np.sin(Z), k - ), - ) - np.testing.assert_allclose( - I_4, - _adaptive_elliptic( - lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.sin(Z) ** 2, k - ), - ) - np.testing.assert_allclose( - I_5, - _adaptive_elliptic( - lambda Z, k: 2 * np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.sin(Z) ** 2, k - ), - ) - # scipy fails - np.testing.assert_allclose( - I_6, - _fixed_elliptic( - lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.cos(Z), - k, - deg=10, - ), - ) - np.testing.assert_allclose( - I_7, - _adaptive_elliptic( - lambda Z, k: 2 * np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.cos(Z), k - ), - ) - return I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 - - -def _drift_analytic(data): - """Compute analytic approximation for bounce-averaged binormal drift.""" - B = data["|B|"] / data["B ref"] - B0 = np.mean(B) - # epsilon should be changed to dimensionless, and computed in a way that - # is independent of normalization length scales, like "effective r/R0". - epsilon = data["a"] * data["rho"] # Aspect ratio of the flux surface. - np.testing.assert_allclose(epsilon, 0.05) - theta_PEST = data["alpha"] + data["iota"] * data["zeta"] - # same as 1 / (1 + epsilon cos(theta)) assuming epsilon << 1 - B_analytic = B0 * (1 - epsilon * np.cos(theta_PEST)) - np.testing.assert_allclose(B, B_analytic, atol=3e-3) - - gradpar = data["a"] * data["B^zeta"] / data["|B|"] - # This method of computing G0 suggests a fixed point iteration. - G0 = data["a"] - gradpar_analytic = G0 * (1 - epsilon * np.cos(theta_PEST)) - gradpar_theta_analytic = data["iota"] * gradpar_analytic - G0 = np.mean(gradpar_theta_analytic) - np.testing.assert_allclose(gradpar, gradpar_analytic, atol=5e-3) - - # Comparing coefficient calculation here with coefficients from compute/_metric - normalization = -np.sign(data["psi"]) * data["B ref"] * data["a"] ** 2 - cvdrift = data["cvdrift"] * normalization - gbdrift = data["gbdrift"] * normalization - dPdrho = np.mean(-0.5 * (cvdrift - gbdrift) * data["|B|"] ** 2) - alpha_MHD = -0.5 * dPdrho / data["iota"] ** 2 - gds21 = ( - -np.sign(data["iota"]) - * data["shear"] - * dot(data["grad(psi)"], data["grad(alpha)"]) - / data["B ref"] - ) - gds21_analytic = -data["shear"] * ( - data["shear"] * theta_PEST - alpha_MHD / B**4 * np.sin(theta_PEST) - ) - gds21_analytic_low_order = -data["shear"] * ( - data["shear"] * theta_PEST - alpha_MHD / B0**4 * np.sin(theta_PEST) - ) - np.testing.assert_allclose(gds21, gds21_analytic, atol=2e-2) - np.testing.assert_allclose(gds21, gds21_analytic_low_order, atol=2.7e-2) - - fudge_1 = 0.19 - gbdrift_analytic = fudge_1 * ( - -data["shear"] - + np.cos(theta_PEST) - - gds21_analytic / data["shear"] * np.sin(theta_PEST) - ) - gbdrift_analytic_low_order = fudge_1 * ( - -data["shear"] - + np.cos(theta_PEST) - - gds21_analytic_low_order / data["shear"] * np.sin(theta_PEST) - ) - fudge_2 = 0.07 - cvdrift_analytic = gbdrift_analytic + fudge_2 * alpha_MHD / B**2 - cvdrift_analytic_low_order = ( - gbdrift_analytic_low_order + fudge_2 * alpha_MHD / B0**2 - ) - np.testing.assert_allclose(gbdrift, gbdrift_analytic, atol=1e-2) - np.testing.assert_allclose(cvdrift, cvdrift_analytic, atol=2e-2) - np.testing.assert_allclose(gbdrift, gbdrift_analytic_low_order, atol=1e-2) - np.testing.assert_allclose(cvdrift, cvdrift_analytic_low_order, 
atol=2e-2) - - pitch = get_pitch(np.min(B), np.max(B), 100)[1:] - k2 = 0.5 * ((1 - pitch * B0) / (epsilon * pitch * B0) + 1) - I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 = _elliptic_incomplete(k2) - y = np.sqrt(2 * epsilon * pitch * B0) - I_0, I_2, I_4, I_6 = map(lambda I: I / y, (I_0, I_2, I_4, I_6)) - I_1, I_3, I_5, I_7 = map(lambda I: I * y, (I_1, I_3, I_5, I_7)) - - drift_analytic_num = ( - fudge_2 * alpha_MHD / B0**2 * I_1 - - 0.5 - * fudge_1 - * ( - data["shear"] * (I_0 + I_1 - I_2 - I_3) - + alpha_MHD / B0**4 * (I_4 + I_5) - - (I_6 + I_7) - ) - ) / G0 - drift_analytic_den = I_0 / G0 - drift_analytic = drift_analytic_num / drift_analytic_den - return drift_analytic, cvdrift, gbdrift, pitch - - -@pytest.mark.unit -@pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_1d) -def test_drift(): - """Test bounce-averaged drift with analytical expressions.""" - eq = Equilibrium.load(".//tests//inputs//low-beta-shifted-circle.h5") - psi_boundary = eq.Psi / (2 * np.pi) - psi = 0.25 * psi_boundary - rho = np.sqrt(psi / psi_boundary) - np.testing.assert_allclose(rho, 0.5) - - # Make a set of nodes along a single fieldline. - grid_fsa = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, sym=eq.sym, NFP=eq.NFP) - data = eq.compute(["iota"], grid=grid_fsa) - iota = grid_fsa.compress(data["iota"]).item() - alpha = 0 - zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) - grid = get_rtz_grid( - eq, - rho, - alpha, - zeta, - coordinates="raz", - period=(np.inf, 2 * np.pi, np.inf), - iota=np.array([iota]), - ) - data = eq.compute( - required_names() - + [ - "cvdrift", - "gbdrift", - "grad(psi)", - "grad(alpha)", - "shear", - "iota", - "psi", - "a", - ], - grid=grid, - ) - np.testing.assert_allclose(data["psi"], psi) - np.testing.assert_allclose(data["iota"], iota) - assert np.all(data["B^zeta"] > 0) - B_ref = 2 * np.abs(psi_boundary) / data["a"] ** 2 - data["B ref"] = B_ref - data["rho"] = rho - data["alpha"] = alpha - data["zeta"] = zeta - data["psi"] = grid.compress(data["psi"]) - data["iota"] = grid.compress(data["iota"]) - data["shear"] = grid.compress(data["shear"]) - - # Compute analytic approximation. - drift_analytic, cvdrift, gbdrift, pitch = _drift_analytic(data) - # Compute numerical result. - bounce_integrate, _ = bounce_integral( - data, - knots=zeta, - B_ref=B_ref, - L_ref=data["a"], - quad=leggauss(28), # converges to absolute and relative tolerance of 1e-7 - check=True, - ) - - def integrand_num(cvdrift, gbdrift, B, pitch): - g = jnp.sqrt(1 - pitch * B) - return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) - - def integrand_den(B, pitch): - return 1 / jnp.sqrt(1 - pitch * B) - - drift_numerical_num = bounce_integrate( - integrand=integrand_num, - f=[cvdrift, gbdrift], - pitch=pitch[:, np.newaxis], - num_well=1, - ) - drift_numerical_den = bounce_integrate( - integrand=integrand_den, - f=[], - pitch=pitch[:, np.newaxis], - num_well=1, - weight=np.ones(zeta.size), - ) - drift_numerical = np.squeeze(drift_numerical_num / drift_numerical_den) - msg = "There should be one bounce integral per pitch in this example." 
- assert drift_numerical.size == drift_analytic.size, msg - np.testing.assert_allclose(drift_numerical, drift_analytic, atol=5e-3, rtol=5e-2) - - _test_bounce_autodiff( - bounce_integrate, - integrand_num, - f=[cvdrift, gbdrift], - weight=np.ones(zeta.size), - ) - - fig, ax = plt.subplots() - ax.plot(1 / pitch, drift_analytic) - ax.plot(1 / pitch, drift_numerical) - return fig - - -def _test_bounce_autodiff(bounce_integrate, integrand, **kwargs): - """Make sure reverse mode AD works correctly on this algorithm.""" - - def fun1(pitch): - return jnp.sum(bounce_integrate(integrand, pitch=pitch, **kwargs)) - - def fun2(pitch): - return jnp.sum(bounce_integrate(integrand_grad, pitch=pitch, **kwargs)) - - def integrand_grad(*args, **kwargs2): - fun = jnp.vectorize( - grad(integrand, -1), signature="()," * len(kwargs["f"]) + "(),()->()" - ) - return fun(*args, *kwargs2.values()) - - pitch = 1.0 - truth = 650 # Extrapolated from plot. - assert np.isclose(grad(fun1)(pitch), truth, rtol=1e-3) - # Make sure bounce points get differentiated too. - result = fun2(pitch) - assert np.isfinite(result) and not np.isclose(result, truth, rtol=1e-3) diff --git a/tests/test_fourier_bounce.py b/tests/test_fourier_bounce.py index fcf4fb128d..aab1f64de7 100644 --- a/tests/test_fourier_bounce.py +++ b/tests/test_fourier_bounce.py @@ -8,13 +8,13 @@ from tests.test_bounce_integral import _drift_analytic from tests.test_plotting import tol_1d +from desc._bounce_utils.bounce_integral import filter_bounce_points, get_pitch from desc.backend import jnp from desc.equilibrium import Equilibrium from desc.equilibrium.coords import get_rtz_grid, map_coordinates from desc.examples import get from desc.grid import LinearGrid from desc.integrals import Bounce2D -from desc.integrals.bounce_integral import filter_bounce_points, get_pitch from desc.integrals.fourier_bounce_integral import FourierChebyshevBasis, _get_alphas from desc.integrals.interp_utils import fourier_pts diff --git a/tests/test_integrals.py b/tests/test_integrals.py index b15b019283..752be71614 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -1,13 +1,27 @@ """Test integration algorithms.""" +from functools import partial + import numpy as np import pytest - +from jax import grad +from matplotlib import pyplot as plt +from numpy.polynomial.chebyshev import chebgauss, chebweight +from numpy.polynomial.legendre import leggauss +from scipy import integrate +from scipy.interpolate import CubicHermiteSpline +from scipy.special import ellipe, ellipkm1, roots_chebyu +from tests.test_plotting import tol_1d + +from desc.backend import jnp from desc.basis import FourierZernikeBasis +from desc.compute.utils import dot from desc.equilibrium import Equilibrium +from desc.equilibrium.coords import get_rtz_grid from desc.examples import get -from desc.grid import ConcentricGrid, LinearGrid, QuadratureGrid +from desc.grid import ConcentricGrid, Grid, LinearGrid, QuadratureGrid from desc.integrals import ( + Bounce1D, DFTInterpolator, FFTInterpolator, line_integrals, @@ -20,6 +34,22 @@ surface_variance, virtual_casing_biot_savart, ) +from desc.integrals.bounce_utils import ( + _get_extrema, + _interp_to_argmin_B_hard, + _interp_to_argmin_B_soft, + bounce_points, + get_pitch, + plot_ppoly, +) +from desc.integrals.quad_utils import ( + automorphism_sin, + bijection_from_disc, + grad_automorphism_sin, + grad_bijection_from_disc, + leggausslob, + tanh_sinh, +) from desc.integrals.singularities import _get_quadrature_nodes from desc.integrals.surface_integral 
import _get_grid_surface from desc.transform import Transform @@ -688,3 +718,638 @@ def test_biest_interpolators(self): g2 = interp2(f(source_theta, source_zeta), i) np.testing.assert_allclose(g1, g2) np.testing.assert_allclose(g1, ff) + + +def _filter_bounce_points(bp1, bp2): + mask = (bp1 - bp2) != 0 + return bp1[mask], bp2[mask] + + +class TestBouncePoints: + """Test that bounce points are computed correctly.""" + + @staticmethod + @pytest.mark.unit + def test_bp1_first(): + """Test that bounce points are computed correctly.""" + start = np.pi / 3 + end = 6 * np.pi + knots = np.linspace(start, end, 5) + B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots)) + pitch = 2.0 + intersect = B.solve(1 / pitch, extrapolate=False) + bp1, bp2 = bounce_points(pitch, knots, B.c, B.derivative().c, check=True) + bp1, bp2 = _filter_bounce_points(bp1, bp2) + assert bp1.size and bp2.size + np.testing.assert_allclose(bp1, intersect[0::2]) + np.testing.assert_allclose(bp2, intersect[1::2]) + + @staticmethod + @pytest.mark.unit + def test_bp2_first(): + """Test that bounce points are computed correctly.""" + start = -3 * np.pi + end = -start + k = np.linspace(start, end, 5) + B = CubicHermiteSpline(k, np.cos(k), -np.sin(k)) + pitch = 2.0 + intersect = B.solve(1 / pitch, extrapolate=False) + bp1, bp2 = bounce_points(pitch, k, B.c, B.derivative().c, check=True) + bp1, bp2 = _filter_bounce_points(bp1, bp2) + assert bp1.size and bp2.size + np.testing.assert_allclose(bp1, intersect[1:-1:2]) + np.testing.assert_allclose(bp2, intersect[0::2][1:]) + + @staticmethod + @pytest.mark.unit + def test_bp1_before_extrema(): + """Test that bounce points are computed correctly.""" + start = -np.pi + end = -2 * start + k = np.linspace(start, end, 5) + B = CubicHermiteSpline( + k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) + ) + B_z_ra = B.derivative() + pitch = 1 / B(B_z_ra.roots(extrapolate=False))[3] + 1e-13 + bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) + bp1, bp2 = _filter_bounce_points(bp1, bp2) + assert bp1.size and bp2.size + intersect = B.solve(1 / pitch, extrapolate=False) + np.testing.assert_allclose(bp1[1], 1.982767, rtol=1e-6) + np.testing.assert_allclose(bp1, intersect[[1, 2]], rtol=1e-6) + # intersect array could not resolve double root as single at index 2,3 + np.testing.assert_allclose(intersect[2], intersect[3], rtol=1e-6) + np.testing.assert_allclose(bp2, intersect[[3, 4]], rtol=1e-6) + + @staticmethod + @pytest.mark.unit + def test_bp2_before_extrema(): + """Test that bounce points are computed correctly.""" + start = -1.2 * np.pi + end = -2 * start + k = np.linspace(start, end, 7) + B = CubicHermiteSpline( + k, + np.cos(k) + 2 * np.sin(-2 * k) + k / 4, + -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 4, + ) + B_z_ra = B.derivative() + pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] + bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) + bp1, bp2 = _filter_bounce_points(bp1, bp2) + assert bp1.size and bp2.size + intersect = B.solve(1 / pitch, extrapolate=False) + np.testing.assert_allclose(bp1, intersect[[0, -2]]) + np.testing.assert_allclose(bp2, intersect[[1, -1]]) + + @staticmethod + @pytest.mark.unit + def test_extrema_first_and_before_bp1(): + """Test that bounce points are computed correctly.""" + start = -1.2 * np.pi + end = -2 * start + k = np.linspace(start, end, 7) + B = CubicHermiteSpline( + k, + np.cos(k) + 2 * np.sin(-2 * k) + k / 20, + -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 20, + ) + B_z_ra = B.derivative() + pitch = 1 / 
B(B_z_ra.roots(extrapolate=False))[2] - 1e-13 + bp1, bp2 = bounce_points( + pitch, k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], check=True, plot=False + ) + plot_ppoly(B, z1=bp1, z2=bp2, k=1 / pitch, start=k[2]) + bp1, bp2 = _filter_bounce_points(bp1, bp2) + assert bp1.size and bp2.size + intersect = B.solve(1 / pitch, extrapolate=False) + np.testing.assert_allclose(bp1[0], 0.835319, rtol=1e-6) + intersect = intersect[intersect >= k[2]] + np.testing.assert_allclose(bp1, intersect[[0, 2, 4]], rtol=1e-6) + np.testing.assert_allclose(bp2, intersect[[0, 3, 5]], rtol=1e-6) + + @staticmethod + @pytest.mark.unit + def test_extrema_first_and_before_bp2(): + """Test that bounce points are computed correctly.""" + start = -1.2 * np.pi + end = -2 * start + 1 + k = np.linspace(start, end, 7) + B = CubicHermiteSpline( + k, + np.cos(k) + 2 * np.sin(-2 * k) + k / 10, + -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 10, + ) + B_z_ra = B.derivative() + pitch = 1 / B(B_z_ra.roots(extrapolate=False))[1] + 1e-13 + bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) + bp1, bp2 = _filter_bounce_points(bp1, bp2) + assert bp1.size and bp2.size + # Our routine correctly detects intersection, while scipy, jnp.root fails. + intersect = B.solve(1 / pitch, extrapolate=False) + np.testing.assert_allclose(bp1[0], -0.671904, rtol=1e-6) + np.testing.assert_allclose(bp1, intersect[[0, 3, 5]], rtol=1e-5) + # intersect array could not resolve double root as single at index 0,1 + np.testing.assert_allclose(intersect[0], intersect[1], rtol=1e-5) + np.testing.assert_allclose(bp2, intersect[[2, 4, 6]], rtol=1e-5) + + @pytest.mark.unit + def test_get_extrema(self): + """Test computation of extrema of |B|.""" + start = -np.pi + end = -2 * start + k = np.linspace(start, end, 5) + B = CubicHermiteSpline( + k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) + ) + B_z_ra = B.derivative() + extrema, B_extrema = _get_extrema(k, B.c, B_z_ra.c) + mask = ~np.isnan(extrema) + extrema, B_extrema = extrema[mask], B_extrema[mask] + idx = np.argsort(extrema) + + extrema_scipy = np.sort(B_z_ra.roots(extrapolate=False)) + B_extrema_scipy = B(extrema_scipy) + assert extrema.size == extrema_scipy.size + np.testing.assert_allclose(extrema[idx], extrema_scipy) + np.testing.assert_allclose(B_extrema[idx], B_extrema_scipy) + + +class TestBounceQuadrature: + """Test bounce quadrature accuracy.""" + + @staticmethod + def _mod_cheb_gauss(deg): + x, w = chebgauss(deg) + w /= chebweight(x) + return x, w + + @staticmethod + def _mod_chebu_gauss(deg): + x, w = roots_chebyu(deg) + w *= chebweight(x) + return x, w + + @pytest.mark.unit + @pytest.mark.parametrize( + "is_strong, quad, automorphism", + [ + (True, tanh_sinh(40), None), + (True, leggauss(25), "default"), + (False, tanh_sinh(20), None), + (False, leggausslob(10), "default"), + # sin automorphism still helps out chebyshev quadrature + (True, _mod_cheb_gauss(30), "default"), + (False, _mod_chebu_gauss(10), "default"), + ], + ) + def test_bounce_quadrature(self, is_strong, quad, automorphism): + """Test bounce integral matches elliptic integrals.""" + p = 1e-4 + m = 1 - p + # Some prime number that doesn't appear anywhere in calculation. + # Ensures no lucky cancellation occurs from this test case since otherwise + # (bp2 - bp1) / pi = pi / (bp2 - bp1) which could mask errors since pi + # appears often in transformations. 
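+        # Reference for the expected values (informal sketch): the data below sets
+        # B^ζ = |B| = sin²(ζ/v), so dℓ = (|B|/B^ζ) dζ = dζ, and with λ ≈ 1 the strong
+        # case reduces to v ∫_{-π/2}^{π/2} dt/√(1 − m sin²t) = 2 v K(m), while the weak
+        # case gives 2 v E(m). Note scipy's ellipkm1(p) = K(m) with m = 1 − p.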
+ v = 7 + bp1 = -np.pi / 2 * v + bp2 = -bp1 + knots = np.linspace(bp1, bp2, 50) + pitch = 1 + 50 * jnp.finfo(jnp.array(1.0).dtype).eps + b = np.clip(np.sin(knots / v) ** 2, 1e-7, 1) + db = np.sin(2 * knots / v) / v + data = {"B^zeta": b, "B^zeta_z|r,a": db, "|B|": b, "|B|_z|r,a": db} + + if is_strong: + integrand = lambda B, pitch: 1 / jnp.sqrt(1 - pitch * m * B) + truth = v * 2 * ellipkm1(p) + else: + integrand = lambda B, pitch: jnp.sqrt(1 - pitch * m * B) + truth = v * 2 * ellipe(m) + kwargs = {} + if automorphism != "default": + kwargs["automorphism"] = automorphism + bounce = Bounce1D( + Grid.create_meshgrid([1, 0, knots], coordinates="raz"), + data, + quad, + check=True, + **kwargs + ) + result = bounce.integrate(pitch, integrand, [], check=True) + assert np.count_nonzero(result) == 1 + np.testing.assert_allclose(np.sum(result), truth, rtol=1e-4) + + @staticmethod + @partial(np.vectorize, excluded={0}) + def _adaptive_elliptic(integrand, k): + a = 0 + b = 2 * np.arcsin(k) + return integrate.quad(integrand, a, b, args=(k,), points=b)[0] + + @staticmethod + def _fixed_elliptic(integrand, k, deg): + k = np.atleast_1d(k) + a = np.zeros_like(k) + b = 2 * np.arcsin(k) + x, w = leggauss(deg) + w = w * grad_automorphism_sin(x) + x = automorphism_sin(x) + Z = bijection_from_disc(x, a[..., np.newaxis], b[..., np.newaxis]) + k = k[..., np.newaxis] + quad = np.dot(integrand(Z, k), w) * grad_bijection_from_disc(a, b) + return quad + + @staticmethod + def elliptic_incomplete(k2): + """Calculate elliptic integrals for bounce averaged binormal drift.""" + K_integrand = lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * (k / 4) + E_integrand = lambda Z, k: 2 * np.sqrt(k**2 - np.sin(Z / 2) ** 2) / (k * 4) + # Scipy's elliptic integrals are broken. + # https://github.com/scipy/scipy/issues/20525. + k = np.sqrt(k2) + K = TestBounceQuadrature._adaptive_elliptic(K_integrand, k) + E = TestBounceQuadrature._adaptive_elliptic(E_integrand, k) + # Make sure scipy's adaptive quadrature is not broken. + np.testing.assert_allclose( + K, TestBounceQuadrature._fixed_elliptic(K_integrand, k, 10) + ) + np.testing.assert_allclose( + E, TestBounceQuadrature._fixed_elliptic(E_integrand, k, 10) + ) + + I_0 = 4 / k * K + I_1 = 4 * k * E + I_2 = 16 * k * E + I_3 = 16 * k / 9 * (2 * (-1 + 2 * k2) * E - (-1 + k2) * K) + I_4 = 16 * k / 3 * ((-1 + 2 * k2) * E - 2 * (-1 + k2) * K) + I_5 = 32 * k / 30 * (2 * (1 - k2 + k2**2) * E - (1 - 3 * k2 + 2 * k2**2) * K) + I_6 = 4 / k * (2 * k2 * E + (1 - 2 * k2) * K) + I_7 = 2 * k / 3 * ((-2 + 4 * k2) * E - 4 * (-1 + k2) * K) + # Check for math mistakes. 
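+        # Reference identity (informal): with the substitution sin(Z/2) = k sin(θ),
+        # ∫₀^{2 arcsin k} dZ/√(k² − sin²(Z/2)) = 2 ∫₀^{π/2} dθ/√(1 − k² sin²θ) = 2 K(m),
+        # the complete elliptic integral of the first kind with parameter m = k².
+        # The asserts below spot-check the I_n closed forms against direct quadrature
+        # of their defining integrands.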
+ np.testing.assert_allclose( + I_2, + TestBounceQuadrature._adaptive_elliptic( + lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * Z * np.sin(Z), k + ), + ) + np.testing.assert_allclose( + I_3, + TestBounceQuadrature._adaptive_elliptic( + lambda Z, k: 2 * np.sqrt(k**2 - np.sin(Z / 2) ** 2) * Z * np.sin(Z), k + ), + ) + np.testing.assert_allclose( + I_4, + TestBounceQuadrature._adaptive_elliptic( + lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.sin(Z) ** 2, k + ), + ) + np.testing.assert_allclose( + I_5, + TestBounceQuadrature._adaptive_elliptic( + lambda Z, k: 2 * np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.sin(Z) ** 2, k + ), + ) + # scipy fails + np.testing.assert_allclose( + I_6, + TestBounceQuadrature._fixed_elliptic( + lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.cos(Z), + k, + deg=10, + ), + ) + np.testing.assert_allclose( + I_7, + TestBounceQuadrature._adaptive_elliptic( + lambda Z, k: 2 * np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.cos(Z), k + ), + ) + return I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 + + +class TestBounce1D: + """Test bounce integral methods that use one-dimensional local splines.""" + + @pytest.mark.unit + def test_bounce_integral_checks(self): + """Test that all the internal correctness checks pass for real example.""" + + def numerator(g_zz, B, pitch): + f = (1 - pitch * B / 2) * g_zz + return f / jnp.sqrt(1 - pitch * B) + + def denominator(B, pitch): + return 1 / jnp.sqrt(1 - pitch * B) + + # Suppose we want to compute a bounce average of the function + # f(ℓ) = (1 − λ|B|/2) * g_zz, where g_zz is the squared norm of the + # toroidal basis vector on some set of field lines specified by (ρ, α) + # coordinates. This is defined as + # [∫ f(ℓ) / √(1 − λ|B|) dℓ] / [∫ 1 / √(1 − λ|B|) dℓ] + eq = get("HELIOTRON") + # Clebsch-Type field-line coordinates ρ, α, ζ. + rho = np.linspace(0.1, 1, 6) + alpha = np.array([0]) + knots = np.linspace(-2 * np.pi, 2 * np.pi, 200) + grid = get_rtz_grid( + eq, rho, alpha, knots, coordinates="raz", period=(np.inf, 2 * np.pi, np.inf) + ) + data = eq.compute( + Bounce1D.required_names() + ["min_tz |B|", "max_tz |B|", "g_zz"], grid=grid + ) + bounce = Bounce1D(grid.source_grid, data, check=True) + pitch = get_pitch( + grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 + ) + num = bounce.integrate( + pitch, + numerator, + Bounce1D.reshape_data(grid.source_grid, data["g_zz"]), + check=True, + ) + den = bounce.integrate(pitch, denominator, [], check=True) + avg = num / den + + # Sum all bounce integrals across each particular field line. + avg = np.nansum(avg, axis=-1) + assert np.count_nonzero(avg) + # Split the resulting data by field line. + avg = avg.reshape(pitch.shape[0], rho.size, alpha.size) + # The sum stored at index i, j + i, j = 0, 0 + print(avg[:, i, j]) + # is the summed bounce average among wells along the field line with nodes + # given in Clebsch-Type field-line coordinates ρ, α, ζ + raz_grid = grid.source_grid + nodes = raz_grid.nodes.reshape(rho.size, alpha.size, -1, 3) + print(nodes[i, j]) + # for the pitch values stored in + pitch = pitch.reshape(pitch.shape[0], rho.size, alpha.size) + print(pitch[:, i, j]) + + @pytest.mark.unit + @pytest.mark.parametrize( + "func", [_interp_to_argmin_B_soft, _interp_to_argmin_B_hard] + ) + def test_interp_to_argmin_B(self, func): + """Test argmin interpolation.""" # noqa: D202 + + # Test functions chosen with purpose; don't change unless plotted and compared. 
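+        # Informal summary of the check: interp_to_argmin_B_{soft,hard} should return
+        # f interpolated to the ζ that minimizes |B| between the given bounce points.
+        # With the functions below, that minimizer on (0, 2π) is the hard-coded
+        # argmin ≈ 5.61719, so the result is compared against f(argmin) directly.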
+ def f(z): + return np.cos(3 * z) * np.sin(2 * np.cos(z)) + np.cos(1.2 * z) + + def B(z): + return np.sin(3 * z) * np.cos(1 / (1 + z)) * np.cos(z**2) * z + + def dB_dz(z): + return ( + 3 * z * np.cos(3 * z) * np.cos(z**2) * np.cos(1 / (1 + z)) + - 2 * z**2 * np.sin(3 * z) * np.sin(z**2) * np.cos(1 / (1 + z)) + + z * np.sin(3 * z) * np.sin(1 / (1 + z)) * np.cos(z**2) / (1 + z) ** 2 + + np.sin(3 * z) * np.cos(z**2) * np.cos(1 / (1 + z)) + ) + + zeta = np.linspace(0, 3 * np.pi, 175) + bounce = Bounce1D( + Grid.create_meshgrid([1, 0, zeta], coordinates="raz"), + { + "B^zeta": np.ones_like(zeta), + "B^zeta_z|r,a": np.ones_like(zeta), + "|B|": B(zeta), + "|B|_z|r,a": dB_dz(zeta), + }, + ) + np.testing.assert_allclose(bounce._zeta, zeta) + argmin = 5.61719 + np.testing.assert_allclose( + f(argmin), + func( + f(zeta), + bp1=np.array(0, ndmin=3), + bp2=np.array(2 * np.pi, ndmin=3), + knots=zeta, + B=bounce.B, + dB_dz=bounce._dB_dz, + method="cubic", + ), + rtol=1e-3, + ) + + @staticmethod + def drift_analytic(data): + """Compute analytic approximation for bounce-averaged binormal drift.""" + B = data["|B|"] / data["Bref"] + B0 = np.mean(B) + # epsilon should be changed to dimensionless, and computed in a way that + # is independent of normalization length scales, like "effective r/R0". + epsilon = data["a"] * data["rho"] # Aspect ratio of the flux surface. + np.testing.assert_allclose(epsilon, 0.05) + theta_PEST = data["alpha"] + data["iota"] * data["zeta"] + # same as 1 / (1 + epsilon cos(theta)) assuming epsilon << 1 + B_analytic = B0 * (1 - epsilon * np.cos(theta_PEST)) + np.testing.assert_allclose(B, B_analytic, atol=3e-3) + + gradpar = data["a"] * data["B^zeta"] / data["|B|"] + # This method of computing G0 suggests a fixed point iteration. + G0 = data["a"] + gradpar_analytic = G0 * (1 - epsilon * np.cos(theta_PEST)) + gradpar_theta_analytic = data["iota"] * gradpar_analytic + G0 = np.mean(gradpar_theta_analytic) + np.testing.assert_allclose(gradpar, gradpar_analytic, atol=5e-3) + + # Comparing coefficient calculation here with coefficients from compute/_metric + normalization = -np.sign(data["psi"]) * data["Bref"] * data["a"] ** 2 + cvdrift = data["cvdrift"] * normalization + gbdrift = data["gbdrift"] * normalization + dPdrho = np.mean(-0.5 * (cvdrift - gbdrift) * data["|B|"] ** 2) + alpha_MHD = -0.5 * dPdrho / data["iota"] ** 2 + gds21 = ( + -np.sign(data["iota"]) + * data["shear"] + * dot(data["grad(psi)"], data["grad(alpha)"]) + / data["Bref"] + ) + gds21_analytic = -data["shear"] * ( + data["shear"] * theta_PEST - alpha_MHD / B**4 * np.sin(theta_PEST) + ) + gds21_analytic_low_order = -data["shear"] * ( + data["shear"] * theta_PEST - alpha_MHD / B0**4 * np.sin(theta_PEST) + ) + np.testing.assert_allclose(gds21, gds21_analytic, atol=2e-2) + np.testing.assert_allclose(gds21, gds21_analytic_low_order, atol=2.7e-2) + + fudge_1 = 0.19 + gbdrift_analytic = fudge_1 * ( + -data["shear"] + + np.cos(theta_PEST) + - gds21_analytic / data["shear"] * np.sin(theta_PEST) + ) + gbdrift_analytic_low_order = fudge_1 * ( + -data["shear"] + + np.cos(theta_PEST) + - gds21_analytic_low_order / data["shear"] * np.sin(theta_PEST) + ) + fudge_2 = 0.07 + cvdrift_analytic = gbdrift_analytic + fudge_2 * alpha_MHD / B**2 + cvdrift_analytic_low_order = ( + gbdrift_analytic_low_order + fudge_2 * alpha_MHD / B0**2 + ) + np.testing.assert_allclose(gbdrift, gbdrift_analytic, atol=1e-2) + np.testing.assert_allclose(cvdrift, cvdrift_analytic, atol=2e-2) + np.testing.assert_allclose(gbdrift, gbdrift_analytic_low_order, 
atol=1e-2) + np.testing.assert_allclose(cvdrift, cvdrift_analytic_low_order, atol=2e-2) + + pitch = get_pitch(np.min(B), np.max(B), 100)[1:] + k2 = 0.5 * ((1 - pitch * B0) / (epsilon * pitch * B0) + 1) + I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 = ( + TestBounceQuadrature.elliptic_incomplete(k2) + ) + y = np.sqrt(2 * epsilon * pitch * B0) + I_0, I_2, I_4, I_6 = map(lambda I: I / y, (I_0, I_2, I_4, I_6)) + I_1, I_3, I_5, I_7 = map(lambda I: I * y, (I_1, I_3, I_5, I_7)) + + drift_analytic_num = ( + fudge_2 * alpha_MHD / B0**2 * I_1 + - 0.5 + * fudge_1 + * ( + data["shear"] * (I_0 + I_1 - I_2 - I_3) + + alpha_MHD / B0**4 * (I_4 + I_5) + - (I_6 + I_7) + ) + ) / G0 + drift_analytic_den = I_0 / G0 + drift_analytic = drift_analytic_num / drift_analytic_den + return drift_analytic, cvdrift, gbdrift, pitch + + @pytest.mark.unit + @pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_1d) + def test_drift(self): + """Test bounce-averaged drift with analytical expressions.""" + eq = Equilibrium.load(".//tests//inputs//low-beta-shifted-circle.h5") + psi_boundary = eq.Psi / (2 * np.pi) + psi = 0.25 * psi_boundary + rho = np.sqrt(psi / psi_boundary) + np.testing.assert_allclose(rho, 0.5) + + # Make a set of nodes along a single fieldline. + grid_fsa = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, sym=eq.sym, NFP=eq.NFP) + data = eq.compute(["iota"], grid=grid_fsa) + iota = grid_fsa.compress(data["iota"]).item() + alpha = 0 + zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) + grid = get_rtz_grid( + eq, + rho, + alpha, + zeta, + coordinates="raz", + period=(np.inf, 2 * np.pi, np.inf), + iota=np.array([iota]), + ) + data = eq.compute( + Bounce1D.required_names() + + [ + "cvdrift", + "gbdrift", + "grad(psi)", + "grad(alpha)", + "shear", + "iota", + "psi", + "a", + ], + grid=grid, + ) + np.testing.assert_allclose(data["psi"], psi) + np.testing.assert_allclose(data["iota"], iota) + assert np.all(data["B^zeta"] > 0) + Bref = 2 * np.abs(psi_boundary) / data["a"] ** 2 + data["Bref"] = Bref + data["rho"] = rho + data["alpha"] = alpha + data["zeta"] = zeta + data["psi"] = grid.compress(data["psi"]) + data["iota"] = grid.compress(data["iota"]) + data["shear"] = grid.compress(data["shear"]) + + # Compute analytic approximation. + drift_analytic, cvdrift, gbdrift, pitch = TestBounce1D.drift_analytic(data) + # Compute numerical result. + bounce = Bounce1D( + grid.source_grid, + data, + quad=leggauss(28), # converges to absolute and relative tolerance of 1e-7 + Bref=Bref, + Lref=data["a"], + check=True, + ) + + def integrand_num(cvdrift, gbdrift, B, pitch): + g = jnp.sqrt(1 - pitch * B) + return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) + + def integrand_den(B, pitch): + return 1 / jnp.sqrt(1 - pitch * B) + + drift_numerical_num = bounce.integrate( + pitch=pitch[:, np.newaxis], + integrand=integrand_num, + f=Bounce1D.reshape_data(grid.source_grid, cvdrift, gbdrift), + num_well=1, + check=True, + ) + drift_numerical_den = bounce.integrate( + pitch=pitch[:, np.newaxis], + integrand=integrand_den, + f=[], + num_well=1, + weight=np.ones(zeta.size), + check=True, + ) + drift_numerical = np.squeeze(drift_numerical_num / drift_numerical_den) + msg = "There should be one bounce integral per pitch in this example." 
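+        # Since num_well=1 was passed to integrate(), at most one bounce integral is
+        # returned per pitch; np.squeeze then drops the singleton axes, so the
+        # numerical and analytic arrays should match element-wise per pitch value.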
+ assert drift_numerical.size == drift_analytic.size, msg + np.testing.assert_allclose( + drift_numerical, drift_analytic, atol=5e-3, rtol=5e-2 + ) + + self._test_bounce_autodiff( + bounce, + integrand_num, + f=[cvdrift, gbdrift], + weight=np.ones(zeta.size), + ) + + fig, ax = plt.subplots() + ax.plot(1 / pitch, drift_analytic) + ax.plot(1 / pitch, drift_numerical) + return fig + + @staticmethod + def _test_bounce_autodiff(bounce, integrand, **kwargs): + """Make sure reverse mode AD works correctly on this algorithm.""" + + def fun1(pitch): + return jnp.sum(bounce.integrate(pitch, integrand, check=False, **kwargs)) + + def fun2(pitch): + return jnp.sum( + bounce.integrate(pitch, integrand_grad, check=True, **kwargs) + ) + + def integrand_grad(*args, **kwargs2): + fun = jnp.vectorize( + grad(integrand, -1), signature="()," * len(kwargs["f"]) + "(),()->()" + ) + return fun(*args, *kwargs2.values()) + + pitch = 1.0 + truth = 650 # Extrapolated from plot. + assert np.isclose(grad(fun1)(pitch), truth, rtol=1e-3) + # Make sure bounce points get differentiated too. + result = fun2(pitch) + assert np.isfinite(result) and not np.isclose(result, truth, rtol=1e-3) From baa907d62fa3aebe5c9184f8c0ab081891e37a19 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 25 Aug 2024 17:31:55 -0400 Subject: [PATCH 214/241] Fix some stuff from previous commit --- desc/integrals/bounce_integral.py | 16 +++++------ desc/integrals/bounce_utils.py | 5 ++-- tests/test_fourier_bounce.py | 12 ++++---- tests/test_integrals.py | 47 +++++++++++++++++-------------- 4 files changed, 41 insertions(+), 39 deletions(-) diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index b4fb5ab416..c8d3809f8e 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -1,4 +1,4 @@ -"""Bounce integrals, along field lines or otherwise.""" +"""Methods for computing bounce integrals (singular or otherwise).""" import numpy as np from interpax import CubicHermiteSpline @@ -466,7 +466,7 @@ def _check_shape(self, z1, z2, k): errorif(not (z1.ndim == z2.ndim == k.ndim == self.cheb.ndim)) return z1, z2, k - def check_intersect1d(self, z1, z2, k, pad_value=0.0, plot=True, **kwargs): + def check_intersect1d(self, z1, z2, k, plot=True, **kwargs): """Check that intersects are computed correctly. Parameters @@ -478,8 +478,6 @@ def check_intersect1d(self, z1, z2, k, pad_value=0.0, plot=True, **kwargs): k : jnp.ndarray Shape must broadcast with (k.shape[0], *self.cheb.shape[:-2]). k such that fₓ(yᵢ) = k. - pad_value : float - Value that pads ``z1`` and ``z2`` arrays. plot : bool Whether to plot stuff. Default is true. kwargs : dict @@ -487,7 +485,7 @@ def check_intersect1d(self, z1, z2, k, pad_value=0.0, plot=True, **kwargs): """ assert z1.shape == z2.shape - mask = (z1 - z2) != pad_value + mask = (z1 - z2) != 0.0 z1 = jnp.where(mask, z1, jnp.nan) z2 = jnp.where(mask, z2, jnp.nan) z1, z2, k = self._check_shape(z1, z2, k) @@ -551,7 +549,6 @@ def plot1d( hlabel=r"$z$", vlabel=r"$f(z)$", show=True, - pad_value=0.0, ): """Plot the piecewise Chebyshev series. @@ -583,8 +580,6 @@ def plot1d( Vertical axis label. show : bool Whether to show the plot. Default is true. - pad_value : float - Doesn't plot intersects where ``z1-z2==pad_value``. Returns ------- @@ -607,7 +602,6 @@ def plot1d( k=k, k_transparency=k_transparency, klabel=klabel, - pad_value=pad_value, ) ax.set_xlabel(hlabel) ax.set_ylabel(vlabel) @@ -1168,6 +1162,10 @@ class Bounce1D: These are used as knots to construct splines. 
A reference density is 100 knots per toroidal transit. + Examples + -------- + See ``tests/test_integrals.py::TestBounce1D::test_integrate_checks``. + Attributes ---------- B : jnp.ndarray diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index 3482e5d4c9..9f97a30302 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -828,7 +828,6 @@ def plot_ppoly( k=k, k_transparency=k_transparency, klabel=klabel, - pad_value=0.0, ) ax.set_xlabel(hlabel) ax.set_ylabel(vlabel) @@ -849,7 +848,7 @@ def _add2legend(legend, lines): legend[label] = line -def _plot_intersect(ax, legend, z1, z2, k, k_transparency, klabel, pad_value=0.0): +def _plot_intersect(ax, legend, z1, z2, k, k_transparency, klabel): """Plot intersects on ``ax``.""" if k is None: return @@ -867,7 +866,7 @@ def _plot_intersect(ax, legend, z1, z2, k, k_transparency, klabel, pad_value=0.0 for i in range(k.size): _z1, _z2 = z1[i], z2[i] if _z1.size == _z2.size: - mask = (z1 - z2) != pad_value + mask = (z1 - z2) != 0.0 _z1 = z1[mask] _z2 = z2[mask] ax.scatter(_z1, jnp.full_like(_z1, k[i]), marker="v", color="tab:red") diff --git a/tests/test_fourier_bounce.py b/tests/test_fourier_bounce.py index aab1f64de7..782f4610ff 100644 --- a/tests/test_fourier_bounce.py +++ b/tests/test_fourier_bounce.py @@ -5,17 +5,17 @@ from matplotlib import pyplot as plt from numpy.polynomial.chebyshev import chebinterpolate, chebroots from numpy.polynomial.legendre import leggauss -from tests.test_bounce_integral import _drift_analytic +from tests.test_integrals import TestBounce1D from tests.test_plotting import tol_1d -from desc._bounce_utils.bounce_integral import filter_bounce_points, get_pitch from desc.backend import jnp from desc.equilibrium import Equilibrium from desc.equilibrium.coords import get_rtz_grid, map_coordinates from desc.examples import get from desc.grid import LinearGrid from desc.integrals import Bounce2D -from desc.integrals.fourier_bounce_integral import FourierChebyshevBasis, _get_alphas +from desc.integrals.bounce_integral import FourierChebyshevBasis +from desc.integrals.bounce_utils import get_alpha, get_pitch from desc.integrals.interp_utils import fourier_pts @@ -27,7 +27,7 @@ def test_alpha_sequence(alpha_0, iota, num_period, period): """Test field line poloidal label tracking.""" iota = np.atleast_1d(iota) - alphas = _get_alphas(alpha_0, iota, num_period, period) + alphas = get_alpha(alpha_0, iota, num_period, period) assert alphas.shape == (iota.size, num_period) for i in range(iota.size): assert np.unique(alphas[i]).size == num_period, f"{iota} is irrational" @@ -65,7 +65,7 @@ def test_bp1_first(self): pitch = 1 / np.linspace(1, 4, 20) bp1, bp2 = pcb.intersect1d(pitch) pcb.check_intersect1d(bp1, bp2, pitch) - bp1, bp2 = filter_bounce_points(bp1, bp2) + bp1, bp2 = TestBouncePoints.filter(bp1, bp2) def f(z): return -2 * np.cos(1 / (0.1 + z**2)) + 2 @@ -153,7 +153,7 @@ def test_drift(): data["shear"] = grid.compress(data["shear"]) # Compute analytic approximation. - drift_analytic, cvdrift, gbdrift, pitch = _drift_analytic(data) + drift_analytic, cvdrift, gbdrift, pitch = TestBounce1D.drift_analytic(data) # Compute numerical result. 
grid = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP) data_2 = eq.compute( diff --git a/tests/test_integrals.py b/tests/test_integrals.py index 752be71614..00a740905d 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -720,14 +720,15 @@ def test_biest_interpolators(self): np.testing.assert_allclose(g1, ff) -def _filter_bounce_points(bp1, bp2): - mask = (bp1 - bp2) != 0 - return bp1[mask], bp2[mask] - - class TestBouncePoints: """Test that bounce points are computed correctly.""" + @staticmethod + def filter(bp1, bp2): + """Remove bounce points whose integrals have zero measure.""" + mask = (bp1 - bp2) != 0.0 + return bp1[mask], bp2[mask] + @staticmethod @pytest.mark.unit def test_bp1_first(): @@ -739,7 +740,7 @@ def test_bp1_first(): pitch = 2.0 intersect = B.solve(1 / pitch, extrapolate=False) bp1, bp2 = bounce_points(pitch, knots, B.c, B.derivative().c, check=True) - bp1, bp2 = _filter_bounce_points(bp1, bp2) + bp1, bp2 = TestBouncePoints.filter(bp1, bp2) assert bp1.size and bp2.size np.testing.assert_allclose(bp1, intersect[0::2]) np.testing.assert_allclose(bp2, intersect[1::2]) @@ -755,7 +756,7 @@ def test_bp2_first(): pitch = 2.0 intersect = B.solve(1 / pitch, extrapolate=False) bp1, bp2 = bounce_points(pitch, k, B.c, B.derivative().c, check=True) - bp1, bp2 = _filter_bounce_points(bp1, bp2) + bp1, bp2 = TestBouncePoints.filter(bp1, bp2) assert bp1.size and bp2.size np.testing.assert_allclose(bp1, intersect[1:-1:2]) np.testing.assert_allclose(bp2, intersect[0::2][1:]) @@ -773,7 +774,7 @@ def test_bp1_before_extrema(): B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[3] + 1e-13 bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) - bp1, bp2 = _filter_bounce_points(bp1, bp2) + bp1, bp2 = TestBouncePoints.filter(bp1, bp2) assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1[1], 1.982767, rtol=1e-6) @@ -797,7 +798,7 @@ def test_bp2_before_extrema(): B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) - bp1, bp2 = _filter_bounce_points(bp1, bp2) + bp1, bp2 = TestBouncePoints.filter(bp1, bp2) assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1, intersect[[0, -2]]) @@ -821,7 +822,7 @@ def test_extrema_first_and_before_bp1(): pitch, k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], check=True, plot=False ) plot_ppoly(B, z1=bp1, z2=bp2, k=1 / pitch, start=k[2]) - bp1, bp2 = _filter_bounce_points(bp1, bp2) + bp1, bp2 = TestBouncePoints.filter(bp1, bp2) assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1[0], 0.835319, rtol=1e-6) @@ -844,7 +845,7 @@ def test_extrema_first_and_before_bp2(): B_z_ra = B.derivative() pitch = 1 / B(B_z_ra.roots(extrapolate=False))[1] + 1e-13 bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) - bp1, bp2 = _filter_bounce_points(bp1, bp2) + bp1, bp2 = TestBouncePoints.filter(bp1, bp2) assert bp1.size and bp2.size # Our routine correctly detects intersection, while scipy, jnp.root fails. 
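+        # (Context: the pitch above is chosen within 1e-13 of tangency to a local
+        # extremum of |B|, so the intersection is a near-double root; see the
+        # double-root comments below.)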
intersect = B.solve(1 / pitch, extrapolate=False) @@ -1034,8 +1035,14 @@ class TestBounce1D: """Test bounce integral methods that use one-dimensional local splines.""" @pytest.mark.unit - def test_bounce_integral_checks(self): + def test_integrate_checks(self): """Test that all the internal correctness checks pass for real example.""" + # noqa: D202 + # Suppose we want to compute a bounce average of the function + # f(ℓ) = (1 − λ|B|/2) * g_zz, where g_zz is the squared norm of the + # toroidal basis vector on some set of field lines specified by (ρ, α) + # coordinates. This is defined as + # [∫ f(ℓ) / √(1 − λ|B|) dℓ] / [∫ 1 / √(1 − λ|B|) dℓ] def numerator(g_zz, B, pitch): f = (1 - pitch * B / 2) * g_zz @@ -1044,18 +1051,16 @@ def numerator(g_zz, B, pitch): def denominator(B, pitch): return 1 / jnp.sqrt(1 - pitch * B) - # Suppose we want to compute a bounce average of the function - # f(ℓ) = (1 − λ|B|/2) * g_zz, where g_zz is the squared norm of the - # toroidal basis vector on some set of field lines specified by (ρ, α) - # coordinates. This is defined as - # [∫ f(ℓ) / √(1 − λ|B|) dℓ] / [∫ 1 / √(1 − λ|B|) dℓ] - eq = get("HELIOTRON") - # Clebsch-Type field-line coordinates ρ, α, ζ. + # Pick flux surfaces, field lines, and how far to follow the field line + # in Clebsch-Type field-line coordinates ρ, α, ζ. rho = np.linspace(0.1, 1, 6) alpha = np.array([0]) - knots = np.linspace(-2 * np.pi, 2 * np.pi, 200) + zeta = np.linspace(-2 * np.pi, 2 * np.pi, 200) + + eq = get("HELIOTRON") + # Convert above coordinates to DESC computational coordinates. grid = get_rtz_grid( - eq, rho, alpha, knots, coordinates="raz", period=(np.inf, 2 * np.pi, np.inf) + eq, rho, alpha, zeta, coordinates="raz", period=(np.inf, 2 * np.pi, np.inf) ) data = eq.compute( Bounce1D.required_names() + ["min_tz |B|", "max_tz |B|", "g_zz"], grid=grid From cee3da72ec2c3dd6a0cb01f55ac6d9d929dd285c Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 25 Aug 2024 17:54:34 -0400 Subject: [PATCH 215/241] Change interp2argmin to expect already reshaped data to be consistent with nw API --- desc/integrals/bounce_integral.py | 38 +++++++++++++++---------------- desc/integrals/bounce_utils.py | 24 ++++++++++++------- desc/integrals/interp_utils.py | 4 ++-- tests/test_integrals.py | 8 +++---- 4 files changed, 40 insertions(+), 34 deletions(-) diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index c8d3809f8e..ac22db4a0b 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -9,7 +9,6 @@ from desc.integrals.bounce_utils import ( _add2legend, _check_bounce_points, - _interp_to_argmin_B_soft, _plot_intersect, bounce_points, bounce_quadrature, @@ -17,13 +16,14 @@ epigraph_and, flatten_matrix, get_alpha, + interp_to_argmin_B_soft, plot_ppoly, subtract, ) from desc.integrals.interp_utils import ( - _filter_distinct, cheb_from_dct, cheb_pts, + filter_distinct, fourier_pts, harmonic, idct_non_uniform, @@ -297,7 +297,7 @@ def intersect2d(self, k=0.0, eps=_eps): # Intersects must satisfy y ∈ [-1, 1]. # Pick sentinel such that only distinct roots are considered intersects. - y = _filter_distinct(y, sentinel=-2.0, eps=eps) + y = filter_distinct(y, sentinel=-2.0, eps=eps) is_intersect = (jnp.abs(y.imag) <= eps) & (jnp.abs(y.real) <= 1.0) y = jnp.where(is_intersect, y.real, 1.0) # ensure y is in domain of arcos @@ -324,12 +324,12 @@ def intersect1d(self, k=0.0, num_intersect=None, pad_value=0.0): Shape must broadcast with (..., *cheb.shape[:-2]). Specify to find solutions yᵢ to fₓ(yᵢ) = k. 
Default 0. num_intersect : int or None - If not specified, then all intersects are returned in an array whose - last axis has size ``self.M*(self.N-1)``. If there were less than that many - intersects detected, then the last axis of the returned arrays is padded - with ``pad_value``. Specify to return the first ``num_intersect`` pairs - of intersects. This is useful if ``num_intersect`` tightly bounds the - actual number. + Specify to return the first ``num_intersect`` intersects. + This is useful if ``num_intersect`` tightly bounds the actual number. + + If not specified, then all intersects are returned. If there were fewer + intersects detected than the size of the last axis of the returned arrays, + then that axis is padded with ``pad_value``. pad_value : float Value with which to pad array. Default 0. @@ -988,8 +988,8 @@ def bounce_points(self, pitch, num_well=None): num_well : int or None Specify to return the first ``num_well`` pairs of bounce points for each pitch along each field line. This is useful if ``num_well`` tightly - bounds the actual number of wells. As a reference, there are typically - at most 5 wells per toroidal transit for a given pitch. + bounds the actual number. As a reference, there are typically at most 5 + wells per toroidal transit for a given pitch. If not specified, then all bounce points are returned. If there were fewer wells detected along a field line than the size of the last axis of the @@ -1050,8 +1050,8 @@ def integrate(self, pitch, integrand, f, weight=None, num_well=None): num_well : int or None Specify to return the first ``num_well`` pairs of bounce points for each pitch along each field line. This is useful if ``num_well`` tightly - bounds the actual number of wells. As a reference, there are typically - at most 5 wells per toroidal transit for a given pitch. + bounds the actual number. As a reference, there are typically at most 5 + wells per toroidal transit for a given pitch. If not specified, then all bounce points are returned. If there were fewer wells detected along a field line than the size of the last axis of the @@ -1300,8 +1300,8 @@ def bounce_points(self, pitch, num_well=None): num_well : int or None Specify to return the first ``num_well`` pairs of bounce points for each pitch along each field line. This is useful if ``num_well`` tightly - bounds the actual number of wells. As a reference, there are typically - at most 5 wells per toroidal transit for a given pitch. + bounds the actual number. As a reference, there are typically at most 5 + wells per toroidal transit for a given pitch. If not specified, then all bounce points are returned. If there were fewer wells detected along a field line than the size of the last axis of the @@ -1397,7 +1397,7 @@ def integrate( ``integrand``. Use the method ``self.reshape_data`` to reshape the data into the expected shape. weight : jnp.ndarray - Shape (L * M, N). + Shape must broadcast with (L * M, N). If supplied, the bounce integral labeled by well j is weighted such that the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` interpolated to the deepest point in the magnetic well. Use the method @@ -1405,8 +1405,8 @@ def integrate( num_well : int or None Specify to return the first ``num_well`` pairs of bounce points for each pitch along each field line. This is useful if ``num_well`` tightly - bounds the actual number of wells. As a reference, there are typically - at most 5 wells per toroidal transit for a given pitch. + bounds the actual number. 
As a reference, there are typically at most 5 + wells per toroidal transit for a given pitch. If not specified, then all bounce points are returned. If there were fewer wells detected along a field line than the size of the last axis of the @@ -1445,7 +1445,7 @@ def integrate( check=check, ) if weight is not None: - result *= _interp_to_argmin_B_soft( + result *= interp_to_argmin_B_soft( g=weight, bp1=bp1, bp2=bp2, diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index 9f97a30302..b87a7d818a 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -239,8 +239,8 @@ def bounce_points( num_well : int or None Specify to return the first ``num_well`` pairs of bounce points for each pitch along each field line. This is useful if ``num_well`` tightly - bounds the actual number of wells. As a reference, there are typically - at most 5 wells per toroidal transit for a given pitch. + bounds the actual number. As a reference, there are typically at most 5 + wells per toroidal transit for a given pitch. If not specified, then all bounce points are returned. If there were fewer wells detected along a field line than the size of the last axis of the @@ -685,13 +685,16 @@ def _get_extrema(knots, B, dB_dz, sentinel=jnp.nan): return extrema, B_extrema -def _interp_to_argmin_B_soft(g, bp1, bp2, knots, B, dB_dz, method="cubic", beta=-50): +def interp_to_argmin_B_soft(g, bp1, bp2, knots, B, dB_dz, method="cubic", beta=-50): """Interpolate ``g`` to the deepest point in the magnetic well. Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A = argmin_E |B|(ζ). Returns mean_A g(ζ). Parameters ---------- + g : jnp.ndarray + Shape must broadcast with (S, knots.size). + Values evaluated on ``knots`` to interpolate. beta : float More negative gives exponentially better approximation at the expense of noisier gradients - noisier in the physics sense (unrelated @@ -712,19 +715,24 @@ def _interp_to_argmin_B_soft(g, bp1, bp2, knots, B, dB_dz, method="cubic", beta= ) g = jnp.linalg.vecdot( argmin, - interp1d_vec(ext, knots, g.reshape(-1, knots.size), method=method)[ - :, jnp.newaxis - ], + interp1d_vec(ext, knots, jnp.atleast_2d(g), method=method)[:, jnp.newaxis], ) assert g.shape == bp1.shape == bp2.shape return g # Less efficient than soft if P >> 1. -def _interp_to_argmin_B_hard(g, bp1, bp2, knots, B, dB_dz, method="cubic"): +def interp_to_argmin_B_hard(g, bp1, bp2, knots, B, dB_dz, method="cubic"): """Interpolate ``g`` to the deepest point in the magnetic well. Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A ∈ argmin_E |B|(ζ). Returns g(A). + + Parameters + ---------- + g : jnp.ndarray + Shape must broadcast with (S, knots.size). + Values evaluated on ``knots`` to interpolate. 
+ """ ext, B = _get_extrema(knots, B, dB_dz, sentinel=0) assert ext.shape[0] == B.shape[0] == bp1.shape[1] == bp2.shape[1] @@ -738,7 +746,7 @@ def _interp_to_argmin_B_hard(g, bp1, bp2, knots, B, dB_dz, method="cubic"): axis=-1, ) A = jnp.take_along_axis(ext[jnp.newaxis], argmin, axis=-1) - g = interp1d_vec(A, knots, g.reshape(-1, knots.size), method=method) + g = interp1d_vec(A, knots, jnp.atleast_2d(g), method=method) assert g.shape == bp1.shape == bp2.shape return g diff --git a/desc/integrals/interp_utils.py b/desc/integrals/interp_utils.py index f8b75c05b9..562b2edfb7 100644 --- a/desc/integrals/interp_utils.py +++ b/desc/integrals/interp_utils.py @@ -557,7 +557,7 @@ def poly_root( if sort or distinct: r = jnp.sort(r, axis=-1) - return _filter_distinct(r, sentinel, eps) if distinct else r + return filter_distinct(r, sentinel, eps) if distinct else r def _root_cubic(a, b, c, d, sentinel, eps, distinct): @@ -640,7 +640,7 @@ def _concat_sentinel(r, sentinel, num=1): return jnp.append(r, sent, axis=-1) -def _filter_distinct(r, sentinel, eps): +def filter_distinct(r, sentinel, eps): """Set all but one of matching adjacent elements in ``r`` to ``sentinel``.""" # eps needs to be low enough that close distinct roots do not get removed. # Otherwise, algorithms relying on continuity will fail. diff --git a/tests/test_integrals.py b/tests/test_integrals.py index 00a740905d..896b412b69 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -36,10 +36,10 @@ ) from desc.integrals.bounce_utils import ( _get_extrema, - _interp_to_argmin_B_hard, - _interp_to_argmin_B_soft, bounce_points, get_pitch, + interp_to_argmin_B_hard, + interp_to_argmin_B_soft, plot_ppoly, ) from desc.integrals.quad_utils import ( @@ -1096,9 +1096,7 @@ def denominator(B, pitch): print(pitch[:, i, j]) @pytest.mark.unit - @pytest.mark.parametrize( - "func", [_interp_to_argmin_B_soft, _interp_to_argmin_B_hard] - ) + @pytest.mark.parametrize("func", [interp_to_argmin_B_soft, interp_to_argmin_B_hard]) def test_interp_to_argmin_B(self, func): """Test argmin interpolation.""" # noqa: D202 From a1f249c33b8aa574a9d88a035f29df14b282cb8a Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 25 Aug 2024 19:47:32 -0400 Subject: [PATCH 216/241] Containerize and refactor basis used in bounce integrals --- desc/integrals/basis.py | 688 ++++++++++++++++++++++++++++++ desc/integrals/bounce_integral.py | 640 ++------------------------- desc/integrals/bounce_utils.py | 165 +------ desc/integrals/interp_utils.py | 8 +- desc/utils.py | 5 + tests/test_fourier_bounce.py | 2 +- 6 files changed, 752 insertions(+), 756 deletions(-) create mode 100644 desc/integrals/basis.py diff --git a/desc/integrals/basis.py b/desc/integrals/basis.py new file mode 100644 index 0000000000..72aecaac66 --- /dev/null +++ b/desc/integrals/basis.py @@ -0,0 +1,688 @@ +"""Fast transformable basis.""" + +from functools import partial + +import numpy as np +from matplotlib import pyplot as plt + +from desc.backend import dct, flatnonzero, idct, irfft, jnp, put, rfft +from desc.integrals.interp_utils import ( + cheb_from_dct, + cheb_pts, + chebroots_vec, + filter_distinct, + fourier_pts, + harmonic, + idct_non_uniform, + irfft_non_uniform, +) +from desc.integrals.quad_utils import bijection_from_disc, bijection_to_disc +from desc.utils import ( + atleast_2d_end, + atleast_3d_mid, + atleast_nd, + errorif, + flatten_matrix, + isposint, + setdefault, + take_mask, +) + + +def _subtract(c, k): + """Subtract ``k`` from first index of last axis of ``c``. 
+ + Semantically same as ``return c.copy().at[...,0].add(-k)``, + but allows dimension to increase. + """ + c_0 = c[..., 0] - k + c = jnp.concatenate( + [ + c_0[..., jnp.newaxis], + jnp.broadcast_to(c[..., 1:], (*c_0.shape, c.shape[-1] - 1)), + ], + axis=-1, + ) + return c + + +@partial(jnp.vectorize, signature="(m),(m)->(m)") +def epigraph_and(is_intersect, df_dy_sign): + """Set and epigraph of f with ``is_intersect``. + + Remove intersects for which there does not exist a connected path between + adjacent intersects in the epigraph of a continuous map ``f``. + + Parameters + ---------- + is_intersect : jnp.ndarray + Boolean array indicating whether element is an intersect. + df_dy_sign : jnp.ndarray + Shape ``is_intersect.shape``. + Sign of ∂f/∂y (yᵢ) for f(yᵢ) = 0. + + Returns + ------- + is_intersect : jnp.ndarray + Boolean array indicating whether element is an intersect + and satisfies the stated condition. + + """ + # The pairs ``y1`` and ``y2`` are boundaries of an integral only if ``y1 <= y2``. + # For the integrals to be over wells, it is required that the first intersect + # has a non-positive derivative. Now, by continuity, + # ``df_dy_sign[...,k]<=0`` implies ``df_dy_sign[...,k+1]>=0``, + # so there can be at most one inversion, and if it exists, the inversion + # must be at the first pair. To correct the inversion, it suffices to disqualify the + # first intersect as a right boundary, except under an edge case of a series of + # inflection points. + idx = flatnonzero(is_intersect, size=2, fill_value=-1) # idx of first 2 intersects + edge_case = ( + (df_dy_sign[idx[0]] == 0) + & (df_dy_sign[idx[1]] < 0) + & is_intersect[idx[0]] + & is_intersect[idx[1]] + # In theory, we need to keep propagating this edge case, e.g. + # (df_dy_sign[..., 1] < 0) | ( + # (df_dy_sign[..., 1] == 0) & (df_dy_sign[..., 2] < 0)... + # ). + # At each step, the likelihood that an intersection has already been lost + # due to floating point errors grows, so the real solution is to pick a less + # degenerate pitch value - one that does not ride the global extrema of |B|. + ) + return put(is_intersect, idx[0], edge_case) + + +class FourierChebyshevBasis: + """Fourier-Chebyshev series. + + f(x, y) = ∑ₘₙ aₘₙ ψₘ(x) Tₙ(y) + where ψₘ are trigonometric polynomials on [0, 2π] + and Tₙ are Chebyshev polynomials on [−yₘᵢₙ, yₘₐₓ]. + + Notes + ----- + Performance may improve significantly + if the spectral resolutions ``M`` and ``N`` are powers of two. + + Attributes + ---------- + M : int + Fourier spectral resolution. + N : int + Chebyshev spectral resolution. + lobatto : bool + Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) + instead of the interior roots grid for Chebyshev points. + domain : (float, float) + Domain for y coordinates. + + """ + + def __init__(self, f, domain=(-1, 1), lobatto=False): + """Interpolate Fourier-Chebyshev basis to ``f``. + + Parameters + ---------- + f : jnp.ndarray + Shape (..., M, N). + Samples of real function on the ``FourierChebyshevBasis.nodes`` grid. + domain : (float, float) + Domain for y coordinates. Default is [-1, 1]. + lobatto : bool + Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) + instead of the interior roots grid for Chebyshev points. 
+ + """ + self.M = f.shape[-2] + self.N = f.shape[-1] + errorif(domain[0] > domain[-1], msg="Got inverted domain.") + self.domain = tuple(domain) + errorif(lobatto, NotImplementedError, "JAX has not implemented type 1 DCT.") + self.lobatto = bool(lobatto) + self._c = FourierChebyshevBasis._fast_transform(f, self.lobatto) + + @staticmethod + def _fast_transform(f, lobatto): + M = f.shape[-2] + N = f.shape[-1] + return rfft(dct(f, type=2 - lobatto, axis=-1), axis=-2) / (M * (N - lobatto)) + + @staticmethod + def nodes(M, N, L=None, domain=(-1, 1), lobatto=False): + """Tensor product grid of optimal collocation nodes for this basis. + + Parameters + ---------- + M : int + Grid resolution in x direction. Preferably power of 2. + N : int + Grid resolution in y direction. Preferably power of 2. + L : int or jnp.ndarray + Optional, resolution in radial direction of domain [0, 1]. + May also be an array of coordinates values. If given, then the + returned ``coords`` is a 3D tensor-product with shape (L * M * N, 3). + domain : (float, float) + Domain for y coordinates. Default is [-1, 1]. + lobatto : bool + Whether to use the Gauss-Lobatto (Extrema-plus-Endpoint) + instead of the interior roots grid for Chebyshev points. + + Returns + ------- + coords : jnp.ndarray + Shape (M * N, 2). + Grid of (x, y) points for optimal interpolation. + + """ + x = fourier_pts(M) + y = cheb_pts(N, lobatto, domain) + if L is not None: + if isposint(L): + L = jnp.flipud(jnp.linspace(1, 0, L, endpoint=False)) + coords = (L, x, y) + else: + coords = (x, y) + coords = list(map(jnp.ravel, jnp.meshgrid(*coords, indexing="ij"))) + coords = jnp.column_stack(coords) + return coords + + def evaluate(self, M, N): + """Evaluate Fourier-Chebyshev series. + + Parameters + ---------- + M : int + Grid resolution in x direction. Preferably power of 2. + N : int + Grid resolution in y direction. Preferably power of 2. + + Returns + ------- + fq : jnp.ndarray + Shape (..., M, N) + Fourier-Chebyshev series evaluated at ``FourierChebyshevBasis.nodes(M, N)``. + + """ + fq = idct(irfft(self._c, n=M, axis=-2), type=2 - self.lobatto, n=N, axis=-1) * ( + M * (N - self.lobatto) + ) + return fq + + def harmonics(self): + """Spectral coefficients aₘₙ of the interpolating polynomial. + + Transform Fourier interpolant harmonics to Nyquist trigonometric + interpolant harmonics so that the coefficients are all real. + + Returns + ------- + a_mn : jnp.ndarray + Shape (..., M, N). + Real valued spectral coefficients for Fourier-Chebyshev basis. + + """ + a_mn = harmonic(cheb_from_dct(self._c, axis=-1), self.M, axis=-2) + assert a_mn.shape[-2:] == (self.M, self.N) + return a_mn + + def compute_cheb(self, x): + """Evaluate Fourier basis at ``x`` to obtain set of 1D Chebyshev coefficients. + + Parameters + ---------- + x : jnp.ndarray + Points to evaluate Fourier basis. + + Returns + ------- + cheb : ChebyshevBasisSet + Chebyshev coefficients αₙ(x=``x``) for f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). + + """ + # Always add new axis to broadcast against Chebyshev coefficients. + x = jnp.atleast_1d(x)[..., jnp.newaxis] + cheb = cheb_from_dct(irfft_non_uniform(x, self._c, self.M, axis=-2), axis=-1) + assert cheb.shape[-2:] == (x.shape[-2], self.N) + return ChebyshevBasisSet(cheb, self.domain) + + +class ChebyshevBasisSet: + """Chebyshev series. + + { fₓ | fₓ : y ↦ ∑ₙ₌₀ᴺ⁻¹ aₙ(x) Tₙ(y) } + and Tₙ are Chebyshev polynomials on [−yₘᵢₙ, yₘₐₓ] + + Attributes + ---------- + cheb : jnp.ndarray + Shape (..., M, N). + Chebyshev coefficients αₙ(x) for fₓ(y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). 
+ M : int + Number of function in this basis set. + N : int + Chebyshev spectral resolution. + domain : (float, float) + Domain for y coordinates. + + """ + + _eps = min(jnp.finfo(jnp.array(1.0).dtype).eps * 1e2, 1e-10) + + def __init__(self, cheb, domain=(-1, 1)): + """Make Chebyshev series basis from given coefficients. + + Parameters + ---------- + cheb : jnp.ndarray + Shape (..., M, N). + Chebyshev coefficients αₙ(x=``x``) for f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). + domain : (float, float) + Domain for y coordinates. Default is [-1, 1]. + + """ + self.cheb = jnp.atleast_2d(cheb) + errorif(domain[0] > domain[-1], msg="Got inverted domain.") + self.domain = tuple(domain) + + @property + def M(self): + """Number of function in this basis set.""" + return self.cheb.shape[-2] + + @property + def N(self): + """Chebyshev spectral resolution.""" + return self.cheb.shape[-1] + + @staticmethod + def _chebcast(cheb, arr): + # Input should not have rightmost dimension of cheb that iterates coefficients, + # but may have additional leftmost dimension for batch operation. + errorif( + jnp.ndim(arr) > cheb.ndim, + NotImplementedError, + msg=f"Only one additional axis for batch dimension is allowed. " + f"Got {jnp.ndim(arr) - cheb.ndim + 1} additional axes.", + ) + return cheb if jnp.ndim(arr) < cheb.ndim else cheb[jnp.newaxis] + + def intersect2d(self, k=0.0, eps=_eps): + """Coordinates yᵢ such that f(x, yᵢ) = k(x). + + Parameters + ---------- + k : jnp.ndarray + Shape must broadcast with (..., *cheb.shape[:-1]). + Specify to find solutions yᵢ to f(x, yᵢ) = k(x). Default 0. + eps : float + Absolute tolerance with which to consider value as zero. + + Returns + ------- + y : jnp.ndarray + Shape (..., *cheb.shape[:-1], N - 1). + Solutions yᵢ of f(x, yᵢ) = k(x), in ascending order. + is_intersect : jnp.ndarray + Shape y.shape. + Boolean array into ``y`` indicating whether element is an intersect. + df_dy_sign : jnp.ndarray + Shape y.shape. + Sign of ∂f/∂y (x, yᵢ). + + """ + c = _subtract(ChebyshevBasisSet._chebcast(self.cheb, k), k) + # roots yᵢ of f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y) - k(x) + y = chebroots_vec(c) + assert y.shape == (*c.shape[:-1], self.N - 1) + + # Intersects must satisfy y ∈ [-1, 1]. + # Pick sentinel such that only distinct roots are considered intersects. + y = filter_distinct(y, sentinel=-2.0, eps=eps) + is_intersect = (jnp.abs(y.imag) <= eps) & (jnp.abs(y.real) <= 1.0) + y = jnp.where(is_intersect, y.real, 1.0) # ensure y is in domain of arcos + + # TODO: Multipoint evaluation with FFT. + # Chapter 10, https://doi.org/10.1017/CBO9781139856065. + n = jnp.arange(self.N) + # ∂f/∂y = ∑ₙ₌₀ᴺ⁻¹ aₙ(x) n Uₙ₋₁(y) + # sign ∂f/∂y = sign ∑ₙ₌₀ᴺ⁻¹ aₙ(x) n sin(n arcos y) + df_dy_sign = jnp.sign( + jnp.linalg.vecdot( + n * jnp.sin(n * jnp.arccos(y)[..., jnp.newaxis]), + self.cheb[..., jnp.newaxis, :], + ) + ) + y = bijection_from_disc(y, self.domain[0], self.domain[-1]) + return y, is_intersect, df_dy_sign + + def intersect1d(self, k=0.0, num_intersect=None, pad_value=0.0): + """Coordinates z(x, yᵢ) such that fₓ(yᵢ) = k for every x. + + Parameters + ---------- + k : jnp.ndarray + Shape must broadcast with (..., *cheb.shape[:-2]). + Specify to find solutions yᵢ to fₓ(yᵢ) = k. Default 0. + num_intersect : int or None + Specify to return the first ``num_intersect`` intersects. + This is useful if ``num_intersect`` tightly bounds the actual number. + + If not specified, then all intersects are returned. 
If there were fewer + intersects detected than the size of the last axis of the returned arrays, + then that axis is padded with ``pad_value``. + pad_value : float + Value with which to pad array. Default 0. + + Returns + ------- + z1, z2 : (jnp.ndarray, jnp.ndarray) + Shape broadcasts with (..., *self.cheb.shape[:-2], num_intersect). + ``z1``, ``z2`` holds intersects satisfying ∂f/∂y <= 0, ∂f/∂y >= 0, + respectively. + + """ + errorif( + self.N < 2, + NotImplementedError, + "This method requires the Chebyshev spectral resolution of at " + f"least 2, but got N={self.N}.", + ) + + # Add axis to use same k over all Chebyshev series of the piecewise object. + y, is_intersect, df_dy_sign = self.intersect2d( + jnp.atleast_1d(k)[..., jnp.newaxis] + ) + # Flatten so that last axis enumerates intersects along the piecewise object. + y, is_intersect, df_dy_sign = map( + flatten_matrix, (self.isomorphism_to_C1(y), is_intersect, df_dy_sign) + ) + + # Note for bounce point applications: + # We ignore the degenerate edge case where the boundary shared by adjacent + # polynomials is a left intersect point i.e. ``is_z1`` because the subset of + # pitch values that generate this edge case has zero measure. Note that + # the technique to account for this would be to disqualify intersects + # within ``_eps`` from ``domain[-1]``. + is_z1 = (df_dy_sign <= 0) & is_intersect + is_z2 = (df_dy_sign >= 0) & epigraph_and(is_intersect, df_dy_sign) + + sentinel = self.domain[0] - 1.0 + z1 = take_mask(y, is_z1, size=num_intersect, fill_value=sentinel) + z2 = take_mask(y, is_z2, size=num_intersect, fill_value=sentinel) + + mask = (z1 > sentinel) & (z2 > sentinel) + # Set outside mask to same value so integration is over set of measure zero. + z1 = jnp.where(mask, z1, pad_value) + z2 = jnp.where(mask, z2, pad_value) + return z1, z2 + + def eval1d(self, z, cheb=None): + """Evaluate piecewise Chebyshev spline at coordinates z. + + Parameters + ---------- + z : jnp.ndarray + Shape (..., *cheb.shape[:-2], z.shape[-1]). + Coordinates in [sef.domain[0], ∞). + The coordinates z ∈ ℝ are assumed isomorphic to (x, y) ∈ ℝ² where + ``z // domain`` yields the index into the proper Chebyshev series + along the second to last axis of ``cheb`` and ``z % domain`` is + the coordinate value on the domain of that Chebyshev series. + cheb : jnp.ndarray + Shape (..., M, N). + Chebyshev coefficients to use. If not given, uses ``self.cheb``. + + Returns + ------- + f : jnp.ndarray + Shape z.shape. + Chebyshev basis evaluated at z. + + """ + cheb = self._chebcast(setdefault(cheb, self.cheb), z) + N = cheb.shape[-1] + x_idx, y = self.isomorphism_to_C2(z) + y = bijection_to_disc(y, self.domain[0], self.domain[1]) + # Chebyshev coefficients αₙ for f(z) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x[z]) Tₙ(y[z]) + # are held in cheb with shape (..., num cheb series, N). + cheb = jnp.take_along_axis(cheb, x_idx[..., jnp.newaxis], axis=-2) + f = idct_non_uniform(y, cheb, N) + assert f.shape == z.shape + return f + + def isomorphism_to_C1(self, y): + """Return coordinates z ∈ ℂ isomorphic to (x, y) ∈ ℂ². + + Maps row x of y to z = y + f(x) where f(x) = x * |domain|. + + Parameters + ---------- + y : jnp.ndarray + Shape (..., y.shape[-2], y.shape[-1]). + Second to last axis iterates the rows. + + Returns + ------- + z : jnp.ndarray + Shape y.shape. + Isomorphic coordinates. 
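As a concrete illustration of the isomorphism used here (the domain and values are arbitrary placeholders), row x of ``y`` maps to z = y + x⋅|domain|, and ``divmod`` inverts the map:

.. code-block:: python

    import jax.numpy as jnp

    domain = (0.0, 2 * jnp.pi)
    d = domain[-1] - domain[0]
    y = jnp.array([[0.5, 1.0], [0.5, 1.0]])               # rows indexed by x
    z = y + jnp.arange(y.shape[-2])[:, jnp.newaxis] * d   # isomorphism_to_C1
    x_idx, y_val = jnp.divmod(z - domain[0], d)           # isomorphism_to_C2
    x_idx = x_idx.astype(int)
    y_val += domain[0]
    # x_idx recovers the row index and y_val the original y values.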
+ + """ + assert y.ndim >= 2 + z_shift = jnp.arange(y.shape[-2]) * (self.domain[-1] - self.domain[0]) + z = y + z_shift[:, jnp.newaxis] + return z + + def isomorphism_to_C2(self, z): + """Return coordinates (x, y) ∈ ℂ² isomorphic to z ∈ ℂ. + + Returns index x and value y such that z = f(x) + y where f(x) = x * |domain|. + + Parameters + ---------- + z : jnp.ndarray + Shape z.shape. + + Returns + ------- + x_idx, y_val : (jnp.ndarray, jnp.ndarray) + Shape z.shape. + Isomorphic coordinates. + + """ + x_idx, y_val = jnp.divmod(z - self.domain[0], self.domain[-1] - self.domain[0]) + x_idx = x_idx.astype(int) + y_val += self.domain[0] + return x_idx, y_val + + def _check_shape(self, z1, z2, k): + """Return shapes that broadcast with (k.shape[0], *self.cheb.shape[:-2], W).""" + # Ensure pitch batch dim exists and add back dim to broadcast with wells. + k = atleast_nd(self.cheb.ndim - 1, k)[..., jnp.newaxis] + # Same but back dim already exists. + z1, z2 = atleast_nd(self.cheb.ndim, z1, z2) + # Cheb has shape (..., M, N) and others + # have shape (K, ..., W) + errorif(not (z1.ndim == z2.ndim == k.ndim == self.cheb.ndim)) + return z1, z2, k + + def check_intersect1d(self, z1, z2, k, plot=True, **kwargs): + """Check that intersects are computed correctly. + + Parameters + ---------- + z1, z2 : jnp.ndarray + Shape must broadcast with (k, *self.cheb.shape[:-2], W). + ``z1``, ``z2`` holds intersects satisfying ∂f/∂y <= 0, ∂f/∂y >= 0, + respectively. + k : jnp.ndarray + Shape must broadcast with (k.shape[0], *self.cheb.shape[:-2]). + k such that fₓ(yᵢ) = k. + plot : bool + Whether to plot stuff. Default is true. + kwargs : dict + Keyword arguments into ``self.plot``. + + """ + assert z1.shape == z2.shape + mask = (z1 - z2) != 0.0 + z1 = jnp.where(mask, z1, jnp.nan) + z2 = jnp.where(mask, z2, jnp.nan) + z1, z2, k = self._check_shape(z1, z2, k) + + err_1 = jnp.any(z1 > z2, axis=-1) + err_2 = jnp.any(z1[..., 1:] < z2[..., :-1], axis=-1) + f_m = self.eval1d((z1 + z2) / 2) + assert f_m.shape == z1.shape + err_3 = jnp.any(f_m > k + self._eps, axis=-1) + if not (plot or jnp.any(err_1 | err_2 | err_3)): + return + + # Ensure l axis exists for iteration in below loop. + cheb = atleast_nd(3, self.cheb) + mask, z1, z2, f_m = atleast_3d_mid(mask, z1, z2, f_m) + err_1, err_2, err_3 = atleast_2d_end(err_1, err_2, err_3) + + for l in np.ndindex(cheb.shape[:-2]): + for p in range(k.shape[0]): + idx = (p, *l) + if not (err_1[idx] or err_2[idx] or err_3[idx]): + continue + _z1 = z1[idx][mask[idx]] + _z2 = z2[idx][mask[idx]] + if plot: + self.plot1d( + cheb=cheb[l], + z1=_z1, + z2=_z2, + k=k[idx], + **kwargs, + ) + print(" z1 | z2") + print(jnp.column_stack([_z1, _z2])) + assert not err_1[idx], "Intersects have an inversion.\n" + assert not err_2[idx], "Detected discontinuity.\n" + assert not err_3[idx], ( + "Detected f > k in well. Increase Chebyshev resolution.\n" + f"{f_m[idx][mask[idx]]} > {k[idx] + self._eps}" + ) + idx = (slice(None), *l) + if plot: + self.plot1d( + cheb=cheb[l], + z1=z1[idx], + z2=z2[idx], + k=k[idx], + **kwargs, + ) + + def plot1d( + self, + cheb, + num=1000, + z1=None, + z2=None, + k=None, + k_transparency=0.5, + klabel=r"$k$", + title=r"Intersects $z$ in epigraph of $f(z) = k$", + hlabel=r"$z$", + vlabel=r"$f(z)$", + show=True, + ): + """Plot the piecewise Chebyshev series. + + Parameters + ---------- + cheb : jnp.ndarray + Shape (M, N). + Piecewise Chebyshev series f. + num : int + Number of points to evaluate ``cheb`` for plot. + z1 : jnp.ndarray + Shape (k.shape[0], W). 
+ Optional, intersects with ∂f/∂y <= 0. + z2 : jnp.ndarray + Shape (k.shape[0], W). + Optional, intersects with ∂f/∂y >= 0. + k : jnp.ndarray + Shape (k.shape[0], ). + Optional, k such that fₓ(yᵢ) = k. + k_transparency : float + Transparency of pitch lines. + klabel : float + Label of intersect lines. + title : str + Plot title. + hlabel : str + Horizontal axis label. + vlabel : str + Vertical axis label. + show : bool + Whether to show the plot. Default is true. + + Returns + ------- + fig, ax : matplotlib figure and axes + + """ + fig, ax = plt.subplots() + legend = {} + z = jnp.linspace( + start=self.domain[0], + stop=self.domain[0] + (self.domain[1] - self.domain[0]) * self.M, + num=num, + ) + _add2legend(legend, ax.plot(z, self.eval1d(z, cheb), label=vlabel)) + _plot_intersect( + ax=ax, + legend=legend, + z1=z1, + z2=z2, + k=k, + k_transparency=k_transparency, + klabel=klabel, + ) + ax.set_xlabel(hlabel) + ax.set_ylabel(vlabel) + ax.legend(legend.values(), legend.keys()) + ax.set_title(title) + plt.tight_layout() + if show: + plt.show() + plt.close() + return fig, ax + + +def _add2legend(legend, lines): + """Add lines to legend if it's not already in it.""" + for line in setdefault(lines, [lines], hasattr(lines, "__iter__")): + label = line.get_label() + if label not in legend: + legend[label] = line + + +def _plot_intersect(ax, legend, z1, z2, k, k_transparency, klabel): + """Plot intersects on ``ax``.""" + if k is None: + return + + k = jnp.atleast_1d(jnp.squeeze(k)) + assert k.ndim == 1 + z1, z2 = jnp.atleast_2d(z1, z2) + assert z1.ndim == z2.ndim >= 2 + assert k.shape[0] == z1.shape[0] == z2.shape[0] + for p in k: + _add2legend( + legend, + ax.axhline(p, color="tab:purple", alpha=k_transparency, label=klabel), + ) + for i in range(k.size): + _z1, _z2 = z1[i], z2[i] + if _z1.size == _z2.size: + mask = (z1 - z2) != 0.0 + _z1 = z1[mask] + _z2 = z2[mask] + ax.scatter(_z1, jnp.full_like(_z1, k[i]), marker="v", color="tab:red") + ax.scatter(_z2, jnp.full_like(_z2, k[i]), marker="^", color="tab:green") diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index ac22db4a0b..89dc4f530c 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -1,617 +1,31 @@ """Methods for computing bounce integrals (singular or otherwise).""" -import numpy as np from interpax import CubicHermiteSpline -from matplotlib import pyplot as plt from orthax.legendre import leggauss -from desc.backend import dct, idct, irfft, jnp, rfft +from desc.backend import jnp +from desc.integrals.basis import FourierChebyshevBasis from desc.integrals.bounce_utils import ( - _add2legend, _check_bounce_points, - _plot_intersect, bounce_points, bounce_quadrature, - chebroots_vec, - epigraph_and, - flatten_matrix, get_alpha, interp_to_argmin_B_soft, plot_ppoly, - subtract, ) from desc.integrals.interp_utils import ( - cheb_from_dct, - cheb_pts, - filter_distinct, - fourier_pts, - harmonic, - idct_non_uniform, interp_rfft2, irfft2_non_uniform, - irfft_non_uniform, polyder_vec, transform_to_desc, ) from desc.integrals.quad_utils import ( automorphism_sin, bijection_from_disc, - bijection_to_disc, get_quadrature, grad_automorphism_sin, ) -from desc.utils import ( - atleast_2d_end, - atleast_3d_mid, - atleast_nd, - errorif, - isposint, - setdefault, - take_mask, - warnif, -) - - -class FourierChebyshevBasis: - """Fourier-Chebyshev series. 
- - f(x, y) = ∑ₘₙ aₘₙ ψₘ(x) Tₙ(y) - where ψₘ are trigonometric polynomials on [0, 2π] - and Tₙ are Chebyshev polynomials on [−yₘᵢₙ, yₘₐₓ]. - - Notes - ----- - Performance may improve significantly - if the spectral resolutions ``M`` and ``N`` are powers of two. - - Attributes - ---------- - M : int - Fourier spectral resolution. - N : int - Chebyshev spectral resolution. - lobatto : bool - Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) - instead of the interior roots grid for Chebyshev points. - domain : (float, float) - Domain for y coordinates. - - """ - - def __init__(self, f, domain=(-1, 1), lobatto=False): - """Interpolate Fourier-Chebyshev basis to ``f``. - - Parameters - ---------- - f : jnp.ndarray - Shape (..., M, N). - Samples of real function on the ``FourierChebyshevBasis.nodes`` grid. - domain : (float, float) - Domain for y coordinates. Default is [-1, 1]. - lobatto : bool - Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) - instead of the interior roots grid for Chebyshev points. - - """ - self.M = f.shape[-2] - self.N = f.shape[-1] - errorif(domain[0] > domain[-1], msg="Got inverted domain.") - self.domain = tuple(domain) - errorif(lobatto, NotImplementedError, "JAX has not implemented type 1 DCT.") - self.lobatto = bool(lobatto) - self._c = FourierChebyshevBasis._fast_transform(f, self.lobatto) - - @staticmethod - def _fast_transform(f, lobatto): - M = f.shape[-2] - N = f.shape[-1] - return rfft(dct(f, type=2 - lobatto, axis=-1), axis=-2) / (M * (N - lobatto)) - - @staticmethod - def nodes(M, N, L=None, domain=(-1, 1), lobatto=False): - """Tensor product grid of optimal collocation nodes for this basis. - - Parameters - ---------- - M : int - Grid resolution in x direction. Preferably power of 2. - N : int - Grid resolution in y direction. Preferably power of 2. - L : int or jnp.ndarray - Optional, resolution in radial direction of domain [0, 1]. - May also be an array of coordinates values. If given, then the - returned ``coords`` is a 3D tensor-product with shape (L * M * N, 3). - domain : (float, float) - Domain for y coordinates. Default is [-1, 1]. - lobatto : bool - Whether to use the Gauss-Lobatto (Extrema-plus-Endpoint) - instead of the interior roots grid for Chebyshev points. - - Returns - ------- - coords : jnp.ndarray - Shape (M * N, 2). - Grid of (x, y) points for optimal interpolation. - - """ - x = fourier_pts(M) - y = cheb_pts(N, lobatto, domain) - if L is not None: - if isposint(L): - L = jnp.flipud(jnp.linspace(1, 0, L, endpoint=False)) - coords = (L, x, y) - else: - coords = (x, y) - coords = list(map(jnp.ravel, jnp.meshgrid(*coords, indexing="ij"))) - coords = jnp.column_stack(coords) - return coords - - def evaluate(self, M, N): - """Evaluate Fourier-Chebyshev series. - - Parameters - ---------- - M : int - Grid resolution in x direction. Preferably power of 2. - N : int - Grid resolution in y direction. Preferably power of 2. - - Returns - ------- - fq : jnp.ndarray - Shape (..., M, N) - Fourier-Chebyshev series evaluated at ``FourierChebyshevBasis.nodes(M, N)``. - - """ - fq = idct(irfft(self._c, n=M, axis=-2), type=2 - self.lobatto, n=N, axis=-1) * ( - M * (N - self.lobatto) - ) - return fq - - def harmonics(self): - """Spectral coefficients aₘₙ of the interpolating polynomial. - - Transform Fourier interpolant harmonics to Nyquist trigonometric - interpolant harmonics so that the coefficients are all real. - - Returns - ------- - a_mn : jnp.ndarray - Shape (..., M, N). 
- Real valued spectral coefficients for Fourier-Chebyshev basis. - - """ - a_mn = harmonic(cheb_from_dct(self._c, axis=-1), self.M, axis=-2) - assert a_mn.shape[-2:] == (self.M, self.N) - return a_mn - - def compute_cheb(self, x): - """Evaluate Fourier basis at ``x`` to obtain set of 1D Chebyshev coefficients. - - Parameters - ---------- - x : jnp.ndarray - Points to evaluate Fourier basis. - - Returns - ------- - cheb : ChebyshevBasisSet - Chebyshev coefficients αₙ(x=``x``) for f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). - - """ - # Always add new axis to broadcast against Chebyshev coefficients. - x = jnp.atleast_1d(x)[..., jnp.newaxis] - cheb = cheb_from_dct(irfft_non_uniform(x, self._c, self.M, axis=-2), axis=-1) - assert cheb.shape[-2:] == (x.shape[-2], self.N) - return ChebyshevBasisSet(cheb, self.domain) - - -class ChebyshevBasisSet: - """Chebyshev series. - - { fₓ | fₓ : y ↦ ∑ₙ₌₀ᴺ⁻¹ aₙ(x) Tₙ(y) } - and Tₙ are Chebyshev polynomials on [−yₘᵢₙ, yₘₐₓ] - - Attributes - ---------- - cheb : jnp.ndarray - Shape (..., M, N). - Chebyshev coefficients αₙ(x) for fₓ(y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). - M : int - Number of function in this basis set. - N : int - Chebyshev spectral resolution. - domain : (float, float) - Domain for y coordinates. - - """ - - _eps = min(jnp.finfo(jnp.array(1.0).dtype).eps * 1e2, 1e-10) - - def __init__(self, cheb, domain=(-1, 1)): - """Make Chebyshev series basis from given coefficients. - - Parameters - ---------- - cheb : jnp.ndarray - Shape (..., M, N). - Chebyshev coefficients αₙ(x=``x``) for f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). - domain : (float, float) - Domain for y coordinates. Default is [-1, 1]. - - """ - self.cheb = jnp.atleast_2d(cheb) - errorif(domain[0] > domain[-1], msg="Got inverted domain.") - self.domain = tuple(domain) - - @property - def M(self): - """Number of function in this basis set.""" - return self.cheb.shape[-2] - - @property - def N(self): - """Chebyshev spectral resolution.""" - return self.cheb.shape[-1] - - @staticmethod - def _chebcast(cheb, arr): - # Input should not have rightmost dimension of cheb that iterates coefficients, - # but may have additional leftmost dimension for batch operation. - errorif( - jnp.ndim(arr) > cheb.ndim, - NotImplementedError, - msg=f"Only one additional axis for batch dimension is allowed. " - f"Got {jnp.ndim(arr) - cheb.ndim + 1} additional axes.", - ) - return cheb if jnp.ndim(arr) < cheb.ndim else cheb[jnp.newaxis] - - def intersect2d(self, k=0.0, eps=_eps): - """Coordinates yᵢ such that f(x, yᵢ) = k(x). - - Parameters - ---------- - k : jnp.ndarray - Shape must broadcast with (..., *cheb.shape[:-1]). - Specify to find solutions yᵢ to f(x, yᵢ) = k(x). Default 0. - eps : float - Absolute tolerance with which to consider value as zero. - - Returns - ------- - y : jnp.ndarray - Shape (..., *cheb.shape[:-1], N - 1). - Solutions yᵢ of f(x, yᵢ) = k(x), in ascending order. - is_intersect : jnp.ndarray - Shape y.shape. - Boolean array into ``y`` indicating whether element is an intersect. - df_dy_sign : jnp.ndarray - Shape y.shape. - Sign of ∂f/∂y (x, yᵢ). - - """ - c = subtract(ChebyshevBasisSet._chebcast(self.cheb, k), k) - # roots yᵢ of f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y) - k(x) - y = chebroots_vec(c) - assert y.shape == (*c.shape[:-1], self.N - 1) - - # Intersects must satisfy y ∈ [-1, 1]. - # Pick sentinel such that only distinct roots are considered intersects. 
- y = filter_distinct(y, sentinel=-2.0, eps=eps) - is_intersect = (jnp.abs(y.imag) <= eps) & (jnp.abs(y.real) <= 1.0) - y = jnp.where(is_intersect, y.real, 1.0) # ensure y is in domain of arcos - - # TODO: Multipoint evaluation with FFT. - # Chapter 10, https://doi.org/10.1017/CBO9781139856065. - n = jnp.arange(self.N) - # ∂f/∂y = ∑ₙ₌₀ᴺ⁻¹ aₙ(x) n Uₙ₋₁(y) - # sign ∂f/∂y = sign ∑ₙ₌₀ᴺ⁻¹ aₙ(x) n sin(n arcos y) - df_dy_sign = jnp.sign( - jnp.linalg.vecdot( - n * jnp.sin(n * jnp.arccos(y)[..., jnp.newaxis]), - self.cheb[..., jnp.newaxis, :], - ) - ) - y = bijection_from_disc(y, self.domain[0], self.domain[-1]) - return y, is_intersect, df_dy_sign - - def intersect1d(self, k=0.0, num_intersect=None, pad_value=0.0): - """Coordinates z(x, yᵢ) such that fₓ(yᵢ) = k for every x. - - Parameters - ---------- - k : jnp.ndarray - Shape must broadcast with (..., *cheb.shape[:-2]). - Specify to find solutions yᵢ to fₓ(yᵢ) = k. Default 0. - num_intersect : int or None - Specify to return the first ``num_intersect`` intersects. - This is useful if ``num_intersect`` tightly bounds the actual number. - - If not specified, then all intersects are returned. If there were fewer - intersects detected than the size of the last axis of the returned arrays, - then that axis is padded with ``pad_value``. - pad_value : float - Value with which to pad array. Default 0. - - Returns - ------- - z1, z2 : (jnp.ndarray, jnp.ndarray) - Shape broadcasts with (..., *self.cheb.shape[:-2], num_intersect). - ``z1``, ``z2`` holds intersects satisfying ∂f/∂y <= 0, ∂f/∂y >= 0, - respectively. - - """ - errorif( - self.N < 2, - NotImplementedError, - "This method requires the Chebyshev spectral resolution of at " - f"least 2, but got N={self.N}.", - ) - - # Add axis to use same k over all Chebyshev series of the piecewise object. - y, is_intersect, df_dy_sign = self.intersect2d( - jnp.atleast_1d(k)[..., jnp.newaxis] - ) - # Flatten so that last axis enumerates intersects along the piecewise object. - y, is_intersect, df_dy_sign = map( - flatten_matrix, (self.isomorphism_to_C1(y), is_intersect, df_dy_sign) - ) - - # Note for bounce point applications: - # We ignore the degenerate edge case where the boundary shared by adjacent - # polynomials is a left intersect point i.e. ``is_z1`` because the subset of - # pitch values that generate this edge case has zero measure. Note that - # the technique to account for this would be to disqualify intersects - # within ``_eps`` from ``domain[-1]``. - is_z1 = (df_dy_sign <= 0) & is_intersect - is_z2 = (df_dy_sign >= 0) & epigraph_and(is_intersect, df_dy_sign) - - sentinel = self.domain[0] - 1.0 - z1 = take_mask(y, is_z1, size=num_intersect, fill_value=sentinel) - z2 = take_mask(y, is_z2, size=num_intersect, fill_value=sentinel) - - mask = (z1 > sentinel) & (z2 > sentinel) - # Set outside mask to same value so integration is over set of measure zero. - z1 = jnp.where(mask, z1, pad_value) - z2 = jnp.where(mask, z2, pad_value) - return z1, z2 - - def eval1d(self, z, cheb=None): - """Evaluate piecewise Chebyshev spline at coordinates z. - - Parameters - ---------- - z : jnp.ndarray - Shape (..., *cheb.shape[:-2], z.shape[-1]). - Coordinates in [sef.domain[0], ∞). - The coordinates z ∈ ℝ are assumed isomorphic to (x, y) ∈ ℝ² where - ``z // domain`` yields the index into the proper Chebyshev series - along the second to last axis of ``cheb`` and ``z % domain`` is - the coordinate value on the domain of that Chebyshev series. - cheb : jnp.ndarray - Shape (..., M, N). - Chebyshev coefficients to use. 
If not given, uses ``self.cheb``. - - Returns - ------- - f : jnp.ndarray - Shape z.shape. - Chebyshev basis evaluated at z. - - """ - cheb = self._chebcast(setdefault(cheb, self.cheb), z) - N = cheb.shape[-1] - x_idx, y = self.isomorphism_to_C2(z) - y = bijection_to_disc(y, self.domain[0], self.domain[1]) - # Chebyshev coefficients αₙ for f(z) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x[z]) Tₙ(y[z]) - # are held in cheb with shape (..., num cheb series, N). - cheb = jnp.take_along_axis(cheb, x_idx[..., jnp.newaxis], axis=-2) - f = idct_non_uniform(y, cheb, N) - assert f.shape == z.shape - return f - - def isomorphism_to_C1(self, y): - """Return coordinates z ∈ ℂ isomorphic to (x, y) ∈ ℂ². - - Maps row x of y to z = y + f(x) where f(x) = x * |domain|. - - Parameters - ---------- - y : jnp.ndarray - Shape (..., y.shape[-2], y.shape[-1]). - Second to last axis iterates the rows. - - Returns - ------- - z : jnp.ndarray - Shape y.shape. - Isomorphic coordinates. - - """ - assert y.ndim >= 2 - z_shift = jnp.arange(y.shape[-2]) * (self.domain[-1] - self.domain[0]) - z = y + z_shift[:, jnp.newaxis] - return z - - def isomorphism_to_C2(self, z): - """Return coordinates (x, y) ∈ ℂ² isomorphic to z ∈ ℂ. - - Returns index x and value y such that z = f(x) + y where f(x) = x * |domain|. - - Parameters - ---------- - z : jnp.ndarray - Shape z.shape. - - Returns - ------- - x_idx, y_val : (jnp.ndarray, jnp.ndarray) - Shape z.shape. - Isomorphic coordinates. - - """ - x_idx, y_val = jnp.divmod(z - self.domain[0], self.domain[-1] - self.domain[0]) - x_idx = x_idx.astype(int) - y_val += self.domain[0] - return x_idx, y_val - - def _check_shape(self, z1, z2, k): - """Return shapes that broadcast with (k.shape[0], *self.cheb.shape[:-2], W).""" - # Ensure pitch batch dim exists and add back dim to broadcast with wells. - k = atleast_nd(self.cheb.ndim - 1, k)[..., jnp.newaxis] - # Same but back dim already exists. - z1, z2 = atleast_nd(self.cheb.ndim, z1, z2) - # Cheb has shape (..., M, N) and others - # have shape (K, ..., W) - errorif(not (z1.ndim == z2.ndim == k.ndim == self.cheb.ndim)) - return z1, z2, k - - def check_intersect1d(self, z1, z2, k, plot=True, **kwargs): - """Check that intersects are computed correctly. - - Parameters - ---------- - z1, z2 : jnp.ndarray - Shape must broadcast with (k, *self.cheb.shape[:-2], W). - ``z1``, ``z2`` holds intersects satisfying ∂f/∂y <= 0, ∂f/∂y >= 0, - respectively. - k : jnp.ndarray - Shape must broadcast with (k.shape[0], *self.cheb.shape[:-2]). - k such that fₓ(yᵢ) = k. - plot : bool - Whether to plot stuff. Default is true. - kwargs : dict - Keyword arguments into ``self.plot``. - - """ - assert z1.shape == z2.shape - mask = (z1 - z2) != 0.0 - z1 = jnp.where(mask, z1, jnp.nan) - z2 = jnp.where(mask, z2, jnp.nan) - z1, z2, k = self._check_shape(z1, z2, k) - - err_1 = jnp.any(z1 > z2, axis=-1) - err_2 = jnp.any(z1[..., 1:] < z2[..., :-1], axis=-1) - f_m = self.eval1d((z1 + z2) / 2) - assert f_m.shape == z1.shape - err_3 = jnp.any(f_m > k + self._eps, axis=-1) - if not (plot or jnp.any(err_1 | err_2 | err_3)): - return - - # Ensure l axis exists for iteration in below loop. 
- cheb = atleast_nd(3, self.cheb) - mask, z1, z2, f_m = atleast_3d_mid(mask, z1, z2, f_m) - err_1, err_2, err_3 = atleast_2d_end(err_1, err_2, err_3) - - for l in np.ndindex(cheb.shape[:-2]): - for p in range(k.shape[0]): - idx = (p, *l) - if not (err_1[idx] or err_2[idx] or err_3[idx]): - continue - _z1 = z1[idx][mask[idx]] - _z2 = z2[idx][mask[idx]] - if plot: - self.plot1d( - cheb=cheb[l], - z1=_z1, - z2=_z2, - k=k[idx], - **kwargs, - ) - print(" z1 | z2") - print(jnp.column_stack([_z1, _z2])) - assert not err_1[idx], "Intersects have an inversion.\n" - assert not err_2[idx], "Detected discontinuity.\n" - assert not err_3[idx], ( - "Detected f > k in well. Increase Chebyshev resolution.\n" - f"{f_m[idx][mask[idx]]} > {k[idx] + self._eps}" - ) - idx = (slice(None), *l) - if plot: - self.plot1d( - cheb=cheb[l], - z1=z1[idx], - z2=z2[idx], - k=k[idx], - **kwargs, - ) - - def plot1d( - self, - cheb, - num=1000, - z1=None, - z2=None, - k=None, - k_transparency=0.5, - klabel=r"$k$", - title=r"Intersects $z$ in epigraph of $f(z) = k$", - hlabel=r"$z$", - vlabel=r"$f(z)$", - show=True, - ): - """Plot the piecewise Chebyshev series. - - Parameters - ---------- - cheb : jnp.ndarray - Shape (M, N). - Piecewise Chebyshev series f. - num : int - Number of points to evaluate ``cheb`` for plot. - z1 : jnp.ndarray - Shape (k.shape[0], W). - Optional, intersects with ∂f/∂y <= 0. - z2 : jnp.ndarray - Shape (k.shape[0], W). - Optional, intersects with ∂f/∂y >= 0. - k : jnp.ndarray - Shape (k.shape[0], ). - Optional, k such that fₓ(yᵢ) = k. - k_transparency : float - Transparency of pitch lines. - klabel : float - Label of intersect lines. - title : str - Plot title. - hlabel : str - Horizontal axis label. - vlabel : str - Vertical axis label. - show : bool - Whether to show the plot. Default is true. - - Returns - ------- - fig, ax : matplotlib figure and axes - - """ - fig, ax = plt.subplots() - legend = {} - z = jnp.linspace( - start=self.domain[0], - stop=self.domain[0] + (self.domain[1] - self.domain[0]) * self.M, - num=num, - ) - _add2legend(legend, ax.plot(z, self.eval1d(z, cheb), label=vlabel)) - _plot_intersect( - ax=ax, - legend=legend, - z1=z1, - z2=z2, - k=k, - k_transparency=k_transparency, - klabel=klabel, - ) - ax.set_xlabel(hlabel) - ax.set_ylabel(vlabel) - ax.legend(legend.values(), legend.keys()) - ax.set_title(title) - plt.tight_layout() - if show: - plt.show() - plt.close() - return fig, ax +from desc.utils import errorif, flatten_matrix, setdefault, warnif def _transform_to_clebsch(grid, desc_from_clebsch, M, N, B): @@ -669,6 +83,7 @@ def _transform_to_clebsch(grid, desc_from_clebsch, M, N, B): # Perhaps tell the optimizer to perturb the coefficients of the # |B|(α, ζ) directly? Maybe auto diff to see change on |B|(θ, ζ) # and hence stream functions. just guessing. not sure if feasible / useful. + # TODO: Allow multiple starting labels for near-rational surfaces. # can just concatenate along second to last axis of cheb. @@ -697,7 +112,7 @@ class Bounce2D: along field lines between bounce points, it is required to identify these points with field-line-following coordinates. In the special case of a linear function summing integrals between bounce points over a flux surface, arbitrary - coordinate systems may be used as this operation becomes a surface integral, + coordinate systems may be used as this operation reduces to a surface integral, which is invariant to the order of summation. 
The DESC coordinate system is related to field-line-following coordinate @@ -706,11 +121,11 @@ class Bounce2D: globally convergent root-finding algorithm here. For the task of finding bounce points, even if the inverse map: θ(α, ζ) was known, Newton iteration is not a globally convergent algorithm to find the real roots of - f : ζ ↦ |B|(ζ) − 1/λ where ζ is a field-line-following coordinate. + f : ζ ↦ |B|(ζ) − 1/λ where ζ is a field-line-following coordinate. For this, function approximation of |B| is necessary. Therefore, to compute bounce points {(ζ₁, ζ₂)}, we approximate |B| by a - series expansion of basis functions in (α, ζ) coordinates restricting the + series expansion of basis functions in (α, ζ) coordinates, restricting the class of basis functions to low order (e.g. N = 2ᵏ where k is small) algebraic or trigonometric polynomial with integer frequencies. These are the two classes useful for function approximation and for which there exists @@ -748,11 +163,11 @@ class of basis functions to low order (e.g. N = 2ᵏ where k is small) g : α, ϕ ↦ ∑ₘₙ aₘₙ exp(j [mα + (m ι + n)ϕ]) However, the basis for the latter are trigonometric functions with - irrational frequencies since the rotational transform is irrational. + irrational frequencies, courtesy of the irrational rotational transform. Globally convergent root-finding schemes for that basis (at fixed α) are not known. The denominator of a close rational could be absorbed into the coordinate ϕ, but this balloons the frequency, and hence the degree of the - series. Although since Fourier series may converge faster than Chebyshev, + series. Although, because Fourier series may converge faster than Chebyshev, an alternate strategy that should work is to interpolate |B| to a double Fourier series in (ϑ, ϕ), then apply bisection methods to find roots of f with mesh size inversely proportional to the max frequency along the field @@ -952,7 +367,7 @@ def required_names(): @staticmethod def reshape_data(grid, *data): - """Reshape``data`` given by ``names`` for input to ``self.integrate``. + """Reshape ``data`` arrays for acceptable input to ``integrate``. Parameters ---------- @@ -964,10 +379,11 @@ def reshape_data(grid, *data): Returns ------- f : list[jnp.ndarray] - List of reshaped data which may be given to ``self.integrate``. + List of reshaped arrays which may be given to ``integrate``. """ - return [grid.meshgrid_reshape(d, "rtz")[:, jnp.newaxis] for d in data] + f = [grid.meshgrid_reshape(d, "rtz")[:, jnp.newaxis] for d in data] + return f @property def _L(self): @@ -999,7 +415,7 @@ def bounce_points(self, pitch, num_well=None): ------- bp1, bp2 : (jnp.ndarray, jnp.ndarray) Shape (P, L, num_well). - The field line-following coordinates of bounce points. + ζ coordinates of bounce points. The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, respectively, for the bounce integrals. 
@@ -1009,10 +425,12 @@ def bounce_points(self, pitch, num_well=None): def check_bounce_points(self, bp1, bp2, pitch, plot=True, **kwargs): """Check that bounce points are computed correctly and plot them.""" kwargs.setdefault( - "title", r"Intersects $\zeta$ for $\vertB(\zeta)\vert = 1/\lambda$" + "title", + r"Intersects $\zeta$ in epigraph of $\vert B \vert(\zeta) = 1/\lambda$", ) + kwargs.setdefault("klabel", r"$1/\lambda$") kwargs.setdefault("hlabel", r"$\zeta$") - kwargs.setdefault("vlabel", r"$\vertB\vert(\zeta)$") + kwargs.setdefault("vlabel", r"$\vert B \vert(\zeta)$") self._B.check_intersect1d(bp1, bp2, 1 / pitch, plot, **kwargs) def integrate(self, pitch, integrand, f, weight=None, num_well=None): @@ -1126,7 +544,7 @@ class Bounce1D: along field lines between bounce points, it is required to identify these points with field-line-following coordinates. In the special case of a linear function summing integrals between bounce points over a flux surface, arbitrary - coordinate systems may be used as this operation becomes a surface integral, + coordinate systems may be used as this operation reduces to a surface integral, which is invariant to the order of summation. The DESC coordinate system is related to field-line-following coordinate @@ -1157,10 +575,9 @@ class Bounce1D: Warnings -------- The supplied data must be from a Clebsch coordinate (ρ, α, ζ) tensor-product grid. - The field-line-following coordinate ζ must be strictly increasing. - The ζ coordinate is preferably uniformly spaced, although this is not required. - These are used as knots to construct splines. - A reference density is 100 knots per toroidal transit. + ζ coordinates must be strictly increasing and preferably uniformly spaced. + These are used as knots to construct splines; a reference knot density is 100 + knots per toroidal transit. Examples -------- @@ -1267,7 +684,7 @@ def required_names(): @staticmethod def reshape_data(grid, *data): - """Reshape ``data`` given by ``names`` for input to ``self.integrate``. + """Reshape ``data`` arrays for acceptable input to ``integrate``. Parameters ---------- @@ -1279,12 +696,11 @@ def reshape_data(grid, *data): Returns ------- f : list[jnp.ndarray] - List of reshaped data which may be given to ``self.integrate``. + List of reshaped data which may be given to ``integrate``. """ - return [ - grid.meshgrid_reshape(d, "raz").reshape(-1, grid.num_zeta) for d in data - ] + f = [grid.meshgrid_reshape(d, "raz").reshape(-1, grid.num_zeta) for d in data] + return f def bounce_points(self, pitch, num_well=None): """Compute bounce points. @@ -1311,7 +727,7 @@ def bounce_points(self, pitch, num_well=None): ------- bp1, bp2 : (jnp.ndarray, jnp.ndarray) Shape (P, L * M, num_well). - The field line-following coordinates of bounce points. + ζ coordinates of bounce points. The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, respectively, for the bounce integrals. @@ -1335,7 +751,7 @@ def check_bounce_points(self, bp1, bp2, pitch, plot=True, **kwargs): ---------- bp1, bp2 : (jnp.ndarray, jnp.ndarray) Shape (P, L * M, num_well). - The field line-following coordinates of bounce points. + ζ coordinates of bounce points. The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, respectively, for the bounce integrals. 
pitch : jnp.ndarray diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index b87a7d818a..fe0798cd2e 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -1,12 +1,10 @@ """Utilities for bounce integrals.""" -from functools import partial - from interpax import PPoly from matplotlib import pyplot as plt -from orthax.chebyshev import chebroots -from desc.backend import flatnonzero, imap, jnp, put, softmax +from desc.backend import imap, jnp, softmax +from desc.integrals.basis import _add2legend, _plot_intersect, epigraph_and from desc.integrals.interp_utils import ( interp1d_vec, interp1d_vec_with_df, @@ -20,33 +18,6 @@ ) from desc.utils import atleast_3d_mid, errorif, setdefault, take_mask -# TODO: Boyd's method 𝒪(N²) instead of Chebyshev companion matrix 𝒪(N³). -# John P. Boyd, Computing real roots of a polynomial in Chebyshev series -# form through subdivision. https://doi.org/10.1016/j.apnum.2005.09.007. -chebroots_vec = jnp.vectorize(chebroots, signature="(m)->(n)") - - -def flatten_matrix(y): - """Flatten matrix to vector.""" - return y.reshape(*y.shape[:-2], -1) - - -def subtract(c, k): - """Subtract ``k`` from first index of last axis of ``c``. - - Semantically same as ``return c.copy().at[...,0].add(-k)``, - but allows dimension to increase. - """ - c_0 = c[..., 0] - k - c = jnp.concatenate( - [ - c_0[..., jnp.newaxis], - jnp.broadcast_to(c[..., 1:], (*c_0.shape, c.shape[-1] - 1)), - ], - axis=-1, - ) - return c - def get_pitch(min_B, max_B, num, relative_shift=1e-6): """Return uniformly spaced values between ``1/max_B`` and ``1/min_B``. @@ -106,53 +77,6 @@ def get_alpha(alpha_0, iota, num_transit, period): return alpha -@partial(jnp.vectorize, signature="(m),(m)->(m)") -def epigraph_and(is_intersect, df_dy_sign): - """Set and epigraph of f with ``is_intersect``. - - Remove intersects for which there does not exist a connected path between - adjacent intersects in the epigraph of a continuous map ``f``. - - Parameters - ---------- - is_intersect : jnp.ndarray - Boolean array indicating whether element is an intersect. - df_dy_sign : jnp.ndarray - Shape ``is_intersect.shape``. - Sign of ∂f/∂y (yᵢ) for f(yᵢ) = 0. - - Returns - ------- - is_intersect : jnp.ndarray - Boolean array indicating whether element is an intersect - and satisfies the stated condition. - - """ - # The pairs ``y1`` and ``y2`` are boundaries of an integral only if ``y1 <= y2``. - # For the integrals to be over wells, it is required that the first intersect - # has a non-positive derivative. Now, by continuity, - # ``df_dy_sign[...,k]<=0`` implies ``df_dy_sign[...,k+1]>=0``, - # so there can be at most one inversion, and if it exists, the inversion - # must be at the first pair. To correct the inversion, it suffices to disqualify the - # first intersect as a right boundary, except under an edge case of a series of - # inflection points. - idx = flatnonzero(is_intersect, size=2, fill_value=-1) # idx of first 2 intersects - edge_case = ( - (df_dy_sign[idx[0]] == 0) - & (df_dy_sign[idx[1]] < 0) - & is_intersect[idx[0]] - & is_intersect[idx[1]] - # In theory, we need to keep propagating this edge case, e.g. - # (df_dy_sign[..., 1] < 0) | ( - # (df_dy_sign[..., 1] == 0) & (df_dy_sign[..., 2] < 0)... - # ). - # At each step, the likelihood that an intersection has already been lost - # due to floating point errors grows, so the real solution is to pick a less - # degenerate pitch value - one that does not ride the global extrema of |B|. 
- ) - return put(is_intersect, idx[0], edge_case) - - def _check_spline_shape(knots, B, dB_dz, pitch=None): """Ensure inputs have compatible shape, and return them with full dimension. @@ -160,7 +84,7 @@ def _check_spline_shape(knots, B, dB_dz, pitch=None): ---------- knots : jnp.ndarray Shape (knots.size, ). - Field line-following ζ coordinates of spline knots. Must be strictly increasing. + ζ coordinates of spline knots. Must be strictly increasing. B : jnp.ndarray Shape (B.shape[0], S, knots.size - 1). Polynomial coefficients of the spline of |B| in local power basis. @@ -223,7 +147,7 @@ def bounce_points( line. If two-dimensional, the first axis is the batch axis. knots : jnp.ndarray Shape (knots.size, ). - Field line-following ζ coordinates of spline knots. Must be strictly increasing. + ζ coordinates of spline knots. Must be strictly increasing. B : jnp.ndarray Shape (B.shape[0], S, knots.size - 1). Polynomial coefficients of the spline of |B| in local power basis. @@ -256,7 +180,7 @@ def bounce_points( ------- bp1, bp2 : (jnp.ndarray, jnp.ndarray) Shape (P, S, num_well). - The field line-following coordinates of bounce points. + ζ coordinates of bounce points. The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, respectively, for the bounce integrals. @@ -311,14 +235,14 @@ def bounce_points( def _check_bounce_points(bp1, bp2, pitch, knots, B, plot=True, **kwargs): """Check that bounce points are computed correctly.""" - eps = jnp.finfo(jnp.array(1.0).dtype).eps * 10 - title = kwargs.pop( + eps = kwargs.pop("eps", jnp.finfo(jnp.array(1.0).dtype).eps * 10) + kwargs.setdefault( "title", r"Intersects $\zeta$ in epigraph of $\vert B \vert(\zeta) = 1/\lambda$", ) - klabel = kwargs.pop("klabel", r"$1/\lambda$") - hlabel = kwargs.pop("hlabel", r"$\zeta$") - vlabel = kwargs.pop("vlabel", r"$\vert B \vert(\zeta)$") + kwargs.setdefault("klabel", r"$1/\lambda$") + kwargs.setdefault("hlabel", r"$\zeta$") + kwargs.setdefault("vlabel", r"$\vert B \vert(\zeta)$") assert bp1.shape == bp2.shape mask = (bp1 - bp2) != 0.0 @@ -344,12 +268,9 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B, plot=True, **kwargs): z1=_bp1, z2=_bp2, k=1 / pitch[p, s], - klabel=klabel, - title=title, - hlabel=hlabel, - vlabel=vlabel, **kwargs, ) + print(" bp1 | bp2") print(jnp.column_stack([_bp1, _bp2])) assert not err_1[p, s], "Intersects have an inversion.\n" @@ -364,10 +285,6 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B, plot=True, **kwargs): z1=bp1[:, s], z2=bp2[:, s], k=1 / pitch[:, s], - klabel=klabel, - title=title, - hlabel=hlabel, - vlabel=vlabel, **kwargs, ) @@ -399,7 +316,7 @@ def bounce_quadrature( Quadrature weights. bp1, bp2 : jnp.ndarray Shape (P, S, num_well). - The field line-following coordinates of bounce points. + ζ coordinates of bounce points. The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, respectively, for the bounce integrals. pitch : jnp.ndarray @@ -423,8 +340,7 @@ def bounce_quadrature( Must include names in ``Bounce1D.required_names()``. knots : jnp.ndarray Shape (knots.size, ). - Field line-following sorted, unique ζ coordinates where the arrays in - ``data`` and ``f`` were evaluated. + Unique ζ coordinates where the arrays in ``data`` and ``f`` were evaluated. method : str Method of interpolation for functions contained in ``f``. See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. @@ -442,7 +358,7 @@ def bounce_quadrature( ------- result : jnp.ndarray Shape (P, S, num_well). 
- Quadrature for every pitch along every field line. + Quadrature for every pitch. First axis enumerates pitch values. Second axis enumerates the field lines. Last axis enumerates the bounce integrals. @@ -519,7 +435,7 @@ def _interpolate_and_integrate( Quadrature weights. Q : jnp.ndarray Shape (P, S, Q.shape[2], w.size). - Quadrature points at field line-following ζ coordinates. + Quadrature points at ζ coordinates. data : dict[str, jnp.ndarray] Data evaluated on ``grid`` and reshaped with ``Bounce1D.reshape_data``. Must include names in ``Bounce1D.required_names()``. @@ -528,7 +444,7 @@ def _interpolate_and_integrate( ------- result : jnp.ndarray Shape Q.shape[:-1]. - Quadrature for every pitch along every field line. + Quadrature for every pitch. """ assert pitch.ndim == 2 @@ -573,17 +489,15 @@ def _check_interp(Z, f, b_sup_z, B, B_z_ra, result, plot): Parameters ---------- Z : jnp.ndarray - Quadrature points at field line-following ζ coordinates. + Quadrature points at ζ coordinates. f : list of jnp.ndarray Arguments to the integrand interpolated to Z. b_sup_z : jnp.ndarray - Contravariant field-line following toroidal component of magnetic field, - interpolated to Z. + Contravariant toroidal component of magnetic field, interpolated to Z. B : jnp.ndarray Norm of magnetic field, interpolated to Z. B_z_ra : jnp.ndarray - Norm of magnetic field, derivative with respect to field-line following - coordinate. + Norm of magnetic field, derivative with respect to ζ. result : jnp.ndarray Output of ``_interpolate_and_integrate``. plot : bool @@ -625,7 +539,7 @@ def _plot_check_interp(Z, V, name=""): if marked.size == 0: continue fig, ax = plt.subplots() - ax.set_xlabel(r"Field line $\zeta$") + ax.set_xlabel(r"$\zeta$") ax.set_ylabel(name) ax.set_title( f"Interpolation of {name} to quadrature points. Index {p},{s}." @@ -643,13 +557,13 @@ def _plot_check_interp(Z, V, name=""): def _get_extrema(knots, B, dB_dz, sentinel=jnp.nan): - """Return extrema (ζ*, |B|(ζ*)) along field line. + """Return extrema (ζ*, |B|(ζ*)). Parameters ---------- knots : jnp.ndarray Shape (knots.size, ). - Field line-following ζ coordinates of spline knots. Must be strictly increasing. + ζ coordinates of spline knots. Must be strictly increasing. B : jnp.ndarray Shape (B.shape[0], S, knots.size - 1). Polynomial coefficients of the spline of |B| in local power basis. @@ -787,7 +701,7 @@ def plot_ppoly( Optional, k such that f(ζ) = k. k_transparency : float Transparency of intersect lines. - klabel : float + klabel : str Label of intersect lines. title : str Plot title. 
@@ -846,36 +760,3 @@ def plot_ppoly( plt.show() plt.close() return fig, ax - - -def _add2legend(legend, lines): - """Add lines to legend if it's not already in it.""" - for line in setdefault(lines, [lines], hasattr(lines, "__iter__")): - label = line.get_label() - if label not in legend: - legend[label] = line - - -def _plot_intersect(ax, legend, z1, z2, k, k_transparency, klabel): - """Plot intersects on ``ax``.""" - if k is None: - return - - k = jnp.atleast_1d(jnp.squeeze(k)) - assert k.ndim == 1 - z1, z2 = jnp.atleast_2d(z1, z2) - assert z1.ndim == z2.ndim >= 2 - assert k.shape[0] == z1.shape[0] == z2.shape[0] - for p in k: - _add2legend( - legend, - ax.axhline(p, color="tab:purple", alpha=k_transparency, label=klabel), - ) - for i in range(k.size): - _z1, _z2 = z1[i], z2[i] - if _z1.size == _z2.size: - mask = (z1 - z2) != 0.0 - _z1 = z1[mask] - _z2 = z2[mask] - ax.scatter(_z1, jnp.full_like(_z1, k[i]), marker="v", color="tab:red") - ax.scatter(_z2, jnp.full_like(_z2, k[i]), marker="^", color="tab:green") diff --git a/desc/integrals/interp_utils.py b/desc/integrals/interp_utils.py index 562b2edfb7..b1c36c3aa8 100644 --- a/desc/integrals/interp_utils.py +++ b/desc/integrals/interp_utils.py @@ -3,7 +3,7 @@ from functools import partial from interpax import interp1d -from orthax.chebyshev import chebvander +from orthax.chebyshev import chebroots, chebvander from orthax.polynomial import polyvander from desc.backend import dct, jnp, rfft, rfft2, take @@ -11,6 +11,12 @@ from desc.integrals.quad_utils import bijection_from_disc from desc.utils import Index, errorif +# TODO: Boyd's method 𝒪(N²) instead of Chebyshev companion matrix 𝒪(N³). +# John P. Boyd, Computing real roots of a polynomial in Chebyshev series +# form through subdivision. https://doi.org/10.1016/j.apnum.2005.09.007. +chebroots_vec = jnp.vectorize(chebroots, signature="(m)->(n)") + + # TODO: Transformation to make nodes more uniform Boyd eq. 16.46 pg. 336. # Have a hunch it won't change locations of complex poles much, so using # more uniformly spaced nodes could speed up convergence. diff --git a/desc/utils.py b/desc/utils.py index f3c0923ed5..9bfaf7b21e 100644 --- a/desc/utils.py +++ b/desc/utils.py @@ -730,6 +730,11 @@ def take_mask(a, mask, size=None, fill_value=None): ) +def flatten_matrix(y): + """Flatten matrix to vector.""" + return y.reshape(*y.shape[:-2], -1) + + # TODO: Eventually remove and use numpy's stuff. # https://github.com/numpy/numpy/issues/25805 def atleast_nd(ndmin, *arys): diff --git a/tests/test_fourier_bounce.py b/tests/test_fourier_bounce.py index 782f4610ff..cfa827e904 100644 --- a/tests/test_fourier_bounce.py +++ b/tests/test_fourier_bounce.py @@ -14,7 +14,7 @@ from desc.examples import get from desc.grid import LinearGrid from desc.integrals import Bounce2D -from desc.integrals.bounce_integral import FourierChebyshevBasis +from desc.integrals.basis import FourierChebyshevBasis from desc.integrals.bounce_utils import get_alpha, get_pitch from desc.integrals.interp_utils import fourier_pts From 540d0628429d45492d81bb3df60cfd3c28b728cf Mon Sep 17 00:00:00 2001 From: unalmis Date: Sun, 25 Aug 2024 23:58:38 -0400 Subject: [PATCH 217/241] Review algorithm. 
Fix documentation of integrals and use better names for functions --- desc/integrals/basis.py | 106 ++++++++++++++++------------- desc/integrals/bounce_integral.py | 41 +++++------ desc/integrals/bounce_utils.py | 58 ++++++++-------- desc/integrals/interp_utils.py | 19 +++--- desc/integrals/quad_utils.py | 38 +++++------ desc/integrals/surface_integral.py | 2 +- tests/test_integrals.py | 69 +++++++++---------- tests/test_quad_utils.py | 5 ++ 8 files changed, 178 insertions(+), 160 deletions(-) diff --git a/desc/integrals/basis.py b/desc/integrals/basis.py index 72aecaac66..c93eb75e45 100644 --- a/desc/integrals/basis.py +++ b/desc/integrals/basis.py @@ -47,16 +47,16 @@ def _subtract(c, k): @partial(jnp.vectorize, signature="(m),(m)->(m)") -def epigraph_and(is_intersect, df_dy_sign): - """Set and epigraph of f with ``is_intersect``. +def _in_epigraph_and(is_intersect, df_dy_sign): + """Set and epigraph of function f with the given set of points. - Remove intersects for which there does not exist a connected path between + Return only intersects where there is a connected path between adjacent intersects in the epigraph of a continuous map ``f``. Parameters ---------- is_intersect : jnp.ndarray - Boolean array indicating whether element is an intersect. + Boolean array indicating whether index corresponds to an intersect. df_dy_sign : jnp.ndarray Shape ``is_intersect.shape``. Sign of ∂f/∂y (yᵢ) for f(yᵢ) = 0. @@ -88,11 +88,23 @@ def epigraph_and(is_intersect, df_dy_sign): # ). # At each step, the likelihood that an intersection has already been lost # due to floating point errors grows, so the real solution is to pick a less - # degenerate pitch value - one that does not ride the global extrema of |B|. + # degenerate pitch value - one that does not ride the global extrema of f. ) return put(is_intersect, idx[0], edge_case) +def _chebcast(cheb, arr): + # Input should not have rightmost dimension of cheb that iterates coefficients, + # but may have additional leftmost dimension for batch operation. + errorif( + jnp.ndim(arr) > cheb.ndim, + NotImplementedError, + msg=f"Only one additional axis for batch dimension is allowed. " + f"Got {jnp.ndim(arr) - cheb.ndim + 1} additional axes.", + ) + return cheb if jnp.ndim(arr) < cheb.ndim else cheb[jnp.newaxis] + + class FourierChebyshevBasis: """Fourier-Chebyshev series. @@ -138,15 +150,19 @@ def __init__(self, f, domain=(-1, 1), lobatto=False): self.N = f.shape[-1] errorif(domain[0] > domain[-1], msg="Got inverted domain.") self.domain = tuple(domain) - errorif(lobatto, NotImplementedError, "JAX has not implemented type 1 DCT.") + errorif(lobatto, NotImplementedError, "JAX hasn't implemented type 1 DCT.") self.lobatto = bool(lobatto) self._c = FourierChebyshevBasis._fast_transform(f, self.lobatto) @staticmethod def _fast_transform(f, lobatto): - M = f.shape[-2] N = f.shape[-1] - return rfft(dct(f, type=2 - lobatto, axis=-1), axis=-2) / (M * (N - lobatto)) + c = rfft( + dct(f, type=2 - lobatto, axis=-1) / (N - lobatto), + axis=-2, + norm="forward", + ) + return c @staticmethod def nodes(M, N, L=None, domain=(-1, 1), lobatto=False): @@ -201,12 +217,16 @@ def evaluate(self, M, N): ------- fq : jnp.ndarray Shape (..., M, N) - Fourier-Chebyshev series evaluated at ``FourierChebyshevBasis.nodes(M, N)``. + Fourier-Chebyshev series evaluated at + ``FourierChebyshevBasis.nodes(M,N,L,self.domain,self.lobatto)``. 
""" - fq = idct(irfft(self._c, n=M, axis=-2), type=2 - self.lobatto, n=N, axis=-1) * ( - M * (N - self.lobatto) - ) + fq = idct( + irfft(self._c, n=M, axis=-2, norm="forward"), + type=2 - self.lobatto, + n=N, + axis=-1, + ) * (N - self.lobatto) return fq def harmonics(self): @@ -259,7 +279,7 @@ class ChebyshevBasisSet: Shape (..., M, N). Chebyshev coefficients αₙ(x) for fₓ(y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). M : int - Number of function in this basis set. + Number of functions in this basis set. N : int Chebyshev spectral resolution. domain : (float, float) @@ -287,7 +307,7 @@ def __init__(self, cheb, domain=(-1, 1)): @property def M(self): - """Number of function in this basis set.""" + """Number of functions in this basis set.""" return self.cheb.shape[-2] @property @@ -295,18 +315,6 @@ def N(self): """Chebyshev spectral resolution.""" return self.cheb.shape[-1] - @staticmethod - def _chebcast(cheb, arr): - # Input should not have rightmost dimension of cheb that iterates coefficients, - # but may have additional leftmost dimension for batch operation. - errorif( - jnp.ndim(arr) > cheb.ndim, - NotImplementedError, - msg=f"Only one additional axis for batch dimension is allowed. " - f"Got {jnp.ndim(arr) - cheb.ndim + 1} additional axes.", - ) - return cheb if jnp.ndim(arr) < cheb.ndim else cheb[jnp.newaxis] - def intersect2d(self, k=0.0, eps=_eps): """Coordinates yᵢ such that f(x, yᵢ) = k(x). @@ -331,7 +339,7 @@ def intersect2d(self, k=0.0, eps=_eps): Sign of ∂f/∂y (x, yᵢ). """ - c = _subtract(ChebyshevBasisSet._chebcast(self.cheb, k), k) + c = _subtract(_chebcast(self.cheb, k), k) # roots yᵢ of f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y) - k(x) y = chebroots_vec(c) assert y.shape == (*c.shape[:-1], self.N - 1) @@ -340,7 +348,8 @@ def intersect2d(self, k=0.0, eps=_eps): # Pick sentinel such that only distinct roots are considered intersects. y = filter_distinct(y, sentinel=-2.0, eps=eps) is_intersect = (jnp.abs(y.imag) <= eps) & (jnp.abs(y.real) <= 1.0) - y = jnp.where(is_intersect, y.real, 1.0) # ensure y is in domain of arcos + # Ensure y is in domain of arcos; choose 1 because kernel probably cheaper. + y = jnp.where(is_intersect, y.real, 1.0) # TODO: Multipoint evaluation with FFT. # Chapter 10, https://doi.org/10.1017/CBO9781139856065. @@ -379,7 +388,8 @@ def intersect1d(self, k=0.0, num_intersect=None, pad_value=0.0): z1, z2 : (jnp.ndarray, jnp.ndarray) Shape broadcasts with (..., *self.cheb.shape[:-2], num_intersect). ``z1``, ``z2`` holds intersects satisfying ∂f/∂y <= 0, ∂f/∂y >= 0, - respectively. + respectively. The points are ordered such that the path between + ``z1`` and ``z2`` lies in the epigraph of f. """ errorif( @@ -400,12 +410,14 @@ def intersect1d(self, k=0.0, num_intersect=None, pad_value=0.0): # Note for bounce point applications: # We ignore the degenerate edge case where the boundary shared by adjacent - # polynomials is a left intersect point i.e. ``is_z1`` because the subset of - # pitch values that generate this edge case has zero measure. Note that - # the technique to account for this would be to disqualify intersects - # within ``_eps`` from ``domain[-1]``. + # polynomials is a left intersection i.e. ``is_z1`` because the subset of + # pitch values that generate this edge case has zero measure. By ignoring + # this, for those subset of pitch values the integrations will be done in + # the hypograph of |B| rather than the epigraph, which will be integrated + # to zero. 
If we decide later to not ignore this, the technique to solve + # this is to disqualify intersects within ``_eps`` from ``domain[-1]``. is_z1 = (df_dy_sign <= 0) & is_intersect - is_z2 = (df_dy_sign >= 0) & epigraph_and(is_intersect, df_dy_sign) + is_z2 = (df_dy_sign >= 0) & _in_epigraph_and(is_intersect, df_dy_sign) sentinel = self.domain[0] - 1.0 z1 = take_mask(y, is_z1, size=num_intersect, fill_value=sentinel) @@ -418,7 +430,7 @@ def intersect1d(self, k=0.0, num_intersect=None, pad_value=0.0): return z1, z2 def eval1d(self, z, cheb=None): - """Evaluate piecewise Chebyshev spline at coordinates z. + """Evaluate piecewise Chebyshev series at coordinates z. Parameters ---------- @@ -440,7 +452,7 @@ def eval1d(self, z, cheb=None): Chebyshev basis evaluated at z. """ - cheb = self._chebcast(setdefault(cheb, self.cheb), z) + cheb = _chebcast(setdefault(cheb, self.cheb), z) N = cheb.shape[-1] x_idx, y = self.isomorphism_to_C2(z) y = bijection_to_disc(y, self.domain[0], self.domain[1]) @@ -477,7 +489,8 @@ def isomorphism_to_C1(self, y): def isomorphism_to_C2(self, z): """Return coordinates (x, y) ∈ ℂ² isomorphic to z ∈ ℂ. - Returns index x and value y such that z = f(x) + y where f(x) = x * |domain|. + Returns index x and minimum value y such that + z = f(x) + y where f(x) = x * |domain|. Parameters ---------- @@ -513,11 +526,11 @@ def check_intersect1d(self, z1, z2, k, plot=True, **kwargs): Parameters ---------- z1, z2 : jnp.ndarray - Shape must broadcast with (k, *self.cheb.shape[:-2], W). + Shape must broadcast with (*self.cheb.shape[:-2], W). ``z1``, ``z2`` holds intersects satisfying ∂f/∂y <= 0, ∂f/∂y >= 0, respectively. k : jnp.ndarray - Shape must broadcast with (k.shape[0], *self.cheb.shape[:-2]). + Shape must broadcast with *self.cheb.shape[:-2]. k such that fₓ(yᵢ) = k. plot : bool Whether to plot stuff. Default is true. @@ -533,15 +546,15 @@ def check_intersect1d(self, z1, z2, k, plot=True, **kwargs): err_1 = jnp.any(z1 > z2, axis=-1) err_2 = jnp.any(z1[..., 1:] < z2[..., :-1], axis=-1) - f_m = self.eval1d((z1 + z2) / 2) - assert f_m.shape == z1.shape - err_3 = jnp.any(f_m > k + self._eps, axis=-1) + f_midpoint = self.eval1d((z1 + z2) / 2) + assert f_midpoint.shape == z1.shape + err_3 = jnp.any(f_midpoint > k + self._eps, axis=-1) if not (plot or jnp.any(err_1 | err_2 | err_3)): return # Ensure l axis exists for iteration in below loop. cheb = atleast_nd(3, self.cheb) - mask, z1, z2, f_m = atleast_3d_mid(mask, z1, z2, f_m) + mask, z1, z2, f_midpoint = atleast_3d_mid(mask, z1, z2, f_midpoint) err_1, err_2, err_3 = atleast_2d_end(err_1, err_2, err_3) for l in np.ndindex(cheb.shape[:-2]): @@ -564,8 +577,9 @@ def check_intersect1d(self, z1, z2, k, plot=True, **kwargs): assert not err_1[idx], "Intersects have an inversion.\n" assert not err_2[idx], "Detected discontinuity.\n" assert not err_3[idx], ( - "Detected f > k in well. Increase Chebyshev resolution.\n" - f"{f_m[idx][mask[idx]]} > {k[idx] + self._eps}" + "Detected f > k in well, implying a path between z1 and z2 " + "is in hypograph(f). Increase Chebyshev resolution.\n" + f"{f_midpoint[idx][mask[idx]]} > {k[idx] + self._eps}" ) idx = (slice(None), *l) if plot: @@ -586,7 +600,7 @@ def plot1d( k=None, k_transparency=0.5, klabel=r"$k$", - title=r"Intersects $z$ in epigraph of $f(z) = k$", + title=r"Intersects $z$ in epigraph($f$) s.t. 
$f(z) = k$", hlabel=r"$z$", vlabel=r"$f(z)$", show=True, diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 89dc4f530c..298e11a7af 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -78,14 +78,15 @@ def _transform_to_clebsch(grid, desc_from_clebsch, M, N, B): # TODO: -# After GitHub issue #1034 is resolved, we can also pass in the previous +# After GitHub issue #1034 is resolved, we should pass in the previous # θ(α) coordinates as an initial guess for the next coordinate mapping. # Perhaps tell the optimizer to perturb the coefficients of the # |B|(α, ζ) directly? Maybe auto diff to see change on |B|(θ, ζ) -# and hence stream functions. just guessing. not sure if feasible / useful. +# and hence stream functions. Not sure how feasible... # TODO: Allow multiple starting labels for near-rational surfaces. -# can just concatenate along second to last axis of cheb. +# can just concatenate along second to last axis of cheb, but will +# do in later pull request since it's not urgent. class Bounce2D: @@ -171,11 +172,11 @@ class of basis functions to low order (e.g. N = 2ᵏ where k is small) an alternate strategy that should work is to interpolate |B| to a double Fourier series in (ϑ, ϕ), then apply bisection methods to find roots of f with mesh size inversely proportional to the max frequency along the field - line: M ι + N. ``Bounce2D`` does not use this approach because the + line: M ι + N. ``Bounce2D`` does not use that approach because that root-finding scheme is inferior. After obtaining the bounce points, the supplied quadrature is performed. - By default, Gauss quadrature is performed after removing the singularity. + By default, this is a Gauss quadrature after removing the singularity. Fast fourier transforms interpolate functions in the integrand to the quadrature nodes. @@ -194,7 +195,7 @@ class of basis functions to low order (e.g. N = 2ᵏ where k is small) Uses one-dimensional local spline methods for the same task. An advantage of ``Bounce2D`` over ``Bounce1D`` is that the coordinates on which the root-finding must be done to map from DESC to Clebsch coords is - fixed to ``M*N``, independent of the number of toroidal transits. + fixed to ``L*M*N``, independent of the number of toroidal transits. Warnings -------- @@ -223,11 +224,11 @@ def __init__( M, N, alpha_0=0.0, - num_transit=50, + num_transit=32, quad=leggauss(32), automorphism=(automorphism_sin, grad_automorphism_sin), - B_ref=1.0, - L_ref=1.0, + Bref=1.0, + Lref=1.0, check=False, **kwargs, ): @@ -250,7 +251,7 @@ def __init__( desc_from_clebsch : jnp.ndarray Shape (L * M * N, 3). DESC coordinates (ρ, θ, ζ) sourced from the Clebsch coordinates - ``FourierChebyshevBasis.nodes(M,N,domain=FourierBounce.domain)``. + ``FourierChebyshevBasis.nodes(M,N,L,domain=FourierBounce.domain)``. M : int Grid resolution in poloidal direction for Clebsch coordinate grid. Preferably power of 2. A good choice is ``m``. If the poloidal stream @@ -271,9 +272,9 @@ def __init__( The second callable should be the derivative of the first. This map defines a change of variable for the bounce integral. The choice made for the automorphism will affect the performance of the quadrature method. - B_ref : float + Bref : float Optional. Reference magnetic field strength for normalization. - L_ref : float + Lref : float Optional. Reference length scale for normalization. check : bool Flag for debugging. Must be false for JAX transformations. 
@@ -292,13 +293,13 @@ def __init__( self._m = grid.num_theta self._n = grid.num_zeta self._b_sup_z = jnp.expand_dims( - transform_to_desc(grid, jnp.abs(data["B^zeta"]) / data["|B|"] * L_ref), + transform_to_desc(grid, jnp.abs(data["B^zeta"]) / data["|B|"] * Lref), axis=1, ) self._x, self._w = get_quadrature(quad, automorphism) # Compute global splines. - T, B = _transform_to_clebsch(grid, desc_from_clebsch, M, N, data["|B|"] / B_ref) + T, B = _transform_to_clebsch(grid, desc_from_clebsch, M, N, data["|B|"] / Bref) # peel off field lines alphas = get_alpha( alpha_0, @@ -337,6 +338,7 @@ def desc_from_clebsch(eq, L, M, N, clebsch=None, **kwargs): Preferably power of 2. clebsch : jnp.ndarray Optional, Clebsch coordinate tensor-product grid (ρ, α, ζ). + ``FourierChebyshevBasis.nodes(M,N,L,domain=FourierBounce.domain)``. If given, ``L``, ``M``, and ``N`` are ignored. kwargs : dict Additional parameters to supply to the coordinate mapping function. @@ -426,7 +428,8 @@ def check_bounce_points(self, bp1, bp2, pitch, plot=True, **kwargs): """Check that bounce points are computed correctly and plot them.""" kwargs.setdefault( "title", - r"Intersects $\zeta$ in epigraph of $\vert B \vert(\zeta) = 1/\lambda$", + r"Intersects $\zeta$ in epigraph($\vert B \vert$) s.t. " + r"$\vert B \vert(\zeta) = 1/\lambda$", ) kwargs.setdefault("klabel", r"$1/\lambda$") kwargs.setdefault("hlabel", r"$\zeta$") @@ -565,7 +568,7 @@ class Bounce1D: This is useful if one can efficiently obtain data along field lines. After obtaining the bounce points, the supplied quadrature is performed. - By default, Gauss quadrature is performed after removing the singularity. + By default, this is a Gauss quadrature after removing the singularity. Local splines interpolate functions in the integrand to the quadrature nodes. See Also @@ -575,9 +578,9 @@ class Bounce1D: Warnings -------- The supplied data must be from a Clebsch coordinate (ρ, α, ζ) tensor-product grid. - ζ coordinates must be strictly increasing and preferably uniformly spaced. - These are used as knots to construct splines; a reference knot density is 100 - knots per toroidal transit. + The ζ coordinates (the unique values prior to taking the tensor-product) must be + strictly increasing and preferably uniformly spaced. These are used as knots to + construct splines; a reference knot density is 100 knots per toroidal transit. Examples -------- diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index fe0798cd2e..dc3e371c57 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -4,10 +4,10 @@ from matplotlib import pyplot as plt from desc.backend import imap, jnp, softmax -from desc.integrals.basis import _add2legend, _plot_intersect, epigraph_and +from desc.integrals.basis import _add2legend, _in_epigraph_and, _plot_intersect from desc.integrals.interp_utils import ( interp1d_vec, - interp1d_vec_with_df, + interp1d_vec_Hermite, poly_root, polyval_vec, ) @@ -185,7 +185,7 @@ def bounce_points( respectively, for the bounce integrals. If there were less than ``num_wells`` wells detected along a field line, - then the last axis, which enumerates bounce points for a particular field + then the last axis, which enumerates bounce points for a particular field line and pitch, is padded with zero. """ @@ -213,7 +213,7 @@ def bounce_points( # we ignore the bounce points of particles only assigned to a class that are # trapped outside this snapshot of the field line. 
is_bp1 = (dB_dz_sign <= 0) & is_intersect - is_bp2 = (dB_dz_sign >= 0) & epigraph_and(is_intersect, dB_dz_sign) + is_bp2 = (dB_dz_sign >= 0) & _in_epigraph_and(is_intersect, dB_dz_sign) # Transform out of local power basis expansion. intersect = (intersect + knots[:-1, jnp.newaxis]).reshape(P, S, -1) @@ -238,7 +238,8 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B, plot=True, **kwargs): eps = kwargs.pop("eps", jnp.finfo(jnp.array(1.0).dtype).eps * 10) kwargs.setdefault( "title", - r"Intersects $\zeta$ in epigraph of $\vert B \vert(\zeta) = 1/\lambda$", + r"Intersects $\zeta$ in epigraph($\vert B \vert$) s.t. " + r"$\vert B \vert(\zeta) = 1/\lambda$", ) kwargs.setdefault("klabel", r"$1/\lambda$") kwargs.setdefault("hlabel", r"$\zeta$") @@ -277,7 +278,8 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B, plot=True, **kwargs): assert not err_2[p, s], "Detected discontinuity.\n" assert not err_3, ( f"Detected |B| = {Bs_midpoint[mask[p, s]]} > {1 / pitch[p, s] + eps} " - f"= 1/λ in well. Use more knots.\n" + "= 1/λ in well, implying a path between bounce points is in " + "hypograph(|B|). Use more knots.\n" ) if plot: plot_ppoly( @@ -435,7 +437,7 @@ def _interpolate_and_integrate( Quadrature weights. Q : jnp.ndarray Shape (P, S, Q.shape[2], w.size). - Quadrature points at ζ coordinates. + Quadrature points in ζ coordinates. data : dict[str, jnp.ndarray] Data evaluated on ``grid`` and reshaped with ``Bounce1D.reshape_data``. Must include names in ``Bounce1D.required_names()``. @@ -462,14 +464,14 @@ def _interpolate_and_integrate( pitch = jnp.expand_dims(pitch, axis=(2, 3) if (Q.ndim == 4) else 2) shape = Q.shape Q = Q.reshape(Q.shape[0], Q.shape[1], -1) - b_sup_z = interp1d_vec_with_df( + b_sup_z = interp1d_vec_Hermite( Q, knots, data["B^zeta"] / data["|B|"], data["B^zeta_z|r,a"] / data["|B|"] - data["B^zeta"] * data["|B|_z|r,a"] / data["|B|"] ** 2, ).reshape(shape) - B = interp1d_vec_with_df(Q, knots, data["|B|"], data["|B|_z|r,a"]).reshape(shape) + B = interp1d_vec_Hermite(Q, knots, data["|B|"], data["|B|_z|r,a"]).reshape(shape) # Spline the integrand so that we can evaluate it at quadrature points without # expensive coordinate mappings and root finding. Spline each function separately so # that the singularity near the bounce points can be captured more accurately than @@ -483,13 +485,13 @@ def _interpolate_and_integrate( return result -def _check_interp(Z, f, b_sup_z, B, B_z_ra, result, plot): +def _check_interp(Q, f, b_sup_z, B, B_z_ra, result, plot): """Check for floating point errors. Parameters ---------- - Z : jnp.ndarray - Quadrature points at ζ coordinates. + Q : jnp.ndarray + Quadrature points in ζ coordinates. f : list of jnp.ndarray Arguments to the integrand interpolated to Z. b_sup_z : jnp.ndarray @@ -504,9 +506,9 @@ def _check_interp(Z, f, b_sup_z, B, B_z_ra, result, plot): Whether to plot stuff. """ - assert jnp.isfinite(Z).all(), "NaN interpolation point." + assert jnp.isfinite(Q).all(), "NaN interpolation point." # Integrals that we should be computing. - marked = jnp.any(Z != 0, axis=-1) + marked = jnp.any(Q != 0.0, axis=-1) goal = jnp.sum(marked) msg = "Interpolation failed." @@ -527,15 +529,15 @@ def _check_interp(Z, f, b_sup_z, B, B_z_ra, result, plot): "can be caused by floating point error or a poor choice of quadrature nodes." 
) if plot: - _plot_check_interp(Z, B, name=r"$\vert B \vert$") - _plot_check_interp(Z, b_sup_z, name=r"$ (B / \vert B \vert) \cdot e^{\zeta}$") + _plot_check_interp(Q, B, name=r"$\vert B \vert$") + _plot_check_interp(Q, b_sup_z, name=r"$ (B / \vert B \vert) \cdot e^{\zeta}$") -def _plot_check_interp(Z, V, name=""): - """Plot V[λ, (ρ, α), (ζ₁, ζ₂)](Z).""" - for p in range(Z.shape[0]): - for s in range(Z.shape[1]): - marked = jnp.nonzero(jnp.any(Z != 0, axis=-1))[0] +def _plot_check_interp(Q, V, name=""): + """Plot V[λ, (ρ, α), (ζ₁, ζ₂)](Q).""" + for p in range(Q.shape[0]): + for s in range(Q.shape[1]): + marked = jnp.nonzero(jnp.any(Q != 0.0, axis=-1))[0] if marked.size == 0: continue fig, ax = plt.subplots() @@ -545,7 +547,7 @@ def _plot_check_interp(Z, V, name=""): f"Interpolation of {name} to quadrature points. Index {p},{s}." ) for i in marked: - ax.plot(Z[p, s, i], V[p, s, i], marker="o") + ax.plot(Q[p, s, i], V[p, s, i], marker="o") fig.text( 0.01, 0.01, @@ -673,7 +675,7 @@ def plot_ppoly( k=None, k_transparency=0.5, klabel=r"$k$", - title=r"Intersects $z$ in epigraph of $f(z) = k$", + title=r"Intersects $z$ in epigraph($f$) s.t. $f(z) = k$", hlabel=r"$z$", vlabel=r"$f(z)$", show=True, @@ -692,13 +694,13 @@ def plot_ppoly( Number of points to evaluate for plot. z1 : jnp.ndarray Shape (k.shape[0], W). - Optional, intersects with ∂f/∂ζ <= 0. + Optional, intersects with ∂f/∂z <= 0. z2 : jnp.ndarray Shape (k.shape[0], W). - Optional, intersects with ∂f/∂ζ >= 0. + Optional, intersects with ∂f/∂z >= 0. k : jnp.ndarray Shape (k.shape[0], ). - Optional, k such that f(ζ) = k. + Optional, k such that f(z) = k. k_transparency : float Transparency of intersect lines. klabel : str @@ -712,9 +714,9 @@ def plot_ppoly( show : bool Whether to show the plot. Default is true. start : float - Minimum ζ on plot. + Minimum z on plot. stop : float - Maximum ζ on plot. + Maximum z on plot. include_knots : bool Whether to plot vertical lines at the knots. knot_transparency : float diff --git a/desc/integrals/interp_utils.py b/desc/integrals/interp_utils.py index b1c36c3aa8..883da3ef21 100644 --- a/desc/integrals/interp_utils.py +++ b/desc/integrals/interp_utils.py @@ -1,4 +1,4 @@ -"""Interpolation utilities.""" +"""Fast interpolation utilities.""" from functools import partial @@ -19,14 +19,15 @@ # TODO: Transformation to make nodes more uniform Boyd eq. 16.46 pg. 336. # Have a hunch it won't change locations of complex poles much, so using -# more uniformly spaced nodes could speed up convergence. +# more uniformly spaced nodes could speed up convergence (wrt early +# series truncation, not the infinite limit). def cheb_pts(N, lobatto=False, domain=(-1, 1)): """Get ``N`` Chebyshev points mapped to given domain. - Notes - ----- + Warnings + -------- This is a common definition of the Chebyshev points (see Boyd, Chebyshev and Fourier Spectral Methods p. 498). These are the points demanded by discrete cosine transformations to interpolate Chebyshev series because the cosine @@ -307,7 +308,7 @@ def transform_to_desc(grid, f): ------- a : jnp.ndarray Shape (grid.num_rho, grid.num_theta // 2 + 1, grid.num_zeta) - Coefficients of 2D real FFT. + Complex coefficients of 2D real FFT. """ f = grid.meshgrid_reshape(f, order="rtz") @@ -325,8 +326,8 @@ def cheb_from_dct(a, axis=-1): a : jnp.ndarray Discrete cosine transform coefficients, e.g. ``a=dct(f,type=2,axis=axis,norm="forward")``. - The discrete cosine transformation used by scipy is defined here. 
- docs.scipy.org/doc/scipy/reference/generated/scipy.fft.dct.html#scipy.fft.dct + The discrete cosine transformation used by scipy is defined here: + https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.dct.html. axis : int Axis along which to transform. @@ -472,8 +473,8 @@ def polyval_vec(x, c): @partial(jnp.vectorize, signature="(m),(n),(n),(n)->(m)") -def interp1d_vec_with_df(xq, x, f, fx): - """Vectorized interp1d.""" +def interp1d_vec_Hermite(xq, x, f, fx): + """Vectorized cubic Hermite spline.""" return interp1d(xq, x, f, method="cubic", fx=fx) diff --git a/desc/integrals/quad_utils.py b/desc/integrals/quad_utils.py index d9950ad07b..b14f691b02 100644 --- a/desc/integrals/quad_utils.py +++ b/desc/integrals/quad_utils.py @@ -8,19 +8,19 @@ def bijection_to_disc(x, a, b): """[a, b] ∋ x ↦ y ∈ [−1, 1].""" - y = 2 * (x - a) / (b - a) - 1 + y = 2.0 * (x - a) / (b - a) - 1.0 return y def bijection_from_disc(x, a, b): """[−1, 1] ∋ x ↦ y ∈ [a, b].""" - y = (x + 1) / 2 * (b - a) + a + y = 0.5 * (b - a) * (x + 1.0) + a return y def grad_bijection_from_disc(a, b): - """Gradient of affine bijection.""" - dy_dx = (b - a) / 2 + """Gradient of affine bijection from disc.""" + dy_dx = 0.5 * (b - a) return dy_dx @@ -42,13 +42,13 @@ def automorphism_arcsin(x): Transformed points. """ - y = 2 * jnp.arcsin(x) / jnp.pi + y = 2.0 * jnp.arcsin(x) / jnp.pi return y def grad_automorphism_arcsin(x): """Gradient of arcsin automorphism.""" - dy_dx = 2 / (jnp.sqrt(1 - x**2) * jnp.pi) + dy_dx = 2.0 / (jnp.sqrt(1.0 - x**2) * jnp.pi) return dy_dx @@ -85,7 +85,7 @@ def automorphism_sin(x, s=0, m=10): errorif(not (0 <= s <= 1)) # s = 0 -> derivative vanishes like cosine. # s = 1 -> derivative vanishes like cosine^k. - y0 = jnp.sin(jnp.pi * x / 2) + y0 = jnp.sin(0.5 * jnp.pi * x) y1 = x + jnp.sin(jnp.pi * x) / jnp.pi # k = 2 y = (1 - s) * y0 + s * y1 # y is an expansion, so y(x) > x near x ∈ {−1, 1} and there is a tendency @@ -96,8 +96,8 @@ def automorphism_sin(x, s=0, m=10): def grad_automorphism_sin(x, s=0): """Gradient of sin automorphism.""" - dy0_dx = jnp.pi * jnp.cos(jnp.pi * x / 2) / 2 - dy1_dx = 1 + jnp.cos(jnp.pi * x) + dy0_dx = 0.5 * jnp.pi * jnp.cos(0.5 * jnp.pi * x) + dy1_dx = 1.0 + jnp.cos(jnp.pi * x) dy_dx = (1 - s) * dy0_dx + s * dy1_dx return dy_dx @@ -138,7 +138,7 @@ def tanh_sinh(deg, m=10): return x, w -def leggausslob(deg): +def leggauss_lobatto(deg): """Lobatto-Gauss-Legendre quadrature. Returns quadrature points xₖ and weights wₖ for the approximate evaluation of the @@ -210,7 +210,7 @@ def get_quadrature(quad, automorphism): def composite_linspace(x, num): - """Returns linearly spaced points between every pair of points ``x``. + """Returns linearly spaced values between every pair of values in ``x``. Parameters ---------- @@ -218,18 +218,18 @@ def composite_linspace(x, num): First axis has values to return linearly spaced values between. The remaining axes are batch axes. Assumes input is sorted along first axis. num : int - Number of points between every pair of points in ``x``. + Number of values between every pair of values in ``x``. Returns ------- - pts : jnp.ndarray + vals : jnp.ndarray Shape ((x.shape[0] - 1) * num + x.shape[0], *x.shape[1:]). - Linearly spaced points between ``x``. + Linearly spaced values between ``x``. 
""" x = jnp.atleast_1d(x) - pts = jnp.linspace(x[:-1], x[1:], num + 1, endpoint=False) - pts = jnp.swapaxes(pts, 0, 1).reshape(-1, *x.shape[1:]) - pts = jnp.append(pts, x[jnp.newaxis, -1], axis=0) - assert pts.shape == ((x.shape[0] - 1) * num + x.shape[0], *x.shape[1:]) - return pts + vals = jnp.linspace(x[:-1], x[1:], num + 1, endpoint=False) + vals = jnp.swapaxes(vals, 0, 1).reshape(-1, *x.shape[1:]) + vals = jnp.append(vals, x[jnp.newaxis, -1], axis=0) + assert vals.shape == ((x.shape[0] - 1) * num + x.shape[0], *x.shape[1:]) + return vals diff --git a/desc/integrals/surface_integral.py b/desc/integrals/surface_integral.py index acc1e6c1b9..944a711904 100644 --- a/desc/integrals/surface_integral.py +++ b/desc/integrals/surface_integral.py @@ -100,7 +100,7 @@ def line_integrals( The coordinate curve to compute the integration over. To clarify, a theta (poloidal) curve is the intersection of a rho surface (flux surface) and zeta (toroidal) surface. - fix_surface : str, float + fix_surface : (str, float) A tuple of the form: label, value. ``fix_surface`` label should differ from ``line_label``. By default, ``fix_surface`` is chosen to be the flux surface at rho=1. diff --git a/tests/test_integrals.py b/tests/test_integrals.py index 896b412b69..ad9726b310 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -47,7 +47,7 @@ bijection_from_disc, grad_automorphism_sin, grad_bijection_from_disc, - leggausslob, + leggauss_lobatto, tanh_sinh, ) from desc.integrals.singularities import _get_quadrature_nodes @@ -729,9 +729,8 @@ def filter(bp1, bp2): mask = (bp1 - bp2) != 0.0 return bp1[mask], bp2[mask] - @staticmethod @pytest.mark.unit - def test_bp1_first(): + def test_bp1_first(self): """Test that bounce points are computed correctly.""" start = np.pi / 3 end = 6 * np.pi @@ -745,9 +744,8 @@ def test_bp1_first(): np.testing.assert_allclose(bp1, intersect[0::2]) np.testing.assert_allclose(bp2, intersect[1::2]) - @staticmethod @pytest.mark.unit - def test_bp2_first(): + def test_bp2_first(self): """Test that bounce points are computed correctly.""" start = -3 * np.pi end = -start @@ -761,9 +759,8 @@ def test_bp2_first(): np.testing.assert_allclose(bp1, intersect[1:-1:2]) np.testing.assert_allclose(bp2, intersect[0::2][1:]) - @staticmethod @pytest.mark.unit - def test_bp1_before_extrema(): + def test_bp1_before_extrema(self): """Test that bounce points are computed correctly.""" start = -np.pi end = -2 * start @@ -771,9 +768,9 @@ def test_bp1_before_extrema(): B = CubicHermiteSpline( k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) ) - B_z_ra = B.derivative() - pitch = 1 / B(B_z_ra.roots(extrapolate=False))[3] + 1e-13 - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) + dB_dz = B.derivative() + pitch = 1 / B(dB_dz.roots(extrapolate=False))[3] + 1e-13 + bp1, bp2 = bounce_points(pitch, k, B.c, dB_dz.c, check=True) bp1, bp2 = TestBouncePoints.filter(bp1, bp2) assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) @@ -783,9 +780,8 @@ def test_bp1_before_extrema(): np.testing.assert_allclose(intersect[2], intersect[3], rtol=1e-6) np.testing.assert_allclose(bp2, intersect[[3, 4]], rtol=1e-6) - @staticmethod @pytest.mark.unit - def test_bp2_before_extrema(): + def test_bp2_before_extrema(self): """Test that bounce points are computed correctly.""" start = -1.2 * np.pi end = -2 * start @@ -795,18 +791,17 @@ def test_bp2_before_extrema(): np.cos(k) + 2 * np.sin(-2 * k) + k / 4, -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 4, ) - B_z_ra = 
B.derivative() - pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) + dB_dz = B.derivative() + pitch = 1 / B(dB_dz.roots(extrapolate=False))[2] + bp1, bp2 = bounce_points(pitch, k, B.c, dB_dz.c, check=True) bp1, bp2 = TestBouncePoints.filter(bp1, bp2) assert bp1.size and bp2.size intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(bp1, intersect[[0, -2]]) np.testing.assert_allclose(bp2, intersect[[1, -1]]) - @staticmethod @pytest.mark.unit - def test_extrema_first_and_before_bp1(): + def test_extrema_first_and_before_bp1(self): """Test that bounce points are computed correctly.""" start = -1.2 * np.pi end = -2 * start @@ -816,10 +811,10 @@ def test_extrema_first_and_before_bp1(): np.cos(k) + 2 * np.sin(-2 * k) + k / 20, -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 20, ) - B_z_ra = B.derivative() - pitch = 1 / B(B_z_ra.roots(extrapolate=False))[2] - 1e-13 + dB_dz = B.derivative() + pitch = 1 / B(dB_dz.roots(extrapolate=False))[2] - 1e-13 bp1, bp2 = bounce_points( - pitch, k[2:], B.c[:, 2:], B_z_ra.c[:, 2:], check=True, plot=False + pitch, k[2:], B.c[:, 2:], dB_dz.c[:, 2:], check=True, plot=False ) plot_ppoly(B, z1=bp1, z2=bp2, k=1 / pitch, start=k[2]) bp1, bp2 = TestBouncePoints.filter(bp1, bp2) @@ -830,9 +825,8 @@ def test_extrema_first_and_before_bp1(): np.testing.assert_allclose(bp1, intersect[[0, 2, 4]], rtol=1e-6) np.testing.assert_allclose(bp2, intersect[[0, 3, 5]], rtol=1e-6) - @staticmethod @pytest.mark.unit - def test_extrema_first_and_before_bp2(): + def test_extrema_first_and_before_bp2(self): """Test that bounce points are computed correctly.""" start = -1.2 * np.pi end = -2 * start + 1 @@ -842,9 +836,9 @@ def test_extrema_first_and_before_bp2(): np.cos(k) + 2 * np.sin(-2 * k) + k / 10, -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 10, ) - B_z_ra = B.derivative() - pitch = 1 / B(B_z_ra.roots(extrapolate=False))[1] + 1e-13 - bp1, bp2 = bounce_points(pitch, k, B.c, B_z_ra.c, check=True) + dB_dz = B.derivative() + pitch = 1 / B(dB_dz.roots(extrapolate=False))[1] + 1e-13 + bp1, bp2 = bounce_points(pitch, k, B.c, dB_dz.c, check=True) bp1, bp2 = TestBouncePoints.filter(bp1, bp2) assert bp1.size and bp2.size # Our routine correctly detects intersection, while scipy, jnp.root fails. 
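[Editor's note] For readers skimming the test refactor above: the bounce-point tests all follow the same recipe — spline a model |B|(ζ) with ``CubicHermiteSpline``, pick a pitch λ near 1/|B| at a spline extremum, and ask ``bounce_points`` for the (ζ₁, ζ₂) pairs. A condensed, standalone version of that recipe follows; it is not part of the patch, and the import path is assumed to match what ``tests/test_integrals.py`` uses.

.. code-block:: python

    # Editorial sketch (not part of the patch), following test_bp1_before_extrema.
    import numpy as np
    from scipy.interpolate import CubicHermiteSpline

    from desc.integrals.bounce_utils import bounce_points  # assumed import path

    start, end = -np.pi, 2 * np.pi
    knots = np.linspace(start, end, 5)
    B = CubicHermiteSpline(
        knots,
        np.cos(knots) + 2 * np.sin(-2 * knots),
        -np.sin(knots) - 4 * np.cos(-2 * knots),
    )
    dB_dz = B.derivative()
    # Pitch chosen just below 1/|B| at a spline extremum, as in the test above.
    pitch = 1 / B(dB_dz.roots(extrapolate=False))[3] + 1e-13

    z1, z2 = bounce_points(pitch, knots, B.c, dB_dz.c, check=True, plot=False)

    # Genuine bounce points are intersects of |B| with 1/lambda, so lambda*|B| = 1
    # there; padded entries satisfy z1 == z2 == 0 and are excluded by the mask.
    mask = (z1 - z2) != 0.0
    np.testing.assert_allclose(pitch * B(z1[mask]), 1.0, rtol=1e-6)
    np.testing.assert_allclose(pitch * B(z2[mask]), 1.0, rtol=1e-6)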
@@ -864,17 +858,17 @@ def test_get_extrema(self): B = CubicHermiteSpline( k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) ) - B_z_ra = B.derivative() - extrema, B_extrema = _get_extrema(k, B.c, B_z_ra.c) - mask = ~np.isnan(extrema) - extrema, B_extrema = extrema[mask], B_extrema[mask] - idx = np.argsort(extrema) + dB_dz = B.derivative() + ext, B_ext = _get_extrema(k, B.c, dB_dz.c) + mask = ~np.isnan(ext) + ext, B_ext = ext[mask], B_ext[mask] + idx = np.argsort(ext) - extrema_scipy = np.sort(B_z_ra.roots(extrapolate=False)) - B_extrema_scipy = B(extrema_scipy) - assert extrema.size == extrema_scipy.size - np.testing.assert_allclose(extrema[idx], extrema_scipy) - np.testing.assert_allclose(B_extrema[idx], B_extrema_scipy) + ext_scipy = np.sort(dB_dz.roots(extrapolate=False)) + B_ext_scipy = B(ext_scipy) + assert ext.size == ext_scipy.size + np.testing.assert_allclose(ext[idx], ext_scipy) + np.testing.assert_allclose(B_ext[idx], B_ext_scipy) class TestBounceQuadrature: @@ -899,7 +893,7 @@ def _mod_chebu_gauss(deg): (True, tanh_sinh(40), None), (True, leggauss(25), "default"), (False, tanh_sinh(20), None), - (False, leggausslob(10), "default"), + (False, leggauss_lobatto(10), "default"), # sin automorphism still helps out chebyshev quadrature (True, _mod_cheb_gauss(30), "default"), (False, _mod_chebu_gauss(10), "default"), @@ -1136,7 +1130,6 @@ def dB_dz(z): knots=zeta, B=bounce.B, dB_dz=bounce._dB_dz, - method="cubic", ), rtol=1e-3, ) @@ -1355,4 +1348,4 @@ def integrand_grad(*args, **kwargs2): assert np.isclose(grad(fun1)(pitch), truth, rtol=1e-3) # Make sure bounce points get differentiated too. result = fun2(pitch) - assert np.isfinite(result) and not np.isclose(result, truth, rtol=1e-3) + assert np.isfinite(result) and not np.isclose(result, truth, rtol=1e-1) diff --git a/tests/test_quad_utils.py b/tests/test_quad_utils.py index 130b2732b8..662e9fcef7 100644 --- a/tests/test_quad_utils.py +++ b/tests/test_quad_utils.py @@ -62,3 +62,8 @@ def test_automorphism(): assert np.isfinite(y).all() y = 1 / np.sqrt(1 - np.abs(automorphism_arcsin(x))) assert np.isfinite(y).all() + + +@pytest.mark.unit +def test_leggauss_lobatto(): + """Test that quadrature points and weights are correct.""" From 04f87a388a35e4991ba9840faac22c9c7971a379 Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 26 Aug 2024 13:55:35 -0400 Subject: [PATCH 218/241] Debugging fourier bounce stuff --- desc/integrals/basis.py | 41 +++++++-- desc/integrals/bounce_integral.py | 82 ++++++++++------- desc/integrals/bounce_utils.py | 148 +++++++++++++++--------------- desc/integrals/interp_utils.py | 65 ++++++------- desc/utils.py | 4 + tests/test_fourier_bounce.py | 33 +++---- tests/test_integrals.py | 103 ++++++++++----------- tests/test_interp_utils.py | 9 +- 8 files changed, 268 insertions(+), 217 deletions(-) diff --git a/desc/integrals/basis.py b/desc/integrals/basis.py index c93eb75e45..68422ea680 100644 --- a/desc/integrals/basis.py +++ b/desc/integrals/basis.py @@ -50,8 +50,12 @@ def _subtract(c, k): def _in_epigraph_and(is_intersect, df_dy_sign): """Set and epigraph of function f with the given set of points. - Return only intersects where there is a connected path between - adjacent intersects in the epigraph of a continuous map ``f``. + Return only intersects where the straight line path between adjacent + intersects resides in the epigraph of a continuous map ``f``. + + Warnings + -------- + Does not support keyword arguments. 
Parameters ---------- @@ -196,7 +200,7 @@ def nodes(M, N, L=None, domain=(-1, 1), lobatto=False): if L is not None: if isposint(L): L = jnp.flipud(jnp.linspace(1, 0, L, endpoint=False)) - coords = (L, x, y) + coords = (jnp.atleast_1d(L), x, y) else: coords = (x, y) coords = list(map(jnp.ravel, jnp.meshgrid(*coords, indexing="ij"))) @@ -388,8 +392,9 @@ def intersect1d(self, k=0.0, num_intersect=None, pad_value=0.0): z1, z2 : (jnp.ndarray, jnp.ndarray) Shape broadcasts with (..., *self.cheb.shape[:-2], num_intersect). ``z1``, ``z2`` holds intersects satisfying ∂f/∂y <= 0, ∂f/∂y >= 0, - respectively. The points are ordered such that the path between - ``z1`` and ``z2`` lies in the epigraph of f. + respectively. The points are grouped and ordered such that the + straight line path between the intersects in ``z1`` and ``z2`` + resides in the epigraph of f. """ errorif( @@ -602,7 +607,7 @@ def plot1d( klabel=r"$k$", title=r"Intersects $z$ in epigraph($f$) s.t. $f(z) = k$", hlabel=r"$z$", - vlabel=r"$f(z)$", + vlabel=r"$f$", show=True, ): """Plot the piecewise Chebyshev series. @@ -660,7 +665,7 @@ def plot1d( ) ax.set_xlabel(hlabel) ax.set_ylabel(vlabel) - ax.legend(legend.values(), legend.keys()) + ax.legend(legend.values(), legend.keys(), loc="lower right") ax.set_title(title) plt.tight_layout() if show: @@ -698,5 +703,23 @@ def _plot_intersect(ax, legend, z1, z2, k, k_transparency, klabel): mask = (z1 - z2) != 0.0 _z1 = z1[mask] _z2 = z2[mask] - ax.scatter(_z1, jnp.full_like(_z1, k[i]), marker="v", color="tab:red") - ax.scatter(_z2, jnp.full_like(_z2, k[i]), marker="^", color="tab:green") + _add2legend( + legend, + ax.scatter( + _z1, + jnp.full_like(_z1, k[i]), + marker="v", + color="tab:red", + label=r"$z_1$", + ), + ) + _add2legend( + legend, + ax.scatter( + _z2, + jnp.full_like(_z2, k[i]), + marker="^", + color="tab:green", + label=r"$z_2$", + ), + ) diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 298e11a7af..b78af94964 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -415,17 +415,37 @@ def bounce_points(self, pitch, num_well=None): Returns ------- - bp1, bp2 : (jnp.ndarray, jnp.ndarray) + z1, z2 : (jnp.ndarray, jnp.ndarray) Shape (P, L, num_well). - ζ coordinates of bounce points. - The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, - respectively, for the bounce integrals. + ζ coordinates of bounce points. The points are grouped and ordered such + that the straight line path between the intersects in ``z1`` and ``z2`` + resides in the epigraph of |B|. """ return self._B.intersect1d(1 / jnp.atleast_2d(pitch), num_well) - def check_bounce_points(self, bp1, bp2, pitch, plot=True, **kwargs): - """Check that bounce points are computed correctly and plot them.""" + def check_bounce_points(self, z1, z2, pitch, plot=True, **kwargs): + """Check that bounce points are computed correctly. + + Parameters + ---------- + z1, z2 : (jnp.ndarray, jnp.ndarray) + Shape (P, L, num_well). + ζ coordinates of bounce points. The points are grouped and ordered such + that the straight line path between the intersects in ``z1`` and ``z2`` + resides in the epigraph of |B|. + pitch : jnp.ndarray + Shape (P, L). + λ values to evaluate the bounce integral at each field line. λ(ρ) is + specified by ``pitch[...,ρ]`` where in the latter the labels ρ are + interpreted as the index into the last axis that corresponds to that field + line. If two-dimensional, the first axis is the batch axis. 
+ plot : bool + Whether to plot stuff. + kwargs : dict + Keyword arguments into ``ChebyshevBasisSet.plot1d``. + + """ kwargs.setdefault( "title", r"Intersects $\zeta$ in epigraph($\vert B \vert$) s.t. " @@ -433,8 +453,8 @@ def check_bounce_points(self, bp1, bp2, pitch, plot=True, **kwargs): ) kwargs.setdefault("klabel", r"$1/\lambda$") kwargs.setdefault("hlabel", r"$\zeta$") - kwargs.setdefault("vlabel", r"$\vert B \vert(\zeta)$") - self._B.check_intersect1d(bp1, bp2, 1 / pitch, plot, **kwargs) + kwargs.setdefault("vlabel", r"$\vert B \vert$") + self._B.check_intersect1d(z1, z2, 1 / pitch, plot, **kwargs) def integrate(self, pitch, integrand, f, weight=None, num_well=None): """Bounce integrate ∫ f(ℓ) dℓ. @@ -487,21 +507,21 @@ def integrate(self, pitch, integrand, f, weight=None, num_well=None): """ pitch = jnp.atleast_2d(pitch) - bp1, bp2 = self.bounce_points(pitch, num_well) - result = self._integrate(bp1, bp2, pitch, integrand, f) + z1, z2 = self.bounce_points(pitch, num_well) + result = self._integrate(z1, z2, pitch, integrand, f) errorif(weight is not None, NotImplementedError) return result - def _integrate(self, bp1, bp2, pitch, integrand, f): - assert bp1.ndim == 3 - assert bp1.shape == bp2.shape + def _integrate(self, z1, z2, pitch, integrand, f): + assert z1.ndim == 3 + assert z1.shape == z2.shape assert pitch.ndim == 2 - W = bp1.shape[-1] # number of wells + W = z1.shape[-1] # number of wells shape = (pitch.shape[0], self._L, W, self._x.size) # quadrature points parameterized by ζ for each pitch and flux surface Q_zeta = flatten_matrix( - bijection_from_disc(self._x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]) + bijection_from_disc(self._x, z1[..., jnp.newaxis], z2[..., jnp.newaxis]) ) # quadrature points in (θ, ζ) coordinates Q = jnp.stack([self._T.eval1d(Q_zeta), Q_zeta], axis=-1) @@ -728,11 +748,11 @@ def bounce_points(self, pitch, num_well=None): Returns ------- - bp1, bp2 : (jnp.ndarray, jnp.ndarray) + z1, z2 : (jnp.ndarray, jnp.ndarray) Shape (P, L * M, num_well). - ζ coordinates of bounce points. - The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, - respectively, for the bounce integrals. + ζ coordinates of bounce points. The points are grouped and ordered such + that the straight line path between the intersects in ``z1`` and ``z2`` + resides in the epigraph of |B|. If there were less than ``num_wells`` wells detected along a field line, then the last axis, which enumerates bounce points for a particular field @@ -747,16 +767,16 @@ def bounce_points(self, pitch, num_well=None): num_well=num_well, ) - def check_bounce_points(self, bp1, bp2, pitch, plot=True, **kwargs): + def check_bounce_points(self, z1, z2, pitch, plot=True, **kwargs): """Check that bounce points are computed correctly. Parameters ---------- - bp1, bp2 : (jnp.ndarray, jnp.ndarray) + z1, z2 : (jnp.ndarray, jnp.ndarray) Shape (P, L * M, num_well). - ζ coordinates of bounce points. - The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, - respectively, for the bounce integrals. + ζ coordinates of bounce points. The points are grouped and ordered such + that the straight line path between the intersects in ``z1`` and ``z2`` + resides in the epigraph of |B|. pitch : jnp.ndarray Shape must broadcast with (P, L * M). λ values to evaluate the bounce integral at each field line. 
λ(ρ,α) is @@ -770,8 +790,8 @@ def check_bounce_points(self, bp1, bp2, pitch, plot=True, **kwargs): """ _check_bounce_points( - bp1=bp1, - bp2=bp2, + z1=z1, + z2=z2, pitch=jnp.atleast_2d(pitch), knots=self._zeta, B=self.B, @@ -848,12 +868,12 @@ def integrate( """ pitch = jnp.atleast_2d(pitch) - bp1, bp2 = self.bounce_points(pitch, num_well) + z1, z2 = self.bounce_points(pitch, num_well) result = bounce_quadrature( x=self._x, w=self._w, - bp1=bp1, - bp2=bp2, + z1=z1, + z2=z2, pitch=pitch, integrand=integrand, f=f, @@ -866,8 +886,8 @@ def integrate( if weight is not None: result *= interp_to_argmin_B_soft( g=weight, - bp1=bp1, - bp2=bp2, + z1=z1, + z2=z2, knots=self._zeta, B=self.B, dB_dz=self._dB_dz, diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index dc3e371c57..c7349b7ba1 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -1,4 +1,4 @@ -"""Utilities for bounce integrals.""" +"""Utilities and functional programming interface for bounce integrals.""" from interpax import PPoly from matplotlib import pyplot as plt @@ -6,8 +6,8 @@ from desc.backend import imap, jnp, softmax from desc.integrals.basis import _add2legend, _in_epigraph_and, _plot_intersect from desc.integrals.interp_utils import ( + interp1d_Hermite_vec, interp1d_vec, - interp1d_vec_Hermite, poly_root, polyval_vec, ) @@ -178,11 +178,11 @@ def bounce_points( Returns ------- - bp1, bp2 : (jnp.ndarray, jnp.ndarray) + z1, z2 : (jnp.ndarray, jnp.ndarray) Shape (P, S, num_well). - ζ coordinates of bounce points. - The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, - respectively, for the bounce integrals. + ζ coordinates of bounce points. The points are grouped and ordered such + that the straight line path between the intersects in ``z1`` and ``z2`` + resides in the epigraph of |B|. If there were less than ``num_wells`` wells detected along a field line, then the last axis, which enumerates bounce points for a particular field @@ -212,28 +212,28 @@ def bounce_points( # Following discussion on page 3 and 5 of https://doi.org/10.1063/1.873749, # we ignore the bounce points of particles only assigned to a class that are # trapped outside this snapshot of the field line. - is_bp1 = (dB_dz_sign <= 0) & is_intersect - is_bp2 = (dB_dz_sign >= 0) & _in_epigraph_and(is_intersect, dB_dz_sign) + is_z1 = (dB_dz_sign <= 0) & is_intersect + is_z2 = (dB_dz_sign >= 0) & _in_epigraph_and(is_intersect, dB_dz_sign) # Transform out of local power basis expansion. intersect = (intersect + knots[:-1, jnp.newaxis]).reshape(P, S, -1) # New versions of JAX only like static sentinels. sentinel = -10000000.0 # instead of knots[0] - 1 - bp1 = take_mask(intersect, is_bp1, size=num_well, fill_value=sentinel) - bp2 = take_mask(intersect, is_bp2, size=num_well, fill_value=sentinel) + z1 = take_mask(intersect, is_z1, size=num_well, fill_value=sentinel) + z2 = take_mask(intersect, is_z2, size=num_well, fill_value=sentinel) - mask = (bp1 > sentinel) & (bp2 > sentinel) + mask = (z1 > sentinel) & (z2 > sentinel) # Set outside mask to same value so integration is over set of measure zero. 
- bp1 = jnp.where(mask, bp1, 0.0) - bp2 = jnp.where(mask, bp2, 0.0) + z1 = jnp.where(mask, z1, 0.0) + z2 = jnp.where(mask, z2, 0.0) if check: - _check_bounce_points(bp1, bp2, pitch, knots, B, plot, **kwargs) + _check_bounce_points(z1, z2, pitch, knots, B, plot, **kwargs) - return bp1, bp2 + return z1, z2 -def _check_bounce_points(bp1, bp2, pitch, knots, B, plot=True, **kwargs): +def _check_bounce_points(z1, z2, pitch, knots, B, plot=True, **kwargs): """Check that bounce points are computed correctly.""" eps = kwargs.pop("eps", jnp.finfo(jnp.array(1.0).dtype).eps * 10) kwargs.setdefault( @@ -243,49 +243,49 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B, plot=True, **kwargs): ) kwargs.setdefault("klabel", r"$1/\lambda$") kwargs.setdefault("hlabel", r"$\zeta$") - kwargs.setdefault("vlabel", r"$\vert B \vert(\zeta)$") + kwargs.setdefault("vlabel", r"$\vert B \vert$") - assert bp1.shape == bp2.shape - mask = (bp1 - bp2) != 0.0 - bp1 = jnp.where(mask, bp1, jnp.nan) - bp2 = jnp.where(mask, bp2, jnp.nan) + assert z1.shape == z2.shape + mask = (z1 - z2) != 0.0 + z1 = jnp.where(mask, z1, jnp.nan) + z2 = jnp.where(mask, z2, jnp.nan) - err_1 = jnp.any(bp1 > bp2, axis=-1) - err_2 = jnp.any(bp1[..., 1:] < bp2[..., :-1], axis=-1) + err_1 = jnp.any(z1 > z2, axis=-1) + err_2 = jnp.any(z1[..., 1:] < z2[..., :-1], axis=-1) - P, S, _ = bp1.shape + P, S, _ = z1.shape for s in range(S): Bs = PPoly(B[:, s], knots) for p in range(P): - Bs_midpoint = Bs((bp1[p, s] + bp2[p, s]) / 2) + Bs_midpoint = Bs((z1[p, s] + z2[p, s]) / 2) err_3 = jnp.any(Bs_midpoint > 1 / pitch[p, s] + eps) if not (err_1[p, s] or err_2[p, s] or err_3): continue - _bp1 = bp1[p, s][mask[p, s]] - _bp2 = bp2[p, s][mask[p, s]] + _z1 = z1[p, s][mask[p, s]] + _z2 = z2[p, s][mask[p, s]] if plot: plot_ppoly( ppoly=Bs, - z1=_bp1, - z2=_bp2, + z1=_z1, + z2=_z2, k=1 / pitch[p, s], **kwargs, ) - print(" bp1 | bp2") - print(jnp.column_stack([_bp1, _bp2])) + print(" z1 | z2") + print(jnp.column_stack([_z1, _z2])) assert not err_1[p, s], "Intersects have an inversion.\n" assert not err_2[p, s], "Detected discontinuity.\n" assert not err_3, ( f"Detected |B| = {Bs_midpoint[mask[p, s]]} > {1 / pitch[p, s] + eps} " - "= 1/λ in well, implying a path between bounce points is in " - "hypograph(|B|). Use more knots.\n" + "= 1/λ in well, implying the straight line path between bounce points " + "is in hypograph(|B|). Use more knots.\n" ) if plot: plot_ppoly( ppoly=Bs, - z1=bp1[:, s], - z2=bp2[:, s], + z1=z1[:, s], + z2=z2[:, s], k=1 / pitch[:, s], **kwargs, ) @@ -294,8 +294,8 @@ def _check_bounce_points(bp1, bp2, pitch, knots, B, plot=True, **kwargs): def bounce_quadrature( x, w, - bp1, - bp2, + z1, + z2, pitch, integrand, f, @@ -316,11 +316,11 @@ def bounce_quadrature( w : jnp.ndarray Shape (w.size, ). Quadrature weights. - bp1, bp2 : jnp.ndarray + z1, z2 : jnp.ndarray Shape (P, S, num_well). - ζ coordinates of bounce points. - The pairs ``bp1`` and ``bp2`` form left and right integration boundaries, - respectively, for the bounce integrals. + ζ coordinates of bounce points. The points are grouped and ordered such + that the straight line path between the intersects in ``z1`` and ``z2`` + resides in the epigraph of |B|. pitch : jnp.ndarray Shape must broadcast with (P, S). λ values to evaluate the bounce integral at each field line. λ(ρ,α) is @@ -365,7 +365,7 @@ def bounce_quadrature( Last axis enumerates the bounce integrals. 
""" - errorif(bp1.ndim != 3 or bp1.shape != bp2.shape) + errorif(z1.ndim != 3 or z1.shape != z2.shape) errorif(x.ndim != 1 or x.shape != w.shape) pitch = jnp.atleast_2d(pitch) if not isinstance(f, (list, tuple)): @@ -375,7 +375,7 @@ def bounce_quadrature( if batch: result = _interpolate_and_integrate( w=w, - Q=bijection_from_disc(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]), + Q=bijection_from_disc(x, z1[..., jnp.newaxis], z2[..., jnp.newaxis]), pitch=pitch, integrand=integrand, f=f, @@ -389,12 +389,12 @@ def bounce_quadrature( f = list(f) # TODO: Use batched vmap. - def loop(bp): - bp1, bp2 = bp + def loop(z): + z1, z2 = z # Need to return tuple because input was tuple; artifact of JAX map. return None, _interpolate_and_integrate( w=w, - Q=bijection_from_disc(x, bp1[..., jnp.newaxis], bp2[..., jnp.newaxis]), + Q=bijection_from_disc(x, z1[..., jnp.newaxis], z2[..., jnp.newaxis]), pitch=pitch, integrand=integrand, f=f, @@ -406,13 +406,13 @@ def loop(bp): ) result = jnp.moveaxis( - imap(loop, (jnp.moveaxis(bp1, -1, 0), jnp.moveaxis(bp2, -1, 0)))[1], + imap(loop, (jnp.moveaxis(z1, -1, 0), jnp.moveaxis(z2, -1, 0)))[1], source=0, destination=-1, ) - result = result * grad_bijection_from_disc(bp1, bp2) - assert result.shape == (pitch.shape[0], data["|B|"].shape[0], bp1.shape[-1]) + result = result * grad_bijection_from_disc(z1, z2) + assert result.shape == (pitch.shape[0], data["|B|"].shape[0], z1.shape[-1]) return result @@ -464,14 +464,14 @@ def _interpolate_and_integrate( pitch = jnp.expand_dims(pitch, axis=(2, 3) if (Q.ndim == 4) else 2) shape = Q.shape Q = Q.reshape(Q.shape[0], Q.shape[1], -1) - b_sup_z = interp1d_vec_Hermite( + b_sup_z = interp1d_Hermite_vec( Q, knots, data["B^zeta"] / data["|B|"], data["B^zeta_z|r,a"] / data["|B|"] - data["B^zeta"] * data["|B|_z|r,a"] / data["|B|"] ** 2, ).reshape(shape) - B = interp1d_vec_Hermite(Q, knots, data["|B|"], data["|B|_z|r,a"]).reshape(shape) + B = interp1d_Hermite_vec(Q, knots, data["|B|"], data["|B|_z|r,a"]).reshape(shape) # Spline the integrand so that we can evaluate it at quadrature points without # expensive coordinate mappings and root finding. Spline each function separately so # that the singularity near the bounce points can be captured more accurately than @@ -493,11 +493,11 @@ def _check_interp(Q, f, b_sup_z, B, B_z_ra, result, plot): Q : jnp.ndarray Quadrature points in ζ coordinates. f : list of jnp.ndarray - Arguments to the integrand interpolated to Z. + Arguments to the integrand interpolated to Q. b_sup_z : jnp.ndarray - Contravariant toroidal component of magnetic field, interpolated to Z. + Contravariant toroidal component of magnetic field, interpolated to Q. B : jnp.ndarray - Norm of magnetic field, interpolated to Z. + Norm of magnetic field, interpolated to Q. B_z_ra : jnp.ndarray Norm of magnetic field, derivative with respect to ζ. result : jnp.ndarray @@ -559,7 +559,7 @@ def _plot_check_interp(Q, V, name=""): def _get_extrema(knots, B, dB_dz, sentinel=jnp.nan): - """Return extrema (ζ*, |B|(ζ*)). + """Return ext (ζ*, |B|(ζ*)). Parameters ---------- @@ -583,7 +583,7 @@ def _get_extrema(knots, B, dB_dz, sentinel=jnp.nan): Returns ------- - extrema, B_extrema : jnp.ndarray + ext, B_ext : jnp.ndarray Shape (S, (knots.size - 1) * (degree - 1)). First array enumerates ζ*. Second array enumerates |B|(ζ*) Sorted order of ζ* is not promised. 
@@ -591,17 +591,17 @@ def _get_extrema(knots, B, dB_dz, sentinel=jnp.nan): """ B, dB_dz, _ = _check_spline_shape(knots, B, dB_dz) S, degree = B.shape[1], B.shape[0] - 1 - extrema = poly_root( + ext = poly_root( c=dB_dz, a_min=jnp.array([0.0]), a_max=jnp.diff(knots), sentinel=sentinel ) - assert extrema.shape == (S, knots.size - 1, degree - 1) - B_extrema = polyval_vec(x=extrema, c=B[..., jnp.newaxis]).reshape(S, -1) + assert ext.shape == (S, knots.size - 1, degree - 1) + B_ext = polyval_vec(x=ext, c=B[..., jnp.newaxis]).reshape(S, -1) # Transform out of local power basis expansion. - extrema = (extrema + knots[:-1, jnp.newaxis]).reshape(S, -1) - return extrema, B_extrema + ext = (ext + knots[:-1, jnp.newaxis]).reshape(S, -1) + return ext, B_ext -def interp_to_argmin_B_soft(g, bp1, bp2, knots, B, dB_dz, method="cubic", beta=-50): +def interp_to_argmin_B_soft(g, z1, z2, knots, B, dB_dz, method="cubic", beta=-50): """Interpolate ``g`` to the deepest point in the magnetic well. Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A = argmin_E |B|(ζ). Returns mean_A g(ζ). @@ -618,12 +618,14 @@ def interp_to_argmin_B_soft(g, bp1, bp2, knots, B, dB_dz, method="cubic", beta=- """ ext, B = _get_extrema(knots, B, dB_dz, sentinel=0) - assert ext.shape[0] == B.shape[0] == bp1.shape[1] == bp2.shape[1] + assert ext.shape[0] == B.shape[0] == z1.shape[1] == z2.shape[1] + # TODO: Check if softmax has where argument that works like this. + # (numpy ufunc where typically does not.) argmin = softmax( beta * jnp.where( - (bp1[..., jnp.newaxis] < ext[:, jnp.newaxis]) - & (ext[:, jnp.newaxis] < bp2[..., jnp.newaxis]), + (z1[..., jnp.newaxis] < ext[:, jnp.newaxis]) + & (ext[:, jnp.newaxis] < z2[..., jnp.newaxis]), jnp.expand_dims(B / jnp.mean(B, axis=-1, keepdims=True), axis=1), 1e2, # >> max(|B|) / mean(|B|) ), @@ -633,12 +635,12 @@ def interp_to_argmin_B_soft(g, bp1, bp2, knots, B, dB_dz, method="cubic", beta=- argmin, interp1d_vec(ext, knots, jnp.atleast_2d(g), method=method)[:, jnp.newaxis], ) - assert g.shape == bp1.shape == bp2.shape + assert g.shape == z1.shape == z2.shape return g # Less efficient than soft if P >> 1. -def interp_to_argmin_B_hard(g, bp1, bp2, knots, B, dB_dz, method="cubic"): +def interp_to_argmin_B_hard(g, z1, z2, knots, B, dB_dz, method="cubic"): """Interpolate ``g`` to the deepest point in the magnetic well. Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A ∈ argmin_E |B|(ζ). Returns g(A). @@ -651,11 +653,11 @@ def interp_to_argmin_B_hard(g, bp1, bp2, knots, B, dB_dz, method="cubic"): """ ext, B = _get_extrema(knots, B, dB_dz, sentinel=0) - assert ext.shape[0] == B.shape[0] == bp1.shape[1] == bp2.shape[1] + assert ext.shape[0] == B.shape[0] == z1.shape[1] == z2.shape[1] argmin = jnp.argmin( jnp.where( - (bp1[..., jnp.newaxis] < ext[:, jnp.newaxis]) - & (ext[:, jnp.newaxis] < bp2[..., jnp.newaxis]), + (z1[..., jnp.newaxis] < ext[:, jnp.newaxis]) + & (ext[:, jnp.newaxis] < z2[..., jnp.newaxis]), B[:, jnp.newaxis], 1e2 + jnp.max(B), ), @@ -663,7 +665,7 @@ def interp_to_argmin_B_hard(g, bp1, bp2, knots, B, dB_dz, method="cubic"): ) A = jnp.take_along_axis(ext[jnp.newaxis], argmin, axis=-1) g = interp1d_vec(A, knots, jnp.atleast_2d(g), method=method) - assert g.shape == bp1.shape == bp2.shape + assert g.shape == z1.shape == z2.shape return g @@ -677,7 +679,7 @@ def plot_ppoly( klabel=r"$k$", title=r"Intersects $z$ in epigraph($f$) s.t. 
$f(z) = k$", hlabel=r"$z$", - vlabel=r"$f(z)$", + vlabel=r"$f$", show=True, start=None, stop=None, @@ -755,7 +757,7 @@ def plot_ppoly( ) ax.set_xlabel(hlabel) ax.set_ylabel(vlabel) - ax.legend(legend.values(), legend.keys()) + ax.legend(legend.values(), legend.keys(), loc="lower right") ax.set_title(title) plt.tight_layout() if show: diff --git a/desc/integrals/interp_utils.py b/desc/integrals/interp_utils.py index 883da3ef21..114d5faf05 100644 --- a/desc/integrals/interp_utils.py +++ b/desc/integrals/interp_utils.py @@ -2,6 +2,7 @@ from functools import partial +import numpy as np from interpax import interp1d from orthax.chebyshev import chebroots, chebvander from orthax.polynomial import polyvander @@ -165,14 +166,13 @@ def interp_rfft(xq, f, axis=-1): Real function value at query points. """ - assert f.ndim >= 1 a = rfft(f, axis=axis, norm="forward") fq = irfft_non_uniform(xq, a, f.shape[axis], axis) return fq def irfft_non_uniform(xq, a, n, axis=-1): - """Evaluate Fourier coefficients ``a`` at ``xq`` ∈ [0, 2π] periodic. + """Evaluate Fourier coefficients ``a`` at ``xq`` ∈ [0, 2π]. Parameters ---------- @@ -192,18 +192,17 @@ def irfft_non_uniform(xq, a, n, axis=-1): Real function value at query points. """ - assert a.ndim >= 1 + # |a| << |basis|, so move a instead of basis a = ( - (2.0 * a) - .at[Index.get(0, axis, a.ndim)] + jnp.moveaxis(a, axis, -1) + .at[..., 0] .divide(2.0) - .at[Index.get(-1, axis, a.ndim)] + .at[..., -1] .divide(1.0 + ((n % 2) == 0)) ) - a = jnp.moveaxis(a, axis, -1) m = jnp.fft.rfftfreq(n, d=1 / n) basis = jnp.exp(-1j * m * xq[..., jnp.newaxis]) - fq = jnp.linalg.vecdot(basis, a).real + fq = 2.0 * jnp.linalg.vecdot(basis, a).real # TODO: Test JAX does this optimization automatically. # ℜ〈 basis, a 〉= cos(m xq)⋅ℜ(a) − sin(m xq)⋅ℑ(a) return fq @@ -217,15 +216,17 @@ def interp_rfft2(xq, f, axes=(-2, -1)): xq : jnp.ndarray Shape (..., 2). Real query points where interpolation is desired. - Last axis must hold coordinates for a given point. Shape ``xq.shape[:-1]`` must broadcast with shape ``np.delete(f.shape,axes)``. + Last axis must hold coordinates for a given point. The coordinates stored + along ``xq[...,0]`` (``xq[...,1]``) must be the same coordinate enumerated + across axis ``min(axes)`` (``max(axes)``) of the function values ``f``. f : jnp.ndarray Shape (..., f.shape[-2], f.shape[-1]). Real (2π × 2π) periodic function values on uniform tensor-product grid to interpolate. axes : tuple[int, int] Axes along which to transform. - The real transform is done along ``axes[-1]``, so it will be more + The real transform is done along ``axes[1]``, so it will be more efficient for that to denote the larger size axis in ``axes``. Returns @@ -234,15 +235,13 @@ def interp_rfft2(xq, f, axes=(-2, -1)): Real function value at query points. """ - assert xq.shape[-1] == 2 - assert f.ndim >= 2 a = rfft2(f, axes=axes, norm="forward") - fq = irfft2_non_uniform(xq, a, f.shape[axes[0]], f.shape[axes[-1]], axes) + fq = irfft2_non_uniform(xq, a, f.shape[axes[0]], f.shape[axes[1]], axes) return fq def irfft2_non_uniform(xq, a, M, N, axes=(-2, -1)): - """Evaluate Fourier coefficients ``a`` at ``xq`` ∈ [0, 2π]² periodic. + """Evaluate Fourier coefficients ``a`` at ``xq`` ∈ [0, 2π]². Parameters ---------- @@ -251,6 +250,9 @@ def irfft2_non_uniform(xq, a, M, N, axes=(-2, -1)): Real query points where interpolation is desired. Last axis must hold coordinates for a given point. Shape ``xq.shape[:-1]`` must broadcast with shape ``np.delete(a.shape,axes)``. 
+ Last axis must hold coordinates for a given point. The coordinates stored + along ``xq[...,0]`` (``xq[...,1]``) must be the same coordinate enumerated + across axis ``min(axes)`` (``max(axes)``) of the Fourier coefficients ``a``. a : jnp.ndarray Shape (..., a.shape[-2], a.shape[-1]). Fourier coefficients ``a=rfft2(f,axes=axes,norm="forward")``. @@ -267,29 +269,29 @@ def irfft2_non_uniform(xq, a, M, N, axes=(-2, -1)): Real function value at query points. """ - assert xq.shape[-1] == 2 - assert a.ndim >= 2 + errorif(not (len(axes) == xq.shape[-1] == 2), msg="This is a 2D transform.") + errorif(a.ndim < 2, msg=f"Dimension mismatch, a.shape: {a.shape}.") + + # |a| << |basis|, so move a instead of basis a = ( - (2.0 * a) - .at[Index.get(0, axes[-1], a.ndim)] + jnp.moveaxis(a, source=axes, destination=(-2, -1)) + .at[..., 0] .divide(2.0) - .at[Index.get(-1, axes[-1], a.ndim)] + .at[..., -1] .divide(1.0 + ((N % 2) == 0)) ) - a = jnp.moveaxis(a, source=axes, destination=(-2, -1)) - a = a.reshape(*a.shape[:-2], -1) m = jnp.fft.fftfreq(M, d=1 / M) n = jnp.fft.rfftfreq(N, d=1 / N) + idx = np.argsort(axes) basis = jnp.exp( - -1j + 1j * ( - (m * xq[..., 0, jnp.newaxis])[..., jnp.newaxis] - + (n * xq[..., -1, jnp.newaxis])[..., jnp.newaxis, :] + (m * xq[..., idx[0], jnp.newaxis])[..., jnp.newaxis] + + (n * xq[..., idx[1], jnp.newaxis])[..., jnp.newaxis, :] ) - ).reshape(*xq.shape[:-1], m.size * n.size) - - fq = jnp.linalg.vecdot(basis, a).real + ) + fq = 2.0 * jnp.sum(basis * a, axis=(-2, -1)).real return fq @@ -312,8 +314,8 @@ def transform_to_desc(grid, f): """ f = grid.meshgrid_reshape(f, order="rtz") + # Real fft done over poloidal since num_theta > num_zeta usually. a = rfft2(f, axes=(-1, -2), norm="forward") - # Real fft done over poloidal since grid.num_theta > grid.num_zeta usually. assert a.shape == (grid.num_rho, grid.num_theta // 2 + 1, grid.num_zeta) return a @@ -365,7 +367,6 @@ def interp_dct(xq, f, lobatto=False, axis=-1): """ lobatto = bool(lobatto) errorif(lobatto, NotImplementedError, "JAX hasn't implemented type 1 DCT.") - assert f.ndim >= 1 a = cheb_from_dct(dct(f, type=2 - lobatto, axis=axis), axis) / ( f.shape[axis] - lobatto ) @@ -394,7 +395,6 @@ def idct_non_uniform(xq, a, n, axis=-1): Real function value at query points. """ - assert a.ndim >= 1 a = jnp.moveaxis(a, axis, -1) # Could use Clenshaw recursion with fq = chebval(xq, a, tensor=False). basis = chebvander(xq, n - 1) @@ -467,14 +467,15 @@ def polyval_vec(x, c): return val +# Warning: method must be specified as keyword argument. interp1d_vec = jnp.vectorize( interp1d, signature="(m),(n),(n)->(m)", excluded={"method"} ) @partial(jnp.vectorize, signature="(m),(n),(n),(n)->(m)") -def interp1d_vec_Hermite(xq, x, f, fx): - """Vectorized cubic Hermite spline.""" +def interp1d_Hermite_vec(xq, x, f, fx): + """Vectorized cubic Hermite spline. Does not support keyword arguments.""" return interp1d(xq, x, f, method="cubic", fx=fx) diff --git a/desc/utils.py b/desc/utils.py index 9bfaf7b21e..27b5fa79ad 100644 --- a/desc/utils.py +++ b/desc/utils.py @@ -696,6 +696,10 @@ def broadcast_tree(tree_in, tree_out, dtype=int): def take_mask(a, mask, size=None, fill_value=None): """JIT compilable method to return ``a[mask][:size]`` padded by ``fill_value``. + Warnings + -------- + The parameters ``size`` and ``fill_value`` must be specified as keyword arguments. 
+ Parameters ---------- a : jnp.ndarray diff --git a/tests/test_fourier_bounce.py b/tests/test_fourier_bounce.py index cfa827e904..714afd453c 100644 --- a/tests/test_fourier_bounce.py +++ b/tests/test_fourier_bounce.py @@ -4,7 +4,6 @@ import pytest from matplotlib import pyplot as plt from numpy.polynomial.chebyshev import chebinterpolate, chebroots -from numpy.polynomial.legendre import leggauss from tests.test_integrals import TestBounce1D from tests.test_plotting import tol_1d @@ -79,7 +78,7 @@ def f(z): def test_fourier_chebyshev(rho=1, M=8, N=32, f=lambda B, pitch: B * pitch): """Test bounce points...""" eq = get("W7-X") - clebsch = FourierChebyshevBasis.nodes(M, N, rho=rho) + clebsch = FourierChebyshevBasis.nodes(M, N, L=rho) desc_from_clebsch = map_coordinates( eq, clebsch, @@ -143,8 +142,7 @@ def test_drift(): np.testing.assert_allclose(data["psi"], psi) np.testing.assert_allclose(data["iota"], iota) assert np.all(data["B^zeta"] > 0) - B_ref = 2 * np.abs(psi_boundary) / data["a"] ** 2 - data["B ref"] = B_ref + data["Bref"] = 2 * np.abs(psi_boundary) / data["a"] ** 2 data["rho"] = rho data["alpha"] = alpha data["zeta"] = zeta @@ -159,21 +157,17 @@ def test_drift(): data_2 = eq.compute( names=Bounce2D.required_names() + ["cvdrift", "gbdrift"], grid=grid ) - normalization = -np.sign(data["psi"]) * data["B ref"] * data["a"] ** 2 - data_2["cvdrift"] = data_2["cvdrift"] * normalization - data_2["gbdrift"] = data_2["gbdrift"] * normalization M, N = eq.M_grid, 20 - fb = Bounce2D( - grid, - data_2, - M, - N, + bounce = Bounce2D( + grid=grid, + data=data_2, desc_from_clebsch=Bounce2D.desc_from_clebsch(eq, rho, M, N), + M=M, + N=N, alpha_0=data["alpha"], num_transit=1, - B_ref=data["B ref"], - L_ref=data["a"], - quad=leggauss(50), # converges to absolute and relative tolerance of 1e-7 + Bref=data["Bref"], + Lref=data["a"], check=True, plot=True, ) @@ -185,13 +179,16 @@ def integrand_num(cvdrift, gbdrift, B, pitch): def integrand_den(B, pitch): return 1 / jnp.sqrt(1 - pitch * B) - drift_numerical_num = fb.integrate( + normalization = -np.sign(data["psi"]) * data["Bref"] * data["a"] ** 2 + drift_numerical_num = bounce.integrate( pitch=pitch[:, np.newaxis], integrand=integrand_num, - f=Bounce2D.reshape_data(grid, data_2, ["cvdrift", "gbdrift"]), + f=Bounce2D.reshape_data( + grid, data_2["cvdrift"] * normalization, data_2["gbdrift"] * normalization + ), num_well=1, ) - drift_numerical_den = fb.integrate( + drift_numerical_den = bounce.integrate( pitch=pitch[:, np.newaxis], integrand=integrand_den, f=[], diff --git a/tests/test_integrals.py b/tests/test_integrals.py index ad9726b310..0379844326 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -724,13 +724,13 @@ class TestBouncePoints: """Test that bounce points are computed correctly.""" @staticmethod - def filter(bp1, bp2): + def filter(z1, z2): """Remove bounce points whose integrals have zero measure.""" - mask = (bp1 - bp2) != 0.0 - return bp1[mask], bp2[mask] + mask = (z1 - z2) != 0.0 + return z1[mask], z2[mask] @pytest.mark.unit - def test_bp1_first(self): + def test_z1_first(self): """Test that bounce points are computed correctly.""" start = np.pi / 3 end = 6 * np.pi @@ -738,14 +738,14 @@ def test_bp1_first(self): B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots)) pitch = 2.0 intersect = B.solve(1 / pitch, extrapolate=False) - bp1, bp2 = bounce_points(pitch, knots, B.c, B.derivative().c, check=True) - bp1, bp2 = TestBouncePoints.filter(bp1, bp2) - assert bp1.size and bp2.size - np.testing.assert_allclose(bp1, 
intersect[0::2]) - np.testing.assert_allclose(bp2, intersect[1::2]) + z1, z2 = bounce_points(pitch, knots, B.c, B.derivative().c, check=True) + z1, z2 = TestBouncePoints.filter(z1, z2) + assert z1.size and z2.size + np.testing.assert_allclose(z1, intersect[0::2]) + np.testing.assert_allclose(z2, intersect[1::2]) @pytest.mark.unit - def test_bp2_first(self): + def test_z2_first(self): """Test that bounce points are computed correctly.""" start = -3 * np.pi end = -start @@ -753,14 +753,14 @@ def test_bp2_first(self): B = CubicHermiteSpline(k, np.cos(k), -np.sin(k)) pitch = 2.0 intersect = B.solve(1 / pitch, extrapolate=False) - bp1, bp2 = bounce_points(pitch, k, B.c, B.derivative().c, check=True) - bp1, bp2 = TestBouncePoints.filter(bp1, bp2) - assert bp1.size and bp2.size - np.testing.assert_allclose(bp1, intersect[1:-1:2]) - np.testing.assert_allclose(bp2, intersect[0::2][1:]) + z1, z2 = bounce_points(pitch, k, B.c, B.derivative().c, check=True) + z1, z2 = TestBouncePoints.filter(z1, z2) + assert z1.size and z2.size + np.testing.assert_allclose(z1, intersect[1:-1:2]) + np.testing.assert_allclose(z2, intersect[0::2][1:]) @pytest.mark.unit - def test_bp1_before_extrema(self): + def test_z1_before_extrema(self): """Test that bounce points are computed correctly.""" start = -np.pi end = -2 * start @@ -770,18 +770,18 @@ def test_bp1_before_extrema(self): ) dB_dz = B.derivative() pitch = 1 / B(dB_dz.roots(extrapolate=False))[3] + 1e-13 - bp1, bp2 = bounce_points(pitch, k, B.c, dB_dz.c, check=True) - bp1, bp2 = TestBouncePoints.filter(bp1, bp2) - assert bp1.size and bp2.size + z1, z2 = bounce_points(pitch, k, B.c, dB_dz.c, check=True) + z1, z2 = TestBouncePoints.filter(z1, z2) + assert z1.size and z2.size intersect = B.solve(1 / pitch, extrapolate=False) - np.testing.assert_allclose(bp1[1], 1.982767, rtol=1e-6) - np.testing.assert_allclose(bp1, intersect[[1, 2]], rtol=1e-6) + np.testing.assert_allclose(z1[1], 1.982767, rtol=1e-6) + np.testing.assert_allclose(z1, intersect[[1, 2]], rtol=1e-6) # intersect array could not resolve double root as single at index 2,3 np.testing.assert_allclose(intersect[2], intersect[3], rtol=1e-6) - np.testing.assert_allclose(bp2, intersect[[3, 4]], rtol=1e-6) + np.testing.assert_allclose(z2, intersect[[3, 4]], rtol=1e-6) @pytest.mark.unit - def test_bp2_before_extrema(self): + def test_z2_before_extrema(self): """Test that bounce points are computed correctly.""" start = -1.2 * np.pi end = -2 * start @@ -793,15 +793,15 @@ def test_bp2_before_extrema(self): ) dB_dz = B.derivative() pitch = 1 / B(dB_dz.roots(extrapolate=False))[2] - bp1, bp2 = bounce_points(pitch, k, B.c, dB_dz.c, check=True) - bp1, bp2 = TestBouncePoints.filter(bp1, bp2) - assert bp1.size and bp2.size + z1, z2 = bounce_points(pitch, k, B.c, dB_dz.c, check=True) + z1, z2 = TestBouncePoints.filter(z1, z2) + assert z1.size and z2.size intersect = B.solve(1 / pitch, extrapolate=False) - np.testing.assert_allclose(bp1, intersect[[0, -2]]) - np.testing.assert_allclose(bp2, intersect[[1, -1]]) + np.testing.assert_allclose(z1, intersect[[0, -2]]) + np.testing.assert_allclose(z2, intersect[[1, -1]]) @pytest.mark.unit - def test_extrema_first_and_before_bp1(self): + def test_extrema_first_and_before_z1(self): """Test that bounce points are computed correctly.""" start = -1.2 * np.pi end = -2 * start @@ -813,20 +813,20 @@ def test_extrema_first_and_before_bp1(self): ) dB_dz = B.derivative() pitch = 1 / B(dB_dz.roots(extrapolate=False))[2] - 1e-13 - bp1, bp2 = bounce_points( + z1, z2 = bounce_points( pitch, 
k[2:], B.c[:, 2:], dB_dz.c[:, 2:], check=True, plot=False ) - plot_ppoly(B, z1=bp1, z2=bp2, k=1 / pitch, start=k[2]) - bp1, bp2 = TestBouncePoints.filter(bp1, bp2) - assert bp1.size and bp2.size + plot_ppoly(B, z1=z1, z2=z2, k=1 / pitch, start=k[2]) + z1, z2 = TestBouncePoints.filter(z1, z2) + assert z1.size and z2.size intersect = B.solve(1 / pitch, extrapolate=False) - np.testing.assert_allclose(bp1[0], 0.835319, rtol=1e-6) + np.testing.assert_allclose(z1[0], 0.835319, rtol=1e-6) intersect = intersect[intersect >= k[2]] - np.testing.assert_allclose(bp1, intersect[[0, 2, 4]], rtol=1e-6) - np.testing.assert_allclose(bp2, intersect[[0, 3, 5]], rtol=1e-6) + np.testing.assert_allclose(z1, intersect[[0, 2, 4]], rtol=1e-6) + np.testing.assert_allclose(z2, intersect[[0, 3, 5]], rtol=1e-6) @pytest.mark.unit - def test_extrema_first_and_before_bp2(self): + def test_extrema_first_and_before_z2(self): """Test that bounce points are computed correctly.""" start = -1.2 * np.pi end = -2 * start + 1 @@ -838,16 +838,16 @@ def test_extrema_first_and_before_bp2(self): ) dB_dz = B.derivative() pitch = 1 / B(dB_dz.roots(extrapolate=False))[1] + 1e-13 - bp1, bp2 = bounce_points(pitch, k, B.c, dB_dz.c, check=True) - bp1, bp2 = TestBouncePoints.filter(bp1, bp2) - assert bp1.size and bp2.size + z1, z2 = bounce_points(pitch, k, B.c, dB_dz.c, check=True) + z1, z2 = TestBouncePoints.filter(z1, z2) + assert z1.size and z2.size # Our routine correctly detects intersection, while scipy, jnp.root fails. intersect = B.solve(1 / pitch, extrapolate=False) - np.testing.assert_allclose(bp1[0], -0.671904, rtol=1e-6) - np.testing.assert_allclose(bp1, intersect[[0, 3, 5]], rtol=1e-5) + np.testing.assert_allclose(z1[0], -0.671904, rtol=1e-6) + np.testing.assert_allclose(z1, intersect[[0, 3, 5]], rtol=1e-5) # intersect array could not resolve double root as single at index 0,1 np.testing.assert_allclose(intersect[0], intersect[1], rtol=1e-5) - np.testing.assert_allclose(bp2, intersect[[2, 4, 6]], rtol=1e-5) + np.testing.assert_allclose(z2, intersect[[2, 4, 6]], rtol=1e-5) @pytest.mark.unit def test_get_extrema(self): @@ -905,12 +905,12 @@ def test_bounce_quadrature(self, is_strong, quad, automorphism): m = 1 - p # Some prime number that doesn't appear anywhere in calculation. # Ensures no lucky cancellation occurs from this test case since otherwise - # (bp2 - bp1) / pi = pi / (bp2 - bp1) which could mask errors since pi + # (z2 - z1) / pi = pi / (z2 - z1) which could mask errors since pi # appears often in transformations. 
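The renamed bounce point tests above all follow the same pattern; a condensed standalone version of that check, using scipy's spline root finding directly rather than ``bounce_points``, may help when reading them:

    import numpy as np
    from scipy.interpolate import CubicHermiteSpline

    knots = np.linspace(np.pi / 3, 6 * np.pi, 100)
    B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots))
    pitch = 2.0
    # Bounce points are where |B| crosses 1/pitch along the field line.
    crossings = np.sort(B.solve(1 / pitch, extrapolate=False))
    np.testing.assert_allclose(B(crossings), 1 / pitch, atol=1e-7)
    z1, z2 = crossings[0::2], crossings[1::2]
    # |B| decreases through each z1 and increases through each z2, so each
    # interval (z1, z2) lies inside a magnetic well.
    assert np.all(B(z1, 1) < 0) and np.all(B(z2, 1) > 0)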
v = 7 - bp1 = -np.pi / 2 * v - bp2 = -bp1 - knots = np.linspace(bp1, bp2, 50) + z1 = -np.pi / 2 * v + z2 = -z1 + knots = np.linspace(z1, z2, 50) pitch = 1 + 50 * jnp.finfo(jnp.array(1.0).dtype).eps b = np.clip(np.sin(knots / v) ** 2, 1e-7, 1) db = np.sin(2 * knots / v) / v @@ -1125,8 +1125,8 @@ def dB_dz(z): f(argmin), func( f(zeta), - bp1=np.array(0, ndmin=3), - bp2=np.array(2 * np.pi, ndmin=3), + z1=np.array(0, ndmin=3), + z2=np.array(2 * np.pi, ndmin=3), knots=zeta, B=bounce.B, dB_dz=bounce._dB_dz, @@ -1263,8 +1263,7 @@ def test_drift(self): np.testing.assert_allclose(data["psi"], psi) np.testing.assert_allclose(data["iota"], iota) assert np.all(data["B^zeta"] > 0) - Bref = 2 * np.abs(psi_boundary) / data["a"] ** 2 - data["Bref"] = Bref + data["Bref"] = 2 * np.abs(psi_boundary) / data["a"] ** 2 data["rho"] = rho data["alpha"] = alpha data["zeta"] = zeta @@ -1279,7 +1278,7 @@ def test_drift(self): grid.source_grid, data, quad=leggauss(28), # converges to absolute and relative tolerance of 1e-7 - Bref=Bref, + Bref=data["Bref"], Lref=data["a"], check=True, ) diff --git a/tests/test_interp_utils.py b/tests/test_interp_utils.py index 14b8456a28..78b25f5990 100644 --- a/tests/test_interp_utils.py +++ b/tests/test_interp_utils.py @@ -218,9 +218,14 @@ def test_interp_rfft2(self, func, m, n): assert not np.any(np.isclose(xq[..., 0, np.newaxis], x)) assert not np.any(np.isclose(xq[..., 1, np.newaxis], y)) x, y = map(np.ravel, list(np.meshgrid(x, y, indexing="ij"))) + truth = func(xq[..., 0], xq[..., 1]) np.testing.assert_allclose( - interp_rfft2(xq, func(x, y).reshape(m, n)), - func(xq[..., 0], xq[..., 1]), + interp_rfft2(xq, func(x, y).reshape(m, n), axes=(-2, -1)), + truth, + ) + np.testing.assert_allclose( + interp_rfft2(xq, func(x, y).reshape(m, n), axes=(-1, -2)), + truth, ) @staticmethod From 1a24a43adfd711e4234645cb43bbb2b6c34a3d49 Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 26 Aug 2024 17:41:18 -0400 Subject: [PATCH 219/241] Fix bug in Fourier bounce with interpolation of b_sup_z --- desc/compute/_bootstrap.py | 2 +- desc/compute/_equil.py | 2 +- desc/compute/_field.py | 2 +- desc/compute/_metric.py | 2 +- desc/compute/_profiles.py | 2 +- desc/compute/_stability.py | 2 +- desc/integrals/basis.py | 168 +++++++++--------- desc/integrals/bounce_integral.py | 52 ++++-- desc/integrals/bounce_utils.py | 4 +- desc/integrals/interp_utils.py | 27 +-- desc/integrals/quad_utils.py | 33 ++-- tests/test_fourier_bounce.py | 206 --------------------- tests/test_integrals.py | 286 ++++++++++++++++++++++++++---- tests/test_interp_utils.py | 127 ++++++------- tests/test_quad_utils.py | 26 ++- 15 files changed, 486 insertions(+), 455 deletions(-) delete mode 100644 tests/test_fourier_bounce.py diff --git a/desc/compute/_bootstrap.py b/desc/compute/_bootstrap.py index 48af83b4e5..2329682c06 100644 --- a/desc/compute/_bootstrap.py +++ b/desc/compute/_bootstrap.py @@ -13,7 +13,7 @@ from scipy.special import roots_legendre from ..backend import fori_loop, jnp -from ..integrals import surface_averages_map +from ..integrals.surface_integral import surface_averages_map from .data_index import register_compute_fun diff --git a/desc/compute/_equil.py b/desc/compute/_equil.py index 7cd01491ed..2cb7a93607 100644 --- a/desc/compute/_equil.py +++ b/desc/compute/_equil.py @@ -14,7 +14,7 @@ from desc.backend import jnp -from ..integrals import surface_averages +from ..integrals.surface_integral import surface_averages from .data_index import register_compute_fun from .utils import cross, dot, safediv, safenorm diff 
--git a/desc/compute/_field.py b/desc/compute/_field.py index 31a9d58a18..8af2e8368c 100644 --- a/desc/compute/_field.py +++ b/desc/compute/_field.py @@ -13,7 +13,7 @@ from desc.backend import jnp -from ..integrals import ( +from ..integrals.surface_integral import ( surface_averages, surface_integrals_map, surface_max, diff --git a/desc/compute/_metric.py b/desc/compute/_metric.py index 536bd05bb7..ceb6703386 100644 --- a/desc/compute/_metric.py +++ b/desc/compute/_metric.py @@ -13,7 +13,7 @@ from desc.backend import jnp -from ..integrals import surface_averages +from ..integrals.surface_integral import surface_averages from .data_index import register_compute_fun from .utils import cross, dot, safediv, safenorm diff --git a/desc/compute/_profiles.py b/desc/compute/_profiles.py index 84de48e576..4a647fdfa2 100644 --- a/desc/compute/_profiles.py +++ b/desc/compute/_profiles.py @@ -13,7 +13,7 @@ from desc.backend import cond, jnp -from ..integrals import surface_averages, surface_integrals +from ..integrals.surface_integral import surface_averages, surface_integrals from .data_index import register_compute_fun from .utils import cumtrapz, dot, safediv diff --git a/desc/compute/_stability.py b/desc/compute/_stability.py index 4a985a4dc5..3b820f83b0 100644 --- a/desc/compute/_stability.py +++ b/desc/compute/_stability.py @@ -13,7 +13,7 @@ from desc.backend import jnp -from ..integrals import surface_integrals_map +from ..integrals.surface_integral import surface_integrals_map from .data_index import register_compute_fun from .utils import dot diff --git a/desc/integrals/basis.py b/desc/integrals/basis.py index 68422ea680..0baa6ae80c 100644 --- a/desc/integrals/basis.py +++ b/desc/integrals/basis.py @@ -319,6 +319,86 @@ def N(self): """Chebyshev spectral resolution.""" return self.cheb.shape[-1] + def isomorphism_to_C1(self, y): + """Return coordinates z ∈ ℂ isomorphic to (x, y) ∈ ℂ². + + Maps row x of y to z = y + f(x) where f(x) = x * |domain|. + + Parameters + ---------- + y : jnp.ndarray + Shape (..., y.shape[-2], y.shape[-1]). + Second to last axis iterates the rows. + + Returns + ------- + z : jnp.ndarray + Shape y.shape. + Isomorphic coordinates. + + """ + assert y.ndim >= 2 + z_shift = jnp.arange(y.shape[-2]) * (self.domain[-1] - self.domain[0]) + z = y + z_shift[:, jnp.newaxis] + return z + + def isomorphism_to_C2(self, z): + """Return coordinates (x, y) ∈ ℂ² isomorphic to z ∈ ℂ. + + Returns index x and minimum value y such that + z = f(x) + y where f(x) = x * |domain|. + + Parameters + ---------- + z : jnp.ndarray + Shape z.shape. + + Returns + ------- + x_idx, y_val : (jnp.ndarray, jnp.ndarray) + Shape z.shape. + Isomorphic coordinates. + + """ + x_idx, y_val = jnp.divmod(z - self.domain[0], self.domain[-1] - self.domain[0]) + x_idx = x_idx.astype(int) + y_val += self.domain[0] + return x_idx, y_val + + def eval1d(self, z, cheb=None): + """Evaluate piecewise Chebyshev series at coordinates z. + + Parameters + ---------- + z : jnp.ndarray + Shape (..., *cheb.shape[:-2], z.shape[-1]). + Coordinates in [sef.domain[0], ∞). + The coordinates z ∈ ℝ are assumed isomorphic to (x, y) ∈ ℝ² where + ``z // domain`` yields the index into the proper Chebyshev series + along the second to last axis of ``cheb`` and ``z % domain`` is + the coordinate value on the domain of that Chebyshev series. + cheb : jnp.ndarray + Shape (..., M, N). + Chebyshev coefficients to use. If not given, uses ``self.cheb``. + + Returns + ------- + f : jnp.ndarray + Shape z.shape. + Chebyshev basis evaluated at z. 
+ + """ + cheb = _chebcast(setdefault(cheb, self.cheb), z) + N = cheb.shape[-1] + x_idx, y = self.isomorphism_to_C2(z) + y = bijection_to_disc(y, self.domain[0], self.domain[1]) + # Chebyshev coefficients αₙ for f(z) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x[z]) Tₙ(y[z]) + # are held in cheb with shape (..., num cheb series, N). + cheb = jnp.take_along_axis(cheb, x_idx[..., jnp.newaxis], axis=-2) + f = idct_non_uniform(y, cheb, N) + assert f.shape == z.shape + return f + def intersect2d(self, k=0.0, eps=_eps): """Coordinates yᵢ such that f(x, yᵢ) = k(x). @@ -434,86 +514,6 @@ def intersect1d(self, k=0.0, num_intersect=None, pad_value=0.0): z2 = jnp.where(mask, z2, pad_value) return z1, z2 - def eval1d(self, z, cheb=None): - """Evaluate piecewise Chebyshev series at coordinates z. - - Parameters - ---------- - z : jnp.ndarray - Shape (..., *cheb.shape[:-2], z.shape[-1]). - Coordinates in [sef.domain[0], ∞). - The coordinates z ∈ ℝ are assumed isomorphic to (x, y) ∈ ℝ² where - ``z // domain`` yields the index into the proper Chebyshev series - along the second to last axis of ``cheb`` and ``z % domain`` is - the coordinate value on the domain of that Chebyshev series. - cheb : jnp.ndarray - Shape (..., M, N). - Chebyshev coefficients to use. If not given, uses ``self.cheb``. - - Returns - ------- - f : jnp.ndarray - Shape z.shape. - Chebyshev basis evaluated at z. - - """ - cheb = _chebcast(setdefault(cheb, self.cheb), z) - N = cheb.shape[-1] - x_idx, y = self.isomorphism_to_C2(z) - y = bijection_to_disc(y, self.domain[0], self.domain[1]) - # Chebyshev coefficients αₙ for f(z) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x[z]) Tₙ(y[z]) - # are held in cheb with shape (..., num cheb series, N). - cheb = jnp.take_along_axis(cheb, x_idx[..., jnp.newaxis], axis=-2) - f = idct_non_uniform(y, cheb, N) - assert f.shape == z.shape - return f - - def isomorphism_to_C1(self, y): - """Return coordinates z ∈ ℂ isomorphic to (x, y) ∈ ℂ². - - Maps row x of y to z = y + f(x) where f(x) = x * |domain|. - - Parameters - ---------- - y : jnp.ndarray - Shape (..., y.shape[-2], y.shape[-1]). - Second to last axis iterates the rows. - - Returns - ------- - z : jnp.ndarray - Shape y.shape. - Isomorphic coordinates. - - """ - assert y.ndim >= 2 - z_shift = jnp.arange(y.shape[-2]) * (self.domain[-1] - self.domain[0]) - z = y + z_shift[:, jnp.newaxis] - return z - - def isomorphism_to_C2(self, z): - """Return coordinates (x, y) ∈ ℂ² isomorphic to z ∈ ℂ. - - Returns index x and minimum value y such that - z = f(x) + y where f(x) = x * |domain|. - - Parameters - ---------- - z : jnp.ndarray - Shape z.shape. - - Returns - ------- - x_idx, y_val : (jnp.ndarray, jnp.ndarray) - Shape z.shape. - Isomorphic coordinates. - - """ - x_idx, y_val = jnp.divmod(z - self.domain[0], self.domain[-1] - self.domain[0]) - x_idx = x_idx.astype(int) - y_val += self.domain[0] - return x_idx, y_val - def _check_shape(self, z1, z2, k): """Return shapes that broadcast with (k.shape[0], *self.cheb.shape[:-2], W).""" # Ensure pitch batch dim exists and add back dim to broadcast with wells. @@ -533,7 +533,9 @@ def check_intersect1d(self, z1, z2, k, plot=True, **kwargs): z1, z2 : jnp.ndarray Shape must broadcast with (*self.cheb.shape[:-2], W). ``z1``, ``z2`` holds intersects satisfying ∂f/∂y <= 0, ∂f/∂y >= 0, - respectively. + respectively. The points are grouped and ordered such that the + straight line path between the intersects in ``z1`` and ``z2`` + resides in the epigraph of f. k : jnp.ndarray Shape must broadcast with *self.cheb.shape[:-2]. k such that fₓ(yᵢ) = k. 
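The isomorphism helpers reordered in this hunk implement a simple unrolling of a stack of Chebyshev pieces onto one coordinate and back. A minimal numpy sketch of the same idea; the ``domain`` value and the stand-in functions here are illustrative, not the class methods:

    import numpy as np

    domain = (0.0, 2 * np.pi)
    d = domain[1] - domain[0]

    def to_C1(y):
        # row x of y maps to z = y + x * |domain|, laying the pieces end to end
        x = np.arange(y.shape[-2])
        return y + (x * d)[:, None]

    def to_C2(z):
        # inverse: piece index x and local coordinate y on that piece's domain
        x, y = np.divmod(z - domain[0], d)
        return x.astype(int), y + domain[0]

    y = np.linspace(0.5, 5.5, 12).reshape(3, 4)  # values inside the domain
    z = to_C1(y)
    x_idx, y_back = to_C2(z)
    np.testing.assert_allclose(y_back, y, atol=1e-12)
    assert np.array_equal(x_idx, np.broadcast_to(np.arange(3)[:, None], (3, 4)))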
@@ -582,8 +584,8 @@ def check_intersect1d(self, z1, z2, k, plot=True, **kwargs): assert not err_1[idx], "Intersects have an inversion.\n" assert not err_2[idx], "Detected discontinuity.\n" assert not err_3[idx], ( - "Detected f > k in well, implying a path between z1 and z2 " - "is in hypograph(f). Increase Chebyshev resolution.\n" + "Detected f > k in well, implying the straight line path between " + "z1 and z2 is in hypograph(f). Increase spectral resolution.\n" f"{f_midpoint[idx][mask[idx]]} > {k[idx] + self._eps}" ) idx = (slice(None), *l) diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index b78af94964..8f7df50243 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -3,7 +3,7 @@ from interpax import CubicHermiteSpline from orthax.legendre import leggauss -from desc.backend import jnp +from desc.backend import jnp, rfft2 from desc.integrals.basis import FourierChebyshevBasis from desc.integrals.bounce_utils import ( _check_bounce_points, @@ -13,12 +13,7 @@ interp_to_argmin_B_soft, plot_ppoly, ) -from desc.integrals.interp_utils import ( - interp_rfft2, - irfft2_non_uniform, - polyder_vec, - transform_to_desc, -) +from desc.integrals.interp_utils import interp_rfft2, irfft2_non_uniform, polyder_vec from desc.integrals.quad_utils import ( automorphism_sin, bijection_from_disc, @@ -70,6 +65,7 @@ def _transform_to_clebsch(grid, desc_from_clebsch, M, N, B): # which is not a tensor product node set in DESC space. xq=desc_from_clebsch[:, 1:].reshape(grid.num_rho, -1, 2), f=grid.meshgrid_reshape(B, order="rtz")[:, jnp.newaxis], + # Real fft over poloidal since usually num theta > num zeta. axes=(-1, -2), ).reshape(grid.num_rho, M, N), domain=Bounce2D.domain, @@ -77,6 +73,30 @@ def _transform_to_clebsch(grid, desc_from_clebsch, M, N, B): return T, B +def _transform_to_desc(grid, f): + """Transform to DESC spectral domain. + + Parameters + ---------- + grid : Grid + Tensor-product grid in (θ, ζ) with uniformly spaced nodes in + (2π × 2π) poloidal and toroidal coordinates. + f : jnp.ndarray + Function evaluated on ``grid``. + + Returns + ------- + a : jnp.ndarray + Shape (grid.num_rho, grid.num_theta // 2 + 1, grid.num_zeta) + Complex coefficients of 2D real FFT. + + """ + f = grid.meshgrid_reshape(f, order="rtz") + a = rfft2(f, axes=(-1, -2), norm="forward") + assert a.shape == (grid.num_rho, grid.num_theta // 2 + 1, grid.num_zeta) + return a + + # TODO: # After GitHub issue #1034 is resolved, we should pass in the previous # θ(α) coordinates as an initial guess for the next coordinate mapping. @@ -292,14 +312,20 @@ def __init__( ) self._m = grid.num_theta self._n = grid.num_zeta - self._b_sup_z = jnp.expand_dims( - transform_to_desc(grid, jnp.abs(data["B^zeta"]) / data["|B|"] * Lref), - axis=1, - ) self._x, self._w = get_quadrature(quad, automorphism) # Compute global splines. 
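``_transform_to_desc`` above stores the 2D real-FFT coefficients of a quantity on the (θ, ζ) grid so that ``irfft2_non_uniform`` can later evaluate them at arbitrary off-grid quadrature points. A self-contained numpy sketch of that round trip; the grid sizes, test function, and tolerance are illustrative only:

    import numpy as np

    M, N = 8, 16
    theta = np.linspace(0, 2 * np.pi, M, endpoint=False)
    zeta = np.linspace(0, 2 * np.pi, N, endpoint=False)
    f = lambda t, z: np.cos(2 * t) * np.sin(3 * z) + np.cos(t) - 0.5
    a = np.fft.rfft2(f(theta[:, None], zeta[None, :]), norm="forward")

    # Frequencies run over the full range on the first axis and over the
    # non-negative half on the real-transformed last axis; halve the DC and
    # (for even N) Nyquist columns before doubling the real part.
    m = np.fft.fftfreq(M, d=1 / M)
    k = np.fft.rfftfreq(N, d=1 / N)
    a[:, 0] /= 2.0
    a[:, -1] /= 1.0 + (N % 2 == 0)

    tq, zq = 0.37, 2.9  # an off-grid query point
    basis = np.exp(1j * (m[:, None] * tq + k[None, :] * zq))
    fq = 2.0 * np.sum(basis * a).real
    np.testing.assert_allclose(fq, f(tq, zq), atol=1e-12)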
- T, B = _transform_to_clebsch(grid, desc_from_clebsch, M, N, data["|B|"] / Bref) + self._b_sup_z = _transform_to_desc( + grid, + jnp.abs(data["B^zeta"]) / data["|B|"] * Lref, + )[:, jnp.newaxis] + T, B = _transform_to_clebsch( + grid, + desc_from_clebsch, + M, + N, + data["|B|"] / Bref, + ) # peel off field lines alphas = get_alpha( alpha_0, @@ -535,7 +561,7 @@ def _integrate(self, z1, z2, pitch, integrand, f): pitch=pitch[..., jnp.newaxis, jnp.newaxis], ) / irfft2_non_uniform( - Q, self._b_sup_z, self._m, self._n, axes=(-1, -2) + xq=Q, a=self._b_sup_z, M=self._n, N=self._m, axes=(-1, -2) ).reshape(shape), self._w, ) diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index c7349b7ba1..d500f1b338 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -278,8 +278,8 @@ def _check_bounce_points(z1, z2, pitch, knots, B, plot=True, **kwargs): assert not err_2[p, s], "Detected discontinuity.\n" assert not err_3, ( f"Detected |B| = {Bs_midpoint[mask[p, s]]} > {1 / pitch[p, s] + eps} " - "= 1/λ in well, implying the straight line path between bounce points " - "is in hypograph(|B|). Use more knots.\n" + "= 1/λ in well, implying the straight line path between " + "bounce points is in hypograph(|B|). Use more knots.\n" ) if plot: plot_ppoly( diff --git a/desc/integrals/interp_utils.py b/desc/integrals/interp_utils.py index 114d5faf05..dd4b5c6466 100644 --- a/desc/integrals/interp_utils.py +++ b/desc/integrals/interp_utils.py @@ -259,7 +259,7 @@ def irfft2_non_uniform(xq, a, M, N, axes=(-2, -1)): M : int Spectral resolution of ``a`` along ``axes[0]``. N : int - Spectral resolution of ``a`` along ``axes[-1]``. + Spectral resolution of ``a`` along ``axes[1]``. axes : tuple[int, int] Axes along which to transform. @@ -295,31 +295,6 @@ def irfft2_non_uniform(xq, a, M, N, axes=(-2, -1)): return fq -def transform_to_desc(grid, f): - """Transform to DESC spectral domain. - - Parameters - ---------- - grid : Grid - Tensor-product grid in (θ, ζ) with uniformly spaced nodes in - (2π × 2π) poloidal and toroidal coordinates. - f : jnp.ndarray - Function evaluated on ``grid``. - - Returns - ------- - a : jnp.ndarray - Shape (grid.num_rho, grid.num_theta // 2 + 1, grid.num_zeta) - Complex coefficients of 2D real FFT. - - """ - f = grid.meshgrid_reshape(f, order="rtz") - # Real fft done over poloidal since num_theta > num_zeta usually. - a = rfft2(f, axes=(-1, -2), norm="forward") - assert a.shape == (grid.num_rho, grid.num_theta // 2 + 1, grid.num_zeta) - return a - - def cheb_from_dct(a, axis=-1): """Get discrete Chebyshev transform from discrete cosine transform. diff --git a/desc/integrals/quad_utils.py b/desc/integrals/quad_utils.py index b14f691b02..89ad99d827 100644 --- a/desc/integrals/quad_utils.py +++ b/desc/integrals/quad_utils.py @@ -138,7 +138,7 @@ def tanh_sinh(deg, m=10): return x, w -def leggauss_lobatto(deg): +def leggauss_lob(deg, interior_only=False): """Lobatto-Gauss-Legendre quadrature. Returns quadrature points xₖ and weights wₖ for the approximate evaluation of the @@ -147,26 +147,30 @@ def leggauss_lobatto(deg): Parameters ---------- deg : int - Number of (interior) quadrature points to return. + Number of quadrature points. + interior_only : bool + Whether to exclude the points and weights at -1 and 1; + useful if f(-1) = f(1) = 0. If ``True``, then ``deg`` points are still + returned; these are the interior points for lobatto quadrature of ``deg+2``. 
Returns ------- x, w : (jnp.ndarray, jnp.ndarray) - Quadrature points in (-1, 1) and associated weights. - Excludes points and weights at -1 and 1. + Shape (deg, ). + Quadrature points and weights. """ - # Designate two degrees for endpoints. - deg = int(deg) + 2 + N = deg + 2 * bool(interior_only) + errorif(N < 2) - # Golub-Welsh algorithm for eigenvalues of orthogonal polynomials - n = jnp.arange(2, deg - 1) + # Golub-Welsh algorithm + n = jnp.arange(2, N - 1) x = eigh_tridiagonal( - jnp.zeros(deg - 2), + jnp.zeros(N - 2), jnp.sqrt((n**2 - 1) / (4 * n**2 - 1)), eigvals_only=True, ) - c0 = put(jnp.zeros(deg), -1, 1) + c0 = put(jnp.zeros(N), -1, 1) # improve (single multiplicity) roots by one application of Newton c = legder(c0) @@ -174,7 +178,14 @@ def leggauss_lobatto(deg): df = legval(x=x, c=legder(c)) x -= dy / df - w = 2 / (deg * (deg - 1) * legval(x=x, c=c0) ** 2) + w = 2 / (N * (N - 1) * legval(x=x, c=c0) ** 2) + + if not interior_only: + x = jnp.hstack([-1.0, x, 1.0]) + w_end = 2 / (deg * (deg - 1)) + w = jnp.hstack([w_end, w, w_end]) + + assert x.size == w.size == deg return x, w diff --git a/tests/test_fourier_bounce.py b/tests/test_fourier_bounce.py deleted file mode 100644 index 714afd453c..0000000000 --- a/tests/test_fourier_bounce.py +++ /dev/null @@ -1,206 +0,0 @@ -"""Test interpolation to Clebsch coordinates and Fourier bounce integration.""" - -import numpy as np -import pytest -from matplotlib import pyplot as plt -from numpy.polynomial.chebyshev import chebinterpolate, chebroots -from tests.test_integrals import TestBounce1D -from tests.test_plotting import tol_1d - -from desc.backend import jnp -from desc.equilibrium import Equilibrium -from desc.equilibrium.coords import get_rtz_grid, map_coordinates -from desc.examples import get -from desc.grid import LinearGrid -from desc.integrals import Bounce2D -from desc.integrals.basis import FourierChebyshevBasis -from desc.integrals.bounce_utils import get_alpha, get_pitch -from desc.integrals.interp_utils import fourier_pts - - -@pytest.mark.unit -@pytest.mark.parametrize( - "alpha_0, iota, num_period, period", - [(0, np.sqrt(2), 1, 2 * np.pi), (0, np.arange(1, 3) * np.sqrt(2), 5, 2 * np.pi)], -) -def test_alpha_sequence(alpha_0, iota, num_period, period): - """Test field line poloidal label tracking.""" - iota = np.atleast_1d(iota) - alphas = get_alpha(alpha_0, iota, num_period, period) - assert alphas.shape == (iota.size, num_period) - for i in range(iota.size): - assert np.unique(alphas[i]).size == num_period, f"{iota} is irrational" - print(alphas) - - -class TestBouncePoints: - """Test that bounce points are computed correctly.""" - - @staticmethod - def _cheb_intersect(cheb, k): - cheb = cheb.copy() - cheb[0] = cheb[0] - k - roots = chebroots(cheb) - intersect = roots[ - np.logical_and(np.isreal(roots), np.abs(roots.real) <= 1) - ].real - return intersect - - @staticmethod - def _periodic_fun(nodes, M, N): - alpha, zeta = nodes.T - f = -2 * np.cos(1 / (0.1 + zeta**2)) + 2 - return f.reshape(M, N) - - @pytest.mark.unit - def test_bp1_first(self): - """Test that bounce points are computed correctly.""" - M, N = 1, 10 - domain = (-1, 1) - nodes = FourierChebyshevBasis.nodes(M, N, domain=domain) - f = self._periodic_fun(nodes, M, N) - fcb = FourierChebyshevBasis(f, domain=domain) - pcb = fcb.compute_cheb(fourier_pts(M)) - pitch = 1 / np.linspace(1, 4, 20) - bp1, bp2 = pcb.intersect1d(pitch) - pcb.check_intersect1d(bp1, bp2, pitch) - bp1, bp2 = TestBouncePoints.filter(bp1, bp2) - - def f(z): - return -2 * np.cos(1 / (0.1 + 
z**2)) + 2 - - r = self._cheb_intersect(chebinterpolate(f, N), 1 / pitch) - np.testing.assert_allclose(bp1, r[::2], rtol=1e-3) - np.testing.assert_allclose(bp2, r[1::2], rtol=1e-3) - - -@pytest.mark.unit -def test_fourier_chebyshev(rho=1, M=8, N=32, f=lambda B, pitch: B * pitch): - """Test bounce points...""" - eq = get("W7-X") - clebsch = FourierChebyshevBasis.nodes(M, N, L=rho) - desc_from_clebsch = map_coordinates( - eq, - clebsch, - inbasis=("rho", "alpha", "zeta"), - period=(np.inf, 2 * np.pi, np.inf), - ) - grid = LinearGrid( - rho=rho, M=eq.M_grid, N=eq.N_grid, sym=False, NFP=eq.NFP - ) # check if NFP!=1 works - data = eq.compute( - names=Bounce2D.required_names() + ["min_tz |B|", "max_tz |B|"], grid=grid - ) - fb = Bounce2D( - grid, data, M, N, desc_from_clebsch, check=True, warn=False - ) # TODO check true - pitch = get_pitch( - grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 - ) - result = fb.integrate(f, [], pitch) # noqa: F841 - - -@pytest.mark.unit -@pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_1d) -def test_drift(): - """Test bounce-averaged drift with analytical expressions.""" - eq = Equilibrium.load(".//tests//inputs//low-beta-shifted-circle.h5") - psi_boundary = eq.Psi / (2 * np.pi) - psi = 0.25 * psi_boundary - rho = np.sqrt(psi / psi_boundary) - np.testing.assert_allclose(rho, 0.5) - - # Make a set of nodes along a single fieldline. - grid_fsa = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, sym=eq.sym, NFP=eq.NFP) - data = eq.compute(["iota"], grid=grid_fsa) - iota = grid_fsa.compress(data["iota"]).item() - alpha = 0 - zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) - grid = get_rtz_grid( - eq, - rho, - alpha, - zeta, - coordinates="raz", - period=(np.inf, 2 * np.pi, np.inf), - iota=np.array([iota]), - ) - data = eq.compute( - Bounce2D.required_names() - + [ - "cvdrift", - "gbdrift", - "grad(psi)", - "grad(alpha)", - "shear", - "iota", - "psi", - "a", - ], - grid=grid, - ) - np.testing.assert_allclose(data["psi"], psi) - np.testing.assert_allclose(data["iota"], iota) - assert np.all(data["B^zeta"] > 0) - data["Bref"] = 2 * np.abs(psi_boundary) / data["a"] ** 2 - data["rho"] = rho - data["alpha"] = alpha - data["zeta"] = zeta - data["psi"] = grid.compress(data["psi"]) - data["iota"] = grid.compress(data["iota"]) - data["shear"] = grid.compress(data["shear"]) - - # Compute analytic approximation. - drift_analytic, cvdrift, gbdrift, pitch = TestBounce1D.drift_analytic(data) - # Compute numerical result. 
- grid = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP) - data_2 = eq.compute( - names=Bounce2D.required_names() + ["cvdrift", "gbdrift"], grid=grid - ) - M, N = eq.M_grid, 20 - bounce = Bounce2D( - grid=grid, - data=data_2, - desc_from_clebsch=Bounce2D.desc_from_clebsch(eq, rho, M, N), - M=M, - N=N, - alpha_0=data["alpha"], - num_transit=1, - Bref=data["Bref"], - Lref=data["a"], - check=True, - plot=True, - ) - - def integrand_num(cvdrift, gbdrift, B, pitch): - g = jnp.sqrt(1 - pitch * B) - return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) - - def integrand_den(B, pitch): - return 1 / jnp.sqrt(1 - pitch * B) - - normalization = -np.sign(data["psi"]) * data["Bref"] * data["a"] ** 2 - drift_numerical_num = bounce.integrate( - pitch=pitch[:, np.newaxis], - integrand=integrand_num, - f=Bounce2D.reshape_data( - grid, data_2["cvdrift"] * normalization, data_2["gbdrift"] * normalization - ), - num_well=1, - ) - drift_numerical_den = bounce.integrate( - pitch=pitch[:, np.newaxis], - integrand=integrand_den, - f=[], - num_well=1, - ) - drift_numerical = np.squeeze(drift_numerical_num / drift_numerical_den) - msg = "There should be one bounce integral per pitch in this example." - assert drift_numerical.size == drift_analytic.size, msg - np.testing.assert_allclose(drift_numerical, drift_analytic, atol=5e-3, rtol=5e-2) - - fig, ax = plt.subplots() - ax.plot(1 / pitch, drift_analytic) - ax.plot(1 / pitch, drift_numerical) - plt.show() - return fig diff --git a/tests/test_integrals.py b/tests/test_integrals.py index 0379844326..e5d600f917 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -6,7 +6,7 @@ import pytest from jax import grad from matplotlib import pyplot as plt -from numpy.polynomial.chebyshev import chebgauss, chebweight +from numpy.polynomial.chebyshev import chebgauss, chebinterpolate, chebroots, chebweight from numpy.polynomial.legendre import leggauss from scipy import integrate from scipy.interpolate import CubicHermiteSpline @@ -17,11 +17,12 @@ from desc.basis import FourierZernikeBasis from desc.compute.utils import dot from desc.equilibrium import Equilibrium -from desc.equilibrium.coords import get_rtz_grid +from desc.equilibrium.coords import get_rtz_grid, map_coordinates from desc.examples import get from desc.grid import ConcentricGrid, Grid, LinearGrid, QuadratureGrid from desc.integrals import ( Bounce1D, + Bounce2D, DFTInterpolator, FFTInterpolator, line_integrals, @@ -34,20 +35,23 @@ surface_variance, virtual_casing_biot_savart, ) +from desc.integrals.basis import FourierChebyshevBasis from desc.integrals.bounce_utils import ( _get_extrema, bounce_points, + get_alpha, get_pitch, interp_to_argmin_B_hard, interp_to_argmin_B_soft, plot_ppoly, ) +from desc.integrals.interp_utils import fourier_pts from desc.integrals.quad_utils import ( automorphism_sin, bijection_from_disc, grad_automorphism_sin, grad_bijection_from_disc, - leggauss_lobatto, + leggauss_lob, tanh_sinh, ) from desc.integrals.singularities import _get_quadrature_nodes @@ -720,7 +724,7 @@ def test_biest_interpolators(self): np.testing.assert_allclose(g1, ff) -class TestBouncePoints: +class TestBounce1DPoints: """Test that bounce points are computed correctly.""" @staticmethod @@ -739,7 +743,7 @@ def test_z1_first(self): pitch = 2.0 intersect = B.solve(1 / pitch, extrapolate=False) z1, z2 = bounce_points(pitch, knots, B.c, B.derivative().c, check=True) - z1, z2 = TestBouncePoints.filter(z1, z2) + z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size 
np.testing.assert_allclose(z1, intersect[0::2]) np.testing.assert_allclose(z2, intersect[1::2]) @@ -754,7 +758,7 @@ def test_z2_first(self): pitch = 2.0 intersect = B.solve(1 / pitch, extrapolate=False) z1, z2 = bounce_points(pitch, k, B.c, B.derivative().c, check=True) - z1, z2 = TestBouncePoints.filter(z1, z2) + z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size np.testing.assert_allclose(z1, intersect[1:-1:2]) np.testing.assert_allclose(z2, intersect[0::2][1:]) @@ -771,7 +775,7 @@ def test_z1_before_extrema(self): dB_dz = B.derivative() pitch = 1 / B(dB_dz.roots(extrapolate=False))[3] + 1e-13 z1, z2 = bounce_points(pitch, k, B.c, dB_dz.c, check=True) - z1, z2 = TestBouncePoints.filter(z1, z2) + z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(z1[1], 1.982767, rtol=1e-6) @@ -794,7 +798,7 @@ def test_z2_before_extrema(self): dB_dz = B.derivative() pitch = 1 / B(dB_dz.roots(extrapolate=False))[2] z1, z2 = bounce_points(pitch, k, B.c, dB_dz.c, check=True) - z1, z2 = TestBouncePoints.filter(z1, z2) + z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(z1, intersect[[0, -2]]) @@ -817,7 +821,7 @@ def test_extrema_first_and_before_z1(self): pitch, k[2:], B.c[:, 2:], dB_dz.c[:, 2:], check=True, plot=False ) plot_ppoly(B, z1=z1, z2=z2, k=1 / pitch, start=k[2]) - z1, z2 = TestBouncePoints.filter(z1, z2) + z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size intersect = B.solve(1 / pitch, extrapolate=False) np.testing.assert_allclose(z1[0], 0.835319, rtol=1e-6) @@ -839,7 +843,7 @@ def test_extrema_first_and_before_z2(self): dB_dz = B.derivative() pitch = 1 / B(dB_dz.roots(extrapolate=False))[1] + 1e-13 z1, z2 = bounce_points(pitch, k, B.c, dB_dz.c, check=True) - z1, z2 = TestBouncePoints.filter(z1, z2) + z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size # Our routine correctly detects intersection, while scipy, jnp.root fails. intersect = B.solve(1 / pitch, extrapolate=False) @@ -871,20 +875,20 @@ def test_get_extrema(self): np.testing.assert_allclose(B_ext[idx], B_ext_scipy) -class TestBounceQuadrature: - """Test bounce quadrature accuracy.""" +def _mod_cheb_gauss(deg): + x, w = chebgauss(deg) + w /= chebweight(x) + return x, w - @staticmethod - def _mod_cheb_gauss(deg): - x, w = chebgauss(deg) - w /= chebweight(x) - return x, w - @staticmethod - def _mod_chebu_gauss(deg): - x, w = roots_chebyu(deg) - w *= chebweight(x) - return x, w +def _mod_chebu_gauss(deg): + x, w = roots_chebyu(deg) + w *= chebweight(x) + return x, w + + +class TestBounce1DQuadrature: + """Test bounce quadrature accuracy.""" @pytest.mark.unit @pytest.mark.parametrize( @@ -893,7 +897,7 @@ def _mod_chebu_gauss(deg): (True, tanh_sinh(40), None), (True, leggauss(25), "default"), (False, tanh_sinh(20), None), - (False, leggauss_lobatto(10), "default"), + (False, leggauss_lob(10), "default"), # sin automorphism still helps out chebyshev quadrature (True, _mod_cheb_gauss(30), "default"), (False, _mod_chebu_gauss(10), "default"), @@ -930,7 +934,7 @@ def test_bounce_quadrature(self, is_strong, quad, automorphism): data, quad, check=True, - **kwargs + **kwargs, ) result = bounce.integrate(pitch, integrand, [], check=True) assert np.count_nonzero(result) == 1 @@ -964,14 +968,14 @@ def elliptic_incomplete(k2): # Scipy's elliptic integrals are broken. 
# https://github.com/scipy/scipy/issues/20525. k = np.sqrt(k2) - K = TestBounceQuadrature._adaptive_elliptic(K_integrand, k) - E = TestBounceQuadrature._adaptive_elliptic(E_integrand, k) + K = TestBounce1DQuadrature._adaptive_elliptic(K_integrand, k) + E = TestBounce1DQuadrature._adaptive_elliptic(E_integrand, k) # Make sure scipy's adaptive quadrature is not broken. np.testing.assert_allclose( - K, TestBounceQuadrature._fixed_elliptic(K_integrand, k, 10) + K, TestBounce1DQuadrature._fixed_elliptic(K_integrand, k, 10) ) np.testing.assert_allclose( - E, TestBounceQuadrature._fixed_elliptic(E_integrand, k, 10) + E, TestBounce1DQuadrature._fixed_elliptic(E_integrand, k, 10) ) I_0 = 4 / k * K @@ -985,32 +989,32 @@ def elliptic_incomplete(k2): # Check for math mistakes. np.testing.assert_allclose( I_2, - TestBounceQuadrature._adaptive_elliptic( + TestBounce1DQuadrature._adaptive_elliptic( lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * Z * np.sin(Z), k ), ) np.testing.assert_allclose( I_3, - TestBounceQuadrature._adaptive_elliptic( + TestBounce1DQuadrature._adaptive_elliptic( lambda Z, k: 2 * np.sqrt(k**2 - np.sin(Z / 2) ** 2) * Z * np.sin(Z), k ), ) np.testing.assert_allclose( I_4, - TestBounceQuadrature._adaptive_elliptic( + TestBounce1DQuadrature._adaptive_elliptic( lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.sin(Z) ** 2, k ), ) np.testing.assert_allclose( I_5, - TestBounceQuadrature._adaptive_elliptic( + TestBounce1DQuadrature._adaptive_elliptic( lambda Z, k: 2 * np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.sin(Z) ** 2, k ), ) # scipy fails np.testing.assert_allclose( I_6, - TestBounceQuadrature._fixed_elliptic( + TestBounce1DQuadrature._fixed_elliptic( lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.cos(Z), k, deg=10, @@ -1018,7 +1022,7 @@ def elliptic_incomplete(k2): ) np.testing.assert_allclose( I_7, - TestBounceQuadrature._adaptive_elliptic( + TestBounce1DQuadrature._adaptive_elliptic( lambda Z, k: 2 * np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.cos(Z), k ), ) @@ -1026,7 +1030,7 @@ def elliptic_incomplete(k2): class TestBounce1D: - """Test bounce integral methods that use one-dimensional local splines.""" + """Test bounce integration with one-dimensional local spline methods.""" @pytest.mark.unit def test_integrate_checks(self): @@ -1136,7 +1140,19 @@ def dB_dz(z): @staticmethod def drift_analytic(data): - """Compute analytic approximation for bounce-averaged binormal drift.""" + """Compute analytic approximation for bounce-averaged binormal drift. + + Returns + ------- + drift_analytic : jnp.ndarray + Analytic approximation for the true result that the numerical computation + should attempt to match. + cvdrift, gbdrift : jnp.ndarray + Numerically computed ``data["cvdrift"]` and ``data["gbdrift"]`` normalized + by some scale factors for this unit test. These should be fed to the bounce + integration as input. + + """ B = data["|B|"] / data["Bref"] B0 = np.mean(B) # epsilon should be changed to dimensionless, and computed in a way that @@ -1201,7 +1217,7 @@ def drift_analytic(data): pitch = get_pitch(np.min(B), np.max(B), 100)[1:] k2 = 0.5 * ((1 - pitch * B0) / (epsilon * pitch * B0) + 1) I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 = ( - TestBounceQuadrature.elliptic_incomplete(k2) + TestBounce1DQuadrature.elliptic_incomplete(k2) ) y = np.sqrt(2 * epsilon * pitch * B0) I_0, I_2, I_4, I_6 = map(lambda I: I / y, (I_0, I_2, I_4, I_6)) @@ -1348,3 +1364,199 @@ def integrand_grad(*args, **kwargs2): # Make sure bounce points get differentiated too. 
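The argmin interpolation checked above leans on the soft-argmin trick from ``interp_to_argmin_B_soft``: with a large negative ``beta``, softmax weights collapse onto the minimum of |B|, so a weighted sum of ``g`` approximates ``g`` at that minimum. A toy illustration with arbitrary values and the bounce-window masking omitted:

    import numpy as np
    from scipy.special import softmax

    B = np.array([2.3, 1.1, 1.8, 3.0])      # |B| at the spline extrema
    g = np.array([10.0, 20.0, 30.0, 40.0])  # quantity to interpolate
    beta = -50
    weights = softmax(beta * B / B.mean())  # concentrates on argmin |B|
    np.testing.assert_allclose(np.sum(weights * g), g[np.argmin(B)], rtol=1e-3)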
result = fun2(pitch) assert np.isfinite(result) and not np.isclose(result, truth, rtol=1e-1) + + +class TestBounce2DPoints: + """Test that bounce points are computed correctly.""" + + @staticmethod + def _cheb_intersect(cheb, k): + cheb = cheb.copy() + cheb[0] = cheb[0] - k + roots = chebroots(cheb) + intersect = roots[ + np.logical_and(np.isreal(roots), np.abs(roots.real) <= 1) + ].real + return intersect + + @staticmethod + def _periodic_fun(nodes, M, N): + alpha, zeta = nodes.T + f = -2 * np.cos(1 / (0.1 + zeta**2)) + 2 + return f.reshape(M, N) + + @pytest.mark.unit + def test_bp1_first(self): + """Test that bounce points are computed correctly.""" + M, N = 1, 10 + domain = (-1, 1) + nodes = FourierChebyshevBasis.nodes(M, N, domain=domain) + f = self._periodic_fun(nodes, M, N) + fcb = FourierChebyshevBasis(f, domain=domain) + pcb = fcb.compute_cheb(fourier_pts(M)) + pitch = 1 / np.linspace(1, 4, 20) + bp1, bp2 = pcb.intersect1d(pitch) + pcb.check_intersect1d(bp1, bp2, pitch) + bp1, bp2 = TestBounce1DPoints.filter(bp1, bp2) + + def f(z): + return -2 * np.cos(1 / (0.1 + z**2)) + 2 + + r = self._cheb_intersect(chebinterpolate(f, N), 1 / pitch) + np.testing.assert_allclose(bp1, r[::2], rtol=1e-3) + np.testing.assert_allclose(bp2, r[1::2], rtol=1e-3) + + +class TestBounce2D: + """Test bounce integration with two-dimensional pseudo-spectral methods.""" + + @pytest.mark.unit + @pytest.mark.parametrize( + "alpha_0, iota, num_period, period", + [ + (0, np.sqrt(2), 1, 2 * np.pi), + (0, np.arange(1, 3) * np.sqrt(2), 5, 2 * np.pi), + ], + ) + def test_alpha_sequence(self, alpha_0, iota, num_period, period): + """Test field line poloidal label tracking.""" + iota = np.atleast_1d(iota) + alphas = get_alpha(alpha_0, iota, num_period, period) + assert alphas.shape == (iota.size, num_period) + for i in range(iota.size): + assert np.unique(alphas[i]).size == num_period, f"{iota} is irrational" + print(alphas) + + @pytest.mark.unit + def test_fourier_chebyshev(self, rho=1, M=8, N=32, f=lambda B, pitch: B * pitch): + """Test bounce points...""" + eq = get("W7-X") + clebsch = FourierChebyshevBasis.nodes(M, N, L=rho) + desc_from_clebsch = map_coordinates( + eq, + clebsch, + inbasis=("rho", "alpha", "zeta"), + period=(np.inf, 2 * np.pi, np.inf), + ) + grid = LinearGrid( + rho=rho, M=eq.M_grid, N=eq.N_grid, sym=False, NFP=eq.NFP + ) # check if NFP!=1 works + data = eq.compute( + names=Bounce2D.required_names() + ["min_tz |B|", "max_tz |B|"], grid=grid + ) + fb = Bounce2D( + grid, data, M, N, desc_from_clebsch, check=True, warn=False + ) # TODO check true + pitch = get_pitch( + grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 + ) + result = fb.integrate(f, [], pitch) # noqa: F841 + + @pytest.mark.unit + @pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_1d) + def test_drift(self): + """Test bounce-averaged drift with analytical expressions.""" + eq = Equilibrium.load(".//tests//inputs//low-beta-shifted-circle.h5") + psi_boundary = eq.Psi / (2 * np.pi) + psi = 0.25 * psi_boundary + rho = np.sqrt(psi / psi_boundary) + np.testing.assert_allclose(rho, 0.5) + + # Make a set of nodes along a single fieldline. 
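``test_alpha_sequence`` above only asserts that the tracked field line labels remain distinct when the rotational transform is irrational. A hypothetical stand-in for ``get_alpha`` (not the library implementation) that exhibits the same property:

    import numpy as np

    def get_alpha_sketch(alpha_0, iota, num_period, period):
        # assume the poloidal label advances by iota * period each transit
        iota = np.atleast_1d(iota)[:, None]
        return alpha_0 + iota * period * np.arange(num_period)

    alphas = get_alpha_sketch(0, np.sqrt(2), 5, 2 * np.pi)
    assert alphas.shape == (1, 5)
    assert np.unique(alphas).size == 5  # irrational iota never repeats a label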
+ grid_fsa = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, sym=eq.sym, NFP=eq.NFP) + data = eq.compute(["iota"], grid=grid_fsa) + iota = grid_fsa.compress(data["iota"]).item() + alpha = 0 + zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) + grid = get_rtz_grid( + eq, + rho, + alpha, + zeta, + coordinates="raz", + period=(np.inf, 2 * np.pi, np.inf), + iota=np.array([iota]), + ) + data = eq.compute( + Bounce2D.required_names() + + [ + "cvdrift", + "gbdrift", + "grad(psi)", + "grad(alpha)", + "shear", + "iota", + "psi", + "a", + ], + grid=grid, + ) + np.testing.assert_allclose(data["psi"], psi) + np.testing.assert_allclose(data["iota"], iota) + assert np.all(data["B^zeta"] > 0) + data["Bref"] = 2 * np.abs(psi_boundary) / data["a"] ** 2 + data["rho"] = rho + data["alpha"] = alpha + data["zeta"] = zeta + data["psi"] = grid.compress(data["psi"]) + data["iota"] = grid.compress(data["iota"]) + data["shear"] = grid.compress(data["shear"]) + + # Compute analytic approximation. + drift_analytic, cvdrift, gbdrift, pitch = TestBounce1D.drift_analytic(data) + # Compute numerical result. + grid = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP) + data_2 = eq.compute( + names=Bounce2D.required_names() + ["cvdrift", "gbdrift"], grid=grid + ) + M, N = eq.M_grid, 20 + bounce = Bounce2D( + grid=grid, + data=data_2, + desc_from_clebsch=Bounce2D.desc_from_clebsch(eq, rho, M, N), + M=M, + N=N, + alpha_0=data["alpha"], + num_transit=1, + Bref=data["Bref"], + Lref=data["a"], + check=True, + plot=True, + ) + + def integrand_num(cvdrift, gbdrift, B, pitch): + g = jnp.sqrt(1 - pitch * B) + return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) + + def integrand_den(B, pitch): + return 1 / jnp.sqrt(1 - pitch * B) + + normalization = -np.sign(data["psi"]) * data["Bref"] * data["a"] ** 2 + drift_numerical_num = bounce.integrate( + pitch=pitch[:, np.newaxis], + integrand=integrand_num, + f=Bounce2D.reshape_data( + grid, + data_2["cvdrift"] * normalization, + data_2["gbdrift"] * normalization, + ), + num_well=1, + ) + drift_numerical_den = bounce.integrate( + pitch=pitch[:, np.newaxis], + integrand=integrand_den, + f=[], + num_well=1, + ) + drift_numerical = np.squeeze(drift_numerical_num / drift_numerical_den) + msg = "There should be one bounce integral per pitch in this example." 
+ assert drift_numerical.size == drift_analytic.size, msg + np.testing.assert_allclose( + drift_numerical, drift_analytic, atol=5e-3, rtol=5e-2 + ) + + fig, ax = plt.subplots() + ax.plot(1 / pitch, drift_analytic) + ax.plot(1 / pitch, drift_numerical) + plt.show() + return fig diff --git a/tests/test_interp_utils.py b/tests/test_interp_utils.py index 78b25f5990..f2225c0334 100644 --- a/tests/test_interp_utils.py +++ b/tests/test_interp_utils.py @@ -13,7 +13,7 @@ from scipy.fft import dct as sdct from scipy.fft import idct as sidct -from desc.backend import dct, idct, jnp, rfft +from desc.backend import dct, idct, rfft from desc.integrals.interp_utils import ( cheb_from_dct, cheb_pts, @@ -29,13 +29,6 @@ from desc.integrals.quad_utils import bijection_to_disc -def filter_not_nan(a): - """Filter out nan from ``a`` while asserting nan is padded at right.""" - is_nan = jnp.isnan(a) - assert jnp.array_equal(is_nan, jnp.sort(is_nan, axis=-1)) - return a[~is_nan] - - @pytest.mark.unit def test_poly_root(): """Test vectorized computation of cubic polynomial exact roots.""" @@ -70,15 +63,12 @@ def test_poly_root(): root = poly_root(c.T, sort=True, distinct=True) for j in range(c.shape[0]): unique_roots = np.unique(np.roots(c[j])) - root_filter = filter_not_nan(root[j]) - assert root_filter.size == unique_roots.size, j np.testing.assert_allclose( - actual=root_filter, - desired=unique_roots, - err_msg=str(j), + actual=root[j][~np.isnan(root[j])], desired=unique_roots, err_msg=str(j) ) c = np.array([0, 1, -1, -8, 12]) - root = filter_not_nan(poly_root(c, sort=True, distinct=True)) + root = poly_root(c, sort=True, distinct=True) + root = root[~np.isnan(root)] unique_root = np.unique(np.roots(c)) assert root.size == unique_root.size np.testing.assert_allclose(root, unique_root) @@ -102,11 +92,11 @@ def test_polyval_vec(): def test(x, c): val = polyval_vec(x=x, c=c) + c = np.moveaxis(c, 0, -1) + x = x[..., np.newaxis] np.testing.assert_allclose( val, - np.vectorize(np.polyval, signature="(m),(n)->(n)")( - np.moveaxis(c, 0, -1), x[..., np.newaxis] - ).squeeze(axis=-1), + np.vectorize(np.polyval, signature="(m),(n)->(n)")(c, x).squeeze(axis=-1), ) quartic = 5 @@ -125,6 +115,47 @@ def test(x, c): test(x, c) +def _f_1d(x): + """Test function for 1D FFT.""" + return np.cos(7 * x) + np.sin(x) - 33.2 + + +def _f_1d_nyquist_freq(): + return 7 + + +def _f_2d(x, y): + """Test function for 2D FFT.""" + x_freq, y_freq = 3, 5 + return ( + # something that's not separable + np.cos(x_freq * x) * np.sin(2 * x + y) + + np.sin(y_freq * y) * np.cos(x + 3 * y) + # DC terms + - 33.2 + + np.cos(x) + + np.cos(y) + ) + + +def _f_2d_nyquist_freq(): + x_freq_nyquist = 3 + 2 + y_freq_nyquist = 5 + 3 + return x_freq_nyquist, y_freq_nyquist + + +def _identity(x): + return x + + +def _f_non_periodic(z): + return np.sin(np.sqrt(2) * z) * np.cos(1 / (2 + z)) * np.cos(z**2) * z + + +def _f_algebraic(z): + return z**3 - 10 * z**6 - z - np.e + z**4 + + class TestFastInterp: """Test fast interpolation.""" @@ -145,23 +176,6 @@ def test_rfftfreq(self, M): """Make sure numpy uses Nyquist interpolant frequencies.""" np.testing.assert_allclose(np.fft.rfftfreq(M, d=1 / M), np.arange(M // 2 + 1)) - @staticmethod - def _interp_rfft_harmonic(xq, f): - M = f.shape[-1] - fq = jnp.linalg.vecdot( - harmonic_vander(xq, M), harmonic(rfft(f, norm="forward"), M) - ) - return fq - - @staticmethod - def _f_1d(x): - """Test function for 1D FFT.""" - return np.cos(7 * x) + np.sin(x) - 33.2 - - @staticmethod - def _f_1d_nyquist_freq(): - return 7 - 
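The ``test_interp_dct`` changes below hinge on the relation between a type-II DCT and Chebyshev series coefficients on Chebyshev-Gauss points. A standalone numpy/scipy sketch of that relation; the scaling written out here is assumed to mirror what ``cheb_from_dct`` does internally:

    import numpy as np
    from numpy.polynomial.chebyshev import chebval
    from scipy.fft import dct

    M = 16
    # Chebyshev-Gauss points (interior nodes, no Lobatto endpoints)
    z = np.cos(np.pi * (np.arange(M) + 0.5) / M)
    f = lambda z: z**3 - 10 * z**6 - z + z**4
    c = dct(f(z), type=2) / M  # unnormalized type-II DCT, then scale by 1/M
    c[0] /= 2.0                # the constant term carries an extra factor of 2
    np.testing.assert_allclose(chebval(z, c), f(z), atol=1e-12)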
@pytest.mark.unit @pytest.mark.parametrize( "func, n", @@ -176,29 +190,15 @@ def test_interp_rfft(self, func, n): x = np.linspace(0, 2 * np.pi, n, endpoint=False) assert not np.any(np.isclose(xq[..., np.newaxis], x)) f, fq = func(x), func(xq) - np.testing.assert_allclose(self._interp_rfft_harmonic(xq, f), fq) np.testing.assert_allclose(interp_rfft(xq, f), fq) - - @staticmethod - def _f_2d(x, y): - """Test function for 2D FFT.""" - x_freq, y_freq = 3, 5 - return ( - # something that's not separable - np.cos(x_freq * x) * np.sin(2 * x + y) - + np.sin(y_freq * y) * np.cos(x + 3 * y) - # DC terms - - 33.2 - + np.cos(x) - + np.cos(y) + M = f.shape[-1] + np.testing.assert_allclose( + np.sum( + harmonic_vander(xq, M) * harmonic(rfft(f, norm="forward"), M), axis=-1 + ), + fq, ) - @staticmethod - def _f_2d_nyquist_freq(): - x_freq_nyquist = 3 + 2 - y_freq_nyquist = 5 + 3 - return x_freq_nyquist, y_freq_nyquist - @pytest.mark.xfail( reason="Numpy, jax, and scipy need to fix bug with 2D FFT (fft2)." ) @@ -228,25 +228,12 @@ def test_interp_rfft2(self, func, m, n): truth, ) - @staticmethod - def _identity(x): - # Identity map known for bad Gibbs; - # only if distribution of spectral coefficients is correct will DCT - # recover Chebyshev interpolation, avoiding Gibbs and Runge. - return x - - @staticmethod - def _f_non_periodic(z): - return np.sin(np.sqrt(2) * z) * np.cos(1 / (2 + z)) * np.cos(z**2) * z - - @staticmethod - def _f_algebraic(z): - return z**3 - 10 * z**6 - z - np.e + z**4 - @pytest.mark.unit @pytest.mark.parametrize( "f, M, lobatto", [ + # Identity map known for bad Gibbs; if discrete Chebyshev transform + # implemented correctly then won't see Gibbs. (_identity, 2, False), (_identity, 3, False), (_identity, 3, True), @@ -316,7 +303,7 @@ def test_interp_dct(self, f, M): z = cheb_pts(M) fz = f(z) np.testing.assert_allclose(c0, cheb_from_dct(dct(fz, 2) / M), atol=1e-13) - if np.allclose(self._f_algebraic(z), fz): + if np.allclose(_f_algebraic(z), fz): np.testing.assert_allclose( cheb2poly(c0), np.array([-np.e, -1, 0, 1, 1, 0, -10]), atol=1e-13 ) diff --git a/tests/test_quad_utils.py b/tests/test_quad_utils.py index 662e9fcef7..a23b81c8d8 100644 --- a/tests/test_quad_utils.py +++ b/tests/test_quad_utils.py @@ -12,6 +12,7 @@ grad_automorphism_arcsin, grad_automorphism_sin, grad_bijection_from_disc, + leggauss_lob, tanh_sinh, ) from desc.utils import only1 @@ -66,4 +67,27 @@ def test_automorphism(): @pytest.mark.unit def test_leggauss_lobatto(): - """Test that quadrature points and weights are correct.""" + """Test quadrature points and weights against known values.""" + with pytest.raises(ValueError): + x, w = leggauss_lob(1) + x, w = leggauss_lob(0, True) + assert x.size == w.size == 0 + + x, w = leggauss_lob(2) + np.testing.assert_allclose(x, [-1, 1]) + np.testing.assert_allclose(w, [1, 1]) + + x, w = leggauss_lob(3) + np.testing.assert_allclose(x, [-1, 0, 1]) + np.testing.assert_allclose(w, [1 / 3, 4 / 3, 1 / 3]) + np.testing.assert_allclose(leggauss_lob(x.size - 2, True), (x[1:-1], w[1:-1])) + + x, w = leggauss_lob(4) + np.testing.assert_allclose(x, [-1, -np.sqrt(1 / 5), np.sqrt(1 / 5), 1]) + np.testing.assert_allclose(w, [1 / 6, 5 / 6, 5 / 6, 1 / 6]) + np.testing.assert_allclose(leggauss_lob(x.size - 2, True), (x[1:-1], w[1:-1])) + + x, w = leggauss_lob(5) + np.testing.assert_allclose(x, [-1, -np.sqrt(3 / 7), 0, np.sqrt(3 / 7), 1]) + np.testing.assert_allclose(w, [1 / 10, 49 / 90, 32 / 45, 49 / 90, 1 / 10]) + np.testing.assert_allclose(leggauss_lob(x.size - 2, True), (x[1:-1], w[1:-1])) 
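The Gauss-Lobatto nodes and weights asserted in ``test_leggauss_lobatto`` above are the standard tabulated values. As an independent cross-check that does not rely on ``leggauss_lob`` itself (a minimal sketch using only numpy, not part of any patch in this series), an n-point Lobatto rule on [-1, 1] should integrate every polynomial of degree at most 2n - 3 exactly:

.. code-block:: python

    import numpy as np

    # Tabulated 4-point Gauss-Lobatto rule on [-1, 1]; same values asserted above.
    x = np.array([-1.0, -np.sqrt(1 / 5), np.sqrt(1 / 5), 1.0])
    w = np.array([1 / 6, 5 / 6, 5 / 6, 1 / 6])

    # A 4-point Lobatto rule is exact for polynomials of degree <= 2*4 - 3 = 5.
    for k in range(2 * x.size - 2):
        exact = (1 - (-1) ** (k + 1)) / (k + 1)  # integral of t**k over [-1, 1]
        np.testing.assert_allclose(np.sum(w * x**k), exact, atol=1e-12)

The same degree-of-exactness check applies to the 2-, 3-, and 5-point rules asserted above, using their corresponding tabulated nodes and weights.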
From b6ade4c6560a11355d227b8c21a4565594579f64 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 27 Aug 2024 02:08:37 -0400 Subject: [PATCH 220/241] Preparing merge into bounce branch --- desc/basis.py | 4 - desc/integrals/basis.py | 32 ++-- desc/integrals/bounce_integral.py | 28 ++-- desc/integrals/bounce_utils.py | 254 ++++++++++++++++++------------ desc/integrals/interp_utils.py | 12 +- tests/test_integrals.py | 49 +++--- tests/test_interp_utils.py | 190 +++++++++++----------- 7 files changed, 313 insertions(+), 256 deletions(-) diff --git a/desc/basis.py b/desc/basis.py index 19c8d99ddb..b01ec871f9 100644 --- a/desc/basis.py +++ b/desc/basis.py @@ -1131,10 +1131,6 @@ def evaluate( m = m[midx] n = n[nidx] - # TODO: in map_clebsch_root findign - # lambda should be fixed to rho and zeta - # so lambda is slimmed to 1d fourier series for fixed rho zeta. - # cache radial and toroidal for rootfinding radial = zernike_radial(r[:, np.newaxis], lm[:, 0], lm[:, 1], dr=derivatives[0]) poloidal = fourier(t[:, np.newaxis], m, dt=derivatives[1]) toroidal = fourier(z[:, np.newaxis], n, NFP=self.NFP, dt=derivatives[2]) diff --git a/desc/integrals/basis.py b/desc/integrals/basis.py index 0baa6ae80c..6a40b0bc8f 100644 --- a/desc/integrals/basis.py +++ b/desc/integrals/basis.py @@ -7,10 +7,10 @@ from desc.backend import dct, flatnonzero, idct, irfft, jnp, put, rfft from desc.integrals.interp_utils import ( + _filter_distinct, cheb_from_dct, cheb_pts, chebroots_vec, - filter_distinct, fourier_pts, harmonic, idct_non_uniform, @@ -50,8 +50,8 @@ def _subtract(c, k): def _in_epigraph_and(is_intersect, df_dy_sign): """Set and epigraph of function f with the given set of points. - Return only intersects where the straight line path between adjacent - intersects resides in the epigraph of a continuous map ``f``. + Used to return only intersects where the straight line path between + adjacent intersects resides in the epigraph of a continuous map ``f``. Warnings -------- @@ -430,7 +430,7 @@ def intersect2d(self, k=0.0, eps=_eps): # Intersects must satisfy y ∈ [-1, 1]. # Pick sentinel such that only distinct roots are considered intersects. - y = filter_distinct(y, sentinel=-2.0, eps=eps) + y = _filter_distinct(y, sentinel=-2.0, eps=eps) is_intersect = (jnp.abs(y.imag) <= eps) & (jnp.abs(y.real) <= 1.0) # Ensure y is in domain of arcos; choose 1 because kernel probably cheaper. y = jnp.where(is_intersect, y.real, 1.0) @@ -471,17 +471,16 @@ def intersect1d(self, k=0.0, num_intersect=None, pad_value=0.0): ------- z1, z2 : (jnp.ndarray, jnp.ndarray) Shape broadcasts with (..., *self.cheb.shape[:-2], num_intersect). - ``z1``, ``z2`` holds intersects satisfying ∂f/∂y <= 0, ∂f/∂y >= 0, - respectively. The points are grouped and ordered such that the - straight line path between the intersects in ``z1`` and ``z2`` - resides in the epigraph of f. + ``z1`` and ``z2`` are intersects satisfying ∂f/∂y <= 0 and ∂f/∂y >= 0, + respectively. The points are grouped and ordered such that the straight + line path between ``z1`` and ``z2`` resides in the epigraph of f. """ errorif( self.N < 2, NotImplementedError, - "This method requires the Chebyshev spectral resolution of at " - f"least 2, but got N={self.N}.", + "This method requires a Chebyshev spectral resolution of N > 1, " + f"but got N = {self.N}.", ) # Add axis to use same k over all Chebyshev series of the piecewise object. @@ -498,9 +497,9 @@ def intersect1d(self, k=0.0, num_intersect=None, pad_value=0.0): # polynomials is a left intersection i.e. 
``is_z1`` because the subset of # pitch values that generate this edge case has zero measure. By ignoring # this, for those subset of pitch values the integrations will be done in - # the hypograph of |B| rather than the epigraph, which will be integrated - # to zero. If we decide later to not ignore this, the technique to solve - # this is to disqualify intersects within ``_eps`` from ``domain[-1]``. + # the hypograph of |B|, which will yield zero. If in far future decide to + # not ignore this, note the solution is to disqualify intersects within + # ``_eps`` from ``domain[-1]``. is_z1 = (df_dy_sign <= 0) & is_intersect is_z2 = (df_dy_sign >= 0) & _in_epigraph_and(is_intersect, df_dy_sign) @@ -532,10 +531,9 @@ def check_intersect1d(self, z1, z2, k, plot=True, **kwargs): ---------- z1, z2 : jnp.ndarray Shape must broadcast with (*self.cheb.shape[:-2], W). - ``z1``, ``z2`` holds intersects satisfying ∂f/∂y <= 0, ∂f/∂y >= 0, - respectively. The points are grouped and ordered such that the - straight line path between the intersects in ``z1`` and ``z2`` - resides in the epigraph of f. + ``z1`` and ``z2`` are intersects satisfying ∂f/∂y <= 0 and ∂f/∂y >= 0, + respectively. The points are grouped and ordered such that the straight + line path between ``z1`` and ``z2`` resides in the epigraph of f. k : jnp.ndarray Shape must broadcast with *self.cheb.shape[:-2]. k such that fₓ(yᵢ) = k. diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 8f7df50243..7d80dc8e31 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -10,7 +10,7 @@ bounce_points, bounce_quadrature, get_alpha, - interp_to_argmin_B_soft, + interp_to_argmin_g, plot_ppoly, ) from desc.integrals.interp_utils import interp_rfft2, irfft2_non_uniform, polyder_vec @@ -444,8 +444,8 @@ def bounce_points(self, pitch, num_well=None): z1, z2 : (jnp.ndarray, jnp.ndarray) Shape (P, L, num_well). ζ coordinates of bounce points. The points are grouped and ordered such - that the straight line path between the intersects in ``z1`` and ``z2`` - resides in the epigraph of |B|. + that the straight line path between ``z1`` and ``z2`` resides in the + epigraph of |B|. """ return self._B.intersect1d(1 / jnp.atleast_2d(pitch), num_well) @@ -458,8 +458,8 @@ def check_bounce_points(self, z1, z2, pitch, plot=True, **kwargs): z1, z2 : (jnp.ndarray, jnp.ndarray) Shape (P, L, num_well). ζ coordinates of bounce points. The points are grouped and ordered such - that the straight line path between the intersects in ``z1`` and ``z2`` - resides in the epigraph of |B|. + that the straight line path between ``z1`` and ``z2`` resides in the + epigraph of |B|. pitch : jnp.ndarray Shape (P, L). λ values to evaluate the bounce integral at each field line. λ(ρ) is @@ -777,8 +777,8 @@ def bounce_points(self, pitch, num_well=None): z1, z2 : (jnp.ndarray, jnp.ndarray) Shape (P, L * M, num_well). ζ coordinates of bounce points. The points are grouped and ordered such - that the straight line path between the intersects in ``z1`` and ``z2`` - resides in the epigraph of |B|. + that the straight line path between ``z1`` and ``z2`` resides in the + epigraph of |B|. If there were less than ``num_wells`` wells detected along a field line, then the last axis, which enumerates bounce points for a particular field @@ -801,8 +801,8 @@ def check_bounce_points(self, z1, z2, pitch, plot=True, **kwargs): z1, z2 : (jnp.ndarray, jnp.ndarray) Shape (P, L * M, num_well). ζ coordinates of bounce points. 
The points are grouped and ordered such - that the straight line path between the intersects in ``z1`` and ``z2`` - resides in the epigraph of |B|. + that the straight line path between ``z1`` and ``z2`` resides in the + epigraph of |B|. pitch : jnp.ndarray Shape must broadcast with (P, L * M). λ values to evaluate the bounce integral at each field line. λ(ρ,α) is @@ -877,7 +877,7 @@ def integrate( wells detected along a field line than the size of the last axis of the returned arrays, then that axis is padded with zero. method : str - Method of interpolation for functions contained in ``f``. + Method of interpolation. See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. Default is cubic C1 local spline. batch : bool @@ -910,13 +910,13 @@ def integrate( check=check, ) if weight is not None: - result *= interp_to_argmin_B_soft( - g=weight, + result *= interp_to_argmin_g( + h=weight, z1=z1, z2=z2, knots=self._zeta, - B=self.B, - dB_dz=self._dB_dz, + g=self.B, + dg_dz=self._dB_dz, method=method, ) assert result.shape[-1] == setdefault(num_well, (self._zeta.size - 1) * 3) diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index d500f1b338..f99df28e2b 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -77,7 +77,7 @@ def get_alpha(alpha_0, iota, num_transit, period): return alpha -def _check_spline_shape(knots, B, dB_dz, pitch=None): +def _check_spline_shape(knots, g, dg_dz, pitch=None): """Ensure inputs have compatible shape, and return them with full dimension. Parameters @@ -85,18 +85,18 @@ def _check_spline_shape(knots, B, dB_dz, pitch=None): knots : jnp.ndarray Shape (knots.size, ). ζ coordinates of spline knots. Must be strictly increasing. - B : jnp.ndarray - Shape (B.shape[0], S, knots.size - 1). - Polynomial coefficients of the spline of |B| in local power basis. + g : jnp.ndarray + Shape (g.shape[0], S, knots.size - 1). + Polynomial coefficients of the spline of g in local power basis. First axis enumerates the coefficients of power series. Second axis - enumerates the splines along the field lines. Last axis enumerates the - polynomials that compose the spline along a particular field line. - dB_dz : jnp.ndarray - Shape (B.shape[0] - 1, *B.shape[1:]). - Polynomial coefficients of the spline of (∂|B|/∂ζ)|ρ,α in local power basis. + enumerates the splines. Last axis enumerates the polynomials that + compose a particular spline. + dg_dz : jnp.ndarray + Shape (g.shape[0] - 1, *g.shape[1:]). + Polynomial coefficients of the spline of ∂g/∂ζ in local power basis. First axis enumerates the coefficients of power series. Second axis - enumerates the splines along the field lines. Last axis enumerates the - polynomials that compose the spline along a particular field line. + enumerates the splines. Last axis enumerates the polynomials that + compose a particular spline. pitch : jnp.ndarray Shape must broadcast with (P, S). λ values to evaluate the bounce integral at each field line. λ(ρ,α) is @@ -107,29 +107,29 @@ def _check_spline_shape(knots, B, dB_dz, pitch=None): """ errorif(knots.ndim != 1, msg=f"knots should be 1d; got shape {knots.shape}.") errorif( - B.shape[-1] != (knots.size - 1), + g.shape[-1] != (knots.size - 1), msg=( "Last axis does not enumerate polynomials of spline. " - f"B.shape={B.shape}. knots.shape={knots.shape}." + f"Spline shape {g.shape}. Knots shape {knots.shape}." 
), ) errorif( - B.ndim > 3 - or dB_dz.ndim > 3 - or (B.shape[0] - 1) != dB_dz.shape[0] - or B.shape[1:] != dB_dz.shape[1:], - msg=f"Invalid shape for spline. B.shape={B.shape}. dB_dz.shape={dB_dz.shape}.", + g.ndim > 3 + or dg_dz.ndim > 3 + or (g.shape[0] - 1) != dg_dz.shape[0] + or g.shape[1:] != dg_dz.shape[1:], + msg=f"Invalid shape {g.shape} for spline and derivative {dg_dz.shape}.", ) # Add axis which enumerates field lines if necessary. - B, dB_dz = atleast_3d_mid(B, dB_dz) + g, dg_dz = atleast_3d_mid(g, dg_dz) if pitch is not None: pitch = jnp.atleast_2d(pitch) errorif( pitch.ndim != 2 - or not (pitch.shape[-1] == 1 or pitch.shape[-1] == B.shape[1]), + or not (pitch.shape[-1] == 1 or pitch.shape[-1] == g.shape[1]), msg=f"Invalid shape {pitch.shape} for pitch angles.", ) - return B, dB_dz, pitch + return g, dg_dz, pitch def bounce_points( @@ -149,17 +149,17 @@ def bounce_points( Shape (knots.size, ). ζ coordinates of spline knots. Must be strictly increasing. B : jnp.ndarray - Shape (B.shape[0], S, knots.size - 1). + Shape (g.shape[0], S, knots.size - 1). Polynomial coefficients of the spline of |B| in local power basis. First axis enumerates the coefficients of power series. Second axis - enumerates the splines along the field lines. Last axis enumerates the - polynomials that compose the spline along a particular field line. + enumerates the splines. Last axis enumerates the polynomials that + compose a particular spline. dB_dz : jnp.ndarray - Shape (B.shape[0] - 1, *B.shape[1:]). - Polynomial coefficients of the spline of (∂|B|/∂ζ)|ρ,α in local power basis. + Shape (g.shape[0] - 1, *g.shape[1:]). + Polynomial coefficients of the spline of (∂|B|/∂ζ)|(ρ,α) in local power basis. First axis enumerates the coefficients of power series. Second axis - enumerates the splines along the field lines. Last axis enumerates the - polynomials that compose the spline along a particular field line. + enumerates the splines. Last axis enumerates the polynomials that + compose a particular spline. num_well : int or None Specify to return the first ``num_well`` pairs of bounce points for each pitch along each field line. This is useful if ``num_well`` tightly @@ -181,8 +181,8 @@ def bounce_points( z1, z2 : (jnp.ndarray, jnp.ndarray) Shape (P, S, num_well). ζ coordinates of bounce points. The points are grouped and ordered such - that the straight line path between the intersects in ``z1`` and ``z2`` - resides in the epigraph of |B|. + that the straight line path between ``z1`` and ``z2`` resides in the + epigraph of |B|. If there were less than ``num_wells`` wells detected along a field line, then the last axis, which enumerates bounce points for a particular field @@ -319,8 +319,8 @@ def bounce_quadrature( z1, z2 : jnp.ndarray Shape (P, S, num_well). ζ coordinates of bounce points. The points are grouped and ordered such - that the straight line path between the intersects in ``z1`` and ``z2`` - resides in the epigraph of |B|. + that the straight line path between ``z1`` and ``z2`` resides in the + epigraph of |B|. pitch : jnp.ndarray Shape must broadcast with (P, S). λ values to evaluate the bounce integral at each field line. λ(ρ,α) is @@ -344,7 +344,7 @@ def bounce_quadrature( Shape (knots.size, ). Unique ζ coordinates where the arrays in ``data`` and ``f`` were evaluated. method : str - Method of interpolation for functions contained in ``f``. + Method of interpolation. See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. Default is cubic C1 local spline. 
batch : bool @@ -472,10 +472,8 @@ def _interpolate_and_integrate( - data["B^zeta"] * data["|B|_z|r,a"] / data["|B|"] ** 2, ).reshape(shape) B = interp1d_Hermite_vec(Q, knots, data["|B|"], data["|B|_z|r,a"]).reshape(shape) - # Spline the integrand so that we can evaluate it at quadrature points without - # expensive coordinate mappings and root finding. Spline each function separately so - # that the singularity near the bounce points can be captured more accurately than - # can be by any polynomial. + # Spline each function separately so that operations in the integrand that do not + # preserve smoothness can be captured by the quadrature. f = [interp1d_vec(Q, knots, f_i, method=method).reshape(shape) for f_i in f] result = jnp.dot(integrand(*f, B=B, pitch=pitch) / b_sup_z, w) @@ -558,115 +556,175 @@ def _plot_check_interp(Q, V, name=""): plt.show() -def _get_extrema(knots, B, dB_dz, sentinel=jnp.nan): - """Return ext (ζ*, |B|(ζ*)). +def _get_extrema(knots, g, dg_dz, sentinel=jnp.nan): + """Return ext (ζ*, g(ζ*)). Parameters ---------- knots : jnp.ndarray Shape (knots.size, ). ζ coordinates of spline knots. Must be strictly increasing. - B : jnp.ndarray - Shape (B.shape[0], S, knots.size - 1). - Polynomial coefficients of the spline of |B| in local power basis. + g : jnp.ndarray + Shape (g.shape[0], S, knots.size - 1). + Polynomial coefficients of the spline of g in local power basis. First axis enumerates the coefficients of power series. Second axis - enumerates the splines along the field lines. Last axis enumerates the - polynomials that compose the spline along a particular field line. - dB_dz : jnp.ndarray - Shape (B.shape[0] - 1, *B.shape[1:]). - Polynomial coefficients of the spline of (∂|B|/∂ζ)|ρ,α in local power basis. + enumerates the splines. Last axis enumerates the polynomials that + compose a particular spline. + dg_dz : jnp.ndarray + Shape (g.shape[0] - 1, *g.shape[1:]). + Polynomial coefficients of the spline of ∂g/∂ζ in local power basis. First axis enumerates the coefficients of power series. Second axis - enumerates the splines along the field lines. Last axis enumerates the - polynomials that compose the spline along a particular field line. + enumerates the splines. Last axis enumerates the polynomials that + compose a particular spline. sentinel : float Value with which to pad array to return fixed shape. Returns ------- - ext, B_ext : jnp.ndarray + ext, g_ext : jnp.ndarray Shape (S, (knots.size - 1) * (degree - 1)). - First array enumerates ζ*. Second array enumerates |B|(ζ*) - Sorted order of ζ* is not promised. + First array enumerates ζ*. Second array enumerates g(ζ*) + Sorting order of extrema is arbitrary. """ - B, dB_dz, _ = _check_spline_shape(knots, B, dB_dz) - S, degree = B.shape[1], B.shape[0] - 1 + g, dg_dz, _ = _check_spline_shape(knots, g, dg_dz) + S, degree = g.shape[1], g.shape[0] - 1 ext = poly_root( - c=dB_dz, a_min=jnp.array([0.0]), a_max=jnp.diff(knots), sentinel=sentinel + c=dg_dz, a_min=jnp.array([0.0]), a_max=jnp.diff(knots), sentinel=sentinel ) assert ext.shape == (S, knots.size - 1, degree - 1) - B_ext = polyval_vec(x=ext, c=B[..., jnp.newaxis]).reshape(S, -1) + g_ext = polyval_vec(x=ext, c=g[..., jnp.newaxis]).reshape(S, -1) # Transform out of local power basis expansion. 
ext = (ext + knots[:-1, jnp.newaxis]).reshape(S, -1) - return ext, B_ext + return ext, g_ext + + +def _where_argmin_g(z1, z2, ext, g_ext, upper_sentinel): + assert z1.shape[1] == z2.shape[1] == ext.shape[0] == g_ext.shape[0] + return jnp.where( + (z1[..., jnp.newaxis] < ext[:, jnp.newaxis]) + & (ext[:, jnp.newaxis] < z2[..., jnp.newaxis]), + g_ext[:, jnp.newaxis], + upper_sentinel, # don't make too large or softmax loses resolution + ) -def interp_to_argmin_B_soft(g, z1, z2, knots, B, dB_dz, method="cubic", beta=-50): - """Interpolate ``g`` to the deepest point in the magnetic well. +def interp_to_argmin_g( + h, z1, z2, knots, g, dg_dz, method="cubic", beta=-100, upper_sentinel=1e2 +): + """Interpolate ``h`` to the deepest point of ``g`` between ``z1`` and ``z2``. - Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A = argmin_E |B|(ζ). Returns mean_A g(ζ). + Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A = argmin_E g(ζ). Returns mean_A h(ζ). Parameters ---------- - g : jnp.ndarray + h : jnp.ndarray Shape must broadcast with (S, knots.size). Values evaluated on ``knots`` to interpolate. + z1, z2 : jnp.ndarray + Shape (P, S, num_well). + ζ coordinates of bounce points. The points are grouped and ordered such + that the straight line path between ``z1`` and ``z2`` resides in the + epigraph of g. + knots : jnp.ndarray + Shape (knots.size, ). + ζ coordinates of spline knots. Must be strictly increasing. + g : jnp.ndarray + Shape (g.shape[0], S, knots.size - 1). + Polynomial coefficients of the spline of g in local power basis. + First axis enumerates the coefficients of power series. Second axis + enumerates the splines. Last axis enumerates the polynomials that + compose a particular spline. + dg_dz : jnp.ndarray + Shape (g.shape[0] - 1, *g.shape[1:]). + Polynomial coefficients of the spline of ∂g/∂ζ in local power basis. + First axis enumerates the coefficients of power series. Second axis + enumerates the splines. Last axis enumerates the polynomials that + compose a particular spline. + method : str + Method of interpolation. + See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. + Default is cubic C1 local spline. beta : float More negative gives exponentially better approximation at the expense of noisier gradients - noisier in the physics sense (unrelated to the automatic differentiation). + upper_sentinel : float + Something a good bit larger than ``g``. For example if max(g) is + 10, then 50 will more than suffice. + + Warnings + -------- + Recall that if ``g`` is small then the effect of β is reduced. + + Returns + ------- + h : jnp.ndarray + Shape (P, S, num_well) """ - ext, B = _get_extrema(knots, B, dB_dz, sentinel=0) - assert ext.shape[0] == B.shape[0] == z1.shape[1] == z2.shape[1] - # TODO: Check if softmax has where argument that works like this. - # (numpy ufunc where typically does not.) 
- argmin = softmax( - beta - * jnp.where( - (z1[..., jnp.newaxis] < ext[:, jnp.newaxis]) - & (ext[:, jnp.newaxis] < z2[..., jnp.newaxis]), - jnp.expand_dims(B / jnp.mean(B, axis=-1, keepdims=True), axis=1), - 1e2, # >> max(|B|) / mean(|B|) - ), - axis=-1, - ) - g = jnp.linalg.vecdot( + ext, g = _get_extrema(knots, g, dg_dz, sentinel=0) + argmin = softmax(beta * _where_argmin_g(z1, z2, ext, g, upper_sentinel), axis=-1) + h = jnp.linalg.vecdot( argmin, - interp1d_vec(ext, knots, jnp.atleast_2d(g), method=method)[:, jnp.newaxis], + interp1d_vec(ext, knots, jnp.atleast_2d(h), method=method)[:, jnp.newaxis], ) - assert g.shape == z1.shape == z2.shape - return g + assert h.shape == z1.shape + return h + +def interp_to_argmin_g_hard(h, z1, z2, knots, g, dg_dz, method="cubic"): + """Interpolate ``h`` to the deepest point of ``g`` between ``z1`` and ``z2``. -# Less efficient than soft if P >> 1. -def interp_to_argmin_B_hard(g, z1, z2, knots, B, dB_dz, method="cubic"): - """Interpolate ``g`` to the deepest point in the magnetic well. + Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A ∈ argmin_E g(ζ). Returns h(A). - Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A ∈ argmin_E |B|(ζ). Returns g(A). + See Also + -------- + interp_to_argmin_g + Accomplishes the same task, but handles the case of non-unique global minima + more correctly. It is also more efficient if P >> 1. Parameters ---------- - g : jnp.ndarray + h : jnp.ndarray Shape must broadcast with (S, knots.size). Values evaluated on ``knots`` to interpolate. + z1, z2 : jnp.ndarray + Shape (P, S, num_well). + ζ coordinates of bounce points. The points are grouped and ordered such + that the straight line path between ``z1`` and ``z2`` resides in the + epigraph of g. + knots : jnp.ndarray + Shape (knots.size, ). + ζ coordinates of spline knots. Must be strictly increasing. + g : jnp.ndarray + Shape (g.shape[0], S, knots.size - 1). + Polynomial coefficients of the spline of g in local power basis. + First axis enumerates the coefficients of power series. Second axis + enumerates the splines. Last axis enumerates the polynomials that + compose a particular spline. + dg_dz : jnp.ndarray + Shape (g.shape[0] - 1, *g.shape[1:]). + Polynomial coefficients of the spline of ∂g/∂ζ in local power basis. + First axis enumerates the coefficients of power series. Second axis + enumerates the splines. Last axis enumerates the polynomials that + compose a particular spline. + method : str + Method of interpolation. + See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. + Default is cubic C1 local spline. """ - ext, B = _get_extrema(knots, B, dB_dz, sentinel=0) - assert ext.shape[0] == B.shape[0] == z1.shape[1] == z2.shape[1] - argmin = jnp.argmin( - jnp.where( - (z1[..., jnp.newaxis] < ext[:, jnp.newaxis]) - & (ext[:, jnp.newaxis] < z2[..., jnp.newaxis]), - B[:, jnp.newaxis], - 1e2 + jnp.max(B), - ), - axis=-1, - ) + ext, g = _get_extrema(knots, g, dg_dz, sentinel=0) + # We can use the non-differentiable max because we actually want the gradients + # to accumulate through only the minimum anyway since we are differentiating how + # our physics objective changes wrt physics stuff not wrt which of the extrema + # are interpolated to. 
+ argmin = jnp.argmin(_where_argmin_g(z1, z2, ext, g, jnp.max(g) + 10), axis=-1) A = jnp.take_along_axis(ext[jnp.newaxis], argmin, axis=-1) - g = interp1d_vec(A, knots, jnp.atleast_2d(g), method=method) - assert g.shape == z1.shape == z2.shape - return g + h = interp1d_vec(A, knots, jnp.atleast_2d(h), method=method) + assert h.shape == z1.shape + return h def plot_ppoly( diff --git a/desc/integrals/interp_utils.py b/desc/integrals/interp_utils.py index dd4b5c6466..c0fd0818c7 100644 --- a/desc/integrals/interp_utils.py +++ b/desc/integrals/interp_utils.py @@ -32,10 +32,8 @@ def cheb_pts(N, lobatto=False, domain=(-1, 1)): This is a common definition of the Chebyshev points (see Boyd, Chebyshev and Fourier Spectral Methods p. 498). These are the points demanded by discrete cosine transformations to interpolate Chebyshev series because the cosine - basis for the DCT is defined on [0, π]. - - They differ in ordering from the points returned by - ``numpy.polynomial.chebyshev.chebpts1`` and + basis for the DCT is defined on [0, π]. They differ in ordering from the + points returned by ``numpy.polynomial.chebyshev.chebpts1`` and ``numpy.polynomial.chebyshev.chebpts2``. Parameters @@ -291,7 +289,7 @@ def irfft2_non_uniform(xq, a, M, N, axes=(-2, -1)): + (n * xq[..., idx[1], jnp.newaxis])[..., jnp.newaxis, :] ) ) - fq = 2.0 * jnp.sum(basis * a, axis=(-2, -1)).real + fq = 2.0 * jnp.real(basis * a).sum(axis=(-2, -1)) return fq @@ -540,7 +538,7 @@ def poly_root( if sort or distinct: r = jnp.sort(r, axis=-1) - return filter_distinct(r, sentinel, eps) if distinct else r + return _filter_distinct(r, sentinel, eps) if distinct else r def _root_cubic(a, b, c, d, sentinel, eps, distinct): @@ -623,7 +621,7 @@ def _concat_sentinel(r, sentinel, num=1): return jnp.append(r, sent, axis=-1) -def filter_distinct(r, sentinel, eps): +def _filter_distinct(r, sentinel, eps): """Set all but one of matching adjacent elements in ``r`` to ``sentinel``.""" # eps needs to be low enough that close distinct roots do not get removed. # Otherwise, algorithms relying on continuity will fail. diff --git a/tests/test_integrals.py b/tests/test_integrals.py index e5d600f917..bb83ab7d13 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -41,8 +41,8 @@ bounce_points, get_alpha, get_pitch, - interp_to_argmin_B_hard, - interp_to_argmin_B_soft, + interp_to_argmin_g, + interp_to_argmin_g_hard, plot_ppoly, ) from desc.integrals.interp_utils import fourier_pts @@ -1063,7 +1063,7 @@ def denominator(B, pitch): data = eq.compute( Bounce1D.required_names() + ["min_tz |B|", "max_tz |B|", "g_zz"], grid=grid ) - bounce = Bounce1D(grid.source_grid, data, check=True) + bounce = Bounce1D(grid.source_grid, data, check=True, quad=leggauss(3)) pitch = get_pitch( grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 ) @@ -1094,18 +1094,18 @@ def denominator(B, pitch): print(pitch[:, i, j]) @pytest.mark.unit - @pytest.mark.parametrize("func", [interp_to_argmin_B_soft, interp_to_argmin_B_hard]) - def test_interp_to_argmin_B(self, func): + @pytest.mark.parametrize("func", [interp_to_argmin_g, interp_to_argmin_g_hard]) + def test_interp_to_argmin_g(self, func): """Test argmin interpolation.""" # noqa: D202 # Test functions chosen with purpose; don't change unless plotted and compared. 
- def f(z): + def h(z): return np.cos(3 * z) * np.sin(2 * np.cos(z)) + np.cos(1.2 * z) - def B(z): + def g(z): return np.sin(3 * z) * np.cos(1 / (1 + z)) * np.cos(z**2) * z - def dB_dz(z): + def dg_dz(z): return ( 3 * z * np.cos(3 * z) * np.cos(z**2) * np.cos(1 / (1 + z)) - 2 * z**2 * np.sin(3 * z) * np.sin(z**2) * np.cos(1 / (1 + z)) @@ -1119,21 +1119,21 @@ def dB_dz(z): { "B^zeta": np.ones_like(zeta), "B^zeta_z|r,a": np.ones_like(zeta), - "|B|": B(zeta), - "|B|_z|r,a": dB_dz(zeta), + "|B|": g(zeta), + "|B|_z|r,a": dg_dz(zeta), }, ) np.testing.assert_allclose(bounce._zeta, zeta) argmin = 5.61719 np.testing.assert_allclose( - f(argmin), + h(argmin), func( - f(zeta), + h(zeta), z1=np.array(0, ndmin=3), z2=np.array(2 * np.pi, ndmin=3), knots=zeta, - B=bounce.B, - dB_dz=bounce._dB_dz, + g=bounce.B, + dg_dz=bounce._dB_dz, ), rtol=1e-3, ) @@ -1386,25 +1386,26 @@ def _periodic_fun(nodes, M, N): return f.reshape(M, N) @pytest.mark.unit - def test_bp1_first(self): + def test_z1_first(self): """Test that bounce points are computed correctly.""" M, N = 1, 10 domain = (-1, 1) nodes = FourierChebyshevBasis.nodes(M, N, domain=domain) - f = self._periodic_fun(nodes, M, N) - fcb = FourierChebyshevBasis(f, domain=domain) - pcb = fcb.compute_cheb(fourier_pts(M)) - pitch = 1 / np.linspace(1, 4, 20) - bp1, bp2 = pcb.intersect1d(pitch) - pcb.check_intersect1d(bp1, bp2, pitch) - bp1, bp2 = TestBounce1DPoints.filter(bp1, bp2) + f = -self._periodic_fun(nodes, M, N) + cheb = FourierChebyshevBasis(f, domain=domain).compute_cheb(fourier_pts(M)) + pitch = 1 / np.linspace(1, 4, 1) + z1, z2 = cheb.intersect1d(1 / pitch, num_intersect=1) + print(z1) + print(z2) + cheb.check_intersect1d(z1, z2, 1 / pitch) + z1, z2 = TestBounce1DPoints.filter(z1, z2) def f(z): return -2 * np.cos(1 / (0.1 + z**2)) + 2 r = self._cheb_intersect(chebinterpolate(f, N), 1 / pitch) - np.testing.assert_allclose(bp1, r[::2], rtol=1e-3) - np.testing.assert_allclose(bp2, r[1::2], rtol=1e-3) + np.testing.assert_allclose(z1, r[::2], rtol=1e-3) + np.testing.assert_allclose(z2, r[1::2], rtol=1e-3) class TestBounce2D: diff --git a/tests/test_interp_utils.py b/tests/test_interp_utils.py index f2225c0334..250ca42a8e 100644 --- a/tests/test_interp_utils.py +++ b/tests/test_interp_utils.py @@ -29,90 +29,93 @@ from desc.integrals.quad_utils import bijection_to_disc -@pytest.mark.unit -def test_poly_root(): - """Test vectorized computation of cubic polynomial exact roots.""" - cubic = 4 - c = np.arange(-24, 24).reshape(cubic, 6, -1) * np.pi - # make sure broadcasting won't hide error in implementation - assert np.unique(c.shape).size == c.ndim - constant = np.broadcast_to(np.arange(c.shape[-1]), c.shape[1:]) - constant = np.stack([constant, constant]) - root = poly_root(c, constant, sort=True) - - for i in range(constant.shape[0]): - for j in range(c.shape[1]): - for k in range(c.shape[2]): - d = c[-1, j, k] - constant[i, j, k] - np.testing.assert_allclose( - actual=root[i, j, k], - desired=np.sort(np.roots([*c[:-1, j, k], d])), - ) - - c = np.array( - [ - [1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1], - [1, -1, -8, 12], - [1, -6, 11, -6], - [0, -6, 11, -2], - ] - ) - root = poly_root(c.T, sort=True, distinct=True) - for j in range(c.shape[0]): - unique_roots = np.unique(np.roots(c[j])) - np.testing.assert_allclose( - actual=root[j][~np.isnan(root[j])], desired=unique_roots, err_msg=str(j) - ) - c = np.array([0, 1, -1, -8, 12]) - root = poly_root(c, sort=True, distinct=True) - root = root[~np.isnan(root)] - unique_root = np.unique(np.roots(c)) 
- assert root.size == unique_root.size - np.testing.assert_allclose(root, unique_root) - - -@pytest.mark.unit -def test_polyder_vec(): - """Test vectorized computation of polynomial derivative.""" - quintic = 6 - c = np.arange(-18, 18).reshape(quintic, 3, -1) * np.pi - # make sure broadcasting won't hide error in implementation - assert np.unique(c.shape).size == c.ndim - derivative = polyder_vec(c) - desired = np.vectorize(np.polyder, signature="(m)->(n)")(c.T).T - np.testing.assert_allclose(derivative, desired) - - -@pytest.mark.unit -def test_polyval_vec(): - """Test vectorized computation of polynomial evaluation.""" - - def test(x, c): - val = polyval_vec(x=x, c=c) - c = np.moveaxis(c, 0, -1) - x = x[..., np.newaxis] - np.testing.assert_allclose( - val, - np.vectorize(np.polyval, signature="(m),(n)->(n)")(c, x).squeeze(axis=-1), +class TestPolyUtils: + """Test polynomial stuff used for local spline interpolation.""" + + @pytest.mark.unit + def test_poly_root(self): + """Test vectorized computation of cubic polynomial exact roots.""" + cubic = 4 + c = np.arange(-24, 24).reshape(cubic, 6, -1) * np.pi + # make sure broadcasting won't hide error in implementation + assert np.unique(c.shape).size == c.ndim + constant = np.broadcast_to(np.arange(c.shape[-1]), c.shape[1:]) + constant = np.stack([constant, constant]) + root = poly_root(c, constant, sort=True) + + for i in range(constant.shape[0]): + for j in range(c.shape[1]): + for k in range(c.shape[2]): + d = c[-1, j, k] - constant[i, j, k] + np.testing.assert_allclose( + actual=root[i, j, k], + desired=np.sort(np.roots([*c[:-1, j, k], d])), + ) + + c = np.array( + [ + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [1, -1, -8, 12], + [1, -6, 11, -6], + [0, -6, 11, -2], + ] ) + root = poly_root(c.T, sort=True, distinct=True) + for j in range(c.shape[0]): + unique_roots = np.unique(np.roots(c[j])) + np.testing.assert_allclose( + actual=root[j][~np.isnan(root[j])], desired=unique_roots, err_msg=str(j) + ) + c = np.array([0, 1, -1, -8, 12]) + root = poly_root(c, sort=True, distinct=True) + root = root[~np.isnan(root)] + unique_root = np.unique(np.roots(c)) + assert root.size == unique_root.size + np.testing.assert_allclose(root, unique_root) - quartic = 5 - c = np.arange(-60, 60).reshape(quartic, 3, -1) * np.pi - # make sure broadcasting won't hide error in implementation - assert np.unique(c.shape).size == c.ndim - x = np.linspace(0, 20, c.shape[1] * c.shape[2]).reshape(c.shape[1], c.shape[2]) - test(x, c) + @pytest.mark.unit + def test_polyder_vec(self): + """Test vectorized computation of polynomial derivative.""" + quintic = 6 + c = np.arange(-18, 18).reshape(quintic, 3, -1) * np.pi + # make sure broadcasting won't hide error in implementation + assert np.unique(c.shape).size == c.ndim + derivative = polyder_vec(c) + desired = np.vectorize(np.polyder, signature="(m)->(n)")(c.T).T + np.testing.assert_allclose(derivative, desired) + + @pytest.mark.unit + def test_polyval_vec(self): + """Test vectorized computation of polynomial evaluation.""" + + def test(x, c): + val = polyval_vec(x=x, c=c) + c = np.moveaxis(c, 0, -1) + x = x[..., np.newaxis] + np.testing.assert_allclose( + val, + np.vectorize(np.polyval, signature="(m),(n)->(n)")(c, x).squeeze( + axis=-1 + ), + ) - x = np.stack([x, x * 2], axis=0) - x = np.stack([x, x * 2, x * 3, x * 4], axis=0) - # make sure broadcasting won't hide error in implementation - assert np.unique(x.shape).size == x.ndim - assert c.shape[1:] == x.shape[x.ndim - (c.ndim - 1) :] - assert 
np.unique((c.shape[0],) + x.shape[c.ndim - 1 :]).size == x.ndim - 1 - test(x, c) + quartic = 5 + c = np.arange(-60, 60).reshape(quartic, 3, -1) * np.pi + # make sure broadcasting won't hide error in implementation + assert np.unique(c.shape).size == c.ndim + x = np.linspace(0, 20, c.shape[1] * c.shape[2]).reshape(c.shape[1], c.shape[2]) + test(x, c) + + x = np.stack([x, x * 2], axis=0) + x = np.stack([x, x * 2, x * 3, x * 4], axis=0) + # make sure broadcasting won't hide error in implementation + assert np.unique(x.shape).size == x.ndim + assert c.shape[1:] == x.shape[x.ndim - (c.ndim - 1) :] + assert np.unique((c.shape[0],) + x.shape[c.ndim - 1 :]).size == x.ndim - 1 + test(x, c) def _f_1d(x): @@ -199,9 +202,7 @@ def test_interp_rfft(self, func, n): fq, ) - @pytest.mark.xfail( - reason="Numpy, jax, and scipy need to fix bug with 2D FFT (fft2)." - ) + @pytest.mark.xfail(reason="Does numpy, jax, and scipy need to fix a bug with FFT?") @pytest.mark.unit @pytest.mark.parametrize( "func, m, n", @@ -254,8 +255,7 @@ def test_dct(self, f, M, lobatto): or interior roots grid for Chebyshev points. """ - # Want to unit test external code used in Fourier Chebyshev interpolation - # due to issues like + # Need to test fft used in Fourier Chebyshev interpolation due to issues like # https://github.com/scipy/scipy/issues/15033 # https://github.com/scipy/scipy/issues/21198 # https://github.com/google/jax/issues/22466, @@ -278,7 +278,7 @@ def test_dct(self, f, M, lobatto): else: fq_2 = norm * idct(dct(f(m), type=dct_type), n=n.size, type=dct_type) np.testing.assert_allclose(fq_1, f(n), atol=1e-14) - # JAX is much less accurate than scipy. + # JAX is less accurate than scipy. np.testing.assert_allclose(fq_2, f(n), atol=1e-6) fig, ax = plt.subplots() @@ -295,17 +295,23 @@ def test_dct(self, f, M, lobatto): def test_interp_dct(self, f, M): """Test non-uniform DCT interpolation.""" c0 = chebinterpolate(f, M - 1) - assert not np.allclose(c0, cheb_from_dct(dct(f(chebpts1(M)), 2) / M)), ( - "Interpolation should fail because cosine basis is in different domain. " - "Use better test function." + assert not np.allclose( + c0, + cheb_from_dct(dct(f(chebpts1(M)), 2)) / M, + ), ( + "Interpolation should fail because cosine basis is in wrong domain, " + "yet the supplied test function was interpolated fine using this wrong " + "domain. Pick a better test function." ) # test interpolation z = cheb_pts(M) fz = f(z) np.testing.assert_allclose(c0, cheb_from_dct(dct(fz, 2) / M), atol=1e-13) - if np.allclose(_f_algebraic(z), fz): + if np.allclose(_f_algebraic(z), fz): # Should reconstruct exactly. 
np.testing.assert_allclose( - cheb2poly(c0), np.array([-np.e, -1, 0, 1, 1, 0, -10]), atol=1e-13 + cheb2poly(c0), + np.array([-np.e, -1, 0, 1, 1, 0, -10]), + atol=1e-13, ) # test evaluation xq = np.arange(10 * 3 * 2).reshape(10, 3, 2) From 5fa37580c1803e99e169708705b87410be1b12b1 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 27 Aug 2024 02:51:11 -0400 Subject: [PATCH 221/241] fix docstrings --- desc/integrals/bounce_integral.py | 27 +++++++++++++-------------- desc/integrals/bounce_utils.py | 13 ++++++++++--- tests/test_integrals.py | 2 +- 3 files changed, 24 insertions(+), 18 deletions(-) diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 7d80dc8e31..8dc468e85f 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -591,10 +591,7 @@ class Bounce1D: For applications which reduce to computing a nonlinear function of distance along field lines between bounce points, it is required to identify these - points with field-line-following coordinates. In the special case of a linear - function summing integrals between bounce points over a flux surface, arbitrary - coordinate systems may be used as this operation reduces to a surface integral, - which is invariant to the order of summation. + points with field-line-following coordinates. The DESC coordinate system is related to field-line-following coordinate systems by a relation whose solution is best found with Newton iteration. @@ -634,12 +631,14 @@ class Bounce1D: Attributes ---------- - B : jnp.ndarray + _B : jnp.ndarray + TODO: Make this (4, L, M, N-1) now that tensor product in rho and alpha + required as well after GitHub PR #1214. Shape (4, L * M, N - 1). Polynomial coefficients of the spline of |B| in local power basis. First axis enumerates the coefficients of power series. Second axis - enumerates the splines along the field lines. Last axis enumerates the - polynomials that compose the spline along a particular field line. + enumerates the splines. Last axis enumerates the polynomials that + compose a particular spline. """ @@ -709,7 +708,7 @@ def __init__( # Compute local splines. self._zeta = grid.compress(grid.nodes[:, 2], surface_label="zeta") - self.B = jnp.moveaxis( + self._B = jnp.moveaxis( CubicHermiteSpline( x=self._zeta, y=self._data["|B|"], @@ -720,11 +719,11 @@ def __init__( source=1, destination=-1, ) - self._dB_dz = polyder_vec(self.B) + self._dB_dz = polyder_vec(self._B) degree = 3 - assert self.B.shape[0] == degree + 1 + assert self._B.shape[0] == degree + 1 assert self._dB_dz.shape[0] == degree - assert self.B.shape[-1] == self._dB_dz.shape[-1] == grid.num_zeta - 1 + assert self._B.shape[-1] == self._dB_dz.shape[-1] == grid.num_zeta - 1 @staticmethod def required_names(): @@ -788,7 +787,7 @@ def bounce_points(self, pitch, num_well=None): return bounce_points( pitch=pitch, knots=self._zeta, - B=self.B, + B=self._B, dB_dz=self._dB_dz, num_well=num_well, ) @@ -820,7 +819,7 @@ def check_bounce_points(self, z1, z2, pitch, plot=True, **kwargs): z2=z2, pitch=jnp.atleast_2d(pitch), knots=self._zeta, - B=self.B, + B=self._B, plot=plot, **kwargs, ) @@ -915,7 +914,7 @@ def integrate( z1=z1, z2=z2, knots=self._zeta, - g=self.B, + g=self._B, dg_dz=self._dB_dz, method=method, ) diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index f99df28e2b..5b9f206809 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -149,13 +149,13 @@ def bounce_points( Shape (knots.size, ). ζ coordinates of spline knots. 
Must be strictly increasing. B : jnp.ndarray - Shape (g.shape[0], S, knots.size - 1). + Shape (B.shape[0], S, knots.size - 1). Polynomial coefficients of the spline of |B| in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines. Last axis enumerates the polynomials that compose a particular spline. dB_dz : jnp.ndarray - Shape (g.shape[0] - 1, *g.shape[1:]). + Shape (B.shape[0] - 1, *B.shape[1:]). Polynomial coefficients of the spline of (∂|B|/∂ζ)|(ρ,α) in local power basis. First axis enumerates the coefficients of power series. Second axis enumerates the splines. Last axis enumerates the polynomials that @@ -660,7 +660,8 @@ def interp_to_argmin_g( Returns ------- h : jnp.ndarray - Shape (P, S, num_well) + Shape (P, S, num_well). + mean_A h(ζ) """ ext, g = _get_extrema(knots, g, dg_dz, sentinel=0) @@ -714,6 +715,12 @@ def interp_to_argmin_g_hard(h, z1, z2, knots, g, dg_dz, method="cubic"): See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. Default is cubic C1 local spline. + Returns + ------- + h : jnp.ndarray + Shape (P, S, num_well). + h(A) + """ ext, g = _get_extrema(knots, g, dg_dz, sentinel=0) # We can use the non-differentiable max because we actually want the gradients diff --git a/tests/test_integrals.py b/tests/test_integrals.py index bb83ab7d13..6565231a07 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -1132,7 +1132,7 @@ def dg_dz(z): z1=np.array(0, ndmin=3), z2=np.array(2 * np.pi, ndmin=3), knots=zeta, - g=bounce.B, + g=bounce._B, dg_dz=bounce._dB_dz, ), rtol=1e-3, From 594cbd87ad35591287dc6130b88126262563fb4d Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 27 Aug 2024 03:13:10 -0400 Subject: [PATCH 222/241] Remove stuff that should be in ku/fourier_bounce that came here after merging --- desc/integrals/__init__.py | 2 +- desc/integrals/basis.py | 604 +----------------------------- desc/integrals/bounce_integral.py | 555 +-------------------------- desc/integrals/bounce_utils.py | 28 -- desc/integrals/interp_utils.py | 368 +----------------- tests/test_integrals.py | 205 +--------- tests/test_interp_utils.py | 227 +---------- 7 files changed, 10 insertions(+), 1979 deletions(-) diff --git a/desc/integrals/__init__.py b/desc/integrals/__init__.py index e3d59d02ef..88cc3001ca 100644 --- a/desc/integrals/__init__.py +++ b/desc/integrals/__init__.py @@ -1,6 +1,6 @@ """Classes for function integration.""" -from .bounce_integral import Bounce1D, Bounce2D +from .bounce_integral import Bounce1D from .singularities import ( DFTInterpolator, FFTInterpolator, diff --git a/desc/integrals/basis.py b/desc/integrals/basis.py index 6a40b0bc8f..8b4f5fc3dd 100644 --- a/desc/integrals/basis.py +++ b/desc/integrals/basis.py @@ -2,31 +2,8 @@ from functools import partial -import numpy as np -from matplotlib import pyplot as plt - -from desc.backend import dct, flatnonzero, idct, irfft, jnp, put, rfft -from desc.integrals.interp_utils import ( - _filter_distinct, - cheb_from_dct, - cheb_pts, - chebroots_vec, - fourier_pts, - harmonic, - idct_non_uniform, - irfft_non_uniform, -) -from desc.integrals.quad_utils import bijection_from_disc, bijection_to_disc -from desc.utils import ( - atleast_2d_end, - atleast_3d_mid, - atleast_nd, - errorif, - flatten_matrix, - isposint, - setdefault, - take_mask, -) +from desc.backend import flatnonzero, jnp, put +from desc.utils import setdefault def _subtract(c, k): @@ -97,583 +74,6 @@ def _in_epigraph_and(is_intersect, df_dy_sign): return put(is_intersect, 
idx[0], edge_case) -def _chebcast(cheb, arr): - # Input should not have rightmost dimension of cheb that iterates coefficients, - # but may have additional leftmost dimension for batch operation. - errorif( - jnp.ndim(arr) > cheb.ndim, - NotImplementedError, - msg=f"Only one additional axis for batch dimension is allowed. " - f"Got {jnp.ndim(arr) - cheb.ndim + 1} additional axes.", - ) - return cheb if jnp.ndim(arr) < cheb.ndim else cheb[jnp.newaxis] - - -class FourierChebyshevBasis: - """Fourier-Chebyshev series. - - f(x, y) = ∑ₘₙ aₘₙ ψₘ(x) Tₙ(y) - where ψₘ are trigonometric polynomials on [0, 2π] - and Tₙ are Chebyshev polynomials on [−yₘᵢₙ, yₘₐₓ]. - - Notes - ----- - Performance may improve significantly - if the spectral resolutions ``M`` and ``N`` are powers of two. - - Attributes - ---------- - M : int - Fourier spectral resolution. - N : int - Chebyshev spectral resolution. - lobatto : bool - Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) - instead of the interior roots grid for Chebyshev points. - domain : (float, float) - Domain for y coordinates. - - """ - - def __init__(self, f, domain=(-1, 1), lobatto=False): - """Interpolate Fourier-Chebyshev basis to ``f``. - - Parameters - ---------- - f : jnp.ndarray - Shape (..., M, N). - Samples of real function on the ``FourierChebyshevBasis.nodes`` grid. - domain : (float, float) - Domain for y coordinates. Default is [-1, 1]. - lobatto : bool - Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) - instead of the interior roots grid for Chebyshev points. - - """ - self.M = f.shape[-2] - self.N = f.shape[-1] - errorif(domain[0] > domain[-1], msg="Got inverted domain.") - self.domain = tuple(domain) - errorif(lobatto, NotImplementedError, "JAX hasn't implemented type 1 DCT.") - self.lobatto = bool(lobatto) - self._c = FourierChebyshevBasis._fast_transform(f, self.lobatto) - - @staticmethod - def _fast_transform(f, lobatto): - N = f.shape[-1] - c = rfft( - dct(f, type=2 - lobatto, axis=-1) / (N - lobatto), - axis=-2, - norm="forward", - ) - return c - - @staticmethod - def nodes(M, N, L=None, domain=(-1, 1), lobatto=False): - """Tensor product grid of optimal collocation nodes for this basis. - - Parameters - ---------- - M : int - Grid resolution in x direction. Preferably power of 2. - N : int - Grid resolution in y direction. Preferably power of 2. - L : int or jnp.ndarray - Optional, resolution in radial direction of domain [0, 1]. - May also be an array of coordinates values. If given, then the - returned ``coords`` is a 3D tensor-product with shape (L * M * N, 3). - domain : (float, float) - Domain for y coordinates. Default is [-1, 1]. - lobatto : bool - Whether to use the Gauss-Lobatto (Extrema-plus-Endpoint) - instead of the interior roots grid for Chebyshev points. - - Returns - ------- - coords : jnp.ndarray - Shape (M * N, 2). - Grid of (x, y) points for optimal interpolation. - - """ - x = fourier_pts(M) - y = cheb_pts(N, lobatto, domain) - if L is not None: - if isposint(L): - L = jnp.flipud(jnp.linspace(1, 0, L, endpoint=False)) - coords = (jnp.atleast_1d(L), x, y) - else: - coords = (x, y) - coords = list(map(jnp.ravel, jnp.meshgrid(*coords, indexing="ij"))) - coords = jnp.column_stack(coords) - return coords - - def evaluate(self, M, N): - """Evaluate Fourier-Chebyshev series. - - Parameters - ---------- - M : int - Grid resolution in x direction. Preferably power of 2. - N : int - Grid resolution in y direction. Preferably power of 2. 
- - Returns - ------- - fq : jnp.ndarray - Shape (..., M, N) - Fourier-Chebyshev series evaluated at - ``FourierChebyshevBasis.nodes(M,N,L,self.domain,self.lobatto)``. - - """ - fq = idct( - irfft(self._c, n=M, axis=-2, norm="forward"), - type=2 - self.lobatto, - n=N, - axis=-1, - ) * (N - self.lobatto) - return fq - - def harmonics(self): - """Spectral coefficients aₘₙ of the interpolating polynomial. - - Transform Fourier interpolant harmonics to Nyquist trigonometric - interpolant harmonics so that the coefficients are all real. - - Returns - ------- - a_mn : jnp.ndarray - Shape (..., M, N). - Real valued spectral coefficients for Fourier-Chebyshev basis. - - """ - a_mn = harmonic(cheb_from_dct(self._c, axis=-1), self.M, axis=-2) - assert a_mn.shape[-2:] == (self.M, self.N) - return a_mn - - def compute_cheb(self, x): - """Evaluate Fourier basis at ``x`` to obtain set of 1D Chebyshev coefficients. - - Parameters - ---------- - x : jnp.ndarray - Points to evaluate Fourier basis. - - Returns - ------- - cheb : ChebyshevBasisSet - Chebyshev coefficients αₙ(x=``x``) for f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). - - """ - # Always add new axis to broadcast against Chebyshev coefficients. - x = jnp.atleast_1d(x)[..., jnp.newaxis] - cheb = cheb_from_dct(irfft_non_uniform(x, self._c, self.M, axis=-2), axis=-1) - assert cheb.shape[-2:] == (x.shape[-2], self.N) - return ChebyshevBasisSet(cheb, self.domain) - - -class ChebyshevBasisSet: - """Chebyshev series. - - { fₓ | fₓ : y ↦ ∑ₙ₌₀ᴺ⁻¹ aₙ(x) Tₙ(y) } - and Tₙ are Chebyshev polynomials on [−yₘᵢₙ, yₘₐₓ] - - Attributes - ---------- - cheb : jnp.ndarray - Shape (..., M, N). - Chebyshev coefficients αₙ(x) for fₓ(y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). - M : int - Number of functions in this basis set. - N : int - Chebyshev spectral resolution. - domain : (float, float) - Domain for y coordinates. - - """ - - _eps = min(jnp.finfo(jnp.array(1.0).dtype).eps * 1e2, 1e-10) - - def __init__(self, cheb, domain=(-1, 1)): - """Make Chebyshev series basis from given coefficients. - - Parameters - ---------- - cheb : jnp.ndarray - Shape (..., M, N). - Chebyshev coefficients αₙ(x=``x``) for f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y). - domain : (float, float) - Domain for y coordinates. Default is [-1, 1]. - - """ - self.cheb = jnp.atleast_2d(cheb) - errorif(domain[0] > domain[-1], msg="Got inverted domain.") - self.domain = tuple(domain) - - @property - def M(self): - """Number of functions in this basis set.""" - return self.cheb.shape[-2] - - @property - def N(self): - """Chebyshev spectral resolution.""" - return self.cheb.shape[-1] - - def isomorphism_to_C1(self, y): - """Return coordinates z ∈ ℂ isomorphic to (x, y) ∈ ℂ². - - Maps row x of y to z = y + f(x) where f(x) = x * |domain|. - - Parameters - ---------- - y : jnp.ndarray - Shape (..., y.shape[-2], y.shape[-1]). - Second to last axis iterates the rows. - - Returns - ------- - z : jnp.ndarray - Shape y.shape. - Isomorphic coordinates. - - """ - assert y.ndim >= 2 - z_shift = jnp.arange(y.shape[-2]) * (self.domain[-1] - self.domain[0]) - z = y + z_shift[:, jnp.newaxis] - return z - - def isomorphism_to_C2(self, z): - """Return coordinates (x, y) ∈ ℂ² isomorphic to z ∈ ℂ. - - Returns index x and minimum value y such that - z = f(x) + y where f(x) = x * |domain|. - - Parameters - ---------- - z : jnp.ndarray - Shape z.shape. - - Returns - ------- - x_idx, y_val : (jnp.ndarray, jnp.ndarray) - Shape z.shape. - Isomorphic coordinates. 
- - """ - x_idx, y_val = jnp.divmod(z - self.domain[0], self.domain[-1] - self.domain[0]) - x_idx = x_idx.astype(int) - y_val += self.domain[0] - return x_idx, y_val - - def eval1d(self, z, cheb=None): - """Evaluate piecewise Chebyshev series at coordinates z. - - Parameters - ---------- - z : jnp.ndarray - Shape (..., *cheb.shape[:-2], z.shape[-1]). - Coordinates in [sef.domain[0], ∞). - The coordinates z ∈ ℝ are assumed isomorphic to (x, y) ∈ ℝ² where - ``z // domain`` yields the index into the proper Chebyshev series - along the second to last axis of ``cheb`` and ``z % domain`` is - the coordinate value on the domain of that Chebyshev series. - cheb : jnp.ndarray - Shape (..., M, N). - Chebyshev coefficients to use. If not given, uses ``self.cheb``. - - Returns - ------- - f : jnp.ndarray - Shape z.shape. - Chebyshev basis evaluated at z. - - """ - cheb = _chebcast(setdefault(cheb, self.cheb), z) - N = cheb.shape[-1] - x_idx, y = self.isomorphism_to_C2(z) - y = bijection_to_disc(y, self.domain[0], self.domain[1]) - # Chebyshev coefficients αₙ for f(z) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x[z]) Tₙ(y[z]) - # are held in cheb with shape (..., num cheb series, N). - cheb = jnp.take_along_axis(cheb, x_idx[..., jnp.newaxis], axis=-2) - f = idct_non_uniform(y, cheb, N) - assert f.shape == z.shape - return f - - def intersect2d(self, k=0.0, eps=_eps): - """Coordinates yᵢ such that f(x, yᵢ) = k(x). - - Parameters - ---------- - k : jnp.ndarray - Shape must broadcast with (..., *cheb.shape[:-1]). - Specify to find solutions yᵢ to f(x, yᵢ) = k(x). Default 0. - eps : float - Absolute tolerance with which to consider value as zero. - - Returns - ------- - y : jnp.ndarray - Shape (..., *cheb.shape[:-1], N - 1). - Solutions yᵢ of f(x, yᵢ) = k(x), in ascending order. - is_intersect : jnp.ndarray - Shape y.shape. - Boolean array into ``y`` indicating whether element is an intersect. - df_dy_sign : jnp.ndarray - Shape y.shape. - Sign of ∂f/∂y (x, yᵢ). - - """ - c = _subtract(_chebcast(self.cheb, k), k) - # roots yᵢ of f(x, y) = ∑ₙ₌₀ᴺ⁻¹ αₙ(x) Tₙ(y) - k(x) - y = chebroots_vec(c) - assert y.shape == (*c.shape[:-1], self.N - 1) - - # Intersects must satisfy y ∈ [-1, 1]. - # Pick sentinel such that only distinct roots are considered intersects. - y = _filter_distinct(y, sentinel=-2.0, eps=eps) - is_intersect = (jnp.abs(y.imag) <= eps) & (jnp.abs(y.real) <= 1.0) - # Ensure y is in domain of arcos; choose 1 because kernel probably cheaper. - y = jnp.where(is_intersect, y.real, 1.0) - - # TODO: Multipoint evaluation with FFT. - # Chapter 10, https://doi.org/10.1017/CBO9781139856065. - n = jnp.arange(self.N) - # ∂f/∂y = ∑ₙ₌₀ᴺ⁻¹ aₙ(x) n Uₙ₋₁(y) - # sign ∂f/∂y = sign ∑ₙ₌₀ᴺ⁻¹ aₙ(x) n sin(n arcos y) - df_dy_sign = jnp.sign( - jnp.linalg.vecdot( - n * jnp.sin(n * jnp.arccos(y)[..., jnp.newaxis]), - self.cheb[..., jnp.newaxis, :], - ) - ) - y = bijection_from_disc(y, self.domain[0], self.domain[-1]) - return y, is_intersect, df_dy_sign - - def intersect1d(self, k=0.0, num_intersect=None, pad_value=0.0): - """Coordinates z(x, yᵢ) such that fₓ(yᵢ) = k for every x. - - Parameters - ---------- - k : jnp.ndarray - Shape must broadcast with (..., *cheb.shape[:-2]). - Specify to find solutions yᵢ to fₓ(yᵢ) = k. Default 0. - num_intersect : int or None - Specify to return the first ``num_intersect`` intersects. - This is useful if ``num_intersect`` tightly bounds the actual number. - - If not specified, then all intersects are returned. 
If there were fewer - intersects detected than the size of the last axis of the returned arrays, - then that axis is padded with ``pad_value``. - pad_value : float - Value with which to pad array. Default 0. - - Returns - ------- - z1, z2 : (jnp.ndarray, jnp.ndarray) - Shape broadcasts with (..., *self.cheb.shape[:-2], num_intersect). - ``z1`` and ``z2`` are intersects satisfying ∂f/∂y <= 0 and ∂f/∂y >= 0, - respectively. The points are grouped and ordered such that the straight - line path between ``z1`` and ``z2`` resides in the epigraph of f. - - """ - errorif( - self.N < 2, - NotImplementedError, - "This method requires a Chebyshev spectral resolution of N > 1, " - f"but got N = {self.N}.", - ) - - # Add axis to use same k over all Chebyshev series of the piecewise object. - y, is_intersect, df_dy_sign = self.intersect2d( - jnp.atleast_1d(k)[..., jnp.newaxis] - ) - # Flatten so that last axis enumerates intersects along the piecewise object. - y, is_intersect, df_dy_sign = map( - flatten_matrix, (self.isomorphism_to_C1(y), is_intersect, df_dy_sign) - ) - - # Note for bounce point applications: - # We ignore the degenerate edge case where the boundary shared by adjacent - # polynomials is a left intersection i.e. ``is_z1`` because the subset of - # pitch values that generate this edge case has zero measure. By ignoring - # this, for those subset of pitch values the integrations will be done in - # the hypograph of |B|, which will yield zero. If in far future decide to - # not ignore this, note the solution is to disqualify intersects within - # ``_eps`` from ``domain[-1]``. - is_z1 = (df_dy_sign <= 0) & is_intersect - is_z2 = (df_dy_sign >= 0) & _in_epigraph_and(is_intersect, df_dy_sign) - - sentinel = self.domain[0] - 1.0 - z1 = take_mask(y, is_z1, size=num_intersect, fill_value=sentinel) - z2 = take_mask(y, is_z2, size=num_intersect, fill_value=sentinel) - - mask = (z1 > sentinel) & (z2 > sentinel) - # Set outside mask to same value so integration is over set of measure zero. - z1 = jnp.where(mask, z1, pad_value) - z2 = jnp.where(mask, z2, pad_value) - return z1, z2 - - def _check_shape(self, z1, z2, k): - """Return shapes that broadcast with (k.shape[0], *self.cheb.shape[:-2], W).""" - # Ensure pitch batch dim exists and add back dim to broadcast with wells. - k = atleast_nd(self.cheb.ndim - 1, k)[..., jnp.newaxis] - # Same but back dim already exists. - z1, z2 = atleast_nd(self.cheb.ndim, z1, z2) - # Cheb has shape (..., M, N) and others - # have shape (K, ..., W) - errorif(not (z1.ndim == z2.ndim == k.ndim == self.cheb.ndim)) - return z1, z2, k - - def check_intersect1d(self, z1, z2, k, plot=True, **kwargs): - """Check that intersects are computed correctly. - - Parameters - ---------- - z1, z2 : jnp.ndarray - Shape must broadcast with (*self.cheb.shape[:-2], W). - ``z1`` and ``z2`` are intersects satisfying ∂f/∂y <= 0 and ∂f/∂y >= 0, - respectively. The points are grouped and ordered such that the straight - line path between ``z1`` and ``z2`` resides in the epigraph of f. - k : jnp.ndarray - Shape must broadcast with *self.cheb.shape[:-2]. - k such that fₓ(yᵢ) = k. - plot : bool - Whether to plot stuff. Default is true. - kwargs : dict - Keyword arguments into ``self.plot``. 
- - """ - assert z1.shape == z2.shape - mask = (z1 - z2) != 0.0 - z1 = jnp.where(mask, z1, jnp.nan) - z2 = jnp.where(mask, z2, jnp.nan) - z1, z2, k = self._check_shape(z1, z2, k) - - err_1 = jnp.any(z1 > z2, axis=-1) - err_2 = jnp.any(z1[..., 1:] < z2[..., :-1], axis=-1) - f_midpoint = self.eval1d((z1 + z2) / 2) - assert f_midpoint.shape == z1.shape - err_3 = jnp.any(f_midpoint > k + self._eps, axis=-1) - if not (plot or jnp.any(err_1 | err_2 | err_3)): - return - - # Ensure l axis exists for iteration in below loop. - cheb = atleast_nd(3, self.cheb) - mask, z1, z2, f_midpoint = atleast_3d_mid(mask, z1, z2, f_midpoint) - err_1, err_2, err_3 = atleast_2d_end(err_1, err_2, err_3) - - for l in np.ndindex(cheb.shape[:-2]): - for p in range(k.shape[0]): - idx = (p, *l) - if not (err_1[idx] or err_2[idx] or err_3[idx]): - continue - _z1 = z1[idx][mask[idx]] - _z2 = z2[idx][mask[idx]] - if plot: - self.plot1d( - cheb=cheb[l], - z1=_z1, - z2=_z2, - k=k[idx], - **kwargs, - ) - print(" z1 | z2") - print(jnp.column_stack([_z1, _z2])) - assert not err_1[idx], "Intersects have an inversion.\n" - assert not err_2[idx], "Detected discontinuity.\n" - assert not err_3[idx], ( - "Detected f > k in well, implying the straight line path between " - "z1 and z2 is in hypograph(f). Increase spectral resolution.\n" - f"{f_midpoint[idx][mask[idx]]} > {k[idx] + self._eps}" - ) - idx = (slice(None), *l) - if plot: - self.plot1d( - cheb=cheb[l], - z1=z1[idx], - z2=z2[idx], - k=k[idx], - **kwargs, - ) - - def plot1d( - self, - cheb, - num=1000, - z1=None, - z2=None, - k=None, - k_transparency=0.5, - klabel=r"$k$", - title=r"Intersects $z$ in epigraph($f$) s.t. $f(z) = k$", - hlabel=r"$z$", - vlabel=r"$f$", - show=True, - ): - """Plot the piecewise Chebyshev series. - - Parameters - ---------- - cheb : jnp.ndarray - Shape (M, N). - Piecewise Chebyshev series f. - num : int - Number of points to evaluate ``cheb`` for plot. - z1 : jnp.ndarray - Shape (k.shape[0], W). - Optional, intersects with ∂f/∂y <= 0. - z2 : jnp.ndarray - Shape (k.shape[0], W). - Optional, intersects with ∂f/∂y >= 0. - k : jnp.ndarray - Shape (k.shape[0], ). - Optional, k such that fₓ(yᵢ) = k. - k_transparency : float - Transparency of pitch lines. - klabel : float - Label of intersect lines. - title : str - Plot title. - hlabel : str - Horizontal axis label. - vlabel : str - Vertical axis label. - show : bool - Whether to show the plot. Default is true. 
- - Returns - ------- - fig, ax : matplotlib figure and axes - - """ - fig, ax = plt.subplots() - legend = {} - z = jnp.linspace( - start=self.domain[0], - stop=self.domain[0] + (self.domain[1] - self.domain[0]) * self.M, - num=num, - ) - _add2legend(legend, ax.plot(z, self.eval1d(z, cheb), label=vlabel)) - _plot_intersect( - ax=ax, - legend=legend, - z1=z1, - z2=z2, - k=k, - k_transparency=k_transparency, - klabel=klabel, - ) - ax.set_xlabel(hlabel) - ax.set_ylabel(vlabel) - ax.legend(legend.values(), legend.keys(), loc="lower right") - ax.set_title(title) - plt.tight_layout() - if show: - plt.show() - plt.close() - return fig, ax - - def _add2legend(legend, lines): """Add lines to legend if it's not already in it.""" for line in setdefault(lines, [lines], hasattr(lines, "__iter__")): diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 8dc468e85f..811fbda12c 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -3,570 +3,21 @@ from interpax import CubicHermiteSpline from orthax.legendre import leggauss -from desc.backend import jnp, rfft2 -from desc.integrals.basis import FourierChebyshevBasis +from desc.backend import jnp from desc.integrals.bounce_utils import ( _check_bounce_points, bounce_points, bounce_quadrature, - get_alpha, interp_to_argmin_g, plot_ppoly, ) -from desc.integrals.interp_utils import interp_rfft2, irfft2_non_uniform, polyder_vec +from desc.integrals.interp_utils import polyder_vec from desc.integrals.quad_utils import ( automorphism_sin, - bijection_from_disc, get_quadrature, grad_automorphism_sin, ) -from desc.utils import errorif, flatten_matrix, setdefault, warnif - - -def _transform_to_clebsch(grid, desc_from_clebsch, M, N, B): - """Transform to Clebsch spectral domain. - - Parameters - ---------- - grid : Grid - Tensor-product grid in (ρ, θ, ζ) with uniformly spaced nodes in - (2π × 2π) poloidal and toroidal coordinates. - Note that below shape notation defines - L = ``grid.num_rho``, m = ``grid.num_theta``, and n = ``grid.num_zeta``. - desc_from_clebsch : jnp.ndarray - Shape (L * M * N, 3). - DESC coordinates (ρ, θ, ζ) sourced from the Clebsch coordinates - ``FourierChebyshevBasis.nodes(M,N,domain=FourierBounce.domain)``. - M : int - Grid resolution in poloidal direction for Clebsch coordinate grid. - Preferably power of 2. A good choice is ``m``. If the poloidal stream - function condenses the Fourier spectrum of |B| significantly, then a - larger number may be beneficial. - N : int - Grid resolution in toroidal direction for Clebsch coordinate grid. - Preferably power of 2. - B : jnp.ndarray - |B| evaluated on ``grid``. - - Returns - ------- - T, B : (FourierChebyshevBasis, FourierChebyshevBasis) - - """ - T = FourierChebyshevBasis( - # θ is computed on the optimal nodes in Clebsch space, - # which is a tensor product node set in Clebsch space. - f=desc_from_clebsch[:, 1].reshape(grid.num_rho, M, N), - domain=Bounce2D.domain, - ) - B = FourierChebyshevBasis( - f=interp_rfft2( - # Interpolate to optimal nodes in Clebsch space, - # which is not a tensor product node set in DESC space. - xq=desc_from_clebsch[:, 1:].reshape(grid.num_rho, -1, 2), - f=grid.meshgrid_reshape(B, order="rtz")[:, jnp.newaxis], - # Real fft over poloidal since usually num theta > num zeta. - axes=(-1, -2), - ).reshape(grid.num_rho, M, N), - domain=Bounce2D.domain, - ) - return T, B - - -def _transform_to_desc(grid, f): - """Transform to DESC spectral domain. 
- - Parameters - ---------- - grid : Grid - Tensor-product grid in (θ, ζ) with uniformly spaced nodes in - (2π × 2π) poloidal and toroidal coordinates. - f : jnp.ndarray - Function evaluated on ``grid``. - - Returns - ------- - a : jnp.ndarray - Shape (grid.num_rho, grid.num_theta // 2 + 1, grid.num_zeta) - Complex coefficients of 2D real FFT. - - """ - f = grid.meshgrid_reshape(f, order="rtz") - a = rfft2(f, axes=(-1, -2), norm="forward") - assert a.shape == (grid.num_rho, grid.num_theta // 2 + 1, grid.num_zeta) - return a - - -# TODO: -# After GitHub issue #1034 is resolved, we should pass in the previous -# θ(α) coordinates as an initial guess for the next coordinate mapping. -# Perhaps tell the optimizer to perturb the coefficients of the -# |B|(α, ζ) directly? Maybe auto diff to see change on |B|(θ, ζ) -# and hence stream functions. Not sure how feasible... - -# TODO: Allow multiple starting labels for near-rational surfaces. -# can just concatenate along second to last axis of cheb, but will -# do in later pull request since it's not urgent. - - -class Bounce2D: - """Computes bounce integrals using two-dimensional pseudo-spectral methods. - - The bounce integral is defined as ∫ f(ℓ) dℓ, where - dℓ parameterizes the distance along the field line in meters, - f(ℓ) is the quantity to integrate along the field line, - and the boundaries of the integral are bounce points ζ₁, ζ₂ s.t. λ|B|(ζᵢ) = 1, - where λ is a constant proportional to the magnetic moment over energy - and |B| is the norm of the magnetic field. - - For a particle with fixed λ, bounce points are defined to be the location on the - field line such that the particle's velocity parallel to the magnetic field is zero. - The bounce integral is defined up to a sign. We choose the sign that corresponds to - the particle's guiding center trajectory traveling in the direction of increasing - field-line-following coordinate ζ. - - Notes - ----- - Brief motivation and description of algorithm for developers. - - For applications which reduce to computing a nonlinear function of distance - along field lines between bounce points, it is required to identify these - points with field-line-following coordinates. In the special case of a linear - function summing integrals between bounce points over a flux surface, arbitrary - coordinate systems may be used as this operation reduces to a surface integral, - which is invariant to the order of summation. - - The DESC coordinate system is related to field-line-following coordinate - systems by a relation whose solution is best found with Newton iteration. - There is a unique real solution to this equation, so Newton iteration is a - globally convergent root-finding algorithm here. For the task of finding - bounce points, even if the inverse map: θ(α, ζ) was known, Newton iteration - is not a globally convergent algorithm to find the real roots of - f : ζ ↦ |B|(ζ) − 1/λ where ζ is a field-line-following coordinate. - For this, function approximation of |B| is necessary. - - Therefore, to compute bounce points {(ζ₁, ζ₂)}, we approximate |B| by a - series expansion of basis functions in (α, ζ) coordinates, restricting the - class of basis functions to low order (e.g. N = 2ᵏ where k is small) - algebraic or trigonometric polynomial with integer frequencies. These are - the two classes useful for function approximation and for which there exists - globally convergent root-finding algorithms. 
We require low order because - the computation expenses grow with the number of potential roots, and the - theorem of algebra states that number is N (2N) for algebraic - (trigonometric) polynomials of degree N. - - The frequency transform of a map under the chosen basis must be concentrated - at low frequencies for the series to converge to the true function fast. - For periodic (non-periodic) maps, the best basis is a Fourier (Chebyshev) - series. Both converge exponentially, but the larger region of convergence in - the complex plane of Fourier series make it preferable in practice to choose - coordinate systems such that the function to approximate is periodic. The - Chebyshev series is preferred to other orthogonal polynomial series since - fast discrete polynomial transforms (DPT) are implemented via fast transform - to Chebyshev then DCT. Although nothing prohibits a direct DPT, we want to - rely on existing, optimized libraries. There are other reasons to prefer - Chebyshev series not discussed here. - - Therefore, |B| is interpolated to a Fourier-Chebyshev series in (α, ζ). - The roots of f are computed as the eigenvalues of the Chebyshev companion - matrix. This will later be replaced with Boyd's method: - Computing real roots of a polynomial in Chebyshev series form through - subdivision. https://doi.org/10.1016/j.apnum.2005.09.007. - - Computing accurate series expansions in (α, ζ) coordinates demands - particular interpolation points in that coordinate system. Newton iteration - is used to compute θ at these interpolation points. Note that interpolation - is necessary because there is no transformation that converts series - coefficients in periodic coordinates, e.g. (ϑ, ϕ), to a low order - polynomial basis in non-periodic coordinates. For example, one can obtain - series coefficients in (α, ϕ) coordinates from those in (ϑ, ϕ) as follows - g : ϑ, ϕ ↦ ∑ₘₙ aₘₙ exp(j [mϑ + nϕ]) - - g : α, ϕ ↦ ∑ₘₙ aₘₙ exp(j [mα + (m ι + n)ϕ]) - However, the basis for the latter are trigonometric functions with - irrational frequencies, courtesy of the irrational rotational transform. - Globally convergent root-finding schemes for that basis (at fixed α) are - not known. The denominator of a close rational could be absorbed into the - coordinate ϕ, but this balloons the frequency, and hence the degree of the - series. Although, because Fourier series may converge faster than Chebyshev, - an alternate strategy that should work is to interpolate |B| to a double - Fourier series in (ϑ, ϕ), then apply bisection methods to find roots of f - with mesh size inversely proportional to the max frequency along the field - line: M ι + N. ``Bounce2D`` does not use that approach because that - root-finding scheme is inferior. - - After obtaining the bounce points, the supplied quadrature is performed. - By default, this is a Gauss quadrature after removing the singularity. - Fast fourier transforms interpolate functions in the integrand to the - quadrature nodes. - - Fast transforms are used where possible, though fast multipoint methods - are not yet implemented. For non-uniform interpolation, Vandermode MMT with - the linear algebra libraries of JAX are used. It should be worthwhile to use - the inverse non-uniform fast transforms. Fast multipoint methods are - preferable because they are exact, but this requires more development work. 
- Future work may implement these techniques, along with empirical testing of - a few change of variables for the Chebyshev interpolation that may allow - earlier truncation of the series without loss of accuracy. - - See Also - -------- - Bounce1D - Uses one-dimensional local spline methods for the same task. - An advantage of ``Bounce2D`` over ``Bounce1D`` is that the coordinates on - which the root-finding must be done to map from DESC to Clebsch coords is - fixed to ``L*M*N``, independent of the number of toroidal transits. - - Warnings - -------- - It is assumed that ζ = ϕ. - - Attributes - ---------- - _B : ChebyshevBasisSet - Set of 1D Chebyshev spectral coefficients of |B| along field line. - {|B|_α : ζ ↦ |B|(α, ζ) | α ∈ A } where A = (α₀, α₁, …, αₘ₋₁) is the - sequence of poloidal coordinates that specify the field line. - _T : ChebyshevBasisSet - Set of 1D Chebyshev spectral coefficients of θ along field line. - {θ_α : ζ ↦ θ(α, ζ) | α ∈ A } where A = (α₀, α₁, …, αₘ₋₁) is the - sequence of poloidal coordinates that specify the field line. - - """ - - domain = (0, 2 * jnp.pi) - - def __init__( - self, - grid, - data, - desc_from_clebsch, - M, - N, - alpha_0=0.0, - num_transit=32, - quad=leggauss(32), - automorphism=(automorphism_sin, grad_automorphism_sin), - Bref=1.0, - Lref=1.0, - check=False, - **kwargs, - ): - """Returns an object to compute bounce integrals. - - Notes - ----- - Performance may improve significantly - if the spectral resolutions ``M`` and ``N`` are powers of two. - - Parameters - ---------- - grid : Grid - Tensor-product grid in (ρ, θ, ζ) with uniformly spaced nodes in - (2π × 2π) poloidal and toroidal coordinates. - Note that below shape notation defines - L = ``grid.num_rho``, m = ``grid.num_theta``, and n = ``grid.num_zeta``. - data : dict[str, jnp.ndarray] - Data evaluated on ``grid``. Must include ``FourierBounce.required_names()``. - desc_from_clebsch : jnp.ndarray - Shape (L * M * N, 3). - DESC coordinates (ρ, θ, ζ) sourced from the Clebsch coordinates - ``FourierChebyshevBasis.nodes(M,N,L,domain=FourierBounce.domain)``. - M : int - Grid resolution in poloidal direction for Clebsch coordinate grid. - Preferably power of 2. A good choice is ``m``. If the poloidal stream - function condenses the Fourier spectrum of |B| significantly, then a - larger number may be beneficial. - N : int - Grid resolution in toroidal direction for Clebsch coordinate grid. - Preferably power of 2. - alpha_0 : float - Starting field line poloidal label. - num_transit : int - Number of toroidal transits to follow field line. - quad : (jnp.ndarray, jnp.ndarray) - Quadrature points xₖ and weights wₖ for the approximate evaluation of an - integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Default is 32 points. - automorphism : (Callable, Callable) or None - The first callable should be an automorphism of the real interval [-1, 1]. - The second callable should be the derivative of the first. This map defines - a change of variable for the bounce integral. The choice made for the - automorphism will affect the performance of the quadrature method. - Bref : float - Optional. Reference magnetic field strength for normalization. - Lref : float - Optional. Reference length scale for normalization. - check : bool - Flag for debugging. Must be false for JAX transformations. - - """ - errorif(grid.sym, NotImplementedError, msg="Need grid that works with FFTs.") - # Strictly increasing zeta knots enforces dζ > 0. - # To retain dℓ = (|B|/B^ζ) dζ > 0 after fixing dζ > 0, we require - # B^ζ = B⋅∇ζ > 0. 
This is equivalent to changing the sign of ∇ζ or [∂ℓ/∂ζ]|ρ,a. - # Recall dζ = ∇ζ⋅dR, implying 1 = ∇ζ⋅(e_ζ|ρ,a). Hence, a sign change in ∇ζ - # requires the same sign change in e_ζ|ρ,a to retain the metric identity. - warnif( - check and kwargs.pop("warn", True) and jnp.any(data["B^zeta"] <= 0), - msg="(∂ℓ/∂ζ)|ρ,a > 0 is required. Enforcing positive B^ζ.", - ) - self._m = grid.num_theta - self._n = grid.num_zeta - self._x, self._w = get_quadrature(quad, automorphism) - - # Compute global splines. - self._b_sup_z = _transform_to_desc( - grid, - jnp.abs(data["B^zeta"]) / data["|B|"] * Lref, - )[:, jnp.newaxis] - T, B = _transform_to_clebsch( - grid, - desc_from_clebsch, - M, - N, - data["|B|"] / Bref, - ) - # peel off field lines - alphas = get_alpha( - alpha_0, - grid.compress(data["iota"]), - num_transit, - period=Bounce2D.domain[-1], - ) - self._B = B.compute_cheb(alphas) - # Evaluating set of Chebyshev series more efficient than evaluating - # Fourier Chebyshev series, so we project θ to Chebyshev series as well. - self._T = T.compute_cheb(alphas) - assert self._B.M == self._T.M == num_transit - assert self._B.N == self._T.N == N - assert ( - self._B.cheb.shape == self._T.cheb.shape == (grid.num_rho, num_transit, N) - ) - - @staticmethod - def desc_from_clebsch(eq, L, M, N, clebsch=None, **kwargs): - """Return DESC coordinates of optimal Fourier Chebyshev basis nodes. - - Parameters - ---------- - eq : Equilibrium - Equilibrium to use defining the coordinate mapping. - L : int or jnp.ndarray - Number of flux surfaces uniformly in [0, 1] on which to compute. - May also be an array of non-uniform coordinates. - M : int - Grid resolution in poloidal direction for Clebsch coordinate grid. - Preferably power of 2. A good choice is ``m``. If the poloidal stream - function condenses the Fourier spectrum of |B| significantly, then a - larger number may be beneficial. - N : int - Grid resolution in toroidal direction for Clebsch coordinate grid. - Preferably power of 2. - clebsch : jnp.ndarray - Optional, Clebsch coordinate tensor-product grid (ρ, α, ζ). - ``FourierChebyshevBasis.nodes(M,N,L,domain=FourierBounce.domain)``. - If given, ``L``, ``M``, and ``N`` are ignored. - kwargs : dict - Additional parameters to supply to the coordinate mapping function. - See ``desc.equilibrium.Equilibrium.map_coordinates``. - - Returns - ------- - desc_coords : jnp.ndarray - Shape (L * M * N, 3). - DESC coordinate grid (ρ, θ, ζ) sourced from the Clebsch coordinate - tensor-product grid (ρ, α, ζ). - - """ - if clebsch is None: - clebsch = FourierChebyshevBasis.nodes(M, N, L, Bounce2D.domain) - desc_coords = eq.map_coordinates( - coords=clebsch, - inbasis=("rho", "alpha", "zeta"), - period=(jnp.inf, 2 * jnp.pi, jnp.inf), - **kwargs, - ) - return desc_coords - - @staticmethod - def required_names(): - """Return names in ``data_index`` required to compute bounce integrals.""" - return ["B^zeta", "|B|", "iota"] - - @staticmethod - def reshape_data(grid, *data): - """Reshape ``data`` arrays for acceptable input to ``integrate``. - - Parameters - ---------- - grid : Grid - Tensor-product grid in (ρ, θ, ζ). - data : jnp.ndarray - Data evaluated on grid. - - Returns - ------- - f : list[jnp.ndarray] - List of reshaped arrays which may be given to ``integrate``. 
- - """ - f = [grid.meshgrid_reshape(d, "rtz")[:, jnp.newaxis] for d in data] - return f - - @property - def _L(self): - """int: Number of flux surfaces to compute on.""" - return self._B.cheb.shape[0] - - def bounce_points(self, pitch, num_well=None): - """Compute bounce points. - - Parameters - ---------- - pitch : jnp.ndarray - Shape (P, L). - λ values to evaluate the bounce integral at each field line. λ(ρ) is - specified by ``pitch[...,ρ]`` where in the latter the labels ρ are - interpreted as the index into the last axis that corresponds to that field - line. If two-dimensional, the first axis is the batch axis. - num_well : int or None - Specify to return the first ``num_well`` pairs of bounce points for each - pitch along each field line. This is useful if ``num_well`` tightly - bounds the actual number. As a reference, there are typically at most 5 - wells per toroidal transit for a given pitch. - - If not specified, then all bounce points are returned. If there were fewer - wells detected along a field line than the size of the last axis of the - returned arrays, then that axis is padded with zero. - - Returns - ------- - z1, z2 : (jnp.ndarray, jnp.ndarray) - Shape (P, L, num_well). - ζ coordinates of bounce points. The points are grouped and ordered such - that the straight line path between ``z1`` and ``z2`` resides in the - epigraph of |B|. - - """ - return self._B.intersect1d(1 / jnp.atleast_2d(pitch), num_well) - - def check_bounce_points(self, z1, z2, pitch, plot=True, **kwargs): - """Check that bounce points are computed correctly. - - Parameters - ---------- - z1, z2 : (jnp.ndarray, jnp.ndarray) - Shape (P, L, num_well). - ζ coordinates of bounce points. The points are grouped and ordered such - that the straight line path between ``z1`` and ``z2`` resides in the - epigraph of |B|. - pitch : jnp.ndarray - Shape (P, L). - λ values to evaluate the bounce integral at each field line. λ(ρ) is - specified by ``pitch[...,ρ]`` where in the latter the labels ρ are - interpreted as the index into the last axis that corresponds to that field - line. If two-dimensional, the first axis is the batch axis. - plot : bool - Whether to plot stuff. - kwargs : dict - Keyword arguments into ``ChebyshevBasisSet.plot1d``. - - """ - kwargs.setdefault( - "title", - r"Intersects $\zeta$ in epigraph($\vert B \vert$) s.t. " - r"$\vert B \vert(\zeta) = 1/\lambda$", - ) - kwargs.setdefault("klabel", r"$1/\lambda$") - kwargs.setdefault("hlabel", r"$\zeta$") - kwargs.setdefault("vlabel", r"$\vert B \vert$") - self._B.check_intersect1d(z1, z2, 1 / pitch, plot, **kwargs) - - def integrate(self, pitch, integrand, f, weight=None, num_well=None): - """Bounce integrate ∫ f(ℓ) dℓ. - - Computes the bounce integral ∫ f(ℓ) dℓ for every specified field line - for every λ value in ``pitch``. - - Parameters - ---------- - pitch : jnp.ndarray - Shape (P, L). - λ values to evaluate the bounce integral at each field line. λ(ρ) is - specified by ``pitch[...,ρ]`` where in the latter the labels ρ are - interpreted as the index into the last axis that corresponds to that field - line. If two-dimensional, the first axis is the batch axis. - integrand : callable - The composition operator on the set of functions in ``f`` that maps the - functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the - arrays in ``f`` as arguments as well as the additional keyword arguments: - ``B`` and ``pitch``. A quadrature will be performed to approximate the - bounce integral of ``integrand(*f,B=B,pitch=pitch)``. 
- f : list[jnp.ndarray] - Shape (L, 1, m, n). - Real scalar-valued (2π × 2π) periodic in (θ, ζ) functions evaluated - on the ``grid`` supplied to construct this object. These functions - should be arguments to the callable ``integrand``. Use the method - ``self.reshape_data`` to reshape the data into the expected shape. - weight : jnp.ndarray - Shape (L, 1, m, n). - If supplied, the bounce integral labeled by well j is weighted such that - the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` - interpolated to the deepest point in the magnetic well. Use the method - ``self.reshape_data`` to reshape the data into the expected shape. - num_well : int or None - Specify to return the first ``num_well`` pairs of bounce points for each - pitch along each field line. This is useful if ``num_well`` tightly - bounds the actual number. As a reference, there are typically at most 5 - wells per toroidal transit for a given pitch. - - If not specified, then all bounce points are returned. If there were fewer - wells detected along a field line than the size of the last axis of the - returned arrays, then that axis is padded with zero. - - Returns - ------- - result : jnp.ndarray - Shape (P, L, num_well). - First axis enumerates pitch values. Second axis enumerates the field lines. - Last axis enumerates the bounce integrals. - - """ - pitch = jnp.atleast_2d(pitch) - z1, z2 = self.bounce_points(pitch, num_well) - result = self._integrate(z1, z2, pitch, integrand, f) - errorif(weight is not None, NotImplementedError) - return result - - def _integrate(self, z1, z2, pitch, integrand, f): - assert z1.ndim == 3 - assert z1.shape == z2.shape - assert pitch.ndim == 2 - W = z1.shape[-1] # number of wells - shape = (pitch.shape[0], self._L, W, self._x.size) - - # quadrature points parameterized by ζ for each pitch and flux surface - Q_zeta = flatten_matrix( - bijection_from_disc(self._x, z1[..., jnp.newaxis], z2[..., jnp.newaxis]) - ) - # quadrature points in (θ, ζ) coordinates - Q = jnp.stack([self._T.eval1d(Q_zeta), Q_zeta], axis=-1) - - # interpolate and integrate - f = [interp_rfft2(Q, f_i, axes=(-1, -2)).reshape(shape) for f_i in f] - result = jnp.dot( - integrand( - *f, - B=self._B.eval1d(Q_zeta).reshape(shape), - pitch=pitch[..., jnp.newaxis, jnp.newaxis], - ) - / irfft2_non_uniform( - xq=Q, a=self._b_sup_z, M=self._n, N=self._m, axes=(-1, -2) - ).reshape(shape), - self._w, - ) - assert result.shape == (pitch.shape[0], self._L, W) - return result +from desc.utils import setdefault, warnif class Bounce1D: diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index 5b9f206809..c96ecf1508 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -49,34 +49,6 @@ def get_pitch(min_B, max_B, num, relative_shift=1e-6): return pitch -# TODO: Generalize this beyond ζ = ϕ or just map to Clebsch with ϕ. -def get_alpha(alpha_0, iota, num_transit, period): - """Get sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) of field line. - - Parameters - ---------- - alpha_0 : float - Starting field line poloidal label. - iota : jnp.ndarray - Shape (iota.size, ). - Rotational transform normalized by 2π. - num_transit : float - Number of ``period``s to follow field line. - period : float - Toroidal period after which to update label. - - Returns - ------- - alpha : jnp.ndarray - Shape (iota.size, num_transit). - Sequence of poloidal coordinates A = (α₀, α₁, …, αₘ₋₁) that specify field line. 
- - """ - # Δϕ (∂α/∂ϕ) = Δϕ ι̅ = Δϕ ι/2π = Δϕ data["iota"] - alpha = alpha_0 + period * iota[:, jnp.newaxis] * jnp.arange(num_transit) - return alpha - - def _check_spline_shape(knots, g, dg_dz, pitch=None): """Ensure inputs have compatible shape, and return them with full dimension. diff --git a/desc/integrals/interp_utils.py b/desc/integrals/interp_utils.py index c0fd0818c7..3f48a31101 100644 --- a/desc/integrals/interp_utils.py +++ b/desc/integrals/interp_utils.py @@ -2,377 +2,11 @@ from functools import partial -import numpy as np from interpax import interp1d -from orthax.chebyshev import chebroots, chebvander from orthax.polynomial import polyvander -from desc.backend import dct, jnp, rfft, rfft2, take +from desc.backend import jnp from desc.compute.utils import safediv -from desc.integrals.quad_utils import bijection_from_disc -from desc.utils import Index, errorif - -# TODO: Boyd's method 𝒪(N²) instead of Chebyshev companion matrix 𝒪(N³). -# John P. Boyd, Computing real roots of a polynomial in Chebyshev series -# form through subdivision. https://doi.org/10.1016/j.apnum.2005.09.007. -chebroots_vec = jnp.vectorize(chebroots, signature="(m)->(n)") - - -# TODO: Transformation to make nodes more uniform Boyd eq. 16.46 pg. 336. -# Have a hunch it won't change locations of complex poles much, so using -# more uniformly spaced nodes could speed up convergence (wrt early -# series truncation, not the infinite limit). - - -def cheb_pts(N, lobatto=False, domain=(-1, 1)): - """Get ``N`` Chebyshev points mapped to given domain. - - Warnings - -------- - This is a common definition of the Chebyshev points (see Boyd, Chebyshev and - Fourier Spectral Methods p. 498). These are the points demanded by discrete - cosine transformations to interpolate Chebyshev series because the cosine - basis for the DCT is defined on [0, π]. They differ in ordering from the - points returned by ``numpy.polynomial.chebyshev.chebpts1`` and - ``numpy.polynomial.chebyshev.chebpts2``. - - Parameters - ---------- - N : int - Number of points. - lobatto : bool - Whether to return the Gauss-Lobatto (extrema-plus-endpoint) - instead of the interior roots for Chebyshev points. - domain : (float, float) - Domain for points. - - Returns - ------- - pts : jnp.ndarray - Shape (N, ). - Chebyshev points mapped to given domain. - - """ - n = jnp.arange(N) - if lobatto: - y = jnp.cos(jnp.pi * n / (N - 1)) - else: - y = jnp.cos(jnp.pi * (2 * n + 1) / (2 * N)) - return bijection_from_disc(y, domain[0], domain[-1]) - - -def fourier_pts(M): - """Get ``M`` Fourier points.""" - m = jnp.arange(1, M + 1) - return -jnp.pi + 2 * jnp.pi * m / M - - -def harmonic(a, M, axis=-1): - """Spectral coefficients of the Nyquist trigonometric interpolant. - - Parameters - ---------- - a : jnp.ndarray - Fourier coefficients ``a=rfft(f,norm="forward",axis=axis)``. - M : int - Spectral resolution of ``a``. - axis : int - Axis along which coefficients are stored. - - Returns - ------- - h : jnp.ndarray - Nyquist trigonometric interpolant coefficients. - Coefficients ordered along ``axis`` of size ``M`` to match ordering of - [1, cos(x), ..., cos(mx), sin(x), sin(2x), ..., sin(mx)] basis. 
- - """ - is_even = (M % 2) == 0 - # cos(mx) coefficients - an = 2.0 * ( - a.real.at[Index.get(0, axis, a.ndim)] - .divide(2.0) - .at[Index.get(-1, axis, a.ndim)] - .divide(1.0 + is_even) - ) - # sin(mx) coefficients - bn = -2.0 * take( - a.imag, - jnp.arange(1, a.shape[axis] - is_even), - axis, - unique_indices=True, - indices_are_sorted=True, - ) - h = jnp.concatenate([an, bn], axis=axis) - assert h.shape[axis] == M - return h - - -def harmonic_vander(x, M): - """Nyquist trigonometric interpolant basis evaluated at ``x``. - - Parameters - ---------- - x : jnp.ndarray - Points at which to evaluate pseudo-Vandermonde matrix. - M : int - Spectral resolution. - - Returns - ------- - basis : jnp.ndarray - Shape (*x.shape, M). - Pseudo-Vandermonde matrix of degree ``M-1`` and sample points ``x``. - Last axis ordered as [1, cos(x), ..., cos(mx), sin(x), sin(2x), ..., sin(mx)]. - - """ - m = jnp.fft.rfftfreq(M, d=1 / M) - mx = m * x[..., jnp.newaxis] - basis = jnp.concatenate( - [jnp.cos(mx), jnp.sin(mx[..., 1 : m.size - ((M % 2) == 0)])], axis=-1 - ) - assert basis.shape == (*x.shape, M) - return basis - - -# TODO: For inverse transforms, do multipoint evaluation with FFT. -# FFT cost is 𝒪(M N log[M N]) while direct evaluation is 𝒪(M² N²). -# Chapter 10, https://doi.org/10.1017/CBO9781139856065. -# Right now we just do an MMT with the Vandermode matrix. -# Multipoint is likely better than using NFFT to evaluate f(xq) given fourier -# coefficients because evaluation points are quadratically packed near edges as -# required by quadrature to avoid runge. NFFT is only approximation anyway. -# https://github.com/flatironinstitute/jax-finufft. - - -def interp_rfft(xq, f, axis=-1): - """Interpolate real-valued ``f`` to ``xq`` with FFT. - - Parameters - ---------- - xq : jnp.ndarray - Real query points where interpolation is desired. - Shape of ``xq`` must broadcast with arrays of shape ``np.delete(f.shape,axis)``. - f : jnp.ndarray - Real 2π periodic function values on uniform grid to interpolate. - axis : int - Axis along which to transform. - - Returns - ------- - fq : jnp.ndarray - Real function value at query points. - - """ - a = rfft(f, axis=axis, norm="forward") - fq = irfft_non_uniform(xq, a, f.shape[axis], axis) - return fq - - -def irfft_non_uniform(xq, a, n, axis=-1): - """Evaluate Fourier coefficients ``a`` at ``xq`` ∈ [0, 2π]. - - Parameters - ---------- - xq : jnp.ndarray - Real query points where interpolation is desired. - Shape of ``xq`` must broadcast with arrays of shape ``np.delete(a.shape,axis)``. - a : jnp.ndarray - Fourier coefficients ``a=rfft(f,axis=axis,norm="forward")``. - n : int - Spectral resolution of ``a``. - axis : int - Axis along which to transform. - - Returns - ------- - fq : jnp.ndarray - Real function value at query points. - - """ - # |a| << |basis|, so move a instead of basis - a = ( - jnp.moveaxis(a, axis, -1) - .at[..., 0] - .divide(2.0) - .at[..., -1] - .divide(1.0 + ((n % 2) == 0)) - ) - m = jnp.fft.rfftfreq(n, d=1 / n) - basis = jnp.exp(-1j * m * xq[..., jnp.newaxis]) - fq = 2.0 * jnp.linalg.vecdot(basis, a).real - # TODO: Test JAX does this optimization automatically. - # ℜ〈 basis, a 〉= cos(m xq)⋅ℜ(a) − sin(m xq)⋅ℑ(a) - return fq - - -def interp_rfft2(xq, f, axes=(-2, -1)): - """Interpolate real-valued ``f`` to ``xq`` with FFT. - - Parameters - ---------- - xq : jnp.ndarray - Shape (..., 2). - Real query points where interpolation is desired. - Shape ``xq.shape[:-1]`` must broadcast with shape ``np.delete(f.shape,axes)``. 
- Last axis must hold coordinates for a given point. The coordinates stored - along ``xq[...,0]`` (``xq[...,1]``) must be the same coordinate enumerated - across axis ``min(axes)`` (``max(axes)``) of the function values ``f``. - f : jnp.ndarray - Shape (..., f.shape[-2], f.shape[-1]). - Real (2π × 2π) periodic function values on uniform tensor-product grid - to interpolate. - axes : tuple[int, int] - Axes along which to transform. - The real transform is done along ``axes[1]``, so it will be more - efficient for that to denote the larger size axis in ``axes``. - - Returns - ------- - fq : jnp.ndarray - Real function value at query points. - - """ - a = rfft2(f, axes=axes, norm="forward") - fq = irfft2_non_uniform(xq, a, f.shape[axes[0]], f.shape[axes[1]], axes) - return fq - - -def irfft2_non_uniform(xq, a, M, N, axes=(-2, -1)): - """Evaluate Fourier coefficients ``a`` at ``xq`` ∈ [0, 2π]². - - Parameters - ---------- - xq : jnp.ndarray - Shape (..., 2). - Real query points where interpolation is desired. - Last axis must hold coordinates for a given point. - Shape ``xq.shape[:-1]`` must broadcast with shape ``np.delete(a.shape,axes)``. - Last axis must hold coordinates for a given point. The coordinates stored - along ``xq[...,0]`` (``xq[...,1]``) must be the same coordinate enumerated - across axis ``min(axes)`` (``max(axes)``) of the Fourier coefficients ``a``. - a : jnp.ndarray - Shape (..., a.shape[-2], a.shape[-1]). - Fourier coefficients ``a=rfft2(f,axes=axes,norm="forward")``. - M : int - Spectral resolution of ``a`` along ``axes[0]``. - N : int - Spectral resolution of ``a`` along ``axes[1]``. - axes : tuple[int, int] - Axes along which to transform. - - Returns - ------- - fq : jnp.ndarray - Real function value at query points. - - """ - errorif(not (len(axes) == xq.shape[-1] == 2), msg="This is a 2D transform.") - errorif(a.ndim < 2, msg=f"Dimension mismatch, a.shape: {a.shape}.") - - # |a| << |basis|, so move a instead of basis - a = ( - jnp.moveaxis(a, source=axes, destination=(-2, -1)) - .at[..., 0] - .divide(2.0) - .at[..., -1] - .divide(1.0 + ((N % 2) == 0)) - ) - - m = jnp.fft.fftfreq(M, d=1 / M) - n = jnp.fft.rfftfreq(N, d=1 / N) - idx = np.argsort(axes) - basis = jnp.exp( - 1j - * ( - (m * xq[..., idx[0], jnp.newaxis])[..., jnp.newaxis] - + (n * xq[..., idx[1], jnp.newaxis])[..., jnp.newaxis, :] - ) - ) - fq = 2.0 * jnp.real(basis * a).sum(axis=(-2, -1)) - return fq - - -def cheb_from_dct(a, axis=-1): - """Get discrete Chebyshev transform from discrete cosine transform. - - Parameters - ---------- - a : jnp.ndarray - Discrete cosine transform coefficients, e.g. - ``a=dct(f,type=2,axis=axis,norm="forward")``. - The discrete cosine transformation used by scipy is defined here: - https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.dct.html. - axis : int - Axis along which to transform. - - Returns - ------- - cheb : jnp.ndarray - Chebyshev coefficients along ``axis``. - - """ - cheb = a.copy().at[Index.get(0, axis, a.ndim)].divide(2.0) - return cheb - - -def interp_dct(xq, f, lobatto=False, axis=-1): - """Interpolate ``f`` to ``xq`` with discrete Chebyshev transform. - - Parameters - ---------- - xq : jnp.ndarray - Real query points where interpolation is desired. - Shape of ``xq`` must broadcast with shape ``np.delete(f.shape,axis)``. - f : jnp.ndarray - Real function values on Chebyshev points to interpolate. - lobatto : bool - Whether ``f`` was sampled on the Gauss-Lobatto (extrema-plus-endpoint) - or interior roots grid for Chebyshev points. 
- axis : int - Axis along which to transform. - - Returns - ------- - fq : jnp.ndarray - Real function value at query points. - - """ - lobatto = bool(lobatto) - errorif(lobatto, NotImplementedError, "JAX hasn't implemented type 1 DCT.") - a = cheb_from_dct(dct(f, type=2 - lobatto, axis=axis), axis) / ( - f.shape[axis] - lobatto - ) - fq = idct_non_uniform(xq, a, f.shape[axis], axis) - return fq - - -def idct_non_uniform(xq, a, n, axis=-1): - """Evaluate discrete Chebyshev transform coefficients ``a`` at ``xq`` ∈ [-1, 1]. - - Parameters - ---------- - xq : jnp.ndarray - Real query points where interpolation is desired. - Shape of ``xq`` must broadcast with shape ``np.delete(a.shape,axis)``. - a : jnp.ndarray - Discrete Chebyshev transform coefficients. - n : int - Spectral resolution of ``a``. - axis : int - Axis along which to transform. - - Returns - ------- - fq : jnp.ndarray - Real function value at query points. - - """ - a = jnp.moveaxis(a, axis, -1) - # Could use Clenshaw recursion with fq = chebval(xq, a, tensor=False). - basis = chebvander(xq, n - 1) - fq = jnp.linalg.vecdot(basis, a) - return fq def polyder_vec(c): diff --git a/tests/test_integrals.py b/tests/test_integrals.py index 6565231a07..391d29bb1c 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -6,7 +6,7 @@ import pytest from jax import grad from matplotlib import pyplot as plt -from numpy.polynomial.chebyshev import chebgauss, chebinterpolate, chebroots, chebweight +from numpy.polynomial.chebyshev import chebgauss, chebweight from numpy.polynomial.legendre import leggauss from scipy import integrate from scipy.interpolate import CubicHermiteSpline @@ -17,12 +17,11 @@ from desc.basis import FourierZernikeBasis from desc.compute.utils import dot from desc.equilibrium import Equilibrium -from desc.equilibrium.coords import get_rtz_grid, map_coordinates +from desc.equilibrium.coords import get_rtz_grid from desc.examples import get from desc.grid import ConcentricGrid, Grid, LinearGrid, QuadratureGrid from desc.integrals import ( Bounce1D, - Bounce2D, DFTInterpolator, FFTInterpolator, line_integrals, @@ -35,17 +34,14 @@ surface_variance, virtual_casing_biot_savart, ) -from desc.integrals.basis import FourierChebyshevBasis from desc.integrals.bounce_utils import ( _get_extrema, bounce_points, - get_alpha, get_pitch, interp_to_argmin_g, interp_to_argmin_g_hard, plot_ppoly, ) -from desc.integrals.interp_utils import fourier_pts from desc.integrals.quad_utils import ( automorphism_sin, bijection_from_disc, @@ -1364,200 +1360,3 @@ def integrand_grad(*args, **kwargs2): # Make sure bounce points get differentiated too. 
result = fun2(pitch) assert np.isfinite(result) and not np.isclose(result, truth, rtol=1e-1) - - -class TestBounce2DPoints: - """Test that bounce points are computed correctly.""" - - @staticmethod - def _cheb_intersect(cheb, k): - cheb = cheb.copy() - cheb[0] = cheb[0] - k - roots = chebroots(cheb) - intersect = roots[ - np.logical_and(np.isreal(roots), np.abs(roots.real) <= 1) - ].real - return intersect - - @staticmethod - def _periodic_fun(nodes, M, N): - alpha, zeta = nodes.T - f = -2 * np.cos(1 / (0.1 + zeta**2)) + 2 - return f.reshape(M, N) - - @pytest.mark.unit - def test_z1_first(self): - """Test that bounce points are computed correctly.""" - M, N = 1, 10 - domain = (-1, 1) - nodes = FourierChebyshevBasis.nodes(M, N, domain=domain) - f = -self._periodic_fun(nodes, M, N) - cheb = FourierChebyshevBasis(f, domain=domain).compute_cheb(fourier_pts(M)) - pitch = 1 / np.linspace(1, 4, 1) - z1, z2 = cheb.intersect1d(1 / pitch, num_intersect=1) - print(z1) - print(z2) - cheb.check_intersect1d(z1, z2, 1 / pitch) - z1, z2 = TestBounce1DPoints.filter(z1, z2) - - def f(z): - return -2 * np.cos(1 / (0.1 + z**2)) + 2 - - r = self._cheb_intersect(chebinterpolate(f, N), 1 / pitch) - np.testing.assert_allclose(z1, r[::2], rtol=1e-3) - np.testing.assert_allclose(z2, r[1::2], rtol=1e-3) - - -class TestBounce2D: - """Test bounce integration with two-dimensional pseudo-spectral methods.""" - - @pytest.mark.unit - @pytest.mark.parametrize( - "alpha_0, iota, num_period, period", - [ - (0, np.sqrt(2), 1, 2 * np.pi), - (0, np.arange(1, 3) * np.sqrt(2), 5, 2 * np.pi), - ], - ) - def test_alpha_sequence(self, alpha_0, iota, num_period, period): - """Test field line poloidal label tracking.""" - iota = np.atleast_1d(iota) - alphas = get_alpha(alpha_0, iota, num_period, period) - assert alphas.shape == (iota.size, num_period) - for i in range(iota.size): - assert np.unique(alphas[i]).size == num_period, f"{iota} is irrational" - print(alphas) - - @pytest.mark.unit - def test_fourier_chebyshev(self, rho=1, M=8, N=32, f=lambda B, pitch: B * pitch): - """Test bounce points...""" - eq = get("W7-X") - clebsch = FourierChebyshevBasis.nodes(M, N, L=rho) - desc_from_clebsch = map_coordinates( - eq, - clebsch, - inbasis=("rho", "alpha", "zeta"), - period=(np.inf, 2 * np.pi, np.inf), - ) - grid = LinearGrid( - rho=rho, M=eq.M_grid, N=eq.N_grid, sym=False, NFP=eq.NFP - ) # check if NFP!=1 works - data = eq.compute( - names=Bounce2D.required_names() + ["min_tz |B|", "max_tz |B|"], grid=grid - ) - fb = Bounce2D( - grid, data, M, N, desc_from_clebsch, check=True, warn=False - ) # TODO check true - pitch = get_pitch( - grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 - ) - result = fb.integrate(f, [], pitch) # noqa: F841 - - @pytest.mark.unit - @pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_1d) - def test_drift(self): - """Test bounce-averaged drift with analytical expressions.""" - eq = Equilibrium.load(".//tests//inputs//low-beta-shifted-circle.h5") - psi_boundary = eq.Psi / (2 * np.pi) - psi = 0.25 * psi_boundary - rho = np.sqrt(psi / psi_boundary) - np.testing.assert_allclose(rho, 0.5) - - # Make a set of nodes along a single fieldline. 
- grid_fsa = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, sym=eq.sym, NFP=eq.NFP) - data = eq.compute(["iota"], grid=grid_fsa) - iota = grid_fsa.compress(data["iota"]).item() - alpha = 0 - zeta = np.linspace(-np.pi / iota, np.pi / iota, (2 * eq.M_grid) * 4 + 1) - grid = get_rtz_grid( - eq, - rho, - alpha, - zeta, - coordinates="raz", - period=(np.inf, 2 * np.pi, np.inf), - iota=np.array([iota]), - ) - data = eq.compute( - Bounce2D.required_names() - + [ - "cvdrift", - "gbdrift", - "grad(psi)", - "grad(alpha)", - "shear", - "iota", - "psi", - "a", - ], - grid=grid, - ) - np.testing.assert_allclose(data["psi"], psi) - np.testing.assert_allclose(data["iota"], iota) - assert np.all(data["B^zeta"] > 0) - data["Bref"] = 2 * np.abs(psi_boundary) / data["a"] ** 2 - data["rho"] = rho - data["alpha"] = alpha - data["zeta"] = zeta - data["psi"] = grid.compress(data["psi"]) - data["iota"] = grid.compress(data["iota"]) - data["shear"] = grid.compress(data["shear"]) - - # Compute analytic approximation. - drift_analytic, cvdrift, gbdrift, pitch = TestBounce1D.drift_analytic(data) - # Compute numerical result. - grid = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP) - data_2 = eq.compute( - names=Bounce2D.required_names() + ["cvdrift", "gbdrift"], grid=grid - ) - M, N = eq.M_grid, 20 - bounce = Bounce2D( - grid=grid, - data=data_2, - desc_from_clebsch=Bounce2D.desc_from_clebsch(eq, rho, M, N), - M=M, - N=N, - alpha_0=data["alpha"], - num_transit=1, - Bref=data["Bref"], - Lref=data["a"], - check=True, - plot=True, - ) - - def integrand_num(cvdrift, gbdrift, B, pitch): - g = jnp.sqrt(1 - pitch * B) - return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) - - def integrand_den(B, pitch): - return 1 / jnp.sqrt(1 - pitch * B) - - normalization = -np.sign(data["psi"]) * data["Bref"] * data["a"] ** 2 - drift_numerical_num = bounce.integrate( - pitch=pitch[:, np.newaxis], - integrand=integrand_num, - f=Bounce2D.reshape_data( - grid, - data_2["cvdrift"] * normalization, - data_2["gbdrift"] * normalization, - ), - num_well=1, - ) - drift_numerical_den = bounce.integrate( - pitch=pitch[:, np.newaxis], - integrand=integrand_den, - f=[], - num_well=1, - ) - drift_numerical = np.squeeze(drift_numerical_num / drift_numerical_den) - msg = "There should be one bounce integral per pitch in this example." 
- assert drift_numerical.size == drift_analytic.size, msg - np.testing.assert_allclose( - drift_numerical, drift_analytic, atol=5e-3, rtol=5e-2 - ) - - fig, ax = plt.subplots() - ax.plot(1 / pitch, drift_analytic) - ax.plot(1 / pitch, drift_numerical) - plt.show() - return fig diff --git a/tests/test_interp_utils.py b/tests/test_interp_utils.py index 250ca42a8e..0b03b16f4e 100644 --- a/tests/test_interp_utils.py +++ b/tests/test_interp_utils.py @@ -2,31 +2,8 @@ import numpy as np import pytest -from matplotlib import pyplot as plt -from numpy.polynomial.chebyshev import ( - cheb2poly, - chebinterpolate, - chebpts1, - chebpts2, - chebval, -) -from scipy.fft import dct as sdct -from scipy.fft import idct as sidct -from desc.backend import dct, idct, rfft -from desc.integrals.interp_utils import ( - cheb_from_dct, - cheb_pts, - harmonic, - harmonic_vander, - interp_dct, - interp_rfft, - interp_rfft2, - poly_root, - polyder_vec, - polyval_vec, -) -from desc.integrals.quad_utils import bijection_to_disc +from desc.integrals.interp_utils import poly_root, polyder_vec, polyval_vec class TestPolyUtils: @@ -116,205 +93,3 @@ def test(x, c): assert c.shape[1:] == x.shape[x.ndim - (c.ndim - 1) :] assert np.unique((c.shape[0],) + x.shape[c.ndim - 1 :]).size == x.ndim - 1 test(x, c) - - -def _f_1d(x): - """Test function for 1D FFT.""" - return np.cos(7 * x) + np.sin(x) - 33.2 - - -def _f_1d_nyquist_freq(): - return 7 - - -def _f_2d(x, y): - """Test function for 2D FFT.""" - x_freq, y_freq = 3, 5 - return ( - # something that's not separable - np.cos(x_freq * x) * np.sin(2 * x + y) - + np.sin(y_freq * y) * np.cos(x + 3 * y) - # DC terms - - 33.2 - + np.cos(x) - + np.cos(y) - ) - - -def _f_2d_nyquist_freq(): - x_freq_nyquist = 3 + 2 - y_freq_nyquist = 5 + 3 - return x_freq_nyquist, y_freq_nyquist - - -def _identity(x): - return x - - -def _f_non_periodic(z): - return np.sin(np.sqrt(2) * z) * np.cos(1 / (2 + z)) * np.cos(z**2) * z - - -def _f_algebraic(z): - return z**3 - 10 * z**6 - z - np.e + z**4 - - -class TestFastInterp: - """Test fast interpolation.""" - - @pytest.mark.unit - @pytest.mark.parametrize("N", [2, 6, 7]) - def test_cheb_pts(self, N): - """Test we use Chebyshev points compatible with standard definition of DCT.""" - np.testing.assert_allclose(cheb_pts(N), chebpts1(N)[::-1], atol=1e-15) - np.testing.assert_allclose( - cheb_pts(N, lobatto=True, domain=(-np.pi, np.pi)), - np.pi * chebpts2(N)[::-1], - atol=1e-15, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("M", [1, 8, 9]) - def test_rfftfreq(self, M): - """Make sure numpy uses Nyquist interpolant frequencies.""" - np.testing.assert_allclose(np.fft.rfftfreq(M, d=1 / M), np.arange(M // 2 + 1)) - - @pytest.mark.unit - @pytest.mark.parametrize( - "func, n", - [ - (_f_1d, 2 * _f_1d_nyquist_freq() + 1), - (_f_1d, 2 * _f_1d_nyquist_freq()), - ], - ) - def test_interp_rfft(self, func, n): - """Test non-uniform FFT interpolation.""" - xq = np.array([7.34, 1.10134, 2.28]) - x = np.linspace(0, 2 * np.pi, n, endpoint=False) - assert not np.any(np.isclose(xq[..., np.newaxis], x)) - f, fq = func(x), func(xq) - np.testing.assert_allclose(interp_rfft(xq, f), fq) - M = f.shape[-1] - np.testing.assert_allclose( - np.sum( - harmonic_vander(xq, M) * harmonic(rfft(f, norm="forward"), M), axis=-1 - ), - fq, - ) - - @pytest.mark.xfail(reason="Does numpy, jax, and scipy need to fix a bug with FFT?") - @pytest.mark.unit - @pytest.mark.parametrize( - "func, m, n", - [ - (_f_2d, 2 * _f_2d_nyquist_freq()[0] + 1, 2 * _f_2d_nyquist_freq()[1] + 1), - (_f_2d, 2 * 
_f_2d_nyquist_freq()[0], 2 * _f_2d_nyquist_freq()[1]), - ], - ) - def test_interp_rfft2(self, func, m, n): - """Test non-uniform FFT interpolation.""" - xq = np.array([[7.34, 1.10134, 2.28], [1.1, 3.78432, 8.542]]).T - x = np.linspace(0, 2 * np.pi, m, endpoint=False) - y = np.linspace(0, 2 * np.pi, n, endpoint=False) - assert not np.any(np.isclose(xq[..., 0, np.newaxis], x)) - assert not np.any(np.isclose(xq[..., 1, np.newaxis], y)) - x, y = map(np.ravel, list(np.meshgrid(x, y, indexing="ij"))) - truth = func(xq[..., 0], xq[..., 1]) - np.testing.assert_allclose( - interp_rfft2(xq, func(x, y).reshape(m, n), axes=(-2, -1)), - truth, - ) - np.testing.assert_allclose( - interp_rfft2(xq, func(x, y).reshape(m, n), axes=(-1, -2)), - truth, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "f, M, lobatto", - [ - # Identity map known for bad Gibbs; if discrete Chebyshev transform - # implemented correctly then won't see Gibbs. - (_identity, 2, False), - (_identity, 3, False), - (_identity, 3, True), - (_identity, 4, True), - ], - ) - def test_dct(self, f, M, lobatto): - """Test discrete cosine transform interpolation. - - Parameters - ---------- - f : callable - Function to test. - M : int - Fourier spectral resolution. - lobatto : bool - Whether ``f`` should be sampled on the Gauss-Lobatto (extrema-plus-endpoint) - or interior roots grid for Chebyshev points. - - """ - # Need to test fft used in Fourier Chebyshev interpolation due to issues like - # https://github.com/scipy/scipy/issues/15033 - # https://github.com/scipy/scipy/issues/21198 - # https://github.com/google/jax/issues/22466, - domain = (0, 2 * np.pi) - m = cheb_pts(M, lobatto, domain) - n = cheb_pts(m.size * 10, lobatto, domain) - norm = (n.size - lobatto) / (m.size - lobatto) - - dct_type = 2 - lobatto - fq_1 = np.sqrt(norm) * sidct( - sdct(f(m), type=dct_type, norm="ortho", orthogonalize=False), - type=dct_type, - n=n.size, - norm="ortho", - orthogonalize=False, - ) - if lobatto: - # JAX has yet to implement type 1 DCT. - fq_2 = norm * sidct(sdct(f(m), type=dct_type), n=n.size, type=dct_type) - else: - fq_2 = norm * idct(dct(f(m), type=dct_type), n=n.size, type=dct_type) - np.testing.assert_allclose(fq_1, f(n), atol=1e-14) - # JAX is less accurate than scipy. - np.testing.assert_allclose(fq_2, f(n), atol=1e-6) - - fig, ax = plt.subplots() - ax.scatter(m, f(m)) - ax.plot(n, fq_1) - ax.plot(n, fq_2) - return fig - - @pytest.mark.unit - @pytest.mark.parametrize( - "f, M", - [(_f_non_periodic, 5), (_f_non_periodic, 6), (_f_algebraic, 7)], - ) - def test_interp_dct(self, f, M): - """Test non-uniform DCT interpolation.""" - c0 = chebinterpolate(f, M - 1) - assert not np.allclose( - c0, - cheb_from_dct(dct(f(chebpts1(M)), 2)) / M, - ), ( - "Interpolation should fail because cosine basis is in wrong domain, " - "yet the supplied test function was interpolated fine using this wrong " - "domain. Pick a better test function." - ) - # test interpolation - z = cheb_pts(M) - fz = f(z) - np.testing.assert_allclose(c0, cheb_from_dct(dct(fz, 2) / M), atol=1e-13) - if np.allclose(_f_algebraic(z), fz): # Should reconstruct exactly. 
- np.testing.assert_allclose( - cheb2poly(c0), - np.array([-np.e, -1, 0, 1, 1, 0, -10]), - atol=1e-13, - ) - # test evaluation - xq = np.arange(10 * 3 * 2).reshape(10, 3, 2) - xq = bijection_to_disc(xq, 0, xq.size) - fq = chebval(xq, c0, tensor=False) - np.testing.assert_allclose(fq, interp_dct(xq, fz), atol=1e-13) From 74e229fad849d14aa2bf24c9dae37cfe467d33f6 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 27 Aug 2024 03:17:51 -0400 Subject: [PATCH 223/241] Remove code that should be in fourier_bounce branch --- desc/integrals/basis.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/desc/integrals/basis.py b/desc/integrals/basis.py index 8b4f5fc3dd..bef464cbc4 100644 --- a/desc/integrals/basis.py +++ b/desc/integrals/basis.py @@ -6,23 +6,6 @@ from desc.utils import setdefault -def _subtract(c, k): - """Subtract ``k`` from first index of last axis of ``c``. - - Semantically same as ``return c.copy().at[...,0].add(-k)``, - but allows dimension to increase. - """ - c_0 = c[..., 0] - k - c = jnp.concatenate( - [ - c_0[..., jnp.newaxis], - jnp.broadcast_to(c[..., 1:], (*c_0.shape, c.shape[-1] - 1)), - ], - axis=-1, - ) - return c - - @partial(jnp.vectorize, signature="(m),(m)->(m)") def _in_epigraph_and(is_intersect, df_dy_sign): """Set and epigraph of function f with the given set of points. From dedc01b11a553fff4fcde58fbefc4d7cf163b0a1 Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 27 Aug 2024 10:37:33 -0400 Subject: [PATCH 224/241] Improve tests and fix failing test --- desc/integrals/bounce_integral.py | 7 +++++-- desc/integrals/bounce_utils.py | 34 ++++++++++++++++++------------- desc/integrals/quad_utils.py | 5 +++-- tests/test_integrals.py | 23 ++++++++++----------- tests/test_utils.py | 2 +- 5 files changed, 40 insertions(+), 31 deletions(-) diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 811fbda12c..23656cc016 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -42,7 +42,10 @@ class Bounce1D: For applications which reduce to computing a nonlinear function of distance along field lines between bounce points, it is required to identify these - points with field-line-following coordinates. + points with field-line-following coordinates. (In the special case of a linear + function summing integrals between bounce points over a flux surface, arbitrary + coordinate systems may be used as this operation reduces to a surface integral, + which is invariant to the order of summation). The DESC coordinate system is related to field-line-following coordinate systems by a relation whose solution is best found with Newton iteration. @@ -83,7 +86,7 @@ class Bounce1D: Attributes ---------- _B : jnp.ndarray - TODO: Make this (4, L, M, N-1) now that tensor product in rho and alpha + TODO: Make this (4, M, L, N-1) now that tensor product in rho and alpha required as well after GitHub PR #1214. Shape (4, L * M, N - 1). Polynomial coefficients of the spline of |B| in local power basis. 
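Note on the ``_B`` attribute documented in the hunk above: it stores |B| as a piecewise cubic in the local power basis, with the first axis holding the four polynomial coefficients of each spline segment and the last axis enumerating the N - 1 intervals between knots. The sketch below is only an illustration of that layout, not the class's actual evaluation routine; the helper name, the ``knots`` argument, and the highest-degree-first coefficient ordering (as in ``numpy.polyval`` and scipy's ``CubicHermiteSpline.c``) are assumptions made here for clarity.

.. code-block:: python

    from desc.backend import jnp

    def eval_local_power_spline(coef, knots, zq):
        # coef : shape (4, N - 1). Cubic coefficients of each spline segment in
        #     local power basis, highest degree first, so segment j evaluates as
        #     c0*(z - knots[j])**3 + c1*(z - knots[j])**2 + c2*(z - knots[j]) + c3.
        # knots : shape (N, ). Strictly increasing spline knots.
        # zq : query points in [knots[0], knots[-1]].
        j = jnp.clip(jnp.searchsorted(knots, zq, side="right") - 1, 0, knots.size - 2)
        t = zq - knots[j]
        c = coef[:, j]
        # Horner's rule in the local coordinate of the containing interval.
        return ((c[0] * t + c[1]) * t + c[2]) * t + c[3]

Under these same assumed conventions, broadcasting the evaluation over the leading L * M axis of ``_B`` (one set of segments per field line) amounts to indexing ``coef[:, l, j]`` instead of ``coef[:, j]``.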
diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index c96ecf1508..6d8b1023bf 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -444,8 +444,8 @@ def _interpolate_and_integrate( - data["B^zeta"] * data["|B|_z|r,a"] / data["|B|"] ** 2, ).reshape(shape) B = interp1d_Hermite_vec(Q, knots, data["|B|"], data["|B|_z|r,a"]).reshape(shape) - # Spline each function separately so that operations in the integrand that do not - # preserve smoothness can be captured by the quadrature. + # Spline each function separately so that operations in the integrand + # that do not preserve smoothness can be captured. f = [interp1d_vec(Q, knots, f_i, method=method).reshape(shape) for f_i in f] result = jnp.dot(integrand(*f, B=B, pitch=pitch) / b_sup_z, w) @@ -463,13 +463,13 @@ def _check_interp(Q, f, b_sup_z, B, B_z_ra, result, plot): Q : jnp.ndarray Quadrature points in ζ coordinates. f : list of jnp.ndarray - Arguments to the integrand interpolated to Q. + Arguments to the integrand, interpolated to Q. b_sup_z : jnp.ndarray Contravariant toroidal component of magnetic field, interpolated to Q. B : jnp.ndarray Norm of magnetic field, interpolated to Q. B_z_ra : jnp.ndarray - Norm of magnetic field, derivative with respect to ζ. + Norm of magnetic field derivative, (∂|B|/∂ζ)|(ρ,α). result : jnp.ndarray Output of ``_interpolate_and_integrate``. plot : bool @@ -571,13 +571,13 @@ def _get_extrema(knots, g, dg_dz, sentinel=jnp.nan): return ext, g_ext -def _where_argmin_g(z1, z2, ext, g_ext, upper_sentinel): +def _where_for_argmin(z1, z2, ext, g_ext, upper_sentinel): assert z1.shape[1] == z2.shape[1] == ext.shape[0] == g_ext.shape[0] return jnp.where( (z1[..., jnp.newaxis] < ext[:, jnp.newaxis]) & (ext[:, jnp.newaxis] < z2[..., jnp.newaxis]), g_ext[:, jnp.newaxis], - upper_sentinel, # don't make too large or softmax loses resolution + upper_sentinel, ) @@ -622,12 +622,15 @@ def interp_to_argmin_g( expense of noisier gradients - noisier in the physics sense (unrelated to the automatic differentiation). upper_sentinel : float - Something a good bit larger than ``g``. For example if max(g) is - 10, then 50 will more than suffice. + Something larger than g. Choose value such that + exp(max(g)) << exp(``upper_sentinel``). Don't make too large or numerical + resolution is lost. Warnings -------- - Recall that if ``g`` is small then the effect of β is reduced. + Recall that if g is small then the effect of β is reduced. + If the intention is to use this function as argmax, be sure to supply + a lower sentinel for ``upper_sentinel``. Returns ------- @@ -637,7 +640,10 @@ def interp_to_argmin_g( """ ext, g = _get_extrema(knots, g, dg_dz, sentinel=0) - argmin = softmax(beta * _where_argmin_g(z1, z2, ext, g, upper_sentinel), axis=-1) + # JAX softmax(x) does the proper shift to compute softmax(x - max(x)), but it's + # still not a good idea to compute over a large length scale, so we warn in + # docstring to choose upper sentinel properly. 
+ argmin = softmax(beta * _where_for_argmin(z1, z2, ext, g, upper_sentinel), axis=-1) h = jnp.linalg.vecdot( argmin, interp1d_vec(ext, knots, jnp.atleast_2d(h), method=method)[:, jnp.newaxis], @@ -696,10 +702,10 @@ def interp_to_argmin_g_hard(h, z1, z2, knots, g, dg_dz, method="cubic"): """ ext, g = _get_extrema(knots, g, dg_dz, sentinel=0) # We can use the non-differentiable max because we actually want the gradients - # to accumulate through only the minimum anyway since we are differentiating how - # our physics objective changes wrt physics stuff not wrt which of the extrema - # are interpolated to. - argmin = jnp.argmin(_where_argmin_g(z1, z2, ext, g, jnp.max(g) + 10), axis=-1) + # to accumulate through only the minimum since we are differentiating how our + # physics objective changes wrt equilibrium perturbations not wrt which of the + # extrema get interpolated to. + argmin = jnp.argmin(_where_for_argmin(z1, z2, ext, g, jnp.max(g) + 1), axis=-1) A = jnp.take_along_axis(ext[jnp.newaxis], argmin, axis=-1) h = interp1d_vec(A, knots, jnp.atleast_2d(h), method=method) assert h.shape == z1.shape diff --git a/desc/integrals/quad_utils.py b/desc/integrals/quad_utils.py index 89ad99d827..d1f66057da 100644 --- a/desc/integrals/quad_utils.py +++ b/desc/integrals/quad_utils.py @@ -122,6 +122,7 @@ def tanh_sinh(deg, m=10): Returns ------- x, w : (jnp.ndarray, jnp.ndarray) + Shape (deg, ). Quadrature points and weights. """ @@ -190,7 +191,7 @@ def leggauss_lob(deg, interior_only=False): def get_quadrature(quad, automorphism): - """Apply automorphism to given quadrature points and weights. + """Apply automorphism to given quadrature. Parameters ---------- @@ -206,7 +207,7 @@ def get_quadrature(quad, automorphism): Returns ------- x, w : (jnp.ndarray, jnp.ndarray) - Quadrature points in [-1, 1] and associated weights. + Quadrature points and weights. """ x, w = quad diff --git a/tests/test_integrals.py b/tests/test_integrals.py index 391d29bb1c..181afaee97 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -15,7 +15,7 @@ from desc.backend import jnp from desc.basis import FourierZernikeBasis -from desc.compute.utils import dot +from desc.compute.utils import dot, safediv from desc.equilibrium import Equilibrium from desc.equilibrium.coords import get_rtz_grid from desc.examples import get @@ -884,7 +884,7 @@ def _mod_chebu_gauss(deg): class TestBounce1DQuadrature: - """Test bounce quadrature accuracy.""" + """Test bounce quadrature.""" @pytest.mark.unit @pytest.mark.parametrize( @@ -900,7 +900,7 @@ class TestBounce1DQuadrature: ], ) def test_bounce_quadrature(self, is_strong, quad, automorphism): - """Test bounce integral matches elliptic integrals.""" + """Test bounce integral matches singular elliptic integrals.""" p = 1e-4 m = 1 - p # Some prime number that doesn't appear anywhere in calculation. @@ -1040,10 +1040,10 @@ def test_integrate_checks(self): def numerator(g_zz, B, pitch): f = (1 - pitch * B / 2) * g_zz - return f / jnp.sqrt(1 - pitch * B) + return safediv(f, jnp.sqrt(jnp.abs(1 - pitch * B))) def denominator(B, pitch): - return 1 / jnp.sqrt(1 - pitch * B) + return safediv(1, jnp.sqrt(jnp.abs(1 - pitch * B))) # Pick flux surfaces, field lines, and how far to follow the field line # in Clebsch-Type field-line coordinates ρ, α, ζ. 
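The guarded integrands above avoid nan from the square-root singularity at the bounce points: wherever 1 − λ|B| ≤ 0, the plain expression returns inf or nan, which would then propagate through the plain ``np.sum`` used later in this test. A minimal sketch of the difference, reusing the ``safediv`` call pattern from this diff with made-up values:

.. code-block:: python

    from desc.backend import jnp
    from desc.compute.utils import safediv

    pitch = 0.5
    B = jnp.array([1.9, 2.0, 2.1])                  # hypothetical |B| samples
    plain = 1 / jnp.sqrt(1 - pitch * B)             # -> [4.47..., inf, nan]
    guarded = safediv(1, jnp.sqrt(jnp.abs(1 - pitch * B)))  # no inf or nan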
@@ -1059,7 +1059,7 @@ def denominator(B, pitch): data = eq.compute( Bounce1D.required_names() + ["min_tz |B|", "max_tz |B|", "g_zz"], grid=grid ) - bounce = Bounce1D(grid.source_grid, data, check=True, quad=leggauss(3)) + bounce = Bounce1D(grid.source_grid, data, quad=leggauss(3), check=True) pitch = get_pitch( grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 ) @@ -1070,20 +1070,19 @@ def denominator(B, pitch): check=True, ) den = bounce.integrate(pitch, denominator, [], check=True) - avg = num / den + avg = safediv(num, den) # Sum all bounce integrals across each particular field line. - avg = np.nansum(avg, axis=-1) - assert np.count_nonzero(avg) - # Split the resulting data by field line. + avg = np.sum(avg, axis=-1) + assert np.isfinite(avg).all() + # Group the averages by field line. avg = avg.reshape(pitch.shape[0], rho.size, alpha.size) # The sum stored at index i, j i, j = 0, 0 print(avg[:, i, j]) # is the summed bounce average among wells along the field line with nodes # given in Clebsch-Type field-line coordinates ρ, α, ζ - raz_grid = grid.source_grid - nodes = raz_grid.nodes.reshape(rho.size, alpha.size, -1, 3) + nodes = grid.source_grid.meshgrid_reshape(grid.source_grid.nodes, "raz") print(nodes[i, j]) # for the pitch values stored in pitch = pitch.reshape(pitch.shape[0], rho.size, alpha.size) diff --git a/tests/test_utils.py b/tests/test_utils.py index aa042fcbc8..2812e8a01b 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -205,7 +205,7 @@ def test_broadcast_tree(): def _last_value(a): """Return the last non-nan value in ``a``.""" a = a[::-1] - idx = np.squeeze(flatnonzero(~np.isnan(a), size=1, fill_value=0)) + idx = jnp.squeeze(flatnonzero(~jnp.isnan(a), size=1, fill_value=0)) return a[idx] From 3e57eca50a791a332d44ab4e562dd0fade50a87c Mon Sep 17 00:00:00 2001 From: unalmis Date: Tue, 27 Aug 2024 11:14:17 -0400 Subject: [PATCH 225/241] Clean up tests and API --- desc/equilibrium/coords.py | 2 +- desc/integrals/bounce_integral.py | 8 +-- desc/integrals/bounce_utils.py | 6 +- .../baseline/test_binormal_drift_bounce1d.png | Bin 0 -> 18582 bytes tests/baseline/test_drift.png | Bin 18687 -> 0 bytes tests/test_integrals.py | 60 ++++++++++-------- 6 files changed, 40 insertions(+), 36 deletions(-) create mode 100644 tests/baseline/test_binormal_drift_bounce1d.png delete mode 100644 tests/baseline/test_drift.png diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index 224f280f64..9d722ce52d 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -704,7 +704,7 @@ def get_rtz_grid( [radial, poloidal, toroidal], coordinates=coordinates, period=period ) if "iota" in kwargs: - kwargs["iota"] = grid.expand(kwargs["iota"]) + kwargs["iota"] = grid.expand(jnp.atleast_1d(kwargs["iota"])) inbasis = { "r": "rho", "t": "theta", diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 23656cc016..a1db3856a5 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -8,7 +8,7 @@ _check_bounce_points, bounce_points, bounce_quadrature, - interp_to_argmin_g, + interp_to_argmin, plot_ppoly, ) from desc.integrals.interp_utils import polyder_vec @@ -282,7 +282,7 @@ def integrate( self, pitch, integrand, - f, + f=None, weight=None, num_well=None, method="cubic", @@ -355,7 +355,7 @@ def integrate( z2=z2, pitch=pitch, integrand=integrand, - f=f, + f=setdefault(f, []), data=self._data, knots=self._zeta, method=method, @@ -363,7 +363,7 @@ def integrate( check=check, ) if 
weight is not None: - result *= interp_to_argmin_g( + result *= interp_to_argmin( h=weight, z1=z1, z2=z2, diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index 6d8b1023bf..df175d4e2c 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -581,7 +581,7 @@ def _where_for_argmin(z1, z2, ext, g_ext, upper_sentinel): ) -def interp_to_argmin_g( +def interp_to_argmin( h, z1, z2, knots, g, dg_dz, method="cubic", beta=-100, upper_sentinel=1e2 ): """Interpolate ``h`` to the deepest point of ``g`` between ``z1`` and ``z2``. @@ -652,14 +652,14 @@ def interp_to_argmin_g( return h -def interp_to_argmin_g_hard(h, z1, z2, knots, g, dg_dz, method="cubic"): +def interp_to_argmin_hard(h, z1, z2, knots, g, dg_dz, method="cubic"): """Interpolate ``h`` to the deepest point of ``g`` between ``z1`` and ``z2``. Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A ∈ argmin_E g(ζ). Returns h(A). See Also -------- - interp_to_argmin_g + interp_to_argmin Accomplishes the same task, but handles the case of non-unique global minima more correctly. It is also more efficient if P >> 1. diff --git a/tests/baseline/test_binormal_drift_bounce1d.png b/tests/baseline/test_binormal_drift_bounce1d.png new file mode 100644 index 0000000000000000000000000000000000000000..95339623df082e76afc334907dc6a5390cba1230 GIT binary patch literal 18582 zcmeIaWmMJO_b&<}ASFsj3L@PlAl=HQyF)%0BkGtRmHd&hlo#yzi&V?2ZLz}joA*>is8T>Gu2`ePz|YJ3a~3?i74f;I*Q);$c2 zYlb&)z;E7r20McvqV9@@?mAAk?p~IzHW+G_?#?fq++R3Y-Sf0@b#riX6yy=*;pe(% z@9ysGCdSMA^1mtDCRKDv=G-4w-R<7f27C*;0-AJtu&@*{{@TI7_0AA2*I_+g( zV2OmWYyx(@w8j;VclQZ_I4|D1EVTX3*DJ`hLn`Aak+ExLV%)mq-5)lvxVuhypG~UnPG^8!xMrxR{C<+RMYUcYOxO1nj1r8c>(RpXN0#}k!fdM zuInxQtE1s#<(7+`+f}g{0r>D#V3y;yL2OGOANmASmyBjMzQ&<_2oLH^v~0X~yv+Dn z7q9+|XE_vXv?A+TQf>Jpx0xxvCL+!!YYxI;!+WE33VM zE4OouD%|d36NE7`J&xYyW$5UTElB`0m;f5Sb-7x!?M8R8r_|g_d}J^`)i=!?7+_d- zou`J7!{FvsIpC){F_r@V)?^{7taM6_!$Ln5?lfe*ANKoi2dPwwdF{iYLaG405@=x> z=6y^fL0g+=dtcnS#^Y`dif^|dSbwEUhI&oEGX9=vDX*kT2+siwT1BZX4>Z^)CBI@g z2y=Fce&;h=hkN{N#-LUkMb4tuDTC_m&nI6W!>X93-q!{$ZKw+)?lMM`ttEt<{%;d4 z#emHt3eU}%j@K-tl%fKhRj#r}n{zj+t^#g^2AgvNhMiL08wmXU)&4QhuK-LnR-v5E z6pn7Cl*V_wOUxQ{-m{p0>S#`n!ri4>*kFkLieps#KJGIMqYsK36kiYWiV~=d!gvq; zdP9htI7@y8VfKHuKJJA)@P!A?n|*1rK8ufuY5FAh6(IK3SaVO+G#~HG1qwM_3n#{{ z(*}xlz$b&0*5dP$)ps{;BuAOb=I|$`oOGy?kUTkcPZ6hhHX7!C7K|3I7`Us2v*UWrt95Kl>C6shCWU)2mlUr2Ap0ys5KI4lsf4yP;sXr(Q(Ut z;S&2u=f?|tm8751cQxxJ{7!88If3?Of55zdL9{ZMgUT7_-7!5tc|RM|`Ib5!R!}V2 zxx47RPMN?>Kp#N)Z(|#uGyMSL&)*tjuw zkwe?sem+mJ_LgKr@ij~6R6kPg%RwYrfpyEPwFc#7gUY)ZX&JkpI?LTrw7DALwmq4O z1l}9pGbxloAOvbCM%3FEAoXsXH+w#Z{L9%Rv$lHa;IGzm(c+oDUf%m!N-#XMdTk3_ zmy}WFEVJwE{dQ-t()#eOd;0IxBI%clp%kZaB2I5`>2A!B?hdHLO8NJz0A2-yu$re8 z=1%4Py|;kumtK(6-dQP7gUEG3gx`t4U=hvwG?8_?T&}7tZz2;6KMeDJnr)Wm8{f`p z`B$dc_Tt~U#Sb^0JX`znw(|?J`|LXRd=E2|p_`}M$sh%hW8ZsHfPA;M;28pp zU7Fu^^{4$Q^oxQy^k&btLd=00lKJ4gamPulLDF)H7ybS7);fV&E8<1yNU1mfi?OYP3)a*BEVE(%+eK0T&3UZsA8!FYib_e5Mk)PIuX!a`eK2=}@BBR# zPifeGC^CsLT+lq>gbSU#(k&>1yrE2?+%7nrVX0KdKxi!T#cb51Ws0a3fr(>7}P9GGmev7Cz+S3kj7ghe@ zm)saKH*CPbfEi7T2hRercLnGkW({9^DA!8qYh`Q>t*t2d$4Hu%za4!3VnfA5zDNJr zk6#AK7AvXanJbj>=!j&h%44L6s|(J$)hpR}dZ77*#P957KLOAW$OlG{09&$qvhj+r zP$(9&0GDt3153Bh*1efP`ufuCK@GGB-B$^3v3E}14#XWaQMH>!?NxfHDdzUhz?Qn_ zZWz@FFMB{fx@~m(QI>ffJWkNAcg8O%HdaPXPEO_>nZp78u(d;Y&X7i7pIZ3>mGs8l z%?)SCM2^P49dqRj5dW1`RhjttcSJHa4xE?R338A~hIDs-)CWgF85=~LJ^~?&OwDw;N#Ql&XXK~BuR=&xhA&>!pWw7bg_QT=Ll!!a zu}5)>oqR>VmZsde$7(erB>)d`l*Zy3|esx+CK9Bj)`a0mvanM05V}dP-01 zN7cuvGrHS|t65CzccAxgl?axCa%gdn7k7bp$x@kVvWu^_BHeqw)DYY(cL(pQi_B5T 
z4Fl`;c9?gIw?D7nF8W)yWjRzY9tL zLM0F8uu`E(eQ41zr2IuphP6ip#0iGZu@9u73>JE?(e>gVOR9Q*%DF0A)Wk05pQk3mJ1QJ-7%uuS z|M5>+G@l3bLRLizcm-WkOl9WdkKf9 zJ&QeCo{>W$g;r9btO-0cwNe2mLBj)pGYg=nff7q`?JfPz%FW?xFQ!Ose&N!}Uo9mB zV6&{F-9QM=$612dWVP7`g9@M~XU@D0nq=Zv_)xGClA%PaiK)oA*AII98uS?d`tKpd zdBE6%ayz|m3;()E^r@8_ct)z-581t)e??FU`{4%&Zw2sJ{7!rM`*;_eW%KVW2FM=j z{U&7g&N{CF7@xHy*inv_`{B5KI1LoWV^rQeTp`Vz)jII6<*r#u{Ig(_V)ugt52|FY!96=48cA2^r zTPPtauFly!w{~p3bCq_h8p_c3!VsK?ZeZq4|zJF=le zpj%f}O%rCqa$_u*6WQtSzueWIjifYf4gUQVuKSPI+CshZp$Y?4#s=|M0Gd2sSy}38 z>E(x-zHf+IHZ`2kT1+LSCKhtBJmdw2e$1bKw048m=NhjS)+41?XwP@kU+hYbARVhV z`5Rw$)sT2Q!^b;MpCrxi-sNSEQTuJQ0&|t8%@%2BL&_s0%pS(=~F|5(5X!-YVD93QR_Fj;oly)ths37&#q zDV9OdKA1xNRrkUcJmmcb=DN<8_mKw~@^}0(-|+oowV?F`R|C9UeLKOk%%9%T-U43O zHh7nsN=q!joc}GU|0*-Zb5(KTQ*o6t=QCSKVc>0XY;?OFJb(sKDc7)KBPE)TgLR?$ z-Bs@jx}C@6D+JIpu*1{;>`>wIQ6{K>@_dV%Enu{$Ey^s_*W+5-e-3zBN`HkV0qv9K-+=+4|9XA^2m?-_0h-Z2>@F`#?uS#OH+}ffHolO4=Kc5aqUQ+V zXi^jvCi;R^{|@yU9f^Fp(X$s>0cZmcPyD`5EXaRU%XaGXbU@=DG>EtZp0VWlSX;#V zl|5R8GPSx^hElGqwpraSolH8k&cphsL!0cG)9 zDDFb&O&y4USt8hXr_cr>V-TlUQ0j4{Pe?5cL&0RA?gkiMEO4^bNrK7KH@uJt}#8v^l|Evm|Jn9_SPZ#(2=L!o#6%20h|c z9-uaCn6Sd(s4gRyIS~C_%&hU+_Ma)DB~hmB|9+6<74*b2JVEo3gxNp4`leZ1iCf@N z^aqS8f)?IA6#fOapZH++HY4(6<_Uju z`YI}_dH8bIGv%`3zrW^v>@Y2$U+qAR!iS!WN#K72WL1hD diff --git a/tests/test_integrals.py b/tests/test_integrals.py index 181afaee97..81379154dc 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -38,8 +38,8 @@ _get_extrema, bounce_points, get_pitch, - interp_to_argmin_g, - interp_to_argmin_g_hard, + interp_to_argmin, + interp_to_argmin_hard, plot_ppoly, ) from desc.integrals.quad_utils import ( @@ -1089,8 +1089,8 @@ def denominator(B, pitch): print(pitch[:, i, j]) @pytest.mark.unit - @pytest.mark.parametrize("func", [interp_to_argmin_g, interp_to_argmin_g_hard]) - def test_interp_to_argmin_g(self, func): + @pytest.mark.parametrize("func", [interp_to_argmin, interp_to_argmin_hard]) + def test_interp_to_argmin(self, func): """Test argmin interpolation.""" # noqa: D202 # Test functions chosen with purpose; don't change unless plotted and compared. @@ -1146,6 +1146,9 @@ def drift_analytic(data): Numerically computed ``data["cvdrift"]` and ``data["gbdrift"]`` normalized by some scale factors for this unit test. These should be fed to the bounce integration as input. + pitch : jnp.ndarray + Shape (P, ). + Pitch values used. 
""" B = data["|B|"] / data["Bref"] @@ -1232,9 +1235,20 @@ def drift_analytic(data): drift_analytic = drift_analytic_num / drift_analytic_den return drift_analytic, cvdrift, gbdrift, pitch + @staticmethod + def drift_num_integrand(cvdrift, gbdrift, B, pitch): + """Integrand of numerator of bounce averaged binormal drift.""" + g = jnp.sqrt(1 - pitch * B) + return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) + + @staticmethod + def drift_den_integrand(B, pitch): + """Integrand of denominator of bounce averaged binormal drift.""" + return 1 / jnp.sqrt(1 - pitch * B) + @pytest.mark.unit @pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_1d) - def test_drift(self): + def test_binormal_drift_bounce1d(self): """Test bounce-averaged drift with analytical expressions.""" eq = Equilibrium.load(".//tests//inputs//low-beta-shifted-circle.h5") psi_boundary = eq.Psi / (2 * np.pi) @@ -1255,7 +1269,7 @@ def test_drift(self): zeta, coordinates="raz", period=(np.inf, 2 * np.pi, np.inf), - iota=np.array([iota]), + iota=iota, ) data = eq.compute( Bounce1D.required_names() @@ -1293,25 +1307,17 @@ def test_drift(self): Lref=data["a"], check=True, ) - - def integrand_num(cvdrift, gbdrift, B, pitch): - g = jnp.sqrt(1 - pitch * B) - return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) - - def integrand_den(B, pitch): - return 1 / jnp.sqrt(1 - pitch * B) - + f = Bounce1D.reshape_data(grid.source_grid, cvdrift, gbdrift) drift_numerical_num = bounce.integrate( pitch=pitch[:, np.newaxis], - integrand=integrand_num, - f=Bounce1D.reshape_data(grid.source_grid, cvdrift, gbdrift), + integrand=TestBounce1D.drift_num_integrand, + f=f, num_well=1, check=True, ) drift_numerical_den = bounce.integrate( pitch=pitch[:, np.newaxis], - integrand=integrand_den, - f=[], + integrand=TestBounce1D.drift_den_integrand, num_well=1, weight=np.ones(zeta.size), check=True, @@ -1325,8 +1331,8 @@ def integrand_den(B, pitch): self._test_bounce_autodiff( bounce, - integrand_num, - f=[cvdrift, gbdrift], + TestBounce1D.drift_num_integrand, + f=f, weight=np.ones(zeta.size), ) @@ -1339,6 +1345,11 @@ def integrand_den(B, pitch): def _test_bounce_autodiff(bounce, integrand, **kwargs): """Make sure reverse mode AD works correctly on this algorithm.""" + def integrand_grad(*args, **kwargs2): + return jnp.vectorize( + grad(integrand, -1), signature="()," * len(kwargs["f"]) + "(),()->()" + )(*args, *kwargs2.values()) + def fun1(pitch): return jnp.sum(bounce.integrate(pitch, integrand, check=False, **kwargs)) @@ -1347,15 +1358,8 @@ def fun2(pitch): bounce.integrate(pitch, integrand_grad, check=True, **kwargs) ) - def integrand_grad(*args, **kwargs2): - fun = jnp.vectorize( - grad(integrand, -1), signature="()," * len(kwargs["f"]) + "(),()->()" - ) - return fun(*args, *kwargs2.values()) - pitch = 1.0 truth = 650 # Extrapolated from plot. assert np.isclose(grad(fun1)(pitch), truth, rtol=1e-3) # Make sure bounce points get differentiated too. 
- result = fun2(pitch) - assert np.isfinite(result) and not np.isclose(result, truth, rtol=1e-1) + assert np.isclose(fun2(pitch), -131750, rtol=1e-1) From 62d553b8f0d2aa7fae6a041d5511326604e87121 Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 28 Aug 2024 04:46:56 -0400 Subject: [PATCH 226/241] Fix plotting bug from recent commits and address review comments part 1 --- desc/integrals/basis.py | 14 ++--- desc/integrals/bounce_integral.py | 27 ++++++---- desc/integrals/bounce_utils.py | 35 ++++++++----- desc/integrals/interp_utils.py | 39 ++++++-------- desc/integrals/quad_utils.py | 4 +- desc/utils.py | 6 +-- tests/test_integrals.py | 86 +++++++++++++++++++------------ tests/test_interp_utils.py | 11 ++-- 8 files changed, 118 insertions(+), 104 deletions(-) diff --git a/desc/integrals/basis.py b/desc/integrals/basis.py index bef464cbc4..0fb68ece3f 100644 --- a/desc/integrals/basis.py +++ b/desc/integrals/basis.py @@ -7,16 +7,12 @@ @partial(jnp.vectorize, signature="(m),(m)->(m)") -def _in_epigraph_and(is_intersect, df_dy_sign): +def _in_epigraph_and(is_intersect, df_dy_sign, /): """Set and epigraph of function f with the given set of points. Used to return only intersects where the straight line path between adjacent intersects resides in the epigraph of a continuous map ``f``. - Warnings - -------- - Does not support keyword arguments. - Parameters ---------- is_intersect : jnp.ndarray @@ -40,7 +36,7 @@ def _in_epigraph_and(is_intersect, df_dy_sign): # must be at the first pair. To correct the inversion, it suffices to disqualify the # first intersect as a right boundary, except under an edge case of a series of # inflection points. - idx = flatnonzero(is_intersect, size=2, fill_value=-1) # idx of first 2 intersects + idx = flatnonzero(is_intersect, size=2, fill_value=-1) edge_case = ( (df_dy_sign[idx[0]] == 0) & (df_dy_sign[idx[1]] < 0) @@ -83,9 +79,9 @@ def _plot_intersect(ax, legend, z1, z2, k, k_transparency, klabel): for i in range(k.size): _z1, _z2 = z1[i], z2[i] if _z1.size == _z2.size: - mask = (z1 - z2) != 0.0 - _z1 = z1[mask] - _z2 = z2[mask] + mask = (_z1 - _z2) != 0.0 + _z1 = _z1[mask] + _z2 = _z2[mask] _add2legend( legend, ax.scatter( diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index a1db3856a5..8c086671b5 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -185,14 +185,14 @@ def required_names(): return ["B^zeta", "B^zeta_z|r,a", "|B|", "|B|_z|r,a"] @staticmethod - def reshape_data(grid, *data): - """Reshape ``data`` arrays for acceptable input to ``integrate``. + def reshape_data(grid, *arys): + """Reshape arrays for acceptable input to ``integrate``. Parameters ---------- grid : Grid Clebsch coordinate (ρ, α, ζ) tensor-product grid. - data : jnp.ndarray + arys : jnp.ndarray Data evaluated on grid. Returns @@ -201,10 +201,10 @@ def reshape_data(grid, *data): List of reshaped data which may be given to ``integrate``. """ - f = [grid.meshgrid_reshape(d, "raz").reshape(-1, grid.num_zeta) for d in data] + f = [grid.meshgrid_reshape(d, "raz").reshape(-1, grid.num_zeta) for d in arys] return f - def bounce_points(self, pitch, num_well=None): + def points(self, pitch, num_well=None): """Compute bounce points. Parameters @@ -229,7 +229,7 @@ def bounce_points(self, pitch, num_well=None): ------- z1, z2 : (jnp.ndarray, jnp.ndarray) Shape (P, L * M, num_well). - ζ coordinates of bounce points. The points are grouped and ordered such + ζ coordinates of bounce points. 
The points are ordered and grouped such that the straight line path between ``z1`` and ``z2`` resides in the epigraph of |B|. @@ -246,14 +246,14 @@ def bounce_points(self, pitch, num_well=None): num_well=num_well, ) - def check_bounce_points(self, z1, z2, pitch, plot=True, **kwargs): + def check_points(self, z1, z2, pitch, plot=True, **kwargs): """Check that bounce points are computed correctly. Parameters ---------- z1, z2 : (jnp.ndarray, jnp.ndarray) Shape (P, L * M, num_well). - ζ coordinates of bounce points. The points are grouped and ordered such + ζ coordinates of bounce points. The points are ordered and grouped such that the straight line path between ``z1`` and ``z2`` resides in the epigraph of |B|. pitch : jnp.ndarray @@ -264,11 +264,16 @@ def check_bounce_points(self, z1, z2, pitch, plot=True, **kwargs): line. If two-dimensional, the first axis is the batch axis. plot : bool Whether to plot stuff. - kwargs : dict + kwargs Keyword arguments into ``self.plot_ppoly``. + Returns + ------- + plots : list + List of matplotlib (fig, ax) tuples for the 1D plot of each field line. + """ - _check_bounce_points( + return _check_bounce_points( z1=z1, z2=z2, pitch=jnp.atleast_2d(pitch), @@ -347,7 +352,7 @@ def integrate( """ pitch = jnp.atleast_2d(pitch) - z1, z2 = self.bounce_points(pitch, num_well) + z1, z2 = self.points(pitch, num_well) result = bounce_quadrature( x=self._x, w=self._w, diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index df175d4e2c..1e52497e8e 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -3,7 +3,8 @@ from interpax import PPoly from matplotlib import pyplot as plt -from desc.backend import imap, jnp, softmax +from desc.backend import imap, jnp +from desc.backend import softmax as softargmax from desc.integrals.basis import _add2legend, _in_epigraph_and, _plot_intersect from desc.integrals.interp_utils import ( interp1d_Hermite_vec, @@ -145,14 +146,14 @@ def bounce_points( Flag for debugging. Must be false for JAX transformations. plot : bool Whether to plot some things if check is true. Default is true. - kwargs : dict + kwargs Keyword arguments into ``plot_ppoly``. Returns ------- z1, z2 : (jnp.ndarray, jnp.ndarray) Shape (P, S, num_well). - ζ coordinates of bounce points. The points are grouped and ordered such + ζ coordinates of bounce points. The points are ordered and grouped such that the straight line path between ``z1`` and ``z2`` resides in the epigraph of |B|. @@ -216,6 +217,7 @@ def _check_bounce_points(z1, z2, pitch, knots, B, plot=True, **kwargs): kwargs.setdefault("klabel", r"$1/\lambda$") kwargs.setdefault("hlabel", r"$\zeta$") kwargs.setdefault("vlabel", r"$\vert B \vert$") + plots = [] assert z1.shape == z2.shape mask = (z1 - z2) != 0.0 @@ -254,13 +256,16 @@ def _check_bounce_points(z1, z2, pitch, knots, B, plot=True, **kwargs): "bounce points is in hypograph(|B|). Use more knots.\n" ) if plot: - plot_ppoly( - ppoly=Bs, - z1=z1[:, s], - z2=z2[:, s], - k=1 / pitch[:, s], - **kwargs, + plots.append( + plot_ppoly( + ppoly=Bs, + z1=z1[:, s], + z2=z2[:, s], + k=1 / pitch[:, s], + **kwargs, + ) ) + return plots def bounce_quadrature( @@ -290,7 +295,7 @@ def bounce_quadrature( Quadrature weights. z1, z2 : jnp.ndarray Shape (P, S, num_well). - ζ coordinates of bounce points. The points are grouped and ordered such + ζ coordinates of bounce points. The points are ordered and grouped such that the straight line path between ``z1`` and ``z2`` resides in the epigraph of |B|. 
pitch : jnp.ndarray @@ -529,7 +534,7 @@ def _plot_check_interp(Q, V, name=""): def _get_extrema(knots, g, dg_dz, sentinel=jnp.nan): - """Return ext (ζ*, g(ζ*)). + """Return extrema (ζ*, g(ζ*)). Parameters ---------- @@ -595,7 +600,7 @@ def interp_to_argmin( Values evaluated on ``knots`` to interpolate. z1, z2 : jnp.ndarray Shape (P, S, num_well). - ζ coordinates of bounce points. The points are grouped and ordered such + ζ coordinates of bounce points. The points are ordered and grouped such that the straight line path between ``z1`` and ``z2`` resides in the epigraph of g. knots : jnp.ndarray @@ -643,7 +648,9 @@ def interp_to_argmin( # JAX softmax(x) does the proper shift to compute softmax(x - max(x)), but it's # still not a good idea to compute over a large length scale, so we warn in # docstring to choose upper sentinel properly. - argmin = softmax(beta * _where_for_argmin(z1, z2, ext, g, upper_sentinel), axis=-1) + argmin = softargmax( + beta * _where_for_argmin(z1, z2, ext, g, upper_sentinel), axis=-1 + ) h = jnp.linalg.vecdot( argmin, interp1d_vec(ext, knots, jnp.atleast_2d(h), method=method)[:, jnp.newaxis], @@ -670,7 +677,7 @@ def interp_to_argmin_hard(h, z1, z2, knots, g, dg_dz, method="cubic"): Values evaluated on ``knots`` to interpolate. z1, z2 : jnp.ndarray Shape (P, S, num_well). - ζ coordinates of bounce points. The points are grouped and ordered such + ζ coordinates of bounce points. The points are ordered and grouped such that the straight line path between ``z1`` and ``z2`` resides in the epigraph of g. knots : jnp.ndarray diff --git a/desc/integrals/interp_utils.py b/desc/integrals/interp_utils.py index 3f48a31101..b5977c4134 100644 --- a/desc/integrals/interp_utils.py +++ b/desc/integrals/interp_utils.py @@ -3,12 +3,12 @@ from functools import partial from interpax import interp1d -from orthax.polynomial import polyvander from desc.backend import jnp from desc.compute.utils import safediv +# These polynomial manipulation methods are chosen for performance on gpu. def polyder_vec(c): """Coefficients for the derivatives of the given set of polynomials. @@ -27,15 +27,12 @@ def polyder_vec(c): ``c.shape[0]-1``. """ - poly = (c[:-1].T * jnp.arange(c.shape[0] - 1, 0, -1)).T - return poly + return (c[:-1].T * jnp.arange(c.shape[0] - 1, 0, -1)).T -def polyval_vec(x, c): +def polyval_vec(*, x, c): """Evaluate the set of polynomials ``c`` at the points ``x``. - Note this function is not the same as ``np.polynomial.polynomial.polyval(x,c)``. - Parameters ---------- x : jnp.ndarray @@ -54,24 +51,21 @@ def polyval_vec(x, c): -------- .. code-block:: python - val = polyval_vec(x, c) - if val.ndim != max(x.ndim, c.ndim - 1): - raise ValueError(f"Incompatible shapes {x.shape} and {c.shape}.") - for index in np.ndindex(c.shape[1:]): - idx = (..., *index) - np.testing.assert_allclose( - actual=val[idx], - desired=np.poly1d(c[idx])(x[idx]), - err_msg=f"Failed with shapes {x.shape} and {c.shape}.", - ) + np.testing.assert_allclose( + polyval_vec(x=x, c=c), + np.sum( + np.polynomial.polynomial.polyvander(x, c.shape[0] - 1) + * np.moveaxis(np.flipud(c), 0, -1), + axis=-1, + ), + ) """ # Better than Horner's method as we expect to evaluate low order polynomials. # No need to use fast multipoint evaluation techniques for the same reason. 
- val = jnp.linalg.vecdot( - polyvander(x, c.shape[0] - 1), jnp.moveaxis(jnp.flipud(c), 0, -1) + return jnp.einsum( + "...i,i...", x[..., jnp.newaxis] ** jnp.arange(c.shape[0] - 1, -1, -1), c ) - return val # Warning: method must be specified as keyword argument. @@ -81,8 +75,8 @@ def polyval_vec(x, c): @partial(jnp.vectorize, signature="(m),(n),(n),(n)->(m)") -def interp1d_Hermite_vec(xq, x, f, fx): - """Vectorized cubic Hermite spline. Does not support keyword arguments.""" +def interp1d_Hermite_vec(xq, x, f, fx, /): + """Vectorized cubic Hermite spline.""" return interp1d(xq, x, f, method="cubic", fx=fx) @@ -99,8 +93,7 @@ def poly_root( a_max=None, sort=False, sentinel=jnp.nan, - # About 2e-12 for 64 bit jax. - eps=min(jnp.finfo(jnp.array(1.0).dtype).eps * 1e4, 1e-8), + eps=max(jnp.finfo(jnp.array(1.0).dtype).eps, 2.5e-12), distinct=False, ): """Roots of polynomial with given coefficients. diff --git a/desc/integrals/quad_utils.py b/desc/integrals/quad_utils.py index d1f66057da..187d76367e 100644 --- a/desc/integrals/quad_utils.py +++ b/desc/integrals/quad_utils.py @@ -151,7 +151,7 @@ def leggauss_lob(deg, interior_only=False): Number of quadrature points. interior_only : bool Whether to exclude the points and weights at -1 and 1; - useful if f(-1) = f(1) = 0. If ``True``, then ``deg`` points are still + useful if f(-1) = f(1) = 0. If true, then ``deg`` points are still returned; these are the interior points for lobatto quadrature of ``deg+2``. Returns @@ -216,7 +216,7 @@ def get_quadrature(quad, automorphism): # Apply automorphisms to supress singularities. auto, grad_auto = automorphism w = w * grad_auto(x) - # Recall bijection_from_disc(auto(x), ζ_b₁, ζ_b₂) = ζ. + # Recall bijection_from_disc(auto(x), ζ₁, ζ₂) = ζ. x = auto(x) return x, w diff --git a/desc/utils.py b/desc/utils.py index 27b5fa79ad..24521c8e01 100644 --- a/desc/utils.py +++ b/desc/utils.py @@ -693,13 +693,9 @@ def broadcast_tree(tree_in, tree_out, dtype=int): @partial(jnp.vectorize, signature="(m),(m)->(n)", excluded={"size", "fill_value"}) -def take_mask(a, mask, size=None, fill_value=None): +def take_mask(a, mask, /, *, size=None, fill_value=None): """JIT compilable method to return ``a[mask][:size]`` padded by ``fill_value``. - Warnings - -------- - The parameters ``size`` and ``fill_value`` must be specified as keyword arguments. - Parameters ---------- a : jnp.ndarray diff --git a/tests/test_integrals.py b/tests/test_integrals.py index 81379154dc..260f559d8b 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -731,7 +731,7 @@ def filter(z1, z2): @pytest.mark.unit def test_z1_first(self): - """Test that bounce points are computed correctly.""" + """Case where straight line through first two intersects is in epigraph.""" start = np.pi / 3 end = 6 * np.pi knots = np.linspace(start, end, 5) @@ -746,7 +746,7 @@ def test_z1_first(self): @pytest.mark.unit def test_z2_first(self): - """Test that bounce points are computed correctly.""" + """Case where straight line through first two intersects is in hypograph.""" start = -3 * np.pi end = -start k = np.linspace(start, end, 5) @@ -761,7 +761,9 @@ def test_z2_first(self): @pytest.mark.unit def test_z1_before_extrema(self): - """Test that bounce points are computed correctly.""" + """Case where local maximum is the shared intersect between two wells.""" + # To make sure both regions in epigraph left and right of extrema are + # integrated over. 
start = -np.pi end = -2 * start k = np.linspace(start, end, 5) @@ -782,7 +784,9 @@ def test_z1_before_extrema(self): @pytest.mark.unit def test_z2_before_extrema(self): - """Test that bounce points are computed correctly.""" + """Case where local minimum is the shared intersect between two wells.""" + # To make sure both regions in hypgraph left and right of extrema are not + # integrated over. start = -1.2 * np.pi end = -2 * start k = np.linspace(start, end, 7) @@ -802,7 +806,8 @@ def test_z2_before_extrema(self): @pytest.mark.unit def test_extrema_first_and_before_z1(self): - """Test that bounce points are computed correctly.""" + """Case where first intersect is extrema and second enters epigraph.""" + # To make sure we don't perform integral between first pair of intersects. start = -1.2 * np.pi end = -2 * start k = np.linspace(start, end, 7) @@ -827,7 +832,8 @@ def test_extrema_first_and_before_z1(self): @pytest.mark.unit def test_extrema_first_and_before_z2(self): - """Test that bounce points are computed correctly.""" + """Case where first intersect is extrema and second exits epigraph.""" + # To make sure we do perform integral between first pair of intersects. start = -1.2 * np.pi end = -2 * start + 1 k = np.linspace(start, end, 7) @@ -904,9 +910,8 @@ def test_bounce_quadrature(self, is_strong, quad, automorphism): p = 1e-4 m = 1 - p # Some prime number that doesn't appear anywhere in calculation. - # Ensures no lucky cancellation occurs from this test case since otherwise - # (z2 - z1) / pi = pi / (z2 - z1) which could mask errors since pi - # appears often in transformations. + # Ensures no lucky cancellation occurs from ζ₂ − ζ₁ / π = π / (ζ₂ − ζ₁) + # which could mask errors since π appears often in transformations. v = 7 z1 = -np.pi / 2 * v z2 = -z1 @@ -932,7 +937,7 @@ def test_bounce_quadrature(self, is_strong, quad, automorphism): check=True, **kwargs, ) - result = bounce.integrate(pitch, integrand, [], check=True) + result = bounce.integrate(pitch, integrand, check=True) assert np.count_nonzero(result) == 1 np.testing.assert_allclose(np.sum(result), truth, rtol=1e-4) @@ -1028,6 +1033,15 @@ def elliptic_incomplete(k2): class TestBounce1D: """Test bounce integration with one-dimensional local spline methods.""" + @staticmethod + def _example_numerator(g_zz, B, pitch): + f = (1 - pitch * B / 2) * g_zz + return safediv(f, jnp.sqrt(jnp.abs(1 - pitch * B))) + + @staticmethod + def _example_denominator(B, pitch): + return safediv(1, jnp.sqrt(jnp.abs(1 - pitch * B))) + @pytest.mark.unit def test_integrate_checks(self): """Test that all the internal correctness checks pass for real example.""" @@ -1038,55 +1052,59 @@ def test_integrate_checks(self): # coordinates. This is defined as # [∫ f(ℓ) / √(1 − λ|B|) dℓ] / [∫ 1 / √(1 − λ|B|) dℓ] - def numerator(g_zz, B, pitch): - f = (1 - pitch * B / 2) * g_zz - return safediv(f, jnp.sqrt(jnp.abs(1 - pitch * B))) - - def denominator(B, pitch): - return safediv(1, jnp.sqrt(jnp.abs(1 - pitch * B))) - - # Pick flux surfaces, field lines, and how far to follow the field line - # in Clebsch-Type field-line coordinates ρ, α, ζ. + # 1. Define python functions for the integrands. We do that above. + # 2. Pick flux surfaces, field lines, and how far to follow the field + # line in Clebsch coordinates ρ, α, ζ. rho = np.linspace(0.1, 1, 6) alpha = np.array([0]) zeta = np.linspace(-2 * np.pi, 2 * np.pi, 200) eq = get("HELIOTRON") - # Convert above coordinates to DESC computational coordinates. + # 3. 
Convert above coordinates to DESC computational coordinates. grid = get_rtz_grid( eq, rho, alpha, zeta, coordinates="raz", period=(np.inf, 2 * np.pi, np.inf) ) + # 4. Compute input data. data = eq.compute( Bounce1D.required_names() + ["min_tz |B|", "max_tz |B|", "g_zz"], grid=grid ) - bounce = Bounce1D(grid.source_grid, data, quad=leggauss(3), check=True) pitch = get_pitch( grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 ) + # 5. Make the bounce integration operator. + bounce = Bounce1D(grid.source_grid, data, quad=leggauss(3), check=True) num = bounce.integrate( pitch, - numerator, - Bounce1D.reshape_data(grid.source_grid, data["g_zz"]), + integrand=TestBounce1D._example_numerator, + f=Bounce1D.reshape_data(grid.source_grid, data["g_zz"]), + check=True, + ) + den = bounce.integrate( + pitch, + integrand=TestBounce1D._example_denominator, check=True, ) - den = bounce.integrate(pitch, denominator, [], check=True) avg = safediv(num, den) + assert np.isfinite(avg).all() + # 6. Basic manipulation of the output. # Sum all bounce integrals across each particular field line. - avg = np.sum(avg, axis=-1) - assert np.isfinite(avg).all() + avg_sum = np.sum(avg, axis=-1) # Group the averages by field line. - avg = avg.reshape(pitch.shape[0], rho.size, alpha.size) - # The sum stored at index i, j + avg_sum = avg_sum.reshape(pitch.shape[0], rho.size, alpha.size) + # The sum stored at index i, j which denote some flux surface and field line i, j = 0, 0 - print(avg[:, i, j]) - # is the summed bounce average among wells along the field line with nodes - # given in Clebsch-Type field-line coordinates ρ, α, ζ + print(avg_sum[:, i, j]) + # This is the summed bounce average over all wells along the field line + # given by the field line following coordinates at index [i, j] of nodes nodes = grid.source_grid.meshgrid_reshape(grid.source_grid.nodes, "raz") print(nodes[i, j]) - # for the pitch values stored in - pitch = pitch.reshape(pitch.shape[0], rho.size, alpha.size) - print(pitch[:, i, j]) + # for the pitch values stored in index [:, i, j] of + print(pitch.reshape(pitch.shape[0], rho.size, alpha.size)[:, i, j]) + + # 7. Plotting utilities. 
+ z1, z2 = bounce.points(pitch) + plots = bounce.check_points(z1, z2, pitch) # noqa: F841 @pytest.mark.unit @pytest.mark.parametrize("func", [interp_to_argmin, interp_to_argmin_hard]) diff --git a/tests/test_interp_utils.py b/tests/test_interp_utils.py index 0b03b16f4e..f4c2215075 100644 --- a/tests/test_interp_utils.py +++ b/tests/test_interp_utils.py @@ -2,6 +2,7 @@ import numpy as np import pytest +from numpy.polynomial.polynomial import polyvander from desc.integrals.interp_utils import poly_root, polyder_vec, polyval_vec @@ -69,13 +70,11 @@ def test_polyval_vec(self): """Test vectorized computation of polynomial evaluation.""" def test(x, c): - val = polyval_vec(x=x, c=c) - c = np.moveaxis(c, 0, -1) - x = x[..., np.newaxis] np.testing.assert_allclose( - val, - np.vectorize(np.polyval, signature="(m),(n)->(n)")(c, x).squeeze( - axis=-1 + polyval_vec(x=x, c=c), + np.sum( + polyvander(x, c.shape[0] - 1) * np.moveaxis(np.flipud(c), 0, -1), + axis=-1, ), ) From 565337635ef06088518d5af14bc5694d3311c0b6 Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 28 Aug 2024 13:27:54 -0400 Subject: [PATCH 227/241] Resolves #1228 on pull request #854 --- desc/integrals/bounce_integral.py | 35 ++++++------ desc/integrals/bounce_utils.py | 26 ++++----- tests/test_integrals.py | 90 ++++++++++++++++--------------- tests/test_quad_utils.py | 4 +- 4 files changed, 79 insertions(+), 76 deletions(-) diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 8c086671b5..831715925f 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -8,6 +8,7 @@ _check_bounce_points, bounce_points, bounce_quadrature, + get_pitch, interp_to_argmin, plot_ppoly, ) @@ -79,6 +80,9 @@ class Bounce1D: strictly increasing and preferably uniformly spaced. These are used as knots to construct splines; a reference knot density is 100 knots per toroidal transit. + Also note the argument ``pitch`` in the below method is defined as + 1/λ ~ E/μ = energy / magnetic moment. + Examples -------- See ``tests/test_integrals.py::TestBounce1D::test_integrate_checks``. @@ -97,6 +101,7 @@ class Bounce1D: """ plot_ppoly = staticmethod(plot_ppoly) + get_pitch = staticmethod(get_pitch) def __init__( self, @@ -211,7 +216,7 @@ def points(self, pitch, num_well=None): ---------- pitch : jnp.ndarray Shape must broadcast with (P, L * M). - λ values to evaluate the bounce integral at each field line. λ(ρ,α) is + 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is specified by ``pitch[...,ρ]`` where in the latter the labels (ρ,α) are interpreted as the index into the last axis that corresponds to that field line. If two-dimensional, the first axis is the batch axis. @@ -238,13 +243,7 @@ def points(self, pitch, num_well=None): line and pitch, is padded with zero. """ - return bounce_points( - pitch=pitch, - knots=self._zeta, - B=self._B, - dB_dz=self._dB_dz, - num_well=num_well, - ) + return bounce_points(pitch, self._zeta, self._B, self._dB_dz, num_well) def check_points(self, z1, z2, pitch, plot=True, **kwargs): """Check that bounce points are computed correctly. @@ -258,7 +257,7 @@ def check_points(self, z1, z2, pitch, plot=True, **kwargs): epigraph of |B|. pitch : jnp.ndarray Shape must broadcast with (P, L * M). - λ values to evaluate the bounce integral at each field line. λ(ρ,α) is + 1/λ values to evaluate the bounce integral at each field line. 
1/λ(ρ,α) is specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are interpreted as the index into the last axis that corresponds to that field line. If two-dimensional, the first axis is the batch axis. @@ -270,7 +269,7 @@ def check_points(self, z1, z2, pitch, plot=True, **kwargs): Returns ------- plots : list - List of matplotlib (fig, ax) tuples for the 1D plot of each field line. + Matplotlib (fig, ax) tuples for the 1D plot of each field line. """ return _check_bounce_points( @@ -303,7 +302,7 @@ def integrate( ---------- pitch : jnp.ndarray Shape must broadcast with (P, L * M). - λ values to evaluate the bounce integral at each field line. λ(ρ,α) is + 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are interpreted as the index into the last axis that corresponds to that field line. If two-dimensional, the first axis is the batch axis. @@ -369,13 +368,13 @@ def integrate( ) if weight is not None: result *= interp_to_argmin( - h=weight, - z1=z1, - z2=z2, - knots=self._zeta, - g=self._B, - dg_dz=self._dB_dz, - method=method, + weight, + z1, + z2, + self._zeta, + self._B, + self._dB_dz, + method, ) assert result.shape[-1] == setdefault(num_well, (self._zeta.size - 1) * 3) return result diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index 1e52497e8e..babf1c3f59 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -21,7 +21,7 @@ def get_pitch(min_B, max_B, num, relative_shift=1e-6): - """Return uniformly spaced values between ``1/max_B`` and ``1/min_B``. + """Return 1/λ values uniformly spaced between ``min_B`` and ``max_B``. Parameters ---------- @@ -39,13 +39,15 @@ def get_pitch(min_B, max_B, num, relative_shift=1e-6): ------- pitch : jnp.ndarray Shape (num + 2, *min_B.shape). + 1/λ values. Note ``pitch`` = 1/λ ~ E/μ = energy / magnetic moment. """ # Floating point error impedes consistent detection of bounce points riding # extrema. Shift values slightly to resolve this issue. min_B = (1 + relative_shift) * min_B max_B = (1 - relative_shift) * max_B - pitch = composite_linspace(1 / jnp.stack([max_B, min_B]), num) + # Samples should be uniformly spaced in |B| and not λ (GitHub issue #1228). + pitch = composite_linspace(jnp.stack([min_B, max_B]), num) assert pitch.shape == (num + 2, *min_B.shape) return pitch @@ -72,7 +74,7 @@ def _check_spline_shape(knots, g, dg_dz, pitch=None): compose a particular spline. pitch : jnp.ndarray Shape must broadcast with (P, S). - λ values to evaluate the bounce integral at each field line. λ(ρ,α) is + 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are interpreted as the index into the last axis that corresponds to that field line. If two-dimensional, the first axis is the batch axis. @@ -114,7 +116,7 @@ def bounce_points( ---------- pitch : jnp.ndarray Shape must broadcast with (P, S). - λ values to evaluate the bounce integral at each field line. λ(ρ,α) is + 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are interpreted as the index into the last axis that corresponds to that field line. If two-dimensional, the first axis is the batch axis. @@ -167,7 +169,7 @@ def bounce_points( # Intersection points in local power basis. 
intersect = poly_root( c=B, - k=(1 / pitch)[..., jnp.newaxis], + k=pitch[..., jnp.newaxis], a_min=jnp.array([0.0]), a_max=jnp.diff(knots), sort=True, @@ -232,7 +234,7 @@ def _check_bounce_points(z1, z2, pitch, knots, B, plot=True, **kwargs): Bs = PPoly(B[:, s], knots) for p in range(P): Bs_midpoint = Bs((z1[p, s] + z2[p, s]) / 2) - err_3 = jnp.any(Bs_midpoint > 1 / pitch[p, s] + eps) + err_3 = jnp.any(Bs_midpoint > pitch[p, s] + eps) if not (err_1[p, s] or err_2[p, s] or err_3): continue _z1 = z1[p, s][mask[p, s]] @@ -242,7 +244,7 @@ def _check_bounce_points(z1, z2, pitch, knots, B, plot=True, **kwargs): ppoly=Bs, z1=_z1, z2=_z2, - k=1 / pitch[p, s], + k=pitch[p, s], **kwargs, ) @@ -251,7 +253,7 @@ def _check_bounce_points(z1, z2, pitch, knots, B, plot=True, **kwargs): assert not err_1[p, s], "Intersects have an inversion.\n" assert not err_2[p, s], "Detected discontinuity.\n" assert not err_3, ( - f"Detected |B| = {Bs_midpoint[mask[p, s]]} > {1 / pitch[p, s] + eps} " + f"Detected |B| = {Bs_midpoint[mask[p, s]]} > {pitch[p, s] + eps} " "= 1/λ in well, implying the straight line path between " "bounce points is in hypograph(|B|). Use more knots.\n" ) @@ -261,7 +263,7 @@ def _check_bounce_points(z1, z2, pitch, knots, B, plot=True, **kwargs): ppoly=Bs, z1=z1[:, s], z2=z2[:, s], - k=1 / pitch[:, s], + k=pitch[:, s], **kwargs, ) ) @@ -300,7 +302,7 @@ def bounce_quadrature( epigraph of |B|. pitch : jnp.ndarray Shape must broadcast with (P, S). - λ values to evaluate the bounce integral at each field line. λ(ρ,α) is + 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are interpreted as the index into the last axis that corresponds to that field line. If two-dimensional, the first axis is the batch axis. @@ -484,7 +486,7 @@ def _check_interp(Q, f, b_sup_z, B, B_z_ra, result, plot): assert jnp.isfinite(Q).all(), "NaN interpolation point." # Integrals that we should be computing. marked = jnp.any(Q != 0.0, axis=-1) - goal = jnp.sum(marked) + goal = marked.sum() msg = "Interpolation failed." assert jnp.isfinite(B_z_ra).all(), msg @@ -501,7 +503,7 @@ def _check_interp(Q, f, b_sup_z, B, B_z_ra, result, plot): actual = jnp.sum(marked & jnp.isfinite(result)) assert goal == actual, ( f"Lost {goal - actual} integrals from NaN generation in the integrand. This " - "can be caused by floating point error or a poor choice of quadrature nodes." + "is caused by floating point error, usually due to a poor quadrature choice." 
) if plot: _plot_check_interp(Q, B, name=r"$\vert B \vert$") diff --git a/tests/test_integrals.py b/tests/test_integrals.py index 260f559d8b..3648ac3a5e 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -736,8 +736,8 @@ def test_z1_first(self): end = 6 * np.pi knots = np.linspace(start, end, 5) B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots)) - pitch = 2.0 - intersect = B.solve(1 / pitch, extrapolate=False) + pitch = 0.5 + intersect = B.solve(pitch, extrapolate=False) z1, z2 = bounce_points(pitch, knots, B.c, B.derivative().c, check=True) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size @@ -751,8 +751,8 @@ def test_z2_first(self): end = -start k = np.linspace(start, end, 5) B = CubicHermiteSpline(k, np.cos(k), -np.sin(k)) - pitch = 2.0 - intersect = B.solve(1 / pitch, extrapolate=False) + pitch = 0.5 + intersect = B.solve(pitch, extrapolate=False) z1, z2 = bounce_points(pitch, k, B.c, B.derivative().c, check=True) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size @@ -771,11 +771,11 @@ def test_z1_before_extrema(self): k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) ) dB_dz = B.derivative() - pitch = 1 / B(dB_dz.roots(extrapolate=False))[3] + 1e-13 + pitch = B(dB_dz.roots(extrapolate=False))[3] - 1e-13 z1, z2 = bounce_points(pitch, k, B.c, dB_dz.c, check=True) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size - intersect = B.solve(1 / pitch, extrapolate=False) + intersect = B.solve(pitch, extrapolate=False) np.testing.assert_allclose(z1[1], 1.982767, rtol=1e-6) np.testing.assert_allclose(z1, intersect[[1, 2]], rtol=1e-6) # intersect array could not resolve double root as single at index 2,3 @@ -796,11 +796,11 @@ def test_z2_before_extrema(self): -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 4, ) dB_dz = B.derivative() - pitch = 1 / B(dB_dz.roots(extrapolate=False))[2] + pitch = B(dB_dz.roots(extrapolate=False))[2] z1, z2 = bounce_points(pitch, k, B.c, dB_dz.c, check=True) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size - intersect = B.solve(1 / pitch, extrapolate=False) + intersect = B.solve(pitch, extrapolate=False) np.testing.assert_allclose(z1, intersect[[0, -2]]) np.testing.assert_allclose(z2, intersect[[1, -1]]) @@ -817,14 +817,14 @@ def test_extrema_first_and_before_z1(self): -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 20, ) dB_dz = B.derivative() - pitch = 1 / B(dB_dz.roots(extrapolate=False))[2] - 1e-13 + pitch = B(dB_dz.roots(extrapolate=False))[2] + 1e-13 z1, z2 = bounce_points( pitch, k[2:], B.c[:, 2:], dB_dz.c[:, 2:], check=True, plot=False ) - plot_ppoly(B, z1=z1, z2=z2, k=1 / pitch, start=k[2]) + plot_ppoly(B, z1=z1, z2=z2, k=pitch, start=k[2]) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size - intersect = B.solve(1 / pitch, extrapolate=False) + intersect = B.solve(pitch, extrapolate=False) np.testing.assert_allclose(z1[0], 0.835319, rtol=1e-6) intersect = intersect[intersect >= k[2]] np.testing.assert_allclose(z1, intersect[[0, 2, 4]], rtol=1e-6) @@ -843,12 +843,12 @@ def test_extrema_first_and_before_z2(self): -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 10, ) dB_dz = B.derivative() - pitch = 1 / B(dB_dz.roots(extrapolate=False))[1] + 1e-13 + pitch = B(dB_dz.roots(extrapolate=False))[1] - 1e-13 z1, z2 = bounce_points(pitch, k, B.c, dB_dz.c, check=True) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size # Our routine correctly detects intersection, while scipy, jnp.root fails. 
- intersect = B.solve(1 / pitch, extrapolate=False) + intersect = B.solve(pitch, extrapolate=False) np.testing.assert_allclose(z1[0], -0.671904, rtol=1e-6) np.testing.assert_allclose(z1, intersect[[0, 3, 5]], rtol=1e-5) # intersect array could not resolve double root as single at index 0,1 @@ -916,16 +916,16 @@ def test_bounce_quadrature(self, is_strong, quad, automorphism): z1 = -np.pi / 2 * v z2 = -z1 knots = np.linspace(z1, z2, 50) - pitch = 1 + 50 * jnp.finfo(jnp.array(1.0).dtype).eps + pitch = 1 - 50 * jnp.finfo(jnp.array(1.0).dtype).eps b = np.clip(np.sin(knots / v) ** 2, 1e-7, 1) db = np.sin(2 * knots / v) / v data = {"B^zeta": b, "B^zeta_z|r,a": db, "|B|": b, "|B|_z|r,a": db} if is_strong: - integrand = lambda B, pitch: 1 / jnp.sqrt(1 - pitch * m * B) + integrand = lambda B, pitch: 1 / jnp.sqrt(1 - m * B / pitch) truth = v * 2 * ellipkm1(p) else: - integrand = lambda B, pitch: jnp.sqrt(1 - pitch * m * B) + integrand = lambda B, pitch: jnp.sqrt(1 - m * B / pitch) truth = v * 2 * ellipe(m) kwargs = {} if automorphism != "default": @@ -939,7 +939,7 @@ def test_bounce_quadrature(self, is_strong, quad, automorphism): ) result = bounce.integrate(pitch, integrand, check=True) assert np.count_nonzero(result) == 1 - np.testing.assert_allclose(np.sum(result), truth, rtol=1e-4) + np.testing.assert_allclose(result.sum(), truth, rtol=1e-4) @staticmethod @partial(np.vectorize, excluded={0}) @@ -1018,7 +1018,7 @@ def elliptic_incomplete(k2): TestBounce1DQuadrature._fixed_elliptic( lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * np.cos(Z), k, - deg=10, + deg=11, ), ) np.testing.assert_allclose( @@ -1035,12 +1035,13 @@ class TestBounce1D: @staticmethod def _example_numerator(g_zz, B, pitch): - f = (1 - pitch * B / 2) * g_zz - return safediv(f, jnp.sqrt(jnp.abs(1 - pitch * B))) + # Note ``pitch`` = 1/λ ~ E/μ = energy / magnetic moment. + f = (1 - 0.5 * B / pitch) * g_zz + return safediv(f, jnp.sqrt(jnp.abs(1 - B / pitch))) @staticmethod def _example_denominator(B, pitch): - return safediv(1, jnp.sqrt(jnp.abs(1 - pitch * B))) + return safediv(1, jnp.sqrt(jnp.abs(1 - B / pitch))) @pytest.mark.unit def test_integrate_checks(self): @@ -1068,11 +1069,11 @@ def test_integrate_checks(self): data = eq.compute( Bounce1D.required_names() + ["min_tz |B|", "max_tz |B|", "g_zz"], grid=grid ) - pitch = get_pitch( - grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 - ) # 5. Make the bounce integration operator. bounce = Bounce1D(grid.source_grid, data, quad=leggauss(3), check=True) + pitch = bounce.get_pitch( + grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 + ) num = bounce.integrate( pitch, integrand=TestBounce1D._example_numerator, @@ -1089,13 +1090,13 @@ def test_integrate_checks(self): # 6. Basic manipulation of the output. # Sum all bounce integrals across each particular field line. - avg_sum = np.sum(avg, axis=-1) + avg_sum = avg.sum(axis=-1) # Group the averages by field line. 
avg_sum = avg_sum.reshape(pitch.shape[0], rho.size, alpha.size) # The sum stored at index i, j which denote some flux surface and field line i, j = 0, 0 print(avg_sum[:, i, j]) - # This is the summed bounce average over all wells along the field line + # is the summed bounce average over all wells along the field line # given by the field line following coordinates at index [i, j] of nodes nodes = grid.source_grid.meshgrid_reshape(grid.source_grid.nodes, "raz") print(nodes[i, j]) @@ -1141,7 +1142,7 @@ def dg_dz(z): np.testing.assert_allclose( h(argmin), func( - h(zeta), + h=h(zeta), z1=np.array(0, ndmin=3), z2=np.array(2 * np.pi, ndmin=3), knots=zeta, @@ -1166,7 +1167,7 @@ def drift_analytic(data): integration as input. pitch : jnp.ndarray Shape (P, ). - Pitch values used. + 1/λ values used. """ B = data["|B|"] / data["Bref"] @@ -1230,12 +1231,14 @@ def drift_analytic(data): np.testing.assert_allclose(gbdrift, gbdrift_analytic_low_order, atol=1e-2) np.testing.assert_allclose(cvdrift, cvdrift_analytic_low_order, atol=2e-2) - pitch = get_pitch(np.min(B), np.max(B), 100)[1:] - k2 = 0.5 * ((1 - pitch * B0) / (epsilon * pitch * B0) + 1) + # Exclude singularity not captured by analytic approximation for pitch near + # the maximum |B|. (This is captured by the numerical integration). + pitch = get_pitch(np.min(B), np.max(B), 100)[:-1] + k2 = 0.5 * ((1 - B0 / pitch) / (epsilon * B0 / pitch) + 1) I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 = ( TestBounce1DQuadrature.elliptic_incomplete(k2) ) - y = np.sqrt(2 * epsilon * pitch * B0) + y = np.sqrt(2 * epsilon * B0 / pitch) I_0, I_2, I_4, I_6 = map(lambda I: I / y, (I_0, I_2, I_4, I_6)) I_1, I_3, I_5, I_7 = map(lambda I: I * y, (I_1, I_3, I_5, I_7)) @@ -1256,13 +1259,13 @@ def drift_analytic(data): @staticmethod def drift_num_integrand(cvdrift, gbdrift, B, pitch): """Integrand of numerator of bounce averaged binormal drift.""" - g = jnp.sqrt(1 - pitch * B) + g = jnp.sqrt(1 - B / pitch) return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) @staticmethod def drift_den_integrand(B, pitch): """Integrand of denominator of bounce averaged binormal drift.""" - return 1 / jnp.sqrt(1 - pitch * B) + return 1 / jnp.sqrt(1 - B / pitch) @pytest.mark.unit @pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_1d) @@ -1355,8 +1358,8 @@ def test_binormal_drift_bounce1d(self): ) fig, ax = plt.subplots() - ax.plot(1 / pitch, drift_analytic) - ax.plot(1 / pitch, drift_numerical) + ax.plot(pitch, drift_analytic) + ax.plot(pitch, drift_numerical) return fig @staticmethod @@ -1364,20 +1367,19 @@ def _test_bounce_autodiff(bounce, integrand, **kwargs): """Make sure reverse mode AD works correctly on this algorithm.""" def integrand_grad(*args, **kwargs2): - return jnp.vectorize( + grad_fun = jnp.vectorize( grad(integrand, -1), signature="()," * len(kwargs["f"]) + "(),()->()" - )(*args, *kwargs2.values()) + ) + return grad_fun(*args, *kwargs2.values()) def fun1(pitch): - return jnp.sum(bounce.integrate(pitch, integrand, check=False, **kwargs)) + return bounce.integrate(pitch, integrand, check=False, **kwargs).sum() def fun2(pitch): - return jnp.sum( - bounce.integrate(pitch, integrand_grad, check=True, **kwargs) - ) + return bounce.integrate(pitch, integrand_grad, check=True, **kwargs).sum() pitch = 1.0 - truth = 650 # Extrapolated from plot. - assert np.isclose(grad(fun1)(pitch), truth, rtol=1e-3) + truth = -650 # Extrapolated from plot. + np.testing.assert_allclose(grad(fun1)(pitch), truth, rtol=1e-3) # Make sure bounce points get differentiated too. 
- assert np.isclose(fun2(pitch), -131750, rtol=1e-1) + np.testing.assert_allclose(fun2(pitch), 131750, rtol=1e-1) diff --git a/tests/test_quad_utils.py b/tests/test_quad_utils.py index a23b81c8d8..c5a0b670dc 100644 --- a/tests/test_quad_utils.py +++ b/tests/test_quad_utils.py @@ -19,8 +19,8 @@ @pytest.mark.unit -def test_composite_linspace(): - """Test this utility function useful for Newton-Cotes integration over pitch.""" +def test_get_pitch(): + """Test this utility function which is used integration over pitch.""" B_min_tz = np.array([0.1, 0.2]) B_max_tz = np.array([1, 3]) breaks = np.linspace(B_min_tz, B_max_tz, num=5) From add9aaf24ca1c4e19879fd07d4d766f0a6c5e3fb Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 28 Aug 2024 13:40:17 -0400 Subject: [PATCH 228/241] Fix claim for number of wells in an unoptimized stellarator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In general the number of wells is upper bound by the maximum frequency along the field line of the Fourier representation in straight field line coordinates of M + iota N. g : ϑ, ϕ ↦ ∑ₘₙ aₘₙ exp(j [mϑ + nϕ]) g : α, ϕ ↦ ∑ₘₙ aₘₙ exp(j [mα + (m ι + n)ϕ]) --- desc/integrals/bounce_integral.py | 10 ++++++---- desc/integrals/bounce_utils.py | 5 +++-- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 831715925f..93bea0651a 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -223,8 +223,9 @@ def points(self, pitch, num_well=None): num_well : int or None Specify to return the first ``num_well`` pairs of bounce points for each pitch along each field line. This is useful if ``num_well`` tightly - bounds the actual number. As a reference, there are typically at most 5 - wells per toroidal transit for a given pitch. + bounds the actual number. As a reference, there are typically 20 wells + per toroidal transit for a given pitch. You can check this by plotting + the field lines with the ``check_points`` method. If not specified, then all bounce points are returned. If there were fewer wells detected along a field line than the size of the last axis of the @@ -327,8 +328,9 @@ def integrate( num_well : int or None Specify to return the first ``num_well`` pairs of bounce points for each pitch along each field line. This is useful if ``num_well`` tightly - bounds the actual number. As a reference, there are typically at most 5 - wells per toroidal transit for a given pitch. + bounds the actual number. As a reference, there are typically 20 wells + per toroidal transit for a given pitch. You can check this by plotting + the field lines with the ``check_points`` method. If not specified, then all bounce points are returned. If there were fewer wells detected along a field line than the size of the last axis of the diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index babf1c3f59..4ae5357a7a 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -138,8 +138,9 @@ def bounce_points( num_well : int or None Specify to return the first ``num_well`` pairs of bounce points for each pitch along each field line. This is useful if ``num_well`` tightly - bounds the actual number. As a reference, there are typically at most 5 - wells per toroidal transit for a given pitch. + bounds the actual number. As a reference, there are typically 20 wells + per toroidal transit for a given pitch. 
You can check this by plotting + the field lines with the ``_check_bounce_points`` method. If not specified, then all bounce points are returned. If there were fewer wells detected along a field line than the size of the last axis of the From b17e5135d82444468bdc3d066afd59dc45f83398 Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 28 Aug 2024 14:44:37 -0400 Subject: [PATCH 229/241] Add description to test requested by Rory --- desc/integrals/bounce_integral.py | 2 +- tests/test_integrals.py | 30 ++++++++++++++++++++++++++---- tests/test_quad_utils.py | 4 ++-- 3 files changed, 29 insertions(+), 7 deletions(-) diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 93bea0651a..eb2a9e97e8 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -80,7 +80,7 @@ class Bounce1D: strictly increasing and preferably uniformly spaced. These are used as knots to construct splines; a reference knot density is 100 knots per toroidal transit. - Also note the argument ``pitch`` in the below method is defined as + Also note the argument ``pitch`` in the below methods is defined as 1/λ ~ E/μ = energy / magnetic moment. Examples diff --git a/tests/test_integrals.py b/tests/test_integrals.py index 3648ac3a5e..6fa6d0c4d9 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -1364,7 +1364,22 @@ def test_binormal_drift_bounce1d(self): @staticmethod def _test_bounce_autodiff(bounce, integrand, **kwargs): - """Make sure reverse mode AD works correctly on this algorithm.""" + """Make sure reverse mode AD works correctly on this algorithm. + + We use non-differentiable operations throughout the computations. + See https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html + and https://jax.readthedocs.io/en/latest/faq.html# + why-are-gradients-zero-for-functions-based-on-sort-order. + If the AD tool works properly, then these operations should be assigned + zero gradients while the gradients wrt parameters of our physics computations + will accumulate correctly. Less mature AD tools may have subtle bugs that + cause the gradients to not accumulate correctly (there's at least a few + GitHub issues that JAX has fixed related to this in past). + + This test first confirms the gradients computed by reverse mode AD matches + the analytic approximation of the true gradient. + + """ def integrand_grad(*args, **kwargs2): grad_fun = jnp.vectorize( @@ -1379,7 +1394,14 @@ def fun2(pitch): return bounce.integrate(pitch, integrand_grad, check=True, **kwargs).sum() pitch = 1.0 - truth = -650 # Extrapolated from plot. - np.testing.assert_allclose(grad(fun1)(pitch), truth, rtol=1e-3) - # Make sure bounce points get differentiated too. + # Compare against analytic approximation of true gradient; + # extrapolated from plot of analytic expression. + np.testing.assert_allclose(grad(fun1)(pitch), -650, rtol=1e-3) + # Leibniz rule for differentiating pitch angles in integrand. + # This should differ significantly from above because the bounce points + # are functions of pitch angles. The gradient is much larger because + # the derivative wrt bounce points is not included, which smooths the function. + # This is good to confirm that the AD of bounce points smooths the gradient + # rather than adding artificial noise due to incorrect accumulation of gradient + # from non-differentiable operations in the algorithm. 
np.testing.assert_allclose(fun2(pitch), 131750, rtol=1e-1) diff --git a/tests/test_quad_utils.py b/tests/test_quad_utils.py index c5a0b670dc..07dfcd85e6 100644 --- a/tests/test_quad_utils.py +++ b/tests/test_quad_utils.py @@ -19,8 +19,8 @@ @pytest.mark.unit -def test_get_pitch(): - """Test this utility function which is used integration over pitch.""" +def test_composite_linspace(): + """Test this utility function which is used for integration over pitch.""" B_min_tz = np.array([0.1, 0.2]) B_max_tz = np.array([1, 3]) breaks = np.linspace(B_min_tz, B_max_tz, num=5) From e4dcd2eff599eea2d8a2229976c9435f848d25d7 Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 28 Aug 2024 20:20:05 -0400 Subject: [PATCH 230/241] Clarify test as requested by @f0uriest --- desc/integrals/bounce_integral.py | 49 +++++----- desc/integrals/bounce_utils.py | 93 +++++++++--------- tests/test_integrals.py | 156 +++++++++++++++++------------- 3 files changed, 160 insertions(+), 138 deletions(-) diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index eb2a9e97e8..19583de69d 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -8,7 +8,7 @@ _check_bounce_points, bounce_points, bounce_quadrature, - get_pitch, + get_pitch_inv, interp_to_argmin, plot_ppoly, ) @@ -80,9 +80,6 @@ class Bounce1D: strictly increasing and preferably uniformly spaced. These are used as knots to construct splines; a reference knot density is 100 knots per toroidal transit. - Also note the argument ``pitch`` in the below methods is defined as - 1/λ ~ E/μ = energy / magnetic moment. - Examples -------- See ``tests/test_integrals.py::TestBounce1D::test_integrate_checks``. @@ -101,7 +98,7 @@ class Bounce1D: """ plot_ppoly = staticmethod(plot_ppoly) - get_pitch = staticmethod(get_pitch) + get_pitch_inv = staticmethod(get_pitch_inv) def __init__( self, @@ -209,17 +206,17 @@ def reshape_data(grid, *arys): f = [grid.meshgrid_reshape(d, "raz").reshape(-1, grid.num_zeta) for d in arys] return f - def points(self, pitch, num_well=None): + def points(self, pitch_inv, num_well=None): """Compute bounce points. Parameters ---------- - pitch : jnp.ndarray + pitch_inv : jnp.ndarray Shape must broadcast with (P, L * M). 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is - specified by ``pitch[...,ρ]`` where in the latter the labels (ρ,α) are - interpreted as the index into the last axis that corresponds to that field - line. If two-dimensional, the first axis is the batch axis. + specified by ``pitch_inv[...,ρ]`` where in the latter the labels + (ρ,α) are interpreted as the index into the last axis that corresponds to + that field line. If two-dimensional, the first axis is the batch axis. num_well : int or None Specify to return the first ``num_well`` pairs of bounce points for each pitch along each field line. This is useful if ``num_well`` tightly @@ -244,9 +241,9 @@ def points(self, pitch, num_well=None): line and pitch, is padded with zero. """ - return bounce_points(pitch, self._zeta, self._B, self._dB_dz, num_well) + return bounce_points(pitch_inv, self._zeta, self._B, self._dB_dz, num_well) - def check_points(self, z1, z2, pitch, plot=True, **kwargs): + def check_points(self, z1, z2, pitch_inv, plot=True, **kwargs): """Check that bounce points are computed correctly. Parameters @@ -256,12 +253,12 @@ def check_points(self, z1, z2, pitch, plot=True, **kwargs): ζ coordinates of bounce points. 
The points are ordered and grouped such that the straight line path between ``z1`` and ``z2`` resides in the epigraph of |B|. - pitch : jnp.ndarray + pitch_inv : jnp.ndarray Shape must broadcast with (P, L * M). 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is - specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are - interpreted as the index into the last axis that corresponds to that field - line. If two-dimensional, the first axis is the batch axis. + specified by ``pitch_inv[...,(ρ,α)]`` where in the latter the labels + (ρ,α) are interpreted as the index into the last axis that corresponds to + that field line. If two-dimensional, the first axis is the batch axis. plot : bool Whether to plot stuff. kwargs @@ -276,7 +273,7 @@ def check_points(self, z1, z2, pitch, plot=True, **kwargs): return _check_bounce_points( z1=z1, z2=z2, - pitch=jnp.atleast_2d(pitch), + pitch_inv=jnp.atleast_2d(pitch_inv), knots=self._zeta, B=self._B, plot=plot, @@ -285,7 +282,7 @@ def check_points(self, z1, z2, pitch, plot=True, **kwargs): def integrate( self, - pitch, + pitch_inv, integrand, f=None, weight=None, @@ -297,16 +294,16 @@ def integrate( """Bounce integrate ∫ f(ℓ) dℓ. Computes the bounce integral ∫ f(ℓ) dℓ for every specified field line - for every λ value in ``pitch``. + for every λ value in ``pitch_inv``. Parameters ---------- - pitch : jnp.ndarray + pitch_inv : jnp.ndarray Shape must broadcast with (P, L * M). 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is - specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are - interpreted as the index into the last axis that corresponds to that field - line. If two-dimensional, the first axis is the batch axis. + specified by ``pitch_inv[...,(ρ,α)]`` where in the latter the labels + (ρ,α) are interpreted as the index into the last axis that corresponds to + that field line. If two-dimensional, the first axis is the batch axis. integrand : callable The composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the @@ -352,14 +349,14 @@ def integrate( Last axis enumerates the bounce integrals. """ - pitch = jnp.atleast_2d(pitch) - z1, z2 = self.points(pitch, num_well) + pitch_inv = jnp.atleast_2d(pitch_inv) + z1, z2 = self.points(pitch_inv, num_well) result = bounce_quadrature( x=self._x, w=self._w, z1=z1, z2=z2, - pitch=pitch, + pitch_inv=pitch_inv, integrand=integrand, f=setdefault(f, []), data=self._data, diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index 4ae5357a7a..e9a9cff613 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -20,7 +20,7 @@ from desc.utils import atleast_3d_mid, errorif, setdefault, take_mask -def get_pitch(min_B, max_B, num, relative_shift=1e-6): +def get_pitch_inv(min_B, max_B, num, relative_shift=1e-6): """Return 1/λ values uniformly spaced between ``min_B`` and ``max_B``. Parameters @@ -37,9 +37,9 @@ def get_pitch(min_B, max_B, num, relative_shift=1e-6): Returns ------- - pitch : jnp.ndarray + pitch_inv : jnp.ndarray Shape (num + 2, *min_B.shape). - 1/λ values. Note ``pitch`` = 1/λ ~ E/μ = energy / magnetic moment. + 1/λ values. 
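    A minimal sketch of this sampling for scalar ``min_B`` and ``max_B``
    (the actual routine uses ``composite_linspace`` so the bounds may be arrays;
    the helper name below is illustrative, not part of the library):

    .. code-block:: python

        import numpy as np

        def get_pitch_inv_sketch(min_B, max_B, num, relative_shift=1e-6):
            # Shift slightly off the extrema so bounce points that ride an
            # extremum of |B| are detected despite floating point error.
            min_B = (1 + relative_shift) * min_B
            max_B = (1 - relative_shift) * max_B
            # Samples are uniformly spaced in |B| (equivalently in 1/λ), not in λ.
            return np.linspace(min_B, max_B, num + 2)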
""" # Floating point error impedes consistent detection of bounce points riding @@ -47,12 +47,12 @@ def get_pitch(min_B, max_B, num, relative_shift=1e-6): min_B = (1 + relative_shift) * min_B max_B = (1 - relative_shift) * max_B # Samples should be uniformly spaced in |B| and not λ (GitHub issue #1228). - pitch = composite_linspace(jnp.stack([min_B, max_B]), num) - assert pitch.shape == (num + 2, *min_B.shape) - return pitch + pitch_inv = composite_linspace(jnp.stack([min_B, max_B]), num) + assert pitch_inv.shape == (num + 2, *min_B.shape) + return pitch_inv -def _check_spline_shape(knots, g, dg_dz, pitch=None): +def _check_spline_shape(knots, g, dg_dz, pitch_inv=None): """Ensure inputs have compatible shape, and return them with full dimension. Parameters @@ -72,12 +72,12 @@ def _check_spline_shape(knots, g, dg_dz, pitch=None): First axis enumerates the coefficients of power series. Second axis enumerates the splines. Last axis enumerates the polynomials that compose a particular spline. - pitch : jnp.ndarray + pitch_inv : jnp.ndarray Shape must broadcast with (P, S). 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is - specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are - interpreted as the index into the last axis that corresponds to that field - line. If two-dimensional, the first axis is the batch axis. + specified by ``pitch_inv[...,(ρ,α)]`` where in the latter the labels + (ρ,α) are interpreted as the index into the last axis that corresponds to + that field line. If two-dimensional, the first axis is the batch axis. """ errorif(knots.ndim != 1, msg=f"knots should be 1d; got shape {knots.shape}.") @@ -97,29 +97,29 @@ def _check_spline_shape(knots, g, dg_dz, pitch=None): ) # Add axis which enumerates field lines if necessary. g, dg_dz = atleast_3d_mid(g, dg_dz) - if pitch is not None: - pitch = jnp.atleast_2d(pitch) + if pitch_inv is not None: + pitch_inv = jnp.atleast_2d(pitch_inv) errorif( - pitch.ndim != 2 - or not (pitch.shape[-1] == 1 or pitch.shape[-1] == g.shape[1]), - msg=f"Invalid shape {pitch.shape} for pitch angles.", + pitch_inv.ndim != 2 + or not (pitch_inv.shape[-1] == 1 or pitch_inv.shape[-1] == g.shape[1]), + msg=f"Invalid shape {pitch_inv.shape} for pitch angles.", ) - return g, dg_dz, pitch + return g, dg_dz, pitch_inv def bounce_points( - pitch, knots, B, dB_dz, num_well=None, check=False, plot=True, **kwargs + pitch_inv, knots, B, dB_dz, num_well=None, check=False, plot=True, **kwargs ): """Compute the bounce points given spline of |B| and pitch λ. Parameters ---------- - pitch : jnp.ndarray + pitch_inv : jnp.ndarray Shape must broadcast with (P, S). 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is - specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are - interpreted as the index into the last axis that corresponds to that field - line. If two-dimensional, the first axis is the batch axis. + specified by ``pitch_inv[...,(ρ,α)]`` where in the latter the labels + (ρ,α) are interpreted as the index into the last axis that corresponds to + that field line. If two-dimensional, the first axis is the batch axis. knots : jnp.ndarray Shape (knots.size, ). ζ coordinates of spline knots. Must be strictly increasing. @@ -165,12 +165,12 @@ def bounce_points( line and pitch, is padded with zero. 
""" - B, dB_dz, pitch = _check_spline_shape(knots, B, dB_dz, pitch) - P, S, degree = pitch.shape[0], B.shape[1], B.shape[0] - 1 + B, dB_dz, pitch_inv = _check_spline_shape(knots, B, dB_dz, pitch_inv) + P, S, degree = pitch_inv.shape[0], B.shape[1], B.shape[0] - 1 # Intersection points in local power basis. intersect = poly_root( c=B, - k=pitch[..., jnp.newaxis], + k=pitch_inv[..., jnp.newaxis], a_min=jnp.array([0.0]), a_max=jnp.diff(knots), sort=True, @@ -204,12 +204,12 @@ def bounce_points( z2 = jnp.where(mask, z2, 0.0) if check: - _check_bounce_points(z1, z2, pitch, knots, B, plot, **kwargs) + _check_bounce_points(z1, z2, pitch_inv, knots, B, plot, **kwargs) return z1, z2 -def _check_bounce_points(z1, z2, pitch, knots, B, plot=True, **kwargs): +def _check_bounce_points(z1, z2, pitch_inv, knots, B, plot=True, **kwargs): """Check that bounce points are computed correctly.""" eps = kwargs.pop("eps", jnp.finfo(jnp.array(1.0).dtype).eps * 10) kwargs.setdefault( @@ -235,7 +235,7 @@ def _check_bounce_points(z1, z2, pitch, knots, B, plot=True, **kwargs): Bs = PPoly(B[:, s], knots) for p in range(P): Bs_midpoint = Bs((z1[p, s] + z2[p, s]) / 2) - err_3 = jnp.any(Bs_midpoint > pitch[p, s] + eps) + err_3 = jnp.any(Bs_midpoint > pitch_inv[p, s] + eps) if not (err_1[p, s] or err_2[p, s] or err_3): continue _z1 = z1[p, s][mask[p, s]] @@ -245,7 +245,7 @@ def _check_bounce_points(z1, z2, pitch, knots, B, plot=True, **kwargs): ppoly=Bs, z1=_z1, z2=_z2, - k=pitch[p, s], + k=pitch_inv[p, s], **kwargs, ) @@ -254,7 +254,7 @@ def _check_bounce_points(z1, z2, pitch, knots, B, plot=True, **kwargs): assert not err_1[p, s], "Intersects have an inversion.\n" assert not err_2[p, s], "Detected discontinuity.\n" assert not err_3, ( - f"Detected |B| = {Bs_midpoint[mask[p, s]]} > {pitch[p, s] + eps} " + f"Detected |B| = {Bs_midpoint[mask[p, s]]} > {pitch_inv[p, s] + eps} " "= 1/λ in well, implying the straight line path between " "bounce points is in hypograph(|B|). Use more knots.\n" ) @@ -264,7 +264,7 @@ def _check_bounce_points(z1, z2, pitch, knots, B, plot=True, **kwargs): ppoly=Bs, z1=z1[:, s], z2=z2[:, s], - k=pitch[:, s], + k=pitch_inv[:, s], **kwargs, ) ) @@ -276,7 +276,7 @@ def bounce_quadrature( w, z1, z2, - pitch, + pitch_inv, integrand, f, data, @@ -301,12 +301,12 @@ def bounce_quadrature( ζ coordinates of bounce points. The points are ordered and grouped such that the straight line path between ``z1`` and ``z2`` resides in the epigraph of |B|. - pitch : jnp.ndarray + pitch_inv : jnp.ndarray Shape must broadcast with (P, S). 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is - specified by ``pitch[...,(ρ,α)]`` where in the latter the labels (ρ,α) are - interpreted as the index into the last axis that corresponds to that field - line. If two-dimensional, the first axis is the batch axis. + specified by ``pitch_inv[...,(ρ,α)]`` where in the latter the labels + (ρ,α) are interpreted as the index into the last axis that corresponds to + that field line. If two-dimensional, the first axis is the batch axis. integrand : callable The composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. 
It should accept the @@ -347,7 +347,7 @@ def bounce_quadrature( """ errorif(z1.ndim != 3 or z1.shape != z2.shape) errorif(x.ndim != 1 or x.shape != w.shape) - pitch = jnp.atleast_2d(pitch) + pitch_inv = jnp.atleast_2d(pitch_inv) if not isinstance(f, (list, tuple)): f = [f] @@ -356,7 +356,7 @@ def bounce_quadrature( result = _interpolate_and_integrate( w=w, Q=bijection_from_disc(x, z1[..., jnp.newaxis], z2[..., jnp.newaxis]), - pitch=pitch, + pitch_inv=pitch_inv, integrand=integrand, f=f, data=data, @@ -375,7 +375,7 @@ def loop(z): return None, _interpolate_and_integrate( w=w, Q=bijection_from_disc(x, z1[..., jnp.newaxis], z2[..., jnp.newaxis]), - pitch=pitch, + pitch_inv=pitch_inv, integrand=integrand, f=f, data=data, @@ -392,14 +392,14 @@ def loop(z): ) result = result * grad_bijection_from_disc(z1, z2) - assert result.shape == (pitch.shape[0], data["|B|"].shape[0], z1.shape[-1]) + assert result.shape == (pitch_inv.shape[0], data["|B|"].shape[0], z1.shape[-1]) return result def _interpolate_and_integrate( w, Q, - pitch, + pitch_inv, integrand, f, data, @@ -429,9 +429,12 @@ def _interpolate_and_integrate( Quadrature for every pitch. """ - assert pitch.ndim == 2 + assert pitch_inv.ndim == 2 assert w.ndim == knots.ndim == 1 - assert 3 <= Q.ndim <= 4 and Q.shape[:2] == (pitch.shape[0], data["|B|"].shape[0]) + assert 3 <= Q.ndim <= 4 and Q.shape[:2] == ( + pitch_inv.shape[0], + data["|B|"].shape[0], + ) assert Q.shape[-1] == w.size assert knots.size == data["|B|"].shape[-1] assert ( @@ -441,7 +444,7 @@ def _interpolate_and_integrate( == data["|B|_z|r,a"].shape ) - pitch = jnp.expand_dims(pitch, axis=(2, 3) if (Q.ndim == 4) else 2) + pitch_inv = jnp.expand_dims(pitch_inv, axis=(2, 3) if (Q.ndim == 4) else 2) shape = Q.shape Q = Q.reshape(Q.shape[0], Q.shape[1], -1) b_sup_z = interp1d_Hermite_vec( @@ -455,7 +458,7 @@ def _interpolate_and_integrate( # Spline each function separately so that operations in the integrand # that do not preserve smoothness can be captured. 
f = [interp1d_vec(Q, knots, f_i, method=method).reshape(shape) for f_i in f] - result = jnp.dot(integrand(*f, B=B, pitch=pitch) / b_sup_z, w) + result = jnp.dot(integrand(*f, B=B, pitch=1 / pitch_inv) / b_sup_z, w) if check: _check_interp(Q.reshape(shape), f, b_sup_z, B, data["|B|_z|r,a"], result, plot) diff --git a/tests/test_integrals.py b/tests/test_integrals.py index 6fa6d0c4d9..08ffeb0042 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -37,7 +37,7 @@ from desc.integrals.bounce_utils import ( _get_extrema, bounce_points, - get_pitch, + get_pitch_inv, interp_to_argmin, interp_to_argmin_hard, plot_ppoly, @@ -736,9 +736,9 @@ def test_z1_first(self): end = 6 * np.pi knots = np.linspace(start, end, 5) B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots)) - pitch = 0.5 - intersect = B.solve(pitch, extrapolate=False) - z1, z2 = bounce_points(pitch, knots, B.c, B.derivative().c, check=True) + pitch_inv = 0.5 + intersect = B.solve(pitch_inv, extrapolate=False) + z1, z2 = bounce_points(pitch_inv, knots, B.c, B.derivative().c, check=True) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size np.testing.assert_allclose(z1, intersect[0::2]) @@ -751,9 +751,9 @@ def test_z2_first(self): end = -start k = np.linspace(start, end, 5) B = CubicHermiteSpline(k, np.cos(k), -np.sin(k)) - pitch = 0.5 - intersect = B.solve(pitch, extrapolate=False) - z1, z2 = bounce_points(pitch, k, B.c, B.derivative().c, check=True) + pitch_inv = 0.5 + intersect = B.solve(pitch_inv, extrapolate=False) + z1, z2 = bounce_points(pitch_inv, k, B.c, B.derivative().c, check=True) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size np.testing.assert_allclose(z1, intersect[1:-1:2]) @@ -771,11 +771,11 @@ def test_z1_before_extrema(self): k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) ) dB_dz = B.derivative() - pitch = B(dB_dz.roots(extrapolate=False))[3] - 1e-13 - z1, z2 = bounce_points(pitch, k, B.c, dB_dz.c, check=True) + pitch_inv = B(dB_dz.roots(extrapolate=False))[3] - 1e-13 + z1, z2 = bounce_points(pitch_inv, k, B.c, dB_dz.c, check=True) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size - intersect = B.solve(pitch, extrapolate=False) + intersect = B.solve(pitch_inv, extrapolate=False) np.testing.assert_allclose(z1[1], 1.982767, rtol=1e-6) np.testing.assert_allclose(z1, intersect[[1, 2]], rtol=1e-6) # intersect array could not resolve double root as single at index 2,3 @@ -796,11 +796,11 @@ def test_z2_before_extrema(self): -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 4, ) dB_dz = B.derivative() - pitch = B(dB_dz.roots(extrapolate=False))[2] - z1, z2 = bounce_points(pitch, k, B.c, dB_dz.c, check=True) + pitch_inv = B(dB_dz.roots(extrapolate=False))[2] + z1, z2 = bounce_points(pitch_inv, k, B.c, dB_dz.c, check=True) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size - intersect = B.solve(pitch, extrapolate=False) + intersect = B.solve(pitch_inv, extrapolate=False) np.testing.assert_allclose(z1, intersect[[0, -2]]) np.testing.assert_allclose(z2, intersect[[1, -1]]) @@ -817,14 +817,14 @@ def test_extrema_first_and_before_z1(self): -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 20, ) dB_dz = B.derivative() - pitch = B(dB_dz.roots(extrapolate=False))[2] + 1e-13 + pitch_inv = B(dB_dz.roots(extrapolate=False))[2] + 1e-13 z1, z2 = bounce_points( - pitch, k[2:], B.c[:, 2:], dB_dz.c[:, 2:], check=True, plot=False + pitch_inv, k[2:], B.c[:, 2:], dB_dz.c[:, 2:], check=True, plot=False ) - plot_ppoly(B, z1=z1, z2=z2, k=pitch, start=k[2]) 
+ plot_ppoly(B, z1=z1, z2=z2, k=pitch_inv, start=k[2]) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size - intersect = B.solve(pitch, extrapolate=False) + intersect = B.solve(pitch_inv, extrapolate=False) np.testing.assert_allclose(z1[0], 0.835319, rtol=1e-6) intersect = intersect[intersect >= k[2]] np.testing.assert_allclose(z1, intersect[[0, 2, 4]], rtol=1e-6) @@ -843,12 +843,12 @@ def test_extrema_first_and_before_z2(self): -np.sin(k) - 4 * np.cos(-2 * k) + 1 / 10, ) dB_dz = B.derivative() - pitch = B(dB_dz.roots(extrapolate=False))[1] - 1e-13 - z1, z2 = bounce_points(pitch, k, B.c, dB_dz.c, check=True) + pitch_inv = B(dB_dz.roots(extrapolate=False))[1] - 1e-13 + z1, z2 = bounce_points(pitch_inv, k, B.c, dB_dz.c, check=True) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size # Our routine correctly detects intersection, while scipy, jnp.root fails. - intersect = B.solve(pitch, extrapolate=False) + intersect = B.solve(pitch_inv, extrapolate=False) np.testing.assert_allclose(z1[0], -0.671904, rtol=1e-6) np.testing.assert_allclose(z1, intersect[[0, 3, 5]], rtol=1e-5) # intersect array could not resolve double root as single at index 0,1 @@ -916,16 +916,16 @@ def test_bounce_quadrature(self, is_strong, quad, automorphism): z1 = -np.pi / 2 * v z2 = -z1 knots = np.linspace(z1, z2, 50) - pitch = 1 - 50 * jnp.finfo(jnp.array(1.0).dtype).eps + pitch_inv = 1 - 50 * jnp.finfo(jnp.array(1.0).dtype).eps b = np.clip(np.sin(knots / v) ** 2, 1e-7, 1) db = np.sin(2 * knots / v) / v data = {"B^zeta": b, "B^zeta_z|r,a": db, "|B|": b, "|B|_z|r,a": db} if is_strong: - integrand = lambda B, pitch: 1 / jnp.sqrt(1 - m * B / pitch) + integrand = lambda B, pitch: 1 / jnp.sqrt(1 - m * pitch * B) truth = v * 2 * ellipkm1(p) else: - integrand = lambda B, pitch: jnp.sqrt(1 - m * B / pitch) + integrand = lambda B, pitch: jnp.sqrt(1 - m * pitch * B) truth = v * 2 * ellipe(m) kwargs = {} if automorphism != "default": @@ -937,7 +937,7 @@ def test_bounce_quadrature(self, is_strong, quad, automorphism): check=True, **kwargs, ) - result = bounce.integrate(pitch, integrand, check=True) + result = bounce.integrate(pitch_inv, integrand, check=True) assert np.count_nonzero(result) == 1 np.testing.assert_allclose(result.sum(), truth, rtol=1e-4) @@ -1035,13 +1035,12 @@ class TestBounce1D: @staticmethod def _example_numerator(g_zz, B, pitch): - # Note ``pitch`` = 1/λ ~ E/μ = energy / magnetic moment. - f = (1 - 0.5 * B / pitch) * g_zz - return safediv(f, jnp.sqrt(jnp.abs(1 - B / pitch))) + f = (1 - 0.5 * pitch * B) * g_zz + return safediv(f, jnp.sqrt(jnp.abs(1 - pitch * B))) @staticmethod def _example_denominator(B, pitch): - return safediv(1, jnp.sqrt(jnp.abs(1 - B / pitch))) + return safediv(1, jnp.sqrt(jnp.abs(1 - pitch * B))) @pytest.mark.unit def test_integrate_checks(self): @@ -1071,17 +1070,17 @@ def test_integrate_checks(self): ) # 5. Make the bounce integration operator. bounce = Bounce1D(grid.source_grid, data, quad=leggauss(3), check=True) - pitch = bounce.get_pitch( + pitch_inv = bounce.get_pitch_inv( grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 ) num = bounce.integrate( - pitch, + pitch_inv, integrand=TestBounce1D._example_numerator, f=Bounce1D.reshape_data(grid.source_grid, data["g_zz"]), check=True, ) den = bounce.integrate( - pitch, + pitch_inv, integrand=TestBounce1D._example_denominator, check=True, ) @@ -1092,7 +1091,7 @@ def test_integrate_checks(self): # Sum all bounce integrals across each particular field line. 
avg_sum = avg.sum(axis=-1) # Group the averages by field line. - avg_sum = avg_sum.reshape(pitch.shape[0], rho.size, alpha.size) + avg_sum = avg_sum.reshape(pitch_inv.shape[0], rho.size, alpha.size) # The sum stored at index i, j which denote some flux surface and field line i, j = 0, 0 print(avg_sum[:, i, j]) @@ -1100,12 +1099,12 @@ def test_integrate_checks(self): # given by the field line following coordinates at index [i, j] of nodes nodes = grid.source_grid.meshgrid_reshape(grid.source_grid.nodes, "raz") print(nodes[i, j]) - # for the pitch values stored in index [:, i, j] of - print(pitch.reshape(pitch.shape[0], rho.size, alpha.size)[:, i, j]) + # for the 1/pitch values stored in index [:, i, j] of + print(pitch_inv.reshape(pitch_inv.shape[0], rho.size, alpha.size)[:, i, j]) # 7. Plotting utilities. - z1, z2 = bounce.points(pitch) - plots = bounce.check_points(z1, z2, pitch) # noqa: F841 + z1, z2 = bounce.points(pitch_inv) + plots = bounce.check_points(z1, z2, pitch_inv) # noqa: F841 @pytest.mark.unit @pytest.mark.parametrize("func", [interp_to_argmin, interp_to_argmin_hard]) @@ -1165,7 +1164,7 @@ def drift_analytic(data): Numerically computed ``data["cvdrift"]` and ``data["gbdrift"]`` normalized by some scale factors for this unit test. These should be fed to the bounce integration as input. - pitch : jnp.ndarray + pitch_inv : jnp.ndarray Shape (P, ). 1/λ values used. @@ -1233,12 +1232,12 @@ def drift_analytic(data): # Exclude singularity not captured by analytic approximation for pitch near # the maximum |B|. (This is captured by the numerical integration). - pitch = get_pitch(np.min(B), np.max(B), 100)[:-1] - k2 = 0.5 * ((1 - B0 / pitch) / (epsilon * B0 / pitch) + 1) + pitch_inv = get_pitch_inv(np.min(B), np.max(B), 100)[:-1] + k2 = 0.5 * ((1 - B0 / pitch_inv) / (epsilon * B0 / pitch_inv) + 1) I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 = ( TestBounce1DQuadrature.elliptic_incomplete(k2) ) - y = np.sqrt(2 * epsilon * B0 / pitch) + y = np.sqrt(2 * epsilon * B0 / pitch_inv) I_0, I_2, I_4, I_6 = map(lambda I: I / y, (I_0, I_2, I_4, I_6)) I_1, I_3, I_5, I_7 = map(lambda I: I * y, (I_1, I_3, I_5, I_7)) @@ -1254,18 +1253,18 @@ def drift_analytic(data): ) / G0 drift_analytic_den = I_0 / G0 drift_analytic = drift_analytic_num / drift_analytic_den - return drift_analytic, cvdrift, gbdrift, pitch + return drift_analytic, cvdrift, gbdrift, pitch_inv @staticmethod def drift_num_integrand(cvdrift, gbdrift, B, pitch): """Integrand of numerator of bounce averaged binormal drift.""" - g = jnp.sqrt(1 - B / pitch) + g = jnp.sqrt(1 - pitch * B) return (cvdrift * g) - (0.5 * g * gbdrift) + (0.5 * gbdrift / g) @staticmethod def drift_den_integrand(B, pitch): """Integrand of denominator of bounce averaged binormal drift.""" - return 1 / jnp.sqrt(1 - B / pitch) + return 1 / jnp.sqrt(1 - pitch * B) @pytest.mark.unit @pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_1d) @@ -1318,7 +1317,7 @@ def test_binormal_drift_bounce1d(self): data["shear"] = grid.compress(data["shear"]) # Compute analytic approximation. - drift_analytic, cvdrift, gbdrift, pitch = TestBounce1D.drift_analytic(data) + drift_analytic, cvdrift, gbdrift, pitch_inv = TestBounce1D.drift_analytic(data) # Compute numerical result. 
bounce = Bounce1D( grid.source_grid, @@ -1330,14 +1329,14 @@ def test_binormal_drift_bounce1d(self): ) f = Bounce1D.reshape_data(grid.source_grid, cvdrift, gbdrift) drift_numerical_num = bounce.integrate( - pitch=pitch[:, np.newaxis], + pitch_inv=pitch_inv[:, np.newaxis], integrand=TestBounce1D.drift_num_integrand, f=f, num_well=1, check=True, ) drift_numerical_den = bounce.integrate( - pitch=pitch[:, np.newaxis], + pitch_inv=pitch_inv[:, np.newaxis], integrand=TestBounce1D.drift_den_integrand, num_well=1, weight=np.ones(zeta.size), @@ -1350,7 +1349,7 @@ def test_binormal_drift_bounce1d(self): drift_numerical, drift_analytic, atol=5e-3, rtol=5e-2 ) - self._test_bounce_autodiff( + TestBounce1D._test_bounce_autodiff( bounce, TestBounce1D.drift_num_integrand, f=f, @@ -1358,26 +1357,49 @@ def test_binormal_drift_bounce1d(self): ) fig, ax = plt.subplots() - ax.plot(pitch, drift_analytic) - ax.plot(pitch, drift_numerical) + ax.plot(pitch_inv, drift_analytic) + ax.plot(pitch_inv, drift_numerical) return fig @staticmethod def _test_bounce_autodiff(bounce, integrand, **kwargs): """Make sure reverse mode AD works correctly on this algorithm. - We use non-differentiable operations throughout the computations. + Non-differentiable operations (e.g. ``take_mask``) are used in computation. See https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html and https://jax.readthedocs.io/en/latest/faq.html# why-are-gradients-zero-for-functions-based-on-sort-order. + If the AD tool works properly, then these operations should be assigned zero gradients while the gradients wrt parameters of our physics computations - will accumulate correctly. Less mature AD tools may have subtle bugs that - cause the gradients to not accumulate correctly (there's at least a few - GitHub issues that JAX has fixed related to this in past). + accumulate correctly. Less mature AD tools may have subtle bugs that cause + the gradients to not accumulate correctly. (There's more than a few + GitHub issues that JAX has fixed related to this in the past!) This test first confirms the gradients computed by reverse mode AD matches - the analytic approximation of the true gradient. + the analytic approximation of the true gradient. Then we confirm that the + partial gradients wrt the integrand and bounce points are correct. + + Apply the Leibniz integral rule + https://en.wikipedia.org/wiki/Leibniz_integral_rule, with + the label w summing over the magnetic wells: + + ∂_λ ∑_w ∫_ζ₁^ζ₂ f dζ (λ) = ∑_w [ + ∫_ζ₁^ζ₂ (∂f/∂λ)(λ) dζ + + f(λ,ζ₂) (∂ζ₂/∂λ)(λ) + - f(λ,ζ₁) (∂ζ₁/∂λ)(λ) + ] + where (∂ζ₁/∂λ)(λ) = -λ² / (∂|B|/∂ζ|ρ,α)(ζ₁) + (∂ζ₂/∂λ)(λ) = -λ² / (∂|B|/∂ζ|ρ,α)(ζ₂) + + All terms in these expressions are known analytically. + If we wanted, it's simple to check explicitly that AD takes each derivative + correctly because |w| = 1 is constant and our tokamak has symmetry + (∂|B|/∂ζ|ρ,α)(ζ₁) = - (∂|B|/∂ζ|ρ,α)(ζ₂). + + After confirming the left hand side is correct, we just check that derivative + wrt bounce points of the right hand side doesn't vanish due to some zero + gradient issue mentioned above. 
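        For reference, the boundary derivatives above follow from implicit
        differentiation of the bounce condition λ |B|(ζᵢ(λ)) = 1 for i = 1, 2:

            |B|(ζᵢ) + λ (∂|B|/∂ζ|ρ,α)(ζᵢ) (∂ζᵢ/∂λ) = 0
            ⟹ (∂ζᵢ/∂λ)(λ) = −|B|(ζᵢ) / [λ (∂|B|/∂ζ|ρ,α)(ζᵢ)]
                           = −1 / [λ² (∂|B|/∂ζ|ρ,α)(ζᵢ)],

        where the last step uses |B|(ζᵢ) = 1/λ at a bounce point.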
""" @@ -1388,20 +1410,20 @@ def integrand_grad(*args, **kwargs2): return grad_fun(*args, *kwargs2.values()) def fun1(pitch): - return bounce.integrate(pitch, integrand, check=False, **kwargs).sum() + return bounce.integrate(1 / pitch, integrand, check=False, **kwargs).sum() def fun2(pitch): - return bounce.integrate(pitch, integrand_grad, check=True, **kwargs).sum() + return bounce.integrate( + 1 / pitch, integrand_grad, check=True, **kwargs + ).sum() pitch = 1.0 - # Compare against analytic approximation of true gradient; - # extrapolated from plot of analytic expression. - np.testing.assert_allclose(grad(fun1)(pitch), -650, rtol=1e-3) - # Leibniz rule for differentiating pitch angles in integrand. - # This should differ significantly from above because the bounce points - # are functions of pitch angles. The gradient is much larger because - # the derivative wrt bounce points is not included, which smooths the function. - # This is good to confirm that the AD of bounce points smooths the gradient - # rather than adding artificial noise due to incorrect accumulation of gradient - # from non-differentiable operations in the algorithm. - np.testing.assert_allclose(fun2(pitch), 131750, rtol=1e-1) + # can easily obtain from math or just extrapolate from analytic expression plot + analytic_approximation_of_gradient = 650 + np.testing.assert_allclose( + grad(fun1)(pitch), analytic_approximation_of_gradient, rtol=1e-3 + ) + # It is expected that this is much larger because the integrand is singular + # wrt λ but the boundary derivative: f(λ,ζ₂) (∂ζ₂/∂λ)(λ) - f(λ,ζ₁) (∂ζ₁/∂λ)(λ). + # smooths out because the bounce points ζ₁ and ζ₂ are smooth functions of λ. + np.testing.assert_allclose(fun2(pitch), -131750, rtol=1e-1) From 1c1fa96896cfcb6fe558b11b9352864081302996 Mon Sep 17 00:00:00 2001 From: unalmis Date: Wed, 28 Aug 2024 21:46:40 -0400 Subject: [PATCH 231/241] Make Bounce1D pytree and ioable and ensure eigh_tridiagonal is reverse mode diffable --- desc/integrals/bounce_integral.py | 17 ++++++++--------- desc/integrals/bounce_utils.py | 4 ++-- desc/io/optimizable_io.py | 13 +++++++------ tests/test_integrals.py | 4 ++-- tests/test_quad_utils.py | 10 ++++++++++ 5 files changed, 29 insertions(+), 19 deletions(-) diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 19583de69d..3fd3f0b1cc 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -18,10 +18,11 @@ get_quadrature, grad_automorphism_sin, ) +from desc.io import IOAble from desc.utils import setdefault, warnif -class Bounce1D: +class Bounce1D(IOAble): """Computes bounce integrals using one-dimensional local spline methods. The bounce integral is defined as ∫ f(ℓ) dℓ, where @@ -86,6 +87,8 @@ class Bounce1D: Attributes ---------- + required_names : list + Names in ``data_index`` required to compute bounce integrals. _B : jnp.ndarray TODO: Make this (4, M, L, N-1) now that tensor product in rho and alpha required as well after GitHub PR #1214. @@ -97,6 +100,7 @@ class Bounce1D: """ + required_names = ["B^zeta", "B^zeta_z|r,a", "|B|", "|B|_z|r,a"] plot_ppoly = staticmethod(plot_ppoly) get_pitch_inv = staticmethod(get_pitch_inv) @@ -121,7 +125,7 @@ def __init__( L = ``grid.num_rho``, M = ``grid.num_alpha``, and N = ``grid.num_zeta``. data : dict[str, jnp.ndarray] Data evaluated on ``grid``. - Must include names in ``Bounce1D.required_names()``. + Must include names in ``Bounce1D.required_names``. 
quad : (jnp.ndarray, jnp.ndarray) Quadrature points xₖ and weights wₖ for the approximate evaluation of an integral ∫₋₁¹ g(x) dx = ∑ₖ wₖ g(xₖ). Default is 32 points. @@ -157,8 +161,8 @@ def __init__( "|B|_z|r,a": data["|B|_z|r,a"] / Bref, # This is already the correct sign. } self._data = { - key: grid.meshgrid_reshape(val, "raz").reshape(-1, grid.num_zeta) - for key, val in data.items() + name: grid.meshgrid_reshape(data[name], "raz").reshape(-1, grid.num_zeta) + for name in Bounce1D.required_names } self._x, self._w = get_quadrature(quad, automorphism) @@ -181,11 +185,6 @@ def __init__( assert self._dB_dz.shape[0] == degree assert self._B.shape[-1] == self._dB_dz.shape[-1] == grid.num_zeta - 1 - @staticmethod - def required_names(): - """Return names in ``data_index`` required to compute bounce integrals.""" - return ["B^zeta", "B^zeta_z|r,a", "|B|", "|B|_z|r,a"] - @staticmethod def reshape_data(grid, *arys): """Reshape arrays for acceptable input to ``integrate``. diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index e9a9cff613..90d7a30273 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -319,7 +319,7 @@ def bounce_quadrature( These functions should be arguments to the callable ``integrand``. data : dict[str, jnp.ndarray] Data evaluated on ``grid`` and reshaped with ``Bounce1D.reshape_data``. - Must include names in ``Bounce1D.required_names()``. + Must include names in ``Bounce1D.required_names``. knots : jnp.ndarray Shape (knots.size, ). Unique ζ coordinates where the arrays in ``data`` and ``f`` were evaluated. @@ -420,7 +420,7 @@ def _interpolate_and_integrate( Quadrature points in ζ coordinates. data : dict[str, jnp.ndarray] Data evaluated on ``grid`` and reshaped with ``Bounce1D.reshape_data``. - Must include names in ``Bounce1D.required_names()``. + Must include names in ``Bounce1D.required_names``. Returns ------- diff --git a/desc/io/optimizable_io.py b/desc/io/optimizable_io.py index 554cdac070..e15a21756e 100644 --- a/desc/io/optimizable_io.py +++ b/desc/io/optimizable_io.py @@ -169,16 +169,17 @@ class IOAble(ABC, metaclass=_CombinedMeta): """Abstract Base Class for savable and loadable objects. Objects inheriting from this class can be saved and loaded via hdf5 or pickle. - To save properly, each object should have an attribute `_io_attrs_` which + To save properly, each object should have an attribute ``_io_attrs_`` which is a list of strings of the object attributes or properties that should be saved and loaded. - For saved objects to be loaded correctly, the __init__ method of any custom - types being saved should only assign attributes that are listed in `_io_attrs_`. + For saved objects to be loaded correctly, the ``__init__`` method of any custom + types being saved should only assign attributes that are listed in ``_io_attrs_``. Other attributes or other initialization should be done in a separate - `set_up()` method that can be called during __init__. The loading process - will involve creating an empty object, bypassing init, then setting any `_io_attrs_` - of the object, then calling `_set_up()` without any arguments, if it exists. + ``set_up()`` method that can be called during ``__init__``. The loading process + will involve creating an empty object, bypassing init, then setting any + ``_io_attrs_`` of the object, then calling ``_set_up()`` without any arguments, + if it exists. 
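The ``_io_attrs_`` contract described above is what lets ``Bounce1D`` become saveable in this patch. A hypothetical minimal subclass, purely to illustrate the convention (``ToySpline`` does not exist in the codebase):

.. code-block:: python

    from desc.io import IOAble

    class ToySpline(IOAble):
        """Hypothetical example of the ``_io_attrs_`` convention."""

        _io_attrs_ = ["_knots", "_coef"]  # only these are saved and restored

        def __init__(self, knots, coef):
            # __init__ assigns nothing beyond what is listed in _io_attrs_;
            # any derived state would be rebuilt in a separate _set_up().
            self._knots = knots
            self._coef = coef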
""" diff --git a/tests/test_integrals.py b/tests/test_integrals.py index 08ffeb0042..dd14cc0dcd 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -1066,7 +1066,7 @@ def test_integrate_checks(self): ) # 4. Compute input data. data = eq.compute( - Bounce1D.required_names() + ["min_tz |B|", "max_tz |B|", "g_zz"], grid=grid + Bounce1D.required_names + ["min_tz |B|", "max_tz |B|", "g_zz"], grid=grid ) # 5. Make the bounce integration operator. bounce = Bounce1D(grid.source_grid, data, quad=leggauss(3), check=True) @@ -1292,7 +1292,7 @@ def test_binormal_drift_bounce1d(self): iota=iota, ) data = eq.compute( - Bounce1D.required_names() + Bounce1D.required_names + [ "cvdrift", "gbdrift", diff --git a/tests/test_quad_utils.py b/tests/test_quad_utils.py index 07dfcd85e6..5a7c3d00e7 100644 --- a/tests/test_quad_utils.py +++ b/tests/test_quad_utils.py @@ -2,7 +2,9 @@ import numpy as np import pytest +from jax import grad +from desc.backend import jnp from desc.integrals.quad_utils import ( automorphism_arcsin, automorphism_sin, @@ -91,3 +93,11 @@ def test_leggauss_lobatto(): np.testing.assert_allclose(x, [-1, -np.sqrt(3 / 7), 0, np.sqrt(3 / 7), 1]) np.testing.assert_allclose(w, [1 / 10, 49 / 90, 32 / 45, 49 / 90, 1 / 10]) np.testing.assert_allclose(leggauss_lob(x.size - 2, True), (x[1:-1], w[1:-1])) + + def fun(a): + x, w = leggauss_lob(a.size) + return jnp.dot(x * a, w) + + # make sure differentiable + # https://github.com/PlasmaControl/DESC/pull/854#discussion_r1733323161 + assert np.isfinite(grad(fun)(jnp.arange(10) * np.pi)).all() From 45b7c7a26a5c1466a6b8898ab95370c4d0996f36 Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 29 Aug 2024 17:50:25 -0400 Subject: [PATCH 232/241] Use more efficient vectorization --- desc/integrals/bounce_integral.py | 115 ++++---- desc/integrals/bounce_utils.py | 441 ++++++++++++++++-------------- desc/integrals/interp_utils.py | 173 +++++++----- desc/utils.py | 29 +- tests/test_integrals.py | 69 ++--- tests/test_interp_utils.py | 95 ++++--- 6 files changed, 492 insertions(+), 430 deletions(-) diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 3fd3f0b1cc..fbe2b5f03a 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -1,5 +1,6 @@ """Methods for computing bounce integrals (singular or otherwise).""" +import numpy as np from interpax import CubicHermiteSpline from orthax.legendre import leggauss @@ -19,7 +20,7 @@ grad_automorphism_sin, ) from desc.io import IOAble -from desc.utils import setdefault, warnif +from desc.utils import atleast_nd, setdefault, warnif class Bounce1D(IOAble): @@ -64,7 +65,9 @@ class Bounce1D(IOAble): cannot support reconstruction of the function near the origin. As the functions of interest do not vanish at infinity, pseudo-spectral techniques are not used. Instead, function approximation is done with local splines. - This is useful if one can efficiently obtain data along field lines. + This is useful if one can efficiently obtain data along field lines and + most efficient if the number of toroidal transit to follow a field line is + not too large. After obtaining the bounce points, the supplied quadrature is performed. By default, this is a Gauss quadrature after removing the singularity. @@ -74,13 +77,6 @@ class Bounce1D(IOAble): -------- Bounce2D : Uses two-dimensional pseudo-spectral techniques for the same task. - Warnings - -------- - The supplied data must be from a Clebsch coordinate (ρ, α, ζ) tensor-product grid. 
- The ζ coordinates (the unique values prior to taking the tensor-product) must be - strictly increasing and preferably uniformly spaced. These are used as knots to - construct splines; a reference knot density is 100 knots per toroidal transit. - Examples -------- See ``tests/test_integrals.py::TestBounce1D::test_integrate_checks``. @@ -89,14 +85,14 @@ class Bounce1D(IOAble): ---------- required_names : list Names in ``data_index`` required to compute bounce integrals. - _B : jnp.ndarray - TODO: Make this (4, M, L, N-1) now that tensor product in rho and alpha - required as well after GitHub PR #1214. - Shape (4, L * M, N - 1). + B : jnp.ndarray + Shape (M, L, N - 1, B.shape[-1]). Polynomial coefficients of the spline of |B| in local power basis. - First axis enumerates the coefficients of power series. Second axis - enumerates the splines. Last axis enumerates the polynomials that - compose a particular spline. + Last axis enumerates the coefficients of power series. For a polynomial + given by ∑ᵢⁿ cᵢ xⁱ, coefficient cᵢ is stored at ``B[...,n-i]``. + Third axis enumerates the polynomials that compose a particular spline. + Second axis enumerates flux surfaces. + First axis enumerates field lines of a particular flux surface. """ @@ -121,7 +117,10 @@ def __init__( ---------- grid : Grid Clebsch coordinate (ρ, α, ζ) tensor-product grid. - Note that below shape notation defines + The ζ coordinates (the unique values prior to taking the tensor-product) + must be strictly increasing and preferably uniformly spaced. These are used + as knots to construct splines. A reference knot density is 100 knots per + toroidal transit. Note that below shape notation defines L = ``grid.num_rho``, M = ``grid.num_alpha``, and N = ``grid.num_zeta``. data : dict[str, jnp.ndarray] Data evaluated on ``grid``. @@ -160,15 +159,12 @@ def __init__( "|B|": data["|B|"] / Bref, "|B|_z|r,a": data["|B|_z|r,a"] / Bref, # This is already the correct sign. } - self._data = { - name: grid.meshgrid_reshape(data[name], "raz").reshape(-1, grid.num_zeta) - for name in Bounce1D.required_names - } + self._data = dict(zip(data.keys(), Bounce1D.reshape_data(grid, *data.values()))) self._x, self._w = get_quadrature(quad, automorphism) # Compute local splines. self._zeta = grid.compress(grid.nodes[:, 2], surface_label="zeta") - self._B = jnp.moveaxis( + self.B = jnp.moveaxis( CubicHermiteSpline( x=self._zeta, y=self._data["|B|"], @@ -176,14 +172,12 @@ def __init__( axis=-1, check=check, ).c, - source=1, - destination=-1, + source=(0, 1), + destination=(-1, -2), ) - self._dB_dz = polyder_vec(self._B) - degree = 3 - assert self._B.shape[0] == degree + 1 - assert self._dB_dz.shape[0] == degree - assert self._B.shape[-1] == self._dB_dz.shape[-1] == grid.num_zeta - 1 + assert self.B.shape == (grid.num_alpha, grid.num_rho, grid.num_zeta - 1, 4) + self._dB_dz = polyder_vec(self.B) + assert self._dB_dz.shape == (grid.num_alpha, grid.num_rho, grid.num_zeta - 1, 3) @staticmethod def reshape_data(grid, *arys): @@ -202,20 +196,24 @@ def reshape_data(grid, *arys): List of reshaped data which may be given to ``integrate``. """ - f = [grid.meshgrid_reshape(d, "raz").reshape(-1, grid.num_zeta) for d in arys] + f = [grid.meshgrid_reshape(d, "arz") for d in arys] return f def points(self, pitch_inv, num_well=None): """Compute bounce points. + Notes + ----- + Only the dimensions following L are required. The leading axes are batch axes. + Parameters ---------- pitch_inv : jnp.ndarray - Shape must broadcast with (P, L * M). + Shape (P, M, L). 
1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is - specified by ``pitch_inv[...,ρ]`` where in the latter the labels - (ρ,α) are interpreted as the index into the last axis that corresponds to - that field line. If two-dimensional, the first axis is the batch axis. + specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels + are interpreted as the index into the last axis that corresponds to + that field line. num_well : int or None Specify to return the first ``num_well`` pairs of bounce points for each pitch along each field line. This is useful if ``num_well`` tightly @@ -230,17 +228,17 @@ def points(self, pitch_inv, num_well=None): Returns ------- z1, z2 : (jnp.ndarray, jnp.ndarray) - Shape (P, L * M, num_well). + Shape (P, M, L, num_well). ζ coordinates of bounce points. The points are ordered and grouped such that the straight line path between ``z1`` and ``z2`` resides in the epigraph of |B|. If there were less than ``num_wells`` wells detected along a field line, - then the last axis, which enumerates bounce points for a particular field + then the last axis, which enumerates bounce points for a particular field line and pitch, is padded with zero. """ - return bounce_points(pitch_inv, self._zeta, self._B, self._dB_dz, num_well) + return bounce_points(pitch_inv, self._zeta, self.B, self._dB_dz, num_well) def check_points(self, z1, z2, pitch_inv, plot=True, **kwargs): """Check that bounce points are computed correctly. @@ -248,16 +246,16 @@ def check_points(self, z1, z2, pitch_inv, plot=True, **kwargs): Parameters ---------- z1, z2 : (jnp.ndarray, jnp.ndarray) - Shape (P, L * M, num_well). + Shape (P, M, L, num_well). ζ coordinates of bounce points. The points are ordered and grouped such that the straight line path between ``z1`` and ``z2`` resides in the epigraph of |B|. pitch_inv : jnp.ndarray - Shape must broadcast with (P, L * M). + Shape (P, M, L). 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is - specified by ``pitch_inv[...,(ρ,α)]`` where in the latter the labels - (ρ,α) are interpreted as the index into the last axis that corresponds to - that field line. If two-dimensional, the first axis is the batch axis. + specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels + are interpreted as the index into the last axis that corresponds to + that field line. plot : bool Whether to plot stuff. kwargs @@ -272,9 +270,9 @@ def check_points(self, z1, z2, pitch_inv, plot=True, **kwargs): return _check_bounce_points( z1=z1, z2=z2, - pitch_inv=jnp.atleast_2d(pitch_inv), + pitch_inv=atleast_nd(3, pitch_inv), knots=self._zeta, - B=self._B, + B=self.B, plot=plot, **kwargs, ) @@ -295,14 +293,18 @@ def integrate( Computes the bounce integral ∫ f(ℓ) dℓ for every specified field line for every λ value in ``pitch_inv``. + Notes + ----- + Only the dimensions following L are required. The leading axes are batch axes. + Parameters ---------- pitch_inv : jnp.ndarray - Shape must broadcast with (P, L * M). + Shape (P, M, L). 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is - specified by ``pitch_inv[...,(ρ,α)]`` where in the latter the labels - (ρ,α) are interpreted as the index into the last axis that corresponds to - that field line. If two-dimensional, the first axis is the batch axis. + specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels + are interpreted as the index into the last axis that corresponds to + that field line. 
integrand : callable The composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the @@ -310,13 +312,13 @@ def integrate( ``B`` and ``pitch``. A quadrature will be performed to approximate the bounce integral of ``integrand(*f,B=B,pitch=pitch)``. f : list[jnp.ndarray] - Shape (L * M, N). + Shape (M, L, N). Real scalar-valued functions evaluated on the ``grid`` supplied to construct this object. These functions should be arguments to the callable ``integrand``. Use the method ``self.reshape_data`` to reshape the data into the expected shape. weight : jnp.ndarray - Shape must broadcast with (L * M, N). + Shape (M, L, N). If supplied, the bounce integral labeled by well j is weighted such that the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` interpolated to the deepest point in the magnetic well. Use the method @@ -343,12 +345,12 @@ def integrate( Returns ------- result : jnp.ndarray - Shape (P, L*M, num_well). - First axis enumerates pitch values. Second axis enumerates the field lines. - Last axis enumerates the bounce integrals. + Shape (P, M, L, num_well). + Last axis enumerates the bounce integrals for a given pitch, field line, + and flux surface. """ - pitch_inv = jnp.atleast_2d(pitch_inv) + pitch_inv = atleast_nd(3, pitch_inv) z1, z2 = self.points(pitch_inv, num_well) result = bounce_quadrature( x=self._x, @@ -370,9 +372,10 @@ def integrate( z1, z2, self._zeta, - self._B, + self.B, self._dB_dz, method, ) - assert result.shape[-1] == setdefault(num_well, (self._zeta.size - 1) * 3) + assert result.shape[0] == pitch_inv.shape[0] + assert result.shape[-1] == setdefault(num_well, np.prod(self._dB_dz.shape[-2:])) return result diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index 90d7a30273..7b2984c579 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -1,5 +1,6 @@ """Utilities and functional programming interface for bounce integrals.""" +import numpy as np from interpax import PPoly from matplotlib import pyplot as plt @@ -9,7 +10,7 @@ from desc.integrals.interp_utils import ( interp1d_Hermite_vec, interp1d_vec, - poly_root, + polyroot_vec, polyval_vec, ) from desc.integrals.quad_utils import ( @@ -17,7 +18,16 @@ composite_linspace, grad_bijection_from_disc, ) -from desc.utils import atleast_3d_mid, errorif, setdefault, take_mask +from desc.utils import ( + atleast_2d_end, + atleast_3d_mid, + atleast_nd, + errorif, + flatten_matrix, + is_broadcastable, + setdefault, + take_mask, +) def get_pitch_inv(min_B, max_B, num, relative_shift=1e-6): @@ -26,8 +36,10 @@ def get_pitch_inv(min_B, max_B, num, relative_shift=1e-6): Parameters ---------- min_B : jnp.ndarray + Shape (..., L). Minimum |B| value. max_B : jnp.ndarray + Shape (..., L). Maximum |B| value. num : int Number of values, not including endpoints. @@ -38,7 +50,7 @@ def get_pitch_inv(min_B, max_B, num, relative_shift=1e-6): Returns ------- pitch_inv : jnp.ndarray - Shape (num + 2, *min_B.shape). + Shape (num + 2, ..., L) with ndim > 2. 1/λ values. """ @@ -47,8 +59,9 @@ def get_pitch_inv(min_B, max_B, num, relative_shift=1e-6): min_B = (1 + relative_shift) * min_B max_B = (1 - relative_shift) * max_B # Samples should be uniformly spaced in |B| and not λ (GitHub issue #1228). 
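Spelled out with plain NumPy, the point of that comment is that a grid uniform in 1/λ = |B| is not uniform in λ:

.. code-block:: python

    import numpy as np

    min_B, max_B = 0.7, 1.3
    pitch_inv = np.linspace(min_B, max_B, 7)  # uniform in |B|, i.e. in 1/lambda
    pitch = 1 / pitch_inv                      # the corresponding lambda values are not uniform
    assert np.ptp(np.diff(pitch_inv)) < 1e-15
    assert np.ptp(np.diff(pitch)) > 1e-3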
- pitch_inv = composite_linspace(jnp.stack([min_B, max_B]), num) - assert pitch_inv.shape == (num + 2, *min_B.shape) + pitch_inv = atleast_3d_mid( + atleast_2d_end(composite_linspace(jnp.stack([min_B, max_B]), num)) + ) return pitch_inv @@ -58,50 +71,45 @@ def _check_spline_shape(knots, g, dg_dz, pitch_inv=None): Parameters ---------- knots : jnp.ndarray - Shape (knots.size, ). + Shape (N, ). ζ coordinates of spline knots. Must be strictly increasing. g : jnp.ndarray - Shape (g.shape[0], S, knots.size - 1). + Shape (M, L, N - 1, g.shape[-1]). Polynomial coefficients of the spline of g in local power basis. - First axis enumerates the coefficients of power series. Second axis - enumerates the splines. Last axis enumerates the polynomials that - compose a particular spline. + Last axis enumerates the coefficients of power series. Second to + last axis enumerates the polynomials that compose a particular spline. dg_dz : jnp.ndarray - Shape (g.shape[0] - 1, *g.shape[1:]). + Shape (M, L, N - 1, g.shape[-1] - 1). Polynomial coefficients of the spline of ∂g/∂ζ in local power basis. - First axis enumerates the coefficients of power series. Second axis - enumerates the splines. Last axis enumerates the polynomials that - compose a particular spline. + Last axis enumerates the coefficients of power series. Second to + last axis enumerates the polynomials that compose a particular spline. pitch_inv : jnp.ndarray - Shape must broadcast with (P, S). + Shape (P, M, L). 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is - specified by ``pitch_inv[...,(ρ,α)]`` where in the latter the labels - (ρ,α) are interpreted as the index into the last axis that corresponds to - that field line. If two-dimensional, the first axis is the batch axis. + specified by ``pitch_inv[:,α,ρ]`` where in the latter the labels + are interpreted as the index into the last axis that corresponds to + that field line. """ errorif(knots.ndim != 1, msg=f"knots should be 1d; got shape {knots.shape}.") errorif( - g.shape[-1] != (knots.size - 1), + g.shape[-2] != (knots.size - 1), msg=( - "Last axis does not enumerate polynomials of spline. " + "Second to last axis does not enumerate polynomials of spline. " f"Spline shape {g.shape}. Knots shape {knots.shape}." ), ) errorif( - g.ndim > 3 - or dg_dz.ndim > 3 - or (g.shape[0] - 1) != dg_dz.shape[0] - or g.shape[1:] != dg_dz.shape[1:], + not (g.ndim == dg_dz.ndim < 5) + or g.shape != (*dg_dz.shape[:-1], dg_dz.shape[-1] + 1), msg=f"Invalid shape {g.shape} for spline and derivative {dg_dz.shape}.", ) - # Add axis which enumerates field lines if necessary. - g, dg_dz = atleast_3d_mid(g, dg_dz) + g = atleast_nd(4, g) + dg_dz = atleast_nd(4, dg_dz) if pitch_inv is not None: - pitch_inv = jnp.atleast_2d(pitch_inv) + pitch_inv = atleast_nd(3, pitch_inv) errorif( - pitch_inv.ndim != 2 - or not (pitch_inv.shape[-1] == 1 or pitch_inv.shape[-1] == g.shape[1]), + pitch_inv.ndim > 3 or not is_broadcastable(pitch_inv.shape, g.shape[:2]), msg=f"Invalid shape {pitch_inv.shape} for pitch angles.", ) return g, dg_dz, pitch_inv @@ -112,29 +120,31 @@ def bounce_points( ): """Compute the bounce points given spline of |B| and pitch λ. + Notes + ----- + Only the dimensions following L are required. The leading axes are batch axes. + Parameters ---------- pitch_inv : jnp.ndarray - Shape must broadcast with (P, S). + Shape (P, M, L). 1/λ values to evaluate the bounce integral at each field line. 
1/λ(ρ,α) is - specified by ``pitch_inv[...,(ρ,α)]`` where in the latter the labels - (ρ,α) are interpreted as the index into the last axis that corresponds to - that field line. If two-dimensional, the first axis is the batch axis. + specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels + are interpreted as the index into the last axis that corresponds to + that field line. knots : jnp.ndarray - Shape (knots.size, ). + Shape (N, ). ζ coordinates of spline knots. Must be strictly increasing. B : jnp.ndarray - Shape (B.shape[0], S, knots.size - 1). + Shape (M, L, N - 1, B.shape[-1]). Polynomial coefficients of the spline of |B| in local power basis. - First axis enumerates the coefficients of power series. Second axis - enumerates the splines. Last axis enumerates the polynomials that - compose a particular spline. + Last axis enumerates the coefficients of power series. Second to + last axis enumerates the polynomials that compose a particular spline. dB_dz : jnp.ndarray - Shape (B.shape[0] - 1, *B.shape[1:]). + Shape (M, L, N - 1, B.shape[-1] - 1). Polynomial coefficients of the spline of (∂|B|/∂ζ)|(ρ,α) in local power basis. - First axis enumerates the coefficients of power series. Second axis - enumerates the splines. Last axis enumerates the polynomials that - compose a particular spline. + Last axis enumerates the coefficients of power series. Second to + last axis enumerates the polynomials that compose a particular spline. num_well : int or None Specify to return the first ``num_well`` pairs of bounce points for each pitch along each field line. This is useful if ``num_well`` tightly @@ -155,7 +165,7 @@ def bounce_points( Returns ------- z1, z2 : (jnp.ndarray, jnp.ndarray) - Shape (P, S, num_well). + Shape (P, M, L, num_well). ζ coordinates of bounce points. The points are ordered and grouped such that the straight line path between ``z1`` and ``z2`` resides in the epigraph of |B|. @@ -166,9 +176,7 @@ def bounce_points( """ B, dB_dz, pitch_inv = _check_spline_shape(knots, B, dB_dz, pitch_inv) - P, S, degree = pitch_inv.shape[0], B.shape[1], B.shape[0] - 1 - # Intersection points in local power basis. - intersect = poly_root( + intersect = polyroot_vec( c=B, k=pitch_inv[..., jnp.newaxis], a_min=jnp.array([0.0]), @@ -177,22 +185,28 @@ def bounce_points( sentinel=-1.0, distinct=True, ) - assert intersect.shape == (P, S, knots.size - 1, degree) + assert intersect.shape == ( + pitch_inv.shape[0], + B.shape[0], + B.shape[1], + knots.size - 1, + B.shape[-1] - 1, + ) # Reshape so that last axis enumerates intersects of a pitch along a field line. - dB_dz_sign = jnp.sign( - polyval_vec(x=intersect, c=dB_dz[..., jnp.newaxis]).reshape(P, S, -1) + dB_sign = flatten_matrix( + jnp.sign(polyval_vec(x=intersect, c=dB_dz[..., jnp.newaxis, :])) ) # Only consider intersect if it is within knots that bound that polynomial. - is_intersect = intersect.reshape(P, S, -1) >= 0 + is_intersect = flatten_matrix(intersect) >= 0 # Following discussion on page 3 and 5 of https://doi.org/10.1063/1.873749, # we ignore the bounce points of particles only assigned to a class that are # trapped outside this snapshot of the field line. - is_z1 = (dB_dz_sign <= 0) & is_intersect - is_z2 = (dB_dz_sign >= 0) & _in_epigraph_and(is_intersect, dB_dz_sign) + is_z1 = (dB_sign <= 0) & is_intersect + is_z2 = (dB_sign >= 0) & _in_epigraph_and(is_intersect, dB_sign) # Transform out of local power basis expansion. 
- intersect = (intersect + knots[:-1, jnp.newaxis]).reshape(P, S, -1) + intersect = flatten_matrix(intersect + knots[:-1, jnp.newaxis]) # New versions of JAX only like static sentinels. sentinel = -10000000.0 # instead of knots[0] - 1 z1 = take_mask(intersect, is_z1, size=num_well, fill_value=sentinel) @@ -230,41 +244,42 @@ def _check_bounce_points(z1, z2, pitch_inv, knots, B, plot=True, **kwargs): err_1 = jnp.any(z1 > z2, axis=-1) err_2 = jnp.any(z1[..., 1:] < z2[..., :-1], axis=-1) - P, S, _ = z1.shape - for s in range(S): - Bs = PPoly(B[:, s], knots) - for p in range(P): - Bs_midpoint = Bs((z1[p, s] + z2[p, s]) / 2) - err_3 = jnp.any(Bs_midpoint > pitch_inv[p, s] + eps) - if not (err_1[p, s] or err_2[p, s] or err_3): + for ml in np.ndindex(B.shape[:-2]): + Bs = PPoly(B[ml].T, knots) + for p in range(pitch_inv.shape[0]): + idx = (p, *ml) + Bs_midpoint = Bs((z1[idx] + z2[idx]) / 2) + err_3 = jnp.any(Bs_midpoint > pitch_inv[idx] + eps) + if not (err_1[idx] or err_2[idx] or err_3): continue - _z1 = z1[p, s][mask[p, s]] - _z2 = z2[p, s][mask[p, s]] + _z1 = z1[idx][mask[idx]] + _z2 = z2[idx][mask[idx]] if plot: plot_ppoly( ppoly=Bs, z1=_z1, z2=_z2, - k=pitch_inv[p, s], + k=pitch_inv[idx], **kwargs, ) print(" z1 | z2") print(jnp.column_stack([_z1, _z2])) - assert not err_1[p, s], "Intersects have an inversion.\n" - assert not err_2[p, s], "Detected discontinuity.\n" + assert not err_1[idx], "Intersects have an inversion.\n" + assert not err_2[idx], "Detected discontinuity.\n" assert not err_3, ( - f"Detected |B| = {Bs_midpoint[mask[p, s]]} > {pitch_inv[p, s] + eps} " + f"Detected |B| = {Bs_midpoint[mask[idx]]} > {pitch_inv[idx] + eps} " "= 1/λ in well, implying the straight line path between " "bounce points is in hypograph(|B|). Use more knots.\n" ) if plot: + idx = (slice(None), *ml) plots.append( plot_ppoly( ppoly=Bs, - z1=z1[:, s], - z2=z2[:, s], - k=pitch_inv[:, s], + z1=z1[idx], + z2=z2[idx], + k=pitch_inv[idx], **kwargs, ) ) @@ -297,16 +312,16 @@ def bounce_quadrature( Shape (w.size, ). Quadrature weights. z1, z2 : jnp.ndarray - Shape (P, S, num_well). + Shape (P, M, L, num_well). ζ coordinates of bounce points. The points are ordered and grouped such that the straight line path between ``z1`` and ``z2`` resides in the epigraph of |B|. pitch_inv : jnp.ndarray - Shape must broadcast with (P, S). + Shape (P, M, L). 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is - specified by ``pitch_inv[...,(ρ,α)]`` where in the latter the labels - (ρ,α) are interpreted as the index into the last axis that corresponds to - that field line. If two-dimensional, the first axis is the batch axis. + specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels + are interpreted as the index into the last axis that corresponds to + that field line. integrand : callable The composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the @@ -314,14 +329,15 @@ def bounce_quadrature( ``B`` and ``pitch``. A quadrature will be performed to approximate the bounce integral of ``integrand(*f,B=B,pitch=pitch)``. f : list[jnp.ndarray] - Shape (S, knots.size). + Shape (M, L, N). Real scalar-valued functions evaluated on the ``knots``. These functions should be arguments to the callable ``integrand``. data : dict[str, jnp.ndarray] - Data evaluated on ``grid`` and reshaped with ``Bounce1D.reshape_data``. + Shape (M, L, N). + Required data evaluated on ``grid`` and reshaped with ``Bounce1D.reshape_data``. 
Must include names in ``Bounce1D.required_names``. knots : jnp.ndarray - Shape (knots.size, ). + Shape (N, ). Unique ζ coordinates where the arrays in ``data`` and ``f`` were evaluated. method : str Method of interpolation. @@ -339,17 +355,17 @@ def bounce_quadrature( Returns ------- result : jnp.ndarray - Shape (P, S, num_well). - Quadrature for every pitch. + Shape (P, M, L, num_well). First axis enumerates pitch values. Second axis enumerates the field lines. - Last axis enumerates the bounce integrals. + Third axis enumerates the flux surfaces. Last axis enumerates the bounce + integrals. """ - errorif(z1.ndim != 3 or z1.shape != z2.shape) errorif(x.ndim != 1 or x.shape != w.shape) - pitch_inv = jnp.atleast_2d(pitch_inv) + errorif(z1.ndim != 4 or z1.shape != z2.shape) + errorif(pitch_inv.ndim != 3) if not isinstance(f, (list, tuple)): - f = [f] + f = list(f) # Integrate and complete the change of variable. if batch: @@ -366,8 +382,6 @@ def bounce_quadrature( plot=plot, ) else: - f = list(f) - # TODO: Use batched vmap. def loop(z): z1, z2 = z @@ -392,7 +406,7 @@ def loop(z): ) result = result * grad_bijection_from_disc(z1, z2) - assert result.shape == (pitch_inv.shape[0], data["|B|"].shape[0], z1.shape[-1]) + assert result.shape == z1.shape return result @@ -416,7 +430,7 @@ def _interpolate_and_integrate( Shape (w.size, ). Quadrature weights. Q : jnp.ndarray - Shape (P, S, Q.shape[2], w.size). + Shape (P, M, L, Q.shape[-2], w.size). Quadrature points in ζ coordinates. data : dict[str, jnp.ndarray] Data evaluated on ``grid`` and reshaped with ``Bounce1D.reshape_data``. @@ -429,58 +443,50 @@ def _interpolate_and_integrate( Quadrature for every pitch. """ - assert pitch_inv.ndim == 2 - assert w.ndim == knots.ndim == 1 - assert 3 <= Q.ndim <= 4 and Q.shape[:2] == ( - pitch_inv.shape[0], - data["|B|"].shape[0], - ) - assert Q.shape[-1] == w.size - assert knots.size == data["|B|"].shape[-1] - assert ( - data["B^zeta"].shape - == data["B^zeta_z|r,a"].shape - == data["|B|"].shape - == data["|B|_z|r,a"].shape - ) + assert w.ndim == 1 + assert 3 < Q.ndim < 6 and Q.shape[0] == pitch_inv.shape[0] and Q.shape[-1] == w.size + assert data["|B|"].shape[-1] == knots.size - pitch_inv = jnp.expand_dims(pitch_inv, axis=(2, 3) if (Q.ndim == 4) else 2) + if Q.ndim == 5: + pitch_inv = pitch_inv[..., jnp.newaxis] shape = Q.shape - Q = Q.reshape(Q.shape[0], Q.shape[1], -1) + Q = flatten_matrix(Q) b_sup_z = interp1d_Hermite_vec( Q, knots, data["B^zeta"] / data["|B|"], data["B^zeta_z|r,a"] / data["|B|"] - data["B^zeta"] * data["|B|_z|r,a"] / data["|B|"] ** 2, - ).reshape(shape) - B = interp1d_Hermite_vec(Q, knots, data["|B|"], data["|B|_z|r,a"]).reshape(shape) + ) + B = interp1d_Hermite_vec(Q, knots, data["|B|"], data["|B|_z|r,a"]) # Spline each function separately so that operations in the integrand # that do not preserve smoothness can be captured. - f = [interp1d_vec(Q, knots, f_i, method=method).reshape(shape) for f_i in f] - result = jnp.dot(integrand(*f, B=B, pitch=1 / pitch_inv) / b_sup_z, w) - + f = [interp1d_vec(Q, knots, f_i, method=method) for f_i in f] + result = jnp.dot( + (integrand(*f, B=B, pitch=1 / pitch_inv) / b_sup_z).reshape(shape), + w, + ) if check: - _check_interp(Q.reshape(shape), f, b_sup_z, B, data["|B|_z|r,a"], result, plot) + _check_interp(shape, Q, f, b_sup_z, B, result, plot) return result -def _check_interp(Q, f, b_sup_z, B, B_z_ra, result, plot): +def _check_interp(shape, Q, f, b_sup_z, B, result, plot): """Check for floating point errors. 
Parameters ---------- + shape : tuple + (P, M, L, Q.shape[-2], w.size). Q : jnp.ndarray Quadrature points in ζ coordinates. - f : list of jnp.ndarray + f : list[jnp.ndarray] Arguments to the integrand, interpolated to Q. b_sup_z : jnp.ndarray Contravariant toroidal component of magnetic field, interpolated to Q. B : jnp.ndarray Norm of magnetic field, interpolated to Q. - B_z_ra : jnp.ndarray - Norm of magnetic field derivative, (∂|B|/∂ζ)|(ρ,α). result : jnp.ndarray Output of ``_interpolate_and_integrate``. plot : bool @@ -488,106 +494,105 @@ def _check_interp(Q, f, b_sup_z, B, B_z_ra, result, plot): """ assert jnp.isfinite(Q).all(), "NaN interpolation point." + msg = "|B| has vanished, violating the hairy ball theorem." + assert not jnp.isclose(B, 0).any(), msg + assert not jnp.isclose(b_sup_z, 0).any(), msg + # Integrals that we should be computing. - marked = jnp.any(Q != 0.0, axis=-1) + marked = jnp.any(Q.reshape(shape) != 0.0, axis=-1) goal = marked.sum() msg = "Interpolation failed." - assert jnp.isfinite(B_z_ra).all(), msg - assert goal == jnp.sum(marked & jnp.isfinite(jnp.sum(b_sup_z, axis=-1))), msg - assert goal == jnp.sum(marked & jnp.isfinite(jnp.sum(B, axis=-1))), msg + assert goal == (marked & jnp.isfinite(b_sup_z).reshape(shape).all(axis=-1)).sum() + assert goal == (marked & jnp.isfinite(B).reshape(shape).all(axis=-1)).sum() for f_i in f: - assert goal == jnp.sum(marked & jnp.isfinite(jnp.sum(f_i, axis=-1))), msg - - msg = "|B| has vanished, violating the hairy ball theorem." - assert not jnp.isclose(B, 0).any(), msg - assert not jnp.isclose(b_sup_z, 0).any(), msg + assert goal == (marked & jnp.isfinite(f_i).reshape(shape).all(axis=-1)).sum() # Number of those integrals that were computed. - actual = jnp.sum(marked & jnp.isfinite(result)) + actual = (marked & jnp.isfinite(result)).sum() assert goal == actual, ( f"Lost {goal - actual} integrals from NaN generation in the integrand. This " "is caused by floating point error, usually due to a poor quadrature choice." ) if plot: - _plot_check_interp(Q, B, name=r"$\vert B \vert$") - _plot_check_interp(Q, b_sup_z, name=r"$ (B / \vert B \vert) \cdot e^{\zeta}$") + Q = Q.reshape(shape) + _plot_check_interp(Q, B.reshape(shape), name=r"$\vert B \vert$") + _plot_check_interp( + Q, b_sup_z.reshape(shape), name=r"$ (B / \vert B \vert) \cdot e^{\zeta}$" + ) def _plot_check_interp(Q, V, name=""): - """Plot V[λ, (ρ, α), (ζ₁, ζ₂)](Q).""" - for p in range(Q.shape[0]): - for s in range(Q.shape[1]): - marked = jnp.nonzero(jnp.any(Q != 0.0, axis=-1))[0] - if marked.size == 0: - continue - fig, ax = plt.subplots() - ax.set_xlabel(r"$\zeta$") - ax.set_ylabel(name) - ax.set_title( - f"Interpolation of {name} to quadrature points. Index {p},{s}." - ) - for i in marked: - ax.plot(Q[p, s, i], V[p, s, i], marker="o") - fig.text( - 0.01, - 0.01, - f"Each color specifies the set of points and values (ζ, {name}(ζ)) " - "used to evaluate an integral.", - ) - plt.tight_layout() - plt.show() + """Plot V[λ, α, ρ, (ζ₁, ζ₂)](Q).""" + for idx in np.ndindex(Q.shape[:-2]): + marked = jnp.nonzero(jnp.any(Q[idx] != 0.0, axis=-1))[0] + if marked.size == 0: + continue + fig, ax = plt.subplots() + ax.set_xlabel(r"$\zeta$") + ax.set_ylabel(name) + ax.set_title(f"Interpolation of {name} to quadrature points. 
Index {idx}.") + for i in marked: + ax.plot(Q[(*idx, i)], V[(*idx, i)], marker="o") + fig.text( + 0.01, + 0.01, + "Each color specifies particular bounce integral with the function " + f"{name} interpolated to the quadrature points.", + ) + plt.tight_layout() + plt.show() def _get_extrema(knots, g, dg_dz, sentinel=jnp.nan): - """Return extrema (ζ*, g(ζ*)). + """Return extrema (z*, g(z*)). + + Notes + ----- + Only the dimensions following L are required. The leading axes are batch axes. Parameters ---------- knots : jnp.ndarray - Shape (knots.size, ). + Shape (N, ). ζ coordinates of spline knots. Must be strictly increasing. g : jnp.ndarray - Shape (g.shape[0], S, knots.size - 1). + Shape (M, L, N - 1, g.shape[-1]). Polynomial coefficients of the spline of g in local power basis. - First axis enumerates the coefficients of power series. Second axis - enumerates the splines. Last axis enumerates the polynomials that - compose a particular spline. + Last axis enumerates the coefficients of power series. Second to + last axis enumerates the polynomials that compose a particular spline. dg_dz : jnp.ndarray - Shape (g.shape[0] - 1, *g.shape[1:]). - Polynomial coefficients of the spline of ∂g/∂ζ in local power basis. - First axis enumerates the coefficients of power series. Second axis - enumerates the splines. Last axis enumerates the polynomials that - compose a particular spline. + Shape (M, L, N - 1, g.shape[-1] - 1). + Polynomial coefficients of the spline of ∂g/∂z in local power basis. + Last axis enumerates the coefficients of power series. Second to + last axis enumerates the polynomials that compose a particular spline. sentinel : float Value with which to pad array to return fixed shape. Returns ------- ext, g_ext : jnp.ndarray - Shape (S, (knots.size - 1) * (degree - 1)). - First array enumerates ζ*. Second array enumerates g(ζ*) + Shape (M, L, (N - 1) * (g.shape[-1] - 2)). + First array enumerates z*. Second array enumerates g(z*) Sorting order of extrema is arbitrary. """ g, dg_dz, _ = _check_spline_shape(knots, g, dg_dz) - S, degree = g.shape[1], g.shape[0] - 1 - ext = poly_root( + ext = polyroot_vec( c=dg_dz, a_min=jnp.array([0.0]), a_max=jnp.diff(knots), sentinel=sentinel ) - assert ext.shape == (S, knots.size - 1, degree - 1) - g_ext = polyval_vec(x=ext, c=g[..., jnp.newaxis]).reshape(S, -1) + g_ext = flatten_matrix(polyval_vec(x=ext, c=g[..., jnp.newaxis, :])) # Transform out of local power basis expansion. - ext = (ext + knots[:-1, jnp.newaxis]).reshape(S, -1) + ext = flatten_matrix(ext + knots[:-1, jnp.newaxis]) return ext, g_ext def _where_for_argmin(z1, z2, ext, g_ext, upper_sentinel): - assert z1.shape[1] == z2.shape[1] == ext.shape[0] == g_ext.shape[0] + assert z1.shape[1:3] == z2.shape[1:3] == ext.shape[:2] == g_ext.shape[:2] return jnp.where( - (z1[..., jnp.newaxis] < ext[:, jnp.newaxis]) - & (ext[:, jnp.newaxis] < z2[..., jnp.newaxis]), - g_ext[:, jnp.newaxis], + (z1[..., jnp.newaxis] < ext[:, :, jnp.newaxis]) + & (ext[:, :, jnp.newaxis] < z2[..., jnp.newaxis]), + g_ext[:, :, jnp.newaxis], upper_sentinel, ) @@ -599,31 +604,31 @@ def interp_to_argmin( Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A = argmin_E g(ζ). Returns mean_A h(ζ). + Notes + ----- + Only the dimensions following L are required. The leading axes are batch axes. + Parameters ---------- h : jnp.ndarray - Shape must broadcast with (S, knots.size). + Shape (M, L, N). Values evaluated on ``knots`` to interpolate. z1, z2 : jnp.ndarray - Shape (P, S, num_well). - ζ coordinates of bounce points. 
The points are ordered and grouped such - that the straight line path between ``z1`` and ``z2`` resides in the - epigraph of g. + Shape (P, M, L, num_well). + Boundaries to detect argmin between. knots : jnp.ndarray - Shape (knots.size, ). - ζ coordinates of spline knots. Must be strictly increasing. + Shape (N, ). + z coordinates of spline knots. Must be strictly increasing. g : jnp.ndarray - Shape (g.shape[0], S, knots.size - 1). + Shape (M, L, N - 1, g.shape[-1]). Polynomial coefficients of the spline of g in local power basis. - First axis enumerates the coefficients of power series. Second axis - enumerates the splines. Last axis enumerates the polynomials that - compose a particular spline. + Last axis enumerates the coefficients of power series. Second to + last axis enumerates the polynomials that compose a particular spline. dg_dz : jnp.ndarray - Shape (g.shape[0] - 1, *g.shape[1:]). - Polynomial coefficients of the spline of ∂g/∂ζ in local power basis. - First axis enumerates the coefficients of power series. Second axis - enumerates the splines. Last axis enumerates the polynomials that - compose a particular spline. + Shape (M, L, N - 1, g.shape[-1] - 1). + Polynomial coefficients of the spline of ∂g/∂z in local power basis. + Last axis enumerates the coefficients of power series. Second to + last axis enumerates the polynomials that compose a particular spline. method : str Method of interpolation. See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. @@ -646,22 +651,25 @@ def interp_to_argmin( Returns ------- h : jnp.ndarray - Shape (P, S, num_well). + Shape (P, M, L, num_well). mean_A h(ζ) """ - ext, g = _get_extrema(knots, g, dg_dz, sentinel=0) + z1 = atleast_nd(4, z1) + z2 = atleast_nd(4, z2) + ext, g_ext = _get_extrema(knots, g, dg_dz, sentinel=0) # JAX softmax(x) does the proper shift to compute softmax(x - max(x)), but it's # still not a good idea to compute over a large length scale, so we warn in # docstring to choose upper sentinel properly. argmin = softargmax( - beta * _where_for_argmin(z1, z2, ext, g, upper_sentinel), axis=-1 + beta * _where_for_argmin(z1, z2, ext, g_ext, upper_sentinel), + axis=-1, ) h = jnp.linalg.vecdot( argmin, - interp1d_vec(ext, knots, jnp.atleast_2d(h), method=method)[:, jnp.newaxis], + interp1d_vec(ext, knots, h, method=method)[:, :, jnp.newaxis], ) - assert h.shape == z1.shape + assert h.shape == z1.shape or h.shape == z2.shape return h @@ -676,31 +684,31 @@ def interp_to_argmin_hard(h, z1, z2, knots, g, dg_dz, method="cubic"): Accomplishes the same task, but handles the case of non-unique global minima more correctly. It is also more efficient if P >> 1. + Notes + ----- + Only the dimensions following L are required. The leading axes are batch axes. + Parameters ---------- h : jnp.ndarray - Shape must broadcast with (S, knots.size). + Shape (M, L, N). Values evaluated on ``knots`` to interpolate. z1, z2 : jnp.ndarray - Shape (P, S, num_well). - ζ coordinates of bounce points. The points are ordered and grouped such - that the straight line path between ``z1`` and ``z2`` resides in the - epigraph of g. + Shape (P, M, L, num_well). + Boundaries to detect argmin between. knots : jnp.ndarray - Shape (knots.size, ). - ζ coordinates of spline knots. Must be strictly increasing. + Shape (N, ). + z coordinates of spline knots. Must be strictly increasing. g : jnp.ndarray - Shape (g.shape[0], S, knots.size - 1). + Shape (M, L, N - 1, g.shape[-1]). Polynomial coefficients of the spline of g in local power basis. 
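The soft argmin used by ``interp_to_argmin`` above can be sketched in isolation with plain JAX and toy numbers; note that DESC's ``softargmax`` wrapper and its β sign convention may differ from the explicit negative temperature used here:

.. code-block:: python

    import jax.numpy as jnp
    from jax.nn import softmax

    g = jnp.array([3.0, 1.0, 2.5, 1.05])     # g evaluated at extrema inside one well
    h = jnp.array([10.0, 20.0, 30.0, 21.0])  # h evaluated at the same points
    beta = -50.0                              # large |beta| concentrates weight on argmin g
    weights = softmax(beta * g)
    h_soft = jnp.sum(weights * h)             # smooth, differentiable surrogate for h[argmin(g)]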
- First axis enumerates the coefficients of power series. Second axis - enumerates the splines. Last axis enumerates the polynomials that - compose a particular spline. + Last axis enumerates the coefficients of power series. Second to + last axis enumerates the polynomials that compose a particular spline. dg_dz : jnp.ndarray - Shape (g.shape[0] - 1, *g.shape[1:]). - Polynomial coefficients of the spline of ∂g/∂ζ in local power basis. - First axis enumerates the coefficients of power series. Second axis - enumerates the splines. Last axis enumerates the polynomials that - compose a particular spline. + Shape (M, L, N - 1, g.shape[-1] - 1). + Polynomial coefficients of the spline of ∂g/∂z in local power basis. + Last axis enumerates the coefficients of power series. Second to + last axis enumerates the polynomials that compose a particular spline. method : str Method of interpolation. See https://interpax.readthedocs.io/en/latest/_api/interpax.interp1d.html. @@ -709,19 +717,28 @@ def interp_to_argmin_hard(h, z1, z2, knots, g, dg_dz, method="cubic"): Returns ------- h : jnp.ndarray - Shape (P, S, num_well). + Shape (P, M, L, num_well). h(A) """ - ext, g = _get_extrema(knots, g, dg_dz, sentinel=0) + z1 = atleast_nd(4, z1) + z2 = atleast_nd(4, z2) + ext, g_ext = _get_extrema(knots, g, dg_dz, sentinel=0) # We can use the non-differentiable max because we actually want the gradients # to accumulate through only the minimum since we are differentiating how our # physics objective changes wrt equilibrium perturbations not wrt which of the # extrema get interpolated to. - argmin = jnp.argmin(_where_for_argmin(z1, z2, ext, g, jnp.max(g) + 1), axis=-1) - A = jnp.take_along_axis(ext[jnp.newaxis], argmin, axis=-1) - h = interp1d_vec(A, knots, jnp.atleast_2d(h), method=method) - assert h.shape == z1.shape + argmin = jnp.argmin( + _where_for_argmin(z1, z2, ext, g_ext, jnp.max(g_ext) + 1), + axis=-1, + ) + h = interp1d_vec( + jnp.take_along_axis(ext[jnp.newaxis], argmin, axis=-1), + knots, + h, + method=method, + ) + assert h.shape == z1.shape or h.shape == z2.shape return h diff --git a/desc/integrals/interp_utils.py b/desc/integrals/interp_utils.py index b5977c4134..c1b924fc6b 100644 --- a/desc/integrals/interp_utils.py +++ b/desc/integrals/interp_utils.py @@ -1,4 +1,11 @@ -"""Fast interpolation utilities.""" +"""Fast interpolation utilities. + +Notes +----- +These polynomial utilities are chosen for performance on gpu when among +methods that have the best (asymptotic) algorithmic complexity. For example, +we prefer not to use Horner's method. +""" from functools import partial @@ -7,27 +14,37 @@ from desc.backend import jnp from desc.compute.utils import safediv +# Warning: method must be specified as keyword argument. +interp1d_vec = jnp.vectorize( + interp1d, signature="(m),(n),(n)->(m)", excluded={"method"} +) + + +@partial(jnp.vectorize, signature="(m),(n),(n),(n)->(m)") +def interp1d_Hermite_vec(xq, x, f, fx, /): + """Vectorized cubic Hermite spline.""" + return interp1d(xq, x, f, method="cubic", fx=fx) + -# These polynomial manipulation methods are chosen for performance on gpu. def polyder_vec(c): """Coefficients for the derivatives of the given set of polynomials. Parameters ---------- c : jnp.ndarray - First axis should store coefficients of a polynomial. For a polynomial given by - ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0]-1``, coefficient cᵢ should be stored at - ``c[n-i]``. + Last axis should store coefficients of a polynomial. 
For a polynomial given by + ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[-1]-1``, coefficient cᵢ should be stored at + ``c[...,n-i]``. Returns ------- poly : jnp.ndarray Coefficients of polynomial derivative, ignoring the arbitrary constant. That is, - ``poly[i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁻¹, where n is - ``c.shape[0]-1``. + ``poly[...,i]`` stores the coefficient of the monomial xⁿ⁻ⁱ⁻¹, where n is + ``c.shape[-1]-1``. """ - return (c[:-1].T * jnp.arange(c.shape[0] - 1, 0, -1)).T + return c[..., :-1] * jnp.arange(c.shape[-1] - 1, 0, -1) def polyval_vec(*, x, c): @@ -36,11 +53,11 @@ def polyval_vec(*, x, c): Parameters ---------- x : jnp.ndarray - Real coordinates at which to evaluate the set of polynomials. + Coordinates at which to evaluate the set of polynomials. c : jnp.ndarray - First axis should store coefficients of a polynomial. For a polynomial given by - ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0]-1``, coefficient cᵢ should be stored at - ``c[n-i]``. + Last axis should store coefficients of a polynomial. For a polynomial given by + ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[-1]-1``, coefficient cᵢ should be stored at + ``c[...,n-i]``. Returns ------- @@ -53,40 +70,51 @@ def polyval_vec(*, x, c): np.testing.assert_allclose( polyval_vec(x=x, c=c), - np.sum( - np.polynomial.polynomial.polyvander(x, c.shape[0] - 1) - * np.moveaxis(np.flipud(c), 0, -1), - axis=-1, - ), + np.sum(polyvander(x, c.shape[-1] - 1) * c[..., ::-1], axis=-1), ) """ # Better than Horner's method as we expect to evaluate low order polynomials. # No need to use fast multipoint evaluation techniques for the same reason. - return jnp.einsum( - "...i,i...", x[..., jnp.newaxis] ** jnp.arange(c.shape[0] - 1, -1, -1), c + return jnp.sum( + c * x[..., jnp.newaxis] ** jnp.arange(c.shape[-1] - 1, -1, -1), + axis=-1, ) -# Warning: method must be specified as keyword argument. -interp1d_vec = jnp.vectorize( - interp1d, signature="(m),(n),(n)->(m)", excluded={"method"} -) +# TODO: Eventually do a PR to move this stuff into interpax. -@partial(jnp.vectorize, signature="(m),(n),(n),(n)->(m)") -def interp1d_Hermite_vec(xq, x, f, fx, /): - """Vectorized cubic Hermite spline.""" - return interp1d(xq, x, f, method="cubic", fx=fx) +def _subtract_last(c, k): + """Subtract ``k`` from last index of last axis of ``c``. + Semantically same as ``return c.copy().at[...,-1].add(-k)``, + but allows dimension to increase. + """ + c_1 = c[..., -1] - k + c = jnp.concatenate( + [ + jnp.broadcast_to(c[..., :-1], (*c_1.shape, c.shape[-1] - 1)), + c_1[..., jnp.newaxis], + ], + axis=-1, + ) + return c -# TODO: Eventually do a PR to move this stuff into interpax. + +def _filter_distinct(r, sentinel, eps): + """Set all but one of matching adjacent elements in ``r`` to ``sentinel``.""" + # eps needs to be low enough that close distinct roots do not get removed. + # Otherwise, algorithms relying on continuity will fail. + mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=sentinel), 0, atol=eps) + r = jnp.where(mask, sentinel, r) + return r _roots = jnp.vectorize(partial(jnp.roots, strip_zeros=False), signature="(m)->(n)") -def poly_root( +def polyroot_vec( c, k=0, a_min=None, @@ -101,26 +129,26 @@ def poly_root( Parameters ---------- c : jnp.ndarray - First axis should store coefficients of a polynomial. For a polynomial given by - ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[0]-1``, coefficient cᵢ should be stored at - ``c[n-i]``. + Last axis should store coefficients of a polynomial. 
For a polynomial given by + ∑ᵢⁿ cᵢ xⁱ, where n is ``c.shape[-1]-1``, coefficient cᵢ should be stored at + ``c[...,n-i]``. k : jnp.ndarray - Specify to find solutions to ∑ᵢⁿ cᵢ xⁱ = ``k``. Should broadcast with arrays of - shape ``c.shape[1:]``. + Shape (..., *c.shape[:-1]). + Specify to find solutions to ∑ᵢⁿ cᵢ xⁱ = ``k``. a_min : jnp.ndarray + Shape (..., *c.shape[:-1]). Minimum ``a_min`` and maximum ``a_max`` value to return roots between. - If specified only real roots are returned. If None, returns all complex roots. - Should broadcast with arrays of shape ``c.shape[1:]``. + If specified only real roots are returned, otherwise returns all complex roots. a_max : jnp.ndarray + Shape (..., *c.shape[:-1]). Minimum ``a_min`` and maximum ``a_max`` value to return roots between. - If specified only real roots are returned. If None, returns all complex roots. - Should broadcast with arrays of shape ``c.shape[1:]``. + If specified only real roots are returned, otherwise returns all complex roots. sort : bool Whether to sort the roots. sentinel : float Value with which to pad array in place of filtered elements. Anything less than ``a_min`` or greater than ``a_max`` plus some floating point - error buffer will work just like nan while avoiding nan gradient. + error buffer will work just like nan while avoiding ``nan`` gradient. eps : float Absolute tolerance with which to consider value as zero. distinct : bool @@ -130,30 +158,29 @@ def poly_root( Returns ------- r : jnp.ndarray - Shape (..., c.shape[1:], c.shape[0] - 1). - The roots of the polynomial, iterated over the last axis. + Shape (..., *c.shape[:-1], c.shape[-1] - 1). + The roots of the polynomial, iterated over the last axis.First """ get_only_real_roots = not (a_min is None and a_max is None) - + num_coef = c.shape[-1] + c = _subtract_last(c, k) func = {2: _root_linear, 3: _root_quadratic, 4: _root_cubic} + if ( - c.shape[0] in func + num_coef in func and get_only_real_roots and not (jnp.iscomplexobj(c) or jnp.iscomplexobj(k)) ): # Compute from analytic formula to avoid the issue of complex roots with small # imaginary parts and to avoid nan in gradient. - r = func[c.shape[0]](*c[:-1], c[-1] - k, sentinel, eps, distinct) + r = func[num_coef](C=c, sentinel=sentinel, eps=eps, distinct=distinct) # We already filtered distinct roots for quadratics. - distinct = distinct and c.shape[0] > 3 + distinct = distinct and num_coef > 3 else: # Compute from eigenvalues of polynomial companion matrix. - c_n = c[-1] - k - c = [jnp.broadcast_to(c_i, c_n.shape) for c_i in c[:-1]] - c.append(c_n) - c = jnp.stack(c, axis=-1) r = _roots(c) + if get_only_real_roots: a_min = -jnp.inf if a_min is None else a_min[..., jnp.newaxis] a_max = +jnp.inf if a_max is None else a_max[..., jnp.newaxis] @@ -165,11 +192,13 @@ def poly_root( if sort or distinct: r = jnp.sort(r, axis=-1) - return _filter_distinct(r, sentinel, eps) if distinct else r + r = _filter_distinct(r, sentinel, eps) if distinct else r + assert r.shape[-1] == num_coef - 1 + return r -def _root_cubic(a, b, c, d, sentinel, eps, distinct): - """Return r such that a r³ + b r² + c r + d = 0, assuming real coef and roots.""" +def _root_cubic(C, sentinel, eps, distinct): + """Return real cubic root assuming real coefficients.""" # numerical.recipes/book.html, page 228 def irreducible(Q, R, b, mask): @@ -210,23 +239,36 @@ def root(b, c, d): reducible(Q, R, b), ) + a = C[..., 0] + b = C[..., 1] + c = C[..., 2] + d = C[..., 3] return jnp.where( - # Tests catch failure here if eps < 1e-12 for 64 bit jax. 
+ # Tests catch failure here if eps < 1e-12 for 64 bit precision. jnp.expand_dims(jnp.abs(a) <= eps, axis=-1), - _concat_sentinel(_root_quadratic(b, c, d, sentinel, eps, distinct), sentinel), + _concat_sentinel( + _root_quadratic( + C=C[..., 1:], sentinel=sentinel, eps=eps, distinct=distinct + ), + sentinel, + ), root(b, c, d), ) -def _root_quadratic(a, b, c, sentinel, eps, distinct): - """Return r such that a r² + b r + c = 0, assuming real coefficients and roots.""" +def _root_quadratic(C, sentinel, eps, distinct): + """Return real quadratic root assuming real coefficients.""" # numerical.recipes/book.html, page 227 + a = C[..., 0] + b = C[..., 1] + c = C[..., 2] + discriminant = b**2 - 4 * a * c q = -0.5 * (b + jnp.sign(b) * jnp.sqrt(jnp.abs(discriminant))) r1 = jnp.where( discriminant < 0, sentinel, - safediv(q, a, _root_linear(b, c, sentinel, eps)), + safediv(q, a, _root_linear(C=C[..., 1:], sentinel=sentinel, eps=eps)), ) r2 = jnp.where( # more robust to remove repeated roots with discriminant @@ -237,21 +279,14 @@ def _root_quadratic(a, b, c, sentinel, eps, distinct): return jnp.stack([r1, r2], axis=-1) -def _root_linear(a, b, sentinel, eps, distinct=False): - """Return r such that a r + b = 0.""" +def _root_linear(C, sentinel, eps, distinct=False): + """Return real linear root assuming real coefficients.""" + a = C[..., 0] + b = C[..., 1] return safediv(-b, a, jnp.where(jnp.abs(b) <= eps, 0, sentinel)) def _concat_sentinel(r, sentinel, num=1): - """Concat ``sentinel`` ``num`` times to ``r`` on last axis.""" + """Append ``sentinel`` ``num`` times to ``r`` on last axis.""" sent = jnp.broadcast_to(sentinel, (*r.shape[:-1], num)) return jnp.append(r, sent, axis=-1) - - -def _filter_distinct(r, sentinel, eps): - """Set all but one of matching adjacent elements in ``r`` to ``sentinel``.""" - # eps needs to be low enough that close distinct roots do not get removed. - # Otherwise, algorithms relying on continuity will fail. - mask = jnp.isclose(jnp.diff(r, axis=-1, prepend=sentinel), 0, atol=eps) - r = jnp.where(mask, sentinel, r) - return r diff --git a/desc/utils.py b/desc/utils.py index 24521c8e01..0f6553b67f 100644 --- a/desc/utils.py +++ b/desc/utils.py @@ -737,34 +737,25 @@ def flatten_matrix(y): # TODO: Eventually remove and use numpy's stuff. 
# https://github.com/numpy/numpy/issues/25805 -def atleast_nd(ndmin, *arys): +def atleast_nd(ndmin, ary): """Adds dimensions to front if necessary.""" if ndmin == 1: - return jnp.atleast_1d(*arys) + return jnp.atleast_1d(ary) if ndmin == 2: - return jnp.atleast_2d(*arys) - tup = tuple(jnp.array(ary, ndmin=ndmin) for ary in arys) - if len(tup) == 1: - tup = tup[0] - return tup + return jnp.atleast_2d(ary) + return jnp.array(ary, ndmin=ndmin) if jnp.ndim(ary) < ndmin else ary -def atleast_3d_mid(*arys): +def atleast_3d_mid(ary): """Like np.atleast3d but if adds dim at axis 1 for 2d arrays.""" - arys = jnp.atleast_2d(*arys) - tup = tuple(ary[:, jnp.newaxis] if ary.ndim == 2 else ary for ary in arys) - if len(tup) == 1: - tup = tup[0] - return tup + ary = jnp.atleast_2d(ary) + return ary[:, jnp.newaxis] if ary.ndim == 2 else ary -def atleast_2d_end(*arys): +def atleast_2d_end(ary): """Like np.atleast2d but if adds dim at axis 1 for 1d arrays.""" - arys = jnp.atleast_1d(*arys) - tup = tuple(ary[:, jnp.newaxis] if ary.ndim == 1 else ary for ary in arys) - if len(tup) == 1: - tup = tup[0] - return tup + ary = jnp.atleast_1d(ary) + return ary[:, jnp.newaxis] if ary.ndim == 1 else ary PRINT_WIDTH = 60 # current longest name is BootstrapRedlConsistency with pre-text diff --git a/tests/test_integrals.py b/tests/test_integrals.py index dd14cc0dcd..f09c47c877 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -738,7 +738,7 @@ def test_z1_first(self): B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots)) pitch_inv = 0.5 intersect = B.solve(pitch_inv, extrapolate=False) - z1, z2 = bounce_points(pitch_inv, knots, B.c, B.derivative().c, check=True) + z1, z2 = bounce_points(pitch_inv, knots, B.c.T, B.derivative().c.T, check=True) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size np.testing.assert_allclose(z1, intersect[0::2]) @@ -753,7 +753,7 @@ def test_z2_first(self): B = CubicHermiteSpline(k, np.cos(k), -np.sin(k)) pitch_inv = 0.5 intersect = B.solve(pitch_inv, extrapolate=False) - z1, z2 = bounce_points(pitch_inv, k, B.c, B.derivative().c, check=True) + z1, z2 = bounce_points(pitch_inv, k, B.c.T, B.derivative().c.T, check=True) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size np.testing.assert_allclose(z1, intersect[1:-1:2]) @@ -772,7 +772,7 @@ def test_z1_before_extrema(self): ) dB_dz = B.derivative() pitch_inv = B(dB_dz.roots(extrapolate=False))[3] - 1e-13 - z1, z2 = bounce_points(pitch_inv, k, B.c, dB_dz.c, check=True) + z1, z2 = bounce_points(pitch_inv, k, B.c.T, dB_dz.c.T, check=True) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size intersect = B.solve(pitch_inv, extrapolate=False) @@ -797,7 +797,7 @@ def test_z2_before_extrema(self): ) dB_dz = B.derivative() pitch_inv = B(dB_dz.roots(extrapolate=False))[2] - z1, z2 = bounce_points(pitch_inv, k, B.c, dB_dz.c, check=True) + z1, z2 = bounce_points(pitch_inv, k, B.c.T, dB_dz.c.T, check=True) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size intersect = B.solve(pitch_inv, extrapolate=False) @@ -819,7 +819,7 @@ def test_extrema_first_and_before_z1(self): dB_dz = B.derivative() pitch_inv = B(dB_dz.roots(extrapolate=False))[2] + 1e-13 z1, z2 = bounce_points( - pitch_inv, k[2:], B.c[:, 2:], dB_dz.c[:, 2:], check=True, plot=False + pitch_inv, k[2:], B.c[:, 2:].T, dB_dz.c[:, 2:].T, check=True, plot=False ) plot_ppoly(B, z1=z1, z2=z2, k=pitch_inv, start=k[2]) z1, z2 = TestBounce1DPoints.filter(z1, z2) @@ -844,7 +844,7 @@ def 
test_extrema_first_and_before_z2(self): ) dB_dz = B.derivative() pitch_inv = B(dB_dz.roots(extrapolate=False))[1] - 1e-13 - z1, z2 = bounce_points(pitch_inv, k, B.c, dB_dz.c, check=True) + z1, z2 = bounce_points(pitch_inv, k, B.c.T, dB_dz.c.T, check=True) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size # Our routine correctly detects intersection, while scipy, jnp.root fails. @@ -865,7 +865,7 @@ def test_get_extrema(self): k, np.cos(k) + 2 * np.sin(-2 * k), -np.sin(k) - 4 * np.cos(-2 * k) ) dB_dz = B.derivative() - ext, B_ext = _get_extrema(k, B.c, dB_dz.c) + ext, B_ext = _get_extrema(k, B.c.T, dB_dz.c.T) mask = ~np.isnan(ext) ext, B_ext = ext[mask], B_ext[mask] idx = np.argsort(ext) @@ -950,6 +950,8 @@ def _adaptive_elliptic(integrand, k): @staticmethod def _fixed_elliptic(integrand, k, deg): + # Can use this test to benchmark quadrature performance. + # Just k = np.atleast_1d(k) a = np.zeros_like(k) b = 2 * np.arcsin(k) @@ -963,7 +965,13 @@ def _fixed_elliptic(integrand, k, deg): @staticmethod def elliptic_incomplete(k2): - """Calculate elliptic integrals for bounce averaged binormal drift.""" + """Calculate elliptic integrals for bounce averaged binormal drift. + + The test is nice because it is independent of all the bounce integrals + and splines. One can test performance of different quadrature methods + by using that method in the ``_fixed_elliptic`` method above. + + """ K_integrand = lambda Z, k: 2 / np.sqrt(k**2 - np.sin(Z / 2) ** 2) * (k / 4) E_integrand = lambda Z, k: 2 * np.sqrt(k**2 - np.sin(Z / 2) ** 2) / (k * 4) # Scipy's elliptic integrals are broken. @@ -1056,7 +1064,7 @@ def test_integrate_checks(self): # 2. Pick flux surfaces, field lines, and how far to follow the field # line in Clebsch coordinates ρ, α, ζ. rho = np.linspace(0.1, 1, 6) - alpha = np.array([0]) + alpha = np.array([0, 0.5]) zeta = np.linspace(-2 * np.pi, 2 * np.pi, 200) eq = get("HELIOTRON") @@ -1069,7 +1077,7 @@ def test_integrate_checks(self): Bounce1D.required_names + ["min_tz |B|", "max_tz |B|", "g_zz"], grid=grid ) # 5. Make the bounce integration operator. - bounce = Bounce1D(grid.source_grid, data, quad=leggauss(3), check=True) + bounce = Bounce1D(grid.source_grid, data, check=True) pitch_inv = bounce.get_pitch_inv( grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 ) @@ -1085,22 +1093,21 @@ def test_integrate_checks(self): check=True, ) avg = safediv(num, den) - assert np.isfinite(avg).all() + assert np.isfinite(avg).all() and np.count_nonzero(avg) # 6. Basic manipulation of the output. - # Sum all bounce integrals across each particular field line. - avg_sum = avg.sum(axis=-1) - # Group the averages by field line. - avg_sum = avg_sum.reshape(pitch_inv.shape[0], rho.size, alpha.size) - # The sum stored at index i, j which denote some flux surface and field line - i, j = 0, 0 - print(avg_sum[:, i, j]) - # is the summed bounce average over all wells along the field line - # given by the field line following coordinates at index [i, j] of nodes - nodes = grid.source_grid.meshgrid_reshape(grid.source_grid.nodes, "raz") - print(nodes[i, j]) - # for the 1/pitch values stored in index [:, i, j] of - print(pitch_inv.reshape(pitch_inv.shape[0], rho.size, alpha.size)[:, i, j]) + # Sum all bounce averages across a particular field line, for every field line. + result = avg.sum(axis=-1) + # Group the result by pitch and flux surface. 
+ result = result.reshape(pitch_inv.shape[0], alpha.size, rho.size) + # The result stored at + p, m, l = 3, 0, 1 + print("Result:", result[p, m, l]) + # corresponds to the 1/λ value + print("1/λ:", pitch_inv[p, m % pitch_inv.shape[1], l % pitch_inv.shape[-1]]) + # for the Clebsch-type field line coordinates + nodes = grid.source_grid.meshgrid_reshape(grid.source_grid.nodes[:, :2], "arz") + print("(α, ρ):", nodes[m, l, 0]) # 7. Plotting utilities. z1, z2 = bounce.points(pitch_inv) @@ -1145,7 +1152,7 @@ def dg_dz(z): z1=np.array(0, ndmin=3), z2=np.array(2 * np.pi, ndmin=3), knots=zeta, - g=bounce._B, + g=bounce.B, dg_dz=bounce._dB_dz, ), rtol=1e-3, @@ -1232,7 +1239,7 @@ def drift_analytic(data): # Exclude singularity not captured by analytic approximation for pitch near # the maximum |B|. (This is captured by the numerical integration). - pitch_inv = get_pitch_inv(np.min(B), np.max(B), 100)[:-1] + pitch_inv = get_pitch_inv(np.min(B), np.max(B), 100).squeeze()[:-1] k2 = 0.5 * ((1 - B0 / pitch_inv) / (epsilon * B0 / pitch_inv) + 1) I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 = ( TestBounce1DQuadrature.elliptic_incomplete(k2) @@ -1253,7 +1260,7 @@ def drift_analytic(data): ) / G0 drift_analytic_den = I_0 / G0 drift_analytic = drift_analytic_num / drift_analytic_den - return drift_analytic, cvdrift, gbdrift, pitch_inv + return drift_analytic, cvdrift, gbdrift, pitch_inv.reshape(-1, 1, 1) @staticmethod def drift_num_integrand(cvdrift, gbdrift, B, pitch): @@ -1329,14 +1336,14 @@ def test_binormal_drift_bounce1d(self): ) f = Bounce1D.reshape_data(grid.source_grid, cvdrift, gbdrift) drift_numerical_num = bounce.integrate( - pitch_inv=pitch_inv[:, np.newaxis], + pitch_inv=pitch_inv, integrand=TestBounce1D.drift_num_integrand, f=f, num_well=1, check=True, ) drift_numerical_den = bounce.integrate( - pitch_inv=pitch_inv[:, np.newaxis], + pitch_inv=pitch_inv, integrand=TestBounce1D.drift_den_integrand, num_well=1, weight=np.ones(zeta.size), @@ -1357,8 +1364,8 @@ def test_binormal_drift_bounce1d(self): ) fig, ax = plt.subplots() - ax.plot(pitch_inv, drift_analytic) - ax.plot(pitch_inv, drift_numerical) + ax.plot(pitch_inv.squeeze(), drift_analytic) + ax.plot(pitch_inv.squeeze(), drift_numerical) return fig @staticmethod diff --git a/tests/test_interp_utils.py b/tests/test_interp_utils.py index f4c2215075..d48a32d6f4 100644 --- a/tests/test_interp_utils.py +++ b/tests/test_interp_utils.py @@ -4,32 +4,42 @@ import pytest from numpy.polynomial.polynomial import polyvander -from desc.integrals.interp_utils import poly_root, polyder_vec, polyval_vec +from desc.integrals.interp_utils import polyder_vec, polyroot_vec, polyval_vec class TestPolyUtils: - """Test polynomial stuff used for local spline interpolation.""" + """Test polynomial utilities used for local spline interpolation in integrals.""" @pytest.mark.unit def test_poly_root(self): """Test vectorized computation of cubic polynomial exact roots.""" - cubic = 4 - c = np.arange(-24, 24).reshape(cubic, 6, -1) * np.pi - # make sure broadcasting won't hide error in implementation + c = np.arange(-24, 24).reshape(4, 6, -1).transpose(-1, 1, 0) + # Ensure broadcasting won't hide error in implementation. 
assert np.unique(c.shape).size == c.ndim - constant = np.broadcast_to(np.arange(c.shape[-1]), c.shape[1:]) - constant = np.stack([constant, constant]) - root = poly_root(c, constant, sort=True) - for i in range(constant.shape[0]): - for j in range(c.shape[1]): - for k in range(c.shape[2]): - d = c[-1, j, k] - constant[i, j, k] - np.testing.assert_allclose( - actual=root[i, j, k], - desired=np.sort(np.roots([*c[:-1, j, k], d])), - ) + k = np.broadcast_to(np.arange(c.shape[-2]), c.shape[:-1]) + # Now increase dimension so that shapes still broadcast, but stuff like + # ``c[...,-1]-=k`` is not allowed because it grows the dimension of ``c``. + # This is needed functionality in ``poly_root`` that requires an awkward + # loop to obtain if using jnp.vectorize. + k = np.stack([k, k * 2 + 1]) + r = polyroot_vec(c, k, sort=True) + for i in range(k.shape[0]): + d = c.copy() + d[..., -1] -= k[i] + # np.roots cannot be vectorized because it strips leading zeros and + # output shape is therefore dynamic. + for idx in np.ndindex(d.shape[:-1]): + np.testing.assert_allclose( + r[(i, *idx)], + np.sort(np.roots(d[idx])), + err_msg=f"Eigenvalue branch of poly_root failed at {i, *idx}.", + ) + + # Now test analytic formula branch, Ensure it filters distinct roots, + # and ensure zero coefficients don't bust computation due to singularities + # in analytic formulae which are not present in iterative eigenvalue scheme. c = np.array( [ [1, 0, 0, 0], @@ -41,54 +51,53 @@ def test_poly_root(self): [0, -6, 11, -2], ] ) - root = poly_root(c.T, sort=True, distinct=True) + r = polyroot_vec(c, sort=True, distinct=True) for j in range(c.shape[0]): - unique_roots = np.unique(np.roots(c[j])) + root = r[j][~np.isnan(r[j])] + unique_root = np.unique(np.roots(c[j])) + assert root.size == unique_root.size np.testing.assert_allclose( - actual=root[j][~np.isnan(root[j])], desired=unique_roots, err_msg=str(j) + root, + unique_root, + err_msg=f"Analytic branch of poly_root failed at {j}.", ) c = np.array([0, 1, -1, -8, 12]) - root = poly_root(c, sort=True, distinct=True) - root = root[~np.isnan(root)] - unique_root = np.unique(np.roots(c)) - assert root.size == unique_root.size - np.testing.assert_allclose(root, unique_root) + r = polyroot_vec(c, sort=True, distinct=True) + r = r[~np.isnan(r)] + unique_r = np.unique(np.roots(c)) + assert r.size == unique_r.size + np.testing.assert_allclose(r, unique_r) @pytest.mark.unit def test_polyder_vec(self): """Test vectorized computation of polynomial derivative.""" - quintic = 6 - c = np.arange(-18, 18).reshape(quintic, 3, -1) * np.pi - # make sure broadcasting won't hide error in implementation + c = np.arange(-18, 18).reshape(3, -1, 6) + # Ensure broadcasting won't hide error in implementation. assert np.unique(c.shape).size == c.ndim - derivative = polyder_vec(c) - desired = np.vectorize(np.polyder, signature="(m)->(n)")(c.T).T - np.testing.assert_allclose(derivative, desired) + np.testing.assert_allclose( + polyder_vec(c), + np.vectorize(np.polyder, signature="(m)->(n)")(c), + ) @pytest.mark.unit def test_polyval_vec(self): """Test vectorized computation of polynomial evaluation.""" def test(x, c): + # Ensure broadcasting won't hide error in implementation. 
+ assert np.unique(x.shape).size == x.ndim + assert np.unique(c.shape).size == c.ndim np.testing.assert_allclose( polyval_vec(x=x, c=c), - np.sum( - polyvander(x, c.shape[0] - 1) * np.moveaxis(np.flipud(c), 0, -1), - axis=-1, - ), + np.sum(polyvander(x, c.shape[-1] - 1) * c[..., ::-1], axis=-1), ) - quartic = 5 - c = np.arange(-60, 60).reshape(quartic, 3, -1) * np.pi - # make sure broadcasting won't hide error in implementation - assert np.unique(c.shape).size == c.ndim - x = np.linspace(0, 20, c.shape[1] * c.shape[2]).reshape(c.shape[1], c.shape[2]) + c = np.arange(-60, 60).reshape(-1, 5, 3) + x = np.linspace(0, 20, np.prod(c.shape[:-1])).reshape(c.shape[:-1]) test(x, c) x = np.stack([x, x * 2], axis=0) x = np.stack([x, x * 2, x * 3, x * 4], axis=0) - # make sure broadcasting won't hide error in implementation - assert np.unique(x.shape).size == x.ndim - assert c.shape[1:] == x.shape[x.ndim - (c.ndim - 1) :] - assert np.unique((c.shape[0],) + x.shape[c.ndim - 1 :]).size == x.ndim - 1 + assert c.shape[:-1] == x.shape[x.ndim - (c.ndim - 1) :] + assert np.unique((c.shape[-1],) + x.shape[c.ndim - 1 :]).size == x.ndim - 1 test(x, c) From 03ff0b1cf158dd1622abec9a3921846f610b358b Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 29 Aug 2024 19:23:02 -0400 Subject: [PATCH 233/241] Make super useful plotting function public and super user-friendly --- desc/integrals/bounce_integral.py | 49 ++++++++++++++++++++++-- desc/integrals/bounce_utils.py | 14 +++++-- tests/baseline/test_bounce1d_checks.png | Bin 0 -> 70191 bytes tests/test_integrals.py | 15 +++++--- 4 files changed, 64 insertions(+), 14 deletions(-) create mode 100644 tests/baseline/test_bounce1d_checks.png diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index fbe2b5f03a..020f365bc6 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -1,12 +1,13 @@ """Methods for computing bounce integrals (singular or otherwise).""" import numpy as np -from interpax import CubicHermiteSpline +from interpax import CubicHermiteSpline, PPoly from orthax.legendre import leggauss from desc.backend import jnp from desc.integrals.bounce_utils import ( _check_bounce_points, + _set_default_plot_kwargs, bounce_points, bounce_quadrature, get_pitch_inv, @@ -20,7 +21,7 @@ grad_automorphism_sin, ) from desc.io import IOAble -from desc.utils import atleast_nd, setdefault, warnif +from desc.utils import atleast_nd, errorif, setdefault, warnif class Bounce1D(IOAble): @@ -79,7 +80,7 @@ class Bounce1D(IOAble): Examples -------- - See ``tests/test_integrals.py::TestBounce1D::test_integrate_checks``. + See ``tests/test_integrals.py::TestBounce1D::test_bounce1d_checks``. Attributes ---------- @@ -97,7 +98,6 @@ class Bounce1D(IOAble): """ required_names = ["B^zeta", "B^zeta_z|r,a", "|B|", "|B|_z|r,a"] - plot_ppoly = staticmethod(plot_ppoly) get_pitch_inv = staticmethod(get_pitch_inv) def __init__( @@ -379,3 +379,44 @@ def integrate( assert result.shape[0] == pitch_inv.shape[0] assert result.shape[-1] == setdefault(num_well, np.prod(self._dB_dz.shape[-2:])) return result + + def plot(self, pitch_inv, m, l, **kwargs): + """Plot the field line and bounce points of the given pitch angles. + + Parameters + ---------- + pitch_inv : jnp.ndarray + Shape (P, ). + 1/λ values to evaluate the bounce integral at the field line + specified by the (α(m), ρ(l)) Clebsch coordinate. + m, l : int, int + Indices into the nodes of the grid supplied to make this object. 
+ ``alpha, rho = grid.meshgrid_reshape(grid.nodes[:, :2], "arz")[m, l, 0]``. + kwargs + Keyword arguments into ``desc/integrals/bounce_utils.py::plot_ppoly``. + + Returns + ------- + fig, ax + Matplotlib (fig, ax) tuple. + + """ + pitch_inv = jnp.atleast_1d(jnp.squeeze(pitch_inv)) + errorif( + pitch_inv.ndim != 1, + msg=f"Got pitch_inv.ndim={pitch_inv.ndim}, but expected 1.", + ) + z1, z2 = bounce_points( + pitch_inv[:, jnp.newaxis, jnp.newaxis], + self._zeta, + self.B[m, l], + self._dB_dz[m, l], + ) + fig, ax = plot_ppoly( + ppoly=PPoly(self.B[m, l].T, self._zeta), + z1=z1, + z2=z2, + k=pitch_inv, + **_set_default_plot_kwargs(kwargs), + ) + return fig, ax diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index 7b2984c579..75acf2ed66 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -223,9 +223,7 @@ def bounce_points( return z1, z2 -def _check_bounce_points(z1, z2, pitch_inv, knots, B, plot=True, **kwargs): - """Check that bounce points are computed correctly.""" - eps = kwargs.pop("eps", jnp.finfo(jnp.array(1.0).dtype).eps * 10) +def _set_default_plot_kwargs(kwargs): kwargs.setdefault( "title", r"Intersects $\zeta$ in epigraph($\vert B \vert$) s.t. " @@ -234,6 +232,12 @@ def _check_bounce_points(z1, z2, pitch_inv, knots, B, plot=True, **kwargs): kwargs.setdefault("klabel", r"$1/\lambda$") kwargs.setdefault("hlabel", r"$\zeta$") kwargs.setdefault("vlabel", r"$\vert B \vert$") + return kwargs + + +def _check_bounce_points(z1, z2, pitch_inv, knots, B, plot=True, **kwargs): + """Check that bounce points are computed correctly.""" + kwargs = _set_default_plot_kwargs(kwargs) plots = [] assert z1.shape == z2.shape @@ -244,6 +248,7 @@ def _check_bounce_points(z1, z2, pitch_inv, knots, B, plot=True, **kwargs): err_1 = jnp.any(z1 > z2, axis=-1) err_2 = jnp.any(z1[..., 1:] < z2[..., :-1], axis=-1) + eps = kwargs.pop("eps", jnp.finfo(jnp.array(1.0).dtype).eps * 10) for ml in np.ndindex(B.shape[:-2]): Bs = PPoly(B[ml].T, knots) for p in range(pitch_inv.shape[0]): @@ -799,7 +804,8 @@ def plot_ppoly( Returns ------- - fig, ax : matplotlib figure and axes + fig, ax + Matplotlib (fig, ax) tuple. 
""" fig, ax = plt.subplots() diff --git a/tests/baseline/test_bounce1d_checks.png b/tests/baseline/test_bounce1d_checks.png new file mode 100644 index 0000000000000000000000000000000000000000..68d13814932f1e8ef72aaa5fb59ae74e58b90b80 GIT binary patch literal 70191 zcmeFYWmKF&6D~OT;Db914#6R4fB->*2M-e5-JO9D+zA#41W1Cr2lwFa?hrh7}qLHG3Kp;#xS;==G5CRkgg4YHk15duV z2RQ=&1YM=HUDX`ST|JDP%|MDqu8y`2uC`XjP@_etE;1n5C@0d z|17}n;B3J$L2^V5G=l0VtK$L!VH-Wa;qqVSS%Kg{AUR2KbP(%YTL;{_T?tA>(zYJb#{(Go$vJ$xCmf3W!J9P;m_Cme?$@t=QU8zbZ2or#|Fzx!%O)BGhM5TA@bJ)czod@Se2`dxJf>=i{J+Ae z&_Tf8N=vb{47}(gA|p{A?)u>R;2oMT2<;mWYpa$p{#$Nz@b%M;@}l>C$;Zd$O_*sQ zmq`!Ekf@@v^6vrPHvGm~C{+)K|7m2^QWQGYpPB5yTep*JQ3}%h5248a{aR33+GAI@ zH0{ud#Rs$i$9z~=S&8eiJt#O((op_;U?5kN0{%bc+e2?%Ex-By&*~=mZ&C8WU;m#t zWtZ0wTiv2_ZMvhl#DmaFl~yd#$-FlR7yF}P|Gs7hIOZ@#STy1g3He+ev@bTeRQO!k zzeu3@kE0G-h0MGE0RL~F|7UBO|2=E}|FE#7j<_SRGf?_p8*OfF(KmYxu+^<5>)H#- zv;B8gu$yQ4-&zSjoKMIdU!0#~>~RUI;&)Svzz2%>TpAuNHq18r@NW;Mxzwh=`S0o< zfl+h#mRbA%V|&E2rof0IU?eLDmL(Pz`8wq`7ik{7A!G$3x z*cd_$Y=mLx5r%HOe_nf~tUvWOod}ap{4}eOLKG+7I{a04+U?OAmQ`|1G99J!Px&sW z_yi15Tu)_|g~f>C72&GD{jUe=rsJ%U$M&=G7%~(ev#mKhs5s;cL=x`9-H?H6aVt{n zp$|q7Q^p|!4kDMV3L;%3Vw2Iew}eF_38GVbqRYyg6VY2cVS;*eWu&8y#+wv)5ZaL+ z*xzB3wJrte=?U%TGOLIG9l{X%n~d>>0dz%p!|4qi8=Ud6=TyGlezkg~q2?O3-G z0@Mmar;N`E8!nhJPD|R;RqJz3{dFum(6`LOTvQ#{a}-?IEvP1 z`oc{~USG8+6jXmwP+~_he$0qM7a5f$f-zy9lyzth>5>xvXn|9^*n58X)RC_0zT&a= zp6f4bUBrK6GtxcV&o6ky1sV4H>B51EdWUnFz~ZKXVd>;?=*Xc15gEb@bN0yMdiW=2 z93Oi}`WN+RBUQZ*t=~UHK!*_!RogatDc`jH`P4g-nI^i7ou~smpz55r(t_>Crl1-# zIn@`xxqbNF4wya8)vedTfgW2#au`KCgK^Sz`rEKtsP^RKlfmF|2ndeRM=9rL)i!Hw z3^c4AmiZe|JcDR*8ves0Hzd;P&ey`?-eV?7%Q_)kI`CpYFq0AilMXDAv~6xU!61e| z^_u^OBU+~R8Db01M%}})pWUtUNxDXBJDyEu+vg7Eh)M&Flao#{-37y%FVL`T*mJlv zjLx8h=zpvBP0j?^UymK^iYEm?=6uTUt6@ z`M8&!GcZ2hVbJ*NC4VFo9PKH5P@`dDXOpvRJtepI`0auiP?zF4CR=ZkC_+fEYzls9 z-%9J};ZWAXhl3bT(wfz5J+&s;JXO@G{#OdJ!}mQ0sG%iWm7m6G+$0+ls)-N|1t0FZ zokZ`Jy9WGLw~&Bz&3*c(rWY>yNc3Q)wPJe?uzO-qcvIX5N^58M3{ID>JF zdeo$;9?U*JTW^Drw70sU<@Rjxy zC$FD!AN0WM65nbGuXkkvAMR@W48{v8x&4G}>v%#XM(8B0nD6w95S&**jZ}y8KMv0j z#WBdAkZ9I+3j^hoj$m>Bd3X89H8;W!{LFU$Nj zqEp35;|d%{GotNc{^40TR+uKrYvM^NIMh$Ge(<9^Fi@LEE8rOaXX^=gV^ctPfXv$Y zBapGgAbLLrj#xq3_vqU6CU5r}AYGrtEEs(bkESngBp=3eJEh6vIwUR|HDzRO^`PJW z*QOZoe3GJ46=XvKGp8KCpJPT!rAKYz%PB=p%SLW`TJm-Q1`s1Xeot`>G=$gvavioC z?hAi<2buE6k-Ypj+Qqcs1C1gC+;C1AiCAh}tg4ZP!&@3?LxYnS8(?O=C}KA;XO|0Q znpxZ13_`h3S{ys>aliaLnS~wHAYy@Mhl=>J^eMMAd0`$E5@FS479Q83v7j~G&^ zffHOJ?B?b{R1)47qS~O@O-&p@umjY6< ze}qkkG+=8kxbr>+ZZ|zUY5s%W;5a|9ONh;<L0f!; zfGHLKZ^fV~k`3`zjThbMHXgH3msN`F&aq>dltq-Z4iJY+oO!1$57~K@u%WNgb}l(k z&yNeo^#>;FU1m|kW_9KIMHU)%@NdD=k>aNNdlt$Ej1k9s3P`TtAJ8izF`otSx^K_# zdSy4Ub4jskX|5mc#NjY}e7XP>amX9==Xs40X3`4_K|ajFMox2Db;Z)h3}b??Nafn- z24vvws?Nj)kqdk)(MRN(y!LGy%bkDiV7eIE3F|`?AU|+m^CpkxewZTb{!9)L;e|@O zUjN4VRv>o=_)?gO=mI7byng=%AMx7b00sdYk8h>4f5ns97)XZ$HBG?n1Rjg;xZ7qs!&jK&BK6r22qVpSLq z0!BTt7s&U{P>JhvL|H*72QM8Tht4c!zrZ`;A(6Rr0^yCe__GX38A*{8tTcT!8m#w5 z60Nv=>3_FHOL7+t4S}~3X0P|-#-ic~-uW8+@T5i->)k5?Li`BNY7RH%=L2FDo2k`5 zarY)LlN1-re+6`-|O*!@;JP}(~neCr0VmHatg_7w(isn^tz)@IRHyjG>2(JL}p( zNOgmiGcEmW6$5fE$~Lt7jUk-t`Mv?3OEPf`r1n47!e^a6j@JS%!?7z*oCm(bFSB1# zs`dIU(XL2O&VPS#)gJ#9e3mqOef|XRi5dO+mcBouk-#iyYHHWA*7zZnqW=5=EBKfF zkY|~v&0QqOzN_}>9z$>DYV77#=;hIUL!rVIiVR47yyB%mx2ERyr`X7PbkKzose$xZ8LqrGEwbd?8i6FMz z=O-C2Y6E$sDqKhhbkHI-HAwvL=|CWhh;`R+SlG*%94KuV{PNyBHptTqiA5niEIEq% z$!-A^H2W%XXfs6e;;ceTvqRV&!yto`=23+7_Sr@DKlfelJ;tCr}^uflA!pgClZ7%JmHP!R%HX9@I;Noz_4DX|-?PB2JT6uXWG zG3y6}&?Po!P&c%;<4`X`ZyB+jLh4&vfp$O}Yogx>=f?YgV51rmwLFNvx>3lxS~ 
zZ0Fo}R%+Jkgi3cgGFBVL9#E5wXc*k`rXJH1qa{HrwmO;{$N(rML#)!5}$vbNIH4R`ahKIugR8pQtMMoZw6`{0% z%LN2VflOpU&7Iw}el?G^RVB;XTL5V7)g2QZKPfxI}wc{!Ewpmcv94QS&S$#lWO|d8w?k=bQJLi1o9qhB*8OG?u zzTf53EVsI7x#w48CI*VNjQlPZYpqe0dC~=M|4_E)c)XgOZ1efc7k`jWzLD4s55Rh0 z-Pxojii`-9;<@IA%+u$_-LbTvHR2)#GeTR7UO5>sMjgqqfg~od%$;3Y(ONATj!_MK zGHds`Sq*qn^-tp80DB;-0uM4fWXS6-({{Uc@P5a)8-APf<#?rHCSvAmX2f*e;hM?b z+E}cO&JYyBH@eeHqHu>;p%`FA@@oL9&Y}Q9-Ti%!ncNIM7ZNW`bRi_%sP(i~HcO4p zeT6qgg%#YM@}V|k)GJg3?F&s!bzR-$jNR63fHU|wwkHUpyYGF8eFLzt0&pw)V^7wG z22ei2Q)JhhIc8oenX?{970fkd@6Y{~r+?iR&9V=#D@{M1I6bBxDYKrzNS%rlJYM&G zdDgxlw3?q7X>9(`!RZ4b+WXC7fPyo@^$!lvI{&j;D|tSgNJ!?s5vWhi!P2)H(1c1j z&wDqf188g}tCkC9gz9lu;`JO9<0g1Y{;4fraHA;kmq7yZNTg44sOc9821XoWnq?y5 zq(OP1(u@VPFK$%f^Xa@Ra3o9fedPN3V%mkFKj*3M(%+v3w|wE%)_j+q$4;L&Z0{Se zv{BR5?O=)&|Cz6i2*|d-`3V|L{!cSM&J7aucp)t%lYjjnl417JsJX#MVBBoV_jZkz zZF|iHw}$_lq;-9#oxSMI^SQ{wPJX+U;#euJ1md9H#o75}+qK%Rg1KSK!a48^=g?x< z*)V=IRT5!bB9ys9aY^6z_3a9OxjVp%ETT+jwTJFn)DcL5+gx4POONezp+I;I8OOb% zmRqZcZ?~}Ei}A@2UV8>xDMo3PzIm>*iN9Xi?Ga(?xs|Nv(8gjlR&`UpAQXOHbQ`T8 zBbNmb`J;(jb_<7EOTfd9uY-krn1C_5qj3elSds!DyH0YBD~u#kk`uqWWJvtcXcYt3 zsSA>)xNh^K%7NuCWo2<-#1wg*dbWg7t4~qHrQ0Cc!Dz`)Dq;}GXwBK`kuoY?nmxSn z+VX{FDrMBlRVX44$x}k1+^V>YGGGfcy&>EY$Jw#JHDKT#UhBTc=e#obm=A{s&@kUq zQ;^TIL=74Dfxx$#sQ-9CxfcmWEa+Fg6-Yt@7FLhDhP~4karAhG$4i>{`)HRs^vnl`Cu$ z6Xmw_^te{}3ZQu3-z~52VwKzozGGp8drJz}HHJAEiV7LY>&P4L^^0a{eieT~;@3b_!-IXCEF>6YL zx~fF;=Zp$YtqW&ewy}y|`A+J4KR|Jq;R57TWlIn?NZnijd(++*fW~^JsfkJwunef8 zMh3*a3T`d>eM)_9E(!lP=n|W3&2$jZmtOM~u!H3oYnd*bm6>^}{_(Q%!%h9V3~KN# zkpu|_e=BK_vKAF}me&T*^~AvjT$ES^(})Gs3L!`sY@Gy;DQi@z=mwGz83?tc-~mX` zLxBXYU2i};F;_cudz%7JIQ9T*K}489MaFrV`(LuN2_sSbR$dd&AgL0yAZQ7Vy3Cy5 z_qcAJPIM5c&XXLF=lIR47*I$*&R;$-(-a6|h<$gwGeEjLA@mU<atlPi&TeIcd*5GNOeC}>bu&hc3Qx*N2*5QIy$Oddx zRYJAGoom&${^?$f#M@PJ5xp->vuR9=L(4f^&DO#zh>HbY7Qd)etkM;;Om6rH&@8}z z5Nd@PDe>6B|K#-|s3&Da)7>H-mZrAWnJjLW$2@(i0@8g^_RK1b>q~-F<$coyi=PkM ztN}rNC#&^()SbTG(aty}ZU_rn+19t$p#<0Gv&~&Oad*oN-KizIy0kk`6K<(!RQhM| zl{DDGRpYXYUWR+$^5?`ncf9bdaHH{W^lOt8n9<7Irzu1_bf7Y0hNqYCMr`+`s(guU zRx5=}szJu`u?ffSpx^njmFYzRue&y+n6Rl|k4bwF)=-Ai)+DFY`N<7RY`a_7*u;S- zp4bV75Wr}&y|QxHSNF#{qom>yzblhH_C{4LJ9NdV-Boxw_?iGo)B0hvGttj-_(9Q- z-N?^v=Fys$*q>&R4u@}T^pp{ZI4EQe!A$;M+QBg4({fB9h~9_IbwwvQk@#u#M0C+% z0>Xu|{wx}HzfQ1Jbc?nzCKeLZRBdq%zFnYZQW2Q;J}@W8wMb+$cuKgKfcL0@ci!#0 zl*aRTn?qODWR5`E;R$FGJ^}No)iIM>lL>aL>z=n9GdKqf>NmMckA%p zHjV`g)AkfWITza4A1audSjcqfYhv4$@>P{V`lgr5&FM43mpxXu!}c7fPp++rk_v=O zl??u0Am#&;DXn?hUSW^|LmdagyGN59j||psUr?k$2RPr2&?Rc1?xn~He?dqdirbh| z+|{IJ&Xz!@4Ve8pk(1y@{2^wh=soySU+jE%Ghg^Mld7Qerp{J*V$qw+KSUxBCyz68 zsp(%EIx@w?)vD}N>}Yp{-iC4%jy9^hw+5Fn&xMw>t8^uvc9ZFn!p-<2UBWXqHc^Y7 zugP?iX%bKB%%?)ngO2el%yA%_5c&Y{xLjaU|(N`5%Ma_te#-#MZRs&sr56!bAK3I=fyz_s| zCO9bjjsp;J<+|#6fDrn;IjfJBFZyfLj|dP5b}fLtM|atrIP3X-`u!Nd%kL6Fjvr=R z(ITm6Q*W71ApjbFswOb`0Ilb{MRK;bSC z)nnm!&M#OLn@DSpuc)CcG<$B|q1#Swb}R+sW(O4aZr=~D$xmP~rP4&W>>b@kRw}@S z#O!xXw3mhT%@U7anvJr~06P`qUo$lzOMuLlH277P`DKBJmv60|(d$gK-@xvh<= zPqI`Pq|Hxpx)fiSLh>JBJUW4`H-WZb>hya}nxVKrc;GiV`{VXyM1QWnn>gO7{ZE04 zWBlL`AjX>Xwe!Q$5k4-RTFvqr zbcs8{$~(#_Jg+&bho@_(b{wXTwnSX!i2cfxWZI!5d6LUOQtpP7>1Vz}?VoKg4}{%h zAcQGu^#XO#!$OREkr(e#>dA6?%*_p}hwws3X6Y_P}Lr7SFNH)p{IpdQ?chSFLxl12JJheKH z#3`1+4pqgX+{IdkwR%nsRvjnD9@aOaI`j7j>+9F4B2b5Q@!?%&E%v7s<JEOBd!coy70lyABd| znE%?}X5>RJqoS#ehb}@gnRAKw#hwNWBta;G)Kri3Pz5s$_&eu$O7>%-3!Bj2#=RyV zArc(E!gl&JajQVZ{io~RSyH^!HkRrJ0?q_XQNeorW)D{s@?}PGG}~Rr=HjvO_5g|`%N-1=fQ{sD?XBU>7o8T{Aq zgXIoA<+(I;!9r)3g)#AFvbRj8hoUT4V{eWT7z213TWW*^8>tzhA!^$t)qSlp(6L)W 
zX!Vd?vtYKgS7ioxbDDJ}34F0KYyP_^%*(@WIRJ}R1kw4(R3Sa;N95&!)5As!BIytSCFa{!$lIps*65ke0b5nHKUc*0_9v`kTcyo@= z?cLfYBatOKMdauh>xCJpKM_z?#z|Su0~6ROKl-Cr{prlFoR>mL$r2 z_Q33Uh!rH6GMd-$i{+l8!a!_g)^2`h7ijl^KW$Uy%{?5~(5=BZt8OYXYKveQ-#|LI zAbzy4$u9>=1J^tH`ud}wk#1UOFaEPmZZ(zcUlD>iME%KU?utRc!SEz&^NXTaj|J>AVQ67`TE==T}W`A z%MiE?01@t8Wj?8;t6GeevW)Zc>^6k_Jos&_bl=_vo2td^ns%>A0{TgFsY7ZnT!-O)tGuMzcuTAgwDK>$9Z@&32%sQ&IE9d##o0wn zdya%X;cF#8@%`-s)$XBVd{o9vt}`F@W4o$4EHALzqYqHN258Y`4lCHG1utEs<5riD zx}jk=Xr(*=o3o-*&~@jTDLQJ)s=dr}faL{XuwU z_fH`*vPpkR5bKuID`@WoKL6{HUJcz|X@+1yYi>F4Gc$5tv+M?OP)F=Uv4JPGpe?#E zUxDSqIs7uKp`Xv?X$9U?x{9L%CAaU~eVWi9FLm%7veE`f71(CG`|IdH@oQ5pk)?aW z(?Lh=qN($ac==ayE8}bjtvou)F0Iz2csP3=0)GdW@Nm1$HeBqF>Fy`@HkBWK{#_@b zjYE5Prhcns;1fTd|e!3CqHT&~2cc+dNX!|yn z_#*omxs!dAR5sYN6ms1LPRf(O=9$Ynvja-43DeR*A;Ik})`0R$J@rf%b|R;wfuP|1 zms9LDY8B_z;pRm1w6-JMR^@8h2p*_3lkmyf=}K7_}}RYXLpJE@kfIV4$AQq$R8r+YPMMzZKRcfuWo6)%ohpvu3#|=BCpc z798|Zl^4X)&3Od~7@3A>T}B(hhxbh)G%xv`906v9^L5O9X&`7fZCV1!IcD20XepyI z1nlTFGY|>=10X7{q;$EjuMhMj5-0WhkpEE_l$!xuXf{)M$pC5|q*^-_laWi(wXKaM zg-;SZ9t*nzW(XO%d>=43ypzeFqXBCOYPaUmHuM}wv%7r@Yy-ZSQa&N^0wNB=fTO!x z9^juIcUL990TVaCx`Dn@fJS*d_Wn*LL^|68Tso#I{%E!`2Usnvz5SeV?{M5LZH+aZ zIZ?rsgI;Uv+HRUk)r3W+`xZrzvl$0Bs9dq03?-a`x~lgi&vXPCzToXEQjdmQxYUOQ*wVa4%q$HH41v{oHJiR8ND zS;xh!L)+V#RwnW-n~Z7($0j{wmx~W0T}o=_ocd7)oz%!F)QRj~9BqCz#s%+awWa=#(8SRie||y=r%cs(N$s6 ze?kIRT9NqIje<~S_^MvJ-=1XGLN@ha(U*@CQhT?}H01}y+GYJZ7}l1uC6)eOINo)4 zGG(}d*c~antiKO(h7+tkBc7aJzjb{vX`)@YdA+C@wpH+dLTGyNgvlR^bwgYN& z!<`kkd5^x?TAQs#s8dP2rgt~6om2ovU82CTlKq7<6<@RLg z{&NfWZbp$Ue_tEG50Z!D4X*Iw{p;lTKYo?L@vyT4) zSNFKis!WaS?@|=+yP9{zTUWbzkPmcnHDF<$ph0h%G|@;XPhj3Ho~opYJ`Xk#l-+_K z?Ozo#SAkKIz8J8p1tY5IY(&NN8oi?8$*{r43#FkfGIrSA1WRsTFOtKtNx@IW6d;LH zH={qK^TP-={1{>+hol1n?KfqG(Vx{v+?T9sGesaN(>p2 z=6?UKj2j0tXDj#!%W#rY=Q@MDN_XfaF;YY5nC;*_jX~2eJfB$))cA zy(inK+wynl!#1%DJWZE*p)Tz-ppPm>(Sc>^HIXdg)Mge2!DLa;R6ZRSq7;mXFDBTh ztHQ{r^|Bk+&C@;NWJ4)pR}-MOo6NElh%rb{0UaIfvLhL_j>c0tMqG2&P0ZLBNVlVg zjLKSr<{chs#^Yc>obsj=3w?`=@;%Z_56tR#pb$B}u>egOxd(-mqGDa$m$iWe)PWQl zn)r&$mvnvmejBya2Zf14_=~IyIN|IbQR;&XzBe(et9DIfuPx(9 z)T?dxiHlRkdc9sz{qb5b9_2UJsC%x)1HBGD;8rA4DGMoq1Ktv7p-tQ??08+b zWAStiLY)_0p?EiD<#5$j@tw18rXi(Zl()>eiRtDi59KaGhy=QY)ua%+T-xMxu$rtblb~3|Fh+4RoIS|AWbIs zX#$J}sg;j4K7^5r#YDSFj-&H%n4i&s#!bYIYV_(2mVjm|%n z=O6GTc%g~W93OsPfDUc_wS_ydVUALLs$)M69J1)zrrr0*$I3Wox~&2@Eo%=*JwMc* z?p-GLc{m{yqj*d5$1UABr;0YyXd#U>xy~dmY&XxFqejy8bSJY%tKBFhsZ=Lt`RGrK z zO-qKXK#n|rsmD}kt`rZ6_!eeHY@;dPDLn7KoY#?q^XY=EP>+ZWG0Yj)6^FAU_`X9J?V~$A%pF-{yZJT4PW& z0`h3rK~K8n-(q^2sM5UN`qFVeu_9YVAYNUYoVW$P`^2hj1-SR2eX08WKbY(iTxAwd zKejp`U2)D+F$i#J^kqadln)`y_wLdQO~CnGCOklNsLPfotZvPP;o77G7DlRDX2qsN z6L1FHK5Nr{fSdClxM-c#`2b}EmIa_sa3znS6WFCp(8k^(hsCe(U$UltL=*^IJ~v4v z#D!TQde~R;#dW?r&@{F1{Xq<0k?9B=VTxgkPISk4?~&ZUZWqKKeXVMb zHcPraN5Nbp4wi)}9=k(&p18TEsiLv{{Tk3irvrP^l+e0lLAaJFsn4iQDDfAFK}>z| zx_t{)&{1@dTXng=)f_QGz1LT$vEur&BaUy)((uc{AN3CBu!jr{AnsAIH|~zV4;F?f zfdM@vv=#*?-db64j1GVB$ND>t_cpq)X4gB2guA)P$fX?j8_R92E0e`{Ho-Z!v(2OA z>AMQyr{?~%Tcbc4SZF_UsqlJeB7G!qHMSgDMVU=abpn@hV%xezuT=H|Z(1>i$m+rn5?xCDcVy5< z>Tsw#po;Bg4z$3mp)|Ocf3#PpndTg%V2_rnFWM9u*Mk1@`VDbh5OlnN7GKbjO9fg1 zI@EYUAA^o8<5J?$$aRX>3QP+Vbudl`#3t^RUJ@89GwGbD~D4%)l2E+GLNx=m`1u&kVNVp?NxcV4((GakfUljc-;V>aPq4bRy`^qC%iICUo3{ z(z>DJK4kd^9sm99n>H<8boub~E}%cv&i}0%-{+M}Ytvz3=t)ZM6XUPRjIvOV0>|U| z4eQ8W>P3#PXl}y;7Ld%4Y*=o5@tcYPdR>ovy|gE46c6vkYCLOw*R& z6Kg+`Uhw!1whGLvQuVv$&hN1ID!%)>?N0?(&c*5Ok76WIViPzOFnVr5o7BjK-)bJM zbnNgvI&&-ll_LpGw>e_N+q0?*li2~48WnuaOvB#newnWQ0#9zsM#m)+Du3F%i|RdSsfku zr(^O>x;(XrFV0F`j(&Q#(rD%io_OibouoZM%Aw7p&NK;{KqP)cm(WD#n9D>L2nW-U 
[remainder of base85-encoded binary image data from the previous patch omitted]

From: unalmis
Date: Thu, 29 Aug 2024 19:50:57 -0400
Subject: [PATCH 234/241] Increase tolerance for plotting test

---
 desc/integrals/bounce_utils.py          |  4 +++-
 tests/baseline/test_bounce1d_checks.png | Bin 70191 -> 66853 bytes
 tests/test_integrals.py                 | 11 ++++++++---
 3 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py
index 75acf2ed66..efb08e9451 100644
--- a/desc/integrals/bounce_utils.py
+++ b/desc/integrals/bounce_utils.py
@@ -763,6 +763,7 @@ def plot_ppoly(
     stop=None,
     include_knots=False,
     knot_transparency=0.1,
+    include_legend=True,
 ):
     """Plot the piecewise polynomial ``ppoly``.

@@ -836,7 +837,8 @@ def plot_ppoly(
     )
     ax.set_xlabel(hlabel)
     ax.set_ylabel(vlabel)
-    ax.legend(legend.values(), legend.keys(), loc="lower right")
+    if include_legend:
+        ax.legend(legend.values(), legend.keys(), loc="lower right")
     ax.set_title(title)
     plt.tight_layout()
     if show:
diff --git a/tests/baseline/test_bounce1d_checks.png b/tests/baseline/test_bounce1d_checks.png
index 68d13814932f1e8ef72aaa5fb59ae74e58b90b80..51e5a4d94f2a6a677891f57970122ab3c32a0ea4 100644
GIT binary patch
delta 26938
[base85-encoded binary image data omitted]
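For readers skimming the series, a minimal usage sketch of the new ``include_legend`` switch (this sketch is not part of the patch). The spline ``B_ppoly`` built below is a made-up stand-in for the |B| spline that the bounce utilities normally construct, and only keyword arguments visible elsewhere in this series (``ppoly``, ``k``, ``include_legend``, ``show``) are used:

    import numpy as np
    from scipy.interpolate import PPoly

    from desc.integrals.bounce_utils import plot_ppoly

    # Placeholder piecewise-constant "|B|" spline on one field line; in practice
    # this comes from the cubic spline data held by Bounce1D.
    knots = np.linspace(0, 2 * np.pi, 10)
    coef = np.stack([np.zeros(9), np.zeros(9), np.zeros(9), 1 + 0.1 * np.arange(9)])
    B_ppoly = PPoly(coef, knots)

    # Suppress the legend, e.g. for baseline test images where it would be clutter.
    plot_ppoly(ppoly=B_ppoly, k=np.array([1.45]), include_legend=False, show=False)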
z<4!v2!&IRxEs(_DW@ldom4hJ*r34TKX(BtdRyy>iOd=Q=hsh#gwCbs{tay;CgE%bMDMb2H`?{rA>C?lMmcGrABFVi* zv>;ii0$bZo?HrPjTn1?MZbrjDIh88C?@u-J)<4;)!pT69rz>~bcXKVhjK940Pt%YU z&(^U3`{!v_K~(gmz?7+%tP3Vt?p;O|zXV6)j+1n@{quCmUL0eBRac2D9hlabo+mYu zHVpn(Vh>jw#>+#(!--3O0-I03_V-+_=PQ)uBDlF( z?$?4K1lz4qcRXr<#i9VYf&Tl2PoSjQ8gZA-6!ykNhc;2*?dmwkE-W`7kHWcD=7T{_{Z4lRSm1ZlZg5x}0*w#ZVd-78Gf ztJ(%~vw+#mU4a#rjtV#p)b|-jP62g2X<(yN2WTxNi2cz`wRORCRk`!xz{}&CsHMaq z?!Z;Lb9;`^n}S!u%8w!8*%x&2=5f0vKqoWBn#73VhxbKv@%8RIWg4rjmLOVnslwB6owRgoRLx%YE9MHEVGo%St6+r)Yf|HSmA*e9-&wT8 z=)J9?vM?@|ts@orXPD^Dos%_@=pf@L#xnJPiI(fvGA+(ec5;Q~VxkFfnzifC&fH*n zmA|jfPM7jL4gZl7VMRjLSG*K@Cc<`y3lUVs%Hr8He)?}Ea|+_*ZuO?n2nc8;_F5qh za^fbd4BJc(q(-I*HM>FcCu#xGRmypLgZ$prtCZi8)vN_~nUBPj%5*4EvU7Lr0b<8H z*ye#2=xl}rZoBnsgM)*=tWu0R#hVZA%m$$Qi^CL_eMW{we^$75d>-7qr!3#Obx+5g zI5$(J+)kEIv&Ub))9aoJ96wF$&MK<51tn6*gP>f)?b#M2X*UlND#zmX~mcn6*u2oqV@Ii7OBveY=|oAO>SN1z%2tT(!04mlw7W>#&PPj zTO;T>6v_p;SRmJ{bz)k}^ygoLdo!=W-0vn^sd>1+{O*Kg^k+W{zw@y-moKX8WJhWj<7OM%F)yF>B3ozfwH)V4HFMWdNA;d{0zL{arqlwD&VD zBAHTlTGeNZn*I3O!0~URK=}i4WOZ%(?o7F#=fF4NkMIEDX;+eB{r7dMqk*0-+zIVN zH=P3+%lC?O9b`jCDNX&Zqymdb$?Hj-1HE!%-O0%faxgC1@KZlFX>!WhpX^2I!nTN% zp$YHJ-Jae8FYYv))Nk_BicuqR>BDAc+KbU3S{^RqY(uwKf zFy#bik+3vFniOp4zf%qK7<_&GG7LN(!8!<(u6LcoC=D>e=~$$ z0@Fj)5$K4?SFx14P3g<~(2dzjj-mk5W4p6&gwsu1>bo0u{oh}ldLLb(%GQI{8}vMx z_dXHYkJqf>&x(65DK*@EWQ|W=j_cg}GgHo2ge+~=mh$&@*U&K;t;fst0oxZr#5oSA?XTA+XROOElm&TG&Dj6YeoV8p*rhgoHFeXFHIiktTi>rrhQK?E#k zf9g6cKc7#pnVx$|oN4G9>tROPP{OpcGHxb$Llkk|ht2{sU6Mv?t1UE`3&I!Obdm?2 zJ~qr{9&`P&MtqW(z_V7|p!ePj56F3~o@eG%7P0!Oc*EYpVMmVV-B%hnf|OB!$<>`Z z&103D9>*WY>6qs;OEZgUV=>cVNnR^Idi8CfGvYF6a@e%zYi%ZreSE*Y)B7{zP{d^v z$+SX=_9OB8%$=46 zHI&Ho&SCUG5Vq3)i#vv?fk0FqDATt_k77O zF4mR=N)igos_*YM0OUyNGMBXN^CyZjfgU?qwqJ~G$`*uW4nAPMA(Y~k{1rDPF#F9R z?6jrRw9NH;L-j%d&SRIRx&fH_Z7KftPh|nFII8e81`K~OjP|6=@HLw)j=#Mx#y|0% zb|xy>v4MqY&5%kmOW?Y*KJ(RVLJnnbV+@Q1)uy9pD(nFlymo0rO|q&71A+PWRBg>TS5>rzhwxK1^`=K;i`nMj0xFa>2E@?nPRh zHA*6A=@}p0i+24NW2)Uy47p4Wyz-s2(&v1b1=*_?CEC6|{bbcg=jZJ7G-X6HABya78+G^*4KDPJpyIEwRIc3|(Unil9>CVIv50RDhx?_Mu$C1oYUI>=}SnQZ5sVhgXdXohYHKjdng@Vf{;3M z-Fz5eBY{eM{9TDu34iXiQ~djtw6{zk9!g$g1SnIg&g-{2D*gE(Pwhc_a>{Np+SjW( zYRHeqWw3{Nl2~J9)&$aa6w5(NtLLeHCp>M#>07}@fFU0rS%!+w{BBm2*$UNjMd^^s zwV2@*8#ua2_LP2dgEV%ues2u2Qk?9W8}Js;j~lf$I*>clHhShV$B2Yi#AVi?UDfO# zTJwLL^K_r<)o?+6p!3NH4eK1%W03eIx+m&KUlmuuSsmHI`;Mrfu>o+?0GH zpX2B>X4|V8{z3|Fs-Nku2S#X4&oop{T#=HtcdyF1yn~36}EPzg? z`unu02K0v%`;XV}Y23X#uzJ_J<1(=2iW^$DN^M*pp=YuATPLeOJfAr{T_YPIZLd0A z#<9{JbN*6|^KCEvCqAy-T3Pfx20)uXSF*`;8K$_@oH8oo+A>-jy?*$jugjgk-+xM1 ze5?EtD=fBK;-ftcH5OkoC1lX|lNk3C>)s%q@_HrFARAsbl0l7OOPq;iBC$I=PYS+Q zZZq)hI}XWVZ(VtIbn+c%oS=WxVC!_jlhW_Yki^z6M+cL6^Z739_J#v=0~m>$ccNV$ ze7RMrc=T&j>S|G6pgoglr;6F~#1rUFjPl<96MOF4gdX~lRh5{pM~)voe8*sG{Hl!( zCU=Rq!eW=e&no|9 zM&;J_Yy*Bf+|oGH9eaf^XRM&do=$H|m9==?;j3c#=BUcF_S3Q9O0tG2NZQ2ckX2gl z0>8%7+3MBWai28qJw4PB1^S)pxIW@U1JV&1G21Z~H>*Rw6BG<&^%#a$7Ig@8LG}$rFTJar;-&l2rcU5yly9heDo<4eAzC zG*ei}j=<0^;Ho;ErT7ii|JuU*^qeKi{qg;8O*3-g)EM$6^DM3Na3;}yKhYv2&DB34 zou*FHER2oGA>zPu-fnk(auXcc)LqodmWpze|JqNct3-o|(W;LS7=|suehlq$>SYWZ zw~N-wCA;@4a5gA_?)pApRU|9BrKRQD&kc|C(RKM2)v;-q5W6oMhYQy+)ZMP}rFv1Oz4ygL7!wRE|8o2+qz zeOxY4Psdn!GI+1aE76E+vz_(XQmRzNu(7#stdLC}ZRBa>slRJONrG2@^P)L18>cq! 
zv!R}CDPSR$VJOn7x>qvgh^t2U&sMjjI?j|LPDs^7m1#(j)VQ7wm(|p{FcU*GRe)xn z(+(eeHMO$U(Wmt)H+4QKgEU};BX--aRopu~(8Tqq`S9*?Q`rX{r_SSvtRbK)#h;w~ zhF}cs9pBw^A^~9;ht*D z{^2K{qPuw9G@S$e^XhP&GvmCK!~T7lA6ElhLaK?#c7d4DioQnA#vvNhk7&krHzvo1 zq><8&J13HPgS#-Pjjy+JTI)qK2cI3*0ukup1JgQc-!0nw*HbGD6~=C^rxttJ_VdKW zsZDz0N1cbGl-!+yYLl~GLXCJNkVE62#8;F&W-aDZ`{P>;=$WYOs?qrIoo;vX1|dW2 z-g)bX`dqK(Gy|@kUxHeao{gFBUjf#uqZNknT;UAb-lnoX!E|FLS@$v8o9h|x0R-b( zQnSuMf2zj!+CM^WQ`L57Px2jSl{PuV9kb}QxHg!@!`A{%3HJpVn-4X?wukv?&=!98 z+Sx4Nf~1j=Uj>F;sdfvfryH@>-?|^;hD1`F8wRI6x*>!VE+p$VJ6+$YVIoX>eYbBK z&C0);TwLuJN1tukmJ-V~Io-Agke3iiPg#8OZ1TXcKPx5do?s=wRbt|6jyuiu8=b{c zs4`iprOFLWJSlx?#m&Ys5qK@{Y~Rz2Wa!9&XojfMh3xYbaVXQ0&iUGYTx$#sP9$7N ze^%=3uyPa?l=){xq;fOh%tR{nsdp-bX^1~K{3Y7uxR2EXYpdreRsaw-I*&;EtmRi7 z8aLQN;RLPUqj13)5!&eGB_}!jiwSI&nJeN=+ZLa2=4jQ9_=D2MbxghFK$|=(`vjxW;T942xbp=#cTm zo8+%HDfbwhtaU|irjd2z7{ZPY+JV?Vpjw^HvlAh*HEvjmh})*7Ty-a&F;C^y#wHx|3WcRDcYPnaA9VH5==bl=ReZiWfzXRZL$cK%_nO zNp>J*aCa%1SV^v02Ai8Jj7ukYqH9Spa)Q z;{#l6aWn|5Iqh~<5<~k(^SntR7*<-PP>VPGA%_0<(nl1FVp*Yk3HwRIO;Vy;W0aC_ zx$!9zK0J9XFT;G!vZ>lZMG*&Gq>4X~VoUNeKJ+w%weVOvyc*bOe2kx9u`sQp{@g&B z;EOW7e1a()Z~@Z@Li<@=w*}a{v9QYw3@)Fjq0fu+`&%1-yl_@8o^pyreMZQNZ+Vlb~P>Wu4ivb{uPc&7t z!Ezx)P(@)l^W}ski$d20Ja3rWia2+gLsz}gjx<@xRassk_a9<^0c;l}w_5+h`+j7U zmD)C0mY}BJUJPv=g*m2&g4nyy>Rm<$8;3jDznX3`%wT=_o`r_HsdbokYeK zT}x!&_>8D4^XVuRZ^AF&YjC8}Gb99t95ReWsI7M|pOPvydvQ>Wn0#0<+^Qgr?5d!Z zj->cB{@SsEj-rcM>E997=hfCiA^?=z^NHSv_YTBF+gs^$?f*zT=zewWO<&8;{xp zD+%SNO>y&cgg=x{@Y>zzmWX__E!4Qe-^A!locopNk^2K`(RkJ`ao6imjv$LP8^?`U5a-fo{Nd}B zQ?CFAP*y!=aEgAptnk;$z5|w|+SOSf-XjlVl*MzjE5XG{dGg$gO1p>Cql;PgJptRu z;2)Q5Fjd2>5Zsdq1Ct-|b`H-4ysl#1pcxA4@BZ3_aT9c+817Ae9`Av#0*~PUY!I@H>Cf2VYqdsj=YTEqu1Ac){{P|7lpfeAPed&eCteUF~mH(a)98JSBqAb680OkskO}WEge%2yjV_WKB9N;+?YK%XR!}tI= zmoZv{kz{#@>a;CKRR<&z9aP&P9J4&`P^G0>kI_Zg~eL4N}m5lN9Dc=k}5sZJ!mZ ziBxhaI9S<)OFI>585slf1M>iQiXDI3_c6|+0H$1X(k>sBa%DQ%5n}Da8s)4HPv3(~ z`iwbVzd!h^hy}0PH_hw*-}=bP$h_0~Zqm0<6hF;Dr5xOdU;{(XJsHz_#feEqd=oZp z9DZ2PZnlu*PO6l*8WT&_1gy@b43>gmI;ASw%@i*^10f)AT^;bZzJFQ(k9TF6BCH+B zrXv>Aeov|5eWsiFKV}lFx>#pua-SDT325HQj-R0;mZr3IU~CDf2Ise)bOw7~u_A(N z{%SGoMyf4p$MyR15}c#wpi_6%rDyCQqlQ9AKXwY3?0|`+%n$G55c}Hpq}mmE45lVq zS62h}B>*z&U8p;V)ABq&^ME-#Z%Bkzfj%i{xEeb=$F$&jj0{@>OpHI4>c-ddz&}jT z!=roYWOu-g&Cqr&Z;&otU!d*DC?%ai!MOGGbCvSXF|g;@G5y?oNY>|%%;)x!?y^~4 z&FDJS%5G*k`@Z8~80&p4|9MBG(9*h7@Cjh>VdZzRNc=zGjtRM58h$X-QV3rJ6HUYD zmtpDn35BKg6PcIeRnQ~UQsL3dus6nTZ~E!t?V45ZZMXE+#udT-?iROrDmgS2)D>)d z*(w92wLNI(7Cs)ar1#E7c;glAH&()r`b2~}i@jYoA7y$HLI*K_mcq!%2mp#5y^mI6 zgjbD-a=!=ED5^=RZl_fVZrQFEEICNm zEu;9|pY&5(O|b! zOsfem%$pzKfg(D@%))-o0+|oOBXkZ|Wl?wqOS=JIAKnJZdy;$8%g?U6JSc`|^PqaIC;sQ7Gc$$P1uAmZ>!^j4Mx`tj8B_pV5U(!c1f07g!?p{h@LcD>;W zY=i1PTkU!XB~G<7>JyF} z83CcA^tOgs58SQOg2S-OI?G4{w8=>EEA=Wog%I}wBYu^z5weEo!*_Vc7@hT7)+l1U(ZwAp#6|*9 z4#0>!yWM&<1YwE7HM2H#_ZYGvi2r9e$fz$A4K6A?@Pevu9h`! zHu}bcb<3{#McrGP)@dIY3Nbj{!`0oYJPc-nN_DXyPb9pm(PHb`wGe0}GkF1HhZM>De zOg8V$E_Cs06fheT*wrHI&GqW#t91cimY2nIdM9t#l@VL(;S5gmvC_L;XByoRs*3-| zrJse`$J_i2dA0V#l(>D`uY%ke@%e-2bbiz9!?YTNIPZ|BgZ@Dpl!UXAm5@$C53q0ppF||)s zd=QV_z7F#cT{?&+A4;6(dJx`~|C%HFn&$R}i5)o@S?XP{*VFoKx|}&A>~?FJn4*h- zaTu`D<93Dlga_nc-skiN@fW+BT&^}|H)7teHth*fJsLcThFhh}G|RjZ4PRIf%%U_Q z3BUUy3?aHU+;Y{D(%_Gngu{n#Z%dQYx&K`^syqtEE((8 zkzaEcer}Nh$M&McRAC>47Q5?z{5W!ZM6tY!EV+iujh@j0mqkXiV+wr?i`{v|t8z4I zg+%A%%&Vm|dklqJjO{dn`v_ihV}Hczk2oWgk${~1V;{LY;XFmg?Ve*I`$=V#w>p*l z3E4f3fwMn(M=hs}AfCMn**;t-3%cXn%9^TV#gc;-ue15qozf$zcv@t_POFsgqp#TF zsQKa^_}h@S7kX=pFpV?R6$B*Tubr2=umvR3VISHh*B~d8H(E}!*vF)XC4RShu%Li- z&DC&f$(5JN11>Yir!4_P>w)*9DbsB4HI@M|{}v8fjvaK|bo=YLh}BM|Hd-&N3^?p? 
z+5Xd+oP!mSzd$;s?2TaT)K<@@ZqB(IqUm1Kb2IfxBC*NhfyQ9_Le7z?(n6YYaxw~! zY5V=jG+J3{%iZD@X5@2hAIN0chTKdRkZ`+dXNG`%4{bU}#4LoA1;p64Jp>j>&-D?| zRy56%aI4Kv;Y%I2nFztH?8njup%1Yi=TAB|j@KEz>qee8LPGC2*zB||r9Jb*KFr^P z#Q6i4L_^Zarl!lopI`xU4D|kyAGHT}?3X7-_>sy!N(Q9)G2m)gOwQUC$D9m^T>h6$ zpYAk4pzS>!VbQ)sfbgane~$O@eTRL|B}dpRjroASUT0*jNIpS5dNgg?WZpcy7~9O=!GQ7v4@WQInKu& z4mXeDo%EmALi($Uj9^$zc5e6hQMNIplrJAcy5GLvloIR^;1oH@>J7boI|+=zwTyG` z*}BuU7r+qV#OKYQ%eKv|=Qd<$!7Q1ikZ%nDiMdMi5Qm|zt27MbKHQRNNG7E0C*)(?wT`6R!89kWVGFcp87s2){lB5OBm>e!IGRgcvi#N_Z;m@jCYd-`Am0b+>9?jLpQgx5^-k*)#Be0<{%Yck82cq#&s6A%7d!Ovrbq z5ubr#tgFj<`_Z05x2*vPz1hbg?Jpvxuu&XzS$zILKaW6Q=0t&49J56qfR({5DCR%C z8w0~7z^f!q?HG7I|K2xBAZEr=LpI zmFkLze?h$L>JB01j8!-#nL~r(;I66pVbhrN;^9b&DgA^&w{=S(JK#{oqtV#zf}l^{-y(AvLA7qbn5%rHvZL0qPkdduht6IlC(yfHilF-n4WYK2SN+&EAIAXyROE50}su# zJdOuIAcp#&CGoWR<>_jZ_EGaKN zr8qF7LNS;e97QQa^cSZ_*Rhi+1c;`+ZTv4Hhxn20^=;EKt3H=-T9pkyM;RA5*FtbY zh7*V`KqkxQdxp(0tt{>l*#ik&pZz8K+@-=L4b?;wtDHb087fyc9Uk~QKE|{h*wVeGK zZaY}H5ujO@=6ZSDIEZ;-L};YTjb5KCaNDHI2Rc(nTUOnRmdnJ?1bW#aCqd zBlOJp9HT1z=14#49vb`%tnionQPAO!Nz9lJCdn-Qv_Rbbk**(^27r>rx4F8scUuIC z-jJQs^l^aa3B>XMtr!_k5t1jcY|!tBmahdzGJz=X`yezO(ET#kW$2_Z+9KD`H!i_f zu1$}Rku>^~nU49iy|`ORT#)+D4Dts}qikGH`mccsQwh_>xs^D&W>qwT{1*$XC85@7 z6OVQN?w^6l1TBQ`+lfQS1>4@_1rP-C-Z21rqiQMwJESJQS~BIc`9#w?{m)9uKtfHO zFS@_6zIXAmm21=(kw()fP>|WXmA{h1VzDz-;EHZe{KT0fv*EaaZZ&~{XBS>tPRazbiZE4@isp!q5w*4KNqBo6uNS2*GZEFB-Q{!vi4@;D99;y z>FkBrHG4j6*CHj-x_B9a_AWi{&v0JVg1|bL{O3Q5Am=Atd!oMnsxNCkgmAe!b2&(P zl4fx)G#}QA37-#p@2~VTDI~tVT3O(b86573xv?ph1%-T>1a`5-HcT;P-t`5p9hJ2% z$N3|Y#}^_6)HW^gz@L>Eb|K$70pwJ$z4|3ZhL}<2`?aomFE=U)fpgk9e@h|qCQ(`E zsdV+)xT!j8GJ96SnWR*92UBm;C&bu1N+FVB+?M5RtSCyiLZZR9yxZCNVJ*wI zqy;_98+38ASYHC}4AO_g>*)PPT6p)F=opjI)S-rM#q_C72eHs z!Im3h$Sinm?9`pYug|1p{rb|{Pv%PC?(__6Yoo69tu~plwyj_t_>mg5xZOr9w8}Kd z2|GM6@P3UJ?-@;>^W6&9t7$NxoNg|*_#N9^>(Kt8Dowm>|EXX)v^)q&hDT49Dt!mU z%&9HFbHxmGqg!#f2mCTt*94hY7yJ`O26~P@sl&9GUG{j1AqARg2A1%9=VaXGaJ4MY zLw;dgRx|#HEcL3kS8ha2NC2+jS^4(hRcEo+$&Kw6X{*&IVn~xDEx9?N-J8#I1b`{w zEgvH61zWSE%0=Pc?$`J7MY-RMkQ8E{E)EBQe?sx+L1u!u&3p|-K>lj=3a^F%Q1 ze-vmT=?QuWPdgXBCcr&Q-H;J4+HkVty?ItiYwzxj0Oi z3YhM$p+0T~2bO+{E^z8x@`}j6mvu_SYfPqNVx;ezpc!yd<;9zHw#i*4e$LDfJPk*8 zsBj3u1e%2tKUH{kMlDo$!o*^|<(M^2pt_Pm0Rt~xHuCnx0jV_M2xo+_ht#J+6ZhgUzO3;lRpVK6KNBi+);Byxw z#tTUP`w0IvCxS(m$)I3(U4IQ-@J zYZm%$X8f)XNX3*>oOQLG;BHb%d0xg(;P{kjWIb?vIstrc02*Easwl^WI%|r)JR|x) zF!#NvbKSOtD&9>|1ML~Ftk=A7V3`$VEpPdqj-h>pH^(D)sDt|A6DVVJAl0@e&_+`6 zQgW+bw+J7z$A#`Zeu_^wwGeWc{A%PITGNKwuL3==UZ`q?wFU)>YogVe z*+NXI@LlC^dnj>9@oZ>vw~RU{q3mRkyb+VxGprLw^7@@UMfP~;qs~!oPr5vF-CVD0 z)I5R>Sm_Uz0L0(=%lB-s-UkeBlTWrj(K4wQl1aE`!ogn~h_-Z@+j-bJt_kJ$qE~BH z-^8+Mz~4=fDoK^7tqUK&6A9m{h$|Xrj#tk7kOykItN-@hiH(|~y&Tu8Gs`Vfqh7y< z&kZI1ElKWaZ22TjIrL_=?uVyd#Mfl`KRq|HUgJmtDxvtB6+zRLJzJ3y2yP6w?n+;< z#Mraz0Us;N+g-ALQl1Nw`+v%HY($RPAN`mn${kfHhp9&^YFIrfR3s3l%lztk_>pscbF(1H_X* z|4!0-kQomS41ow;0#aK%kaev;|8FUbkz9B}M!81#wc40SMWc1%wuDf|l1u&*X6 z`a?YGCdIbo=_L%v)fj;g(u&h`GK^mhnR3baqZ&(TYtzM~VA7%m zB94FUIUL|%EeF+~cN!>?R)Yp%40oFgGw$K#92bEg_ui02y}Qm26#jI>1WSnA0xwhC z(~MDD&+~{stS*Advg&`}(0Xf4pt+fodreuP)IZT^x07JTUVxQgd~G*C4*Yxi{t7hB z+|AfpFiVj5fUoV#l2OxNp#HUzB=?KeSJSKpu$O0|gluf@6KZ(brfMx-M!aqjiqI_% z4VjzPm|Jpe<@S+%V1bYUGmP10^K8Ufdyg7DS?~r*^Jq&YErV38n3haQ4%+d58DtT+ zk`XI$jsX^Qq^%iR(9`#X5*hnT8kDiegcQtV`1X>FQ{;VM?0l2Ic%cG)_)?Wr@j%~UHEdywn*HgYtNx@qvuzmPW|DR7b;bXl7HU zS3wxs1Jhd)5yM*o2{lrcNE3x8e>Vp6|4L|E-{AJ}fnEj!xqjyie*ESRLJa^%ve$6& z;>K2LV{04B+;a?Wmble@L&Npa)!p3%N4v9NIv?PB z4_?;d6b75f>*89ZF9=sICjYu{6UhdlxBaHZ|4&r8w**D3f`-9L=iT&1X`$L zLz*!O%0W+g!m|SQTJRX^Th8T54Sip$GkuG5S%!kBl~6oxn3Nol?L>KBeD(I^>-%(W 
z)=MJO-Ui?v&eUGs3WiQR2M#a6ajI&QK}WY3jPWl%7y*}Y+qzOWr@LQ3E;(tO&!|F1 zisGN zGnVKkvHqNqSr6Fe8hiOh{SWiiXNwD}@;Ec4nBgRoykdsUuAT=MOX(q6OD)V|8JGIk zACl<^Z_$24LXuDRyEOf0I$JQSnJrzGOVbGBp`arznH?($N7Q5bT?w4mI&f70O^Kckfov>W}GL_$AQp*zF2zG`v|4$U!=$U_mO;-*A(kiYL`rIX_=WS-d35yKuJ&Jn| zQ*b~>EaZGMI7QBbdzAc}=#53+EnSlcJ@SbT{~=x!>rU3b_wZgc zB_=Ggp22tHMatsY6zxM3VKA_CM4#OOi5PjA4LhaHKlQfpgc0t&QL2h}#*E?)3g$S$ zQfM*s(Kp&`LLquJ8sMD#e)C=ifvakk_oMKLrXa9vgdh28j9JRuOHkpu2oIl9Iv9-L z=Wv^2y@qEI=rPh8=CpDq+S4Riu|vyZhn<0QI#(aw2qqs-m~+g8eW+*@+8!2Zxex#$ zp0VM0-#>HES`i{|Z>RTUy`&H6W5)}+v+s50rN)1U z+sZapZZnP^^;@_c^IJ1X{QohV!t=ZJ>O=vj15-+W}U#8T;2u)>nc7&ykq^umoTIIF?6TNwR53~NF%E68^z*yKm zVDXzAJ5I4_W;(gC?I``xv4Oof3;v5N^~W|!mZiXPmT+iXuqh%%8-_>^b*IyB%1HC= z0jCs1WiPm#Enk#JSPlvQ)akQ&VM0D+-g~g?%;Phc#mM-rpDDMYJ9gnI2~CBM6}gBX zs*j=l>auG5&uBX+GCfeWH=hD_Z`w!uhUuILqW?(xZoxJn3RQrV?42A}V0TfDpv{^d z9jIL##J~_B>qAfThsj0GKtGaiw;$`!ErlCds{#l1f=An}$MgdhRHHH*71AbCcmAIg zb9ms(QxUyDzjFog7rFT$nz|dX;8nAOm52%|OUw@JXPg`WdJaKNrOU=QPf7MjGkYct zLs|IB(A_Fr-(Q*L+)@Gc&fUt#8cpTZu?Dzx3e?M#gNYmDl}qg`^9%bs+dD?D9pd6X z`fiFc8n@SI)SyQvod3-33;tC=Hl5rwg0n2;_QQ#+o4&s1awy5!i-MRLV2%TPW}_73 zDw89C_S&K9b3Pv^letp}n$%!x{cIZUFolWO4#c@1dH3h+#aJiDx?iy+_HQo?O?atE zqctXa+0{rUMkbKo=KP80c>U4d&F5ETrN31~`mtky%&;Fj8n3If%-2x5~mm(~7X4?N|os16D0XrjcgOfp#8#m;5 zn6dM`NV_S-Rkc^c3B>iui-u5MW-iVQlF{;=pI)n_d!3>d9^N%o%Y<bu_yWbBN zqB059{v49pU^-3Cg`tkhuUc#dy5hznDIdRq+DN=Mmv}l@>hg>CbYjs4*L0iiG*G<% z_3gf+0W?Il=Q`~{{rsQ88iD4wN0CbJ>GEG+gq{HF|0L}vsGkbXoak%bjuJncn_poU zSQ|bNyCE?>pU?I6ky^S(PmEdq;i!meVy9<;#s6Rtn;JbY_4R*6EkfqZ-TjApkLM9) zWfW8())bWy{DXtj#4)lv)(aYTe^>)$j|bjaP+VY-iqvlV5m}r+Xi+g6!UupKU#!oi z?(6eBx=smKhg8mx8}4-b^@|>4W?BPbVidVE^s??|-6b~#G z<#xbt)xpM><+T(WH2Gata1O2>FHNd(avSVSwc^6{z!|?1T}4#O99p zo^%?Bb4+mN91%mtPF)~EDwCf|!NIDXotIVvxzI<6PFFWi2hRk`J&sM);mI*xY!ye_ zweIM_>nckxR|!U8_ZGF5V5q_j@PtF7j&tAjG2<@-YM(xk1roaRE4n+9hDuGHUZs~2 z0<{|gwwcykbJ$&rxxNiiWK(?0T!6@cJz^~<5tFhg~a zyqv(%r`}}^rj0)7^frE5{(9#p`3ZF`jF3I!gS)d;8=xZ8&{$k$7AGWEiCs)UC5w~X z0)QI4;>>}N-lSC{ksxvtdurUxX`cy*X*wuluzLgYn)hQAKcEa!&kL%0NrvdnmM&tJ zMqL})Z0xMA^?Db7NJ86w9UJnOlac4QDZ%%!!33T#qTet%_gwX#{Z|tc=uy|hiwj2r zC4<#2p#WlD*l1TL7JVyIMbI=0$2|m$9{h9LT8st)&H*QdVCu-vbSt%!ECIbr@P6p8 zl^G|cbNW~2D;*SdMj!Vs1Hc0;wa?5FLN=yf&o!&YbS2u#0oGDH@7k9vHj1hsyo!*! 
z8Hb)AFs0`Vg79WG-L0Jpx_y?Pcq@e_;qY3y^8Gf^cG$56zO-3qV-%Y|ZjZ zvH0V-6}M+kk}j*>eZGFfxA(Q1Hw){Hom~ErH|*86j_ou*dFoOc(e;k%<~YWAdwpmr zs-t2G&SaAJTX4D4GTX;vGdkwnveF0cW6o<%*tAj-gC*em=5VjtDgM2GMZrL2{vn*WmmY#o zCtdeenJo0O{(e-5*>#+ymhtCh?q;L$Im?b8DmYr4D!r^~iLq=Fyo@QFl>Lx{RoGLK zwCfMW5`UAMkBrya>{~kmVX@|(H2jL#=B`Fq+hS%pNKInFn*KYT80h1yQULL0dv4AucmMZ=^!j&pU`6M`X)rLS*c}>l- z5+Eh+jWR2vSv(kk(5P_2S{O3kd=&w)%7YsM z_?m*G|JzBX#1XepVhJj=pz`uL1;WQ_zd4WGSlSIXsfsIv40r#Evi^#?(rxT2Uy<9V z1xL?>8Sj8lYHh{iJL6_!*E^kEP(0RuE$12?mt|eyZ=+`fen3zPxqg2;81`IH@Xb_# zsb2CWGv2G}u{Z`ruLe+j5;>t_TjQsoo+R~QQt8EbGw%R-HLayPbgGlO(?SqZ*^0!NoW>c7Aty-P7xc+B>6U|MDpzifnx zSE7b%{p*ykD^;%MD~+V!u6Lu;fP;YKbW2N+zq~S?0E6p$d(S<@C8hZ3TJoP_W2QWdv!693s zno93FCJp2MP~Cj@w{BkVV|!4&2T$X+qR4IvJn{#<^xd_fweV9P2ZOQm!Qsewm~j*x z2sTZVzIc36wFV!}&}su42HD9{hil#*ewwuR(Ya>wKcAE~{@PZto|Z9InL6HW>)n^X zhS{@TC}O^i2JQ>+2-DpN&6!K7j2@)3abfN-KyghZRS^}z5c^W#^>H6xN^bbyrrlnV zaoG{)>b}YyqA-YnNJa(@`5+P{2o4|;hKvM3vVcg=ISsEGJ@-F%z5CvJH@sep zH8NCJRafe7@BQu1d&M0A29;#mKaf$t?>i-ExamjZBnVN3VtSLWT}Sex28%KFW8r5w z_)F(`86!PEn{bhH#|&ziAJB`PciJ6~yY|U=kIU!f%ILW^ivWX%3XfOMf7-x#u->h6 z_KIHVizCJ?8S4(lQ(5cTS@(&P_tP(O^5aUQ2)4!nS3aPq!^SYyc&&*Fo9{GxoWr_1 z?UW$|4s+Z4HdL8o8=&v8aw`h>aK><0g)IJLjo(2wwIWZQPHlGj(v?pWdyTTuglQN;K4xjliVYi*sZc3*7wAM9aJo(VYiai6RM>bE z^rVvsfs0=fHX-`f=gzjGci`B0d-Q9W;46`b1 zd_0@!=j0g@#Qvr`J!#c={h6~76|bH$ZO30RoA9Qv`*!l$R;Lh3Os7pXlvu2-SK340 zP(9*a0za18Pb}-;n=I$+jBXJj$YTDXZ`O~Rz2%mRfDqd9@=W=?f-0>H1;ec7adJIf z`S3ie*E}(0_-n#FKb+GMs9X!0H)<{6yXjNqd1Yg3==IRCOWri)k-Q0IlZuYgDZ&+t zjFEd?G#VVLbuK}BRxsy5GxIm0$or8059kHRS*@c!eCFBkKyn0HQ1XudXoicRo;Hua zrRcB=oEN+}o%-Q0+(E52)uD)5VC$_yU9nf69TMwt=aTO}e8uU>aI)U5#kS9mlHX-~ zqU%;W4F!VggcUoRbo@=TAIPct7j|e+O?}ll&cz3xx7j7V^IRr#`J_>U(})nwv2{C= zQTJNbtzFuKkPS#w)a0vrHdWzl``Pc0&1{MN2XK6V!DjVS*_G`I%wN%>136Nu9G* zp@t>J!EA@NKcZP=Zpaid%GejC<^-q4Mxp&TQ5~}!pNH7FC3XF<^F!KV(!ooAazxo~ ztFD&qxXnpVWiQ*=v^y(}^enDD+7h*BZsD}yl>PW!+<3dJkbh8pdt)7a|2~Fx-v@f2 zD_mbQX22eLy3h*$B$40`^C+Fl4@uiof4)npnpRkze7xLc>?%_P6IZro1-w_j$d9@V zT;)s{X`9DFy8AM6c8QzxL~$atHV2cg?fUACu->+BH|1E zjP>m&tkv^TPAlF=?pMtp#w{<}SwgmQmlP{$>rm4QjfeeS#fMEoqfjLp5z!Xk-q9f{ zvHbatUDBrYsAovh1IMbv>|y`?JR_xfu9<>a@tA<#u5zaX+Yaf;mm!WpO&LspXE9pf z-d%I)QyvRHCQ+Q6N74B1jk}}7C{*?Kp3dFsJw{X=FKmI=c*v0!>^YZ~t-%bA{0qQj zZqL84MB?$9%NKfS$lLNOKk*q8z0NgWJ#@Bdj*`!-{>Ns63Zyi~nxNh^g4yt>w=O!8 z=@u}jpAlx6$<0<>#vWpP>u8XVS=EvnT_!G_<>KWmRkL*Eo(pv6aYB(748NS$^G6*i zj%nczABb;U<7sA0N8 z@r!;c5!3oV`&IQWXeLS;=062-+1YTXT%d!^4t*|na?|13USGS2>5%?8a>SJJ4U)~d?#vbX5<)`#42-P7;R^OR<@ z(K95RPVC2?{1k?$!Ka?r9HsNKT=Ymn*X|Qhejs%(@q8KWG~iB5q}_o_dZUNc?HuJ@ zCeN2Uq?7ULkz4OuZO1l9dNDy&&~errw4Hlqf%By6WENsQmLk5wnJP4beY{aeYAu)a z{jU1=Ms3GWlIwAx-=&V#lmWuoeuLNNKf!)S}v%}6lDWwLt&Ocdlzu3RS$0uA-$E`i- zCU9$4a4*urUlI9#53VHw24RuKM;vEhfRij@+9~6Gm*Uc~1qe3ao^7X{7M8mx;x6z~RNp<-PjIfpZmA*lf4@IP;ZGX@0tK#J4(On%`h zch4lx`<{RGx-c|%&d$iKzScM$!Q$&_xzX8djSi%2wq9lRBq(xnIlP%Lw6=sT70udN zT>WHbn0~m9GOP*KH{dcjRgxWjDyAuHE3(&8`8t1wY=zhEP3zQH*suF@f zK&)MWIL948Jd!jv`p9n9pphay-cQ8~I~SQnr%s(TI9qpp&lUzu01qadme>x6me88z zJ&TtBaz32&9(R!^-5BY!)6GK_xMxcaxuUNEovnqpf0w^#`gE$zulEs%0ZSoIBHje_ zT9QL##kPvtrcZCpeGmG(+?sF{YeCfYFa_>R`)mE2pd&J?`VZBi1^x}oK$6{w>RvW_ z5Cu(;KVBrSoT7`FLA+H87h6na>Z^>@ny%NDxy-GL%Iet~jdV@%zca^44_HAcP8cKU zU+|)eMQ`-vCV>HG30b28R}TpC7APgP6G6HKL1n`_(+n^<6BMdoM%QUEAw zs|+TFeV+4`b@9%FnQwfCXBEx}wxJ)WfRTw=IF88z4^K!o1TVW2)6J4o?u_9cF;yTBIy&@kJrig<~5<)dyMbeIEf?=*lGd@NYO913r$x|?Y`gLtmrbb}@o#jTo* zLH+eRUu49j%mTN2Qk~N;2`j45oI8sh-fMrvwzME*sJPkqh(`W^6%<+daabNDIdL$T z%Ec@+c2ozETJ&>4$pFt|@HdAKCTd*u`V~$Ld8~92$O5XQ4iSM2p(a5Pw%n9Q;B6bL zHl*k+I&MngZ8_hmQDLcSCGB%mMYAlU3@n%7{V0g+`}j0;V}+HXX3LgnVL$uL%xLlR 
zK;O|HgjBrSg#Us|+mk@{-Ki0Gud3Sg=Ik?IQ^D*h)CHiSDz@^Dzt`HyM18=zq;Dy$y7qu(%&oG@Y~Gf%Mvyf^q>fN= zbNYH9qAmtRbc^t-ueg)93YBK?EA{Y}M`;)SBhvsS3xT+x9?b7Q{jv+OF|nm`0sCEp zyJ-Y#QsGQ^j&7AAd;I~$5|RIqe`=HIA*;<%Y8|X`W8{&vmNlkxfz|5UP+RCF?hTvQ zKM4KF4h>Om;vcMCF9btxjv_7;mNEt7`n8&T?priMh6jI-$OQ zu6xfh?WVR(!*glt@?|}aY#ihw28SHk?M*w~awqJ+ji^NVUaegXcLN%MD_%_Im9J=L zIj{Y+Cppm)n)Fu^>zit+`b0w|ev$M`@JkUpTs(tm^fCD!ez+bN?! zaF9MB8rDJ?r)Dr?k0-Yy)idF8vsEDblo{GL_0k|H&%_^sfIOOG}1Ggd++rG3u8W_V2w|Ui4y9dI9a>TG_f0O<8=6sdW*D4eU~C!w0U{fRQT|s%YynPfL21E+ z?XS?m2~q{(Zo8;Y(93%~{|>TSxeplwkj<+^pe&%!Z(F6I*EOc$ZF}kgi)tK&jY$;N z;g6E-7{r4C4f}zo>dH$u_Mvd@a2R}AWD zP7}BHUsY$9X8iN(gJgJQkJoUk%?>@B%1*M|56S7_+N_rx$ghP*)^YYtdL zFmb8Qq?~u2{R!SNcb{YpR*R`K2=GLG1{`5VTD79rirrLPS^cA;_wTK$9U*XjKc?QT z1=(2&V^w;m_#KF1tEj53bJR&D5Jo}#r{G*zjO^a3lrBfMshv}j0k1U+B-$+!$@CSb z9CN)Odi6r`p7#s9^BV3ea1H>(E-b(wbzf*H~2VN8?|;c z1^iTp`2NP@Kp?_wCL_6MpVJe#cQJILKrpWLW>gkm+bF3$aVE3ibZ; z-dQQfN~p*J`*Xd6lndSTIS*_j$*hhw? zPjEYln2s3Bw+r*v*dvyPx@2V=;T0i48`bl3Fhp5aBCLYCF+!av0$&dmqW}-LY20qB zQ^e4jGcRwqRgS7@0&NSV4@N?aWFNzvT=sI)b>kuFZra!r+3lvajk`W%1x?6q7=TM2 z__J~bJvdGBWC9Kr@h>h%HFuGv(`T>0MuqoXx=pcAwy_mnb_2QQHj!Rle!!}msRrE! z>9sCNY}>kbBOrBgA&H6y1s7O&<%@@YnUD zp^0X`ln_V)$d%Giip#eeZ&eh%7Uc)ArPeEn z@1kW%Uk@;YserK}07m%NfS*ah8_9ae(ID+0uH!ophxu^_Nd9WPMoc8SHi7kZjzTB0r57gthw%h|Cb(pc>g!f?QZhh zumRchF(8m~U#)J|;L25I(Mzs8j~<>qtd33~7a+WD?j~ojkX*vH&jXy-lMSRjc}n4s z1Amy6CUD_X<+;;gE?df4sjUJMZ?Uq4q#105@Nr^4h!o;8D%0JhREf-VsDq7 zrNZ=^3>KOB-4H!@Izi|dz`C{wX2xxLFKiP@BlJVt16Uhpit}9MM4iyuv+%v8<#CE8 zGxaG^*P{~ycp1}ui?y)CMx}2{TzT|;0RfWdW^4~S1iyGnQ?U8jd;8qsj!=vNl)?n* z(!?=Fa7JJYrN!^gaXl2eq;xof%`>#?JBF42Gw$ z_#%fw7+#%;x$Z9q1Tb*gjM{+RA{iA&Xp_TNy7pAEfwve;l0p;3E zcVyBOA`!5-y<_B?JLbRAV44iz7(v!ME1(?!?30jb+{bKShQVb9;lYcJDiaFgL_gc< zi6H*6|AmP+jVc_sDx?wtDQXQCuf@_iRdMyN5UDtAh?Q8e@RF~^vP2HLTDqPxU4Nr8iOv`l(U~_TUFlkvWxEtct#WQSkGC~g0?kgA+RZ9IHIT~?CACrP2phkqV4pO4>J^8%h^T!|R6=U%84@I}KxMqEaiHPzOU$Z5EKTAMj+T=^3T)5H^?#zkruUF{*W|znrT7$mqmB z2WT-hO4LF{1)@`rEwEI{2xmgSl@jI);JK#i zfA}1olcye%-glwn?3~(OVYl9^T9p_uXt04iSLfiOS$aMX^Vw*^eefEOKs*cf%LwT);0;RI9dD&sp!}|LO&SjWKji*mFf?s)NlVqb#m8#q z3X%RQH&M_fs(|MWh(o6pkbL#Nr3U1BiJK?6x6leo1zi$%L^I^%!N_Gw=anv6BU_%Y zVMSR50)n_IvPL)@2BiyjOQhiZ52LLpClMz!wGxS3Q(Ya6%oStnWtkNtBlt)eMKudEq=?y+>w1c$9l_2pIZ=yL#zWae_pE=_xT zyOGkzxwp9=zz_dlsysJS`l(e^wzfI-R@XS`co{7cJx#%>V^+Ws6#^k=#glYZB;SHn z6J+98@f`f{&D$Ox9)hfJ##geH<3R}E^1ggLqh}(ri^=N6deI<%1ZRf9w`wbY3g6dn z32oY0qYYBp?7jKU^kbO~-T*!jjDfzpAk}lcm6vDIvZ}SEj5r%m--^;rqN9a);35>VGQYli=nINLkC=p=D10~t0kcY-RQIn?@bP}qgyRO?-p(WpTgf>}!2lNZf_j3da$T#3m4i z!JzDFx!I*HAakv0KKg+1aj2`3N22KIAJTr_8~c_?(p;>7OOLwsca7!!gwg3sc?Vx! 
z9iDy{6f{_-gM9JmK|mnI;2cl>k|@>Px+tj%31?Q z#B*RZUj_b3pd4;J2P2`IgGFrtf35StjcN9n=ovAe~#`cg*pHWY->aA;4w<&U-Q zCipb6eRTlt)nITqbMv?g{p@&tQ};=3fk;&cOAY0nyEi^w1@ZjE#P8Ee!B{!%0=E&5 zLMJZos?tYsnBH*sg}erk1#S)6FE6zV#u)s>E(;YLY**iTu@fiN$X&e`eK~Q)zGr8(oh2 znq963TmkTPQtMfC0J{{4j@jA7#~=a_n=uN;LN(8_Qv^cw?`hWY5!c$75HQD%kgh8+(OyyeaZR#m7;0S@4tW# ztPiX|_J110$>bUGB+9g|3q)8fmTCC)q#gt*=_zdXBBdrjq|X~8D)YZ)KY~9>Q-A$7 zyunKtv#25z>Hn8cI+{M`z(% zQJ}SiOqUL5>*TK%?@zVFl$4gbv|s5jgpLRyj^RLX6OJ%|JA&@5w3-uM*9FsEb{drz zshr{QKbYjwbAJye2MHf1)-4H2kfPTpT0qWg=|^#^2#{jmA@dq`!6T)#kw~rYV0nS~ z2gcwb;b7NqR@*!+H#Zc?^LreAbwCiSyWZ~PA7vqi8C#y_SoQ|@f^pQc$mp5fKuTRZ zUKP4D;Iuuy{__V2X#c-?g8cUuNOaLV(R*j~7Bd)q zFuMPh&-eMQ$6B*2%xU}VeeUenejj%DU5m$>Zy8V^_;8=7eW2rz`U~UQ*o4 z`-sI=N%rL1pF2Xb#CPN>pYDF}Stvn&FuLk(`tVuDi2~=rgV%yw2KN(xCl;G>ga~dn zGpAaxR@__*ODM5I6ZAx(O0#Ix!3NVDIt?W;j!N~DT3SyJf-=;ZQSwU-=fkLCH5_Dy zk1@t%0Mb}hU+Je3*e<-j(q!!}wNw_z8`E<`@3V)iBluIqo$vcnn!EZ*kie=3o&lKxhU)3N^4fjB!w zLc3ho=WU`7k>X=#B0Nv(#cLwJkAFyF?dJxD#Q?E~?JgT)b|l0sL@u?|5|2)H1?c6y zc%ckub}~Hc8#Z;Mv1UTq6j1DVTYPOkU$EckUy6}J zy&az&W$;olM1wTeuwejg8~0+yG&%5cm7-;NJnQ-ya;lhm>qpp|#7Xb+>mJ$x)+1%f z4$e8&qwD}e#cU|Uy3vO!pyiB*P6xnp`T0m}YCHW;8&}&SJ95%{S(riF87InvfshlL z+il`GktYdWP^LaN6Ha}$lnguxrpwL3*0~;nQ{U>Ua$wtx6h9zPR8r8z|()if!no0b?q4IJRZc zy5$+kpgjbhr9kPm5pOb*UyQ@b;p4wwJg!KX9f@sFW3iNG@Ws+l_0w`8x?RG@MP)Iv zKAun{a?sqv-3C{dkm%DO9dY2&)vl-b`9==s+uGQMz2>yifaAh}G@+P0m1MJunK1dB zdFO6g@yq@8%fD<=R9gW2bK*n_pJo2LZbB3ipA?tJu_84i@j4?}ci=5tv-jmjp6&+$ z-3%Abwx@7zT|6i@zbTg8QVGuAS5{Xo&F2N)#eYAWKDnZcT}cmw6q!rB_?g0%P0nP^=mwno9+eiBk?V_b7Vk zvM{6$d==o>@l{FUY(pLsKDU+avpRP}t<6^X1KTd_teEU>N0lULBaeL%EYq&i%Et_U zz__PVKR|XDkB)cKe`1R6X4Hc*O6#XRu&enFj+A@4@@`-wOKtfY9dKSei8b64N=xxgQ+s2l-ZW|t>oMY|u4qdgmgv%q)}KY^JS88##tIMjWF!!5zc zGA{QzFjEuVWKQBX>q(Dv_ktv;~~0 z#(UDfbJix$^$~dhEugR}i&eVAPzLA1fMiiD9dwgMP`SyXF$mb*WiFUnU9g5*R85Xb z5=9ScpQrOKtBJ@T4Jvgojo&Tu=FDpxS=NZ_t{HNY{BwA@aWvv%YP5>Q7^ z-T#fi70?rO+vso zH4|=G{nm#HqnfM5)EG&qztngy(6@_@;4;qYM106OJ+W@+`T}+ngJ-j_pR<@n4<4%> zYpB!S`%;$9{ruW54JQrQ;@z-#$}ZqbV?NyrP3}Lg&j|HTF1!m1gYopMon1W5_xJ#; z3m(=f)E#}DQZ@Fy4565lS8Qm;cYgI|&A z997%uxY6wMik05r7i-ySZrRzC=6m00(^=c7i%F<6LN{&%8dP09A_vPX^QIu;Z09b^$9ZIVh zC_@xx9T?&&^Q$=cCI{+E=T5QVHR+ie-BZWB7kk%Y#WDF^SN#n%Ata<;-`0C8UGaDI zUe;cHs9dYuE?kQmG87;AEw+Z7S~w$Iz9~P`G|uhgO(-bVx>>bH$kSOn|BYNhH{u1H zKu@=H@VHoA6ord?X7&QmK*M;m(EEueLm~*8=9{a%3Wo(z@XKo>`%_A`$SVbb!q#jM9|l9) z$(iJRG8Te7?4qq5GlELf`pF;rEmw8%mZ_)7tH;o`KUDseFzCB<-*ZvqbnFq;*%opo z>n0>mr6O6WspJnJ8;>`2O@-V03;u{Z4H|!}?>75gHeqN{bV{Hx$CX~t*9U?&LiZj~ z29&KoE@L^}y#LF6Tacrfzw?UKPTpsJPARvu%xZ|{J0Hq(Ci19OC@i68fAO~mG9fr! 
z<=M_v=j5}rWY4BdrqDrwg1C_T5bW+F8@tV|4)|6tM<@aq7Rnlb=4v_AFKy8wR%9uC zxZ=^E*X$?KP3LRZ3wS;u&Te2zZ}`?jTv)%VjJ0%09$=gz%>9R3YIvaXf!muE^v4-` zSg$|NO~QIYa{lD6hQ0*8z-*)he4iwk4@PfIn4PiZ!t_adZ2J>+~o zO!%}@4R@|rtq`z`Z^6C`z84=V0-eFOeeeGGPEj{Pv1~IQ>J-2fG)?^)(jP{8&!U!8 z4xb&s>|F()s_bWb^Oy77=^6GcH%*LrNzW|HfnAY-BBhZ8r%`=R{^8*Y*Q0B88ovCf z(2_giUVGz_4qt-^43snOG-H_43RF&}M>ds2)0yKEEdgx(@PSrG+l~fT8%IOJ36Yt} z(1wCIL1W_#V9}Sldzy=Nm+&?4P3>wMJq}2sY5c@koyJmVD+SUc{ol}?~AlONU2@QM6*V56Ef-j#CIomW}tzqrYXs){R0eV*2xG+Po% zs>GTT+2_O!eIGANGNPUAlTI3w64-4l_p(J)(5|V}pxr~N8%NCAR+uWMB)WSk`Zr*w zMjdc*nm9e+(zD$9sGU|~NfjU)0 z!&I6Jfx+~ZpRo`Dp|SG(4Qg>tcuF$0HQhdv`V|}h9!}C^<*1=AnSF^f_G-WbXDgq1 z^+LY8EF?|k?>et$qlSOm4ZV4vpy>k)|Kiz2Yi2#(rKq!dAnHP7&Y!NbbAn4AGiTe} z0*SnkWoFsEXmO}s(Bdq#HTS&2?~tFY)dm8;}6nuEhYc*{m(g1J7Kqyvs;5?$sX<)h&Z3*`3h`>hg5O1}dzAyS-Ok|}lAs0zWNbOBV{T5b zW@+!OoGsfm^RgU2T5LRHysR4LXKi=CS?Ev4!RxqM6T4|^-uH!}&MBj5Ua|dNx1KVZ z?7^cp&K^Y_2G&Y_wQdMz!tL1Sk)$(KpI0}}@rH2hPB*>J)Li>LI1%Z~pEr~&r6E_v zBeCo0X(*2{2(YvePVNRu=X++GNY73pFIN|R7ES`zJ*KLQ;pPcOtX?xDbVT!Q)TRC@ zv*-C^M$otw?kycG4U(*zOT^~J#*+dE{?y!(l4vj$-TgMZTtMJY&KmX~F;m51K z3)s@ix_W*ne%-2D=QrrakM0_xPaJ=u9Oqm0P-@*}R88)yjjBLd_w&JA+^%%jM?#DK zlFyk`twsi)C{TFlu;I!(8ZOlp-VOB)J7Lz`3VDku%7lBM8*kDg3Wy-o-Y)rt57jK3 zKI-P*orfyUE_Y2k@*+d#VPP5)-_(SgGn_)r-B?!La2IeSE}Vr)z?}t#Y}{}ONLHQ* zxN0DA{t!Y8gn>N6sXAtXGr9b-<3% z3(rI8hWQIV{)q>@ou0c-8<$`m&0JcwY|(tB9E5LmN`@k*zwFIx!CIWyXnp55r=4mj z+R1-BZFi?%f9K;9d35@@dlND@ZuV}#MrH7HEBqDIRT}V0}9o1lzyu!o1xiD6X{e52XN$H>2@6}JK@(+J!n(1H{`~39%HQNRtRku*lx8P_FM4H zS7pA2&uzNLd)Uc~7f1Jtpj*+AN22&*7GWy|-Y=Qy<~mb2N>#%zWG`D2wD4#Uz~sra zQ%tI=A5d+cTVuPsQ5UIS_2HBik06Wc6&sxLSty&|DfYhbwgeiYFWO?6bl;aZ{j+IY z5MdgQtn596qbOFl>uU#nchYR#u$JnVf4fhZh0?EEFuZ7uzHX&yY@M=3b_tx2U0P|4 zCaOA(7o>TuMKM!gXBjqr{mACe3lx`(BRhazg@Y`uB?C6H`v-e918j5^&O3cb&8^1T zjn>HiJ}8EDf#X&&N)ll`l-c}QMTag`qg&7RRnzLyewJ36OJvJldMi;7QvJf%uDba! z9Gi%z@a9kQewF}h-jEeA)lnNR;b$-1fKl+{<)1}C+#5ZG{0Y4;M~ByS0K)mYZw=b) zkrOR-FR(7(KP0UlaWH_3mxa|FaEM43b?iaJV(&{-*BC9nv;M%KoI=*Hm`-L&d34W;GJaT?#rgptExc>d8^VHNcNEl3 zz`~jkap$SEKv_!Q#upDicblt;7j)SW$!1RE$!gru`?Ssxq^~(*cB|YyM=ooXH}L zNssTW=ZGS(c@6={Gg)RV2O%DEX9dte`5P@6&-PP>`}5@u;wrQSX>DVX%H8xSH~lzs(LqDn)sht1`<% z99-uE&DjLX&3?xd#$8w5eJ%U@(Cb@%{vOgv812gXmOD&dv&G7P$IY^2%_#-g#{1lT zA^P-+Kvnv%SX0qEZBSK}zlwt#iOhxkb^DP8h&xDP3<>na>ldQTnijKT&qk9kF#8zw z2pUWN;Ozo9Qo1x&pG|pNbgUs(5nnXAoo<#=)id7mK{iXDGsjL^RgG8dYW5a*9yzF06W!RgqfsRsc}v?7QwRcR@ZRx^<8C;Ax_EDSEZMaVv7 zYm|~SeJALt~kr8Ky!mZ2B@wk@VFj9>l>7o<35o6t%=15PEA$X z$LIXI$5OM8Dpolv)AFg5ZiX3Yte>TH(NKiEdLEgw0Cv1$%1tPe^T%YU>dsRSTxLxy zeqD%F8s=*(#(1^=oGS%rEGBeo{>OW@`bNjwNMdD3nJ6zW@9#9ic7kJSKmP^dxiG2h z7}~zZsj^-)3(ey_IKLV4+xiQ|*4QB78h0dw^l$;4-hVHsH6FuHez1h237-hKFh1L;Pm?xV6>r&XG3}?z-49wlwp{T* zglVqlXH>YU;>+A(TOVOU#=rFhj@c?acxjluY?zx;5%`Z3iF;*lH&nohNKsOyOt}d! 
zH+D#c&$i8V;Z9)+RjdVnm60TrA=K34O=6(pFDx$CKeAwSvbomoMXJE3+f#Fb;_!RS3hpY5Gy$^YmnH%bh#?_< zK$UomCqO5Z6=GzE$OFy4Qj5Rv-ij}SA}X`*)suCIV)jMe(>na7SVSP1C^iXU8vXsc z``S-(S#%j^ZhiE*md*|{pFaM^)rmN~;#&M7-hafg662s!1pJt47n6IRB_$>3z3V~8TVP_}^- zy>9c(FiJ#TB-!MnjAi`9Xf|jg`G01vo&u{Cw3IEt{$XAwCe=2#?w48u$6rY%>-zPW zaNC8^Q8aiWGXi-ghC00#0U4`imeThYhE2h`29{Yxs^?BWn)0@&r)eIry~V#KJbjV( z-$N79_ef%AgSUmd8l|xfQLIZgS>FnS6eZX~9jaM$>*Uq4_owxl>A@K1wKm%8BxF3g znUN;^B(jxd^QFC;L_(yh61jZ#*ZO9i$*JZ1RruPr#lV zKZktZLR|j4y=Yu3C6?_Jw#jLu7dgE;zs(5U4ED3!3H;p%jQ&lZm3LSK4L-<@f@r+R z#UaHT^9}wYiqch z15A;jnLe;JO~FjK3oF&vQQJ_*0HBqSaP_(fifZK({+g95hS|Ipqwd=z=Tct6fiRF< zwl-?I5HWc?G&nqVWx>2tzXj~fMXkFvpNa+vSWb;y&61hO_qi{%_#I6;x4u2OK|N^P zl~wk;vxfPNwK%#MAuPjn0di|>#i-us9?5T#d43k-lqQlpL=vIJtVtS+z^4GiClrl?^mVz#hY`S@=Op2KWR(R|RxpB<)Z6Q-{k& zx6Rat@3MgijbUg6i;8qjxJJQcu9B>l=?<52PH`qeXW1GL)o^F z#X{pnQCz5lzHaW4TErY!>mD}l=Hu$u6L8mZ%y9a1M|^lU4NyVSdZMSiH@0rD76(H! z)Lj_aa%pmW(TH%IAvKY%38|&F=#uhhdq5nXz8QNrMpz|as4l(@FP=(OLgek0|XpX&EVp+vpo# zmo5sbUZH4Z&0o$R9S-YF{SHfLp!V&r!BfL4$v=)T&Yl}xG`LxLeiW!sFo^QCyJ!(^S*a{4b@#M` z>{~lRS}NgIc6G?C&!$vU4o%0};A<*z&t12lu1|K;>yDx4g8RHbdV&2W(XYqpZz>dT zy3)p^;`O^d#>Ar4UVL}TW{jFFc-Q1JFF#;c&FB9Jm`4#~0NyC|CPX~)Qb@q3T;w}DB5Sj%usjVa9}}5z zw{=Me$5}Ni)M_YQ=K4Z5uo+nZ%sMPmOx2fMS5=Cw{oABl{0xsY{KE@#$`!JLv6zrl znecfN8;I!LT?eSf(!K&;jUh2<7_A8r^@3O=gw{&+ zjZN-?hAsduEl*3}62*@kXqDe`+R`78;6`+5#*AL_Y)`yei_WKI+8=aWL3M?*L?)VV z1M7O1&Ab_gz8XWFqFH~ZZ|FfjoKjeizD&p_MA4tlo|T-x*t6X!-TmQOnMT1Q z5*!|6t?wP@ce}96(!~)^!?Qm)-Lt=qbf&5RW5oS9H(#I9m6UNMdv{)2$lQV&IE2KI zzIb%=smm<|XMuJlFHy9u{Lb&Rh`JyvAb{@fIU2Q>~PUbZ75Tos$44@!@i~cu@!lC)SOO&I9(zk7))3EtHG{}QchYd+YpJ^h zq_OrbA19Og6XIUO4Tjs3Y;btz&&-VA#2|66%2-P0bsb`P!Lal9xnTMbA9Kun`$mR} z_x@r&)xtAu>gr0-wllVhqWY*ZfUtP?eP)&w;`C>N1!Lb&OmI6QXC*(&WqZuZXZ?>* z$B27@gj%)RF)gIV7?6n6+s^VN?4K{LEnpjkOWcCtDuZCojm+U(kp6yx|47R6CCsUr zEHBGvTBp2eL<=TGO5JICBAy{l8 zg7E@S>tkl`fXs|FK`m0D`k%uGvn+5PGrgD8+&b8!Ig6ISz)*r$5tHm%PO;6p++o}^ z%Td<}jU=tqX)OyiYV*06m2yQmvhQ;*skqp$lgPZPEsNXr!D_n4C4AY;D>}-`3zOa* z+i#ah1!azwES`!w_fgIV&AN$7F=(yORtA8cvfzbW#I!j5L(PeJ%u$}+VZn3ejsKppx z@y_6XJetrQDtkx{g65g5>-eg{LgxuDrzikOV_%!Xbn2??OjW={)3wj43mpy}aBYj9 z<4>uVkNYgmm@ua#W;=i!TcKpWBjKX!FlsY~@A~u2a_VN99va@_m2+01D!#&XiM@4) zB*Odf`vHxV4_=geO!-+m{qjL8ZOF;+`VL(F$Mqb_YpXgp5m<&g?!WQHHqM&z1R}dV zuY+smhFbNe+d3fKwVR*sikau=F2MFEcV5EzhWzQ&4OM25yxeY_wmh~LXDNanbPY+z zn%j21r(|Xs@CNTO1bKzTF6S#VmpT8&*-Mr#`f>RTEvs|0=&M#U{gxx4DTkI1!>ZoV=GvvZ z5?#ByMB@jRhWc~2BRa3kPwm+M6@#8P)!4Np>j?RWAKcf!h~``NGXgH11ab-*e20Oh zv^Ch3K6Zs`$G(5t)6CP)pZ=yn!`&xc|Gu;3O@G~47AgF?g(UVxXib-#mOUMMm5hp2 z9FacMVsybHO7HfMkaDAM2zK z(XoZ*i<#e|WhcL5!2YDh=U3`PYl^GdT35p`n+4H0m76Bw?keT0VG#1x`VehdIG8}tR(YCh^Auwpb3A^) zT0q!0gdd~WkCn_LpCM!)*d^n^4$cetJpr098LNRLfzYODaT$cA(5|z&#%dvSql+P) zubHto8UL7)osY^5XA@HYi%+KZeDbVc#B)`|2{H|vpxpiZoWaaOJap|JMNT-QJee>* zk^-_68^65jnex2m+vsz-zA{D{H_$Ww)l+jqx@~LG9=+Mo0;_d@i+k5rwD6wCF)->i z&0aqe>#jC;{fOZtm{RjsHFmi;y2k6>5{HfZJRQpZUBlAzG%5Lb=H6QDG>N40l0n{| z2HyP6H2;63?7Y@Gyk>K37YYQ8zl&9zwYmB0YrSd57n4vXnASDkccCsBjw9d4H&8wE zxS!7tg&<}+rtBb!oP(KD8RLB#w`(8z8i1g8HFK54_20RL*N%AlPSJ}F*O{*yTy8Dd z_eFy2eK+2{j)?;=cx6T>?aP9{>s ziQ5GY81se-cS>Ia%arc)=Zaf=B=S>~B@$O}R)wukRa?wEcBo=TZ~Xk$wqOWa;`Wn` zZ>5HNw*JfhW3L;;5Vl+{wPO%4x1M~n)G=ta*db<{u)RP)FXDY?Pj?0V;OGB0n20*EoHqpE%6&Wxe9;fR>0h%^3Z?SqvE}(q&d)G6g(sSam_3#} z$gvstDjv#XX`SY{#hh+$u6tCHGZuY7-{0JO+ACoLAi!j=wCyNDvc0o#QRxZ!Do2Zc z;4}q7OmAq(r`Qo3KI?4qiJ)DuYqUgCKQ22@JQ1?~yvsOdxy@3y&IC7Wc&+|$LI)bq z=I!UMbxXmsBqW2`-fz`Osw~Xr!MT1(uw@KvpEsZS)R>bY21L^$bcR$45maK29M@bf z0V^$Z*WbmHyL&j?hEDh#I+C`rin+>zFLBYdcZ|4FADU)^#+`6zu7G<8S=UH0*LG* 
z|1qJ^K@$-7dJz@Th$CUF+JGwEWz5enfiU?f^WG3=j8jO;TRaZd81Q4(N}$)gJpYX6 zK)D2bnEa*i*M}eihV7UX(ngsEvydiX5L4M-+im_OP2m@5^1E!^C~${hWYMxtpX>2? zn{}Bp)9iwKKqOvcHx|8EV^m6hm z%npx0y@nXG#3cKU9r~-3*!%6bAdgPdmK^8XJ$YUw<~nyi?L0Fm>Pu9_DdKb47`ew~ zB6%Rbxg&PtD*-H{*D2bEhl7_eQ9oiHRW|mlsDk|9uN!({f1RiO&&Rs{*+GLN(oIji z7VAnLzTR8_(RnUTu5zXKW36ND*HT6E&{6A(>Uy!LHXDm3Z6Pg4e(l^_#iy-X8XV*q zEkCi+XPuL&W)V^tryGOMCBK#XmYnMs8|=v($B%}DfKX_s3LjrOd-FLPDK-*9-j0pb zwWrzkOCOp*m&ur&PH3(~LYG^vIXw0LBQU)-D>A1DDHM-X88%(S`jR(0GbR;mrgzet z{TU{om8C|DRR&m#yQMRKKAy4oFn;`5`pFRxyE()bTdpG^<_PUe4LqX%NvX>=$Hxz_ z$k#0zFi2qD|AdNJ(W_Vv!aL;P0E3v-?E$+;a$zk9)$%U;xE z#|}*EdyU{~Q=Q$5TFIu7?Xd#w$)bzrN^SJuA1_RN%OcA2q6%LEe4pY=>3Ai3ndG?q zM83D-{%`9}0?Aw{IJw$L*y;aGXCe0|I;0D}F4-ik<4F|rThGu!p*Wm zB%N??G1EV8n)vo+C}Q$K>BNRjHvTRD(#xO((W9cqXB-RR`8&2{bNlf6`$;Fx;fc4n zlD+QQvrY8YtoctU5&{!lN$ZbAf1((fl(O-AOaNc5r{({#O6V;-Wz5U_w+&=ahR*2B zBp2L{^8m(CbQ7qk2Pc(5L8WxhKPhG~DYJ~|lLRO`#!>P7zvVN%yH&OBs z0L!e#syUB4(Gr(=3CCi2!={8P_8L+8a)X~lZ`$@oR&-h*-5%HCCq3(I3)flY{Q=jg z?h7C~@BbFfJq~F#$)G4?8UY5~OY+q6U?G{dSxV(9W%Iku9V$=;ev*HcnS1KehvB6% zB*g8CT;}XYA`DX(s@0;gKG)$P-ViW?Hvg@RLlBcnSXo3@p(P&r&rrAR!HF=N?P60A zLK%W5+1*I%f%Z6mkB1th#2lutArXmc(az!z;{F`B5bPwZ+H%1hC@wigP1UG1)CZKe&ouLQzDt)W)jMn&VuB&3HETh@@Sb3SD!3XiqiRhs6UXPl;j!hA^9GH zVSIuD8seOWRk3X8vae>!TgB6(k5Cj;4nF3GEMiXL7gG{ZHIE^j@1~LJv8tSHmXtQ@ zq}gEq*D#6BPVfoqOpqNNGIfNkW8%+sMc%CkoGWyD2h2sNw+ zB&l-9tZTwnRM8J64@{y;JJs-&>*HBpOKi{bbt#|+o@&@CrRzCYm0o@Lz7YkdJwkH2 z)M`HBVPskP|2W{v?^IiI9}?~{ETZ&q9_EAl!rE2hLG+ir@r7KZ(FQm7gG|uf1=^Kthfu|t|Dn74H54Gi zQXg<&*e(H)R7Igb8@lOIJxSdtvwY}@V{FLW3)ng$HB~VF0e}}^OeE&H_wtw37qnM= zt-T~HMt_8Mm1zy1ZVm5W{Am=g zO5%>0v&*O7bu`MMOlp^TLd=fb1C?aY)3SiMj};a6Khqv-2r8MAUm!Ar{XGf#<*N?1 zib(ZflHn!Z29!0n>_m$f&vMc%P1=*R5zJ^pzh?Z(b2a#xw+DHPk&2f-9)Xx9Qg5AY z%BM`u*P7a$U*3?`wgy*Z$XL!zSttiOfj?j{^t(XN?3&5O)t7f`3s#w*_Re+oX9O_N z(Ns}iLx>msU}nuSwIyokb$40Q%9AMvg5MT?H0`#pYM@tsMb&j4QD!q6f`WraeSa26 zH#2L#lvDLZKor19FQ9bF2BzhUEXj*2*nSr^MZI2Oa^A7)@mUx~yN@sLkLsIu?f+RA zPm#=z;=73Hr)&ptobJ>}S9NHDGZ0;1x--Ra-#+2)fQ|WfCMk64Kgwl2TX4R5ox8c? zu57w zLh3Z3y{G>f8A=}gZjmqQmM#Cj$4p*Mbk?`Lvo)IX0;;{*m+bB!5UMnGzi(?$ z`nMev83L!Ha^iHg!AZqdYmaI^h3gHm#p+~FA+t9BZO^pp42SQjPG5QQxRSWZ&(uvsu0$Ks{S$S*L-MAN zR#FBWpkm5BbeNV>PH^n$MY6iFJ3S>2b^0D;zhg8#H@?kQ+4wJ0nG#{c*VJv+aGz#Zoi@a^vPo7#fl|&{l`lcIQ=oJQL;Sj~ z`BM3Zp>NN>vnfy7Rj7@?zk}k+xfwYZXrSB#G8*nJ z2@s8+{YcxDO&p>V3z9V(xqVPK?uh^yt#|fe7GiB)V3%pJH5sQ2)BcIc_$ROnGhsR* zr1xz3T!IbX_VtOpsKGp_b=-_9Es;$hDr2&pM|XIT5VB~lHs{O*_mg*}<*9k9M_Q{l z)))r;j5}`X96NRF-@J?sp4G*Q{2PQ?QkkF31H=DA&5uT+=GRAIib^vOAh<^kzG1?+ z#Y*LFE<8|`i%-%xc|cye?aNXeUD(yX_!Q6nno=pAd5sWM^qEVfnZ9Z|r6f@j+sX1H z;>zG+R+9OH!4y{>-EKp3+B~Ym<{-6yF@}UefZn$}x>2d~toLWs2PbldW0sShK zdF^h76=vDq80x&Uj5dwS6*33GMm&+i$L;VR%j63!7A4GYEBP9*^Lef<^9)=#iO=Ar z_rObjGY1)=bW`vnX}C2mm|Qs==pnGnd=`^oW%-#^*ZL*K-hc##(Wm4e=RYb%hn^B8 zEV@6)(sLIFDauW=r=UT-pC0neQmtL^ao=}O!@iedaMI5oPlp@eWkQ>)^$_3AG5;FDTh88F6SP-)N%USJwb%0p5+>pCY?3q? 
zwN&W9NOs>DU5$TPD$z@j|E~GOmYg2D^>w~+$IEgi{7#wq130sThsf`)h>9Pdzj`*t z$n6zuMqO0w?`O^+4i>+3zbi8L6iIPz>pt5k0O7g;u(&&Ly@DP(R9>H$mFl*M*ONsl;_S$} z(?$S(MlbN5FJ{Kn&VKh_Oc+tGZ6WvyyDra_?E3VeFQM2n!=bc?*ez6v=vD%MXg$$~H2 z`Ew>UsAcm(;AbaCDaT-O{(m_ofJH)xNDdUv_~=f8>*3LQDQb`bXs|Gh{+yX=^BQ1T zJ34xT85h?N(g8KE619eQ)1(@*v_LZc{a95QMm{erekUiUEJ|FIKL-tN+Dkr8jrX%5 z+>9&;msFsVLjs#0rfB_RDB8VIV>nQ^|4n_vm%yH$cPtDG3y}JwzE!uc8NMpb_&u$s zPc3knotK}(KSH#Dzhc;{2iC;TbKm|;YOJCt^)r&Uw&vYGD@mUx<5eWYan0~WHuGQg z8?x=hpC@6(t7Z?Qd zd_6Dmk5QRPzPExbF_Ki0SiA0Wk|iw8Bu{1U;Cu9`V`>XI5ELCwDPlT zkP5H$wI;|Z7)xvR9&sb+Zd1NQb9`<0sh>YBB&-yncF9?2xy#uGJG4=q&f=) zlKIBaVRxF&-SqW8AhIXR~|%vqS2g>N6b->S|)q6PM$tbWUk zJ}wc7d8A|^(m?gwQ6+eb7X&qFfp&A20VO!H^519m@8S#qo%5kWOARKp0}FfiXk`Qc zB;77cDFsv(jg1B0mCNOPZ+juS3`&~r;CbR@$cXW1e@NWh06>u)RquP-L6%Qqp4twe zz%TeNb`0jn9q@`XNK^hf(Jfmd4`>l_xx4O57^XF(u}w#89$U8zUQ!3K%q+N$N$wTC6;M(|M|M9@z8#j2q^R@Hq_x*`YtJkSJ znHhpNvnciGQB)EcyxDuMAfO{TZgIFU9-h&~4|BJwU*X5r0QFMc(K{D!|F4%Pl_(8s zw#VZjxf&+!nVg}50VOg?CQFmQ4|V>6jo)VY{XXN% zlJ!{zHKAGSn$dInCql4{L)l!XdV{uU9wnfT1Nz;W5FNr zLogctdA{4let;@{!LtjBVNw`sI+y*N>Ue3O;wGut3PV9=d|0SM-NR+sbCzk>6+)xu z3BT7B7E6_+)ct<&i6)rfcBH42VX|`pzHszvtS*S;h~fO)Z9G4PBS0BWVx9Ql9#2AD zLKe=v3xbhHjMH>MkM*aZkBuCjm%!3OAVr)yt>@ionUan`-kNJ(Lo*)Bg ztPU!@i`<2C#HHN_d%tl9zHbTYeuYBuwT>tah0B%{-&ZPscGxk)>FY}(I3fzQ5>h2J}i*6pZf#;DXaE8aMM~GKiB<@}-E_ z+bAVo6r^_44iTQ^G5#fNBc;OBbcNcQ9#DPnSbK1L;re;;T8iQbGD*rO3kTw%!?mt*`OiHk+d!WG5e&2PHa_?joYqlk&gXpV)^dfgm)nHva2T2Gim#p1`vOt zbdj40@Ex%_d81H$)J-A7ll$23TkTnHquZOIn&A@#jR3b*7Cw->7qjgcC_nS|-$NeF z1*#5>(dTQhoI134sjSK%tg(?zd!+s21@%=&KyCdOFZQDf%P;`ne9EKx5oV@aK2LNM z?w4Kk8`%eKAf|+gdTenXho4h zqrC9O;SU)dj}(iX`r~si%1M2i)RXi5(|?apN3D`aIlpSjaomu3Rnb~KRlH44whckh0q*ycp^t0Td(~@m$7{V>b|O_Uj%5+4vNe#RkmNM0 zl|rG{M%{6$JQZlI%5O!312~(*Vycw#9LC2lg^(pX!Bx=Y*Yi?>u@04|n5B+XlSy07 zedMq5Rl6-E>$w(lO{qpq*iP^z%N;&WX~b8I3U_#6&yufcc+ViTY5>TfNG=3vWvSRV zkbS%hD4zW4r%B(nYa5;g3n4vPw=6uirhdJsZWwqlX^v{V(w;qr!ZaE`slgtGtIk*& z>%>shto2wIrWxM*is5Nnv;L}Y0*d6;3+|9N7H^VP7esoD36B<? zjZ4%W?<_X#4ma;Gz)VG@WQq49y@6i!|C`kazV&D)eD$v_0ioRk7nnBKOba{ceY8I! zGL)C6R7kG+?f-G~z3#j>fUdPa9W_%wiVENG`~B!MGAd6d1a`ztq^_zn!~FrnKaNiA z*^~1tuyf)8W#``c4G&n5ja|G zS0AQ?<)Upoc3_YT99{61AQAPk^2Tt5aI8d5BFuH9p^@_~Q81$B8?~pRlnX(u$9Ck$ zRH?&dB!^b<7Ynp{abWWYB<01=osw5P)##Wfo4nQ%g;rGl0ew`@P;&{4afo3wl@(10 z17%i4YvOz6?FLP^(DIS*-%FSyzA6`GN6=RKq+f{)#M?V4M)Spxi|7mz`PkB(UV?Bc zM+Et6Cd;SVChU<}72GUQ1Y!e7v`t6R^}xY{SRoJ!eh~i5X_2kkft1&NxOXE3g{q0eYZaT25;J$9MHL>mS{#k4pzCk-4 zWGLCBz}^Y6`x3UdKHm~Jg647(fp6?~x`rGaTA4bC?l<8e=5bsk%3S-6-eJ$5TdQo^ zk{&mmtZSB>EI!C64R3rlRRa{Y92b6!rFkFJLqH%_c;tKJ)0Rx*9|z*U_tlXjIIMYbV=lY$$ly7xX5!JIkVvaT^*txB>(~~vl>d~ZH z>vTBlj(P|uXuSY^r%;p$%fjjV&N0_`nVbp4~4z_z-n<3Mz4Cr{dY$|N(%jdAkn!{DYz15LuE3H8o0-(>(LnO~YAYTX^ zNz!Y;k)1{Z|8@809cEHvx0#^WhxiwQLiL+Zs~@GS(z-75CMmOm`Y9c1GDf^chta%H zSAVXpotU=UoCzEdK921%yvWWw8PRWnrz{V2)PC8Lv|t|JhjPO_M0P!yqh_TNIcovf z!9@Mh-ImTgk^ZzH+X3|_Yd?A^;`k+Bo2a8{;Kqo31( z_SsDMVLQLga$&R-R~$h=&%*Rp7Vxd+uGrlOVG~v~! 
zh~+}GM)BKcTAR4*<(=+f)*f4lI(99$Ek-KHjfy4e_jLK&IP-78D z?D4hTCLO|KjBKl4#x0b*3B0~`QjRN(L{gGtT^%w+z8H+MuJgnN$y02n$x(U#QbtKh z3|Kb>(5XjL2<3BV;$}>$6^b1!nl!}%dV#d&tgUV-{ldlR!y9kRU#KUOM=W23Gx{*d zB^c&$#iW%0jhe{~;kFpgw*9SsUDwcR*F8SR5d`Klm#pujvjQpsE)gjajJPXcuqM?#SCMG^lvc*6ap7#< z)>rT=F(kkD0YMOBte>pPqavggN;?}k#VPOeKzBV=S4SrZnEO>wqWofB2R0Y{Jf%E0 zlYsjZOp8s3t&tXv1Uy{z7O+8N8E6>IpB4J~^!Z0ZrTd$&YSJjd*F@qZ=zT370)kvb z#2JkaINxIj8*ZWm%5(K=Ao_9chS%L4dojmQB0MMYv zS%6yk)uIr@nZBPVJTO)l2%wLCcf8Y2yfh~C5surT{Ms%|*qkU9960H24%V3+=MMPC znQp%@vXL&tXI3X7wut?wX{aRupy_CGp9rO6#S3X50IcV-`&IZkMN75egqy+nAU1b{ z56qTV5)v*k;mtOG!4@q2A8*%2_9lrlGT5d;wz%!VUR2@+4J2OT5iYZ)H`?dUk@oEY z(DF)iea_@~NEAt+-M9Huz3JT6z-hi*_HI+)Bjq%v3c9z=LwU`xJ{h#4BB4^|%C%x$ z`(!Ul!gPgHL?@$RI)!Ora4Bo6(NcIBc_|Mv>!MJ$NL5HTyx}E4J@5AYqDF|mB9D#R zubdtv<+zM!s!Q1Y;^gKU!-dV#s3%X90nZq1%dE_}z9?8x+B=!I@aeF{5=65Z(wnbG zF!c5ex5p@Q!+xR#WL?`@gBDz$%`|c5#Ils?xl&4WbZEAr$6S&z=+sZ(J1L0ys|F>P zAice>xiexOI}nZqb~yI!tLg-KW{eW|X%fB`Eojb&-sxqW-`4w572X6^E9F9lm4GXK zY{;=Y;B&rYd2&GjVzDBg3z_JWChkUB!d$AYaZV}d{P+g>TB}RQ$k>4>j@ZfEMc`!{ z@0FAyzPL(j4HJup{VY#(-y2pjZ_^g1bXDf%;A;SZoaPUk?eRY5L-z^>ZGQjQW*)A3 zx%R_2-0slSN=FHajDa)mun)@LLo*N}d|En}53>IuGaZp}4n#g0-C-THbAF*h>Aw~r zz0Nmr<_a#6Rt5w@g6b+wj)AxGR1M4gQ{MY$WjPm!Yz9sU7vk|A)$z`{e3NoIVK)~a zvYg%fChD4VYhbysxQx#E7V9%c1y(V4S_@18m}LQ`1Zw}rr_s+^3}Pp6+tP;p$U_}r z@o^Q|!MPn{*17fqwB&TfxTU3hCm4J zkA*#ZL$N%Ztc%VmT^l>S4#+w7hIl7oJOau^C5rHFAJ>^!7sC_an)77~->#|PH(tse zagoxPr!T)_X|D}L3D92GJEQ}-d|oFq70kEBEk@^Ls3pX9>{J+J)n6G_mwj@~5g#1W zo*sXHE+Wm!dzwu>wRre6_VN9@Z@26#Z5&Ik01ks+ayoDDF`#}JD(l$}QwwL|# z``=trMRv%@;f9!UinVCGp*H$jQgu`z<)X%nW#uskb#K$^!R;y@p&v!xEPn|%K~vPr z!dsmN@mbN$?b#fPB@;qTQyr?haB~LcS3GX|iLIAW&5PIx8QL}2paiAR+wa!hdRjQ< z&5T-;1ZACQqQ5K8RYk)xNMGVxwv?_a570HdTxv|65+-zC*$&yWpFFv?DoQL6G*LA0 zpZw2zkl}>JEKQFvVA@c|fZXuWc-sTr)ms-7s4;$yEMeOCb(B3fvqE27Bn`%H%qZ-t zQ!-~tU{d;xyG~>!_>sQ|m~ZwRWYiWq9+Kq>zhP1lbllY1Dvd9AOZZ(R40$5`Ih&I1 zjh;PIR7|ztPEF^U(jn zS2!s7h6VoP@^zJUKOxL{6IL$`Z_Jm7?_nT|?3{;qkLj>Fan$kt>!&pEJ-Uck41ehhPMa!AfxU#hlXD26)`B=`Km&4rDFc-i-}_V4K}~FAKIV& zb3<c)a{`jTwFzXbMb}{Ihssc0w5^LhXFaAcql98_%RXq~mLvvboO`y{) zO>}L7x%>c4Zj!_0*!(1-f8p*iacn(tGOpd#05hAze2LM$E{1qzU)^n;y9ZR@zZY;(5bSIW$>Cv12!Jv?by+L*P@QDANj)^oa-oVBG~J)5pv}>6>XQ z2TH`rN-NAsTb(gq-K zgb_mq2>`lpSH@>?QBLLFbuS~E8`ralXv1+J<*Wyc`fy3RVSKettsgUf^_y^6-=^p?`f0roaRA2sEbLVL-#ZmHuH z$dzieABo>wQsgNr6y+||(5u#QsIzE0Id-?a5z(HzKTun{P7#JatceZnFmAFvh001N zU3kFOIPnN>Ew!$--nq%G8LqW=nU1=40FoP~B(CdrE>b1y%%7424iTgN3DSTsRhY|Y z9JhV%Iv@ry|558r%Y}EQf{Bg$PC_!DXX0@SJ#^=Z0?>HL$?mDa^2Taz?;K~z*^lur zY{GvVg~lHsp*XuQh@D<_>skZe^phNSHj!IbM-NB$Heto-u&}FUkhwW~S4B50 z^v%K{Fts{A%6%ZX$I^31Qpx_M7D-RY#kmy#&i#CQ;&6a2Na^6;99(HCh!3Q=TRIHc5svLQ3E9O2qo;J-HX4&W4i@g+ zySH9@dRV(W^gXx`Yf(WZ>(}{1h=gR^mmFluC3W&z+dToIHXN$!gi`dtG1tVr|HtR> zS@n_|1OaW)V}-gNl!Dfn{9O6l7tZ09SPcGnCPyRiw%l19<1e|*!15%nPEP9J87u)l z32g`ixo=lrEiE|vZKNTxct?0L;HXtJdDb2$_eyqojP-zpdt1rnm8v9nM~_1wGT{^& zYO~pbjs8B_`S{MJ()~|AYa}$W7|-_PuhsN?;>R=G&_vZ1hPpT;+70$6!NkY$Yz!vn z2YDV6gb)YpA{%HX$I3S>f0%}%o@ktv9w5JGyxkr^)zH$Uq8sHD=HH9_V$9=MC9=NE-c9u0`otB zCeCIuFA4Ce2PjrgAu$=*1a0f;Xkxc(+`Qt6n)3plb60_5mjazKzuy0aqjEwJIh6ZZF(0+AN!k@pOr;Rv-Ya9T%H z#2-wTX92l~Rnt#tcMeBgQdU`0nd9Y+IOsH>t!uj}$`xZ~pIx_Z1~?jXa5K)lX*BAW zCI%?}OI6!c^)!cts^?lOzps?{~xZELl4a zczvgpv;MH`c&KgO)C!|f13XAl?a}n(Le|0U?avm5a!s3zs=CL9-6VvChvAS@QT2>N zA6mDa5;cK7d9)WplUq6Ggmbh~(FTx1Od+t*uobtT9ZwLv;!lE4@9lF(*R zgoWU(5{hR@Ric>a?^OXG)yaT7fhMkoBm$v@trM#C9V1Ue_TlOR z?6I&pf`pBmokb4oU97Y{8AMSwQkcZMyu(uG8Blt;mb>}nZn%MS+@%YQ(jc+%JWtZJ z{FFP|^F`O}oa4l@1@)qFz%5dc+N+pL{Q<*fX&{l)nUacso*e7EJDxODN8pMQUV60;$jWh+9$}BquTx!LNEmAs 
diff --git a/tests/test_integrals.py b/tests/test_integrals.py
index abb2671623..274b56379f 100644
--- a/tests/test_integrals.py
+++ b/tests/test_integrals.py
@@ -1051,7 +1051,7 @@ def _example_denominator(B, pitch):
         return safediv(1, jnp.sqrt(jnp.abs(1 - pitch * B)))
@pytest.mark.unit - @pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_1d) + @pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_1d * 4) def test_bounce1d_checks(self): """Test that all the internal correctness checks pass for real example.""" # noqa: D202 @@ -1078,7 +1078,12 @@ def test_bounce1d_checks(self): Bounce1D.required_names + ["min_tz |B|", "max_tz |B|", "g_zz"], grid=grid ) # 5. Make the bounce integration operator. - bounce = Bounce1D(grid.source_grid, data, check=True) + bounce = Bounce1D( + grid.source_grid, + data, + quad=leggauss(3), # not checking quadrature accuracy in this test + check=True, + ) pitch_inv = bounce.get_pitch_inv( grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 ) @@ -1113,7 +1118,7 @@ def test_bounce1d_checks(self): print("(α, ρ):", nodes[m, l, 0]) # 7. Plotting - fig, ax = bounce.plot(pitch_inv[..., l], m, l, show=False) + fig, ax = bounce.plot(pitch_inv[..., l], m, l, include_legend=False, show=False) return fig @pytest.mark.unit From 8edc31752d1f08164032b90b006d8bb4e3790380 Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 29 Aug 2024 20:52:00 -0400 Subject: [PATCH 235/241] Finishing touch clean up some docstrings --- desc/integrals/bounce_integral.py | 22 +++++++--------- desc/integrals/bounce_utils.py | 44 +++++++++++++++---------------- tests/test_interp_utils.py | 8 +++--- 3 files changed, 35 insertions(+), 39 deletions(-) diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 020f365bc6..5891ccf0d7 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -67,10 +67,10 @@ class Bounce1D(IOAble): functions of interest do not vanish at infinity, pseudo-spectral techniques are not used. Instead, function approximation is done with local splines. This is useful if one can efficiently obtain data along field lines and - most efficient if the number of toroidal transit to follow a field line is + most efficient if the number of toroidal transits to follow a field line is not too large. - After obtaining the bounce points, the supplied quadrature is performed. + After computing the bounce points, the supplied quadrature is performed. By default, this is a Gauss quadrature after removing the singularity. Local splines interpolate functions in the integrand to the quadrature nodes. @@ -212,8 +212,7 @@ def points(self, pitch_inv, num_well=None): Shape (P, M, L). 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels - are interpreted as the index into the last axis that corresponds to - that field line. + are interpreted as the indices that corresponds to that field line. num_well : int or None Specify to return the first ``num_well`` pairs of bounce points for each pitch along each field line. This is useful if ``num_well`` tightly @@ -254,12 +253,11 @@ def check_points(self, z1, z2, pitch_inv, plot=True, **kwargs): Shape (P, M, L). 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels - are interpreted as the index into the last axis that corresponds to - that field line. + are interpreted as the indices that corresponds to that field line. plot : bool Whether to plot stuff. kwargs - Keyword arguments into ``self.plot_ppoly``. + Keyword arguments into ``desc/integrals/bounce_utils.py::plot_ppoly``. 
Returns ------- @@ -290,8 +288,7 @@ def integrate( ): """Bounce integrate ∫ f(ℓ) dℓ. - Computes the bounce integral ∫ f(ℓ) dℓ for every specified field line - for every λ value in ``pitch_inv``. + Computes the bounce integral ∫ f(ℓ) dℓ for every field line and pitch. Notes ----- @@ -303,8 +300,7 @@ def integrate( Shape (P, M, L). 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels - are interpreted as the index into the last axis that corresponds to - that field line. + are interpreted as the indices that corresponds to that field line. integrand : callable The composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the @@ -321,7 +317,7 @@ def integrate( Shape (M, L, N). If supplied, the bounce integral labeled by well j is weighted such that the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` - interpolated to the deepest point in the magnetic well. Use the method + interpolated to the deepest point in that magnetic well. Use the method ``self.reshape_data`` to reshape the data into the expected shape. num_well : int or None Specify to return the first ``num_well`` pairs of bounce points for each @@ -388,7 +384,7 @@ def plot(self, pitch_inv, m, l, **kwargs): pitch_inv : jnp.ndarray Shape (P, ). 1/λ values to evaluate the bounce integral at the field line - specified by the (α(m), ρ(l)) Clebsch coordinate. + specified by Clebsch coordinate α(m), ρ(l). m, l : int, int Indices into the nodes of the grid supplied to make this object. ``alpha, rho = grid.meshgrid_reshape(grid.nodes[:, :2], "arz")[m, l, 0]``. diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index efb08e9451..695577a582 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -86,9 +86,8 @@ def _check_spline_shape(knots, g, dg_dz, pitch_inv=None): pitch_inv : jnp.ndarray Shape (P, M, L). 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is - specified by ``pitch_inv[:,α,ρ]`` where in the latter the labels - are interpreted as the index into the last axis that corresponds to - that field line. + specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels + are interpreted as the indices that corresponds to that field line. """ errorif(knots.ndim != 1, msg=f"knots should be 1d; got shape {knots.shape}.") @@ -130,8 +129,7 @@ def bounce_points( Shape (P, M, L). 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels - are interpreted as the index into the last axis that corresponds to - that field line. + are interpreted as the indices that corresponds to that field line. knots : jnp.ndarray Shape (N, ). ζ coordinates of spline knots. Must be strictly increasing. 
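As an aside for readers following these docstring changes, the sketch below illustrates the definition being documented, using only NumPy/SciPy and the same cosine model field the unit tests in this series use: bounce points are the ζ where |B| crosses 1/λ, paired by the sign of d|B|/dζ so that |B| ≤ 1/λ between each pair. Nothing here is DESC-specific; it is only meant to make the docstring concrete.

.. code-block:: python

    import numpy as np
    from scipy.interpolate import CubicHermiteSpline

    knots = np.linspace(0, 2 * np.pi, 50)
    B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots))  # model |B| along the field line
    pitch_inv = 0.5                                               # 1/λ
    intersect = np.sort(B.solve(pitch_inv, extrapolate=False))    # ζ where |B| = 1/λ
    slope = B.derivative()(intersect)
    z1 = intersect[slope <= 0]   # |B| decreasing: particle enters the well
    z2 = intersect[slope >= 0]   # |B| increasing: particle is reflected
    # |B| <= 1/λ everywhere between each z1 and its matching z2.
    print(z1, z2)
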
@@ -250,18 +248,18 @@ def _check_bounce_points(z1, z2, pitch_inv, knots, B, plot=True, **kwargs): eps = kwargs.pop("eps", jnp.finfo(jnp.array(1.0).dtype).eps * 10) for ml in np.ndindex(B.shape[:-2]): - Bs = PPoly(B[ml].T, knots) + ppoly = PPoly(B[ml].T, knots) for p in range(pitch_inv.shape[0]): idx = (p, *ml) - Bs_midpoint = Bs((z1[idx] + z2[idx]) / 2) - err_3 = jnp.any(Bs_midpoint > pitch_inv[idx] + eps) + B_midpoint = ppoly((z1[idx] + z2[idx]) / 2) + err_3 = jnp.any(B_midpoint > pitch_inv[idx] + eps) if not (err_1[idx] or err_2[idx] or err_3): continue _z1 = z1[idx][mask[idx]] _z2 = z2[idx][mask[idx]] if plot: plot_ppoly( - ppoly=Bs, + ppoly=ppoly, z1=_z1, z2=_z2, k=pitch_inv[idx], @@ -273,7 +271,7 @@ def _check_bounce_points(z1, z2, pitch_inv, knots, B, plot=True, **kwargs): assert not err_1[idx], "Intersects have an inversion.\n" assert not err_2[idx], "Detected discontinuity.\n" assert not err_3, ( - f"Detected |B| = {Bs_midpoint[mask[idx]]} > {pitch_inv[idx] + eps} " + f"Detected |B| = {B_midpoint[mask[idx]]} > {pitch_inv[idx] + eps} " "= 1/λ in well, implying the straight line path between " "bounce points is in hypograph(|B|). Use more knots.\n" ) @@ -281,7 +279,7 @@ def _check_bounce_points(z1, z2, pitch_inv, knots, B, plot=True, **kwargs): idx = (slice(None), *ml) plots.append( plot_ppoly( - ppoly=Bs, + ppoly=ppoly, z1=z1[idx], z2=z2[idx], k=pitch_inv[idx], @@ -325,8 +323,7 @@ def bounce_quadrature( Shape (P, M, L). 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels - are interpreted as the index into the last axis that corresponds to - that field line. + are interpreted as the indices that corresponds to that field line. integrand : callable The composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the @@ -355,7 +352,6 @@ def bounce_quadrature( Ignored if ``batch`` is false. plot : bool Whether to plot stuff if ``check`` is true. Default is false. - Only developers doing debugging want to see these plots. Returns ------- @@ -478,7 +474,7 @@ def _interpolate_and_integrate( def _check_interp(shape, Q, f, b_sup_z, B, result, plot): - """Check for floating point errors. + """Check for interpolation failures and floating point issues. Parameters ---------- @@ -499,15 +495,14 @@ def _check_interp(shape, Q, f, b_sup_z, B, result, plot): """ assert jnp.isfinite(Q).all(), "NaN interpolation point." - msg = "|B| has vanished, violating the hairy ball theorem." - assert not jnp.isclose(B, 0).any(), msg - assert not jnp.isclose(b_sup_z, 0).any(), msg + assert not ( + jnp.isclose(B, 0).any() or jnp.isclose(b_sup_z, 0).any() + ), "|B| has vanished, violating the hairy ball theorem." # Integrals that we should be computing. marked = jnp.any(Q.reshape(shape) != 0.0, axis=-1) goal = marked.sum() - msg = "Interpolation failed." assert goal == (marked & jnp.isfinite(b_sup_z).reshape(shape).all(axis=-1)).sum() assert goal == (marked & jnp.isfinite(B).reshape(shape).all(axis=-1)).sum() for f_i in f: @@ -528,7 +523,12 @@ def _check_interp(shape, Q, f, b_sup_z, B, result, plot): def _plot_check_interp(Q, V, name=""): - """Plot V[λ, α, ρ, (ζ₁, ζ₂)](Q).""" + """Plot V[λ, α, ρ, (ζ₁, ζ₂)](Q). + + These are pretty, but likely only useful for developers + doing debugging, so we don't include an option to plot these + in the public API of Bounce1D. 
+ """ for idx in np.ndindex(Q.shape[:-2]): marked = jnp.nonzero(jnp.any(Q[idx] != 0.0, axis=-1))[0] if marked.size == 0: @@ -542,8 +542,8 @@ def _plot_check_interp(Q, V, name=""): fig.text( 0.01, 0.01, - "Each color specifies particular bounce integral with the function " - f"{name} interpolated to the quadrature points.", + f"Each color specifies {name} interpolated to the quadrature " + "points of a particular integral.", ) plt.tight_layout() plt.show() diff --git a/tests/test_interp_utils.py b/tests/test_interp_utils.py index d48a32d6f4..606b0fe090 100644 --- a/tests/test_interp_utils.py +++ b/tests/test_interp_utils.py @@ -11,7 +11,7 @@ class TestPolyUtils: """Test polynomial utilities used for local spline interpolation in integrals.""" @pytest.mark.unit - def test_poly_root(self): + def test_polyroot_vec(self): """Test vectorized computation of cubic polynomial exact roots.""" c = np.arange(-24, 24).reshape(4, 6, -1).transpose(-1, 1, 0) # Ensure broadcasting won't hide error in implementation. @@ -20,7 +20,7 @@ def test_poly_root(self): k = np.broadcast_to(np.arange(c.shape[-2]), c.shape[:-1]) # Now increase dimension so that shapes still broadcast, but stuff like # ``c[...,-1]-=k`` is not allowed because it grows the dimension of ``c``. - # This is needed functionality in ``poly_root`` that requires an awkward + # This is needed functionality in ``polyroot_vec`` that requires an awkward # loop to obtain if using jnp.vectorize. k = np.stack([k, k * 2 + 1]) r = polyroot_vec(c, k, sort=True) @@ -34,7 +34,7 @@ def test_poly_root(self): np.testing.assert_allclose( r[(i, *idx)], np.sort(np.roots(d[idx])), - err_msg=f"Eigenvalue branch of poly_root failed at {i, *idx}.", + err_msg=f"Eigenvalue branch of polyroot_vec failed at {i, *idx}.", ) # Now test analytic formula branch, Ensure it filters distinct roots, @@ -59,7 +59,7 @@ def test_poly_root(self): np.testing.assert_allclose( root, unique_root, - err_msg=f"Analytic branch of poly_root failed at {j}.", + err_msg=f"Analytic branch of polyroot_vec failed at {j}.", ) c = np.array([0, 1, -1, -8, 12]) r = polyroot_vec(c, sort=True, distinct=True) From 75c13fd72d87fdda9257b7d08f2523f1d71cf76a Mon Sep 17 00:00:00 2001 From: unalmis Date: Thu, 29 Aug 2024 21:05:50 -0400 Subject: [PATCH 236/241] Make pitch optional argument for plot function --- desc/integrals/bounce_integral.py | 50 +++++++++++++++---------------- desc/integrals/bounce_utils.py | 6 ++-- tests/test_integrals.py | 2 +- 3 files changed, 29 insertions(+), 29 deletions(-) diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 5891ccf0d7..87331bcd73 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -210,7 +210,7 @@ def points(self, pitch_inv, num_well=None): ---------- pitch_inv : jnp.ndarray Shape (P, M, L). - 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is + 1/λ values to compute the bounce points at each field line. 1/λ(ρ,α) is specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels are interpreted as the indices that corresponds to that field line. num_well : int or None @@ -251,7 +251,7 @@ def check_points(self, z1, z2, pitch_inv, plot=True, **kwargs): epigraph of |B|. pitch_inv : jnp.ndarray Shape (P, M, L). - 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is + 1/λ values to compute the bounce points at each field line. 
1/λ(ρ,α) is specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels are interpreted as the indices that corresponds to that field line. plot : bool @@ -298,7 +298,7 @@ def integrate( ---------- pitch_inv : jnp.ndarray Shape (P, M, L). - 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is + 1/λ values to compute the bounce integrals of each field line. 1/λ(ρ,α) is specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels are interpreted as the indices that corresponds to that field line. integrand : callable @@ -376,18 +376,18 @@ def integrate( assert result.shape[-1] == setdefault(num_well, np.prod(self._dB_dz.shape[-2:])) return result - def plot(self, pitch_inv, m, l, **kwargs): + def plot(self, m, l, pitch_inv=None, **kwargs): """Plot the field line and bounce points of the given pitch angles. Parameters ---------- - pitch_inv : jnp.ndarray - Shape (P, ). - 1/λ values to evaluate the bounce integral at the field line - specified by Clebsch coordinate α(m), ρ(l). m, l : int, int Indices into the nodes of the grid supplied to make this object. - ``alpha, rho = grid.meshgrid_reshape(grid.nodes[:, :2], "arz")[m, l, 0]``. + ``alpha,rho=grid.meshgrid_reshape(grid.nodes[:,:2],"arz")[m,l,0]``. + pitch_inv : jnp.ndarray + Shape (P, ). + Optional, 1/λ values whose corresponding bounce points on the field line + specified by Clebsch coordinate α(m), ρ(l) will be plotted. kwargs Keyword arguments into ``desc/integrals/bounce_utils.py::plot_ppoly``. @@ -397,22 +397,22 @@ def plot(self, pitch_inv, m, l, **kwargs): Matplotlib (fig, ax) tuple. """ - pitch_inv = jnp.atleast_1d(jnp.squeeze(pitch_inv)) - errorif( - pitch_inv.ndim != 1, - msg=f"Got pitch_inv.ndim={pitch_inv.ndim}, but expected 1.", - ) - z1, z2 = bounce_points( - pitch_inv[:, jnp.newaxis, jnp.newaxis], - self._zeta, - self.B[m, l], - self._dB_dz[m, l], - ) + if pitch_inv is not None: + pitch_inv = jnp.atleast_1d(jnp.squeeze(pitch_inv)) + errorif( + pitch_inv.ndim != 1, + msg=f"Got pitch_inv.ndim={pitch_inv.ndim}, but expected 1.", + ) + z1, z2 = bounce_points( + pitch_inv[:, jnp.newaxis, jnp.newaxis], + self._zeta, + self.B[m, l], + self._dB_dz[m, l], + ) + kwargs["z1"] = z1 + kwargs["z2"] = z2 + kwargs["k"] = pitch_inv fig, ax = plot_ppoly( - ppoly=PPoly(self.B[m, l].T, self._zeta), - z1=z1, - z2=z2, - k=pitch_inv, - **_set_default_plot_kwargs(kwargs), + PPoly(self.B[m, l].T, self._zeta), **_set_default_plot_kwargs(kwargs) ) return fig, ax diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index 695577a582..dc5c094327 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -85,7 +85,7 @@ def _check_spline_shape(knots, g, dg_dz, pitch_inv=None): last axis enumerates the polynomials that compose a particular spline. pitch_inv : jnp.ndarray Shape (P, M, L). - 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is + 1/λ values to compute the bounce integrals of each field line. 1/λ(ρ,α) is specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels are interpreted as the indices that corresponds to that field line. @@ -127,7 +127,7 @@ def bounce_points( ---------- pitch_inv : jnp.ndarray Shape (P, M, L). - 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is + 1/λ values to compute the bounce points at each field line. 1/λ(ρ,α) is specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels are interpreted as the indices that corresponds to that field line. 
knots : jnp.ndarray @@ -321,7 +321,7 @@ def bounce_quadrature( epigraph of |B|. pitch_inv : jnp.ndarray Shape (P, M, L). - 1/λ values to evaluate the bounce integral at each field line. 1/λ(ρ,α) is + 1/λ values to evaluate the bounce integrals of each field line. 1/λ(ρ,α) is specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels are interpreted as the indices that corresponds to that field line. integrand : callable diff --git a/tests/test_integrals.py b/tests/test_integrals.py index 274b56379f..922c5896b9 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -1118,7 +1118,7 @@ def test_bounce1d_checks(self): print("(α, ρ):", nodes[m, l, 0]) # 7. Plotting - fig, ax = bounce.plot(pitch_inv[..., l], m, l, include_legend=False, show=False) + fig, ax = bounce.plot(m, l, pitch_inv[..., l], include_legend=False, show=False) return fig @pytest.mark.unit From 446c0b7d45817c59e74a315900e96644709d91d9 Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 30 Aug 2024 01:14:26 -0400 Subject: [PATCH 237/241] Address review comments and fix regression in batch argument from recent commit Ensure batch=False bounce integration is done in test to catch future regressions --- desc/backend.py | 5 +-- desc/integrals/bounce_integral.py | 18 +++++------ desc/integrals/bounce_utils.py | 53 ++++++++++++++++--------------- desc/integrals/interp_utils.py | 8 ++--- desc/integrals/quad_utils.py | 1 - tests/test_integrals.py | 3 +- 6 files changed, 46 insertions(+), 42 deletions(-) diff --git a/desc/backend.py b/desc/backend.py index c237ba1504..5538c79a8c 100644 --- a/desc/backend.py +++ b/desc/backend.py @@ -71,7 +71,7 @@ imap = jax.lax.map from jax.experimental.ode import odeint from jax.lax import cond, fori_loop, scan, switch, while_loop - from jax.nn import softmax + from jax.nn import softmax as softargmax from jax.numpy import bincount, flatnonzero, repeat, take from jax.numpy.fft import irfft, rfft, rfft2 from jax.scipy.fft import dct, idct @@ -422,7 +422,8 @@ def tangent_solve(g, y): qr, solve_triangular, ) - from scipy.special import gammaln, logsumexp, softmax # noqa: F401 + from scipy.special import gammaln, logsumexp # noqa: F401 + from scipy.special import softmax as softargmax # noqa: F401 trapezoid = np.trapezoid if hasattr(np, "trapezoid") else np.trapz diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 87331bcd73..defc4b3b91 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -175,8 +175,8 @@ def __init__( source=(0, 1), destination=(-1, -2), ) - assert self.B.shape == (grid.num_alpha, grid.num_rho, grid.num_zeta - 1, 4) self._dB_dz = polyder_vec(self.B) + assert self.B.shape == (grid.num_alpha, grid.num_rho, grid.num_zeta - 1, 4) assert self._dB_dz.shape == (grid.num_alpha, grid.num_rho, grid.num_zeta - 1, 3) @staticmethod @@ -210,9 +210,9 @@ def points(self, pitch_inv, num_well=None): ---------- pitch_inv : jnp.ndarray Shape (P, M, L). - 1/λ values to compute the bounce points at each field line. 1/λ(ρ,α) is + 1/λ values to compute the bounce points at each field line. 1/λ(α,ρ) is specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels - are interpreted as the indices that corresponds to that field line. + are interpreted as the indices that correspond to that field line. num_well : int or None Specify to return the first ``num_well`` pairs of bounce points for each pitch along each field line. 
This is useful if ``num_well`` tightly @@ -232,7 +232,7 @@ def points(self, pitch_inv, num_well=None): that the straight line path between ``z1`` and ``z2`` resides in the epigraph of |B|. - If there were less than ``num_wells`` wells detected along a field line, + If there were less than ``num_well`` wells detected along a field line, then the last axis, which enumerates bounce points for a particular field line and pitch, is padded with zero. @@ -251,9 +251,9 @@ def check_points(self, z1, z2, pitch_inv, plot=True, **kwargs): epigraph of |B|. pitch_inv : jnp.ndarray Shape (P, M, L). - 1/λ values to compute the bounce points at each field line. 1/λ(ρ,α) is + 1/λ values to compute the bounce points at each field line. 1/λ(α,ρ) is specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels - are interpreted as the indices that corresponds to that field line. + are interpreted as the indices that correspond to that field line. plot : bool Whether to plot stuff. kwargs @@ -298,9 +298,9 @@ def integrate( ---------- pitch_inv : jnp.ndarray Shape (P, M, L). - 1/λ values to compute the bounce integrals of each field line. 1/λ(ρ,α) is - specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels - are interpreted as the indices that corresponds to that field line. + 1/λ values to compute the bounce integrals. 1/λ(α,ρ) is specified by + ``pitch_inv[...,α,ρ]`` where in the latter the labels are interpreted + as the indices that correspond to that field line. integrand : callable The composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index dc5c094327..8e45c9d44c 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -4,8 +4,7 @@ from interpax import PPoly from matplotlib import pyplot as plt -from desc.backend import imap, jnp -from desc.backend import softmax as softargmax +from desc.backend import imap, jnp, softargmax from desc.integrals.basis import _add2legend, _in_epigraph_and, _plot_intersect from desc.integrals.interp_utils import ( interp1d_Hermite_vec, @@ -85,9 +84,9 @@ def _check_spline_shape(knots, g, dg_dz, pitch_inv=None): last axis enumerates the polynomials that compose a particular spline. pitch_inv : jnp.ndarray Shape (P, M, L). - 1/λ values to compute the bounce integrals of each field line. 1/λ(ρ,α) is - specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels - are interpreted as the indices that corresponds to that field line. + 1/λ values. 1/λ(α,ρ) is specified by ``pitch_inv[...,α,ρ]`` where in + the latter the labels are interpreted as the indices that correspond + to that field line. """ errorif(knots.ndim != 1, msg=f"knots should be 1d; got shape {knots.shape}.") @@ -127,9 +126,9 @@ def bounce_points( ---------- pitch_inv : jnp.ndarray Shape (P, M, L). - 1/λ values to compute the bounce points at each field line. 1/λ(ρ,α) is - specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels - are interpreted as the indices that corresponds to that field line. + 1/λ values to compute the bounce points. 1/λ(α,ρ) is specified by + ``pitch_inv[...,α,ρ]`` where in the latter the labels are interpreted + as the indices that correspond to that field line. knots : jnp.ndarray Shape (N, ). ζ coordinates of spline knots. Must be strictly increasing. 
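For concreteness, a usage sketch of ``bounce_points`` as exercised by the unit tests in this series: the spline coefficients come from transposing SciPy's ``PPoly.c`` so that the second-to-last axis enumerates the polynomial pieces and the last axis the power-series coefficients, and the import path simply follows the module patched above. The scalar pitch and the cosine field mirror the toy inputs the tests use; treat this as a sketch, not additional test code.

.. code-block:: python

    import numpy as np
    from scipy.interpolate import CubicHermiteSpline

    from desc.integrals.bounce_utils import bounce_points

    knots = np.linspace(0, 3 * np.pi, 60)
    B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots))
    # B.c has shape (4, N - 1); transpose to (N - 1, 4) as documented above.
    z1, z2 = bounce_points(0.5, knots, B.c.T, B.derivative().c.T, check=True)
    # With num_well specified, the last axis is truncated to the first num_well
    # pairs, or padded with zeros if fewer wells were detected:
    # z1, z2 = bounce_points(0.5, knots, B.c.T, B.derivative().c.T, num_well=2)
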
@@ -168,7 +167,7 @@ def bounce_points( that the straight line path between ``z1`` and ``z2`` resides in the epigraph of |B|. - If there were less than ``num_wells`` wells detected along a field line, + If there were less than ``num_well`` wells detected along a field line, then the last axis, which enumerates bounce points for a particular field line and pitch, is padded with zero. @@ -321,9 +320,9 @@ def bounce_quadrature( epigraph of |B|. pitch_inv : jnp.ndarray Shape (P, M, L). - 1/λ values to evaluate the bounce integrals of each field line. 1/λ(ρ,α) is - specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels - are interpreted as the indices that corresponds to that field line. + 1/λ values to compute the bounce integrals. 1/λ(α,ρ) is specified by + ``pitch_inv[...,α,ρ]`` where in the latter the labels are interpreted + as the indices that correspond to that field line. integrand : callable The composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the @@ -357,16 +356,15 @@ def bounce_quadrature( ------- result : jnp.ndarray Shape (P, M, L, num_well). - First axis enumerates pitch values. Second axis enumerates the field lines. - Third axis enumerates the flux surfaces. Last axis enumerates the bounce - integrals. + Last axis enumerates the bounce integrals for a given pitch, field line, + and flux surface. """ errorif(x.ndim != 1 or x.shape != w.shape) errorif(z1.ndim != 4 or z1.shape != z2.shape) errorif(pitch_inv.ndim != 3) if not isinstance(f, (list, tuple)): - f = list(f) + f = [f] if isinstance(f, (jnp.ndarray, np.ndarray)) else list(f) # Integrate and complete the change of variable. if batch: @@ -441,17 +439,15 @@ def _interpolate_and_integrate( ------- result : jnp.ndarray Shape Q.shape[:-1]. - Quadrature for every pitch. + Quadrature result. """ assert w.ndim == 1 assert 3 < Q.ndim < 6 and Q.shape[0] == pitch_inv.shape[0] and Q.shape[-1] == w.size assert data["|B|"].shape[-1] == knots.size - if Q.ndim == 5: - pitch_inv = pitch_inv[..., jnp.newaxis] shape = Q.shape - Q = flatten_matrix(Q) + Q = Q.reshape(*Q.shape[:3], -1) b_sup_z = interp1d_Hermite_vec( Q, knots, @@ -464,7 +460,14 @@ def _interpolate_and_integrate( # that do not preserve smoothness can be captured. f = [interp1d_vec(Q, knots, f_i, method=method) for f_i in f] result = jnp.dot( - (integrand(*f, B=B, pitch=1 / pitch_inv) / b_sup_z).reshape(shape), + ( + integrand( + *f, + B=B, + pitch=1 / pitch_inv[..., jnp.newaxis], + ) + / b_sup_z + ).reshape(shape), w, ) if check: @@ -529,7 +532,7 @@ def _plot_check_interp(Q, V, name=""): doing debugging, so we don't include an option to plot these in the public API of Bounce1D. """ - for idx in np.ndindex(Q.shape[:-2]): + for idx in np.ndindex(Q.shape[:3]): marked = jnp.nonzero(jnp.any(Q[idx] != 0.0, axis=-1))[0] if marked.size == 0: continue @@ -663,9 +666,9 @@ def interp_to_argmin( z1 = atleast_nd(4, z1) z2 = atleast_nd(4, z2) ext, g_ext = _get_extrema(knots, g, dg_dz, sentinel=0) - # JAX softmax(x) does the proper shift to compute softmax(x - max(x)), but it's - # still not a good idea to compute over a large length scale, so we warn in - # docstring to choose upper sentinel properly. + # Our softargmax(x) does the proper shift to compute softargmax(x - max(x)), + # but it's still not a good idea to compute over a large length scale, so we + # warn in docstring to choose upper sentinel properly. 
argmin = softargmax( beta * _where_for_argmin(z1, z2, ext, g_ext, upper_sentinel), axis=-1, diff --git a/desc/integrals/interp_utils.py b/desc/integrals/interp_utils.py index c1b924fc6b..4943be509c 100644 --- a/desc/integrals/interp_utils.py +++ b/desc/integrals/interp_utils.py @@ -2,9 +2,9 @@ Notes ----- -These polynomial utilities are chosen for performance on gpu when among -methods that have the best (asymptotic) algorithmic complexity. For example, -we prefer not to use Horner's method. +These polynomial utilities are chosen for performance on gpu among +methods that have the best (asymptotic) algorithmic complexity. +For example, we prefer to not use Horner's method. """ from functools import partial @@ -159,7 +159,7 @@ def polyroot_vec( ------- r : jnp.ndarray Shape (..., *c.shape[:-1], c.shape[-1] - 1). - The roots of the polynomial, iterated over the last axis.First + The roots of the polynomial, iterated over the last axis. """ get_only_real_roots = not (a_min is None and a_max is None) diff --git a/desc/integrals/quad_utils.py b/desc/integrals/quad_utils.py index 187d76367e..b7bfbb6a6c 100644 --- a/desc/integrals/quad_utils.py +++ b/desc/integrals/quad_utils.py @@ -213,7 +213,6 @@ def get_quadrature(quad, automorphism): x, w = quad assert x.ndim == w.ndim == 1 if automorphism is not None: - # Apply automorphisms to supress singularities. auto, grad_auto = automorphism w = w * grad_auto(x) # Recall bijection_from_disc(auto(x), ζ₁, ζ₂) = ζ. diff --git a/tests/test_integrals.py b/tests/test_integrals.py index 922c5896b9..cb59fc9f67 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -785,7 +785,7 @@ def test_z1_before_extrema(self): @pytest.mark.unit def test_z2_before_extrema(self): """Case where local minimum is the shared intersect between two wells.""" - # To make sure both regions in hypgraph left and right of extrema are not + # To make sure both regions in hypograph left and right of extrema are not # integrated over. start = -1.2 * np.pi end = -2 * start @@ -1097,6 +1097,7 @@ def test_bounce1d_checks(self): pitch_inv, integrand=TestBounce1D._example_denominator, check=True, + batch=False, ) avg = safediv(num, den) assert np.isfinite(avg).all() and np.count_nonzero(avg) From 239e44185ced73697f0b1565836f642cb1524790 Mon Sep 17 00:00:00 2001 From: unalmis Date: Fri, 30 Aug 2024 14:25:17 -0400 Subject: [PATCH 238/241] Increase coverage --- desc/equilibrium/coords.py | 4 +-- desc/integrals/bounce_integral.py | 7 ++++- desc/integrals/bounce_utils.py | 30 ++++++++----------- desc/utils.py | 8 ++--- tests/test_integrals.py | 50 ++++++++++++++++++++----------- 5 files changed, 55 insertions(+), 44 deletions(-) diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index 9d722ce52d..c7b51b24ab 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -685,12 +685,12 @@ def get_rtz_grid( rvp : rho, theta_PEST, phi rtz : rho, theta, zeta period : tuple of float - Assumed periodicity for each quantity in inbasis. + Assumed periodicity for functions of the given coordinates. Use ``np.inf`` to denote no periodicity. jitable : bool, optional If false the returned grid has additional attributes. Required to be false to retain nodes at magnetic axis. - kwargs : dict + kwargs Additional parameters to supply to the coordinate mapping function. See ``desc.equilibrium.coords.map_coordinates``. 
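The ``quad_utils`` hunk a bit further up (patch 237) drops an inline comment about automorphisms suppressing singularities, so for context: composing the quadrature with a sin map turns a reciprocal-square-root endpoint singularity, the same type 1/√(1 − λ|B|) has at the bounce points, into a smooth integrand. The snippet below is a standalone NumPy illustration of that idea; the maps are written out by hand rather than imported, so read it as a sketch of the principle, not of DESC's exact ``automorphism_sin`` implementation.

.. code-block:: python

    import numpy as np

    def f(x):
        # Integrable endpoint singularity; the exact integral over [-1, 1] is pi/2.
        return x**2 / np.sqrt(1 - x**2)

    t, w = np.polynomial.legendre.leggauss(10)
    naive = np.sum(w * f(t))                   # plain Gauss-Legendre converges slowly here
    x = np.sin(np.pi * t / 2)                  # sin automorphism of [-1, 1]
    dx_dt = np.pi / 2 * np.cos(np.pi * t / 2)  # its Jacobian, folded into the weights
    mapped = np.sum(w * f(x) * dx_dt)          # transformed integrand is smooth
    print(naive, mapped, np.pi / 2)
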
diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index defc4b3b91..f02dbc942f 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -255,7 +255,7 @@ def check_points(self, z1, z2, pitch_inv, plot=True, **kwargs): specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels are interpreted as the indices that correspond to that field line. plot : bool - Whether to plot stuff. + Whether to plot the field lines and bounce points of the given pitch angles. kwargs Keyword arguments into ``desc/integrals/bounce_utils.py::plot_ppoly``. @@ -285,6 +285,7 @@ def integrate( method="cubic", batch=True, check=False, + plot=False, ): """Bounce integrate ∫ f(ℓ) dℓ. @@ -337,6 +338,9 @@ def integrate( Whether to perform computation in a batched manner. Default is true. check : bool Flag for debugging. Must be false for JAX transformations. + plot : bool + Whether to plot the quantities in the integrand interpolated to the + quadrature points of each integral. Ignored if ``check`` is false. Returns ------- @@ -361,6 +365,7 @@ def integrate( method=method, batch=batch, check=check, + plot=plot, ) if weight is not None: result *= interp_to_argmin( diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index 8e45c9d44c..2a8adfcdb1 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -30,7 +30,7 @@ def get_pitch_inv(min_B, max_B, num, relative_shift=1e-6): - """Return 1/λ values uniformly spaced between ``min_B`` and ``max_B``. + """Return 1/λ values for quadrature between ``min_B`` and ``max_B``. Parameters ---------- @@ -262,6 +262,7 @@ def _check_bounce_points(z1, z2, pitch_inv, knots, B, plot=True, **kwargs): z1=_z1, z2=_z2, k=pitch_inv[idx], + title=kwargs.pop("title") + f", (p,m,l)={idx}", **kwargs, ) @@ -350,7 +351,8 @@ def bounce_quadrature( Flag for debugging. Must be false for JAX transformations. Ignored if ``batch`` is false. plot : bool - Whether to plot stuff if ``check`` is true. Default is false. + Whether to plot the quantities in the integrand interpolated to the + quadrature points of each integral. Ignored if ``check`` is false. Returns ------- @@ -418,8 +420,8 @@ def _interpolate_and_integrate( data, knots, method, - check=False, - plot=False, + check, + plot, ): """Interpolate given functions to points ``Q`` and perform quadrature. @@ -526,12 +528,7 @@ def _check_interp(shape, Q, f, b_sup_z, B, result, plot): def _plot_check_interp(Q, V, name=""): - """Plot V[λ, α, ρ, (ζ₁, ζ₂)](Q). - - These are pretty, but likely only useful for developers - doing debugging, so we don't include an option to plot these - in the public API of Bounce1D. - """ + """Plot V[λ, α, ρ, (ζ₁, ζ₂)](Q).""" for idx in np.ndindex(Q.shape[:3]): marked = jnp.nonzero(jnp.any(Q[idx] != 0.0, axis=-1))[0] if marked.size == 0: @@ -539,15 +536,10 @@ def _plot_check_interp(Q, V, name=""): fig, ax = plt.subplots() ax.set_xlabel(r"$\zeta$") ax.set_ylabel(name) - ax.set_title(f"Interpolation of {name} to quadrature points. 
Index {idx}.") + ax.set_title(f"Interpolation of {name} to quadrature points, (p,m,l)={idx}") for i in marked: ax.plot(Q[(*idx, i)], V[(*idx, i)], marker="o") - fig.text( - 0.01, - 0.01, - f"Each color specifies {name} interpolated to the quadrature " - "points of a particular integral.", - ) + fig.text(0.01, 0.01, "Each color specifies a particular integral.") plt.tight_layout() plt.show() @@ -765,7 +757,7 @@ def plot_ppoly( start=None, stop=None, include_knots=False, - knot_transparency=0.1, + knot_transparency=0.2, include_legend=True, ): """Plot the piecewise polynomial ``ppoly``. @@ -805,6 +797,8 @@ def plot_ppoly( Whether to plot vertical lines at the knots. knot_transparency : float Transparency of knot lines. + include_legend : bool + Whether to include the legend in the plot. Default is true. Returns ------- diff --git a/desc/utils.py b/desc/utils.py index 0f6553b67f..6ead7a5078 100644 --- a/desc/utils.py +++ b/desc/utils.py @@ -739,21 +739,17 @@ def flatten_matrix(y): # https://github.com/numpy/numpy/issues/25805 def atleast_nd(ndmin, ary): """Adds dimensions to front if necessary.""" - if ndmin == 1: - return jnp.atleast_1d(ary) - if ndmin == 2: - return jnp.atleast_2d(ary) return jnp.array(ary, ndmin=ndmin) if jnp.ndim(ary) < ndmin else ary def atleast_3d_mid(ary): - """Like np.atleast3d but if adds dim at axis 1 for 2d arrays.""" + """Like np.atleast_3d but if adds dim at axis 1 for 2d arrays.""" ary = jnp.atleast_2d(ary) return ary[:, jnp.newaxis] if ary.ndim == 2 else ary def atleast_2d_end(ary): - """Like np.atleast2d but if adds dim at axis 1 for 1d arrays.""" + """Like np.atleast_2d but if adds dim at axis 1 for 1d arrays.""" ary = jnp.atleast_1d(ary) return ary[:, jnp.newaxis] if ary.ndim == 1 else ary diff --git a/tests/test_integrals.py b/tests/test_integrals.py index cb59fc9f67..a368987853 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -40,11 +40,11 @@ get_pitch_inv, interp_to_argmin, interp_to_argmin_hard, - plot_ppoly, ) from desc.integrals.quad_utils import ( automorphism_sin, bijection_from_disc, + get_quadrature, grad_automorphism_sin, grad_bijection_from_disc, leggauss_lob, @@ -738,7 +738,9 @@ def test_z1_first(self): B = CubicHermiteSpline(knots, np.cos(knots), -np.sin(knots)) pitch_inv = 0.5 intersect = B.solve(pitch_inv, extrapolate=False) - z1, z2 = bounce_points(pitch_inv, knots, B.c.T, B.derivative().c.T, check=True) + z1, z2 = bounce_points( + pitch_inv, knots, B.c.T, B.derivative().c.T, check=True, include_knots=True + ) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size np.testing.assert_allclose(z1, intersect[0::2]) @@ -753,7 +755,9 @@ def test_z2_first(self): B = CubicHermiteSpline(k, np.cos(k), -np.sin(k)) pitch_inv = 0.5 intersect = B.solve(pitch_inv, extrapolate=False) - z1, z2 = bounce_points(pitch_inv, k, B.c.T, B.derivative().c.T, check=True) + z1, z2 = bounce_points( + pitch_inv, k, B.c.T, B.derivative().c.T, check=True, include_knots=True + ) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size np.testing.assert_allclose(z1, intersect[1:-1:2]) @@ -772,7 +776,9 @@ def test_z1_before_extrema(self): ) dB_dz = B.derivative() pitch_inv = B(dB_dz.roots(extrapolate=False))[3] - 1e-13 - z1, z2 = bounce_points(pitch_inv, k, B.c.T, dB_dz.c.T, check=True) + z1, z2 = bounce_points( + pitch_inv, k, B.c.T, dB_dz.c.T, check=True, include_knots=True + ) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size intersect = B.solve(pitch_inv, extrapolate=False) @@ -797,7 +803,9 @@ def 
test_z2_before_extrema(self): ) dB_dz = B.derivative() pitch_inv = B(dB_dz.roots(extrapolate=False))[2] - z1, z2 = bounce_points(pitch_inv, k, B.c.T, dB_dz.c.T, check=True) + z1, z2 = bounce_points( + pitch_inv, k, B.c.T, dB_dz.c.T, check=True, include_knots=True + ) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size intersect = B.solve(pitch_inv, extrapolate=False) @@ -819,9 +827,14 @@ def test_extrema_first_and_before_z1(self): dB_dz = B.derivative() pitch_inv = B(dB_dz.roots(extrapolate=False))[2] + 1e-13 z1, z2 = bounce_points( - pitch_inv, k[2:], B.c[:, 2:].T, dB_dz.c[:, 2:].T, check=True, plot=False + pitch_inv, + k[2:], + B.c[:, 2:].T, + dB_dz.c[:, 2:].T, + check=True, + start=k[2], + include_knots=True, ) - plot_ppoly(B, z1=z1, z2=z2, k=pitch_inv, start=k[2]) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size intersect = B.solve(pitch_inv, extrapolate=False) @@ -844,7 +857,9 @@ def test_extrema_first_and_before_z2(self): ) dB_dz = B.derivative() pitch_inv = B(dB_dz.roots(extrapolate=False))[1] - 1e-13 - z1, z2 = bounce_points(pitch_inv, k, B.c.T, dB_dz.c.T, check=True) + z1, z2 = bounce_points( + pitch_inv, k, B.c.T, dB_dz.c.T, check=True, include_knots=True + ) z1, z2 = TestBounce1DPoints.filter(z1, z2) assert z1.size and z2.size # Our routine correctly detects intersection, while scipy, jnp.root fails. @@ -937,7 +952,7 @@ def test_bounce_quadrature(self, is_strong, quad, automorphism): check=True, **kwargs, ) - result = bounce.integrate(pitch_inv, integrand, check=True) + result = bounce.integrate(pitch_inv, integrand, check=True, plot=True) assert np.count_nonzero(result) == 1 np.testing.assert_allclose(result.sum(), truth, rtol=1e-4) @@ -950,14 +965,10 @@ def _adaptive_elliptic(integrand, k): @staticmethod def _fixed_elliptic(integrand, k, deg): - # Can use this test to benchmark quadrature performance. - # Just k = np.atleast_1d(k) a = np.zeros_like(k) b = 2 * np.arcsin(k) - x, w = leggauss(deg) - w = w * grad_automorphism_sin(x) - x = automorphism_sin(x) + x, w = get_quadrature(leggauss(deg), (automorphism_sin, grad_automorphism_sin)) Z = bijection_from_disc(x, a[..., np.newaxis], b[..., np.newaxis]) k = k[..., np.newaxis] quad = np.dot(integrand(Z, k), w) * grad_bijection_from_disc(a, b) @@ -1118,7 +1129,10 @@ def test_bounce1d_checks(self): nodes = grid.source_grid.meshgrid_reshape(grid.source_grid.nodes[:, :2], "arz") print("(α, ρ):", nodes[m, l, 0]) - # 7. Plotting + # 7. Optionally check for correctness of bounce points + bounce.check_points(*bounce.points(pitch_inv), pitch_inv, plot=False) + + # 8. Plotting fig, ax = bounce.plot(m, l, pitch_inv[..., l], include_legend=False, show=False) return fig @@ -1343,6 +1357,8 @@ def test_binormal_drift_bounce1d(self): Lref=data["a"], check=True, ) + bounce.check_points(*bounce.points(pitch_inv), pitch_inv, plot=False) + f = Bounce1D.reshape_data(grid.source_grid, cvdrift, gbdrift) drift_numerical_num = bounce.integrate( pitch_inv=pitch_inv, @@ -1389,8 +1405,8 @@ def _test_bounce_autodiff(bounce, integrand, **kwargs): If the AD tool works properly, then these operations should be assigned zero gradients while the gradients wrt parameters of our physics computations accumulate correctly. Less mature AD tools may have subtle bugs that cause - the gradients to not accumulate correctly. (There's more than a few - GitHub issues that JAX has fixed related to this in the past!) + the gradients to not accumulate correctly. (There's a few + GitHub issues that JAX has fixed related to this in the past.) 
This test first confirms the gradients computed by reverse mode AD matches the analytic approximation of the true gradient. Then we confirm that the From 8376d03e09e5755a7d219987e9b9295b85d8f824 Mon Sep 17 00:00:00 2001 From: unalmis Date: Sat, 31 Aug 2024 20:41:26 -0400 Subject: [PATCH 239/241] Make broadcasting simpler for end user --- desc/integrals/bounce_integral.py | 60 ++++------ desc/integrals/bounce_utils.py | 186 +++++++++++++----------------- desc/integrals/quad_utils.py | 2 +- tests/test_integrals.py | 50 ++++---- tests/test_quad_utils.py | 4 +- 5 files changed, 129 insertions(+), 173 deletions(-) diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index f02dbc942f..afe7e8493c 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -6,10 +6,10 @@ from desc.backend import jnp from desc.integrals.bounce_utils import ( + _bounce_quadrature, _check_bounce_points, _set_default_plot_kwargs, bounce_points, - bounce_quadrature, get_pitch_inv, interp_to_argmin, plot_ppoly, @@ -21,7 +21,7 @@ grad_automorphism_sin, ) from desc.io import IOAble -from desc.utils import atleast_nd, errorif, setdefault, warnif +from desc.utils import errorif, setdefault, warnif class Bounce1D(IOAble): @@ -167,8 +167,8 @@ def __init__( self.B = jnp.moveaxis( CubicHermiteSpline( x=self._zeta, - y=self._data["|B|"], - dydx=self._data["|B|_z|r,a"], + y=self._data["|B|"].squeeze(axis=-2), + dydx=self._data["|B|_z|r,a"].squeeze(axis=-2), axis=-1, check=check, ).c, @@ -196,22 +196,18 @@ def reshape_data(grid, *arys): List of reshaped data which may be given to ``integrate``. """ - f = [grid.meshgrid_reshape(d, "arz") for d in arys] + f = [grid.meshgrid_reshape(d, "arz")[..., jnp.newaxis, :] for d in arys] return f def points(self, pitch_inv, num_well=None): """Compute bounce points. - Notes - ----- - Only the dimensions following L are required. The leading axes are batch axes. - Parameters ---------- pitch_inv : jnp.ndarray - Shape (P, M, L). + Shape (M, L, P). 1/λ values to compute the bounce points at each field line. 1/λ(α,ρ) is - specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels + specified by ``pitch_inv[α,ρ]`` where in the latter the labels are interpreted as the indices that correspond to that field line. num_well : int or None Specify to return the first ``num_well`` pairs of bounce points for each @@ -227,7 +223,7 @@ def points(self, pitch_inv, num_well=None): Returns ------- z1, z2 : (jnp.ndarray, jnp.ndarray) - Shape (P, M, L, num_well). + Shape (M, L, P, num_well). ζ coordinates of bounce points. The points are ordered and grouped such that the straight line path between ``z1`` and ``z2`` resides in the epigraph of |B|. @@ -245,14 +241,14 @@ def check_points(self, z1, z2, pitch_inv, plot=True, **kwargs): Parameters ---------- z1, z2 : (jnp.ndarray, jnp.ndarray) - Shape (P, M, L, num_well). + Shape (M, L, P, num_well). ζ coordinates of bounce points. The points are ordered and grouped such that the straight line path between ``z1`` and ``z2`` resides in the epigraph of |B|. pitch_inv : jnp.ndarray - Shape (P, M, L). + Shape (M, L, P). 1/λ values to compute the bounce points at each field line. 1/λ(α,ρ) is - specified by ``pitch_inv[...,α,ρ]`` where in the latter the labels + specified by ``pitch_inv[α,ρ]`` where in the latter the labels are interpreted as the indices that correspond to that field line. plot : bool Whether to plot the field lines and bounce points of the given pitch angles. 
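Before the remaining hunks, a plain-NumPy illustration of the broadcasting convention this patch settles on; the numbers are made up and only the shapes matter. Here M indexes the field line labels α, L the flux surfaces ρ, and P the pitch values per field line.

.. code-block:: python

    import numpy as np

    M, L, P = 3, 2, 5
    min_B = np.array([0.8, 0.9])   # hypothetical per-surface minimum of |B|
    max_B = np.array([1.2, 1.4])   # hypothetical per-surface maximum of |B|
    # 1/λ values uniformly spaced in |B| on each surface, shared by every α.
    pitch_inv = np.broadcast_to(np.linspace(min_B, max_B, P).T, (M, L, P))
    print(pitch_inv[1, 0])         # the P values of 1/λ on field line (α=1, ρ=0)
    # points() and integrate() then return arrays of shape (M, L, P, num_well).
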
@@ -268,7 +264,7 @@ def check_points(self, z1, z2, pitch_inv, plot=True, **kwargs): return _check_bounce_points( z1=z1, z2=z2, - pitch_inv=atleast_nd(3, pitch_inv), + pitch_inv=pitch_inv, knots=self._zeta, B=self.B, plot=plot, @@ -291,16 +287,12 @@ def integrate( Computes the bounce integral ∫ f(ℓ) dℓ for every field line and pitch. - Notes - ----- - Only the dimensions following L are required. The leading axes are batch axes. - Parameters ---------- pitch_inv : jnp.ndarray - Shape (P, M, L). + Shape (M, L, P). 1/λ values to compute the bounce integrals. 1/λ(α,ρ) is specified by - ``pitch_inv[...,α,ρ]`` where in the latter the labels are interpreted + ``pitch_inv[α,ρ]`` where in the latter the labels are interpreted as the indices that correspond to that field line. integrand : callable The composition operator on the set of functions in ``f`` that maps the @@ -309,13 +301,13 @@ def integrate( ``B`` and ``pitch``. A quadrature will be performed to approximate the bounce integral of ``integrand(*f,B=B,pitch=pitch)``. f : list[jnp.ndarray] - Shape (M, L, N). + Shape (M, L, 1, N). Real scalar-valued functions evaluated on the ``grid`` supplied to construct this object. These functions should be arguments to the callable ``integrand``. Use the method ``self.reshape_data`` to reshape the data into the expected shape. weight : jnp.ndarray - Shape (M, L, N). + Shape (M, L, 1, N). If supplied, the bounce integral labeled by well j is weighted such that the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` interpolated to the deepest point in that magnetic well. Use the method @@ -345,14 +337,13 @@ def integrate( Returns ------- result : jnp.ndarray - Shape (P, M, L, num_well). - Last axis enumerates the bounce integrals for a given pitch, field line, - and flux surface. + Shape (M, L, P, num_well). + Last axis enumerates the bounce integrals for a given field line, + flux surface, and pitch value. """ - pitch_inv = atleast_nd(3, pitch_inv) z1, z2 = self.points(pitch_inv, num_well) - result = bounce_quadrature( + result = _bounce_quadrature( x=self._x, w=self._w, z1=z1, @@ -369,7 +360,7 @@ def integrate( ) if weight is not None: result *= interp_to_argmin( - weight, + weight.squeeze(axis=-2), z1, z2, self._zeta, @@ -377,7 +368,6 @@ def integrate( self._dB_dz, method, ) - assert result.shape[0] == pitch_inv.shape[0] assert result.shape[-1] == setdefault(num_well, np.prod(self._dB_dz.shape[-2:])) return result @@ -403,16 +393,12 @@ def plot(self, m, l, pitch_inv=None, **kwargs): """ if pitch_inv is not None: - pitch_inv = jnp.atleast_1d(jnp.squeeze(pitch_inv)) errorif( - pitch_inv.ndim != 1, + pitch_inv.ndim > 1, msg=f"Got pitch_inv.ndim={pitch_inv.ndim}, but expected 1.", ) z1, z2 = bounce_points( - pitch_inv[:, jnp.newaxis, jnp.newaxis], - self._zeta, - self.B[m, l], - self._dB_dz[m, l], + pitch_inv, self._zeta, self.B[m, l], self._dB_dz[m, l] ) kwargs["z1"] = z1 kwargs["z2"] = z2 diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index 2a8adfcdb1..304a443961 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -13,13 +13,11 @@ polyval_vec, ) from desc.integrals.quad_utils import ( + _composite_linspace, bijection_from_disc, - composite_linspace, grad_bijection_from_disc, ) from desc.utils import ( - atleast_2d_end, - atleast_3d_mid, atleast_nd, errorif, flatten_matrix, @@ -35,10 +33,8 @@ def get_pitch_inv(min_B, max_B, num, relative_shift=1e-6): Parameters ---------- min_B : jnp.ndarray - Shape (..., L). Minimum |B| value. 
max_B : jnp.ndarray - Shape (..., L). Maximum |B| value. num : int Number of values, not including endpoints. @@ -49,7 +45,7 @@ def get_pitch_inv(min_B, max_B, num, relative_shift=1e-6): Returns ------- pitch_inv : jnp.ndarray - Shape (num + 2, ..., L) with ndim > 2. + Shape (*min_B.shape, num + 2). 1/λ values. """ @@ -58,14 +54,13 @@ def get_pitch_inv(min_B, max_B, num, relative_shift=1e-6): min_B = (1 + relative_shift) * min_B max_B = (1 - relative_shift) * max_B # Samples should be uniformly spaced in |B| and not λ (GitHub issue #1228). - pitch_inv = atleast_3d_mid( - atleast_2d_end(composite_linspace(jnp.stack([min_B, max_B]), num)) - ) + pitch_inv = jnp.moveaxis(_composite_linspace(jnp.stack([min_B, max_B]), num), 0, -1) + assert pitch_inv.shape == (*min_B.shape, num + 2) return pitch_inv def _check_spline_shape(knots, g, dg_dz, pitch_inv=None): - """Ensure inputs have compatible shape, and return them with full dimension. + """Ensure inputs have compatible shape. Parameters ---------- @@ -73,18 +68,18 @@ def _check_spline_shape(knots, g, dg_dz, pitch_inv=None): Shape (N, ). ζ coordinates of spline knots. Must be strictly increasing. g : jnp.ndarray - Shape (M, L, N - 1, g.shape[-1]). + Shape (..., N - 1, g.shape[-1]). Polynomial coefficients of the spline of g in local power basis. Last axis enumerates the coefficients of power series. Second to last axis enumerates the polynomials that compose a particular spline. dg_dz : jnp.ndarray - Shape (M, L, N - 1, g.shape[-1] - 1). + Shape (..., N - 1, g.shape[-1] - 1). Polynomial coefficients of the spline of ∂g/∂ζ in local power basis. Last axis enumerates the coefficients of power series. Second to last axis enumerates the polynomials that compose a particular spline. pitch_inv : jnp.ndarray - Shape (P, M, L). - 1/λ values. 1/λ(α,ρ) is specified by ``pitch_inv[...,α,ρ]`` where in + Shape (..., P). + 1/λ values. 1/λ(α,ρ) is specified by ``pitch_inv[α,ρ]`` where in the latter the labels are interpreted as the indices that correspond to that field line. @@ -102,12 +97,12 @@ def _check_spline_shape(knots, g, dg_dz, pitch_inv=None): or g.shape != (*dg_dz.shape[:-1], dg_dz.shape[-1] + 1), msg=f"Invalid shape {g.shape} for spline and derivative {dg_dz.shape}.", ) - g = atleast_nd(4, g) - dg_dz = atleast_nd(4, dg_dz) + g, dg_dz = jnp.atleast_2d(g, dg_dz) if pitch_inv is not None: - pitch_inv = atleast_nd(3, pitch_inv) + pitch_inv = jnp.atleast_1d(pitch_inv) errorif( - pitch_inv.ndim > 3 or not is_broadcastable(pitch_inv.shape, g.shape[:2]), + pitch_inv.ndim > 3 + or not is_broadcastable(pitch_inv.shape[:-1], g.shape[:-2]), msg=f"Invalid shape {pitch_inv.shape} for pitch angles.", ) return g, dg_dz, pitch_inv @@ -118,27 +113,21 @@ def bounce_points( ): """Compute the bounce points given spline of |B| and pitch λ. - Notes - ----- - Only the dimensions following L are required. The leading axes are batch axes. - Parameters ---------- pitch_inv : jnp.ndarray - Shape (P, M, L). - 1/λ values to compute the bounce points. 1/λ(α,ρ) is specified by - ``pitch_inv[...,α,ρ]`` where in the latter the labels are interpreted - as the indices that correspond to that field line. + Shape (..., P). + 1/λ values to compute the bounce points. knots : jnp.ndarray Shape (N, ). ζ coordinates of spline knots. Must be strictly increasing. B : jnp.ndarray - Shape (M, L, N - 1, B.shape[-1]). + Shape (..., N - 1, B.shape[-1]). Polynomial coefficients of the spline of |B| in local power basis. Last axis enumerates the coefficients of power series. 
Second to last axis enumerates the polynomials that compose a particular spline. dB_dz : jnp.ndarray - Shape (M, L, N - 1, B.shape[-1] - 1). + Shape (..., N - 1, B.shape[-1] - 1). Polynomial coefficients of the spline of (∂|B|/∂ζ)|(ρ,α) in local power basis. Last axis enumerates the coefficients of power series. Second to last axis enumerates the polynomials that compose a particular spline. @@ -162,7 +151,7 @@ def bounce_points( Returns ------- z1, z2 : (jnp.ndarray, jnp.ndarray) - Shape (P, M, L, num_well). + Shape (..., P, num_well). ζ coordinates of bounce points. The points are ordered and grouped such that the straight line path between ``z1`` and ``z2`` resides in the epigraph of |B|. @@ -174,25 +163,23 @@ def bounce_points( """ B, dB_dz, pitch_inv = _check_spline_shape(knots, B, dB_dz, pitch_inv) intersect = polyroot_vec( - c=B, - k=pitch_inv[..., jnp.newaxis], + c=B[..., jnp.newaxis, :, :], # Add P axis + k=pitch_inv[..., jnp.newaxis], # Add N axis a_min=jnp.array([0.0]), a_max=jnp.diff(knots), sort=True, sentinel=-1.0, distinct=True, ) - assert intersect.shape == ( - pitch_inv.shape[0], - B.shape[0], - B.shape[1], + assert intersect.shape[-3:] == ( + pitch_inv.shape[-1], knots.size - 1, B.shape[-1] - 1, ) # Reshape so that last axis enumerates intersects of a pitch along a field line. dB_sign = flatten_matrix( - jnp.sign(polyval_vec(x=intersect, c=dB_dz[..., jnp.newaxis, :])) + jnp.sign(polyval_vec(x=intersect, c=dB_dz[..., jnp.newaxis, :, jnp.newaxis, :])) ) # Only consider intersect if it is within knots that bound that polynomial. is_intersect = flatten_matrix(intersect) >= 0 @@ -234,6 +221,11 @@ def _set_default_plot_kwargs(kwargs): def _check_bounce_points(z1, z2, pitch_inv, knots, B, plot=True, **kwargs): """Check that bounce points are computed correctly.""" + z1 = atleast_nd(4, z1) + z2 = atleast_nd(4, z2) + pitch_inv = atleast_nd(3, pitch_inv) + B = atleast_nd(4, B) + kwargs = _set_default_plot_kwargs(kwargs) plots = [] @@ -248,8 +240,8 @@ def _check_bounce_points(z1, z2, pitch_inv, knots, B, plot=True, **kwargs): eps = kwargs.pop("eps", jnp.finfo(jnp.array(1.0).dtype).eps * 10) for ml in np.ndindex(B.shape[:-2]): ppoly = PPoly(B[ml].T, knots) - for p in range(pitch_inv.shape[0]): - idx = (p, *ml) + for p in range(pitch_inv.shape[-1]): + idx = (*ml, p) B_midpoint = ppoly((z1[idx] + z2[idx]) / 2) err_3 = jnp.any(B_midpoint > pitch_inv[idx] + eps) if not (err_1[idx] or err_2[idx] or err_3): @@ -262,7 +254,7 @@ def _check_bounce_points(z1, z2, pitch_inv, knots, B, plot=True, **kwargs): z1=_z1, z2=_z2, k=pitch_inv[idx], - title=kwargs.pop("title") + f", (p,m,l)={idx}", + title=kwargs.pop("title") + f", (m,l,p)={idx}", **kwargs, ) @@ -276,20 +268,19 @@ def _check_bounce_points(z1, z2, pitch_inv, knots, B, plot=True, **kwargs): "bounce points is in hypograph(|B|). Use more knots.\n" ) if plot: - idx = (slice(None), *ml) plots.append( plot_ppoly( ppoly=ppoly, - z1=z1[idx], - z2=z2[idx], - k=pitch_inv[idx], + z1=z1[ml], + z2=z2[ml], + k=pitch_inv[ml], **kwargs, ) ) return plots -def bounce_quadrature( +def _bounce_quadrature( x, w, z1, @@ -315,15 +306,13 @@ def bounce_quadrature( Shape (w.size, ). Quadrature weights. z1, z2 : jnp.ndarray - Shape (P, M, L, num_well). + Shape (..., P, num_well). ζ coordinates of bounce points. The points are ordered and grouped such that the straight line path between ``z1`` and ``z2`` resides in the epigraph of |B|. pitch_inv : jnp.ndarray - Shape (P, M, L). - 1/λ values to compute the bounce integrals. 
1/λ(α,ρ) is specified by - ``pitch_inv[...,α,ρ]`` where in the latter the labels are interpreted - as the indices that correspond to that field line. + Shape (..., P). + 1/λ values to compute the bounce integrals. integrand : callable The composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the @@ -331,11 +320,11 @@ def bounce_quadrature( ``B`` and ``pitch``. A quadrature will be performed to approximate the bounce integral of ``integrand(*f,B=B,pitch=pitch)``. f : list[jnp.ndarray] - Shape (M, L, N). + Shape (..., 1, N). Real scalar-valued functions evaluated on the ``knots``. These functions should be arguments to the callable ``integrand``. data : dict[str, jnp.ndarray] - Shape (M, L, N). + Shape (..., 1, N). Required data evaluated on ``grid`` and reshaped with ``Bounce1D.reshape_data``. Must include names in ``Bounce1D.required_names``. knots : jnp.ndarray @@ -357,14 +346,14 @@ def bounce_quadrature( Returns ------- result : jnp.ndarray - Shape (P, M, L, num_well). - Last axis enumerates the bounce integrals for a given pitch, field line, - and flux surface. + Shape (..., P, num_well). + Last axis enumerates the bounce integrals for a field line, + flux surface, and pitch. """ errorif(x.ndim != 1 or x.shape != w.shape) - errorif(z1.ndim != 4 or z1.shape != z2.shape) - errorif(pitch_inv.ndim != 3) + errorif(z1.ndim < 2 or z1.shape != z2.shape) + pitch_inv = jnp.atleast_1d(pitch_inv) if not isinstance(f, (list, tuple)): f = [f] if isinstance(f, (jnp.ndarray, np.ndarray)) else list(f) @@ -384,7 +373,7 @@ def bounce_quadrature( ) else: # TODO: Use batched vmap. - def loop(z): + def loop(z): # over num well axis z1, z2 = z # Need to return tuple because input was tuple; artifact of JAX map. return None, _interpolate_and_integrate( @@ -398,6 +387,7 @@ def loop(z): method=method, check=False, plot=False, + batch=True, ) result = jnp.moveaxis( @@ -407,7 +397,6 @@ def loop(z): ) result = result * grad_bijection_from_disc(z1, z2) - assert result.shape == z1.shape return result @@ -422,6 +411,7 @@ def _interpolate_and_integrate( method, check, plot, + batch=False, ): """Interpolate given functions to points ``Q`` and perform quadrature. @@ -431,11 +421,8 @@ def _interpolate_and_integrate( Shape (w.size, ). Quadrature weights. Q : jnp.ndarray - Shape (P, M, L, Q.shape[-2], w.size). + Shape (..., P, Q.shape[-2], w.size). Quadrature points in ζ coordinates. - data : dict[str, jnp.ndarray] - Data evaluated on ``grid`` and reshaped with ``Bounce1D.reshape_data``. - Must include names in ``Bounce1D.required_names``. Returns ------- @@ -444,12 +431,13 @@ def _interpolate_and_integrate( Quadrature result. """ - assert w.ndim == 1 - assert 3 < Q.ndim < 6 and Q.shape[0] == pitch_inv.shape[0] and Q.shape[-1] == w.size + assert w.ndim == 1 and Q.shape[-1] == w.size + assert Q.shape[-3 + batch] == pitch_inv.shape[-1] assert data["|B|"].shape[-1] == knots.size shape = Q.shape - Q = Q.reshape(*Q.shape[:3], -1) + if not batch: + Q = flatten_matrix(Q) b_sup_z = interp1d_Hermite_vec( Q, knots, @@ -484,7 +472,7 @@ def _check_interp(shape, Q, f, b_sup_z, B, result, plot): Parameters ---------- shape : tuple - (P, M, L, Q.shape[-2], w.size). + (..., P, Q.shape[-2], w.size). Q : jnp.ndarray Quadrature points in ζ coordinates. 
f : list[jnp.ndarray] @@ -528,7 +516,7 @@ def _check_interp(shape, Q, f, b_sup_z, B, result, plot): def _plot_check_interp(Q, V, name=""): - """Plot V[λ, α, ρ, (ζ₁, ζ₂)](Q).""" + """Plot V[..., λ, (ζ₁, ζ₂)](Q).""" for idx in np.ndindex(Q.shape[:3]): marked = jnp.nonzero(jnp.any(Q[idx] != 0.0, axis=-1))[0] if marked.size == 0: @@ -536,7 +524,7 @@ def _plot_check_interp(Q, V, name=""): fig, ax = plt.subplots() ax.set_xlabel(r"$\zeta$") ax.set_ylabel(name) - ax.set_title(f"Interpolation of {name} to quadrature points, (p,m,l)={idx}") + ax.set_title(f"Interpolation of {name} to quadrature points, (m,l,p)={idx}") for i in marked: ax.plot(Q[(*idx, i)], V[(*idx, i)], marker="o") fig.text(0.01, 0.01, "Each color specifies a particular integral.") @@ -547,22 +535,18 @@ def _plot_check_interp(Q, V, name=""): def _get_extrema(knots, g, dg_dz, sentinel=jnp.nan): """Return extrema (z*, g(z*)). - Notes - ----- - Only the dimensions following L are required. The leading axes are batch axes. - Parameters ---------- knots : jnp.ndarray Shape (N, ). ζ coordinates of spline knots. Must be strictly increasing. g : jnp.ndarray - Shape (M, L, N - 1, g.shape[-1]). + Shape (..., N - 1, g.shape[-1]). Polynomial coefficients of the spline of g in local power basis. Last axis enumerates the coefficients of power series. Second to last axis enumerates the polynomials that compose a particular spline. dg_dz : jnp.ndarray - Shape (M, L, N - 1, g.shape[-1] - 1). + Shape (..., N - 1, g.shape[-1] - 1). Polynomial coefficients of the spline of ∂g/∂z in local power basis. Last axis enumerates the coefficients of power series. Second to last axis enumerates the polynomials that compose a particular spline. @@ -572,7 +556,7 @@ def _get_extrema(knots, g, dg_dz, sentinel=jnp.nan): Returns ------- ext, g_ext : jnp.ndarray - Shape (M, L, (N - 1) * (g.shape[-1] - 2)). + Shape (..., (N - 1) * (g.shape[-1] - 2)). First array enumerates z*. Second array enumerates g(z*) Sorting order of extrema is arbitrary. @@ -584,15 +568,15 @@ def _get_extrema(knots, g, dg_dz, sentinel=jnp.nan): g_ext = flatten_matrix(polyval_vec(x=ext, c=g[..., jnp.newaxis, :])) # Transform out of local power basis expansion. ext = flatten_matrix(ext + knots[:-1, jnp.newaxis]) + assert ext.shape == g_ext.shape and ext.shape[-1] == g.shape[-2] * (g.shape[-1] - 2) return ext, g_ext def _where_for_argmin(z1, z2, ext, g_ext, upper_sentinel): - assert z1.shape[1:3] == z2.shape[1:3] == ext.shape[:2] == g_ext.shape[:2] return jnp.where( - (z1[..., jnp.newaxis] < ext[:, :, jnp.newaxis]) - & (ext[:, :, jnp.newaxis] < z2[..., jnp.newaxis]), - g_ext[:, :, jnp.newaxis], + (z1[..., jnp.newaxis] < ext[..., jnp.newaxis, jnp.newaxis, :]) + & (ext[..., jnp.newaxis, jnp.newaxis, :] < z2[..., jnp.newaxis]), + g_ext[..., jnp.newaxis, jnp.newaxis, :], upper_sentinel, ) @@ -604,28 +588,24 @@ def interp_to_argmin( Let E = {ζ ∣ ζ₁ < ζ < ζ₂} and A = argmin_E g(ζ). Returns mean_A h(ζ). - Notes - ----- - Only the dimensions following L are required. The leading axes are batch axes. - Parameters ---------- h : jnp.ndarray - Shape (M, L, N). + Shape (..., N). Values evaluated on ``knots`` to interpolate. z1, z2 : jnp.ndarray - Shape (P, M, L, num_well). + Shape (..., P, W). Boundaries to detect argmin between. knots : jnp.ndarray Shape (N, ). z coordinates of spline knots. Must be strictly increasing. g : jnp.ndarray - Shape (M, L, N - 1, g.shape[-1]). + Shape (..., N - 1, g.shape[-1]). Polynomial coefficients of the spline of g in local power basis. 
Last axis enumerates the coefficients of power series. Second to last axis enumerates the polynomials that compose a particular spline. dg_dz : jnp.ndarray - Shape (M, L, N - 1, g.shape[-1] - 1). + Shape (..., N - 1, g.shape[-1] - 1). Polynomial coefficients of the spline of ∂g/∂z in local power basis. Last axis enumerates the coefficients of power series. Second to last axis enumerates the polynomials that compose a particular spline. @@ -651,12 +631,10 @@ def interp_to_argmin( Returns ------- h : jnp.ndarray - Shape (P, M, L, num_well). - mean_A h(ζ) + Shape (..., P, W). """ - z1 = atleast_nd(4, z1) - z2 = atleast_nd(4, z2) + assert z1.ndim == z2.ndim >= 2 and z1.shape == z2.shape ext, g_ext = _get_extrema(knots, g, dg_dz, sentinel=0) # Our softargmax(x) does the proper shift to compute softargmax(x - max(x)), # but it's still not a good idea to compute over a large length scale, so we @@ -667,9 +645,9 @@ def interp_to_argmin( ) h = jnp.linalg.vecdot( argmin, - interp1d_vec(ext, knots, h, method=method)[:, :, jnp.newaxis], + interp1d_vec(ext, knots, h, method=method)[..., jnp.newaxis, jnp.newaxis, :], ) - assert h.shape == z1.shape or h.shape == z2.shape + assert h.shape == z1.shape return h @@ -684,28 +662,24 @@ def interp_to_argmin_hard(h, z1, z2, knots, g, dg_dz, method="cubic"): Accomplishes the same task, but handles the case of non-unique global minima more correctly. It is also more efficient if P >> 1. - Notes - ----- - Only the dimensions following L are required. The leading axes are batch axes. - Parameters ---------- h : jnp.ndarray - Shape (M, L, N). + Shape (..., N). Values evaluated on ``knots`` to interpolate. z1, z2 : jnp.ndarray - Shape (P, M, L, num_well). + Shape (..., P, W). Boundaries to detect argmin between. knots : jnp.ndarray Shape (N, ). z coordinates of spline knots. Must be strictly increasing. g : jnp.ndarray - Shape (M, L, N - 1, g.shape[-1]). + Shape (..., N - 1, g.shape[-1]). Polynomial coefficients of the spline of g in local power basis. Last axis enumerates the coefficients of power series. Second to last axis enumerates the polynomials that compose a particular spline. dg_dz : jnp.ndarray - Shape (M, L, N - 1, g.shape[-1] - 1). + Shape (..., N - 1, g.shape[-1] - 1). Polynomial coefficients of the spline of ∂g/∂z in local power basis. Last axis enumerates the coefficients of power series. Second to last axis enumerates the polynomials that compose a particular spline. @@ -717,12 +691,10 @@ def interp_to_argmin_hard(h, z1, z2, knots, g, dg_dz, method="cubic"): Returns ------- h : jnp.ndarray - Shape (P, M, L, num_well). - h(A) + Shape (..., P, W). 
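The behavior documented above can be pictured with a one-dimensional NumPy sketch; the arrays below are hypothetical stand-ins (``g`` mimics |B| along a field line and ``h`` the quantity to interpolate) and are not taken from this patch:

.. code-block:: python

    import numpy as np

    zeta = np.linspace(0, 2 * np.pi, 1000)
    g = 1 + 0.5 * np.cos(zeta)      # stand-in for |B|(zeta)
    h = np.sin(zeta) ** 2           # stand-in for the weight to interpolate
    z1, z2 = 2.0, 5.0               # one bounce interval, assumed
    inside = (z1 < zeta) & (zeta < z2)
    z_min = zeta[inside][np.argmin(g[inside])]
    # ``interp_to_argmin_hard`` evaluates h at the hard argmin of g, while
    # ``interp_to_argmin`` replaces the hard argmin with a softargmax weighting.
    h_at_min = np.interp(z_min, zeta, h)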
""" - z1 = atleast_nd(4, z1) - z2 = atleast_nd(4, z2) + assert z1.ndim == z2.ndim >= 2 and z1.shape == z2.shape ext, g_ext = _get_extrema(knots, g, dg_dz, sentinel=0) # We can use the non-differentiable max because we actually want the gradients # to accumulate through only the minimum since we are differentiating how our @@ -735,10 +707,10 @@ def interp_to_argmin_hard(h, z1, z2, knots, g, dg_dz, method="cubic"): h = interp1d_vec( jnp.take_along_axis(ext[jnp.newaxis], argmin, axis=-1), knots, - h, + h[..., jnp.newaxis, :], method=method, ) - assert h.shape == z1.shape or h.shape == z2.shape + assert h.shape == z1.shape, h.shape return h diff --git a/desc/integrals/quad_utils.py b/desc/integrals/quad_utils.py index b7bfbb6a6c..2a00801d8c 100644 --- a/desc/integrals/quad_utils.py +++ b/desc/integrals/quad_utils.py @@ -220,7 +220,7 @@ def get_quadrature(quad, automorphism): return x, w -def composite_linspace(x, num): +def _composite_linspace(x, num): """Returns linearly spaced values between every pair of values in ``x``. Parameters diff --git a/tests/test_integrals.py b/tests/test_integrals.py index a368987853..10dbdb96e3 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -1117,14 +1117,12 @@ def test_bounce1d_checks(self): # Sum all bounce averages across a particular field line, for every field line. result = avg.sum(axis=-1) # Group the result by pitch and flux surface. - result = result.reshape(pitch_inv.shape[0], alpha.size, rho.size) + result = result.reshape(alpha.size, rho.size, pitch_inv.shape[-1]) # The result stored at - p, m, l = 3, 0, 1 - print("Result(λ, α, ρ):", result[p, m, l]) + m, l, p = 0, 1, 3 + print("Result(α, ρ, λ):", result[m, l, p]) # corresponds to the 1/λ value - print( - "1/λ(α, ρ):", pitch_inv[p, m % pitch_inv.shape[1], l % pitch_inv.shape[-1]] - ) + print("1/λ(α, ρ):", pitch_inv[l, p]) # for the Clebsch-type field line coordinates nodes = grid.source_grid.meshgrid_reshape(grid.source_grid.nodes[:, :2], "arz") print("(α, ρ):", nodes[m, l, 0]) @@ -1133,7 +1131,7 @@ def test_bounce1d_checks(self): bounce.check_points(*bounce.points(pitch_inv), pitch_inv, plot=False) # 8. Plotting - fig, ax = bounce.plot(m, l, pitch_inv[..., l], include_legend=False, show=False) + fig, ax = bounce.plot(m, l, pitch_inv[l], include_legend=False, show=False) return fig @pytest.mark.unit @@ -1166,20 +1164,20 @@ def dg_dz(z): "|B|_z|r,a": dg_dz(zeta), }, ) - np.testing.assert_allclose(bounce._zeta, zeta) + z1 = np.array(0, ndmin=4) + z2 = np.array(2 * np.pi, ndmin=4) argmin = 5.61719 - np.testing.assert_allclose( - h(argmin), - func( - h=h(zeta), - z1=np.array(0, ndmin=3), - z2=np.array(2 * np.pi, ndmin=3), - knots=zeta, - g=bounce.B, - dg_dz=bounce._dB_dz, - ), - rtol=1e-3, - ) + h_min = h(argmin) + result = func( + h=h(zeta), + z1=z1, + z2=z2, + knots=zeta, + g=bounce.B, + dg_dz=bounce._dB_dz, + ) + assert result.shape == z1.shape + np.testing.assert_allclose(h_min, result, rtol=1e-3) @staticmethod def drift_analytic(data): @@ -1262,7 +1260,7 @@ def drift_analytic(data): # Exclude singularity not captured by analytic approximation for pitch near # the maximum |B|. (This is captured by the numerical integration). 
- pitch_inv = get_pitch_inv(np.min(B), np.max(B), 100).squeeze()[:-1] + pitch_inv = get_pitch_inv(np.min(B), np.max(B), 100)[:-1] k2 = 0.5 * ((1 - B0 / pitch_inv) / (epsilon * B0 / pitch_inv) + 1) I_0, I_1, I_2, I_3, I_4, I_5, I_6, I_7 = ( TestBounce1DQuadrature.elliptic_incomplete(k2) @@ -1283,7 +1281,7 @@ def drift_analytic(data): ) / G0 drift_analytic_den = I_0 / G0 drift_analytic = drift_analytic_num / drift_analytic_den - return drift_analytic, cvdrift, gbdrift, pitch_inv.reshape(-1, 1, 1) + return drift_analytic, cvdrift, gbdrift, pitch_inv @staticmethod def drift_num_integrand(cvdrift, gbdrift, B, pitch): @@ -1371,7 +1369,7 @@ def test_binormal_drift_bounce1d(self): pitch_inv=pitch_inv, integrand=TestBounce1D.drift_den_integrand, num_well=1, - weight=np.ones(zeta.size), + weight=np.ones(zeta.size)[np.newaxis], check=True, ) drift_numerical = np.squeeze(drift_numerical_num / drift_numerical_den) @@ -1385,12 +1383,12 @@ def test_binormal_drift_bounce1d(self): bounce, TestBounce1D.drift_num_integrand, f=f, - weight=np.ones(zeta.size), + weight=np.ones(zeta.size)[np.newaxis], ) fig, ax = plt.subplots() - ax.plot(pitch_inv.squeeze(), drift_analytic) - ax.plot(pitch_inv.squeeze(), drift_numerical) + ax.plot(pitch_inv, drift_analytic) + ax.plot(pitch_inv, drift_numerical) return fig @staticmethod diff --git a/tests/test_quad_utils.py b/tests/test_quad_utils.py index 5a7c3d00e7..ce9408f12a 100644 --- a/tests/test_quad_utils.py +++ b/tests/test_quad_utils.py @@ -6,11 +6,11 @@ from desc.backend import jnp from desc.integrals.quad_utils import ( + _composite_linspace, automorphism_arcsin, automorphism_sin, bijection_from_disc, bijection_to_disc, - composite_linspace, grad_automorphism_arcsin, grad_automorphism_sin, grad_bijection_from_disc, @@ -26,7 +26,7 @@ def test_composite_linspace(): B_min_tz = np.array([0.1, 0.2]) B_max_tz = np.array([1, 3]) breaks = np.linspace(B_min_tz, B_max_tz, num=5) - b = composite_linspace(breaks, num=3) + b = _composite_linspace(breaks, num=3) for i in range(breaks.shape[0]): for j in range(breaks.shape[1]): assert only1(np.isclose(breaks[i, j], b[:, j]).tolist()) From e39dc14bc06ac541e132ecde5993b2a4f8edba20 Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 2 Sep 2024 18:39:11 -0400 Subject: [PATCH 240/241] Pull down changes from ripple branch --- desc/integrals/bounce_integral.py | 83 +++++++++++++++++++------------ desc/integrals/bounce_utils.py | 29 ++++------- tests/test_integrals.py | 18 +++---- 3 files changed, 71 insertions(+), 59 deletions(-) diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index afe7e8493c..31a8ab9d91 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -1,6 +1,5 @@ """Methods for computing bounce integrals (singular or otherwise).""" -import numpy as np from interpax import CubicHermiteSpline, PPoly from orthax.legendre import leggauss @@ -108,6 +107,8 @@ def __init__( automorphism=(automorphism_sin, grad_automorphism_sin), Bref=1.0, Lref=1.0, + *, + is_reshaped=False, check=False, **kwargs, ): @@ -137,6 +138,13 @@ def __init__( Optional. Reference magnetic field strength for normalization. Lref : float Optional. Reference length scale for normalization. + is_reshaped : bool + Whether the arrays in ``data`` are already reshaped to the expected form of + shape (..., N) or (..., L, N) or (M, L, N). This option can be used to + iteratively compute bounce integrals one field line or one flux surface + at a time, respectively, potentially reducing memory usage. 
To do so, + set to true and provide only those axes of the reshaped data. + Default is false. check : bool Flag for debugging. Must be false for JAX transformations. @@ -159,7 +167,11 @@ def __init__( "|B|": data["|B|"] / Bref, "|B|_z|r,a": data["|B|_z|r,a"] / Bref, # This is already the correct sign. } - self._data = dict(zip(data.keys(), Bounce1D.reshape_data(grid, *data.values()))) + self._data = ( + data + if is_reshaped + else dict(zip(data.keys(), Bounce1D.reshape_data(grid, *data.values()))) + ) self._x, self._w = get_quadrature(quad, automorphism) # Compute local splines. @@ -167,8 +179,8 @@ def __init__( self.B = jnp.moveaxis( CubicHermiteSpline( x=self._zeta, - y=self._data["|B|"].squeeze(axis=-2), - dydx=self._data["|B|_z|r,a"].squeeze(axis=-2), + y=self._data["|B|"], + dydx=self._data["|B|_z|r,a"], axis=-1, check=check, ).c, @@ -176,8 +188,10 @@ def __init__( destination=(-1, -2), ) self._dB_dz = polyder_vec(self.B) - assert self.B.shape == (grid.num_alpha, grid.num_rho, grid.num_zeta - 1, 4) - assert self._dB_dz.shape == (grid.num_alpha, grid.num_rho, grid.num_zeta - 1, 3) + + # Add axis here instead of in ``_bounce_quadrature``. + for name in self._data: + self._data[name] = self._data[name][..., jnp.newaxis, :] @staticmethod def reshape_data(grid, *arys): @@ -192,14 +206,15 @@ def reshape_data(grid, *arys): Returns ------- - f : list[jnp.ndarray] - List of reshaped data which may be given to ``integrate``. + f : jnp.ndarray + Shape (M, L, N). + Reshaped data which may be given to ``integrate``. """ - f = [grid.meshgrid_reshape(d, "arz")[..., jnp.newaxis, :] for d in arys] - return f + f = [grid.meshgrid_reshape(d, "arz") for d in arys] + return f if len(f) > 1 else f[0] - def points(self, pitch_inv, num_well=None): + def points(self, pitch_inv, *, num_well=None): """Compute bounce points. Parameters @@ -235,7 +250,7 @@ def points(self, pitch_inv, num_well=None): """ return bounce_points(pitch_inv, self._zeta, self.B, self._dB_dz, num_well) - def check_points(self, z1, z2, pitch_inv, plot=True, **kwargs): + def check_points(self, z1, z2, pitch_inv, *, plot=True, **kwargs): """Check that bounce points are computed correctly. Parameters @@ -273,10 +288,11 @@ def check_points(self, z1, z2, pitch_inv, plot=True, **kwargs): def integrate( self, - pitch_inv, integrand, + pitch_inv, f=None, weight=None, + *, num_well=None, method="cubic", batch=True, @@ -289,25 +305,25 @@ def integrate( Parameters ---------- - pitch_inv : jnp.ndarray - Shape (M, L, P). - 1/λ values to compute the bounce integrals. 1/λ(α,ρ) is specified by - ``pitch_inv[α,ρ]`` where in the latter the labels are interpreted - as the indices that correspond to that field line. integrand : callable The composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the arrays in ``f`` as arguments as well as the additional keyword arguments: ``B`` and ``pitch``. A quadrature will be performed to approximate the bounce integral of ``integrand(*f,B=B,pitch=pitch)``. - f : list[jnp.ndarray] - Shape (M, L, 1, N). + pitch_inv : jnp.ndarray + Shape (M, L, P). + 1/λ values to compute the bounce integrals. 1/λ(α,ρ) is specified by + ``pitch_inv[α,ρ]`` where in the latter the labels are interpreted + as the indices that correspond to that field line. + f : list[jnp.ndarray] or jnp.ndarray + Shape (M, L, N). Real scalar-valued functions evaluated on the ``grid`` supplied to construct this object. 
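The iteration pattern that ``is_reshaped`` is meant to enable is sketched below. This is an assumption about intended usage rather than code from this patch; ``grid``, ``data`` (keyed by ``Bounce1D.required_names``), ``pitch_inv`` of shape (M, L, P), and ``integrand`` are presumed to exist:

.. code-block:: python

    from desc.integrals.bounce_integral import Bounce1D

    # Reshape once to (M, L, N), then build Bounce1D one flux surface at a time.
    full = {
        name: Bounce1D.reshape_data(grid, data[name])
        for name in Bounce1D.required_names
    }
    results = []
    for l in range(grid.num_rho):
        sliced = {name: arr[:, l] for name, arr in full.items()}  # shape (M, N)
        bounce = Bounce1D(grid, sliced, is_reshaped=True)
        results.append(bounce.integrate(integrand, pitch_inv[:, l]))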
These functions should be arguments to the callable ``integrand``. Use the method ``self.reshape_data`` to reshape the data into the expected shape. weight : jnp.ndarray - Shape (M, L, 1, N). + Shape (M, L, N). If supplied, the bounce integral labeled by well j is weighted such that the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` interpolated to the deepest point in that magnetic well. Use the method @@ -342,14 +358,14 @@ def integrate( flux surface, and pitch value. """ - z1, z2 = self.points(pitch_inv, num_well) + z1, z2 = self.points(pitch_inv, num_well=num_well) result = _bounce_quadrature( x=self._x, w=self._w, z1=z1, z2=z2, - pitch_inv=pitch_inv, integrand=integrand, + pitch_inv=pitch_inv, f=setdefault(f, []), data=self._data, knots=self._zeta, @@ -360,7 +376,7 @@ def integrate( ) if weight is not None: result *= interp_to_argmin( - weight.squeeze(axis=-2), + weight, z1, z2, self._zeta, @@ -368,10 +384,10 @@ def integrate( self._dB_dz, method, ) - assert result.shape[-1] == setdefault(num_well, np.prod(self._dB_dz.shape[-2:])) + assert result.shape == z1.shape return result - def plot(self, m, l, pitch_inv=None, **kwargs): + def plot(self, m, l, pitch_inv=None, /, **kwargs): """Plot the field line and bounce points of the given pitch angles. Parameters @@ -392,18 +408,21 @@ def plot(self, m, l, pitch_inv=None, **kwargs): Matplotlib (fig, ax) tuple. """ + B, dB_dz = self.B, self._dB_dz + if B.ndim == 4: + B = B[m, l] + dB_dz = dB_dz[m, l] + elif B.ndim == 3: + B = B[l] + dB_dz = dB_dz[l] if pitch_inv is not None: errorif( pitch_inv.ndim > 1, msg=f"Got pitch_inv.ndim={pitch_inv.ndim}, but expected 1.", ) - z1, z2 = bounce_points( - pitch_inv, self._zeta, self.B[m, l], self._dB_dz[m, l] - ) + z1, z2 = bounce_points(pitch_inv, self._zeta, B, dB_dz) kwargs["z1"] = z1 kwargs["z2"] = z2 kwargs["k"] = pitch_inv - fig, ax = plot_ppoly( - PPoly(self.B[m, l].T, self._zeta), **_set_default_plot_kwargs(kwargs) - ) + fig, ax = plot_ppoly(PPoly(B.T, self._zeta), **_set_default_plot_kwargs(kwargs)) return fig, ax diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index 304a443961..e6fe1f9657 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -285,8 +285,8 @@ def _bounce_quadrature( w, z1, z2, - pitch_inv, integrand, + pitch_inv, f, data, knots, @@ -310,17 +310,17 @@ def _bounce_quadrature( ζ coordinates of bounce points. The points are ordered and grouped such that the straight line path between ``z1`` and ``z2`` resides in the epigraph of |B|. - pitch_inv : jnp.ndarray - Shape (..., P). - 1/λ values to compute the bounce integrals. integrand : callable The composition operator on the set of functions in ``f`` that maps the functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the arrays in ``f`` as arguments as well as the additional keyword arguments: ``B`` and ``pitch``. A quadrature will be performed to approximate the bounce integral of ``integrand(*f,B=B,pitch=pitch)``. + pitch_inv : jnp.ndarray + Shape (..., P). + 1/λ values to compute the bounce integrals. f : list[jnp.ndarray] - Shape (..., 1, N). + Shape (..., N). Real scalar-valued functions evaluated on the ``knots``. These functions should be arguments to the callable ``integrand``. 
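To make the calling convention concrete, here is a hedged end-to-end sketch of a bounce average. The integrand callables and variable names are illustrative assumptions, not definitions from this patch; a typical bounce-average integrand supplies the 1/√(1 − λ|B|) factor itself:

.. code-block:: python

    from desc.backend import jnp
    from desc.integrals.bounce_integral import Bounce1D

    def num_integrand(f, B, pitch):
        """Numerator of a bounce average of ``f``; illustrative only."""
        return f / jnp.sqrt(jnp.abs(1 - pitch * B))

    def den_integrand(B, pitch):
        """Denominator of a bounce average; illustrative only."""
        return 1 / jnp.sqrt(jnp.abs(1 - pitch * B))

    # ``bounce``, ``grid``, ``data``, and ``pitch_inv`` are assumed to exist.
    f = Bounce1D.reshape_data(grid, data["g_zz"])
    num = bounce.integrate(num_integrand, pitch_inv, f=f)
    den = bounce.integrate(den_integrand, pitch_inv)
    average = num / den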
data : dict[str, jnp.ndarray] @@ -396,8 +396,7 @@ def loop(z): # over num well axis destination=-1, ) - result = result * grad_bijection_from_disc(z1, z2) - return result + return result * grad_bijection_from_disc(z1, z2) def _interpolate_and_integrate( @@ -448,17 +447,11 @@ def _interpolate_and_integrate( B = interp1d_Hermite_vec(Q, knots, data["|B|"], data["|B|_z|r,a"]) # Spline each function separately so that operations in the integrand # that do not preserve smoothness can be captured. - f = [interp1d_vec(Q, knots, f_i, method=method) for f_i in f] - result = jnp.dot( - ( - integrand( - *f, - B=B, - pitch=1 / pitch_inv[..., jnp.newaxis], - ) - / b_sup_z - ).reshape(shape), - w, + f = [interp1d_vec(Q, knots, f_i[..., jnp.newaxis, :], method=method) for f_i in f] + result = ( + (integrand(*f, B=B, pitch=1 / pitch_inv[..., jnp.newaxis]) / b_sup_z) + .reshape(shape) + .dot(w) ) if check: _check_interp(shape, Q, f, b_sup_z, B, result, plot) diff --git a/tests/test_integrals.py b/tests/test_integrals.py index 10dbdb96e3..23f7539dd3 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -952,7 +952,7 @@ def test_bounce_quadrature(self, is_strong, quad, automorphism): check=True, **kwargs, ) - result = bounce.integrate(pitch_inv, integrand, check=True, plot=True) + result = bounce.integrate(integrand, pitch_inv, check=True, plot=True) assert np.count_nonzero(result) == 1 np.testing.assert_allclose(result.sum(), truth, rtol=1e-4) @@ -1099,14 +1099,14 @@ def test_bounce1d_checks(self): grid.compress(data["min_tz |B|"]), grid.compress(data["max_tz |B|"]), 10 ) num = bounce.integrate( - pitch_inv, integrand=TestBounce1D._example_numerator, + pitch_inv=pitch_inv, f=Bounce1D.reshape_data(grid.source_grid, data["g_zz"]), check=True, ) den = bounce.integrate( - pitch_inv, integrand=TestBounce1D._example_denominator, + pitch_inv=pitch_inv, check=True, batch=False, ) @@ -1359,17 +1359,17 @@ def test_binormal_drift_bounce1d(self): f = Bounce1D.reshape_data(grid.source_grid, cvdrift, gbdrift) drift_numerical_num = bounce.integrate( - pitch_inv=pitch_inv, integrand=TestBounce1D.drift_num_integrand, + pitch_inv=pitch_inv, f=f, num_well=1, check=True, ) drift_numerical_den = bounce.integrate( - pitch_inv=pitch_inv, integrand=TestBounce1D.drift_den_integrand, + pitch_inv=pitch_inv, num_well=1, - weight=np.ones(zeta.size)[np.newaxis], + weight=np.ones(zeta.size), check=True, ) drift_numerical = np.squeeze(drift_numerical_num / drift_numerical_den) @@ -1383,7 +1383,7 @@ def test_binormal_drift_bounce1d(self): bounce, TestBounce1D.drift_num_integrand, f=f, - weight=np.ones(zeta.size)[np.newaxis], + weight=np.ones(zeta.size), ) fig, ax = plt.subplots() @@ -1440,11 +1440,11 @@ def integrand_grad(*args, **kwargs2): return grad_fun(*args, *kwargs2.values()) def fun1(pitch): - return bounce.integrate(1 / pitch, integrand, check=False, **kwargs).sum() + return bounce.integrate(integrand, 1 / pitch, check=False, **kwargs).sum() def fun2(pitch): return bounce.integrate( - 1 / pitch, integrand_grad, check=True, **kwargs + integrand_grad, 1 / pitch, check=True, **kwargs ).sum() pitch = 1.0 From 917ad1ce9da5ac8461cd534f177f3f244ce3b79f Mon Sep 17 00:00:00 2001 From: unalmis Date: Mon, 2 Sep 2024 23:41:10 -0400 Subject: [PATCH 241/241] Tweak documentation as requested in pull request review --- desc/grid.py | 3 ++- desc/integrals/basis.py | 5 +++++ desc/integrals/bounce_integral.py | 30 +++++++++++++++--------------- desc/integrals/bounce_utils.py | 14 +++++++------- desc/integrals/quad_utils.py | 4 ++-- 
desc/utils.py | 12 ------------ tests/test_integrals.py | 8 ++++++-- tests/test_quad_utils.py | 4 ++-- 8 files changed, 39 insertions(+), 41 deletions(-) diff --git a/desc/grid.py b/desc/grid.py index 2eb22a6c5c..6a8ab78fe3 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -638,7 +638,8 @@ def meshgrid_reshape(self, x, order): vec = True shape += (-1,) x = x.reshape(shape, order="F") - x = jnp.swapaxes(x, 1, 0) # now shape rtz/raz etc + # swap to change shape from trz/arz to rtz/raz etc. + x = jnp.swapaxes(x, 1, 0) newax = tuple(self.coordinates.index(c) for c in order) if vec: newax += (3,) diff --git a/desc/integrals/basis.py b/desc/integrals/basis.py index 0fb68ece3f..91a31edf60 100644 --- a/desc/integrals/basis.py +++ b/desc/integrals/basis.py @@ -27,6 +27,11 @@ def _in_epigraph_and(is_intersect, df_dy_sign, /): Boolean array indicating whether element is an intersect and satisfies the stated condition. + Examples + -------- + See ``desc/integrals/bounce_utils.py::bounce_points``. + This is used there to ensure the domains of integration are magnetic wells. + """ # The pairs ``y1`` and ``y2`` are boundaries of an integral only if ``y1 <= y2``. # For the integrals to be over wells, it is required that the first intersect diff --git a/desc/integrals/bounce_integral.py b/desc/integrals/bounce_integral.py index 31a8ab9d91..dff4db396c 100644 --- a/desc/integrals/bounce_integral.py +++ b/desc/integrals/bounce_integral.py @@ -26,12 +26,12 @@ class Bounce1D(IOAble): """Computes bounce integrals using one-dimensional local spline methods. - The bounce integral is defined as ∫ f(ℓ) dℓ, where + The bounce integral is defined as ∫ f(λ, ℓ) dℓ, where dℓ parameterizes the distance along the field line in meters, - f(ℓ) is the quantity to integrate along the field line, - and the boundaries of the integral are bounce points ζ₁, ζ₂ s.t. λ|B|(ζᵢ) = 1, - where λ is a constant proportional to the magnetic moment over energy - and |B| is the norm of the magnetic field. + f(λ, ℓ) is the quantity to integrate along the field line, + and the boundaries of the integral are bounce points ℓ₁, ℓ₂ s.t. λ|B|(ℓᵢ) = 1, + where λ is a constant defining the integral proportional to the magnetic moment + over energy and |B| is the norm of the magnetic field. For a particle with fixed λ, bounce points are defined to be the location on the field line such that the particle's velocity parallel to the magnetic field is zero. @@ -299,18 +299,18 @@ def integrate( check=False, plot=False, ): - """Bounce integrate ∫ f(ℓ) dℓ. + """Bounce integrate ∫ f(λ, ℓ) dℓ. - Computes the bounce integral ∫ f(ℓ) dℓ for every field line and pitch. + Computes the bounce integral ∫ f(λ, ℓ) dℓ for every field line and pitch. Parameters ---------- integrand : callable The composition operator on the set of functions in ``f`` that maps the - functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the - arrays in ``f`` as arguments as well as the additional keyword arguments: - ``B`` and ``pitch``. A quadrature will be performed to approximate the - bounce integral of ``integrand(*f,B=B,pitch=pitch)``. + functions in ``f`` to the integrand f(λ, ℓ) in ∫ f(λ, ℓ) dℓ. It should + accept the arrays in ``f`` as arguments as well as the additional keyword + arguments: ``B`` and ``pitch``. A quadrature will be performed to + approximate the bounce integral of ``integrand(*f,B=B,pitch=pitch)``. pitch_inv : jnp.ndarray Shape (M, L, P). 1/λ values to compute the bounce integrals. 
1/λ(α,ρ) is specified by @@ -325,7 +325,7 @@ def integrate( weight : jnp.ndarray Shape (M, L, N). If supplied, the bounce integral labeled by well j is weighted such that - the returned value is w(j) ∫ f(ℓ) dℓ, where w(j) is ``weight`` + the returned value is w(j) ∫ f(λ, ℓ) dℓ, where w(j) is ``weight`` interpolated to the deepest point in that magnetic well. Use the method ``self.reshape_data`` to reshape the data into the expected shape. num_well : int or None @@ -410,9 +410,9 @@ def plot(self, m, l, pitch_inv=None, /, **kwargs): """ B, dB_dz = self.B, self._dB_dz if B.ndim == 4: - B = B[m, l] - dB_dz = dB_dz[m, l] - elif B.ndim == 3: + B = B[m] + dB_dz = dB_dz[m] + if B.ndim == 3: B = B[l] dB_dz = dB_dz[l] if pitch_inv is not None: diff --git a/desc/integrals/bounce_utils.py b/desc/integrals/bounce_utils.py index e6fe1f9657..c63477c0cc 100644 --- a/desc/integrals/bounce_utils.py +++ b/desc/integrals/bounce_utils.py @@ -13,8 +13,8 @@ polyval_vec, ) from desc.integrals.quad_utils import ( - _composite_linspace, bijection_from_disc, + composite_linspace, grad_bijection_from_disc, ) from desc.utils import ( @@ -54,7 +54,7 @@ def get_pitch_inv(min_B, max_B, num, relative_shift=1e-6): min_B = (1 + relative_shift) * min_B max_B = (1 - relative_shift) * max_B # Samples should be uniformly spaced in |B| and not λ (GitHub issue #1228). - pitch_inv = jnp.moveaxis(_composite_linspace(jnp.stack([min_B, max_B]), num), 0, -1) + pitch_inv = jnp.moveaxis(composite_linspace(jnp.stack([min_B, max_B]), num), 0, -1) assert pitch_inv.shape == (*min_B.shape, num + 2) return pitch_inv @@ -295,7 +295,7 @@ def _bounce_quadrature( check=False, plot=False, ): - """Bounce integrate ∫ f(ℓ) dℓ. + """Bounce integrate ∫ f(λ, ℓ) dℓ. Parameters ---------- @@ -312,10 +312,10 @@ def _bounce_quadrature( epigraph of |B|. integrand : callable The composition operator on the set of functions in ``f`` that maps the - functions in ``f`` to the integrand f(ℓ) in ∫ f(ℓ) dℓ. It should accept the - arrays in ``f`` as arguments as well as the additional keyword arguments: - ``B`` and ``pitch``. A quadrature will be performed to approximate the - bounce integral of ``integrand(*f,B=B,pitch=pitch)``. + functions in ``f`` to the integrand f(λ, ℓ) in ∫ f(λ, ℓ) dℓ. It should + accept the arrays in ``f`` as arguments as well as the additional keyword + arguments: ``B`` and ``pitch``. A quadrature will be performed to + approximate the bounce integral of ``integrand(*f,B=B,pitch=pitch)``. pitch_inv : jnp.ndarray Shape (..., P). 1/λ values to compute the bounce integrals. diff --git a/desc/integrals/quad_utils.py b/desc/integrals/quad_utils.py index 2a00801d8c..692149e84e 100644 --- a/desc/integrals/quad_utils.py +++ b/desc/integrals/quad_utils.py @@ -19,7 +19,7 @@ def bijection_from_disc(x, a, b): def grad_bijection_from_disc(a, b): - """Gradient of affine bijection from disc.""" + """Gradient wrt ``x`` of ``bijection_from_disc``.""" dy_dx = 0.5 * (b - a) return dy_dx @@ -220,7 +220,7 @@ def get_quadrature(quad, automorphism): return x, w -def _composite_linspace(x, num): +def composite_linspace(x, num): """Returns linearly spaced values between every pair of values in ``x``. 
Parameters diff --git a/desc/utils.py b/desc/utils.py index 6ead7a5078..41b32677ea 100644 --- a/desc/utils.py +++ b/desc/utils.py @@ -742,16 +742,4 @@ def atleast_nd(ndmin, ary): return jnp.array(ary, ndmin=ndmin) if jnp.ndim(ary) < ndmin else ary -def atleast_3d_mid(ary): - """Like np.atleast_3d but if adds dim at axis 1 for 2d arrays.""" - ary = jnp.atleast_2d(ary) - return ary[:, jnp.newaxis] if ary.ndim == 2 else ary - - -def atleast_2d_end(ary): - """Like np.atleast_2d but if adds dim at axis 1 for 1d arrays.""" - ary = jnp.atleast_1d(ary) - return ary[:, jnp.newaxis] if ary.ndim == 1 else ary - - PRINT_WIDTH = 60 # current longest name is BootstrapRedlConsistency with pre-text diff --git a/tests/test_integrals.py b/tests/test_integrals.py index 23f7539dd3..26798f3fbc 100644 --- a/tests/test_integrals.py +++ b/tests/test_integrals.py @@ -921,7 +921,7 @@ class TestBounce1DQuadrature: ], ) def test_bounce_quadrature(self, is_strong, quad, automorphism): - """Test bounce integral matches singular elliptic integrals.""" + """Test quadrature matches singular (strong and weak) elliptic integrals.""" p = 1e-4 m = 1 - p # Some prime number that doesn't appear anywhere in calculation. @@ -971,9 +971,12 @@ def _fixed_elliptic(integrand, k, deg): x, w = get_quadrature(leggauss(deg), (automorphism_sin, grad_automorphism_sin)) Z = bijection_from_disc(x, a[..., np.newaxis], b[..., np.newaxis]) k = k[..., np.newaxis] - quad = np.dot(integrand(Z, k), w) * grad_bijection_from_disc(a, b) + quad = integrand(Z, k).dot(w) * grad_bijection_from_disc(a, b) return quad + # TODO: add the analytical test that converts incomplete elliptic integrals to + # complete ones using the Reciprocal Modulus transformation + # https://dlmf.nist.gov/19.7#E4. @staticmethod def elliptic_incomplete(k2): """Calculate elliptic integrals for bounce averaged binormal drift. @@ -1179,6 +1182,7 @@ def dg_dz(z): assert result.shape == z1.shape np.testing.assert_allclose(h_min, result, rtol=1e-3) + # TODO: stellarator geometry test with ripples @staticmethod def drift_analytic(data): """Compute analytic approximation for bounce-averaged binormal drift. diff --git a/tests/test_quad_utils.py b/tests/test_quad_utils.py index ce9408f12a..5a7c3d00e7 100644 --- a/tests/test_quad_utils.py +++ b/tests/test_quad_utils.py @@ -6,11 +6,11 @@ from desc.backend import jnp from desc.integrals.quad_utils import ( - _composite_linspace, automorphism_arcsin, automorphism_sin, bijection_from_disc, bijection_to_disc, + composite_linspace, grad_automorphism_arcsin, grad_automorphism_sin, grad_bijection_from_disc, @@ -26,7 +26,7 @@ def test_composite_linspace(): B_min_tz = np.array([0.1, 0.2]) B_max_tz = np.array([1, 3]) breaks = np.linspace(B_min_tz, B_max_tz, num=5) - b = _composite_linspace(breaks, num=3) + b = composite_linspace(breaks, num=3) for i in range(breaks.shape[0]): for j in range(breaks.shape[1]): assert only1(np.isclose(breaks[i, j], b[:, j]).tolist())
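As a closing usage note for the pitch utilities touched above, the sketch below exercises the documented output shape of ``get_pitch_inv``; the numeric values are arbitrary assumptions:

.. code-block:: python

    import numpy as np
    from desc.integrals.bounce_utils import get_pitch_inv

    # 1/λ values spaced uniformly in |B| between the (slightly shifted) min and
    # max field strength of each flux surface; both endpoints are kept, hence
    # the num + 2 entries along the last axis.
    min_B = np.array([0.8, 0.9])
    max_B = np.array([1.2, 1.4])
    pitch_inv = get_pitch_inv(min_B, max_B, num=10)
    assert pitch_inv.shape == (*min_B.shape, 10 + 2)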