
Commit 152e756

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent 1aa4cc7 commit 152e756

11 files changed: +45 -45 lines changed

adaptive/types.py
Lines changed: 1 addition & 1 deletion

````diff
@@ -8,7 +8,7 @@
 else:
     from typing_extensions import TypeAlias

-Float: TypeAlias = Union[float, np.float_]
+Float: TypeAlias = Union[float, np.float64]
 Bool: TypeAlias = Union[bool, np.bool_]
 Int: TypeAlias = Union[int, np.int_]
 Real: TypeAlias = Union[Float, Int]
````
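
The `np.float_` to `np.float64` change is more than style: NumPy 2.0 removed the `np.float_` alias entirely, so the old annotation fails on import under current NumPy (presumably a ruff NumPy-deprecation autofix, though the commit only credits pre-commit). A minimal check of the equivalence, independent of this repo:

```python
import numpy as np

# `np.float_` was an alias of `np.float64` through NumPy 1.x; NumPy 2.0
# removed the alias, so annotations must name np.float64 directly.
x = np.float64(1.5)
print(isinstance(x, float))  # True: np.float64 subclasses builtin float
print(np.dtype("float64") == np.float64)  # True: same concrete type
```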

docs/source/algorithms_and_examples.md
Lines changed: 2 additions & 2 deletions

````diff
@@ -4,7 +4,7 @@ jupytext:
     extension: .md
     format_name: myst
     format_version: 0.13
-    jupytext_version: 1.14.5
+    jupytext_version: 1.16.1
 kernelspec:
   display_name: python3
   name: python3
@@ -217,7 +217,7 @@ scatter = fig.data[0]
 coords_col = [
     (x, y, z, color)
     for x, y, z, color in zip(
-        scatter["x"], scatter["y"], scatter["z"], scatter.marker["color"]
+        scatter["x"], scatter["y"], scatter["z"], scatter.marker["color"],
     )
     if not (x > 0 and y > 0)
 ]
````
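
Nearly all remaining hunks in this commit follow the same pattern: a trailing comma appended to the last argument of a multi-line call. Which hook did it is not stated (likely `add-trailing-comma` or ruff's COM812 rule, both common in pre-commit setups); the effect is the "magic trailing comma" convention sketched below, where the `relabel` helper is made up for illustration:

```python
# A stand-in for the real calls in these diffs (zip(...), .relabel(...), ...).
def relabel(title):
    return title.upper()

# Without a trailing comma, a formatter is free to collapse the call back
# onto a single line; with it, the multi-line layout is pinned, and adding
# an argument later changes exactly one line in a future diff.
result = relabel(
    "homogeneous sampling",
)
print(result)
```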

docs/source/benchmarks.md
Lines changed: 5 additions & 5 deletions

````diff
@@ -4,7 +4,7 @@ jupytext:
     extension: .md
     format_name: myst
     format_version: 0.13
-    jupytext_version: 1.14.5
+    jupytext_version: 1.16.1
 kernelspec:
   display_name: adaptive
   language: python
@@ -138,7 +138,7 @@ def run_and_plot(learner, **goal):
     bms[learner.function.__name__] = bm
     display(pd.DataFrame([bm]))  # noqa: F821
     return plot(learner, homo_learner).relabel(
-        f"{learner.function.__name__} function with {learner.npoints} points"
+        f"{learner.function.__name__} function with {learner.npoints} points",
     )


@@ -207,7 +207,7 @@ Nonetheless, the algorithm still focuses on areas of the function that have more
 ```{code-cell} ipython3
 def gaussian(x, mu=0, sigma=0.5):
     return (1 / np.sqrt(2 * np.pi * sigma**2)) * np.exp(
-        -((x - mu) ** 2) / (2 * sigma**2)
+        -((x - mu) ** 2) / (2 * sigma**2),
     )


@@ -359,7 +359,7 @@ def gaussian_surface(xy, mu=(0, 0), sigma=(1, 1)):
     mu_x, mu_y = mu
     sigma_x, sigma_y = sigma
     return (1 / (2 * np.pi * sigma_x * sigma_y)) * np.exp(
-        -((x - mu_x) ** 2 / (2 * sigma_x**2) + (y - mu_y) ** 2 / (2 * sigma_y**2))
+        -((x - mu_x) ** 2 / (2 * sigma_x**2) + (y - mu_y) ** 2 / (2 * sigma_y**2)),
     )


@@ -380,7 +380,7 @@ def sinusoidal_surface(xy, amplitude=1, frequency=(0.3, 3)):


 learner = adaptive.Learner2D(
-    sinusoidal_surface, bounds=[(-2 * np.pi, 2 * np.pi), (-2 * np.pi, 2 * np.pi)]
+    sinusoidal_surface, bounds=[(-2 * np.pi, 2 * np.pi), (-2 * np.pi, 2 * np.pi)],
 )
 run_and_plot(learner, loss_goal=0.01)
 ```
````

docs/source/logo.md
Lines changed: 2 additions & 4 deletions

````diff
@@ -4,13 +4,11 @@ jupytext:
     extension: .md
     format_name: myst
     format_version: 0.13
-    jupytext_version: 1.14.5
+    jupytext_version: 1.16.1
 kernelspec:
   display_name: Python 3 (ipykernel)
   language: python
   name: python3
-execution:
-  timeout: 300
 ---

 ```{code-cell} ipython3
@@ -197,7 +195,7 @@ def save_webm(fname, fnames):
         "-y",
         fname,
     ]
-    return subprocess.run(args, capture_output=True)
+    return subprocess.run(args, capture_output=True, check=False)


 if __name__ == "__main__":
````
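
The `subprocess.run` hunk looks behavioral but is behavior-preserving: `check` already defaults to `False`, and linters (e.g. ruff's PLW1510, an assumption, since the commit does not name the hook) require spelling the choice out so that silently ignoring a failed command is clearly deliberate. A small sketch of the difference between the two settings:

```python
import subprocess
import sys

# A command guaranteed to exit nonzero, portable across platforms.
cmd = [sys.executable, "-c", "raise SystemExit(1)"]

# check=False (what the hook inserted): failure is reported, not raised.
result = subprocess.run(cmd, capture_output=True, check=False)
print(result.returncode)  # 1

# check=True would raise CalledProcessError instead:
try:
    subprocess.run(cmd, capture_output=True, check=True)
except subprocess.CalledProcessError as exc:
    print(f"command failed with exit code {exc.returncode}")
```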

docs/source/tutorial/tutorial.BalancingLearner.md
Lines changed: 2 additions & 2 deletions

````diff
@@ -4,7 +4,7 @@ jupytext:
     extension: .md
     format_name: myst
     format_version: 0.13
-    jupytext_version: 1.14.5
+    jupytext_version: 1.16.1
 kernelspec:
   display_name: python3
   name: python3
@@ -88,7 +88,7 @@ combos = {
 }

 learner = adaptive.BalancingLearner.from_product(
-    jacobi, adaptive.Learner1D, {"bounds": (0, 1)}, combos
+    jacobi, adaptive.Learner1D, {"bounds": (0, 1)}, combos,
 )

 runner = adaptive.BlockingRunner(learner, loss_goal=0.01)
````

docs/source/tutorial/tutorial.IntegratorLearner.md
Lines changed: 3 additions & 3 deletions

````diff
@@ -4,7 +4,7 @@ jupytext:
     extension: .md
     format_name: myst
     format_version: 0.13
-    jupytext_version: 1.14.7
+    jupytext_version: 1.16.1
 kernelspec:
   display_name: python3
   name: python3
@@ -80,13 +80,13 @@ runner is done.
 ```{code-cell} ipython3
 if not runner.task.done():
     raise RuntimeError(
-        "Wait for the runner to finish before executing the cells below!"
+        "Wait for the runner to finish before executing the cells below!",
     )
 ```

 ```{code-cell} ipython3
 print(
-    f"The integral value is {learner.igral} with the corresponding error of {learner.err}"
+    f"The integral value is {learner.igral} with the corresponding error of {learner.err}",
 )
 learner.plot()
 ```
````

docs/source/tutorial/tutorial.Learner1D.md
Lines changed: 4 additions & 4 deletions

````diff
@@ -4,7 +4,7 @@ jupytext:
     extension: .md
     format_name: myst
     format_version: 0.13
-    jupytext_version: 1.14.5
+    jupytext_version: 1.16.1
 kernelspec:
   display_name: python3
   name: python3
@@ -92,7 +92,7 @@ We can now compare the adaptive sampling to a homogeneous sampling with the same
 ```{code-cell} ipython3
 if not runner.task.done():
     raise RuntimeError(
-        "Wait for the runner to finish before executing the cells below!"
+        "Wait for the runner to finish before executing the cells below!",
     )
 ```

@@ -119,7 +119,7 @@ offsets = [random.uniform(-0.8, 0.8) for _ in range(3)]
 def f_levels(x, offsets=offsets):
     a = 0.01
     return np.array(
-        [offset + x + a**2 / (a**2 + (x - offset) ** 2) for offset in offsets]
+        [offset + x + a**2 / (a**2 + (x - offset) ** 2) for offset in offsets],
     )
 ```

@@ -129,7 +129,7 @@ The `Learner1D` can be used for such functions:
 ```{code-cell} ipython3
 learner = adaptive.Learner1D(f_levels, bounds=(-1, 1))
 runner = adaptive.Runner(
-    learner, loss_goal=0.01
+    learner, loss_goal=0.01,
 )  # continue until `learner.loss()<=0.01`
 ```
````

docs/source/tutorial/tutorial.LearnerND.md
Lines changed: 2 additions & 2 deletions

````diff
@@ -4,7 +4,7 @@ jupytext:
     extension: .md
     format_name: myst
     format_version: 0.13
-    jupytext_version: 1.14.5
+    jupytext_version: 1.16.1
 kernelspec:
   display_name: python3
   name: python3
@@ -88,7 +88,7 @@ def plot_cut(x1, x2, directions, learner=learner):

 dm = hv.DynamicMap(plot_cut, kdims=["v1", "v2", "directions"])
 dm = dm.redim.values(
-    v1=np.linspace(-1, 1, 6), v2=np.linspace(-1, 1, 6), directions=["xy", "xz", "yz"]
+    v1=np.linspace(-1, 1, 6), v2=np.linspace(-1, 1, 6), directions=["xy", "xz", "yz"],
 )

 # In a notebook one would run `dm` however we want a statically generated
````

docs/source/tutorial/tutorial.advanced-topics.md
Lines changed: 6 additions & 5 deletions

````diff
@@ -4,11 +4,12 @@ jupytext:
     extension: .md
     format_name: myst
     format_version: 0.13
-    jupytext_version: 1.14.5
+    jupytext_version: 1.16.1
 kernelspec:
   display_name: python3
   name: python3
 ---
+
 (TutorialAdvancedTopics)=
 # Advanced Topics

@@ -92,7 +93,7 @@ def slow_f(x):
 learner = adaptive.Learner1D(slow_f, bounds=[0, 1])
 runner = adaptive.Runner(learner, npoints_goal=100)
 runner.start_periodic_saving(
-    save_kwargs={"fname": "data/periodic_example.p"}, interval=6
+    save_kwargs={"fname": "data/periodic_example.p"}, interval=6,
 )
 ```

@@ -241,7 +242,7 @@ def will_raise(x):


 learner = adaptive.Learner1D(will_raise, (-1, 1))
 runner = adaptive.Runner(
-    learner
+    learner,
 )  # without 'goal' the runner will run forever unless cancelled
 ```

@@ -365,6 +366,7 @@ await runner.task  # This is not needed in a notebook environment!
 # The result will only be set when the runner is done.
 timer.result()
 ```
+
 (CustomParallelization)=
 ## Custom parallelization using coroutines

@@ -378,8 +380,7 @@ We will focus on a function `f(x)` that consists of two distinct components: a s

 ```{code-cell} ipython3
 def f(x):  # example function without caching
-    """
-    Integer part of `x` repeats and should be reused
+    """Integer part of `x` repeats and should be reused
     Decimal part requires a new computation
     """
     return g(int(x)) + h(x % 1)
````
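
The docstring hunk applies a pydocstyle convention: the summary moves onto the same line as the opening quotes (D212-style; the exact rule is an assumption, as the commit only credits pre-commit). The runtime difference is just a leading newline in `__doc__`:

```python
def f_old(x):
    """
    Integer part of `x` repeats and should be reused
    Decimal part requires a new computation
    """
    return x


def f_new(x):
    """Integer part of `x` repeats and should be reused
    Decimal part requires a new computation
    """
    return x


print(repr(f_old.__doc__[:10]))  # '\n    Integ' -- leading newline + indent
print(repr(f_new.__doc__[:10]))  # 'Integer pa' -- summary starts immediately
```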

docs/source/tutorial/tutorial.custom_loss.md
Lines changed: 9 additions & 8 deletions

````diff
@@ -4,7 +4,7 @@ jupytext:
     extension: .md
     format_name: myst
     format_version: 0.13
-    jupytext_version: 1.14.5
+    jupytext_version: 1.16.1
 kernelspec:
   display_name: python3
   name: python3
@@ -72,7 +72,7 @@ def f_divergent_1d(x):


 learner = adaptive.Learner1D(
-    f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d
+    f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d,
 )
 runner = adaptive.BlockingRunner(learner, loss_goal=0.01)
 learner.plot().select(y=(0, 10000))
@@ -92,12 +92,12 @@ def f_divergent_2d(xy):


 learner = adaptive.Learner2D(
-    f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=uniform_sampling_2d
+    f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=uniform_sampling_2d,
 )

 # this takes a while, so use the async Runner so we know *something* is happening
 runner = adaptive.Runner(
-    learner, goal=lambda lrn: lrn.loss() < 0.03 or lrn.npoints > 1000
+    learner, goal=lambda lrn: lrn.loss() < 0.03 or lrn.npoints > 1000,
 )
 ```

@@ -134,7 +134,8 @@ After all subdomains are appropriately small it will prioritise places where the
 ```{code-cell} ipython3
 def resolution_loss_function(min_distance=0, max_distance=1):
     """min_distance and max_distance should be in between 0 and 1
-    because the total area is normalized to 1."""
+    because the total area is normalized to 1.
+    """

     def resolution_loss(ip):
         from adaptive.learner.learner2D import areas, default_loss
@@ -143,10 +144,10 @@ def resolution_loss_function(min_distance=0, max_distance=1):

         A = areas(ip)
         # Setting areas with a small area to zero such that they won't be chosen again
-        loss[A < min_distance**2] = 0
+        loss[min_distance**2 > A] = 0

         # Setting triangles that have a size larger than max_distance to infinite loss
-        loss[A > max_distance**2] = np.inf
+        loss[max_distance**2 < A] = np.inf

         return loss

@@ -158,7 +159,7 @@ loss = resolution_loss_function(min_distance=0.01)
 learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=loss)
 runner = adaptive.BlockingRunner(learner, loss_goal=0.02)
 learner.plot(tri_alpha=0.3).relabel("1 / (x^2 + y^2) in log scale").opts(
-    hv.opts.EdgePaths(color="w"), hv.opts.Image(logz=True, colorbar=True)
+    hv.opts.EdgePaths(color="w"), hv.opts.Image(logz=True, colorbar=True),
 )
 ```
````
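
The flipped comparisons in `resolution_loss` (`A < min_distance**2` becoming `min_distance**2 > A`) build identical boolean masks, so the loss logic is unchanged. A quick numpy check with made-up areas:

```python
import numpy as np

A = np.array([0.00005, 0.5, 2.0])  # triangle areas (illustrative values)
min_distance, max_distance = 0.01, 1.0

# The two spellings are the same comparison with the operands swapped:
print(np.array_equal(A < min_distance**2, min_distance**2 > A))  # True

loss = np.ones_like(A)
loss[min_distance**2 > A] = 0       # tiny triangles: never refined again
loss[max_distance**2 < A] = np.inf  # oversized triangles: refined first
print(loss)  # [ 0.  1. inf]
```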

example-notebook.ipynb
Lines changed: 9 additions & 9 deletions

````diff
@@ -490,7 +490,7 @@
     "\n",
     "print(\n",
     "    f\"The integral value is {learner.igral} \"\n",
-    "    f\"with a corresponding error of {learner.err}\"\n",
+    "    f\"with a corresponding error of {learner.err}\",\n",
     ")\n",
     "learner.plot()"
    ]
@@ -683,7 +683,7 @@
     "\n",
     "\n",
     "learner = adaptive.Learner1D(\n",
-    "    f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d\n",
+    "    f_divergent_1d, (-1, 1), loss_per_interval=uniform_sampling_1d,\n",
     ")\n",
     "runner = adaptive.BlockingRunner(learner, loss_goal=0.01)\n",
     "learner.plot().select(y=(0, 10000))"
@@ -755,8 +755,8 @@
    "source": [
     "def resolution_loss(ip, min_distance=0, max_distance=1):\n",
     "    \"\"\"min_distance and max_distance should be in between 0 and 1\n",
-    "    because the total area is normalized to 1.\"\"\"\n",
-    "\n",
+    "    because the total area is normalized to 1.\n",
+    "    \"\"\"\n",
     "    from adaptive.learner.learner2D import areas, deviations\n",
     "\n",
     "    A = areas(ip)\n",
@@ -773,10 +773,10 @@
     "    loss = np.sqrt(A) * dev + A\n",
     "\n",
     "    # Setting areas with a small area to zero such that they won't be chosen again\n",
-    "    loss[A < min_distance**2] = 0\n",
+    "    loss[min_distance**2 > A] = 0\n",
     "\n",
     "    # Setting triangles that have a size larger than max_distance to infinite loss\n",
-    "    loss[A > max_distance**2] = np.inf\n",
+    "    loss[max_distance**2 < A] = np.inf\n",
     "\n",
     "    return loss\n",
     "\n",
@@ -874,7 +874,7 @@
     "}\n",
     "\n",
     "learner = adaptive.BalancingLearner.from_product(\n",
-    "    jacobi, adaptive.Learner1D, {\"bounds\": (0, 1)}, combos\n",
+    "    jacobi, adaptive.Learner1D, {\"bounds\": (0, 1)}, combos,\n",
     ")\n",
     "\n",
     "runner = adaptive.BlockingRunner(learner, loss_goal=0.01)\n",
@@ -1249,7 +1249,7 @@
     "runner = adaptive.Runner(learner, npoints_goal=100)\n",
     "\n",
     "runner.start_periodic_saving(\n",
-    "    save_kwargs={\"fname\": \"data/periodic_example.p\"}, interval=6\n",
+    "    save_kwargs={\"fname\": \"data/periodic_example.p\"}, interval=6,\n",
    ")\n",
     "\n",
     "runner.live_info()"
@@ -1487,7 +1487,7 @@
     "\n",
     "learner = adaptive.Learner1D(will_raise, (-1, 1))\n",
     "runner = adaptive.Runner(\n",
-    "    learner\n",
+    "    learner,\n",
     ")  # without 'goal' the runner will run forever unless cancelled\n",
     "runner.live_info()\n",
     "runner.live_plot()"
````
