From ab147a089c70c4f5bdf97024714b6ddef28d775d Mon Sep 17 00:00:00 2001
From: Eric Shi
Date: Sun, 29 Sep 2024 18:33:30 -0700
Subject: [PATCH] Fix doc build errors

---
 docs/conf.py                      | 1 -
 docs/modules/interoperability.rst | 4 ++--
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/docs/conf.py b/docs/conf.py
index 2e6ab2e1..400d0c77 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -68,7 +68,6 @@
     "numpy": ("https://numpy.org/doc/stable", None),
     "jax": ("https://jax.readthedocs.io/en/latest", None),
     "pytorch": ("https://pytorch.org/docs/stable", None),
-    "paddle": ("https://www.paddlepaddle.org.cn/", None),
 }
 
 extlinks = {
diff --git a/docs/modules/interoperability.rst b/docs/modules/interoperability.rst
index 21daa09b..ef215f7c 100644
--- a/docs/modules/interoperability.rst
+++ b/docs/modules/interoperability.rst
@@ -768,7 +768,7 @@ To convert a Paddle CUDA stream to a Warp CUDA stream and vice versa, Warp provi
 .. autofunction:: warp.stream_from_paddle
 
 Example: Optimization using ``warp.from_paddle()``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 An example usage of minimizing a loss function over an array of 2D points written in Warp via Paddle's Adam optimizer
 using :func:`warp.from_paddle` is as follows::
@@ -812,7 +812,7 @@
             print(f"{i}\tloss: {l.item()}")
 
 Example: Optimization using ``warp.to_paddle``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Less code is needed when we declare the optimization variables directly in Warp and use :func:`warp.to_paddle` to convert them to Paddle tensors.
 Here, we revisit the same example from above where now only a single conversion to a paddle tensor is needed to supply Adam with the optimization variables::
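
For reviewers unfamiliar with the section touched by the second hunk: the heading whose underline is fixed introduces an example in which Paddle supplies the Adam optimizer, Warp evaluates the loss kernel, and ``warp.from_paddle()`` exposes the Paddle tensors to Warp without copying. The following is only a rough sketch of that pattern, modeled on the analogous ``warp.from_torch()`` example; the kernel body, tensor names, and the assumption that Paddle's optimizer picks up the gradients Warp accumulates in the shared grad buffers are illustrative, not the exact code in ``interoperability.rst``::

    import warp as wp
    import paddle

    wp.init()

    @wp.kernel
    def loss_kernel(xs: wp.array(dtype=float, ndim=2), l: wp.array(dtype=float)):
        # accumulate the squared distance of each 2D point from the origin
        tid = wp.tid()
        wp.atomic_add(l, 0, xs[tid, 0] * xs[tid, 0] + xs[tid, 1] * xs[tid, 1])

    # Paddle owns the optimization variables and the Adam optimizer
    xs = paddle.randn([100, 2])
    xs.stop_gradient = False  # let gradients accumulate on xs
    l = paddle.zeros([1])
    l.stop_gradient = False
    opt = paddle.optimizer.Adam(learning_rate=0.1, parameters=[xs])

    # zero-copy views of the Paddle tensors as Warp arrays
    wp_xs = wp.from_paddle(xs)
    wp_l = wp.from_paddle(l)

    tape = wp.Tape()
    with tape:
        # record the loss kernel launch so Warp can later compute its adjoint
        wp.launch(loss_kernel, dim=len(wp_xs), inputs=[wp_xs], outputs=[wp_l], device=wp_xs.device)

    for i in range(100):
        tape.zero()
        tape.backward(loss=wp_l)  # Warp writes gradients into the shared grad buffers
        opt.step()                # assumption: Adam reads those gradients and updates xs (and wp_xs) in place
        opt.clear_grad()

        # re-run the forward kernel only to report the current loss value
        wp_l.zero_()
        wp.launch(loss_kernel, dim=len(wp_xs), inputs=[wp_xs], outputs=[wp_l], device=wp_xs.device)
        if i % 10 == 0:
            print(f"{i}\tloss: {wp_l.numpy()[0]}")

The ``warp.to_paddle`` variant named in the second fixed heading goes the other way: per the surrounding context lines, the optimization variables start life as Warp arrays and a single conversion to a Paddle tensor is enough to supply Adam with them.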