Skip to content

Commit

Permalink
Clean up async code (#631)
Browse files Browse the repository at this point in the history
* Fix asynchronous commands

* Fix lint problems and codegen problems.

* Remove assertion.

* _api.GPU should be the owner of its internal representation.

* Thank you, ruff!

* For now, use anyio

* Continue to find the minimum changes

* Continue to find the minimum changes

* Allow "await WgpuAwaitable(..)"

The object returned by WgpuAwaitable is now directly awaitable. Make Almar happy.

* Fix ruff format

* Attempt to delay installing anyio

* Attempt to delay installing anyio

* Add another test.

* Changes requested by reviewers

* Change sleep to 0

* Small tweaks/cleanup

* Implement new_array to prevent premature substruct cg

* add little debug function that was very useful

* fix import in test script

* codegen

* Make array store sub-refs, not sub-elements

* use sniffio instead of anyio

* at least one sleep

* fix new_array

* fix wheel test

---------

Co-authored-by: Almar Klein <[email protected]>
  • Loading branch information
fyellin and almarklein authored Dec 6, 2024
1 parent c5108ac commit 1cc0e24
Show file tree
Hide file tree
Showing 10 changed files with 436 additions and 221 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/cd.yml
Original file line number Diff line number Diff line change
Expand Up @@ -75,9 +75,9 @@ jobs:
- name: Install and test wheel
shell: bash
run: |
rm -rf ./wgpu
# Install 'normally' to install deps, then force the install from dist-folder and nothing else
pip install --find-links dist wgpu
pip install .
rm -rf ./wgpu
pip install --force-reinstall --no-deps --no-index --find-links dist wgpu
pushd $HOME
python -c 'import wgpu.backends.wgpu_native; print(wgpu.backends.wgpu_native._ffi.lib_path)'
Expand Down
19 changes: 17 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,11 @@ license = { file = "LICENSE" }
authors = [{ name = "Almar Klein" }, { name = "Korijn van Golen" }]
keywords = ["webgpu", "wgpu", "vulkan", "metal", "DX12", "opengl"]
requires-python = ">= 3.9"
dependencies = ["cffi>=1.15.0", "rubicon-objc>=0.4.1; sys_platform == 'darwin'"]
dependencies = [
"cffi>=1.15.0",
"rubicon-objc>=0.4.1; sys_platform == 'darwin'",
"sniffio",
]

[project.optional-dependencies]
# For users
Expand All @@ -20,7 +24,15 @@ imgui = ["imgui-bundle>=1.2.1"]
build = ["build", "hatchling", "requests", "twine"]
codegen = ["pytest", "numpy", "ruff"]
lint = ["ruff", "pre-commit"]
tests = ["numpy", "pytest", "psutil", "imageio"]
tests = [
"numpy",
"pytest",
"pytest-anyio",
"psutil",
"imageio",
"anyio",
"trio",
]
examples = []
docs = ["sphinx>7.2", "sphinx_rtd_theme"]
dev = ["wgpu[build,codegen,lint,tests,examples,docs]"]
Expand Down Expand Up @@ -63,6 +75,9 @@ artifacts = ["*.so", "*.dll", "*.dylib"]
[tool.hatch.build.targets.wheel.hooks.custom]
path = "tools/hatch_build.py"

[tool.pytest.ini_options]
asyncio_default_fixture_loop_scope = "function"

# ===== Tooling

[tool.ruff]
Expand Down
130 changes: 130 additions & 0 deletions tests/test_async.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,130 @@
import anyio

from pytest import mark

import wgpu.utils
from testutils import can_use_wgpu_lib, run_tests
from wgpu import GPUDevice, MapMode, TextureFormat
from wgpu.backends.wgpu_native import WgpuAwaitable


@mark.anyio
@mark.parametrize("use_async", [False, True])
async def test_awaitable_async(use_async):
    """Exercise WgpuAwaitable both via ``await`` and via ``sync_wait()``.

    The poll function only delivers the result on its third invocation, so
    both code paths must actually drive the poll loop.
    """
    poll_count = 0

    def finalizer(value):
        # Post-processing applied to the raw result before it is returned.
        return value * value

    def callback(value):
        awaitable.set_result(value)

    def poll_function():
        # Deliver the result only after a few polls.
        nonlocal poll_count
        poll_count += 1
        if poll_count >= 3:
            callback(10)

    awaitable = WgpuAwaitable("test", callback, finalizer, poll_function)

    outcome = (await awaitable) if use_async else awaitable.sync_wait()
    assert outcome == 10 * 10


@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib")
@mark.anyio
async def test_enumerate_adapters_async():
    """Every enumerated adapter must be able to produce a GPUDevice."""
    adapters = await wgpu.gpu.enumerate_adapters_async()
    assert len(adapters) > 0
    for adapter in adapters:
        assert isinstance(await adapter.request_device_async(), GPUDevice)


@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib")
@mark.anyio
async def test_request_device_async():
    """Requesting an adapter and then a device asynchronously must succeed."""
    adapter = await wgpu.gpu.request_adapter_async(power_preference="high-performance")
    assert (await adapter.request_device_async()) is not None


@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib")
@mark.anyio
async def test_buffer_map_async():
    """Round-trip data through two buffers using the async map API."""
    device = wgpu.utils.get_default_device()

    payload = b"1" * 10000
    src = device.create_buffer(size=len(payload), usage="MAP_WRITE|COPY_SRC")
    dst = device.create_buffer(size=len(payload), usage="MAP_READ|COPY_DST")

    # Upload: map the source buffer for writing, fill it, and unmap.
    await src.map_async(MapMode.WRITE)
    src.write_mapped(payload)
    src.unmap()

    # Copy src -> dst on the GPU.
    encoder = device.create_command_encoder()
    encoder.copy_buffer_to_buffer(src, 0, dst, 0, len(payload))
    device.queue.submit([encoder.finish()])

    # Download: map the destination buffer for reading and compare.
    await dst.map_async(MapMode.READ)
    readback = dst.read_mapped()
    dst.unmap()

    assert bytes(readback) == payload


@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib")
@mark.anyio
async def test_make_pipeline_async():
    """Create a compute and a render pipeline concurrently, then run the compute one.

    Renamed from ``make_pipeline_async``: without the ``test_`` prefix, pytest
    (and ``run_tests(globals())``) never collected this coroutine, so the test
    silently never ran.
    """
    device = wgpu.utils.get_default_device()

    shader_source = """
@vertex
fn vertex_main() -> @builtin(position) vec4f {
    return vec4f(0, 0, 0, 1.);
}
@compute @workgroup_size(1)
fn compute_main() { }
"""

    shader = device.create_shader_module(code=shader_source)

    # Build both pipelines concurrently in a task group.
    results = [None, None]
    async with anyio.create_task_group() as tg:
        # It's unfortunate anyio doesn't have a gather(). With asyncio this would
        # just be: compute_pipeline, render_pipeline = await asyncio.gather(...)
        async def create_compute_pipeline():
            results[0] = await device.create_compute_pipeline_async(
                layout="auto", compute={"module": shader}
            )

        async def create_render_pipeline():
            results[1] = await device.create_render_pipeline_async(
                layout="auto",
                vertex={
                    "module": shader,
                },
                depth_stencil={"format": TextureFormat.rgba8unorm},
            )

        tg.start_soon(create_compute_pipeline)
        tg.start_soon(create_render_pipeline)

    compute_pipeline, render_pipeline = results
    assert compute_pipeline is not None
    assert render_pipeline is not None

    # Dispatch the compute pipeline once to verify it is actually usable,
    # and wait (async) until the submitted work has finished.
    command_encoder = device.create_command_encoder()
    compute_pass = command_encoder.begin_compute_pass()
    compute_pass.set_pipeline(compute_pipeline)
    compute_pass.dispatch_workgroups(10, 10)
    compute_pass.end()
    device.queue.submit([command_encoder.finish()])
    await device.queue.on_submitted_work_done_async()


if __name__ == "__main__":
run_tests(globals())
5 changes: 0 additions & 5 deletions tests/test_wgpu_native_basics.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@
from testutils import run_tests, can_use_wgpu_lib, is_ci
from pytest import mark, raises


is_win = sys.platform.startswith("win")


Expand Down Expand Up @@ -468,7 +467,3 @@ def test_limits_are_not_legal():

if __name__ == "__main__":
run_tests(globals())


if __name__ == "__main__":
run_tests(globals())
21 changes: 21 additions & 0 deletions tests/test_wgpu_native_buffer.py
Original file line number Diff line number Diff line change
Expand Up @@ -548,5 +548,26 @@ def test_create_buffer_with_data(size):
assert copy[size:] == bytes(buffer._nbytes - size)


@pytest.mark.skip
def test_show_bug_wgpu_native_305_still_not_fixed():
    # Reproduction case for wgpu-native issue #305; kept skipped until it is fixed.
    # When this bug is fixed, we can remove READ_NOSYNC, and just treat "READ" as if
    # it were READ_NOSYNC. No need to handle the command buffer.
    device = wgpu.utils.get_default_device()
    data1 = b"abcdefghijkl"

    # Create a buffer that is mapped at creation, write data into it, and unmap.
    buf = device.create_buffer(
        size=len(data1), usage=wgpu.BufferUsage.MAP_READ, mapped_at_creation=True
    )
    buf.write_mapped(data1)
    buf.unmap()

    # Download from buffer to CPU.
    # NOTE(review): READ_NOSYNC presumably maps without the synchronization step
    # that plain "READ" performs — confirm against the backend implementation.
    buf.map("READ_NOSYNC")
    data2 = bytes(buf.read_mapped())
    buf.unmap()
    assert data1 == data2


if __name__ == "__main__":
run_tests(globals())
2 changes: 1 addition & 1 deletion wgpu/_classes.py
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,7 @@ def enumerate_adapters_sync(self):
async def enumerate_adapters_async(self):
"""Get a list of adapter objects available on the current system.
An adapter can then be selected (e.g. using it's summary), and a device
An adapter can then be selected (e.g. using its summary), and a device
then created from it.
The order of the devices is such that Vulkan adapters go first, then
Expand Down
1 change: 1 addition & 0 deletions wgpu/backends/wgpu_native/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,3 +21,4 @@
_register_backend(gpu)

from .extras import request_device_sync, request_device
from ._helpers import WgpuAwaitable
Loading

0 comments on commit 1cc0e24

Please sign in to comment.