Clean up async code #631

Merged: 31 commits, Dec 6, 2024

Commits
fe9931a  Fix asynchronous commands (fyellin, Nov 1, 2024)
c733c5f  Fix lint problems and codegen problems. (fyellin, Nov 1, 2024)
f8a71e6  Remove assertion. (fyellin, Nov 2, 2024)
fbfccb7  Merge branch 'main' into async_handler (fyellin, Nov 4, 2024)
bebac48  _api.GPU should be the owner of its internal representation. (fyellin, Nov 4, 2024)
3ae7780  Thank you, ruff! (fyellin, Nov 4, 2024)
5fb6502  For now, use anyio (fyellin, Nov 5, 2024)
780ff9b  Continue to find the minimum changes (fyellin, Nov 5, 2024)
f831e6d  Continue to find the minimum changes (fyellin, Nov 5, 2024)
3b024be  Merge branch 'main' into async_handler (fyellin, Nov 5, 2024)
81cf2f7  Allow "await WgpuAwaitable(..)" (fyellin, Nov 6, 2024)
b08dc8b  Fix ruff format (fyellin, Nov 6, 2024)
84db283  Attempt to delay installing anyio (fyellin, Nov 6, 2024)
16e082e  Attempt to delay installing anyio (fyellin, Nov 6, 2024)
3eb1a59  Add another test. (fyellin, Nov 7, 2024)
466d3d9  Merge remote-tracking branch 'origin/main' into async_handler (fyellin, Nov 7, 2024)
54488dc  Changes requested by reviewers (fyellin, Nov 7, 2024)
45361ce  Change sleep to 0 (fyellin, Nov 14, 2024)
1b903fa  Merge branch 'main' into async_handler (fyellin, Nov 19, 2024)
cd574c8  Small tweaks/cleanup (almarklein, Nov 20, 2024)
bf5862c  Implement new_array to prevent premature substruct cg (almarklein, Nov 20, 2024)
2f150c1  add little debug function that was very useful (almarklein, Nov 20, 2024)
b207bb7  fix import in test script (almarklein, Nov 20, 2024)
b1022ed  codegen (almarklein, Nov 20, 2024)
eedd8c9  Merge branch 'main' into async_handler (fyellin, Nov 21, 2024)
f650208  Make array store sub-refs, not sub-elements (almarklein, Nov 21, 2024)
c4d2388  use sniffio instead of anyio (almarklein, Dec 6, 2024)
9faec92  Merge branch 'main' into async_handler (almarklein, Dec 6, 2024)
7e6e87e  at least one sleep (almarklein, Dec 6, 2024)
f5acb60  fix new_array (almarklein, Dec 6, 2024)
6975af0  fix wheel test (almarklein, Dec 6, 2024)
4 changes: 2 additions & 2 deletions .github/workflows/cd.yml
@@ -75,9 +75,9 @@ jobs:
      - name: Install and test wheel
        shell: bash
        run: |
          rm -rf ./wgpu
          # Install 'normally' to install deps, then force the install from dist-folder and nothing else
          pip install --find-links dist wgpu
          pip install .
          rm -rf ./wgpu
          pip install --force-reinstall --no-deps --no-index --find-links dist wgpu
          pushd $HOME
          python -c 'import wgpu.backends.wgpu_native; print(wgpu.backends.wgpu_native._ffi.lib_path)'
19 changes: 17 additions & 2 deletions pyproject.toml
@@ -9,7 +9,11 @@ license = { file = "LICENSE" }
authors = [{ name = "Almar Klein" }, { name = "Korijn van Golen" }]
keywords = ["webgpu", "wgpu", "vulkan", "metal", "DX12", "opengl"]
requires-python = ">= 3.9"
dependencies = ["cffi>=1.15.0", "rubicon-objc>=0.4.1; sys_platform == 'darwin'"]
dependencies = [
    "cffi>=1.15.0",
    "rubicon-objc>=0.4.1; sys_platform == 'darwin'",
    "sniffio",
]

[project.optional-dependencies]
# For users
@@ -20,7 +24,15 @@ imgui = ["imgui-bundle>=1.2.1"]
build = ["build", "hatchling", "requests", "twine"]
codegen = ["pytest", "numpy", "ruff"]
lint = ["ruff", "pre-commit"]
tests = ["numpy", "pytest", "psutil", "imageio"]
tests = [
    "numpy",
    "pytest",
    "pytest-anyio",
    "psutil",
    "imageio",
    "anyio",
    "trio",
]
examples = []
docs = ["sphinx>7.2", "sphinx_rtd_theme"]
dev = ["wgpu[build,codegen,lint,tests,examples,docs]"]
@@ -63,6 +75,9 @@ artifacts = ["*.so", "*.dll", "*.dylib"]
[tool.hatch.build.targets.wheel.hooks.custom]
path = "tools/hatch_build.py"

[tool.pytest.ini_options]
asyncio_default_fixture_loop_scope = "function"

# ===== Tooling

[tool.ruff]
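The dependency switch above (sniffio instead of anyio, commit c4d2388) means the library only needs to detect which async framework is driving the current task, rather than depending on one. A minimal sketch of that detection pattern using sniffio directly; illustrative only, not code from this PR:

import asyncio

import sniffio


async def report_framework():
    # sniffio inspects the running task and returns the name of the async
    # library driving it ("asyncio", "trio", ...). Outside of any async
    # framework it raises sniffio.AsyncLibraryNotFoundError.
    print("running under", sniffio.current_async_library())


if __name__ == "__main__":
    asyncio.run(report_framework())  # prints "running under asyncio"

Under trio.run() the same call reports "trio"; presumably this is what lets the awaitable code pick a framework-appropriate way to sleep while polling.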
130 changes: 130 additions & 0 deletions tests/test_async.py
@@ -0,0 +1,130 @@
import anyio

from pytest import mark

import wgpu.utils
from testutils import can_use_wgpu_lib, run_tests
from wgpu import GPUDevice, MapMode, TextureFormat
from wgpu.backends.wgpu_native import WgpuAwaitable


@mark.anyio
@mark.parametrize("use_async", [False, True])
async def test_awaitable_async(use_async):
    count = 0

    def finalizer(i):
        return i * i

    def callback(i):
        awaitable.set_result(i)

    def poll_function():
        nonlocal count
        count += 1
        if count >= 3:
            callback(10)

    awaitable = WgpuAwaitable("test", callback, finalizer, poll_function)

    if use_async:
        result = await awaitable
    else:
        result = awaitable.sync_wait()
    assert result == 10 * 10


@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib")
@mark.anyio
async def test_enumerate_adapters_async():
    adapters = await wgpu.gpu.enumerate_adapters_async()
    assert len(adapters) > 0
    for adapter in adapters:
        device = await adapter.request_device_async()
        assert isinstance(device, GPUDevice)


@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib")
@mark.anyio
async def test_request_device_async():
    adapter = await wgpu.gpu.request_adapter_async(power_preference="high-performance")
    device = await adapter.request_device_async()
    assert device is not None


@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib")
@mark.anyio
async def test_buffer_map_async():
    device = wgpu.utils.get_default_device()

    data = b"1" * 10000
    buffer1 = device.create_buffer(size=len(data), usage="MAP_WRITE|COPY_SRC")
    buffer2 = device.create_buffer(size=len(data), usage="MAP_READ|COPY_DST")
    await buffer1.map_async(MapMode.WRITE)
    buffer1.write_mapped(data)
    buffer1.unmap()

    command_encoder = device.create_command_encoder()
    command_encoder.copy_buffer_to_buffer(buffer1, 0, buffer2, 0, len(data))
    device.queue.submit([command_encoder.finish()])

    await buffer2.map_async(MapMode.READ)
    data2 = buffer2.read_mapped()
    buffer2.unmap()

    assert bytes(data2) == data


@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib")
@mark.anyio
async def make_pipeline_async():
    device = wgpu.utils.get_default_device()

    shader_source = """
        @vertex
        fn vertex_main() -> @builtin(position) vec4f {
            return vec4f(0, 0, 0, 1.);
        }

        @compute @workgroup_size(1)
        fn compute_main() { }
    """

    shader = device.create_shader_module(code=shader_source)

    results = [None, None]
    async with anyio.create_task_group() as tg:
        # It's unfortunate that anyio doesn't have an equivalent of asyncio.gather();
        # with asyncio this code would just be:
        #   compute_pipeline, render_pipeline = await asyncio.gather(...)
        async def create_compute_pipeline():
            results[0] = await device.create_compute_pipeline_async(
                layout="auto", compute={"module": shader}
            )

        async def create_render_pipeline():
            results[1] = await device.create_render_pipeline_async(
                layout="auto",
                vertex={
                    "module": shader,
                },
                depth_stencil={"format": TextureFormat.rgba8unorm},
            )

        tg.start_soon(create_compute_pipeline)
        tg.start_soon(create_render_pipeline)

    compute_pipeline, render_pipeline = results
    assert compute_pipeline is not None
    assert render_pipeline is not None

    command_encoder = device.create_command_encoder()
    compute_pass = command_encoder.begin_compute_pass()
    compute_pass.set_pipeline(compute_pipeline)
    compute_pass.dispatch_workgroups(10, 10)
    compute_pass.end()
    device.queue.submit([command_encoder.finish()])
    await device.queue.on_submitted_work_done_async()


if __name__ == "__main__":
    run_tests(globals())
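The anyio task group in make_pipeline_async above is a workaround for anyio having no direct counterpart to asyncio.gather, as the inline comment notes. For comparison, a sketch of the same two concurrent pipeline creations under plain asyncio, reusing the device and shader objects from the test; illustrative only, not part of this PR:

import asyncio

from wgpu import TextureFormat


async def make_pipelines(device, shader):
    # Create both pipelines concurrently and unpack the results in order.
    compute_pipeline, render_pipeline = await asyncio.gather(
        device.create_compute_pipeline_async(
            layout="auto", compute={"module": shader}
        ),
        device.create_render_pipeline_async(
            layout="auto",
            vertex={"module": shader},
            depth_stencil={"format": TextureFormat.rgba8unorm},
        ),
    )
    return compute_pipeline, render_pipeline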
5 changes: 0 additions & 5 deletions tests/test_wgpu_native_basics.py
@@ -12,7 +12,6 @@
from testutils import run_tests, can_use_wgpu_lib, is_ci
from pytest import mark, raises


is_win = sys.platform.startswith("win")


@@ -468,7 +467,3 @@ def test_limits_are_not_legal():

if __name__ == "__main__":
    run_tests(globals())


if __name__ == "__main__":
    run_tests(globals())
21 changes: 21 additions & 0 deletions tests/test_wgpu_native_buffer.py
@@ -548,5 +548,26 @@ def test_create_buffer_with_data(size):
    assert copy[size:] == bytes(buffer._nbytes - size)


@pytest.mark.skip
def test_show_bug_wgpu_native_305_still_not_fixed():
    # When this bug is fixed, we can remove READ_NOSYNC, and just treat "READ" as if
    # it were READ_NOSYNC. No need to handle the command buffer.
    device = wgpu.utils.get_default_device()
    data1 = b"abcdefghijkl"

    # Create buffer with data
    buf = device.create_buffer(
        size=len(data1), usage=wgpu.BufferUsage.MAP_READ, mapped_at_creation=True
    )
    buf.write_mapped(data1)
    buf.unmap()

    # Download from buffer to CPU
    buf.map("READ_NOSYNC")
    data2 = bytes(buf.read_mapped())
    buf.unmap()
    assert data1 == data2


if __name__ == "__main__":
    run_tests(globals())
2 changes: 1 addition & 1 deletion wgpu/_classes.py
@@ -153,7 +153,7 @@ def enumerate_adapters_sync(self):
    async def enumerate_adapters_async(self):
        """Get a list of adapter objects available on the current system.

        An adapter can then be selected (e.g. using it's summary), and a device
        An adapter can then be selected (e.g. using its summary), and a device
        then created from it.

        The order of the devices is such that Vulkan adapters go first, then
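This docstring describes the flow the new async tests exercise: enumerate adapters, select one (e.g. by its summary), then create a device. A short usage sketch, assuming summary is a plain string attribute on the adapter as the docstring implies; not code from this PR:

import wgpu


async def pick_device(preferred="Vulkan"):
    # Enumerate adapters, prefer one whose summary mentions the requested
    # backend, fall back to the first adapter, then create a device from it.
    adapters = await wgpu.gpu.enumerate_adapters_async()
    adapter = next((a for a in adapters if preferred in a.summary), adapters[0])
    return await adapter.request_device_async()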
1 change: 1 addition & 0 deletions wgpu/backends/wgpu_native/__init__.py
@@ -21,3 +21,4 @@
_register_backend(gpu)

from .extras import request_device_sync, request_device
from ._helpers import WgpuAwaitable
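With WgpuAwaitable re-exported here, backend code can hand callers a single object that resolves either synchronously or via await. A hypothetical sketch of that pattern, assuming the positional signature (title, callback, finalizer, poll_function) seen in test_awaitable_async above; real call sites wire the callback to a wgpu-native C callback rather than resolving on the first poll:

from wgpu.backends.wgpu_native import WgpuAwaitable


def start_request():
    def finalizer(value):
        # Post-processes the raw value passed to set_result().
        return value

    def callback(value):
        awaitable.set_result(value)

    def poll_function():
        # Called repeatedly while waiting; here we pretend the native
        # layer completed the request on the first poll.
        callback(42)

    awaitable = WgpuAwaitable("example request", callback, finalizer, poll_function)
    return awaitable


if __name__ == "__main__":
    # Resolve synchronously ...
    result = start_request().sync_wait()
    # ... or from async code: result = await start_request()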