Clean up async code #631

Merged
merged 31 commits on Dec 6, 2024
Changes from 3 commits

Commits (31)
fe9931a
Fix asynchronous commands
fyellin Nov 1, 2024
c733c5f
Fix lint problems and codegen problems.
fyellin Nov 1, 2024
f8a71e6
Remove assertion.
fyellin Nov 2, 2024
fbfccb7
Merge branch 'main' into async_handler
fyellin Nov 4, 2024
bebac48
_api.GPU should be the owner of its internal representation.
fyellin Nov 4, 2024
3ae7780
Thank you, ruff!
fyellin Nov 4, 2024
5fb6502
For now, use anyio
fyellin Nov 5, 2024
780ff9b
Continue to find the minimum changes
fyellin Nov 5, 2024
f831e6d
Continue to find the minimum changes
fyellin Nov 5, 2024
3b024be
Merge branch 'main' into async_handler
fyellin Nov 5, 2024
81cf2f7
Allow "await WgpuAwaitable(..)"
fyellin Nov 6, 2024
b08dc8b
Fix ruff format
fyellin Nov 6, 2024
84db283
Attempt to delay installing anyio
fyellin Nov 6, 2024
16e082e
Attempt to delay installing anyio
fyellin Nov 6, 2024
3eb1a59
Add another test.
fyellin Nov 7, 2024
466d3d9
Merge remote-tracking branch 'origin/main' into async_handler
fyellin Nov 7, 2024
54488dc
Changes requested by reviewers
fyellin Nov 7, 2024
45361ce
Change sleep to 0
fyellin Nov 14, 2024
1b903fa
Merge branch 'main' into async_handler
fyellin Nov 19, 2024
cd574c8
Small tweaks/cleanup
almarklein Nov 20, 2024
bf5862c
Implement new_array to prevent premature substruct cg
almarklein Nov 20, 2024
2f150c1
add little debug function that was very useful
almarklein Nov 20, 2024
b207bb7
fix import in test script
almarklein Nov 20, 2024
b1022ed
codegen
almarklein Nov 20, 2024
eedd8c9
Merge branch 'main' into async_handler
fyellin Nov 21, 2024
f650208
Make array store sub-refs, not sub-elements
almarklein Nov 21, 2024
c4d2388
use sniffio instead of anyio
almarklein Dec 6, 2024
9faec92
Merge branch 'main' into async_handler
almarklein Dec 6, 2024
7e6e87e
at least one sleep
almarklein Dec 6, 2024
f5acb60
fix new_array
almarklein Dec 6, 2024
6975af0
fix wheel test
almarklein Dec 6, 2024
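
Note on the async-backend commits above ("For now, use anyio", "use sniffio instead of anyio", "at least one sleep"): sniffio only detects which async framework is currently running, rather than pulling in anyio's full compatibility layer. The actual helper (presumably in wgpu-py's _helpers module) is not part of the 3-commit diff shown below, so the following is only a rough sketch of the general pattern — framework-agnostic sleeping via sniffio — not the real implementation:

import sniffio


async def async_sleep(delay):
    # Ask sniffio which framework is driving the current task
    # ("asyncio", "trio", ...) and sleep with that framework's own API.
    libname = sniffio.current_async_library()
    if libname == "trio":
        import trio

        await trio.sleep(delay)
    else:
        import asyncio

        await asyncio.sleep(delay)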
5 changes: 4 additions & 1 deletion pyproject.toml
@@ -20,7 +20,7 @@ imgui = ["imgui-bundle>=1.2.1"]
 build = ["build", "hatchling", "requests", "twine"]
 codegen = ["pytest", "numpy", "ruff"]
 lint = ["ruff", "pre-commit"]
-tests = ["numpy", "pytest", "psutil", "imageio"]
+tests = ["numpy", "pytest", "pytest-asyncio", "psutil", "imageio"]
 examples = []
 docs = ["sphinx>7.2", "sphinx_rtd_theme"]
 dev = ["wgpu[build,codegen,lint,tests,examples,docs]"]
@@ -63,6 +63,9 @@ artifacts = ["*.so", "*.dll", "*.dylib"]
 [tool.hatch.build.targets.wheel.hooks.custom]
 path = "tools/hatch_build.py"
 
+[tool.pytest.ini_options]
+asyncio_default_fixture_loop_scope = "function"
+
 # ===== Tooling
 
 [tool.ruff]
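Background on the [tool.pytest.ini_options] addition: asyncio_default_fixture_loop_scope = "function" is pytest-asyncio's option for the event-loop scope of async fixtures, presumably set here to silence the warning pytest-asyncio emits when the default is left unset. With pytest-asyncio in the "tests" extra, a minimal async test under this configuration looks roughly like the hypothetical example below; the real tests added by this PR follow in tests/test_async.py:

import asyncio

import pytest


@pytest.mark.asyncio  # marker provided by pytest-asyncio
async def test_event_loop_runs():
    # Each test function gets its own event loop under this configuration.
    await asyncio.sleep(0)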
106 changes: 106 additions & 0 deletions tests/test_async.py
@@ -0,0 +1,106 @@
import asyncio

import pytest

import wgpu.utils
from tests.testutils import run_tests
from wgpu import MapMode, TextureFormat
from wgpu.backends.wgpu_native import WgpuAwaitable


@pytest.mark.asyncio
@pytest.mark.parametrize("use_async", [False, True])
async def test_awaitable_async(use_async, loop_scope="function"):
count = 0

def finalizer(i):
return i * i

def callback(i):
awaitable.set_result(i)

def poll_function():
nonlocal count
count += 1
if count >= 3:
callback(10)

awaitable = WgpuAwaitable("test", callback, finalizer, poll_function)

if use_async:
result = await awaitable.wait_async()
else:
result = awaitable.wait_sync()
assert result == 10 * 10


@pytest.mark.asyncio
async def test_asynchronous_get_device(loop_scope="function"):
adapter = await wgpu.gpu.request_adapter_async(power_preference="high-performance")
device = await adapter.request_device_async()
assert device is not None


@pytest.mark.asyncio
async def test_asynchronous_buffer_map(loop_scope="function"):
device = wgpu.utils.get_default_device()

data = b"1" * 10000
buffer1 = device.create_buffer(size=len(data), usage="MAP_WRITE|COPY_SRC")
buffer2 = device.create_buffer(size=len(data), usage="MAP_READ|COPY_DST")
await buffer1.map_async(MapMode.WRITE)
buffer1.write_mapped(data)
buffer1.unmap()

command_encoder = device.create_command_encoder()
command_encoder.copy_buffer_to_buffer(buffer1, 0, buffer2, 0, len(data))
device.queue.submit([command_encoder.finish()])

await buffer2.map_async(MapMode.READ)
data2 = buffer2.read_mapped()
buffer2.unmap()

assert bytes(data2) == data


@pytest.mark.asyncio
async def test_asynchronous_make_pipeline(loop_scope="function"):
device = wgpu.utils.get_default_device()

shader_source = """
@vertex
fn vertex_main() -> @builtin(position) vec4f {
return vec4f(0, 0, 0, 1.);
}

@compute @workgroup_size(1)
fn compute_main() { }
"""

shader = device.create_shader_module(code=shader_source)

render_pipeline, compute_pipeline = await asyncio.gather(
device.create_render_pipeline_async(
layout="auto",
vertex={
"module": shader,
},
depth_stencil={"format": TextureFormat.rgba8unorm},
),
device.create_compute_pipeline_async(layout="auto", compute={"module": shader}),
)

assert compute_pipeline is not None
assert render_pipeline is not None

command_encoder = device.create_command_encoder()
compute_pass = command_encoder.begin_compute_pass()
compute_pass.set_pipeline(compute_pipeline)
compute_pass.dispatch_workgroups(10, 10)
compute_pass.end()
device.queue.submit([command_encoder.finish()])
await device.queue.on_submitted_work_done_async()


if __name__ == "__main__":
run_tests(globals())
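
The first test above exercises WgpuAwaitable through both wait_sync() and wait_async(). The class itself (imported from wgpu.backends.wgpu_native) is not part of this 3-commit diff, so the toy class below is only a sketch of the callback-plus-poll contract the test relies on, with the same constructor shape; it is not the real implementation:

import asyncio
import time


class ToyAwaitable:
    # Toy stand-in: the callback (which the real class would hand to wgpu-native)
    # eventually calls set_result(), the poll function pumps native events,
    # and the finalizer converts the raw result before it is returned.
    def __init__(self, title, callback, finalizer, poll_function):
        self._title = title
        self._callback = callback
        self._finalizer = finalizer
        self._poll = poll_function
        self._done = False
        self._result = None

    def set_result(self, value):
        self._result = value
        self._done = True

    def wait_sync(self):
        while not self._done:
            self._poll()
            time.sleep(0)
        return self._finalizer(self._result)

    async def wait_async(self):
        while not self._done:
            self._poll()
            await asyncio.sleep(0)
        return self._finalizer(self._result)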
5 changes: 0 additions & 5 deletions tests/test_wgpu_native_basics.py
@@ -12,7 +12,6 @@
 from testutils import run_tests, can_use_wgpu_lib, is_ci
 from pytest import mark, raises
 
-
 is_win = sys.platform.startswith("win")
 
 
@@ -468,7 +467,3 @@ def test_limits_are_not_legal():
 
 if __name__ == "__main__":
     run_tests(globals())
-
-
-if __name__ == "__main__":
-    run_tests(globals())
21 changes: 21 additions & 0 deletions tests/test_wgpu_native_buffer.py
@@ -548,5 +548,26 @@ def test_create_buffer_with_data(size):
     assert copy[size:] == bytes(buffer._nbytes - size)
 
 
+@pytest.mark.skip
+def test_show_bug_wgpu_native_305_still_not_fixed():
+    # When this bug is fixed, we can remove READ_NOSYNC, and just treat "READ" as if
+    # it were READ_NOSYNC. No need to handle the command buffer.
+    device = wgpu.utils.get_default_device()
+    data1 = b"abcdefghijkl"
+
+    # Create buffer with data
+    buf = device.create_buffer(
+        size=len(data1), usage=wgpu.BufferUsage.MAP_READ, mapped_at_creation=True
+    )
+    buf.write_mapped(data1)
+    buf.unmap()
+
+    # Download from buffer to CPU
+    buf.map("READ_NOSYNC")
+    data2 = bytes(buf.read_mapped())
+    buf.unmap()
+    assert data1 == data2
+
+
 if __name__ == "__main__":
     run_tests(globals())
1 change: 1 addition & 0 deletions wgpu/backends/wgpu_native/__init__.py
@@ -21,3 +21,4 @@
 _register_backend(gpu)
 
 from .extras import request_device_sync, request_device
+from ._helpers import WgpuAwaitable