Commit
Merge branch 'lwawrzyniak/release-1.2/v1.2.2' into 'release-1.2'
Release v1.2.2

See merge request omniverse/warp!580
c0d1f1ed committed Jul 3, 2024
2 parents acfb585 + 1bfb95e commit 1b4013a
Showing 11 changed files with 84 additions and 47 deletions.
6 changes: 6 additions & 0 deletions .gitlab-ci.yml
@@ -220,6 +220,9 @@ windows-x86_64 test:
- .\_venv\Scripts\Activate.ps1
- python -m pip install --upgrade pip
- python -m pip install --upgrade usd-core coverage[toml]
+# Temporary HACK: use NumPy < 2.0 on Windows due to issues with Torch wheels that are not compatible
+# https://github.com/pytorch/pytorch/issues/128860
+- python -m pip install "numpy<2"
- python -m pip install --upgrade torch --extra-index-url https://download.pytorch.org/whl/cu121
- python -m pip install -e .
- Write-Output "$([char]27)[0Ksection_end:$(GetTime):install_dependencies$([char]13)$([char]27)[0K"
@@ -285,6 +288,9 @@ windows-x86_64 test mgpu:
- .\_venv\Scripts\Activate.ps1
- python -m pip install --upgrade pip
- python -m pip install --upgrade usd-core
+# Temporary HACK: use NumPy < 2.0 on Windows due to issues with Torch wheels that are not compatible
+# https://github.com/pytorch/pytorch/issues/128860
+- python -m pip install "numpy<2"
- python -m pip install --upgrade torch --extra-index-url https://download.pytorch.org/whl/cu121
- python -m pip install -e .
- Write-Output "$([char]27)[0Ksection_end:$(GetTime):install_dependencies$([char]13)$([char]27)[0K"
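Context for the pin above: Torch wheels built against NumPy 1.x can fail when NumPy 2.x is installed in the same environment (see the linked pytorch/pytorch#128860), hence the temporary `numpy<2` constraint on the Windows CI jobs. A minimal sketch of a pre-flight check such a CI script could run — the check itself is illustrative and not part of this commit, and it assumes both packages are importable:

```python
# Illustrative pre-flight check (not part of this commit): warn when a
# NumPy 2.x install is paired with a Torch wheel that may have been
# compiled against NumPy 1.x. See pytorch/pytorch#128860.
import numpy as np
import torch

numpy_major = int(np.__version__.split(".")[0])
if numpy_major >= 2:
    print(
        f"Warning: NumPy {np.__version__} alongside Torch {torch.__version__}; "
        "Torch wheels built against NumPy 1.x may crash or misbehave. "
        'Consider: pip install "numpy<2"'
    )
```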
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,9 @@
# CHANGELOG

+## [1.2.2] - 2024-06-19
+
+- Support for NumPy >= 2.0
+
## [1.2.1] - 2024-06-14

- Fix generic function caching
2 changes: 1 addition & 1 deletion VERSION.md
@@ -1 +1 @@
-1.2.1
+1.2.2
2 changes: 1 addition & 1 deletion exts/omni.warp.core/config/extension.toml
@@ -1,6 +1,6 @@
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.2.1"
version = "1.2.2"
authors = ["NVIDIA"]
title = "Warp Core"
description="The core Warp Python module"
4 changes: 4 additions & 0 deletions exts/omni.warp.core/docs/CHANGELOG.md
@@ -1,5 +1,9 @@
# CHANGELOG

+## [1.2.2] - 2024-06-19
+
+- Support for NumPy >= 2.0
+
## [1.2.1] - 2024-06-14

- Fix generic function caching
4 changes: 2 additions & 2 deletions exts/omni.warp/config/extension.toml
@@ -1,6 +1,6 @@
[package]
# Semantic Versioning is used: https://semver.org/
version = "1.2.1"
version = "1.2.2"
authors = ["NVIDIA"]
title = "Warp"
description="Warp OmniGraph Nodes and Sample Scenes"
@@ -35,7 +35,7 @@ exclude = ["Ogn*Database.py", "*/ogn*"]
"omni.timeline" = {}
"omni.ui" = {optional = true}
"omni.usd" = {}
"omni.warp.core" = {version = "1.2.1", exact = true}
"omni.warp.core" = {version = "1.2.2", exact = true}

[[python.module]]
name = "omni.warp._extension"
4 changes: 4 additions & 0 deletions exts/omni.warp/docs/CHANGELOG.md
@@ -1,5 +1,9 @@
# CHANGELOG

+## [1.2.2] - 2024-06-19
+
+- Support for NumPy >= 2.0
+
## [1.2.1] - 2024-06-14

- Fix generic function caching
2 changes: 1 addition & 1 deletion warp/config.py
@@ -7,7 +7,7 @@

from typing import Optional

version: str = "1.2.1"
version: str = "1.2.2"

verify_fp: bool = False # verify inputs and outputs are finite after each launch
verify_cuda: bool = False # if true will check CUDA errors after each kernel launch / memory operation
2 changes: 1 addition & 1 deletion warp/context.py
@@ -4045,7 +4045,7 @@ def full(
# a sequence, assume it's a vector or matrix value
try:
# try to convert to a numpy array first
-na = np.array(value, copy=False)
+na = np.asarray(value)
except Exception as e:
raise ValueError(f"Failed to interpret the value as a vector or matrix: {e}") from e

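The `np.array(value, copy=False)` → `np.asarray(value)` swap here (and at the `warp/types.py` call sites below) is what makes this release NumPy 2.0 compatible: under NumPy >= 2.0, `copy=False` means "never copy" and raises `ValueError` whenever a copy would be required, while `np.asarray` copies only when needed — the old 1.x `copy=False` behavior. A small demonstration, assuming a NumPy 2.x install:

```python
# Demonstration (assumes NumPy >= 2.0): converting a Python list always
# requires a copy, so the old call now raises while np.asarray still works.
import numpy as np

value = [1.0, 2.0, 3.0]
na = np.asarray(value)                # fine on NumPy 1.x and 2.x
try:
    na = np.array(value, copy=False)  # ValueError on NumPy >= 2.0
except ValueError as e:
    print(f"NumPy 2.x refuses the no-copy request: {e}")
```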
95 changes: 57 additions & 38 deletions warp/tests/test_vec_scalar_ops.py
@@ -687,10 +687,22 @@ def test_equality(test, device, dtype, register_kernels=False):
vec4 = wp.types.vector(length=4, dtype=wptype)
vec5 = wp.types.vector(length=5, dtype=wptype)

-def check_equality(
+def check_unsigned_equality(
v20: wp.array(dtype=vec2),
v21: wp.array(dtype=vec2),
v22: wp.array(dtype=vec2),
+v30: wp.array(dtype=vec3),
+v40: wp.array(dtype=vec4),
+v50: wp.array(dtype=vec5),
+):
+wp.expect_eq(v20[0], v20[0])
+wp.expect_neq(v21[0], v20[0])
+wp.expect_neq(v22[0], v20[0])
+wp.expect_eq(v30[0], v30[0])
+wp.expect_eq(v40[0], v40[0])
+wp.expect_eq(v50[0], v50[0])
+
+def check_signed_equality(
v30: wp.array(dtype=vec3),
v31: wp.array(dtype=vec3),
v32: wp.array(dtype=vec3),
@@ -707,29 +719,21 @@ def check_equality(
v54: wp.array(dtype=vec5),
v55: wp.array(dtype=vec5),
):
-wp.expect_eq(v20[0], v20[0])
-wp.expect_neq(v21[0], v20[0])
-wp.expect_neq(v22[0], v20[0])
-
-wp.expect_eq(v30[0], v30[0])
wp.expect_neq(v31[0], v30[0])
wp.expect_neq(v32[0], v30[0])
wp.expect_neq(v33[0], v30[0])

-wp.expect_eq(v40[0], v40[0])
wp.expect_neq(v41[0], v40[0])
wp.expect_neq(v42[0], v40[0])
wp.expect_neq(v43[0], v40[0])
wp.expect_neq(v44[0], v40[0])

-wp.expect_eq(v50[0], v50[0])
wp.expect_neq(v51[0], v50[0])
wp.expect_neq(v52[0], v50[0])
wp.expect_neq(v53[0], v50[0])
wp.expect_neq(v54[0], v50[0])
wp.expect_neq(v55[0], v50[0])

-kernel = getkernel(check_equality, suffix=dtype.__name__)
+unsigned_kernel = getkernel(check_unsigned_equality, suffix=dtype.__name__)
+signed_kernel = getkernel(check_signed_equality, suffix=dtype.__name__)

if register_kernels:
return
@@ -739,49 +743,64 @@ def check_equality(
v22 = wp.array([3.0, 2.0], dtype=vec2, requires_grad=True, device=device)

v30 = wp.array([1.0, 2.0, 3.0], dtype=vec3, requires_grad=True, device=device)
-v31 = wp.array([-1.0, 2.0, 3.0], dtype=vec3, requires_grad=True, device=device)
-v32 = wp.array([1.0, -2.0, 3.0], dtype=vec3, requires_grad=True, device=device)
-v33 = wp.array([1.0, 2.0, -3.0], dtype=vec3, requires_grad=True, device=device)

v40 = wp.array([1.0, 2.0, 3.0, 4.0], dtype=vec4, requires_grad=True, device=device)
-v41 = wp.array([-1.0, 2.0, 3.0, 4.0], dtype=vec4, requires_grad=True, device=device)
-v42 = wp.array([1.0, -2.0, 3.0, 4.0], dtype=vec4, requires_grad=True, device=device)
-v43 = wp.array([1.0, 2.0, -3.0, 4.0], dtype=vec4, requires_grad=True, device=device)
-v44 = wp.array([1.0, 2.0, 3.0, -4.0], dtype=vec4, requires_grad=True, device=device)

v50 = wp.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=vec5, requires_grad=True, device=device)
-v51 = wp.array([-1.0, 2.0, 3.0, 4.0, 5.0], dtype=vec5, requires_grad=True, device=device)
-v52 = wp.array([1.0, -2.0, 3.0, 4.0, 5.0], dtype=vec5, requires_grad=True, device=device)
-v53 = wp.array([1.0, 2.0, -3.0, 4.0, 5.0], dtype=vec5, requires_grad=True, device=device)
-v54 = wp.array([1.0, 2.0, 3.0, -4.0, 5.0], dtype=vec5, requires_grad=True, device=device)
-v55 = wp.array([1.0, 2.0, 3.0, 4.0, -5.0], dtype=vec5, requires_grad=True, device=device)

wp.launch(
-kernel,
+unsigned_kernel,
dim=1,
inputs=[
v20,
v21,
v22,
v30,
-v31,
-v32,
-v33,
v40,
-v41,
-v42,
-v43,
-v44,
v50,
-v51,
-v52,
-v53,
-v54,
-v55,
],
outputs=[],
device=device,
)

+if dtype not in np_unsigned_int_types:
+    v31 = wp.array([-1.0, 2.0, 3.0], dtype=vec3, requires_grad=True, device=device)
+    v32 = wp.array([1.0, -2.0, 3.0], dtype=vec3, requires_grad=True, device=device)
+    v33 = wp.array([1.0, 2.0, -3.0], dtype=vec3, requires_grad=True, device=device)
+
+    v41 = wp.array([-1.0, 2.0, 3.0, 4.0], dtype=vec4, requires_grad=True, device=device)
+    v42 = wp.array([1.0, -2.0, 3.0, 4.0], dtype=vec4, requires_grad=True, device=device)
+    v43 = wp.array([1.0, 2.0, -3.0, 4.0], dtype=vec4, requires_grad=True, device=device)
+    v44 = wp.array([1.0, 2.0, 3.0, -4.0], dtype=vec4, requires_grad=True, device=device)
+
+    v51 = wp.array([-1.0, 2.0, 3.0, 4.0, 5.0], dtype=vec5, requires_grad=True, device=device)
+    v52 = wp.array([1.0, -2.0, 3.0, 4.0, 5.0], dtype=vec5, requires_grad=True, device=device)
+    v53 = wp.array([1.0, 2.0, -3.0, 4.0, 5.0], dtype=vec5, requires_grad=True, device=device)
+    v54 = wp.array([1.0, 2.0, 3.0, -4.0, 5.0], dtype=vec5, requires_grad=True, device=device)
+    v55 = wp.array([1.0, 2.0, 3.0, 4.0, -5.0], dtype=vec5, requires_grad=True, device=device)
+
+    wp.launch(
+        signed_kernel,
+        dim=1,
+        inputs=[
+            v30,
+            v31,
+            v32,
+            v33,
+            v40,
+            v41,
+            v42,
+            v43,
+            v44,
+            v50,
+            v51,
+            v52,
+            v53,
+            v54,
+            v55,
+        ],
+        outputs=[],
+        device=device,
+    )


def test_scalar_multiplication(test, device, dtype, register_kernels=False):
rng = np.random.default_rng(123)
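Why the test was split: the negative sentinel values used by the inequality checks cannot be represented in unsigned vector types, and NumPy 2.0 is stricter about such conversions than 1.x, so the negative-value checks now run only when `dtype` is not unsigned. A small, hedged illustration of NumPy 2.0's stricter unsigned handling (using integers for clarity; this snippet is not from the commit):

```python
# Illustrative only: NumPy 2.0 tightened conversions into unsigned dtypes.
# Out-of-bound Python integers raise instead of wrapping silently, which is
# why negative test values are now restricted to signed types.
import numpy as np

print(np.array([1, 2, 3], dtype=np.uint32))  # fine on NumPy 1.x and 2.x
try:
    np.array([-1, 2, 3], dtype=np.uint32)    # OverflowError on NumPy >= 2.0
except OverflowError as e:
    print(f"NumPy 2.x rejects the negative value: {e}")
```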
6 changes: 3 additions & 3 deletions warp/types.py
@@ -1685,7 +1685,7 @@ def _init_from_data(self, data, dtype, shape, device, copy, pinned):
if dtype == Any:
# infer dtype from data
try:
-arr = np.array(data, copy=False, ndmin=1)
+arr = np.asarray(data)
except Exception as e:
raise RuntimeError(f"Failed to convert input data to an array: {e}") from e
dtype = np_dtype_to_warp_type.get(arr.dtype)
@@ -1724,7 +1724,7 @@ def _init_from_data(self, data, dtype, shape, device, copy, pinned):
f"Failed to convert input data to an array with Warp type {warp.context.type_str(dtype)}"
)
try:
-arr = np.array(data, dtype=npdtype, copy=False, ndmin=1)
+arr = np.asarray(data, dtype=npdtype)
except Exception as e:
raise RuntimeError(f"Failed to convert input data to an array with type {npdtype}: {e}") from e

@@ -2333,7 +2333,7 @@ def numpy(self):
a = self.to("cpu", requires_grad=False)
# convert through __array_interface__
# Note: this handles arrays of structs using `descr`, so the result will be a structured NumPy array
-return np.array(a, copy=False)
+return np.asarray(a)
else:
# return an empty numpy array with the correct dtype and shape
if isinstance(self.dtype, warp.codegen.Struct):
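As in `warp/context.py` above, these three call sites keep their zero-copy fast path: `np.asarray` returns the input unchanged when it is already an `ndarray` of the requested dtype, and copies only when conversion is unavoidable. A quick sketch of that property (not part of the commit):

```python
# np.asarray preserves the no-copy fast path that copy=False provided on
# NumPy 1.x: an ndarray of the right dtype passes through untouched.
import numpy as np

existing = np.arange(4, dtype=np.float32)
assert np.asarray(existing, dtype=np.float32) is existing  # same object, no copy
converted = np.asarray([1, 2, 3], dtype=np.float32)        # list input: a copy is made
print(converted.dtype, converted.shape)
```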
