Added array_equal function and tests #87

Open

wants to merge 10 commits into main
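This PR replaces the `np.array_equal`/`cp.array_equal`-via-DLPack comparison pattern in the docs and tests with a new `tp.array_equal` helper. The helper's implementation is not visible in the hunks below; as a reference point for review, a minimal sketch of the semantics the tests assume (same shape and same elements, built on the existing `tp.all` and element-wise `==`) might look like:

```python
import tripy as tp

def array_equal(a: tp.Tensor, b: tp.Tensor) -> bool:
    """True if `a` and `b` have the same shape and identical elements."""
    # Tensors of different shapes are never equal; see the (1, 4) vs. (4, 1)
    # case in tests/integration/test_array_equal.py below.
    if list(a.shape) != list(b.shape):
        return False
    # Reduce the element-wise comparison to a single Python bool.
    return bool(tp.all(a == b))
```

The names and shape handling here are assumptions for illustration, not the PR's actual code.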
9 changes: 6 additions & 3 deletions tripy/docs/post0_developer_guides/how-to-add-new-ops.md
@@ -232,7 +232,7 @@ def theta(shape: Tuple[int], dim: int = 0, dtype: datatype.dtype = datatype.floa

output = tp.theta([3])

-assert np.array_equal(cp.from_dlpack(output).get(), np.arange(0, 3, dtype=np.float32))
+assert tp.array_equal(output, tp.Tensor(np.arange(0, 3, dtype=np.float32)))
"""

# Next we build the trace operator. The `build()` function is also responsible for constructing
@@ -410,9 +410,12 @@ import tripy as tp

def test_multi_dimensional():
output = tp.theta([2, 3], dim=1)
-expected = np.broadcast_to(np.arange(0, 3, dtype=np.float32), (2, 3))
+expected = tp.Tensor([
+    [0., 1., 2.],
+    [0., 1., 2.]
+], dtype=tp.float32)

-assert np.array_equal(cp.from_dlpack(output).get(), expected)
+assert tp.array_equal(output, expected)

```
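The change above is representative of the whole PR: every assertion that round-trips a tensor through DLPack and CuPy to compare on the host is replaced with a direct `tp.array_equal` call. A sketch of the two equivalent styles (assuming `tp.arange` defaults to `float32`, as the introduction guide shows):

```python
import numpy as np
import cupy as cp
import tripy as tp

out = tp.arange(3)

# Old style: export via DLPack, copy to host, compare with NumPy.
old_ok = np.array_equal(cp.from_dlpack(out).get(), np.arange(3, dtype=np.float32))

# New style: compare directly, with no explicit host copy at the call site.
new_ok = tp.array_equal(out, tp.Tensor(np.arange(3, dtype=np.float32)))

assert old_ok == new_ok
```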

2 changes: 1 addition & 1 deletion tripy/docs/pre0_user_guides/00-introduction-to-tripy.md
@@ -14,7 +14,7 @@ But enough talk; let's see some code:
a = tp.arange(5)
c = a + 1.5
print(c)
-assert np.array_equal(cp.from_dlpack(c).get(), np.arange(5, dtype=np.float32) + 1.5) # doc: omit
+assert tp.array_equal(c, tp.Tensor(np.arange(5, dtype=np.float32) + 1.5)) # doc: omit
```

This should look familiar if you've used linear algebra or deep learning libraries like
23 changes: 11 additions & 12 deletions tripy/tests/backend/test_compiler_api.py
@@ -173,7 +173,7 @@ def test_file_io(self, single_return_executable):
inp = tp.iota((2, 2), dtype=tp.float32)
out1 = single_return_executable(inp, inp)
out2 = loaded_executable(inp, inp)
-assert cp.array_equal(cp.from_dlpack(out1), cp.from_dlpack(out2))
+assert tp.array_equal(out1, out2)


class TestCompile:
@@ -185,8 +185,7 @@ def test_function(self):
inp = tp.ones((2, 2), dtype=tp.float32)
out = compiled_gelu(inp)

-# TODO (#225): Replace with tp.all
-assert cp.array_equal(cp.from_dlpack(out), cp.from_dlpack(tp.relu(inp)))
+assert tp.array_equal(out, tp.relu(inp))

def test_module(self):
layernorm = tp.LayerNorm(2)
@@ -195,7 +194,7 @@ def test_module(self):
inp = tp.ones((2, 2), dtype=tp.float32)
out = compiled_layernorm(inp)

-assert cp.array_equal(cp.from_dlpack(out), cp.from_dlpack(layernorm(inp)))
+assert tp.array_equal(out, layernorm(inp))

def test_compile_arg_order_irrelevant(self):
compiler = tp.Compiler(sub)
@@ -212,7 +211,7 @@ def test_compile_arg_order_irrelevant(self):

# Compiled function should still take arguments in (a, b) order.
out = compiled_sub(a, b)
-assert cp.array_equal(cp.from_dlpack(out), cp.ones((2, 2), dtype=cp.float32))
+assert tp.array_equal(out, tp.ones((2, 2), dtype=tp.float32))

@pytest.mark.parametrize("b", [2, tp.ones((2, 2), dtype=tp.float32) * 2])
def test_constants_baked(self, b):
@@ -225,7 +224,7 @@ def test_constants_baked(self, b):

out = compiled_add(a)

-assert cp.array_equal(cp.from_dlpack(out), cp.ones((2, 2), dtype=cp.float32) * 2)
+assert tp.array_equal(out, tp.ones((2, 2), dtype=tp.float32) * 2)

@pytest.mark.parametrize("func", [variadic_positional, variadic_keyword])
def test_variadic_arguments_rejected(self, func):
@@ -247,8 +246,8 @@ def test_multiple_return_values(self):

plus, minus = compiled_func(a, b)

-assert cp.array_equal(cp.from_dlpack(plus), cp.ones((2, 2), dtype=cp.float32) * 3)
-assert cp.array_equal(cp.from_dlpack(minus), cp.ones((2, 2), dtype=cp.float32))
+assert tp.array_equal(plus, tp.ones((2, 2), dtype=tp.float32) * 3)
+assert tp.array_equal(minus, tp.ones((2, 2), dtype=tp.float32))

def test_incorrect_dtype_rejected(self):
compiler = tp.Compiler(add)
@@ -288,10 +287,10 @@ def test_dynamic_shapes(self):
)

out = compiled_add(tp.ones((2, 1), dtype=tp.float32), tp.ones((2, 1), dtype=tp.float32))
-assert cp.array_equal(cp.from_dlpack(out), cp.ones((2, 1), dtype=cp.float32) * 2)
+assert tp.array_equal(out, tp.ones((2, 1), dtype=tp.float32) * 2)

out = compiled_add(tp.ones((3, 1), dtype=tp.float32), tp.ones((3, 1), dtype=tp.float32))
-assert cp.array_equal(cp.from_dlpack(out), cp.ones((3, 1), dtype=cp.float32) * 2)
+assert tp.array_equal(out, tp.ones((3, 1), dtype=tp.float32) * 2)


# TODO (#256): Remove these tests and replace with exhaustive integration testing
@@ -303,7 +302,7 @@ def test_cast(self):
a = tp.ones((2, 2), dtype=tp.float32)
out = compiled_cast(a)

-assert cp.array_equal(cp.from_dlpack(out), cp.ones((2, 2), dtype=cp.int32))
+assert tp.array_equal(out, tp.ones((2, 2), dtype=tp.int32))

def test_linear(self):
linear = tp.Linear(2, 3)
@@ -315,4 +314,4 @@

out = compiled_linear(a)

-assert cp.array_equal(cp.from_dlpack(out), cp.from_dlpack(linear(a)))
+assert tp.array_equal(out, linear(a))
4 changes: 2 additions & 2 deletions tripy/tests/frontend/module/test_module.py
@@ -29,8 +29,8 @@ def test_basic(self, all_network_modes):
assert len(dict(test_net.named_parameters())) == 1
assert len(dict(test_net.named_children())) == 2

-result = np.array([1.0, 2.0]) + np.full(2, sum(call_args), dtype=np.float32)
-assert np.array_equal(cp.from_dlpack(test_net(*inputs)).get(), result)
+result = tp.Tensor(np.array([1.0, 2.0], dtype=np.float32) + np.full(2, sum(call_args), dtype=np.float32))
+assert tp.array_equal(test_net(*inputs), result)

def test_get_set_attr(self, network):
network.new_attr = True
6 changes: 3 additions & 3 deletions tripy/tests/frontend/module/test_parameter.py
@@ -36,11 +36,11 @@ def test_is_equivalent_to_tensor(self):
tensor = tp.Tensor([1, 2, 3])
param = tp.Parameter(tensor)

-assert np.array_equal(cp.from_dlpack(param).get(), cp.from_dlpack(tensor).get())
+assert tp.array_equal(param, tensor)

def test_can_construct_from_non_tensor(self):
param = tp.Parameter([1, 2, 3])
-assert np.array_equal(cp.from_dlpack(param).get(), np.array([1, 2, 3], dtype=np.int32))
+assert tp.array_equal(param, tp.Tensor(np.array([1, 2, 3], dtype=np.int32)))

@pytest.mark.parametrize(
"other,is_compatible",
@@ -83,4 +83,4 @@ def test_is_compatible_does_not_materialize_data(self):

def test_data_can_be_materialized(self):
param = DefaultParameter((1, 2), dtype=tp.float32)
-assert np.array_equal(cp.from_dlpack(param).get(), np.array([[0, 1]], dtype=np.float32))
+assert tp.array_equal(param, tp.Tensor(np.array([[0, 1]], dtype=np.float32)))
37 changes: 37 additions & 0 deletions tripy/tests/integration/test_array_equal.py
@@ -0,0 +1,37 @@
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import pytest
import numpy as np
import torch

import tripy as tp


class TestArrayEqual:
@pytest.mark.parametrize(
"a, b",
[
(tp.Tensor([1, 2], dtype=tp.float32), tp.Tensor([1, 2], dtype=tp.float32)),
(tp.ones((2, 2), dtype=tp.int32), tp.Tensor([[1, 1], [1, 1]], dtype=tp.int32)),
(tp.ones((1, 4)), tp.ones((4, 1))),
],
)
def test_array_equal(self, a, b):
torch_result = torch.equal(torch.from_dlpack(a), torch.from_dlpack(b))
tp_result = tp.array_equal(a, b)
assert torch_result == tp_result
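For reference, the expected outcomes of the three parametrized cases, assuming `tp.array_equal` follows `torch.equal`/`np.array_equal` semantics, would be:

```python
import tripy as tp

# Same shape, same values: equal.
assert tp.array_equal(tp.Tensor([1, 2], dtype=tp.float32),
                      tp.Tensor([1, 2], dtype=tp.float32))

# Same values built by different constructors: equal.
assert tp.array_equal(tp.ones((2, 2), dtype=tp.int32),
                      tp.Tensor([[1, 1], [1, 1]], dtype=tp.int32))

# Same elements but different shapes: not equal.
assert not tp.array_equal(tp.ones((1, 4)), tp.ones((4, 1)))
```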
4 changes: 1 addition & 3 deletions tripy/tests/integration/test_concatenate.py
@@ -36,9 +36,7 @@ class TestConcatenate:
def test_concat(self, tensor_shapes, dim):
tensors = [tp.ones(shape) for shape in tensor_shapes]
out = tp.concatenate(tensors, dim=dim)
-assert np.array_equal(
-    cp.from_dlpack(out).get(), np.concatenate([np.ones(shape) for shape in tensor_shapes], axis=dim)
-)
+assert tp.array_equal(out, tp.Tensor(np.concatenate([np.ones(shape, dtype=np.float32) for shape in tensor_shapes], axis=dim)))

@pytest.mark.parametrize(
"tensor_shapes, dim",
8 changes: 4 additions & 4 deletions tripy/tests/integration/test_expand.py
@@ -25,21 +25,21 @@ class TestExpand:
def test_int_sizes(self):
input = tp.ones((2, 1))
out = tp.expand(input, (-1, 2))
-assert np.array_equal(cp.from_dlpack(out).get(), np.ones((2, 2), dtype=np.float32))
+assert tp.array_equal(out, tp.Tensor(np.ones((2, 2), dtype=np.float32)))

def test_shape_sizes(self):
input = tp.ones((2, 1))
a = tp.ones((2, 4))
out = tp.expand(input, a.shape)
-assert np.array_equal(cp.from_dlpack(out).get(), np.ones((2, 4), dtype=np.float32))
+assert tp.array_equal(out, tp.Tensor(np.ones((2, 4), dtype=np.float32)))

def test_extra_dims(self):
input = tp.ones((2, 1))
out = tp.expand(input, (1, -1, 2))
-assert np.array_equal(cp.from_dlpack(out).get(), np.ones((1, 2, 2), dtype=np.float32))
+assert tp.array_equal(out, tp.Tensor(np.ones((1, 2, 2), dtype=np.float32)))

def test_mixed_sizes(self):
input = tp.ones((2, 1, 1))
a = tp.ones((4, 4))
out = tp.expand(input, (-1, a.shape[0], a.shape[1]))
-assert np.array_equal(cp.from_dlpack(out).get(), np.ones((2, 4, 4), dtype=np.float32))
+assert tp.array_equal(out, tp.Tensor(np.ones((2, 4, 4), dtype=np.float32)))
9 changes: 5 additions & 4 deletions tripy/tests/integration/test_flip.py
@@ -30,22 +30,23 @@ def test_flip(self, dims):
cp_a = cp.arange(16).reshape((4, 4)).astype(cp.float32)
a = tp.Tensor(cp_a, device=tp.device("gpu"))
f = tp.flip(a, dims=dims)
+# TODO(129): The tensor we get from np.flip will have negative strides. We cannot currently construct a tensor with negative strides.
assert np.array_equal(cp.from_dlpack(f).get(), np.flip(cp_a.get(), axis=dims))

# also ensure that flipping a second time restores the original value
f2 = tp.flip(f, dims=dims)
-assert cp.array_equal(cp.from_dlpack(f2), cp_a)
+assert np.array_equal(cp.from_dlpack(f2).get(), cp_a.get())

def test_no_op(self):
cp_a = cp.arange(16).reshape((4, 4)).astype(cp.float32)
a = tp.Tensor(cp_a, device=tp.device("gpu"))
f = tp.flip(a, dims=[])
-assert cp.array_equal(cp.from_dlpack(a), cp.from_dlpack(f))
+assert tp.array_equal(a, f)

def test_zero_rank(self):
t = tp.Tensor(1)
f = tp.flip(t)
-assert cp.array_equal(cp.from_dlpack(t), cp.from_dlpack(f))
+assert tp.array_equal(t, f)

@pytest.mark.parametrize(
"dims1, dims2",
Expand All @@ -56,4 +57,4 @@ def test_equivalences(self, dims1, dims2):
a = tp.Tensor(cp_a, device=tp.device("gpu"))
f1 = tp.flip(a, dims=dims1)
f2 = tp.flip(a, dims=dims2)
-assert cp.array_equal(cp.from_dlpack(f1), cp.from_dlpack(f2))
+assert tp.array_equal(f1, f2)
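Context for the `TODO(129)` comment in `test_flip` above: `np.flip` returns a view with negative strides rather than copying, which is why that one assertion keeps the NumPy comparison. A quick illustration in plain NumPy:

```python
import numpy as np

a = np.arange(4.0).reshape(2, 2)
f = np.flip(a, axis=0)
print(a.strides, f.strides)  # e.g. (16, 8) (-16, 8): the flipped view strides backwards

# A contiguous copy would be required before handing this buffer to tripy:
f_contig = np.ascontiguousarray(f)
print(f_contig.strides)      # (16, 8) again
```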
6 changes: 3 additions & 3 deletions tripy/tests/integration/test_full.py
@@ -24,14 +24,14 @@
class TestFull:
def test_normal_shape(self):
out = tp.full((2, 2), 5.0, tp.float32)
-assert np.array_equal(cp.from_dlpack(out).get(), np.full((2, 2), 5.0, np.float32))
+assert tp.array_equal(out, tp.Tensor(np.full((2, 2), 5.0, np.float32)))

def test_shape_tensor(self):
a = tp.ones((2, 3))
out = tp.full(a.shape, 5.0, tp.float32)
-assert np.array_equal(cp.from_dlpack(out).get(), np.full((2, 3), 5.0, np.float32))
+assert tp.array_equal(out, tp.Tensor(np.full((2, 3), 5.0, np.float32)))

def test_mixed_shape(self):
a = tp.ones((2, 3))
out = tp.full((a.shape[0], 4), 5.0, tp.float32)
-assert np.array_equal(cp.from_dlpack(out).get(), np.full((2, 4), 5.0, np.float32))
+assert tp.array_equal(out, tp.Tensor(np.full((2, 4), 5.0, np.float32)))
20 changes: 15 additions & 5 deletions tripy/tests/integration/test_iota.py
@@ -32,18 +32,28 @@ class TestIota:
(("int32", tp.common.datatype.int32)),
]

def _compute_ref_iota(self, dtype, shape, dim):
if dim is None:
dim = 0
elif dim < 0:
dim += len(shape)

expected = np.arange(0, shape[dim], dtype=dtype)

if dim < len(shape) - 1:
expand_dims = [1 + i for i in range(len(shape) - 1 - dim)]
expected = np.expand_dims(expected, expand_dims)
-expected = np.broadcast_to(expected, shape)

+repeats = [1] * len(shape)
+for i in range(len(shape)):
+    if i != dim:
+        repeats[i] = shape[i]
+
+expected = np.tile(expected, repeats)

return expected


@pytest.mark.parametrize("dtype", DTYPE_PARAMS)
@pytest.mark.parametrize(
"shape, dim",
@@ -103,9 +113,9 @@ def test_negative_no_casting(self, dtype):
def test_iota_from_shape_tensor(self):
a = tp.ones((2, 2))
output = tp.iota(a.shape)
-assert np.array_equal(cp.from_dlpack(output).get(), self._compute_ref_iota("float32", (2, 2), 0))
+assert tp.array_equal(output, tp.Tensor(self._compute_ref_iota("float32", (2, 2), 0)))

-def test_iota_from_mixed_seqence(self):
+def test_iota_from_mixed_sequence(self):
a = tp.ones((2, 2))
output = tp.iota((3, a.shape[0]))
-assert np.array_equal(cp.from_dlpack(output).get(), self._compute_ref_iota("float32", (3, 2), 0))
+assert tp.array_equal(output, tp.Tensor(self._compute_ref_iota("float32", (3, 2), 0)))
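As a sanity check on the rewritten `_compute_ref_iota` helper above: the expand-then-tile construction places the running index along `dim` and repeats it across every other axis. For example, mirroring the helper's logic in plain NumPy:

```python
import numpy as np

shape, dim = (2, 3), 0
expected = np.arange(0, shape[dim], dtype=np.float32)  # [0., 1.]
expected = np.expand_dims(expected, [1])               # shape (2, 1)
repeats = [1 if i == dim else shape[i] for i in range(len(shape))]  # [1, 3]
print(np.tile(expected, repeats))
# [[0. 0. 0.]
#  [1. 1. 1.]]
```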
2 changes: 1 addition & 1 deletion tripy/tests/integration/test_linear.py
@@ -132,4 +132,4 @@ def test_quant_linear_int4_weight_only(self, weight_quant_dim, scale):

np_out = cp_input.get() @ (np_weight.transpose()) + np_bias

-assert np.array_equal(cp.from_dlpack(out).get(), np_out)
+assert tp.array_equal(out, tp.Tensor(np_out))
15 changes: 9 additions & 6 deletions tripy/tests/integration/test_reduce.py
@@ -42,8 +42,7 @@ def test_all(self, x_shape, axis, keepdim):
out = tp.all(a, dim=axis, keepdim=keepdim)
expected = tp.Tensor(np.array(x.all(axis=axis, keepdims=keepdim)))
#np.array is necessary to deal with case where x.all returns a numpy scalar (5th case)
-assert out.shape == expected.shape
-assert tp.allclose(out, expected)
+assert tp.array_equal(out, expected)

@pytest.mark.parametrize(
"x_shape, axis, keepdim",
@@ -57,7 +56,6 @@ def test_all(self, x_shape, axis, keepdim):
((2, 3, 4, 5), (-2, -1), True),
],
)
-
def test_any(self, x_shape, axis, keepdim):
x = np.array([i % 2 == 0 for i in np.arange(np.prod(x_shape))]).reshape(x_shape)
a = tp.Tensor(x)
@@ -68,7 +66,12 @@ def test_any(self, x_shape, axis, keepdim):
"x_shape, axis, keepdim",
[
((2, 3), 1, True),
-pytest.param((2, 3, 4), (1, 2), True, marks=pytest.mark.skip(reason="For this test case without out.eval() tp.allclose fails. (Issue #)")),
+pytest.param(
+    (2, 3, 4),
+    (1, 2),
+    True,
+    marks=pytest.mark.skip(reason="For this test case without out.eval() tp.allclose fails. (Issue #)"),
+),
((2, 3), 1, False),
((2, 3, 4), (1, 2), False),
((2, 3, 4), None, False),
@@ -122,7 +125,7 @@ def test_argmax(self, x_shape, axis, keepdim: bool):
x = np.arange(np.prod(x_shape)).reshape(x_shape).astype(np.float32)
a = tp.Tensor(x)
out = tp.argmax(a, dim=axis, keepdim=keepdim)
-assert np.array_equal(cp.from_dlpack(out).get(), np.array(x.argmax(axis=axis, keepdims=keepdim)))
+assert tp.array_equal(out, tp.Tensor(np.array(x.argmax(axis=axis, keepdims=keepdim))))

@pytest.mark.parametrize(
"x_shape, axis, keepdim",
@@ -139,4 +142,4 @@ def test_argmin(self, x_shape, axis, keepdim: bool):
x = np.arange(np.prod(x_shape)).reshape(x_shape).astype(np.float32)
a = tp.Tensor(x)
out = tp.argmin(a, dim=axis, keepdim=keepdim)
-assert np.array_equal(cp.from_dlpack(out).get(), np.array(x.argmin(axis=axis, keepdims=keepdim)))
+assert tp.array_equal(out, tp.Tensor(np.array(x.argmin(axis=axis, keepdims=keepdim))))
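The `test_all` change at the top of this file drops the separate shape assertion because `tp.array_equal`, assuming it treats differing shapes as unequal (as the `(1, 4)` vs. `(4, 1)` integration case requires), checks shape and values in one call:

```python
import numpy as np
import tripy as tp

out = tp.Tensor(np.array([[True], [False]]))   # shape (2, 1)
expected = tp.Tensor(np.array([True, False]))  # shape (2,)

# Previously `assert out.shape == expected.shape` would fail first;
# now a single call covers both shape and value equality.
assert not tp.array_equal(out, expected)
```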
4 changes: 2 additions & 2 deletions tripy/tests/integration/test_reshape.py
@@ -39,7 +39,7 @@ def test_static_reshape(self, shape, new_shape):
b = tp.reshape(a, new_shape)
if -1 in new_shape:
new_shape = tuple(np.prod(shape) // -np.prod(new_shape) if d == -1 else d for d in new_shape)
-assert np.array_equal(cp.from_dlpack(b).get(), cp_a.reshape(new_shape).get())
+assert tp.array_equal(b, tp.Tensor(cp_a.reshape(new_shape).get()))

def test_invalid_neg_dim_reshape(self):
shape = (1, 30)
@@ -52,7 +52,7 @@ def test_reshape_shape_tensor(self):
a = tp.ones((2, 3, 4))
b = tp.ones((2, 3, 2, 2))
out = tp.reshape(a, (a.shape[0], a.shape[1], b.shape[2], b.shape[3]))
-assert np.array_equal(cp.from_dlpack(out).get(), np.ones((2, 3, 2, 2), dtype=np.float32))
+assert tp.array_equal(out, tp.ones((2, 3, 2, 2), dtype=tp.float32))

def test_reshape_shape_with_unknown(self):
a = tp.ones((2, 3, 4))