diff --git a/ivy/__init__.py b/ivy/__init__.py index c7e9c36bf38ea..d9afb1632c9ba 100644 --- a/ivy/__init__.py +++ b/ivy/__init__.py @@ -83,6 +83,10 @@ class CPTensor: pass +class TRTensor: + pass + + class Parafac2Tensor: pass @@ -766,8 +770,13 @@ class Node(str): add_ivy_container_instance_methods, ) from .data_classes.nested_array import NestedArray -from .data_classes.factorized_tensor import TuckerTensor, CPTensor, Parafac2Tensor -from .data_classes.factorized_tensor import TuckerTensor, CPTensor, TTTensor +from .data_classes.factorized_tensor import ( + TuckerTensor, + CPTensor, + TRTensor, + TTTensor, + Parafac2Tensor, +) from ivy.utils.backend import ( current_backend, compiled_backends, diff --git a/ivy/data_classes/factorized_tensor/__init__.py b/ivy/data_classes/factorized_tensor/__init__.py index a784c9ed5dca8..5f62ed706cdae 100644 --- a/ivy/data_classes/factorized_tensor/__init__.py +++ b/ivy/data_classes/factorized_tensor/__init__.py @@ -1,4 +1,5 @@ from .tucker_tensor import TuckerTensor from .cp_tensor import CPTensor +from .tr_tensor import TRTensor from .parafac2_tensor import Parafac2Tensor from .tt_tensor import TTTensor diff --git a/ivy/data_classes/factorized_tensor/tr_tensor.py b/ivy/data_classes/factorized_tensor/tr_tensor.py new file mode 100644 index 0000000000000..ac7f76f27b4e8 --- /dev/null +++ b/ivy/data_classes/factorized_tensor/tr_tensor.py @@ -0,0 +1,197 @@ +# local + +from .base import FactorizedTensor +import ivy + +# global +import warnings + + +class TRTensor(FactorizedTensor): + def __init__(self, factors): + super().__init__() + shape, rank = TRTensor.validate_tr_tensor(factors) + self.shape = tuple(shape) + self.rank = tuple(rank) + self.factors = factors + + # Built-ins # + # ----------# + def __getitem__(self, index): + return self.factors[index] + + def __setitem__(self, index, value): + self.factors[index] = value + + def __iter__(self): + for index in range(len(self)): + yield self[index] + + def __len__(self): + return len(self.factors) + + def __repr__(self): + message = ( + f"factors list : rank-{self.rank} tensor ring tensor of shape {self.shape}" + ) + return message + + # Public Methods # + # ---------------# + + def to_tensor(self): + return TRTensor.tr_to_tensor(self.factors) + + def to_unfolded(self, mode): + return TRTensor.tr_to_unfolded(self.factors, mode) + + def to_vec(self): + return TRTensor.tr_to_vec(self.factors) + + # Properties # + # ---------------# + @property + def n_param(self): + factors = self.factors + total_params = sum(int(ivy.prod(tensor.shape)) for tensor in factors) + return total_params + + # Class Methods # + # ---------------# + @staticmethod + def validate_tr_tensor(factors): + n_factors = len(factors) + + if n_factors < 2: + raise ValueError( + "A Tensor Ring tensor should be composed of at least two factors." + f"However, {n_factors} factor was given." + ) + + rank = [] + shape = [] + next_rank = None + for index, factor in enumerate(factors): + current_rank, current_shape, next_rank = ivy.shape(factor) + + # Check that factors are third order tensors + if not len(factor.shape) == 3: + raise ValueError( + "TR expresses a tensor as third order factors (tr-cores).\n" + f"However, ivy.ndim(factors[{index}]) = {len(factor.shape)}" + ) + + # Consecutive factors should have matching ranks + if ivy.shape(factors[index - 1])[2] != current_rank: + raise ValueError( + "Consecutive factors should have matching ranks\n -- e.g." 
+ " ivy.shape(factors[0])[2]) == ivy.shape(factors[1])[0])\nHowever," + f" ivy.shape(factor[{index-1}])[2] ==" + f" {ivy.shape(factors[index-1])[2]} but" + f" ivy.shape(factor[{index}])[0] == {current_rank}" + ) + + shape.append(current_shape) + rank.append(current_rank) + + # Add last rank (boundary condition) + rank.append(next_rank) + + return tuple(shape), tuple(rank) + + @staticmethod + def tr_to_tensor(factors): + full_shape = [f.shape[1] for f in factors] + full_tensor = ivy.reshape(factors[0], (-1, factors[0].shape[2])) + + for factor in factors[1:-1]: + rank_prev, _, rank_next = factor.shape + factor = ivy.reshape(factor, (rank_prev, -1)) + full_tensor = ivy.dot(full_tensor, factor) + full_tensor = ivy.reshape(full_tensor, (-1, rank_next)) + + full_tensor = ivy.reshape( + full_tensor, (factors[-1].shape[2], -1, factors[-1].shape[0]) + ) + full_tensor = ivy.moveaxis(full_tensor, 0, -1) + full_tensor = ivy.reshape( + full_tensor, (-1, factors[-1].shape[0] * factors[-1].shape[2]) + ) + factor = ivy.moveaxis(factors[-1], -1, 1) + factor = ivy.reshape(factor, (-1, full_shape[-1])) + full_tensor = ivy.dot(full_tensor, factor) + return ivy.reshape(full_tensor, full_shape) + + @staticmethod + def tr_to_unfolded(factors, mode): + return ivy.unfold(TRTensor.tr_to_tensor(factors), mode) + + @staticmethod + def tr_to_vec(factors): + return ivy.reshape( + TRTensor.tr_to_tensor(factors), + (-1,), + ) + + @staticmethod + def validate_tr_rank(tensor_shape, rank="same", rounding="round"): + if rounding == "ceil": + rounding_fun = ivy.ceil + elif rounding == "floor": + rounding_fun = ivy.floor + elif rounding == "round": + rounding_fun = ivy.round + else: + raise ValueError( + f"Rounding should be round, floor or ceil, but got {rounding}" + ) + + if rank == "same": + rank = float(1) + + n_dim = len(tensor_shape) + if n_dim == 2: + warnings.warn( + "Determining the TR-rank for the trivial case of a matrix" + f" (order 2 tensor) of shape {tensor_shape}, not a higher-order tensor." + ) + + if isinstance(rank, float): + # Choose the *same* rank for each mode + n_param_tensor = ivy.prod(tensor_shape) * rank + + # R_k I_k R_{k+1} = R^2 I_k + solution = int( + rounding_fun(ivy.sqrt(n_param_tensor / ivy.sum(tensor_shape))) + ) + rank = (solution,) * (n_dim + 1) + + else: + # Check user input for potential errors + n_dim = len(tensor_shape) + if isinstance(rank, int): + rank = (rank,) * (n_dim + 1) + elif n_dim + 1 != len(rank): + message = ( + "Provided incorrect number of ranks. 
Should verify len(rank) =="
+                    f" len(tensor.shape)+1, but len(rank) = {len(rank)} while"
+                    f" len(tensor.shape)+1 = {n_dim + 1}"
+                )
+                raise ValueError(message)
+
+            # Check first and last rank
+            if rank[0] != rank[-1]:
+                message = (
+                    f"Provided rank[0] == {rank[0]} and rank[-1] == {rank[-1]}"
+                    " but boundary conditions dictate rank[0] == rank[-1]"
+                )
+                raise ValueError(message)
+
+        return list(rank)
+
+    @staticmethod
+    def tr_n_param(tensor_shape, rank):
+        factor_params = []
+        for i, s in enumerate(tensor_shape):
+            factor_params.append(rank[i] * s * rank[i + 1])
+        return ivy.sum(factor_params)
diff --git a/ivy/functional/backends/tensorflow/random.py b/ivy/functional/backends/tensorflow/random.py
index b0f3b97a3d877..e29c5f48101cb 100644
--- a/ivy/functional/backends/tensorflow/random.py
+++ b/ivy/functional/backends/tensorflow/random.py
@@ -13,7 +13,7 @@
 
 # local
 import ivy
-from ivy.func_wrapper import with_unsupported_dtypes
+from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
 from ivy.functional.ivy.random import (
     _check_bounds_and_get_shape,
     _randint_check_dtype_and_bound,
@@ -26,6 +26,9 @@
 # ------#
 
 
+@with_supported_dtypes(
+    {"2.13.0 and below": ("float", "int32", "int64")}, backend_version
+)
 def random_uniform(
     *,
     low: Union[float, tf.Tensor, tf.Variable] = 0.0,
diff --git a/ivy/functional/ivy/experimental/creation.py b/ivy/functional/ivy/experimental/creation.py
index 09624e57aaddc..6fb8a4909609c 100644
--- a/ivy/functional/ivy/experimental/creation.py
+++ b/ivy/functional/ivy/experimental/creation.py
@@ -861,6 +861,64 @@ def random_cp(
     return ivy.CPTensor((weights, factors))
 
 
+@handle_exceptions
+@handle_nestable
+@infer_dtype
+def random_tr(
+    shape: Sequence[int],
+    rank: Sequence[int],
+    /,
+    *,
+    dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
+    full: Optional[bool] = False,
+    seed: Optional[int] = None,
+) -> Union[ivy.TRTensor, ivy.Array]:
+    """
+    Generate a random TR tensor.
+
+    Parameters
+    ----------
+    shape : tuple
+        shape of the tensor to generate
+    rank : Sequence[int]
+        rank of the TR decomposition
+        must satisfy rank[0] == rank[-1] (boundary conditions)
+        and len(rank) == len(shape)+1
+    dtype : optional
+        data type of the generated factors, inferred if not given
+    full : bool, optional, default is False
+        if True, a full tensor is returned
+        otherwise, the decomposed tensor is returned
+    seed : int, optional
+        seed for generating random numbers
+
+    Returns
+    -------
+    ivy.TRTensor or ivy.Array if full is True
+    """
+    rank = ivy.TRTensor.validate_tr_rank(shape, rank)
+    # Make sure it's not a tuple but a list
+    rank = list(rank)
+    _check_first_and_last_rank_elements_are_equal(rank)
+    factors = [
+        ivy.random_uniform(shape=(rank[i], s, rank[i + 1]), dtype=dtype, seed=seed)
+        for i, s in enumerate(shape)
+    ]
+    if full:
+        return ivy.TRTensor.tr_to_tensor(factors)
+    else:
+        return ivy.TRTensor(factors)
+
+
+def _check_first_and_last_rank_elements_are_equal(rank):
+    if rank[0] != rank[-1]:
+        message = (
+            f"Provided rank[0] == {rank[0]} and rank[-1] == {rank[-1]} "
+            "but boundary conditions dictate rank[0] == rank[-1]."
+        )
+        raise ValueError(message)
+
+
 @handle_exceptions
 @handle_nestable
 @infer_dtype
@@ -993,8 +1051,9 @@ def trilu(
     out: Optional[ivy.Array] = None,
 ) -> ivy.Array:
     """
-    Return the upper or lower triangular part of a matrix (or a stack of matrices) ``x``
-    .. note::
+    Return the upper or lower triangular part of a matrix
+    (or a stack of matrices) ``x``.
+    .. note::
         The upper triangular part of the matrix is defined as the elements
         on and above the specified diagonal ``k``. The lower triangular part
         of the matrix is defined as the elements on and below the specified
diff --git a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_creation.py b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_creation.py
index 0f1ee860595aa..694601fe0c45e 100644
--- a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_creation.py
+++ b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_creation.py
@@ -1,3 +1,4 @@
+import pytest
 from hypothesis import strategies as st
 
 import numpy as np
@@ -45,6 +46,18 @@ def _random_parafac2_data(draw):
     return shapes, rank, dtype[0], full, seed, normalise_factors
 
 
+@st.composite
+def _random_tr_data(draw):
+    shape = draw(
+        st.lists(helpers.ints(min_value=1, max_value=5), min_size=2, max_size=4)
+    )
+    rank = min(shape)
+    dtype = draw(helpers.get_dtypes("valid", full=False))
+    full = draw(st.booleans())
+    seed = draw(st.one_of((st.just(None), helpers.ints(min_value=0, max_value=2000))))
+    return shape, rank, dtype[0], full, seed
+
+
 @st.composite
 def _random_tt_data(draw):
     shape = draw(
@@ -398,6 +411,7 @@ def test_ndindex(dtype_x_shape):
     data=_random_cp_data(),
     test_with_out=st.just(False),
     test_instance_method=st.just(False),
+    test_gradients=st.just(False),
 )
 def test_random_cp(
     *,
@@ -453,6 +467,73 @@ def test_random_cp(
         assert np.prod(f.shape) == np.prod(f_gt.shape)
 
 
+@handle_test(
+    fn_tree="functional.ivy.experimental.random_tr",
+    data=_random_tr_data(),
+    test_with_out=st.just(False),
+    test_instance_method=st.just(False),
+    test_gradients=st.just(False),
+)
+def test_random_tr(
+    *,
+    data,
+    test_flags,
+    backend_fw,
+    fn_name,
+    on_device,
+):
+    shape, rank, dtype, full, seed = data
+    results = helpers.test_function(
+        input_dtypes=[],
+        backend_to_test=backend_fw,
+        test_flags=test_flags,
+        on_device=on_device,
+        fn_name=fn_name,
+        shape=shape,
+        rank=rank,
+        dtype=dtype,
+        full=full,
+        seed=seed,
+        test_values=False,
+    )
+
+    ret_np, ret_from_gt_np = results
+
+    if full:
+        reconstructed_tensor = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw)
+        reconstructed_tensor_gt = helpers.flatten_and_to_np(
+            ret=ret_from_gt_np, backend=test_flags.ground_truth_backend
+        )
+        for x, x_gt in zip(reconstructed_tensor, reconstructed_tensor_gt):
+            assert np.prod(shape) == np.prod(x.shape)
+            assert np.prod(shape) == np.prod(x_gt.shape)
+
+    else:
+        core = helpers.flatten_and_to_np(ret=ret_np[0], backend=backend_fw)
+        factors = helpers.flatten_and_to_np(ret=ret_np[1], backend=backend_fw)
+        core_gt = helpers.flatten_and_to_np(
+            ret=ret_from_gt_np[0], backend=test_flags.ground_truth_backend
+        )
+        factors_gt = helpers.flatten_and_to_np(
+            ret=ret_from_gt_np[1], backend=test_flags.ground_truth_backend
+        )
+
+        for c, c_gt in zip(core, core_gt):
+            assert len(c) == rank
+            assert len(c_gt) == rank
+
+        for f, f_gt in zip(factors, factors_gt):
+            assert np.prod(f.shape) == np.prod(f_gt.shape)
+
+
+def test_random_tr_throws_error_when_rank_first_last_elem_not_equal():
+    rank = [2, 3]
+    shape = [1, 2, 3]
+    with pytest.raises(ValueError) as e:
+        ivy.random_tr(shape, rank)
+    assert e.value.args
+
+
 # **Uncomment when Tensorly validation issue is resolved.**
 # https://github.com/tensorly/tensorly/issues/528
 # @handle_test(
@@ -575,6 +656,7 @@ def test_random_tt(
     data=_random_tucker_data(),
     test_with_out=st.just(False),
     test_instance_method=st.just(False),
+
test_gradients=st.just(False), ) def test_random_tucker( *, diff --git a/ivy_tests/test_ivy/test_misc/test_factorized_tensor/__init__.py b/ivy_tests/test_ivy/test_misc/test_factorized_tensor/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/ivy_tests/test_ivy/test_misc/test_cp_tensor.py b/ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_cp_tensor.py similarity index 100% rename from ivy_tests/test_ivy/test_misc/test_cp_tensor.py rename to ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_cp_tensor.py diff --git a/ivy_tests/test_ivy/test_misc/test_parafac2_tensor.py b/ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_parafac2_tensor.py similarity index 100% rename from ivy_tests/test_ivy/test_misc/test_parafac2_tensor.py rename to ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_parafac2_tensor.py diff --git a/ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_tr_tensor.py b/ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_tr_tensor.py new file mode 100644 index 0000000000000..e48dca6ccb866 --- /dev/null +++ b/ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_tr_tensor.py @@ -0,0 +1,100 @@ +import ivy + +import numpy as np +import pytest + + +@pytest.mark.parametrize( + "shape1, shape2, shape3", + [ + ( + (2, 4, 3), + (3, 5, 2), + (2, 6, 2), + ) + ], +) +def test_tr_to_tensor(shape1, shape2, shape3): + # Create ground truth TR factors + factors = [ + ivy.random_uniform(shape=shape1), + ivy.random_uniform(shape=shape2), + ivy.random_uniform(shape=shape3), + ] + + # Create tensor + tensor = ivy.einsum("iaj,jbk,kci->abc", *factors) + + # Check that TR factors re-assemble to the original tensor + assert np.allclose(tensor, ivy.TRTensor.tr_to_tensor(factors), atol=1e-6, rtol=1e-6) + + +@pytest.mark.parametrize( + "rank1, rank2", + [((2, 3, 4, 2), (2, 3, 4, 2, 3))], +) +def test_validate_tr_rank(rank1, rank2): + tensor_shape = tuple(np.random.randint(1, 100, size=4)) + n_param_tensor = np.prod(tensor_shape) + + # Rounding = floor + rank = ivy.TRTensor.validate_tr_rank(tensor_shape, rank="same", rounding="floor") + n_param = ivy.TRTensor.tr_n_param(tensor_shape, rank) + assert n_param <= n_param_tensor + + # Rounding = ceil + rank = ivy.TRTensor.validate_tr_rank(tensor_shape, rank="same", rounding="ceil") + n_param = ivy.TRTensor.tr_n_param(tensor_shape, rank) + assert n_param >= n_param_tensor + + # Integer rank + with np.testing.assert_raises(ValueError): + ivy.TRTensor.validate_tr_rank(tensor_shape, rank=rank1) + + with np.testing.assert_raises(ValueError): + ivy.TRTensor.validate_tr_rank(tensor_shape, rank=rank2) + + +# These tests have been adapted from Tensorly +# https://github.com/tensorly/tensorly/blob/main/tensorly/tests/test_tr_tensor.py + + +@pytest.mark.parametrize( + "true_shape, true_rank", + [ + ( + (6, 4, 5), + (3, 2, 2, 3), + ) + ], +) +def test_validate_tr_tensor(true_shape, true_rank): + factors = ivy.random_tr(true_shape, true_rank).factors + + # Check correct rank and shapes are returned + shape, rank = ivy.TRTensor.validate_tr_tensor(factors) + np.testing.assert_equal( + shape, + true_shape, + err_msg=f"Returned incorrect shape (got {shape}, expected {true_shape})", + ) + np.testing.assert_equal( + rank, + true_rank, + err_msg=f"Returned incorrect rank (got {rank}, expected {true_rank})", + ) + + # One of the factors has the wrong ndim + factors[0] = ivy.random_uniform(shape=(4, 4)) + with np.testing.assert_raises(ValueError): + ivy.TRTensor.validate_tr_tensor(factors) + + # Consecutive factors 
ranks don't match + factors[0] = ivy.random_uniform(shape=(3, 6, 4)) + with np.testing.assert_raises(ValueError): + ivy.TRTensor.validate_tr_tensor(factors) + + # Boundary conditions not respected + factors[0] = ivy.random_uniform(shape=(2, 6, 2)) + with np.testing.assert_raises(ValueError): + ivy.TRTensor.validate_tr_tensor(factors) diff --git a/ivy_tests/test_ivy/test_misc/test_tt_tensor.py b/ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_tt_tensor.py similarity index 100% rename from ivy_tests/test_ivy/test_misc/test_tt_tensor.py rename to ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_tt_tensor.py diff --git a/ivy_tests/test_ivy/test_misc/test_tucker_tensor.py b/ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_tucker_tensor.py similarity index 100% rename from ivy_tests/test_ivy/test_misc/test_tucker_tensor.py rename to ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_tucker_tensor.py
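
Usage sketch (editorial illustration, not part of the diff above): how the new ivy.random_tr / ivy.TRTensor API fits together, assuming an ivy install with the numpy backend set. The shapes, ranks and seed are arbitrary example values, and the final check mirrors the einsum reconstruction used in test_tr_to_tensor.

import numpy as np
import ivy

ivy.set_backend("numpy")  # any supported backend would do

shape = (6, 4, 5)
rank = (3, 2, 2, 3)  # len(rank) == len(shape) + 1 and rank[0] == rank[-1]

# Decomposed form: a TRTensor whose i-th core has shape (rank[i], shape[i], rank[i + 1]).
tr = ivy.random_tr(shape, rank, seed=0)
print(tr.shape, tr.rank, len(tr.factors))  # (6, 4, 5) (3, 2, 2, 3) 3

# Full form: contract the ring of cores back into a dense tensor.
dense = ivy.TRTensor.tr_to_tensor(tr.factors)

# The same contraction written as a single einsum over the three cores,
# exactly as in test_tr_to_tensor above.
ref = ivy.einsum("iaj,jbk,kci->abc", *tr.factors)
assert np.allclose(ivy.to_numpy(dense), ivy.to_numpy(ref), atol=1e-6)

# full=True skips the TRTensor wrapper and returns the dense tensor directly.
assert tuple(ivy.random_tr(shape, rank, full=True, seed=0).shape) == shape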
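
For reference, the rank="same" branch of TRTensor.validate_tr_rank picks one ring rank R for every mode so that the TR parameter count, R**2 * sum(shape), roughly matches the requested fraction of the dense parameter count prod(shape). The standalone restatement below is only a sketch of that arithmetic; the helper name uniform_tr_rank is illustrative and not part of the PR.

import math

def uniform_tr_rank(tensor_shape, fraction=1.0, rounding=round):
    # Each TR core holds rank[i] * shape[i] * rank[i + 1] parameters, so with a
    # single rank R everywhere the total is R**2 * sum(tensor_shape).  Solve
    # R**2 * sum(shape) ~= fraction * prod(shape) for R, then round.
    target = fraction * math.prod(tensor_shape)
    r = int(rounding(math.sqrt(target / sum(tensor_shape))))
    return (r,) * (len(tensor_shape) + 1)

# With floor rounding the resulting parameter count never exceeds the dense count,
# which is what test_validate_tr_rank asserts via TRTensor.tr_n_param.
rank = uniform_tr_rank((6, 4, 5), rounding=math.floor)
n_param = sum(rank[i] * s * rank[i + 1] for i, s in enumerate((6, 4, 5)))
assert n_param <= 6 * 4 * 5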