From 5690a2df233c206e596d0e76a1f6c2f07abefa54 Mon Sep 17 00:00:00 2001
From: HamzaGbada <hamza.gbada@gmail.com>
Date: Sat, 17 Feb 2024 22:01:48 +0100
Subject: [PATCH 01/10] [init] lu

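Add an initial stub for ivy.lu in the experimental linear-algebra API.
The keyword arguments are copied from the neighbouring diagflat
signature as a placeholder and are trimmed in the next commit.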
---
 .../ivy/experimental/linear_algebra.py        | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/ivy/functional/ivy/experimental/linear_algebra.py b/ivy/functional/ivy/experimental/linear_algebra.py
index f98ec8a896e14..863fa8217e846 100644
--- a/ivy/functional/ivy/experimental/linear_algebra.py
+++ b/ivy/functional/ivy/experimental/linear_algebra.py
@@ -227,6 +227,25 @@ def diagflat(
         out=out,
     )
 
+@handle_exceptions
+@handle_backend_invalid
+@handle_nestable
+@handle_array_like_without_promotion
+@handle_out_argument
+@to_native_arrays_and_back
+@handle_device
+def lu(
+    x: Union[ivy.Array, ivy.NativeArray],
+    /,
+    *,
+    offset: int = 0,
+    padding_value: float = 0,
+    align: str = "RIGHT_LEFT",
+    num_rows: int = -1,
+    num_cols: int = -1,
+    out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
+) -> ivy.Array:
+    pass
 
 @handle_exceptions
 @handle_backend_invalid

From 9e359b4800e1247d9d3383f2aea31d024abe08a0 Mon Sep 17 00:00:00 2001
From: HamzaGbada <hamza.gbada@gmail.com>
Date: Sat, 17 Feb 2024 22:07:12 +0100
Subject: [PATCH 02/10] [init] lu

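Implement a first pure-Python LU decomposition based on Doolittle's
method, which factors a square matrix x into a unit-lower-triangular L
and an upper-triangular U via the recurrences

    U[i][j] = x[i][j] - sum(L[i][k] * U[k][j] for k in range(i))
    L[i][j] = (x[i][j] - sum(L[i][k] * U[k][j] for k in range(j))) / U[j][j]

A quick sanity check of the recurrences (a standalone NumPy sketch, not
part of this patch; it assumes every pivot U[j][j] is non-zero, since
Doolittle's method performs no pivoting):

    import numpy as np

    def doolittle(a):
        n = a.shape[0]
        # unit diagonal for L; U starts empty
        L, U = np.eye(n), np.zeros((n, n))
        for j in range(n):
            for i in range(j + 1):
                U[i, j] = a[i, j] - L[i, :i] @ U[:i, j]
            for i in range(j + 1, n):
                L[i, j] = (a[i, j] - L[i, :j] @ U[:j, j]) / U[j, j]
        return L, U

    a = np.array([[4.0, 3.0], [6.0, 3.0]])
    L, U = doolittle(a)
    assert np.allclose(L @ U, a)

The return statement is still a leftover from kron and is wired up to
the backend handler in the next commit.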
---
 .../ivy/experimental/linear_algebra.py        | 35 +++++++++++++++----
 1 file changed, 28 insertions(+), 7 deletions(-)

diff --git a/ivy/functional/ivy/experimental/linear_algebra.py b/ivy/functional/ivy/experimental/linear_algebra.py
index 863fa8217e846..c3c6d17311add 100644
--- a/ivy/functional/ivy/experimental/linear_algebra.py
+++ b/ivy/functional/ivy/experimental/linear_algebra.py
@@ -238,14 +238,35 @@ def lu(
     x: Union[ivy.Array, ivy.NativeArray],
     /,
     *,
-    offset: int = 0,
-    padding_value: float = 0,
-    align: str = "RIGHT_LEFT",
-    num_rows: int = -1,
-    num_cols: int = -1,
-    out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
+    out: Optional[ivy.Array] = None,
 ) -> ivy.Array:
-    pass
+    """
+    Perform LU decomposition of a square matrix using Doolittle's method.
+
+    Args:
+    - x: a square ivy array representing the input matrix
+
+    Returns:
+    - L: Lower triangular matrix
+    - U: Upper triangular matrix
+    """
+    n = len(x)
+    L = ivy.zeros((n,n))
+    U = ivy.zeros((n,n))
+
+    for j in range(n):
+        L[j][j] = 1.0
+        for i in range(j + 1):
+            s1 = sum(U[k][j] * L[i][k] for k in range(i))
+            U[i][j] = x[i][j] - s1
+
+        for i in range(j, n):
+            s2 = sum(U[k][j] * L[i][k] for k in range(j))
+            L[i][j] = (x[i][j] - s2) / U[j][j]
+
+    # TODO: return something like this:
+    # TODO: implement lu in container linear_algebra.py and in array/.../linear_algebra.py
+    return current_backend(a, b).kron(a, b, out=out)
 
 @handle_exceptions
 @handle_backend_invalid

From ad759c5d828e78897c9732608d5e133f5ef479d9 Mon Sep 17 00:00:00 2001
From: HamzaGbada <hamza.gbada@gmail.com>
Date: Sat, 17 Feb 2024 22:13:28 +0100
Subject: [PATCH 03/10] [init] lu

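Route ivy.lu through the backend dispatcher instead of the leftover
kron call, so whichever backend is currently set provides the native
implementation. A minimal usage sketch, assuming the TensorFlow
backend is selected:

    import ivy

    ivy.set_backend("tensorflow")
    x = ivy.array([[4.0, 3.0], [6.0, 3.0]])
    result = ivy.lu(x)  # dispatched to the tensorflow backend's lu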
---
 ivy/functional/ivy/experimental/linear_algebra.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/ivy/functional/ivy/experimental/linear_algebra.py b/ivy/functional/ivy/experimental/linear_algebra.py
index c3c6d17311add..4a3ab941aaaf7 100644
--- a/ivy/functional/ivy/experimental/linear_algebra.py
+++ b/ivy/functional/ivy/experimental/linear_algebra.py
@@ -265,8 +265,10 @@ def lu(
             L[i][j] = (x[i][j] - s2) / U[j][j]
 
     # TODO: return something like this:
-    # TODO: implement lu in container linear_algebra.py and in array/.../linear_algebra.py
-    return current_backend(a, b).kron(a, b, out=out)
+    # TODO: implement lu in container/.../linear_algebra.py (a static
+    #  method plus an instance method that calls it) and in
+    #  array/.../linear_algebra.py
+    return current_backend(x).lu(x, out=out)
 
 @handle_exceptions
 @handle_backend_invalid

From 91b0fe279b9011d1d1b2eac6dfe5987cf8f6455b Mon Sep 17 00:00:00 2001
From: HamzaGbada <hamza.gbada@gmail.com>
Date: Fri, 23 Feb 2024 10:13:36 +0100
Subject: [PATCH 04/10] lu_extension

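Wire lu through the public API: an ivy.Array instance method, an
ivy.Container static/instance method pair, and implementations for the
jax, numpy, tensorflow and torch backends. Note that the native
routines wrapped here do not share a return convention, so the result
still needs to be normalised to a single layout in a later revision:

    scipy.linalg.lu(x)   # -> (p, l, u): three dense matrices
    tf.linalg.lu(x)      # -> (lu, p): packed LU plus pivot indices
    torch.linalg.lu(x)   # -> (P, L, U): a named tuple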
---
 .../array/experimental/linear_algebra.py      | 293 ++++++++++--------
 .../container/experimental/linear_algebra.py  |  49 +++
 .../jax/experimental/linear_algebra.py        |   8 +
 .../numpy/experimental/linear_algebra.py      |   9 +
 .../tensorflow/experimental/linear_algebra.py | 181 ++++++-----
 .../torch/experimental/linear_algebra.py      | 109 ++++---
 .../ivy/experimental/linear_algebra.py        |  20 +-
 7 files changed, 378 insertions(+), 291 deletions(-)

diff --git a/ivy/data_classes/array/experimental/linear_algebra.py b/ivy/data_classes/array/experimental/linear_algebra.py
index 865c4a1296942..999b5ae9c0d24 100644
--- a/ivy/data_classes/array/experimental/linear_algebra.py
+++ b/ivy/data_classes/array/experimental/linear_algebra.py
@@ -8,16 +8,16 @@
 
 class _ArrayWithLinearAlgebraExperimental(abc.ABC):
     def eigh_tridiagonal(
-        self: Union[ivy.Array, ivy.NativeArray],
-        beta: Union[ivy.Array, ivy.NativeArray],
-        /,
-        *,
-        eigvals_only: bool = True,
-        select: str = "a",
-        select_range: Optional[
-            Union[Tuple[int, int], List[int], ivy.Array, ivy.NativeArray]
-        ] = None,
-        tol: Optional[float] = None,
+            self: Union[ivy.Array, ivy.NativeArray],
+            beta: Union[ivy.Array, ivy.NativeArray],
+            /,
+            *,
+            eigvals_only: bool = True,
+            select: str = "a",
+            select_range: Optional[
+                Union[Tuple[int, int], List[int], ivy.Array, ivy.NativeArray]
+            ] = None,
+            tol: Optional[float] = None,
     ) -> Union[ivy.Array, Tuple[ivy.Array, ivy.Array]]:
         """ivy.Array instance method variant of ivy.eigh_tridiagonal. This
         method simply wraps the function, and so the docstring for
@@ -78,15 +78,15 @@ def eigh_tridiagonal(
         )
 
     def diagflat(
-        self: Union[ivy.Array, ivy.NativeArray],
-        /,
-        *,
-        offset: int = 0,
-        padding_value: float = 0,
-        align: str = "RIGHT_LEFT",
-        num_rows: int = -1,
-        num_cols: int = -1,
-        out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
+            self: Union[ivy.Array, ivy.NativeArray],
+            /,
+            *,
+            offset: int = 0,
+            padding_value: float = 0,
+            align: str = "RIGHT_LEFT",
+            num_rows: int = -1,
+            num_cols: int = -1,
+            out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.diagflat. This method
         simply wraps the function, and so the docstring for ivy.diagflat also
@@ -111,11 +111,30 @@ def diagflat(
         )
 
+    def lu(
+            self: ivy.Array,
+            /,
+            *,
+            out: Optional[ivy.Array] = None,
+    ) -> ivy.Array:
+        """ivy.Array instance method variant of ivy.lu. This method simply
+        wraps the function, and so the docstring for ivy.lu also applies to
+        this method with minimal changes.
+
+        Examples
+        --------
+        >>> x = ivy.array([[4., 3.],
+        ...                [6., 3.]])
+        >>> lu_result = x.lu()
+        """
+        return ivy.lu(self._data, out=out)
+
     def kron(
-        self: ivy.Array,
-        b: ivy.Array,
-        /,
-        *,
-        out: Optional[ivy.Array] = None,
+            self: ivy.Array,
+            b: ivy.Array,
+            /,
+            *,
+            out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.kron. This method simply
         wraps the function, and so the docstring for ivy.kron also applies to
@@ -150,8 +169,8 @@ def matrix_exp(self: ivy.Array, /, *, out: Optional[ivy.Array] = None) -> ivy.Ar
         return ivy.matrix_exp(self._data, out=out)
 
     def eig(
-        self: ivy.Array,
-        /,
+            self: ivy.Array,
+            /,
     ) -> Tuple[ivy.Array, ...]:
         """ivy.Array instance method variant of ivy.eig. This method simply
         wraps the function, and so the docstring for ivy.eig also applies to
@@ -170,8 +189,8 @@ def eig(
         return ivy.eig(self._data)
 
     def eigvals(
-        self: ivy.Array,
-        /,
+            self: ivy.Array,
+            /,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.eigvals. This method simply
         wraps the function, and so the docstring for ivy.eigvals also applies
@@ -186,10 +205,10 @@ def eigvals(
         return ivy.eigvals(self._data)
 
     def adjoint(
-        self: ivy.Array,
-        /,
-        *,
-        out: Optional[ivy.Array] = None,
+            self: ivy.Array,
+            /,
+            *,
+            out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.adjoint. This method simply
         wraps the function, and so the docstring for ivy.adjoint also applies
@@ -210,11 +229,11 @@ def adjoint(
         )
 
     def multi_dot(
-        self: ivy.Array,
-        x: Sequence[Union[ivy.Array, ivy.NativeArray]],
-        /,
-        *,
-        out: Optional[ivy.Array] = None,
+            self: ivy.Array,
+            x: Sequence[Union[ivy.Array, ivy.NativeArray]],
+            /,
+            *,
+            out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.multi_dot. This method
         simply wraps the function, and so the docstring for ivy.multi_dot also
@@ -232,7 +251,7 @@ def multi_dot(
         return ivy.multi_dot((self._data, *x), out=out)
 
     def cond(
-        self: ivy.Array, /, *, p: Optional[Union[int, float, str]] = None
+            self: ivy.Array, /, *, p: Optional[Union[int, float, str]] = None
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.cond. This method simply
         wraps the function, and so the docstring for ivy.cond also applies to
@@ -251,13 +270,13 @@ def cond(
         return ivy.cond(self._data, p=p)
 
     def mode_dot(
-        self: Union[ivy.Array, ivy.NativeArray],
-        /,
-        matrix_or_vector: Union[ivy.Array, ivy.NativeArray],
-        mode: int,
-        transpose: Optional[bool] = False,
-        *,
-        out: Optional[ivy.Array] = None,
+            self: Union[ivy.Array, ivy.NativeArray],
+            /,
+            matrix_or_vector: Union[ivy.Array, ivy.NativeArray],
+            mode: int,
+            transpose: Optional[bool] = False,
+            *,
+            out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.mode_dot. This method
         simply wraps the function, and so the docstring for ivy.mode_dot also
@@ -291,14 +310,14 @@ def mode_dot(
         return ivy.mode_dot(self._data, matrix_or_vector, mode, transpose, out=out)
 
     def multi_mode_dot(
-        self: Union[ivy.Array, ivy.NativeArray],
-        mat_or_vec_list: Sequence[Union[ivy.Array, ivy.NativeArray]],
-        /,
-        modes: Optional[Sequence[int]] = None,
-        skip: Optional[Sequence[int]] = None,
-        transpose: Optional[bool] = False,
-        *,
-        out: Optional[ivy.Array] = None,
+            self: Union[ivy.Array, ivy.NativeArray],
+            mat_or_vec_list: Sequence[Union[ivy.Array, ivy.NativeArray]],
+            /,
+            modes: Optional[Sequence[int]] = None,
+            skip: Optional[Sequence[int]] = None,
+            transpose: Optional[bool] = False,
+            *,
+            out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         r"""ivy.Array instance method variant of ivy.multi_mode_dot. This method
         simply wraps the function, and so the docstring for ivy.multi_mode_dot
@@ -341,10 +360,10 @@ def multi_mode_dot(
         )
 
     def svd_flip(
-        self: Union[ivy.Array, ivy.NativeArray],
-        V: Union[ivy.Array, ivy.NativeArray],
-        /,
-        u_based_decision: Optional[bool] = True,
+            self: Union[ivy.Array, ivy.NativeArray],
+            V: Union[ivy.Array, ivy.NativeArray],
+            /,
+            u_based_decision: Optional[bool] = True,
     ) -> Tuple[ivy.Array, ivy.Array]:
         """ivy.Array instance method variant of ivy.svd_flip. This method
         simply wraps the function, and so the docstring for ivy.svd_flip also
@@ -368,13 +387,13 @@ def svd_flip(
         return ivy.svd_flip(self._data, V, u_based_decision)
 
     def make_svd_non_negative(
-        self: Union[ivy.Array, ivy.NativeArray],
-        U: Union[ivy.Array, ivy.NativeArray],
-        S: Union[ivy.Array, ivy.NativeArray],
-        V: Union[ivy.Array, ivy.NativeArray],
-        /,
-        *,
-        nntype: Optional[Literal["nndsvd", "nndsvda"]] = "nndsvd",
+            self: Union[ivy.Array, ivy.NativeArray],
+            U: Union[ivy.Array, ivy.NativeArray],
+            S: Union[ivy.Array, ivy.NativeArray],
+            V: Union[ivy.Array, ivy.NativeArray],
+            /,
+            *,
+            nntype: Optional[Literal["nndsvd", "nndsvda"]] = "nndsvd",
     ) -> Tuple[ivy.Array, ivy.Array]:
         """ivy.Array instance method variant of ivy.make_svd_non_negative. This
         method simply wraps the function, and so the docstring for
@@ -400,11 +419,11 @@ def make_svd_non_negative(
         return ivy.make_svd_non_negative(self._data, U, S, V, nntype=nntype)
 
     def tensor_train(
-        self: Union[ivy.Array, ivy.NativeArray],
-        rank: Union[int, Sequence[int]],
-        /,
-        svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
-        verbose: Optional[bool] = False,
+            self: Union[ivy.Array, ivy.NativeArray],
+            rank: Union[int, Sequence[int]],
+            /,
+            svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
+            verbose: Optional[bool] = False,
     ) -> ivy.TTTensor:
         """ivy.Array instance method variant of ivy.tensor_train. This method
         simply wraps the function, and so the docstring for ivy.tensor_train
@@ -430,10 +449,10 @@ def tensor_train(
         return ivy.tensor_train(self._data, rank, svd=svd, verbose=verbose)
 
     def truncated_svd(
-        self: Union[ivy.Array, ivy.NativeArray],
-        /,
-        compute_uv: bool = True,
-        n_eigenvecs: Optional[int] = None,
+            self: Union[ivy.Array, ivy.NativeArray],
+            /,
+            compute_uv: bool = True,
+            n_eigenvecs: Optional[int] = None,
     ) -> Union[ivy.Array, Tuple[ivy.Array, ivy.Array, ivy.Array]]:
         """ivy.Array instance method variant of ivy.make_svd_non_negative. This
         method simply wraps the function, and so the docstring for
@@ -462,17 +481,17 @@ def truncated_svd(
         return ivy.truncated_svd(self._data, compute_uv, n_eigenvecs)
 
     def initialize_tucker(
-        self: Union[ivy.Array, ivy.NativeArray],
-        rank: Sequence[int],
-        modes: Sequence[int],
-        /,
-        *,
-        init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
-        seed: Optional[int] = None,
-        svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
-        non_negative: Optional[bool] = False,
-        mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
-        svd_mask_repeats: Optional[int] = 5,
+            self: Union[ivy.Array, ivy.NativeArray],
+            rank: Sequence[int],
+            modes: Sequence[int],
+            /,
+            *,
+            init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
+            seed: Optional[int] = None,
+            svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
+            non_negative: Optional[bool] = False,
+            mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
+            svd_mask_repeats: Optional[int] = 5,
     ) -> Tuple[ivy.Array, Sequence[ivy.Array]]:
         """ivy.Array instance method variant of ivy.initialize_tucker. This
         method simply wraps the function, and so the docstring for
@@ -524,20 +543,20 @@ def initialize_tucker(
         )
 
     def partial_tucker(
-        self: Union[ivy.Array, ivy.NativeArray],
-        rank: Optional[Sequence[int]] = None,
-        modes: Optional[Sequence[int]] = None,
-        /,
-        *,
-        n_iter_max: Optional[int] = 100,
-        init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
-        svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
-        seed: Optional[int] = None,
-        mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
-        svd_mask_repeats: Optional[int] = 5,
-        tol: Optional[float] = 10e-5,
-        verbose: Optional[bool] = False,
-        return_errors: Optional[bool] = False,
+            self: Union[ivy.Array, ivy.NativeArray],
+            rank: Optional[Sequence[int]] = None,
+            modes: Optional[Sequence[int]] = None,
+            /,
+            *,
+            n_iter_max: Optional[int] = 100,
+            init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
+            svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
+            seed: Optional[int] = None,
+            mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
+            svd_mask_repeats: Optional[int] = 5,
+            tol: Optional[float] = 10e-5,
+            verbose: Optional[bool] = False,
+            return_errors: Optional[bool] = False,
     ) -> Tuple[ivy.Array, Sequence[ivy.Array]]:
         """ivy.Array instance method variant of ivy.partial_tucker. This method
         simply wraps the function, and so the docstring for ivy.partial_tucker
@@ -605,20 +624,20 @@ def partial_tucker(
         )
 
     def tucker(
-        self: Union[ivy.Array, ivy.NativeArray],
-        rank: Optional[Sequence[int]] = None,
-        /,
-        *,
-        fixed_factors: Optional[Sequence[int]] = None,
-        n_iter_max: Optional[int] = 100,
-        init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
-        svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
-        seed: Optional[int] = None,
-        mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
-        svd_mask_repeats: Optional[int] = 5,
-        tol: Optional[float] = 10e-5,
-        verbose: Optional[bool] = False,
-        return_errors: Optional[bool] = False,
+            self: Union[ivy.Array, ivy.NativeArray],
+            rank: Optional[Sequence[int]] = None,
+            /,
+            *,
+            fixed_factors: Optional[Sequence[int]] = None,
+            n_iter_max: Optional[int] = 100,
+            init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
+            svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
+            seed: Optional[int] = None,
+            mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
+            svd_mask_repeats: Optional[int] = 5,
+            tol: Optional[float] = 10e-5,
+            verbose: Optional[bool] = False,
+            return_errors: Optional[bool] = False,
     ):
         """ivy.Array instance method variant of ivy.tucker. This method simply
         wraps the function, and so the docstring for ivy.tucker also applies to
@@ -692,10 +711,10 @@ def tucker(
         )
 
     def tt_matrix_to_tensor(
-        self: Union[ivy.Array, ivy.NativeArray],
-        /,
-        *,
-        out: Optional[ivy.Array] = None,
+            self: Union[ivy.Array, ivy.NativeArray],
+            /,
+            *,
+            out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """Ivy.Array instance method variant of ivy.tt_matrix_to_tensor. This
         method simply wraps the function, and so the docstring for
@@ -744,11 +763,11 @@ def tt_matrix_to_tensor(
         return ivy.tt_matrix_to_tensor(self._data, out=out)
 
     def dot(
-        self: Union[ivy.Array, ivy.NativeArray],
-        b: Union[ivy.Array, ivy.NativeArray],
-        /,
-        *,
-        out: Optional[ivy.Array] = None,
+            self: Union[ivy.Array, ivy.NativeArray],
+            b: Union[ivy.Array, ivy.NativeArray],
+            /,
+            *,
+            out: Optional[ivy.Array] = None,
     ):
         """Compute the dot product between two arrays `a` and `b` using the
         current backend's implementation. The dot product is defined as the sum
@@ -796,12 +815,12 @@ def dot(
         return ivy.dot(self._data, b, out=out)
 
     def general_inner_product(
-        self: Union[ivy.Array, ivy.NativeArray],
-        b: Union[ivy.Array, ivy.NativeArray],
-        n_modes: Optional[int] = None,
-        /,
-        *,
-        out: Optional[ivy.Array] = None,
+            self: Union[ivy.Array, ivy.NativeArray],
+            b: Union[ivy.Array, ivy.NativeArray],
+            n_modes: Optional[int] = None,
+            /,
+            *,
+            out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.general_inner_product. This
         method simply wraps the function, and so the docstring for
@@ -852,11 +871,11 @@ def general_inner_product(
         return ivy.general_inner_product(self, b, n_modes, out=out)
 
     def higher_order_moment(
-        self: Union[ivy.Array, ivy.NativeArray],
-        order: int,
-        /,
-        *,
-        out: Optional[ivy.Array] = None,
+            self: Union[ivy.Array, ivy.NativeArray],
+            order: int,
+            /,
+            *,
+            out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.higher_order_moment. This
         method simply wraps the function, and so the docstring for
@@ -893,11 +912,11 @@ def higher_order_moment(
         return ivy.higher_order_moment(self._data, order, out=out)
 
     def batched_outer(
-        self: ivy.Array,
-        tensors: Sequence[Union[ivy.Array, ivy.NativeArray]],
-        /,
-        *,
-        out: Optional[ivy.Array] = None,
+            self: ivy.Array,
+            tensors: Sequence[Union[ivy.Array, ivy.NativeArray]],
+            /,
+            *,
+            out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """Ivy Array instance method variant of ivy.batched_outer. This method
         simply wraps the function, and so the docstring for ivy.batched_outer
diff --git a/ivy/data_classes/container/experimental/linear_algebra.py b/ivy/data_classes/container/experimental/linear_algebra.py
index 697d0ef749e52..866e7a7a4633d 100644
--- a/ivy/data_classes/container/experimental/linear_algebra.py
+++ b/ivy/data_classes/container/experimental/linear_algebra.py
@@ -349,6 +349,55 @@ def kron(
             out=out,
         )
 
+    @staticmethod
+    def static_lu(
+            x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
+            /,
+            *,
+            key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
+            to_apply: Union[bool, ivy.Container] = True,
+            out: Optional[ivy.Container] = None,
+    ) -> ivy.Container:
+        return ContainerBase.cont_multi_map_in_function(
+            "lu",
+            x,
+            out=out,
+            key_chains=key_chains,
+            to_apply=to_apply,
+        )
+
+    def lu(
+            self: ivy.Container,
+            /,
+            *,
+            key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
+            to_apply: Union[bool, ivy.Container] = True,
+            out: Optional[ivy.Container] = None,
+    ) -> ivy.Container:
+        """ivy.Container instance method variant of ivy.diagflat. This method
+        simply wraps the function, and so the docstring for ivy.diagflat also
+        applies to this method with minimal changes.
+
+        Examples
+        --------
+        >>> x = ivy.array([[[1., 0.],
+                            [0., 1.]],
+                            [[2., 0.],
+                            [0., 2.]]])
+        >>> ivy.matrix_exp(x)
+        ivy.array([[[2.7183, 1.0000],
+                    [1.0000, 2.7183]],
+                    [[7.3891, 1.0000],
+                    [1.0000, 7.3891]]])
+        """
+        return self.static_lu(
+            self,
+            key_chains=key_chains,
+            to_apply=to_apply,
+            out=out,
+        )
+
+
     @staticmethod
     def static_matrix_exp(
         x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
diff --git a/ivy/functional/backends/jax/experimental/linear_algebra.py b/ivy/functional/backends/jax/experimental/linear_algebra.py
index 0f1841e41f1a6..2e3f277e6d922 100644
--- a/ivy/functional/backends/jax/experimental/linear_algebra.py
+++ b/ivy/functional/backends/jax/experimental/linear_algebra.py
@@ -13,6 +13,14 @@
 from . import backend_version
 
 
+def lu(
+    x: JaxArray,
+    /,
+    *,
+    out: Optional[JaxArray] = None,
+) -> JaxArray:
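+    # assumes jla aliases jax.scipy.linalg, whose lu returns a (p, l, u) triple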
+    return jla.lu(x)
+
 def diagflat(
     x: JaxArray,
     /,
diff --git a/ivy/functional/backends/numpy/experimental/linear_algebra.py b/ivy/functional/backends/numpy/experimental/linear_algebra.py
index 98e87e7efafa3..882fb99a8b502 100644
--- a/ivy/functional/backends/numpy/experimental/linear_algebra.py
+++ b/ivy/functional/backends/numpy/experimental/linear_algebra.py
@@ -1,6 +1,7 @@
 import math
 from typing import Optional, Tuple, Sequence, Union, Any
 import numpy as np
+import scipy.linalg as sla
 
 import ivy
 from ivy.func_wrapper import with_supported_dtypes, with_unsupported_dtypes
@@ -10,6 +11,14 @@
 from ivy.functional.ivy.experimental.linear_algebra import _check_valid_dimension_size
 
 
+def lu(
+    x: np.ndarray,
+    /,
+    *,
+    out: Optional[np.ndarray] = None,
+) -> np.ndarray:
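+    # scipy.linalg.lu returns a (p, l, u) triple; the out argument is ignored for now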
+    return sla.lu(x)
+
 def diagflat(
     x: np.ndarray,
     /,
diff --git a/ivy/functional/backends/tensorflow/experimental/linear_algebra.py b/ivy/functional/backends/tensorflow/experimental/linear_algebra.py
index 4d54923e2e850..bc80507ee868c 100644
--- a/ivy/functional/backends/tensorflow/experimental/linear_algebra.py
+++ b/ivy/functional/backends/tensorflow/experimental/linear_algebra.py
@@ -15,16 +15,16 @@
     {"2.15.0 and below": ("int", "float16", "bfloat16")}, backend_version
 )
 def eigh_tridiagonal(
-    alpha: Union[tf.Tensor, tf.Variable],
-    beta: Union[tf.Tensor, tf.Variable],
-    /,
-    *,
-    eigvals_only: bool = True,
-    select: str = "a",
-    select_range: Optional[
-        Union[Tuple[int, int], List[int], tf.Tensor, tf.Variable]
-    ] = None,
-    tol: Optional[float] = None,
+        alpha: Union[tf.Tensor, tf.Variable],
+        beta: Union[tf.Tensor, tf.Variable],
+        /,
+        *,
+        eigvals_only: bool = True,
+        select: str = "a",
+        select_range: Optional[
+            Union[Tuple[int, int], List[int], tf.Tensor, tf.Variable]
+        ] = None,
+        tol: Optional[float] = None,
 ) -> Union[
     tf.Tensor,
     tf.Variable,
@@ -41,15 +41,15 @@ def eigh_tridiagonal(
 
 
 def diagflat(
-    x: Union[tf.Tensor, tf.Variable],
-    /,
-    *,
-    offset: int = 0,
-    padding_value: float = 0,
-    align: str = "RIGHT_LEFT",
-    num_rows: Optional[int] = None,
-    num_cols: Optional[int] = None,
-    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+        x: Union[tf.Tensor, tf.Variable],
+        /,
+        *,
+        offset: int = 0,
+        padding_value: float = 0,
+        align: str = "RIGHT_LEFT",
+        num_rows: Optional[int] = None,
+        num_cols: Optional[int] = None,
+        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ):
     if len(x.shape) > 1:
         x = tf.reshape(x, [-1])
@@ -75,21 +75,30 @@ def diagflat(
     return ret
 
 
+def lu(
+        x: Union[tf.Tensor, tf.Variable],
+        /,
+        *,
+        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+) -> Union[tf.Tensor, tf.Variable]:
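+    # tf.linalg.lu returns the packed LU matrix together with the pivot indices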
+    return tf.linalg.lu(x)
+
+
 def kron(
-    a: Union[tf.Tensor, tf.Variable],
-    b: Union[tf.Tensor, tf.Variable],
-    /,
-    *,
-    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+        a: Union[tf.Tensor, tf.Variable],
+        b: Union[tf.Tensor, tf.Variable],
+        /,
+        *,
+        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ) -> Union[tf.Tensor, tf.Variable]:
     return tf.experimental.numpy.kron(a, b)
 
 
 def matrix_exp(
-    x: Union[tf.Tensor, tf.Variable],
-    /,
-    *,
-    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+        x: Union[tf.Tensor, tf.Variable],
+        /,
+        *,
+        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ) -> Union[tf.Tensor, tf.Variable]:
     return tf.linalg.expm(x)
 
@@ -97,18 +106,18 @@ def matrix_exp(
 @with_supported_dtypes(
     {
         "2.15.0 and below": (
-            "complex",
-            "float32",
-            "float64",
+                "complex",
+                "float32",
+                "float64",
         )
     },
     backend_version,
 )
 def eig(
-    x: Union[tf.Tensor, tf.Variable],
-    /,
-    *,
-    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+        x: Union[tf.Tensor, tf.Variable],
+        /,
+        *,
+        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ) -> Tuple[tf.Tensor]:
     return tf.linalg.eig(x)
 
@@ -116,25 +125,25 @@ def eig(
 @with_supported_dtypes(
     {
         "2.15.0 and below": (
-            "complex",
-            "float32",
-            "float64",
+                "complex",
+                "float32",
+                "float64",
         )
     },
     backend_version,
 )
 def eigvals(
-    x: Union[tf.Tensor, tf.Variable],
-    /,
+        x: Union[tf.Tensor, tf.Variable],
+        /,
 ) -> Union[tf.Tensor, tf.Variable]:
     return tf.linalg.eigvals(x)
 
 
 def adjoint(
-    x: Union[tf.Tensor, tf.Variable],
-    /,
-    *,
-    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+        x: Union[tf.Tensor, tf.Variable],
+        /,
+        *,
+        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ) -> Union[tf.Tensor, tf.Variable]:
     _check_valid_dimension_size(x)
     return tf.linalg.adjoint(x)
@@ -144,14 +153,14 @@ def adjoint(
     {"2.13.0 and below": ("int", "float16", "bfloat16", "float64")}, backend_version
 )
 def solve_triangular(
-    x1: Union[tf.Tensor, tf.Variable],
-    x2: Union[tf.Tensor, tf.Variable],
-    /,
-    *,
-    upper: bool = True,
-    adjoint: bool = False,
-    unit_diagonal: bool = False,
-    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+        x1: Union[tf.Tensor, tf.Variable],
+        x2: Union[tf.Tensor, tf.Variable],
+        /,
+        *,
+        upper: bool = True,
+        adjoint: bool = False,
+        unit_diagonal: bool = False,
+        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ) -> Union[tf.Tensor, tf.Variable]:
     # Multiplying with a mask matrix can stop gradients on the diagonal.
     if unit_diagonal:
@@ -163,21 +172,21 @@ def solve_triangular(
 @with_supported_dtypes(
     {
         "2.15.0 and below": (
-            "bfloat16",
-            "float16",
-            "float32",
-            "float64",
-            "int32",
-            "int64",
+                "bfloat16",
+                "float16",
+                "float32",
+                "float64",
+                "int32",
+                "int64",
         )
     },
     backend_version,
 )
 def multi_dot(
-    x: Sequence[Union[tf.Tensor, tf.Variable]],
-    /,
-    *,
-    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+        x: Sequence[Union[tf.Tensor, tf.Variable]],
+        /,
+        *,
+        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ) -> tf.Tensor:
     # This implementation simply chains tf.tensordot multiple times
     # TODO: reimplement this function once tf adds multi_dot or inplace updates
@@ -189,11 +198,11 @@ def multi_dot(
 
 @with_unsupported_dtypes({"1.25.0 and below": ("float16", "bfloat16")}, backend_version)
 def cond(
-    x: Union[tf.Tensor, tf.Variable],
-    /,
-    *,
-    p: Optional[Union[None, int, str]] = None,
-    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+        x: Union[tf.Tensor, tf.Variable],
+        /,
+        *,
+        p: Optional[Union[None, int, str]] = None,
+        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ) -> Union[tf.Tensor, tf.Variable]:
     svd = tf.linalg.svd(x, compute_uv=False)
     if len(x.shape) >= 3:
@@ -230,11 +239,11 @@ def cond(
 
 
 def lu_factor(
-    x: Union[tf.Tensor, tf.Variable],
-    /,
-    *,
-    pivot: Optional[bool] = True,
-    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+        x: Union[tf.Tensor, tf.Variable],
+        /,
+        *,
+        pivot: Optional[bool] = True,
+        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ) -> Tuple[tf.Tensor]:
     raise IvyNotImplementedException()
 
@@ -242,25 +251,25 @@ def lu_factor(
 @with_supported_dtypes(
     {
         "2.15.0 and below": (
-            "bfloat16",
-            "float16",
-            "float32",
-            "float64",
-            "int32",
-            "int64",
-            "complex64",
-            "complex128",
-            "bfloat16",
+                "bfloat16",
+                "float16",
+                "float32",
+                "float64",
+                "int32",
+                "int64",
+                "complex64",
+                "complex128",
+                "bfloat16",
         )
     },
     backend_version,
 )
 def dot(
-    a: tf.Tensor,
-    b: tf.Tensor,
-    /,
-    *,
-    out: Optional[tf.Tensor] = None,
+        a: tf.Tensor,
+        b: tf.Tensor,
+        /,
+        *,
+        out: Optional[tf.Tensor] = None,
 ) -> tf.Tensor:
     a, b = ivy.promote_types_of_inputs(a, b)
     return tf.experimental.numpy.dot(a, b)
diff --git a/ivy/functional/backends/torch/experimental/linear_algebra.py b/ivy/functional/backends/torch/experimental/linear_algebra.py
index ed3dee38a717d..544159dc713ef 100644
--- a/ivy/functional/backends/torch/experimental/linear_algebra.py
+++ b/ivy/functional/backends/torch/experimental/linear_algebra.py
@@ -14,15 +14,15 @@
 
 @with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
 def diagflat(
-    x: torch.Tensor,
-    /,
-    *,
-    offset: int = 0,
-    padding_value: float = 0,
-    align: str = "RIGHT_LEFT",
-    num_rows: int = -1,
-    num_cols: int = -1,
-    out: Optional[torch.Tensor] = None,
+        x: torch.Tensor,
+        /,
+        *,
+        offset: int = 0,
+        padding_value: float = 0,
+        align: str = "RIGHT_LEFT",
+        num_rows: int = -1,
+        num_cols: int = -1,
+        out: Optional[torch.Tensor] = None,
 ):
     if len(x.shape) > 1:
         x = torch.flatten(x)
@@ -100,12 +100,21 @@ def diagflat(
 diagflat.support_native_out = False
 
 
+def lu(
+        x: torch.Tensor,
+        /,
+        *,
+        out: Optional[torch.Tensor] = None,
+) -> torch.Tensor:
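+    # torch.linalg.lu returns a (P, L, U) named tuple; the out argument is ignored for now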
+    return torch.linalg.lu(x)
+
+
 def kron(
-    a: torch.Tensor,
-    b: torch.Tensor,
-    /,
-    *,
-    out: Optional[torch.Tensor] = None,
+        a: torch.Tensor,
+        b: torch.Tensor,
+        /,
+        *,
+        out: Optional[torch.Tensor] = None,
 ) -> torch.tensor:
     return torch.kron(a, b, out=out)
 
@@ -114,10 +123,10 @@ def kron(
 
 
 def matrix_exp(
-    x: torch.Tensor,
-    /,
-    *,
-    out: Optional[torch.Tensor] = None,
+        x: torch.Tensor,
+        /,
+        *,
+        out: Optional[torch.Tensor] = None,
 ) -> torch.Tensor:
     return torch.linalg.matrix_exp(x)
 
@@ -126,7 +135,7 @@ def matrix_exp(
 
 
 def eig(
-    x: torch.Tensor, /, *, out: Optional[torch.Tensor] = None
+        x: torch.Tensor, /, *, out: Optional[torch.Tensor] = None
 ) -> Tuple[torch.Tensor]:
     if not torch.is_complex(x):
         x = x.to(torch.complex128)
@@ -146,24 +155,24 @@ def eigvals(x: torch.Tensor, /) -> torch.Tensor:
 
 
 def adjoint(
-    x: torch.Tensor,
-    /,
-    *,
-    out: Optional[torch.Tensor] = None,
+        x: torch.Tensor,
+        /,
+        *,
+        out: Optional[torch.Tensor] = None,
 ) -> torch.Tensor:
     _check_valid_dimension_size(x)
     return torch.adjoint(x).resolve_conj()
 
 
 def solve_triangular(
-    x1: torch.Tensor,
-    x2: torch.Tensor,
-    /,
-    *,
-    upper: bool = True,
-    adjoint: bool = False,
-    unit_diagonal: bool = False,
-    out: Optional[torch.Tensor] = None,
+        x1: torch.Tensor,
+        x2: torch.Tensor,
+        /,
+        *,
+        upper: bool = True,
+        adjoint: bool = False,
+        unit_diagonal: bool = False,
+        out: Optional[torch.Tensor] = None,
 ) -> torch.Tensor:
     if adjoint:
         x1 = torch.adjoint(x1)
@@ -178,10 +187,10 @@ def solve_triangular(
 
 @with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
 def multi_dot(
-    x: Sequence[torch.Tensor],
-    /,
-    *,
-    out: Optional[torch.Tensor] = None,
+        x: Sequence[torch.Tensor],
+        /,
+        *,
+        out: Optional[torch.Tensor] = None,
 ) -> torch.Tensor:
     return torch.linalg.multi_dot(x, out=out)
 
@@ -191,11 +200,11 @@ def multi_dot(
 
 @with_unsupported_dtypes({"2.0.0 and below": ("float16", "bfloat16")}, backend_version)
 def cond(
-    x: torch.Tensor,
-    /,
-    *,
-    p: Optional[Union[None, int, str]] = None,
-    out: Optional[torch.Tensor] = None,
+        x: torch.Tensor,
+        /,
+        *,
+        p: Optional[Union[None, int, str]] = None,
+        out: Optional[torch.Tensor] = None,
 ) -> torch.Tensor:
     return torch.linalg.cond(x, p=p, out=out)
 
@@ -204,22 +213,22 @@ def cond(
 
 
 def lu_factor(
-    x: torch.Tensor,
-    /,
-    *,
-    pivot: Optional[bool] = True,
-    out: Optional[torch.Tensor] = None,
+        x: torch.Tensor,
+        /,
+        *,
+        pivot: Optional[bool] = True,
+        out: Optional[torch.Tensor] = None,
 ) -> Tuple[torch.Tensor]:
     raise IvyNotImplementedException()
 
 
 @with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
 def dot(
-    a: torch.Tensor,
-    b: torch.Tensor,
-    /,
-    *,
-    out: Optional[torch.Tensor] = None,
+        a: torch.Tensor,
+        b: torch.Tensor,
+        /,
+        *,
+        out: Optional[torch.Tensor] = None,
 ) -> torch.Tensor:
     a, b = ivy.promote_types_of_inputs(a, b)
     if a.dim() == 0 or b.dim() == 0:
diff --git a/ivy/functional/ivy/experimental/linear_algebra.py b/ivy/functional/ivy/experimental/linear_algebra.py
index 4a3ab941aaaf7..ab152902b12c6 100644
--- a/ivy/functional/ivy/experimental/linear_algebra.py
+++ b/ivy/functional/ivy/experimental/linear_algebra.py
@@ -250,24 +250,6 @@ def lu(
     - L: Lower triangular matrix
     - U: Upper triangular matrix
     """
-    n = len(x)
-    L = ivy.zeros((n,n))
-    U = ivy.zeros((n,n))
-
-    for j in range(n):
-        L[j][j] = 1.0
-        for i in range(j + 1):
-            s1 = sum(U[k][j] * L[i][k] for k in range(i))
-            U[i][j] = x[i][j] - s1
-
-        for i in range(j, n):
-            s2 = sum(U[k][j] * L[i][k] for k in range(j))
-            L[i][j] = (x[i][j] - s2) / U[j][j]
-
-    # TODO: return something like this:
-    # TODO: implement lu in container/.../linear_algebra.py (a static
-    #  method plus an instance method that calls it) and in
-    #  array/.../linear_algebra.py
     return current_backend(x).lu(x, out=out)
 
 @handle_exceptions
@@ -312,6 +294,8 @@ def kron(
     return current_backend(a, b).kron(a, b, out=out)
 
 
+
+
 @handle_exceptions
 @handle_backend_invalid
 @handle_nestable

From 6bf4bdd2943f21e61107acb8c5c107e1ef622d9a Mon Sep 17 00:00:00 2001
From: HamzaGbada <hamza.gbada@gmail.com>
Date: Fri, 23 Feb 2024 10:15:12 +0100
Subject: [PATCH 05/10] lu_extension

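Add lu stubs to the mxnet and paddle backends; both raise
IvyNotImplementedException for now. The surrounding re-indentation is
IDE noise and is reverted by the later lint commit.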
---
 .../mxnet/experimental/linear_algebra.py      | 133 +++++++++--------
 .../paddle/experimental/linear_algebra.py     | 136 ++++++++++--------
 2 files changed, 144 insertions(+), 125 deletions(-)

diff --git a/ivy/functional/backends/mxnet/experimental/linear_algebra.py b/ivy/functional/backends/mxnet/experimental/linear_algebra.py
index dd31f5eeb070d..1bb0f5fb1cf38 100644
--- a/ivy/functional/backends/mxnet/experimental/linear_algebra.py
+++ b/ivy/functional/backends/mxnet/experimental/linear_algebra.py
@@ -5,92 +5,101 @@
 
 
 def eigh_tridiagonal(
-    alpha: Union[(None, mx.ndarray.NDArray)],
-    beta: Union[(None, mx.ndarray.NDArray)],
-    /,
-    *,
-    eigvals_only: bool = True,
-    select: str = "a",
-    select_range: Optional[
-        Union[(Tuple[(int, int)], List[int], None, mx.ndarray.NDArray)]
-    ] = None,
-    tol: Optional[float] = None,
+        alpha: Union[(None, mx.ndarray.NDArray)],
+        beta: Union[(None, mx.ndarray.NDArray)],
+        /,
+        *,
+        eigvals_only: bool = True,
+        select: str = "a",
+        select_range: Optional[
+            Union[(Tuple[(int, int)], List[int], None, mx.ndarray.NDArray)]
+        ] = None,
+        tol: Optional[float] = None,
 ) -> Union[
     (
-        None,
-        mx.ndarray.NDArray,
-        Tuple[(Union[(None, mx.ndarray.NDArray)], Union[(None, mx.ndarray.NDArray)])],
+            None,
+            mx.ndarray.NDArray,
+            Tuple[(Union[(None, mx.ndarray.NDArray)], Union[(None, mx.ndarray.NDArray)])],
     )
 ]:
     raise IvyNotImplementedException()
 
 
 def diagflat(
-    x: Union[(None, mx.ndarray.NDArray)],
-    /,
-    *,
-    offset: int = 0,
-    padding_value: float = 0,
-    align: str = "RIGHT_LEFT",
-    num_rows: Optional[int] = None,
-    num_cols: Optional[int] = None,
-    out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
+        x: Union[(None, mx.ndarray.NDArray)],
+        /,
+        *,
+        offset: int = 0,
+        padding_value: float = 0,
+        align: str = "RIGHT_LEFT",
+        num_rows: Optional[int] = None,
+        num_cols: Optional[int] = None,
+        out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
 ):
     raise IvyNotImplementedException()
 
 
 def kron(
-    a: Union[(None, mx.ndarray.NDArray)],
-    b: Union[(None, mx.ndarray.NDArray)],
-    /,
-    *,
-    out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
+        a: Union[(None, mx.ndarray.NDArray)],
+        b: Union[(None, mx.ndarray.NDArray)],
+        /,
+        *,
+        out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
+) -> Union[(None, mx.ndarray.NDArray)]:
+    raise IvyNotImplementedException()
+
+
+def lu(
+        x: Union[(None, mx.ndarray.NDArray)],
+        /,
+        *,
+        out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
 ) -> Union[(None, mx.ndarray.NDArray)]:
     raise IvyNotImplementedException()
 
 
 def matrix_exp(
-    x: Union[(None, mx.ndarray.NDArray)],
-    /,
-    *,
-    out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
+        x: Union[(None, mx.ndarray.NDArray)],
+        /,
+        *,
+        out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
 ) -> Union[(None, mx.ndarray.NDArray)]:
     raise IvyNotImplementedException()
 
 
 def eig(
-    x: Union[(None, mx.ndarray.NDArray)],
-    /,
-    *,
-    out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
+        x: Union[(None, mx.ndarray.NDArray)],
+        /,
+        *,
+        out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
 ) -> Tuple[None]:
     raise IvyNotImplementedException()
 
 
 def eigvals(
-    x: Union[(None, mx.ndarray.NDArray)], /
+        x: Union[(None, mx.ndarray.NDArray)], /
 ) -> Union[(None, mx.ndarray.NDArray)]:
     raise IvyNotImplementedException()
 
 
 def adjoint(
-    x: Union[(None, mx.ndarray.NDArray)],
-    /,
-    *,
-    out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
+        x: Union[(None, mx.ndarray.NDArray)],
+        /,
+        *,
+        out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
 ) -> Union[(None, mx.ndarray.NDArray)]:
     raise IvyNotImplementedException()
 
 
 def solve_triangular(
-    x1: Union[(None, mx.ndarray.NDArray)],
-    x2: Union[(None, mx.ndarray.NDArray)],
-    /,
-    *,
-    upper: bool = True,
-    adjoint: bool = False,
-    unit_diagonal: bool = False,
-    out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
+        x1: Union[(None, mx.ndarray.NDArray)],
+        x2: Union[(None, mx.ndarray.NDArray)],
+        /,
+        *,
+        upper: bool = True,
+        adjoint: bool = False,
+        unit_diagonal: bool = False,
+        out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
 ) -> Union[(None, mx.ndarray.NDArray)]:
     # Multiplying with a mask matrix can stop gradients on the diagonal.
     if unit_diagonal:
@@ -102,30 +111,30 @@ def solve_triangular(
 
 
 def multi_dot(
-    x: Sequence[Union[(None, mx.ndarray.NDArray)]],
-    /,
-    *,
-    out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
+        x: Sequence[Union[(None, mx.ndarray.NDArray)]],
+        /,
+        *,
+        out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
 ) -> None:
     raise IvyNotImplementedException()
 
 
 def cond(
-    x: Union[(None, mx.ndarray.NDArray)],
-    /,
-    *,
-    p: Optional[Union[(None, int, str)]] = None,
-    out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
+        x: Union[(None, mx.ndarray.NDArray)],
+        /,
+        *,
+        p: Optional[Union[(None, int, str)]] = None,
+        out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
 ) -> Union[(None, mx.ndarray.NDArray)]:
     raise IvyNotImplementedException()
 
 
 def dot(
-    a: mx.ndarray.NDArray,
-    b: mx.ndarray.NDArray,
-    /,
-    *,
-    out: Optional[mx.ndarray.NDArray] = None,
+        a: mx.ndarray.NDArray,
+        b: mx.ndarray.NDArray,
+        /,
+        *,
+        out: Optional[mx.ndarray.NDArray] = None,
 ) -> mx.ndarray.NDArray:
     return mx.symbol.dot(a, b, out=out)
 
diff --git a/ivy/functional/backends/paddle/experimental/linear_algebra.py b/ivy/functional/backends/paddle/experimental/linear_algebra.py
index eacf1acf4278b..9b3eab7f5c7ba 100644
--- a/ivy/functional/backends/paddle/experimental/linear_algebra.py
+++ b/ivy/functional/backends/paddle/experimental/linear_algebra.py
@@ -17,15 +17,15 @@
     backend_version,
 )
 def diagflat(
-    x: paddle.Tensor,
-    /,
-    *,
-    offset: Optional[int] = 0,
-    padding_value: Optional[float] = 0,
-    align: Optional[str] = "RIGHT_LEFT",
-    num_rows: Optional[int] = None,
-    num_cols: Optional[int] = None,
-    out: Optional[paddle.Tensor] = None,
+        x: paddle.Tensor,
+        /,
+        *,
+        offset: Optional[int] = 0,
+        padding_value: Optional[float] = 0,
+        align: Optional[str] = "RIGHT_LEFT",
+        num_rows: Optional[int] = None,
+        num_cols: Optional[int] = None,
+        out: Optional[paddle.Tensor] = None,
 ):
     diag = paddle.diag(x.flatten(), padding_value=padding_value, offset=offset)
     num_rows = num_rows if num_rows is not None else diag.shape[0]
@@ -50,20 +50,30 @@ def diagflat(
     {"2.6.0 and below": {"cpu": ("int8", "uint8", "int16")}}, backend_version
 )
 def kron(
-    a: paddle.Tensor,
-    b: paddle.Tensor,
-    /,
-    *,
-    out: Optional[paddle.Tensor] = None,
+        a: paddle.Tensor,
+        b: paddle.Tensor,
+        /,
+        *,
+        out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     return paddle.kron(a, b)
 
 
+def lu(
+        x: paddle.Tensor,
+        /,
+        *,
+        out: Optional[paddle.Tensor] = None,
+) -> paddle.Tensor:
+    # return paddle.lu(x)
+    raise IvyNotImplementedException()
+
+
 def matrix_exp(
-    x: paddle.Tensor,
-    /,
-    *,
-    out: Optional[paddle.Tensor] = None,
+        x: paddle.Tensor,
+        /,
+        *,
+        out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     # TODO: this is elementwise exp, should be changed to matrix exp ASAP
     # return paddle.exp(x)
@@ -71,7 +81,7 @@ def matrix_exp(
 
 
 def eig(
-    x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
+        x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
 ) -> Tuple[paddle.Tensor]:
     return paddle.linalg.eig(x)
 
@@ -81,10 +91,10 @@ def eigvals(x: paddle.Tensor, /) -> paddle.Tensor:
 
 
 def adjoint(
-    x: paddle.Tensor,
-    /,
-    *,
-    out: Optional[paddle.Tensor] = None,
+        x: paddle.Tensor,
+        /,
+        *,
+        out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     _check_valid_dimension_size(x)
     return paddle.moveaxis(x, -2, -1).conj()
@@ -95,14 +105,14 @@ def adjoint(
     backend_version,
 )
 def solve_triangular(
-    x1: paddle.Tensor,
-    x2: paddle.Tensor,
-    /,
-    *,
-    upper: bool = True,
-    adjoint: bool = False,
-    unit_diagonal: bool = False,
-    out: Optional[paddle.Tensor] = None,
+        x1: paddle.Tensor,
+        x2: paddle.Tensor,
+        /,
+        *,
+        upper: bool = True,
+        adjoint: bool = False,
+        unit_diagonal: bool = False,
+        out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     # Paddle does not support complex tensors for this operation (cpu and gpu),
     # so adjoint always equals transpose.
@@ -112,21 +122,21 @@ def solve_triangular(
 
 
 def cond(
-    x: paddle.Tensor,
-    /,
-    *,
-    p: Optional[Union[None, int, str]] = None,
-    out: Optional[paddle.Tensor] = None,
+        x: paddle.Tensor,
+        /,
+        *,
+        p: Optional[Union[None, int, str]] = None,
+        out: Optional[paddle.Tensor] = None,
 ) -> Any:
     raise IvyNotImplementedException()
 
 
 def lu_factor(
-    x: paddle.Tensor,
-    /,
-    *,
-    pivot: Optional[bool] = True,
-    out: Optional[paddle.Tensor] = None,
+        x: paddle.Tensor,
+        /,
+        *,
+        pivot: Optional[bool] = True,
+        out: Optional[paddle.Tensor] = None,
 ) -> Any:
     raise IvyNotImplementedException()
 
@@ -135,31 +145,31 @@ def lu_factor(
     {
         "2.6.0 and below": {
             "cpu": (
-                "float32",
-                "float64",
+                    "float32",
+                    "float64",
             ),
             "gpu": (
-                "float16",
-                "float32",
-                "float64",
+                    "float16",
+                    "float32",
+                    "float64",
             ),
         }
     },
     backend_version,
 )
 def dot(
-    a: paddle.Tensor,
-    b: paddle.Tensor,
-    /,
-    *,
-    out: Optional[paddle.Tensor] = None,
+        a: paddle.Tensor,
+        b: paddle.Tensor,
+        /,
+        *,
+        out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     if len(a.shape) == 0 or len(b.shape) == 0:
         return paddle.multiply(a, b)
     if (
-        len(a.shape) in [1, 2]
-        and len(b.shape) in [1, 2]
-        or (len(a.shape) >= 1 and len(b.shape) == 1)
+            len(a.shape) in [1, 2]
+            and len(b.shape) in [1, 2]
+            or (len(a.shape) >= 1 and len(b.shape) == 1)
     ):
         return paddle.matmul(a, b)
 
@@ -170,22 +180,22 @@ def dot(
     {
         "2.6.0 and below": {
             "cpu": (
-                "float32",
-                "float64",
+                    "float32",
+                    "float64",
             ),
             "gpu": (
-                "float16",
-                "float32",
-                "float64",
+                    "float16",
+                    "float32",
+                    "float64",
             ),
         }
     },
     backend_version,
 )
 def multi_dot(
-    x: paddle.Tensor,
-    /,
-    *,
-    out: Optional[paddle.Tensor] = None,
+        x: paddle.Tensor,
+        /,
+        *,
+        out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     return paddle.linalg.multi_dot(x)

From 01b152e9235a73de00a56658142ea714553ffa28 Mon Sep 17 00:00:00 2001
From: HamzaGbada <hamza.gbada@gmail.com>
Date: Fri, 23 Feb 2024 10:17:06 +0100
Subject: [PATCH 06/10] [TEST] lu_extension

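Add a functional test for ivy.lu over 2x2 matrices with values in
[-100, 100] and gradient testing disabled. Note that the strategy does
not yet exclude singular or nearly singular inputs, which LU routines
without pivoting cannot factor, so an invertibility filter may be
needed later.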
---
 .../test_core/test_linalg.py                  | 26 +++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_linalg.py b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_linalg.py
index 655b8149eb478..69bfc2d03ce07 100644
--- a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_linalg.py
+++ b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_linalg.py
@@ -1426,6 +1426,32 @@ def test_make_svd_non_negative(*, data, test_flags, backend_fw, fn_name, on_devi
         backend=backend_fw,
         ground_truth_backend=test_flags.ground_truth_backend,
     )
+@handle_test(
+    fn_tree="functional.ivy.experimental.lu",
+    dtype_x=helpers.dtype_and_values(
+        available_dtypes=helpers.get_dtypes("valid"),
+        min_num_dims=2,
+        max_num_dims=2,
+        min_dim_size=2,
+        max_dim_size=2,
+        min_value=-100,
+        max_value=100,
+        allow_nan=False,
+        shared_dtype=True,
+    ),
+    test_gradients=st.just(False),
+)
+def test_lu(dtype_x, test_flags, backend_fw, fn_name, on_device):
+    dtype, x = dtype_x
+    helpers.test_function(
+        input_dtypes=dtype,
+        test_flags=test_flags,
+        on_device=on_device,
+        backend_to_test=backend_fw,
+        fn_name=fn_name,
+        x=x[0],
+    )
+
 
 
 # matrix_exp

From 1adae1df3662dd5bebc43ca671c831724924d2d9 Mon Sep 17 00:00:00 2001
From: ivy-branch <ivy.branch@lets-unify.ai>
Date: Fri, 23 Feb 2024 09:23:07 +0000
Subject: [PATCH 07/10] =?UTF-8?q?=F0=9F=A4=96=20Lint=20code?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../array/experimental/linear_algebra.py      | 282 +++++++++---------
 .../container/experimental/linear_algebra.py  |  25 +-
 .../jax/experimental/linear_algebra.py        |   1 +
 .../mxnet/experimental/linear_algebra.py      | 132 ++++----
 .../numpy/experimental/linear_algebra.py      |   1 +
 .../paddle/experimental/linear_algebra.py     | 134 ++++-----
 .../tensorflow/experimental/linear_algebra.py | 180 +++++------
 .../torch/experimental/linear_algebra.py      | 108 +++----
 .../ivy/experimental/linear_algebra.py        |   9 +-
 .../test_core/test_linalg.py                  |  53 ++--
 10 files changed, 464 insertions(+), 461 deletions(-)

diff --git a/ivy/data_classes/array/experimental/linear_algebra.py b/ivy/data_classes/array/experimental/linear_algebra.py
index 999b5ae9c0d24..92b212303eaeb 100644
--- a/ivy/data_classes/array/experimental/linear_algebra.py
+++ b/ivy/data_classes/array/experimental/linear_algebra.py
@@ -8,16 +8,16 @@
 
 class _ArrayWithLinearAlgebraExperimental(abc.ABC):
     def eigh_tridiagonal(
-            self: Union[ivy.Array, ivy.NativeArray],
-            beta: Union[ivy.Array, ivy.NativeArray],
-            /,
-            *,
-            eigvals_only: bool = True,
-            select: str = "a",
-            select_range: Optional[
-                Union[Tuple[int, int], List[int], ivy.Array, ivy.NativeArray]
-            ] = None,
-            tol: Optional[float] = None,
+        self: Union[ivy.Array, ivy.NativeArray],
+        beta: Union[ivy.Array, ivy.NativeArray],
+        /,
+        *,
+        eigvals_only: bool = True,
+        select: str = "a",
+        select_range: Optional[
+            Union[Tuple[int, int], List[int], ivy.Array, ivy.NativeArray]
+        ] = None,
+        tol: Optional[float] = None,
     ) -> Union[ivy.Array, Tuple[ivy.Array, ivy.Array]]:
         """ivy.Array instance method variant of ivy.eigh_tridiagonal. This
         method simply wraps the function, and so the docstring for
@@ -78,15 +78,15 @@ def eigh_tridiagonal(
         )
 
     def diagflat(
-            self: Union[ivy.Array, ivy.NativeArray],
-            /,
-            *,
-            offset: int = 0,
-            padding_value: float = 0,
-            align: str = "RIGHT_LEFT",
-            num_rows: int = -1,
-            num_cols: int = -1,
-            out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
+        self: Union[ivy.Array, ivy.NativeArray],
+        /,
+        *,
+        offset: int = 0,
+        padding_value: float = 0,
+        align: str = "RIGHT_LEFT",
+        num_rows: int = -1,
+        num_cols: int = -1,
+        out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.diagflat. This method
         simply wraps the function, and so the docstring for ivy.diagflat also
@@ -111,10 +111,10 @@ def diagflat(
         )
 
     def kron(
-            self: ivy.Array,
-            /,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: ivy.Array,
+        /,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.lu. This method simply
         wraps the function, and so the docstring for ivy.lu also applies to
@@ -130,11 +130,11 @@ def kron(
         return ivy.lu(self._data, out=out)
 
     def kron(
-            self: ivy.Array,
-            b: ivy.Array,
-            /,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: ivy.Array,
+        b: ivy.Array,
+        /,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.kron. This method simply
         wraps the function, and so the docstring for ivy.kron also applies to
@@ -169,8 +169,8 @@ def matrix_exp(self: ivy.Array, /, *, out: Optional[ivy.Array] = None) -> ivy.Ar
         return ivy.matrix_exp(self._data, out=out)
 
     def eig(
-            self: ivy.Array,
-            /,
+        self: ivy.Array,
+        /,
     ) -> Tuple[ivy.Array, ...]:
         """ivy.Array instance method variant of ivy.eig. This method simply
         wraps the function, and so the docstring for ivy.eig also applies to
@@ -189,8 +189,8 @@ def eig(
         return ivy.eig(self._data)
 
     def eigvals(
-            self: ivy.Array,
-            /,
+        self: ivy.Array,
+        /,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.eigvals. This method simply
         wraps the function, and so the docstring for ivy.eigvals also applies
@@ -205,10 +205,10 @@ def eigvals(
         return ivy.eigvals(self._data)
 
     def adjoint(
-            self: ivy.Array,
-            /,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: ivy.Array,
+        /,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.adjoint. This method simply
         wraps the function, and so the docstring for ivy.adjoint also applies
@@ -229,11 +229,11 @@ def adjoint(
         )
 
     def multi_dot(
-            self: ivy.Array,
-            x: Sequence[Union[ivy.Array, ivy.NativeArray]],
-            /,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: ivy.Array,
+        x: Sequence[Union[ivy.Array, ivy.NativeArray]],
+        /,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.multi_dot. This method
         simply wraps the function, and so the docstring for ivy.multi_dot also
@@ -251,7 +251,7 @@ def multi_dot(
         return ivy.multi_dot((self._data, *x), out=out)
 
     def cond(
-            self: ivy.Array, /, *, p: Optional[Union[int, float, str]] = None
+        self: ivy.Array, /, *, p: Optional[Union[int, float, str]] = None
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.cond. This method simply
         wraps the function, and so the docstring for ivy.cond also applies to
@@ -270,13 +270,13 @@ def cond(
         return ivy.cond(self._data, p=p)
 
     def mode_dot(
-            self: Union[ivy.Array, ivy.NativeArray],
-            /,
-            matrix_or_vector: Union[ivy.Array, ivy.NativeArray],
-            mode: int,
-            transpose: Optional[bool] = False,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: Union[ivy.Array, ivy.NativeArray],
+        /,
+        matrix_or_vector: Union[ivy.Array, ivy.NativeArray],
+        mode: int,
+        transpose: Optional[bool] = False,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.mode_dot. This method
         simply wraps the function, and so the docstring for ivy.mode_dot also
@@ -310,14 +310,14 @@ def mode_dot(
         return ivy.mode_dot(self._data, matrix_or_vector, mode, transpose, out=out)
 
     def multi_mode_dot(
-            self: Union[ivy.Array, ivy.NativeArray],
-            mat_or_vec_list: Sequence[Union[ivy.Array, ivy.NativeArray]],
-            /,
-            modes: Optional[Sequence[int]] = None,
-            skip: Optional[Sequence[int]] = None,
-            transpose: Optional[bool] = False,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: Union[ivy.Array, ivy.NativeArray],
+        mat_or_vec_list: Sequence[Union[ivy.Array, ivy.NativeArray]],
+        /,
+        modes: Optional[Sequence[int]] = None,
+        skip: Optional[Sequence[int]] = None,
+        transpose: Optional[bool] = False,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         r"""ivy.Array instance method variant of ivy.multi_mode_dot. This method
         simply wraps the function, and so the docstring for ivy.multi_mode_dot
@@ -360,10 +360,10 @@ def multi_mode_dot(
         )
 
     def svd_flip(
-            self: Union[ivy.Array, ivy.NativeArray],
-            V: Union[ivy.Array, ivy.NativeArray],
-            /,
-            u_based_decision: Optional[bool] = True,
+        self: Union[ivy.Array, ivy.NativeArray],
+        V: Union[ivy.Array, ivy.NativeArray],
+        /,
+        u_based_decision: Optional[bool] = True,
     ) -> Tuple[ivy.Array, ivy.Array]:
         """ivy.Array instance method variant of ivy.svd_flip. This method
         simply wraps the function, and so the docstring for ivy.svd_flip also
@@ -387,13 +387,13 @@ def svd_flip(
         return ivy.svd_flip(self._data, V, u_based_decision)
 
     def make_svd_non_negative(
-            self: Union[ivy.Array, ivy.NativeArray],
-            U: Union[ivy.Array, ivy.NativeArray],
-            S: Union[ivy.Array, ivy.NativeArray],
-            V: Union[ivy.Array, ivy.NativeArray],
-            /,
-            *,
-            nntype: Optional[Literal["nndsvd", "nndsvda"]] = "nndsvd",
+        self: Union[ivy.Array, ivy.NativeArray],
+        U: Union[ivy.Array, ivy.NativeArray],
+        S: Union[ivy.Array, ivy.NativeArray],
+        V: Union[ivy.Array, ivy.NativeArray],
+        /,
+        *,
+        nntype: Optional[Literal["nndsvd", "nndsvda"]] = "nndsvd",
     ) -> Tuple[ivy.Array, ivy.Array]:
         """ivy.Array instance method variant of ivy.make_svd_non_negative. This
         method simply wraps the function, and so the docstring for
@@ -419,11 +419,11 @@ def make_svd_non_negative(
         return ivy.make_svd_non_negative(self._data, U, S, V, nntype=nntype)
 
     def tensor_train(
-            self: Union[ivy.Array, ivy.NativeArray],
-            rank: Union[int, Sequence[int]],
-            /,
-            svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
-            verbose: Optional[bool] = False,
+        self: Union[ivy.Array, ivy.NativeArray],
+        rank: Union[int, Sequence[int]],
+        /,
+        svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
+        verbose: Optional[bool] = False,
     ) -> ivy.TTTensor:
         """ivy.Array instance method variant of ivy.tensor_train. This method
         simply wraps the function, and so the docstring for ivy.tensor_train
@@ -449,10 +449,10 @@ def tensor_train(
         return ivy.tensor_train(self._data, rank, svd=svd, verbose=verbose)
 
     def truncated_svd(
-            self: Union[ivy.Array, ivy.NativeArray],
-            /,
-            compute_uv: bool = True,
-            n_eigenvecs: Optional[int] = None,
+        self: Union[ivy.Array, ivy.NativeArray],
+        /,
+        compute_uv: bool = True,
+        n_eigenvecs: Optional[int] = None,
     ) -> Union[ivy.Array, Tuple[ivy.Array, ivy.Array, ivy.Array]]:
         """ivy.Array instance method variant of ivy.make_svd_non_negative. This
         method simply wraps the function, and so the docstring for
@@ -481,17 +481,17 @@ def truncated_svd(
         return ivy.truncated_svd(self._data, compute_uv, n_eigenvecs)
 
     def initialize_tucker(
-            self: Union[ivy.Array, ivy.NativeArray],
-            rank: Sequence[int],
-            modes: Sequence[int],
-            /,
-            *,
-            init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
-            seed: Optional[int] = None,
-            svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
-            non_negative: Optional[bool] = False,
-            mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
-            svd_mask_repeats: Optional[int] = 5,
+        self: Union[ivy.Array, ivy.NativeArray],
+        rank: Sequence[int],
+        modes: Sequence[int],
+        /,
+        *,
+        init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
+        seed: Optional[int] = None,
+        svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
+        non_negative: Optional[bool] = False,
+        mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
+        svd_mask_repeats: Optional[int] = 5,
     ) -> Tuple[ivy.Array, Sequence[ivy.Array]]:
         """ivy.Array instance method variant of ivy.initialize_tucker. This
         method simply wraps the function, and so the docstring for
@@ -543,20 +543,20 @@ def initialize_tucker(
         )
 
     def partial_tucker(
-            self: Union[ivy.Array, ivy.NativeArray],
-            rank: Optional[Sequence[int]] = None,
-            modes: Optional[Sequence[int]] = None,
-            /,
-            *,
-            n_iter_max: Optional[int] = 100,
-            init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
-            svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
-            seed: Optional[int] = None,
-            mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
-            svd_mask_repeats: Optional[int] = 5,
-            tol: Optional[float] = 10e-5,
-            verbose: Optional[bool] = False,
-            return_errors: Optional[bool] = False,
+        self: Union[ivy.Array, ivy.NativeArray],
+        rank: Optional[Sequence[int]] = None,
+        modes: Optional[Sequence[int]] = None,
+        /,
+        *,
+        n_iter_max: Optional[int] = 100,
+        init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
+        svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
+        seed: Optional[int] = None,
+        mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
+        svd_mask_repeats: Optional[int] = 5,
+        tol: Optional[float] = 10e-5,
+        verbose: Optional[bool] = False,
+        return_errors: Optional[bool] = False,
     ) -> Tuple[ivy.Array, Sequence[ivy.Array]]:
         """ivy.Array instance method variant of ivy.partial_tucker. This method
         simply wraps the function, and so the docstring for ivy.partial_tucker
@@ -624,20 +624,20 @@ def partial_tucker(
         )
 
     def tucker(
-            self: Union[ivy.Array, ivy.NativeArray],
-            rank: Optional[Sequence[int]] = None,
-            /,
-            *,
-            fixed_factors: Optional[Sequence[int]] = None,
-            n_iter_max: Optional[int] = 100,
-            init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
-            svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
-            seed: Optional[int] = None,
-            mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
-            svd_mask_repeats: Optional[int] = 5,
-            tol: Optional[float] = 10e-5,
-            verbose: Optional[bool] = False,
-            return_errors: Optional[bool] = False,
+        self: Union[ivy.Array, ivy.NativeArray],
+        rank: Optional[Sequence[int]] = None,
+        /,
+        *,
+        fixed_factors: Optional[Sequence[int]] = None,
+        n_iter_max: Optional[int] = 100,
+        init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
+        svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
+        seed: Optional[int] = None,
+        mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
+        svd_mask_repeats: Optional[int] = 5,
+        tol: Optional[float] = 10e-5,
+        verbose: Optional[bool] = False,
+        return_errors: Optional[bool] = False,
     ):
         """ivy.Array instance method variant of ivy.tucker. This method simply
         wraps the function, and so the docstring for ivy.tucker also applies to
@@ -711,10 +711,10 @@ def tucker(
         )
 
     def tt_matrix_to_tensor(
-            self: Union[ivy.Array, ivy.NativeArray],
-            /,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: Union[ivy.Array, ivy.NativeArray],
+        /,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """Ivy.Array instance method variant of ivy.tt_matrix_to_tensor. This
         method simply wraps the function, and so the docstring for
@@ -763,11 +763,11 @@ def tt_matrix_to_tensor(
         return ivy.tt_matrix_to_tensor(self._data, out=out)
 
     def dot(
-            self: Union[ivy.Array, ivy.NativeArray],
-            b: Union[ivy.Array, ivy.NativeArray],
-            /,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: Union[ivy.Array, ivy.NativeArray],
+        b: Union[ivy.Array, ivy.NativeArray],
+        /,
+        *,
+        out: Optional[ivy.Array] = None,
     ):
         """Compute the dot product between two arrays `a` and `b` using the
         current backend's implementation. The dot product is defined as the sum
@@ -815,12 +815,12 @@ def dot(
         return ivy.dot(self._data, b, out=out)
 
     def general_inner_product(
-            self: Union[ivy.Array, ivy.NativeArray],
-            b: Union[ivy.Array, ivy.NativeArray],
-            n_modes: Optional[int] = None,
-            /,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: Union[ivy.Array, ivy.NativeArray],
+        b: Union[ivy.Array, ivy.NativeArray],
+        n_modes: Optional[int] = None,
+        /,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.general_inner_product. This
         method simply wraps the function, and so the docstring for
@@ -871,11 +871,11 @@ def general_inner_product(
         return ivy.general_inner_product(self, b, n_modes, out=out)
 
     def higher_order_moment(
-            self: Union[ivy.Array, ivy.NativeArray],
-            order: int,
-            /,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: Union[ivy.Array, ivy.NativeArray],
+        order: int,
+        /,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.higher_order_moment. This
         method simply wraps the function, and so the docstring for
@@ -912,11 +912,11 @@ def higher_order_moment(
         return ivy.higher_order_moment(self._data, order, out=out)
 
     def batched_outer(
-            self: ivy.Array,
-            tensors: Sequence[Union[ivy.Array, ivy.NativeArray]],
-            /,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: ivy.Array,
+        tensors: Sequence[Union[ivy.Array, ivy.NativeArray]],
+        /,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """Ivy Array instance method variant of ivy.batched_outer. This method
         simply wraps the function, and so the docstring for ivy.batched_outer
diff --git a/ivy/data_classes/container/experimental/linear_algebra.py b/ivy/data_classes/container/experimental/linear_algebra.py
index 866e7a7a4633d..1381211c911a1 100644
--- a/ivy/data_classes/container/experimental/linear_algebra.py
+++ b/ivy/data_classes/container/experimental/linear_algebra.py
@@ -351,12 +351,12 @@ def kron(
 
     @staticmethod
     def static_lu(
-            x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
-            /,
-            *,
-            key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
-            to_apply: Union[bool, ivy.Container] = True,
-            out: Optional[ivy.Container] = None,
+        x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
+        /,
+        *,
+        key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
+        to_apply: Union[bool, ivy.Container] = True,
+        out: Optional[ivy.Container] = None,
     ) -> ivy.Container:
         return ContainerBase.cont_multi_map_in_function(
             "lu",
@@ -367,12 +367,12 @@ def static_lu(
         )
 
     def lu(
-            self: ivy.Container,
-            /,
-            *,
-            key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
-            to_apply: Union[bool, ivy.Container] = True,
-            out: Optional[ivy.Container] = None,
+        self: ivy.Container,
+        /,
+        *,
+        key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
+        to_apply: Union[bool, ivy.Container] = True,
+        out: Optional[ivy.Container] = None,
     ) -> ivy.Container:
         """ivy.Container instance method variant of ivy.diagflat. This method
         simply wraps the function, and so the docstring for ivy.diagflat also
@@ -397,7 +397,6 @@ def lu(
             out=out,
         )
 
-
     @staticmethod
     def static_matrix_exp(
         x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
diff --git a/ivy/functional/backends/jax/experimental/linear_algebra.py b/ivy/functional/backends/jax/experimental/linear_algebra.py
index 2e3f277e6d922..3389e8c5ff7c5 100644
--- a/ivy/functional/backends/jax/experimental/linear_algebra.py
+++ b/ivy/functional/backends/jax/experimental/linear_algebra.py
@@ -21,6 +21,7 @@ def lu(
 ) -> JaxArray:
     return jla.lu(x)
 
+
 def diagflat(
     x: JaxArray,
     /,
diff --git a/ivy/functional/backends/mxnet/experimental/linear_algebra.py b/ivy/functional/backends/mxnet/experimental/linear_algebra.py
index 1bb0f5fb1cf38..7d2a97dd5081b 100644
--- a/ivy/functional/backends/mxnet/experimental/linear_algebra.py
+++ b/ivy/functional/backends/mxnet/experimental/linear_algebra.py
@@ -5,101 +5,101 @@
 
 
 def eigh_tridiagonal(
-        alpha: Union[(None, mx.ndarray.NDArray)],
-        beta: Union[(None, mx.ndarray.NDArray)],
-        /,
-        *,
-        eigvals_only: bool = True,
-        select: str = "a",
-        select_range: Optional[
-            Union[(Tuple[(int, int)], List[int], None, mx.ndarray.NDArray)]
-        ] = None,
-        tol: Optional[float] = None,
+    alpha: Union[(None, mx.ndarray.NDArray)],
+    beta: Union[(None, mx.ndarray.NDArray)],
+    /,
+    *,
+    eigvals_only: bool = True,
+    select: str = "a",
+    select_range: Optional[
+        Union[(Tuple[(int, int)], List[int], None, mx.ndarray.NDArray)]
+    ] = None,
+    tol: Optional[float] = None,
 ) -> Union[
     (
-            None,
-            mx.ndarray.NDArray,
-            Tuple[(Union[(None, mx.ndarray.NDArray)], Union[(None, mx.ndarray.NDArray)])],
+        None,
+        mx.ndarray.NDArray,
+        Tuple[(Union[(None, mx.ndarray.NDArray)], Union[(None, mx.ndarray.NDArray)])],
     )
 ]:
     raise IvyNotImplementedException()
 
 
 def diagflat(
-        x: Union[(None, mx.ndarray.NDArray)],
-        /,
-        *,
-        offset: int = 0,
-        padding_value: float = 0,
-        align: str = "RIGHT_LEFT",
-        num_rows: Optional[int] = None,
-        num_cols: Optional[int] = None,
-        out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
+    x: Union[(None, mx.ndarray.NDArray)],
+    /,
+    *,
+    offset: int = 0,
+    padding_value: float = 0,
+    align: str = "RIGHT_LEFT",
+    num_rows: Optional[int] = None,
+    num_cols: Optional[int] = None,
+    out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
 ):
     raise IvyNotImplementedException()
 
 
 def kron(
-        a: Union[(None, mx.ndarray.NDArray)],
-        b: Union[(None, mx.ndarray.NDArray)],
-        /,
-        *,
-        out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
+    a: Union[(None, mx.ndarray.NDArray)],
+    b: Union[(None, mx.ndarray.NDArray)],
+    /,
+    *,
+    out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
 ) -> Union[(None, mx.ndarray.NDArray)]:
     raise IvyNotImplementedException()
 
 
 def lu(
-        x: Union[(None, mx.ndarray.NDArray)],
-        /,
-        *,
-        out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
+    x: Union[(None, mx.ndarray.NDArray)],
+    /,
+    *,
+    out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
 ) -> Union[(None, mx.ndarray.NDArray)]:
     raise IvyNotImplementedException()
 
 
 def matrix_exp(
-        x: Union[(None, mx.ndarray.NDArray)],
-        /,
-        *,
-        out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
+    x: Union[(None, mx.ndarray.NDArray)],
+    /,
+    *,
+    out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
 ) -> Union[(None, mx.ndarray.NDArray)]:
     raise IvyNotImplementedException()
 
 
 def eig(
-        x: Union[(None, mx.ndarray.NDArray)],
-        /,
-        *,
-        out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
+    x: Union[(None, mx.ndarray.NDArray)],
+    /,
+    *,
+    out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
 ) -> Tuple[None]:
     raise IvyNotImplementedException()
 
 
 def eigvals(
-        x: Union[(None, mx.ndarray.NDArray)], /
+    x: Union[(None, mx.ndarray.NDArray)], /
 ) -> Union[(None, mx.ndarray.NDArray)]:
     raise IvyNotImplementedException()
 
 
 def adjoint(
-        x: Union[(None, mx.ndarray.NDArray)],
-        /,
-        *,
-        out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
+    x: Union[(None, mx.ndarray.NDArray)],
+    /,
+    *,
+    out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
 ) -> Union[(None, mx.ndarray.NDArray)]:
     raise IvyNotImplementedException()
 
 
 def solve_triangular(
-        x1: Union[(None, mx.ndarray.NDArray)],
-        x2: Union[(None, mx.ndarray.NDArray)],
-        /,
-        *,
-        upper: bool = True,
-        adjoint: bool = False,
-        unit_diagonal: bool = False,
-        out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
+    x1: Union[(None, mx.ndarray.NDArray)],
+    x2: Union[(None, mx.ndarray.NDArray)],
+    /,
+    *,
+    upper: bool = True,
+    adjoint: bool = False,
+    unit_diagonal: bool = False,
+    out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
 ) -> Union[(None, mx.ndarray.NDArray)]:
     # Multiplying with a mask matrix can stop gradients on the diagonal.
     if unit_diagonal:
@@ -111,30 +111,30 @@ def solve_triangular(
 
 
 def multi_dot(
-        x: Sequence[Union[(None, mx.ndarray.NDArray)]],
-        /,
-        *,
-        out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
+    x: Sequence[Union[(None, mx.ndarray.NDArray)]],
+    /,
+    *,
+    out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
 ) -> None:
     raise IvyNotImplementedException()
 
 
 def cond(
-        x: Union[(None, mx.ndarray.NDArray)],
-        /,
-        *,
-        p: Optional[Union[(None, int, str)]] = None,
-        out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
+    x: Union[(None, mx.ndarray.NDArray)],
+    /,
+    *,
+    p: Optional[Union[(None, int, str)]] = None,
+    out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
 ) -> Union[(None, mx.ndarray.NDArray)]:
     raise IvyNotImplementedException()
 
 
 def dot(
-        a: mx.ndarray.NDArray,
-        b: mx.ndarray.NDArray,
-        /,
-        *,
-        out: Optional[mx.ndarray.NDArray] = None,
+    a: mx.ndarray.NDArray,
+    b: mx.ndarray.NDArray,
+    /,
+    *,
+    out: Optional[mx.ndarray.NDArray] = None,
 ) -> mx.ndarray.NDArray:
     return mx.symbol.dot(a, b, out=out)
 
diff --git a/ivy/functional/backends/numpy/experimental/linear_algebra.py b/ivy/functional/backends/numpy/experimental/linear_algebra.py
index 882fb99a8b502..4936ccf8f124d 100644
--- a/ivy/functional/backends/numpy/experimental/linear_algebra.py
+++ b/ivy/functional/backends/numpy/experimental/linear_algebra.py
@@ -19,6 +19,7 @@ def lu(
 ) -> np.ndarray:
     return sla.lu(x)
 
+
 def diagflat(
     x: np.ndarray,
     /,
diff --git a/ivy/functional/backends/paddle/experimental/linear_algebra.py b/ivy/functional/backends/paddle/experimental/linear_algebra.py
index 9b3eab7f5c7ba..48236ff7272bd 100644
--- a/ivy/functional/backends/paddle/experimental/linear_algebra.py
+++ b/ivy/functional/backends/paddle/experimental/linear_algebra.py
@@ -17,15 +17,15 @@
     backend_version,
 )
 def diagflat(
-        x: paddle.Tensor,
-        /,
-        *,
-        offset: Optional[int] = 0,
-        padding_value: Optional[float] = 0,
-        align: Optional[str] = "RIGHT_LEFT",
-        num_rows: Optional[int] = None,
-        num_cols: Optional[int] = None,
-        out: Optional[paddle.Tensor] = None,
+    x: paddle.Tensor,
+    /,
+    *,
+    offset: Optional[int] = 0,
+    padding_value: Optional[float] = 0,
+    align: Optional[str] = "RIGHT_LEFT",
+    num_rows: Optional[int] = None,
+    num_cols: Optional[int] = None,
+    out: Optional[paddle.Tensor] = None,
 ):
     diag = paddle.diag(x.flatten(), padding_value=padding_value, offset=offset)
     num_rows = num_rows if num_rows is not None else diag.shape[0]
@@ -50,30 +50,30 @@ def diagflat(
     {"2.6.0 and below": {"cpu": ("int8", "uint8", "int16")}}, backend_version
 )
 def kron(
-        a: paddle.Tensor,
-        b: paddle.Tensor,
-        /,
-        *,
-        out: Optional[paddle.Tensor] = None,
+    a: paddle.Tensor,
+    b: paddle.Tensor,
+    /,
+    *,
+    out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     return paddle.kron(a, b)
 
 
 def matrix_exp(
-        x: paddle.Tensor,
-        /,
-        *,
-        out: Optional[paddle.Tensor] = None,
+    x: paddle.Tensor,
+    /,
+    *,
+    out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     # return paddle.lu(x)
     raise IvyNotImplementedException()
 
 
 def matrix_exp(
-        x: paddle.Tensor,
-        /,
-        *,
-        out: Optional[paddle.Tensor] = None,
+    x: paddle.Tensor,
+    /,
+    *,
+    out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     # TODO: this is elementwise exp, should be changed to matrix exp ASAP
     # return paddle.exp(x)
@@ -81,7 +81,7 @@ def matrix_exp(
 
 
 def eig(
-        x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
+    x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
 ) -> Tuple[paddle.Tensor]:
     return paddle.linalg.eig(x)
 
@@ -91,10 +91,10 @@ def eigvals(x: paddle.Tensor, /) -> paddle.Tensor:
 
 
 def adjoint(
-        x: paddle.Tensor,
-        /,
-        *,
-        out: Optional[paddle.Tensor] = None,
+    x: paddle.Tensor,
+    /,
+    *,
+    out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     _check_valid_dimension_size(x)
     return paddle.moveaxis(x, -2, -1).conj()
@@ -105,14 +105,14 @@ def adjoint(
     backend_version,
 )
 def solve_triangular(
-        x1: paddle.Tensor,
-        x2: paddle.Tensor,
-        /,
-        *,
-        upper: bool = True,
-        adjoint: bool = False,
-        unit_diagonal: bool = False,
-        out: Optional[paddle.Tensor] = None,
+    x1: paddle.Tensor,
+    x2: paddle.Tensor,
+    /,
+    *,
+    upper: bool = True,
+    adjoint: bool = False,
+    unit_diagonal: bool = False,
+    out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     # Paddle does not support complex tensors for this operation (cpu and gpu),
     # so adjoint always equals transpose.
@@ -122,21 +122,21 @@ def solve_triangular(
 
 
 def cond(
-        x: paddle.Tensor,
-        /,
-        *,
-        p: Optional[Union[None, int, str]] = None,
-        out: Optional[paddle.Tensor] = None,
+    x: paddle.Tensor,
+    /,
+    *,
+    p: Optional[Union[None, int, str]] = None,
+    out: Optional[paddle.Tensor] = None,
 ) -> Any:
     raise IvyNotImplementedException()
 
 
 def lu_factor(
-        x: paddle.Tensor,
-        /,
-        *,
-        pivot: Optional[bool] = True,
-        out: Optional[paddle.Tensor] = None,
+    x: paddle.Tensor,
+    /,
+    *,
+    pivot: Optional[bool] = True,
+    out: Optional[paddle.Tensor] = None,
 ) -> Any:
     raise IvyNotImplementedException()
 
@@ -145,31 +145,31 @@ def lu_factor(
     {
         "2.6.0 and below": {
             "cpu": (
-                    "float32",
-                    "float64",
+                "float32",
+                "float64",
             ),
             "gpu": (
-                    "float16",
-                    "float32",
-                    "float64",
+                "float16",
+                "float32",
+                "float64",
             ),
         }
     },
     backend_version,
 )
 def dot(
-        a: paddle.Tensor,
-        b: paddle.Tensor,
-        /,
-        *,
-        out: Optional[paddle.Tensor] = None,
+    a: paddle.Tensor,
+    b: paddle.Tensor,
+    /,
+    *,
+    out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     if len(a.shape) == 0 or len(b.shape) == 0:
         return paddle.multiply(a, b)
     if (
-            len(a.shape) in [1, 2]
-            and len(b.shape) in [1, 2]
-            or (len(a.shape) >= 1 and len(b.shape) == 1)
+        len(a.shape) in [1, 2]
+        and len(b.shape) in [1, 2]
+        or (len(a.shape) >= 1 and len(b.shape) == 1)
     ):
         return paddle.matmul(a, b)
 
@@ -180,22 +180,22 @@ def dot(
     {
         "2.6.0 and below": {
             "cpu": (
-                    "float32",
-                    "float64",
+                "float32",
+                "float64",
             ),
             "gpu": (
-                    "float16",
-                    "float32",
-                    "float64",
+                "float16",
+                "float32",
+                "float64",
             ),
         }
     },
     backend_version,
 )
 def multi_dot(
-        x: paddle.Tensor,
-        /,
-        *,
-        out: Optional[paddle.Tensor] = None,
+    x: paddle.Tensor,
+    /,
+    *,
+    out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     return paddle.linalg.multi_dot(x)
diff --git a/ivy/functional/backends/tensorflow/experimental/linear_algebra.py b/ivy/functional/backends/tensorflow/experimental/linear_algebra.py
index bc80507ee868c..022cad415f099 100644
--- a/ivy/functional/backends/tensorflow/experimental/linear_algebra.py
+++ b/ivy/functional/backends/tensorflow/experimental/linear_algebra.py
@@ -15,16 +15,16 @@
     {"2.15.0 and below": ("int", "float16", "bfloat16")}, backend_version
 )
 def eigh_tridiagonal(
-        alpha: Union[tf.Tensor, tf.Variable],
-        beta: Union[tf.Tensor, tf.Variable],
-        /,
-        *,
-        eigvals_only: bool = True,
-        select: str = "a",
-        select_range: Optional[
-            Union[Tuple[int, int], List[int], tf.Tensor, tf.Variable]
-        ] = None,
-        tol: Optional[float] = None,
+    alpha: Union[tf.Tensor, tf.Variable],
+    beta: Union[tf.Tensor, tf.Variable],
+    /,
+    *,
+    eigvals_only: bool = True,
+    select: str = "a",
+    select_range: Optional[
+        Union[Tuple[int, int], List[int], tf.Tensor, tf.Variable]
+    ] = None,
+    tol: Optional[float] = None,
 ) -> Union[
     tf.Tensor,
     tf.Variable,
@@ -41,15 +41,15 @@ def eigh_tridiagonal(
 
 
 def diagflat(
-        x: Union[tf.Tensor, tf.Variable],
-        /,
-        *,
-        offset: int = 0,
-        padding_value: float = 0,
-        align: str = "RIGHT_LEFT",
-        num_rows: Optional[int] = None,
-        num_cols: Optional[int] = None,
-        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+    x: Union[tf.Tensor, tf.Variable],
+    /,
+    *,
+    offset: int = 0,
+    padding_value: float = 0,
+    align: str = "RIGHT_LEFT",
+    num_rows: Optional[int] = None,
+    num_cols: Optional[int] = None,
+    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ):
     if len(x.shape) > 1:
         x = tf.reshape(x, [-1])
@@ -76,29 +76,29 @@ def diagflat(
 
 
 def lu(
-        x: Union[tf.Tensor, tf.Variable],
-        /,
-        *,
-        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+    x: Union[tf.Tensor, tf.Variable],
+    /,
+    *,
+    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ) -> Union[tf.Tensor, tf.Variable]:
     return tf.linalg.lu(x)
 
 
 def kron(
-        a: Union[tf.Tensor, tf.Variable],
-        b: Union[tf.Tensor, tf.Variable],
-        /,
-        *,
-        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+    a: Union[tf.Tensor, tf.Variable],
+    b: Union[tf.Tensor, tf.Variable],
+    /,
+    *,
+    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ) -> Union[tf.Tensor, tf.Variable]:
     return tf.experimental.numpy.kron(a, b)
 
 
 def matrix_exp(
-        x: Union[tf.Tensor, tf.Variable],
-        /,
-        *,
-        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+    x: Union[tf.Tensor, tf.Variable],
+    /,
+    *,
+    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ) -> Union[tf.Tensor, tf.Variable]:
     return tf.linalg.expm(x)
 
@@ -106,18 +106,18 @@ def matrix_exp(
 @with_supported_dtypes(
     {
         "2.15.0 and below": (
-                "complex",
-                "float32",
-                "float64",
+            "complex",
+            "float32",
+            "float64",
         )
     },
     backend_version,
 )
 def eig(
-        x: Union[tf.Tensor, tf.Variable],
-        /,
-        *,
-        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+    x: Union[tf.Tensor, tf.Variable],
+    /,
+    *,
+    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ) -> Tuple[tf.Tensor]:
     return tf.linalg.eig(x)
 
@@ -125,25 +125,25 @@ def eig(
 @with_supported_dtypes(
     {
         "2.15.0 and below": (
-                "complex",
-                "float32",
-                "float64",
+            "complex",
+            "float32",
+            "float64",
         )
     },
     backend_version,
 )
 def eigvals(
-        x: Union[tf.Tensor, tf.Variable],
-        /,
+    x: Union[tf.Tensor, tf.Variable],
+    /,
 ) -> Union[tf.Tensor, tf.Variable]:
     return tf.linalg.eigvals(x)
 
 
 def adjoint(
-        x: Union[tf.Tensor, tf.Variable],
-        /,
-        *,
-        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+    x: Union[tf.Tensor, tf.Variable],
+    /,
+    *,
+    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ) -> Union[tf.Tensor, tf.Variable]:
     _check_valid_dimension_size(x)
     return tf.linalg.adjoint(x)
@@ -153,14 +153,14 @@ def adjoint(
     {"2.13.0 and below": ("int", "float16", "bfloat16", "float64")}, backend_version
 )
 def solve_triangular(
-        x1: Union[tf.Tensor, tf.Variable],
-        x2: Union[tf.Tensor, tf.Variable],
-        /,
-        *,
-        upper: bool = True,
-        adjoint: bool = False,
-        unit_diagonal: bool = False,
-        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+    x1: Union[tf.Tensor, tf.Variable],
+    x2: Union[tf.Tensor, tf.Variable],
+    /,
+    *,
+    upper: bool = True,
+    adjoint: bool = False,
+    unit_diagonal: bool = False,
+    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ) -> Union[tf.Tensor, tf.Variable]:
     # Multiplying with a mask matrix can stop gradients on the diagonal.
     if unit_diagonal:
@@ -172,21 +172,21 @@ def solve_triangular(
 @with_supported_dtypes(
     {
         "2.15.0 and below": (
-                "bfloat16",
-                "float16",
-                "float32",
-                "float64",
-                "int32",
-                "int64",
+            "bfloat16",
+            "float16",
+            "float32",
+            "float64",
+            "int32",
+            "int64",
         )
     },
     backend_version,
 )
 def multi_dot(
-        x: Sequence[Union[tf.Tensor, tf.Variable]],
-        /,
-        *,
-        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+    x: Sequence[Union[tf.Tensor, tf.Variable]],
+    /,
+    *,
+    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ) -> tf.Tensor:
     # This implementation simply chains tf.tensordot multiple times
     # TODO: reimplement this function once tf adds multi_dot or inplace updates
@@ -198,11 +198,11 @@ def multi_dot(
 
 @with_unsupported_dtypes({"1.25.0 and below": ("float16", "bfloat16")}, backend_version)
 def cond(
-        x: Union[tf.Tensor, tf.Variable],
-        /,
-        *,
-        p: Optional[Union[None, int, str]] = None,
-        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+    x: Union[tf.Tensor, tf.Variable],
+    /,
+    *,
+    p: Optional[Union[None, int, str]] = None,
+    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ) -> Union[tf.Tensor, tf.Variable]:
     svd = tf.linalg.svd(x, compute_uv=False)
     if len(x.shape) >= 3:
@@ -239,11 +239,11 @@ def cond(
 
 
 def lu_factor(
-        x: Union[tf.Tensor, tf.Variable],
-        /,
-        *,
-        pivot: Optional[bool] = True,
-        out: Optional[Union[tf.Tensor, tf.Variable]] = None,
+    x: Union[tf.Tensor, tf.Variable],
+    /,
+    *,
+    pivot: Optional[bool] = True,
+    out: Optional[Union[tf.Tensor, tf.Variable]] = None,
 ) -> Tuple[tf.Tensor]:
     raise IvyNotImplementedException()
 
@@ -251,25 +251,25 @@ def lu_factor(
 @with_supported_dtypes(
     {
         "2.15.0 and below": (
-                "bfloat16",
-                "float16",
-                "float32",
-                "float64",
-                "int32",
-                "int64",
-                "complex64",
-                "complex128",
-                "bfloat16",
+            "bfloat16",
+            "float16",
+            "float32",
+            "float64",
+            "int32",
+            "int64",
+            "complex64",
+            "complex128",
+            "bfloat16",
         )
     },
     backend_version,
 )
 def dot(
-        a: tf.Tensor,
-        b: tf.Tensor,
-        /,
-        *,
-        out: Optional[tf.Tensor] = None,
+    a: tf.Tensor,
+    b: tf.Tensor,
+    /,
+    *,
+    out: Optional[tf.Tensor] = None,
 ) -> tf.Tensor:
     a, b = ivy.promote_types_of_inputs(a, b)
     return tf.experimental.numpy.dot(a, b)
diff --git a/ivy/functional/backends/torch/experimental/linear_algebra.py b/ivy/functional/backends/torch/experimental/linear_algebra.py
index 544159dc713ef..ada63491551cc 100644
--- a/ivy/functional/backends/torch/experimental/linear_algebra.py
+++ b/ivy/functional/backends/torch/experimental/linear_algebra.py
@@ -14,15 +14,15 @@
 
 @with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
 def diagflat(
-        x: torch.Tensor,
-        /,
-        *,
-        offset: int = 0,
-        padding_value: float = 0,
-        align: str = "RIGHT_LEFT",
-        num_rows: int = -1,
-        num_cols: int = -1,
-        out: Optional[torch.Tensor] = None,
+    x: torch.Tensor,
+    /,
+    *,
+    offset: int = 0,
+    padding_value: float = 0,
+    align: str = "RIGHT_LEFT",
+    num_rows: int = -1,
+    num_cols: int = -1,
+    out: Optional[torch.Tensor] = None,
 ):
     if len(x.shape) > 1:
         x = torch.flatten(x)
@@ -101,20 +101,20 @@ def diagflat(
 
 
 def lu(
-        x: torch.Tensor,
-        /,
-        *,
-        out: Optional[torch.Tensor] = None,
+    x: torch.Tensor,
+    /,
+    *,
+    out: Optional[torch.Tensor] = None,
 ) -> torch.tensor:
     return torch.linalg.lu(x)
 
 
 def kron(
-        a: torch.Tensor,
-        b: torch.Tensor,
-        /,
-        *,
-        out: Optional[torch.Tensor] = None,
+    a: torch.Tensor,
+    b: torch.Tensor,
+    /,
+    *,
+    out: Optional[torch.Tensor] = None,
 ) -> torch.tensor:
     return torch.kron(a, b, out=out)
 
@@ -123,10 +123,10 @@ def kron(
 
 
 def matrix_exp(
-        x: torch.Tensor,
-        /,
-        *,
-        out: Optional[torch.Tensor] = None,
+    x: torch.Tensor,
+    /,
+    *,
+    out: Optional[torch.Tensor] = None,
 ) -> torch.Tensor:
     return torch.linalg.matrix_exp(x)
 
@@ -135,7 +135,7 @@ def matrix_exp(
 
 
 def eig(
-        x: torch.Tensor, /, *, out: Optional[torch.Tensor] = None
+    x: torch.Tensor, /, *, out: Optional[torch.Tensor] = None
 ) -> Tuple[torch.Tensor]:
     if not torch.is_complex(x):
         x = x.to(torch.complex128)
@@ -155,24 +155,24 @@ def eigvals(x: torch.Tensor, /) -> torch.Tensor:
 
 
 def adjoint(
-        x: torch.Tensor,
-        /,
-        *,
-        out: Optional[torch.Tensor] = None,
+    x: torch.Tensor,
+    /,
+    *,
+    out: Optional[torch.Tensor] = None,
 ) -> torch.Tensor:
     _check_valid_dimension_size(x)
     return torch.adjoint(x).resolve_conj()
 
 
 def solve_triangular(
-        x1: torch.Tensor,
-        x2: torch.Tensor,
-        /,
-        *,
-        upper: bool = True,
-        adjoint: bool = False,
-        unit_diagonal: bool = False,
-        out: Optional[torch.Tensor] = None,
+    x1: torch.Tensor,
+    x2: torch.Tensor,
+    /,
+    *,
+    upper: bool = True,
+    adjoint: bool = False,
+    unit_diagonal: bool = False,
+    out: Optional[torch.Tensor] = None,
 ) -> torch.Tensor:
     if adjoint:
         x1 = torch.adjoint(x1)
@@ -187,10 +187,10 @@ def solve_triangular(
 
 @with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
 def multi_dot(
-        x: Sequence[torch.Tensor],
-        /,
-        *,
-        out: Optional[torch.Tensor] = None,
+    x: Sequence[torch.Tensor],
+    /,
+    *,
+    out: Optional[torch.Tensor] = None,
 ) -> torch.Tensor:
     return torch.linalg.multi_dot(x, out=out)
 
@@ -200,11 +200,11 @@ def multi_dot(
 
 @with_unsupported_dtypes({"2.0.0 and below": ("float16", "bfloat16")}, backend_version)
 def cond(
-        x: torch.Tensor,
-        /,
-        *,
-        p: Optional[Union[None, int, str]] = None,
-        out: Optional[torch.Tensor] = None,
+    x: torch.Tensor,
+    /,
+    *,
+    p: Optional[Union[None, int, str]] = None,
+    out: Optional[torch.Tensor] = None,
 ) -> torch.Tensor:
     return torch.linalg.cond(x, p=p, out=out)
 
@@ -213,22 +213,22 @@ def cond(
 
 
 def lu_factor(
-        x: torch.Tensor,
-        /,
-        *,
-        pivot: Optional[bool] = True,
-        out: Optional[torch.Tensor] = None,
+    x: torch.Tensor,
+    /,
+    *,
+    pivot: Optional[bool] = True,
+    out: Optional[torch.Tensor] = None,
 ) -> Tuple[torch.Tensor]:
     raise IvyNotImplementedException()
 
 
 @with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
 def dot(
-        a: torch.Tensor,
-        b: torch.Tensor,
-        /,
-        *,
-        out: Optional[torch.Tensor] = None,
+    a: torch.Tensor,
+    b: torch.Tensor,
+    /,
+    *,
+    out: Optional[torch.Tensor] = None,
 ) -> torch.Tensor:
     a, b = ivy.promote_types_of_inputs(a, b)
     if a.dim() == 0 or b.dim() == 0:
diff --git a/ivy/functional/ivy/experimental/linear_algebra.py b/ivy/functional/ivy/experimental/linear_algebra.py
index ab152902b12c6..e559691e48590 100644
--- a/ivy/functional/ivy/experimental/linear_algebra.py
+++ b/ivy/functional/ivy/experimental/linear_algebra.py
@@ -227,6 +227,7 @@ def diagflat(
         out=out,
     )
 
+
 @handle_exceptions
 @handle_backend_invalid
 @handle_nestable
@@ -240,18 +241,20 @@ def lu(
     *,
     out: Optional[ivy.Array] = None,
 ) -> ivy.Array:
-    """
-    Perform LU decomposition of a square matrix using Doolittle's method.
+    """Perform LU decomposition of a square matrix using Doolittle's method.
 
     Args:
+    ----
     - x: a square numpy array representing the input matrix
 
     Returns:
+    -------
     - L: Lower triangular matrix
     - U: Upper triangular matrix
     """
     return current_backend(x).lu(x, out=out)
 
+
 @handle_exceptions
 @handle_backend_invalid
 @handle_nestable
@@ -294,8 +297,6 @@ def kron(
     return current_backend(a, b).kron(a, b, out=out)
 
 
-
-
 @handle_exceptions
 @handle_backend_invalid
 @handle_nestable
diff --git a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_linalg.py b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_linalg.py
index 69bfc2d03ce07..c140f86fd90be 100644
--- a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_linalg.py
+++ b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_linalg.py
@@ -1380,6 +1380,33 @@ def test_kronecker(*, data, test_flags, backend_fw, fn_name, on_device):
     )
 
 
+@handle_test(
+    fn_tree="functional.ivy.experimental.lu",
+    dtype_x=helpers.dtype_and_values(
+        available_dtypes=helpers.get_dtypes("valid"),
+        min_num_dims=2,
+        max_num_dims=2,
+        min_dim_size=2,
+        max_dim_size=2,
+        min_value=-100,
+        max_value=100,
+        allow_nan=False,
+        shared_dtype=True,
+    ),
+    test_gradients=st.just(False),
+)
+def test_lu(dtype_x, test_flags, backend_fw, fn_name, on_device):
+    dtype, x = dtype_x
+    helpers.test_function(
+        input_dtypes=dtype,
+        test_flags=test_flags,
+        on_device=on_device,
+        backend_to_test=backend_fw,
+        fn_name=fn_name,
+        x=x[0],
+    )
+
+
 @handle_test(
     fn_tree="functional.ivy.experimental.make_svd_non_negative",
     data=_make_svd_nn_data(),
@@ -1426,32 +1453,6 @@ def test_make_svd_non_negative(*, data, test_flags, backend_fw, fn_name, on_devi
         backend=backend_fw,
         ground_truth_backend=test_flags.ground_truth_backend,
     )
-@handle_test(
-    fn_tree="functional.ivy.experimental.lu",
-    dtype_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("valid"),
-        min_num_dims=2,
-        max_num_dims=2,
-        min_dim_size=2,
-        max_dim_size=2,
-        min_value=-100,
-        max_value=100,
-        allow_nan=False,
-        shared_dtype=True,
-    ),
-    test_gradients=st.just(False),
-)
-def test_lu(dtype_x, test_flags, backend_fw, fn_name, on_device):
-    dtype, x = dtype_x
-    helpers.test_function(
-        input_dtypes=dtype,
-        test_flags=test_flags,
-        on_device=on_device,
-        backend_to_test=backend_fw,
-        fn_name=fn_name,
-        x=x[0],
-    )
-
 
 
 # matrix_exp

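The lu docstring in this patch names Doolittle's method, while the merged
function body dispatches to each backend's native factorization (e.g.
tf.linalg.lu, torch.linalg.lu). As a reference point, a minimal
self-contained sketch of the Doolittle scheme itself (no pivoting; assumes a
square matrix whose leading minors are non-zero) looks like this --
illustrative only, not the committed implementation:

    def doolittle_lu(a):
        n = len(a)
        L = [[0.0] * n for _ in range(n)]
        U = [[0.0] * n for _ in range(n)]
        for j in range(n):
            L[j][j] = 1.0  # Doolittle convention: unit diagonal on L
            for i in range(j + 1):
                # U row i: remove what earlier columns already explain
                U[i][j] = a[i][j] - sum(L[i][k] * U[k][j] for k in range(i))
            for i in range(j + 1, n):
                # divide by pivot U[j][j]; a zero pivot needs row pivoting
                L[i][j] = (
                    a[i][j] - sum(L[i][k] * U[k][j] for k in range(j))
                ) / U[j][j]
        return L, U

    # Example: doolittle_lu([[4.0, 3.0], [6.0, 3.0]])
    # -> L = [[1.0, 0.0], [1.5, 1.0]], U = [[4.0, 3.0], [0.0, -1.5]]
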
From 0f277125de5a5379b6a09cfbb42bc94af344cee7 Mon Sep 17 00:00:00 2001
From: HamzaGbada <hamza.gbada@gmail.com>
Date: Wed, 6 Mar 2024 12:29:25 +0100
Subject: [PATCH 08/10] fix: lu backend naming

---
 ivy/data_classes/array/experimental/linear_algebra.py         | 4 ++--
 ivy/functional/backends/paddle/experimental/linear_algebra.py | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/ivy/data_classes/array/experimental/linear_algebra.py b/ivy/data_classes/array/experimental/linear_algebra.py
index 999b5ae9c0d24..99fd01811348c 100644
--- a/ivy/data_classes/array/experimental/linear_algebra.py
+++ b/ivy/data_classes/array/experimental/linear_algebra.py
@@ -110,7 +110,7 @@ def diagflat(
             out=out,
         )
 
-    def kron(
+    def lu(
             self: ivy.Array,
             /,
             *,
@@ -124,7 +124,7 @@ def kron(
         --------
         >>> a = ivy.array([1,2])
         >>> b = ivy.array([3,4])
-        >>> a.diagflat(b)
+        >>> a.lu(b)
         ivy.array([3, 4, 6, 8])
         """
         return ivy.lu(self._data, out=out)
diff --git a/ivy/functional/backends/paddle/experimental/linear_algebra.py b/ivy/functional/backends/paddle/experimental/linear_algebra.py
index 9b3eab7f5c7ba..5ee4ff9c5540c 100644
--- a/ivy/functional/backends/paddle/experimental/linear_algebra.py
+++ b/ivy/functional/backends/paddle/experimental/linear_algebra.py
@@ -59,7 +59,7 @@ def kron(
     return paddle.kron(a, b)
 
 
-def matrix_exp(
+def lu(
         x: paddle.Tensor,
         /,
         *,

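After this rename, the paddle backend's lu still raises
IvyNotImplementedException; its body only carries the commented-out hint
"# return paddle.lu(x)". A hedged sketch of how it could eventually be wired
up, assuming Paddle exposes the factorization as paddle.linalg.lu returning
the packed LU matrix plus pivot indices (verify against the installed Paddle
version before relying on it):

    import paddle

    def lu(x, /, *, out=None):
        # Assumption: paddle.linalg.lu(x) -> (packed LU factors, pivots).
        # Recovering separate P, L, U would need a further unpacking step
        # (paddle.linalg.lu_unpack, if available in the target version).
        lu_packed, pivots = paddle.linalg.lu(x)
        return lu_packed, pivots
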
From 2d66e1511f731175669da4c2d2017c8c575050b0 Mon Sep 17 00:00:00 2001
From: HamzaGbada <hamza.gbada@gmail.com>
Date: Wed, 6 Mar 2024 13:58:15 +0100
Subject: [PATCH 09/10] fix: lu backend naming

---
 .../array/experimental/linear_algebra.py      | 304 +++++++++---------
 1 file changed, 154 insertions(+), 150 deletions(-)

diff --git a/ivy/data_classes/array/experimental/linear_algebra.py b/ivy/data_classes/array/experimental/linear_algebra.py
index 99fd01811348c..003b928e2c450 100644
--- a/ivy/data_classes/array/experimental/linear_algebra.py
+++ b/ivy/data_classes/array/experimental/linear_algebra.py
@@ -8,16 +8,16 @@
 
 class _ArrayWithLinearAlgebraExperimental(abc.ABC):
     def eigh_tridiagonal(
-            self: Union[ivy.Array, ivy.NativeArray],
-            beta: Union[ivy.Array, ivy.NativeArray],
-            /,
-            *,
-            eigvals_only: bool = True,
-            select: str = "a",
-            select_range: Optional[
-                Union[Tuple[int, int], List[int], ivy.Array, ivy.NativeArray]
-            ] = None,
-            tol: Optional[float] = None,
+        self: Union[ivy.Array, ivy.NativeArray],
+        beta: Union[ivy.Array, ivy.NativeArray],
+        /,
+        *,
+        eigvals_only: bool = True,
+        select: str = "a",
+        select_range: Optional[
+            Union[Tuple[int, int], List[int], ivy.Array, ivy.NativeArray]
+        ] = None,
+        tol: Optional[float] = None,
     ) -> Union[ivy.Array, Tuple[ivy.Array, ivy.Array]]:
         """ivy.Array instance method variant of ivy.eigh_tridiagonal. This
         method simply wraps the function, and so the docstring for
@@ -78,15 +78,15 @@ def eigh_tridiagonal(
         )
 
     def diagflat(
-            self: Union[ivy.Array, ivy.NativeArray],
-            /,
-            *,
-            offset: int = 0,
-            padding_value: float = 0,
-            align: str = "RIGHT_LEFT",
-            num_rows: int = -1,
-            num_cols: int = -1,
-            out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
+        self: Union[ivy.Array, ivy.NativeArray],
+        /,
+        *,
+        offset: int = 0,
+        padding_value: float = 0,
+        align: str = "RIGHT_LEFT",
+        num_rows: int = -1,
+        num_cols: int = -1,
+        out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.diagflat. This method
         simply wraps the function, and so the docstring for ivy.diagflat also
@@ -111,30 +111,34 @@ def diagflat(
         )
 
     def lu(
-            self: ivy.Array,
-            /,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: ivy.Array,
+        /,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.lu. This method simply
-        wraps the function, and so the docstring for ivy.lu also applies to
-        this method with minimal changes.
-
-        Examples
-        --------
-        >>> a = ivy.array([1,2])
-        >>> b = ivy.array([3,4])
-        >>> a.lu(b)
-        ivy.array([3, 4, 6, 8])
+        wraps the function, and so the docstring for ivy.lu also applies to
+        this method with minimal changes.
+
+        Examples
+        --------
+        >>> x = ivy.array([[1.0, 2.0], [3.0, 4.0]])
+        >>> ivy.lu(x)
+        (ivy.array([[0., 1.],
+                    [1., 0.]]),
+         ivy.array([[1.        , 0.        ],
+                    [0.33333334, 1.        ]]),
+         ivy.array([[3.        , 4.        ],
+                    [0.        , 0.66666663]]))
         """
         return ivy.lu(self._data, out=out)
 
     def kron(
-            self: ivy.Array,
-            b: ivy.Array,
-            /,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: ivy.Array,
+        b: ivy.Array,
+        /,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.kron. This method simply
         wraps the function, and so the docstring for ivy.kron also applies to
@@ -169,8 +173,8 @@ def matrix_exp(self: ivy.Array, /, *, out: Optional[ivy.Array] = None) -> ivy.Ar
         return ivy.matrix_exp(self._data, out=out)
 
     def eig(
-            self: ivy.Array,
-            /,
+        self: ivy.Array,
+        /,
     ) -> Tuple[ivy.Array, ...]:
         """ivy.Array instance method variant of ivy.eig. This method simply
         wraps the function, and so the docstring for ivy.eig also applies to
@@ -189,8 +193,8 @@ def eig(
         return ivy.eig(self._data)
 
     def eigvals(
-            self: ivy.Array,
-            /,
+        self: ivy.Array,
+        /,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.eigvals. This method simply
         wraps the function, and so the docstring for ivy.eigvals also applies
@@ -205,10 +209,10 @@ def eigvals(
         return ivy.eigvals(self._data)
 
     def adjoint(
-            self: ivy.Array,
-            /,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: ivy.Array,
+        /,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.adjoint. This method simply
         wraps the function, and so the docstring for ivy.adjoint also applies
@@ -229,11 +233,11 @@ def adjoint(
         )
 
     def multi_dot(
-            self: ivy.Array,
-            x: Sequence[Union[ivy.Array, ivy.NativeArray]],
-            /,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: ivy.Array,
+        x: Sequence[Union[ivy.Array, ivy.NativeArray]],
+        /,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.multi_dot. This method
         simply wraps the function, and so the docstring for ivy.multi_dot also
@@ -251,7 +255,7 @@ def multi_dot(
         return ivy.multi_dot((self._data, *x), out=out)
 
     def cond(
-            self: ivy.Array, /, *, p: Optional[Union[int, float, str]] = None
+        self: ivy.Array, /, *, p: Optional[Union[int, float, str]] = None
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.cond. This method simply
         wraps the function, and so the docstring for ivy.cond also applies to
@@ -270,13 +274,13 @@ def cond(
         return ivy.cond(self._data, p=p)
 
     def mode_dot(
-            self: Union[ivy.Array, ivy.NativeArray],
-            /,
-            matrix_or_vector: Union[ivy.Array, ivy.NativeArray],
-            mode: int,
-            transpose: Optional[bool] = False,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: Union[ivy.Array, ivy.NativeArray],
+        /,
+        matrix_or_vector: Union[ivy.Array, ivy.NativeArray],
+        mode: int,
+        transpose: Optional[bool] = False,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.mode_dot. This method
         simply wraps the function, and so the docstring for ivy.mode_dot also
@@ -310,14 +314,14 @@ def mode_dot(
         return ivy.mode_dot(self._data, matrix_or_vector, mode, transpose, out=out)
 
     def multi_mode_dot(
-            self: Union[ivy.Array, ivy.NativeArray],
-            mat_or_vec_list: Sequence[Union[ivy.Array, ivy.NativeArray]],
-            /,
-            modes: Optional[Sequence[int]] = None,
-            skip: Optional[Sequence[int]] = None,
-            transpose: Optional[bool] = False,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: Union[ivy.Array, ivy.NativeArray],
+        mat_or_vec_list: Sequence[Union[ivy.Array, ivy.NativeArray]],
+        /,
+        modes: Optional[Sequence[int]] = None,
+        skip: Optional[Sequence[int]] = None,
+        transpose: Optional[bool] = False,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         r"""ivy.Array instance method variant of ivy.multi_mode_dot. This method
         simply wraps the function, and so the docstring for ivy.multi_mode_dot
@@ -360,10 +364,10 @@ def multi_mode_dot(
         )
 
     def svd_flip(
-            self: Union[ivy.Array, ivy.NativeArray],
-            V: Union[ivy.Array, ivy.NativeArray],
-            /,
-            u_based_decision: Optional[bool] = True,
+        self: Union[ivy.Array, ivy.NativeArray],
+        V: Union[ivy.Array, ivy.NativeArray],
+        /,
+        u_based_decision: Optional[bool] = True,
     ) -> Tuple[ivy.Array, ivy.Array]:
         """ivy.Array instance method variant of ivy.svd_flip. This method
         simply wraps the function, and so the docstring for ivy.svd_flip also
@@ -387,13 +391,13 @@ def svd_flip(
         return ivy.svd_flip(self._data, V, u_based_decision)
 
     def make_svd_non_negative(
-            self: Union[ivy.Array, ivy.NativeArray],
-            U: Union[ivy.Array, ivy.NativeArray],
-            S: Union[ivy.Array, ivy.NativeArray],
-            V: Union[ivy.Array, ivy.NativeArray],
-            /,
-            *,
-            nntype: Optional[Literal["nndsvd", "nndsvda"]] = "nndsvd",
+        self: Union[ivy.Array, ivy.NativeArray],
+        U: Union[ivy.Array, ivy.NativeArray],
+        S: Union[ivy.Array, ivy.NativeArray],
+        V: Union[ivy.Array, ivy.NativeArray],
+        /,
+        *,
+        nntype: Optional[Literal["nndsvd", "nndsvda"]] = "nndsvd",
     ) -> Tuple[ivy.Array, ivy.Array]:
         """ivy.Array instance method variant of ivy.make_svd_non_negative. This
         method simply wraps the function, and so the docstring for
@@ -419,11 +423,11 @@ def make_svd_non_negative(
         return ivy.make_svd_non_negative(self._data, U, S, V, nntype=nntype)
 
     def tensor_train(
-            self: Union[ivy.Array, ivy.NativeArray],
-            rank: Union[int, Sequence[int]],
-            /,
-            svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
-            verbose: Optional[bool] = False,
+        self: Union[ivy.Array, ivy.NativeArray],
+        rank: Union[int, Sequence[int]],
+        /,
+        svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
+        verbose: Optional[bool] = False,
     ) -> ivy.TTTensor:
         """ivy.Array instance method variant of ivy.tensor_train. This method
         simply wraps the function, and so the docstring for ivy.tensor_train
@@ -449,10 +453,10 @@ def tensor_train(
         return ivy.tensor_train(self._data, rank, svd=svd, verbose=verbose)
 
     def truncated_svd(
-            self: Union[ivy.Array, ivy.NativeArray],
-            /,
-            compute_uv: bool = True,
-            n_eigenvecs: Optional[int] = None,
+        self: Union[ivy.Array, ivy.NativeArray],
+        /,
+        compute_uv: bool = True,
+        n_eigenvecs: Optional[int] = None,
     ) -> Union[ivy.Array, Tuple[ivy.Array, ivy.Array, ivy.Array]]:
         """ivy.Array instance method variant of ivy.make_svd_non_negative. This
         method simply wraps the function, and so the docstring for
@@ -481,17 +485,17 @@ def truncated_svd(
         return ivy.truncated_svd(self._data, compute_uv, n_eigenvecs)
 
     def initialize_tucker(
-            self: Union[ivy.Array, ivy.NativeArray],
-            rank: Sequence[int],
-            modes: Sequence[int],
-            /,
-            *,
-            init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
-            seed: Optional[int] = None,
-            svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
-            non_negative: Optional[bool] = False,
-            mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
-            svd_mask_repeats: Optional[int] = 5,
+        self: Union[ivy.Array, ivy.NativeArray],
+        rank: Sequence[int],
+        modes: Sequence[int],
+        /,
+        *,
+        init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
+        seed: Optional[int] = None,
+        svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
+        non_negative: Optional[bool] = False,
+        mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
+        svd_mask_repeats: Optional[int] = 5,
     ) -> Tuple[ivy.Array, Sequence[ivy.Array]]:
         """ivy.Array instance method variant of ivy.initialize_tucker. This
         method simply wraps the function, and so the docstring for
@@ -543,20 +547,20 @@ def initialize_tucker(
         )
 
     def partial_tucker(
-            self: Union[ivy.Array, ivy.NativeArray],
-            rank: Optional[Sequence[int]] = None,
-            modes: Optional[Sequence[int]] = None,
-            /,
-            *,
-            n_iter_max: Optional[int] = 100,
-            init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
-            svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
-            seed: Optional[int] = None,
-            mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
-            svd_mask_repeats: Optional[int] = 5,
-            tol: Optional[float] = 10e-5,
-            verbose: Optional[bool] = False,
-            return_errors: Optional[bool] = False,
+        self: Union[ivy.Array, ivy.NativeArray],
+        rank: Optional[Sequence[int]] = None,
+        modes: Optional[Sequence[int]] = None,
+        /,
+        *,
+        n_iter_max: Optional[int] = 100,
+        init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
+        svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
+        seed: Optional[int] = None,
+        mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
+        svd_mask_repeats: Optional[int] = 5,
+        tol: Optional[float] = 10e-5,
+        verbose: Optional[bool] = False,
+        return_errors: Optional[bool] = False,
     ) -> Tuple[ivy.Array, Sequence[ivy.Array]]:
         """ivy.Array instance method variant of ivy.partial_tucker. This method
         simply wraps the function, and so the docstring for ivy.partial_tucker
@@ -624,20 +628,20 @@ def partial_tucker(
         )
 
     def tucker(
-            self: Union[ivy.Array, ivy.NativeArray],
-            rank: Optional[Sequence[int]] = None,
-            /,
-            *,
-            fixed_factors: Optional[Sequence[int]] = None,
-            n_iter_max: Optional[int] = 100,
-            init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
-            svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
-            seed: Optional[int] = None,
-            mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
-            svd_mask_repeats: Optional[int] = 5,
-            tol: Optional[float] = 10e-5,
-            verbose: Optional[bool] = False,
-            return_errors: Optional[bool] = False,
+        self: Union[ivy.Array, ivy.NativeArray],
+        rank: Optional[Sequence[int]] = None,
+        /,
+        *,
+        fixed_factors: Optional[Sequence[int]] = None,
+        n_iter_max: Optional[int] = 100,
+        init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd",
+        svd: Optional[Literal["truncated_svd"]] = "truncated_svd",
+        seed: Optional[int] = None,
+        mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
+        svd_mask_repeats: Optional[int] = 5,
+        tol: Optional[float] = 10e-5,
+        verbose: Optional[bool] = False,
+        return_errors: Optional[bool] = False,
     ):
         """ivy.Array instance method variant of ivy.tucker. This method simply
         wraps the function, and so the docstring for ivy.tucker also applies to
@@ -711,10 +715,10 @@ def tucker(
         )
 
     def tt_matrix_to_tensor(
-            self: Union[ivy.Array, ivy.NativeArray],
-            /,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: Union[ivy.Array, ivy.NativeArray],
+        /,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """Ivy.Array instance method variant of ivy.tt_matrix_to_tensor. This
         method simply wraps the function, and so the docstring for
@@ -763,11 +767,11 @@ def tt_matrix_to_tensor(
         return ivy.tt_matrix_to_tensor(self._data, out=out)
 
     def dot(
-            self: Union[ivy.Array, ivy.NativeArray],
-            b: Union[ivy.Array, ivy.NativeArray],
-            /,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: Union[ivy.Array, ivy.NativeArray],
+        b: Union[ivy.Array, ivy.NativeArray],
+        /,
+        *,
+        out: Optional[ivy.Array] = None,
     ):
         """Compute the dot product between two arrays `a` and `b` using the
         current backend's implementation. The dot product is defined as the sum
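
The docstring is cut off here by the hunk boundary, but for 1-D operands the definition it begins states the familiar sum of element-wise products. A tiny illustrative sketch of that special case (plain Python, hypothetical helper name, not part of the patch):

# Sketch of the 1-D dot semantics the docstring above describes;
# ivy.dot itself dispatches to the active backend instead.
def dot_1d(a, b):
    assert len(a) == len(b), "vectors must be the same length"
    return sum(ai * bi for ai, bi in zip(a, b))

assert dot_1d([1.0, 2.0, 3.0], [4.0, 5.0, 6.0]) == 32.0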
@@ -815,12 +819,12 @@ def dot(
         return ivy.dot(self._data, b, out=out)
 
     def general_inner_product(
-            self: Union[ivy.Array, ivy.NativeArray],
-            b: Union[ivy.Array, ivy.NativeArray],
-            n_modes: Optional[int] = None,
-            /,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: Union[ivy.Array, ivy.NativeArray],
+        b: Union[ivy.Array, ivy.NativeArray],
+        n_modes: Optional[int] = None,
+        /,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.general_inner_product. This
         method simply wraps the function, and so the docstring for
@@ -871,11 +875,11 @@ def general_inner_product(
         return ivy.general_inner_product(self, b, n_modes, out=out)
 
     def higher_order_moment(
-            self: Union[ivy.Array, ivy.NativeArray],
-            order: int,
-            /,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: Union[ivy.Array, ivy.NativeArray],
+        order: int,
+        /,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.higher_order_moment. This
         method simply wraps the function, and so the docstring for
@@ -912,11 +916,11 @@ def higher_order_moment(
         return ivy.higher_order_moment(self._data, order, out=out)
 
     def batched_outer(
-            self: ivy.Array,
-            tensors: Sequence[Union[ivy.Array, ivy.NativeArray]],
-            /,
-            *,
-            out: Optional[ivy.Array] = None,
+        self: ivy.Array,
+        tensors: Sequence[Union[ivy.Array, ivy.NativeArray]],
+        /,
+        *,
+        out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """Ivy Array instance method variant of ivy.batched_outer. This method
         simply wraps the function, and so the docstring for ivy.batched_outer

From 1447d51481271bb0812be61b7935f7f8975a9f88 Mon Sep 17 00:00:00 2001
From: ivy-branch <ivy.branch@lets-unify.ai>
Date: Wed, 6 Mar 2024 14:09:05 +0000
Subject: [PATCH 10/10] 🤖 Lint code
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../array/experimental/linear_algebra.py      |   8 +-
 .../paddle/experimental/linear_algebra.py     | 134 +++++++++---------
 2 files changed, 71 insertions(+), 71 deletions(-)

diff --git a/ivy/data_classes/array/experimental/linear_algebra.py b/ivy/data_classes/array/experimental/linear_algebra.py
index 003b928e2c450..a7f1c744ccea0 100644
--- a/ivy/data_classes/array/experimental/linear_algebra.py
+++ b/ivy/data_classes/array/experimental/linear_algebra.py
@@ -117,11 +117,11 @@ def lu(
         out: Optional[ivy.Array] = None,
     ) -> ivy.Array:
         """ivy.Array instance method variant of ivy.lu. This method simply
-               wraps the function, and so the docstring for ivy.lu also applies to
-               this method with minimal changes.
+        wraps the function, and so the docstring for ivy.lu also applies to
+        this method with minimal changes.
 
-               Examples
-               --------
+        Examples
+        --------
         >>> x = ivy.array([[1.0,2.0],[3.0,4.0]])
         >>> x.lu()
         ivy.array([[0., 1.],
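
The expected output in the docstring example is truncated by the hunk, but the factorization itself is easy to check: the no-pivoting LU factors of that matrix reconstruct it exactly. A NumPy sketch, for verification only and independent of the patch:

import numpy as np

x = np.array([[1.0, 2.0], [3.0, 4.0]])
L = np.array([[1.0, 0.0], [3.0, 1.0]])   # unit lower-triangular factor
U = np.array([[1.0, 2.0], [0.0, -2.0]])  # upper-triangular factor
assert np.allclose(L @ U, x)             # L @ U reconstructs x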
diff --git a/ivy/functional/backends/paddle/experimental/linear_algebra.py b/ivy/functional/backends/paddle/experimental/linear_algebra.py
index 5ee4ff9c5540c..d0334bf39e5e8 100644
--- a/ivy/functional/backends/paddle/experimental/linear_algebra.py
+++ b/ivy/functional/backends/paddle/experimental/linear_algebra.py
@@ -17,15 +17,15 @@
     backend_version,
 )
 def diagflat(
-        x: paddle.Tensor,
-        /,
-        *,
-        offset: Optional[int] = 0,
-        padding_value: Optional[float] = 0,
-        align: Optional[str] = "RIGHT_LEFT",
-        num_rows: Optional[int] = None,
-        num_cols: Optional[int] = None,
-        out: Optional[paddle.Tensor] = None,
+    x: paddle.Tensor,
+    /,
+    *,
+    offset: Optional[int] = 0,
+    padding_value: Optional[float] = 0,
+    align: Optional[str] = "RIGHT_LEFT",
+    num_rows: Optional[int] = None,
+    num_cols: Optional[int] = None,
+    out: Optional[paddle.Tensor] = None,
 ):
     diag = paddle.diag(x.flatten(), padding_value=padding_value, offset=offset)
     num_rows = num_rows if num_rows is not None else diag.shape[0]
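
diagflat above is built on paddle.diag, which places the flattened input on the chosen diagonal and fills the remaining entries with padding_value. A quick usage sketch of that primitive (values arbitrary):

import paddle

v = paddle.to_tensor([1.0, 2.0, 3.0])
# 4x4 result: v sits one position above the main diagonal (offset=1),
# every other entry is the padding value.
m = paddle.diag(v, offset=1, padding_value=0.0)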
@@ -50,30 +50,30 @@ def diagflat(
     {"2.6.0 and below": {"cpu": ("int8", "uint8", "int16")}}, backend_version
 )
 def kron(
-        a: paddle.Tensor,
-        b: paddle.Tensor,
-        /,
-        *,
-        out: Optional[paddle.Tensor] = None,
+    a: paddle.Tensor,
+    b: paddle.Tensor,
+    /,
+    *,
+    out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     return paddle.kron(a, b)
 
 
 def lu(
-        x: paddle.Tensor,
-        /,
-        *,
-        out: Optional[paddle.Tensor] = None,
+    x: paddle.Tensor,
+    /,
+    *,
+    out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     # return paddle.lu(x)
     raise IvyNotImplementedException()
 
 
 def matrix_exp(
-        x: paddle.Tensor,
-        /,
-        *,
-        out: Optional[paddle.Tensor] = None,
+    x: paddle.Tensor,
+    /,
+    *,
+    out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     # TODO: this is elementwise exp, should be changed to matrix exp ASAP
     # return paddle.exp(x)
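
The commented-out `return paddle.lu(x)` in the lu stub above hints at the intended direction. One possible sketch (an assumption, not the patch's final design) leans on paddle.linalg.lu, which returns the packed factors together with pivot indices:

import paddle

def lu_via_paddle(x: paddle.Tensor) -> paddle.Tensor:
    # paddle.linalg.lu returns the packed L/U factors and the pivots;
    # returning only the factors discards the permutation, which a
    # complete implementation would need to expose as well.
    factors, pivots = paddle.linalg.lu(x, pivot=True)
    return factors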
@@ -81,7 +81,7 @@ def matrix_exp(
 
 
 def eig(
-        x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
+    x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
 ) -> Tuple[paddle.Tensor]:
     return paddle.linalg.eig(x)
 
@@ -91,10 +91,10 @@ def eigvals(x: paddle.Tensor, /) -> paddle.Tensor:
 
 
 def adjoint(
-        x: paddle.Tensor,
-        /,
-        *,
-        out: Optional[paddle.Tensor] = None,
+    x: paddle.Tensor,
+    /,
+    *,
+    out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     _check_valid_dimension_size(x)
     return paddle.moveaxis(x, -2, -1).conj()
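
adjoint here is a swap of the last two axes followed by conjugation; for real input this reduces to a plain transpose. A quick check of the same two calls:

import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
adj = paddle.moveaxis(x, -2, -1).conj()  # conjugate transpose
# Real input: adj is simply the transpose, [[1., 3.], [2., 4.]]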
@@ -105,14 +105,14 @@ def adjoint(
     backend_version,
 )
 def solve_triangular(
-        x1: paddle.Tensor,
-        x2: paddle.Tensor,
-        /,
-        *,
-        upper: bool = True,
-        adjoint: bool = False,
-        unit_diagonal: bool = False,
-        out: Optional[paddle.Tensor] = None,
+    x1: paddle.Tensor,
+    x2: paddle.Tensor,
+    /,
+    *,
+    upper: bool = True,
+    adjoint: bool = False,
+    unit_diagonal: bool = False,
+    out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     # Paddle does not support complex tensors for this operation (cpu and gpu),
     # so adjoint always equals transpose.
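
Because the conjugate transpose of a real matrix equals its plain transpose, an adjoint request can be lowered to the transpose flag, as the comment notes. A small sketch with paddle.linalg.triangular_solve (values arbitrary):

import paddle

A = paddle.to_tensor([[2.0, 0.0], [1.0, 3.0]])  # lower triangular
b = paddle.to_tensor([[4.0], [7.0]])
# transpose=True solves A^T x = b, which for real-valued A is the
# same system an adjoint solve would produce.
x = paddle.linalg.triangular_solve(A, b, upper=False, transpose=True)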
@@ -122,21 +122,21 @@ def solve_triangular(
 
 
 def cond(
-        x: paddle.Tensor,
-        /,
-        *,
-        p: Optional[Union[None, int, str]] = None,
-        out: Optional[paddle.Tensor] = None,
+    x: paddle.Tensor,
+    /,
+    *,
+    p: Optional[Union[None, int, str]] = None,
+    out: Optional[paddle.Tensor] = None,
 ) -> Any:
     raise IvyNotImplementedException()
 
 
 def lu_factor(
-        x: paddle.Tensor,
-        /,
-        *,
-        pivot: Optional[bool] = True,
-        out: Optional[paddle.Tensor] = None,
+    x: paddle.Tensor,
+    /,
+    *,
+    pivot: Optional[bool] = True,
+    out: Optional[paddle.Tensor] = None,
 ) -> Any:
     raise IvyNotImplementedException()
 
@@ -145,31 +145,31 @@ def lu_factor(
     {
         "2.6.0 and below": {
             "cpu": (
-                    "float32",
-                    "float64",
+                "float32",
+                "float64",
             ),
             "gpu": (
-                    "float16",
-                    "float32",
-                    "float64",
+                "float16",
+                "float32",
+                "float64",
             ),
         }
     },
     backend_version,
 )
 def dot(
-        a: paddle.Tensor,
-        b: paddle.Tensor,
-        /,
-        *,
-        out: Optional[paddle.Tensor] = None,
+    a: paddle.Tensor,
+    b: paddle.Tensor,
+    /,
+    *,
+    out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     if len(a.shape) == 0 or len(b.shape) == 0:
         return paddle.multiply(a, b)
     if (
-            len(a.shape) in [1, 2]
-            and len(b.shape) in [1, 2]
-            or (len(a.shape) >= 1 and len(b.shape) == 1)
+        len(a.shape) in [1, 2]
+        and len(b.shape) in [1, 2]
+        or (len(a.shape) >= 1 and len(b.shape) == 1)
     ):
         return paddle.matmul(a, b)
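
The dispatch in dot distinguishes scalar, vector, and matrix operands; the branches reduce to the following primitive calls (shapes arbitrary, illustration only):

import paddle

s = paddle.to_tensor(2.0)                       # 0-d -> multiply branch
v = paddle.to_tensor([1.0, 2.0])                # 1-D -> matmul branch
M = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])  # 2-D -> matmul branch

out_scalar = paddle.multiply(s, s)  # elementwise product of scalars
out_inner = paddle.matmul(v, v)     # inner product: 1*1 + 2*2 = 5
out_matvec = paddle.matmul(M, v)    # matrix-vector product: [5., 11.]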
 
@@ -180,22 +180,22 @@ def dot(
     {
         "2.6.0 and below": {
             "cpu": (
-                    "float32",
-                    "float64",
+                "float32",
+                "float64",
             ),
             "gpu": (
-                    "float16",
-                    "float32",
-                    "float64",
+                "float16",
+                "float32",
+                "float64",
             ),
         }
     },
     backend_version,
 )
 def multi_dot(
-        x: paddle.Tensor,
-        /,
-        *,
-        out: Optional[paddle.Tensor] = None,
+    x: paddle.Tensor,
+    /,
+    *,
+    out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
     return paddle.linalg.multi_dot(x)