Commit
minor: small stylistic changes
mrava87 committed Mar 8, 2024
1 parent bed0059 commit 90f61fc
Showing 5 changed files with 153 additions and 152 deletions.
5 changes: 2 additions & 3 deletions examples/plot_stacked_array.py
@@ -104,7 +104,6 @@
print('Dot-product:', dot_arr)
print('Dot-product (np):', np.dot(full_arr1, full_arr2))


###############################################################################
# **Norms**
l0norm = arr1.norm(0)
@@ -120,8 +119,8 @@

###############################################################################
# Now that we have a way to combine multiple :py:class:`pylops_mpi.DistributedArray` objects into a :py:class:`pylops_mpi.StackedDistributedArray`,
# let's see how we can apply operators on them. More specifically this can be
# done using the :py:class:`pylops_mpi.StackedVStack` operator that takes multiple
# let's see how we can apply operators to them. More specifically this can be
# done using the :py:class:`pylops_mpi.MPIStackedVStack` operator that takes multiple
# :py:class:`pylops_mpi.MPILinearOperator` objects, each acting on one specific
# distributed array
x = pylops_mpi.DistributedArray(global_shape=size * 10,
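For context on the renamed operator, here is a minimal sketch of how :py:class:`pylops_mpi.MPIStackedVStack` might be applied to a :py:class:`pylops_mpi.DistributedArray`. It is not part of this commit; the use of `pylops.MatrixMult`, the 10-samples-per-rank sizing, and the `mpirun` launch are illustrative assumptions.

```python
# Illustrative sketch (not from the diff): apply an MPIStackedVStack to a
# DistributedArray. Run with e.g. `mpirun -n 2 python this_script.py`.
import numpy as np
from mpi4py import MPI
import pylops
import pylops_mpi

rank = MPI.COMM_WORLD.Get_rank()
size = MPI.COMM_WORLD.Get_size()

# Model distributed across ranks (10 local samples per rank, an assumed size)
x = pylops_mpi.DistributedArray(global_shape=size * 10)
x[:] = np.ones(x.local_shape)

# Each rank wraps one local operator into an MPIBlockDiag; two such MPI
# operators are then stacked vertically with MPIStackedVStack
A = np.random.default_rng(rank).normal(size=(10, 10))
Bop = pylops_mpi.MPIBlockDiag(ops=[pylops.MatrixMult(A)])
Vop = pylops_mpi.MPIStackedVStack([Bop, Bop])

y = Vop @ x        # forward: a StackedDistributedArray with two blocks
xadj = Vop.H @ y   # adjoint: back to a single DistributedArray
```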
7 changes: 3 additions & 4 deletions pylops_mpi/StackedLinearOperator.py
@@ -15,7 +15,7 @@ class MPIStackedLinearOperator(ABC):
for StackedLinearOperators.
This class provides methods to perform matrix-vector product and adjoint matrix-vector
products using MPI.
products on a stack of MPILinearOperator objects.
.. note:: End users of pylops-mpi should not use this class directly but simply
use operators that are already implemented. This class is meant for
@@ -33,7 +33,8 @@ class MPIStackedLinearOperator(ABC):
"""

def __init__(self, shape: Optional[ShapeLike] = None,
dtype: Optional[DTypeLike] = None, base_comm: MPI.Comm = MPI.COMM_WORLD):
dtype: Optional[DTypeLike] = None,
base_comm: MPI.Comm = MPI.COMM_WORLD):
if shape:
self.shape = shape
if dtype:
@@ -110,7 +111,6 @@ def adjoint(self):
Adjoint of Operator
"""

return self._adjoint()

H = property(adjoint)
@@ -124,7 +124,6 @@ def transpose(self):
Transpose MPIStackedLinearOperator
"""

return self._transpose()

T = property(transpose)
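To illustrate the `adjoint`/`transpose` methods touched above, a short sketch of how the `.H` and `.T` properties could be accessed on a concrete `MPIStackedLinearOperator` subclass; the choice of `MPIStackedBlockDiag` with `pylops.FirstDerivative` blocks is an assumption, not part of the commit.

```python
# Illustrative sketch (not from the diff): H/T properties of a concrete
# MPIStackedLinearOperator subclass. Run under mpirun.
import numpy as np
from mpi4py import MPI
import pylops
import pylops_mpi

rank = MPI.COMM_WORLD.Get_rank()

# One local operator per rank, wrapped and then stacked diagonally
Dop = pylops_mpi.MPIBlockDiag(ops=[pylops.FirstDerivative(10, dtype=np.float64)])
Sop = pylops_mpi.MPIStackedBlockDiag(ops=[Dop, Dop])

Sadj = Sop.H    # adjoint operator, returned by the adjoint() method shown above
Stra = Sop.T    # transposed operator, returned by the transpose() method shown above
if rank == 0:
    print(Sop.shape)
```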
178 changes: 89 additions & 89 deletions pylops_mpi/basicoperators/BlockDiag.py
@@ -14,81 +14,81 @@
class MPIBlockDiag(MPILinearOperator):
r"""MPI Block-diagonal operator.
Create a block-diagonal operator from a set of linear operators using MPI.
Each rank must initialize this operator by providing one or more linear operators,
which will be applied within that rank.
Both model and data vectors must be of :class:`pylops_mpi.DistributedArray` type and partitioned between ranks
according to the shapes of the different linear operators.
Parameters
----------
ops : :obj:`list`
One or more :class:`pylops.LinearOperator` to be stacked.
base_comm : :obj:`mpi4py.MPI.Comm`, optional
Base MPI Communicator. Defaults to ``mpi4py.MPI.COMM_WORLD``.
dtype : :obj:`str`, optional
Type of elements in input array.
Attributes
----------
shape : :obj:`tuple`
Operator shape
Notes
-----
An MPI Block Diagonal operator is composed of N linear operators, represented by **L**.
Each rank has one or more :class:`pylops.LinearOperator`, which we represent here compactly
as :math:`\mathbf{L}_i` for rank :math:`i`.
Each operator performs forward mode operations using its corresponding model vector, denoted as **m**.
This vector is effectively a :class:`pylops_mpi.DistributedArray` partitioned at each rank in such a way that
its local shapes agree with those of the corresponding linear operators.
The result of each operator's forward mode is then collected from all ranks into a DistributedArray, referred to as **d**.
.. math::
\begin{bmatrix}
\mathbf{d}_1 \\
\mathbf{d}_2 \\
\vdots \\
\mathbf{d}_n
\end{bmatrix} =
\begin{bmatrix}
\mathbf{L}_1 & \mathbf{0} & \ldots & \mathbf{0} \\
\mathbf{0} & \mathbf{L}_2 & \ldots & \mathbf{0} \\
\vdots & \vdots & \ddots & \vdots \\
\mathbf{0} & \mathbf{0} & \ldots & \mathbf{L}_n
\end{bmatrix}
\begin{bmatrix}
\mathbf{m}_1 \\
\mathbf{m}_2 \\
\vdots \\
\mathbf{m}_n
\end{bmatrix}
Likewise, in adjoint mode each operator applies its adjoint to its portion of the input vector;
the results are then collected from all ranks into a DistributedArray,
again referred to as **d**.
.. math::
\begin{bmatrix}
\mathbf{d}_1 \\
\mathbf{d}_2 \\
\vdots \\
\mathbf{d}_n
\end{bmatrix} =
\begin{bmatrix}
\mathbf{L}_1^H & \mathbf{0} & \ldots & \mathbf{0} \\
\mathbf{0} & \mathbf{L}_2^H & \ldots & \mathbf{0} \\
\vdots & \vdots & \ddots & \vdots \\
\mathbf{0} & \mathbf{0} & \ldots & \mathbf{L}_n^H
\end{bmatrix}
\begin{bmatrix}
\mathbf{m}_1 \\
\mathbf{m}_2 \\
\vdots \\
\mathbf{m}_n
\end{bmatrix}
"""

@@ -135,23 +135,23 @@ def _rmatvec(self, x: DistributedArray) -> DistributedArray:
class MPIStackedBlockDiag(MPIStackedLinearOperator):
r"""MPI Stacked BlockDiag Operator
Create a stack of :class:`pylops_mpi.MPILinearOperator` operators stacked diagonally.
Create a diagonal stack of :class:`pylops_mpi.MPILinearOperator` operators.
Parameters
----------
ops : :obj:`list`
One or more :class:`pylops_mpi.MPILinearOperator` to be stacked.
Attributes
----------
shape : :obj:`tuple`
Operator shape
Notes
-----
An MPIStackedBlockDiag is composed of N :class:`pylops_mpi.MPILinearOperator` instances stacked along the diagonal.
These MPI operators will be applied sequentially, with distributed computations
performed within each operator.
"""

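As a usage note (not part of this diff), a sketch of how `MPIStackedBlockDiag` could act on a :class:`pylops_mpi.StackedDistributedArray`; the operator choices and the 6-samples-per-rank sizing are assumptions.

```python
# Illustrative sketch (not from the diff): MPIStackedBlockDiag applied to a
# StackedDistributedArray. Run under mpirun.
import numpy as np
from mpi4py import MPI
import pylops
import pylops_mpi

rank = MPI.COMM_WORLD.Get_rank()
size = MPI.COMM_WORLD.Get_size()

# Two MPI operators, one per block of the stacked model/data
Op1 = pylops_mpi.MPIBlockDiag(ops=[pylops.MatrixMult(np.eye(6) * (rank + 1))])
Op2 = pylops_mpi.MPIBlockDiag(ops=[pylops.FirstDerivative(6, dtype=np.float64)])
SBDiag = pylops_mpi.MPIStackedBlockDiag(ops=[Op1, Op2])

# Matching stacked model: one DistributedArray per MPI operator
m1 = pylops_mpi.DistributedArray(global_shape=size * 6)
m2 = pylops_mpi.DistributedArray(global_shape=size * 6)
m1[:] = np.ones(m1.local_shape)
m2[:] = np.arange(6, dtype=np.float64)
m = pylops_mpi.StackedDistributedArray([m1, m2])

d = SBDiag @ m       # [Op1 @ m1, Op2 @ m2] as a StackedDistributedArray
madj = SBDiag.H @ d  # adjoint applied block by block
```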
22 changes: 11 additions & 11 deletions pylops_mpi/optimization/basic.py
@@ -1,20 +1,20 @@
from typing import Optional, Tuple, Callable, Union

from pylops.utils import NDArray
from pylops_mpi import MPILinearOperator, DistributedArray
from pylops_mpi import MPILinearOperator, DistributedArray, StackedDistributedArray
from pylops_mpi.optimization.cls_basic import CG, CGLS


def cg(
Op: MPILinearOperator,
y: DistributedArray,
y: Union[DistributedArray, StackedDistributedArray],
x0: Optional[DistributedArray] = None,
niter: int = 10,
tol: float = 1e-4,
show: bool = False,
itershow: Tuple[int, int, int] = (10, 10, 10),
callback: Optional[Callable] = None,
) -> Tuple[DistributedArray, int, NDArray]:
) -> Tuple[Union[DistributedArray, StackedDistributedArray], int, NDArray]:
r"""Conjugate gradient
Solve a square system of equations given an MPILinearOperator ``Op`` and
@@ -24,9 +24,9 @@ def cg(
----------
Op : :obj:`pylops_mpi.MPILinearOperator`
Operator to invert of size :math:`[N \times N]`
y : :obj:`pylops_mpi.DistributedArray`
y : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`
DistributedArray of size (N,)
x0 : :obj:`pylops_mpi.DistributedArray`, optional
x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`, optional
Initial guess
niter : :obj:`int`, optional
Number of iterations
@@ -44,7 +44,7 @@ def cg(
Returns
-------
x : :obj:`pylops_mpi.DistributedArray`
x : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`
Estimated model of size (N,)
iit : :obj:`int`
Number of executed iterations
@@ -67,8 +67,8 @@

def cgls(
Op: MPILinearOperator,
y: DistributedArray,
x0: Optional[DistributedArray] = None,
y: Union[DistributedArray, StackedDistributedArray],
x0: Optional[Union[DistributedArray, StackedDistributedArray]] = None,
niter: int = 10,
damp: float = 0.0,
tol: float = 1e-4,
@@ -85,9 +85,9 @@ def cgls(
----------
Op : :obj:`pylops_mpi.MPILinearOperator`
MPI Linear Operator to invert of size :math:`[N \times M]`
y : :obj:`pylops_mpi.DistributedArray`
y : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`
DistributedArray of size (N,)
x0 : :obj:`pylops_mpi.DistributedArray`, optional
x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`, optional
Initial guess
niter : :obj:`int`, optional
Number of iterations
@@ -107,7 +107,7 @@
Returns
-------
x : :obj:`pylops_mpi.DistributedArray`
x : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`
Estimated model of size (M, )
istop : :obj:`int`
Gives the reason for termination
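Finally (not part of this diff), a sketch of how the widened signatures would let :func:`pylops_mpi.optimization.basic.cgls` be called with stacked model and data vectors; the operator, sizes, and solver settings are assumptions.

```python
# Illustrative sketch (not from the diff): cgls with StackedDistributedArray
# data and initial guess. Run under mpirun.
import numpy as np
from mpi4py import MPI
import pylops
import pylops_mpi
from pylops_mpi.optimization.basic import cgls

rank = MPI.COMM_WORLD.Get_rank()
size = MPI.COMM_WORLD.Get_size()

# Well-conditioned local blocks so the inversion converges quickly
A = np.eye(8) * (2.0 + rank)
Op1 = pylops_mpi.MPIBlockDiag(ops=[pylops.MatrixMult(A)])
Op2 = pylops_mpi.MPIBlockDiag(ops=[pylops.MatrixMult(2.0 * A)])
Op = pylops_mpi.MPIStackedBlockDiag(ops=[Op1, Op2])

# Stacked data vector
y1 = pylops_mpi.DistributedArray(global_shape=size * 8)
y2 = pylops_mpi.DistributedArray(global_shape=size * 8)
y1[:] = np.ones(y1.local_shape)
y2[:] = np.ones(y2.local_shape)
y = pylops_mpi.StackedDistributedArray([y1, y2])

# Zero-filled stacked initial guess
x01 = pylops_mpi.DistributedArray(global_shape=size * 8)
x02 = pylops_mpi.DistributedArray(global_shape=size * 8)
x01[:] = 0.0
x02[:] = 0.0
x0 = pylops_mpi.StackedDistributedArray([x01, x02])

xinv = cgls(Op, y, x0=x0, niter=20, show=False)[0]
```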