Refactor la.Vector.norm (#3108)
* refactor dolfinx norm functions

* refactor norm at python level from method to function

* refactor usage of Vector.norm to la.norm

* add int type vectors and scatterer test

* rewrite vector creation test

* fix typo in vector creation test

* ruff formatting and check

* add norm tests

* fix function copy/paste typos

* documentation update

* address PR comments

* remove unused dtypes

---------

Co-authored-by: nate-sime <>
Co-authored-by: Jørgen Schartum Dokken <[email protected]>
nate-sime and jorgensd authored Mar 26, 2024
1 parent a0a17fd commit 6189a7e
Showing 13 changed files with 147 additions and 61 deletions.
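The user-facing change in this commit is that norm computation moves from a method on `la.Vector` (`x.norm()`) to a free function `dolfinx.la.norm(x)`, mirroring the existing C++ free function `dolfinx::la::norm`. The following is a minimal sketch of the new call pattern, based on the demo and test changes in this diff; the mesh and index map are only illustrative, any `la.Vector` works:

```python
from mpi4py import MPI

import numpy as np

from dolfinx import la
from dolfinx.mesh import create_unit_square

msh = create_unit_square(MPI.COMM_WORLD, 5, 5)
im = msh.topology.index_map(0)

x = la.vector(im, dtype=np.float64)  # distributed vector over the vertex index map
x.array[:] = 1.0

# Before this commit: unorm = x.norm() or x.norm(la.Norm.l1)
# After this commit: norm is a module-level function (collective MPI operation).
unorm = la.norm(x)                # defaults to la.Norm.l2
unorm_l1 = la.norm(x, la.Norm.l1)
```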
2 changes: 1 addition & 1 deletion cpp/dolfinx/la/Vector.h
@@ -262,7 +262,7 @@ auto squared_norm(const V& a)
/// Compute the norm of the vector
/// @note Collective MPI operation
/// @param x A vector
/// @param type Norm type (supported types are \f$L^2\f$ and \f$L^\infty\f$)
/// @param type Norm type
template <class V>
auto norm(const V& x, Norm type = Norm::l2)
{
2 changes: 1 addition & 1 deletion python/demo/demo_elasticity.py
@@ -266,7 +266,7 @@ def σ(v):
# be called from all MPI ranks), but we print the norm only on rank 0.

# +
unorm = uh.x.norm()
unorm = la.norm(uh.x)
if msh.comm.rank == 0:
print("Solution vector norm:", unorm)
# -
10 changes: 5 additions & 5 deletions python/demo/demo_stokes.py
@@ -296,8 +296,8 @@ def nested_iterative_solver():
pfile_xdmf.write_function(p)

# Compute norms of the solution vectors
norm_u = u.x.norm()
norm_p = p.x.norm()
norm_u = la.norm(u.x)
norm_p = la.norm(p.x)
if MPI.COMM_WORLD.rank == 0:
print(f"(A) Norm of velocity coefficient vector (nested, iterative): {norm_u}")
print(f"(A) Norm of pressure coefficient vector (nested, iterative): {norm_p}")
@@ -397,7 +397,7 @@ def block_iterative_solver():
p.x.array[: (len(x.array_r) - offset)] = x.array_r[offset:]

# Compute the $L^2$ norms of the solution vectors
norm_u, norm_p = u.x.norm(), p.x.norm()
norm_u, norm_p = la.norm(u.x), la.norm(p.x)
if MPI.COMM_WORLD.rank == 0:
print(f"(B) Norm of velocity coefficient vector (blocked, iterative): {norm_u}")
print(f"(B) Norm of pressure coefficient vector (blocked, iterative): {norm_p}")
@@ -451,7 +451,7 @@ def block_direct_solver():
p.x.array[: (len(x.array_r) - offset)] = x.array_r[offset:]

# Compute the $L^2$ norms of the u and p vectors
norm_u, norm_p = u.x.norm(), p.x.norm()
norm_u, norm_p = la.norm(u.x), la.norm(p.x)
if MPI.COMM_WORLD.rank == 0:
print(f"(C) Norm of velocity coefficient vector (blocked, direct): {norm_u}")
print(f"(C) Norm of pressure coefficient vector (blocked, direct): {norm_p}")
@@ -537,7 +537,7 @@ def mixed_direct():
u, p = U.sub(0).collapse(), U.sub(1).collapse()

# Compute norms
norm_u, norm_p = u.x.norm(), p.x.norm()
norm_u, norm_p = la.norm(u.x), la.norm(p.x)
if MPI.COMM_WORLD.rank == 0:
print(f"(D) Norm of velocity coefficient vector (monolithic, direct): {norm_u}")
print(f"(D) Norm of pressure coefficient vector (monolithic, direct): {norm_p}")
36 changes: 25 additions & 11 deletions python/dolfinx/la.py
@@ -208,6 +208,9 @@ class Vector:
_cpp.la.Vector_float64,
_cpp.la.Vector_complex64,
_cpp.la.Vector_complex128,
_cpp.la.Vector_int8,
_cpp.la.Vector_int32,
_cpp.la.Vector_int64,
]

def __init__(
@@ -217,6 +220,9 @@ def __init__(
_cpp.la.Vector_float64,
_cpp.la.Vector_complex64,
_cpp.la.Vector_complex128,
_cpp.la.Vector_int8,
_cpp.la.Vector_int32,
_cpp.la.Vector_int64,
],
):
"""A distributed vector object.
@@ -258,17 +264,6 @@ def scatter_reverse(self, mode: InsertMode) -> None:
"""
self._cpp_object.scatter_reverse(mode)

def norm(self, type: _cpp.la.Norm = _cpp.la.Norm.l2) -> np.floating:
"""Compute a norm of the vector.
Args:
type: Norm type to compute.
Returns:
Computed norm.
"""
return self._cpp_object.norm(type)


def vector(map, bs=1, dtype: npt.DTypeLike = np.float64) -> Vector:
"""Create a distributed vector.
@@ -290,6 +285,12 @@ def vector(map, bs=1, dtype: npt.DTypeLike = np.float64) -> Vector:
vtype = _cpp.la.Vector_complex64
elif np.issubdtype(dtype, np.complex128):
vtype = _cpp.la.Vector_complex128
elif np.issubdtype(dtype, np.int8):
vtype = _cpp.la.Vector_int8
elif np.issubdtype(dtype, np.int32):
vtype = _cpp.la.Vector_int32
elif np.issubdtype(dtype, np.int64):
vtype = _cpp.la.Vector_int64
else:
raise NotImplementedError(f"Type {dtype} not supported.")

@@ -355,3 +356,16 @@ def is_orthonormal(basis, eps: float = 1.0e-12) -> bool:
if abs(x.dot(y)) > eps:
return False
return True


def norm(x: Vector, type: _cpp.la.Norm = _cpp.la.Norm.l2) -> np.floating:
"""Compute a norm of the vector.
Args:
x: Vector to measure.
type: Norm type to compute.
Returns:
Computed norm.
"""
return _cpp.la.norm(x._cpp_object, type)
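The `vector()` factory above now also covers integer scalar types (`int8`, `int32`, `int64`), matching the newly instantiated `_cpp.la.Vector_int*` classes. A short sketch of what this enables is given below, assuming the same index-map setup as the new unit tests; note that `la.norm` is not implemented for integer vectors (the new `test_vector_norm` marks those cases as an expected `TypeError`):

```python
from mpi4py import MPI

import numpy as np

from dolfinx import la
from dolfinx.mesh import create_unit_square

msh = create_unit_square(MPI.COMM_WORLD, 5, 5)
im = msh.topology.index_map(0)

# Integer dtypes are dispatched to _cpp.la.Vector_int8/_int32/_int64.
marker = la.vector(im, bs=3, dtype=np.int64)
assert marker.array.dtype == np.int64
assert marker.array.size == 3 * (im.size_local + im.num_ghosts)

# la.norm(marker) would raise TypeError: norms of integer vectors are unsupported.
```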
13 changes: 8 additions & 5 deletions python/dolfinx/wrappers/la.cpp
@@ -48,11 +48,6 @@ void declare_objects(nb::module_& m, const std::string& type)
.def(nb::init<const dolfinx::la::Vector<T>&>(), nb::arg("vec"))
.def_prop_ro("dtype", [](const dolfinx::la::Vector<T>&)
{ return dolfinx_wrappers::numpy_dtype<T>(); })
.def(
"norm",
[](const dolfinx::la::Vector<T>& self, dolfinx::la::Norm type)
{ return dolfinx::la::norm(self, type); },
"type"_a = dolfinx::la::Norm::l2)
.def_prop_ro("index_map", &dolfinx::la::Vector<T>::index_map)
.def_prop_ro("bs", &dolfinx::la::Vector<T>::bs)
.def_prop_ro(
@@ -178,6 +173,11 @@ void declare_objects(nb::module_& m)
template <typename T>
void declare_functions(nb::module_& m)
{
m.def(
"norm",
[](const dolfinx::la::Vector<T>& x, dolfinx::la::Norm type)
{ return dolfinx::la::norm(x, type); },
"vector"_a, "type"_a);
m.def(
"inner_product",
[](const dolfinx::la::Vector<T>& x, const dolfinx::la::Vector<T>& y)
@@ -285,6 +285,9 @@ void la(nb::module_& m)
nb::rv_policy::reference_internal);

// Declare objects that are templated over type
declare_objects<std::int8_t>(m, "int8");
declare_objects<std::int32_t>(m, "int32");
declare_objects<std::int64_t>(m, "int64");
declare_objects<float>(m, "float32");
declare_objects<double>(m, "float64");
declare_objects<std::complex<float>>(m, "complex64");
8 changes: 4 additions & 4 deletions python/test/unit/fem/test_assemble_submesh.py
@@ -90,7 +90,7 @@ def test_submesh_cell_assembly(d, n, k, space, ghost_mode):
assert A_mesh_0.squared_norm() == pytest.approx(
A_submesh.squared_norm(), rel=1.0e-4, abs=1.0e-4
)
assert b_mesh_0.norm() == pytest.approx(b_submesh.norm(), rel=1.0e-4)
assert la.norm(b_mesh_0) == pytest.approx(la.norm(b_submesh), rel=1.0e-4)
assert np.isclose(s_mesh_0, s_submesh)


@@ -114,7 +114,7 @@ def test_submesh_facet_assembly(n, k, space, ghost_mode):
assert A_submesh.squared_norm() == pytest.approx(
A_square_mesh.squared_norm(), rel=1.0e-5, abs=1.0e-5
)
assert b_submesh.norm() == pytest.approx(b_square_mesh.norm())
assert la.norm(b_submesh) == pytest.approx(la.norm(b_square_mesh))
assert np.isclose(s_submesh, s_square_mesh)


@@ -260,7 +260,7 @@ def coeff_expr(x):
b0 = fem.assemble_vector(L0)
fem.apply_lifting(b0.array, [a0], bcs=[[bc]])
b0.scatter_reverse(la.InsertMode.add)
assert np.isclose(b0.norm(), b.norm())
assert np.isclose(la.norm(b0), la.norm(b))

M0 = fem.form(M_ufl(f, g, measure_msh), entity_maps=entity_maps)
c0 = msh.comm.allreduce(fem.assemble_scalar(M0), op=MPI.SUM)
@@ -284,7 +284,7 @@ def coeff_expr(x):
b1 = fem.assemble_vector(L1)
fem.apply_lifting(b1.array, [a1], bcs=[[bc]])
b1.scatter_reverse(la.InsertMode.add)
assert np.isclose(b1.norm(), b.norm())
assert np.isclose(la.norm(b1), la.norm(b))

M1 = fem.form(M_ufl(f, g, measure_smsh), entity_maps=entity_maps)
c1 = msh.comm.allreduce(fem.assemble_scalar(M1), op=MPI.SUM)
6 changes: 3 additions & 3 deletions python/test/unit/fem/test_assembler.py
@@ -151,17 +151,17 @@ def test_basic_assembly(mode, dtype):
A.scatter_reverse()
assert isinstance(A, la.MatrixCSR)
assert normA == pytest.approx(A.squared_norm())
normb = b.norm()
normb = la.norm(b)
b.array[:] = 0
fem.assemble_vector(b.array, L)
b.scatter_reverse(la.InsertMode.add)
assert normb == pytest.approx(b.norm())
assert normb == pytest.approx(la.norm(b))

# Vector re-assembly - no zeroing (but need to zero ghost entries)
b.array[b.index_map.size_local * b.block_size :] = 0
fem.assemble_vector(b.array, L)
b.scatter_reverse(la.InsertMode.add)
assert 2 * normb == pytest.approx(b.norm())
assert 2 * normb == pytest.approx(la.norm(b))

# Matrix re-assembly (no zeroing)
fem.assemble_matrix(A, a)
6 changes: 3 additions & 3 deletions python/test/unit/fem/test_complex_assembler.py
@@ -35,7 +35,7 @@ def test_complex_assembly(complex_dtype):

b = assemble_vector(L1)
b.scatter_reverse(la.InsertMode.add)
bnorm = b.norm(la.Norm.l1)
bnorm = la.norm(b, la.Norm.l1)
b_norm_ref = abs(-2 + 3.0j)
assert bnorm == pytest.approx(b_norm_ref, rel=1e-5)

@@ -55,7 +55,7 @@ def test_complex_assembly(complex_dtype):

b = assemble_vector(L0)
b.scatter_reverse(la.InsertMode.add)
b1_norm = b.norm()
b1_norm = la.norm(b)

a_complex = form((1 + 1j) * inner(u, v) * dx, dtype=complex_dtype)
f = ufl.sin(2 * np.pi * x[0])
@@ -66,7 +66,7 @@ def test_complex_assembly(complex_dtype):
assert A1_norm == pytest.approx(A2_norm / 2)
b = assemble_vector(L2)
b.scatter_reverse(la.InsertMode.add)
b2_norm = b.norm(la.Norm.l2)
b2_norm = la.norm(b, la.Norm.l2)
assert b2_norm == pytest.approx(b1_norm)


6 changes: 3 additions & 3 deletions python/test/unit/fem/test_custom_jit_kernels.py
@@ -108,7 +108,7 @@ def test_numba_assembly(dtype):
b.scatter_reverse(dolfinx.la.InsertMode.add)

Anorm = np.sqrt(A.squared_norm())
bnorm = b.norm()
bnorm = la.norm(b)
assert np.isclose(Anorm, 56.124860801609124)
assert np.isclose(bnorm, 0.0739710713711999)

@@ -134,7 +134,7 @@ def test_coefficient(dtype):

b = dolfinx.fem.assemble_vector(L)
b.scatter_reverse(la.InsertMode.add)
bnorm = b.norm()
bnorm = la.norm(b)
assert np.isclose(bnorm, 2.0 * 0.0739710713711999)


@@ -273,4 +273,4 @@ def test_cffi_assembly():

b = fem.assemble_vector(L)
b.scatter_reverse(la.InsertMode.add)
assert np.isclose(b.norm(), 0.0739710713711999)
assert np.isclose(la.norm(b), 0.0739710713711999)
6 changes: 3 additions & 3 deletions python/test/unit/fem/test_function.py
@@ -150,7 +150,7 @@ def eval(self, x):
assert (w.x.array[:] == 1.0).all() # /NOSONAR

num_vertices = V.mesh.topology.index_map(0).size_global
assert np.isclose(w.x.norm(la.Norm.l1) - num_vertices, 0)
assert np.isclose(la.norm(w.x, la.Norm.l1) - num_vertices, 0)

f.t = 2.0
w.interpolate(f.eval)
@@ -172,7 +172,7 @@ def f(x):
assert x.min() == 1.0 # /NOSONAR

num_vertices = W.mesh.topology.index_map(0).size_global
assert round(w.x.norm(la.Norm.l1) - 6 * num_vertices, 7) == 0
assert round(la.norm(w.x, la.Norm.l1) - 6 * num_vertices, 7) == 0


@pytest.mark.parametrize("dtype,cdtype", [(np.float32, "float"), (np.float64, "double")])
@@ -213,7 +213,7 @@ def test_cffi_expression(dtype, cdtype):
f2.interpolate(lambda x: x[0] + x[1])

f1.x.array[:] -= f2.x.array
assert f1.x.norm() < 1.0e-12
assert la.norm(f1.x) < 1.0e-12


def test_interpolation_function(mesh):
2 changes: 1 addition & 1 deletion python/test/unit/fem/test_ghost_mesh_assembly.py
@@ -64,7 +64,7 @@ def test_ghost_mesh_assembly(mode, dx, ds):
# Check that the norms are the same for all three modes
normA = np.sqrt(A.squared_norm())
assert normA == pytest.approx(0.6713621455570528, rel=1.0e-5, abs=1.0e-8)
normb = b.norm()
normb = la.norm(b)
assert normb == pytest.approx(1.582294032953906, rel=1.0e-5, abs=1.0e-8)


83 changes: 62 additions & 21 deletions python/test/unit/la/test_matrix_vector.py
@@ -8,6 +8,7 @@
from mpi4py import MPI

import numpy as np
import pytest

from dolfinx import cpp as _cpp
from dolfinx import la
@@ -46,26 +47,66 @@ def test_create_matrix_csr():
assert np.allclose(A.to_dense(), zero)


def test_create_vector():
@pytest.mark.parametrize(
"dtype",
[
np.float32,
np.float64,
np.complex64,
np.complex128,
np.int8,
np.int32,
np.int64,
],
)
def test_create_vector(dtype):
"""Test creation of a distributed vector"""
mesh = create_unit_square(MPI.COMM_WORLD, 10, 11)
V = functionspace(mesh, ("Lagrange", 1))
map = V.dofmap.index_map
bs = V.dofmap.index_map_bs
mesh = create_unit_square(MPI.COMM_WORLD, 5, 5)
im = mesh.topology.index_map(0)

for bs in range(1, 4):
x = la.vector(im, bs=bs, dtype=dtype)
assert x.array.dtype == dtype
assert x.array.size == bs * (im.size_local + im.num_ghosts)


def xfail_norm_of_integral_type_vector(dtype):
return pytest.param(
dtype,
marks=pytest.mark.xfail(
reason="Norm of vector of integers not implemented", strict=True, raises=TypeError
),
)


x = la.vector(map)
assert x.array.dtype == np.float64
x = la.vector(map, bs=bs, dtype=np.float64)
assert x.array.dtype == np.float64

x = la.vector(map, dtype=np.float32)
assert x.array.dtype == np.float32
x = la.vector(map, dtype=np.complex64)
assert x.array.dtype == np.complex64
x = la.vector(map, dtype=np.complex128)
assert x.array.dtype == np.complex128

x0 = la.vector(map, dtype=np.complex128)
x1 = la.vector(map, bs=1, dtype=np.complex128)
x2 = la.vector(map, bs=4, dtype=np.complex128)
assert 4 * x0.array.shape[0] == 4 * x1.array.shape[0] == x2.array.shape[0]
@pytest.mark.parametrize(
"dtype",
[
np.float32,
np.float64,
np.complex64,
np.complex128,
xfail_norm_of_integral_type_vector(np.int8),
xfail_norm_of_integral_type_vector(np.int32),
xfail_norm_of_integral_type_vector(np.int64),
],
)
@pytest.mark.parametrize(
"norm_type",
[
la.Norm.l1,
la.Norm.l2,
la.Norm.linf,
pytest.param(
la.Norm.frobenius,
marks=pytest.mark.xfail(reason="Norm type not supported for vector", strict=True),
),
],
)
def test_vector_norm(dtype, norm_type):
mesh = create_unit_square(MPI.COMM_WORLD, 5, 5)
im = mesh.topology.index_map(0)
x = la.vector(im, dtype=dtype)
x.array[:] = 0.0
normed_value = la.norm(x, norm_type)
assert np.isclose(normed_value, 0.0)