diff --git a/docs/changelog.rst b/docs/changelog.rst
index b98820e2..47ce508d 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -33,6 +33,10 @@ Version TBD (unreleased)
   documentation for more details, important caveats, and an example
   policy. (PR `#767 <https://github.com/wjakob/nanobind/pull/767>`__).
 
+- Added support for sparse ``Eigen::Map<..>`` types from the `Eigen library
+  <https://eigen.tuxfamily.org/index.php?title=Main_Page>`__. (PR `#782
+  <https://github.com/wjakob/nanobind/pull/782>`__).
+
 Version 2.2.0 (October 3, 2024)
 -------------------------------
 
diff --git a/docs/eigen.rst b/docs/eigen.rst
index 44a6c611..38b84f8b 100644
--- a/docs/eigen.rst
+++ b/docs/eigen.rst
@@ -112,9 +112,11 @@ Eigen types:
 
    #include <nanobind/eigen/sparse.h>
 
-The ``Eigen::SparseMatrix<..>`` type maps to either ``scipy.sparse.csr_matrix``
-or ``scipy.sparse.csc_matrix`` depending on whether row- or column-major
-storage is used.
+The ``Eigen::SparseMatrix<..>`` and ``Eigen::Map<Eigen::SparseMatrix<..>>``
+types map to either ``scipy.sparse.csr_matrix`` or ``scipy.sparse.csc_matrix``
+depending on whether row- or column-major storage is used.
 
 There is no support for Eigen sparse vectors because an equivalent type does
 not exist as part of ``scipy.sparse``.
+
+
diff --git a/include/nanobind/eigen/sparse.h b/include/nanobind/eigen/sparse.h
index 718fef2f..6149c93a 100644
--- a/include/nanobind/eigen/sparse.h
+++ b/include/nanobind/eigen/sparse.h
@@ -146,15 +146,127 @@ template <typename T> struct type_caster<T, enable_if_t<is_eigen_sparse_matrix_v<T>>> {
 /// Caster for Eigen::Map<Eigen::SparseMatrix<...>>, still needs to be implemented.
 template <typename T>
 struct type_caster<Eigen::Map<T>, enable_if_t<is_eigen_sparse_matrix_v<T>>> {
+    using Scalar = typename T::Scalar;
+    using StorageIndex = typename T::StorageIndex;
+    using Index = typename T::Index;
+    using SparseMap = Eigen::Map<T>;
     using Map = Eigen::Map<T>;
     using SparseMatrixCaster = type_caster<T>;
-    static constexpr auto Name = SparseMatrixCaster::Name;
+    static constexpr bool RowMajor = T::IsRowMajor;
+
+    using ScalarNDArray = ndarray<numpy, Scalar, shape<-1>>;
+    using StorageIndexNDArray = ndarray<numpy, StorageIndex, shape<-1>>;
+
+    using ScalarCaster = make_caster<ScalarNDArray>;
+    using StorageIndexCaster = make_caster<StorageIndexNDArray>;
+
+    static constexpr auto Name = const_name<RowMajor>("scipy.sparse.csr_matrix[",
+                                                      "scipy.sparse.csc_matrix[") +
+                                 make_caster<Scalar>::Name + const_name("]");
+
     template <typename T_> using Cast = Map;
     template <typename T_> static constexpr bool can_cast() { return true; }
 
-    bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept = delete;
+    ScalarCaster data_caster;
+    StorageIndexCaster indices_caster, indptr_caster;
+    Index rows, cols, nnz;
+
+    bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
+        // Disable implicit conversions.
+        //
+        // Note: it is not yet clear that clearing the flag here is sufficient;
+        // the ndarray caster appears to re-enable implicit conversion internally.
+        return from_python_(src, flags & ~(uint8_t) cast_flags::convert, cleanup);
+    }
+
+    bool from_python_(handle src, uint8_t flags, cleanup_list *cleanup) noexcept {
+        object obj = borrow(src);
+        try {
+            object matrix_type = module_::import_("scipy.sparse").attr(RowMajor ? "csr_matrix" : "csc_matrix");
+            if (!obj.type().is(matrix_type))
+                obj = matrix_type(obj);
+        } catch (const python_error &) {
+            return false;
+        }
+
+        // Despite the flag manipulation above, the casters below may still
+        // perform conversions. A conversion is acceptable for const maps, as
+        // long as each caster owns the converted array; for a non-const map it
+        // would silently write into a temporary. The dense Map caster raises a
+        // RuntimeError in that situation rather than converting.
+        if (object data_o = obj.attr("data"); !data_caster.from_python(data_o, flags, cleanup))
+            return false;
+
+        if (object indices_o = obj.attr("indices"); !indices_caster.from_python(indices_o, flags, cleanup))
+            return false;
+
+        if (object indptr_o = obj.attr("indptr"); !indptr_caster.from_python(indptr_o, flags, cleanup))
+            return false;
 
-    static handle from_cpp(const Map &v, rv_policy policy, cleanup_list *cleanup) noexcept = delete;
+        object shape_o = obj.attr("shape"), nnz_o = obj.attr("nnz");
+        try {
+            if (len(shape_o) != 2)
+                return false;
+            rows = cast<Index>(shape_o[0]);
+            cols = cast<Index>(shape_o[1]);
+            nnz = cast<Index>(nnz_o);
+        } catch (...) {
+            return false;
+        }
+
+        return true;
+    }
+
+    static handle from_cpp(const Map &v, rv_policy policy, cleanup_list *) noexcept {
+        if (!v.isCompressed()) {
+            PyErr_SetString(PyExc_ValueError,
+                            "nanobind: unable to return an Eigen sparse matrix that is not in a compressed format. "
+                            "Please call `.makeCompressed()` before returning the value on the C++ end.");
+            return handle();
+        }
+
+        object matrix_type;
+        try {
+            matrix_type = module_::import_("scipy.sparse").attr(RowMajor ? "csr_matrix" : "csc_matrix");
+        } catch (python_error &e) {
+            e.restore();
+            return handle();
+        }
+
+        const Index rows = v.rows(), cols = v.cols();
+        const size_t data_shape[] = { (size_t) v.nonZeros() };
+        const size_t outer_indices_shape[] = { (size_t) ((RowMajor ? rows : cols) + 1) };
+
+        // A map does not own its storage: alias the mapped buffers directly,
+        // unless a move was requested, in which case the mapped data is copied
+        // into a new matrix whose lifetime is tied to a capsule.
+        Scalar *values = const_cast<Map &>(v).valuePtr();
+        StorageIndex *inner = const_cast<Map &>(v).innerIndexPtr(),
+                     *outer = const_cast<Map &>(v).outerIndexPtr();
+        object owner;
+        if (policy == rv_policy::move) {
+            T *temp = new T(v);
+            owner = capsule(temp, [](void *p) noexcept { delete (T *) p; });
+            values = temp->valuePtr();
+            inner = temp->innerIndexPtr();
+            outer = temp->outerIndexPtr();
+        }
+
+        ScalarNDArray data(values, 1, data_shape, owner);
+        StorageIndexNDArray outer_indices(outer, 1, outer_indices_shape, owner);
+        StorageIndexNDArray inner_indices(inner, 1, data_shape, owner);
+
+        try {
+            return matrix_type(nanobind::make_tuple(
+                                   std::move(data), std::move(inner_indices), std::move(outer_indices)),
+                               nanobind::make_tuple(rows, cols))
+                .release();
+        } catch (python_error &e) {
+            e.restore();
+            return handle();
+        }
+    }
+
+    operator Map() {
+        ScalarNDArray &values = data_caster.value;
+        StorageIndexNDArray &inner_indices = indices_caster.value;
+        StorageIndexNDArray &outer_indices = indptr_caster.value;
+        return SparseMap(rows, cols, nnz, outer_indices.data(), inner_indices.data(), values.data());
+    }
 };
diff --git a/tests/test_eigen.cpp b/tests/test_eigen.cpp
index 8e555546..6fde0311 100644
--- a/tests/test_eigen.cpp
+++ b/tests/test_eigen.cpp
@@ -2,6 +2,7 @@
 #include <nanobind/eigen/dense.h>
 #include <nanobind/eigen/sparse.h>
 #include <nanobind/trampoline.h>
+#include <nanobind/stl/tuple.h>
 
 namespace nb = nanobind;
 
@@ -166,8 +167,20 @@ NB_MODULE(test_eigen_ext, m) {
         assert(!m.isCompressed());
        return m.markAsRValue();
     });
+    // This function doesn't appear to be called in tests/test_eigen.py
     m.def("sparse_complex", []() -> Eigen::SparseMatrix<std::complex<double>> { return {}; });
 
+    m.def("sparse_map_c", [](const Eigen::Map<Eigen::SparseMatrix<float>> &) { });
+    m.def("sparse_map_r", [](const Eigen::Map<Eigen::SparseMatrix<float, Eigen::RowMajor>> &) { });
+    m.def("sparse_update_map_to_zero_c", [](nb::object obj) {
+        Eigen::Map<Eigen::SparseMatrix<float>> c = nb::cast<Eigen::Map<Eigen::SparseMatrix<float>>>(obj);
+        for (Eigen::Index i = 0; i < c.nonZeros(); ++i) { c.valuePtr()[i] = 0; }
+    });
+    m.def("sparse_update_map_to_zero_r", [](nb::object obj) {
+        Eigen::Map<Eigen::SparseMatrix<float, Eigen::RowMajor>> r = nb::cast<Eigen::Map<Eigen::SparseMatrix<float, Eigen::RowMajor>>>(obj);
+        for (Eigen::Index i = 0; i < r.nonZeros(); ++i) { r.valuePtr()[i] = 0; }
+    });
+
     /// issue #166
     using Matrix1d = Eigen::Matrix<double, 1, 1>;
     try {
diff --git a/tests/test_eigen.py b/tests/test_eigen.py
index 4e402ee3..ef80f2d8 100644
--- a/tests/test_eigen.py
+++ b/tests/test_eigen.py
@@ -374,3 +374,29 @@ def test14_single_element():
     a = np.array([[1]], dtype=np.uint32)
     assert a.ndim == 2 and a.shape == (1, 1)
     t.addMXuCC(a, a)
+
+
+@needs_numpy_and_eigen
+def test15_sparse_map():
+    pytest.importorskip("scipy")
+    import scipy.sparse
+    c = scipy.sparse.csc_matrix([[1, 0], [0, 1]], dtype=np.float32)
+    # These should be copy-less
+    t.sparse_map_c(c)
+    r = scipy.sparse.csr_matrix([[1, 0], [0, 1]], dtype=np.float32)
+    t.sparse_map_r(r)
+    # These should also be accepted, but involve a conversion (copy)
+    t.sparse_map_c(r)
+    t.sparse_map_r(c)
+
+    t.sparse_update_map_to_zero_c(c)
+    assert c.sum() == 0
+    t.sparse_update_map_to_zero_r(r)
+    assert r.sum() == 0
+
+    c = scipy.sparse.csc_matrix([[1, 0], [0, 1]], dtype=np.float32)
+    # Shouldn't this fail like t.castToMapVXi above?
+    t.sparse_update_map_to_zero_r(c)
+
+
+
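Usage sketch (not part of the diff above): with the new caster from ``include/nanobind/eigen/sparse.h``, a bound function can accept a ``scipy.sparse`` matrix as an ``Eigen::Map`` and mutate its nonzero values in place, without copying. The module and function names below (``sparse_demo``, ``scale_nonzeros``) are hypothetical and only illustrate the intended behaviour, assuming the caster works as implemented above.

// Hypothetical example binding (illustration only, not part of this PR).
#include <nanobind/nanobind.h>
#include <nanobind/eigen/sparse.h>

namespace nb = nanobind;
using SparseRowMajor = Eigen::SparseMatrix<float, Eigen::RowMajor>;

NB_MODULE(sparse_demo, m) {
    // Accepts a scipy.sparse.csr_matrix; the map aliases its `data` array, so
    // the in-place scaling below is visible on the Python side without a copy.
    m.def("scale_nonzeros", [](Eigen::Map<SparseRowMajor> mat, float factor) {
        for (Eigen::Index i = 0; i < mat.nonZeros(); ++i)
            mat.valuePtr()[i] *= factor;
    });
}

On the Python side, ``scale_nonzeros(csr, 2.0)`` would then double ``csr.data`` in place, mirroring the behaviour exercised by ``sparse_update_map_to_zero_r`` in ``tests/test_eigen.py``.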